repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
rstoneback/pysatMagVect | pysatMagVect/_core.py | ecef_to_geocentric | python | def ecef_to_geocentric(x, y, z, ref_height=None):
if ref_height is None:
ref_height = earth_geo_radius
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
colatitude = np.rad2deg(np.arccos(z / r))
longitude = np.rad2deg(np.arctan2(y, x))
latitude = 90. - colatitude
return latitude, longitude, r - ref_height | Convert ECEF into geocentric coordinates
Parameters
----------
x : float or array_like
ECEF-X in km
y : float or array_like
ECEF-Y in km
z : float or array_like
ECEF-Z in km
ref_height : float or array_like
Reference radius used for calculating height.
Defaults to average radius of 6371 km
Returns
-------
latitude, longitude, altitude
numpy arrays of locations in degrees, degrees, and km | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L51-L79 | null |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geoncentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF.

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # total distance from Earth's center
    radius = earth_geo_radius + altitude
    # projection of the radius onto the equatorial plane
    equatorial = radius * np.cos(lat_rad)
    x = equatorial * np.cos(lon_rad)
    y = equatorial * np.sin(lon_rad)
    z = radius * np.sin(lat_rad)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates.

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude measured up from the equator; arccos gives colatitude
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    longitude = np.rad2deg(np.arctan2(y, x))
    return latitude, longitude, radius - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF.

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # first eccentricity of the WGS84 ellipsoid
    ecc = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_curve = earth_a / np.sqrt(1. - ecc ** 2 * np.sin(lat_rad) ** 2)
    horizontal = (r_curve + altitude) * np.cos(lat_rad)
    x = horizontal * np.cos(lon_rad)
    y = horizontal * np.sin(lon_rad)
    # z uses the polar radius of curvature term
    z = (r_curve * (1. - ecc ** 2) + altitude) * np.sin(lat_rad)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If method is not 'closed' or 'iterative'.

    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'
    if method not in ('closed', 'iterative'):
        # previously an unknown method string fell through both branches and
        # produced a NameError on the undefined outputs; fail fast instead
        raise ValueError("method must be 'closed' or 'iterative'")
    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3
    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)
    if method == 'closed':
        # closed form solution
        # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
        # second eccentricity
        e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
        # parametric (reduced) latitude of the point
        theta = np.arctan2(z*earth_a, p*earth_b)
        latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3,
                              p - e2*earth_a*np.cos(theta)**3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
        # another possibility
        # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf
    else:
        # iterative method
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
        # fixed number of refinement passes; converges quickly in practice
        for i in np.arange(6):
            r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n
    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF.

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    lat = np.radians(glat)
    lon = np.radians(glong)
    # precompute the trig terms of the rotation matrix
    sin_lat, cos_lat = np.sin(lat), np.cos(lat)
    sin_lon, cos_lon = np.sin(lon), np.cos(lon)
    x = -east * sin_lon - north * sin_lat * cos_lon + up * cos_lat * cos_lon
    y = east * cos_lon - north * sin_lat * sin_lon + up * cos_lat * sin_lon
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up.

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions

    """
    lat = np.radians(glat)
    lon = np.radians(glong)
    # precompute the trig terms of the (transposed) rotation matrix
    sin_lat, cos_lat = np.sin(lat), np.cos(lat)
    sin_lon, cos_lon = np.sin(lon), np.cos(lon)
    east = -x * sin_lon + y * cos_lon
    north = -x * sin_lat * cos_lon - y * sin_lat * sin_lon + z * cos_lat
    up = x * cos_lat * cos_lon + y * cos_lat * sin_lon + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ECEF onto a different basis, with components also expressed in ECEF.

    Parameters
    ----------
    x, y, z : float or array-like
        ECEF-X, -Y, and -Z components of the vector to project
    xx, xy, xz : float or array-like
        ECEF components of the x unit vector of the new basis
    yx, yy, yz : float or array-like
        ECEF components of the y unit vector of the new basis
    zx, zy, zz : float or array-like
        ECEF components of the z unit vector of the new basis

    Returns
    -------
    out_x, out_y, out_z
        Vector components along each unit vector of the new basis

    """
    # each output component is the dot product of the input vector
    # with the corresponding basis unit vector
    projected_x = x * xx + y * xy + z * xz
    projected_y = x * yx + y * yy + z * yz
    projected_z = x * zx + y * zy + z * zz
    return projected_x, projected_y, projected_z
def normalize_vector(x, y, z):
    """Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x, y, z components

    """
    # Euclidean length of the input vector
    magnitude = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    return x / magnitude, y / magnitude, z / magnitude
def cross_product(x1, y1, z1, x2, y2, z2):
    """Cross product of two vectors, v1 x v2.

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Cross product components (not normalized)

    """
    # standard determinant expansion of v1 x v2
    cx = y1 * z2 - y2 * z1
    cy = z1 * x2 - x1 * z2
    cz = x1 * y2 - y1 * x2
    return cx, cy, cz
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal counter of how many recursive traces have been taken;
        callers should leave this as None.
    recurse : bool
        If True, continue tracing recursively from the current end point
        when the target height has not been reached (up to 1000 recursions).

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If the target altitude is not reached after 1000 recursive traces.

    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate along the magnetic field; igrf.igrf_step supplies the local
    # field direction at each sub-step
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
                                         # mxstep=500)
    # check that we reached final altitude
    check = trace_north[-1, :]
    x, y, z = ecef_to_geodetic(*check)
    if height == 0:
        # use a small positive reference so the relative tolerance test
        # below remains meaningful for a zero target height
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    # NOTE: `&` is a bitwise-and of a bool with a comparison result here;
    # equivalent to `and` for these operand types
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # stack the recursive continuation onto the trace so far
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        # filter points to terminate at point closest to target height
        # code below not correct, we want the first poiint that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Traces from the input location in both directions along the field and
    joins the results into a single south-to-north trace.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace anti-field-aligned (toward the southern footpoint) ...
    southward = field_line_trace(init, date, -1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # ... then field-aligned (toward the northern footpoint)
    northward = field_line_trace(init, date, 1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # reverse the southern half so positions run south to north, and drop
    # its copy of the starting location so it isn't included twice
    return np.vstack((southward[::-1][:-1, :], northward))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
    The zonal vector is calculated by field-line tracing from
    the input locations toward the footpoint locations at ref_height.
    The cross product of these two vectors is taken to define the plane of
    the magnetic field. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector with the field-aligned direction is removed (optional).
    The meridional unit vector is defined via the cross product of the
    zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Steps along the field line at which positions are reported when
        tracing. Defaults to np.arange(max_steps).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z

    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # filter longitudes into [0, 360) (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = []
    north_y = []
    north_z = []
    south_x = []
    south_y = []
    south_z = []
    bn = []
    be = []
    bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # trace to northern and southern footpoints at ref_height
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year + 1, 1, 1) - datetime.datetime(time.year, 1, 1)).days
        date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn)
        be.append(tbe)
        bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # calculate unit vector from satellite pointing to northern footpoint
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    # and to the southern footpoint
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic unit vector; IGRF components are north/east/down,
    # so up is -bd
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # take cross product of southward and northward vectors to get the zonal vector
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # normalize to produce the zonal unit vector
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    # previously this filtering ran unconditionally, ignoring the
    # filter_zonal parameter; now it is properly optional
    if filter_zonal:
        # remove any field aligned component to the zonal vector
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # compute meridional vector
    # cross product of zonal and magnetic unit vector
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculting the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form the
    last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        if 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards field line.
    field_step_size : float
        Step size (km) used to produce the supplied field line trace; sets
        the span of the high resolution re-trace around closest approach.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.

    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean
    repeat=True
    # first run boolean
    first=True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # print (unit_steps, scalar/unit_steps)
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            # # reduce number of elements we really need to check
            # field_copy = field_copy[min_idx-100:min_idx+100]
            # # difference with position
            # diff = field_copy - pos_step
            # diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # # find closest one
            # min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step, final position, and closest approach distance
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : datetime
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not 'meridional', 'zonal', or 'aligned'.

    """
    # validate direction up front; previously an unsupported value only
    # surfaced as a NameError after an expensive unit vector calculation
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be 'meridional', 'zonal', or 'aligned'")
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for _ in np.arange(num_steps):
        # x, y, z in ECEF; convert to geodetic
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions at the current location
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
            [lat], [lon], [alt], [date],
            steps=field_steps,
            max_steps=field_max_steps,
            step_size=field_step_size,
            ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        else:
            # 'aligned'
            ux, uy, uz = bx, by, bz
        # take a step along direction
        x = x + step_size * ux[0] * scalar
        y = y + step_size * uy[0] * scalar
        z = z + step_size * uz[0] * scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (float, float, float, float, float, float)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)

    """
    # convert input locations to ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # coarse trace parameters
    coarse_step = 100.
    coarse_max = 1000
    coarse_steps = np.arange(coarse_max)
    # fine trace parameters; span one coarse step in each direction
    fine_step = .01
    fine_max = int(coarse_step / fine_step) + 10
    fine_steps = np.arange(fine_max)
    # prepare output
    out_x = []
    out_y = []
    out_z = []
    for ecef_x, ecef_y, ecef_z, date in zip(ecef_xs, ecef_ys, ecef_zs, dates):
        # full coarse trace through the input point
        location = np.array([ecef_x, ecef_y, ecef_z])
        trace = full_field_line(location, date, 0.,
                                steps=coarse_steps,
                                step_size=coarse_step,
                                max_steps=coarse_max)
        # geodetic height of every trace point; apex is the maximum
        _, _, heights = ecef_to_geodetic(trace[:, 0], trace[:, 1], trace[:, 2])
        apex_idx = np.argmax(heights)
        # refine with a short, high resolution trace around the coarse apex;
        # recurse False ensures only max_steps are taken
        trace = full_field_line(trace[apex_idx, :], date, 0.,
                                steps=fine_steps,
                                step_size=fine_step,
                                max_steps=fine_max,
                                recurse=False)
        _, _, heights = ecef_to_geodetic(trace[:, 0], trace[:, 1], trace[:, 2])
        apex_idx = np.argmax(heights)
        # collect refined apex position
        out_x.append(trace[apex_idx, 0])
        out_y.append(trace[apex_idx, 1])
        out_z.append(trace[apex_idx, 2])
    out_x = np.array(out_x)
    out_y = np.array(out_y)
    out_z = np.array(out_z)
    # also report the apex in geodetic coordinates
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and goes through footpoint. At footpoint, steps along vector direction
    in both positive and negative directions, then traces back to opposite
    footpoint. Back at input location, steps toward those new field lines
    (edge_length) along vector direction until hitting distance of minimum
    approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest approach.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.

    Raises
    ------
    ValueError
        If direction is not 'north' or 'south'.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    else:
        # previously any other value left `direct` undefined and raised
        # NameError inside the loop below; fail fast with a clear message
        raise ValueError("direction must be 'north' or 'south'")
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closet approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through positive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input location.

    From each input location, a step along both the positive and negative
    vector_direction is taken, and the apex locations for those stepped points
    are calculated. The difference in position between these apex locations is
    the total centered distance between magnetic field lines at the magnetic
    apex when starting locally with a field line half distance of edge_length.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at the input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from the input location towards a new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations (km).

    """
    # input locations in ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    for ecef_x, ecef_y, ecef_z, date in zip(ecef_xs, ecef_ys, ecef_zs, dates):
        # take step from location along + vector direction,
        # then get the apex location for the stepped point
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z = apex_location_info(
            [plus_lat], [plus_lon], [plus_alt], [date])[:3]
        # take step from location along - vector direction,
        # then get the apex location for the stepped point
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z = apex_location_info(
            [minus_lat], [minus_lon], [minus_alt], [date])[:3]
        # difference in apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0] - minus_apex_x[0])**2 +
                                        (plus_apex_y[0] - minus_apex_y[0])**2 +
                                        (plus_apex_z[0] - minus_apex_z[0])**2))
    return np.array(apex_edge_length)
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
        Directions refer to the ion motion direction e.g. the zonal
        scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size used for field line integration. Defaults to 100 km.
    max_steps : int
        Maximum number of field line integration steps. Defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    # NOTE: a meridional E-field corresponds to zonal ExB drift, hence the
    # cross-naming between vector_direction below and the scalar labels
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                            glons, alts, dates, 'north',
                                            'meridional',
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            edge_length=25.,
                                            edge_steps=5)
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                            glons, alts, dates, 'north',
                                            'zonal',
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            edge_length=25.,
                                            edge_steps=5)
    # southern footpoint mappings
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                            glons, alts, dates, 'south',
                                            'meridional',
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            edge_length=25.,
                                            edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                            glons, alts, dates, 'south',
                                            'zonal',
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            edge_length=25.,
                                            edge_steps=5)
    # equatorial (apex) mappings
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'meridional',
                                                                edge_length=25.,
                                                                edge_steps=5)
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'zonal',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # normalize by the full 50 km (2 x 25 km edge_length) local step
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial (mapping direction is inverted relative to footpoints)
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
    if e_field_scaling_only:
        # prepare output
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            # IGRF12 requires a decimal-year float date
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                                [glat], [glon],
                                                                [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # scalar for the northern footpoint electric field based on distances
            # for drift also need to include the magnetic field, drift = E/B
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar

    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | geodetic_to_ecef | python | def geodetic_to_ecef(latitude, longitude, altitude):
ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(np.deg2rad(latitude)) ** 2)
# colatitude = 90. - latitude
x = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))
y = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))
z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(np.deg2rad(latitude))
return x, y, z | Convert WGS84 geodetic coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geodetic latitude (degrees)
longitude : float or array_like
Geodetic longitude (degrees)
altitude : float or array_like
Geodetic Height (km) above WGS84 reference ellipsoid.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L82-L110 | null |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geocentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    # angles in radians for the trig calls below
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # distance from Earth's center, spherical Earth assumed
    radius = earth_geo_radius + altitude
    # standard spherical to Cartesian conversion
    x = radius * np.cos(lat_r) * np.cos(lon_r)
    y = radius * np.cos(lat_r) * np.sin(lon_r)
    z = radius * np.sin(lat_r)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # radial distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude from the polar angle, longitude from the equatorial plane
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    longitude = np.rad2deg(np.arctan2(y, x))
    return latitude, longitude, radius - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # first eccentricity of the WGS84 reference ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(lat_r) ** 2)
    # equatorial-plane components share the (r_n + altitude) factor
    equatorial = (r_n + altitude) * np.cos(lat_r)
    x = equatorial * np.cos(lon_r)
    y = equatorial * np.sin(lon_r)
    # polar axis component uses the reduced radius of curvature
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(lat_r)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If method is not 'closed' or 'iterative'.

    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'
    if method not in ('closed', 'iterative'):
        # previously an unknown method string fell through to a NameError
        # on the undefined latitude; fail fast with a clear message instead
        raise ValueError("method must be 'closed' or 'iterative'")

    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3

    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)

    if method == 'closed':
        # closed form solution
        # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
        # another possibility
        # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf
        e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
        theta = np.arctan2(z*earth_a, p*earth_b)
        latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3,
                              p - e2*earth_a*np.cos(theta)**3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
    else:
        # iterative method
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
        for i in np.arange(6):
            # refine curvature radius, height, and latitude in turn
            r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n

    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    # trig terms of the local vertical/horizontal directions
    sin_lat = np.sin(np.radians(glat))
    cos_lat = np.cos(np.radians(glat))
    sin_lon = np.sin(np.radians(glong))
    cos_lon = np.cos(np.radians(glong))
    # rotate local ENU components into the Earth-fixed frame
    x = -east * sin_lon - north * cos_lon * sin_lat + up * cos_lon * cos_lat
    y = east * cos_lon - north * sin_lon * sin_lat + up * sin_lon * cos_lat
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions

    """
    # trig terms of the local vertical/horizontal directions
    sin_lat = np.sin(np.radians(glat))
    cos_lat = np.cos(np.radians(glat))
    sin_lon = np.sin(np.radians(glong))
    cos_lon = np.cos(np.radians(glong))
    # rotate Earth-fixed components into the local ENU frame
    east = -x * sin_lon + y * cos_lon
    north = -x * cos_lon * sin_lat - y * sin_lon * sin_lat + z * cos_lat
    up = x * cos_lon * cos_lat + y * sin_lon * cos_lat + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ecef onto different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF components of the x unit vector of the new basis
    yx, yy, yz : float or array-like
        ECEF components of the y unit vector of the new basis
    zx, zy, zz : float or array-like
        ECEF components of the z unit vector of the new basis

    Returns
    -------
    out_x, out_y, out_z
        Vector components along the supplied basis unit vectors

    """
    # each output component is the dot product of the input vector
    # with the corresponding basis unit vector
    proj_x = x * xx + y * xy + z * xz
    proj_y = x * yx + y * yy + z * yz
    proj_z = x * zx + y * zy + z * zz
    return proj_x, proj_y, proj_z
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components

    """
    # Euclidean length of the input vector
    magnitude = np.sqrt(x * x + y * y + z * z)
    return x / magnitude, y / magnitude, z / magnitude
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of the cross product vector

    """
    # standard determinant expansion of v1 x v2
    out_x = y1 * z2 - y2 * z1
    out_y = z1 * x2 - x1 * z2
    out_z = x1 * y2 - y1 * x2
    return out_x, out_y, out_z
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal bookkeeping of recursion depth; callers should leave as None.
    recurse : bool
        If True, keep tracing recursively when the integration terminates
        above the requested height.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).
        NOTE(review): output length varies with recursion depth.

    Raises
    ------
    RuntimeError
        If the target altitude is not reached within 1000 recursive calls.

    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate along the field direction; odeint determines internal substeps
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False)
    # check that we reached final altitude
    check = trace_north[-1, :]
    x, y, z = ecef_to_geodetic(*check)
    # when tracing to the surface, use a small positive altitude for the check
    if height == 0:
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # stack the initial trace with the recursive continuation
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        # NOTE(review): trace is not trimmed to the sample closest to the
        # target height; the last point is merely the first at/below it
        return trace_north
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Traces in both directions from the starting point and combines the
    results into a single south-to-north field line.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace anti-field-aligned (toward the southern footpoint) first
    southern = field_line_trace(init, date, -1., height,
                                steps=steps,
                                step_size=step_size,
                                max_steps=max_steps,
                                **kwargs)
    # then field-aligned (toward the northern footpoint)
    northern = field_line_trace(init, date, 1., height,
                                steps=steps,
                                step_size=step_size,
                                max_steps=max_steps,
                                **kwargs)
    # stitch south-to-north: reverse the southern leg and drop its first
    # sample (the shared starting point) so init is not duplicated
    return np.vstack((southern[:0:-1, :], northern))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
    The zonal vector is calculated by field-line tracing from
    the input locations toward the footpoint locations at ref_height.
    The cross product of these two vectors is taken to define the plane of
    the magnetic field. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector with the field-aligned direction is removed (optional).
    The meridional unit vector is defined via the cross product of the
    zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Steps along the field line at which trace positions are reported.
        Defaults to np.arange(max_steps).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z

    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # filter longitudes (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = []; north_y = []; north_z = []
    south_x = []; south_y = []; south_z = []
    bn = []; be = []; bd = []

    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # trace to the northern and southern footpoints at ref_height
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year + 1, 1, 1) -
                        datetime.datetime(time.year, 1, 1)).days
        date = time.year + float(doy)/float(num_doy_year) + \
            (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components; tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)

        # collect outputs
        south_x.append(trace_south[0]); south_y.append(trace_south[1]); south_z.append(trace_south[2])
        north_x.append(trace_north[0]); north_y.append(trace_north[1]); north_z.append(trace_north[2])
        bn.append(tbn); be.append(tbe); bd.append(tbd)

    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)

    # calculate unit vectors from satellite towards northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic (field-aligned) unit vector; IGRF returns
    # north/east/down components, flip down to up for ENU
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)

    # cross product of the southward and northward pointing vectors
    # defines the direction normal to the field line plane, i.e. zonal
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # normalize to get the zonal unit vector
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    if filter_zonal:
        # BUG FIX: this removal was previously unconditional even though the
        # filter_zonal parameter documents it as optional
        # remove any field aligned component from the zonal vector
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # compute meridional vector as cross product of zonal and magnetic
    # unit vectors
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Step from `pos` along a magnetic unit vector direction towards the
    supplied field line trace and determine the distance of closest approach.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form the
    last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        If 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        Step size (km) that method will try to match when stepping towards
        the field line.
    field_step_size : float
        Step size (km) used to produce the input field line trace; sets the
        span of the high resolution re-trace taken around the point of
        closest approach.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.

    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the total distance along the unit vector line taken so far
    scalar = 0.
    # repeat boolean, loop continues while True
    repeat=True
    # first run boolean, triggers the one-time high resolution re-trace
    first=True
    # factor is a divisor applied to the remaining distance between point and
    # field line. Steps are taken towards the field line without overshooting;
    # each time the minimum distance increases the step is undone, factor is
    # increased (reducing the next step size), and the step is retried.
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take sub-steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # print (unit_steps, scalar/unit_steps)
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            # # reduce number of elements we really need to check
            # field_copy = field_copy[min_idx-100:min_idx+100]
            # # difference with position
            # diff = field_copy - pos_step
            # diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # # find closest one
            # min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new, smaller increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step, final position, and closest approach distance
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not 'meridional', 'zonal', or 'aligned'.

    """
    # validate direction up front; previously an unsupported value (including
    # the default None) left the unit vector variables unbound and produced a
    # confusing NameError deep inside the loop below
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be 'meridional', 'zonal', or "
                         "'aligned'; got " + repr(direction))
    # set parameters for the field line tracing routines used when
    # constructing the magnetic unit vectors
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF
        # convert to geodetic, the form the unit vector routine expects
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions at the current location
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
            [lat], [lon], [alt], [date],
            steps=field_steps,
            max_steps=field_max_steps,
            step_size=field_step_size,
            ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        else:
            # 'aligned', guaranteed by the validation above
            ux, uy, uz = bx, by, bz
        # take a step along the selected direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (float, float, float, float, float, float)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)

    """
    # express the input locations in ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # coarse field line trace settings
    coarse_size = 100.
    coarse_max = 1000
    coarse_steps = np.arange(coarse_max)
    # refined trace settings, covers slightly more than one coarse step
    fine_size = .01
    fine_max = int(coarse_size/fine_size) + 10
    fine_steps = np.arange(fine_max)
    # accumulate apex positions, one per input location
    xs_out = []
    ys_out = []
    zs_out = []
    for sat_x, sat_y, sat_z, _glat, _glon, _alt, date in zip(ecef_xs, ecef_ys,
                                                             ecef_zs, glats,
                                                             glons, alts,
                                                             dates):
        # stage one: coarse full field line trace through the input point
        coarse_trace = full_field_line(np.array([sat_x, sat_y, sat_z]), date,
                                       0.,
                                       steps=coarse_steps,
                                       step_size=coarse_size,
                                       max_steps=coarse_max)
        # apex is the trace point of greatest geodetic height
        _, _, heights = ecef_to_geodetic(coarse_trace[:, 0],
                                         coarse_trace[:, 1],
                                         coarse_trace[:, 2])
        peak = np.argmax(heights)
        # stage two: short, high resolution trace around the coarse apex;
        # recurse=False keeps the trace to exactly fine_max steps
        fine_trace = full_field_line(coarse_trace[peak, :], date, 0.,
                                     steps=fine_steps,
                                     step_size=fine_size,
                                     max_steps=fine_max,
                                     recurse=False)
        _, _, heights = ecef_to_geodetic(fine_trace[:, 0],
                                         fine_trace[:, 1],
                                         fine_trace[:, 2])
        peak = np.argmax(heights)
        xs_out.append(fine_trace[peak, 0])
        ys_out.append(fine_trace[peak, 1])
        zs_out.append(fine_trace[peak, 2])
    xs_out = np.array(xs_out)
    ys_out = np.array(ys_out)
    zs_out = np.array(zs_out)
    # report the apex in both ECEF and geodetic coordinates
    glat, glon, alt = ecef_to_geodetic(xs_out, ys_out, zs_out)
    return xs_out, ys_out, zs_out, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and goes through footpoint. At footpoint, steps along vector direction
    in both positive and negative directions, then traces back to opposite
    footpoint. Back at input location, steps toward those new field lines
    (edge_length) along vector direction until hitting distance of minimum
    approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest approach.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # sign of the field line trace direction toward the requested footpoint;
    # note any other `direction` value leaves `direct` unset and raises
    # NameError when first used below
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closest approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # print (glat, glon, alt)
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location, last point of the trace
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through positive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input
    location.

    From the input location, a step along both the positive and negative
    vector_direction is taken, and the apex locations for those stepped points
    are calculated. The difference in position between these apex locations is
    the total centered distance between magnetic field lines at the magnetic
    apex when starting locally with a field line half distance of edge_length.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at the input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from the input location along unit vector
        in a given direction (positive/negative)

    Returns
    -------
    np.array
        The change in field line apex locations (km), one value per input
        location.

    """
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # take step from s/c along + vector direction
        # then get the apex location for that stepped point
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, _, _, _ = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # take step from s/c along - vector direction
        # then get the apex location for that stepped point
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, _, _, _ = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        # straight-line distance between the two apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0] - minus_apex_x[0])**2 +
                                        (plus_apex_y[0] - minus_apex_y[0])**2 +
                                        (plus_apex_z[0] - minus_apex_z[0])**2))
    return np.array(apex_edge_length)
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
    Directions refer to the ion motion direction e.g. the zonal
    scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size (km) used for field line integration (default 100 km)
    max_steps : int
        Number of steps taken for field line integration (default 1000)
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    # note the deliberate crossing: a meridional E field drives a zonal drift
    # print ('Starting Northern')
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                glons, alts, dates, 'north',
                                'meridional',
                                step_size=step_size,
                                max_steps=max_steps,
                                edge_length=25.,
                                edge_steps=5)
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                glons, alts, dates, 'north',
                                'zonal',
                                step_size=step_size,
                                max_steps=max_steps,
                                edge_length=25.,
                                edge_steps=5)
    # print ('Starting Southern')
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                glons, alts, dates, 'south',
                                'meridional',
                                step_size=step_size,
                                max_steps=max_steps,
                                edge_length=25.,
                                edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                glons, alts, dates, 'south',
                                'zonal',
                                step_size=step_size,
                                max_steps=max_steps,
                                edge_length=25.,
                                edge_steps=5)
    # print ('Starting Equatorial')
    # , step_zon_apex2, mind_plus, mind_minus
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'meridional',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # , step_mer_apex2, mind_plus, mind_minus
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'zonal',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # print ('Done with core')
    # normalize field line edge lengths by the 50 km total local edge
    # (2 x edge_length of 25 km used above)
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial scalars are inverted, apex edge length relative to 50 km
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
    if e_field_scaling_only:
        # prepare output
        # E field scalars swap mer/zon labels relative to the drift variables
        # (drift direction is perpendicular to the E field direction)
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                                        [glat], [glon],
                                                                        [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # scalar for the northern footpoint electric field based on distances
            # for drift also need to include the magnetic field, drift = E/B
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            # NOTE(review): apex_alt/apex_lat/apex_lon are length-1 arrays
            # here rather than scalars -- confirm igrf12syn accepts them
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | ecef_to_geodetic | python | def ecef_to_geodetic(x, y, z, method=None):
# quick notes on ECEF to Geodetic transformations
# http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
method = method or 'closed'
# ellipticity of Earth
ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
# first eccentricity squared
e2 = ellip ** 2 # 6.6943799901377997E-3
longitude = np.arctan2(y, x)
# cylindrical radius
p = np.sqrt(x ** 2 + y ** 2)
# closed form solution
# a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
if method == 'closed':
e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
theta = np.arctan2(z*earth_a, p*earth_b)
latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)
r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
h = p / np.cos(latitude) - r_n
# another possibility
# http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf
## iterative method
# http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
if method == 'iterative':
latitude = np.arctan2(p, z)
r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
for i in np.arange(6):
# print latitude
r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
h = p / np.cos(latitude) - r_n
latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
# print h
# final ellipsoidal height update
h = p / np.cos(latitude) - r_n
return np.rad2deg(latitude), np.rad2deg(longitude), h | Convert ECEF into Geodetic WGS84 coordinates
Parameters
----------
x : float or array_like
ECEF-X in km
y : float or array_like
ECEF-Y in km
z : float or array_like
ECEF-Z in km
method : 'iterative' or 'closed' ('closed' is default)
String selects method of conversion. Closed for mathematical
solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).
Returns
-------
latitude, longitude, altitude
numpy arrays of locations in degrees, degrees, and km | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L113-L176 | null |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geocentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    # work in radians for the trigonometric calls
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # distance from Earth's center, spherical Earth assumed
    radius = earth_geo_radius + altitude
    # project onto the equatorial plane, then split into X/Y by longitude
    x = radius * np.cos(lat_rad) * np.cos(lon_rad)
    y = radius * np.cos(lat_rad) * np.sin(lon_rad)
    # polar component
    z = radius * np.sin(lat_rad)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # geocentric radius of the point
    radial = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # arccos yields the angle down from the pole (colatitude);
    # latitude is measured up from the equator
    latitude = 90. - np.rad2deg(np.arccos(z / radial))
    longitude = np.rad2deg(np.arctan2(y, x))
    # height above the reference radius
    altitude = radial - ref_height
    return latitude, longitude, altitude
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    # work in radians for the trigonometric calls
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # first eccentricity of the WGS84 reference ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(lat_rad) ** 2)
    # equatorial-plane components
    x = (r_n + altitude) * np.cos(lat_rad) * np.cos(lon_rad)
    y = (r_n + altitude) * np.cos(lat_rad) * np.sin(lon_rad)
    # polar component, reduced by the ellipsoid flattening
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(lat_rad)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If method is not 'closed' or 'iterative'.

    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'
    if method not in ('closed', 'iterative'):
        # previously an unsupported method fell through to a NameError
        # on undefined outputs; fail loudly and clearly instead
        raise ValueError("method must be 'closed' or 'iterative'")

    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3

    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)

    if method == 'closed':
        # closed form solution
        # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
        e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
        theta = np.arctan2(z*earth_a, p*earth_b)
        latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
        # another possibility
        # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf
    else:
        # iterative method
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
        for i in np.arange(6):
            # refine curvature radius, height, and latitude in turn
            r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n

    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    # latitude and longitude in radians
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    # precompute the rotation matrix trig terms
    sin_lat, cos_lat = np.sin(rlat), np.cos(rlat)
    sin_lon, cos_lon = np.sin(rlon), np.cos(rlon)
    # rotate ENU components into the ECEF frame
    x = -east*sin_lon - north*cos_lon*sin_lat + up*cos_lon*cos_lat
    y = east*cos_lon - north*sin_lon*sin_lat + up*sin_lon*cos_lat
    z = north*cos_lat + up*sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions

    """
    # latitude and longitude in radians
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    # precompute the rotation matrix trig terms
    sin_lat, cos_lat = np.sin(rlat), np.cos(rlat)
    sin_lon, cos_lon = np.sin(rlon), np.cos(rlon)
    # rotate ECEF components into the local ENU frame
    east = -x*sin_lon + y*cos_lon
    north = -x*cos_lon*sin_lat - y*sin_lon*sin_lat + z*cos_lat
    up = x*cos_lon*cos_lat + y*sin_lon*cos_lat + z*sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ecef onto different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF X, Y, Z components of the x unit vector of new basis
    yx, yy, yz : float or array-like
        ECEF X, Y, Z components of the y unit vector of new basis
    zx, zy, zz : float or array-like
        ECEF X, Y, Z components of the z unit vector of new basis

    Returns
    -------
    out_x, out_y, out_z
        Vector components along the x, y, and z directions of the new basis

    """
    # each output component is the dot product of the input vector
    # with the corresponding basis unit vector
    proj_x = x*xx + y*xy + z*xz
    proj_y = x*yx + y*yy + z*yz
    proj_z = x*zx + y*zy + z*zz
    return proj_x, proj_y, proj_z
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components

    """
    # Euclidean length of the vector
    magnitude = np.sqrt(x**2 + y**2 + z**2)
    # scale every component by the length
    return x/magnitude, y/magnitude, z/magnitude
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of cross product vector, v1 x v2. Note the result
        is not normalized; its magnitude is |v1||v2|sin(angle).

    """
    # standard determinant expansion of the cross product
    x = y1*z2 - y2*z1
    y = z1*x2 - x1*z2
    z = x1*y2 - y1*x2
    return x, y, z
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal bookkeeping that counts the recursive calls made while
        trying to reach the target height. Users should leave as None.
    recurse : bool
        If True, re-trace from the trace end point whenever the target
        height has not yet been reached (up to 1000 recursive calls).

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If the target altitude is not reached within 1000 recursive calls.

    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate the IGRF field-following ODE from init; igrf.igrf_step
    # supplies the local field direction at each substep
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
                                         # mxstep=500)
    # check that we reached final altitude
    check = trace_north[-1, :]
    x, y, z = ecef_to_geodetic(*check)
    # a target height of exactly 0 can't be scaled by the tolerance
    # factor below, so substitute 1 km for the termination check
    if height == 0:
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    # NOTE(review): bitwise & is used between boolean-valued operands here
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        # filter points to terminate at point closest to target height
        # code below not correct, we want the first poiint that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace in both directions from the starting point
    southward = field_line_trace(init, date, -1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    northward = field_line_trace(init, date, 1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # reverse the southern trace so the combined array runs south to north,
    # dropping its final point (the start location) to avoid duplication
    return np.vstack((southward[::-1][:-1, :], northward))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
        The zonal vector is calculated by field-line tracing from
        the input locations toward the footpoint locations at ref_height.
        The cross product of these two vectors is taken to define the plane of
        the magnetic field. This vector is not always orthogonal
        with the local field-aligned vector (IGRF), thus any component of the
        zonal vector with the field-aligned direction is removed (optional).
        The meridional unit vector is defined via the cross product of the
        zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Locations where field line trace positions are reported.
        Defaults to np.arange(max_steps).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
        ECEF components of the zonal, field-aligned, and meridional
        unit vectors, one element per input location.

    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # filter longitudes (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = [];
    north_y = [];
    north_z = []
    south_x = [];
    south_y = [];
    south_z = []
    bn = [];
    be = [];
    bd = []

    # loop over locations; IGRF field evaluation takes geocentric
    # colatitude (radians) and east longitude (radians)
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # date = inst.yr + inst.doy / 366.
        # trace = full_field_line(init, time, ref_height, step_size=step_size,
        #                         max_steps=max_steps,
        #                         steps=steps)
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year+1,1,1) - datetime.datetime(time.year,1,1)).days
        date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)

        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])

        bn.append(tbn);
        be.append(tbe);
        bd.append(tbd)

    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)

    # calculate vector from satellite to northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic unit vector
    # IGRF returns north/east/down; flip down to up for the ENU conversion
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)

    # take cross product of southward and northward vectors to get the zonal vector
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # getting zonal vector utilizing magnetic field vector instead
    zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z,
                                                    bx, by, bz)
    # getting zonal vector utilizing magnetic field vector instead and southern point
    zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z,
                                                    bx, by, bz)
    # normalize the vectors
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)

    # calculate zonal vector
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot

    # remove any field aligned component to the zonal vector
    # NOTE(review): this subtraction runs regardless of filter_zonal — the
    # flag documented above is not consulted here; confirm intended behavior
    dot_fa = zvx * bx + zvy * by + zvz * bz
    zvx -= dot_fa * bx
    zvy -= dot_fa * by
    zvz -= dot_fa * bz
    zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)

    # compute meridional vector
    # cross product of zonal and magnetic unit vector
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # add unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form the
    last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        if 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards field line.
    field_step_size : float
        Step size (km) used to produce the input field line trace; sets the
        length of the high resolution re-trace around the closest point.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.

    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean
    repeat=True
    # first run boolean
    first=True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # print (unit_steps, scalar/unit_steps)
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)

            # # reduce number of elements we really need to check
            # field_copy = field_copy[min_idx-100:min_idx+100]
            # # difference with position
            # diff = field_copy - pos_step
            # diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # # find closest one
            # min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not 'meridional', 'zonal', or 'aligned'.

    """
    # previously an unsupported direction produced a NameError deep in the
    # loop below; validate up front with a clear message instead
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be 'meridional', 'zonal', or 'aligned'")

    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF
        # convert to geodetic
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
                [lat], [lon], [alt], [date],
                steps=field_steps,
                max_steps=field_max_steps,
                step_size=field_step_size,
                ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        else:
            # 'aligned', guaranteed by the validation above
            ux, uy, uz = bx, by, bz
        # take steps along direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar

    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.1 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (float, float, float, float, float, float)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)

    """
    # starting locations expressed in ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # coarse field line trace parameters
    step_size = 100.
    max_steps = 1000
    steps = np.arange(max_steps)
    # fine trace parameters; span one coarse step with margin
    fine_step_size = .01
    fine_max_steps = int(step_size/fine_step_size)+10
    fine_steps = np.arange(fine_max_steps)
    # accumulate apex positions here
    apex_xs, apex_ys, apex_zs = [], [], []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # coarse trace of the full field line through this location
        trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
                                steps=steps,
                                step_size=step_size,
                                max_steps=max_steps)
        # geodetic heights along the trace; the apex is the maximum
        _, _, t_heights = ecef_to_geodetic(trace[:, 0], trace[:, 1], trace[:, 2])
        coarse_idx = np.argmax(t_heights)
        # refine with a high resolution trace around the coarse apex;
        # recurse False ensures only fine_max_steps are taken
        trace = full_field_line(trace[coarse_idx, :], date, 0.,
                                steps=fine_steps,
                                step_size=fine_step_size,
                                max_steps=fine_max_steps,
                                recurse=False)
        _, _, t_heights = ecef_to_geodetic(trace[:, 0], trace[:, 1], trace[:, 2])
        fine_idx = np.argmax(t_heights)
        # record the refined apex location
        apex_xs.append(trace[fine_idx, 0])
        apex_ys.append(trace[fine_idx, 1])
        apex_zs.append(trace[fine_idx, 2])

    out_x = np.array(apex_xs)
    out_y = np.array(apex_ys)
    out_z = np.array(apex_zs)
    # also report the apex locations in geodetic coordinates
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and goes through footpoint. At footpoint, steps along vector direction
    in both positive and negative directions, then traces back to opposite
    footpoint. Back at input location, steps toward those new field lines
    (edge_length) along vector direction until hitting distance of minimum
    approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest approach.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.

    Raises
    ------
    ValueError
        If direction is not 'north' or 'south'.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)

    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    else:
        # previously an unsupported direction produced a NameError on
        # 'direct' below; validate with a clear message instead
        raise ValueError("direction must be 'north' or 'south'")

    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)

    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []

    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closet approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.

        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)

        # need to determine where the intersection of field line coming back from
        # footpoint through postive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)

        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)

    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input location.

    From each input location, a step of total length edge_length is taken along
    both the positive and negative vector_direction. The apex locations of the
    field lines through those two offset points are then calculated. The
    difference in position between these apex locations is the total centered
    distance between magnetic field lines at the magnetic apex when starting
    locally with a field line half distance of edge_length.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at the input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from the input location towards the new field
        line in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations (km).
    """
    # input locations in ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys,
                                                             ecef_zs, glats,
                                                             glons, alts,
                                                             dates):
        # take step from location along + vector direction,
        # then get the apex location of the field line through that point
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, _, _, _ = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # take step from location along - vector direction,
        # then get the apex location of that field line
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, _, _, _ = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        # straight-line distance between the two apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0] - minus_apex_x[0])**2 +
                                        (plus_apex_y[0] - minus_apex_y[0])**2 +
                                        (plus_apex_z[0] - minus_apex_z[0])**2))
    return np.array(apex_edge_length)
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
        Directions refer to the ion motion direction e.g. the zonal
        scalar applies to zonal ion motions (meridional E field assuming
        ExB ion motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float
        Step size (km) used for field line integration (default 100.)
    max_steps : int
        Number of steps taken for field line integration (default 1000)
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.
        When e_field_scaling_only is True the keys end in '_fields_scalar'
        instead of '_drifts_scalar'.
    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    # NOTE(review): the mind_plus/mind_minus (closest approach) outputs are
    # overwritten by each call below and not used afterwards.
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                            glons, alts, dates, 'north',
                                            'meridional',
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            edge_length=25.,
                                            edge_steps=5)
    # zonal e-field (meridional step) maps to meridional drifts
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                            glons, alts, dates, 'north',
                                            'zonal',
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            edge_length=25.,
                                            edge_steps=5)
    # southern footpoint mappings
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                            glons, alts, dates, 'south',
                                            'meridional',
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            edge_length=25.,
                                            edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                            glons, alts, dates, 'south',
                                            'zonal',
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            edge_length=25.,
                                            edge_steps=5)
    # equatorial mappings via apex separation distances
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                            'meridional',
                                            edge_length=25.,
                                            edge_steps=5)
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                            'zonal',
                                            edge_length=25.,
                                            edge_steps=5)
    # normalize by the 50 km total local step (2 * edge_length of 25 km);
    # footpoint scalars are (local distance)/(footpoint distance) ratios
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial scalars use the inverse ratio, local/apex separation
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
    if e_field_scaling_only:
        # prepare output; note the zonal E-field maps to meridional drifts
        # and vice versa, hence the swapped names
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength between the s/c location and the mapped locations
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            # decimal year, as required by the IGRF code
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                [glat], [glon],
                [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # scalar for the northern footpoint electric field based on distances
            # for drift also need to include the magnetic field, drift = E/B
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | enu_to_ecef_vector | python | def enu_to_ecef_vector(east, north, up, glat, glong):
# convert lat and lon in degrees to radians
rlat = np.radians(glat)
rlon = np.radians(glong)
x = -east*np.sin(rlon) - north*np.cos(rlon)*np.sin(rlat) + up*np.cos(rlon)*np.cos(rlat)
y = east*np.cos(rlon) - north*np.sin(rlon)*np.sin(rlat) + up*np.sin(rlon)*np.cos(rlat)
z = north*np.cos(rlat) + up*np.sin(rlat)
return x, y, z | Converts vector from East, North, Up components to ECEF
Position of vector in geospace may be specified in either
geocentric or geodetic coordinates, with corresponding expression
of the vector using radial or ellipsoidal unit vectors.
Parameters
----------
east : float or array-like
Eastward component of vector
north : float or array-like
Northward component of vector
up : float or array-like
Upward component of vector
latitude : float or array_like
Geodetic or geocentric latitude (degrees)
longitude : float or array_like
Geodetic or geocentric longitude (degrees)
Returns
-------
x, y, z
Vector components along ECEF x, y, and z directions | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L178-L213 | null |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 semi-major axis (equatorial radius), km
earth_a = 6378.1370
# WGS84 semi-minor axis (polar radius), km
earth_b = 6356.75231424518
# standard geocentric Earth radius
# average radius of Earth, km
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km
    """
    # angles in radians
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # radial distance from Earth's center
    r = earth_geo_radius + altitude
    # standard spherical-to-Cartesian transform
    x = r * np.cos(lat_rad) * np.cos(lon_rad)
    y = r * np.cos(lat_rad) * np.sin(lon_rad)
    z = r * np.sin(lat_rad)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km
    """
    # default reference shell is the mean Earth radius
    if ref_height is None:
        ref_height = earth_geo_radius
    # radial distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude comes from the polar angle (90 deg minus colatitude)
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    longitude = np.rad2deg(np.arctan2(y, x))
    return latitude, longitude, radius - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km
    """
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # first eccentricity of the WGS84 ellipsoid
    ecc = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # radius of curvature in the prime vertical
    r_n = earth_a / np.sqrt(1. - ecc ** 2 * np.sin(lat_rad) ** 2)
    x = (r_n + altitude) * np.cos(lat_rad) * np.cos(lon_rad)
    y = (r_n + altitude) * np.cos(lat_rad) * np.sin(lon_rad)
    # polar axis uses the reduced radius of curvature
    z = (r_n * (1. - ecc ** 2) + altitude) * np.sin(lat_rad)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If method is not 'closed' or 'iterative'.
        (Previously an unknown method fell through to an UnboundLocalError.)
    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'
    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3
    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)
    if method == 'closed':
        # closed form solution
        # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
        e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
        theta = np.arctan2(z*earth_a, p*earth_b)
        latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3,
                              p - e2*earth_a*np.cos(theta)**3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
    elif method == 'iterative':
        # iterative method
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        latitude = np.arctan2(p, z)
        for _ in range(6):
            r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n
    else:
        raise ValueError('method must be "closed" or "iterative"')
    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions
    """
    # rotation angles in radians
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    sin_lat, cos_lat = np.sin(rlat), np.cos(rlat)
    sin_lon, cos_lon = np.sin(rlon), np.cos(rlon)
    # rotate the local ENU components onto the ECEF axes
    x = -east*sin_lon - north*cos_lon*sin_lat + up*cos_lon*cos_lat
    y = east*cos_lon - north*sin_lon*sin_lat + up*sin_lon*cos_lat
    z = north*cos_lat + up*sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions
    """
    # rotation angles in radians
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    sin_lat, cos_lat = np.sin(rlat), np.cos(rlat)
    sin_lon, cos_lon = np.sin(rlon), np.cos(rlon)
    # rotate the ECEF components onto the local ENU axes
    east = -x*sin_lon + y*cos_lon
    north = -x*cos_lon*sin_lat - y*sin_lon*sin_lat + z*cos_lat
    up = x*cos_lon*cos_lat + y*sin_lon*cos_lat + z*sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ECEF onto a different basis, with basis unit
    vectors also expressed in ECEF.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF components of the x unit vector of the new basis
    yx, yy, yz : float or array-like
        ECEF components of the y unit vector of the new basis
    zx, zy, zz : float or array-like
        ECEF components of the z unit vector of the new basis

    Returns
    -------
    out_x, out_y, out_z
        Vector components along the new basis; each is the dot product of
        the input vector with the corresponding basis unit vector.
    """
    # dot product with each basis unit vector in turn
    px = x*xx + y*xy + z*xz
    py = x*yx + y*yy + z*yz
    pz = x*zx + y*zy + z*zz
    return px, py, pz
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components
    """
    # Euclidean length of the input vector
    magnitude = np.sqrt(x**2 + y**2 + z**2)
    return x/magnitude, y/magnitude, z/magnitude
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of the cross product v1 x v2
    """
    # standard determinant expansion of the cross product
    return (y1*z2 - y2*z1,
            z1*x2 - x1*z2,
            x1*y2 - y1*x2)
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or NoneType
        Internal bookkeeping of the number of recursive calls made so far;
        leave as None when calling externally.
    recurse : bool
        If True, the method calls itself again from the end point of the
        trace when the target height was not reached.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If the target altitude is not reached after 1000 recursive calls.
    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate along the field using igrf.igrf_step as the derivative function
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False)
    # check that we reached final altitude
    check = trace_north[-1, :]
    x, y, z = ecef_to_geodetic(*check)
    # NOTE(review): the names above are misleading - ecef_to_geodetic returns
    # latitude, longitude, altitude, so only z (geodetic altitude) is used below
    if height == 0:
        # use a small positive threshold when the target is the surface
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height;
    # allow 0.0001% tolerance before declaring the trace incomplete
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # stack the initial trace with the recursive continuation
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude;
        # note points past the target height are currently not filtered out
        return trace_north
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Traces in both directions from the starting point and combines the
    results into a single south-to-north ordered array.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).
    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace in both directions from the starting location
    south = field_line_trace(init, date, -1., height,
                             steps=steps,
                             step_size=step_size,
                             max_steps=max_steps,
                             **kwargs)
    north = field_line_trace(init, date, 1., height,
                             steps=steps,
                             step_size=step_size,
                             max_steps=max_steps,
                             **kwargs)
    # reverse the southern leg so the combined trace runs south to north,
    # dropping the duplicated starting point before appending the northern leg
    return np.vstack((south[::-1][:-1, :], north))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
        The zonal vector is calculated by field-line tracing from
        the input locations toward the footpoint locations at ref_height.
        The cross product of these two vectors is taken to define the plane of
        the magnetic field. This vector is not always orthogonal
        with the local field-aligned vector (IGRF), thus any component of the
        zonal vector with the field-aligned direction is removed (optional).
        The meridional unit vector is defined via the cross product of the
        zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats or NoneType
        Steps along the field line at which trace positions are reported.
        Defaults to np.arange(max_steps).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.
        (Previously this flag was ignored and filtering always applied.)

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # move longitudes into the 0-360 range expected below
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = []
    north_y = []
    north_z = []
    south_x = []
    south_y = []
    south_z = []
    bn = []
    be = []
    bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # trace to the northern and southern footpoints at ref_height
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year + 1, 1, 1) -
                        datetime.datetime(time.year, 1, 1)).days
        date = time.year + float(doy)/float(num_doy_year) + \
            (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn)
        be.append(tbe)
        bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # unit vectors from satellite towards northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # field-aligned (magnetic) unit vector, ENU to ECEF; -bd since bd is down
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # cross product of southward and northward footpoint vectors defines
    # the normal to the plane of the field line, the zonal direction
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # normalize to get the zonal unit vector
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    if filter_zonal:
        # remove any field aligned component from the zonal vector;
        # previously applied unconditionally, ignoring filter_zonal
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # meridional vector completes the set, zonal crossed with field-aligned
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form
    the last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        if 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards
        field line.
    field_step_size : float
        Step size (km) used to produce the input field_line trace. Sets the
        span of the high resolution re-trace around the closest point.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.

    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the total distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean, controls the search loop
    repeat = True
    # first run boolean, triggers the one-time high resolution re-trace
    first = True
    # factor is a divisor applied to the remaining distance between point and
    # field line. Steps are taken towards the field line without overshooting;
    # each time the minimum distance increases, step back, increase factor
    # (reducing the next step size), then try again.
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            # always take at least one step
            unit_steps = 1
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step, final position, and distance of closest approach
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not one of 'meridional', 'zonal', or 'aligned'.

    """
    # validate direction up front; previously an unsupported value caused an
    # UnboundLocalError only after the first expensive unit vector calculation
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be one of 'meridional', 'zonal', "
                         "or 'aligned'")
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for _ in np.arange(num_steps):
        # x, y, z in ECEF
        # convert to geodetic
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions; recomputed every step since the
        # unit vectors vary with position
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
                [lat], [lon], [alt], [date],
                steps=field_steps,
                max_steps=field_max_steps,
                step_size=field_step_size,
                ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        else:
            # 'aligned', already validated above
            ux, uy, uz = bx, by, bz
        # take steps along direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (float, float, float, float, float, float)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)

    """
    # use input location and convert to ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare parameters for broad field line trace
    step_size = 100.
    max_steps = 1000
    steps = np.arange(max_steps)
    # high resolution trace parameters
    # extra steps cover one broad step on either side of the coarse maximum
    fine_step_size = .01
    fine_max_steps = int(step_size/fine_step_size)+10
    fine_steps = np.arange(fine_max_steps)
    # prepare output
    out_x = []
    out_y = []
    out_z = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # to get the apex location we need to do a field line trace
        # then find the highest point
        trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
                                steps=steps,
                                step_size=step_size,
                                max_steps=max_steps)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # repeat using a high resolution trace one big step size each
        # direction around identified max
        # recurse False ensures only max_steps are taken
        trace = full_field_line(trace[max_idx,:], date, 0.,
                                steps=fine_steps,
                                step_size=fine_step_size,
                                max_steps=fine_max_steps,
                                recurse=False)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # collect outputs
        out_x.append(trace[max_idx,0])
        out_y.append(trace[max_idx,1])
        out_z.append(trace[max_idx,2])
    out_x = np.array(out_x)
    out_y = np.array(out_y)
    out_z = np.array(out_z)
    # express apex locations in geodetic coordinates as well
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and goes through footpoint. At footpoint, steps along vector
    direction in both positive and negative directions, then traces back to
    opposite footpoint. Back at input location, steps toward those new field
    lines (edge_length) along vector direction until hitting distance of
    minimum approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest
    approach.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # direct sets the sign of the field line trace towards the requested
    # hemisphere; any other direction input leaves direct undefined
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closest approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        # NOTE(review): day-of-year always divided by 366, including
        # non-leap years - confirm intended convention for the IGRF call
        double_date = float(yr) + float(doy) / 366.
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back
        # from footpoint through positive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                    other_plus,
                                    1, date,
                                    direction=vector_direction,
                                    field_step_size=step_size,
                                    step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                    other_minus,
                                    -1, date,
                                    direction=vector_direction,
                                    field_step_size=step_size,
                                    step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input
    location.

    Using the input location, the apex location is calculated. Also from the
    input location, a step along both the positive and negative
    vector_directions is taken, and the apex locations for those points are
    calculated. The difference in position between these apex locations is the
    total centered distance between magnetic field lines at the magnetic apex
    when starting locally with a field line half distance of edge_length.

    An alternative method has been implemented, then commented out.
    This technique takes multiple steps from the origin apex towards the apex
    locations identified along vector_direction. In principle this is more
    accurate but more computationally intensive, similar to the footpoint
    model. A comparison is planned.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations.
        ## Pending ## The return edge length through input location.
        ## Pending ## The distances of closest approach for the positive step
        along vector direction, and the negative step.

    """
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    # outputs for alternative calculation (currently unused, see commented
    # code below)
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # double_date only feeds the commented-out alternative calculation
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # get location of apex for s/c field line
        apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                    [glat], [glon],
                                                    [alt], [date])
        # apex in ecef (maps to input location); used by commented alternative
        apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
        # take step from s/c along + vector direction
        # then get the apex location
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # plus apex location in ECEF; used by commented alternative
        plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
        # take half step from s/c along - vector direction
        # then get the apex location
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
        # take difference in apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
                                        (plus_apex_y[0]-minus_apex_y[0])**2 +
                                        (plus_apex_z[0]-minus_apex_z[0])**2))
        # # take an alternative path to calculation
        # # do field line trace around pos and neg apexes
        # # then do intersection with field line projection thing
        #
        # # do a short centered field line trace around plus apex location
        # other_trace = full_field_line(plus_apex_root, double_date, 0.,
        #                               step_size=1.,
        #                               max_steps=10,
        #                               recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # pos_edge_length, _, mind_pos = step_until_intersect(apex_root,
        #                             other_trace,
        #                             1, date,
        #                             direction=vector_direction,
        #                             field_step_size=1.,
        #                             step_size_goal=edge_length/edge_steps)
        # # do a short centered field line trace around 'minus' apex location
        # other_trace = full_field_line(minus_apex_root, double_date, 0.,
        #                               step_size=1.,
        #                               max_steps=10,
        #                               recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # minus_edge_length, _, mind_minus = step_until_intersect(apex_root,
        #                             other_trace,
        #                             -1, date,
        #                             direction=vector_direction,
        #                             field_step_size=1.,
        #                             step_size_goal=edge_length/edge_steps)
        # full_local_step.append(pos_edge_length + minus_edge_length)
        # min_distance_plus.append(mind_pos)
        # min_distance_minus.append(mind_minus)
    # still sorting out alternative option for this calculation
    # commented code is 'good' as far as the plan goes
    # takes more time, so I haven't tested one vs the other yet
    # having two live methods can lead to problems
    # THIS IS A TODO (sort it out)
    return np.array(apex_edge_length)#, np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)

    # # take step from one apex towards the other
    # apex_path = step_along_mag_unit_vector(minus_apex_x, minus_apex_y, minus_apex_z, date,
    #                                        direction=vector_direction,
    #                                        num_steps=edge_steps,
    #                                        step_size=apex_edge_length[-1]/(edge_steps*2.))
    # pos_apex_diff.append((apex_path[0] - plus_apex_x)**2 +
    #                      (apex_path[1] - plus_apex_y)**2 +
    #                      (apex_path[2] - plus_apex_z)**2)
    # return apex_edge_length, path_apex_diff
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
        Directions refer to the ion motion direction e.g. the zonal
        scalar applies to zonal ion motions (meridional E field assuming ExB
        ion motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size (km) used for field line integration. Defaults to 100.
    max_steps : int
        Number of steps taken for field line integration. Defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    # NOTE: the *zon_drifts_scalar names below hold meridional edge lengths
    # (zonal ion drift <-> meridional E field) and vice versa
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                glons, alts, dates, 'north',
                                'meridional',
                                step_size=step_size,
                                max_steps=max_steps,
                                edge_length=25.,
                                edge_steps=5)
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                glons, alts, dates, 'north',
                                'zonal',
                                step_size=step_size,
                                max_steps=max_steps,
                                edge_length=25.,
                                edge_steps=5)
    # southern hemisphere footpoint edge lengths
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                glons, alts, dates, 'south',
                                'meridional',
                                step_size=step_size,
                                max_steps=max_steps,
                                edge_length=25.,
                                edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                glons, alts, dates, 'south',
                                'zonal',
                                step_size=step_size,
                                max_steps=max_steps,
                                edge_length=25.,
                                edge_steps=5)
    # equatorial (apex) edge lengths
    # , step_zon_apex2, mind_plus, mind_minus
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'meridional',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # , step_mer_apex2, mind_plus, mind_minus
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'zonal',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # normalize the footpoint edge lengths by the total local edge
    # length (2 * edge_length = 50 km)
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial scalars are inverted relative to footpoint scalars
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
    if e_field_scaling_only:
        # prepare output; note the zon/mer swap between drift and field keys
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            yr, doy = pysat.utils.getyrdoy(date)
            # NOTE(review): day-of-year always divided by 366, including
            # non-leap years - confirm intended convention for the IGRF call
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                        [glat], [glon],
                                                        [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # scalar for the northern footpoint electric field based on
            # distances for drift also need to include the magnetic field,
            # drift = E/B
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | project_ecef_vector_onto_basis | python | def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
out_x = x*xx + y*xy + z*xz
out_y = x*yx + y*yy + z*yz
out_z = x*zx + y*zy + z*zz
return out_x, out_y, out_z | Projects vector in ecef onto different basis, with components also expressed in ECEF
Parameters
----------
x : float or array-like
ECEF-X component of vector
y : float or array-like
ECEF-Y component of vector
z : float or array-like
ECEF-Z component of vector
xx : float or array-like
ECEF-X component of the x unit vector of new basis
xy : float or array-like
ECEF-Y component of the x unit vector of new basis
xz : float or array-like
ECEF-Z component of the x unit vector of new basis | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L253-L277 | null |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geoncentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    # angles in radians, radial distance from Earth's center
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    radius = earth_geo_radius + altitude
    # projection of the radius onto the equatorial (x-y) plane
    equatorial = radius * np.cos(lat_r)
    x = equatorial * np.cos(lon_r)
    y = equatorial * np.sin(lon_r)
    z = radius * np.sin(lat_r)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # radial distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude is 90 degrees minus the colatitude from the z-axis
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    longitude = np.rad2deg(np.arctan2(y, x))
    return latitude, longitude, radius - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # first eccentricity of the WGS84 ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # radius of curvature in the prime vertical
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(lat_r) ** 2)
    # distance from the rotation axis sets the x and y components
    axial = (r_n + altitude) * np.cos(lat_r)
    x = axial * np.cos(lon_r)
    y = axial * np.sin(lon_r)
    # polar component shrinks with the ellipsoid's flattening
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(lat_r)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96
        section 2.2.1) or iterative
        (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If method is not 'closed' or 'iterative'.

    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'
    # validate method; previously an unknown string fell through both
    # branches and raised UnboundLocalError at the return statement
    if method not in ('closed', 'iterative'):
        raise ValueError("method must be 'closed' or 'iterative'")
    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3
    longitude = np.arctan2(y, x)
    # cylindrical radius (distance from rotation axis)
    p = np.sqrt(x ** 2 + y ** 2)
    if method == 'closed':
        # closed form solution
        # a source, http://www.epsg.org/Portals/0/373-07-2.pdf,
        # page 96 section 2.2.1
        # another possibility
        # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf
        e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
        theta = np.arctan2(z*earth_a, p*earth_b)
        latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3,
                              p - e2*earth_a*np.cos(theta)**3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
    else:
        # iterative method
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
        # fixed number of refinement passes of the standard fixed-point
        # iteration for geodetic latitude
        for _ in np.arange(6):
            r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n
    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions
    """
    # evaluate the rotation trig factors once
    lat_r = np.radians(glat)
    lon_r = np.radians(glong)
    sin_lat = np.sin(lat_r)
    cos_lat = np.cos(lat_r)
    sin_lon = np.sin(lon_r)
    cos_lon = np.cos(lon_r)
    # standard ENU -> ECEF rotation
    x = -east * sin_lon - north * cos_lon * sin_lat + up * cos_lon * cos_lat
    y = east * cos_lon - north * sin_lon * sin_lat + up * sin_lon * cos_lat
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions
    """
    # evaluate the rotation trig factors once
    lat_r = np.radians(glat)
    lon_r = np.radians(glong)
    sin_lat = np.sin(lat_r)
    cos_lat = np.cos(lat_r)
    sin_lon = np.sin(lon_r)
    cos_lon = np.cos(lon_r)
    # standard ECEF -> ENU rotation (transpose of ENU -> ECEF)
    east = -x * sin_lon + y * cos_lon
    north = -x * cos_lon * sin_lat - y * sin_lon * sin_lat + z * cos_lat
    up = x * cos_lon * cos_lat + y * sin_lon * cos_lat + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ECEF onto a different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF x, y, z components of the new basis x unit vector
    yx, yy, yz : float or array-like
        ECEF x, y, z components of the new basis y unit vector
    zx, zy, zz : float or array-like
        ECEF x, y, z components of the new basis z unit vector

    Returns
    -------
    out_x, out_y, out_z
        Vector components along each of the new basis directions
    """
    # each output component is the dot product of the input vector
    # with the corresponding basis unit vector
    proj_x = x * xx + y * xy + z * xz
    proj_y = x * yx + y * yy + z * yz
    proj_z = x * zx + y * zy + z * zz
    return proj_x, proj_y, proj_z
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components
    """
    # Euclidean length of the vector
    magnitude = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    return x / magnitude, y / magnitude, z / magnitude
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of the cross product vector
    """
    # componentwise determinant expansion of v1 x v2
    cx = y1 * z2 - y2 * z1
    cy = z1 * x2 - x1 * z2
    cz = x1 * y2 - y1 * x2
    return cx, cy, cz
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal bookkeeping; number of recursive continuations performed
        so far. Users should leave this as None.
    recurse : bool
        If True, continue the trace via a recursive call when the
        integration stops short of the target height.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If the target altitude is not reached within 1000 recursive calls.

    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate along the field; igrf.igrf_step supplies the local step
    # direction at each position
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
    # mxstep=500)
    # check that we reached final altitude
    check = trace_north[-1, :]
    # note: z here is geodetic altitude (km), not an ECEF coordinate
    x, y, z = ecef_to_geodetic(*check)
    if height == 0:
        # use a small positive threshold when tracing to the surface
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    # (bitwise & on the recurse boolean acts as logical and here)
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # stack the continuation onto the existing trace
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        # filter points to terminate at point closest to target height
        # code below not correct, we want the first poiint that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Traces from the starting position both along and against the field,
    then joins the results into a single south-to-north field line.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).
    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace anti-field-aligned (toward the southern footpoint) first,
    # then field-aligned (toward the northern footpoint)
    southward = field_line_trace(init, date, -1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    northward = field_line_trace(init, date, 1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # reverse the southern trace so the combined array runs south to north,
    # dropping its first entry (the shared starting point) to avoid a duplicate
    return np.vstack((southward[::-1][:-1, :], northward))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
    The zonal vector is calculated by field-line tracing from
    the input locations toward the footpoint locations at ref_height.
    The cross product of these two vectors is taken to define the plane of
    the magnetic field. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector with the field-aligned direction is removed (optional).
    The meridional unit vector is defined via the cross product of the
    zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # map longitudes into [0, 360) (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = []
    north_y = []
    north_z = []
    south_x = []
    south_y = []
    south_z = []
    bn = []
    be = []
    bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # trace toward the northern and southern footpoints at ref_height
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year + 1, 1, 1) - datetime.datetime(time.year, 1, 1)).days
        date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn)
        be.append(tbe)
        bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # calculate unit vector from satellite to northern footpoint
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    # calculate unit vector from satellite to southern footpoint
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic (field-aligned) unit vector; IGRF components are
    # north/east/down, converted through ENU to ECEF at geocentric location
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # take cross product of southward and northward vectors to get the zonal vector
    # (unused alternative calculations via the field vector were removed)
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # normalize the vectors
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    # calculate zonal vector
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    if filter_zonal:
        # NOTE: this filtering previously ran unconditionally and the
        # documented filter_zonal keyword was ignored; it is now honored.
        # The default (True) preserves prior behavior.
        # remove any field aligned component from the zonal vector
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # compute meridional vector
    # cross product of zonal and magnetic unit vector
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # return unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculting the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form the
    last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        if 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards field line.
    field_step_size : float
        Step size (km) used to produce the input field line trace; sets the
        span of the high resolution re-trace on the first iteration.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.
    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean
    repeat=True
    # first run boolean
    first=True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # move total distance 'scalar' from pos, split into unit_steps pieces
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            # # reduce number of elements we really need to check
            # field_copy = field_copy[min_idx-100:min_idx+100]
            # # difference with position
            # diff = field_copy - pos_step
            # diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # # find closest one
            # min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            # (min_dist is a numpy scalar here, hence .copy())
            last_min_dist = min_dist.copy()
    # return magnitude of step
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not 'meridional', 'zonal', or 'aligned'.
    """
    if direction not in ('meridional', 'zonal', 'aligned'):
        # previously an unsupported direction surfaced as an opaque NameError
        # when the unit vector variables were first referenced; fail fast
        raise ValueError("direction must be one of 'meridional', 'zonal', "
                         "or 'aligned'")
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF; convert to geodetic for the unit vector routine
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions at the current location
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
            [lat], [lon], [alt], [date],
            steps=field_steps,
            max_steps=field_max_steps,
            step_size=field_step_size,
            ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        elif direction == 'aligned':
            ux, uy, uz = bx, by, bz
        # take a step along the selected direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (array, array, array, array, array, array)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)
    """
    # use input location and convert to ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare parameters for field line trace
    step_size = 100.
    max_steps = 1000
    steps = np.arange(max_steps)
    # high resolution trace parameters; fine trace spans one coarse step
    # in each direction around the identified maximum
    fine_step_size = .01
    fine_max_steps = int(step_size/fine_step_size)+10
    fine_steps = np.arange(fine_max_steps)
    # prepare output
    out_x = []
    out_y = []
    out_z = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # to get the apex location we need to do a field line trace
        # then find the highest point
        trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
                                steps=steps,
                                step_size=step_size,
                                max_steps=max_steps)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # repeat using a high resolution trace one big step size each
        # direction around identified max
        # recurse False ensures only max_steps are taken
        trace = full_field_line(trace[max_idx,:], date, 0.,
                                steps=fine_steps,
                                step_size=fine_step_size,
                                max_steps=fine_max_steps,
                                recurse=False)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # collect outputs
        out_x.append(trace[max_idx,0])
        out_y.append(trace[max_idx,1])
        out_z.append(trace[max_idx,2])
    out_x = np.array(out_x)
    out_y = np.array(out_y)
    out_z = np.array(out_z)
    # express apex locations in geodetic coordinates as well
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and goes through footpoint. At footpoint, steps along vector direction
    in both positive and negative directions, then traces back to opposite
    footpoint. Back at input location, steps toward those new field lines
    (edge_length) along vector direction until hitting distance of minimum
    approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest approach.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.
    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # map direction string onto the field_line_trace direction argument
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closet approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        # geodetic footpoint location (currently unused downstream)
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through postive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input location.

    Using the input location, the apex location is calculated. Also from the input
    location, a step along both the positive and negative
    vector_directions is taken, and the apex locations for those points are calculated.
    The difference in position between these apex locations is the total centered
    distance between magnetic field lines at the magnetic apex when starting
    locally with a field line half distance of edge_length.

    An alternative method has been implemented, then commented out.
    This technique takes multiple steps from the origin apex towards the apex
    locations identified along vector_direction. In principle this is more accurate
    but more computationally intensive, similar to the footpoint model.
    A comparison is planned.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations.
    """
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    # outputs for alternative calculation (currently unused, see below)
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # get location of apex for s/c field line
        apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
            [glat], [glon],
            [alt], [date])
        # apex in ecef (maps to input location)
        apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
        # take step from s/c along + vector direction
        # then get the apex location
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # plus apex location in ECEF
        plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
        # take half step from s/c along - vector direction
        # then get the apex location
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
        # take difference in apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
                                        (plus_apex_y[0]-minus_apex_y[0])**2 +
                                        (plus_apex_z[0]-minus_apex_z[0])**2))
        # # take an alternative path to calculation
        # # do field line trace around pos and neg apexes
        # # then do intersection with field line projection thing
        #
        # # do a short centered field line trace around plus apex location
        # other_trace = full_field_line(plus_apex_root, double_date, 0.,
        #                                 step_size=1.,
        #                                 max_steps=10,
        #                                 recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # pos_edge_length, _, mind_pos = step_until_intersect(apex_root,
        #                             other_trace,
        #                             1, date,
        #                             direction=vector_direction,
        #                             field_step_size=1.,
        #                             step_size_goal=edge_length/edge_steps)
        # # do a short centered field line trace around 'minus' apex location
        # other_trace = full_field_line(minus_apex_root, double_date, 0.,
        #                                 step_size=1.,
        #                                 max_steps=10,
        #                                 recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # minus_edge_length, _, mind_minus = step_until_intersect(apex_root,
        #                             other_trace,
        #                             -1, date,
        #                             direction=vector_direction,
        #                             field_step_size=1.,
        #                             step_size_goal=edge_length/edge_steps)
        # full_local_step.append(pos_edge_length + minus_edge_length)
        # min_distance_plus.append(mind_pos)
        # min_distance_minus.append(mind_minus)
    # still sorting out alternative option for this calculation
    # commented code is 'good' as far as the plan goes
    # takes more time, so I haven't tested one vs the other yet
    # having two live methods can lead to problems
    # THIS IS A TODO (sort it out)
    return np.array(apex_edge_length)#, np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
    # # take step from one apex towards the other
    # apex_path = step_along_mag_unit_vector(minus_apex_x, minus_apex_y, minus_apex_z, date,
    #                                 direction=vector_direction,
    #                                 num_steps=edge_steps,
    #                                 step_size=apex_edge_length[-1]/(edge_steps*2.))
    # pos_apex_diff.append((apex_path[0] - plus_apex_x)**2 +
    #                      (apex_path[1] - plus_apex_y)**2 +
    #                      (apex_path[2] - plus_apex_z)**2)
    # return apex_edge_length, path_apex_diff
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.
    All inputs are assumed to be 1D arrays.
    Note
    ----
    Directions refer to the ion motion direction e.g. the zonal
    scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)
    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size used for the field line traces. Defaults to 100 km.
    max_steps : int
        Maximum number of steps per field line trace. Defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.
    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.
    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    # NOTE: per the docstring Note, a zonal ion drift corresponds to a
    # meridional electric field, hence 'meridional' is passed here for the
    # zonal drift scalar (and 'zonal' below for the meridional drift scalar)
    # print ('Starting Northern')
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # print ('Starting Southern')
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # print ('Starting Equatorial')
    # , step_zon_apex2, mind_plus, mind_minus
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                'meridional',
                                                edge_length=25.,
                                                edge_steps=5)
    # , step_mer_apex2, mind_plus, mind_minus
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                'zonal',
                                                edge_length=25.,
                                                edge_steps=5)
    # print ('Done with core')
    # edge_length=25. km on each side of the starting point gives a 50 km
    # baseline; normalize the mapped edge lengths by it to form scalars
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
    if e_field_scaling_only:
        # prepare output
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            # decimal year required by the IGRF12 routine
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                                    [glat], [glon],
                                                                    [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # scalar for the northern footpoint electric field based on distances
            # for drift also need to include the magnetic field, drift = E/B
            # b_sc is total field magnitude at the s/c location (nT)
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | normalize_vector | python | def normalize_vector(x, y, z):
mag = np.sqrt(x**2 + y**2 + z**2)
x = x/mag
y = y/mag
z = z/mag
return x, y, z | Normalizes vector to produce a unit vector.
Parameters
----------
x : float or array-like
X component of vector
y : float or array-like
Y component of vector
z : float or array-like
Z component of vector
Returns
-------
x, y, z
Unit vector x,y,z components | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L279-L303 | null |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geoncentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF
    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.
    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km
    """
    # convert angles once, up front
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # radial distance from Earth's center
    radial = earth_geo_radius + altitude
    # standard spherical to Cartesian transformation
    x = radial * np.cos(lat_rad) * np.cos(lon_rad)
    y = radial * np.cos(lat_rad) * np.sin(lon_rad)
    z = radial * np.sin(lat_rad)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates
    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km
    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km
    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude from the polar angle, longitude from the x-y plane
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    longitude = np.rad2deg(np.arctan2(y, x))
    return latitude, longitude, radius - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF
    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.
    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km
    """
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # first eccentricity of the WGS84 ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(lat_rad) ** 2)
    # distance from the rotation axis in the equatorial plane
    x = (r_n + altitude) * np.cos(lat_rad) * np.cos(lon_rad)
    y = (r_n + altitude) * np.cos(lat_rad) * np.sin(lon_rad)
    # z uses the reduced radius of curvature along the polar axis
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(lat_rad)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates
    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).
    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km
    Raises
    ------
    ValueError
        If method is not 'closed' or 'iterative'. (Previously an unknown
        method fell through both branches and produced an opaque NameError.)
    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'
    # validate early so bad input fails with a clear message rather than a
    # NameError at the return statement below
    if method not in ('closed', 'iterative'):
        raise ValueError("method must be 'closed' or 'iterative', got " + repr(method))
    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3
    # longitude is exact in both methods
    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)
    # closed form solution
    # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
    if method == 'closed':
        # second eccentricity (Bowring's formulation)
        e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
        theta = np.arctan2(z*earth_a, p*earth_b)
        latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
    # another possibility
    # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf
    # iterative method
    # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
    if method == 'iterative':
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
        # fixed number of iterations; converges quickly for terrestrial points
        for i in np.arange(6):
            r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n
    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF
    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.
    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)
    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions
    """
    # angles to radians, trig evaluated a single time
    lat = np.radians(glat)
    lon = np.radians(glong)
    sin_lat = np.sin(lat)
    cos_lat = np.cos(lat)
    sin_lon = np.sin(lon)
    cos_lon = np.cos(lon)
    # rotate the local ENU triad into the Earth-fixed frame
    x = -east * sin_lon - north * cos_lon * sin_lat + up * cos_lon * cos_lat
    y = east * cos_lon - north * sin_lon * sin_lat + up * sin_lon * cos_lat
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up
    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.
    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)
    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions
    """
    # angles to radians, trig evaluated a single time
    lat = np.radians(glat)
    lon = np.radians(glong)
    sin_lat = np.sin(lat)
    cos_lat = np.cos(lat)
    sin_lon = np.sin(lon)
    cos_lon = np.cos(lon)
    # inverse rotation of the ENU->ECEF transform
    east = -x * sin_lon + y * cos_lon
    north = -x * cos_lon * sin_lat - y * sin_lon * sin_lat + z * cos_lat
    up = x * cos_lon * cos_lat + y * sin_lon * cos_lat + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ECEF onto different basis, with components also expressed in ECEF
    Parameters
    ----------
    x, y, z : float or array-like
        ECEF X, Y, and Z components of the vector to project
    xx, xy, xz : float or array-like
        ECEF components of the x unit vector of the new basis
    yx, yy, yz : float or array-like
        ECEF components of the y unit vector of the new basis
    zx, zy, zz : float or array-like
        ECEF components of the z unit vector of the new basis
    Returns
    -------
    out_x, out_y, out_z
        Vector components along the new basis directions (dot products of the
        input vector with each basis unit vector)
    """
    # each output component is the dot product with one basis row
    basis_rows = ((xx, xy, xz), (yx, yy, yz), (zx, zy, zz))
    projections = [x * bx + y * by + z * bz for bx, by, bz in basis_rows]
    return projections[0], projections[1], projections[2]
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.
    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector
    Returns
    -------
    x, y, z
        Unit vector x,y,z components
    """
    # Euclidean norm of the input vector
    norm = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # scale each component by the norm
    return x / norm, y / norm, z / norm
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2
    Parameters
    ----------
    x1, y1, z1 : float or array-like
        X, Y, and Z components of vector 1
    x2, y2, z2 : float or array-like
        X, Y, and Z components of vector 2
    Returns
    -------
    x, y, z
        Components of the cross product v1 x v2
    """
    # standard determinant expansion of the cross product
    out_x = y1 * z2 - y2 * z1
    out_y = z1 * x2 - x1 * z2
    out_z = x1 * y2 - y1 * x2
    return out_x, out_y, out_z
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.
    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal counter tracking recursion depth; leave as None when calling.
    recurse : bool
        If True, continue tracing recursively when the target height has
        not yet been reached.
    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).
    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate the field line ODE; igrf.igrf_step supplies the local
    # field direction at each position
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
                                         # mxstep=500)
    # check that we reached final altitude
    check = trace_north[-1, :]
    x, y, z = ecef_to_geodetic(*check)
    # a target height of exactly 0 km is checked against 1 km instead
    if height == 0:
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    # NOTE: '&' is a bitwise operation here; with boolean operands it acts
    # like 'and' but without short-circuit evaluation
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # stitch the recursive continuation onto this trace segment
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        # filter points to terminate at point closest to target height
        # code below not correct, we want the first poiint that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.
    Traces both anti-field-aligned and field-aligned from the starting
    point, then combines the results into one south-to-north trace.
    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.
    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).
    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace in each direction from the starting location
    trace_south = field_line_trace(init, date, -1., height, steps=steps,
                                   step_size=step_size, max_steps=max_steps,
                                   **kwargs)
    trace_north = field_line_trace(init, date, 1., height, steps=steps,
                                   step_size=step_size, max_steps=max_steps,
                                   **kwargs)
    # reverse the southern segment so combined points run south to north,
    # dropping its copy of the shared starting point
    southern_leg = trace_south[::-1][:-1, :]
    return np.vstack((southern_leg, trace_north))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.
    Note
    ----
    The zonal vector is calculated by field-line tracing from
    the input locations toward the footpoint locations at ref_height.
    The cross product of these two vectors is taken to define the plane of
    the magnetic field. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector with the field-aligned direction is removed (optional).
    The meridional unit vector is defined via the cross product of the
    zonal and field-aligned directions.
    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Steps along field line where positions are reported; defaults to
        np.arange(max_steps)
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.
    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # filter longitudes (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = [];
    north_y = [];
    north_z = []
    south_x = [];
    south_y = [];
    south_z = []
    bn = [];
    be = [];
    bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # date = inst.yr + inst.doy / 366.
        # trace = full_field_line(init, time, ref_height, step_size=step_size,
        #                           max_steps=max_steps,
        #                           steps=steps)
        # trace along the field to the footpoint at ref_height in each direction
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year+1,1,1) - datetime.datetime(time.year,1,1)).days
        date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn);
        be.append(tbe);
        bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # calculate vector from satellite to northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic unit vector
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # take cross product of southward and northward vectors to get the zonal vector
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # getting zonal vector utilizing magnetic field vector instead
    zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z,
                                                    bx, by, bz)
    # getting zonal vector utilizing magnetic field vector instead and southern point
    zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z,
                                                    bx, by, bz)
    # normalize the vectors
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    # calculate zonal vector
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    # BUG FIX: the filter_zonal flag was previously ignored and the
    # field-aligned component was always removed; honor the flag as documented
    if filter_zonal:
        # remove any field aligned component to the zonal vector
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # compute meridional vector
    # cross product of zonal and magnetic unit vector
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # add unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.
    Routine is used when calculting the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form the
    last leg when trying to trace out a closed field line loop.
    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.
    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        if 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards field line.
    field_step_size : float
        Step size (km) used when producing the input field_line trace; sets
        the length of the high resolution re-trace below.
        NOTE(review): defaults to None, and None would raise a TypeError at
        the int(field_step_size/.01) call — callers appear expected to
        always supply it.
    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.
    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean
    repeat=True
    # first run boolean
    first=True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # print (unit_steps, scalar/unit_steps)
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            # # reduce number of elements we really need to check
            # field_copy = field_copy[min_idx-100:min_idx+100]
            # # difference with position
            # diff = field_copy - pos_step
            # diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # # find closest one
            # min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.
    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.
    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.
    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.
    Raises
    ------
    ValueError
        If direction is not 'meridional', 'zonal', or 'aligned'. (Previously
        an unknown direction produced an opaque NameError mid-loop.)
    """
    # validate before doing any expensive tracing work
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be 'meridional', 'zonal', or 'aligned', "
                         "got " + repr(direction))
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF
        # convert to geodetic
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
                [lat], [lon], [alt], [date],
                steps=field_steps,
                max_steps=field_max_steps,
                step_size=field_step_size,
                ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        elif direction == 'aligned':
            ux, uy, uz = bx, by, bz
        # take steps along direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (0.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (array, array, array, array, array, array)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)

    """
    # use input location and convert to ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare parameters for field line trace
    step_size = 100.
    max_steps = 1000
    steps = np.arange(max_steps)
    # high resolution trace parameters; the fine trace only needs to cover
    # roughly one coarse step in each direction around the coarse maximum
    fine_step_size = .01
    fine_max_steps = int(step_size/fine_step_size)+10
    fine_steps = np.arange(fine_max_steps)
    # prepare output
    out_x = []
    out_y = []
    out_z = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # to get the apex location we need to do a field line trace
        # then find the highest point
        trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
                                steps=steps,
                                step_size=step_size,
                                max_steps=max_steps)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # repeat using a high resolution trace one big step size each
        # direction around identified max
        # recurse False ensures only max_steps are taken
        trace = full_field_line(trace[max_idx,:], date, 0.,
                                steps=fine_steps,
                                step_size=fine_step_size,
                                max_steps=fine_max_steps,
                                recurse=False)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # collect outputs
        out_x.append(trace[max_idx,0])
        out_y.append(trace[max_idx,1])
        out_z.append(trace[max_idx,2])
    # lists to arrays; also express the apex locations geodetically
    out_x = np.array(out_x)
    out_y = np.array(out_y)
    out_z = np.array(out_z)
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)

    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and going through a footpoint. At the footpoint, steps along vector
    direction in both positive and negative directions, then traces back to
    the opposite footpoint. Back at the input location, steps toward those new
    field lines (edge_length) along vector direction until hitting the distance
    of minimum approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest approach.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # NOTE(review): an unrecognized `direction` value leaves `direct`
    # unbound and raises a NameError below — consider validating input
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form closed loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closest approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        # fractional year for the IGRF routines (fixed 366-day divisor)
        double_date = float(yr) + float(doy) / 366.
        # print (glat, glon, alt)
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through positive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)

    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input location.

    Using the input location, the apex location is calculated. Also from the input
    location, a step along both the positive and negative
    vector_directions is taken, and the apex locations for those points are calculated.
    The difference in position between these apex locations is the total centered
    distance between magnetic field lines at the magnetic apex when starting
    locally with a field line half distance of edge_length.

    An alternative method has been implemented, then commented out.
    This technique takes multiple steps from the origin apex towards the apex
    locations identified along vector_direction. In principle this is more accurate
    but more computationally intensive, similar to the footpoint model.
    A comparison is planned.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from input location towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations.

    """
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    # outputs for alternative calculation (currently unused; see
    # commented-out code below)
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # fractional year for the IGRF routines; only consumed by the
        # commented-out alternative calculation below
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # get location of apex for s/c field line
        apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                        [glat], [glon],
                        [alt], [date])
        # apex in ecef (maps to input location)
        apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
        # take step from s/c along + vector direction
        # then get the apex location
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
                    apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # plus apex location in ECEF
        plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
        # take half step from s/c along - vector direction
        # then get the apex location
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
                    apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
        # take difference in apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
                                        (plus_apex_y[0]-minus_apex_y[0])**2 +
                                        (plus_apex_z[0]-minus_apex_z[0])**2))

        # # take an alternative path to calculation
        # # do field line trace around pos and neg apexes
        # # then do intersection with field line projection thing
        #
        # # do a short centered field line trace around plus apex location
        # other_trace = full_field_line(plus_apex_root, double_date, 0.,
        #                               step_size=1.,
        #                               max_steps=10,
        #                               recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # pos_edge_length, _, mind_pos = step_until_intersect(apex_root,
        #                             other_trace,
        #                             1, date,
        #                             direction=vector_direction,
        #                             field_step_size=1.,
        #                             step_size_goal=edge_length/edge_steps)
        # # do a short centered field line trace around 'minus' apex location
        # other_trace = full_field_line(minus_apex_root, double_date, 0.,
        #                               step_size=1.,
        #                               max_steps=10,
        #                               recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # minus_edge_length, _, mind_minus = step_until_intersect(apex_root,
        #                             other_trace,
        #                             -1, date,
        #                             direction=vector_direction,
        #                             field_step_size=1.,
        #                             step_size_goal=edge_length/edge_steps)
        # full_local_step.append(pos_edge_length + minus_edge_length)
        # min_distance_plus.append(mind_pos)
        # min_distance_minus.append(mind_minus)

    # still sorting out alternative option for this calculation
    # commented code is 'good' as far as the plan goes
    # takes more time, so I haven't tested one vs the other yet
    # having two live methods can lead to problems
    # THIS IS A TODO (sort it out)
    return np.array(apex_edge_length)#, np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)

    # # take step from one apex towards the other
    # apex_path = step_along_mag_unit_vector(minus_apex_x, minus_apex_y, minus_apex_z, date,
    #                                        direction=vector_direction,
    #                                        num_steps=edge_steps,
    #                                        step_size=apex_edge_length[-1]/(edge_steps*2.))
    # pos_apex_diff.append((apex_path[0] - plus_apex_x)**2 +
    #                      (apex_path[1] - plus_apex_y)**2 +
    #                      (apex_path[2] - plus_apex_z)**2)
    # return apex_edge_length, path_apex_diff
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
    Directions refer to the ion motion direction e.g. the zonal
    scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size used for field line integration. Defaults to 100 km.
    max_steps : int
        Number of steps taken for field line integration. Defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    # print ('Starting Northern')
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)

    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # print ('Starting Southern')
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)

    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # print ('Starting Equatorial')
    # , step_zon_apex2, mind_plus, mind_minus
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                 'meridional',
                                                 edge_length=25.,
                                                 edge_steps=5)
    # , step_mer_apex2, mind_plus, mind_minus
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                 'zonal',
                                                 edge_length=25.,
                                                 edge_steps=5)
    # print ('Done with core')
    # normalize by the 50 km total edge length (2 x edge_length of 25 km)
    # used at the footpoints
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial scalars are inverted: distance at the apex relative to the
    # 50 km total edge length at the input location
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar

    if e_field_scaling_only:
        # prepare output
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            yr, doy = pysat.utils.getyrdoy(date)
            # fractional year for IGRF (fixed 366-day divisor)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                                [glat], [glon],
                                                                [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # scalar for the northern footpoint electric field based on distances
            # for drift also need to include the magnetic field, drift = E/B
            # field magnitude at s/c location; colatitude/longitude in radians
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar

    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | cross_product | python | def cross_product(x1, y1, z1, x2, y2, z2):
x = y1*z2 - y2*z1
y = z1*x2 - x1*z2
z = x1*y2 - y1*x2
return x, y, z | Cross product of two vectors, v1 x v2
Parameters
----------
x1 : float or array-like
X component of vector 1
y1 : float or array-like
Y component of vector 1
z1 : float or array-like
Z component of vector 1
x2 : float or array-like
X component of vector 2
y2 : float or array-like
Y component of vector 2
z2 : float or array-like
Z component of vector 2
Returns
-------
x, y, z
Cross product vector x, y, z components
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geocentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    # work in radians, computing angles only once
    rlat = np.deg2rad(latitude)
    rlon = np.deg2rad(longitude)
    # distance from the center of the (spherical) Earth
    r = earth_geo_radius + altitude
    # standard spherical-to-Cartesian conversion
    x = r * np.cos(rlat) * np.cos(rlon)
    y = r * np.cos(rlat) * np.sin(rlon)
    z = r * np.sin(rlat)

    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # geocentric distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude is 90 degrees minus the colatitude from the z-axis
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    longitude = np.rad2deg(np.arctan2(y, x))

    return latitude, longitude, radius - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    # convert angles to radians once
    rlat = np.deg2rad(latitude)
    rlon = np.deg2rad(longitude)
    # first eccentricity of the WGS84 reference ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(rlat) ** 2)
    x = (r_n + altitude) * np.cos(rlat) * np.cos(rlon)
    y = (r_n + altitude) * np.cos(rlat) * np.sin(rlon)
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(rlat)

    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If method is not 'closed' or 'iterative'.

    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html

    method = method or 'closed'
    # previously an unknown method string fell through both branches and
    # raised UnboundLocalError at return; fail fast with a clear message
    if method not in ('closed', 'iterative'):
        raise ValueError("method must be 'closed' or 'iterative'.")

    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3

    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)

    # closed form solution
    # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
    if method == 'closed':
        e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
        theta = np.arctan2(z*earth_a, p*earth_b)
        latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n

    # another possibility
    # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf

    # iterative method
    # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
    if method == 'iterative':
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
        for i in np.arange(6):
            # refine latitude estimate using the current height guess
            r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n

    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    # trig terms of the rotation, evaluated once
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    sin_lat = np.sin(rlat)
    cos_lat = np.cos(rlat)
    sin_lon = np.sin(rlon)
    cos_lon = np.cos(rlon)
    # rotate the local ENU components into the ECEF frame
    x = -east*sin_lon - north*cos_lon*sin_lat + up*cos_lon*cos_lat
    y = east*cos_lon - north*sin_lon*sin_lat + up*sin_lon*cos_lat
    z = north*cos_lat + up*sin_lat

    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions

    """
    # trig terms of the rotation, evaluated once
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    sin_lat = np.sin(rlat)
    cos_lat = np.cos(rlat)
    sin_lon = np.sin(rlon)
    cos_lon = np.cos(rlon)
    # rotate ECEF components into the local ENU frame
    east = -x*sin_lon + y*cos_lon
    north = -x*cos_lon*sin_lat - y*sin_lon*sin_lat + z*cos_lat
    up = x*cos_lon*cos_lat + y*sin_lon*cos_lat + z*sin_lat

    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ecef onto different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF X, Y, Z components of the x unit vector of new basis
    yx, yy, yz : float or array-like
        ECEF X, Y, Z components of the y unit vector of new basis
    zx, zy, zz : float or array-like
        ECEF X, Y, Z components of the z unit vector of new basis

    Returns
    -------
    out_x, out_y, out_z
        Vector components along the x, y, and z directions of the new basis

    """
    # each output component is the dot product of the input vector with
    # the corresponding basis vector
    basis = ((xx, xy, xz), (yx, yy, yz), (zx, zy, zz))
    out_x, out_y, out_z = (x*bx + y*by + z*bz for bx, by, bz in basis)

    return out_x, out_y, out_z
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components

    """
    # scale each component by the vector magnitude
    magnitude = np.sqrt(x**2 + y**2 + z**2)

    return x/magnitude, y/magnitude, z/magnitude
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of the cross product vector (not normalized)

    """
    # standard component-wise determinant expansion of v1 x v2
    cx = y1*z2 - y2*z1
    cy = z1*x2 - x1*z2
    cz = x1*y2 - y1*x2

    return cx, cy, cz
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal counter tracking recursion depth while trying to reach the
        target height. Leave as None when calling directly.
    recurse : bool
        If True, continue the trace recursively from the last point reached
        when the target height has not yet been attained.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If the target altitude is not reached within 1000 recursive calls.

    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.

    # integrate the IGRF field-aligned step function from the initial position
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
                                         # mxstep=500)

    # check that we reached final altitude
    check = trace_north[-1, :]
    x, y, z = ecef_to_geodetic(*check)
    # a target height of exactly 0 would make the relative tolerance test
    # below always true (0 * 1.000001 == 0); use 1 km as the check height
    if height == 0:
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        # filter points to terminate at point closest to target height
        # code below not correct, we want the first point that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Trace a complete field line, south and north, through ``init``.

    Uses IGRF and scipy.integrate.odeint via :func:`field_line_trace`.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Steps along the field line at which positions are reported.
        Defaults to each step, np.arange(max_steps). Two traces are made,
        one north and one south, so the output may contain up to double
        max_steps points, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for the southern footpoint,
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).
    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace each hemisphere separately, then stitch the legs together
    southern_leg = field_line_trace(init, date, -1., height,
                                    steps=steps,
                                    step_size=step_size,
                                    max_steps=max_steps,
                                    **kwargs)
    northern_leg = field_line_trace(init, date, 1., height,
                                    steps=steps,
                                    step_size=step_size,
                                    max_steps=max_steps,
                                    **kwargs)
    # combined trace runs south to north; reverse the southern leg and
    # drop its copy of the starting point so it isn't included twice
    return np.vstack((southern_leg[::-1][:-1, :], northern_leg))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
    The zonal vector is calculated via the cross product of unit vectors
    pointing from the input location toward the northern and southern
    footpoint locations at ref_height. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector along the field-aligned direction is removed (optional,
    controlled by filter_zonal). The meridional unit vector is defined via
    the cross product of the zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Steps at which field line trace positions are reported.
        Defaults to np.arange(max_steps).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
        Zonal, field-aligned, and meridional unit vector components (ECEF)
    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates, used as IGRF inputs below
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # shift longitudes into [0, 360) (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = []; north_y = []; north_z = []
    south_x = []; south_y = []; south_z = []
    bn = []; be = []; bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt,
                                                np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long),
                                                datetimes):
        init = np.array([x, y, z])
        # trace to the northern and southern footpoints at ref_height
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # keep only the final location of each trace
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to decimal year, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year + 1, 1, 1) -
                        datetime.datetime(time.year, 1, 1)).days
        date = time.year + float(doy)/float(num_doy_year) + \
            (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn); be.append(tbe); bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # unit vectors from satellite toward northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # magnetic (field-aligned) unit vector; IGRF components are
    # north/east/down, converted here to ECEF (down maps to -up)
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # cross product of southward and northward vectors gives the zonal vector
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # normalize to produce the zonal unit vector
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    if filter_zonal:
        # remove any field aligned component from the zonal vector
        # (previously this was done unconditionally, ignoring filter_zonal)
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # compute meridional vector as the cross product of the zonal and
    # magnetic unit vectors
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # unit vectors for magnetic drifts in ECEF coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form
    the last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        If 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        Step size goal that method will try to match when stepping towards
        the field line (km).
    field_step_size : float
        Step size (km) used to produce the input field line trace; used to
        size the high resolution re-trace around the closest point.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.
    """
    # NOTE(review): despite the comment below, this is a reference, not a
    # copy; field_copy is rebound to a new trace on the first iteration so
    # the input is not mutated.
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the total distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean, controls the search loop
    repeat=True
    # first run boolean, triggers the one-time high resolution re-trace
    first=True
    # factor is a divisor applied to the remaining distance between point and
    # field line. Steps are taken slowly towards the field line to avoid
    # overshoot; each time the minimum distance increases, step back,
    # increase factor (reducing the next step size), then try again.
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop: replace the coarse trace with a
            # high resolution (.01 km) field line trace centered on the
            # closest point found so far, one field step in each direction
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position against the refined trace
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing:
        # if better, add more distance to the total step and recheck
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new, smaller increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step, final position, and closest approach distance
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not one of 'meridional', 'zonal', or 'aligned'.
    """
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF; convert to geodetic for unit vector routine
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get local magnetic unit vector directions (ECEF)
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
            [lat], [lon], [alt], [date],
            steps=field_steps,
            max_steps=field_max_steps,
            step_size=field_step_size,
            ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        elif direction == 'aligned':
            ux, uy, uz = bx, by, bz
        else:
            # fail loudly with a clear message; previously an unsupported
            # direction produced a NameError on the lines below
            raise ValueError("Unsupported direction: " + str(direction) +
                             ". Expected 'meridional', 'zonal', or 'aligned'.")
        # take a step along the chosen direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of apex locations

    Returns
    -------
    (array, array, array, array, array, array)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)
    """
    # translate geodetic inputs into ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # coarse field line trace parameters
    coarse_step = 100.
    coarse_max = 1000
    coarse_steps = np.arange(coarse_max)
    # high resolution trace parameters
    fine_step = .01
    fine_max = int(coarse_step/fine_step) + 10
    fine_steps = np.arange(fine_max)
    # accumulators for apex positions
    apex_xs = []
    apex_ys = []
    apex_zs = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys,
                                                             ecef_zs, glats,
                                                             glons, alts,
                                                             dates):
        # stage one: broad trace over the whole field line
        coarse_trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]),
                                       date, 0.,
                                       steps=coarse_steps,
                                       step_size=coarse_step,
                                       max_steps=coarse_max)
        # apex candidate is the point of greatest geodetic height
        _, _, heights = ecef_to_geodetic(coarse_trace[:, 0],
                                         coarse_trace[:, 1],
                                         coarse_trace[:, 2])
        peak = np.argmax(heights)
        # stage two: high resolution trace centered on the candidate,
        # covering one coarse step in each direction; recurse=False
        # keeps the trace limited to exactly fine_max steps
        fine_trace = full_field_line(coarse_trace[peak, :], date, 0.,
                                     steps=fine_steps,
                                     step_size=fine_step,
                                     max_steps=fine_max,
                                     recurse=False)
        _, _, heights = ecef_to_geodetic(fine_trace[:, 0],
                                         fine_trace[:, 1],
                                         fine_trace[:, 2])
        peak = np.argmax(heights)
        # record refined apex location
        apex_xs.append(fine_trace[peak, 0])
        apex_ys.append(fine_trace[peak, 1])
        apex_zs.append(fine_trace[peak, 2])
    out_x = np.array(apex_xs)
    out_y = np.array(apex_ys)
    out_z = np.array(apex_zs)
    # also report the apex in geodetic coordinates
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and going through footpoint. At footpoint, steps along vector
    direction in both positive and negative directions, then traces back to
    opposite footpoint. Back at input location, steps toward those new field
    lines (edge_length) along vector direction until hitting distance of
    minimum approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest
    approach.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.
    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # map direction name to field line trace sign
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # Form closed loops via field line integration: start at location of
        # interest, map down to northern or southern footpoint, take symmetric
        # steps along the vector direction, trace back, then step along the
        # same direction from the location of interest until hitting the
        # distance of closest approach to the return field line. With the
        # known footpoint step distances and the closest approach distance
        # the scalar mapping of one location to another can be determined.
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location (final point of the trace)
        ftpnt = trace[-1, :]
        # footpoint in geodetic coordinates (not used below; kept for clarity)
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # determine the intersection (distance of closest approach) of the
        # field line coming back through the positive step, relative to the
        # vector direction from the s/c location
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # same for the field line coming back through the negative step
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input
    location.

    Using the input location, the apex location is calculated. Also from the
    input location, a step along both the positive and negative
    vector_directions is taken, and the apex locations for those points are
    calculated. The difference in position between these apex locations is the
    total centered distance between magnetic field lines at the magnetic apex
    when starting locally with a field line half distance of edge_length.

    An alternative method has been implemented, then commented out.
    This technique takes multiple steps from the origin apex towards the apex
    locations identified along vector_direction. In principle this is more
    accurate but more computationally intensive, similar to the footpoint
    model. A comparison is planned.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from input location towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations.
    """
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    # outputs for the commented-out alternative calculation (unused)
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # decimal year form of date, used by the alternative method below
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # get location of apex for s/c field line
        apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
            [glat], [glon],
            [alt], [date])
        # apex in ecef (maps to input location)
        apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
        # take step from s/c along + vector direction
        # then get the apex location
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # plus apex location in ECEF
        plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
        # take step from s/c along - vector direction
        # then get the apex location
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
        # take difference in apex locations (straight-line ECEF distance)
        apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
                                        (plus_apex_y[0]-minus_apex_y[0])**2 +
                                        (plus_apex_z[0]-minus_apex_z[0])**2))
        # NOTE(review): an alternative calculation was previously sketched
        # here (commented out): short centered field line traces around the
        # plus/minus apex locations (full_field_line with step_size=1.,
        # max_steps=10, recurse=False), followed by step_until_intersect from
        # apex_root toward each trace, accumulating full_local_step and the
        # min_distance_* lists above. It is more computationally intensive
        # and has not been benchmarked against the current method.
        # TODO: sort out which of the two methods to keep.
    return np.array(apex_edge_length)
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
    Directions refer to the ion motion direction e.g. the zonal
    scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size used when field line tracing. Defaults to 100 km.
    max_steps : int
        Maximum number of steps per field line trace. Defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.

    """

    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)

    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)

    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    # NOTE: a 'meridional' edge displacement maps to the zonal drift scalar
    # (and vice versa) because drifts are orthogonal to the E field, v = ExB.
    # Edge length of 25 km is taken on each side, a 50 km total baseline,
    # which the closed-loop distances below are normalized against.
    # print ('Starting Northern')
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                            glons, alts, dates, 'north',
                                                            'meridional',
                                                            step_size=step_size,
                                                            max_steps=max_steps,
                                                            edge_length=25.,
                                                            edge_steps=5)
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                            glons, alts, dates, 'north',
                                                            'zonal',
                                                            step_size=step_size,
                                                            max_steps=max_steps,
                                                            edge_length=25.,
                                                            edge_steps=5)
    # print ('Starting Southern')
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                            glons, alts, dates, 'south',
                                                            'meridional',
                                                            step_size=step_size,
                                                            max_steps=max_steps,
                                                            edge_length=25.,
                                                            edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                            glons, alts, dates, 'south',
                                                            'zonal',
                                                            step_size=step_size,
                                                            max_steps=max_steps,
                                                            edge_length=25.,
                                                            edge_steps=5)
    # print ('Starting Equatorial')
    # , step_zon_apex2, mind_plus, mind_minus
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'meridional',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # , step_mer_apex2, mind_plus, mind_minus
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'zonal',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # print ('Done with core')
    # normalize footpoint distances by the 50 km baseline at the s/c
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial scalar is inverted relative to footpoint scalars; mapping
    # is from the s/c baseline down to the apex distance
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar

    if e_field_scaling_only:
        # prepare output
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                                        [glat], [glon],
                                                                        [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # scalar for the northern footpoint electric field based on distances
            # for drift also need to include the magnetic field, drift = E/B
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar

    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | field_line_trace | python | def field_line_trace(init, date, direction, height, steps=None,
max_steps=1E4, step_size=10., recursive_loop_count=None,
recurse=True):
if recursive_loop_count is None:
recursive_loop_count = 0
#
if steps is None:
steps = np.arange(max_steps)
if not isinstance(date, float):
# recast from datetime to float, as required by IGRF12 code
doy = (date - datetime.datetime(date.year,1,1)).days
# number of days in year, works for leap years
num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
steps,
args=(date, step_size, direction, height),
full_output=False,
printmessg=False,
ixpr=False) #,
# mxstep=500)
# check that we reached final altitude
check = trace_north[-1, :]
x, y, z = ecef_to_geodetic(*check)
if height == 0:
check_height = 1.
else:
check_height = height
# fortran integration gets close to target height
if recurse & (z > check_height*1.000001):
if (recursive_loop_count < 1000):
# When we have not reached the reference height, call field_line_trace
# again by taking check value as init - recursive call
recursive_loop_count = recursive_loop_count + 1
trace_north1 = field_line_trace(check, date, direction, height,
step_size=step_size,
max_steps=max_steps,
recursive_loop_count=recursive_loop_count,
steps=steps)
else:
raise RuntimeError("After 1000 iterations couldn't reach target altitude")
return np.vstack((trace_north, trace_north1))
else:
# return results if we make it to the target altitude
# filter points to terminate at point closest to target height
# code below not correct, we want the first poiint that goes below target
# height
# code also introduces a variable length return, though I suppose
# that already exists with the recursive functionality
# x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
# idx = np.argmin(np.abs(check_height - z))
return trace_north | Perform field line tracing using IGRF and scipy.integrate.odeint.
Parameters
----------
init : array-like of floats
Position to begin field line tracing from in ECEF (x,y,z) km
date : datetime or float
Date to perform tracing on (year + day/365 + hours/24. + etc.)
Accounts for leap year if datetime provided.
direction : int
1 : field aligned, generally south to north.
-1 : anti-field aligned, generally north to south.
height : float
Altitude to terminate trace, geodetic WGS84 (km)
steps : array-like of ints or floats
Number of steps along field line when field line trace positions should
be reported. By default, each step is reported; steps=np.arange(max_steps).
max_steps : float
Maximum number of steps along field line that should be taken
step_size : float
Distance in km for each large integration step. Multiple substeps
are taken as determined by scipy.integrate.odeint
Returns
-------
numpy array
2D array. [0,:] has the x,y,z location for initial point
[:,0] is the x positions over the integration.
Positions are reported in ECEF (km). | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L336-L423 | [
"def ecef_to_geodetic(x, y, z, method=None):\n \"\"\"Convert ECEF into Geodetic WGS84 coordinates\n\n Parameters\n ----------\n x : float or array_like\n ECEF-X in km\n y : float or array_like\n ECEF-Y in km\n z : float or array_like\n ECEF-Z in km\n method : 'iterative' or 'closed' ('closed' is deafult)\n String selects method of conversion. Closed for mathematical\n solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)\n or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).\n\n Returns\n -------\n latitude, longitude, altitude\n numpy arrays of locations in degrees, degrees, and km\n\n \"\"\"\n\n # quick notes on ECEF to Geodetic transformations \n # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html\n\n method = method or 'closed'\n\n # ellipticity of Earth \n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n # first eccentricity squared\n e2 = ellip ** 2 # 6.6943799901377997E-3\n\n longitude = np.arctan2(y, x)\n # cylindrical radius\n p = np.sqrt(x ** 2 + y ** 2)\n\n # closed form solution\n # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1\n if method == 'closed':\n e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)\n theta = np.arctan2(z*earth_a, p*earth_b)\n latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)\n r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)\n h = p / np.cos(latitude) - r_n\n\n # another possibility\n # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf\n\n ## iterative method\n # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf\n if method == 'iterative':\n latitude = np.arctan2(p, z)\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n for i in np.arange(6):\n # print latitude\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n h = p / np.cos(latitude) - r_n\n latitude = np.arctan(z / p / (1. 
- e2 * (r_n / (r_n + h))))\n # print h\n # final ellipsoidal height update\n h = p / np.cos(latitude) - r_n\n\n return np.rad2deg(latitude), np.rad2deg(longitude), h\n",
"def field_line_trace(init, date, direction, height, steps=None,\n max_steps=1E4, step_size=10., recursive_loop_count=None, \n recurse=True):\n \"\"\"Perform field line tracing using IGRF and scipy.integrate.odeint.\n\n Parameters\n ----------\n init : array-like of floats\n Position to begin field line tracing from in ECEF (x,y,z) km\n date : datetime or float\n Date to perform tracing on (year + day/365 + hours/24. + etc.)\n Accounts for leap year if datetime provided.\n direction : int\n 1 : field aligned, generally south to north. \n -1 : anti-field aligned, generally north to south.\n height : float\n Altitude to terminate trace, geodetic WGS84 (km)\n steps : array-like of ints or floats\n Number of steps along field line when field line trace positions should \n be reported. By default, each step is reported; steps=np.arange(max_steps).\n max_steps : float\n Maximum number of steps along field line that should be taken\n step_size : float\n Distance in km for each large integration step. Multiple substeps\n are taken as determined by scipy.integrate.odeint\n\n Returns\n -------\n numpy array\n 2D array. [0,:] has the x,y,z location for initial point\n [:,0] is the x positions over the integration.\n Positions are reported in ECEF (km).\n\n\n \"\"\"\n\n if recursive_loop_count is None: \n recursive_loop_count = 0\n # \n if steps is None:\n steps = np.arange(max_steps)\n if not isinstance(date, float):\n # recast from datetime to float, as required by IGRF12 code\n doy = (date - datetime.datetime(date.year,1,1)).days\n # number of days in year, works for leap years\n num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days\n date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. 
+ date.second/3600.)/24.\n\n trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),\n steps,\n args=(date, step_size, direction, height),\n full_output=False,\n printmessg=False,\n ixpr=False) #,\n # mxstep=500)\n\n # check that we reached final altitude\n check = trace_north[-1, :]\n x, y, z = ecef_to_geodetic(*check) \n if height == 0:\n check_height = 1.\n else:\n check_height = height\n # fortran integration gets close to target height \n if recurse & (z > check_height*1.000001):\n if (recursive_loop_count < 1000):\n # When we have not reached the reference height, call field_line_trace \n # again by taking check value as init - recursive call\n recursive_loop_count = recursive_loop_count + 1\n trace_north1 = field_line_trace(check, date, direction, height,\n step_size=step_size, \n max_steps=max_steps,\n recursive_loop_count=recursive_loop_count,\n steps=steps)\n else:\n raise RuntimeError(\"After 1000 iterations couldn't reach target altitude\")\n return np.vstack((trace_north, trace_north1))\n else:\n # return results if we make it to the target altitude\n\n # filter points to terminate at point closest to target height\n # code below not correct, we want the first poiint that goes below target\n # height\n # code also introduces a variable length return, though I suppose\n # that already exists with the recursive functionality\n # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2]) \n # idx = np.argmin(np.abs(check_height - z)) \n return trace_north #[:idx+1,:]\n"
] |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geocentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # radial distance from Earth's center, spherical Earth assumed
    radius = earth_geo_radius + altitude
    # project spherical coordinates onto Cartesian ECEF axes
    x = radius * np.cos(lat_r) * np.cos(lon_r)
    y = radius * np.cos(lat_r) * np.sin(lon_r)
    z = radius * np.sin(lat_r)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # radial distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    longitude = np.rad2deg(np.arctan2(y, x))
    # latitude from the colatitude, angle measured off the rotation axis
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    return latitude, longitude, radius - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # first eccentricity of the WGS84 reference ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(lat_r) ** 2)
    # project onto ECEF axes; z uses the reduced polar radius
    x = (r_n + altitude) * np.cos(lat_r) * np.cos(lon_r)
    y = (r_n + altitude) * np.cos(lat_r) * np.sin(lon_r)
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(lat_r)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    # background on ECEF to Geodetic transformations:
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'

    # first eccentricity of Earth ellipsoid and its square
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    e2 = ellip ** 2  # 6.6943799901377997E-3

    longitude = np.arctan2(y, x)
    # distance from the rotation axis (cylindrical radius)
    p = np.sqrt(x ** 2 + y ** 2)

    if method == 'closed':
        # closed form solution
        # http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
        e_prime = np.sqrt((earth_a ** 2 - earth_b ** 2) / earth_b ** 2)
        theta = np.arctan2(z * earth_a, p * earth_b)
        numer = z + e_prime ** 2 * earth_b * np.sin(theta) ** 3
        denom = p - e2 * earth_a * np.cos(theta) ** 3
        latitude = np.arctan2(numer, denom)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n

    # an alternative closed approach:
    # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf

    if method == 'iterative':
        # iterative solution
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        latitude = np.arctan2(p, z)
        # fixed number of refinement passes of the prime vertical radius
        for _ in range(6):
            r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n

    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    # latitude and longitude in radians
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    sin_lat = np.sin(rlat)
    cos_lat = np.cos(rlat)
    sin_lon = np.sin(rlon)
    cos_lon = np.cos(rlon)
    # rotate local ENU components into the ECEF frame
    x = -east * sin_lon - north * cos_lon * sin_lat + up * cos_lon * cos_lat
    y = east * cos_lon - north * sin_lon * sin_lat + up * sin_lon * cos_lat
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions

    """
    # latitude and longitude in radians
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    sin_lat = np.sin(rlat)
    cos_lat = np.cos(rlat)
    sin_lon = np.sin(rlon)
    cos_lon = np.cos(rlon)
    # rotate ECEF components into the local ENU frame
    east = -x * sin_lon + y * cos_lon
    north = -x * cos_lon * sin_lat - y * sin_lon * sin_lat + z * cos_lat
    up = x * cos_lon * cos_lat + y * sin_lon * cos_lat + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ecef onto different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF X, Y, Z components of the x unit vector of new basis
    yx, yy, yz : float or array-like
        ECEF X, Y, Z components of the y unit vector of new basis
    zx, zy, zz : float or array-like
        ECEF X, Y, Z components of the z unit vector of new basis

    Returns
    -------
    out_x, out_y, out_z
        Components of the input vector along each new basis direction,
        each the dot product of the vector with that basis unit vector.

    """
    # dot input vector with each basis unit vector in turn
    proj_x = x * xx + y * xy + z * xz
    proj_y = x * yx + y * yy + z * yz
    proj_z = x * zx + y * zy + z * zz
    return proj_x, proj_y, proj_z
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components

    """
    # Euclidean length; note a zero vector yields a divide-by-zero here,
    # matching the original behavior
    norm = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    return x / norm, y / norm, z / norm
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Cross product x,y,z components

    """
    # standard determinant expansion of v1 x v2
    cross_x = y1 * z2 - y2 * z1
    cross_y = z1 * x2 - x1 * z2
    cross_z = x1 * y2 - y1 * x2
    return cross_x, cross_y, cross_z
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal counter tracking recursion depth; callers should leave
        this as None.
    recurse : bool
        If True, recursively continue the trace when the integration stops
        short of the target height.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """

    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.

    # integrate along the field; igrf.igrf_step supplies the step direction
    # at each position
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
                                         # mxstep=500)

    # check that we reached final altitude
    check = trace_north[-1, :]
    x, y, z = ecef_to_geodetic(*check)
    # use a small positive floor when target height is zero so the relative
    # tolerance test below remains meaningful
    if height == 0:
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    # (bitwise & works here since both operands are booleans)
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude

        # filter points to terminate at point closest to target height
        # code below not correct, we want the first point that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace anti-field-aligned (toward south), then field-aligned (north)
    southward = field_line_trace(init, date, -1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    northward = field_line_trace(init, date, 1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # reverse the southern half so the combined trace runs south to north;
    # drop its last point to avoid duplicating the shared starting location
    return np.vstack((southward[::-1][:-1, :], northward))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
        The zonal vector is calculated by field-line tracing from
        the input locations toward the footpoint locations at ref_height.
        The cross product of these two vectors is taken to define the plane of
        the magnetic field. This vector is not always orthogonal
        with the local field-aligned vector (IGRF), thus any component of the
        zonal vector with the field-aligned direction is removed (optional).
        The meridional unit vector is defined via the cross product of the
        zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Step indices passed to field_line_trace; defaults to np.arange(max_steps)
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
        ECEF components of the zonal, field-aligned, and meridional unit
        vectors, respectively.

    """

    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # filter longitudes (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = [];
    north_y = [];
    north_z = []
    south_x = [];
    south_y = [];
    south_z = []
    bn = [];
    be = [];
    bd = []

    # per-location loop; IGRF wants geocentric colatitude/longitude in radians
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # date = inst.yr + inst.doy / 366.
        # trace = full_field_line(init, time, ref_height, step_size=step_size,
        #                         max_steps=max_steps,
        #                         steps=steps)
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year+1,1,1) - datetime.datetime(time.year,1,1)).days
        date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)

        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])

        bn.append(tbn);
        be.append(tbe);
        bd.append(tbd)

    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)

    # calculate vector from satellite to northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic unit vector
    # IGRF returns north/east/down components; up is -down
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)

    # take cross product of southward and northward vectors to get the zonal vector
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # getting zonal vector utilizing magnetic field vector instead
    zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z,
                                                    bx, by, bz)
    # getting zonal vector utilizing magnetic field vector instead and southern point
    zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z,
                                                    bx, by, bz)
    # normalize the vectors
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)

    # calculate zonal vector
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot

    # remove any field aligned component to the zonal vector
    # NOTE(review): this projection always runs; the filter_zonal parameter is
    # not consulted here — confirm whether that is intended
    dot_fa = zvx * bx + zvy * by + zvz * bz
    zvx -= dot_fa * bx
    zvy -= dot_fa * by
    zvz -= dot_fa * bz
    zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)

    # compute meridional vector
    # cross product of zonal and magnetic unit vector
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # add unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form
    the last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        If 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        Step size (km) that method will try to match when stepping towards
        the field line.
    field_step_size : float
        Step size (km) used when generating field_line; sets the span of the
        high resolution trace made around the point of closest approach.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.

    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the total distance taken along the unit vector line so far
    scalar = 0.
    # loop control; cleared once the solution stops improving
    repeat = True
    # first pass flag; triggers creation of the high resolution trace
    first = True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx, :]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step, final position, and closest-approach distance
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not one of 'meridional', 'zonal', or 'aligned'.

    """
    # validate direction up front; previously an unsupported value fell
    # through the if/elif chain and raised a confusing NameError on the
    # first loop iteration
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be one of 'meridional', 'zonal', "
                         "or 'aligned'; got " + repr(direction))
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF; convert to geodetic for the unit vector routine
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions (zonal, field-aligned, meridional)
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
            [lat], [lon], [alt], [date],
            steps=field_steps,
            max_steps=field_max_steps,
            step_size=field_step_size,
            ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        else:
            # 'aligned'; guaranteed by the validation above
            ux, uy, uz = bx, by, bz
        # take a step along the chosen direction
        x = x + step_size * ux[0] * scalar
        y = y + step_size * uy[0] * scalar
        z = z + step_size * uz[0] * scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    A two stage method is used. First, a broad (100 km step) field line trace
    spanning the Northern/Southern footpoints locates the point with greatest
    geodetic (WGS84) height. Second, a high resolution (.01 km step) trace is
    run around that point and the greatest geodetic height is selected again.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (float, float, float, float, float, float)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)

    """
    # translate inputs to ECEF starting positions
    pos_xs, pos_ys, pos_zs = geodetic_to_ecef(glats, glons, alts)
    # coarse trace parameters
    coarse_step = 100.
    coarse_max = 1000
    coarse_idx = np.arange(coarse_max)
    # refined trace parameters; span one coarse step (plus margin)
    fine_step = .01
    fine_max = int(coarse_step / fine_step) + 10
    fine_idx = np.arange(fine_max)
    # accumulators for apex ECEF positions
    apex_xs = []
    apex_ys = []
    apex_zs = []
    for px, py, pz, glat, glon, alt, date in zip(pos_xs, pos_ys, pos_zs,
                                                 glats, glons, alts,
                                                 dates):
        # coarse field line trace through the input location
        coarse = full_field_line(np.array([px, py, pz]), date, 0.,
                                 steps=coarse_idx,
                                 step_size=coarse_step,
                                 max_steps=coarse_max)
        # geodetic height along the trace; highest point approximates apex
        _, _, heights = ecef_to_geodetic(coarse[:, 0], coarse[:, 1],
                                         coarse[:, 2])
        best = np.argmax(heights)
        # refine with a short high resolution trace about the coarse apex;
        # recurse=False caps the trace at exactly fine_max steps
        refined = full_field_line(coarse[best, :], date, 0.,
                                  steps=fine_idx,
                                  step_size=fine_step,
                                  max_steps=fine_max,
                                  recurse=False)
        _, _, heights = ecef_to_geodetic(refined[:, 0], refined[:, 1],
                                         refined[:, 2])
        best = np.argmax(heights)
        # store refined apex position
        apex_xs.append(refined[best, 0])
        apex_ys.append(refined[best, 1])
        apex_zs.append(refined[best, 2])
    apex_xs = np.array(apex_xs)
    apex_ys = np.array(apex_ys)
    apex_zs = np.array(apex_zs)
    apex_lat, apex_lon, apex_alt = ecef_to_geodetic(apex_xs, apex_ys, apex_zs)
    return apex_xs, apex_ys, apex_zs, apex_lat, apex_lon, apex_alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and going through footpoint. At footpoint, steps along vector
    direction in both positive and negative directions, then traces back to
    opposite footpoint. Back at input location, steps toward those new field
    lines (edge_length) along vector direction until hitting distance of
    minimum approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest
    approach.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.

    Raises
    ------
    ValueError
        If direction is not 'north' or 'south'.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # validate hemisphere selection before any expensive tracing; previously
    # an unsupported value produced a NameError on `direct` below
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    else:
        raise ValueError("direction must be 'north' or 'south'; got "
                         + repr(direction))
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closest approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back
        # from footpoint through positive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input
    location.

    Using the input location, the apex location is calculated. Also from the
    input location, a step along both the positive and negative
    vector_directions is taken, and the apex locations for those points are
    calculated. The difference in position between these apex locations is the
    total centered distance between magnetic field lines at the magnetic apex
    when starting locally with a field line half distance of edge_length.

    An alternative method has been implemented, then commented out.
    This technique takes multiple steps from the origin apex towards the apex
    locations identified along vector_direction. In principle this is more
    accurate but more computationally intensive, similar to the footpoint
    model. A comparison is planned.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at the input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from input location towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations (km), one value per input
        location.

    """
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    # outputs for alternative calculation (currently unused; see the
    # commented-out code and TODO below)
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # fractional year form of the date, used by the commented-out
        # alternative below
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # get location of apex for s/c field line
        apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
            [glat], [glon],
            [alt], [date])
        # apex in ecef (maps to input location)
        apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
        # take step from s/c along + vector direction
        # then get the apex location
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # plus apex location in ECEF
        plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
        # take half step from s/c along - vector direction
        # then get the apex location
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
        # take difference in apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
                                        (plus_apex_y[0]-minus_apex_y[0])**2 +
                                        (plus_apex_z[0]-minus_apex_z[0])**2))
        # # take an alternative path to calculation
        # # do field line trace around pos and neg apexes
        # # then do intersection with field line projection thing
        #
        # # do a short centered field line trace around plus apex location
        # other_trace = full_field_line(plus_apex_root, double_date, 0.,
        #                               step_size=1.,
        #                               max_steps=10,
        #                               recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # pos_edge_length, _, mind_pos = step_until_intersect(apex_root,
        #                                                     other_trace,
        #                                                     1, date,
        #                                                     direction=vector_direction,
        #                                                     field_step_size=1.,
        #                                                     step_size_goal=edge_length/edge_steps)
        # # do a short centered field line trace around 'minus' apex location
        # other_trace = full_field_line(minus_apex_root, double_date, 0.,
        #                               step_size=1.,
        #                               max_steps=10,
        #                               recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # minus_edge_length, _, mind_minus = step_until_intersect(apex_root,
        #                                                         other_trace,
        #                                                         -1, date,
        #                                                         direction=vector_direction,
        #                                                         field_step_size=1.,
        #                                                         step_size_goal=edge_length/edge_steps)
        # full_local_step.append(pos_edge_length + minus_edge_length)
        # min_distance_plus.append(mind_pos)
        # min_distance_minus.append(mind_minus)
    # still sorting out alternative option for this calculation
    # commented code is 'good' as far as the plan goes
    # takes more time, so I haven't tested one vs the other yet
    # having two live methods can lead to problems
    # THIS IS A TODO (sort it out)
    return np.array(apex_edge_length)  # , np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
        Directions refer to the ion motion direction e.g. the zonal
        scalar applies to zonal ion motions (meridional E field assuming
        ExB ion motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size used for field line integration. Defaults to 100 km.
    max_steps : int
        Number of steps taken for field line integration. Defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output (both reassigned below by the closed loop calls)
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # equatorial mappings
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'meridional',
                                                                edge_length=25.,
                                                                edge_steps=5)
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'zonal',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # normalize by total local edge length; 50 km = 2 * edge_length (25 km
    # steps are taken in both the positive and negative directions)
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
    if e_field_scaling_only:
        # prepare output; note the direction swap between E field and drift,
        # a meridional E field drives a zonal ExB drift and vice versa
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            # NOTE(review): uses 366 for all years; slightly off for non-leap
            # years but consistent with the other routines in this file
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                [glat], [glon],
                [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # scalar for the northern footpoint electric field based on distances
            # for drift also need to include the magnetic field, drift = E/B
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | full_field_line | python | def full_field_line(init, date, height, step_size=100., max_steps=1000,
steps=None, **kwargs):
if steps is None:
steps = np.arange(max_steps)
# trace north, then south, and combine
trace_south = field_line_trace(init, date, -1., height,
steps=steps,
step_size=step_size,
max_steps=max_steps,
**kwargs)
trace_north = field_line_trace(init, date, 1., height,
steps=steps,
step_size=step_size,
max_steps=max_steps,
**kwargs)
# order of field points is generally along the field line, south to north
# don't want to include the initial point twice
trace = np.vstack((trace_south[::-1][:-1,:], trace_north))
return trace | Perform field line tracing using IGRF and scipy.integrate.odeint.
Parameters
----------
init : array-like of floats
Position to begin field line tracing from in ECEF (x,y,z) km
date : datetime or float
Date to perform tracing on (year + day/365 + hours/24. + etc.)
Accounts for leap year if datetime provided.
height : float
Altitude to terminate trace, geodetic WGS84 (km)
max_steps : float
Maximum number of steps along field line that should be taken
step_size : float
Distance in km for each large integration step. Multiple substeps
are taken as determined by scipy.integrate.odeint
steps : array-like of ints or floats
Number of steps along field line when field line trace positions should
be reported. By default, each step is reported; steps=np.arange(max_steps).
Two traces are made, one north, the other south, thus the output array
could have double max_steps, or more via recursion.
Returns
-------
numpy array
2D array. [0,:] has the x,y,z location for southern footpoint
[:,0] is the x positions over the integration.
Positions are reported in ECEF (km). | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L426-L476 | [
"def field_line_trace(init, date, direction, height, steps=None,\n max_steps=1E4, step_size=10., recursive_loop_count=None, \n recurse=True):\n \"\"\"Perform field line tracing using IGRF and scipy.integrate.odeint.\n\n Parameters\n ----------\n init : array-like of floats\n Position to begin field line tracing from in ECEF (x,y,z) km\n date : datetime or float\n Date to perform tracing on (year + day/365 + hours/24. + etc.)\n Accounts for leap year if datetime provided.\n direction : int\n 1 : field aligned, generally south to north. \n -1 : anti-field aligned, generally north to south.\n height : float\n Altitude to terminate trace, geodetic WGS84 (km)\n steps : array-like of ints or floats\n Number of steps along field line when field line trace positions should \n be reported. By default, each step is reported; steps=np.arange(max_steps).\n max_steps : float\n Maximum number of steps along field line that should be taken\n step_size : float\n Distance in km for each large integration step. Multiple substeps\n are taken as determined by scipy.integrate.odeint\n\n Returns\n -------\n numpy array\n 2D array. [0,:] has the x,y,z location for initial point\n [:,0] is the x positions over the integration.\n Positions are reported in ECEF (km).\n\n\n \"\"\"\n\n if recursive_loop_count is None: \n recursive_loop_count = 0\n # \n if steps is None:\n steps = np.arange(max_steps)\n if not isinstance(date, float):\n # recast from datetime to float, as required by IGRF12 code\n doy = (date - datetime.datetime(date.year,1,1)).days\n # number of days in year, works for leap years\n num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days\n date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. 
+ date.second/3600.)/24.\n\n trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),\n steps,\n args=(date, step_size, direction, height),\n full_output=False,\n printmessg=False,\n ixpr=False) #,\n # mxstep=500)\n\n # check that we reached final altitude\n check = trace_north[-1, :]\n x, y, z = ecef_to_geodetic(*check) \n if height == 0:\n check_height = 1.\n else:\n check_height = height\n # fortran integration gets close to target height \n if recurse & (z > check_height*1.000001):\n if (recursive_loop_count < 1000):\n # When we have not reached the reference height, call field_line_trace \n # again by taking check value as init - recursive call\n recursive_loop_count = recursive_loop_count + 1\n trace_north1 = field_line_trace(check, date, direction, height,\n step_size=step_size, \n max_steps=max_steps,\n recursive_loop_count=recursive_loop_count,\n steps=steps)\n else:\n raise RuntimeError(\"After 1000 iterations couldn't reach target altitude\")\n return np.vstack((trace_north, trace_north1))\n else:\n # return results if we make it to the target altitude\n\n # filter points to terminate at point closest to target height\n # code below not correct, we want the first poiint that goes below target\n # height\n # code also introduces a variable length return, though I suppose\n # that already exists with the recursive functionality\n # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2]) \n # idx = np.argmin(np.abs(check_height - z)) \n return trace_north #[:idx+1,:]\n"
] |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geoncentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    # work in radians for the trigonometric calls
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # distance from Earth's center, spherical Earth assumed
    radial = earth_geo_radius + altitude
    # standard spherical to Cartesian transformation
    equatorial_proj = radial * np.cos(lat_rad)
    x = equatorial_proj * np.cos(lon_rad)
    y = equatorial_proj * np.sin(lon_rad)
    z = radial * np.sin(lat_rad)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # radial distance from the center of the Earth
    radial = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude obtained from the colatitude, the angle off the z-axis
    latitude = 90. - np.rad2deg(np.arccos(z / radial))
    longitude = np.rad2deg(np.arctan2(y, x))
    # altitude is radial distance above the reference sphere
    return latitude, longitude, radial - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # first eccentricity of the WGS84 reference ellipsoid
    ecc = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_curve = earth_a / np.sqrt(1. - ecc ** 2 * np.sin(lat_rad) ** 2)
    cos_lat = np.cos(lat_rad)
    x = (r_curve + altitude) * cos_lat * np.cos(lon_rad)
    y = (r_curve + altitude) * cos_lat * np.sin(lon_rad)
    # z uses the polar-flattened radius of curvature
    z = (r_curve * (1. - ecc ** 2) + altitude) * np.sin(lat_rad)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is deafult)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    method = method or 'closed'
    # first eccentricity of the WGS84 ellipsoid, squared
    ecc = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    ecc2 = ecc ** 2  # 6.6943799901377997E-3
    # longitude comes directly from the equatorial plane projection
    longitude = np.arctan2(y, x)
    # distance from the rotation (z) axis
    p = np.sqrt(x ** 2 + y ** 2)
    if method == 'closed':
        # closed form solution
        # http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
        # second eccentricity squared
        e_prime2 = (earth_a ** 2 - earth_b ** 2) / earth_b ** 2
        # parametric (reduced) latitude
        theta = np.arctan2(z * earth_a, p * earth_b)
        latitude = np.arctan2(z + e_prime2 * earth_b * np.sin(theta) ** 3,
                              p - ecc2 * earth_a * np.cos(theta) ** 3)
        r_n = earth_a / np.sqrt(1. - ecc2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
    if method == 'iterative':
        # iterative refinement of latitude and height
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - ecc2 * np.sin(latitude) ** 2)
        for _ in range(6):
            r_n = earth_a / np.sqrt(1. - ecc2 * np.sin(latitude) ** 2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - ecc2 * (r_n / (r_n + h))))
    # final ellipsoidal height update
    h = p / np.cos(latitude) - r_n
    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    # trig terms of the rotation from local ENU axes to ECEF
    sin_lat = np.sin(np.radians(glat))
    cos_lat = np.cos(np.radians(glat))
    sin_lon = np.sin(np.radians(glong))
    cos_lon = np.cos(np.radians(glong))
    # apply rotation matrix row by row
    x = -east * sin_lon - north * cos_lon * sin_lat + up * cos_lon * cos_lat
    y = east * cos_lon - north * sin_lon * sin_lat + up * sin_lon * cos_lat
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions

    """
    # trig terms of the rotation from ECEF to local ENU axes
    sin_lat = np.sin(np.radians(glat))
    cos_lat = np.cos(np.radians(glat))
    sin_lon = np.sin(np.radians(glong))
    cos_lon = np.cos(np.radians(glong))
    # apply transpose of the ENU->ECEF rotation matrix
    east = -x * sin_lon + y * cos_lon
    north = -x * cos_lon * sin_lat - y * sin_lon * sin_lat + z * cos_lat
    up = x * cos_lon * cos_lat + y * sin_lon * cos_lat + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ecef onto different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF components of the x unit vector of new basis
    yx, yy, yz : float or array-like
        ECEF components of the y unit vector of new basis
    zx, zy, zz : float or array-like
        ECEF components of the z unit vector of new basis

    Returns
    -------
    out_x, out_y, out_z
        Vector components along the new basis directions

    """
    # each output component is the dot product of the input vector
    # with the corresponding basis unit vector
    out_x = x * xx + y * xy + z * xz
    out_y = x * yx + y * yy + z * yz
    out_z = x * zx + y * zy + z * zz
    return out_x, out_y, out_z
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components

    """
    # Euclidean length of the input vector
    magnitude = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # scale each component by the inverse magnitude
    return x / magnitude, y / magnitude, z / magnitude
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of cross product vector

    """
    # standard determinant expansion of v1 x v2
    out_x = y1 * z2 - y2 * z1
    out_y = z1 * x2 - x1 * z2
    out_z = x1 * y2 - y1 * x2
    return out_x, out_y, out_z
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int
        Internal counter tracking how many recursive continuations have been
        made while trying to reach the target altitude. Users should leave
        this as None.
    recurse : bool
        If True, trace is recursively continued from the last point whenever
        a single integration pass does not reach the target height.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If 1000 recursive continuations are insufficient to reach the
        target altitude.

    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate along the field; igrf.igrf_step supplies the derivative
    # (local field direction) at each position
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
                                         # mxstep=500)
    # check that we reached final altitude
    check = trace_north[-1, :]
    x, y, z = ecef_to_geodetic(*check)
    # a target height of exactly 0 km is checked against 1 km to leave
    # room for the integrator's finite accuracy near the surface
    if height == 0:
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # stack the continuation onto the trace obtained so far
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude

        # filter points to terminate at point closest to target height
        # code below not correct, we want the first poiint that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Traces from the input location in both directions along the field and
    combines the results into a single south-to-north trace.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """
    if steps is None:
        steps = np.arange(max_steps)
    # shared keyword arguments for both trace directions
    trace_kwargs = dict(steps=steps, step_size=step_size, max_steps=max_steps)
    trace_kwargs.update(kwargs)
    # trace anti-field aligned (generally towards southern footpoint)
    southern = field_line_trace(init, date, -1., height, **trace_kwargs)
    # trace field aligned (generally towards northern footpoint)
    northern = field_line_trace(init, date, 1., height, **trace_kwargs)
    # combine into one south-to-north trace; the southern leg is reversed
    # and its first returned point (the shared init location) dropped so
    # the starting position is not duplicated
    return np.vstack((southern[::-1][:-1, :], northern))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
        The zonal vector is calculated by field-line tracing from
        the input locations toward the footpoint locations at ref_height.
        The cross product of these two vectors is taken to define the plane of
        the magnetic field. This vector is not always orthogonal
        with the local field-aligned vector (IGRF), thus any component of the
        zonal vector with the field-aligned direction is removed (optional).
        The meridional unit vector is defined via the cross product of the
        zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Steps along field line when field line trace positions should be
        reported. By default, each step is reported (np.arange(max_steps)).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z

    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # filter longitudes into 0-360 range (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = []
    north_y = []
    north_z = []
    south_x = []
    south_y = []
    south_z = []
    bn = []
    be = []
    bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # trace along the field to the footpoints in both hemispheres
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year + 1, 1, 1) -
                        datetime.datetime(time.year, 1, 1)).days
        date = time.year + float(doy)/float(num_doy_year) + \
            (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn)
        be.append(tbe)
        bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # calculate unit vectors from satellite to northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic unit vector; IGRF returns north/east/down, so the
    # vertical component is negated when converting to ENU
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # cross product of vectors to footpoints defines the plane of the field;
    # its normal is the zonal direction
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # normalize to get the zonal unit vector
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    # remove any field aligned component from the zonal vector
    # NOTE: previously this filtering ran unconditionally; it now honors
    # the documented filter_zonal flag (default True preserves behavior)
    if filter_zonal:
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # compute meridional vector
    # cross product of zonal and magnetic unit vector
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculting the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not.This routine may be used to form the
    last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        if 1, move along positive unit vector. Negwtive direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move slong when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards field line.
    field_step_size : float
        Step size (km) used when producing the input field line trace; used
        here to size the high resolution re-trace around the closest point.
        NOTE(review): required in practice — None (the default) raises a
        TypeError in the first-pass re-trace. Confirm callers always pass it.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.

    """
    # work on a copy, probably not needed
    # NOTE(review): this is a reference, not a copy; the input array is not
    # mutated below so behavior is unaffected
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean
    repeat=True
    # first run boolean
    first=True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # move the full distance 'scalar' from pos, in unit_steps pieces
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            # # reduce number of elements we really need to check
            # field_copy = field_copy[min_idx-100:min_idx+100]
            # # difference with position
            # diff = field_copy - pos_step
            # diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # # find closest one
            # min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not one of 'meridional', 'zonal', or 'aligned'.

    """
    # fail fast on an unsupported direction; previously an invalid value
    # triggered the expensive unit vector calculation below and then died
    # with an unhelpful NameError
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be one of 'meridional', 'zonal', "
                         "or 'aligned'")
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF; convert to geodetic for unit vector calculation
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions at current location
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
                [lat], [lon], [alt], [date],
                steps=field_steps,
                max_steps=field_max_steps,
                step_size=field_step_size,
                ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        elif direction == 'aligned':
            ux, uy, uz = bx, by, bz
        # take a step along the selected direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.1 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (float, float, float, float, float, float)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)

    """
    # use input location and convert to ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare parameters for broad field line trace
    step_size = 100.
    max_steps = 1000
    steps = np.arange(max_steps)
    # high resolution trace parameters
    fine_step_size = .01
    fine_max_steps = int(step_size/fine_step_size)+10
    fine_steps = np.arange(fine_max_steps)
    # prepare output
    out_x = []
    out_y = []
    out_z = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # to get the apex location we need to do a field line trace
        # then find the highest point
        trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
                                steps=steps,
                                step_size=step_size,
                                max_steps=max_steps)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # repeat using a high resolution trace one big step size each
        # direction around identified max
        # recurse False ensures only max_steps are taken
        trace = full_field_line(trace[max_idx,:], date, 0.,
                                steps=fine_steps,
                                step_size=fine_step_size,
                                max_steps=fine_max_steps,
                                recurse=False)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # collect outputs, apex location in ECEF
        out_x.append(trace[max_idx,0])
        out_y.append(trace[max_idx,1])
        out_z.append(trace[max_idx,2])
    out_x = np.array(out_x)
    out_y = np.array(out_y)
    out_z = np.array(out_z)
    # also report apex location in geodetic coordinates
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, satrting at input
    points and goes through footpoint. At footpoint, steps along vector direction
    in both positive and negative directions, then traces back to opposite
    footpoint. Back at input location, steps toward those new field lines
    (edge_length) along vector direction until hitting distance of minimum
    approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest approach.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # map direction label onto the field line trace direction sign
    # NOTE(review): any other value leaves direct undefined and raises
    # NameError below — confirm whether input validation is desired
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closet approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        # geodetic footpoint location (currently unused below)
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through postive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input
    location.

    Using the input location, the apex location is calculated. Also from the
    input location, a step along both the positive and negative
    vector_directions is taken, and the apex locations for those points are
    calculated. The difference in position between these apex locations is the
    total centered distance between magnetic field lines at the magnetic apex
    when starting locally with a field line half distance of edge_length.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at the input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from the input location towards the new field
        line in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations (km), one value per input
        location.

    """
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    # outputs for an alternative (currently disabled) calculation; they
    # remain empty in the active code path — see note before the return
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # fractional year, format required by the IGRF code
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # get location of apex for s/c field line
        apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
            [glat], [glon],
            [alt], [date])
        # apex in ecef (maps to input location)
        apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
        # take step from s/c along + vector direction
        # then get the apex location
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # plus apex location in ECEF
        plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
        # take half step from s/c along - vector direction
        # then get the apex location
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
        # take difference in apex locations (straight-line distance in ECEF)
        apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
                                        (plus_apex_y[0]-minus_apex_y[0])**2 +
                                        (plus_apex_z[0]-minus_apex_z[0])**2))

    # NOTE(review): an alternative method — tracing short field lines around
    # each apex and intersecting them with steps along the unit vector,
    # mirroring the footpoint technique — was prototyped here and disabled.
    # It is more computationally intensive and has not been compared against
    # the difference-of-apexes approach above. TODO: select one method.
    return np.array(apex_edge_length)
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
    Directions refer to the ion motion direction e.g. the zonal
    scalar applies to zonal ion motions (meridional E field assuming ExB ion
    motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size used for field line integration. Defaults to 100 km.
    max_steps : int
        Maximum number of steps for field line integration. Defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # each closed loop below uses a total local edge of 2 * edge_length =
    # 50 km; the /50. and 50./ ratios further down convert the returned edge
    # lengths into E-field mapping scalars
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # equatorial mappings via apex-location differences
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                'meridional',
                                                edge_length=25.,
                                                edge_steps=5)
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                'zonal',
                                                edge_length=25.,
                                                edge_steps=5)
    # normalize footpoint edge lengths by the 50 km local edge
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial scaling is inverted relative to the footpoint case
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
    if e_field_scaling_only:
        # prepare output
        # note the cross mapping: a meridional E-field drives a zonal ExB
        # drift, so each 'fields' key takes the opposite drift scalar
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength, since drift = E/B
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            # fractional year, format required by the IGRF code
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                [glat], [glon],
                [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location (last point on each trace)
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # magnetic field magnitude at spacecraft (b_sc); IGRF takes
            # colatitude and longitude in radians
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | calculate_mag_drift_unit_vectors_ecef | python | def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
steps=None, max_steps=1000, step_size=100.,
ref_height=120., filter_zonal=True):
if steps is None:
steps = np.arange(max_steps)
# calculate satellite position in ECEF coordinates
ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
# also get position in geocentric coordinates
geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
ref_height=0.)
# filter longitudes (could use pysat's function here)
idx, = np.where(geo_long < 0)
geo_long[idx] = geo_long[idx] + 360.
# prepare output lists
north_x = [];
north_y = [];
north_z = []
south_x = [];
south_y = [];
south_z = []
bn = [];
be = [];
bd = []
for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
geo_alt, np.deg2rad(90. - geo_lat),
np.deg2rad(geo_long), datetimes):
init = np.array([x, y, z])
# date = inst.yr + inst.doy / 366.
# trace = full_field_line(init, time, ref_height, step_size=step_size,
# max_steps=max_steps,
# steps=steps)
trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
step_size=step_size, max_steps=max_steps)
trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
step_size=step_size, max_steps=max_steps)
# store final location, full trace goes south to north
trace_north = trace_north[-1, :]
trace_south = trace_south[-1, :]
# magnetic field at spacecraft location, using geocentric inputs
# to get magnetic field in geocentric output
# recast from datetime to float, as required by IGRF12 code
doy = (time - datetime.datetime(time.year,1,1)).days
# number of days in year, works for leap years
num_doy_year = (datetime.datetime(time.year+1,1,1) - datetime.datetime(time.year,1,1)).days
date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
# get IGRF field components
# tbn, tbe, tbd, tbmag are in nT
tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
# collect outputs
south_x.append(trace_south[0])
south_y.append(trace_south[1])
south_z.append(trace_south[2])
north_x.append(trace_north[0])
north_y.append(trace_north[1])
north_z.append(trace_north[2])
bn.append(tbn);
be.append(tbe);
bd.append(tbd)
north_x = np.array(north_x)
north_y = np.array(north_y)
north_z = np.array(north_z)
south_x = np.array(south_x)
south_y = np.array(south_y)
south_z = np.array(south_z)
bn = np.array(bn)
be = np.array(be)
bd = np.array(bd)
# calculate vector from satellite to northern/southern footpoints
north_x = north_x - ecef_x
north_y = north_y - ecef_y
north_z = north_z - ecef_z
north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
south_x = south_x - ecef_x
south_y = south_y - ecef_y
south_z = south_z - ecef_z
south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
# calculate magnetic unit vector
bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
bx, by, bz = normalize_vector(bx, by, bz)
# take cross product of southward and northward vectors to get the zonal vector
zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
north_x, north_y, north_z)
# getting zonal vector utilizing magnetic field vector instead
zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z,
bx, by, bz)
# getting zonal vector utilizing magnetic field vector instead and southern point
zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z,
bx, by, bz)
# normalize the vectors
norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
# calculate zonal vector
zvx = zvx_foot / norm_foot
zvy = zvy_foot / norm_foot
zvz = zvz_foot / norm_foot
# remove any field aligned component to the zonal vector
dot_fa = zvx * bx + zvy * by + zvz * bz
zvx -= dot_fa * bx
zvy -= dot_fa * by
zvz -= dot_fa * bz
zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
# compute meridional vector
# cross product of zonal and magnetic unit vector
mx, my, mz = cross_product(zvx, zvy, zvz,
bx, by, bz)
# add unit vectors for magnetic drifts in ecef coordinates
return zvx, zvy, zvz, bx, by, bz, mx, my, mz | Calculates unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in ECEF coordinates.
Note
----
The zonal vector is calculated by field-line tracing from
the input locations toward the footpoint locations at ref_height.
The cross product of these two vectors is taken to define the plane of
the magnetic field. This vector is not always orthogonal
with the local field-aligned vector (IGRF), thus any component of the
zonal vector with the field-aligned direction is removed (optional).
The meridional unit vector is defined via the cross product of the
zonal and field-aligned directions.
Parameters
----------
latitude : array-like of floats (degrees)
Latitude of location, degrees, WGS84
longitude : array-like of floats (degrees)
Longitude of location, degrees, WGS84
altitude : array-like of floats (km)
Altitude of location, height above surface, WGS84
datetimes : array-like of datetimes
Time to calculate vectors
max_steps : int
Maximum number of steps allowed for field line tracing
step_size : float
Maximum step size (km) allowed when field line tracing
ref_height : float
Altitude used as cutoff for labeling a field line location a footpoint
filter_zonal : bool
If True, removes any field aligned component from the calculated
zonal unit vector. Resulting coordinate system is not-orthogonal.
Returns
-------
zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L479-L631 | [
"def ecef_to_geocentric(x, y, z, ref_height=None):\n \"\"\"Convert ECEF into geocentric coordinates\n\n Parameters\n ----------\n x : float or array_like\n ECEF-X in km\n y : float or array_like\n ECEF-Y in km\n z : float or array_like\n ECEF-Z in km\n ref_height : float or array_like\n Reference radius used for calculating height.\n Defaults to average radius of 6371 km\n Returns\n -------\n latitude, longitude, altitude\n numpy arrays of locations in degrees, degrees, and km\n\n \"\"\"\n if ref_height is None:\n ref_height = earth_geo_radius\n\n r = np.sqrt(x ** 2 + y ** 2 + z ** 2)\n colatitude = np.rad2deg(np.arccos(z / r))\n longitude = np.rad2deg(np.arctan2(y, x))\n latitude = 90. - colatitude\n\n return latitude, longitude, r - ref_height\n",
"def geodetic_to_ecef(latitude, longitude, altitude):\n \"\"\"Convert WGS84 geodetic coordinates into ECEF\n\n Parameters\n ----------\n latitude : float or array_like\n Geodetic latitude (degrees)\n longitude : float or array_like\n Geodetic longitude (degrees)\n altitude : float or array_like\n Geodetic Height (km) above WGS84 reference ellipsoid.\n\n Returns\n -------\n x, y, z\n numpy arrays of x, y, z locations in km\n\n \"\"\"\n\n\n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(np.deg2rad(latitude)) ** 2)\n\n # colatitude = 90. - latitude\n x = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))\n y = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))\n z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(np.deg2rad(latitude))\n\n return x, y, z\n",
"def enu_to_ecef_vector(east, north, up, glat, glong):\n \"\"\"Converts vector from East, North, Up components to ECEF\n\n Position of vector in geospace may be specified in either\n geocentric or geodetic coordinates, with corresponding expression\n of the vector using radial or ellipsoidal unit vectors.\n\n Parameters\n ----------\n east : float or array-like\n Eastward component of vector\n north : float or array-like\n Northward component of vector\n up : float or array-like\n Upward component of vector\n latitude : float or array_like\n Geodetic or geocentric latitude (degrees)\n longitude : float or array_like\n Geodetic or geocentric longitude (degrees)\n\n Returns\n -------\n x, y, z\n Vector components along ECEF x, y, and z directions\n\n \"\"\"\n\n # convert lat and lon in degrees to radians\n rlat = np.radians(glat)\n rlon = np.radians(glong)\n\n x = -east*np.sin(rlon) - north*np.cos(rlon)*np.sin(rlat) + up*np.cos(rlon)*np.cos(rlat)\n y = east*np.cos(rlon) - north*np.sin(rlon)*np.sin(rlat) + up*np.sin(rlon)*np.cos(rlat)\n z = north*np.cos(rlat) + up*np.sin(rlat)\n\n return x, y, z\n",
"def normalize_vector(x, y, z):\n \"\"\"\n Normalizes vector to produce a unit vector.\n\n Parameters\n ----------\n x : float or array-like\n X component of vector\n y : float or array-like\n Y component of vector\n z : float or array-like\n Z component of vector\n\n Returns\n -------\n x, y, z\n Unit vector x,y,z components\n\n \"\"\"\n\n mag = np.sqrt(x**2 + y**2 + z**2)\n x = x/mag\n y = y/mag\n z = z/mag\n return x, y, z\n",
"def cross_product(x1, y1, z1, x2, y2, z2):\n \"\"\"\n Cross product of two vectors, v1 x v2\n\n Parameters\n ----------\n x1 : float or array-like\n X component of vector 1\n y1 : float or array-like\n Y component of vector 1\n z1 : float or array-like\n Z component of vector 1\n x2 : float or array-like\n X component of vector 2\n y2 : float or array-like\n Y component of vector 2\n z2 : float or array-like\n Z component of vector 2\n\n Returns\n -------\n x, y, z\n Unit vector x,y,z components\n\n \"\"\"\n x = y1*z2 - y2*z1\n y = z1*x2 - x1*z2\n z = x1*y2 - y1*x2\n return x, y, z \n",
"def field_line_trace(init, date, direction, height, steps=None,\n max_steps=1E4, step_size=10., recursive_loop_count=None, \n recurse=True):\n \"\"\"Perform field line tracing using IGRF and scipy.integrate.odeint.\n\n Parameters\n ----------\n init : array-like of floats\n Position to begin field line tracing from in ECEF (x,y,z) km\n date : datetime or float\n Date to perform tracing on (year + day/365 + hours/24. + etc.)\n Accounts for leap year if datetime provided.\n direction : int\n 1 : field aligned, generally south to north. \n -1 : anti-field aligned, generally north to south.\n height : float\n Altitude to terminate trace, geodetic WGS84 (km)\n steps : array-like of ints or floats\n Number of steps along field line when field line trace positions should \n be reported. By default, each step is reported; steps=np.arange(max_steps).\n max_steps : float\n Maximum number of steps along field line that should be taken\n step_size : float\n Distance in km for each large integration step. Multiple substeps\n are taken as determined by scipy.integrate.odeint\n\n Returns\n -------\n numpy array\n 2D array. [0,:] has the x,y,z location for initial point\n [:,0] is the x positions over the integration.\n Positions are reported in ECEF (km).\n\n\n \"\"\"\n\n if recursive_loop_count is None: \n recursive_loop_count = 0\n # \n if steps is None:\n steps = np.arange(max_steps)\n if not isinstance(date, float):\n # recast from datetime to float, as required by IGRF12 code\n doy = (date - datetime.datetime(date.year,1,1)).days\n # number of days in year, works for leap years\n num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days\n date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. 
+ date.second/3600.)/24.\n\n trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),\n steps,\n args=(date, step_size, direction, height),\n full_output=False,\n printmessg=False,\n ixpr=False) #,\n # mxstep=500)\n\n # check that we reached final altitude\n check = trace_north[-1, :]\n x, y, z = ecef_to_geodetic(*check) \n if height == 0:\n check_height = 1.\n else:\n check_height = height\n # fortran integration gets close to target height \n if recurse & (z > check_height*1.000001):\n if (recursive_loop_count < 1000):\n # When we have not reached the reference height, call field_line_trace \n # again by taking check value as init - recursive call\n recursive_loop_count = recursive_loop_count + 1\n trace_north1 = field_line_trace(check, date, direction, height,\n step_size=step_size, \n max_steps=max_steps,\n recursive_loop_count=recursive_loop_count,\n steps=steps)\n else:\n raise RuntimeError(\"After 1000 iterations couldn't reach target altitude\")\n return np.vstack((trace_north, trace_north1))\n else:\n # return results if we make it to the target altitude\n\n # filter points to terminate at point closest to target height\n # code below not correct, we want the first poiint that goes below target\n # height\n # code also introduces a variable length return, though I suppose\n # that already exists with the recursive functionality\n # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2]) \n # idx = np.argmin(np.abs(check_height - z)) \n return trace_north #[:idx+1,:]\n"
] |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geoncentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # geocentric radius of the point, spherical Earth assumed
    radius = earth_geo_radius + altitude
    cos_lat = np.cos(lat_rad)
    x = radius * cos_lat * np.cos(lon_rad)
    y = radius * cos_lat * np.sin(lon_rad)
    z = radius * np.sin(lat_rad)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # geocentric distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude from colatitude; longitude from the x-y plane angle
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    longitude = np.rad2deg(np.arctan2(y, x))
    altitude = radius - ref_height
    return latitude, longitude, altitude
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # first eccentricity of the WGS84 reference ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(lat_rad) ** 2)
    cos_lat = np.cos(lat_rad)
    x = (r_n + altitude) * cos_lat * np.cos(lon_rad)
    y = (r_n + altitude) * cos_lat * np.sin(lon_rad)
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(lat_rad)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96
        section 2.2.1) or iterative
        (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If method is not 'closed' or 'iterative'.

    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'
    # previously an unknown method string fell through both branches and
    # produced a confusing NameError; fail fast with a clear message instead
    if method not in ('closed', 'iterative'):
        raise ValueError("method must be 'closed' or 'iterative'")
    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3
    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)

    if method == 'closed':
        # closed form solution
        # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96
        # section 2.2.1
        e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
        theta = np.arctan2(z*earth_a, p*earth_b)
        latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3,
                              p - e2*earth_a*np.cos(theta)**3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
    else:
        # iterative method
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
        for i in np.arange(6):
            r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n

    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    # trig factors of the rotation, angles in radians
    lat = np.radians(glat)
    lon = np.radians(glong)
    sin_lat, cos_lat = np.sin(lat), np.cos(lat)
    sin_lon, cos_lon = np.sin(lon), np.cos(lon)

    x = -east*sin_lon - north*cos_lon*sin_lat + up*cos_lon*cos_lat
    y = east*cos_lon - north*sin_lon*sin_lat + up*sin_lon*cos_lat
    z = north*cos_lat + up*sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions

    """
    # trig factors of the rotation, angles in radians
    lat = np.radians(glat)
    lon = np.radians(glong)
    sin_lat, cos_lat = np.sin(lat), np.cos(lat)
    sin_lon, cos_lon = np.sin(lon), np.cos(lon)

    east = -x*sin_lon + y*cos_lon
    north = -x*cos_lon*sin_lat - y*sin_lon*sin_lat + z*cos_lat
    up = x*cos_lon*cos_lat + y*sin_lon*cos_lat + z*sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Project an ECEF vector onto a different basis, each basis unit
    vector itself expressed in ECEF components.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF X, Y, Z components of the new basis x unit vector
    yx, yy, yz : float or array-like
        ECEF X, Y, Z components of the new basis y unit vector
    zx, zy, zz : float or array-like
        ECEF X, Y, Z components of the new basis z unit vector

    Returns
    -------
    out_x, out_y, out_z
        Vector components along each of the supplied basis directions
    """
    def _dot(bx, by, bz):
        # component of (x, y, z) along the basis vector (bx, by, bz)
        return x*bx + y*by + z*bz

    return _dot(xx, xy, xz), _dot(yx, yy, yz), _dot(zx, zy, zz)
def normalize_vector(x, y, z):
    """Scale a vector to unit length.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Components of the corresponding unit vector
    """
    # Euclidean length of the input vector
    magnitude = np.sqrt(x**2 + y**2 + z**2)
    return x/magnitude, y/magnitude, z/magnitude
def cross_product(x1, y1, z1, x2, y2, z2):
    """Cross product of two vectors, v1 x v2.

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of the cross product vector (not normalized)
    """
    # standard determinant expansion of v1 x v2
    cx = y1*z2 - y2*z1
    cy = z1*x2 - x1*z2
    cz = x1*y2 - y1*x2
    return cx, cy, cz
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Integrates along the magnetic field direction (via igrf.igrf_step) from
    an initial ECEF position. If the integration does not reach the target
    geodetic altitude, the trace is restarted from the last point, up to
    1000 recursive restarts.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Integration 'times' at which odeint reports field line positions.
        By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal recursion-depth counter; callers should leave as None.
    recurse : bool
        If True, restart the trace from the end point when the target
        altitude was not reached.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If 1000 recursive restarts still fail to reach the target altitude.
    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate along the field; igrf.igrf_step supplies the local step
    # direction at each position odeint evaluates
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
                                         # mxstep=500)
    # check that we reached final altitude
    check = trace_north[-1, :]
    # NOTE(review): names are misleading — x, y are geodetic latitude and
    # longitude; z is the geodetic altitude (km) used for the check below
    x, y, z = ecef_to_geodetic(*check)
    if height == 0:
        # avoid a zero tolerance band when tracing to the surface
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    # NOTE(review): '&' is bitwise-and on the two booleans; equivalent to
    # 'and' for scalar bools but does not short-circuit
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # stack this trace with the recursively obtained remainder
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        # filter points to terminate at point closest to target height
        # code below not correct, we want the first poiint that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Trace a complete field line, from southern to northern footpoint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along each field line direction
    steps : array-like of ints or floats
        Integration steps where trace positions are reported. Defaults to
        np.arange(max_steps). Two traces are made (north and south), so the
        output may contain up to double max_steps rows, or more via recursion.
    **kwargs
        Passed through to field_line_trace.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).
    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace in both directions from the starting point
    south = field_line_trace(init, date, -1., height,
                             steps=steps, step_size=step_size,
                             max_steps=max_steps, **kwargs)
    north = field_line_trace(init, date, 1., height,
                             steps=steps, step_size=step_size,
                             max_steps=max_steps, **kwargs)
    # reverse the southern trace so the combined result runs south to north,
    # dropping the duplicated starting point shared by both traces
    return np.vstack((south[::-1][:-1, :], north))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
    The zonal vector is calculated by field-line tracing from
    the input locations toward the footpoint locations at ref_height.
    The cross product of these two vectors is taken to define the plane of
    the magnetic field. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector with the field-aligned direction is removed (optional).
    The meridional unit vector is defined via the cross product of the
    zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats, or None
        Integration steps passed through to field_line_trace.
        Defaults to np.arange(max_steps).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # map longitudes into [0, 360)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = []
    north_y = []
    north_z = []
    south_x = []
    south_y = []
    south_z = []
    bn = []
    be = []
    bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # trace toward each footpoint at ref_height
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # keep only the final location of each trace
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year + 1, 1, 1) - datetime.datetime(time.year, 1, 1)).days
        date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn)
        be.append(tbe)
        bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # unit vector from satellite toward northern footpoint
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    # unit vector from satellite toward southern footpoint
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # field-aligned (magnetic) unit vector; bd is positive down, hence -bd up
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # cross product of southward and northward vectors defines the plane of
    # the field line; its normal is the zonal direction
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # normalize to obtain the zonal unit vector
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    if filter_zonal:
        # remove any field aligned component from the zonal vector
        # BUGFIX: previously this filtering ran unconditionally; the
        # documented filter_zonal flag is now honored (default True keeps
        # prior behavior)
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # meridional vector completes the set; cross product of zonal and
    # field-aligned unit vectors
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form
    the last leg when trying to trace out a closed field line loop.
    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        If 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards field line.
    field_step_size : float
        Step size (km) used to produce the input field_line trace; sets the
        span of the high resolution trace made around closest approach.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.
    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the total distance along the unit vector direction taken so far
    scalar = 0.
    # loop continuation flag
    repeat=True
    # first-pass flag; triggers building the high resolution trace below
    first=True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # print (unit_steps, scalar/unit_steps)
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            # # reduce number of elements we really need to check
            # field_copy = field_copy[min_idx-100:min_idx+100]
            # # difference with position
            # diff = field_copy - pos_step
            # diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # # find closest one
            # min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : datetime
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not one of 'meridional', 'zonal', or 'aligned'.
    """
    # BUGFIX: fail fast on an unsupported direction; previously an invalid
    # value left the step vector unbound and produced an obscure NameError
    # only after an expensive field line trace
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be one of 'meridional', 'zonal', "
                         "or 'aligned'; got {!r}".format(direction))
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF; convert to geodetic for the unit vector routine
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions at the current location
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
            [lat], [lon], [alt], [date],
            steps=field_steps,
            max_steps=field_max_steps,
            step_size=field_step_size,
            ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        elif direction == 'aligned':
            ux, uy, uz = bx, by, bz
        # take a step along the chosen direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of apex locations

    Returns
    -------
    (array, array, array, array, array, array)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)
    """
    # starting positions in ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # coarse field line trace parameters
    step_size = 100.
    max_steps = 1000
    steps = np.arange(max_steps)
    # high resolution trace parameters; span one coarse step either way
    fine_step_size = .01
    fine_max_steps = int(step_size/fine_step_size) + 10
    fine_steps = np.arange(fine_max_steps)
    apex_xs = []
    apex_ys = []
    apex_zs = []
    for ecef_x, ecef_y, ecef_z, date in zip(ecef_xs, ecef_ys, ecef_zs, dates):
        seed = np.array([ecef_x, ecef_y, ecef_z])
        # coarse full field line trace; highest geodetic point approximates
        # the apex
        coarse = full_field_line(seed, date, 0.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        _, _, heights = ecef_to_geodetic(coarse[:, 0], coarse[:, 1], coarse[:, 2])
        hi_idx = np.argmax(heights)
        # refine with a short high resolution trace about the coarse apex;
        # recurse False ensures only fine_max_steps are taken
        fine = full_field_line(coarse[hi_idx, :], date, 0.,
                               steps=fine_steps,
                               step_size=fine_step_size,
                               max_steps=fine_max_steps,
                               recurse=False)
        _, _, heights = ecef_to_geodetic(fine[:, 0], fine[:, 1], fine[:, 2])
        hi_idx = np.argmax(heights)
        # store the refined apex position
        apex_xs.append(fine[hi_idx, 0])
        apex_ys.append(fine[hi_idx, 1])
        apex_zs.append(fine[hi_idx, 2])
    out_x = np.array(apex_xs)
    out_y = np.array(apex_ys)
    out_z = np.array(apex_zs)
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and goes through footpoint. At footpoint, steps along vector direction
    in both positive and negative directions, then traces back to opposite
    footpoint. Back at input location, steps toward those new field lines
    (edge_length) along vector direction until hitting distance of minimum
    approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest approach.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration. Defaults to 100 km.
    max_steps : int
        Number of steps taken for field line integration. Defaults to 1000.
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.
    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # NOTE(review): an unrecognized direction leaves 'direct' unbound and
    # raises NameError at the first trace — only 'north'/'south' supported
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closet approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        # NOTE(review): footpoint altitude is hard-coded at 120. km here,
        # independent of the ref_height used by related routines — confirm
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through positive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input location.

    Using the input location, the apex location is calculated. Also from the input
    location, a step along both the positive and negative
    vector_directions is taken, and the apex locations for those points are calculated.
    The difference in position between these apex locations is the total centered
    distance between magnetic field lines at the magnetic apex when starting
    locally with a field line half distance of edge_length.

    An alternative method has been implemented, then commented out.
    This technique takes multiple steps from the origin apex towards the apex
    locations identified along vector_direction. In principle this is more accurate
    but more computationally intensive, similar to the footpoint model.
    A comparison is planned.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from input location towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations.
    """
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    # NOTE(review): the three lists below only support the commented-out
    # alternative calculation; they are unused by the active code path
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # NOTE(review): double_date likewise only feeds the commented-out
        # alternative method below
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # get location of apex for s/c field line
        apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
            [glat], [glon],
            [alt], [date])
        # apex in ecef (maps to input location)
        apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
        # take step from s/c along + vector direction
        # then get the apex location
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # plus apex location in ECEF
        plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
        # take half step from s/c along - vector direction
        # then get the apex location
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
        # take difference in apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
                                        (plus_apex_y[0]-minus_apex_y[0])**2 +
                                        (plus_apex_z[0]-minus_apex_z[0])**2))
        # # take an alternative path to calculation
        # # do field line trace around pos and neg apexes
        # # then do intersection with field line projection thing
        #
        # # do a short centered field line trace around plus apex location
        # other_trace = full_field_line(plus_apex_root, double_date, 0.,
        #                               step_size=1.,
        #                               max_steps=10,
        #                               recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # pos_edge_length, _, mind_pos = step_until_intersect(apex_root,
        #                                                     other_trace,
        #                                                     1, date,
        #                                                     direction=vector_direction,
        #                                                     field_step_size=1.,
        #                                                     step_size_goal=edge_length/edge_steps)
        # # do a short centered field line trace around 'minus' apex location
        # other_trace = full_field_line(minus_apex_root, double_date, 0.,
        #                               step_size=1.,
        #                               max_steps=10,
        #                               recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # minus_edge_length, _, mind_minus = step_until_intersect(apex_root,
        #                                                         other_trace,
        #                                                         -1, date,
        #                                                         direction=vector_direction,
        #                                                         field_step_size=1.,
        #                                                         step_size_goal=edge_length/edge_steps)
        # full_local_step.append(pos_edge_length + minus_edge_length)
        # min_distance_plus.append(mind_pos)
        # min_distance_minus.append(mind_minus)
    # still sorting out alternative option for this calculation
    # commented code is 'good' as far as the plan goes
    # takes more time, so I haven't tested one vs the other yet
    # having two live methods can lead to problems
    # THIS IS A TODO (sort it out)
    return np.array(apex_edge_length)#, np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
    # # take step from one apex towards the other
    # apex_path = step_along_mag_unit_vector(minus_apex_x, minus_apex_y, minus_apex_z, date,
    #                                        direction=vector_direction,
    #                                        num_steps=edge_steps,
    #                                        step_size=apex_edge_length[-1]/(edge_steps*2.))
    # pos_apex_diff.append((apex_path[0] - plus_apex_x)**2 +
    #                      (apex_path[1] - plus_apex_y)**2 +
    #                      (apex_path[2] - plus_apex_z)**2)
    # return apex_edge_length, path_apex_diff
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.
    All inputs are assumed to be 1D arrays.

    Note
    ----
    Directions refer to the ion motion direction e.g. the zonal
    scalar applies to zonal ion motions (meridional E field assuming ExB ion motion).
    Because of this E/B relationship, traces taken along the 'meridional'
    unit vector feed the *zonal* drift scalars below, and vice versa.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float or NoneType
        Step size (km) passed to the underlying field line traces.
        Defaults to 100 km when None.
    max_steps : int or NoneType
        Maximum number of steps per field line trace. Defaults to 1000
        when None.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.
        When e_field_scaling_only is True the keys end in '_fields_scalar'
        instead and describe the electric field mapping alone.

    """

    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)

    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)

    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    # edge_length=25. with edges on both sides of the s/c implies a 50 km
    # baseline; presumably matched by the /50. normalization below —
    # TODO confirm against closed_loop_edge_lengths_via_footpoint
    # print ('Starting Northern')
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # print ('Starting Southern')
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # print ('Starting Equatorial')
    # , step_zon_apex2, mind_plus, mind_minus
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                'meridional',
                                                edge_length=25.,
                                                edge_steps=5)
    # , step_mer_apex2, mind_plus, mind_minus
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                'zonal',
                                                edge_length=25.,
                                                edge_steps=5)
    # print ('Done with core')
    # normalize footpoint edge lengths by the local 50 km baseline
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial scalars are inverted relative to the footpoint ones
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
    if e_field_scaling_only:
        # prepare output; E-field mapping only, note the meridional/zonal
        # swap relative to the drift variable names (drift = ExB)
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            yr, doy = pysat.utils.getyrdoy(date)
            # NOTE(review): always divides by 366, approximate for non-leap years
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                                [glat], [glon],
                                                                [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # scalar for the northern footpoint electric field based on distances
            # for drift also need to include the magnetic field, drift = E/B
            # igrf12syn takes colatitude/longitude in radians; last return is
            # the total field magnitude used below
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar

    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | step_until_intersect | python | def step_until_intersect(pos, field_line, sign, time, direction=None,
step_size_goal=5.,
field_step_size=None):
"""Starting at pos, method steps along magnetic unit vector direction
towards the supplied field line trace. Determines the distance of
closest approach to field line.
    Routine is used when calculating the mapping of electric fields along
magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form the
last leg when trying to trace out a closed field line loop.
Routine will create a high resolution field line trace (.01 km step size)
near the location of closest approach to better determine where the
intersection occurs.
Parameters
----------
pos : array-like
X, Y, and Z ECEF locations to start from
field_line : array-like (:,3)
X, Y, and Z ECEF locations of field line trace, produced by the
field_line_trace method.
sign : int
        if 1, move along positive unit vector. Negative direction for -1.
time : datetime or float
Date to perform tracing on (year + day/365 + hours/24. + etc.)
Accounts for leap year if datetime provided.
direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
with supplied field line trace. See step_along_mag_unit_vector method
for more.
step_size_goal : float
step size goal that method will try to match when stepping towards field line.
Returns
-------
(float, array, float)
Total distance taken along vector direction; the position after taking
the step [x, y, z] in ECEF; distance of closest approach from input pos
towards the input field line trace.
"""
# work on a copy, probably not needed
field_copy = field_line
# set a high last minimum distance to ensure first loop does better than this
last_min_dist = 2500000.
# scalar is the distance along unit vector line that we are taking
scalar = 0.
# repeat boolean
repeat=True
# first run boolean
first=True
# factor is a divisor applied to the remaining distance between point and field line
# I slowly take steps towards the field line and I don't want to overshoot
# each time my minimum distance increases, I step back, increase factor, reducing
# my next step size, then I try again
factor = 1
while repeat:
# take a total step along magnetic unit vector
# try to take steps near user provided step_size_goal
unit_steps = np.abs(scalar//step_size_goal)
if unit_steps == 0:
unit_steps = 1
# print (unit_steps, scalar/unit_steps)
pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
direction=direction,
num_steps=unit_steps,
step_size=np.abs(scalar)/unit_steps,
scalar=sign)
# find closest point along field line trace
diff = field_copy - pos_step
diff_mag = np.sqrt((diff ** 2).sum(axis=1))
min_idx = np.argmin(diff_mag)
if first:
# first time in while loop, create some information
# make a high resolution field line trace around closest distance
# want to take a field step size in each direction
# maintain accuracy of high res trace below to be .01 km
init = field_copy[min_idx,:]
field_copy = full_field_line(init, time, 0.,
step_size=0.01,
max_steps=int(field_step_size/.01),
recurse=False)
# difference with position
diff = field_copy - pos_step
diff_mag = np.sqrt((diff ** 2).sum(axis=1))
# find closest one
min_idx = np.argmin(diff_mag)
# # reduce number of elements we really need to check
# field_copy = field_copy[min_idx-100:min_idx+100]
# # difference with position
# diff = field_copy - pos_step
# diff_mag = np.sqrt((diff ** 2).sum(axis=1))
# # find closest one
# min_idx = np.argmin(diff_mag)
first = False
# pull out distance of closest point
min_dist = diff_mag[min_idx]
# check how the solution is doing
# if well, add more distance to the total step and recheck if closer
# if worse, step back and try a smaller step
if min_dist > last_min_dist:
# last step we took made the solution worse
if factor > 4:
# we've tried enough, stop looping
repeat = False
# undo increment to last total distance
scalar = scalar - last_min_dist/(2*factor)
# calculate latest position
pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
time,
direction=direction,
num_steps=unit_steps,
step_size=np.abs(scalar)/unit_steps,
scalar=sign)
else:
# undo increment to last total distance
scalar = scalar - last_min_dist/(2*factor)
# increase the divisor used to reduce the distance
# actually stepped per increment
factor = factor + 1.
# try a new increment to total distance
scalar = scalar + last_min_dist/(2*factor)
else:
# we did better, move even closer, a fraction of remaining distance
# increment scalar, but only by a fraction
scalar = scalar + min_dist/(2*factor)
# we have a new standard to judge against, set it
last_min_dist = min_dist.copy()
# return magnitude of step
return scalar, pos_step, min_dist | Starting at pos, method steps along magnetic unit vector direction
towards the supplied field line trace. Determines the distance of
closest approach to field line.
    Routine is used when calculating the mapping of electric fields along
magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form the
last leg when trying to trace out a closed field line loop.
Routine will create a high resolution field line trace (.01 km step size)
near the location of closest approach to better determine where the
intersection occurs.
Parameters
----------
pos : array-like
X, Y, and Z ECEF locations to start from
field_line : array-like (:,3)
X, Y, and Z ECEF locations of field line trace, produced by the
field_line_trace method.
sign : int
        if 1, move along positive unit vector. Negative direction for -1.
time : datetime or float
Date to perform tracing on (year + day/365 + hours/24. + etc.)
Accounts for leap year if datetime provided.
direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
with supplied field line trace. See step_along_mag_unit_vector method
for more.
step_size_goal : float
step size goal that method will try to match when stepping towards field line.
Returns
-------
(float, array, float)
Total distance taken along vector direction; the position after taking
the step [x, y, z] in ECEF; distance of closest approach from input pos
towards the input field line trace. | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L634-L769 | [
"def full_field_line(init, date, height, step_size=100., max_steps=1000, \n steps=None, **kwargs):\n \"\"\"Perform field line tracing using IGRF and scipy.integrate.odeint.\n\n Parameters\n ----------\n init : array-like of floats\n Position to begin field line tracing from in ECEF (x,y,z) km\n date : datetime or float\n Date to perform tracing on (year + day/365 + hours/24. + etc.)\n Accounts for leap year if datetime provided.\n height : float\n Altitude to terminate trace, geodetic WGS84 (km)\n max_steps : float\n Maximum number of steps along field line that should be taken\n step_size : float\n Distance in km for each large integration step. Multiple substeps\n are taken as determined by scipy.integrate.odeint\n steps : array-like of ints or floats\n Number of steps along field line when field line trace positions should \n be reported. By default, each step is reported; steps=np.arange(max_steps).\n Two traces are made, one north, the other south, thus the output array\n could have double max_steps, or more via recursion.\n\n Returns\n -------\n numpy array\n 2D array. [0,:] has the x,y,z location for southern footpoint\n [:,0] is the x positions over the integration.\n Positions are reported in ECEF (km).\n\n\n \"\"\"\n\n if steps is None:\n steps = np.arange(max_steps)\n # trace north, then south, and combine\n trace_south = field_line_trace(init, date, -1., height, \n steps=steps,\n step_size=step_size, \n max_steps=max_steps, \n **kwargs)\n trace_north = field_line_trace(init, date, 1., height, \n steps=steps,\n step_size=step_size, \n max_steps=max_steps, \n **kwargs)\n # order of field points is generally along the field line, south to north\n # don't want to include the initial point twice\n trace = np.vstack((trace_south[::-1][:-1,:], trace_north))\n return trace\n",
"def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5., \n step_size=5., scalar=1):\n \"\"\"\n Move along 'lines' formed by following the magnetic unit vector directions.\n\n Moving along the field is effectively the same as a field line trace though\n extended movement along a field should use the specific field_line_trace \n method.\n\n\n Parameters\n ----------\n x : ECEF-x (km)\n Location to step from in ECEF (km). Scalar input.\n y : ECEF-y (km)\n Location to step from in ECEF (km). Scalar input.\n z : ECEF-z (km)\n Location to step from in ECEF (km). Scalar input.\n date : list-like of datetimes\n Date and time for magnetic field\n direction : string\n String identifier for which unit vector directino to move along.\n Supported inputs, 'meridional', 'zonal', 'aligned'\n num_steps : int\n Number of steps to take along unit vector direction\n step_size = float\n Distance taken for each step (km)\n scalar : int\n Scalar modifier for step size distance. Input a -1 to move along \n negative unit vector direction.\n\n Returns\n -------\n np.array\n [x, y, z] of ECEF location after taking num_steps along direction, \n each step_size long.\n\n \"\"\"\n\n\n # set parameters for the field line tracing routines\n field_step_size = 100.\n field_max_steps = 1000\n field_steps = np.arange(field_max_steps)\n\n for i in np.arange(num_steps):\n # x, y, z in ECEF\n # convert to geodetic\n lat, lon, alt = ecef_to_geodetic(x, y, z)\n # get unit vector directions\n zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(\n [lat], [lon], [alt], [date],\n steps=field_steps, \n max_steps=field_max_steps, \n step_size=field_step_size, \n ref_height=0.)\n # pull out the direction we need\n if direction == 'meridional':\n ux, uy, uz = mx, my, mz\n elif direction == 'zonal':\n ux, uy, uz = zvx, zvy, zvz\n elif direction == 'aligned':\n ux, uy, uz = bx, by, bz\n\n # take steps along direction\n x = x + step_size*ux[0]*scalar\n y = y + 
step_size*uy[0]*scalar\n z = z + step_size*uz[0]*scalar\n\n return np.array([x, y, z])\n"
] |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geoncentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    # angles in radians
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # geocentric radius of the point above the mean spherical Earth
    radius = earth_geo_radius + altitude
    # projection of the radius onto the equatorial plane
    horiz = radius * np.cos(lat_r)
    return horiz * np.cos(lon_r), horiz * np.sin(lon_r), radius * np.sin(lat_r)
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    # fall back on the mean Earth radius as the reference shell
    href = earth_geo_radius if ref_height is None else ref_height
    # geocentric distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude is 90 degrees minus the colatitude measured from the z-axis
    lat = 90. - np.rad2deg(np.arccos(z / radius))
    lon = np.rad2deg(np.arctan2(y, x))
    return lat, lon, radius - href
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # first eccentricity of the WGS84 ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(lat_r) ** 2)
    # distance from the rotation axis at this latitude and height
    horiz = (r_n + altitude) * np.cos(lat_r)
    x = horiz * np.cos(lon_r)
    y = horiz * np.sin(lon_r)
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(lat_r)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is deafult)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    # general background on ECEF to Geodetic transformations:
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'

    # first eccentricity of Earth and its square
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    e2 = ellip ** 2  # 6.6943799901377997E-3
    lon_r = np.arctan2(y, x)
    # distance from the rotation axis (cylindrical radius)
    p = np.sqrt(x ** 2 + y ** 2)

    if method == 'closed':
        # closed form solution, see
        # http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
        # (another option: http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf)
        e_prime = np.sqrt((earth_a ** 2 - earth_b ** 2) / earth_b ** 2)
        theta = np.arctan2(z * earth_a, p * earth_b)
        lat_r = np.arctan2(z + e_prime ** 2 * earth_b * np.sin(theta) ** 3,
                           p - e2 * earth_a * np.cos(theta) ** 3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(lat_r) ** 2)
        h = p / np.cos(lat_r) - r_n

    if method == 'iterative':
        # iterative refinement, see
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        lat_r = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(lat_r) ** 2)
        for _ in range(6):
            r_n = earth_a / np.sqrt(1. - e2 * np.sin(lat_r) ** 2)
            h = p / np.cos(lat_r) - r_n
            lat_r = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(lat_r) - r_n

    return np.rad2deg(lat_r), np.rad2deg(lon_r), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    # precompute the trig terms of the rotation
    sin_lat = np.sin(np.radians(glat))
    cos_lat = np.cos(np.radians(glat))
    sin_lon = np.sin(np.radians(glong))
    cos_lon = np.cos(np.radians(glong))
    # rotate the local-level components into the Earth-fixed frame
    x = -east * sin_lon - north * cos_lon * sin_lat + up * cos_lon * cos_lat
    y = east * cos_lon - north * sin_lon * sin_lat + up * sin_lon * cos_lat
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions

    """
    # precompute the trig terms of the rotation
    sin_lat = np.sin(np.radians(glat))
    cos_lat = np.cos(np.radians(glat))
    sin_lon = np.sin(np.radians(glong))
    cos_lon = np.cos(np.radians(glong))
    # rotate the Earth-fixed components into the local-level frame
    east = -x * sin_lon + y * cos_lon
    north = -x * cos_lon * sin_lat - y * sin_lon * sin_lat + z * cos_lat
    up = x * cos_lon * cos_lat + y * sin_lon * cos_lat + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ecef onto different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF X, Y, Z components of the x unit vector of the new basis
    yx, yy, yz : float or array-like
        ECEF X, Y, Z components of the y unit vector of the new basis
    zx, zy, zz : float or array-like
        ECEF X, Y, Z components of the z unit vector of the new basis

    Returns
    -------
    out_x, out_y, out_z
        Vector components along the new basis directions

    """
    # dot the input vector with each basis unit vector in turn
    proj_x = x * xx + y * xy + z * xz
    proj_y = x * yx + y * yy + z * yz
    proj_z = x * zx + y * zy + z * zz
    return proj_x, proj_y, proj_z
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components

    """
    # Euclidean length of the input vector
    norm = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    return x / norm, y / norm, z / norm
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of v1 x v2

    """
    # standard determinant expansion of the cross product
    cx = y1 * z2 - y2 * z1
    cy = z1 * x2 - x1 * z2
    cz = x1 * y2 - y1 * x2
    return cx, cy, cz
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal bookkeeping for the number of recursive continuations taken
        so far; callers should leave this as None.
    recurse : bool
        If True, continue the trace with a recursive call whenever the end of
        the integration has not yet reached the target height.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """

    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate along the field; igrf.igrf_step supplies the local derivative
    # for odeint, parameterized by date, step size, direction, and height
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
                                         # mxstep=500)
    # check that we reached final altitude
    check = trace_north[-1, :]
    # note: z below is the geodetic altitude (km), third return value
    x, y, z = ecef_to_geodetic(*check)
    if height == 0:
        # use a 1 km floor for the comparison when tracing to the surface
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # stack the continuation onto the trace so far
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        # filter points to terminate at point closest to target height
        # code below not correct, we want the first point that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Trace a complete field line, footpoint to footpoint, using IGRF.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """
    if steps is None:
        steps = np.arange(max_steps)
    # run the anti-field-aligned (southward) leg, then the field-aligned
    # (northward) leg from the same starting point
    south_leg = field_line_trace(init, date, -1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    north_leg = field_line_trace(init, date, 1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # reverse the southern leg so positions run south to north, dropping its
    # copy of the shared starting point before joining the two legs
    return np.vstack((south_leg[::-1][:-1, :], north_leg))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
    The zonal vector is calculated by field-line tracing from
    the input locations toward the footpoint locations at ref_height.
    The cross product of these two vectors is taken to define the plane of
    the magnetic field. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector with the field-aligned direction is removed (optional).
    The meridional unit vector is defined via the cross product of the
    zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Steps at which field line trace positions are reported;
        defaults to np.arange(max_steps)
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates, needed for IGRF inputs
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # map longitudes into the 0-360 range expected downstream
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = []
    north_y = []
    north_z = []
    south_x = []
    south_y = []
    south_z = []
    bn = []
    be = []
    bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # trace to the footpoint in each hemisphere
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location only; each trace ends at its footpoint
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to decimal year, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year + 1, 1, 1) -
                        datetime.datetime(time.year, 1, 1)).days
        date = (time.year + float(doy)/float(num_doy_year) +
                (time.hour + time.minute/60. + time.second/3600.)/24.)
        # get IGRF field components; tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn)
        be.append(tbe)
        bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # unit vectors from satellite location toward northern/southern footpoints
    north_x, north_y, north_z = normalize_vector(north_x - ecef_x,
                                                 north_y - ecef_y,
                                                 north_z - ecef_z)
    south_x, south_y, south_z = normalize_vector(south_x - ecef_x,
                                                 south_y - ecef_y,
                                                 south_z - ecef_z)
    # magnetic field unit vector; IGRF returns north/east/down components,
    # convert to ECEF via the ENU transform (note -bd gives the up component)
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # cross product of southward and northward vectors defines the zonal
    # direction (normal to the plane containing both footpoint vectors)
    zvx, zvy, zvz = cross_product(south_x, south_y, south_z,
                                  north_x, north_y, north_z)
    zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    if filter_zonal:
        # remove any field aligned component from the zonal vector
        # (previously this was done unconditionally, ignoring the flag)
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # meridional vector completes the set: zonal cross field-aligned
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # unit vectors for magnetic drifts in ECEF coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, step along a magnetic unit vector direction
    towards the supplied field line trace and determine the distance of
    closest approach to that field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form
    the last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (0.01 km step
    size) near the location of closest approach to better determine where
    the intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        If 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector
        method for more.
    step_size_goal : float
        Step size (km) the method will try to match when stepping towards
        the field line.
    field_step_size : float
        Step size (km) used to produce the input field_line trace; sets the
        length of the high resolution re-trace.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after
        taking the step [x, y, z] in ECEF; distance of closest approach
        from input pos towards the input field line trace.
    """
    # NOTE(review): despite the name, this is a re-binding, not a copy;
    # field_line itself is never mutated below, so this is harmless
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the total distance taken along the unit vector direction
    scalar = 0.
    # loop-control flags
    repeat=True
    first=True
    # factor is a divisor applied to the remaining distance between point and
    # field line; steps are taken cautiously to avoid overshooting. Each time
    # the minimum distance increases, step back, increase factor (reducing the
    # next step size), then try again.
    factor = 1
    while repeat:
        # take a total step along the magnetic unit vector, broken into
        # sub-steps near the user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # distance from stepped position to every field line point
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first pass only: replace the coarse trace with a high
            # resolution (0.01 km) trace centered on the closest coarse
            # point, spanning one coarse field step in each direction
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # recompute the closest point against the fine trace
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            min_idx = np.argmin(diff_mag)
            first = False
        # distance of closest approach for the current step
        min_dist = diff_mag[min_idx]
        # check how the solution is doing:
        # if better, add more distance to the total step and recheck;
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step taken made the solution worse
            if factor > 4:
                # tried enough; undo last increment and stop looping
                repeat = False
                scalar = scalar - last_min_dist/(2*factor)
                # calculate final position at the converged total distance
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new, smaller increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # did better; move closer by a fraction of remaining distance
            scalar = scalar + min_dist/(2*factor)
            # new standard to judge against
            last_min_dist = min_dist.copy()
    # total step distance, final position, and distance of closest approach
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs: 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not one of 'meridional', 'zonal', or 'aligned'.
        (Previously an unsupported direction produced a NameError deep
        inside the loop.)
    """
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be one of 'meridional', 'zonal', "
                         "or 'aligned'; got " + repr(direction))
    # parameters for the underlying field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # unit vectors are location dependent; recompute them at the
        # current position (geodetic inputs required) before each step
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
                [lat], [lon], [alt], [date],
                steps=field_steps,
                max_steps=field_max_steps,
                step_size=field_step_size,
                ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        else:
            ux, uy, uz = bx, by, bz
        # take a step along the chosen direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace
    spanning Northern/Southern footpoints is used to find the location with
    the largest geodetic (WGS84) height. A higher resolution trace (0.01 km)
    is then used to get a better fix on this location. Greatest geodetic
    height is once again selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (array, array, array, array, array, array)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)
    """
    # input locations in ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # coarse trace parameters
    coarse_step = 100.
    coarse_max = 1000
    coarse_idx = np.arange(coarse_max)
    # fine trace parameters; span covers one coarse step with margin
    fine_step = .01
    fine_max = int(coarse_step/fine_step) + 10
    fine_idx = np.arange(fine_max)
    # accumulate apex positions
    out_x = []
    out_y = []
    out_z = []
    for sx, sy, sz, date in zip(ecef_xs, ecef_ys, ecef_zs, dates):
        # full coarse trace through the input point; the apex is the
        # trace location highest above the geodetic Earth
        path = full_field_line(np.array([sx, sy, sz]), date, 0.,
                               steps=coarse_idx,
                               step_size=coarse_step,
                               max_steps=coarse_max)
        _, _, heights = ecef_to_geodetic(path[:, 0], path[:, 1], path[:, 2])
        peak = np.argmax(heights)
        # refine: short high resolution trace centered on the coarse
        # maximum, one big step in each direction (recurse=False caps
        # the trace at fine_max steps)
        path = full_field_line(path[peak, :], date, 0.,
                               steps=fine_idx,
                               step_size=fine_step,
                               max_steps=fine_max,
                               recurse=False)
        _, _, heights = ecef_to_geodetic(path[:, 0], path[:, 1], path[:, 2])
        peak = np.argmax(heights)
        out_x.append(path[peak, 0])
        out_y.append(path[peak, 1])
        out_z.append(path[peak, 2])
    out_x = np.array(out_x)
    out_y = np.array(out_y)
    out_z = np.array(out_z)
    # also report apex locations geodetically
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and going through the footpoint. At the footpoint, steps along
    vector direction in both positive and negative directions, then traces
    back to the opposite footpoint. Back at the input location, steps toward
    those new field lines (edge_length) along vector direction until hitting
    the distance of minimum approach. Loops don't always close. Returns total
    edge distance that goes through the input location, along with the
    distances of closest approach.

    Note
    ----
    vector_direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration; defaults to 100.
    max_steps : int
        Number of steps taken for field line integration; defaults to 1000.
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step, are returned.
    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # trace direction toward the requested hemisphere
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # Form closed loops via field line integration: start at the
        # location of interest, map down to a footpoint, take symmetric
        # steps along the unit vector direction, trace back, then step
        # from the location of interest until hitting the distance of
        # closest approach to each return field line. With the known
        # footpoint step distances and the closest approach distance
        # the scalar mapping of one location to another is determined.
        # decimal year required by field_line_trace / IGRF
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location (last trace point)
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to the opposite footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take step from footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to the opposite footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # determine where the field line returning through the positive
        # step intersects (closest approach) the vector direction from
        # the s/c location
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # same for the negative step, moving along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input
    location.

    From the input location, a step along both the positive and negative
    vector_direction is taken, and the apex locations for those stepped
    points are calculated. The difference in position between these apex
    locations is the total centered distance between magnetic field lines
    at the magnetic apex when starting locally with a field line half
    distance of edge_length.

    Note
    ----
    vector_direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at the input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from the input location along the unit vector
        in a given direction (positive/negative)

    Returns
    -------
    np.array
        The distance between apex locations of the field lines bracketing
        each input location.
    """
    # TODO: an alternative, potentially more accurate technique traces short
    # field lines about each apex and uses step_until_intersect to find the
    # distance of closest approach (as done in
    # closed_loop_edge_lengths_via_footpoint). It is more computationally
    # intensive; a comparison is planned. See repository history for the
    # prototype implementation previously kept here as commented-out code.
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # take step from s/c along + vector direction, then find the
        # apex location of the field line through the stepped point
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z = apex_location_info(
            [plus_lat], [plus_lon], [plus_alt], [date])[:3]
        # take step from s/c along - vector direction, then find the
        # apex location of that field line
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z = apex_location_info(
            [minus_lat], [minus_lon], [minus_alt], [date])[:3]
        # straight-line separation between the two apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0] - minus_apex_x[0])**2 +
                                        (plus_apex_y[0] - minus_apex_y[0])**2 +
                                        (plus_apex_z[0] - minus_apex_z[0])**2))
    return np.array(apex_edge_length)
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
    Directions refer to the ion motion direction e.g. the zonal
    scalar applies to zonal ion motions (meridional E field assuming ExB
    ion motion). This is why the edge lengths computed with the
    'meridional' unit vector feed the *zonal* drift scalars below, and
    vice versa.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size used for field line integration; defaults to 100.
    max_steps : int
        Number of steps taken for field line integration; defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.
        When e_field_scaling_only is True the keys end in '_fields_scalar'
        instead.
    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    # NOTE(review): these two are overwritten by the closed-loop calls below
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # southern footpoint mappings
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # equatorial (apex) mappings
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                'meridional',
                                                edge_length=25.,
                                                edge_steps=5)
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                'zonal',
                                                edge_length=25.,
                                                edge_steps=5)
    # normalize footpoint edge lengths by the full edge length at the
    # s/c (2 * edge_length = 50 km) to form the E-field scalars
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial scalars are inverted: apex separation relative to the
    # local 50 km edge
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
    if e_field_scaling_only:
        # prepare output; note the deliberate zonal/meridional crossover
        # between drift and field naming (drift = E x B)
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength between s/c, footpoints, and apex (v = E/B)
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            # decimal year required by field_line_trace / IGRF
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                                    [glat], [glon],
                                                                    [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint locations (last trace points), geodetically
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # total field magnitude (4th return) at the s/c location;
            # IGRF takes colatitude (radians) and east longitude (radians)
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # field magnitude and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial (apex) values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars into drift scalars
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | step_along_mag_unit_vector | python | def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
step_size=5., scalar=1):
# set parameters for the field line tracing routines
field_step_size = 100.
field_max_steps = 1000
field_steps = np.arange(field_max_steps)
for i in np.arange(num_steps):
# x, y, z in ECEF
# convert to geodetic
lat, lon, alt = ecef_to_geodetic(x, y, z)
# get unit vector directions
zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
[lat], [lon], [alt], [date],
steps=field_steps,
max_steps=field_max_steps,
step_size=field_step_size,
ref_height=0.)
# pull out the direction we need
if direction == 'meridional':
ux, uy, uz = mx, my, mz
elif direction == 'zonal':
ux, uy, uz = zvx, zvy, zvz
elif direction == 'aligned':
ux, uy, uz = bx, by, bz
# take steps along direction
x = x + step_size*ux[0]*scalar
y = y + step_size*uy[0]*scalar
z = z + step_size*uz[0]*scalar
return np.array([x, y, z]) | Move along 'lines' formed by following the magnetic unit vector directions.
Moving along the field is effectively the same as a field line trace though
extended movement along a field should use the specific field_line_trace
method.
Parameters
----------
x : ECEF-x (km)
Location to step from in ECEF (km). Scalar input.
y : ECEF-y (km)
Location to step from in ECEF (km). Scalar input.
z : ECEF-z (km)
Location to step from in ECEF (km). Scalar input.
date : list-like of datetimes
Date and time for magnetic field
direction : string
String identifier for which unit vector directino to move along.
Supported inputs, 'meridional', 'zonal', 'aligned'
num_steps : int
Number of steps to take along unit vector direction
step_size = float
Distance taken for each step (km)
scalar : int
Scalar modifier for step size distance. Input a -1 to move along
negative unit vector direction.
Returns
-------
np.array
[x, y, z] of ECEF location after taking num_steps along direction,
each step_size long. | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L772-L841 | [
"def ecef_to_geodetic(x, y, z, method=None):\n \"\"\"Convert ECEF into Geodetic WGS84 coordinates\n\n Parameters\n ----------\n x : float or array_like\n ECEF-X in km\n y : float or array_like\n ECEF-Y in km\n z : float or array_like\n ECEF-Z in km\n method : 'iterative' or 'closed' ('closed' is deafult)\n String selects method of conversion. Closed for mathematical\n solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)\n or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).\n\n Returns\n -------\n latitude, longitude, altitude\n numpy arrays of locations in degrees, degrees, and km\n\n \"\"\"\n\n # quick notes on ECEF to Geodetic transformations \n # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html\n\n method = method or 'closed'\n\n # ellipticity of Earth \n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n # first eccentricity squared\n e2 = ellip ** 2 # 6.6943799901377997E-3\n\n longitude = np.arctan2(y, x)\n # cylindrical radius\n p = np.sqrt(x ** 2 + y ** 2)\n\n # closed form solution\n # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1\n if method == 'closed':\n e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)\n theta = np.arctan2(z*earth_a, p*earth_b)\n latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)\n r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)\n h = p / np.cos(latitude) - r_n\n\n # another possibility\n # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf\n\n ## iterative method\n # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf\n if method == 'iterative':\n latitude = np.arctan2(p, z)\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n for i in np.arange(6):\n # print latitude\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n h = p / np.cos(latitude) - r_n\n latitude = np.arctan(z / p / (1. 
- e2 * (r_n / (r_n + h))))\n # print h\n # final ellipsoidal height update\n h = p / np.cos(latitude) - r_n\n\n return np.rad2deg(latitude), np.rad2deg(longitude), h\n",
"def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,\n steps=None, max_steps=1000, step_size=100.,\n ref_height=120., filter_zonal=True):\n \"\"\"Calculates unit vectors expressing the ion drift coordinate system\n organized by the geomagnetic field. Unit vectors are expressed\n in ECEF coordinates.\n\n Note\n ----\n The zonal vector is calculated by field-line tracing from\n the input locations toward the footpoint locations at ref_height.\n The cross product of these two vectors is taken to define the plane of\n the magnetic field. This vector is not always orthogonal\n with the local field-aligned vector (IGRF), thus any component of the \n zonal vector with the field-aligned direction is removed (optional). \n The meridional unit vector is defined via the cross product of the \n zonal and field-aligned directions.\n\n Parameters\n ----------\n latitude : array-like of floats (degrees)\n Latitude of location, degrees, WGS84\n longitude : array-like of floats (degrees)\n Longitude of location, degrees, WGS84\n altitude : array-like of floats (km)\n Altitude of location, height above surface, WGS84\n datetimes : array-like of datetimes\n Time to calculate vectors\n max_steps : int\n Maximum number of steps allowed for field line tracing\n step_size : float\n Maximum step size (km) allowed when field line tracing\n ref_height : float\n Altitude used as cutoff for labeling a field line location a footpoint\n filter_zonal : bool\n If True, removes any field aligned component from the calculated\n zonal unit vector. 
Resulting coordinate system is not-orthogonal.\n\n Returns\n -------\n zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z\n\n \"\"\"\n\n if steps is None:\n steps = np.arange(max_steps)\n # calculate satellite position in ECEF coordinates\n ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)\n # also get position in geocentric coordinates\n geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z, \n ref_height=0.)\n # filter longitudes (could use pysat's function here)\n idx, = np.where(geo_long < 0)\n geo_long[idx] = geo_long[idx] + 360.\n # prepare output lists\n north_x = [];\n north_y = [];\n north_z = []\n south_x = [];\n south_y = [];\n south_z = []\n bn = [];\n be = [];\n bd = []\n\n for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z, \n geo_alt, np.deg2rad(90. - geo_lat),\n np.deg2rad(geo_long), datetimes):\n init = np.array([x, y, z])\n # date = inst.yr + inst.doy / 366.\n # trace = full_field_line(init, time, ref_height, step_size=step_size, \n # max_steps=max_steps,\n # steps=steps)\n trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,\n step_size=step_size, max_steps=max_steps)\n trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,\n step_size=step_size, max_steps=max_steps)\n # store final location, full trace goes south to north\n trace_north = trace_north[-1, :]\n trace_south = trace_south[-1, :]\n # magnetic field at spacecraft location, using geocentric inputs\n # to get magnetic field in geocentric output\n # recast from datetime to float, as required by IGRF12 code\n doy = (time - datetime.datetime(time.year,1,1)).days\n # number of days in year, works for leap years\n num_doy_year = (datetime.datetime(time.year+1,1,1) - datetime.datetime(time.year,1,1)).days\n date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. 
+ time.second/3600.)/24.\n # get IGRF field components\n # tbn, tbe, tbd, tbmag are in nT\n tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)\n # collect outputs\n south_x.append(trace_south[0])\n south_y.append(trace_south[1])\n south_z.append(trace_south[2])\n north_x.append(trace_north[0])\n north_y.append(trace_north[1])\n north_z.append(trace_north[2])\n\n bn.append(tbn);\n be.append(tbe);\n bd.append(tbd)\n\n north_x = np.array(north_x)\n north_y = np.array(north_y)\n north_z = np.array(north_z)\n south_x = np.array(south_x)\n south_y = np.array(south_y)\n south_z = np.array(south_z)\n bn = np.array(bn)\n be = np.array(be)\n bd = np.array(bd)\n\n # calculate vector from satellite to northern/southern footpoints\n north_x = north_x - ecef_x\n north_y = north_y - ecef_y\n north_z = north_z - ecef_z\n north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)\n south_x = south_x - ecef_x\n south_y = south_y - ecef_y\n south_z = south_z - ecef_z\n south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)\n # calculate magnetic unit vector\n bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)\n bx, by, bz = normalize_vector(bx, by, bz)\n\n # take cross product of southward and northward vectors to get the zonal vector\n zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,\n north_x, north_y, north_z) \n # getting zonal vector utilizing magnetic field vector instead\n zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z,\n bx, by, bz)\n # getting zonal vector utilizing magnetic field vector instead and southern point\n zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z,\n bx, by, bz)\n # normalize the vectors\n norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)\n\n # calculate zonal vector\n zvx = zvx_foot / norm_foot\n zvy = zvy_foot / norm_foot\n zvz = zvz_foot / norm_foot\n # remove any field aligned component to the zonal 
vector\n dot_fa = zvx * bx + zvy * by + zvz * bz\n zvx -= dot_fa * bx\n zvy -= dot_fa * by\n zvz -= dot_fa * bz\n zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)\n # compute meridional vector\n # cross product of zonal and magnetic unit vector\n mx, my, mz = cross_product(zvx, zvy, zvz,\n bx, by, bz)\n # add unit vectors for magnetic drifts in ecef coordinates\n return zvx, zvy, zvz, bx, by, bz, mx, my, mz\n"
] |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geoncentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # total distance from Earth's center for a spherical Earth
    radius = earth_geo_radius + altitude
    # projection onto the equatorial plane, then split by longitude
    equatorial = radius * np.cos(lat_r)
    x = equatorial * np.cos(lon_r)
    y = equatorial * np.sin(lon_r)
    z = radius * np.sin(lat_r)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius subtracted from the geocentric distance to form
        the returned height. Defaults to average Earth radius of 6371 km.

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    # fall back to the spherical Earth radius defined at module level
    base_radius = earth_geo_radius if ref_height is None else ref_height
    # geocentric distance from Earth's center
    dist = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude measured from the equator rather than from the pole
    latitude = 90. - np.rad2deg(np.arccos(z / dist))
    longitude = np.rad2deg(np.arctan2(y, x))
    return latitude, longitude, dist - base_radius
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # eccentricity of the WGS84 ellipsoid; square for first eccentricity
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    e2 = ellip ** 2
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - e2 * np.sin(lat_r) ** 2)
    # horizontal distance from the rotation axis
    axial_dist = (r_n + altitude) * np.cos(lat_r)
    x = axial_dist * np.cos(lon_r)
    y = axial_dist * np.sin(lon_r)
    z = (r_n * (1. - e2) + altitude) * np.sin(lat_r)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        Selects conversion method. 'closed' uses the mathematical solution
        (http://www.epsg.org/Portals/0/373-07-2.pdf, page 96 section 2.2.1),
        'iterative' follows http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf.

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    # background on ECEF to geodetic transforms:
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'

    # eccentricity of the WGS84 ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3

    longitude = np.arctan2(y, x)
    # distance from the rotation axis (cylindrical radius)
    p = np.sqrt(x ** 2 + y ** 2)

    if method == 'closed':
        # closed form solution
        # http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
        e_prime = np.sqrt((earth_a ** 2 - earth_b ** 2) / earth_b ** 2)
        theta = np.arctan2(z * earth_a, p * earth_b)
        latitude = np.arctan2(z + e_prime ** 2 * earth_b * np.sin(theta) ** 3,
                              p - e2 * earth_a * np.cos(theta) ** 3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
    elif method == 'iterative':
        # iterative method
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        latitude = np.arctan2(p, z)
        for _ in range(6):
            r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n

    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    # rotate the local ENU triad into the Earth-fixed frame
    x = -east * np.sin(rlon) - north * np.cos(rlon) * np.sin(rlat) + up * np.cos(rlon) * np.cos(rlat)
    y = east * np.cos(rlon) - north * np.sin(rlon) * np.sin(rlat) + up * np.sin(rlon) * np.cos(rlat)
    z = north * np.cos(rlat) + up * np.sin(rlat)
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions

    """
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    # inverse of the ENU->ECEF rotation (transpose of that matrix)
    east = -x * np.sin(rlon) + y * np.cos(rlon)
    north = -x * np.cos(rlon) * np.sin(rlat) - y * np.sin(rlon) * np.sin(rlat) + z * np.cos(rlat)
    up = x * np.cos(rlon) * np.cos(rlat) + y * np.sin(rlon) * np.cos(rlat) + z * np.sin(rlat)
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects an ECEF vector onto a new basis whose unit vectors are
    themselves expressed in ECEF components.

    Parameters
    ----------
    x, y, z : float or array-like
        ECEF components of the vector to project
    xx, xy, xz : float or array-like
        ECEF components of the new basis' x unit vector
    yx, yy, yz : float or array-like
        ECEF components of the new basis' y unit vector
    zx, zy, zz : float or array-like
        ECEF components of the new basis' z unit vector

    Returns
    -------
    out_x, out_y, out_z
        Components of the input vector along the new basis directions

    """
    # dot the vector against each basis unit vector in turn
    proj_x = x * xx + y * xy + z * xz
    proj_y = x * yx + y * yy + z * yz
    proj_z = x * zx + y * zy + z * zz
    return proj_x, proj_y, proj_z
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components

    """
    # Euclidean length of the input vector
    mag = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    return x / mag, y / mag, z / mag
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of v1 x v2

    """
    # standard determinant expansion of the cross product
    cx = y1 * z2 - y2 * z1
    cy = z1 * x2 - x1 * z2
    cz = x1 * y2 - y1 * x2
    return cx, cy, cz
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Integrates along (or against) the geomagnetic field from `init` and,
    when the trace ends above the target altitude, restarts the trace from
    the current endpoint (recursively) until the altitude is reached.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Points along the integration at which positions are reported.
        By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Number of restarts already performed. Managed internally by the
        recursive calls; callers should leave this as None.
    recurse : bool
        If True (default), restart the trace from its endpoint when the
        target altitude has not been reached, up to 1000 times.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If 1000 recursive restarts fail to reach the target altitude.

    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    # default to reporting position at every integration step
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate the trace; igrf.igrf_step supplies the local step direction
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False)
    # check that we reached final altitude
    check = trace_north[-1, :]
    # NOTE: despite the names, these are geodetic latitude, longitude, and
    # altitude of the trace endpoint
    x, y, z = ecef_to_geodetic(*check)
    # use a small positive altitude for the termination test when the
    # requested height is exactly zero
    if height == 0:
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height; allow a small
    # relative tolerance before restarting the trace
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # stack original trace with the continuation from its endpoint
        return np.vstack((trace_north, trace_north1))
    else:
        # made it to the target altitude; return the full trace
        # NOTE(review): ideally this would terminate at the first point
        # below the target height rather than return every step
        return trace_north
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Trace the complete field line through a point using IGRF and odeint.

    Traces anti-parallel and then parallel to the field from `init`, and
    joins the two traces into a single south-to-north ordered array.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps taken along each tracing direction
    steps : array-like of ints or floats
        Points at which trace positions are reported. Defaults to
        np.arange(max_steps). Because two traces are combined, the output
        may contain up to double max_steps points, or more via recursion.
    **kwargs
        Passed through to field_line_trace.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace against the field direction first, then along it
    southward = field_line_trace(init, date, -1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    northward = field_line_trace(init, date, 1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # order points south to north, dropping the duplicated starting
    # location from the reversed southern trace
    return np.vstack((southward[:0:-1, :], northward))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
    The zonal vector is calculated by field-line tracing from
    the input locations toward the footpoint locations at ref_height.
    The cross product of these two footpoint vectors is taken to define the
    plane of the magnetic field. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector along the field-aligned direction is removed when
    filter_zonal is True.
    The meridional unit vector is defined via the cross product of the
    zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Integration steps passed to the field line trace. Defaults to
        np.arange(max_steps).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True (default), removes any field aligned component from the
        calculated zonal unit vector. Resulting coordinate system is
        not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
        ECEF components of the zonal, field-aligned, and meridional unit
        vectors, one entry per input location.

    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # shift longitudes into [0, 360)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = []
    north_y = []
    north_z = []
    south_x = []
    south_y = []
    south_z = []
    bn = []
    be = []
    bd = []

    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # trace to the northern and southern footpoints at ref_height
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year + 1, 1, 1)
                        - datetime.datetime(time.year, 1, 1)).days
        date = (time.year + float(doy)/float(num_doy_year)
                + (time.hour + time.minute/60. + time.second/3600.)/24.)
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn)
        be.append(tbe)
        bd.append(tbd)

    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)

    # unit vectors from satellite toward northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # field-aligned unit vector; IGRF components are north/east/down,
    # flip down to up for the ENU conversion
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)

    # cross product of the southward and northward footpoint vectors
    # defines the zonal direction, normal to the field line plane
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # normalize to get the zonal unit vector
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    if filter_zonal:
        # optionally remove any field aligned component from the zonal
        # vector; the docstring always advertised this as controlled by
        # filter_zonal but the flag was previously ignored
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # meridional vector completes the set: zonal cross field-aligned
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form
    the last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        If 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        Step size (km) the method will try to match when stepping towards
        the field line.
    field_step_size : float
        Sets the extent (km) of the high resolution re-trace made around
        the point of closest approach. NOTE(review): the default of None
        raises a TypeError when the high resolution trace is built
        (int(field_step_size/.01)); callers must supply a value.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.

    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean
    repeat=True
    # first run boolean
    first=True
    # factor is a divisor applied to the remaining distance between point and
    # field line. Steps are taken toward the field line without overshooting;
    # each time the minimum distance increases, step back, increase factor
    # (reducing the next step size), then try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # position after moving `scalar` km along the unit vector direction
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position against the refined trace
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step, final position, and closest approach distance
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not one of 'meridional', 'zonal', or 'aligned'.

    """
    # validate direction up front; previously an unsupported value fell
    # through the if/elif chain and produced a NameError on first use of ux
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be 'meridional', 'zonal', "
                         "or 'aligned'")
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF; convert to geodetic, required by the
        # unit vector calculation routine
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions at the current location
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
                [lat], [lon], [alt], [date],
                steps=field_steps,
                max_steps=field_max_steps,
                step_size=field_step_size,
                ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        else:
            # direction == 'aligned', guaranteed by the validation above
            ux, uy, uz = bx, by, bz
        # take a step along the selected direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (array, array, array, array, array, array)
        Arrays of ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km), one entry per input location.

    """
    # use input location and convert to ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare parameters for field line trace
    step_size = 100.
    max_steps = 1000
    steps = np.arange(max_steps)
    # high resolution trace parameters
    fine_step_size = .01
    # enough fine steps to cover one coarse step, plus a small margin
    fine_max_steps = int(step_size/fine_step_size)+10
    fine_steps = np.arange(fine_max_steps)
    # prepare output
    out_x = []
    out_y = []
    out_z = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # to get the apex location we need to do a field line trace
        # then find the highest point
        trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
                                steps=steps,
                                step_size=step_size,
                                max_steps=max_steps)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # repeat using a high resolution trace one big step size each
        # direction around identified max
        # recurse False ensures only max_steps are taken
        trace = full_field_line(trace[max_idx,:], date, 0.,
                                steps=fine_steps,
                                step_size=fine_step_size,
                                max_steps=fine_max_steps,
                                recurse=False)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # collect outputs
        out_x.append(trace[max_idx,0])
        out_y.append(trace[max_idx,1])
        out_z.append(trace[max_idx,2])
    out_x = np.array(out_x)
    out_y = np.array(out_y)
    out_z = np.array(out_z)
    # report apex locations in geodetic coordinates as well
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and going through the footpoint. At the footpoint, steps along
    vector direction in both positive and negative directions, then traces back
    to the opposite footpoint. Back at the input location, steps toward those
    new field lines (edge_length) along vector direction until hitting distance
    of minimum approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest
    approach.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration. Defaults to 100 km.
    max_steps : int
        Number of steps taken for field line integration. Defaults to 1000.
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.

    Raises
    ------
    ValueError
        If direction is not 'north' or 'south'.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)

    # signed tracing direction toward the requested footpoint; previously an
    # unsupported direction left `direct` undefined and raised a NameError
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    else:
        raise ValueError("direction must be 'north' or 'south'")

    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closest approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through positive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)

    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input location.

    From the input location, a step along both the positive and negative
    vector_direction is taken, and the apex locations for those stepped points
    are calculated. The difference in position between these apex locations is
    the total centered distance between magnetic field lines at the magnetic
    apex when starting locally with a field line half distance of edge_length.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at the input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from the input location
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations (km), one entry per
        input location.

    """
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # take step from s/c along + vector direction
        # then get the apex location
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # take step from s/c along - vector direction
        # then get the apex location
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        # take difference in apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
                                        (plus_apex_y[0]-minus_apex_y[0])**2 +
                                        (plus_apex_z[0]-minus_apex_z[0])**2))
    # NOTE(review): an alternative method based on stepping between apex
    # field line traces existed as commented-out code; it was removed here
    # along with the apex_location_info call for the input location itself,
    # whose result (and the yr/doy date conversion) only fed that dead code.
    return np.array(apex_edge_length)
# # take step from one apex towards the other
# apex_path = step_along_mag_unit_vector(minus_apex_x, minus_apex_y, minus_apex_z, date,
# direction=vector_direction,
# num_steps=edge_steps,
# step_size=apex_edge_length[-1]/(edge_steps*2.))
# pos_apex_diff.append((apex_path[0] - plus_apex_x)**2 +
# (apex_path[1] - plus_apex_y)**2 +
# (apex_path[2] - plus_apex_z)**2)
# return apex_edge_length, path_apex_diff
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
    Directions refer to the ion motion direction e.g. the zonal
    scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size (km) used for field line integration. Defaults to 100 km.
    max_steps : int
        Number of steps taken for field line integration. Defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.
        When e_field_scaling_only is True the keys end in 'fields_scalar'
        instead.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)

    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)

    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    # NOTE: each closed-loop call below integrates field lines for every
    # input location, so this section dominates run time
    # print ('Starting Northern')
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                        glons, alts, dates, 'north',
                                        'meridional',
                                        step_size=step_size,
                                        max_steps=max_steps,
                                        edge_length=25.,
                                        edge_steps=5)

    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                        glons, alts, dates, 'north',
                                        'zonal',
                                        step_size=step_size,
                                        max_steps=max_steps,
                                        edge_length=25.,
                                        edge_steps=5)

    # print ('Starting Southern')
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                        glons, alts, dates, 'south',
                                        'meridional',
                                        step_size=step_size,
                                        max_steps=max_steps,
                                        edge_length=25.,
                                        edge_steps=5)

    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                        glons, alts, dates, 'south',
                                        'zonal',
                                        step_size=step_size,
                                        max_steps=max_steps,
                                        edge_length=25.,
                                        edge_steps=5)

    # print ('Starting Equatorial')
    # , step_zon_apex2, mind_plus, mind_minus
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'meridional',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # , step_mer_apex2, mind_plus, mind_minus
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'zonal',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # print ('Done with core')
    # normalize the edge lengths by the total local step (2 * 25 km)
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial scalars are inverted relative to the footpoint scalars
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar

    if e_field_scaling_only:
        # prepare output
        # note the mer/zon swap: a meridional E-field drives a zonal drift
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                                        [glat], [glon],
                                                                        [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)

            # scalar for the northern footpoint electric field based on distances
            # for drift also need to include the magnetic field, drift = E/B
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)

        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar

    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | apex_location_info | python | def apex_location_info(glats, glons, alts, dates):
# use input location and convert to ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare parameters for field line trace
step_size = 100.
max_steps = 1000
steps = np.arange(max_steps)
# high resolution trace parameters
fine_step_size = .01
fine_max_steps = int(step_size/fine_step_size)+10
fine_steps = np.arange(fine_max_steps)
# prepare output
out_x = []
out_y = []
out_z = []
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
# to get the apex location we need to do a field line trace
# then find the highest point
trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# convert all locations to geodetic coordinates
tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
# determine location that is highest with respect to the geodetic Earth
max_idx = np.argmax(talt)
# repeat using a high resolution trace one big step size each
# direction around identified max
# recurse False ensures only max_steps are taken
trace = full_field_line(trace[max_idx,:], date, 0.,
steps=fine_steps,
step_size=fine_step_size,
max_steps=fine_max_steps,
recurse=False)
# convert all locations to geodetic coordinates
tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
# determine location that is highest with respect to the geodetic Earth
max_idx = np.argmax(talt)
# collect outputs
out_x.append(trace[max_idx,0])
out_y.append(trace[max_idx,1])
out_z.append(trace[max_idx,2])
out_x = np.array(out_x)
out_y = np.array(out_y)
out_z = np.array(out_z)
glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
return out_x, out_y, out_z, glat, glon, alt | Determine apex location for the field line passing through input point.
Employs a two stage method. A broad step (100 km) field line trace spanning
Northern/Southern footpoints is used to find the location with the largest
geodetic (WGS84) height. A higher resolution trace (.1 km) is then used to
get a better fix on this location. Greatest geodetic height is once again
selected.
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
Returns
-------
(float, float, float, float, float, float)
ECEF X (km), ECEF Y (km), ECEF Z (km),
Geodetic Latitude (degrees),
Geodetic Longitude (degrees),
Geodetic Altitude (km) | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L844-L924 | [
"def geodetic_to_ecef(latitude, longitude, altitude):\n \"\"\"Convert WGS84 geodetic coordinates into ECEF\n\n Parameters\n ----------\n latitude : float or array_like\n Geodetic latitude (degrees)\n longitude : float or array_like\n Geodetic longitude (degrees)\n altitude : float or array_like\n Geodetic Height (km) above WGS84 reference ellipsoid.\n\n Returns\n -------\n x, y, z\n numpy arrays of x, y, z locations in km\n\n \"\"\"\n\n\n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(np.deg2rad(latitude)) ** 2)\n\n # colatitude = 90. - latitude\n x = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))\n y = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))\n z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(np.deg2rad(latitude))\n\n return x, y, z\n",
"def ecef_to_geodetic(x, y, z, method=None):\n \"\"\"Convert ECEF into Geodetic WGS84 coordinates\n\n Parameters\n ----------\n x : float or array_like\n ECEF-X in km\n y : float or array_like\n ECEF-Y in km\n z : float or array_like\n ECEF-Z in km\n method : 'iterative' or 'closed' ('closed' is deafult)\n String selects method of conversion. Closed for mathematical\n solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)\n or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).\n\n Returns\n -------\n latitude, longitude, altitude\n numpy arrays of locations in degrees, degrees, and km\n\n \"\"\"\n\n # quick notes on ECEF to Geodetic transformations \n # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html\n\n method = method or 'closed'\n\n # ellipticity of Earth \n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n # first eccentricity squared\n e2 = ellip ** 2 # 6.6943799901377997E-3\n\n longitude = np.arctan2(y, x)\n # cylindrical radius\n p = np.sqrt(x ** 2 + y ** 2)\n\n # closed form solution\n # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1\n if method == 'closed':\n e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)\n theta = np.arctan2(z*earth_a, p*earth_b)\n latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)\n r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)\n h = p / np.cos(latitude) - r_n\n\n # another possibility\n # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf\n\n ## iterative method\n # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf\n if method == 'iterative':\n latitude = np.arctan2(p, z)\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n for i in np.arange(6):\n # print latitude\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n h = p / np.cos(latitude) - r_n\n latitude = np.arctan(z / p / (1. 
- e2 * (r_n / (r_n + h))))\n # print h\n # final ellipsoidal height update\n h = p / np.cos(latitude) - r_n\n\n return np.rad2deg(latitude), np.rad2deg(longitude), h\n",
"def full_field_line(init, date, height, step_size=100., max_steps=1000, \n steps=None, **kwargs):\n \"\"\"Perform field line tracing using IGRF and scipy.integrate.odeint.\n\n Parameters\n ----------\n init : array-like of floats\n Position to begin field line tracing from in ECEF (x,y,z) km\n date : datetime or float\n Date to perform tracing on (year + day/365 + hours/24. + etc.)\n Accounts for leap year if datetime provided.\n height : float\n Altitude to terminate trace, geodetic WGS84 (km)\n max_steps : float\n Maximum number of steps along field line that should be taken\n step_size : float\n Distance in km for each large integration step. Multiple substeps\n are taken as determined by scipy.integrate.odeint\n steps : array-like of ints or floats\n Number of steps along field line when field line trace positions should \n be reported. By default, each step is reported; steps=np.arange(max_steps).\n Two traces are made, one north, the other south, thus the output array\n could have double max_steps, or more via recursion.\n\n Returns\n -------\n numpy array\n 2D array. [0,:] has the x,y,z location for southern footpoint\n [:,0] is the x positions over the integration.\n Positions are reported in ECEF (km).\n\n\n \"\"\"\n\n if steps is None:\n steps = np.arange(max_steps)\n # trace north, then south, and combine\n trace_south = field_line_trace(init, date, -1., height, \n steps=steps,\n step_size=step_size, \n max_steps=max_steps, \n **kwargs)\n trace_north = field_line_trace(init, date, 1., height, \n steps=steps,\n step_size=step_size, \n max_steps=max_steps, \n **kwargs)\n # order of field points is generally along the field line, south to north\n # don't want to include the initial point twice\n trace = np.vstack((trace_south[::-1][:-1,:], trace_north))\n return trace\n"
] |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geoncentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    # work in radians for the trig calls
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # radial distance from Earth's center
    radial = earth_geo_radius + altitude
    # standard spherical-to-Cartesian transformation
    cos_lat = np.cos(lat_rad)
    x = radial * cos_lat * np.cos(lon_rad)
    y = radial * cos_lat * np.sin(lon_rad)
    z = radial * np.sin(lat_rad)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # distance from Earth's center
    radial = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude from the complement of the polar angle
    latitude = 90. - np.rad2deg(np.arccos(z / radial))
    longitude = np.rad2deg(np.arctan2(y, x))
    # height relative to the reference radius
    altitude = radial - ref_height
    return latitude, longitude, altitude
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # eccentricity of the WGS84 ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(lat_rad) ** 2)
    # project onto Cartesian ECEF axes; the z term uses the reduced
    # polar radius of the ellipsoid
    cos_lat = np.cos(lat_rad)
    x = (r_n + altitude) * cos_lat * np.cos(lon_rad)
    y = (r_n + altitude) * cos_lat * np.sin(lon_rad)
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(lat_rad)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If method is not 'closed' or 'iterative'.
    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'
    # fail early on unknown method; previously an unrecognized string fell
    # through both branches and raised NameError at the return statement
    if method not in ('closed', 'iterative'):
        raise ValueError("method must be 'closed' or 'iterative', not "
                         + repr(method))
    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3
    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)
    # closed form solution
    # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
    if method == 'closed':
        # second eccentricity
        e_prime = np.sqrt((earth_a ** 2 - earth_b ** 2) / earth_b ** 2)
        theta = np.arctan2(z * earth_a, p * earth_b)
        latitude = np.arctan2(z + e_prime ** 2 * earth_b * np.sin(theta) ** 3,
                              p - e2 * earth_a * np.cos(theta) ** 3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
    # another possibility
    # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf
    # iterative method
    # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
    if method == 'iterative':
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        # fixed number of refinement passes; converges quickly in practice
        for i in np.arange(6):
            r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n
    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Convert a vector from East, North, Up components to ECEF components.

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions
    """
    # precompute trig terms of the rotation from local ENU to ECEF
    sin_lat = np.sin(np.radians(glat))
    cos_lat = np.cos(np.radians(glat))
    sin_lon = np.sin(np.radians(glong))
    cos_lon = np.cos(np.radians(glong))
    x = -east * sin_lon - north * cos_lon * sin_lat + up * cos_lon * cos_lat
    y = east * cos_lon - north * sin_lon * sin_lat + up * sin_lon * cos_lat
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Convert a vector from ECEF X,Y,Z components to East, North, Up.

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions
    """
    # precompute trig terms of the rotation from ECEF to local ENU
    sin_lat = np.sin(np.radians(glat))
    cos_lat = np.cos(np.radians(glat))
    sin_lon = np.sin(np.radians(glong))
    cos_lon = np.cos(np.radians(glong))
    east = -x * sin_lon + y * cos_lon
    north = -x * cos_lon * sin_lat - y * sin_lon * sin_lat + z * cos_lat
    up = x * cos_lon * cos_lat + y * sin_lon * cos_lat + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Project an ECEF vector onto a new basis expressed in ECEF components.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF components of the x unit vector of the new basis
    yx, yy, yz : float or array-like
        ECEF components of the y unit vector of the new basis
    zx, zy, zz : float or array-like
        ECEF components of the z unit vector of the new basis

    Returns
    -------
    out_x, out_y, out_z
        Vector components along each of the new basis unit vectors
    """
    # each output component is the dot product of the input vector
    # with the corresponding basis unit vector
    proj_x = x * xx + y * xy + z * xz
    proj_y = x * yx + y * yy + z * yz
    proj_z = x * zx + y * zy + z * zz
    return proj_x, proj_y, proj_z
def normalize_vector(x, y, z):
    """Normalize a vector to unit length.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components
    """
    # Euclidean length of the input vector
    magnitude = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    return x / magnitude, y / magnitude, z / magnitude
def cross_product(x1, y1, z1, x2, y2, z2):
    """Cross product of two vectors, v1 x v2.

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Cross product components; not normalized.
    """
    # standard component-wise cross product formula
    out_x = y1 * z2 - y2 * z1
    out_y = z1 * x2 - x1 * z2
    out_z = x1 * y2 - y1 * x2
    return out_x, out_y, out_z
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal bookkeeping of how many recursive continuation traces have
        been performed. Callers should leave this as None.
    recurse : bool
        If True (default), continue tracing from the end point when the trace
        terminates above the target height, up to 1000 continuations.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If 1000 recursive continuations do not reach the target altitude.
    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate along the field; igrf.igrf_step supplies the local field
    # direction at each substep
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
                                         # mxstep=500)
    # check that we reached final altitude
    check = trace_north[-1, :]
    x, y, z = ecef_to_geodetic(*check)
    # use 1 km as the effective target when tracing to the surface so the
    # relative tolerance test below remains meaningful
    if height == 0:
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    # note: `&` is a bitwise-and of two Python bools here, equivalent to `and`
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # stack the continuation trace onto this one
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        # filter points to terminate at point closest to target height
        # code below not correct, we want the first poiint that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Trace a complete field line, footpoint to footpoint, using IGRF.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).
    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace in both directions away from the starting point
    southward = field_line_trace(init, date, -1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    northward = field_line_trace(init, date, 1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # reverse the southern leg so the combined trace runs south to north;
    # drop its final row (the shared starting point) to avoid duplication
    return np.vstack((southward[::-1][:-1, :], northward))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
    The zonal vector is calculated by field-line tracing from
    the input locations toward the footpoint locations at ref_height.
    The cross product of these two vectors is taken to define the plane of
    the magnetic field. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector with the field-aligned direction is removed (optional).
    The meridional unit vector is defined via the cross product of the
    zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # filter longitudes into [0, 360) (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = [];
    north_y = [];
    north_z = []
    south_x = [];
    south_y = [];
    south_z = []
    bn = [];
    be = [];
    bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # date = inst.yr + inst.doy / 366.
        # trace = full_field_line(init, time, ref_height, step_size=step_size,
        #                         max_steps=max_steps,
        #                         steps=steps)
        # trace to both footpoints at ref_height
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year+1,1,1) - datetime.datetime(time.year,1,1)).days
        date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn);
        be.append(tbe);
        bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # calculate unit vectors from satellite toward northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic unit vector (ENU -> ECEF; up is -down)
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # take cross product of southward and northward vectors to get the zonal vector
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # getting zonal vector utilizing magnetic field vector instead
    zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z,
                                                    bx, by, bz)
    # getting zonal vector utilizing magnetic field vector instead and southern point
    zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z,
                                                    bx, by, bz)
    # normalize the vectors
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    # calculate zonal vector
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    if filter_zonal:
        # remove any field aligned component of the zonal vector
        # BUG FIX: this step previously ran unconditionally, ignoring the
        # filter_zonal flag
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # compute meridional vector
    # cross product of zonal and magnetic unit vector
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # return unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form the
    last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        if 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards field line.
    field_step_size : float
        Step size (km) used to produce the input field line trace; sets the
        extent of the high resolution re-trace around the closest point.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.
    """
    # NOTE(review): despite the original comment, this is a reference to the
    # input array, not a copy; the name is only rebound (never mutated) below,
    # so the caller's array is left untouched
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean
    repeat=True
    # first run boolean
    first=True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # print (unit_steps, scalar/unit_steps)
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            # # reduce number of elements we really need to check
            # field_copy = field_copy[min_idx-100:min_idx+100]
            # # difference with position
            # diff = field_copy - pos_step
            # diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # # find closest one
            # min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            # (min_dist is a numpy scalar, hence .copy())
            last_min_dist = min_dist.copy()
    # return magnitude of step
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not 'meridional', 'zonal', or 'aligned'.
    """
    # validate direction up front; previously an unsupported value left the
    # unit vector names unbound and raised a confusing NameError in the loop
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be one of 'meridional', 'zonal', "
                         "or 'aligned', not " + repr(direction))
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF; convert to geodetic for the unit vector routine
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get zonal, field-aligned, and meridional unit vector directions
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
            [lat], [lon], [alt], [date],
            steps=field_steps,
            max_steps=field_max_steps,
            step_size=field_step_size,
            ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        elif direction == 'aligned':
            ux, uy, uz = bx, by, bz
        # take a step along the selected unit vector direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (float, float, float, float, float, float)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)
    """
    # use input location and convert to ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare parameters for field line trace
    step_size = 100.
    max_steps = 1000
    steps = np.arange(max_steps)
    # high resolution trace parameters; fine trace covers one coarse step
    # (plus margin) at .01 km resolution
    fine_step_size = .01
    fine_max_steps = int(step_size/fine_step_size)+10
    fine_steps = np.arange(fine_max_steps)
    # prepare output
    out_x = []
    out_y = []
    out_z = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # to get the apex location we need to do a field line trace
        # then find the highest point
        trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
                                steps=steps,
                                step_size=step_size,
                                max_steps=max_steps)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # repeat using a high resolution trace one big step size each
        # direction around identified max
        # recurse False ensures only max_steps are taken
        trace = full_field_line(trace[max_idx,:], date, 0.,
                                steps=fine_steps,
                                step_size=fine_step_size,
                                max_steps=fine_max_steps,
                                recurse=False)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # collect outputs
        out_x.append(trace[max_idx,0])
        out_y.append(trace[max_idx,1])
        out_z.append(trace[max_idx,2])
    out_x = np.array(out_x)
    out_y = np.array(out_y)
    out_z = np.array(out_z)
    # report the apex in both ECEF and geodetic coordinates
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and goes through footpoint. At footpoint, steps along vector direction
    in both positive and negative directions, then traces back to opposite
    footpoint. Back at input location, steps toward those new field lines
    (edge_length) along vector direction until hitting distance of minimum
    approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest approach.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.
    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # NOTE(review): any value other than 'south'/'north' leaves `direct`
    # unbound and raises NameError at the first trace below
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closet approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # print (glat, glon, alt)
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through positive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input location.

    From each input location, a step of edge_length is taken along both the
    positive and negative vector_directions, and the apex locations for the
    field lines through those stepped points are calculated. The difference in
    position between these apex locations is the total centered distance
    between magnetic field lines at the magnetic apex when starting locally
    with a field line half distance of edge_length.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at the input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from input location towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations, one value per input
        location (km).

    """
    # use input locations to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys,
                                                             ecef_zs, glats,
                                                             glons, alts,
                                                             dates):
        # take step from location along + vector direction,
        # then get the apex location for that field line
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, _, _, _ = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # take step from location along - vector direction,
        # then get the apex location for that field line
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, _, _, _ = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        # straight-line distance between the two apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0] - minus_apex_x[0])**2 +
                                        (plus_apex_y[0] - minus_apex_y[0])**2 +
                                        (plus_apex_z[0] - minus_apex_z[0])**2))
    # TODO: an alternative method was prototyped here (short field line
    # traces around each apex, then stepping from the origin apex until
    # intersection, as in closed_loop_edge_lengths_via_footpoint). It is
    # more accurate in principle but more computationally intensive; a
    # comparison between the two approaches is still pending.
    return np.array(apex_edge_length)
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
    Directions refer to the ion motion direction e.g. the zonal
    scalar applies to zonal ion motions (meridional E field assuming ExB
    ion motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size (km) used for field line integration. Defaults to 100 km.
    max_steps : int
        Number of steps taken for field line integration. Defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.
        When e_field_scaling_only is True the keys are the corresponding
        '*_fields_scalar' electric field scalars instead.

    """
    # defaults for the field line tracing parameters
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)

    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)

    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}

    # Closed-loop edge lengths through the footpoints provide the E-field
    # mapping. NOTE the deliberate direction swap below: stepping along the
    # meridional unit vector measures the meridional E-field scalar which,
    # for ExB motion, corresponds to the *zonal* ion drift, and vice versa.
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'north',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'meridional',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                                glons, alts, dates, 'south',
                                                'zonal',
                                                step_size=step_size,
                                                max_steps=max_steps,
                                                edge_length=25.,
                                                edge_steps=5)
    # equatorial mappings via apex separations
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'meridional',
                                                                edge_length=25.,
                                                                edge_steps=5)
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'zonal',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # The footpoint loops use edge_length=25 km in both directions, a total
    # baseline of 50 km; normalizing by 50 km converts the returned edge
    # lengths into dimensionless mapping scalars.
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial scalars are inverted: the 50 km baseline is taken at the
    # input location and the separation is measured at the apex
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar

    if e_field_scaling_only:
        # prepare output; E-field scalars only, note the zon/mer swap
        # relative to the drift variable names (drift = ExB)
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength; ion drift = E/B, so each E-field scalar is multiplied by
        # the ratio of |B| at the s/c to |B| at the mapped location
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            # decimal year required by the IGRF routine
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                                    [glat], [glon],
                                                                    [alt], [date])
            # trace to northern footpoint (trace terminates at 120 km)
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location, last point of each trace
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # field magnitude at the s/c location; igrf12syn is called with
            # colatitude and longitude in radians, only the final returned
            # value (total field) is used here
            # NOTE(review): the first three returns appear to be field
            # components -- confirm against the IGRF-12 routine signature
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            # NOTE(review): apex_lat/apex_lon assumed to be geodetic degrees
            # compatible with the IGRF call -- confirm apex_location_info
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar

    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | closed_loop_edge_lengths_via_footpoint | python | def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
vector_direction, step_size=None,
max_steps=None, edge_length=25.,
edge_steps=5):
if step_size is None:
step_size = 100.
if max_steps is None:
max_steps = 1000
steps = np.arange(max_steps)
if direction == 'south':
direct = -1
elif direction == 'north':
direct = 1
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
full_local_step = []
min_distance_plus = []
min_distance_minus = []
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
# going to try and form close loops via field line integration
# start at location of interest, map down to northern or southern
# footpoints then take symmetric steps along meridional and zonal
# directions and trace back from location of interest, step along
# field line directions until we intersect or hit the distance of
# closest approach to the return field line with the known
# distances of footpoint steps, and the closet approach distance
# we can determine the scalar mapping of one location to another
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# print (glat, glon, alt)
# trace to footpoint, starting with input location
sc_root = np.array([ecef_x, ecef_y, ecef_z])
trace = field_line_trace(sc_root, double_date, direct, 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# pull out footpoint location
ftpnt = trace[-1, :]
ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
# take step from footpoint along + vector direction
plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
date,
direction=vector_direction,
num_steps=edge_steps,
step_size=edge_length/edge_steps)
# trace this back to other footpoint
other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# take half step from first footpoint along - vector direction
minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
date,
direction=vector_direction,
scalar=-1,
num_steps=edge_steps,
step_size=edge_length/edge_steps)
# trace this back to other footpoint
other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# need to determine where the intersection of field line coming back from
# footpoint through postive vector direction step and back
# in relation to the vector direction from the s/c location.
pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
other_plus,
1, date,
direction=vector_direction,
field_step_size=step_size,
step_size_goal=edge_length/edge_steps)
# take half step from S/C along - vector direction
minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
other_minus,
-1, date,
direction=vector_direction,
field_step_size=step_size,
step_size_goal=edge_length/edge_steps)
# collect outputs
full_local_step.append(pos_edge_length + minus_edge_length)
min_distance_plus.append(mind_pos)
min_distance_minus.append(mind_minus)
return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus) | Forms closed loop integration along mag field, satrting at input
points and goes through footpoint. At footpoint, steps along vector direction
in both positive and negative directions, then traces back to opposite
footpoint. Back at input location, steps toward those new field lines
(edge_length) along vector direction until hitting distance of minimum
approach. Loops don't always close. Returns total edge distance
that goes through input location, along with the distances of closest approach.
Note
----
vector direction refers to the magnetic unit vector direction
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
direction : string
'north' or 'south' for tracing through northern or
southern footpoint locations
vector_direction : string
'meridional' or 'zonal' unit vector directions
step_size : float (km)
Step size (km) used for field line integration
max_steps : int
Number of steps taken for field line integration
edge_length : float (km)
Half of total edge length (step) taken at footpoint location.
edge_length step in both positive and negative directions.
edge_steps : int
Number of steps taken from footpoint towards new field line
in a given direction (positive/negative) along unit vector
Returns
-------
np.array, np.array, np.array
A closed loop field line path through input location and footpoint in
northern/southern hemisphere and back is taken. The return edge length
through input location is provided.
The distances of closest approach for the positive step along vector
direction, and the negative step are returned. | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L927-L1072 | [
"def geodetic_to_ecef(latitude, longitude, altitude):\n \"\"\"Convert WGS84 geodetic coordinates into ECEF\n\n Parameters\n ----------\n latitude : float or array_like\n Geodetic latitude (degrees)\n longitude : float or array_like\n Geodetic longitude (degrees)\n altitude : float or array_like\n Geodetic Height (km) above WGS84 reference ellipsoid.\n\n Returns\n -------\n x, y, z\n numpy arrays of x, y, z locations in km\n\n \"\"\"\n\n\n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(np.deg2rad(latitude)) ** 2)\n\n # colatitude = 90. - latitude\n x = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))\n y = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))\n z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(np.deg2rad(latitude))\n\n return x, y, z\n",
"def ecef_to_geodetic(x, y, z, method=None):\n \"\"\"Convert ECEF into Geodetic WGS84 coordinates\n\n Parameters\n ----------\n x : float or array_like\n ECEF-X in km\n y : float or array_like\n ECEF-Y in km\n z : float or array_like\n ECEF-Z in km\n method : 'iterative' or 'closed' ('closed' is deafult)\n String selects method of conversion. Closed for mathematical\n solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)\n or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).\n\n Returns\n -------\n latitude, longitude, altitude\n numpy arrays of locations in degrees, degrees, and km\n\n \"\"\"\n\n # quick notes on ECEF to Geodetic transformations \n # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html\n\n method = method or 'closed'\n\n # ellipticity of Earth \n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n # first eccentricity squared\n e2 = ellip ** 2 # 6.6943799901377997E-3\n\n longitude = np.arctan2(y, x)\n # cylindrical radius\n p = np.sqrt(x ** 2 + y ** 2)\n\n # closed form solution\n # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1\n if method == 'closed':\n e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)\n theta = np.arctan2(z*earth_a, p*earth_b)\n latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)\n r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)\n h = p / np.cos(latitude) - r_n\n\n # another possibility\n # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf\n\n ## iterative method\n # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf\n if method == 'iterative':\n latitude = np.arctan2(p, z)\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n for i in np.arange(6):\n # print latitude\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n h = p / np.cos(latitude) - r_n\n latitude = np.arctan(z / p / (1. 
- e2 * (r_n / (r_n + h))))\n # print h\n # final ellipsoidal height update\n h = p / np.cos(latitude) - r_n\n\n return np.rad2deg(latitude), np.rad2deg(longitude), h\n",
"def field_line_trace(init, date, direction, height, steps=None,\n max_steps=1E4, step_size=10., recursive_loop_count=None, \n recurse=True):\n \"\"\"Perform field line tracing using IGRF and scipy.integrate.odeint.\n\n Parameters\n ----------\n init : array-like of floats\n Position to begin field line tracing from in ECEF (x,y,z) km\n date : datetime or float\n Date to perform tracing on (year + day/365 + hours/24. + etc.)\n Accounts for leap year if datetime provided.\n direction : int\n 1 : field aligned, generally south to north. \n -1 : anti-field aligned, generally north to south.\n height : float\n Altitude to terminate trace, geodetic WGS84 (km)\n steps : array-like of ints or floats\n Number of steps along field line when field line trace positions should \n be reported. By default, each step is reported; steps=np.arange(max_steps).\n max_steps : float\n Maximum number of steps along field line that should be taken\n step_size : float\n Distance in km for each large integration step. Multiple substeps\n are taken as determined by scipy.integrate.odeint\n\n Returns\n -------\n numpy array\n 2D array. [0,:] has the x,y,z location for initial point\n [:,0] is the x positions over the integration.\n Positions are reported in ECEF (km).\n\n\n \"\"\"\n\n if recursive_loop_count is None: \n recursive_loop_count = 0\n # \n if steps is None:\n steps = np.arange(max_steps)\n if not isinstance(date, float):\n # recast from datetime to float, as required by IGRF12 code\n doy = (date - datetime.datetime(date.year,1,1)).days\n # number of days in year, works for leap years\n num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days\n date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. 
+ date.second/3600.)/24.\n\n trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),\n steps,\n args=(date, step_size, direction, height),\n full_output=False,\n printmessg=False,\n ixpr=False) #,\n # mxstep=500)\n\n # check that we reached final altitude\n check = trace_north[-1, :]\n x, y, z = ecef_to_geodetic(*check) \n if height == 0:\n check_height = 1.\n else:\n check_height = height\n # fortran integration gets close to target height \n if recurse & (z > check_height*1.000001):\n if (recursive_loop_count < 1000):\n # When we have not reached the reference height, call field_line_trace \n # again by taking check value as init - recursive call\n recursive_loop_count = recursive_loop_count + 1\n trace_north1 = field_line_trace(check, date, direction, height,\n step_size=step_size, \n max_steps=max_steps,\n recursive_loop_count=recursive_loop_count,\n steps=steps)\n else:\n raise RuntimeError(\"After 1000 iterations couldn't reach target altitude\")\n return np.vstack((trace_north, trace_north1))\n else:\n # return results if we make it to the target altitude\n\n # filter points to terminate at point closest to target height\n # code below not correct, we want the first poiint that goes below target\n # height\n # code also introduces a variable length return, though I suppose\n # that already exists with the recursive functionality\n # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2]) \n # idx = np.argmin(np.abs(check_height - z)) \n return trace_north #[:idx+1,:]\n",
"def step_until_intersect(pos, field_line, sign, time, direction=None,\n step_size_goal=5., \n field_step_size=None): \n \"\"\"Starting at pos, method steps along magnetic unit vector direction \n towards the supplied field line trace. Determines the distance of \n closest approach to field line.\n\n Routine is used when calculting the mapping of electric fields along \n magnetic field lines. Voltage remains constant along the field but the \n distance between field lines does not.This routine may be used to form the \n last leg when trying to trace out a closed field line loop.\n\n Routine will create a high resolution field line trace (.01 km step size) \n near the location of closest approach to better determine where the \n intersection occurs. \n\n Parameters\n ----------\n pos : array-like\n X, Y, and Z ECEF locations to start from\n field_line : array-like (:,3)\n X, Y, and Z ECEF locations of field line trace, produced by the\n field_line_trace method.\n sign : int\n if 1, move along positive unit vector. Negwtive direction for -1.\n time : datetime or float\n Date to perform tracing on (year + day/365 + hours/24. + etc.)\n Accounts for leap year if datetime provided.\n direction : string ('meridional', 'zonal', or 'aligned')\n Which unit vector direction to move slong when trying to intersect\n with supplied field line trace. See step_along_mag_unit_vector method\n for more.\n step_size_goal : float\n step size goal that method will try to match when stepping towards field line. 
\n\n Returns\n -------\n (float, array, float)\n Total distance taken along vector direction; the position after taking \n the step [x, y, z] in ECEF; distance of closest approach from input pos \n towards the input field line trace.\n\n \"\"\" \n\n # work on a copy, probably not needed\n field_copy = field_line\n # set a high last minimum distance to ensure first loop does better than this\n last_min_dist = 2500000.\n # scalar is the distance along unit vector line that we are taking\n scalar = 0.\n # repeat boolean\n repeat=True\n # first run boolean\n first=True\n # factor is a divisor applied to the remaining distance between point and field line\n # I slowly take steps towards the field line and I don't want to overshoot\n # each time my minimum distance increases, I step back, increase factor, reducing\n # my next step size, then I try again\n factor = 1\n while repeat:\n # take a total step along magnetic unit vector\n # try to take steps near user provided step_size_goal\n unit_steps = np.abs(scalar//step_size_goal)\n if unit_steps == 0:\n unit_steps = 1\n # print (unit_steps, scalar/unit_steps)\n pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time, \n direction=direction,\n num_steps=unit_steps, \n step_size=np.abs(scalar)/unit_steps,\n scalar=sign) \n # find closest point along field line trace\n diff = field_copy - pos_step\n diff_mag = np.sqrt((diff ** 2).sum(axis=1))\n min_idx = np.argmin(diff_mag)\n if first:\n # first time in while loop, create some information\n # make a high resolution field line trace around closest distance\n # want to take a field step size in each direction\n # maintain accuracy of high res trace below to be .01 km\n init = field_copy[min_idx,:]\n field_copy = full_field_line(init, time, 0.,\n step_size=0.01, \n max_steps=int(field_step_size/.01),\n recurse=False)\n # difference with position\n diff = field_copy - pos_step\n diff_mag = np.sqrt((diff ** 2).sum(axis=1))\n # find closest one\n min_idx = 
np.argmin(diff_mag)\n # # reduce number of elements we really need to check\n # field_copy = field_copy[min_idx-100:min_idx+100]\n # # difference with position\n # diff = field_copy - pos_step\n # diff_mag = np.sqrt((diff ** 2).sum(axis=1))\n # # find closest one\n # min_idx = np.argmin(diff_mag)\n first = False\n\n # pull out distance of closest point \n min_dist = diff_mag[min_idx]\n\n # check how the solution is doing\n # if well, add more distance to the total step and recheck if closer\n # if worse, step back and try a smaller step\n if min_dist > last_min_dist:\n # last step we took made the solution worse\n if factor > 4:\n # we've tried enough, stop looping\n repeat = False\n # undo increment to last total distance\n scalar = scalar - last_min_dist/(2*factor)\n # calculate latest position\n pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], \n time, \n direction=direction,\n num_steps=unit_steps, \n step_size=np.abs(scalar)/unit_steps,\n scalar=sign) \n else:\n # undo increment to last total distance\n scalar = scalar - last_min_dist/(2*factor)\n # increase the divisor used to reduce the distance \n # actually stepped per increment\n factor = factor + 1.\n # try a new increment to total distance\n scalar = scalar + last_min_dist/(2*factor)\n else:\n # we did better, move even closer, a fraction of remaining distance\n # increment scalar, but only by a fraction\n scalar = scalar + min_dist/(2*factor)\n # we have a new standard to judge against, set it\n last_min_dist = min_dist.copy()\n\n # return magnitude of step\n return scalar, pos_step, min_dist\n",
"def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5., \n step_size=5., scalar=1):\n \"\"\"\n Move along 'lines' formed by following the magnetic unit vector directions.\n\n Moving along the field is effectively the same as a field line trace though\n extended movement along a field should use the specific field_line_trace \n method.\n\n\n Parameters\n ----------\n x : ECEF-x (km)\n Location to step from in ECEF (km). Scalar input.\n y : ECEF-y (km)\n Location to step from in ECEF (km). Scalar input.\n z : ECEF-z (km)\n Location to step from in ECEF (km). Scalar input.\n date : list-like of datetimes\n Date and time for magnetic field\n direction : string\n String identifier for which unit vector directino to move along.\n Supported inputs, 'meridional', 'zonal', 'aligned'\n num_steps : int\n Number of steps to take along unit vector direction\n step_size = float\n Distance taken for each step (km)\n scalar : int\n Scalar modifier for step size distance. Input a -1 to move along \n negative unit vector direction.\n\n Returns\n -------\n np.array\n [x, y, z] of ECEF location after taking num_steps along direction, \n each step_size long.\n\n \"\"\"\n\n\n # set parameters for the field line tracing routines\n field_step_size = 100.\n field_max_steps = 1000\n field_steps = np.arange(field_max_steps)\n\n for i in np.arange(num_steps):\n # x, y, z in ECEF\n # convert to geodetic\n lat, lon, alt = ecef_to_geodetic(x, y, z)\n # get unit vector directions\n zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(\n [lat], [lon], [alt], [date],\n steps=field_steps, \n max_steps=field_max_steps, \n step_size=field_step_size, \n ref_height=0.)\n # pull out the direction we need\n if direction == 'meridional':\n ux, uy, uz = mx, my, mz\n elif direction == 'zonal':\n ux, uy, uz = zvx, zvy, zvz\n elif direction == 'aligned':\n ux, uy, uz = bx, by, bz\n\n # take steps along direction\n x = x + step_size*ux[0]*scalar\n y = y + 
step_size*uy[0]*scalar\n z = z + step_size*uz[0]*scalar\n\n return np.array([x, y, z])\n"
] |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geocentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km
    """
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # radial distance from Earth's center, spherical Earth assumed
    radius = earth_geo_radius + altitude
    cos_lat = np.cos(lat_rad)
    x = radius * cos_lat * np.cos(lon_rad)
    y = radius * cos_lat * np.sin(lon_rad)
    z = radius * np.sin(lat_rad)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km
    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # radial distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude from the polar angle (colatitude)
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    longitude = np.rad2deg(np.arctan2(y, x))
    altitude = radius - ref_height
    return latitude, longitude, altitude
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km
    """
    lat_rad = np.deg2rad(latitude)
    lon_rad = np.deg2rad(longitude)
    # first eccentricity of the WGS84 ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(lat_rad) ** 2)
    cos_lat = np.cos(lat_rad)
    x = (r_n + altitude) * cos_lat * np.cos(lon_rad)
    y = (r_n + altitude) * cos_lat * np.sin(lon_rad)
    # polar axis uses the reduced radius of curvature
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(lat_rad)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If method is not 'closed' or 'iterative'.
    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'
    # validate up front; previously an unknown method fell through both
    # branches and raised NameError on undefined `latitude`/`h`
    if method not in ('closed', 'iterative'):
        raise ValueError("method must be 'closed' or 'iterative'")
    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3
    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)
    if method == 'closed':
        # closed form solution
        # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
        # second eccentricity
        e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
        theta = np.arctan2(z*earth_a, p*earth_b)
        latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3,
                              p - e2*earth_a*np.cos(theta)**3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
        # another possibility
        # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf
    else:
        # iterative method
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
        for i in np.arange(6):
            # refine radius of curvature, height, and latitude in turn
            r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n
    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions
    """
    # convert location angles to radians once
    lat_r = np.radians(glat)
    lon_r = np.radians(glong)
    sin_lat = np.sin(lat_r)
    cos_lat = np.cos(lat_r)
    sin_lon = np.sin(lon_r)
    cos_lon = np.cos(lon_r)
    # rotate the local ENU basis into the ECEF frame
    x = -east * sin_lon - north * cos_lon * sin_lat + up * cos_lon * cos_lat
    y = east * cos_lon - north * sin_lon * sin_lat + up * sin_lon * cos_lat
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions
    """
    # convert location angles to radians once
    lat_r = np.radians(glat)
    lon_r = np.radians(glong)
    sin_lat = np.sin(lat_r)
    cos_lat = np.cos(lat_r)
    sin_lon = np.sin(lon_r)
    cos_lon = np.cos(lon_r)
    # rotate the ECEF components into the local ENU frame
    east = -x * sin_lon + y * cos_lon
    north = -x * cos_lon * sin_lat - y * sin_lon * sin_lat + z * cos_lat
    up = x * cos_lon * cos_lat + y * sin_lon * cos_lat + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ecef onto different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF X, Y, and Z components of the x unit vector of new basis
    yx, yy, yz : float or array-like
        ECEF X, Y, and Z components of the y unit vector of new basis
    zx, zy, zz : float or array-like
        ECEF X, Y, and Z components of the z unit vector of new basis

    Returns
    -------
    out_x, out_y, out_z
        Vector components expressed in the new basis (dot product of the
        input vector with each basis unit vector)
    """
    proj_x = xx * x + xy * y + xz * z
    proj_y = yx * x + yy * y + yz * z
    proj_z = zx * x + zy * y + zz * z
    return proj_x, proj_y, proj_z
def normalize_vector(x, y, z):
    """Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components
    """
    magnitude = np.sqrt(x * x + y * y + z * z)
    return x / magnitude, y / magnitude, z / magnitude
def cross_product(x1, y1, z1, x2, y2, z2):
    """Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of the cross product vector (not normalized)
    """
    out_x = y1 * z2 - z1 * y2
    out_y = z1 * x2 - x1 * z2
    out_z = x1 * y2 - y1 * x2
    return out_x, out_y, out_z
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal counter for the number of recursive calls made while trying
        to reach the target height. Callers should leave this as None.
    recurse : bool
        If True, continue tracing via recursion when the integration stops
        short of the target height.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).
        Note the output length varies when recursion is triggered.

    Raises
    ------
    RuntimeError
        If the target altitude is not reached after 1000 recursive calls.
    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    #
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
    # integrate along the field; igrf.igrf_step supplies the local step
    # direction along (or against) the magnetic field at each position
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False) #,
                                         # mxstep=500)
    # check that we reached final altitude
    check = trace_north[-1, :]
    # NOTE: z here is geodetic altitude (km), not ECEF-Z
    x, y, z = ecef_to_geodetic(*check)
    if height == 0:
        # use a small positive threshold when terminating at the surface
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    if recurse & (z > check_height*1.000001):
        if (recursive_loop_count < 1000):
            # When we have not reached the reference height, call field_line_trace
            # again by taking check value as init - recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        # combine this trace segment with the recursively obtained remainder
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        # filter points to terminate at point closest to target height
        # code below not correct, we want the first poiint that goes below target
        # height
        # code also introduces a variable length return, though I suppose
        # that already exists with the recursive functionality
        # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
        # idx = np.argmin(np.abs(check_height - z))
        return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Traces both north and south from the starting location and combines
    the results into a single south-to-north path.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).
    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace toward each hemisphere separately
    southward = field_line_trace(init, date, -1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    northward = field_line_trace(init, date, 1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # reverse the southern trace so the combined path runs south to north;
    # drop the duplicated starting point before stacking
    return np.vstack((southward[::-1][:-1, :], northward))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
    The zonal vector is calculated by field-line tracing from
    the input locations toward the footpoint locations at ref_height.
    The cross product of these two vectors is taken to define the plane of
    the magnetic field. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector with the field-aligned direction is removed (optional).
    The meridional unit vector is defined via the cross product of the
    zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Steps along the field line at which trace positions are reported.
        Defaults to np.arange(max_steps).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
        Zonal, field-aligned, and meridional unit vector components,
        all expressed in ECEF coordinates.
    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # filter longitudes (could use pysat's function here)
    # wrap negative longitudes into the 0-360 range expected by IGRF
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = [];
    north_y = [];
    north_z = []
    south_x = [];
    south_y = [];
    south_z = []
    bn = [];
    be = [];
    bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # trace to both footpoints at ref_height
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year+1, 1, 1) - datetime.datetime(time.year, 1, 1)).days
        date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn);
        be.append(tbe);
        bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # calculate unit vectors from satellite to northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic field unit vector; IGRF returns north, east, down
    # in the local frame, convert to ECEF
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # take cross product of southward and northward vectors to get the zonal vector
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # NOTE(review): alternate zonal estimates below are computed but unused;
    # retained for reference/debugging
    zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z,
                                                    bx, by, bz)
    zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z,
                                                    bx, by, bz)
    # normalize the vectors
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    # calculate zonal vector
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    if filter_zonal:
        # remove any field aligned component from the zonal vector
        # (previously this was applied unconditionally, ignoring the
        # filter_zonal keyword)
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # compute meridional vector
    # cross product of zonal and magnetic unit vector
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # unit vectors for magnetic drifts in ECEF coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form the
    last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        if 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards field line.
    field_step_size : float
        Step size (km) used when producing the supplied field line trace;
        also sets the span of the internal high resolution re-trace.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.
    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean, controls the search loop below
    repeat=True
    # first run boolean, triggers one-time high resolution re-trace
    first=True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # print (unit_steps, scalar/unit_steps)
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            # # reduce number of elements we really need to check
            # field_copy = field_copy[min_idx-100:min_idx+100]
            # # difference with position
            # diff = field_copy - pos_step
            # diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # # find closest one
            # min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step, final position, and closest approach distance
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not 'meridional', 'zonal', or 'aligned'.
    """
    # validate direction up front; previously an unsupported value raised
    # NameError only after an expensive field line trace completed
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be 'meridional', 'zonal', or 'aligned'")

    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)

    for i in np.arange(num_steps):
        # x, y, z in ECEF
        # convert to geodetic
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions at the current location
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
            [lat], [lon], [alt], [date],
            steps=field_steps,
            max_steps=field_max_steps,
            step_size=field_step_size,
            ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        else:  # 'aligned'
            ux, uy, uz = bx, by, bz

        # take step along direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar

    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (array, array, array, array, array, array)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)
    """
    # convert start locations to ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # coarse field line trace parameters
    coarse_step = 100.
    coarse_max = 1000
    coarse_steps = np.arange(coarse_max)
    # high resolution trace parameters; spans one coarse step with margin
    fine_step = .01
    fine_max = int(coarse_step/fine_step) + 10
    fine_steps = np.arange(fine_max)
    # prepare output
    apex_x = []
    apex_y = []
    apex_z = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys,
                                                             ecef_zs, glats,
                                                             glons, alts, dates):
        # the apex is the point of greatest geodetic height along the
        # full field line trace through this location
        coarse = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
                                 steps=coarse_steps,
                                 step_size=coarse_step,
                                 max_steps=coarse_max)
        # geodetic altitude along the coarse trace
        _, _, talt = ecef_to_geodetic(coarse[:, 0], coarse[:, 1], coarse[:, 2])
        hi_idx = np.argmax(talt)
        # refine with a high resolution trace around identified maximum;
        # recurse=False ensures only fine_max steps are taken
        fine = full_field_line(coarse[hi_idx, :], date, 0.,
                               steps=fine_steps,
                               step_size=fine_step,
                               max_steps=fine_max,
                               recurse=False)
        _, _, talt = ecef_to_geodetic(fine[:, 0], fine[:, 1], fine[:, 2])
        hi_idx = np.argmax(talt)
        # collect refined apex location
        apex_x.append(fine[hi_idx, 0])
        apex_y.append(fine[hi_idx, 1])
        apex_z.append(fine[hi_idx, 2])
    apex_x = np.array(apex_x)
    apex_y = np.array(apex_y)
    apex_z = np.array(apex_z)
    glat, glon, alt = ecef_to_geodetic(apex_x, apex_y, apex_z)
    return apex_x, apex_y, apex_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and goes through footpoint. At footpoint, steps along vector direction
    in both positive and negative directions, then traces back to opposite
    footpoint. Back at input location, steps toward those new field lines
    (edge_length) along vector direction until hitting distance of minimum
    approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest approach.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration. Defaults to 100 km.
    max_steps : int
        Number of steps taken for field line integration. Defaults to 1000.
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.
    """
    # apply defaults for tracing parameters
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # direction of tracing: -1 toward southern footpoint, 1 toward northern
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closet approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # print (glat, glon, alt)
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through postive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """Calculates the distance between apex locations mapping to the input location.

    From each input location a step of total length `edge_length` is taken
    along both the positive and negative `vector_direction`, and the apex
    locations for the stepped points are calculated. The difference in
    position between these apex locations is the total centered distance
    between magnetic field lines at the magnetic apex when starting locally
    with a field line half distance of `edge_length`.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at the input location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from the input location towards the new field
        line in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations (km).

    """
    # use input location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    for ecef_x, ecef_y, ecef_z, date in zip(ecef_xs, ecef_ys, ecef_zs, dates):
        # take step from input location along + vector direction,
        # then get the apex location for the stepped point
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, _, _, _ = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # take step from input location along - vector direction,
        # then get the apex location for the stepped point
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, _, _, _ = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        # take difference in apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0] - minus_apex_x[0])**2 +
                                        (plus_apex_y[0] - minus_apex_y[0])**2 +
                                        (plus_apex_z[0] - minus_apex_z[0])**2))
    return np.array(apex_edge_length)
# # take step from one apex towards the other
# apex_path = step_along_mag_unit_vector(minus_apex_x, minus_apex_y, minus_apex_z, date,
# direction=vector_direction,
# num_steps=edge_steps,
# step_size=apex_edge_length[-1]/(edge_steps*2.))
# pos_apex_diff.append((apex_path[0] - plus_apex_x)**2 +
# (apex_path[1] - plus_apex_y)**2 +
# (apex_path[2] - plus_apex_z)**2)
# return apex_edge_length, path_apex_diff
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
                                   max_steps=None, e_field_scaling_only=False):
    """
    Calculates scalars for translating ion motions at position
    glat, glon, and alt, for date, to the footpoints of the field line
    as well as at the magnetic equator.

    All inputs are assumed to be 1D arrays.

    Note
    ----
        Directions refer to the ion motion direction e.g. the zonal
        scalar applies to zonal ion motions (meridional E field assuming
        ExB ion motion)

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    step_size : float (km)
        Step size used for field line integration. Defaults to 100 km.
    max_steps : int
        Maximum number of steps taken for field line integration.
        Defaults to 1000.
    e_field_scaling_only : boolean (False)
        If True, method only calculates the electric field scalar, ignoring
        changes in magnitude of B. Note ion velocity related to E/B.

    Returns
    -------
    dict
        array-like of scalars for translating ion drifts. Keys are,
        'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
        for southern locations. 'equator_mer_drifts_scalar' and
        'equator_zonal_drifts_scalar' cover the mappings to the equator.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)

    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)

    # prepare output
    eq_zon_drifts_scalar = []
    eq_mer_drifts_scalar = []
    # magnetic field info
    north_mag_scalar = []
    south_mag_scalar = []
    eq_mag_scalar = []
    out = {}
    # meridional e-field scalar map, can also be
    # zonal ion drift scalar map
    # NOTE: a meridional E-field corresponds to a zonal ExB drift, so the
    # 'meridional' closed loop below feeds the *_zon_drifts_scalar names
    north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                        glons, alts, dates, 'north',
                                        'meridional',
                                        step_size=step_size,
                                        max_steps=max_steps,
                                        edge_length=25.,
                                        edge_steps=5)

    north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                        glons, alts, dates, 'north',
                                        'zonal',
                                        step_size=step_size,
                                        max_steps=max_steps,
                                        edge_length=25.,
                                        edge_steps=5)

    south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                        glons, alts, dates, 'south',
                                        'meridional',
                                        step_size=step_size,
                                        max_steps=max_steps,
                                        edge_length=25.,
                                        edge_steps=5)

    south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
                                        glons, alts, dates, 'south',
                                        'zonal',
                                        step_size=step_size,
                                        max_steps=max_steps,
                                        edge_length=25.,
                                        edge_steps=5)

    # equatorial mappings via apex locations
    eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'meridional',
                                                                edge_length=25.,
                                                                edge_steps=5)
    eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                                                'zonal',
                                                                edge_length=25.,
                                                                edge_steps=5)
    # total centered edge length at the input location is 2*25 = 50 km;
    # normalize the returned closed-loop lengths by it
    north_zon_drifts_scalar = north_zon_drifts_scalar/50.
    south_zon_drifts_scalar = south_zon_drifts_scalar/50.
    north_mer_drifts_scalar = north_mer_drifts_scalar/50.
    south_mer_drifts_scalar = south_mer_drifts_scalar/50.
    # equatorial mapping scales in the opposite sense (inverted ratio)
    eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
    eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar

    if e_field_scaling_only:
        # prepare output; drift scalars double as E-field scalars here,
        # with meridional/zonal labels swapped relative to the drift names
        out['north_mer_fields_scalar'] = north_zon_drifts_scalar
        out['south_mer_fields_scalar'] = south_zon_drifts_scalar
        out['north_zon_fields_scalar'] = north_mer_drifts_scalar
        out['south_zon_fields_scalar'] = south_mer_drifts_scalar
        out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
        out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
    else:
        # figure out scaling for drifts based upon change in magnetic field
        # strength between the s/c location and the mapped locations
        for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                                 glats, glons, alts,
                                                                 dates):
            # decimal year required by the IGRF routine
            yr, doy = pysat.utils.getyrdoy(date)
            double_date = float(yr) + float(doy) / 366.
            # get location of apex for s/c field line
            apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
                                                                    [glat], [glon],
                                                                    [alt], [date])
            # trace to northern footpoint
            sc_root = np.array([ecef_x, ecef_y, ecef_z])
            trace_north = field_line_trace(sc_root, double_date, 1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # southern tracing
            trace_south = field_line_trace(sc_root, double_date, -1., 120.,
                                           steps=steps,
                                           step_size=step_size,
                                           max_steps=max_steps)
            # footpoint location is the last point of each trace
            north_ftpnt = trace_north[-1, :]
            nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
            south_ftpnt = trace_south[-1, :]
            sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
            # scalar for the northern footpoint electric field based on distances
            # for drift also need to include the magnetic field, drift = E/B
            # NOTE(review): fourth return of igrf12syn taken as total field
            # magnitude; colatitude in radians — confirm against IGRF routine
            tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
                                                 np.deg2rad(90.-glat),
                                                 np.deg2rad(glon))
            # get mag field and scalar for northern footpoint
            tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
                                                  np.deg2rad(90.-nft_glat),
                                                  np.deg2rad(nft_glon))
            north_mag_scalar.append(b_sc/b_nft)
            # equatorial values
            tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
                                                 np.deg2rad(90.-apex_lat),
                                                 np.deg2rad(apex_lon))
            eq_mag_scalar.append(b_sc/b_eq)
            # scalar for the southern footpoint
            tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
                                                  np.deg2rad(90.-sft_glat),
                                                  np.deg2rad(sft_glon))
            south_mag_scalar.append(b_sc/b_sft)
        # make E-Field scalars to drifts
        # lists to arrays
        north_mag_scalar = np.array(north_mag_scalar)
        south_mag_scalar = np.array(south_mag_scalar)
        eq_mag_scalar = np.array(eq_mag_scalar)
        # apply to electric field scaling to get ion drift values
        north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
        south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
        north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
        south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
        # equatorial
        eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
        eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
        # output
        out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
        out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
        out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
        out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
        out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
        out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar

    return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | closed_loop_edge_lengths_via_equator | python | def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
vector_direction,
edge_length=25.,
edge_steps=5):
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
apex_edge_length = []
# outputs for alternative calculation
full_local_step = []
min_distance_plus = []
min_distance_minus = []
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# get location of apex for s/c field line
apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
[glat], [glon],
[alt], [date])
# apex in ecef (maps to input location)
apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
# take step from s/c along + vector direction
# then get the apex location
plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
direction=vector_direction,
num_steps=edge_steps,
step_size=edge_length/edge_steps)
plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
# plus apex location in ECEF
plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
# take half step from s/c along - vector direction
# then get the apex location
minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
direction=vector_direction,
scalar=-1,
num_steps=edge_steps,
step_size=edge_length/edge_steps)
minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
# take difference in apex locations
apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
(plus_apex_y[0]-minus_apex_y[0])**2 +
(plus_apex_z[0]-minus_apex_z[0])**2))
# # take an alternative path to calculation
# # do field line trace around pos and neg apexes
# # then do intersection with field line projection thing
#
# # do a short centered field line trace around plus apex location
# other_trace = full_field_line(plus_apex_root, double_date, 0.,
# step_size=1.,
# max_steps=10,
# recurse=False)
# # need to determine where the intersection of apex field line
# # in relation to the vector direction from the s/c field apex location.
# pos_edge_length, _, mind_pos = step_until_intersect(apex_root,
# other_trace,
# 1, date,
# direction=vector_direction,
# field_step_size=1.,
# step_size_goal=edge_length/edge_steps)
# # do a short centered field line trace around 'minus' apex location
# other_trace = full_field_line(minus_apex_root, double_date, 0.,
# step_size=1.,
# max_steps=10,
# recurse=False)
# # need to determine where the intersection of apex field line
# # in relation to the vector direction from the s/c field apex location.
# minus_edge_length, _, mind_minus = step_until_intersect(apex_root,
# other_trace,
# -1, date,
# direction=vector_direction,
# field_step_size=1.,
# step_size_goal=edge_length/edge_steps)
# full_local_step.append(pos_edge_length + minus_edge_length)
# min_distance_plus.append(mind_pos)
# min_distance_minus.append(mind_minus)
# still sorting out alternative option for this calculation
# commented code is 'good' as far as the plan goes
# takes more time, so I haven't tested one vs the other yet
# having two live methods can lead to problems
# THIS IS A TODO (sort it out)
return np.array(apex_edge_length) | Calculates the distance between apex locations mapping to the input location.
Using the input location, the apex location is calculated. Also from the input
location, a step along both the positive and negative
vector_directions is taken, and the apex locations for those points are calculated.
The difference in position between these apex locations is the total centered
distance between magnetic field lines at the magnetic apex when starting
locally with a field line half distance of edge_length.
An alternative method has been implemented, then commented out.
This technique takes multiple steps from the origin apex towards the apex
locations identified along vector_direction. In principle this is more accurate
but more computationally intensive, similar to the footpoint model.
A comparison is planned.
Note
----
vector direction refers to the magnetic unit vector direction
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
vector_direction : string
'meridional' or 'zonal' unit vector directions
step_size : float (km)
Step size (km) used for field line integration
max_steps : int
Number of steps taken for field line integration
edge_length : float (km)
Half of total edge length (step) taken at footpoint location.
edge_length step in both positive and negative directions.
edge_steps : int
Number of steps taken from footpoint towards new field line
in a given direction (positive/negative) along unit vector
Returns
-------
np.array, ### np.array, np.array
The change in field line apex locations.
## Pending ## The return edge length through input location is provided.
## Pending ## The distances of closest approach for the positive step
along vector direction, and the negative step are returned. | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L1075-L1227 | [
"def geodetic_to_ecef(latitude, longitude, altitude):\n \"\"\"Convert WGS84 geodetic coordinates into ECEF\n\n Parameters\n ----------\n latitude : float or array_like\n Geodetic latitude (degrees)\n longitude : float or array_like\n Geodetic longitude (degrees)\n altitude : float or array_like\n Geodetic Height (km) above WGS84 reference ellipsoid.\n\n Returns\n -------\n x, y, z\n numpy arrays of x, y, z locations in km\n\n \"\"\"\n\n\n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(np.deg2rad(latitude)) ** 2)\n\n # colatitude = 90. - latitude\n x = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))\n y = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))\n z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(np.deg2rad(latitude))\n\n return x, y, z\n",
"def ecef_to_geodetic(x, y, z, method=None):\n \"\"\"Convert ECEF into Geodetic WGS84 coordinates\n\n Parameters\n ----------\n x : float or array_like\n ECEF-X in km\n y : float or array_like\n ECEF-Y in km\n z : float or array_like\n ECEF-Z in km\n method : 'iterative' or 'closed' ('closed' is deafult)\n String selects method of conversion. Closed for mathematical\n solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)\n or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).\n\n Returns\n -------\n latitude, longitude, altitude\n numpy arrays of locations in degrees, degrees, and km\n\n \"\"\"\n\n # quick notes on ECEF to Geodetic transformations \n # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html\n\n method = method or 'closed'\n\n # ellipticity of Earth \n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n # first eccentricity squared\n e2 = ellip ** 2 # 6.6943799901377997E-3\n\n longitude = np.arctan2(y, x)\n # cylindrical radius\n p = np.sqrt(x ** 2 + y ** 2)\n\n # closed form solution\n # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1\n if method == 'closed':\n e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)\n theta = np.arctan2(z*earth_a, p*earth_b)\n latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)\n r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)\n h = p / np.cos(latitude) - r_n\n\n # another possibility\n # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf\n\n ## iterative method\n # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf\n if method == 'iterative':\n latitude = np.arctan2(p, z)\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n for i in np.arange(6):\n # print latitude\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n h = p / np.cos(latitude) - r_n\n latitude = np.arctan(z / p / (1. 
- e2 * (r_n / (r_n + h))))\n # print h\n # final ellipsoidal height update\n h = p / np.cos(latitude) - r_n\n\n return np.rad2deg(latitude), np.rad2deg(longitude), h\n",
"def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5., \n step_size=5., scalar=1):\n \"\"\"\n Move along 'lines' formed by following the magnetic unit vector directions.\n\n Moving along the field is effectively the same as a field line trace though\n extended movement along a field should use the specific field_line_trace \n method.\n\n\n Parameters\n ----------\n x : ECEF-x (km)\n Location to step from in ECEF (km). Scalar input.\n y : ECEF-y (km)\n Location to step from in ECEF (km). Scalar input.\n z : ECEF-z (km)\n Location to step from in ECEF (km). Scalar input.\n date : list-like of datetimes\n Date and time for magnetic field\n direction : string\n String identifier for which unit vector directino to move along.\n Supported inputs, 'meridional', 'zonal', 'aligned'\n num_steps : int\n Number of steps to take along unit vector direction\n step_size = float\n Distance taken for each step (km)\n scalar : int\n Scalar modifier for step size distance. Input a -1 to move along \n negative unit vector direction.\n\n Returns\n -------\n np.array\n [x, y, z] of ECEF location after taking num_steps along direction, \n each step_size long.\n\n \"\"\"\n\n\n # set parameters for the field line tracing routines\n field_step_size = 100.\n field_max_steps = 1000\n field_steps = np.arange(field_max_steps)\n\n for i in np.arange(num_steps):\n # x, y, z in ECEF\n # convert to geodetic\n lat, lon, alt = ecef_to_geodetic(x, y, z)\n # get unit vector directions\n zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(\n [lat], [lon], [alt], [date],\n steps=field_steps, \n max_steps=field_max_steps, \n step_size=field_step_size, \n ref_height=0.)\n # pull out the direction we need\n if direction == 'meridional':\n ux, uy, uz = mx, my, mz\n elif direction == 'zonal':\n ux, uy, uz = zvx, zvy, zvz\n elif direction == 'aligned':\n ux, uy, uz = bx, by, bz\n\n # take steps along direction\n x = x + step_size*ux[0]*scalar\n y = y + 
step_size*uy[0]*scalar\n z = z + step_size*uz[0]*scalar\n\n return np.array([x, y, z])\n",
"def apex_location_info(glats, glons, alts, dates):\n \"\"\"Determine apex location for the field line passing through input point.\n\n Employs a two stage method. A broad step (100 km) field line trace spanning \n Northern/Southern footpoints is used to find the location with the largest \n geodetic (WGS84) height. A higher resolution trace (.1 km) is then used to \n get a better fix on this location. Greatest geodetic height is once again \n selected.\n\n Parameters\n ----------\n glats : list-like of floats (degrees)\n Geodetic (WGS84) latitude\n glons : list-like of floats (degrees)\n Geodetic (WGS84) longitude \n alts : list-like of floats (km)\n Geodetic (WGS84) altitude, height above surface\n dates : list-like of datetimes\n Date and time for determination of scalars\n\n Returns\n -------\n (float, float, float, float, float, float)\n ECEF X (km), ECEF Y (km), ECEF Z (km), \n Geodetic Latitude (degrees), \n Geodetic Longitude (degrees), \n Geodetic Altitude (km)\n\n \"\"\"\n\n # use input location and convert to ECEF\n ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)\n # prepare parameters for field line trace\n step_size = 100.\n max_steps = 1000\n steps = np.arange(max_steps)\n # high resolution trace parameters\n fine_step_size = .01\n fine_max_steps = int(step_size/fine_step_size)+10\n fine_steps = np.arange(fine_max_steps)\n # prepare output\n out_x = []\n out_y = []\n out_z = []\n\n for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, \n glats, glons, alts, \n dates):\n # to get the apex location we need to do a field line trace\n # then find the highest point\n trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0., \n steps=steps,\n step_size=step_size, \n max_steps=max_steps)\n # convert all locations to geodetic coordinates\n tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2]) \n # determine location that is highest with respect to the geodetic Earth\n max_idx = 
np.argmax(talt)\n # repeat using a high resolution trace one big step size each \n # direction around identified max\n # recurse False ensures only max_steps are taken\n trace = full_field_line(trace[max_idx,:], date, 0., \n steps=fine_steps,\n step_size=fine_step_size, \n max_steps=fine_max_steps, \n recurse=False)\n # convert all locations to geodetic coordinates\n tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])\n # determine location that is highest with respect to the geodetic Earth\n max_idx = np.argmax(talt)\n # collect outputs\n out_x.append(trace[max_idx,0])\n out_y.append(trace[max_idx,1])\n out_z.append(trace[max_idx,2])\n\n out_x = np.array(out_x)\n out_y = np.array(out_y)\n out_z = np.array(out_z)\n glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)\n\n return out_x, out_y, out_z, glat, glon, alt\n"
] |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
# semi-major axis (equatorial radius), km
earth_a = 6378.1370
# semi-minor axis (polar radius), km
earth_b = 6356.75231424518

# standard geocentric Earth radius
# average radius of Earth, km
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # radial distance from Earth's center under the spherical assumption
    radius = earth_geo_radius + altitude
    # projection onto the equatorial plane
    xy_proj = radius * np.cos(lat_r)
    x = xy_proj * np.cos(lon_r)
    y = xy_proj * np.sin(lon_r)
    z = radius * np.sin(lat_r)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    if ref_height is None:
        ref_height = earth_geo_radius
    # radial distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude derived from colatitude (angle from +Z axis)
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    longitude = np.rad2deg(np.arctan2(y, x))
    return latitude, longitude, radius - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # first eccentricity of the WGS84 ellipsoid
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # prime vertical radius of curvature at this latitude
    r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(lat_r) ** 2)
    x = (r_n + altitude) * np.cos(lat_r) * np.cos(lon_r)
    y = (r_n + altitude) * np.cos(lat_r) * np.sin(lon_r)
    # polar axis uses the reduced radius of curvature
    z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(lat_r)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If `method` is not 'closed' or 'iterative'.

    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html

    method = method or 'closed'
    # fail loudly on an unsupported method rather than with a confusing
    # NameError on `latitude`/`h` further below
    if method not in ('closed', 'iterative'):
        raise ValueError("method must be 'closed' or 'iterative'")

    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3

    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)

    # closed form solution
    # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
    if method == 'closed':
        # second eccentricity
        e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
        theta = np.arctan2(z*earth_a, p*earth_b)
        latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3,
                              p - e2*earth_a*np.cos(theta)**3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n

    # iterative method
    # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
    if method == 'iterative':
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
        # fixed-point iteration refining latitude and ellipsoidal height
        for i in np.arange(6):
            r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n

    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    # angles in radians, trig evaluated once
    rlat = np.radians(glat)
    rlon = np.radians(glong)
    sin_lat, cos_lat = np.sin(rlat), np.cos(rlat)
    sin_lon, cos_lon = np.sin(rlon), np.cos(rlon)

    x = -east * sin_lon - north * cos_lon * sin_lat + up * cos_lon * cos_lat
    y = east * cos_lon - north * sin_lon * sin_lat + up * sin_lon * cos_lat
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions
    """
    # trig terms of the rotation into the local tangent frame
    rlat = np.deg2rad(glat)
    rlon = np.deg2rad(glong)
    sin_lat, cos_lat = np.sin(rlat), np.cos(rlat)
    sin_lon, cos_lon = np.sin(rlon), np.cos(rlon)
    # horizontal projection of the vector onto the local meridian plane
    merid = x * cos_lon + y * sin_lon
    east = y * cos_lon - x * sin_lon
    north = z * cos_lat - merid * sin_lat
    up = merid * cos_lat + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ecef onto different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF X, Y, and Z components of the x unit vector of new basis
    yx, yy, yz : float or array-like
        ECEF X, Y, and Z components of the y unit vector of new basis
    zx, zy, zz : float or array-like
        ECEF X, Y, and Z components of the z unit vector of new basis

    Returns
    -------
    out_x, out_y, out_z
        Components of the input vector along each new basis direction,
        obtained by the dot product with each basis unit vector.
    """
    # dot the vector against each basis unit vector in turn
    proj_x = xx * x + xy * y + xz * z
    proj_y = yx * x + yy * y + yz * z
    proj_z = zx * x + zy * y + zz * z
    return proj_x, proj_y, proj_z
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components
    """
    # Euclidean length of the input vector
    norm = np.sqrt(x * x + y * y + z * z)
    return x / norm, y / norm, z / norm
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of the cross product vector (not normalized)
    """
    # standard determinant expansion of v1 x v2
    cx = y1 * z2 - z1 * y2
    cy = z1 * x2 - x1 * z2
    cz = x1 * y2 - y1 * x2
    return cx, cy, cz
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Steps along field line when field line trace positions should
        be reported. By default, each step is reported;
        steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int or None
        Internal counter for the number of recursive calls made so far.
        Users should leave this as None.
    recurse : bool
        If True, recursively continue the trace from the last position
        until the target height is reached (at most 1000 recursive calls).

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If the target altitude is not reached within 1000 recursive calls.

    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year + 1, 1, 1) -
                        datetime.datetime(date.year, 1, 1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + \
            float(date.hour + date.minute/60. + date.second/3600.)/24.
    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False)
    # check that we reached the target altitude at the end of the trace
    check = trace_north[-1, :]
    tlat, tlon, talt = ecef_to_geodetic(*check)
    # a target height of exactly 0 km can't use a relative tolerance,
    # use 1 km as the termination check instead
    if height == 0:
        check_height = 1.
    else:
        check_height = height
    # fortran integration gets close to target height
    if recurse and (talt > check_height * 1.000001):
        if recursive_loop_count < 1000:
            # When we have not reached the reference height, call
            # field_line_trace again by taking check value as init
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        return np.vstack((trace_north, trace_north1))
    else:
        # return results if we make it to the target altitude
        return trace_north
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Traces from the input position in both directions along the field and
    combines the results into a single south-to-north trace.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Steps along field line when field line trace positions should
        be reported. By default, each step is reported;
        steps=np.arange(max_steps). Two traces are made, one north, the
        other south, thus the output array could have double max_steps,
        or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace in the anti-field-aligned (southward) direction first
    south_leg = field_line_trace(init, date, -1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # then trace field-aligned (northward)
    north_leg = field_line_trace(init, date, 1., height,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps,
                                 **kwargs)
    # order points south to north; reversed slice stops before row 0 so the
    # shared initial point is not duplicated
    return np.vstack((south_leg[:0:-1, :], north_leg))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
        The zonal vector is calculated by field-line tracing from
        the input locations toward the footpoint locations at ref_height.
        The cross product of these two vectors is taken to define the plane of
        the magnetic field. This vector is not always orthogonal
        with the local field-aligned vector (IGRF), thus any component of the
        zonal vector with the field-aligned direction is removed (optional).
        The meridional unit vector is defined via the cross product of the
        zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Steps along field line when field line trace positions should
        be reported. By default, each step is reported;
        steps=np.arange(max_steps).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
        Zonal, field-aligned, and meridional unit vector components,
        each expressed in ECEF coordinates.

    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # filter longitudes (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = [];
    north_y = [];
    north_z = []
    south_x = [];
    south_y = [];
    south_z = []
    bn = [];
    be = [];
    bd = []
    # loop over locations; IGRF wants geocentric colatitude (radians) and
    # east longitude (radians)
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # date = inst.yr + inst.doy / 366.
        # trace = full_field_line(init, time, ref_height, step_size=step_size,
        #                         max_steps=max_steps,
        #                         steps=steps)
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year,1,1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year+1,1,1) - datetime.datetime(time.year,1,1)).days
        date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn);
        be.append(tbe);
        bd.append(tbd)
    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)
    # calculate vector from satellite to northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic unit vector
    # IGRF returns north, east, down; convert to east, north, up for ENU->ECEF
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # take cross product of southward and northward vectors to get the zonal vector
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # getting zonal vector utilizing magnetic field vector instead
    zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z,
                                                    bx, by, bz)
    # getting zonal vector utilizing magnetic field vector instead and southern point
    zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z,
                                                    bx, by, bz)
    # normalize the vectors
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    # calculate zonal vector
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    # remove any field aligned component to the zonal vector
    # NOTE(review): filter_zonal is never checked here; the field-aligned
    # projection is always removed — confirm whether filter_zonal=False
    # should skip this step as documented
    dot_fa = zvx * bx + zvy * by + zvz * bz
    zvx -= dot_fa * bx
    zvy -= dot_fa * by
    zvz -= dot_fa * bz
    zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # compute meridional vector
    # cross product of zonal and magnetic unit vector
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # add unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form
    the last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        if 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards
        field line.
    field_step_size : float
        Step size (km) used when producing the input field line trace; sets
        the extent of the high resolution trace made around the closest point.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.

    """
    # work on a copy, probably not needed
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean
    repeat=True
    # first run boolean
    first=True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        # print (unit_steps, scalar/unit_steps)
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            # # reduce number of elements we really need to check
            # field_copy = field_copy[min_idx-100:min_idx+100]
            # # difference with position
            # diff = field_copy - pos_step
            # diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # # find closest one
            # min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : datetime
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not one of 'meridional', 'zonal', or 'aligned'.

    """
    # validate up front; previously an unknown direction produced an
    # UnboundLocalError deep in the loop
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be one of 'meridional', 'zonal', "
                         "or 'aligned'; got %s" % (direction,))
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for i in np.arange(num_steps):
        # x, y, z in ECEF
        # convert to geodetic
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
            [lat], [lon], [alt], [date],
            steps=field_steps,
            max_steps=field_max_steps,
            step_size=field_step_size,
            ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        else:
            # validated above, must be 'aligned'
            ux, uy, uz = bx, by, bz
        # take steps along direction
        x = x + step_size * ux[0] * scalar
        y = y + step_size * uy[0] * scalar
        z = z + step_size * uz[0] * scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (array, array, array, array, array, array)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)

    """
    # use input location and convert to ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare parameters for field line trace
    step_size = 100.
    max_steps = 1000
    steps = np.arange(max_steps)
    # high resolution trace parameters
    fine_step_size = .01
    fine_max_steps = int(step_size/fine_step_size)+10
    fine_steps = np.arange(fine_max_steps)
    # prepare output
    out_x = []
    out_y = []
    out_z = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # to get the apex location we need to do a field line trace
        # then find the highest point
        trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
                                steps=steps,
                                step_size=step_size,
                                max_steps=max_steps)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # repeat using a high resolution trace one big step size each
        # direction around identified max
        # recurse False ensures only max_steps are taken
        trace = full_field_line(trace[max_idx,:], date, 0.,
                                steps=fine_steps,
                                step_size=fine_step_size,
                                max_steps=fine_max_steps,
                                recurse=False)
        # convert all locations to geodetic coordinates
        tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
        # determine location that is highest with respect to the geodetic Earth
        max_idx = np.argmax(talt)
        # collect outputs
        out_x.append(trace[max_idx,0])
        out_y.append(trace[max_idx,1])
        out_z.append(trace[max_idx,2])
    out_x = np.array(out_x)
    out_y = np.array(out_y)
    out_z = np.array(out_z)
    # report the apex location in geodetic coordinates as well
    glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
    return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and going through footpoint. At footpoint, steps along vector
    direction in both positive and negative directions, then traces back to
    opposite footpoint. Back at input location, steps toward those new field
    lines (edge_length) along vector direction until hitting distance of
    minimum approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest
    approach.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration; defaults to 100 km
    max_steps : int
        Number of steps taken for field line integration; defaults to 1000
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # map direction label onto the field-line trace direction argument
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closest approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # print (glat, glon, alt)
        # trace to footpoint, starting with input location
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through positive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
                                         vector_direction,
                                         edge_length=25.,
                                         edge_steps=5):
    """
    Calculates the distance between apex locations mapping to the input
    location.

    Using the input location, the apex location is calculated. Also from the
    input location, a step along both the positive and negative
    vector_directions is taken, and the apex locations for those points are
    calculated. The difference in position between these apex locations is the
    total centered distance between magnetic field lines at the magnetic apex
    when starting locally with a field line half distance of edge_length.

    An alternative method has been implemented, then commented out.
    This technique takes multiple steps from the origin apex towards the apex
    locations identified along vector_direction. In principle this is more
    accurate but more computationally intensive, similar to the footpoint
    model. A comparison is planned.

    Note
    ----
        vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array
        The change in field line apex locations.

    """
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    apex_edge_length = []
    # outputs for alternative calculation (currently commented out below)
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # get location of apex for s/c field line
        apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
            [glat], [glon],
            [alt], [date])
        # apex in ecef (maps to input location)
        apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
        # take step from s/c along + vector direction
        # then get the apex location
        plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                          direction=vector_direction,
                                          num_steps=edge_steps,
                                          step_size=edge_length/edge_steps)
        plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
        plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
            apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
        # plus apex location in ECEF
        plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
        # take half step from s/c along - vector direction
        # then get the apex location
        minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
                                           direction=vector_direction,
                                           scalar=-1,
                                           num_steps=edge_steps,
                                           step_size=edge_length/edge_steps)
        minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
        minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
            apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
        minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
        # take difference in apex locations
        apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
                                        (plus_apex_y[0]-minus_apex_y[0])**2 +
                                        (plus_apex_z[0]-minus_apex_z[0])**2))
        # # take an alternative path to calculation
        # # do field line trace around pos and neg apexes
        # # then do intersection with field line projection thing
        #
        # # do a short centered field line trace around plus apex location
        # other_trace = full_field_line(plus_apex_root, double_date, 0.,
        #                               step_size=1.,
        #                               max_steps=10,
        #                               recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # pos_edge_length, _, mind_pos = step_until_intersect(apex_root,
        #                                                     other_trace,
        #                                                     1, date,
        #                                                     direction=vector_direction,
        #                                                     field_step_size=1.,
        #                                                     step_size_goal=edge_length/edge_steps)
        # # do a short centered field line trace around 'minus' apex location
        # other_trace = full_field_line(minus_apex_root, double_date, 0.,
        #                               step_size=1.,
        #                               max_steps=10,
        #                               recurse=False)
        # # need to determine where the intersection of apex field line
        # # in relation to the vector direction from the s/c field apex location.
        # minus_edge_length, _, mind_minus = step_until_intersect(apex_root,
        #                                                         other_trace,
        #                                                         -1, date,
        #                                                         direction=vector_direction,
        #                                                         field_step_size=1.,
        #                                                         step_size_goal=edge_length/edge_steps)
        # full_local_step.append(pos_edge_length + minus_edge_length)
        # min_distance_plus.append(mind_pos)
        # min_distance_minus.append(mind_minus)
    # still sorting out alternative option for this calculation
    # commented code is 'good' as far as the plan goes
    # takes more time, so I haven't tested one vs the other yet
    # having two live methods can lead to problems
    # THIS IS A TODO (sort it out)
    return np.array(apex_edge_length)#, np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
    # # take step from one apex towards the other
    # apex_path = step_along_mag_unit_vector(minus_apex_x, minus_apex_y, minus_apex_z, date,
    #                                        direction=vector_direction,
    #                                        num_steps=edge_steps,
    #                                        step_size=apex_edge_length[-1]/(edge_steps*2.))
    # pos_apex_diff.append((apex_path[0] - plus_apex_x)**2 +
    #                      (apex_path[1] - plus_apex_y)**2 +
    #                      (apex_path[2] - plus_apex_z)**2)
    # return apex_edge_length, path_apex_diff
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
max_steps=None, e_field_scaling_only=False):
"""
Calculates scalars for translating ion motions at position
glat, glon, and alt, for date, to the footpoints of the field line
as well as at the magnetic equator.
All inputs are assumed to be 1D arrays.
Note
----
Directions refer to the ion motion direction e.g. the zonal
scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
e_field_scaling_only : boolean (False)
If True, method only calculates the electric field scalar, ignoring
changes in magnitude of B. Note ion velocity related to E/B.
Returns
-------
dict
array-like of scalars for translating ion drifts. Keys are,
'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
for southern locations. 'equator_mer_drifts_scalar' and
'equator_zonal_drifts_scalar' cover the mappings to the equator.
"""
if step_size is None:
step_size = 100.
if max_steps is None:
max_steps = 1000
steps = np.arange(max_steps)
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
eq_zon_drifts_scalar = []
eq_mer_drifts_scalar = []
# magnetic field info
north_mag_scalar = []
south_mag_scalar = []
eq_mag_scalar = []
out = {}
# meridional e-field scalar map, can also be
# zonal ion drift scalar map
# print ('Starting Northern')
north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Southern')
south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Equatorial')
# , step_zon_apex2, mind_plus, mind_minus
eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'meridional',
edge_length=25.,
edge_steps=5)
# , step_mer_apex2, mind_plus, mind_minus
eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'zonal',
edge_length=25.,
edge_steps=5)
# print ('Done with core')
north_zon_drifts_scalar = north_zon_drifts_scalar/50.
south_zon_drifts_scalar = south_zon_drifts_scalar/50.
north_mer_drifts_scalar = north_mer_drifts_scalar/50.
south_mer_drifts_scalar = south_mer_drifts_scalar/50.
# equatorial
eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
if e_field_scaling_only:
# prepare output
out['north_mer_fields_scalar'] = north_zon_drifts_scalar
out['south_mer_fields_scalar'] = south_zon_drifts_scalar
out['north_zon_fields_scalar'] = north_mer_drifts_scalar
out['south_zon_fields_scalar'] = south_mer_drifts_scalar
out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
else:
# figure out scaling for drifts based upon change in magnetic field
# strength
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# get location of apex for s/c field line
apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
[glat], [glon],
[alt], [date])
# trace to northern footpoint
sc_root = np.array([ecef_x, ecef_y, ecef_z])
trace_north = field_line_trace(sc_root, double_date, 1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# southern tracing
trace_south = field_line_trace(sc_root, double_date, -1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# footpoint location
north_ftpnt = trace_north[-1, :]
nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
south_ftpnt = trace_south[-1, :]
sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
# scalar for the northern footpoint electric field based on distances
# for drift also need to include the magnetic field, drift = E/B
tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
np.deg2rad(90.-glat),
np.deg2rad(glon))
# get mag field and scalar for northern footpoint
tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
np.deg2rad(90.-nft_glat),
np.deg2rad(nft_glon))
north_mag_scalar.append(b_sc/b_nft)
# equatorial values
tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
np.deg2rad(90.-apex_lat),
np.deg2rad(apex_lon))
eq_mag_scalar.append(b_sc/b_eq)
# scalar for the southern footpoint
tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
np.deg2rad(90.-sft_glat),
np.deg2rad(sft_glon))
south_mag_scalar.append(b_sc/b_sft)
# make E-Field scalars to drifts
# lists to arrays
north_mag_scalar = np.array(north_mag_scalar)
south_mag_scalar = np.array(south_mag_scalar)
eq_mag_scalar = np.array(eq_mag_scalar)
# apply to electric field scaling to get ion drift values
north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
# equatorial
eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
# output
out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
return out
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | scalars_for_mapping_ion_drifts | python | def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
max_steps=None, e_field_scaling_only=False):
if step_size is None:
step_size = 100.
if max_steps is None:
max_steps = 1000
steps = np.arange(max_steps)
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
eq_zon_drifts_scalar = []
eq_mer_drifts_scalar = []
# magnetic field info
north_mag_scalar = []
south_mag_scalar = []
eq_mag_scalar = []
out = {}
# meridional e-field scalar map, can also be
# zonal ion drift scalar map
# print ('Starting Northern')
north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Southern')
south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Equatorial')
# , step_zon_apex2, mind_plus, mind_minus
eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'meridional',
edge_length=25.,
edge_steps=5)
# , step_mer_apex2, mind_plus, mind_minus
eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'zonal',
edge_length=25.,
edge_steps=5)
# print ('Done with core')
north_zon_drifts_scalar = north_zon_drifts_scalar/50.
south_zon_drifts_scalar = south_zon_drifts_scalar/50.
north_mer_drifts_scalar = north_mer_drifts_scalar/50.
south_mer_drifts_scalar = south_mer_drifts_scalar/50.
# equatorial
eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
if e_field_scaling_only:
# prepare output
out['north_mer_fields_scalar'] = north_zon_drifts_scalar
out['south_mer_fields_scalar'] = south_zon_drifts_scalar
out['north_zon_fields_scalar'] = north_mer_drifts_scalar
out['south_zon_fields_scalar'] = south_mer_drifts_scalar
out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
else:
# figure out scaling for drifts based upon change in magnetic field
# strength
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# get location of apex for s/c field line
apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
[glat], [glon],
[alt], [date])
# trace to northern footpoint
sc_root = np.array([ecef_x, ecef_y, ecef_z])
trace_north = field_line_trace(sc_root, double_date, 1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# southern tracing
trace_south = field_line_trace(sc_root, double_date, -1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# footpoint location
north_ftpnt = trace_north[-1, :]
nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
south_ftpnt = trace_south[-1, :]
sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
# scalar for the northern footpoint electric field based on distances
# for drift also need to include the magnetic field, drift = E/B
tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
np.deg2rad(90.-glat),
np.deg2rad(glon))
# get mag field and scalar for northern footpoint
tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
np.deg2rad(90.-nft_glat),
np.deg2rad(nft_glon))
north_mag_scalar.append(b_sc/b_nft)
# equatorial values
tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
np.deg2rad(90.-apex_lat),
np.deg2rad(apex_lon))
eq_mag_scalar.append(b_sc/b_eq)
# scalar for the southern footpoint
tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
np.deg2rad(90.-sft_glat),
np.deg2rad(sft_glon))
south_mag_scalar.append(b_sc/b_sft)
# make E-Field scalars to drifts
# lists to arrays
north_mag_scalar = np.array(north_mag_scalar)
south_mag_scalar = np.array(south_mag_scalar)
eq_mag_scalar = np.array(eq_mag_scalar)
# apply to electric field scaling to get ion drift values
north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
# equatorial
eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
# output
out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
return out | Calculates scalars for translating ion motions at position
glat, glon, and alt, for date, to the footpoints of the field line
as well as at the magnetic equator.
All inputs are assumed to be 1D arrays.
Note
----
Directions refer to the ion motion direction e.g. the zonal
scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
e_field_scaling_only : boolean (False)
If True, method only calculates the electric field scalar, ignoring
changes in magnitude of B. Note ion velocity related to E/B.
Returns
-------
dict
array-like of scalars for translating ion drifts. Keys are,
'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
for southern locations. 'equator_mer_drifts_scalar' and
'equator_zonal_drifts_scalar' cover the mappings to the equator. | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L1241-L1431 | [
"def geodetic_to_ecef(latitude, longitude, altitude):\n \"\"\"Convert WGS84 geodetic coordinates into ECEF\n\n Parameters\n ----------\n latitude : float or array_like\n Geodetic latitude (degrees)\n longitude : float or array_like\n Geodetic longitude (degrees)\n altitude : float or array_like\n Geodetic Height (km) above WGS84 reference ellipsoid.\n\n Returns\n -------\n x, y, z\n numpy arrays of x, y, z locations in km\n\n \"\"\"\n\n\n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(np.deg2rad(latitude)) ** 2)\n\n # colatitude = 90. - latitude\n x = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))\n y = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))\n z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(np.deg2rad(latitude))\n\n return x, y, z\n",
"def ecef_to_geodetic(x, y, z, method=None):\n \"\"\"Convert ECEF into Geodetic WGS84 coordinates\n\n Parameters\n ----------\n x : float or array_like\n ECEF-X in km\n y : float or array_like\n ECEF-Y in km\n z : float or array_like\n ECEF-Z in km\n method : 'iterative' or 'closed' ('closed' is deafult)\n String selects method of conversion. Closed for mathematical\n solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)\n or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).\n\n Returns\n -------\n latitude, longitude, altitude\n numpy arrays of locations in degrees, degrees, and km\n\n \"\"\"\n\n # quick notes on ECEF to Geodetic transformations \n # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html\n\n method = method or 'closed'\n\n # ellipticity of Earth \n ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)\n # first eccentricity squared\n e2 = ellip ** 2 # 6.6943799901377997E-3\n\n longitude = np.arctan2(y, x)\n # cylindrical radius\n p = np.sqrt(x ** 2 + y ** 2)\n\n # closed form solution\n # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1\n if method == 'closed':\n e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)\n theta = np.arctan2(z*earth_a, p*earth_b)\n latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)\n r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)\n h = p / np.cos(latitude) - r_n\n\n # another possibility\n # http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf\n\n ## iterative method\n # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf\n if method == 'iterative':\n latitude = np.arctan2(p, z)\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n for i in np.arange(6):\n # print latitude\n r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)\n h = p / np.cos(latitude) - r_n\n latitude = np.arctan(z / p / (1. 
- e2 * (r_n / (r_n + h))))\n # print h\n # final ellipsoidal height update\n h = p / np.cos(latitude) - r_n\n\n return np.rad2deg(latitude), np.rad2deg(longitude), h\n",
"def field_line_trace(init, date, direction, height, steps=None,\n max_steps=1E4, step_size=10., recursive_loop_count=None, \n recurse=True):\n \"\"\"Perform field line tracing using IGRF and scipy.integrate.odeint.\n\n Parameters\n ----------\n init : array-like of floats\n Position to begin field line tracing from in ECEF (x,y,z) km\n date : datetime or float\n Date to perform tracing on (year + day/365 + hours/24. + etc.)\n Accounts for leap year if datetime provided.\n direction : int\n 1 : field aligned, generally south to north. \n -1 : anti-field aligned, generally north to south.\n height : float\n Altitude to terminate trace, geodetic WGS84 (km)\n steps : array-like of ints or floats\n Number of steps along field line when field line trace positions should \n be reported. By default, each step is reported; steps=np.arange(max_steps).\n max_steps : float\n Maximum number of steps along field line that should be taken\n step_size : float\n Distance in km for each large integration step. Multiple substeps\n are taken as determined by scipy.integrate.odeint\n\n Returns\n -------\n numpy array\n 2D array. [0,:] has the x,y,z location for initial point\n [:,0] is the x positions over the integration.\n Positions are reported in ECEF (km).\n\n\n \"\"\"\n\n if recursive_loop_count is None: \n recursive_loop_count = 0\n # \n if steps is None:\n steps = np.arange(max_steps)\n if not isinstance(date, float):\n # recast from datetime to float, as required by IGRF12 code\n doy = (date - datetime.datetime(date.year,1,1)).days\n # number of days in year, works for leap years\n num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days\n date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. 
+ date.second/3600.)/24.\n\n trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),\n steps,\n args=(date, step_size, direction, height),\n full_output=False,\n printmessg=False,\n ixpr=False) #,\n # mxstep=500)\n\n # check that we reached final altitude\n check = trace_north[-1, :]\n x, y, z = ecef_to_geodetic(*check) \n if height == 0:\n check_height = 1.\n else:\n check_height = height\n # fortran integration gets close to target height \n if recurse & (z > check_height*1.000001):\n if (recursive_loop_count < 1000):\n # When we have not reached the reference height, call field_line_trace \n # again by taking check value as init - recursive call\n recursive_loop_count = recursive_loop_count + 1\n trace_north1 = field_line_trace(check, date, direction, height,\n step_size=step_size, \n max_steps=max_steps,\n recursive_loop_count=recursive_loop_count,\n steps=steps)\n else:\n raise RuntimeError(\"After 1000 iterations couldn't reach target altitude\")\n return np.vstack((trace_north, trace_north1))\n else:\n # return results if we make it to the target altitude\n\n # filter points to terminate at point closest to target height\n # code below not correct, we want the first poiint that goes below target\n # height\n # code also introduces a variable length return, though I suppose\n # that already exists with the recursive functionality\n # x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2]) \n # idx = np.argmin(np.abs(check_height - z)) \n return trace_north #[:idx+1,:]\n",
"def apex_location_info(glats, glons, alts, dates):\n \"\"\"Determine apex location for the field line passing through input point.\n\n Employs a two stage method. A broad step (100 km) field line trace spanning \n Northern/Southern footpoints is used to find the location with the largest \n geodetic (WGS84) height. A higher resolution trace (.1 km) is then used to \n get a better fix on this location. Greatest geodetic height is once again \n selected.\n\n Parameters\n ----------\n glats : list-like of floats (degrees)\n Geodetic (WGS84) latitude\n glons : list-like of floats (degrees)\n Geodetic (WGS84) longitude \n alts : list-like of floats (km)\n Geodetic (WGS84) altitude, height above surface\n dates : list-like of datetimes\n Date and time for determination of scalars\n\n Returns\n -------\n (float, float, float, float, float, float)\n ECEF X (km), ECEF Y (km), ECEF Z (km), \n Geodetic Latitude (degrees), \n Geodetic Longitude (degrees), \n Geodetic Altitude (km)\n\n \"\"\"\n\n # use input location and convert to ECEF\n ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)\n # prepare parameters for field line trace\n step_size = 100.\n max_steps = 1000\n steps = np.arange(max_steps)\n # high resolution trace parameters\n fine_step_size = .01\n fine_max_steps = int(step_size/fine_step_size)+10\n fine_steps = np.arange(fine_max_steps)\n # prepare output\n out_x = []\n out_y = []\n out_z = []\n\n for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, \n glats, glons, alts, \n dates):\n # to get the apex location we need to do a field line trace\n # then find the highest point\n trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0., \n steps=steps,\n step_size=step_size, \n max_steps=max_steps)\n # convert all locations to geodetic coordinates\n tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2]) \n # determine location that is highest with respect to the geodetic Earth\n max_idx = 
np.argmax(talt)\n # repeat using a high resolution trace one big step size each \n # direction around identified max\n # recurse False ensures only max_steps are taken\n trace = full_field_line(trace[max_idx,:], date, 0., \n steps=fine_steps,\n step_size=fine_step_size, \n max_steps=fine_max_steps, \n recurse=False)\n # convert all locations to geodetic coordinates\n tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])\n # determine location that is highest with respect to the geodetic Earth\n max_idx = np.argmax(talt)\n # collect outputs\n out_x.append(trace[max_idx,0])\n out_y.append(trace[max_idx,1])\n out_z.append(trace[max_idx,2])\n\n out_x = np.array(out_x)\n out_y = np.array(out_y)\n out_z = np.array(out_z)\n glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)\n\n return out_x, out_y, out_z, glat, glon, alt\n",
"def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,\n vector_direction, step_size=None, \n max_steps=None, edge_length=25., \n edge_steps=5):\n \"\"\"\n Forms closed loop integration along mag field, satrting at input\n points and goes through footpoint. At footpoint, steps along vector direction\n in both positive and negative directions, then traces back to opposite\n footpoint. Back at input location, steps toward those new field lines \n (edge_length) along vector direction until hitting distance of minimum\n approach. Loops don't always close. Returns total edge distance \n that goes through input location, along with the distances of closest approach. \n\n Note\n ----\n vector direction refers to the magnetic unit vector direction \n\n Parameters\n ----------\n glats : list-like of floats (degrees)\n Geodetic (WGS84) latitude\n glons : list-like of floats (degrees)\n Geodetic (WGS84) longitude \n alts : list-like of floats (km)\n Geodetic (WGS84) altitude, height above surface\n dates : list-like of datetimes\n Date and time for determination of scalars\n direction : string\n 'north' or 'south' for tracing through northern or\n southern footpoint locations\n vector_direction : string\n 'meridional' or 'zonal' unit vector directions\n step_size : float (km)\n Step size (km) used for field line integration\n max_steps : int\n Number of steps taken for field line integration\n edge_length : float (km)\n Half of total edge length (step) taken at footpoint location.\n edge_length step in both positive and negative directions.\n edge_steps : int\n Number of steps taken from footpoint towards new field line\n in a given direction (positive/negative) along unit vector\n\n Returns\n -------\n np.array, np.array, np.array\n A closed loop field line path through input location and footpoint in \n northern/southern hemisphere and back is taken. The return edge length\n through input location is provided. 
\n\n The distances of closest approach for the positive step along vector\n direction, and the negative step are returned.\n\n\n \"\"\"\n\n if step_size is None:\n step_size = 100.\n if max_steps is None:\n max_steps = 1000\n steps = np.arange(max_steps)\n\n if direction == 'south':\n direct = -1\n elif direction == 'north':\n direct = 1\n\n # use spacecraft location to get ECEF\n ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)\n\n # prepare output\n full_local_step = []\n min_distance_plus = []\n min_distance_minus = []\n\n for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, \n glats, glons, alts, \n dates):\n # going to try and form close loops via field line integration\n # start at location of interest, map down to northern or southern \n # footpoints then take symmetric steps along meridional and zonal \n # directions and trace back from location of interest, step along \n # field line directions until we intersect or hit the distance of \n # closest approach to the return field line with the known \n # distances of footpoint steps, and the closet approach distance\n # we can determine the scalar mapping of one location to another\n\n yr, doy = pysat.utils.getyrdoy(date)\n double_date = float(yr) + float(doy) / 366.\n\n # print (glat, glon, alt)\n # trace to footpoint, starting with input location\n sc_root = np.array([ecef_x, ecef_y, ecef_z])\n trace = field_line_trace(sc_root, double_date, direct, 120., \n steps=steps,\n step_size=step_size, \n max_steps=max_steps)\n # pull out footpoint location\n ftpnt = trace[-1, :]\n ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)\n\n # take step from footpoint along + vector direction\n plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2], \n date, \n direction=vector_direction,\n num_steps=edge_steps,\n step_size=edge_length/edge_steps)\n # trace this back to other footpoint\n other_plus = field_line_trace(plus_step, double_date, -direct, 0., \n 
steps=steps,\n step_size=step_size, \n max_steps=max_steps)\n # take half step from first footpoint along - vector direction\n minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2], \n date, \n direction=vector_direction, \n scalar=-1,\n num_steps=edge_steps,\n step_size=edge_length/edge_steps)\n # trace this back to other footpoint\n other_minus = field_line_trace(minus_step, double_date, -direct, 0., \n steps=steps,\n step_size=step_size, \n max_steps=max_steps)\n # need to determine where the intersection of field line coming back from\n # footpoint through postive vector direction step and back\n # in relation to the vector direction from the s/c location. \n pos_edge_length, _, mind_pos = step_until_intersect(sc_root,\n other_plus,\n 1, date, \n direction=vector_direction,\n field_step_size=step_size,\n step_size_goal=edge_length/edge_steps) \n # take half step from S/C along - vector direction \n minus_edge_length, _, mind_minus = step_until_intersect(sc_root,\n other_minus,\n -1, date, \n direction=vector_direction,\n field_step_size=step_size,\n step_size_goal=edge_length/edge_steps)\n # collect outputs\n full_local_step.append(pos_edge_length + minus_edge_length)\n min_distance_plus.append(mind_pos)\n min_distance_minus.append(mind_minus)\n\n return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)\n",
"def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,\n vector_direction,\n edge_length=25., \n edge_steps=5):\n \"\"\"\n Calculates the distance between apex locations mapping to the input location.\n\n Using the input location, the apex location is calculated. Also from the input \n location, a step along both the positive and negative\n vector_directions is taken, and the apex locations for those points are calculated.\n The difference in position between these apex locations is the total centered\n distance between magnetic field lines at the magnetic apex when starting\n locally with a field line half distance of edge_length.\n\n An alternative method has been implemented, then commented out.\n This technique takes multiple steps from the origin apex towards the apex\n locations identified along vector_direction. In principle this is more accurate\n but more computationally intensive, similar to the footpoint model.\n A comparison is planned.\n\n\n Note\n ----\n vector direction refers to the magnetic unit vector direction \n\n Parameters\n ----------\n glats : list-like of floats (degrees)\n Geodetic (WGS84) latitude\n glons : list-like of floats (degrees)\n Geodetic (WGS84) longitude \n alts : list-like of floats (km)\n Geodetic (WGS84) altitude, height above surface\n dates : list-like of datetimes\n Date and time for determination of scalars\n vector_direction : string\n 'meridional' or 'zonal' unit vector directions\n step_size : float (km)\n Step size (km) used for field line integration\n max_steps : int\n Number of steps taken for field line integration\n edge_length : float (km)\n Half of total edge length (step) taken at footpoint location.\n edge_length step in both positive and negative directions.\n edge_steps : int\n Number of steps taken from footpoint towards new field line\n in a given direction (positive/negative) along unit vector\n\n Returns\n -------\n np.array, ### np.array, np.array\n The change in field line apex 
locations. \n\n ## Pending ## The return edge length through input location is provided. \n\n ## Pending ## The distances of closest approach for the positive step \n along vector direction, and the negative step are returned.\n\n\n \"\"\"\n\n # use spacecraft location to get ECEF\n ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)\n\n # prepare output\n apex_edge_length = []\n # outputs for alternative calculation\n full_local_step = []\n min_distance_plus = []\n min_distance_minus = []\n\n for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, \n glats, glons, alts, \n dates):\n\n yr, doy = pysat.utils.getyrdoy(date)\n double_date = float(yr) + float(doy) / 366.\n\n # get location of apex for s/c field line\n apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(\n [glat], [glon], \n [alt], [date])\n # apex in ecef (maps to input location)\n apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]]) \n # take step from s/c along + vector direction\n # then get the apex location\n plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date, \n direction=vector_direction,\n num_steps=edge_steps,\n step_size=edge_length/edge_steps)\n plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)\n plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \\\n apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])\n # plus apex location in ECEF\n plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]]) \n\n # take half step from s/c along - vector direction\n # then get the apex location\n minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date, \n direction=vector_direction, \n scalar=-1,\n num_steps=edge_steps,\n step_size=edge_length/edge_steps)\n minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)\n minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \\\n apex_location_info([minus_lat], [minus_lon], 
[minus_alt], [date])\n minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]]) \n\n # take difference in apex locations\n apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 + \n (plus_apex_y[0]-minus_apex_y[0])**2 + \n (plus_apex_z[0]-minus_apex_z[0])**2))\n\n# # take an alternative path to calculation\n# # do field line trace around pos and neg apexes\n# # then do intersection with field line projection thing \n# \n# # do a short centered field line trace around plus apex location\n# other_trace = full_field_line(plus_apex_root, double_date, 0., \n# step_size=1., \n# max_steps=10,\n# recurse=False)\n# # need to determine where the intersection of apex field line \n# # in relation to the vector direction from the s/c field apex location.\n# pos_edge_length, _, mind_pos = step_until_intersect(apex_root,\n # other_trace,\n # 1, date, \n # direction=vector_direction,\n # field_step_size=1.,\n # step_size_goal=edge_length/edge_steps) \n# # do a short centered field line trace around 'minus' apex location\n# other_trace = full_field_line(minus_apex_root, double_date, 0., \n# step_size=1., \n# max_steps=10,\n# recurse=False)\n# # need to determine where the intersection of apex field line \n# # in relation to the vector direction from the s/c field apex location. 
\n# minus_edge_length, _, mind_minus = step_until_intersect(apex_root,\n # other_trace,\n # -1, date, \n # direction=vector_direction,\n # field_step_size=1.,\n # step_size_goal=edge_length/edge_steps) \n # full_local_step.append(pos_edge_length + minus_edge_length)\n # min_distance_plus.append(mind_pos)\n # min_distance_minus.append(mind_minus)\n\n # still sorting out alternative option for this calculation\n # commented code is 'good' as far as the plan goes\n # takes more time, so I haven't tested one vs the other yet\n # having two live methods can lead to problems\n # THIS IS A TODO (sort it out)\n return np.array(apex_edge_length)#, np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)\n"
] |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geocentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
    """Convert geocentric coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geocentric latitude (degrees)
    longitude : float or array_like
        Geocentric longitude (degrees)
    altitude : float or array_like
        Height (km) above presumed spherical Earth with radius 6371 km.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    # total radial distance from Earth's center
    radius = earth_geo_radius + altitude
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # project the radial position onto the ECEF axes
    x = radius * np.cos(lat_r) * np.cos(lon_r)
    y = radius * np.cos(lat_r) * np.sin(lon_r)
    z = radius * np.sin(lat_r)
    return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
    """Convert ECEF into geocentric coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    ref_height : float or array_like
        Reference radius used for calculating height.
        Defaults to average radius of 6371 km

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    """
    # fall back to the mean Earth radius when no reference supplied
    ref_radius = earth_geo_radius if ref_height is None else ref_height
    # geocentric radial distance from Earth's center
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    # latitude measured from the equatorial plane (90 deg minus colatitude)
    latitude = 90. - np.rad2deg(np.arccos(z / radius))
    longitude = np.rad2deg(np.arctan2(y, x))
    return latitude, longitude, radius - ref_radius
def geodetic_to_ecef(latitude, longitude, altitude):
    """Convert WGS84 geodetic coordinates into ECEF

    Parameters
    ----------
    latitude : float or array_like
        Geodetic latitude (degrees)
    longitude : float or array_like
        Geodetic longitude (degrees)
    altitude : float or array_like
        Geodetic Height (km) above WGS84 reference ellipsoid.

    Returns
    -------
    x, y, z
        numpy arrays of x, y, z locations in km

    """
    lat_r = np.deg2rad(latitude)
    lon_r = np.deg2rad(longitude)
    # first eccentricity of the WGS84 ellipsoid
    ecc = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # radius of curvature in the prime vertical at this latitude
    prime_vertical = earth_a / np.sqrt(1. - ecc ** 2 * np.sin(lat_r) ** 2)
    cos_lat = np.cos(lat_r)
    x = (prime_vertical + altitude) * cos_lat * np.cos(lon_r)
    y = (prime_vertical + altitude) * cos_lat * np.sin(lon_r)
    # polar axis uses the reduced curvature radius of the ellipsoid
    z = (prime_vertical * (1. - ecc ** 2) + altitude) * np.sin(lat_r)
    return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
    """Convert ECEF into Geodetic WGS84 coordinates

    Parameters
    ----------
    x : float or array_like
        ECEF-X in km
    y : float or array_like
        ECEF-Y in km
    z : float or array_like
        ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
        String selects method of conversion. Closed for mathematical
        solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
        or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).

    Returns
    -------
    latitude, longitude, altitude
        numpy arrays of locations in degrees, degrees, and km

    Raises
    ------
    ValueError
        If an unsupported conversion method string is supplied.

    """
    # quick notes on ECEF to Geodetic transformations
    # http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
    method = method or 'closed'
    if method not in ('closed', 'iterative'):
        # previously an unknown method fell through and produced an
        # UnboundLocalError; fail early with a clear message instead
        raise ValueError("method must be 'closed' or 'iterative'")

    # ellipticity of Earth
    ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
    # first eccentricity squared
    e2 = ellip ** 2  # 6.6943799901377997E-3
    longitude = np.arctan2(y, x)
    # cylindrical radius
    p = np.sqrt(x ** 2 + y ** 2)

    if method == 'closed':
        # closed form solution
        # a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
        e_prime = np.sqrt((earth_a ** 2 - earth_b ** 2) / earth_b ** 2)
        theta = np.arctan2(z * earth_a, p * earth_b)
        latitude = np.arctan2(z + e_prime ** 2 * earth_b * np.sin(theta) ** 3,
                              p - e2 * earth_a * np.cos(theta) ** 3)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        h = p / np.cos(latitude) - r_n
    else:
        # iterative method
        # http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
        # NOTE(review): initial guess is arctan2(p, z); the loop below
        # refines latitude regardless of the starting value
        latitude = np.arctan2(p, z)
        r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
        for _ in np.arange(6):
            r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
            h = p / np.cos(latitude) - r_n
            latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
        # final ellipsoidal height update
        h = p / np.cos(latitude) - r_n

    return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
    """Converts vector from East, North, Up components to ECEF

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    east : float or array-like
        Eastward component of vector
    north : float or array-like
        Northward component of vector
    up : float or array-like
        Upward component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    x, y, z
        Vector components along ECEF x, y, and z directions

    """
    # work in radians and hoist the repeated trig evaluations
    lat = np.radians(glat)
    lon = np.radians(glong)
    sin_lat, cos_lat = np.sin(lat), np.cos(lat)
    sin_lon, cos_lon = np.sin(lon), np.cos(lon)
    # rotate the local ENU triad into the ECEF frame
    x = -east * sin_lon - north * cos_lon * sin_lat + up * cos_lon * cos_lat
    y = east * cos_lon - north * sin_lon * sin_lat + up * sin_lon * cos_lat
    z = north * cos_lat + up * sin_lat
    return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
    """Converts vector from ECEF X,Y,Z components to East, North, Up

    Position of vector in geospace may be specified in either
    geocentric or geodetic coordinates, with corresponding expression
    of the vector using radial or ellipsoidal unit vectors.

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    glat : float or array_like
        Geodetic or geocentric latitude (degrees)
    glong : float or array_like
        Geodetic or geocentric longitude (degrees)

    Returns
    -------
    east, north, up
        Vector components along east, north, and up directions

    """
    # work in radians and hoist the repeated trig evaluations
    lat = np.radians(glat)
    lon = np.radians(glong)
    sin_lat, cos_lat = np.sin(lat), np.cos(lat)
    sin_lon, cos_lon = np.sin(lon), np.cos(lon)
    # rotate the ECEF components into the local ENU frame
    east = -x * sin_lon + y * cos_lon
    north = -x * cos_lon * sin_lat - y * sin_lon * sin_lat + z * cos_lat
    up = x * cos_lon * cos_lat + y * sin_lon * cos_lat + z * sin_lat
    return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ecef onto different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx, xy, xz : float or array-like
        ECEF components of the x unit vector of the new basis
    yx, yy, yz : float or array-like
        ECEF components of the y unit vector of the new basis
    zx, zy, zz : float or array-like
        ECEF components of the z unit vector of the new basis

    Returns
    -------
    out_x, out_y, out_z
        Vector components along each new basis direction

    """
    # dot the vector against each basis unit vector in turn
    proj_x = x * xx + y * xy + z * xz
    proj_y = x * yx + y * yy + z * yz
    proj_z = x * zx + y * zy + z * zz
    return proj_x, proj_y, proj_z
def normalize_vector(x, y, z):
    """
    Normalizes vector to produce a unit vector.

    Parameters
    ----------
    x : float or array-like
        X component of vector
    y : float or array-like
        Y component of vector
    z : float or array-like
        Z component of vector

    Returns
    -------
    x, y, z
        Unit vector x,y,z components

    """
    # Euclidean length of the input vector
    norm = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    return x / norm, y / norm, z / norm
def cross_product(x1, y1, z1, x2, y2, z2):
    """
    Cross product of two vectors, v1 x v2

    Parameters
    ----------
    x1 : float or array-like
        X component of vector 1
    y1 : float or array-like
        Y component of vector 1
    z1 : float or array-like
        Z component of vector 1
    x2 : float or array-like
        X component of vector 2
    y2 : float or array-like
        Y component of vector 2
    z2 : float or array-like
        Z component of vector 2

    Returns
    -------
    x, y, z
        Components of the cross product vector, v1 x v2.
        Note the result is not normalized.

    """
    x = y1 * z2 - y2 * z1
    y = z1 * x2 - x1 * z2
    z = x1 * y2 - y1 * x2
    return x, y, z
def field_line_trace(init, date, direction, height, steps=None,
                     max_steps=1E4, step_size=10., recursive_loop_count=None,
                     recurse=True):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : int
        1 : field aligned, generally south to north.
        -1 : anti-field aligned, generally north to south.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
    max_steps : float
        Maximum number of steps along field line that should be taken
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    recursive_loop_count : int
        Internal counter of how many times the routine has re-invoked itself
        while working down to the target height. Leave as None when calling.
    recurse : bool
        If True (default), continue the trace from the endpoint whenever the
        target height has not been reached.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for initial point
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    Raises
    ------
    RuntimeError
        If 1000 recursive traces fail to reach the target altitude.

    """
    if recursive_loop_count is None:
        recursive_loop_count = 0
    if steps is None:
        steps = np.arange(max_steps)
    if not isinstance(date, float):
        # recast from datetime to float, as required by IGRF12 code
        doy = (date - datetime.datetime(date.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(date.year + 1, 1, 1)
                        - datetime.datetime(date.year, 1, 1)).days
        date = float(date.year) + float(doy)/float(num_doy_year) + \
            float(date.hour + date.minute/60. + date.second/3600.)/24.

    trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
                                         steps,
                                         args=(date, step_size, direction, height),
                                         full_output=False,
                                         printmessg=False,
                                         ixpr=False)
    # check that we reached final altitude; endpoint of the trace
    check = trace_north[-1, :]
    # ecef_to_geodetic returns latitude, longitude, altitude; only the
    # altitude is needed here (original code misleadingly named it z)
    _, _, altitude = ecef_to_geodetic(*check)
    # a target height of 0 km is checked against a 1 km threshold instead,
    # since the fortran integration only gets close to the target height
    check_height = 1. if height == 0 else height
    if recurse and (altitude > check_height * 1.000001):
        if recursive_loop_count < 1000:
            # have not yet reached the reference height; continue the trace
            # from the current endpoint via a recursive call
            recursive_loop_count = recursive_loop_count + 1
            trace_north1 = field_line_trace(check, date, direction, height,
                                            step_size=step_size,
                                            max_steps=max_steps,
                                            recursive_loop_count=recursive_loop_count,
                                            steps=steps)
        else:
            raise RuntimeError("After 1000 iterations couldn't reach target altitude")
        return np.vstack((trace_north, trace_north1))
    else:
        # made it to the target altitude; return the trace as-is
        # TODO: consider truncating at the first point below target height
        return trace_north
def full_field_line(init, date, height, step_size=100., max_steps=1000,
                    steps=None, **kwargs):
    """Perform field line tracing using IGRF and scipy.integrate.odeint.

    Traces both north and south from the starting location and combines the
    results into a single south-to-north trace.

    Parameters
    ----------
    init : array-like of floats
        Position to begin field line tracing from in ECEF (x,y,z) km
    date : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    height : float
        Altitude to terminate trace, geodetic WGS84 (km)
    step_size : float
        Distance in km for each large integration step. Multiple substeps
        are taken as determined by scipy.integrate.odeint
    max_steps : float
        Maximum number of steps along field line that should be taken
    steps : array-like of ints or floats
        Number of steps along field line when field line trace positions should
        be reported. By default, each step is reported; steps=np.arange(max_steps).
        Two traces are made, one north, the other south, thus the output array
        could have double max_steps, or more via recursion.

    Returns
    -------
    numpy array
        2D array. [0,:] has the x,y,z location for southern footpoint
        [:,0] is the x positions over the integration.
        Positions are reported in ECEF (km).

    """
    if steps is None:
        steps = np.arange(max_steps)
    # trace in each direction away from the starting location
    trace_south = field_line_trace(init, date, -1., height,
                                   steps=steps,
                                   step_size=step_size,
                                   max_steps=max_steps,
                                   **kwargs)
    trace_north = field_line_trace(init, date, 1., height,
                                   steps=steps,
                                   step_size=step_size,
                                   max_steps=max_steps,
                                   **kwargs)
    # reverse the southern trace so the combined array runs south to north,
    # dropping the duplicated starting location
    return np.vstack((trace_south[::-1][:-1, :], trace_north))
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
                                          steps=None, max_steps=1000, step_size=100.,
                                          ref_height=120., filter_zonal=True):
    """Calculates unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Note
    ----
    The zonal vector is calculated by field-line tracing from
    the input locations toward the footpoint locations at ref_height.
    The cross product of these two vectors is taken to define the plane of
    the magnetic field. This vector is not always orthogonal
    with the local field-aligned vector (IGRF), thus any component of the
    zonal vector with the field-aligned direction is removed (optional).
    The meridional unit vector is defined via the cross product of the
    zonal and field-aligned directions.

    Parameters
    ----------
    latitude : array-like of floats (degrees)
        Latitude of location, degrees, WGS84
    longitude : array-like of floats (degrees)
        Longitude of location, degrees, WGS84
    altitude : array-like of floats (km)
        Altitude of location, height above surface, WGS84
    datetimes : array-like of datetimes
        Time to calculate vectors
    steps : array-like of ints or floats
        Steps along the field line at which trace positions are reported;
        defaults to np.arange(max_steps).
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint
    filter_zonal : bool
        If True, removes any field aligned component from the calculated
        zonal unit vector. Resulting coordinate system is not-orthogonal.

    Returns
    -------
    zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
        Zonal, field-aligned, and meridional unit vector components,
        each along ECEF x, y, and z.

    """
    if steps is None:
        steps = np.arange(max_steps)
    # calculate satellite position in ECEF coordinates
    ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
    # also get position in geocentric coordinates
    geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
                                                    ref_height=0.)
    # wrap longitudes into [0, 360) (could use pysat's function here)
    idx, = np.where(geo_long < 0)
    geo_long[idx] = geo_long[idx] + 360.
    # prepare output lists
    north_x = []
    north_y = []
    north_z = []
    south_x = []
    south_y = []
    south_z = []
    bn = []
    be = []
    bd = []
    for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
                                                geo_alt, np.deg2rad(90. - geo_lat),
                                                np.deg2rad(geo_long), datetimes):
        init = np.array([x, y, z])
        # trace to northern and southern footpoints at ref_height
        trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
                                       step_size=step_size, max_steps=max_steps)
        # store final location, full trace goes south to north
        trace_north = trace_north[-1, :]
        trace_south = trace_south[-1, :]
        # magnetic field at spacecraft location, using geocentric inputs
        # to get magnetic field in geocentric output
        # recast from datetime to float, as required by IGRF12 code
        doy = (time - datetime.datetime(time.year, 1, 1)).days
        # number of days in year, works for leap years
        num_doy_year = (datetime.datetime(time.year + 1, 1, 1) -
                        datetime.datetime(time.year, 1, 1)).days
        date = time.year + float(doy)/float(num_doy_year) + \
            (time.hour + time.minute/60. + time.second/3600.)/24.
        # get IGRF field components
        # tbn, tbe, tbd, tbmag are in nT
        tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
        # collect outputs
        south_x.append(trace_south[0])
        south_y.append(trace_south[1])
        south_z.append(trace_south[2])
        north_x.append(trace_north[0])
        north_y.append(trace_north[1])
        north_z.append(trace_north[2])
        bn.append(tbn)
        be.append(tbe)
        bd.append(tbd)

    north_x = np.array(north_x)
    north_y = np.array(north_y)
    north_z = np.array(north_z)
    south_x = np.array(south_x)
    south_y = np.array(south_y)
    south_z = np.array(south_z)
    bn = np.array(bn)
    be = np.array(be)
    bd = np.array(bd)

    # calculate unit vectors from satellite location toward
    # northern/southern footpoints
    north_x = north_x - ecef_x
    north_y = north_y - ecef_y
    north_z = north_z - ecef_z
    north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
    south_x = south_x - ecef_x
    south_y = south_y - ecef_y
    south_z = south_z - ecef_z
    south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
    # calculate magnetic unit vector
    # IGRF returns north, east, down components; express in ECEF (up = -down)
    bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
    bx, by, bz = normalize_vector(bx, by, bz)
    # take cross product of southward and northward vectors to get the zonal vector
    zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
                                                 north_x, north_y, north_z)
    # normalize the vectors
    norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
    # calculate zonal vector
    zvx = zvx_foot / norm_foot
    zvy = zvy_foot / norm_foot
    zvz = zvz_foot / norm_foot
    if filter_zonal:
        # remove any field aligned component to the zonal vector
        # (previously this ran unconditionally and filter_zonal was ignored)
        dot_fa = zvx * bx + zvy * by + zvz * bz
        zvx -= dot_fa * bx
        zvy -= dot_fa * by
        zvz -= dot_fa * bz
        zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
    # compute meridional vector
    # cross product of zonal and magnetic unit vector
    mx, my, mz = cross_product(zvx, zvy, zvz,
                               bx, by, bz)
    # return unit vectors for magnetic drifts in ecef coordinates
    return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
                         step_size_goal=5.,
                         field_step_size=None):
    """Starting at pos, method steps along magnetic unit vector direction
    towards the supplied field line trace. Determines the distance of
    closest approach to field line.

    Routine is used when calculating the mapping of electric fields along
    magnetic field lines. Voltage remains constant along the field but the
    distance between field lines does not. This routine may be used to form the
    last leg when trying to trace out a closed field line loop.

    Routine will create a high resolution field line trace (.01 km step size)
    near the location of closest approach to better determine where the
    intersection occurs.

    Parameters
    ----------
    pos : array-like
        X, Y, and Z ECEF locations to start from
    field_line : array-like (:,3)
        X, Y, and Z ECEF locations of field line trace, produced by the
        field_line_trace method.
    sign : int
        if 1, move along positive unit vector. Negative direction for -1.
    time : datetime or float
        Date to perform tracing on (year + day/365 + hours/24. + etc.)
        Accounts for leap year if datetime provided.
    direction : string ('meridional', 'zonal', or 'aligned')
        Which unit vector direction to move along when trying to intersect
        with supplied field line trace. See step_along_mag_unit_vector method
        for more.
    step_size_goal : float
        step size goal that method will try to match when stepping towards field line.
    field_step_size : float
        Step size (km) used to produce the input field line trace; sets the
        length of the high resolution re-trace around closest approach.

    Returns
    -------
    (float, array, float)
        Total distance taken along vector direction; the position after taking
        the step [x, y, z] in ECEF; distance of closest approach from input pos
        towards the input field line trace.

    """
    # NOTE(review): this is an alias, not a copy; the input trace is only
    # read before being replaced by the high resolution trace below
    field_copy = field_line
    # set a high last minimum distance to ensure first loop does better than this
    last_min_dist = 2500000.
    # scalar is the distance along unit vector line that we are taking
    scalar = 0.
    # repeat boolean, controls the search loop below
    repeat=True
    # first run boolean, triggers the one-time high resolution re-trace
    first=True
    # factor is a divisor applied to the remaining distance between point and field line
    # I slowly take steps towards the field line and I don't want to overshoot
    # each time my minimum distance increases, I step back, increase factor, reducing
    # my next step size, then I try again
    factor = 1
    while repeat:
        # take a total step along magnetic unit vector
        # try to take steps near user provided step_size_goal
        unit_steps = np.abs(scalar//step_size_goal)
        if unit_steps == 0:
            unit_steps = 1
        pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
                                              direction=direction,
                                              num_steps=unit_steps,
                                              step_size=np.abs(scalar)/unit_steps,
                                              scalar=sign)
        # find closest point along field line trace
        diff = field_copy - pos_step
        diff_mag = np.sqrt((diff ** 2).sum(axis=1))
        min_idx = np.argmin(diff_mag)
        if first:
            # first time in while loop, create some information
            # make a high resolution field line trace around closest distance
            # want to take a field step size in each direction
            # maintain accuracy of high res trace below to be .01 km
            init = field_copy[min_idx,:]
            field_copy = full_field_line(init, time, 0.,
                                         step_size=0.01,
                                         max_steps=int(field_step_size/.01),
                                         recurse=False)
            # difference with position
            diff = field_copy - pos_step
            diff_mag = np.sqrt((diff ** 2).sum(axis=1))
            # find closest one
            min_idx = np.argmin(diff_mag)
            first = False
        # pull out distance of closest point
        min_dist = diff_mag[min_idx]
        # check how the solution is doing
        # if well, add more distance to the total step and recheck if closer
        # if worse, step back and try a smaller step
        if min_dist > last_min_dist:
            # last step we took made the solution worse
            if factor > 4:
                # we've tried enough, stop looping
                repeat = False
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # calculate latest position
                pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
                                                      time,
                                                      direction=direction,
                                                      num_steps=unit_steps,
                                                      step_size=np.abs(scalar)/unit_steps,
                                                      scalar=sign)
            else:
                # undo increment to last total distance
                scalar = scalar - last_min_dist/(2*factor)
                # increase the divisor used to reduce the distance
                # actually stepped per increment
                factor = factor + 1.
                # try a new increment to total distance
                scalar = scalar + last_min_dist/(2*factor)
        else:
            # we did better, move even closer, a fraction of remaining distance
            # increment scalar, but only by a fraction
            scalar = scalar + min_dist/(2*factor)
            # we have a new standard to judge against, set it
            last_min_dist = min_dist.copy()
    # return magnitude of step, final position, and closest approach distance
    return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
                               step_size=5., scalar=1):
    """
    Move along 'lines' formed by following the magnetic unit vector directions.

    Moving along the field is effectively the same as a field line trace though
    extended movement along a field should use the specific field_line_trace
    method.

    Parameters
    ----------
    x : ECEF-x (km)
        Location to step from in ECEF (km). Scalar input.
    y : ECEF-y (km)
        Location to step from in ECEF (km). Scalar input.
    z : ECEF-z (km)
        Location to step from in ECEF (km). Scalar input.
    date : list-like of datetimes
        Date and time for magnetic field
    direction : string
        String identifier for which unit vector direction to move along.
        Supported inputs, 'meridional', 'zonal', 'aligned'
    num_steps : int
        Number of steps to take along unit vector direction
    step_size : float
        Distance taken for each step (km)
    scalar : int
        Scalar modifier for step size distance. Input a -1 to move along
        negative unit vector direction.

    Returns
    -------
    np.array
        [x, y, z] of ECEF location after taking num_steps along direction,
        each step_size long.

    Raises
    ------
    ValueError
        If direction is not 'meridional', 'zonal', or 'aligned'.

    """
    # validate up front; previously a bad direction produced a confusing
    # NameError on the first loop iteration
    if direction not in ('meridional', 'zonal', 'aligned'):
        raise ValueError("direction must be one of 'meridional', 'zonal', "
                         "or 'aligned'")
    # set parameters for the field line tracing routines
    field_step_size = 100.
    field_max_steps = 1000
    field_steps = np.arange(field_max_steps)
    for _ in np.arange(num_steps):
        # x, y, z in ECEF; convert to geodetic for unit vector calculation
        lat, lon, alt = ecef_to_geodetic(x, y, z)
        # get unit vector directions at the current location
        zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
                [lat], [lon], [alt], [date],
                steps=field_steps,
                max_steps=field_max_steps,
                step_size=field_step_size,
                ref_height=0.)
        # pull out the direction we need
        if direction == 'meridional':
            ux, uy, uz = mx, my, mz
        elif direction == 'zonal':
            ux, uy, uz = zvx, zvy, zvz
        else:
            # 'aligned'
            ux, uy, uz = bx, by, bz
        # take a step along the chosen unit vector direction
        x = x + step_size*ux[0]*scalar
        y = y + step_size*uy[0]*scalar
        z = z + step_size*uz[0]*scalar
    return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
    """Determine apex location for the field line passing through input point.

    Employs a two stage method. A broad step (100 km) field line trace spanning
    Northern/Southern footpoints is used to find the location with the largest
    geodetic (WGS84) height. A higher resolution trace (.01 km) is then used to
    get a better fix on this location. Greatest geodetic height is once again
    selected.

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars

    Returns
    -------
    (float, float, float, float, float, float)
        ECEF X (km), ECEF Y (km), ECEF Z (km),
        Geodetic Latitude (degrees),
        Geodetic Longitude (degrees),
        Geodetic Altitude (km)

    """
    # starting locations expressed in ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # coarse field line trace parameters
    step_size = 100.
    max_steps = 1000
    steps = np.arange(max_steps)
    # high resolution trace parameters; span one coarse step in each direction
    fine_step_size = .01
    fine_max_steps = int(step_size/fine_step_size) + 10
    fine_steps = np.arange(fine_max_steps)
    # prepare output
    apex_xs = []
    apex_ys = []
    apex_zs = []
    for ecef_x, ecef_y, ecef_z, date in zip(ecef_xs, ecef_ys, ecef_zs, dates):
        # coarse field line trace through the input location
        coarse = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # geodetic height along the trace identifies the apex region
        _, _, heights = ecef_to_geodetic(coarse[:, 0], coarse[:, 1], coarse[:, 2])
        peak = np.argmax(heights)
        # refine with a short, fine resolution trace centered on the coarse
        # apex; recurse=False ensures only fine_max_steps are taken
        fine = full_field_line(coarse[peak, :], date, 0.,
                               steps=fine_steps,
                               step_size=fine_step_size,
                               max_steps=fine_max_steps,
                               recurse=False)
        # pick the highest geodetic point of the refined trace
        _, _, heights = ecef_to_geodetic(fine[:, 0], fine[:, 1], fine[:, 2])
        peak = np.argmax(heights)
        apex_xs.append(fine[peak, 0])
        apex_ys.append(fine[peak, 1])
        apex_zs.append(fine[peak, 2])
    apex_xs = np.array(apex_xs)
    apex_ys = np.array(apex_ys)
    apex_zs = np.array(apex_zs)
    # report apex both in ECEF and geodetic coordinates
    apex_lat, apex_lon, apex_alt = ecef_to_geodetic(apex_xs, apex_ys, apex_zs)
    return apex_xs, apex_ys, apex_zs, apex_lat, apex_lon, apex_alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
                                           vector_direction, step_size=None,
                                           max_steps=None, edge_length=25.,
                                           edge_steps=5):
    """
    Forms closed loop integration along mag field, starting at input
    points and goes through footpoint. At footpoint, steps along vector direction
    in both positive and negative directions, then traces back to opposite
    footpoint. Back at input location, steps toward those new field lines
    (edge_length) along vector direction until hitting distance of minimum
    approach. Loops don't always close. Returns total edge distance
    that goes through input location, along with the distances of closest approach.

    Note
    ----
    vector direction refers to the magnetic unit vector direction

    Parameters
    ----------
    glats : list-like of floats (degrees)
        Geodetic (WGS84) latitude
    glons : list-like of floats (degrees)
        Geodetic (WGS84) longitude
    alts : list-like of floats (km)
        Geodetic (WGS84) altitude, height above surface
    dates : list-like of datetimes
        Date and time for determination of scalars
    direction : string
        'north' or 'south' for tracing through northern or
        southern footpoint locations
    vector_direction : string
        'meridional' or 'zonal' unit vector directions
    step_size : float (km)
        Step size (km) used for field line integration
    max_steps : int
        Number of steps taken for field line integration
    edge_length : float (km)
        Half of total edge length (step) taken at footpoint location.
        edge_length step in both positive and negative directions.
    edge_steps : int
        Number of steps taken from footpoint towards new field line
        in a given direction (positive/negative) along unit vector

    Returns
    -------
    np.array, np.array, np.array
        A closed loop field line path through input location and footpoint in
        northern/southern hemisphere and back is taken. The return edge length
        through input location is provided.
        The distances of closest approach for the positive step along vector
        direction, and the negative step are returned.

    """
    if step_size is None:
        step_size = 100.
    if max_steps is None:
        max_steps = 1000
    steps = np.arange(max_steps)
    # sign of the initial field line trace toward the requested footpoint
    if direction == 'south':
        direct = -1
    elif direction == 'north':
        direct = 1
    # use spacecraft location to get ECEF
    ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
    # prepare output
    full_local_step = []
    min_distance_plus = []
    min_distance_minus = []
    for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
                                                             glats, glons, alts,
                                                             dates):
        # going to try and form close loops via field line integration
        # start at location of interest, map down to northern or southern
        # footpoints then take symmetric steps along meridional and zonal
        # directions and trace back from location of interest, step along
        # field line directions until we intersect or hit the distance of
        # closest approach to the return field line with the known
        # distances of footpoint steps, and the closest approach distance
        # we can determine the scalar mapping of one location to another
        yr, doy = pysat.utils.getyrdoy(date)
        double_date = float(yr) + float(doy) / 366.
        # trace to footpoint, starting with input location
        # NOTE(review): footpoint reference height of 120 km is hard-coded here
        sc_root = np.array([ecef_x, ecef_y, ecef_z])
        trace = field_line_trace(sc_root, double_date, direct, 120.,
                                 steps=steps,
                                 step_size=step_size,
                                 max_steps=max_steps)
        # pull out footpoint location
        ftpnt = trace[-1, :]
        ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
        # take step from footpoint along + vector direction
        plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                               date,
                                               direction=vector_direction,
                                               num_steps=edge_steps,
                                               step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
                                      steps=steps,
                                      step_size=step_size,
                                      max_steps=max_steps)
        # take half step from first footpoint along - vector direction
        minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
                                                date,
                                                direction=vector_direction,
                                                scalar=-1,
                                                num_steps=edge_steps,
                                                step_size=edge_length/edge_steps)
        # trace this back to other footpoint
        other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
                                       steps=steps,
                                       step_size=step_size,
                                       max_steps=max_steps)
        # need to determine where the intersection of field line coming back from
        # footpoint through positive vector direction step and back
        # in relation to the vector direction from the s/c location.
        pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
                                                            other_plus,
                                                            1, date,
                                                            direction=vector_direction,
                                                            field_step_size=step_size,
                                                            step_size_goal=edge_length/edge_steps)
        # take half step from S/C along - vector direction
        minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
                                                                other_minus,
                                                                -1, date,
                                                                direction=vector_direction,
                                                                field_step_size=step_size,
                                                                step_size_goal=edge_length/edge_steps)
        # collect outputs
        full_local_step.append(pos_edge_length + minus_edge_length)
        min_distance_plus.append(mind_pos)
        min_distance_minus.append(mind_minus)
    return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
vector_direction,
edge_length=25.,
edge_steps=5):
"""
Calculates the distance between apex locations mapping to the input location.
Using the input location, the apex location is calculated. Also from the input
location, a step along both the positive and negative
vector_directions is taken, and the apex locations for those points are calculated.
The difference in position between these apex locations is the total centered
distance between magnetic field lines at the magnetic apex when starting
locally with a field line half distance of edge_length.
An alternative method has been implemented, then commented out.
This technique takes multiple steps from the origin apex towards the apex
locations identified along vector_direction. In principle this is more accurate
but more computationally intensive, similar to the footpoint model.
A comparison is planned.
Note
----
vector direction refers to the magnetic unit vector direction
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
vector_direction : string
'meridional' or 'zonal' unit vector directions
step_size : float (km)
Step size (km) used for field line integration
max_steps : int
Number of steps taken for field line integration
edge_length : float (km)
Half of total edge length (step) taken at footpoint location.
edge_length step in both positive and negative directions.
edge_steps : int
Number of steps taken from footpoint towards new field line
in a given direction (positive/negative) along unit vector
Returns
-------
np.array, ### np.array, np.array
The change in field line apex locations.
## Pending ## The return edge length through input location is provided.
## Pending ## The distances of closest approach for the positive step
along vector direction, and the negative step are returned.
"""
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
apex_edge_length = []
# outputs for alternative calculation
full_local_step = []
min_distance_plus = []
min_distance_minus = []
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# get location of apex for s/c field line
apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
[glat], [glon],
[alt], [date])
# apex in ecef (maps to input location)
apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
# take step from s/c along + vector direction
# then get the apex location
plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
direction=vector_direction,
num_steps=edge_steps,
step_size=edge_length/edge_steps)
plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
# plus apex location in ECEF
plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
# take half step from s/c along - vector direction
# then get the apex location
minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
direction=vector_direction,
scalar=-1,
num_steps=edge_steps,
step_size=edge_length/edge_steps)
minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
# take difference in apex locations
apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
(plus_apex_y[0]-minus_apex_y[0])**2 +
(plus_apex_z[0]-minus_apex_z[0])**2))
# # take an alternative path to calculation
# # do field line trace around pos and neg apexes
# # then do intersection with field line projection thing
#
# # do a short centered field line trace around plus apex location
# other_trace = full_field_line(plus_apex_root, double_date, 0.,
# step_size=1.,
# max_steps=10,
# recurse=False)
# # need to determine where the intersection of apex field line
# # in relation to the vector direction from the s/c field apex location.
# pos_edge_length, _, mind_pos = step_until_intersect(apex_root,
# other_trace,
# 1, date,
# direction=vector_direction,
# field_step_size=1.,
# step_size_goal=edge_length/edge_steps)
# # do a short centered field line trace around 'minus' apex location
# other_trace = full_field_line(minus_apex_root, double_date, 0.,
# step_size=1.,
# max_steps=10,
# recurse=False)
# # need to determine where the intersection of apex field line
# # in relation to the vector direction from the s/c field apex location.
# minus_edge_length, _, mind_minus = step_until_intersect(apex_root,
# other_trace,
# -1, date,
# direction=vector_direction,
# field_step_size=1.,
# step_size_goal=edge_length/edge_steps)
# full_local_step.append(pos_edge_length + minus_edge_length)
# min_distance_plus.append(mind_pos)
# min_distance_minus.append(mind_minus)
# still sorting out alternative option for this calculation
# commented code is 'good' as far as the plan goes
# takes more time, so I haven't tested one vs the other yet
# having two live methods can lead to problems
# THIS IS A TODO (sort it out)
return np.array(apex_edge_length)#, np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
# # take step from one apex towards the other
# apex_path = step_along_mag_unit_vector(minus_apex_x, minus_apex_y, minus_apex_z, date,
# direction=vector_direction,
# num_steps=edge_steps,
# step_size=apex_edge_length[-1]/(edge_steps*2.))
# pos_apex_diff.append((apex_path[0] - plus_apex_x)**2 +
# (apex_path[1] - plus_apex_y)**2 +
# (apex_path[2] - plus_apex_z)**2)
# return apex_edge_length, path_apex_diff
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
max_steps=None, e_field_scaling_only=False):
"""
Calculates scalars for translating ion motions at position
glat, glon, and alt, for date, to the footpoints of the field line
as well as at the magnetic equator.
All inputs are assumed to be 1D arrays.
Note
----
Directions refer to the ion motion direction e.g. the zonal
scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
e_field_scaling_only : boolean (False)
If True, method only calculates the electric field scalar, ignoring
changes in magnitude of B. Note ion velocity related to E/B.
Returns
-------
dict
array-like of scalars for translating ion drifts. Keys are,
'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
for southern locations. 'equator_mer_drifts_scalar' and
'equator_zonal_drifts_scalar' cover the mappings to the equator.
"""
if step_size is None:
step_size = 100.
if max_steps is None:
max_steps = 1000
steps = np.arange(max_steps)
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
eq_zon_drifts_scalar = []
eq_mer_drifts_scalar = []
# magnetic field info
north_mag_scalar = []
south_mag_scalar = []
eq_mag_scalar = []
out = {}
# meridional e-field scalar map, can also be
# zonal ion drift scalar map
# print ('Starting Northern')
north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Southern')
south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Equatorial')
# , step_zon_apex2, mind_plus, mind_minus
eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'meridional',
edge_length=25.,
edge_steps=5)
# , step_mer_apex2, mind_plus, mind_minus
eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'zonal',
edge_length=25.,
edge_steps=5)
# print ('Done with core')
north_zon_drifts_scalar = north_zon_drifts_scalar/50.
south_zon_drifts_scalar = south_zon_drifts_scalar/50.
north_mer_drifts_scalar = north_mer_drifts_scalar/50.
south_mer_drifts_scalar = south_mer_drifts_scalar/50.
# equatorial
eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
if e_field_scaling_only:
# prepare output
out['north_mer_fields_scalar'] = north_zon_drifts_scalar
out['south_mer_fields_scalar'] = south_zon_drifts_scalar
out['north_zon_fields_scalar'] = north_mer_drifts_scalar
out['south_zon_fields_scalar'] = south_mer_drifts_scalar
out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
else:
# figure out scaling for drifts based upon change in magnetic field
# strength
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# get location of apex for s/c field line
apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
[glat], [glon],
[alt], [date])
# trace to northern footpoint
sc_root = np.array([ecef_x, ecef_y, ecef_z])
trace_north = field_line_trace(sc_root, double_date, 1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# southern tracing
trace_south = field_line_trace(sc_root, double_date, -1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# footpoint location
north_ftpnt = trace_north[-1, :]
nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
south_ftpnt = trace_south[-1, :]
sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
# scalar for the northern footpoint electric field based on distances
# for drift also need to include the magnetic field, drift = E/B
tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
np.deg2rad(90.-glat),
np.deg2rad(glon))
# get mag field and scalar for northern footpoint
tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
np.deg2rad(90.-nft_glat),
np.deg2rad(nft_glon))
north_mag_scalar.append(b_sc/b_nft)
# equatorial values
tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
np.deg2rad(90.-apex_lat),
np.deg2rad(apex_lon))
eq_mag_scalar.append(b_sc/b_eq)
# scalar for the southern footpoint
tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
np.deg2rad(90.-sft_glat),
np.deg2rad(sft_glon))
south_mag_scalar.append(b_sc/b_sft)
# make E-Field scalars to drifts
# lists to arrays
north_mag_scalar = np.array(north_mag_scalar)
south_mag_scalar = np.array(south_mag_scalar)
eq_mag_scalar = np.array(eq_mag_scalar)
# apply to electric field scaling to get ion drift values
north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
# equatorial
eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
# output
out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
return out
|
hayd/ctox | ctox/subst.py | expand_curlys | python | def expand_curlys(s):
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s]) | Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"] | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L33-L44 | null | """This module contains functions to deal with substitution of tox.ini config
files.
The most useful are bash_expand and replace_braces.
"""
import os
import re
DEPTH = 5
def parse_commands(env):
pass
def parse_envlist(s):
"""Expand the tox.ini's envlist into a fully expanded list.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> parse_envlist(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
# TODO some other substitutions?
return bash_expand(s)
def _replace_curly(envlist, match):
assert isinstance(envlist, list)
return [e[:match.start()] + m + e[match.end():]
for m in re.split(r"\s*,\s*", match.group()[1:-1])
for e in envlist]
def bash_expand(s):
"""Usually an envlist is a comma seperated list of pyXX, however tox
supports move advanced usage.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> bash_expand(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])
def _split_out_of_braces(s):
"""Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32']
"""
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part
def expand_factor_conditions(s, env):
"""If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
""
"""
try:
factor, value = re.split(r'\s*\:\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
return ''
def matches_factor_conditions(s, env):
""""Returns True if py{33, 34} expanded is contained in env.name."""
env_labels = set(env.name.split('-'))
labels = set(bash_expand(s))
return bool(labels & env_labels)
def split_on(s, sep=" "):
"""Split s by sep, unless it's inside a quote."""
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
def _strip_speechmarks(t):
for sm in ["'''", '"""', "'", '"']:
if t.startswith(sm) and t.endswith(sm):
return t[len(sm):-len(sm)]
return t
def replace_braces(s, env):
"""Makes tox substitutions to s, with respect to environment env.
Example
-------
>>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
"echo andy passed no posargs"
Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""),
the "{posargs:andy}" is replaced with "andy" (since no posargs were
passed).
"""
def replace(m):
return _replace_match(m, env)
for _ in range(DEPTH):
s = re.sub(r"{[^{}]*}", replace, s)
return s
def _replace_match(m, env):
"""Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions."""
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s)
def _replace_envvar(s, _):
"""env:KEY or env:KEY:DEFAULT"""
e = s.split(":")
if len(e) > 3 or len(e) == 1 or e[0] != "env":
raise ValueError()
elif len(e) == 2:
# Note: this can/should raise a KeyError (according to spec).
return os.environ[e[1]]
else: # len(e) == 3
return os.environ.get(e[1], e[2])
def _replace_config(s, env):
"""[sectionname]optionname"""
m = re.match(r"\[(.*?)\](.*)", s)
if m:
section, option = m.groups()
expanded = env.config.get(section, option)
return '\n'.join([expand_factor_conditions(e, env)
for e in expanded.split("\n")])
else:
raise ValueError()
def _replace_posargs(s, env):
"posargs:DEFAULT"
e = re.split(r'\s*\:\s*', s)
if e and e[0] == "posargs":
from ctox.main import positional_args
return (" ".join(positional_args(env.options)) or
(e[1] if len(e) > 1 else ""))
else:
raise ValueError()
|
hayd/ctox | ctox/subst.py | _split_out_of_braces | python | def _split_out_of_braces(s):
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part | Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32'] | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L69-L86 | null | """This module contains functions to deal with substitution of tox.ini config
files.
The most useful are bash_expand and replace_braces.
"""
import os
import re
DEPTH = 5
def parse_commands(env):
pass
def parse_envlist(s):
"""Expand the tox.ini's envlist into a fully expanded list.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> parse_envlist(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
# TODO some other substitutions?
return bash_expand(s)
def expand_curlys(s):
"""Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"]
"""
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s])
def _replace_curly(envlist, match):
assert isinstance(envlist, list)
return [e[:match.start()] + m + e[match.end():]
for m in re.split(r"\s*,\s*", match.group()[1:-1])
for e in envlist]
def bash_expand(s):
"""Usually an envlist is a comma seperated list of pyXX, however tox
supports move advanced usage.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> bash_expand(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])
def expand_factor_conditions(s, env):
"""If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
""
"""
try:
factor, value = re.split(r'\s*\:\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
return ''
def matches_factor_conditions(s, env):
""""Returns True if py{33, 34} expanded is contained in env.name."""
env_labels = set(env.name.split('-'))
labels = set(bash_expand(s))
return bool(labels & env_labels)
def split_on(s, sep=" "):
"""Split s by sep, unless it's inside a quote."""
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
def _strip_speechmarks(t):
for sm in ["'''", '"""', "'", '"']:
if t.startswith(sm) and t.endswith(sm):
return t[len(sm):-len(sm)]
return t
def replace_braces(s, env):
"""Makes tox substitutions to s, with respect to environment env.
Example
-------
>>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
"echo andy passed no posargs"
Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""),
the "{posargs:andy}" is replaced with "andy" (since no posargs were
passed).
"""
def replace(m):
return _replace_match(m, env)
for _ in range(DEPTH):
s = re.sub(r"{[^{}]*}", replace, s)
return s
def _replace_match(m, env):
"""Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions."""
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s)
def _replace_envvar(s, _):
"""env:KEY or env:KEY:DEFAULT"""
e = s.split(":")
if len(e) > 3 or len(e) == 1 or e[0] != "env":
raise ValueError()
elif len(e) == 2:
# Note: this can/should raise a KeyError (according to spec).
return os.environ[e[1]]
else: # len(e) == 3
return os.environ.get(e[1], e[2])
def _replace_config(s, env):
"""[sectionname]optionname"""
m = re.match(r"\[(.*?)\](.*)", s)
if m:
section, option = m.groups()
expanded = env.config.get(section, option)
return '\n'.join([expand_factor_conditions(e, env)
for e in expanded.split("\n")])
else:
raise ValueError()
def _replace_posargs(s, env):
"posargs:DEFAULT"
e = re.split(r'\s*\:\s*', s)
if e and e[0] == "posargs":
from ctox.main import positional_args
return (" ".join(positional_args(env.options)) or
(e[1] if len(e) > 1 else ""))
else:
raise ValueError()
|
hayd/ctox | ctox/subst.py | expand_factor_conditions | python | def expand_factor_conditions(s, env):
try:
factor, value = re.split(r'\s*\:\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
return '' | If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
"" | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L89-L109 | [
"def matches_factor_conditions(s, env):\n \"\"\"\"Returns True if py{33, 34} expanded is contained in env.name.\"\"\"\n env_labels = set(env.name.split('-'))\n labels = set(bash_expand(s))\n return bool(labels & env_labels)\n"
] | """This module contains functions to deal with substitution of tox.ini config
files.
The most useful are bash_expand and replace_braces.
"""
import os
import re
DEPTH = 5
def parse_commands(env):
pass
def parse_envlist(s):
"""Expand the tox.ini's envlist into a fully expanded list.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> parse_envlist(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
# TODO some other substitutions?
return bash_expand(s)
def expand_curlys(s):
"""Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"]
"""
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s])
def _replace_curly(envlist, match):
assert isinstance(envlist, list)
return [e[:match.start()] + m + e[match.end():]
for m in re.split(r"\s*,\s*", match.group()[1:-1])
for e in envlist]
def bash_expand(s):
"""Usually an envlist is a comma seperated list of pyXX, however tox
supports move advanced usage.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> bash_expand(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])
def _split_out_of_braces(s):
"""Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32']
"""
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part
def matches_factor_conditions(s, env):
""""Returns True if py{33, 34} expanded is contained in env.name."""
env_labels = set(env.name.split('-'))
labels = set(bash_expand(s))
return bool(labels & env_labels)
def split_on(s, sep=" "):
"""Split s by sep, unless it's inside a quote."""
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
def _strip_speechmarks(t):
for sm in ["'''", '"""', "'", '"']:
if t.startswith(sm) and t.endswith(sm):
return t[len(sm):-len(sm)]
return t
def replace_braces(s, env):
"""Makes tox substitutions to s, with respect to environment env.
Example
-------
>>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
"echo andy passed no posargs"
Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""),
the "{posargs:andy}" is replaced with "andy" (since no posargs were
passed).
"""
def replace(m):
return _replace_match(m, env)
for _ in range(DEPTH):
s = re.sub(r"{[^{}]*}", replace, s)
return s
def _replace_match(m, env):
"""Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions."""
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s)
def _replace_envvar(s, _):
"""env:KEY or env:KEY:DEFAULT"""
e = s.split(":")
if len(e) > 3 or len(e) == 1 or e[0] != "env":
raise ValueError()
elif len(e) == 2:
# Note: this can/should raise a KeyError (according to spec).
return os.environ[e[1]]
else: # len(e) == 3
return os.environ.get(e[1], e[2])
def _replace_config(s, env):
"""[sectionname]optionname"""
m = re.match(r"\[(.*?)\](.*)", s)
if m:
section, option = m.groups()
expanded = env.config.get(section, option)
return '\n'.join([expand_factor_conditions(e, env)
for e in expanded.split("\n")])
else:
raise ValueError()
def _replace_posargs(s, env):
"posargs:DEFAULT"
e = re.split(r'\s*\:\s*', s)
if e and e[0] == "posargs":
from ctox.main import positional_args
return (" ".join(positional_args(env.options)) or
(e[1] if len(e) > 1 else ""))
else:
raise ValueError()
|
hayd/ctox | ctox/subst.py | matches_factor_conditions | python | def matches_factor_conditions(s, env):
"
env_labels = set(env.name.split('-'))
labels = set(bash_expand(s))
return bool(labels & env_labels) | Returns True if py{33, 34} expanded is contained in env.name. | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L112-L116 | [
"def bash_expand(s):\n \"\"\"Usually an envlist is a comma seperated list of pyXX, however tox\n supports move advanced usage.\n\n Example\n -------\n >>> s = \"{py26,py27}-django{15,16}, py32\"\n >>> bash_expand(s)\n [\"py26-django15\", \"py26-django16\", \"py27-django15\", \"py27-django16\",\n \"py32\"]\n\n \"\"\"\n return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])\n"
] | """This module contains functions to deal with substitution of tox.ini config
files.
The most useful are bash_expand and replace_braces.
"""
import os
import re
DEPTH = 5
def parse_commands(env):
pass
def parse_envlist(s):
"""Expand the tox.ini's envlist into a fully expanded list.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> parse_envlist(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
# TODO some other substitutions?
return bash_expand(s)
def expand_curlys(s):
"""Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"]
"""
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s])
def _replace_curly(envlist, match):
assert isinstance(envlist, list)
return [e[:match.start()] + m + e[match.end():]
for m in re.split(r"\s*,\s*", match.group()[1:-1])
for e in envlist]
def bash_expand(s):
"""Usually an envlist is a comma seperated list of pyXX, however tox
supports move advanced usage.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> bash_expand(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])
def _split_out_of_braces(s):
"""Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32']
"""
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part
def expand_factor_conditions(s, env):
"""If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
""
"""
try:
factor, value = re.split(r'\s*\:\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
return ''
def split_on(s, sep=" "):
"""Split s by sep, unless it's inside a quote."""
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
def _strip_speechmarks(t):
for sm in ["'''", '"""', "'", '"']:
if t.startswith(sm) and t.endswith(sm):
return t[len(sm):-len(sm)]
return t
def replace_braces(s, env):
"""Makes tox substitutions to s, with respect to environment env.
Example
-------
>>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
"echo andy passed no posargs"
Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""),
the "{posargs:andy}" is replaced with "andy" (since no posargs were
passed).
"""
def replace(m):
return _replace_match(m, env)
for _ in range(DEPTH):
s = re.sub(r"{[^{}]*}", replace, s)
return s
def _replace_match(m, env):
"""Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions."""
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s)
def _replace_envvar(s, _):
"""env:KEY or env:KEY:DEFAULT"""
e = s.split(":")
if len(e) > 3 or len(e) == 1 or e[0] != "env":
raise ValueError()
elif len(e) == 2:
# Note: this can/should raise a KeyError (according to spec).
return os.environ[e[1]]
else: # len(e) == 3
return os.environ.get(e[1], e[2])
def _replace_config(s, env):
"""[sectionname]optionname"""
m = re.match(r"\[(.*?)\](.*)", s)
if m:
section, option = m.groups()
expanded = env.config.get(section, option)
return '\n'.join([expand_factor_conditions(e, env)
for e in expanded.split("\n")])
else:
raise ValueError()
def _replace_posargs(s, env):
"posargs:DEFAULT"
e = re.split(r'\s*\:\s*', s)
if e and e[0] == "posargs":
from ctox.main import positional_args
return (" ".join(positional_args(env.options)) or
(e[1] if len(e) > 1 else ""))
else:
raise ValueError()
|
hayd/ctox | ctox/subst.py | split_on | python | def split_on(s, sep=" "):
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]] | Split s by sep, unless it's inside a quote. | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L119-L123 | null | """This module contains functions to deal with substitution of tox.ini config
files.
The most useful are bash_expand and replace_braces.
"""
import os
import re
DEPTH = 5
def parse_commands(env):
pass
def parse_envlist(s):
"""Expand the tox.ini's envlist into a fully expanded list.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> parse_envlist(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
# TODO some other substitutions?
return bash_expand(s)
def expand_curlys(s):
"""Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"]
"""
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s])
def _replace_curly(envlist, match):
assert isinstance(envlist, list)
return [e[:match.start()] + m + e[match.end():]
for m in re.split(r"\s*,\s*", match.group()[1:-1])
for e in envlist]
def bash_expand(s):
"""Usually an envlist is a comma seperated list of pyXX, however tox
supports move advanced usage.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> bash_expand(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])
def _split_out_of_braces(s):
"""Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32']
"""
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part
def expand_factor_conditions(s, env):
"""If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
""
"""
try:
factor, value = re.split(r'\s*\:\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
return ''
def matches_factor_conditions(s, env):
""""Returns True if py{33, 34} expanded is contained in env.name."""
env_labels = set(env.name.split('-'))
labels = set(bash_expand(s))
return bool(labels & env_labels)
def _strip_speechmarks(t):
for sm in ["'''", '"""', "'", '"']:
if t.startswith(sm) and t.endswith(sm):
return t[len(sm):-len(sm)]
return t
def replace_braces(s, env):
"""Makes tox substitutions to s, with respect to environment env.
Example
-------
>>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
"echo andy passed no posargs"
Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""),
the "{posargs:andy}" is replaced with "andy" (since no posargs were
passed).
"""
def replace(m):
return _replace_match(m, env)
for _ in range(DEPTH):
s = re.sub(r"{[^{}]*}", replace, s)
return s
def _replace_match(m, env):
"""Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions."""
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s)
def _replace_envvar(s, _):
"""env:KEY or env:KEY:DEFAULT"""
e = s.split(":")
if len(e) > 3 or len(e) == 1 or e[0] != "env":
raise ValueError()
elif len(e) == 2:
# Note: this can/should raise a KeyError (according to spec).
return os.environ[e[1]]
else: # len(e) == 3
return os.environ.get(e[1], e[2])
def _replace_config(s, env):
"""[sectionname]optionname"""
m = re.match(r"\[(.*?)\](.*)", s)
if m:
section, option = m.groups()
expanded = env.config.get(section, option)
return '\n'.join([expand_factor_conditions(e, env)
for e in expanded.split("\n")])
else:
raise ValueError()
def _replace_posargs(s, env):
"posargs:DEFAULT"
e = re.split(r'\s*\:\s*', s)
if e and e[0] == "posargs":
from ctox.main import positional_args
return (" ".join(positional_args(env.options)) or
(e[1] if len(e) > 1 else ""))
else:
raise ValueError()
|
hayd/ctox | ctox/subst.py | replace_braces | python | def replace_braces(s, env):
def replace(m):
return _replace_match(m, env)
for _ in range(DEPTH):
s = re.sub(r"{[^{}]*}", replace, s)
return s | Makes tox substitutions to s, with respect to environment env.
Example
-------
>>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
"echo andy passed no posargs"
Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""),
the "{posargs:andy}" is replaced with "andy" (since no posargs were
passed). | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L133-L150 | null | """This module contains functions to deal with substitution of tox.ini config
files.
The most useful are bash_expand and replace_braces.
"""
import os
import re
DEPTH = 5
def parse_commands(env):
pass
def parse_envlist(s):
"""Expand the tox.ini's envlist into a fully expanded list.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> parse_envlist(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
# TODO some other substitutions?
return bash_expand(s)
def expand_curlys(s):
"""Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"]
"""
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s])
def _replace_curly(envlist, match):
assert isinstance(envlist, list)
return [e[:match.start()] + m + e[match.end():]
for m in re.split(r"\s*,\s*", match.group()[1:-1])
for e in envlist]
def bash_expand(s):
"""Usually an envlist is a comma seperated list of pyXX, however tox
supports move advanced usage.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> bash_expand(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])
def _split_out_of_braces(s):
"""Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32']
"""
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part
def expand_factor_conditions(s, env):
"""If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
""
"""
try:
factor, value = re.split(r'\s*\:\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
return ''
def matches_factor_conditions(s, env):
""""Returns True if py{33, 34} expanded is contained in env.name."""
env_labels = set(env.name.split('-'))
labels = set(bash_expand(s))
return bool(labels & env_labels)
def split_on(s, sep=" "):
"""Split s by sep, unless it's inside a quote."""
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
def _strip_speechmarks(t):
for sm in ["'''", '"""', "'", '"']:
if t.startswith(sm) and t.endswith(sm):
return t[len(sm):-len(sm)]
return t
def _replace_match(m, env):
"""Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions."""
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s)
def _replace_envvar(s, _):
"""env:KEY or env:KEY:DEFAULT"""
e = s.split(":")
if len(e) > 3 or len(e) == 1 or e[0] != "env":
raise ValueError()
elif len(e) == 2:
# Note: this can/should raise a KeyError (according to spec).
return os.environ[e[1]]
else: # len(e) == 3
return os.environ.get(e[1], e[2])
def _replace_config(s, env):
"""[sectionname]optionname"""
m = re.match(r"\[(.*?)\](.*)", s)
if m:
section, option = m.groups()
expanded = env.config.get(section, option)
return '\n'.join([expand_factor_conditions(e, env)
for e in expanded.split("\n")])
else:
raise ValueError()
def _replace_posargs(s, env):
"posargs:DEFAULT"
e = re.split(r'\s*\:\s*', s)
if e and e[0] == "posargs":
from ctox.main import positional_args
return (" ".join(positional_args(env.options)) or
(e[1] if len(e) > 1 else ""))
else:
raise ValueError()
|
hayd/ctox | ctox/subst.py | _replace_match | python | def _replace_match(m, env):
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s) | Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions. | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L153-L173 | null | """This module contains functions to deal with substitution of tox.ini config
files.
The most useful are bash_expand and replace_braces.
"""
import os
import re
DEPTH = 5
def parse_commands(env):
pass
def parse_envlist(s):
"""Expand the tox.ini's envlist into a fully expanded list.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> parse_envlist(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
# TODO some other substitutions?
return bash_expand(s)
def expand_curlys(s):
"""Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"]
"""
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s])
def _replace_curly(envlist, match):
assert isinstance(envlist, list)
return [e[:match.start()] + m + e[match.end():]
for m in re.split(r"\s*,\s*", match.group()[1:-1])
for e in envlist]
def bash_expand(s):
"""Usually an envlist is a comma seperated list of pyXX, however tox
supports move advanced usage.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> bash_expand(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])
def _split_out_of_braces(s):
"""Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32']
"""
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part
def expand_factor_conditions(s, env):
"""If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
""
"""
try:
factor, value = re.split(r'\s*\:\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
return ''
def matches_factor_conditions(s, env):
""""Returns True if py{33, 34} expanded is contained in env.name."""
env_labels = set(env.name.split('-'))
labels = set(bash_expand(s))
return bool(labels & env_labels)
def split_on(s, sep=" "):
"""Split s by sep, unless it's inside a quote."""
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
def _strip_speechmarks(t):
for sm in ["'''", '"""', "'", '"']:
if t.startswith(sm) and t.endswith(sm):
return t[len(sm):-len(sm)]
return t
def replace_braces(s, env):
"""Makes tox substitutions to s, with respect to environment env.
Example
-------
>>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
"echo andy passed no posargs"
Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""),
the "{posargs:andy}" is replaced with "andy" (since no posargs were
passed).
"""
def replace(m):
return _replace_match(m, env)
for _ in range(DEPTH):
s = re.sub(r"{[^{}]*}", replace, s)
return s
def _replace_envvar(s, _):
"""env:KEY or env:KEY:DEFAULT"""
e = s.split(":")
if len(e) > 3 or len(e) == 1 or e[0] != "env":
raise ValueError()
elif len(e) == 2:
# Note: this can/should raise a KeyError (according to spec).
return os.environ[e[1]]
else: # len(e) == 3
return os.environ.get(e[1], e[2])
def _replace_config(s, env):
"""[sectionname]optionname"""
m = re.match(r"\[(.*?)\](.*)", s)
if m:
section, option = m.groups()
expanded = env.config.get(section, option)
return '\n'.join([expand_factor_conditions(e, env)
for e in expanded.split("\n")])
else:
raise ValueError()
def _replace_posargs(s, env):
"posargs:DEFAULT"
e = re.split(r'\s*\:\s*', s)
if e and e[0] == "posargs":
from ctox.main import positional_args
return (" ".join(positional_args(env.options)) or
(e[1] if len(e) > 1 else ""))
else:
raise ValueError()
|
hayd/ctox | ctox/subst.py | _replace_envvar | python | def _replace_envvar(s, _):
e = s.split(":")
if len(e) > 3 or len(e) == 1 or e[0] != "env":
raise ValueError()
elif len(e) == 2:
# Note: this can/should raise a KeyError (according to spec).
return os.environ[e[1]]
else: # len(e) == 3
return os.environ.get(e[1], e[2]) | env:KEY or env:KEY:DEFAULT | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L176-L185 | null | """This module contains functions to deal with substitution of tox.ini config
files.
The most useful are bash_expand and replace_braces.
"""
import os
import re
DEPTH = 5
def parse_commands(env):
pass
def parse_envlist(s):
"""Expand the tox.ini's envlist into a fully expanded list.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> parse_envlist(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
# TODO some other substitutions?
return bash_expand(s)
def expand_curlys(s):
"""Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"]
"""
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s])
def _replace_curly(envlist, match):
assert isinstance(envlist, list)
return [e[:match.start()] + m + e[match.end():]
for m in re.split(r"\s*,\s*", match.group()[1:-1])
for e in envlist]
def bash_expand(s):
"""Usually an envlist is a comma seperated list of pyXX, however tox
supports move advanced usage.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> bash_expand(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])
def _split_out_of_braces(s):
"""Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32']
"""
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part
def expand_factor_conditions(s, env):
"""If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
""
"""
try:
factor, value = re.split(r'\s*\:\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
return ''
def matches_factor_conditions(s, env):
""""Returns True if py{33, 34} expanded is contained in env.name."""
env_labels = set(env.name.split('-'))
labels = set(bash_expand(s))
return bool(labels & env_labels)
def split_on(s, sep=" "):
"""Split s by sep, unless it's inside a quote."""
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
def _strip_speechmarks(t):
for sm in ["'''", '"""', "'", '"']:
if t.startswith(sm) and t.endswith(sm):
return t[len(sm):-len(sm)]
return t
def replace_braces(s, env):
"""Makes tox substitutions to s, with respect to environment env.
Example
-------
>>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
"echo andy passed no posargs"
Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""),
the "{posargs:andy}" is replaced with "andy" (since no posargs were
passed).
"""
def replace(m):
return _replace_match(m, env)
for _ in range(DEPTH):
s = re.sub(r"{[^{}]*}", replace, s)
return s
def _replace_match(m, env):
"""Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions."""
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s)
def _replace_config(s, env):
"""[sectionname]optionname"""
m = re.match(r"\[(.*?)\](.*)", s)
if m:
section, option = m.groups()
expanded = env.config.get(section, option)
return '\n'.join([expand_factor_conditions(e, env)
for e in expanded.split("\n")])
else:
raise ValueError()
def _replace_posargs(s, env):
"posargs:DEFAULT"
e = re.split(r'\s*\:\s*', s)
if e and e[0] == "posargs":
from ctox.main import positional_args
return (" ".join(positional_args(env.options)) or
(e[1] if len(e) > 1 else ""))
else:
raise ValueError()
|
hayd/ctox | ctox/subst.py | _replace_config | python | def _replace_config(s, env):
m = re.match(r"\[(.*?)\](.*)", s)
if m:
section, option = m.groups()
expanded = env.config.get(section, option)
return '\n'.join([expand_factor_conditions(e, env)
for e in expanded.split("\n")])
else:
raise ValueError() | [sectionname]optionname | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L188-L197 | null | """This module contains functions to deal with substitution of tox.ini config
files.
The most useful are bash_expand and replace_braces.
"""
import os
import re
DEPTH = 5
def parse_commands(env):
pass
def parse_envlist(s):
"""Expand the tox.ini's envlist into a fully expanded list.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> parse_envlist(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
# TODO some other substitutions?
return bash_expand(s)
def expand_curlys(s):
"""Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"]
"""
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s])
def _replace_curly(envlist, match):
assert isinstance(envlist, list)
return [e[:match.start()] + m + e[match.end():]
for m in re.split(r"\s*,\s*", match.group()[1:-1])
for e in envlist]
def bash_expand(s):
"""Usually an envlist is a comma seperated list of pyXX, however tox
supports move advanced usage.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> bash_expand(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])
def _split_out_of_braces(s):
"""Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32']
"""
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part
def expand_factor_conditions(s, env):
"""If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
""
"""
try:
factor, value = re.split(r'\s*\:\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
return ''
def matches_factor_conditions(s, env):
""""Returns True if py{33, 34} expanded is contained in env.name."""
env_labels = set(env.name.split('-'))
labels = set(bash_expand(s))
return bool(labels & env_labels)
def split_on(s, sep=" "):
"""Split s by sep, unless it's inside a quote."""
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
def _strip_speechmarks(t):
for sm in ["'''", '"""', "'", '"']:
if t.startswith(sm) and t.endswith(sm):
return t[len(sm):-len(sm)]
return t
def replace_braces(s, env):
"""Makes tox substitutions to s, with respect to environment env.
Example
-------
>>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
"echo andy passed no posargs"
Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""),
the "{posargs:andy}" is replaced with "andy" (since no posargs were
passed).
"""
def replace(m):
return _replace_match(m, env)
for _ in range(DEPTH):
s = re.sub(r"{[^{}]*}", replace, s)
return s
def _replace_match(m, env):
"""Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions."""
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s)
def _replace_envvar(s, _):
"""env:KEY or env:KEY:DEFAULT"""
e = s.split(":")
if len(e) > 3 or len(e) == 1 or e[0] != "env":
raise ValueError()
elif len(e) == 2:
# Note: this can/should raise a KeyError (according to spec).
return os.environ[e[1]]
else: # len(e) == 3
return os.environ.get(e[1], e[2])
def _replace_posargs(s, env):
"posargs:DEFAULT"
e = re.split(r'\s*\:\s*', s)
if e and e[0] == "posargs":
from ctox.main import positional_args
return (" ".join(positional_args(env.options)) or
(e[1] if len(e) > 1 else ""))
else:
raise ValueError()
|
hayd/ctox | ctox/subst.py | _replace_posargs | python | def _replace_posargs(s, env):
"posargs:DEFAULT"
e = re.split(r'\s*\:\s*', s)
if e and e[0] == "posargs":
from ctox.main import positional_args
return (" ".join(positional_args(env.options)) or
(e[1] if len(e) > 1 else ""))
else:
raise ValueError() | posargs:DEFAULT | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L200-L208 | null | """This module contains functions to deal with substitution of tox.ini config
files.
The most useful are bash_expand and replace_braces.
"""
import os
import re
DEPTH = 5
def parse_commands(env):
pass
def parse_envlist(s):
"""Expand the tox.ini's envlist into a fully expanded list.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> parse_envlist(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
# TODO some other substitutions?
return bash_expand(s)
def expand_curlys(s):
"""Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"]
"""
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s])
def _replace_curly(envlist, match):
assert isinstance(envlist, list)
return [e[:match.start()] + m + e[match.end():]
for m in re.split(r"\s*,\s*", match.group()[1:-1])
for e in envlist]
def bash_expand(s):
"""Usually an envlist is a comma seperated list of pyXX, however tox
supports move advanced usage.
Example
-------
>>> s = "{py26,py27}-django{15,16}, py32"
>>> bash_expand(s)
["py26-django15", "py26-django16", "py27-django15", "py27-django16",
"py32"]
"""
return sum([expand_curlys(t) for t in _split_out_of_braces(s)], [])
def _split_out_of_braces(s):
"""Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32']
"""
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part
def expand_factor_conditions(s, env):
"""If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
""
"""
try:
factor, value = re.split(r'\s*\:\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
return ''
def matches_factor_conditions(s, env):
""""Returns True if py{33, 34} expanded is contained in env.name."""
env_labels = set(env.name.split('-'))
labels = set(bash_expand(s))
return bool(labels & env_labels)
def split_on(s, sep=" "):
"""Split s by sep, unless it's inside a quote."""
pattern = '''((?:[^%s"']|"[^"]*"|'[^']*')+)''' % sep
return [_strip_speechmarks(t) for t in re.split(pattern, s)[1::2]]
def _strip_speechmarks(t):
for sm in ["'''", '"""', "'", '"']:
if t.startswith(sm) and t.endswith(sm):
return t[len(sm):-len(sm)]
return t
def replace_braces(s, env):
"""Makes tox substitutions to s, with respect to environment env.
Example
-------
>>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
"echo andy passed no posargs"
Note: first "{env:USER:}" is replaced with os.environ.get("USER", ""),
the "{posargs:andy}" is replaced with "andy" (since no posargs were
passed).
"""
def replace(m):
return _replace_match(m, env)
for _ in range(DEPTH):
s = re.sub(r"{[^{}]*}", replace, s)
return s
def _replace_match(m, env):
"""Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions."""
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s)
def _replace_envvar(s, _):
"""env:KEY or env:KEY:DEFAULT"""
e = s.split(":")
if len(e) > 3 or len(e) == 1 or e[0] != "env":
raise ValueError()
elif len(e) == 2:
# Note: this can/should raise a KeyError (according to spec).
return os.environ[e[1]]
else: # len(e) == 3
return os.environ.get(e[1], e[2])
def _replace_config(s, env):
"""[sectionname]optionname"""
m = re.match(r"\[(.*?)\](.*)", s)
if m:
section, option = m.groups()
expanded = env.config.get(section, option)
return '\n'.join([expand_factor_conditions(e, env)
for e in expanded.split("\n")])
else:
raise ValueError()
|
hayd/ctox | ctox/shell.py | safe_shell_out | python | def safe_shell_out(cmd, verbose=False, **kwargs):
# TODO rename this suppressed_shell_out ?
# TODO this should probably return 1 if there's an error (i.e. vice-versa).
# print("cmd %s" % cmd)
try:
with open(os.devnull, "w") as fnull:
with captured_output():
check_output(cmd, stderr=fnull, **kwargs)
return True
except (CalledProcessError, OSError) as e:
if verbose:
cprint(" Error running command %s" % ' '.join(cmd), 'err')
print(e.output)
return False
except Exception as e:
# TODO no idea
# Can this be if you try and unistall pip? (don't do that)
return False | run cmd and return True if it went ok, False if something went wrong.
Suppress all output. | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/shell.py#L69-L91 | [
"def check_output(*popenargs, **kwargs):\n if 'stdout' in kwargs: # pragma: no cover\n raise ValueError('stdout argument not allowed, '\n 'it will be overridden.')\n process = subprocess.Popen(stdout=subprocess.PIPE,\n *popenargs, **kwargs)\n output, _ = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, cmd,\n output=output)\n return output\n",
"def cprint(message, status=None):\n \"\"\"color printing based on status:\n\n None -> BRIGHT\n 'ok' -> GREEN\n 'err' -> RED\n 'warn' -> YELLOW\n\n \"\"\"\n # TODO use less obscure dict, probably \"error\", \"warn\", \"success\" as keys\n status = {'warn': Fore.YELLOW, 'err': Fore.RED,\n 'ok': Fore.GREEN, None: Style.BRIGHT}[status]\n print(status + message + Style.RESET_ALL)\n"
] | """This module contains light-weight wrappers to subprocess's check_output
and colored printing."""
from colorama import Fore, Style, init
from contextlib import contextmanager
import os
import sys
try:
from StringIO import StringIO
except ImportError: # py3, pragma: no cover
# Note: although this would import in py2, io.StringIO is fussy about bytes
# vs unicode, and this leads to pain.
from io import StringIO
# https://github.com/hayd/pep8radius/blob/master/pep8radius/shell.py
try:
from subprocess import STDOUT, check_output, CalledProcessError
except ImportError: # pragma: no cover
# python 2.6 doesn't include check_output
# monkey patch it in!
import subprocess
STDOUT = subprocess.STDOUT
def check_output(*popenargs, **kwargs):
if 'stdout' in kwargs: # pragma: no cover
raise ValueError('stdout argument not allowed, '
'it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,
*popenargs, **kwargs)
output, _ = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd,
output=output)
return output
subprocess.check_output = check_output
# overwrite CalledProcessError due to `output`
# keyword not being available (in 2.6)
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (
self.cmd, self.returncode)
subprocess.CalledProcessError = CalledProcessError
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
def shell_out(cmd, stderr=STDOUT, cwd=None):
"""Friendlier version of check_output."""
if cwd is None:
from os import getcwd
cwd = getcwd() # TODO do I need to normalize this on Windows
out = check_output(cmd, cwd=cwd, stderr=stderr, universal_newlines=True)
return _clean_output(out)
def _clean_output(out):
try:
out = out.decode('utf-8')
except AttributeError: # python3, pragma: no cover
pass
return out.strip()
def cprint(message, status=None):
"""color printing based on status:
None -> BRIGHT
'ok' -> GREEN
'err' -> RED
'warn' -> YELLOW
"""
# TODO use less obscure dict, probably "error", "warn", "success" as keys
status = {'warn': Fore.YELLOW, 'err': Fore.RED,
'ok': Fore.GREEN, None: Style.BRIGHT}[status]
print(status + message + Style.RESET_ALL)
|
hayd/ctox | ctox/shell.py | cprint | python | def cprint(message, status=None):
# TODO use less obscure dict, probably "error", "warn", "success" as keys
status = {'warn': Fore.YELLOW, 'err': Fore.RED,
'ok': Fore.GREEN, None: Style.BRIGHT}[status]
print(status + message + Style.RESET_ALL) | color printing based on status:
None -> BRIGHT
'ok' -> GREEN
'err' -> RED
'warn' -> YELLOW | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/shell.py#L111-L123 | null | """This module contains light-weight wrappers to subprocess's check_output
and colored printing."""
from colorama import Fore, Style, init
from contextlib import contextmanager
import os
import sys
try:
from StringIO import StringIO
except ImportError: # py3, pragma: no cover
# Note: although this would import in py2, io.StringIO is fussy about bytes
# vs unicode, and this leads to pain.
from io import StringIO
# https://github.com/hayd/pep8radius/blob/master/pep8radius/shell.py
try:
from subprocess import STDOUT, check_output, CalledProcessError
except ImportError: # pragma: no cover
# python 2.6 doesn't include check_output
# monkey patch it in!
import subprocess
STDOUT = subprocess.STDOUT
def check_output(*popenargs, **kwargs):
if 'stdout' in kwargs: # pragma: no cover
raise ValueError('stdout argument not allowed, '
'it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,
*popenargs, **kwargs)
output, _ = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd,
output=output)
return output
subprocess.check_output = check_output
# overwrite CalledProcessError due to `output`
# keyword not being available (in 2.6)
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (
self.cmd, self.returncode)
subprocess.CalledProcessError = CalledProcessError
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
def safe_shell_out(cmd, verbose=False, **kwargs):
"""run cmd and return True if it went ok, False if something went wrong.
Suppress all output.
"""
# TODO rename this suppressed_shell_out ?
# TODO this should probably return 1 if there's an error (i.e. vice-versa).
# print("cmd %s" % cmd)
try:
with open(os.devnull, "w") as fnull:
with captured_output():
check_output(cmd, stderr=fnull, **kwargs)
return True
except (CalledProcessError, OSError) as e:
if verbose:
cprint(" Error running command %s" % ' '.join(cmd), 'err')
print(e.output)
return False
except Exception as e:
# TODO no idea
# Can this be if you try and unistall pip? (don't do that)
return False
def shell_out(cmd, stderr=STDOUT, cwd=None):
"""Friendlier version of check_output."""
if cwd is None:
from os import getcwd
cwd = getcwd() # TODO do I need to normalize this on Windows
out = check_output(cmd, cwd=cwd, stderr=stderr, universal_newlines=True)
return _clean_output(out)
def _clean_output(out):
try:
out = out.decode('utf-8')
except AttributeError: # python3, pragma: no cover
pass
return out.strip()
|
hayd/ctox | ctox/pkg.py | prev_deps | python | def prev_deps(env):
# TODO something more clever.
if not os.path.isfile(env.envctoxfile):
return []
with open(env.envctoxfile) as f:
return f.read().split() | Naively gets the dependancies from the last time ctox was run. | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/pkg.py#L79-L86 | null | """This module contains methods for installing and removing packages.
These are mostly lightweight wrappers shelling out to the conda and then
env.pip. Note that throughout this module the env variable is understood
to be a ctox.main.Env instance.
"""
import os
from subprocess import Popen, STDOUT
from ctox.shell import safe_shell_out, CalledProcessError, shell_out, cprint
def env_exists(env):
return os.path.isdir(os.path.join(env.envdir, "conda-meta"))
def create_env(env, force_remove=False):
# TODO cache cache cache!
# TODO potentially we could keep a clean env around for each basepython.
if force_remove:
shell_out(['conda', 'remove', '-p', env.name, '--all',
'--yes', '--quiet'],
cwd=env.toxdir)
shell_out(['conda', 'create', '-p', env.name,
'python=%s' % env.py_version, '--yes', '--quiet'],
cwd=env.toxdir)
def install(env, lib):
lib_ = lib.replace('==', '=') # obviously conda syntax is different
success = (safe_shell_out(["conda", "install", lib_, "-p", env.name,
"--yes", "--quiet"], cwd=env.toxdir) or
safe_shell_out([env.pip, "install",
"--quiet", lib], cwd=env.toxdir))
if success:
with open(env.envctoxfile, 'a') as f:
f.write(" " + lib)
else:
cprint(" Unable to install %s." % lib, 'err')
return success
def uninstall(env, lib):
lib_ = lib.replace('==', '=') # obviously conda syntax is different
success = (safe_shell_out(["conda", "remove", lib_, "-p", env.name,
"--yes", "--quiet"], cwd=env.toxdir) or
safe_shell_out([env.pip, "uninstall", lib,
"--yes", "--quiet"], cwd=env.toxdir))
return success
def install_deps(env):
print("installing deps...")
try:
# TODO can we do this in one pass?
return all(install(env=env, lib=d) for d in env.deps)
except (OSError, CalledProcessError) as e:
import pdb
pdb.set_trace()
def uninstall_deps(env, deps):
# TODO actually use this.
if deps:
print("removing previous deps...")
success = all(uninstall(env=env, lib=d) for d in deps[1:])
if (not success) or deps[0] != "pip":
cprint(" Environment dependancies mismatch, rebuilding.", 'err')
create_env(env, force_remove=True)
with open(env.envctoxfile, 'w') as f:
f.write("")
def make_dist(toxinidir, toxdir, package):
"""zip up the package into the toxdir."""
dist = os.path.join(toxdir, "dist")
# Suppress warnings.
success = safe_shell_out(["python", "setup.py", "sdist", "--quiet",
"--formats=zip", "--dist-dir", dist],
cwd=toxinidir)
if success:
return os.path.join(dist, package + ".zip")
def install_dist(env):
# TODO don't rebuild if not changed?
# At the moment entire dir is wiped, really we want to update, which would
# allow us to reuse previously built files (e.g. pyc) if unchanged...
# This is usually done in the setup.py into a directory...
print("installing...")
return safe_shell_out([env.pip, "install", env.package_zipped,
"--no-deps", "--upgrade",
# "-t", env.envdistdir,
],
cwd=env.toxdir)
# from zipfile import ZipFile
# with ZipFile(env.package_zipped, "r") as z:
# z.extractall(env.envdistdir)
# return safe_shell_out([env.python, "setup.py", "install"], # --no-deps
# cwd=env.envpackagedir)
def package_name(toxinidir):
return '-'.join(shell_out(["python", "setup.py", "--name", "--version"],
cwd=toxinidir).split())
def run_commands(env):
# Note: it's important all these tests are run, no short-circuiting.
failing = any([run_one_command(env, c[:]) for c in env.commands])
return failing
def run_one_command(env, command):
# TODO move large part of this function to subst.parse_command.
abbr_cmd, cmd, command = print_pretty_command(env, command)
# Skip comments.
line = " ".join(command).strip()
if not line or line.startswith("#"):
return 0
# Ensure the command is already in envbindir or in the whitelist, correct
# it if it's not in the whitelist (it'll OSError if it's not found).
if cmd not in env.whitelist and not cmd.startswith(env.envbindir):
command[0] = os.path.join(env.envbindir, cmd)
# Run the command!
try:
p = Popen(command, cwd=env.changedir, stderr=STDOUT)
p.communicate()
return p.returncode
except OSError as e:
# Command not found locally (or not in whitelist).
cprint(" OSError: %s" % e.args[1], 'err')
cprint(" Is %s in dependancies or whitelist_externals?\n"
% abbr_cmd,
'warn')
return 1
def print_pretty_command(env, command):
"""This is a hack for prettier printing.
Rather than "{envpython} foo.py" we print "python foo.py".
"""
cmd = abbr_cmd = command[0]
if cmd.startswith(env.envbindir):
abbr_cmd = os.path.relpath(cmd, env.envbindir)
if abbr_cmd == ".":
# TODO are there more edge cases?
abbr_cmd = cmd
command[0] = abbr_cmd
print('(%s)$ %s' % (env.name, ' '.join(['"%s"' % c if " " in c
else c
for c in command])))
command[0] = cmd
return abbr_cmd, cmd, command
|
hayd/ctox | ctox/pkg.py | make_dist | python | def make_dist(toxinidir, toxdir, package):
dist = os.path.join(toxdir, "dist")
# Suppress warnings.
success = safe_shell_out(["python", "setup.py", "sdist", "--quiet",
"--formats=zip", "--dist-dir", dist],
cwd=toxinidir)
if success:
return os.path.join(dist, package + ".zip") | zip up the package into the toxdir. | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/pkg.py#L89-L97 | [
"def safe_shell_out(cmd, verbose=False, **kwargs):\n \"\"\"run cmd and return True if it went ok, False if something went wrong.\n\n Suppress all output.\n\n \"\"\"\n # TODO rename this suppressed_shell_out ?\n # TODO this should probably return 1 if there's an error (i.e. vice-versa).\n # print(\"cmd %s\" % cmd)\n try:\n with open(os.devnull, \"w\") as fnull:\n with captured_output():\n check_output(cmd, stderr=fnull, **kwargs)\n return True\n except (CalledProcessError, OSError) as e:\n if verbose:\n cprint(\" Error running command %s\" % ' '.join(cmd), 'err')\n print(e.output)\n return False\n except Exception as e:\n # TODO no idea\n # Can this be if you try and unistall pip? (don't do that)\n return False\n"
] | """This module contains methods for installing and removing packages.
These are mostly lightweight wrappers shelling out to the conda and then
env.pip. Note that throughout this module the env variable is understood
to be a ctox.main.Env instance.
"""
import os
from subprocess import Popen, STDOUT
from ctox.shell import safe_shell_out, CalledProcessError, shell_out, cprint
def env_exists(env):
return os.path.isdir(os.path.join(env.envdir, "conda-meta"))
def create_env(env, force_remove=False):
# TODO cache cache cache!
# TODO potentially we could keep a clean env around for each basepython.
if force_remove:
shell_out(['conda', 'remove', '-p', env.name, '--all',
'--yes', '--quiet'],
cwd=env.toxdir)
shell_out(['conda', 'create', '-p', env.name,
'python=%s' % env.py_version, '--yes', '--quiet'],
cwd=env.toxdir)
def install(env, lib):
lib_ = lib.replace('==', '=') # obviously conda syntax is different
success = (safe_shell_out(["conda", "install", lib_, "-p", env.name,
"--yes", "--quiet"], cwd=env.toxdir) or
safe_shell_out([env.pip, "install",
"--quiet", lib], cwd=env.toxdir))
if success:
with open(env.envctoxfile, 'a') as f:
f.write(" " + lib)
else:
cprint(" Unable to install %s." % lib, 'err')
return success
def uninstall(env, lib):
lib_ = lib.replace('==', '=') # obviously conda syntax is different
success = (safe_shell_out(["conda", "remove", lib_, "-p", env.name,
"--yes", "--quiet"], cwd=env.toxdir) or
safe_shell_out([env.pip, "uninstall", lib,
"--yes", "--quiet"], cwd=env.toxdir))
return success
def install_deps(env):
print("installing deps...")
try:
# TODO can we do this in one pass?
return all(install(env=env, lib=d) for d in env.deps)
except (OSError, CalledProcessError) as e:
import pdb
pdb.set_trace()
def uninstall_deps(env, deps):
# TODO actually use this.
if deps:
print("removing previous deps...")
success = all(uninstall(env=env, lib=d) for d in deps[1:])
if (not success) or deps[0] != "pip":
cprint(" Environment dependancies mismatch, rebuilding.", 'err')
create_env(env, force_remove=True)
with open(env.envctoxfile, 'w') as f:
f.write("")
def prev_deps(env):
"""Naively gets the dependancies from the last time ctox was run."""
# TODO something more clever.
if not os.path.isfile(env.envctoxfile):
return []
with open(env.envctoxfile) as f:
return f.read().split()
def install_dist(env):
# TODO don't rebuild if not changed?
# At the moment entire dir is wiped, really we want to update, which would
# allow us to reuse previously built files (e.g. pyc) if unchanged...
# This is usually done in the setup.py into a directory...
print("installing...")
return safe_shell_out([env.pip, "install", env.package_zipped,
"--no-deps", "--upgrade",
# "-t", env.envdistdir,
],
cwd=env.toxdir)
# from zipfile import ZipFile
# with ZipFile(env.package_zipped, "r") as z:
# z.extractall(env.envdistdir)
# return safe_shell_out([env.python, "setup.py", "install"], # --no-deps
# cwd=env.envpackagedir)
def package_name(toxinidir):
return '-'.join(shell_out(["python", "setup.py", "--name", "--version"],
cwd=toxinidir).split())
def run_commands(env):
# Note: it's important all these tests are run, no short-circuiting.
failing = any([run_one_command(env, c[:]) for c in env.commands])
return failing
def run_one_command(env, command):
# TODO move large part of this function to subst.parse_command.
abbr_cmd, cmd, command = print_pretty_command(env, command)
# Skip comments.
line = " ".join(command).strip()
if not line or line.startswith("#"):
return 0
# Ensure the command is already in envbindir or in the whitelist, correct
# it if it's not in the whitelist (it'll OSError if it's not found).
if cmd not in env.whitelist and not cmd.startswith(env.envbindir):
command[0] = os.path.join(env.envbindir, cmd)
# Run the command!
try:
p = Popen(command, cwd=env.changedir, stderr=STDOUT)
p.communicate()
return p.returncode
except OSError as e:
# Command not found locally (or not in whitelist).
cprint(" OSError: %s" % e.args[1], 'err')
cprint(" Is %s in dependancies or whitelist_externals?\n"
% abbr_cmd,
'warn')
return 1
def print_pretty_command(env, command):
"""This is a hack for prettier printing.
Rather than "{envpython} foo.py" we print "python foo.py".
"""
cmd = abbr_cmd = command[0]
if cmd.startswith(env.envbindir):
abbr_cmd = os.path.relpath(cmd, env.envbindir)
if abbr_cmd == ".":
# TODO are there more edge cases?
abbr_cmd = cmd
command[0] = abbr_cmd
print('(%s)$ %s' % (env.name, ' '.join(['"%s"' % c if " " in c
else c
for c in command])))
command[0] = cmd
return abbr_cmd, cmd, command
|
hayd/ctox | ctox/pkg.py | print_pretty_command | python | def print_pretty_command(env, command):
cmd = abbr_cmd = command[0]
if cmd.startswith(env.envbindir):
abbr_cmd = os.path.relpath(cmd, env.envbindir)
if abbr_cmd == ".":
# TODO are there more edge cases?
abbr_cmd = cmd
command[0] = abbr_cmd
print('(%s)$ %s' % (env.name, ' '.join(['"%s"' % c if " " in c
else c
for c in command])))
command[0] = cmd
return abbr_cmd, cmd, command | This is a hack for prettier printing.
Rather than "{envpython} foo.py" we print "python foo.py". | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/pkg.py#L158-L175 | null | """This module contains methods for installing and removing packages.
These are mostly lightweight wrappers shelling out to the conda and then
env.pip. Note that throughout this module the env variable is understood
to be a ctox.main.Env instance.
"""
import os
from subprocess import Popen, STDOUT
from ctox.shell import safe_shell_out, CalledProcessError, shell_out, cprint
def env_exists(env):
return os.path.isdir(os.path.join(env.envdir, "conda-meta"))
def create_env(env, force_remove=False):
# TODO cache cache cache!
# TODO potentially we could keep a clean env around for each basepython.
if force_remove:
shell_out(['conda', 'remove', '-p', env.name, '--all',
'--yes', '--quiet'],
cwd=env.toxdir)
shell_out(['conda', 'create', '-p', env.name,
'python=%s' % env.py_version, '--yes', '--quiet'],
cwd=env.toxdir)
def install(env, lib):
lib_ = lib.replace('==', '=') # obviously conda syntax is different
success = (safe_shell_out(["conda", "install", lib_, "-p", env.name,
"--yes", "--quiet"], cwd=env.toxdir) or
safe_shell_out([env.pip, "install",
"--quiet", lib], cwd=env.toxdir))
if success:
with open(env.envctoxfile, 'a') as f:
f.write(" " + lib)
else:
cprint(" Unable to install %s." % lib, 'err')
return success
def uninstall(env, lib):
lib_ = lib.replace('==', '=') # obviously conda syntax is different
success = (safe_shell_out(["conda", "remove", lib_, "-p", env.name,
"--yes", "--quiet"], cwd=env.toxdir) or
safe_shell_out([env.pip, "uninstall", lib,
"--yes", "--quiet"], cwd=env.toxdir))
return success
def install_deps(env):
print("installing deps...")
try:
# TODO can we do this in one pass?
return all(install(env=env, lib=d) for d in env.deps)
except (OSError, CalledProcessError) as e:
import pdb
pdb.set_trace()
def uninstall_deps(env, deps):
# TODO actually use this.
if deps:
print("removing previous deps...")
success = all(uninstall(env=env, lib=d) for d in deps[1:])
if (not success) or deps[0] != "pip":
cprint(" Environment dependancies mismatch, rebuilding.", 'err')
create_env(env, force_remove=True)
with open(env.envctoxfile, 'w') as f:
f.write("")
def prev_deps(env):
"""Naively gets the dependancies from the last time ctox was run."""
# TODO something more clever.
if not os.path.isfile(env.envctoxfile):
return []
with open(env.envctoxfile) as f:
return f.read().split()
def make_dist(toxinidir, toxdir, package):
"""zip up the package into the toxdir."""
dist = os.path.join(toxdir, "dist")
# Suppress warnings.
success = safe_shell_out(["python", "setup.py", "sdist", "--quiet",
"--formats=zip", "--dist-dir", dist],
cwd=toxinidir)
if success:
return os.path.join(dist, package + ".zip")
def install_dist(env):
# TODO don't rebuild if not changed?
# At the moment entire dir is wiped, really we want to update, which would
# allow us to reuse previously built files (e.g. pyc) if unchanged...
# This is usually done in the setup.py into a directory...
print("installing...")
return safe_shell_out([env.pip, "install", env.package_zipped,
"--no-deps", "--upgrade",
# "-t", env.envdistdir,
],
cwd=env.toxdir)
# from zipfile import ZipFile
# with ZipFile(env.package_zipped, "r") as z:
# z.extractall(env.envdistdir)
# return safe_shell_out([env.python, "setup.py", "install"], # --no-deps
# cwd=env.envpackagedir)
def package_name(toxinidir):
return '-'.join(shell_out(["python", "setup.py", "--name", "--version"],
cwd=toxinidir).split())
def run_commands(env):
# Note: it's important all these tests are run, no short-circuiting.
failing = any([run_one_command(env, c[:]) for c in env.commands])
return failing
def run_one_command(env, command):
# TODO move large part of this function to subst.parse_command.
abbr_cmd, cmd, command = print_pretty_command(env, command)
# Skip comments.
line = " ".join(command).strip()
if not line or line.startswith("#"):
return 0
# Ensure the command is already in envbindir or in the whitelist, correct
# it if it's not in the whitelist (it'll OSError if it's not found).
if cmd not in env.whitelist and not cmd.startswith(env.envbindir):
command[0] = os.path.join(env.envbindir, cmd)
# Run the command!
try:
p = Popen(command, cwd=env.changedir, stderr=STDOUT)
p.communicate()
return p.returncode
except OSError as e:
# Command not found locally (or not in whitelist).
cprint(" OSError: %s" % e.args[1], 'err')
cprint(" Is %s in dependancies or whitelist_externals?\n"
% abbr_cmd,
'warn')
return 1
|
hayd/ctox | ctox/config.py | get_changedir | python | def get_changedir(env):
"changedir = {envdir}"
from ctox.subst import replace_braces
changedir = _get_env_maybe(env, 'testenv', 'changedir')
if changedir:
return replace_braces(changedir, env)
else:
return env.toxinidir | changedir = {envdir} | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/config.py#L41-L48 | [
"def _get_env_maybe(env, section, option):\n return (_get(env.config, '%s:%s' % (section, env.name), option) or\n _get(env.config, section, option))\n",
"def replace_braces(s, env):\n \"\"\"Makes tox substitutions to s, with respect to environment env.\n\n Example\n -------\n >>> replace_braces(\"echo {posargs:{env:USER:} passed no posargs}\")\n \"echo andy passed no posargs\"\n\n Note: first \"{env:USER:}\" is replaced with os.environ.get(\"USER\", \"\"),\n the \"{posargs:andy}\" is replaced with \"andy\" (since no posargs were\n passed).\n\n \"\"\"\n def replace(m):\n return _replace_match(m, env)\n for _ in range(DEPTH):\n s = re.sub(r\"{[^{}]*}\", replace, s)\n return s\n"
] | """This module contains the config functions for reading and parsing the
tox.ini file.
Note: Substitutions functions can be found in subst.py.
"""
import re
try:
from configparser import ConfigParser as SafeConfigParser, NoSectionError, NoOptionError
except ImportError: # py2, pragma: no cover
from ConfigParser import SafeConfigParser, NoSectionError, NoOptionError
def read_config(toxinifile):
config = SafeConfigParser()
config.read(toxinifile)
return config
def _get(config, *args):
try:
# TODO this could be a while contains braces...?
# or that could be in replace_braces itself
return config.get(*args).strip()
except (NoSectionError, NoOptionError):
# TODO should this raise??
return ''
def _get_env_maybe(env, section, option):
return (_get(env.config, '%s:%s' % (section, env.name), option) or
_get(env.config, section, option))
def get_whitelist(config):
return _get(config, 'tox', 'whitelist_externals').split("\n")
def get_envlist(config):
from ctox.subst import parse_envlist
return parse_envlist(_get(config, 'tox', 'envlist'))
def get_deps(env):
from ctox.subst import replace_braces, expand_factor_conditions
env_deps = _get_env_maybe(env, "testenv", "deps").strip()
if env_deps.startswith('-r'):
requirements_txt = replace_braces(env_deps[2:].strip(), env)
with open(requirements_txt) as f:
env_deps = f.read().strip()
env_deps = [replace_braces(expand_factor_conditions(d, env),
env)
for d in env_deps.split("\n")
if d]
env_deps = [d for d in sum((s.split() for s in env_deps), [])
if not re.match("(pip|conda)([=<>!]|$)", d)]
return ["pip"] + env_deps
def get_commands(env):
from ctox.subst import split_on, replace_braces
# TODO allow for running over new lines? Is this correct at all?
global_commands = _get(env.config, 'testenv', 'commands')
env_commands = _get(env.config, 'testenv:%s' % env.name, 'commands')
commands = (env_commands or global_commands)
return [split_on(cmd)
for cmd in split_on(replace_braces(commands, env), '\n')
if cmd]
|
hayd/ctox | ctox/main.py | main | python | def main(arguments, toxinidir=None):
"ctox: tox with conda."
try: # pragma: no cover
# Exit on broken pipe.
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
import sys
sys.exit(ctox(arguments, toxinidir))
except CalledProcessError as c:
print(c.output)
return 1
except NotImplementedError as e:
gh = "https://github.com/hayd/ctox/issues"
from colorama import Style
cprint(Style.BRIGHT + str(e), 'err')
cprint("If this is a valid tox.ini substitution, please open an issue on\n"
"github and request support: %s." % gh, 'warn')
return 1
except KeyboardInterrupt: # pragma: no cover
return 1 | ctox: tox with conda. | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/main.py#L159-L186 | [
"def cprint(message, status=None):\n \"\"\"color printing based on status:\n\n None -> BRIGHT\n 'ok' -> GREEN\n 'err' -> RED\n 'warn' -> YELLOW\n\n \"\"\"\n # TODO use less obscure dict, probably \"error\", \"warn\", \"success\" as keys\n status = {'warn': Fore.YELLOW, 'err': Fore.RED,\n 'ok': Fore.GREEN, None: Style.BRIGHT}[status]\n print(status + message + Style.RESET_ALL)\n",
"def ctox(arguments, toxinidir):\n \"\"\"Sets up conda environments, and sets up and runs each environment based\n on the project's tox.ini configuration file.\n\n Returns 1 if either the build or running the commands failed or 0 if\n all commmands ran successfully.\n\n \"\"\"\n if arguments is None:\n arguments = []\n if toxinidir is None:\n toxinidir = os.getcwd()\n\n args, options = parse_args(arguments)\n\n if args.version:\n print(version)\n return 0\n\n # if no conda trigger OSError\n try:\n with open(os.devnull, \"w\") as fnull:\n check_output(['conda', '--version'], stderr=fnull)\n except OSError:\n cprint(\"conda not found, you need to install it to use ctox.\\n\"\n \"The recommended way is to download miniconda,\\n\"\n \"Do not install conda via pip.\", 'err')\n return 1\n\n toxinifile = os.path.join(toxinidir, \"tox.ini\")\n\n from ctox.config import read_config, get_envlist\n config = read_config(toxinifile)\n if args.e == 'ALL':\n envlist = get_envlist(config)\n else:\n envlist = args.e.split(',')\n\n # TODO configure with option\n toxdir = os.path.join(toxinidir, \".tox\")\n\n # create a zip file for the project\n from ctox.pkg import make_dist, package_name\n cprint(\"GLOB sdist-make: %s\" % os.path.join(toxinidir, \"setup.py\"))\n package = package_name(toxinidir)\n if not make_dist(toxinidir, toxdir, package):\n cprint(\" setup.py sdist failed\", 'err')\n return 1\n\n # setup each environment and run ctox\n failing = {}\n for env_name in envlist:\n env = Env(name=env_name, config=config, options=options,\n toxdir=toxdir, toxinidir=toxinidir, package=package)\n failing[env_name] = env.ctox()\n\n # print summary of the outcomes of ctox for each environment\n cprint('Summary')\n print(\"-\" * 23)\n for env_name in envlist:\n n = failing[env_name]\n outcome = ('succeeded', 'failed', 'skipped')[n]\n status = ('ok', 'err', 'warn')[n]\n cprint(\"%s commands %s\" % (env_name, outcome), status)\n\n return any(1 == v for v in failing.values())\n"
] | """This module defines the CLI for tox via main and ctox functions.
The Env class create the environment specific methods.
"""
import os
from ctox.shell import check_output, CalledProcessError # TODO remove?
from ctox.shell import cprint
__version__ = version = '0.1.3'
SUPPORTED_ENVS = ('py26', 'py27', 'py33', 'py34', 'py35')
class Env(object):
"""A conda environment."""
# TODO it's tempting to remove all but the tox variables as attributes
# i.e. call out to pkg or config functions rather than dummy methods,
# make the config and options private and have Env.ctox a function again.
# This would makes the _replace_match substitution a little cleaner.
def __init__(self, name, config, options, toxdir, toxinidir, package):
self.config = config
self.options = options
self.name = name
self.toxdir = toxdir
self.toxinidir = toxinidir
self.envdir = os.path.join(toxdir, self.name)
self.distdir = os.path.join(self.toxdir, "dist")
self.envdistdir = os.path.join(self.envdir, "dist")
self.envctoxfile = os.path.join(self.envdir, "ctox")
self.envbindir = os.path.join(self.envdir, "bin")
self.conda = os.path.join(self.envbindir, "conda")
self.pip = os.path.join(self.envbindir, "pip")
self.python = os.path.join(self.envbindir, "python")
self.envpython = self.python
# TODO make this less of a hack
# perhaps it should also be from basepython in config
# should we use basepython as the variable name
self.py_version = '.'.join(self.name[2:4]) # e.g. "2.7"
# TODO think if package is correct, atm it's name + version
# perhaps there is a proper tox name for this?
self.package = package
self.package_zipped = os.path.join(self.distdir,
self.package + ".zip")
self.envpackagedir = os.path.join(self.envdistdir, package)
from ctox.config import (
get_commands, get_deps, get_whitelist, get_changedir)
self.changedir = get_changedir(self)
# TODO remove these as attributes and call them directly
self.whitelist = get_whitelist(self.config)
self.deps = get_deps(self)
self.commands = get_commands(self)
def ctox(self):
"""Main method for the environment.
Parse the tox.ini config, install the dependancies and run the
commands. The output of the commands is printed.
Returns 0 if they ran successfully, 1 if there was an error
(either in setup or whilst running the commands), 2 if the build
was skipped.
"""
# TODO make this less of a hack e.g. using basepython from config
# if it exists (and use an attribute directly).
if self.name[:4] not in SUPPORTED_ENVS:
from colorama import Style
cprint(Style.BRIGHT +
"Skipping unsupported python version %s\n" % self.name,
'warn')
return 2
# TODO don't remove env if there's a dependancy mis-match
# rather "clean" it to the empty state (the hope being to keep
# the dist build around - so not all files need to be rebuilt)
# TODO extract this as a method (for readability)
if not self.env_exists() or self.reusableable():
cprint("%s create: %s" % (self.name, self.envdir))
self.create_env(force_remove=True)
cprint("%s installdeps: %s" % (self.name, ', '.join(self.deps)))
if not self.install_deps():
cprint(" deps installation failed, aborted.\n", 'err')
return 1
else:
cprint("%s cached (deps unchanged): %s" % (self.name, self.envdir))
# install the project from the zipped file
# TODO think more carefully about where it should be installed
# specifically we want to be able this to include the test files (which
# are not always unpacked when installed so as to run the tests there)
# if there are build files (e.g. cython) then tests must run where
# the build was. Also, reinstalling should not overwrite the builds
# e.g. setup.py will skip rebuilding cython files if they are unchanged
cprint("%s inst: %s" % (self.name, self.envdistdir))
if not self.install_dist():
cprint(" install failed.\n", 'err')
return 1
cprint("%s runtests" % self.name)
# return False if all commands were successfully run
# otherwise returns True if at least one command exited badly
return self.run_commands()
def prev_deps(self):
from ctox.pkg import prev_deps
return prev_deps(self)
def reusableable(self):
"""Can we use the old environment.
If this is True we don't need to
create a new env and re-install the deps.
"""
# TODO better caching !!
# This should really make use of the conda + pip tree rather than just
# rely on a crappy DIY csv. Part of the difficulty is that pip installs
# have to be done seperately to conda, would be great to somehow merge
# cleverly pip freeze? maybe needs to keep a clean env to compare with.
return self.prev_deps() != self.deps
def install_dist(self):
from ctox.pkg import install_dist
return install_dist(self)
def install_deps(self):
from ctox.pkg import install_deps
return install_deps(self)
def uninstall_deps(self, pdeps):
# from ctox.pkg import uninstall_deps
# return uninstall_deps(self, deps=pdeps)
self.create_env(force_remove=True)
def run_commands(self):
from ctox.pkg import run_commands
return run_commands(self)
def env_exists(self):
from ctox.pkg import env_exists
return env_exists(self)
def create_env(self, force_remove=False):
from ctox.pkg import create_env
return create_env(self, force_remove=force_remove)
def parse_args(arguments):
from argparse import ArgumentParser
description = ("Tox but with conda.")
epilog = ("")
parser = ArgumentParser(description=description,
epilog=epilog,
prog='ctox')
parser.add_argument('--version',
help='print version number and exit',
action='store_true')
parser.add_argument('-e',
help='choose environments to run, comma seperated',
default='ALL')
return parser.parse_known_args(arguments)
def ctox(arguments, toxinidir):
"""Sets up conda environments, and sets up and runs each environment based
on the project's tox.ini configuration file.
Returns 1 if either the build or running the commands failed or 0 if
all commmands ran successfully.
"""
if arguments is None:
arguments = []
if toxinidir is None:
toxinidir = os.getcwd()
args, options = parse_args(arguments)
if args.version:
print(version)
return 0
# if no conda trigger OSError
try:
with open(os.devnull, "w") as fnull:
check_output(['conda', '--version'], stderr=fnull)
except OSError:
cprint("conda not found, you need to install it to use ctox.\n"
"The recommended way is to download miniconda,\n"
"Do not install conda via pip.", 'err')
return 1
toxinifile = os.path.join(toxinidir, "tox.ini")
from ctox.config import read_config, get_envlist
config = read_config(toxinifile)
if args.e == 'ALL':
envlist = get_envlist(config)
else:
envlist = args.e.split(',')
# TODO configure with option
toxdir = os.path.join(toxinidir, ".tox")
# create a zip file for the project
from ctox.pkg import make_dist, package_name
cprint("GLOB sdist-make: %s" % os.path.join(toxinidir, "setup.py"))
package = package_name(toxinidir)
if not make_dist(toxinidir, toxdir, package):
cprint(" setup.py sdist failed", 'err')
return 1
# setup each environment and run ctox
failing = {}
for env_name in envlist:
env = Env(name=env_name, config=config, options=options,
toxdir=toxdir, toxinidir=toxinidir, package=package)
failing[env_name] = env.ctox()
# print summary of the outcomes of ctox for each environment
cprint('Summary')
print("-" * 23)
for env_name in envlist:
n = failing[env_name]
outcome = ('succeeded', 'failed', 'skipped')[n]
status = ('ok', 'err', 'warn')[n]
cprint("%s commands %s" % (env_name, outcome), status)
return any(1 == v for v in failing.values())
def positional_args(arguments):
""""Generator for position arguments.
Example
-------
>>> list(positional_args(["arg1", "arg2", "--kwarg"]))
["arg1", "arg2"]
>>> list(positional_args(["--", "arg1", "--kwarg"]))
["arg1", "kwarg"]
"""
# TODO this behaviour probably isn't quite right.
if arguments and arguments[0] == '--':
for a in arguments[1:]:
yield a
else:
for a in arguments:
if a.startswith('-'):
break
yield a
def _main():
"ctox: tox with conda"
from sys import argv
arguments = argv[1:]
toxinidir = os.getcwd()
return main(arguments, toxinidir)
if __name__ == '__main__':
_main()
|
hayd/ctox | ctox/main.py | ctox | python | def ctox(arguments, toxinidir):
if arguments is None:
arguments = []
if toxinidir is None:
toxinidir = os.getcwd()
args, options = parse_args(arguments)
if args.version:
print(version)
return 0
# if no conda trigger OSError
try:
with open(os.devnull, "w") as fnull:
check_output(['conda', '--version'], stderr=fnull)
except OSError:
cprint("conda not found, you need to install it to use ctox.\n"
"The recommended way is to download miniconda,\n"
"Do not install conda via pip.", 'err')
return 1
toxinifile = os.path.join(toxinidir, "tox.ini")
from ctox.config import read_config, get_envlist
config = read_config(toxinifile)
if args.e == 'ALL':
envlist = get_envlist(config)
else:
envlist = args.e.split(',')
# TODO configure with option
toxdir = os.path.join(toxinidir, ".tox")
# create a zip file for the project
from ctox.pkg import make_dist, package_name
cprint("GLOB sdist-make: %s" % os.path.join(toxinidir, "setup.py"))
package = package_name(toxinidir)
if not make_dist(toxinidir, toxdir, package):
cprint(" setup.py sdist failed", 'err')
return 1
# setup each environment and run ctox
failing = {}
for env_name in envlist:
env = Env(name=env_name, config=config, options=options,
toxdir=toxdir, toxinidir=toxinidir, package=package)
failing[env_name] = env.ctox()
# print summary of the outcomes of ctox for each environment
cprint('Summary')
print("-" * 23)
for env_name in envlist:
n = failing[env_name]
outcome = ('succeeded', 'failed', 'skipped')[n]
status = ('ok', 'err', 'warn')[n]
cprint("%s commands %s" % (env_name, outcome), status)
return any(1 == v for v in failing.values()) | Sets up conda environments, and sets up and runs each environment based
on the project's tox.ini configuration file.
Returns 1 if either the build or running the commands failed or 0 if
all commmands ran successfully. | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/main.py#L206-L271 | [
"def check_output(*popenargs, **kwargs):\n if 'stdout' in kwargs: # pragma: no cover\n raise ValueError('stdout argument not allowed, '\n 'it will be overridden.')\n process = subprocess.Popen(stdout=subprocess.PIPE,\n *popenargs, **kwargs)\n output, _ = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, cmd,\n output=output)\n return output\n",
"def parse_args(arguments):\n from argparse import ArgumentParser\n description = (\"Tox but with conda.\")\n epilog = (\"\")\n parser = ArgumentParser(description=description,\n epilog=epilog,\n prog='ctox')\n parser.add_argument('--version',\n help='print version number and exit',\n action='store_true')\n parser.add_argument('-e',\n help='choose environments to run, comma seperated',\n default='ALL')\n\n return parser.parse_known_args(arguments)\n",
"def read_config(toxinifile):\n config = SafeConfigParser()\n config.read(toxinifile)\n\n return config\n",
"def get_envlist(config):\n from ctox.subst import parse_envlist\n return parse_envlist(_get(config, 'tox', 'envlist'))\n",
"def package_name(toxinidir):\n return '-'.join(shell_out([\"python\", \"setup.py\", \"--name\", \"--version\"],\n cwd=toxinidir).split())\n",
"def cprint(message, status=None):\n \"\"\"color printing based on status:\n\n None -> BRIGHT\n 'ok' -> GREEN\n 'err' -> RED\n 'warn' -> YELLOW\n\n \"\"\"\n # TODO use less obscure dict, probably \"error\", \"warn\", \"success\" as keys\n status = {'warn': Fore.YELLOW, 'err': Fore.RED,\n 'ok': Fore.GREEN, None: Style.BRIGHT}[status]\n print(status + message + Style.RESET_ALL)\n",
"def make_dist(toxinidir, toxdir, package):\n \"\"\"zip up the package into the toxdir.\"\"\"\n dist = os.path.join(toxdir, \"dist\")\n # Suppress warnings.\n success = safe_shell_out([\"python\", \"setup.py\", \"sdist\", \"--quiet\",\n \"--formats=zip\", \"--dist-dir\", dist],\n cwd=toxinidir)\n if success:\n return os.path.join(dist, package + \".zip\")\n",
"def ctox(self):\n \"\"\"Main method for the environment.\n\n Parse the tox.ini config, install the dependancies and run the\n commands. The output of the commands is printed.\n\n Returns 0 if they ran successfully, 1 if there was an error\n (either in setup or whilst running the commands), 2 if the build\n was skipped.\n\n \"\"\"\n # TODO make this less of a hack e.g. using basepython from config\n # if it exists (and use an attribute directly).\n if self.name[:4] not in SUPPORTED_ENVS:\n from colorama import Style\n cprint(Style.BRIGHT +\n \"Skipping unsupported python version %s\\n\" % self.name,\n 'warn')\n return 2\n\n # TODO don't remove env if there's a dependancy mis-match\n # rather \"clean\" it to the empty state (the hope being to keep\n # the dist build around - so not all files need to be rebuilt)\n # TODO extract this as a method (for readability)\n if not self.env_exists() or self.reusableable():\n cprint(\"%s create: %s\" % (self.name, self.envdir))\n self.create_env(force_remove=True)\n\n cprint(\"%s installdeps: %s\" % (self.name, ', '.join(self.deps)))\n if not self.install_deps():\n cprint(\" deps installation failed, aborted.\\n\", 'err')\n return 1\n else:\n cprint(\"%s cached (deps unchanged): %s\" % (self.name, self.envdir))\n\n # install the project from the zipped file\n # TODO think more carefully about where it should be installed\n # specifically we want to be able this to include the test files (which\n # are not always unpacked when installed so as to run the tests there)\n # if there are build files (e.g. cython) then tests must run where\n # the build was. Also, reinstalling should not overwrite the builds\n # e.g. 
setup.py will skip rebuilding cython files if they are unchanged\n cprint(\"%s inst: %s\" % (self.name, self.envdistdir))\n if not self.install_dist():\n cprint(\" install failed.\\n\", 'err')\n return 1\n\n cprint(\"%s runtests\" % self.name)\n # return False if all commands were successfully run\n # otherwise returns True if at least one command exited badly\n return self.run_commands()\n"
] | """This module defines the CLI for tox via main and ctox functions.
The Env class create the environment specific methods.
"""
import os
from ctox.shell import check_output, CalledProcessError # TODO remove?
from ctox.shell import cprint
__version__ = version = '0.1.3'
SUPPORTED_ENVS = ('py26', 'py27', 'py33', 'py34', 'py35')
class Env(object):
"""A conda environment."""
# TODO it's tempting to remove all but the tox variables as attributes
# i.e. call out to pkg or config functions rather than dummy methods,
# make the config and options private and have Env.ctox a function again.
# This would makes the _replace_match substitution a little cleaner.
def __init__(self, name, config, options, toxdir, toxinidir, package):
self.config = config
self.options = options
self.name = name
self.toxdir = toxdir
self.toxinidir = toxinidir
self.envdir = os.path.join(toxdir, self.name)
self.distdir = os.path.join(self.toxdir, "dist")
self.envdistdir = os.path.join(self.envdir, "dist")
self.envctoxfile = os.path.join(self.envdir, "ctox")
self.envbindir = os.path.join(self.envdir, "bin")
self.conda = os.path.join(self.envbindir, "conda")
self.pip = os.path.join(self.envbindir, "pip")
self.python = os.path.join(self.envbindir, "python")
self.envpython = self.python
# TODO make this less of a hack
# perhaps it should also be from basepython in config
# should we use basepython as the variable name
self.py_version = '.'.join(self.name[2:4]) # e.g. "2.7"
# TODO think if package is correct, atm it's name + version
# perhaps there is a proper tox name for this?
self.package = package
self.package_zipped = os.path.join(self.distdir,
self.package + ".zip")
self.envpackagedir = os.path.join(self.envdistdir, package)
from ctox.config import (
get_commands, get_deps, get_whitelist, get_changedir)
self.changedir = get_changedir(self)
# TODO remove these as attributes and call them directly
self.whitelist = get_whitelist(self.config)
self.deps = get_deps(self)
self.commands = get_commands(self)
def ctox(self):
"""Main method for the environment.
Parse the tox.ini config, install the dependancies and run the
commands. The output of the commands is printed.
Returns 0 if they ran successfully, 1 if there was an error
(either in setup or whilst running the commands), 2 if the build
was skipped.
"""
# TODO make this less of a hack e.g. using basepython from config
# if it exists (and use an attribute directly).
if self.name[:4] not in SUPPORTED_ENVS:
from colorama import Style
cprint(Style.BRIGHT +
"Skipping unsupported python version %s\n" % self.name,
'warn')
return 2
# TODO don't remove env if there's a dependancy mis-match
# rather "clean" it to the empty state (the hope being to keep
# the dist build around - so not all files need to be rebuilt)
# TODO extract this as a method (for readability)
if not self.env_exists() or self.reusableable():
cprint("%s create: %s" % (self.name, self.envdir))
self.create_env(force_remove=True)
cprint("%s installdeps: %s" % (self.name, ', '.join(self.deps)))
if not self.install_deps():
cprint(" deps installation failed, aborted.\n", 'err')
return 1
else:
cprint("%s cached (deps unchanged): %s" % (self.name, self.envdir))
# install the project from the zipped file
# TODO think more carefully about where it should be installed
# specifically we want to be able this to include the test files (which
# are not always unpacked when installed so as to run the tests there)
# if there are build files (e.g. cython) then tests must run where
# the build was. Also, reinstalling should not overwrite the builds
# e.g. setup.py will skip rebuilding cython files if they are unchanged
cprint("%s inst: %s" % (self.name, self.envdistdir))
if not self.install_dist():
cprint(" install failed.\n", 'err')
return 1
cprint("%s runtests" % self.name)
# return False if all commands were successfully run
# otherwise returns True if at least one command exited badly
return self.run_commands()
def prev_deps(self):
from ctox.pkg import prev_deps
return prev_deps(self)
def reusableable(self):
"""Can we use the old environment.
If this is True we don't need to
create a new env and re-install the deps.
"""
# TODO better caching !!
# This should really make use of the conda + pip tree rather than just
# rely on a crappy DIY csv. Part of the difficulty is that pip installs
# have to be done seperately to conda, would be great to somehow merge
# cleverly pip freeze? maybe needs to keep a clean env to compare with.
return self.prev_deps() != self.deps
def install_dist(self):
from ctox.pkg import install_dist
return install_dist(self)
def install_deps(self):
from ctox.pkg import install_deps
return install_deps(self)
def uninstall_deps(self, pdeps):
# from ctox.pkg import uninstall_deps
# return uninstall_deps(self, deps=pdeps)
self.create_env(force_remove=True)
def run_commands(self):
from ctox.pkg import run_commands
return run_commands(self)
def env_exists(self):
from ctox.pkg import env_exists
return env_exists(self)
def create_env(self, force_remove=False):
from ctox.pkg import create_env
return create_env(self, force_remove=force_remove)
def main(arguments, toxinidir=None):
"ctox: tox with conda."
try: # pragma: no cover
# Exit on broken pipe.
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
import sys
sys.exit(ctox(arguments, toxinidir))
except CalledProcessError as c:
print(c.output)
return 1
except NotImplementedError as e:
gh = "https://github.com/hayd/ctox/issues"
from colorama import Style
cprint(Style.BRIGHT + str(e), 'err')
cprint("If this is a valid tox.ini substitution, please open an issue on\n"
"github and request support: %s." % gh, 'warn')
return 1
except KeyboardInterrupt: # pragma: no cover
return 1
def parse_args(arguments):
from argparse import ArgumentParser
description = ("Tox but with conda.")
epilog = ("")
parser = ArgumentParser(description=description,
epilog=epilog,
prog='ctox')
parser.add_argument('--version',
help='print version number and exit',
action='store_true')
parser.add_argument('-e',
help='choose environments to run, comma seperated',
default='ALL')
return parser.parse_known_args(arguments)
def positional_args(arguments):
""""Generator for position arguments.
Example
-------
>>> list(positional_args(["arg1", "arg2", "--kwarg"]))
["arg1", "arg2"]
>>> list(positional_args(["--", "arg1", "--kwarg"]))
["arg1", "kwarg"]
"""
# TODO this behaviour probably isn't quite right.
if arguments and arguments[0] == '--':
for a in arguments[1:]:
yield a
else:
for a in arguments:
if a.startswith('-'):
break
yield a
def _main():
"ctox: tox with conda"
from sys import argv
arguments = argv[1:]
toxinidir = os.getcwd()
return main(arguments, toxinidir)
if __name__ == '__main__':
_main()
|
hayd/ctox | ctox/main.py | positional_args | python | def positional_args(arguments):
"
# TODO this behaviour probably isn't quite right.
if arguments and arguments[0] == '--':
for a in arguments[1:]:
yield a
else:
for a in arguments:
if a.startswith('-'):
break
yield a | Generator for position arguments.
Example
-------
>>> list(positional_args(["arg1", "arg2", "--kwarg"]))
["arg1", "arg2"]
>>> list(positional_args(["--", "arg1", "--kwarg"]))
["arg1", "kwarg"] | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/main.py#L274-L293 | null | """This module defines the CLI for tox via main and ctox functions.
The Env class create the environment specific methods.
"""
import os
from ctox.shell import check_output, CalledProcessError # TODO remove?
from ctox.shell import cprint
__version__ = version = '0.1.3'
SUPPORTED_ENVS = ('py26', 'py27', 'py33', 'py34', 'py35')
class Env(object):
"""A conda environment."""
# TODO it's tempting to remove all but the tox variables as attributes
# i.e. call out to pkg or config functions rather than dummy methods,
# make the config and options private and have Env.ctox a function again.
# This would makes the _replace_match substitution a little cleaner.
def __init__(self, name, config, options, toxdir, toxinidir, package):
self.config = config
self.options = options
self.name = name
self.toxdir = toxdir
self.toxinidir = toxinidir
self.envdir = os.path.join(toxdir, self.name)
self.distdir = os.path.join(self.toxdir, "dist")
self.envdistdir = os.path.join(self.envdir, "dist")
self.envctoxfile = os.path.join(self.envdir, "ctox")
self.envbindir = os.path.join(self.envdir, "bin")
self.conda = os.path.join(self.envbindir, "conda")
self.pip = os.path.join(self.envbindir, "pip")
self.python = os.path.join(self.envbindir, "python")
self.envpython = self.python
# TODO make this less of a hack
# perhaps it should also be from basepython in config
# should we use basepython as the variable name
self.py_version = '.'.join(self.name[2:4]) # e.g. "2.7"
# TODO think if package is correct, atm it's name + version
# perhaps there is a proper tox name for this?
self.package = package
self.package_zipped = os.path.join(self.distdir,
self.package + ".zip")
self.envpackagedir = os.path.join(self.envdistdir, package)
from ctox.config import (
get_commands, get_deps, get_whitelist, get_changedir)
self.changedir = get_changedir(self)
# TODO remove these as attributes and call them directly
self.whitelist = get_whitelist(self.config)
self.deps = get_deps(self)
self.commands = get_commands(self)
def ctox(self):
"""Main method for the environment.
Parse the tox.ini config, install the dependancies and run the
commands. The output of the commands is printed.
Returns 0 if they ran successfully, 1 if there was an error
(either in setup or whilst running the commands), 2 if the build
was skipped.
"""
# TODO make this less of a hack e.g. using basepython from config
# if it exists (and use an attribute directly).
if self.name[:4] not in SUPPORTED_ENVS:
from colorama import Style
cprint(Style.BRIGHT +
"Skipping unsupported python version %s\n" % self.name,
'warn')
return 2
# TODO don't remove env if there's a dependancy mis-match
# rather "clean" it to the empty state (the hope being to keep
# the dist build around - so not all files need to be rebuilt)
# TODO extract this as a method (for readability)
if not self.env_exists() or self.reusableable():
cprint("%s create: %s" % (self.name, self.envdir))
self.create_env(force_remove=True)
cprint("%s installdeps: %s" % (self.name, ', '.join(self.deps)))
if not self.install_deps():
cprint(" deps installation failed, aborted.\n", 'err')
return 1
else:
cprint("%s cached (deps unchanged): %s" % (self.name, self.envdir))
# install the project from the zipped file
# TODO think more carefully about where it should be installed
# specifically we want to be able this to include the test files (which
# are not always unpacked when installed so as to run the tests there)
# if there are build files (e.g. cython) then tests must run where
# the build was. Also, reinstalling should not overwrite the builds
# e.g. setup.py will skip rebuilding cython files if they are unchanged
cprint("%s inst: %s" % (self.name, self.envdistdir))
if not self.install_dist():
cprint(" install failed.\n", 'err')
return 1
cprint("%s runtests" % self.name)
# return False if all commands were successfully run
# otherwise returns True if at least one command exited badly
return self.run_commands()
def prev_deps(self):
from ctox.pkg import prev_deps
return prev_deps(self)
def reusableable(self):
"""Can we use the old environment.
If this is True we don't need to
create a new env and re-install the deps.
"""
# TODO better caching !!
# This should really make use of the conda + pip tree rather than just
# rely on a crappy DIY csv. Part of the difficulty is that pip installs
# have to be done seperately to conda, would be great to somehow merge
# cleverly pip freeze? maybe needs to keep a clean env to compare with.
return self.prev_deps() != self.deps
def install_dist(self):
from ctox.pkg import install_dist
return install_dist(self)
def install_deps(self):
from ctox.pkg import install_deps
return install_deps(self)
def uninstall_deps(self, pdeps):
# from ctox.pkg import uninstall_deps
# return uninstall_deps(self, deps=pdeps)
self.create_env(force_remove=True)
def run_commands(self):
from ctox.pkg import run_commands
return run_commands(self)
def env_exists(self):
from ctox.pkg import env_exists
return env_exists(self)
def create_env(self, force_remove=False):
from ctox.pkg import create_env
return create_env(self, force_remove=force_remove)
def main(arguments, toxinidir=None):
"ctox: tox with conda."
try: # pragma: no cover
# Exit on broken pipe.
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
import sys
sys.exit(ctox(arguments, toxinidir))
except CalledProcessError as c:
print(c.output)
return 1
except NotImplementedError as e:
gh = "https://github.com/hayd/ctox/issues"
from colorama import Style
cprint(Style.BRIGHT + str(e), 'err')
cprint("If this is a valid tox.ini substitution, please open an issue on\n"
"github and request support: %s." % gh, 'warn')
return 1
except KeyboardInterrupt: # pragma: no cover
return 1
def parse_args(arguments):
from argparse import ArgumentParser
description = ("Tox but with conda.")
epilog = ("")
parser = ArgumentParser(description=description,
epilog=epilog,
prog='ctox')
parser.add_argument('--version',
help='print version number and exit',
action='store_true')
parser.add_argument('-e',
help='choose environments to run, comma seperated',
default='ALL')
return parser.parse_known_args(arguments)
def ctox(arguments, toxinidir):
"""Sets up conda environments, and sets up and runs each environment based
on the project's tox.ini configuration file.
Returns 1 if either the build or running the commands failed or 0 if
all commmands ran successfully.
"""
if arguments is None:
arguments = []
if toxinidir is None:
toxinidir = os.getcwd()
args, options = parse_args(arguments)
if args.version:
print(version)
return 0
# if no conda trigger OSError
try:
with open(os.devnull, "w") as fnull:
check_output(['conda', '--version'], stderr=fnull)
except OSError:
cprint("conda not found, you need to install it to use ctox.\n"
"The recommended way is to download miniconda,\n"
"Do not install conda via pip.", 'err')
return 1
toxinifile = os.path.join(toxinidir, "tox.ini")
from ctox.config import read_config, get_envlist
config = read_config(toxinifile)
if args.e == 'ALL':
envlist = get_envlist(config)
else:
envlist = args.e.split(',')
# TODO configure with option
toxdir = os.path.join(toxinidir, ".tox")
# create a zip file for the project
from ctox.pkg import make_dist, package_name
cprint("GLOB sdist-make: %s" % os.path.join(toxinidir, "setup.py"))
package = package_name(toxinidir)
if not make_dist(toxinidir, toxdir, package):
cprint(" setup.py sdist failed", 'err')
return 1
# setup each environment and run ctox
failing = {}
for env_name in envlist:
env = Env(name=env_name, config=config, options=options,
toxdir=toxdir, toxinidir=toxinidir, package=package)
failing[env_name] = env.ctox()
# print summary of the outcomes of ctox for each environment
cprint('Summary')
print("-" * 23)
for env_name in envlist:
n = failing[env_name]
outcome = ('succeeded', 'failed', 'skipped')[n]
status = ('ok', 'err', 'warn')[n]
cprint("%s commands %s" % (env_name, outcome), status)
return any(1 == v for v in failing.values())
def _main():
"ctox: tox with conda"
from sys import argv
arguments = argv[1:]
toxinidir = os.getcwd()
return main(arguments, toxinidir)
if __name__ == '__main__':
_main()
|
hayd/ctox | ctox/main.py | _main | python | def _main():
"ctox: tox with conda"
from sys import argv
arguments = argv[1:]
toxinidir = os.getcwd()
return main(arguments, toxinidir) | ctox: tox with conda | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/main.py#L296-L303 | [
"def main(arguments, toxinidir=None):\n \"ctox: tox with conda.\"\n try: # pragma: no cover\n # Exit on broken pipe.\n import signal\n signal.signal(signal.SIGPIPE, signal.SIG_DFL)\n except AttributeError: # pragma: no cover\n # SIGPIPE is not available on Windows.\n pass\n\n try:\n import sys\n sys.exit(ctox(arguments, toxinidir))\n\n except CalledProcessError as c:\n print(c.output)\n return 1\n\n except NotImplementedError as e:\n gh = \"https://github.com/hayd/ctox/issues\"\n from colorama import Style\n cprint(Style.BRIGHT + str(e), 'err')\n cprint(\"If this is a valid tox.ini substitution, please open an issue on\\n\"\n \"github and request support: %s.\" % gh, 'warn')\n return 1\n\n except KeyboardInterrupt: # pragma: no cover\n return 1\n"
] | """This module defines the CLI for tox via main and ctox functions.
The Env class create the environment specific methods.
"""
import os
from ctox.shell import check_output, CalledProcessError # TODO remove?
from ctox.shell import cprint
__version__ = version = '0.1.3'
SUPPORTED_ENVS = ('py26', 'py27', 'py33', 'py34', 'py35')
class Env(object):
"""A conda environment."""
# TODO it's tempting to remove all but the tox variables as attributes
# i.e. call out to pkg or config functions rather than dummy methods,
# make the config and options private and have Env.ctox a function again.
# This would makes the _replace_match substitution a little cleaner.
def __init__(self, name, config, options, toxdir, toxinidir, package):
self.config = config
self.options = options
self.name = name
self.toxdir = toxdir
self.toxinidir = toxinidir
self.envdir = os.path.join(toxdir, self.name)
self.distdir = os.path.join(self.toxdir, "dist")
self.envdistdir = os.path.join(self.envdir, "dist")
self.envctoxfile = os.path.join(self.envdir, "ctox")
self.envbindir = os.path.join(self.envdir, "bin")
self.conda = os.path.join(self.envbindir, "conda")
self.pip = os.path.join(self.envbindir, "pip")
self.python = os.path.join(self.envbindir, "python")
self.envpython = self.python
# TODO make this less of a hack
# perhaps it should also be from basepython in config
# should we use basepython as the variable name
self.py_version = '.'.join(self.name[2:4]) # e.g. "2.7"
# TODO think if package is correct, atm it's name + version
# perhaps there is a proper tox name for this?
self.package = package
self.package_zipped = os.path.join(self.distdir,
self.package + ".zip")
self.envpackagedir = os.path.join(self.envdistdir, package)
from ctox.config import (
get_commands, get_deps, get_whitelist, get_changedir)
self.changedir = get_changedir(self)
# TODO remove these as attributes and call them directly
self.whitelist = get_whitelist(self.config)
self.deps = get_deps(self)
self.commands = get_commands(self)
def ctox(self):
"""Main method for the environment.
Parse the tox.ini config, install the dependancies and run the
commands. The output of the commands is printed.
Returns 0 if they ran successfully, 1 if there was an error
(either in setup or whilst running the commands), 2 if the build
was skipped.
"""
# TODO make this less of a hack e.g. using basepython from config
# if it exists (and use an attribute directly).
if self.name[:4] not in SUPPORTED_ENVS:
from colorama import Style
cprint(Style.BRIGHT +
"Skipping unsupported python version %s\n" % self.name,
'warn')
return 2
# TODO don't remove env if there's a dependancy mis-match
# rather "clean" it to the empty state (the hope being to keep
# the dist build around - so not all files need to be rebuilt)
# TODO extract this as a method (for readability)
if not self.env_exists() or self.reusableable():
cprint("%s create: %s" % (self.name, self.envdir))
self.create_env(force_remove=True)
cprint("%s installdeps: %s" % (self.name, ', '.join(self.deps)))
if not self.install_deps():
cprint(" deps installation failed, aborted.\n", 'err')
return 1
else:
cprint("%s cached (deps unchanged): %s" % (self.name, self.envdir))
# install the project from the zipped file
# TODO think more carefully about where it should be installed
# specifically we want to be able this to include the test files (which
# are not always unpacked when installed so as to run the tests there)
# if there are build files (e.g. cython) then tests must run where
# the build was. Also, reinstalling should not overwrite the builds
# e.g. setup.py will skip rebuilding cython files if they are unchanged
cprint("%s inst: %s" % (self.name, self.envdistdir))
if not self.install_dist():
cprint(" install failed.\n", 'err')
return 1
cprint("%s runtests" % self.name)
# return False if all commands were successfully run
# otherwise returns True if at least one command exited badly
return self.run_commands()
def prev_deps(self):
from ctox.pkg import prev_deps
return prev_deps(self)
def reusableable(self):
"""Can we use the old environment.
If this is True we don't need to
create a new env and re-install the deps.
"""
# TODO better caching !!
# This should really make use of the conda + pip tree rather than just
# rely on a crappy DIY csv. Part of the difficulty is that pip installs
# have to be done seperately to conda, would be great to somehow merge
# cleverly pip freeze? maybe needs to keep a clean env to compare with.
return self.prev_deps() != self.deps
def install_dist(self):
from ctox.pkg import install_dist
return install_dist(self)
def install_deps(self):
from ctox.pkg import install_deps
return install_deps(self)
def uninstall_deps(self, pdeps):
# from ctox.pkg import uninstall_deps
# return uninstall_deps(self, deps=pdeps)
self.create_env(force_remove=True)
def run_commands(self):
from ctox.pkg import run_commands
return run_commands(self)
def env_exists(self):
from ctox.pkg import env_exists
return env_exists(self)
def create_env(self, force_remove=False):
from ctox.pkg import create_env
return create_env(self, force_remove=force_remove)
def main(arguments, toxinidir=None):
"ctox: tox with conda."
try: # pragma: no cover
# Exit on broken pipe.
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
import sys
sys.exit(ctox(arguments, toxinidir))
except CalledProcessError as c:
print(c.output)
return 1
except NotImplementedError as e:
gh = "https://github.com/hayd/ctox/issues"
from colorama import Style
cprint(Style.BRIGHT + str(e), 'err')
cprint("If this is a valid tox.ini substitution, please open an issue on\n"
"github and request support: %s." % gh, 'warn')
return 1
except KeyboardInterrupt: # pragma: no cover
return 1
def parse_args(arguments):
from argparse import ArgumentParser
description = ("Tox but with conda.")
epilog = ("")
parser = ArgumentParser(description=description,
epilog=epilog,
prog='ctox')
parser.add_argument('--version',
help='print version number and exit',
action='store_true')
parser.add_argument('-e',
help='choose environments to run, comma seperated',
default='ALL')
return parser.parse_known_args(arguments)
def ctox(arguments, toxinidir):
    """Sets up conda environments, and sets up and runs each environment based
    on the project's tox.ini configuration file.

    Returns 1 (truthy) if either the build or running the commands failed,
    or 0 (falsy) if all commands ran successfully.  (The actual return
    value is the bool from ``any``; callers use it as a process exit code.)
    """
    # Normalise the optional arguments.
    if arguments is None:
        arguments = []
    if toxinidir is None:
        toxinidir = os.getcwd()
    args, options = parse_args(arguments)
    if args.version:
        print(version)
        return 0
    # if no conda trigger OSError
    try:
        with open(os.devnull, "w") as fnull:
            check_output(['conda', '--version'], stderr=fnull)
    except OSError:
        cprint("conda not found, you need to install it to use ctox.\n"
               "The recommended way is to download miniconda,\n"
               "Do not install conda via pip.", 'err')
        return 1
    # Read the project's tox.ini and decide which environments to run:
    # either everything from the config, or the comma-separated -e list.
    toxinifile = os.path.join(toxinidir, "tox.ini")
    from ctox.config import read_config, get_envlist
    config = read_config(toxinifile)
    if args.e == 'ALL':
        envlist = get_envlist(config)
    else:
        envlist = args.e.split(',')
    # TODO configure with option
    toxdir = os.path.join(toxinidir, ".tox")
    # create a zip file for the project
    from ctox.pkg import make_dist, package_name
    cprint("GLOB sdist-make: %s" % os.path.join(toxinidir, "setup.py"))
    package = package_name(toxinidir)
    if not make_dist(toxinidir, toxdir, package):
        cprint(" setup.py sdist failed", 'err')
        return 1
    # setup each environment and run ctox
    # Env.ctox() returns 0 (ok), 1 (failed) or 2 (skipped) per environment.
    failing = {}
    for env_name in envlist:
        env = Env(name=env_name, config=config, options=options,
                  toxdir=toxdir, toxinidir=toxinidir, package=package)
        failing[env_name] = env.ctox()
    # print summary of the outcomes of ctox for each environment
    cprint('Summary')
    print("-" * 23)
    for env_name in envlist:
        n = failing[env_name]
        # Index the per-env status code into matching label/colour tuples.
        outcome = ('succeeded', 'failed', 'skipped')[n]
        status = ('ok', 'err', 'warn')[n]
        cprint("%s commands %s" % (env_name, outcome), status)
    # Skipped (2) environments do not count as failures.
    return any(1 == v for v in failing.values())
def positional_args(arguments):
    """Generator for positional arguments.

    Yields leading arguments up to (but not including) the first flag
    (an argument starting with ``-``).  A literal ``--`` as the very
    first argument escapes this: everything after it is yielded
    verbatim, flags included.

    Example
    -------
    >>> list(positional_args(["arg1", "arg2", "--kwarg"]))
    ['arg1', 'arg2']
    >>> list(positional_args(["--", "arg1", "--kwarg"]))
    ['arg1', '--kwarg']
    """
    # TODO this behaviour probably isn't quite right.
    if arguments and arguments[0] == '--':
        for a in arguments[1:]:
            yield a
    else:
        for a in arguments:
            if a.startswith('-'):
                break
            yield a
if __name__ == '__main__':
_main()
|
hayd/ctox | ctox/main.py | Env.ctox | python | def ctox(self):
# TODO make this less of a hack e.g. using basepython from config
# if it exists (and use an attribute directly).
if self.name[:4] not in SUPPORTED_ENVS:
from colorama import Style
cprint(Style.BRIGHT +
"Skipping unsupported python version %s\n" % self.name,
'warn')
return 2
# TODO don't remove env if there's a dependancy mis-match
# rather "clean" it to the empty state (the hope being to keep
# the dist build around - so not all files need to be rebuilt)
# TODO extract this as a method (for readability)
if not self.env_exists() or self.reusableable():
cprint("%s create: %s" % (self.name, self.envdir))
self.create_env(force_remove=True)
cprint("%s installdeps: %s" % (self.name, ', '.join(self.deps)))
if not self.install_deps():
cprint(" deps installation failed, aborted.\n", 'err')
return 1
else:
cprint("%s cached (deps unchanged): %s" % (self.name, self.envdir))
# install the project from the zipped file
# TODO think more carefully about where it should be installed
# specifically we want to be able this to include the test files (which
# are not always unpacked when installed so as to run the tests there)
# if there are build files (e.g. cython) then tests must run where
# the build was. Also, reinstalling should not overwrite the builds
# e.g. setup.py will skip rebuilding cython files if they are unchanged
cprint("%s inst: %s" % (self.name, self.envdistdir))
if not self.install_dist():
cprint(" install failed.\n", 'err')
return 1
cprint("%s runtests" % self.name)
# return False if all commands were successfully run
# otherwise returns True if at least one command exited badly
return self.run_commands() | Main method for the environment.
Parse the tox.ini config, install the dependancies and run the
commands. The output of the commands is printed.
Returns 0 if they ran successfully, 1 if there was an error
(either in setup or whilst running the commands), 2 if the build
was skipped. | train | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/main.py#L63-L113 | [
"def cprint(message, status=None):\n \"\"\"color printing based on status:\n\n None -> BRIGHT\n 'ok' -> GREEN\n 'err' -> RED\n 'warn' -> YELLOW\n\n \"\"\"\n # TODO use less obscure dict, probably \"error\", \"warn\", \"success\" as keys\n status = {'warn': Fore.YELLOW, 'err': Fore.RED,\n 'ok': Fore.GREEN, None: Style.BRIGHT}[status]\n print(status + message + Style.RESET_ALL)\n",
"def env_exists(self):\n from ctox.pkg import env_exists\n return env_exists(self)\n"
] | class Env(object):
"""A conda environment."""
# TODO it's tempting to remove all but the tox variables as attributes
# i.e. call out to pkg or config functions rather than dummy methods,
# make the config and options private and have Env.ctox a function again.
# This would makes the _replace_match substitution a little cleaner.
def __init__(self, name, config, options, toxdir, toxinidir, package):
self.config = config
self.options = options
self.name = name
self.toxdir = toxdir
self.toxinidir = toxinidir
self.envdir = os.path.join(toxdir, self.name)
self.distdir = os.path.join(self.toxdir, "dist")
self.envdistdir = os.path.join(self.envdir, "dist")
self.envctoxfile = os.path.join(self.envdir, "ctox")
self.envbindir = os.path.join(self.envdir, "bin")
self.conda = os.path.join(self.envbindir, "conda")
self.pip = os.path.join(self.envbindir, "pip")
self.python = os.path.join(self.envbindir, "python")
self.envpython = self.python
# TODO make this less of a hack
# perhaps it should also be from basepython in config
# should we use basepython as the variable name
self.py_version = '.'.join(self.name[2:4]) # e.g. "2.7"
# TODO think if package is correct, atm it's name + version
# perhaps there is a proper tox name for this?
self.package = package
self.package_zipped = os.path.join(self.distdir,
self.package + ".zip")
self.envpackagedir = os.path.join(self.envdistdir, package)
from ctox.config import (
get_commands, get_deps, get_whitelist, get_changedir)
self.changedir = get_changedir(self)
# TODO remove these as attributes and call them directly
self.whitelist = get_whitelist(self.config)
self.deps = get_deps(self)
self.commands = get_commands(self)
def prev_deps(self):
from ctox.pkg import prev_deps
return prev_deps(self)
def reusableable(self):
    """Return True when the recorded deps differ from the current deps.

    NOTE(review): despite the name, a True return means the cached
    environment is STALE — the caller (Env.ctox) recreates the env when
    this is True.  The old docstring claimed the opposite ("If this is
    True we don't need to create a new env"), which contradicts both
    the expression below and the call site.
    """
    # TODO better caching !!
    # This should really make use of the conda + pip tree rather than just
    # rely on a crappy DIY csv. Part of the difficulty is that pip installs
    # have to be done seperately to conda, would be great to somehow merge
    # cleverly pip freeze? maybe needs to keep a clean env to compare with.
    return self.prev_deps() != self.deps
def install_dist(self):
from ctox.pkg import install_dist
return install_dist(self)
def install_deps(self):
from ctox.pkg import install_deps
return install_deps(self)
def uninstall_deps(self, pdeps):
# from ctox.pkg import uninstall_deps
# return uninstall_deps(self, deps=pdeps)
self.create_env(force_remove=True)
def run_commands(self):
from ctox.pkg import run_commands
return run_commands(self)
def env_exists(self):
from ctox.pkg import env_exists
return env_exists(self)
def create_env(self, force_remove=False):
from ctox.pkg import create_env
return create_env(self, force_remove=force_remove)
|
lordmauve/lepton | lepton/system.py | ParticleSystem.run_ahead | python | def run_ahead(self, time, framerate):
if time:
td = 1.0 / framerate
update = self.update
for i in range(int(time / td)):
update(td) | Run the particle system for the specified time frame at the
specified framerate to move time forward as quickly as possible.
Useful for "warming up" the particle system to reach a steady-state
before anything is drawn or to simply "skip ahead" in time.
time -- The amount of simulation time to skip over.
framerate -- The framerate of the simulation in updates per unit
time. Higher values will increase simulation accuracy,
but will take longer to compute. | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/lepton/system.py#L72-L88 | [
"def update(self, time_delta):\n \"\"\"Update all particle groups in the system. time_delta is the\n time since the last update (in arbitrary time units).\n\n When updating, first the global controllers are applied to\n all groups. Then update(time_delta) is called for all groups.\n\n This method can be conveniently scheduled using the Pyglet\n scheduler method: pyglet.clock.schedule_interval\n \"\"\"\n for group in self:\n group.update(time_delta)\n"
] | class ParticleSystem(object):
def __init__(self, global_controllers=()):
"""Initialize the particle system, adding the specified global
controllers, if any
"""
# Tuples are used for global controllers to prevent
# unpleasant side-affects if they are added during update or draw
self.controllers = tuple(global_controllers)
self.groups = []
def add_global_controller(self, *controllers):
"""Add a global controller applied to all groups on update"""
self.controllers += controllers
def add_group(self, group):
"""Add a particle group to the system"""
self.groups.append(group)
def remove_group(self, group):
"""Remove a particle group from the system, raise ValueError
if the group is not in the system
"""
self.groups.remove(group)
def __len__(self):
"""Return the number of particle groups in the system"""
return len(self.groups)
def __iter__(self):
"""Iterate the system's particle groups"""
# Iterate a copy of the group list to so that the groups
# can be safely changed during iteration
return iter(list(self.groups))
def __contains__(self, group):
"""Return True if the specified group is in the system"""
return group in self.groups
def update(self, time_delta):
"""Update all particle groups in the system. time_delta is the
time since the last update (in arbitrary time units).
When updating, first the global controllers are applied to
all groups. Then update(time_delta) is called for all groups.
This method can be conveniently scheduled using the Pyglet
scheduler method: pyglet.clock.schedule_interval
"""
for group in self:
group.update(time_delta)
def draw(self):
"""Draw all particle groups in the system using their renderers.
This method is convenient to call from your Pyglet window's
on_draw handler to redraw particles when needed.
"""
for group in self:
group.draw()
|
lordmauve/lepton | examples/fireworks.py | on_draw | python | def on_draw():
global yrot
win.clear()
glLoadIdentity()
glTranslatef(0, 0, -100)
glRotatef(yrot, 0.0, 1.0, 0.0)
default_system.draw()
'''
glBindTexture(GL_TEXTURE_2D, 1)
glEnable(GL_TEXTURE_2D)
glEnable(GL_POINT_SPRITE)
glPointSize(100);
glBegin(GL_POINTS)
glVertex2f(0,0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 2)
glEnable(GL_TEXTURE_2D)
glEnable(GL_POINT_SPRITE)
glPointSize(100);
glBegin(GL_POINTS)
glVertex2f(50,0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 0)
''' | glBindTexture(GL_TEXTURE_2D, 1)
glEnable(GL_TEXTURE_2D)
glEnable(GL_POINT_SPRITE)
glPointSize(100);
glBegin(GL_POINTS)
glVertex2f(0,0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 2)
glEnable(GL_TEXTURE_2D)
glEnable(GL_POINT_SPRITE)
glPointSize(100);
glBegin(GL_POINTS)
glVertex2f(50,0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 0) | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/fireworks.py#L132-L155 | [
"def draw(self):\n \"\"\"Draw all particle groups in the system using their renderers.\n\n This method is convenient to call from your Pyglet window's\n on_draw handler to redraw particles when needed.\n \"\"\"\n for group in self:\n group.draw()\n"
] | #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Fireworks simulation to show off the per-particle emitter"""
__version__ = '$Id$'
import os
import math
from random import expovariate, uniform, gauss
from pyglet import image
from pyglet.gl import *
from lepton import Particle, ParticleGroup, default_system, domain
from lepton.renderer import PointRenderer
from lepton.texturizer import SpriteTexturizer, create_point_texture
from lepton.emitter import StaticEmitter, PerParticleEmitter
from lepton.controller import Gravity, Lifetime, Movement, Fader, ColorBlender
spark_tex = image.load(os.path.join(os.path.dirname(__file__), 'flare3.png')).get_texture()
spark_texturizer = SpriteTexturizer(spark_tex.id)
trail_texturizer = SpriteTexturizer(create_point_texture(8, 50))
class Kaboom:
lifetime = 5
def __init__(self):
color=(uniform(0,1), uniform(0,1), uniform(0,1), 1)
while max(color[:3]) < 0.9:
color=(uniform(0,1), uniform(0,1), uniform(0,1), 1)
spark_emitter = StaticEmitter(
template=Particle(
position=(uniform(-50, 50), uniform(-30, 30), uniform(-30, 30)),
color=color),
deviation=Particle(
velocity=(gauss(0, 5), gauss(0, 5), gauss(0, 5)),
age=1.5),
velocity=domain.Sphere((0, gauss(40, 20), 0), 60, 60))
self.sparks = ParticleGroup(
controllers=[
Lifetime(self.lifetime * 0.75),
Movement(damping=0.93),
ColorBlender([(0, (1,1,1,1)), (2, color), (self.lifetime, color)]),
Fader(fade_out_start=1.0, fade_out_end=self.lifetime * 0.5),
],
renderer=PointRenderer(abs(gauss(10, 3)), spark_texturizer))
spark_emitter.emit(int(gauss(60, 40)) + 50, self.sparks)
spread = abs(gauss(0.4, 1.0))
self.trail_emitter = PerParticleEmitter(self.sparks, rate=uniform(5,30),
template=Particle(
color=color),
deviation=Particle(
velocity=(spread, spread, spread),
age=self.lifetime * 0.75))
self.trails = ParticleGroup(
controllers=[
Lifetime(self.lifetime * 1.5),
Movement(damping=0.83),
ColorBlender([(0, (1,1,1,1)), (1, color), (self.lifetime, color)]),
Fader(max_alpha=0.75, fade_out_start=0, fade_out_end=gauss(self.lifetime, self.lifetime*0.3)),
self.trail_emitter
],
renderer=PointRenderer(10, trail_texturizer))
pyglet.clock.schedule_once(self.die, self.lifetime * 2)
def reduce_trail(self, dt=None):
if self.trail_emitter.rate > 0:
self.trail_emitter.rate -= 1
def die(self, dt=None):
default_system.remove_group(self.sparks)
default_system.remove_group(self.trails)
win = pyglet.window.Window(resizable=True, visible=False)
win.clear()
def on_resize(width, height):
"""Setup 3D projection for window"""
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(70, 1.0*width/height, 0.1, 1000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
win.on_resize = on_resize
yrot = 0.0
@win.event
def on_mouse_motion(x, y, dx, dy):
global yrot
yrot += dx * 0.3
glEnable(GL_BLEND)
glShadeModel(GL_SMOOTH)
glBlendFunc(GL_SRC_ALPHA,GL_ONE)
glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST);
glDisable(GL_DEPTH_TEST)
default_system.add_global_controller(
Gravity((0,-15,0))
)
MEAN_FIRE_INTERVAL = 3.0
def fire(dt=None):
Kaboom()
pyglet.clock.schedule_once(fire, expovariate(1.0 / (MEAN_FIRE_INTERVAL - 1)) + 1)
fire()
win.set_visible(True)
pyglet.clock.schedule_interval(default_system.update, (1.0/30.0))
pyglet.clock.set_fps_limit(None)
@win.event
if __name__ == '__main__':
pyglet.app.run()
|
lordmauve/lepton | examples/games/bonk/controls.py | Controls.bind_key_name | python | def bind_key_name(self, function, object_name):
"""Bind a key to an object name"""
for funcname, name in self.name_map.items():
if funcname == function:
self.name_map[
funcname] = object_name | Bind a key to an object name | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/controls.py#L78-L83 | null | class Controls(object):
# Control states (-1.0 to 1.0)
collective = 0.7 # Main rotor thrust
cyclic_pitch = 0.0 # Main rotor angle
cyclic_roll = 0.0
pedal = 0.0 # Tail-rotor thrust
cyclic_mouse_sensitivity = 0.0015
cyclic_keyboard_speed = 0.01
collective_wheel_sensitivity = 0.02
collective_keyboard_speed = 0.25
pedal_keyboard_speed = 0.05
pedal_centering = 0.75
"""What is the best way to configure the key map?
key.A will always be A. Assume that a keystroke will
always pass a message, I need a way to set what the message is.
a Message map. func set message
"""
message_map = {
'key_a': "left",
'key_d': "right",
'key_w': "up",
'key_s': "down",
'left': "left",
'right': "right",
'up': "up",
'down': "down",
'pause': "pause",
}
key_map = {
'key_a': key.A,
'key_d': key.D,
'key_w': key.W,
'key_s': key.S,
'left': key.LEFT,
'right': key.RIGHT,
'up': key.UP,
'down': key.DOWN,
'pause': key.P,
}
"""
Each object has a list of keys assigned to it. When an object is added
to the game system its keys are passed in to control which binds messages
to that object. The message logic is handled by the object"""
# Function, name of object mapped to. Initially all set to none
name_map = {
"left": None,
"right": None,
"up": None,
"down": None,
"a": None,
"d": None,
"w": None,
"s": None
}
def __init__(self, window):
self.configure_keys()
window.push_handlers(self)
def bind_key_name(self, function, object_name):
    """Bind a key to an object name"""
    # A direct membership test replaces the original full-dict scan;
    # the outcome is identical: the matching entry (if any) is updated.
    if function in self.name_map:
        self.name_map[function] = object_name
def bind_keys(self, objects):
    """Bind each object's declared keys to that object's name.

    Walks *objects*; for every object with a non-None ``keys`` list,
    each non-None key is registered against the object's ``name`` via
    bind_key_name.
    """
    # Fixes: `is not None` instead of `!= None` (PEP 8), and the loop
    # variables no longer shadow the builtin `object` / the outer `key`.
    for obj in objects:
        if obj.keys is not None:
            for keysym in obj.keys:
                if keysym is not None:
                    self.bind_key_name(keysym, obj.name)
# for funcname, key in self.key_map.items():
def configure_keys(self):
    """Configure key map"""
    # Build the key-symbol -> bound-method dispatch table in one pass.
    self.active_functions = set()
    self.key2func = {keysym: getattr(self, funcname)
                     for funcname, keysym in self.key_map.items()}
@staticmethod
def clamp(value, minval=-1.0, maxval=1.0):
    """Clamp *value* into the inclusive range [minval, maxval]."""
    if value < minval:
        return minval
    if value > maxval:
        return maxval
    return value
def on_mouse_motion(self, x, y, dx, dy):
# Mouse controls cyclic
self.cyclic_pitch = self.clamp(
self.cyclic_pitch - dy * self.cyclic_mouse_sensitivity)
self.cyclic_roll = self.clamp(
self.cyclic_roll - dx * self.cyclic_mouse_sensitivity)
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
self.collective = self.clamp(
self.collective - scroll_y * self.collective_wheel_sensitivity, minval=0)
def on_key_press(self, symbol, modifier):
if symbol in self.key2func:
self.active_functions.add(self.key2func[symbol])
return True
def on_key_release(self, symbol, modifier):
try:
self.active_functions.remove(self.key2func[symbol])
return True
except KeyError:
pass
def update(self, gamesystem, dt):
if abs(self.pedal) < self.pedal_keyboard_speed / 2:
self.pedal = 0
'''
if abs(self.cyclic_pitch) < self.cyclic_keyboard_speed / 2:
self.cyclic_pitch = 0
if abs(self.cyclic_roll) < self.cyclic_keyboard_speed / 2:
self.cyclic_roll = 0
'''
for func in self.active_functions:
func(gamesystem, dt)
self.pedal *= self.pedal_centering
# self.cyclic_pitch *= self.pedal_centering
# self.cyclic_roll *= self.pedal_centering
# Control functions #
"""so The problem now is how do I map keys to certain functions.
I am currently mapping object names to keys. For now ignore and
verify that multiple objects can take input."""
def key_a(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['a'], "game", self.message_map['key_a']))
def key_d(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['d'], "game", self.message_map['key_d']))
def key_w(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['w'], "game", self.message_map['key_w']))
def key_s(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['s'], "game", self.message_map['key_s']))
def down(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['down'], "game", self.message_map['down']))
def up(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['up'], "game", self.message_map['up']))
def left(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['left'], "game", self.message_map['left']))
def right(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['right'], "game", self.message_map['right']))
def pause(self, dt):
global PAUSED
PAUSED = not PAUSED
|
lordmauve/lepton | examples/games/bonk/controls.py | Controls.bind_keys | python | def bind_keys(self, objects):
"""Configure name map: My goal here is to associate a named object
with a specific function"""
for object in objects:
if object.keys != None:
for key in object.keys:
if key != None:
self.bind_key_name(key, object.name) | Configure name map: My goal here is to associate a named object
with a specific function | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/controls.py#L85-L92 | [
"def bind_key_name(self, function, object_name):\n \"\"\"Bind a key to an object name\"\"\"\n for funcname, name in self.name_map.items():\n if funcname == function:\n self.name_map[\n funcname] = object_name # getattr(self, funcname)\n"
] | class Controls(object):
# Control states (-1.0 to 1.0)
collective = 0.7 # Main rotor thrust
cyclic_pitch = 0.0 # Main rotor angle
cyclic_roll = 0.0
pedal = 0.0 # Tail-rotor thrust
cyclic_mouse_sensitivity = 0.0015
cyclic_keyboard_speed = 0.01
collective_wheel_sensitivity = 0.02
collective_keyboard_speed = 0.25
pedal_keyboard_speed = 0.05
pedal_centering = 0.75
"""What is the best way to configure the key map?
key.A will always be A. Assume that a keystroke will
always pass a message, I need a way to set what the message is.
a Message map. func set message
"""
message_map = {
'key_a': "left",
'key_d': "right",
'key_w': "up",
'key_s': "down",
'left': "left",
'right': "right",
'up': "up",
'down': "down",
'pause': "pause",
}
key_map = {
'key_a': key.A,
'key_d': key.D,
'key_w': key.W,
'key_s': key.S,
'left': key.LEFT,
'right': key.RIGHT,
'up': key.UP,
'down': key.DOWN,
'pause': key.P,
}
"""
Each object has a list of keys assigned to it. When an object is added
to the game system its keys are passed in to control which binds messages
to that object. The message logic is handled by the object"""
# Function, name of object mapped to. Initially all set to none
name_map = {
"left": None,
"right": None,
"up": None,
"down": None,
"a": None,
"d": None,
"w": None,
"s": None
}
def __init__(self, window):
self.configure_keys()
window.push_handlers(self)
def bind_key_name(self, function, object_name):
"""Bind a key to an object name"""
for funcname, name in self.name_map.items():
if funcname == function:
self.name_map[
funcname] = object_name # getattr(self, funcname)
def bind_keys(self, objects):
"""Configure name map: My goal here is to associate a named object
with a specific function"""
for object in objects:
if object.keys != None:
for key in object.keys:
if key != None:
self.bind_key_name(key, object.name)
# for funcname, key in self.key_map.items():
def configure_keys(self):
"""Configure key map"""
self.active_functions = set()
self.key2func = {}
for funcname, key in self.key_map.items():
self.key2func[key] = getattr(self, funcname)
@staticmethod
def clamp(value, minval=-1.0, maxval=1.0):
return min(max(value, minval), maxval)
def on_mouse_motion(self, x, y, dx, dy):
# Mouse controls cyclic
self.cyclic_pitch = self.clamp(
self.cyclic_pitch - dy * self.cyclic_mouse_sensitivity)
self.cyclic_roll = self.clamp(
self.cyclic_roll - dx * self.cyclic_mouse_sensitivity)
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
self.collective = self.clamp(
self.collective - scroll_y * self.collective_wheel_sensitivity, minval=0)
def on_key_press(self, symbol, modifier):
if symbol in self.key2func:
self.active_functions.add(self.key2func[symbol])
return True
def on_key_release(self, symbol, modifier):
try:
self.active_functions.remove(self.key2func[symbol])
return True
except KeyError:
pass
def update(self, gamesystem, dt):
if abs(self.pedal) < self.pedal_keyboard_speed / 2:
self.pedal = 0
'''
if abs(self.cyclic_pitch) < self.cyclic_keyboard_speed / 2:
self.cyclic_pitch = 0
if abs(self.cyclic_roll) < self.cyclic_keyboard_speed / 2:
self.cyclic_roll = 0
'''
for func in self.active_functions:
func(gamesystem, dt)
self.pedal *= self.pedal_centering
# self.cyclic_pitch *= self.pedal_centering
# self.cyclic_roll *= self.pedal_centering
# Control functions #
"""so The problem now is how do I map keys to certain functions.
I am currently mapping object names to keys. For now ignore and
verify that multiple objects can take input."""
def key_a(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['a'], "game", self.message_map['key_a']))
def key_d(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['d'], "game", self.message_map['key_d']))
def key_w(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['w'], "game", self.message_map['key_w']))
def key_s(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['s'], "game", self.message_map['key_s']))
def down(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['down'], "game", self.message_map['down']))
def up(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['up'], "game", self.message_map['up']))
def left(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['left'], "game", self.message_map['left']))
def right(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['right'], "game", self.message_map['right']))
def pause(self, dt):
global PAUSED
PAUSED = not PAUSED
|
lordmauve/lepton | examples/games/bonk/controls.py | Controls.configure_keys | python | def configure_keys(self):
"""Configure key map"""
self.active_functions = set()
self.key2func = {}
for funcname, key in self.key_map.items():
self.key2func[key] = getattr(self, funcname) | Configure key map | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/controls.py#L95-L100 | null | class Controls(object):
# Control states (-1.0 to 1.0)
collective = 0.7 # Main rotor thrust
cyclic_pitch = 0.0 # Main rotor angle
cyclic_roll = 0.0
pedal = 0.0 # Tail-rotor thrust
cyclic_mouse_sensitivity = 0.0015
cyclic_keyboard_speed = 0.01
collective_wheel_sensitivity = 0.02
collective_keyboard_speed = 0.25
pedal_keyboard_speed = 0.05
pedal_centering = 0.75
"""What is the best way to configure the key map?
key.A will always be A. Assume that a keystroke will
always pass a message, I need a way to set what the message is.
a Message map. func set message
"""
message_map = {
'key_a': "left",
'key_d': "right",
'key_w': "up",
'key_s': "down",
'left': "left",
'right': "right",
'up': "up",
'down': "down",
'pause': "pause",
}
key_map = {
'key_a': key.A,
'key_d': key.D,
'key_w': key.W,
'key_s': key.S,
'left': key.LEFT,
'right': key.RIGHT,
'up': key.UP,
'down': key.DOWN,
'pause': key.P,
}
"""
Each object has a list of keys assigned to it. When an object is added
to the game system its keys are passed in to control which binds messages
to that object. The message logic is handled by the object"""
# Function, name of object mapped to. Initially all set to none
name_map = {
"left": None,
"right": None,
"up": None,
"down": None,
"a": None,
"d": None,
"w": None,
"s": None
}
def __init__(self, window):
self.configure_keys()
window.push_handlers(self)
def bind_key_name(self, function, object_name):
"""Bind a key to an object name"""
for funcname, name in self.name_map.items():
if funcname == function:
self.name_map[
funcname] = object_name # getattr(self, funcname)
def bind_keys(self, objects):
"""Configure name map: My goal here is to associate a named object
with a specific function"""
for object in objects:
if object.keys != None:
for key in object.keys:
if key != None:
self.bind_key_name(key, object.name)
# for funcname, key in self.key_map.items():
def configure_keys(self):
"""Configure key map"""
self.active_functions = set()
self.key2func = {}
for funcname, key in self.key_map.items():
self.key2func[key] = getattr(self, funcname)
@staticmethod
def clamp(value, minval=-1.0, maxval=1.0):
return min(max(value, minval), maxval)
def on_mouse_motion(self, x, y, dx, dy):
# Mouse controls cyclic
self.cyclic_pitch = self.clamp(
self.cyclic_pitch - dy * self.cyclic_mouse_sensitivity)
self.cyclic_roll = self.clamp(
self.cyclic_roll - dx * self.cyclic_mouse_sensitivity)
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
self.collective = self.clamp(
self.collective - scroll_y * self.collective_wheel_sensitivity, minval=0)
def on_key_press(self, symbol, modifier):
if symbol in self.key2func:
self.active_functions.add(self.key2func[symbol])
return True
def on_key_release(self, symbol, modifier):
try:
self.active_functions.remove(self.key2func[symbol])
return True
except KeyError:
pass
def update(self, gamesystem, dt):
if abs(self.pedal) < self.pedal_keyboard_speed / 2:
self.pedal = 0
'''
if abs(self.cyclic_pitch) < self.cyclic_keyboard_speed / 2:
self.cyclic_pitch = 0
if abs(self.cyclic_roll) < self.cyclic_keyboard_speed / 2:
self.cyclic_roll = 0
'''
for func in self.active_functions:
func(gamesystem, dt)
self.pedal *= self.pedal_centering
# self.cyclic_pitch *= self.pedal_centering
# self.cyclic_roll *= self.pedal_centering
# Control functions #
"""so The problem now is how do I map keys to certain functions.
I am currently mapping object names to keys. For now ignore and
verify that multiple objects can take input."""
def key_a(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['a'], "game", self.message_map['key_a']))
def key_d(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['d'], "game", self.message_map['key_d']))
def key_w(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['w'], "game", self.message_map['key_w']))
def key_s(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['s'], "game", self.message_map['key_s']))
def down(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['down'], "game", self.message_map['down']))
def up(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['up'], "game", self.message_map['up']))
def left(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['left'], "game", self.message_map['left']))
def right(self, gamesystem, dt):
gamesystem.send(
message(self.name_map['right'], "game", self.message_map['right']))
def pause(self, dt):
global PAUSED
PAUSED = not PAUSED
|
lordmauve/lepton | examples/generate.py | on_resize | python | def on_resize(width, height):
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(30, 1.0*width/height, 0.1, 1000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity() | Setup 3D projection | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/generate.py#L32-L39 | null | #############################################################################
#
# Copyright (c) 1008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Visual test of domain generated vectors"""
__version__ = '$Id$'
import os
import math
from pyglet import image
from pyglet.gl import *
from lepton import Particle, ParticleGroup, default_system
from lepton.texturizer import SpriteTexturizer, create_point_texture
from lepton.renderer import PointRenderer
from lepton.emitter import StaticEmitter
from lepton.controller import Lifetime, Fader
from lepton import domain
win = pyglet.window.Window(resizable=True, visible=False)
win.clear()
win.on_resize = on_resize
glEnable(GL_BLEND)
glShadeModel(GL_SMOOTH)
glBlendFunc(GL_SRC_ALPHA,GL_ONE)
glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST);
glDisable(GL_DEPTH_TEST)
renderer = PointRenderer(7, SpriteTexturizer(create_point_texture(16, 10)))
domains = [
domain.Sphere((0,0,0), 1),
domain.Disc((0,0,0), (-1,0,0), 1),
domain.Cylinder((-0.5,0,0), (0.5,0,0), 1),
domain.Cone((-0.5,0,0), (0.5,0,0), 1),
]
groups = [
ParticleGroup(
controllers=[
StaticEmitter(
rate=15000,
position=domain,
template=Particle(
color=(1,1,1),
size=(.1,.1,0),
)
)
],
renderer=renderer)
for domain in domains]
default_system.add_global_controller(
Lifetime(0.5),
Fader(max_alpha=0.7,fade_out_start=0.1, fade_out_end=0.5),
)
pyglet.clock.schedule_interval(default_system.update, (1.0/40.0))
pyglet.clock.set_fps_limit(None)
translations = [(-1.1,-1.1), (1.1,-1.1), (-1.1,1.1), (1.1,1.1)]
rot = 0
@win.event
def on_draw():
global rot
win.clear()
for (xt, yt), group in zip(translations, groups):
glLoadIdentity()
glTranslatef(xt, yt, -8)
glRotatef(rot, 0, 1, 0)
group.draw()
for domain in domains:
domain.inner_radius = math.sin(rot * 0.05) * 0.5 + 0.5
rot += 0.75
if __name__ == '__main__':
win.set_visible(True)
pyglet.app.run()
|
lordmauve/lepton | examples/tunnel.py | vary_radius | python | def vary_radius(dt):
global time
time += dt
disc.inner_radius = disc.outer_radius = 2.5 + math.sin(time / 2.0) * 1.5 | Vary the disc radius over time | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/tunnel.py#L86-L90 | null | #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Infinite tunnel using textured billboard quads"""
__version__ = '$Id$'
import os
import math
from pyglet import image
from pyglet.gl import *
from lepton import Particle, ParticleGroup, default_system
from lepton.renderer import BillboardRenderer
from lepton.texturizer import SpriteTexturizer
from lepton.emitter import StaticEmitter
from lepton.controller import Gravity, Movement, Fader, Growth, Collector
from lepton import domain
win = pyglet.window.Window(resizable=True, visible=False)
win.clear()
def on_resize(width, height):
"""Initial settings for the OpenGL state machine, clear color, window size, etc"""
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(70, 1.0*width/height, 0.1, 1000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
win.on_resize = on_resize
glEnable(GL_BLEND)
glShadeModel(GL_SMOOTH)
glBlendFunc(GL_SRC_ALPHA,GL_ONE)
glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST);
glDisable(GL_DEPTH_TEST)
disc = domain.Disc((0,0,-50), (0, 0, 1), 1.5, 1.5)
viewer_plane = domain.Plane((0,0,0), (0,0,-1))
jet = StaticEmitter(
rate=2000,
position=disc,
template=Particle(
color=(1,1,0),
),
deviation=Particle(
velocity=(0,0,15),
up=(0,0,math.pi),
color=(0.1, 0.1, 0.1))
)
default_system.add_global_controller(
Movement(max_velocity=10),
Collector(viewer_plane),
Gravity((0,0,15)),
Growth(0.17),
Fader(fade_in_end=0, max_alpha=0.3, fade_out_start=0, fade_out_end=8.0),
)
texture = image.load(os.path.join(os.path.dirname(__file__), 'Particle.bmp')).get_texture()
group = ParticleGroup(controllers=[jet],
renderer=BillboardRenderer(SpriteTexturizer(texture.id)))
default_system.run_ahead(5, 40)
pyglet.clock.schedule_interval(default_system.update, (1.0/30.0))
pyglet.clock.set_fps_limit(None)
time = 0
def ring(dt):
"""Emit a ring of particles periodically"""
jet.emit(1000, group)
pyglet.clock.schedule_interval(ring, 5)
pyglet.clock.schedule_interval(vary_radius, 1.0/10.0)
vary_radius(0)
@win.event
def on_draw():
win.clear()
glLoadIdentity()
default_system.draw()
if __name__ == '__main__':
win.set_visible(True)
pyglet.app.run()
|
lordmauve/lepton | examples/bouncy.py | resize | python | def resize(widthWindow, heightWindow):
glEnable(GL_BLEND)
glEnable(GL_POINT_SMOOTH)
glShadeModel(GL_SMOOTH)# Enables Smooth Shading
glBlendFunc(GL_SRC_ALPHA,GL_ONE)#Type Of Blending To Perform
glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST);#Really Nice Perspective Calculations
glHint(GL_POINT_SMOOTH_HINT,GL_NICEST);#Really Nice Point Smoothing
glDisable(GL_DEPTH_TEST) | Initial settings for the OpenGL state machine, clear color, window size, etc | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/bouncy.py#L46-L54 | null | #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Bouncy Bouncy
Demos how to use domains to position particles and redirect them
using the Bounce controller.
"""
__version__ = '$Id$'
from pyglet import image
from pyglet.gl import *
from lepton import Particle, ParticleGroup, default_system
from lepton.renderer import PointRenderer
from lepton.emitter import StaticEmitter
from lepton.controller import Movement, Bounce, Gravity, Drag
from lepton.domain import AABox, Sphere
class Bumper:
color = (1, 0, 0)
def __init__(self, position, radius):
self.domain = Sphere(position, radius)
self.controller = Bounce(
self.domain, bounce=1.5, friction=-0.25, callback=self.set_bumper_color)
def set_bumper_color(self, particle, group, bumper, collision_point, collision_normal):
"""Set bumper color to the color of the particle that collided with it"""
self.color = tuple(particle.color)[:3]
win = pyglet.window.Window(resizable=True, visible=False)
win.clear()
ball_count = 100
ball_size = 15
bumper_count = 8
# Screen domain is a box the size of the screen
screen_domain = AABox((ball_size/2.0, ball_size/2.0, 0),
(win.width-ball_size/2.0,win.height-ball_size/2.0,0))
bumpers = []
for i in range(bumper_count):
bumper = Bumper(
(win.width/(bumper_count-1) * i, win.height*2.0/3.0 - (i % 2) * win.height/3, 0),
win.height / 15)
bumpers.append(bumper)
up_fan = AABox((win.width/2 - win.width/12, 0, -1), (win.width/2 + win.width/12, win.height * 0.8, 1))
left_fan = AABox((win.width/2 - win.width/12, win.height * 0.8, -1), (win.width/2, win.height, 1))
right_fan = AABox((win.width/2, win.height * 0.8, -1), (win.width/2 + win.width/12, win.height, 1))
default_system.add_global_controller(
Gravity((0,-50,0)),
Movement(max_velocity=250),
Drag(0.0, 0.0001, (0, 800, 0), domain=up_fan),
Drag(0.0, 0.0001, (-200, 400, 0), domain=left_fan),
Drag(0.0, 0.0001, (200, 400, 0), domain=right_fan),
*[bumper.controller for bumper in bumpers]
)
# Make the bounce controller for the screen boundary run last
# to ensure no particles can "escape"
default_system.add_global_controller(
Bounce(screen_domain, friction=0.01)
)
group = ParticleGroup(renderer=PointRenderer(point_size=ball_size))
ball_emitter = StaticEmitter(
position=screen_domain,
deviation=Particle(velocity=(60,60,0), color=(0.3,0.3,0.3,0)),
color=[(1,0,0,1), (0,1,0,1), (0,0,1,1), (1,1,0,1), (0,1,1,1), (1,1,1,1)],
mass=[1],
)
ball_emitter.emit(ball_count, group)
group.update(0)
# Kill particles inside the bumpers
for p in group:
for bumper in bumpers:
if p.position in bumper.domain:
group.kill(p)
win.resize = resize
win.set_visible(True)
win.resize(win.width, win.height)
pyglet.clock.schedule_interval(default_system.update, 1.0/30.0)
#pyglet.clock.schedule_interval(lambda x: default_system.update(0.05), (1.0/5.0))
def draw_bumpers():
glPointSize(bumpers[0].domain.radius * 2 - ball_size/2.0 - 15)
glColor3f(1.0, 1.0, 0)
glBegin(GL_POINTS)
for bumper in bumpers:
cx, cy, cz = bumper.domain.center
glVertex3f(cx, cy, cz)
glEnd()
glPointSize(bumpers[0].domain.radius * 2 - ball_size/2.0)
glBegin(GL_POINTS)
for bumper in bumpers:
cx, cy, cz = bumper.domain.center
glColor3f(*bumper.color)
glVertex3f(cx, cy, cz)
glEnd()
@win.event
def on_draw():
win.clear()
glLoadIdentity()
draw_bumpers()
default_system.draw()
if __name__ == '__main__':
pyglet.app.run()
|
lordmauve/lepton | examples/bouncy.py | Bumper.set_bumper_color | python | def set_bumper_color(self, particle, group, bumper, collision_point, collision_normal):
self.color = tuple(particle.color)[:3] | Set bumper color to the color of the particle that collided with it | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/bouncy.py#L38-L40 | null | class Bumper:
color = (1, 0, 0)
def __init__(self, position, radius):
self.domain = Sphere(position, radius)
self.controller = Bounce(
self.domain, bounce=1.5, friction=-0.25, callback=self.set_bumper_color)
|
lordmauve/lepton | lepton/domain.py | Box | python | def Box(*args, **kw):
import warnings
warnings.warn("lepton.domain.Box is deprecated, use AABox instead. "
"This domain class will mean something different in future versions of lepton",
stacklevel=2)
return AABox(*args, **kw) | Axis-aligned box domain (same as AABox for now)
WARNING: Deprecated, use AABox instead. This domain will mean something
different in future versions of lepton | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/lepton/domain.py#L103-L113 | null | #
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#
"""Domains represent regions of space and are used for generating vectors
(positions, velocities, colors). Domains are also used by controllers to test
for collision. Colliding with domains can then influence particle
behavior
"""
__version__ = '$Id$'
from .particle_struct import Vec3
from ._domain import Line, Plane, AABox, Sphere, Disc, Cylinder, Cone
class Domain(object):
"""Domain abstract base class"""
def generate(self):
"""Return a point within the domain as a 3-tuple. For domains with a
non-zero volume, 'point in domain' is guaranteed to return true.
"""
raise NotImplementedError
def __contains__(self, point):
"""Return true if point is inside the domain, false if not."""
raise NotImplementedError
def closest_point_to(self, point):
"""Return the closest point in the domain to the given point
and the surface normal vector at that point. If the given
point is in the domain, return the point and a null normal
vector.
Note the closest point may not in the domain, if there are
multiple points in the domain that are closest. In that case
return the average of the closest points and a null normal
vector.
"""
raise NotImplementedError
def intersect(self, start_point, end_point):
"""For the line segment defined by the start and end point specified
(coordinate 3-tuples), return the point closest to the start point
where the line segment intersects surface of the domain, and the
surface normal unit vector at that point as a 2-tuple. If the line
segment does not intersect the domain, return the 2-tuple (None,
None).
Only 2 or 3 dimensional domains may be intersected.
Note performance is more important than absolute accuracy with this
method, so approximations are acceptable.
"""
raise NotImplementedError
EPSILON = 0.00001
class Point(object):
"""Simple single point domain"""
def __init__(self, point):
self.point = Vec3(*point)
def __contains__(self, point):
x, y, z = point
cx, cy, cz = self.point
return (x - cx) ** 2 + (y - cy) ** 2 + (z - cz) ** 2 < EPSILON
def generate(self):
"""Generate always returns the domain point"""
return tuple(self.point)
def closest_point_to(self, point):
"""Return the closest point in the domain to the given point
and a normal vector at that point.
The point returned is always the domain point.
The normal returned for a point domain is a unit vector parallel to
the line formed between the supplied point and the domain point,
facing outward from the domain. This effectively treats the point
domain like a zero-radius sphere.
"""
return self.point, -(self.point - point).normalize()
def intersect(self, start_point, end_point):
"""You cannot intersect a point domain"""
return None, None
|
lordmauve/lepton | lepton/texturizer.py | _atlas_from_images | python | def _atlas_from_images(images):
import pyglet
widest = max(img.width for img in images)
height = sum(img.height for img in images)
atlas = pyglet.image.atlas.TextureAtlas(
width=_nearest_pow2(widest), height=_nearest_pow2(height))
textures = [atlas.add(image) for image in images]
return atlas, textures | Create a pyglet texture atlas from a sequence of images.
Return a tuple of (atlas, textures) | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/lepton/texturizer.py#L34-L45 | [
"def _nearest_pow2(v):\n # From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2\n # Credit: Sean Anderson\n v -= 1\n v |= v >> 1\n v |= v >> 2\n v |= v >> 4\n v |= v >> 8\n v |= v >> 16\n return v + 1\n"
] | #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Particle renderer texturizers
Texturizers generate texture coordinates for particles and perform
the necessary OpenGL state changes to setup texturing for rendering
"""
import math
import ctypes
from . import _texturizer
def _nearest_pow2(v):
# From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
# Credit: Sean Anderson
v -= 1
v |= v >> 1
v |= v >> 2
v |= v >> 4
v |= v >> 8
v |= v >> 16
return v + 1
class SpriteTexturizer(_texturizer.SpriteTexturizer):
__doc__ = _texturizer.SpriteTexturizer.__doc__
@classmethod
def from_images(cls, images, weights=None, filter=None, wrap=None,
aspect_adjust_width=False, aspect_adjust_height=False):
"""Create a SpriteTexturizer from a sequence of Pyglet images.
Note all the images must be able to fit into a single OpenGL texture, so
their combined size should typically be less than 1024x1024
"""
import pyglet
atlas, textures = _atlas_from_images(images)
texturizer = cls(
atlas.texture.id, [tex.tex_coords for tex in textures],
weights, filter or pyglet.gl.GL_LINEAR, wrap or pyglet.gl.GL_CLAMP,
aspect_adjust_width, aspect_adjust_height)
texturizer.atlas = atlas
texturizer.textures = textures
return texturizer
class FlipBookTexturizer(_texturizer.FlipBookTexturizer):
__doc__ = _texturizer.FlipBookTexturizer.__doc__
@classmethod
def from_images(cls, images, duration, loop=True, dimension=2, filter=None, wrap=None,
aspect_adjust_width=False, aspect_adjust_height=False):
"""Create a FlipBookTexturizer from a sequence of Pyglet images
Note all the images must be able to fit into a single OpenGL texture, so
their combined size should typically be less than 1024x1024
"""
import pyglet
atlas, textures = _atlas_from_images(images)
texturizer = cls(
atlas.texture.id, [tex.tex_coords for tex in textures],
duration, loop, dimension,
filter or pyglet.gl.GL_LINEAR, wrap or pyglet.gl.GL_CLAMP,
aspect_adjust_width, aspect_adjust_height)
texturizer.atlas = atlas
texturizer.textures = textures
return texturizer
def create_point_texture(size, feather=0):
"""Create and load a circular grayscale image centered in a square texture
with a width and height of size. The radius of the circle is size / 2.
Since size is used as the texture width and height, it should typically
be a power of two.
Feather determines the softness of the edge of the circle. The default,
zero, creates a hard edged circle. Larger feather values create softer
edges for blending. The point at the center of the texture is always
white.
Return the OpenGL texture name (id) for the resulting texture. This
value can be passed directy to a texturizer or glBindTexture
"""
from pyglet import gl
assert feather >= 0, 'Expected feather value >= 0'
coords = range(size)
texel = (gl.GLfloat * size**2)()
r = size / 2.0
c = feather + 1.0
for y in coords:
col = y * size
for x in coords:
d = math.sqrt((x - r)**2 + (y - r)**2)
if d < r and (1.0 - 1.0 / (d / r - 1.0)) < 100:
texel[x + col] = c**2 / c**(1.0 - 1.0 / (d / r - 1.0))
else:
texel[x + col] = 0
id = gl.GLuint()
gl.glGenTextures(1, ctypes.byref(id))
gl.glBindTexture(gl.GL_TEXTURE_2D, id.value)
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 4)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_LUMINANCE, size, size, 0,
gl.GL_LUMINANCE, gl.GL_FLOAT, ctypes.byref(texel))
gl.glFlush()
return id.value
|
lordmauve/lepton | lepton/texturizer.py | create_point_texture | python | def create_point_texture(size, feather=0):
from pyglet import gl
assert feather >= 0, 'Expected feather value >= 0'
coords = range(size)
texel = (gl.GLfloat * size**2)()
r = size / 2.0
c = feather + 1.0
for y in coords:
col = y * size
for x in coords:
d = math.sqrt((x - r)**2 + (y - r)**2)
if d < r and (1.0 - 1.0 / (d / r - 1.0)) < 100:
texel[x + col] = c**2 / c**(1.0 - 1.0 / (d / r - 1.0))
else:
texel[x + col] = 0
id = gl.GLuint()
gl.glGenTextures(1, ctypes.byref(id))
gl.glBindTexture(gl.GL_TEXTURE_2D, id.value)
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 4)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_LUMINANCE, size, size, 0,
gl.GL_LUMINANCE, gl.GL_FLOAT, ctypes.byref(texel))
gl.glFlush()
return id.value | Create and load a circular grayscale image centered in a square texture
with a width and height of size. The radius of the circle is size / 2.
Since size is used as the texture width and height, it should typically
be a power of two.
Feather determines the softness of the edge of the circle. The default,
zero, creates a hard edged circle. Larger feather values create softer
edges for blending. The point at the center of the texture is always
white.
Return the OpenGL texture name (id) for the resulting texture. This
value can be passed directy to a texturizer or glBindTexture | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/lepton/texturizer.py#L93-L130 | null | #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Particle renderer texturizers
Texturizers generate texture coordinates for particles and perform
the necessary OpenGL state changes to setup texturing for rendering
"""
import math
import ctypes
from . import _texturizer
def _nearest_pow2(v):
# From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
# Credit: Sean Anderson
v -= 1
v |= v >> 1
v |= v >> 2
v |= v >> 4
v |= v >> 8
v |= v >> 16
return v + 1
def _atlas_from_images(images):
"""Create a pyglet texture atlas from a sequence of images.
Return a tuple of (atlas, textures)
"""
import pyglet
widest = max(img.width for img in images)
height = sum(img.height for img in images)
atlas = pyglet.image.atlas.TextureAtlas(
width=_nearest_pow2(widest), height=_nearest_pow2(height))
textures = [atlas.add(image) for image in images]
return atlas, textures
class SpriteTexturizer(_texturizer.SpriteTexturizer):
__doc__ = _texturizer.SpriteTexturizer.__doc__
@classmethod
def from_images(cls, images, weights=None, filter=None, wrap=None,
aspect_adjust_width=False, aspect_adjust_height=False):
"""Create a SpriteTexturizer from a sequence of Pyglet images.
Note all the images must be able to fit into a single OpenGL texture, so
their combined size should typically be less than 1024x1024
"""
import pyglet
atlas, textures = _atlas_from_images(images)
texturizer = cls(
atlas.texture.id, [tex.tex_coords for tex in textures],
weights, filter or pyglet.gl.GL_LINEAR, wrap or pyglet.gl.GL_CLAMP,
aspect_adjust_width, aspect_adjust_height)
texturizer.atlas = atlas
texturizer.textures = textures
return texturizer
class FlipBookTexturizer(_texturizer.FlipBookTexturizer):
__doc__ = _texturizer.FlipBookTexturizer.__doc__
@classmethod
def from_images(cls, images, duration, loop=True, dimension=2, filter=None, wrap=None,
aspect_adjust_width=False, aspect_adjust_height=False):
"""Create a FlipBookTexturizer from a sequence of Pyglet images
Note all the images must be able to fit into a single OpenGL texture, so
their combined size should typically be less than 1024x1024
"""
import pyglet
atlas, textures = _atlas_from_images(images)
texturizer = cls(
atlas.texture.id, [tex.tex_coords for tex in textures],
duration, loop, dimension,
filter or pyglet.gl.GL_LINEAR, wrap or pyglet.gl.GL_CLAMP,
aspect_adjust_width, aspect_adjust_height)
texturizer.atlas = atlas
texturizer.textures = textures
return texturizer
|
lordmauve/lepton | lepton/texturizer.py | SpriteTexturizer.from_images | python | def from_images(cls, images, weights=None, filter=None, wrap=None,
aspect_adjust_width=False, aspect_adjust_height=False):
import pyglet
atlas, textures = _atlas_from_images(images)
texturizer = cls(
atlas.texture.id, [tex.tex_coords for tex in textures],
weights, filter or pyglet.gl.GL_LINEAR, wrap or pyglet.gl.GL_CLAMP,
aspect_adjust_width, aspect_adjust_height)
texturizer.atlas = atlas
texturizer.textures = textures
return texturizer | Create a SpriteTexturizer from a sequence of Pyglet images.
Note all the images must be able to fit into a single OpenGL texture, so
their combined size should typically be less than 1024x1024 | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/lepton/texturizer.py#L52-L67 | [
"def _atlas_from_images(images):\n\t\"\"\"Create a pyglet texture atlas from a sequence of images.\n\tReturn a tuple of (atlas, textures)\n\t\"\"\"\n\timport pyglet\n\twidest = max(img.width for img in images)\n\theight = sum(img.height for img in images)\n\n\tatlas = pyglet.image.atlas.TextureAtlas(\n\t\twidth=_nearest_pow2(widest), height=_nearest_pow2(height))\n\ttextures = [atlas.add(image) for image in images]\n\treturn atlas, textures\n"
] | class SpriteTexturizer(_texturizer.SpriteTexturizer):
__doc__ = _texturizer.SpriteTexturizer.__doc__
@classmethod
|
lordmauve/lepton | examples/games/bonk/game.py | game_system.bind_objects | python | def bind_objects(self, *objects):
"""Bind one or more objects"""
self.control.bind_keys(objects)
self.objects += objects | Bind one or more objects | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/game.py#L53-L56 | null | class game_system(object):
"""game system controls when objects in the system get updated"""
objects = ()
controllers = ()
def __init__(self, window=None, objects=(), controllers=()):
# Create a font for score
self.font = font.load('Arial', 18)
# pyglet.font.Text object to display the score
self.score = font.Text(self.font, x=10, y=10, color=(1, 1, 1, 1))
self.control = Controls(window)
self.bind_objects(*objects)
self.bind_controllers(*controllers)
self.score_right = 0
self.score_left = 0
self.draw_score()
self.height = window.width
self.width = window.height
# should this not be handled within the object?
# map effects to different objects in the game
effects_map = {"fire": None}
def draw_score(self):
self.score.text = ("left: %d right: %d") % (
self.score_left, self.score_right)
self.score.draw()
def bind_controllers(self, *controllers):
"""Bind one or more controllers"""
self.controllers += controllers
def bind_objects(self, *objects):
"""Bind one or more objects"""
self.control.bind_keys(objects)
self.objects += objects
def add(self, object):
"""Add an object to the list of objects that get traveresed when update is called"""
self.objects += object
def __iter__(self):
"""Iterate the particles in the group"""
# Make a copy of the particle set in case the group is
# modified during iteration
return iter(set(self.objects)) # used to be particles
def __len__(self):
"""Return the number of particles in the group"""
return len(self.objects)
def send(self, message):
# send the message to correct object
for object in self.objects:
if object.name == message.to:
object.move(message.content)
# Check to see if score has occured
if message.to == "score":
print("score message")
self.update_score(message.content)
def update_score(self, message):
if message == "right:score":
self.score_right += 1
if message == "left:score":
self.score_left += 1
# draw the score
self.draw_score()
# reset ball because its gone outside of the screen
for object in self.objects:
if object.name == "ball":
object.reset_ball(self.height / 2, self.width / 2)
def update(self, time_delta):
"""Update all sprites in the system. time_delta is the
time since the last update (in arbitrary time units).
This method can be conveniently scheduled using the Pyglet
scheduler method: pyglet.clock.schedule_interval
"""
self.control.update(self, time_delta)
for object in self.objects:
object.update(time_delta)
# object.sprite.last_position = object.sprite.position
# object.sprite.last_velocity = object.sprite.velocity
# for group in self:
for controller in self.controllers:
controller(time_delta, self)
def draw(self):
"""Draw all the sprites in the system using their renderers.
This method is convenient to call from you Pyglet window's
on_draw handler to redraw particles when needed.
"""
glPushAttrib(GL_ALL_ATTRIB_BITS)
self.draw_score()
for sprite in self:
sprite.draw()
glPopAttrib()
|
lordmauve/lepton | examples/games/bonk/game.py | game_system.update | python | def update(self, time_delta):
"""Update all sprites in the system. time_delta is the
time since the last update (in arbitrary time units).
This method can be conveniently scheduled using the Pyglet
scheduler method: pyglet.clock.schedule_interval
"""
self.control.update(self, time_delta)
for object in self.objects:
object.update(time_delta)
# object.sprite.last_position = object.sprite.position
# object.sprite.last_velocity = object.sprite.velocity
# for group in self:
for controller in self.controllers:
controller(time_delta, self) | Update all sprites in the system. time_delta is the
time since the last update (in arbitrary time units).
This method can be conveniently scheduled using the Pyglet
scheduler method: pyglet.clock.schedule_interval | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/game.py#L94-L109 | null | class game_system(object):
"""game system controls when objects in the system get updated"""
objects = ()
controllers = ()
def __init__(self, window=None, objects=(), controllers=()):
# Create a font for score
self.font = font.load('Arial', 18)
# pyglet.font.Text object to display the score
self.score = font.Text(self.font, x=10, y=10, color=(1, 1, 1, 1))
self.control = Controls(window)
self.bind_objects(*objects)
self.bind_controllers(*controllers)
self.score_right = 0
self.score_left = 0
self.draw_score()
self.height = window.width
self.width = window.height
# should this not be handled within the object?
# map effects to different objects in the game
effects_map = {"fire": None}
def draw_score(self):
self.score.text = ("left: %d right: %d") % (
self.score_left, self.score_right)
self.score.draw()
def bind_controllers(self, *controllers):
"""Bind one or more controllers"""
self.controllers += controllers
def bind_objects(self, *objects):
"""Bind one or more objects"""
self.control.bind_keys(objects)
self.objects += objects
def add(self, object):
"""Add an object to the list of objects that get traveresed when update is called"""
self.objects += object
def __iter__(self):
"""Iterate the particles in the group"""
# Make a copy of the particle set in case the group is
# modified during iteration
return iter(set(self.objects)) # used to be particles
def __len__(self):
"""Return the number of particles in the group"""
return len(self.objects)
def send(self, message):
# send the message to correct object
for object in self.objects:
if object.name == message.to:
object.move(message.content)
# Check to see if score has occured
if message.to == "score":
print("score message")
self.update_score(message.content)
def update_score(self, message):
if message == "right:score":
self.score_right += 1
if message == "left:score":
self.score_left += 1
# draw the score
self.draw_score()
# reset ball because its gone outside of the screen
for object in self.objects:
if object.name == "ball":
object.reset_ball(self.height / 2, self.width / 2)
def update(self, time_delta):
"""Update all sprites in the system. time_delta is the
time since the last update (in arbitrary time units).
This method can be conveniently scheduled using the Pyglet
scheduler method: pyglet.clock.schedule_interval
"""
self.control.update(self, time_delta)
for object in self.objects:
object.update(time_delta)
# object.sprite.last_position = object.sprite.position
# object.sprite.last_velocity = object.sprite.velocity
# for group in self:
for controller in self.controllers:
controller(time_delta, self)
def draw(self):
"""Draw all the sprites in the system using their renderers.
This method is convenient to call from you Pyglet window's
on_draw handler to redraw particles when needed.
"""
glPushAttrib(GL_ALL_ATTRIB_BITS)
self.draw_score()
for sprite in self:
sprite.draw()
glPopAttrib()
|
lordmauve/lepton | examples/games/bonk/game.py | game_system.draw | python | def draw(self):
"""Draw all the sprites in the system using their renderers.
This method is convenient to call from you Pyglet window's
on_draw handler to redraw particles when needed.
"""
glPushAttrib(GL_ALL_ATTRIB_BITS)
self.draw_score()
for sprite in self:
sprite.draw()
glPopAttrib() | Draw all the sprites in the system using their renderers.
This method is convenient to call from you Pyglet window's
on_draw handler to redraw particles when needed. | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/game.py#L111-L121 | null | class game_system(object):
"""game system controls when objects in the system get updated"""
objects = ()
controllers = ()
def __init__(self, window=None, objects=(), controllers=()):
# Create a font for score
self.font = font.load('Arial', 18)
# pyglet.font.Text object to display the score
self.score = font.Text(self.font, x=10, y=10, color=(1, 1, 1, 1))
self.control = Controls(window)
self.bind_objects(*objects)
self.bind_controllers(*controllers)
self.score_right = 0
self.score_left = 0
self.draw_score()
self.height = window.width
self.width = window.height
# should this not be handled within the object?
# map effects to different objects in the game
effects_map = {"fire": None}
def draw_score(self):
self.score.text = ("left: %d right: %d") % (
self.score_left, self.score_right)
self.score.draw()
def bind_controllers(self, *controllers):
"""Bind one or more controllers"""
self.controllers += controllers
def bind_objects(self, *objects):
"""Bind one or more objects"""
self.control.bind_keys(objects)
self.objects += objects
def add(self, object):
"""Add an object to the list of objects that get traveresed when update is called"""
self.objects += object
def __iter__(self):
"""Iterate the particles in the group"""
# Make a copy of the particle set in case the group is
# modified during iteration
return iter(set(self.objects)) # used to be particles
def __len__(self):
"""Return the number of particles in the group"""
return len(self.objects)
def send(self, message):
# send the message to correct object
for object in self.objects:
if object.name == message.to:
object.move(message.content)
# Check to see if score has occured
if message.to == "score":
print("score message")
self.update_score(message.content)
def update_score(self, message):
if message == "right:score":
self.score_right += 1
if message == "left:score":
self.score_left += 1
# draw the score
self.draw_score()
# reset ball because its gone outside of the screen
for object in self.objects:
if object.name == "ball":
object.reset_ball(self.height / 2, self.width / 2)
def update(self, time_delta):
"""Update all sprites in the system. time_delta is the
time since the last update (in arbitrary time units).
This method can be conveniently scheduled using the Pyglet
scheduler method: pyglet.clock.schedule_interval
"""
self.control.update(self, time_delta)
for object in self.objects:
object.update(time_delta)
# object.sprite.last_position = object.sprite.position
# object.sprite.last_velocity = object.sprite.velocity
# for group in self:
for controller in self.controllers:
controller(time_delta, self)
def draw(self):
"""Draw all the sprites in the system using their renderers.
This method is convenient to call from you Pyglet window's
on_draw handler to redraw particles when needed.
"""
glPushAttrib(GL_ALL_ATTRIB_BITS)
self.draw_score()
for sprite in self:
sprite.draw()
glPopAttrib()
|
lordmauve/lepton | examples/games/bonk/game.py | ball.reset_ball | python | def reset_ball(self, x, y):
"""reset ball to set location on the screen"""
self.sprite.position.x = x
self.sprite.position.y = y | reset ball to set location on the screen | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/game.py#L246-L249 | null | class ball(object):
"""draw a ball using glpoints"""
def __init__(self, position=(210, 100, 0), velocity=(60, 60, 0), color=(0.0, 0.0, 1.0, 1.0), size=(10.0, 0.0, 0.0)):
self.sprite = Sprite(
position=position, velocity=velocity, color=color, size=size)
self.name = "ball"
self.keys = None
self.particle_group = None
# We don't care if anything collides with the ball
self.domain = None
def move(self, message):
print(message)
if message == "a":
self.sprite.position.x += 10
def add_particle_group(self, group):
"""Bind a particle group to this ball"""
self.particle_group = group
def draw(self):
"""Render the particles as points"""
# x = self.sprite.position.x
# y = self.sprite.position.y
# glPointSize( self.sprite.size.x)
# glBegin(GL_POINTS)
# glColor4fv(self.sprite.color)
# glVertex3f(x, y, 0)
# glEnd()
def reset_ball(self, x, y):
"""reset ball to set location on the screen"""
self.sprite.position.x = x
self.sprite.position.y = y
def update_particle_group(self, td):
# print("update particle group")
self.particle_group.template.position = (
self.sprite.position.x,
self.sprite.position.y,
self.sprite.position.z)
# comet.template.velocity = (
# self.sprite.position.x*0.05 - self.sprite.last_position.x,
# self.sprite.position.y*0.05 - self.sprite.last_position.y,
# self.sprite.position.z*0.05 - self.sprite.last_position.z)
default_system.update(td)
def update(self, td):
"""Update state of ball"""
self.sprite.last_position = self.sprite.position
self.sprite.last_velocity = self.sprite.velocity
if self.particle_group != None:
self.update_particle_group(td)
|
lordmauve/lepton | examples/games/bonk/game.py | ball.update | python | def update(self, td):
"""Update state of ball"""
self.sprite.last_position = self.sprite.position
self.sprite.last_velocity = self.sprite.velocity
if self.particle_group != None:
self.update_particle_group(td) | Update state of ball | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/game.py#L263-L268 | null | class ball(object):
"""draw a ball using glpoints"""
def __init__(self, position=(210, 100, 0), velocity=(60, 60, 0), color=(0.0, 0.0, 1.0, 1.0), size=(10.0, 0.0, 0.0)):
self.sprite = Sprite(
position=position, velocity=velocity, color=color, size=size)
self.name = "ball"
self.keys = None
self.particle_group = None
# We don't care if anything collides with the ball
self.domain = None
def move(self, message):
print(message)
if message == "a":
self.sprite.position.x += 10
def add_particle_group(self, group):
"""Bind a particle group to this ball"""
self.particle_group = group
def draw(self):
"""Render the particles as points"""
# x = self.sprite.position.x
# y = self.sprite.position.y
# glPointSize( self.sprite.size.x)
# glBegin(GL_POINTS)
# glColor4fv(self.sprite.color)
# glVertex3f(x, y, 0)
# glEnd()
def reset_ball(self, x, y):
"""reset ball to set location on the screen"""
self.sprite.position.x = x
self.sprite.position.y = y
def update_particle_group(self, td):
# print("update particle group")
self.particle_group.template.position = (
self.sprite.position.x,
self.sprite.position.y,
self.sprite.position.z)
# comet.template.velocity = (
# self.sprite.position.x*0.05 - self.sprite.last_position.x,
# self.sprite.position.y*0.05 - self.sprite.last_position.y,
# self.sprite.position.z*0.05 - self.sprite.last_position.z)
default_system.update(td)
def update(self, td):
"""Update state of ball"""
self.sprite.last_position = self.sprite.position
self.sprite.last_velocity = self.sprite.velocity
if self.particle_group != None:
self.update_particle_group(td)
|
lordmauve/lepton | examples/games/bonk/game.py | Box.generate | python | def generate(self):
"""Return a random point inside the box"""
x, y, z = self.point1
return (x + self.size_x * random(),
y + self.size_y * random(),
z + self.size_z * random()) | Return a random point inside the box | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/game.py#L475-L480 | null | class Box(Domain):
"""Axis aligned rectangular prism"""
def __init__(self, point1, point2):
"""point1 and point2 define any two opposite corners of the box"""
p1 = Vec3(*point1)
p2 = Vec3(*point2)
self.point1 = Vec3()
self.point2 = Vec3()
self.point1.x = min(p1.x, p2.x)
self.point1.y = min(p1.y, p2.y)
self.point1.z = min(p1.z, p2.z)
self.point2.x = max(p1.x, p2.x)
self.point2.y = max(p1.y, p2.y)
self.point2.z = max(p1.z, p2.z)
self.size_x = self.point2.x - self.point1.x
self.size_y = self.point2.y - self.point1.y
self.size_z = self.point2.z - self.point1.z
def generate(self):
"""Return a random point inside the box"""
x, y, z = self.point1
return (x + self.size_x * random(),
y + self.size_y * random(),
z + self.size_z * random())
def __contains__(self, p):
"""Return true if the point is within the box"""
px, py, pz = p
x1, y1, z1 = self.point1
x2, y2, z2 = self.point2
return (x1 <= px <= x2 and y1 <= py <= y2 and z1 <= pz <= z2)
def update_position(self, point1, point2):
"""update the position of the box"""
p1 = Vec3(*point1)
p2 = Vec3(*point2)
self.point1 = Vec3()
self.point2 = Vec3()
self.point1.x = min(p1.x, p2.x)
self.point1.y = min(p1.y, p2.y)
self.point1.z = min(p1.z, p2.z)
self.point2.x = max(p1.x, p2.x)
self.point2.y = max(p1.y, p2.y)
self.point2.z = max(p1.z, p2.z)
self.size_x = self.point2.x - self.point1.x
self.size_y = self.point2.y - self.point1.y
self.size_z = self.point2.z - self.point1.z
def intersect(self, start_point, end_point):
"""Intersect the line segment with the box return the first
intersection point and normal vector pointing into space from
the box side intersected.
If the line does not intersect, or lies completely in one side
of the box return (None, None)
"""
sx, sy, sz = start_point
ex, ey, ez = end_point
p1x, p1y, p1z = self.point1
p2x, p2y, p2z = self.point2
start_inside = start_point in self
end_inside = end_point in self
if start_inside != end_inside:
if (end_inside and sy > p2y) or (start_inside and ey >= p2y) and (ey != sy):
# Test for itersection with bottom face
t = (sy - p2y) / (ey - sy)
ix = (ex - sx) * t + sx
iy = p2y
iz = (ez - sz) * t + sz
if p1x <= ix <= p2x and p1z <= iz <= p2z:
return (ix, iy, iz), (0.0, (sy > p2y) * 2.0 - 1.0, 0.0)
if (end_inside and sx < p1x) or (start_inside and ex <= p1x) and (ex != sx):
# Test for itersection with left face
t = (sx - p1x) / (ex - sx)
ix = p1x
iy = (ey - sy) * t + sy
iz = (ez - sz) * t + sz
if p1y <= iy <= p2y and p1z <= iz <= p2z:
return (ix, iy, iz), ((sx > p1x) * 2.0 - 1.0, 0.0, 0.0)
if (end_inside and sy < p1y) or (start_inside and ey <= p1y) and (ey != sy):
# Test for itersection with top face
t = (sy - p1y) / (ey - sy)
ix = (ex - sx) * t + sx
iy = p1y
iz = (ez - sz) * t + sz
if p1x <= ix <= p2x and p1z <= iz <= p2z:
return (ix, iy, iz), (0.0, (sy > p1y) * 2.0 - 1.0, 0.0)
if (end_inside and sx > p2x) or (start_inside and ex >= p2x) and (ex != sx):
# Test for itersection with right face
t = (sx - p2x) / (ex - sx)
ix = p2x
iy = (ey - sy) * t + sy
iz = (ez - sz) * t + sz
if p1y <= iy <= p2y and p1z <= iz <= p2z:
return (ix, iy, iz), ((sx > p2x) * 2.0 - 1.0, 0.0, 0.0)
if (end_inside and sz > p2z) or (start_inside and ez >= p2z) and (ez != sz):
# Test for itersection with far face
t = (sz - p2z) / (ez - sz)
ix = (ex - sx) * t + sx
iy = (ey - sy) * t + sy
iz = p2z
if p1y <= iy <= p2y and p1x <= ix <= p2x:
return (ix, iy, iz), (0.0, 0.0, (sz > p2z) * 2.0 - 1.0)
if (end_inside and sz < p1z) or (start_inside and ez <= p1z) and (ez != sz):
# Test for itersection with near face
t = (sz - p1z) / (ez - sz)
ix = (ex - sx) * t + sx
iy = (ey - sy) * t + sy
iz = p1z
if p1y <= iy <= p2y and p1x <= ix <= p2x:
return (ix, iy, iz), (0.0, 0.0, (sz > p1z) * 2.0 - 1.0)
return None, None
|
lordmauve/lepton | examples/games/bonk/game.py | Box.intersect | python | def intersect(self, start_point, end_point):
"""Intersect the line segment with the box return the first
intersection point and normal vector pointing into space from
the box side intersected.
If the line does not intersect, or lies completely in one side
of the box return (None, None)
"""
sx, sy, sz = start_point
ex, ey, ez = end_point
p1x, p1y, p1z = self.point1
p2x, p2y, p2z = self.point2
start_inside = start_point in self
end_inside = end_point in self
if start_inside != end_inside:
if (end_inside and sy > p2y) or (start_inside and ey >= p2y) and (ey != sy):
# Test for itersection with bottom face
t = (sy - p2y) / (ey - sy)
ix = (ex - sx) * t + sx
iy = p2y
iz = (ez - sz) * t + sz
if p1x <= ix <= p2x and p1z <= iz <= p2z:
return (ix, iy, iz), (0.0, (sy > p2y) * 2.0 - 1.0, 0.0)
if (end_inside and sx < p1x) or (start_inside and ex <= p1x) and (ex != sx):
# Test for itersection with left face
t = (sx - p1x) / (ex - sx)
ix = p1x
iy = (ey - sy) * t + sy
iz = (ez - sz) * t + sz
if p1y <= iy <= p2y and p1z <= iz <= p2z:
return (ix, iy, iz), ((sx > p1x) * 2.0 - 1.0, 0.0, 0.0)
if (end_inside and sy < p1y) or (start_inside and ey <= p1y) and (ey != sy):
# Test for itersection with top face
t = (sy - p1y) / (ey - sy)
ix = (ex - sx) * t + sx
iy = p1y
iz = (ez - sz) * t + sz
if p1x <= ix <= p2x and p1z <= iz <= p2z:
return (ix, iy, iz), (0.0, (sy > p1y) * 2.0 - 1.0, 0.0)
if (end_inside and sx > p2x) or (start_inside and ex >= p2x) and (ex != sx):
# Test for itersection with right face
t = (sx - p2x) / (ex - sx)
ix = p2x
iy = (ey - sy) * t + sy
iz = (ez - sz) * t + sz
if p1y <= iy <= p2y and p1z <= iz <= p2z:
return (ix, iy, iz), ((sx > p2x) * 2.0 - 1.0, 0.0, 0.0)
if (end_inside and sz > p2z) or (start_inside and ez >= p2z) and (ez != sz):
# Test for itersection with far face
t = (sz - p2z) / (ez - sz)
ix = (ex - sx) * t + sx
iy = (ey - sy) * t + sy
iz = p2z
if p1y <= iy <= p2y and p1x <= ix <= p2x:
return (ix, iy, iz), (0.0, 0.0, (sz > p2z) * 2.0 - 1.0)
if (end_inside and sz < p1z) or (start_inside and ez <= p1z) and (ez != sz):
# Test for itersection with near face
t = (sz - p1z) / (ez - sz)
ix = (ex - sx) * t + sx
iy = (ey - sy) * t + sy
iz = p1z
if p1y <= iy <= p2y and p1x <= ix <= p2x:
return (ix, iy, iz), (0.0, 0.0, (sz > p1z) * 2.0 - 1.0)
return None, None | Intersect the line segment with the box return the first
intersection point and normal vector pointing into space from
the box side intersected.
If the line does not intersect, or lies completely in one side
of the box return (None, None) | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/game.py#L505-L568 | null | class Box(Domain):
"""Axis aligned rectangular prism"""
def __init__(self, point1, point2):
"""point1 and point2 define any two opposite corners of the box"""
p1 = Vec3(*point1)
p2 = Vec3(*point2)
self.point1 = Vec3()
self.point2 = Vec3()
self.point1.x = min(p1.x, p2.x)
self.point1.y = min(p1.y, p2.y)
self.point1.z = min(p1.z, p2.z)
self.point2.x = max(p1.x, p2.x)
self.point2.y = max(p1.y, p2.y)
self.point2.z = max(p1.z, p2.z)
self.size_x = self.point2.x - self.point1.x
self.size_y = self.point2.y - self.point1.y
self.size_z = self.point2.z - self.point1.z
def generate(self):
"""Return a random point inside the box"""
x, y, z = self.point1
return (x + self.size_x * random(),
y + self.size_y * random(),
z + self.size_z * random())
def __contains__(self, p):
"""Return true if the point is within the box"""
px, py, pz = p
x1, y1, z1 = self.point1
x2, y2, z2 = self.point2
return (x1 <= px <= x2 and y1 <= py <= y2 and z1 <= pz <= z2)
def update_position(self, point1, point2):
"""update the position of the box"""
p1 = Vec3(*point1)
p2 = Vec3(*point2)
self.point1 = Vec3()
self.point2 = Vec3()
self.point1.x = min(p1.x, p2.x)
self.point1.y = min(p1.y, p2.y)
self.point1.z = min(p1.z, p2.z)
self.point2.x = max(p1.x, p2.x)
self.point2.y = max(p1.y, p2.y)
self.point2.z = max(p1.z, p2.z)
self.size_x = self.point2.x - self.point1.x
self.size_y = self.point2.y - self.point1.y
self.size_z = self.point2.z - self.point1.z
def intersect(self, start_point, end_point):
"""Intersect the line segment with the box return the first
intersection point and normal vector pointing into space from
the box side intersected.
If the line does not intersect, or lies completely in one side
of the box return (None, None)
"""
sx, sy, sz = start_point
ex, ey, ez = end_point
p1x, p1y, p1z = self.point1
p2x, p2y, p2z = self.point2
start_inside = start_point in self
end_inside = end_point in self
if start_inside != end_inside:
if (end_inside and sy > p2y) or (start_inside and ey >= p2y) and (ey != sy):
# Test for itersection with bottom face
t = (sy - p2y) / (ey - sy)
ix = (ex - sx) * t + sx
iy = p2y
iz = (ez - sz) * t + sz
if p1x <= ix <= p2x and p1z <= iz <= p2z:
return (ix, iy, iz), (0.0, (sy > p2y) * 2.0 - 1.0, 0.0)
if (end_inside and sx < p1x) or (start_inside and ex <= p1x) and (ex != sx):
# Test for itersection with left face
t = (sx - p1x) / (ex - sx)
ix = p1x
iy = (ey - sy) * t + sy
iz = (ez - sz) * t + sz
if p1y <= iy <= p2y and p1z <= iz <= p2z:
return (ix, iy, iz), ((sx > p1x) * 2.0 - 1.0, 0.0, 0.0)
if (end_inside and sy < p1y) or (start_inside and ey <= p1y) and (ey != sy):
# Test for itersection with top face
t = (sy - p1y) / (ey - sy)
ix = (ex - sx) * t + sx
iy = p1y
iz = (ez - sz) * t + sz
if p1x <= ix <= p2x and p1z <= iz <= p2z:
return (ix, iy, iz), (0.0, (sy > p1y) * 2.0 - 1.0, 0.0)
if (end_inside and sx > p2x) or (start_inside and ex >= p2x) and (ex != sx):
# Test for itersection with right face
t = (sx - p2x) / (ex - sx)
ix = p2x
iy = (ey - sy) * t + sy
iz = (ez - sz) * t + sz
if p1y <= iy <= p2y and p1z <= iz <= p2z:
return (ix, iy, iz), ((sx > p2x) * 2.0 - 1.0, 0.0, 0.0)
if (end_inside and sz > p2z) or (start_inside and ez >= p2z) and (ez != sz):
# Test for itersection with far face
t = (sz - p2z) / (ez - sz)
ix = (ex - sx) * t + sx
iy = (ey - sy) * t + sy
iz = p2z
if p1y <= iy <= p2y and p1x <= ix <= p2x:
return (ix, iy, iz), (0.0, 0.0, (sz > p2z) * 2.0 - 1.0)
if (end_inside and sz < p1z) or (start_inside and ez <= p1z) and (ez != sz):
# Test for itersection with near face
t = (sz - p1z) / (ez - sz)
ix = (ex - sx) * t + sx
iy = (ey - sy) * t + sy
iz = p1z
if p1y <= iy <= p2y and p1x <= ix <= p2x:
return (ix, iy, iz), (0.0, 0.0, (sz > p1z) * 2.0 - 1.0)
return None, None
|
lordmauve/lepton | examples/magnet.py | resize | python | def resize(widthWindow, heightWindow):
glViewport(0, 0, widthWindow, heightWindow)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(70, 1.0*widthWindow/heightWindow, 0.001, 10000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity() | Setup 3D projection for window | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/magnet.py#L43-L50 | null | #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
""" Magnet.py
Demos the magnet controller. Electrons orbit protons.
"""
__version__ = '$Id: magnet.py 104 2008-11-08 06:49:41Z andrew.charles $'
from pyglet import image
from pyglet.gl import *
import os, math
from lepton import Particle, ParticleGroup, default_system
from lepton.renderer import BillboardRenderer
from lepton.texturizer import SpriteTexturizer
from lepton.emitter import StaticEmitter, PerParticleEmitter
from lepton.controller import Movement, Magnet, Collector, Lifetime, Fader
from lepton.domain import Sphere, Point, Disc
from random import expovariate
win = pyglet.window.Window(resizable=True, visible=False)
win.clear()
glEnable(GL_BLEND)
glEnable(GL_POINT_SMOOTH)
glShadeModel(GL_SMOOTH)
glBlendFunc(GL_SRC_ALPHA,GL_ONE)
glHint(GL_POINT_SMOOTH_HINT,GL_NICEST);
glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST);
glDisable(GL_DEPTH_TEST)
win.on_resize = resize
electron_lifetime = 22
max_electrons = 6
trail_lifetime = 4.5
texture = image.load(os.path.join(os.path.dirname(__file__),'flare3.png')).get_texture()
texturizer = SpriteTexturizer(texture.id)
nucleus = Sphere((0, 0, 0), 5)
protons = ParticleGroup(renderer=BillboardRenderer(texturizer),
controllers=[
Movement(),
]
)
proton_emitter = StaticEmitter(
template=Particle(
size=(30, 30, 0),
color=(0.5, 1.0, 0.2, 0.5),
),
size=[(26, 26, 0), (30, 30, 0), (34, 34, 0)],
deviation=Particle(
rotation=(0, 0, math.pi / 6),
))
proton_emitter.emit(3, protons)
electrons = ParticleGroup(renderer=BillboardRenderer(texturizer),
controllers=[
Movement(min_velocity=10),
Lifetime(electron_lifetime * 1.5),
Magnet(nucleus, charge=15000.0),
Magnet(nucleus, charge=-15000.0, exponent=3),
Fader(fade_in_end=1,
fade_out_start=electron_lifetime * 1.4,
fade_out_end=electron_lifetime * 1.5),
]
)
electron_emitter = StaticEmitter(
template=Particle(
position=(-20, 0, 0),
size=(25, 25, 25),
color=(0.1, 0.1, 1.0),
),
velocity=Disc((0,0,0), (-1,0,0), 36, 36),
)
# Trails for electrons
trail_emitter = PerParticleEmitter(electrons, rate=80,
template=Particle(
color=(1, 0, 0 ,1),
size=(4.25, 4.25, 0)
),
deviation=Particle(
up=(0, 0, math.pi),
rotation=(0, 0, math.pi),
size=(0.5, 0.5, 0),
velocity=(1, 1, 1),
color=(0, 1, 0),
age=trail_lifetime / 2.0),)
trails = ParticleGroup(
controllers=[
Lifetime(trail_lifetime * 1.5),
Movement(damping=0.7, max_velocity=60),
Magnet(nucleus, charge=17000.0),
Magnet(nucleus, charge=-17000.0, exponent=2.5),
Collector(Sphere((0, 0, 0), 1)),
Fader(fade_in_end=0.75, max_alpha=0.3, fade_out_start=0, fade_out_end=trail_lifetime),
trail_emitter
],
renderer=BillboardRenderer(texturizer))
win.set_visible(True)
pyglet.clock.schedule_interval(default_system.update, (1.0/30.0))
yrot = 0.0
xrot = 0.0
@win.event
def on_mouse_motion(x, y, dx, dy):
global yrot, xrot
yrot += dx * 0.3
xrot -= dy * 0.3
def summon(dt=None):
if len(electrons) < max_electrons:
electron_emitter.emit(1 ,electrons)
pyglet.clock.schedule_once(summon, expovariate(1.0)+1.0)
summon()
@win.event
def on_draw():
global i
global yrot,xrot
win.clear()
glLoadIdentity()
glTranslatef(0, 0, -50)
glRotatef(yrot, 0.0, 1.0, 0.0)
glRotatef(xrot, 1.0, 0.0, 0.0)
default_system.draw()
if __name__ == '__main__':
pyglet.app.run()
|
jvamvas/rhymediscovery | rhymediscovery/celex.py | is_rhyme | python | def is_rhyme(d, w1, w2):
for p1 in d[w1]:
# extract only "rhyming portion"
p1 = p1.split("'")[-1]
m = VOWELS_RE.search(p1)
if not m:
print(p1)
p1 = p1[m.start():]
for p2 in d[w2]:
p2 = p2.split("'")[-1]
m = VOWELS_RE.search(p2)
if not m:
print(w2, p2)
p2 = p2[m.start():]
if p1 == p2:
return True
return False | check if words rhyme | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/celex.py#L24-L41 | null | from __future__ import unicode_literals
import random
import re
from collections import defaultdict
import numpy
VOWELS_RE = re.compile('[iye|aou#$(IYE/A{&QO}VU@!)*<cq0~^KLM123456789WBX]')
CELEX_DIR = '../../data/celex/CELEX_V2/' # change to the location of your CELEX directory
EPW_FILE = CELEX_DIR + '/english/epw/epw.cd'
def read_celex():
spam = map(lambda x: x.strip().split('\\'), open(EPW_FILE).readlines())
spam = map(lambda x: (x[1], x[6].replace('-', '').replace('"', "'")), spam)
d = defaultdict(list)
for (word, pron) in spam:
if "'" in pron: # can only test words with at least on stressed syllable
d[word].append(pron)
return d
def init_perfect_ttable(words):
"""initialize (normalized) theta according to whether words rhyme"""
d = read_celex()
not_in_dict = 0
n = len(words)
t_table = numpy.zeros((n, n + 1))
# initialize P(c|r) accordingly
for r, w in enumerate(words):
if w not in d:
not_in_dict += 1
for c, v in enumerate(words):
if c < r:
t_table[r, c] = t_table[c, r]
elif w in d and v in d:
t_table[r, c] = int(is_rhyme(d, w, v)) + 0.001 # for backoff
else:
t_table[r, c] = random.random()
t_table[r, n] = random.random() # no estimate for P(r|no history)
print(not_in_dict, "of", n, " words are not in CELEX")
# normalize
for c in range(n + 1):
tot = sum(t_table[:, c])
for r in range(n):
t_table[r, c] = t_table[r, c] / tot
return t_table
|
jvamvas/rhymediscovery | rhymediscovery/celex.py | init_perfect_ttable | python | def init_perfect_ttable(words):
d = read_celex()
not_in_dict = 0
n = len(words)
t_table = numpy.zeros((n, n + 1))
# initialize P(c|r) accordingly
for r, w in enumerate(words):
if w not in d:
not_in_dict += 1
for c, v in enumerate(words):
if c < r:
t_table[r, c] = t_table[c, r]
elif w in d and v in d:
t_table[r, c] = int(is_rhyme(d, w, v)) + 0.001 # for backoff
else:
t_table[r, c] = random.random()
t_table[r, n] = random.random() # no estimate for P(r|no history)
print(not_in_dict, "of", n, " words are not in CELEX")
# normalize
for c in range(n + 1):
tot = sum(t_table[:, c])
for r in range(n):
t_table[r, c] = t_table[r, c] / tot
return t_table | initialize (normalized) theta according to whether words rhyme | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/celex.py#L44-L74 | [
"def read_celex():\n spam = map(lambda x: x.strip().split('\\\\'), open(EPW_FILE).readlines())\n spam = map(lambda x: (x[1], x[6].replace('-', '').replace('\"', \"'\")), spam)\n d = defaultdict(list)\n for (word, pron) in spam:\n if \"'\" in pron: # can only test words with at least on stressed syllable\n d[word].append(pron)\n return d\n",
"def is_rhyme(d, w1, w2):\n \"\"\"check if words rhyme\"\"\"\n for p1 in d[w1]:\n # extract only \"rhyming portion\"\n p1 = p1.split(\"'\")[-1]\n m = VOWELS_RE.search(p1)\n if not m:\n print(p1)\n p1 = p1[m.start():]\n for p2 in d[w2]:\n p2 = p2.split(\"'\")[-1]\n m = VOWELS_RE.search(p2)\n if not m:\n print(w2, p2)\n p2 = p2[m.start():]\n if p1 == p2:\n return True\n return False\n"
] | from __future__ import unicode_literals
import random
import re
from collections import defaultdict
import numpy
VOWELS_RE = re.compile('[iye|aou#$(IYE/A{&QO}VU@!)*<cq0~^KLM123456789WBX]')
CELEX_DIR = '../../data/celex/CELEX_V2/' # change to the location of your CELEX directory
EPW_FILE = CELEX_DIR + '/english/epw/epw.cd'
def read_celex():
spam = map(lambda x: x.strip().split('\\'), open(EPW_FILE).readlines())
spam = map(lambda x: (x[1], x[6].replace('-', '').replace('"', "'")), spam)
d = defaultdict(list)
for (word, pron) in spam:
if "'" in pron: # can only test words with at least on stressed syllable
d[word].append(pron)
return d
def is_rhyme(d, w1, w2):
"""check if words rhyme"""
for p1 in d[w1]:
# extract only "rhyming portion"
p1 = p1.split("'")[-1]
m = VOWELS_RE.search(p1)
if not m:
print(p1)
p1 = p1[m.start():]
for p2 in d[w2]:
p2 = p2.split("'")[-1]
m = VOWELS_RE.search(p2)
if not m:
print(w2, p2)
p2 = p2[m.start():]
if p1 == p2:
return True
return False
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | load_stanzas | python | def load_stanzas(stanzas_file):
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas | Load stanzas from gold standard file | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L23-L33 | null | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
def init_distance_ttable(wordlist, distance_function):
"""
Initialize pair-wise rhyme strenghts according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n)
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
"""
Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it
"""
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob
def expectation_step(t_table, stanzas, schemes, rprobs):
"""
Compute posterior probability of schemes for each stanza
"""
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def get_results(probs, stanzas, schemes):
"""
Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
)
"""
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results
def print_results(results, outfile):
"""
Write results to outfile
"""
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
def main(args_list=None):
"""
Wrapper for find_schemes if called from command line
"""
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | get_wordlist | python | def get_wordlist(stanzas):
return sorted(list(set().union(*[stanza.words for stanza in stanzas]))) | Get an iterable of all final words in all stanzas | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L86-L90 | null | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_rhymelists(stanza, scheme):
    """Group the stanza's word indices by rhyme group, in scheme order."""
    grouped = OrderedDict()
    for group, index in zip(scheme, stanza.word_indices):
        grouped.setdefault(group, []).append(index)
    # One list per rhyme group, ordered by first appearance of the group.
    return list(grouped.values())
def init_distance_ttable(wordlist, distance_function):
"""
Initialize pair-wise rhyme strenghts according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n)
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
"""
Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it
"""
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob
def expectation_step(t_table, stanzas, schemes, rprobs):
"""
Compute posterior probability of schemes for each stanza
"""
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def get_results(probs, stanzas, schemes):
"""
Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
)
"""
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results
def print_results(results, outfile):
"""
Write results to outfile
"""
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
def main(args_list=None):
"""
Wrapper for find_schemes if called from command line
"""
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | get_rhymelists | python | def get_rhymelists(stanza, scheme):
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values()) | Returns ordered lists of the stanza's word indices as defined by given scheme | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L93-L100 | null | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
    """Parse a gold-standard file; every 4th line (0, 4, 8, ...) holds one stanza."""
    stanzas = []
    for line_no, line in enumerate(stanzas_file.readlines()):
        # Only every fourth line carries stanza words; the rest is skipped.
        if line_no % 4:
            continue
        # The leading token is an identifier, not a stanza word.
        tokens = line.strip().split()
        stanzas.append(Stanza(tokens[1:]))
    return stanzas
class Stanza:
    """A stanza, represented by the sequence of its line-final words."""

    def __init__(self, stanza_words):
        # Final words in order of appearance; immutable by design.
        self.words = tuple(stanza_words)
        # Positions of self.words in the global wordlist; None until
        # set_word_indices has been called.
        self.word_indices = None

    def set_word_indices(self, wordlist):
        """Map each word of this stanza to its position in the global wordlist."""
        self.word_indices = list(map(wordlist.index, self.words))

    def __str__(self):
        return ' '.join(self.words)

    def __len__(self):
        return len(self.words)
class Schemes:
    """In-memory rhyme-scheme inventory parsed from schemes.json."""

    def __init__(self, scheme_file):
        self.scheme_file = scheme_file
        self.scheme_list, self.scheme_dict = self._parse_scheme_file()
        self.num_schemes = len(self.scheme_list)

    def _parse_scheme_file(self):
        """Build the flat scheme list plus a length -> scheme-indices lookup."""
        # OrderedDict hook keeps the file's ordering stable for reproducibility.
        raw = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
        flat = []
        by_length = defaultdict(list)
        for length_key, entries in raw.items():
            for encoded, _count in entries:
                # Schemes are stored as space-separated rhyme-group labels.
                flat.append(tuple(int(token) for token in encoded.split(' ')))
                by_length[int(length_key)].append(len(flat) - 1)
        return flat, by_length

    def get_schemes_for_len(self, n):
        """Indices (into scheme_list) of all schemes of length n."""
        return self.scheme_dict[n]
def get_wordlist(stanzas):
    """Return the sorted list of all distinct stanza-final words.

    Sorting fixes the word order, so indices assigned from this list are
    stable across runs.
    """
    # sorted() already returns a list, so the original wrapping list() was
    # redundant; a set comprehension also avoids materializing the argument
    # list that set().union(*[...]) required.
    return sorted({word for stanza in stanzas for word in stanza.words})
def init_distance_ttable(wordlist, distance_function):
    """Seed pair-wise rhyme strengths P(c|r) from a word-distance function."""
    size = len(wordlist)
    table = numpy.zeros((size, size + 1))
    # Fill the upper triangle (diagonal included) and mirror it below;
    # the distance function is treated as symmetric here.
    for row, word in enumerate(wordlist):
        for col in range(row, size):
            # The small additive constant keeps every pair reachable (backoff).
            table[row, col] = distance_function(word, wordlist[col]) + 0.001
            table[col, row] = table[row, col]
    # Extra last column: mean similarity, used as the no-history backoff.
    table[:, size] = numpy.mean(table[:, :-1], axis=1)
    # Column-normalize so each column forms a probability distribution.
    table /= numpy.sum(table, axis=0)
    return table
def init_uniform_ttable(wordlist):
    """Uniform theta: every word pairs equally with every word (plus backoff)."""
    size = len(wordlist)
    # One extra column holds the no-history backoff probability.
    return numpy.full((size, size + 1), 1 / size)
def basic_word_sim(word1, word2):
    """Crude similarity: shared-letter count over the longer word's length."""
    shared = sum(1 for ch in word1 if ch in word2)
    return shared / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
    """Initialize theta from the simple orthographic-overlap similarity."""
    return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
    """Similarity in [0, 1] via difflib's SequenceMatcher ratio."""
    return SequenceMatcher(None, word1, word2).ratio()
def init_difflib_ttable(wordlist):
    """Initialize theta from the difflib SequenceMatcher similarity."""
    return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
    """Likelihood of a stanza under a scheme.

    Each word in a rhyme group is scored against every word that precedes
    it in the same group; the first word of a group uses the backoff column.
    """
    prob = 1
    for group in get_rhymelists(stanza, scheme):
        for pos, word in enumerate(group):
            if pos == 0:
                # First word of the group: backoff column P(w | no history).
                prob *= t_table[word, -1]
                continue
            for earlier in group[:pos]:
                prob *= t_table[word, earlier]
    # Long stanzas can underflow to exactly 0; clamp so they stay comparable.
    if prob == 0 and len(stanza) > 30:
        prob = 1e-300
    return prob
def expectation_step(t_table, stanzas, schemes, rprobs):
    """E-step: posterior probability of each candidate scheme for each stanza.

    Returns an array of shape (len(stanzas), schemes.num_schemes); each row
    is normalized to sum to 1 when any scheme has nonzero probability.
    """
    probs = numpy.zeros((len(stanzas), schemes.num_schemes))
    for i, stanza in enumerate(stanzas):
        # Only schemes of the stanza's length can apply.
        for scheme_index in schemes.get_schemes_for_len(len(stanza)):
            scheme = schemes.scheme_list[scheme_index]
            probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
    # Weight by the scheme priors. Elementwise broadcasting computes the same
    # values as numpy.dot(probs, numpy.diag(rprobs)) without building the
    # O(S^2) diagonal matrix or a second full product.
    probs *= rprobs
    # Normalize each stanza's row into a distribution over schemes.
    scheme_sums = numpy.sum(probs, axis=1)
    for i, scheme_sum in enumerate(scheme_sums.tolist()):
        if scheme_sum > 0:
            probs[i, :] /= scheme_sum
    return probs
def maximization_step(num_words, stanzas, schemes, probs):
    """M-step: re-estimate the rhyme table (theta) and the scheme priors."""
    theta = numpy.zeros((num_words, num_words + 1))
    # Priors start at 1 (add-one smoothing) so no scheme collapses to zero.
    priors = numpy.ones(schemes.num_schemes)
    for i, stanza in enumerate(stanzas):
        for scheme_index in schemes.get_schemes_for_len(len(stanza)):
            weight = probs[i, scheme_index]
            priors[scheme_index] += weight
            scheme = schemes.scheme_list[scheme_index]
            for rhymelist in get_rhymelists(stanza, scheme):
                for j, word_index in enumerate(rhymelist):
                    # Backoff column: credit for the word occurring at all.
                    theta[word_index, -1] += weight
                    # Co-occurrence with every other word in the same group.
                    for other in rhymelist[:j] + rhymelist[j + 1:]:
                        theta[word_index, other] += weight
    # Column-normalize theta, skipping all-zero columns.
    column_sums = numpy.sum(theta, axis=0)
    for col, column_sum in enumerate(column_sums.tolist()):
        if column_sum != 0:
            theta[:, col] /= column_sum
    # Normalize the priors into a distribution.
    priors /= numpy.sum(priors)
    return theta, priors
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
    """Run EM for up to maxsteps iterations.

    Returns the stanza-by-scheme probability matrix from the last E-step.
    Warns (rather than raising) if the per-stanza scores have not converged
    by the final iteration.
    """
    data_probs = numpy.zeros(len(stanzas))
    old_data_probs = None
    probs = None
    num_words = len(wordlist)
    ctr = 0
    for ctr in range(maxsteps):
        logging.info("Iteration {}".format(ctr))
        old_data_probs = data_probs
        logging.info("Expectation step")
        probs = expectation_step(t_table, stanzas, schemes, rprobs)
        logging.info("Maximization step")
        t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
        # NOTE(review): probs holds plain probabilities, yet logaddexp.reduce
        # treats them as log-probs; the convergence check therefore compares a
        # monotone surrogate, not the true data likelihood -- confirm intent.
        data_probs = numpy.logaddexp.reduce(probs, axis=1)
        # Convergence is only checked on the last pass.
        if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
            logging.warning("Warning: EM did not converge")
    # Fixed typo in log message: "interations" -> "iterations".
    logging.info("Stopped after {} iterations".format(ctr))
    return probs
def init_uniform_r(schemes):
    """Uniform prior over all rhyme schemes."""
    count = schemes.num_schemes
    return numpy.ones(count) / count
def get_results(probs, stanzas, schemes):
    """Pair each stanza's words with its highest-posterior scheme.

    Returns a list of (stanza words tuple, scheme tuple) pairs.
    """
    pairs = []
    for row, stanza in enumerate(stanzas):
        winner = int(numpy.argmax(probs[row, :]))
        pairs.append((stanza.words, schemes.scheme_list[winner]))
    return pairs
def print_results(results, outfile):
    """Write each (words, scheme) pair to outfile, then close it.

    Note: this closes the handle it is given, including sys.stdout when that
    is what the caller passed.
    """
    for stanza_words, scheme in results:
        word_line = ' '.join(stanza_words)
        scheme_line = ' '.join(str(label) for label in scheme)
        outfile.write(word_line + '\n')
        # Blank line separates stanzas.
        outfile.write(scheme_line + '\n\n')
    outfile.close()
    logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
    """Run the full EM pipeline on stanzas; return (words, best scheme) pairs."""
    # Accept plain word lists as well as Stanza instances.
    if not isinstance(stanzas[0], Stanza):
        stanzas = [Stanza(words) for words in stanzas]
    # The scheme inventory ships with the package, next to this module.
    scheme_filename = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
    with open(scheme_filename, 'r') as scheme_file:
        schemes = Schemes(scheme_file)
    logging.info("Loaded files")
    wordlist = get_wordlist(stanzas)
    for stanza in stanzas:
        stanza.set_word_indices(wordlist)
    logging.info("Initialized list of {} words".format(len(wordlist)))
    # Uniform scheme prior; theta via the caller-chosen initializer.
    rprobs = init_uniform_r(schemes)
    t_table = t_table_init_function(wordlist)
    logging.info("Created t_table with shape {}".format(t_table.shape))
    final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
    logging.info("EM done; writing results")
    return get_results(final_probs, stanzas, schemes)
def main(args_list=None):
    """Command-line wrapper around find_schemes.

    Parses arguments, selects a theta initializer, runs EM, and writes the
    results to the chosen output file.
    """
    args_list = args_list or sys.argv[1:]
    parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
    parser.add_argument(
        'infile',
        type=argparse.FileType('r'),
    )
    parser.add_argument(
        'outfile',
        help='Where the result is written to. Default: stdout',
        nargs='?',
        type=argparse.FileType('w'),
        default=sys.stdout,
    )
    # BUG FIX: the short and long flags were registered as one string
    # ('-t --init-type'), so the long spelling could never match; they must
    # be separate option strings.
    parser.add_argument(
        '-t', '--init-type',
        help='Whether to initialize theta uniformly (u), with the orthographic similarity '
             'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
             'The last one requires you to have CELEX on your machine.',
        dest='init_type',
        choices=('u', 'o', 'p', 'd'),
        default='o',
    )
    # BUG FIX: same malformed option string ('-i, --iterations').
    parser.add_argument(
        '-i', '--iterations',
        help='Number of iterations (default: 10)',
        dest='num_iterations',
        type=int,
        default=10,
    )
    parser.add_argument(
        '-v', '--verbose',
        help="Verbose output",
        action="store_const",
        dest="loglevel",
        const=logging.INFO,
    )
    args = parser.parse_args(args_list)
    logging.basicConfig(level=args.loglevel)
    stanzas = load_stanzas(args.infile)
    # Map the one-letter choice to its theta initializer.
    init_function = None
    if args.init_type == 'u':
        init_function = init_uniform_ttable
    elif args.init_type == 'o':
        init_function = init_basicortho_ttable
    elif args.init_type == 'p':
        init_function = celex.init_perfect_ttable
    elif args.init_type == 'd':
        init_function = init_difflib_ttable
    results = find_schemes(stanzas, init_function, args.num_iterations)
    print_results(results, args.outfile)


if __name__ == '__main__':
    main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | init_distance_ttable | python | def init_distance_ttable(wordlist, distance_function):
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table | Initialize pair-wise rhyme strenghts according to the given word distance function | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L103-L123 | [
"def basic_word_sim(word1, word2):\n \"\"\"\n Simple measure of similarity: Number of letters in common / max length\n \"\"\"\n return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))\n",
"def difflib_similarity(word1, word2):\n \"\"\"\n Distance function using the built-in difflib ratio\n \"\"\"\n sequence_matcher = SequenceMatcher(None, word1, word2)\n return sequence_matcher.ratio()\n"
] | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n)
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
"""
Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it
"""
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob
def expectation_step(t_table, stanzas, schemes, rprobs):
"""
Compute posterior probability of schemes for each stanza
"""
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def get_results(probs, stanzas, schemes):
"""
Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
)
"""
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results
def print_results(results, outfile):
"""
Write results to outfile
"""
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
def main(args_list=None):
"""
Wrapper for find_schemes if called from command line
"""
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | init_uniform_ttable | python | def init_uniform_ttable(wordlist):
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n) | Initialize (normalized) theta uniformly | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L126-L131 | null | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
def init_distance_ttable(wordlist, distance_function):
"""
Initialize pair-wise rhyme strenghts according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
"""
Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it
"""
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob
def expectation_step(t_table, stanzas, schemes, rprobs):
"""
Compute posterior probability of schemes for each stanza
"""
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def get_results(probs, stanzas, schemes):
"""
Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
)
"""
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results
def print_results(results, outfile):
"""
Write results to outfile
"""
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
def main(args_list=None):
"""
Wrapper for find_schemes if called from command line
"""
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | basic_word_sim | python | def basic_word_sim(word1, word2):
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2)) | Simple measure of similarity: Number of letters in common / max length | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L134-L138 | null | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
def init_distance_ttable(wordlist, distance_function):
"""
Initialize pair-wise rhyme strenghts according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n)
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
"""
Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it
"""
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob
def expectation_step(t_table, stanzas, schemes, rprobs):
"""
Compute posterior probability of schemes for each stanza
"""
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def get_results(probs, stanzas, schemes):
"""
Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
)
"""
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results
def print_results(results, outfile):
"""
Write results to outfile
"""
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
def main(args_list=None):
"""
Wrapper for find_schemes if called from command line
"""
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | post_prob_scheme | python | def post_prob_scheme(t_table, stanza, scheme):
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob | Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L157-L173 | [
"def get_rhymelists(stanza, scheme):\n \"\"\"\n Returns ordered lists of the stanza's word indices as defined by given scheme\n \"\"\"\n rhymelists = defaultdict(list)\n for rhyme_group, word_index in zip(scheme, stanza.word_indices):\n rhymelists[rhyme_group].append(word_index)\n return list(rhymelists.values())\n"
] | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
def init_distance_ttable(wordlist, distance_function):
"""
Initialize pair-wise rhyme strenghts according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n)
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def expectation_step(t_table, stanzas, schemes, rprobs):
"""
Compute posterior probability of schemes for each stanza
"""
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def get_results(probs, stanzas, schemes):
"""
Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
)
"""
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results
def print_results(results, outfile):
"""
Write results to outfile
"""
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
def main(args_list=None):
"""
Wrapper for find_schemes if called from command line
"""
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | expectation_step | python | def expectation_step(t_table, stanzas, schemes, rprobs):
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs | Compute posterior probability of schemes for each stanza | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L176-L193 | [
"def post_prob_scheme(t_table, stanza, scheme):\n \"\"\"\n Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist\n rhyming with all the ones before it\n \"\"\"\n myprob = 1\n rhymelists = get_rhymelists(stanza, scheme)\n for rhymelist in rhymelists:\n for i, word_index in enumerate(rhymelist):\n if i == 0: # first word, use P(w|x)\n myprob *= t_table[word_index, -1]\n else:\n for word_index2 in rhymelist[:i]: # history\n myprob *= t_table[word_index, word_index2]\n if myprob == 0 and len(stanza) > 30: # probably underflow\n myprob = 1e-300\n return myprob\n"
] | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
def init_distance_ttable(wordlist, distance_function):
"""
Initialize pair-wise rhyme strenghts according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n)
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
"""
Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it
"""
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def get_results(probs, stanzas, schemes):
"""
Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
)
"""
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results
def print_results(results, outfile):
"""
Write results to outfile
"""
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
def main(args_list=None):
"""
Wrapper for find_schemes if called from command line
"""
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | maximization_step | python | def maximization_step(num_words, stanzas, schemes, probs):
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs | Update latent variables t_table, rprobs | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L196-L225 | [
"def get_rhymelists(stanza, scheme):\n \"\"\"\n Returns ordered lists of the stanza's word indices as defined by given scheme\n \"\"\"\n rhymelists = defaultdict(list)\n for rhyme_group, word_index in zip(scheme, stanza.word_indices):\n rhymelists[rhyme_group].append(word_index)\n return list(rhymelists.values())\n"
] | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
def init_distance_ttable(wordlist, distance_function):
"""
Initialize pair-wise rhyme strenghts according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n)
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
"""
Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it
"""
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob
def expectation_step(t_table, stanzas, schemes, rprobs):
"""
Compute posterior probability of schemes for each stanza
"""
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
    """Run up to `maxsteps` EM iterations and return the final posteriors.

    Parameters
    ----------
    t_table : numpy.ndarray, shape (num_words, num_words + 1)
        Pair-wise rhyme strengths; the last column is the backoff P(w|x).
    wordlist : list of str
        Global vocabulary of stanza-final words.
    stanzas : list of Stanza
    schemes : Schemes
    rprobs : numpy.ndarray
        Prior probability of each scheme.
    maxsteps : int
        Number of EM iterations to run (the loop always runs all of them).

    Returns
    -------
    numpy.ndarray, shape (len(stanzas), schemes.num_schemes)
        Posterior probability of each scheme for each stanza.
    """
    data_probs = numpy.zeros(len(stanzas))
    old_data_probs = None
    probs = None
    num_words = len(wordlist)
    ctr = 0
    for ctr in range(maxsteps):
        logging.info("Iteration {}".format(ctr))
        old_data_probs = data_probs
        logging.info("Expectation step")
        probs = expectation_step(t_table, stanzas, schemes, rprobs)
        logging.info("Maximization step")
        t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
        # Convergence diagnostic: warn if the per-stanza probabilities are
        # still moving on the last allowed iteration.
        data_probs = numpy.logaddexp.reduce(probs, axis=1)
        if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
            logging.warning("Warning: EM did not converge")
    # Fixed typo: "interations" -> "iterations".
    logging.info("Stopped after {} iterations".format(ctr))
    return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def get_results(probs, stanzas, schemes):
"""
Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
)
"""
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results
def print_results(results, outfile):
"""
Write results to outfile
"""
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
def main(args_list=None):
"""
Wrapper for find_schemes if called from command line
"""
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | iterate | python | def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs | Iterate EM and return final probabilities | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L228-L254 | [
"def expectation_step(t_table, stanzas, schemes, rprobs):\n \"\"\"\n Compute posterior probability of schemes for each stanza\n \"\"\"\n probs = numpy.zeros((len(stanzas), schemes.num_schemes))\n for i, stanza in enumerate(stanzas):\n scheme_indices = schemes.get_schemes_for_len(len(stanza))\n for scheme_index in scheme_indices:\n scheme = schemes.scheme_list[scheme_index]\n probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)\n probs = numpy.dot(probs, numpy.diag(rprobs))\n\n # Normalize\n scheme_sums = numpy.sum(probs, axis=1)\n for i, scheme_sum in enumerate(scheme_sums.tolist()):\n if scheme_sum > 0:\n probs[i, :] /= scheme_sum\n return probs\n",
"def maximization_step(num_words, stanzas, schemes, probs):\n \"\"\"\n Update latent variables t_table, rprobs\n \"\"\"\n t_table = numpy.zeros((num_words, num_words + 1))\n rprobs = numpy.ones(schemes.num_schemes)\n for i, stanza in enumerate(stanzas):\n scheme_indices = schemes.get_schemes_for_len(len(stanza))\n for scheme_index in scheme_indices:\n myprob = probs[i, scheme_index]\n rprobs[scheme_index] += myprob\n scheme = schemes.scheme_list[scheme_index]\n rhymelists = get_rhymelists(stanza, scheme)\n for rhymelist in rhymelists:\n for j, word_index in enumerate(rhymelist):\n t_table[word_index, -1] += myprob\n for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:\n t_table[word_index, word_index2] += myprob\n\n # Normalize t_table\n t_table_sums = numpy.sum(t_table, axis=0)\n for i, t_table_sum in enumerate(t_table_sums.tolist()):\n if t_table_sum != 0:\n t_table[:, i] /= t_table_sum\n\n # Normalize rprobs\n totrprob = numpy.sum(rprobs)\n rprobs /= totrprob\n\n return t_table, rprobs\n"
] | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
    """Read stanzas from a gold-standard file.

    Every fourth line (starting at line 0) holds one stanza; its first
    whitespace-separated token is dropped and the remaining tokens become
    the stanza's words.
    """
    lines = stanzas_file.readlines()
    return [
        Stanza(line.strip().split()[1:])
        for i, line in enumerate(lines)
        if i % 4 == 0
    ]
class Stanza:
    """A stanza, represented by the sequence of its line-final words."""

    def __init__(self, stanza_words):
        # Immutable sequence of final words.
        self.words = tuple(stanza_words)
        # Indices into the global wordlist; filled in by set_word_indices().
        self.word_indices = None

    def set_word_indices(self, wordlist):
        """Map self.words onto their positions in the given global wordlist."""
        self.word_indices = [wordlist.index(w) for w in self.words]

    def __str__(self):
        return ' '.join(self.words)

    def __len__(self):
        return len(self.words)
class Schemes:
    """Rhyme schemes loaded from schemes.json, indexed by stanza length."""

    def __init__(self, scheme_file):
        self.scheme_file = scheme_file
        self.scheme_list, self.scheme_dict = self._parse_scheme_file()
        self.num_schemes = len(self.scheme_list)

    def _parse_scheme_file(self):
        """Build the flat scheme list plus a length -> scheme-index lookup."""
        raw = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
        scheme_list = []
        scheme_dict = defaultdict(list)
        for scheme_len, scheme_group in raw.items():
            for scheme_str, _count in scheme_group:
                # e.g. "0 1 0 1" -> (0, 1, 0, 1)
                code = tuple(int(label) for label in scheme_str.split(' '))
                scheme_list.append(code)
                scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
        return scheme_list, scheme_dict

    def get_schemes_for_len(self, n):
        """Indices (into scheme_list) of all schemes of length n."""
        return self.scheme_dict[n]
def get_wordlist(stanzas):
    """Return the sorted vocabulary of all final words across the stanzas."""
    vocabulary = set()
    for stanza in stanzas:
        vocabulary.update(stanza.words)
    return sorted(vocabulary)
def get_rhymelists(stanza, scheme):
    """Group the stanza's word indices by their rhyme label in `scheme`.

    Groups come back in first-appearance order of each rhyme label.
    """
    groups = defaultdict(list)
    for label, word_index in zip(scheme, stanza.word_indices):
        groups[label].append(word_index)
    return list(groups.values())
def init_distance_ttable(wordlist, distance_function):
    """Initialize pair-wise rhyme strengths from a word-distance function.

    Builds an (n, n+1) table: entry [r, c] is the similarity of words r and
    c (mirrored so the table is symmetric), the last column is the per-row
    mean used for backoff, and every column is normalized to sum to one.
    """
    n = len(wordlist)
    t_table = numpy.zeros((n, n + 1))
    # Fill the upper triangle (including the diagonal); the 0.001 floor
    # keeps a nonzero backoff mass everywhere.
    for r, w in enumerate(wordlist):
        for c in range(r, n):
            t_table[r, c] = distance_function(w, wordlist[c]) + 0.001
    # Mirror onto the lower triangle: similarity is treated as symmetric.
    for r in range(n):
        for c in range(r):
            t_table[r, c] = t_table[c, r]
    # Last column: per-row mean of all other columns (backoff P(w|x)).
    t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
    # Normalize every column to a probability distribution.
    t_table /= numpy.sum(t_table, axis=0)
    return t_table
def init_uniform_ttable(wordlist):
    """Uniform theta: every entry of the (n, n+1) table equals 1/n."""
    n = len(wordlist)
    return numpy.full((n, n + 1), 1 / n)
def basic_word_sim(word1, word2):
    """Crude similarity: letters of word1 found in word2, over the longer length.

    Note this counts duplicate letters in word1, so the measure is not
    symmetric in its arguments.
    """
    shared = sum(1 for ch in word1 if ch in word2)
    return shared / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
    """Initialize theta with the simple orthographic similarity measure."""
    return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
    """Similarity of two words as difflib's SequenceMatcher ratio (0..1)."""
    return SequenceMatcher(None, word1, word2).ratio()
def init_difflib_ttable(wordlist):
    """Initialize theta with difflib's sequence-matcher similarity."""
    return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
    """Posterior probability of `scheme` for `stanza`.

    Each rhyme group contributes the backoff probability of its first word
    times the pairwise rhyme strengths of every later word against all
    earlier words in the same group.
    """
    prob = 1
    for rhymelist in get_rhymelists(stanza, scheme):
        if rhymelist:
            prob *= t_table[rhymelist[0], -1]  # first word: backoff P(w|x)
        for pos in range(1, len(rhymelist)):
            word = rhymelist[pos]
            for earlier in rhymelist[:pos]:  # history within the group
                prob *= t_table[word, earlier]
    if prob == 0 and len(stanza) > 30:  # almost certainly float underflow
        prob = 1e-300
    return prob
def expectation_step(t_table, stanzas, schemes, rprobs):
    """E-step: posterior probability of each scheme for each stanza.

    Returns a (len(stanzas), num_schemes) matrix whose rows are normalized
    to sum to one; all-zero rows are left untouched.
    """
    probs = numpy.zeros((len(stanzas), schemes.num_schemes))
    for i, stanza in enumerate(stanzas):
        for scheme_index in schemes.get_schemes_for_len(len(stanza)):
            scheme = schemes.scheme_list[scheme_index]
            probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
    # Weight by the scheme priors. Broadcasting is mathematically identical
    # to numpy.dot(probs, numpy.diag(rprobs)) but avoids materializing a
    # dense num_schemes x num_schemes matrix and the O(S^2)-per-row matmul.
    probs *= rprobs

    # Normalize each row; skip all-zero rows to avoid division by zero.
    scheme_sums = numpy.sum(probs, axis=1)
    for i, scheme_sum in enumerate(scheme_sums.tolist()):
        if scheme_sum > 0:
            probs[i, :] /= scheme_sum
    return probs
def maximization_step(num_words, stanzas, schemes, probs):
    """M-step: re-estimate the rhyme-strength table and the scheme priors.

    Returns (t_table, rprobs). t_table columns with nonzero mass are
    normalized to distributions; rprobs starts from ones (add-one
    smoothing) before accumulating and normalizing.
    """
    t_table = numpy.zeros((num_words, num_words + 1))
    rprobs = numpy.ones(schemes.num_schemes)  # add-one smoothing
    for stanza_index, stanza in enumerate(stanzas):
        for scheme_index in schemes.get_schemes_for_len(len(stanza)):
            weight = probs[stanza_index, scheme_index]
            rprobs[scheme_index] += weight
            scheme = schemes.scheme_list[scheme_index]
            for rhymelist in get_rhymelists(stanza, scheme):
                for pos, word_index in enumerate(rhymelist):
                    t_table[word_index, -1] += weight  # backoff count
                    # Co-occurrence with every other word in the group.
                    for other in rhymelist[:pos] + rhymelist[pos + 1:]:
                        t_table[word_index, other] += weight

    # Normalize each nonzero column of t_table to a distribution.
    column_sums = numpy.sum(t_table, axis=0)
    for col, column_sum in enumerate(column_sums.tolist()):
        if column_sum != 0:
            t_table[:, col] /= column_sum

    # Normalize the scheme priors.
    rprobs /= numpy.sum(rprobs)

    return t_table, rprobs
def init_uniform_r(schemes):
    """Uniform prior over all rhyme schemes."""
    return numpy.full(schemes.num_schemes, 1 / schemes.num_schemes)
def get_results(probs, stanzas, schemes):
    """Pair each stanza's words with its most probable scheme.

    Returns a list of (stanza_words, best_scheme) tuples, one per stanza.
    """
    return [
        (stanza.words, schemes.scheme_list[numpy.argmax(probs[i, :])])
        for i, stanza in enumerate(stanzas)
    ]
def print_results(results, outfile):
    """Write (stanza words, scheme) pairs to `outfile` and close it.

    Each result is written as the stanza's words on one line and the
    scheme's integer labels on the next, followed by a blank line.

    Bug fix: the stream is no longer closed when it is the interpreter's
    own stdout/stderr — main() passes sys.stdout by default, and closing
    it would break any later output from the process. Real files are
    still closed as before.
    """
    for stanza_words, scheme in results:
        outfile.write(' '.join(stanza_words) + '\n')
        outfile.write(' '.join(map(str, scheme)) + '\n\n')
    if outfile not in (sys.stdout, sys.stderr):
        outfile.close()
    logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
    """
    Run EM rhyme-scheme discovery over *stanzas*.

    Parameters
    ----------
    stanzas : list of Stanza or list of list of str
        Stanzas to analyze; plain word lists are wrapped in Stanza.
    t_table_init_function : callable
        Builds the initial word rhyme table from the word list.
    num_iterations : int
        Maximum number of EM iterations.

    Returns
    -------
    list
        Output of get_results(): (stanza words, best scheme) tuples.
    """
    # Accept plain lists of words as well as Stanza objects.
    if not isinstance(stanzas[0], Stanza):
        stanzas = [Stanza(words) for words in stanzas]

    # The scheme inventory ships with the package.
    package_dir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(package_dir, 'data', 'schemes.json'), 'r') as scheme_file:
        schemes = Schemes(scheme_file)
    logging.info("Loaded files")

    wordlist = get_wordlist(stanzas)
    for stanza in stanzas:
        stanza.set_word_indices(wordlist)
    logging.info("Initialized list of {} words".format(len(wordlist)))

    # Initialize p(r) and theta, then iterate EM.
    rprobs = init_uniform_r(schemes)
    t_table = t_table_init_function(wordlist)
    logging.info("Created t_table with shape {}".format(t_table.shape))
    final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)

    logging.info("EM done; writing results")
    return get_results(final_probs, stanzas, schemes)
def main(args_list=None):
    """
    Command-line wrapper around find_schemes().

    Parses arguments, loads the stanza file, runs EM scheme discovery
    and writes the results to the requested output file.

    Parameters
    ----------
    args_list : list of str, optional
        Argument vector; defaults to sys.argv[1:].
    """
    args_list = args_list or sys.argv[1:]
    parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
    parser.add_argument(
        'infile',
        type=argparse.FileType('r'),
    )
    parser.add_argument(
        'outfile',
        help='Where the result is written to. Default: stdout',
        nargs='?',
        type=argparse.FileType('w'),
        default=sys.stdout,
    )
    # Bug fix: short and long options must be separate strings.  The
    # original passed the single string '-t --init-type' (and
    # '-i, --iterations' below), registering one space-containing option
    # name that could never be supplied on the command line.
    parser.add_argument(
        '-t', '--init-type',
        help='Whether to initialize theta uniformly (u), with the orthographic similarity '
             'measure (o), with difflib ratios (d), or using CELEX pronunciations and '
             'definition of rhyme (p). The last one requires you to have CELEX on your machine.',
        dest='init_type',
        choices=('u', 'o', 'p', 'd'),
        default='o',
    )
    parser.add_argument(
        '-i', '--iterations',
        help='Number of iterations (default: 10)',
        dest='num_iterations',
        type=int,
        default=10,
    )
    parser.add_argument(
        '-v', '--verbose',
        help="Verbose output",
        action="store_const",
        dest="loglevel",
        const=logging.INFO,
    )
    args = parser.parse_args(args_list)
    logging.basicConfig(level=args.loglevel)

    stanzas = load_stanzas(args.infile)
    # Map the one-letter choice to its initializer; argparse's `choices`
    # guarantees exactly one branch matches.
    if args.init_type == 'u':
        init_function = init_uniform_ttable
    elif args.init_type == 'o':
        init_function = init_basicortho_ttable
    elif args.init_type == 'p':
        init_function = celex.init_perfect_ttable
    else:  # 'd'
        init_function = init_difflib_ttable
    results = find_schemes(stanzas, init_function, args.num_iterations)
    print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | get_results | python | def get_results(probs, stanzas, schemes):
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results | Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
) | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L264-L275 | null | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
def init_distance_ttable(wordlist, distance_function):
"""
    Initialize pair-wise rhyme strengths according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n)
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
"""
Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it
"""
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob
def expectation_step(t_table, stanzas, schemes, rprobs):
"""
Compute posterior probability of schemes for each stanza
"""
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def print_results(results, outfile):
"""
Write results to outfile
"""
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
def main(args_list=None):
"""
Wrapper for find_schemes if called from command line
"""
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | print_results | python | def print_results(results, outfile):
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result") | Write results to outfile | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L278-L286 | null | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
def init_distance_ttable(wordlist, distance_function):
"""
    Initialize pair-wise rhyme strengths according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n)
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
"""
Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it
"""
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob
def expectation_step(t_table, stanzas, schemes, rprobs):
"""
Compute posterior probability of schemes for each stanza
"""
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def get_results(probs, stanzas, schemes):
"""
Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
)
"""
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
def main(args_list=None):
"""
Wrapper for find_schemes if called from command line
"""
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | main | python | def main(args_list=None):
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Discover schemes of given stanza file')
parser.add_argument(
'infile',
type=argparse.FileType('r'),
)
parser.add_argument(
'outfile',
help='Where the result is written to. Default: stdout',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
)
parser.add_argument(
'-t --init-type',
help='Whether to initialize theta uniformly (u), with the orthographic similarity '
'measure (o), or using CELEX pronunciations and definition of rhyme (p). '
'The last one requires you to have CELEX on your machine.',
dest='init_type',
choices=('u', 'o', 'p', 'd'),
default='o',
)
parser.add_argument(
'-i, --iterations',
help='Number of iterations (default: 10)',
dest='num_iterations',
type=int,
default=10,
)
parser.add_argument(
'-v', '--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args(args_list)
logging.basicConfig(level=args.loglevel)
stanzas = load_stanzas(args.infile)
init_function = None
if args.init_type == 'u':
init_function = init_uniform_ttable
elif args.init_type == 'o':
init_function = init_basicortho_ttable
elif args.init_type == 'p':
init_function = celex.init_perfect_ttable
elif args.init_type == 'd':
init_function = init_difflib_ttable
results = find_schemes(stanzas, init_function, args.num_iterations)
print_results(results, args.outfile) | Wrapper for find_schemes if called from command line | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L315-L372 | [
"def load_stanzas(stanzas_file):\n \"\"\"\n Load stanzas from gold standard file\n \"\"\"\n f = stanzas_file.readlines()\n stanzas = []\n for i, line in enumerate(f):\n if i % 4 == 0:\n stanza_words = line.strip().split()[1:]\n stanzas.append(Stanza(stanza_words))\n return stanzas\n",
"def print_results(results, outfile):\n \"\"\"\n Write results to outfile\n \"\"\"\n for stanza_words, scheme in results:\n outfile.write(str(' ').join(stanza_words) + str('\\n'))\n outfile.write(str(' ').join(map(str, scheme)) + str('\\n\\n'))\n outfile.close()\n logging.info(\"Wrote result\")\n",
"def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):\n # Allow input of string lists as stanzas\n if not isinstance(stanzas[0], Stanza):\n stanzas = [Stanza(words) for words in stanzas]\n\n scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')\n with open(scheme_filename, 'r') as scheme_file:\n schemes = Schemes(scheme_file)\n logging.info(\"Loaded files\")\n\n wordlist = get_wordlist(stanzas)\n for stanza in stanzas:\n stanza.set_word_indices(wordlist)\n logging.info(\"Initialized list of {} words\".format(len(wordlist)))\n\n # Initialize p(r)\n rprobs = init_uniform_r(schemes)\n t_table = t_table_init_function(wordlist)\n logging.info(\"Created t_table with shape {}\".format(t_table.shape))\n final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)\n\n logging.info(\"EM done; writing results\")\n results = get_results(final_probs, stanzas, schemes)\n return results\n"
] | #!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: Sravana Reddy (sravana@cs.uchicago.edu), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
def init_distance_ttable(wordlist, distance_function):
"""
    Initialize pair-wise rhyme strengths according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return numpy.ones((n, n + 1)) * (1 / n)
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
def init_basicortho_ttable(wordlist):
return init_distance_ttable(wordlist, basic_word_sim)
def difflib_similarity(word1, word2):
"""
Distance function using the built-in difflib ratio
"""
sequence_matcher = SequenceMatcher(None, word1, word2)
return sequence_matcher.ratio()
def init_difflib_ttable(wordlist):
return init_distance_ttable(wordlist, difflib_similarity)
def post_prob_scheme(t_table, stanza, scheme):
"""
Compute posterior probability of a scheme for a stanza, with probability of every word in rhymelist
rhyming with all the ones before it
"""
myprob = 1
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for i, word_index in enumerate(rhymelist):
if i == 0: # first word, use P(w|x)
myprob *= t_table[word_index, -1]
else:
for word_index2 in rhymelist[:i]: # history
myprob *= t_table[word_index, word_index2]
if myprob == 0 and len(stanza) > 30: # probably underflow
myprob = 1e-300
return myprob
def expectation_step(t_table, stanzas, schemes, rprobs):
"""
Compute posterior probability of schemes for each stanza
"""
probs = numpy.zeros((len(stanzas), schemes.num_schemes))
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
scheme = schemes.scheme_list[scheme_index]
probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme)
probs = numpy.dot(probs, numpy.diag(rprobs))
# Normalize
scheme_sums = numpy.sum(probs, axis=1)
for i, scheme_sum in enumerate(scheme_sums.tolist()):
if scheme_sum > 0:
probs[i, :] /= scheme_sum
return probs
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs
def init_uniform_r(schemes):
"""
Assign equal probability to all schemes
"""
return numpy.ones(schemes.num_schemes) / schemes.num_schemes
def get_results(probs, stanzas, schemes):
"""
Returns a list of tuples (
stanza [as list of final words],
best scheme [as list of integers]
)
"""
results = []
for i, stanza in enumerate(stanzas):
best_scheme = schemes.scheme_list[numpy.argmax(probs[i, :])]
results.append((stanza.words, best_scheme))
return results
def print_results(results, outfile):
"""
Write results to outfile
"""
for stanza_words, scheme in results:
outfile.write(str(' ').join(stanza_words) + str('\n'))
outfile.write(str(' ').join(map(str, scheme)) + str('\n\n'))
outfile.close()
logging.info("Wrote result")
def find_schemes(stanzas, t_table_init_function=init_uniform_ttable, num_iterations=10):
# Allow input of string lists as stanzas
if not isinstance(stanzas[0], Stanza):
stanzas = [Stanza(words) for words in stanzas]
scheme_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_filename, 'r') as scheme_file:
schemes = Schemes(scheme_file)
logging.info("Loaded files")
wordlist = get_wordlist(stanzas)
for stanza in stanzas:
stanza.set_word_indices(wordlist)
logging.info("Initialized list of {} words".format(len(wordlist)))
# Initialize p(r)
rprobs = init_uniform_r(schemes)
t_table = t_table_init_function(wordlist)
logging.info("Created t_table with shape {}".format(t_table.shape))
final_probs = iterate(t_table, wordlist, stanzas, schemes, rprobs, num_iterations)
logging.info("EM done; writing results")
results = get_results(final_probs, stanzas, schemes)
return results
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | Stanza.set_word_indices | python | def set_word_indices(self, wordlist):
self.word_indices = [wordlist.index(word) for word in self.words] | Populate the list of word_indices, mapping self.words to the given wordlist | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L42-L46 | null | class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
|
jvamvas/rhymediscovery | rhymediscovery/find_schemes.py | Schemes._parse_scheme_file | python | def _parse_scheme_file(self):
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict | Initialize redundant data structures for lookup optimization | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/find_schemes.py#L65-L77 | null | class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
|
jvamvas/rhymediscovery | rhymediscovery/evaluate_schemes.py | get_wordset | python | def get_wordset(poems):
words = sorted(list(set(reduce(lambda x, y: x + y, poems))))
return words | get all words | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/evaluate_schemes.py#L76-L79 | null | #!/usr/bin/env python
"""
Evaluate rhyme schemes against gold standard.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import os
import sys
from collections import defaultdict
from functools import reduce
class SuccessMeasure:
def __init__(self):
self.accuracy = None
self.precision = None
self.recall = None
self.f_score = None
def __str__(self):
return """\
Accuracy:\t{accuracy:.4f}
Precision:\t{precision:.4f}
Recall:\t\t{recall:.4f}
F-score:\t{f_score:.4f}""".format(
accuracy=self.accuracy,
precision=self.precision,
recall=self.recall,
f_score=self.f_score,
)
class EvaluationResult:
def __init__(self):
self.num_stanzas = None
self.num_lines = None
self.num_end_word_types = None
self.naive_baseline_success = None
self.less_naive_baseline_success = None
self.input_success = None
def __str__(self):
s = """\
Num of stanzas: {num_stanzas}
Num of lines: {num_lines}
Num of end word types: {num_end_word_types}
Naive baseline:
{naive_baseline_success}
Less naive baseline:
{less_naive_baseline_success}
""".format(
num_stanzas=self.num_stanzas,
num_lines=self.num_lines,
num_end_word_types=self.num_end_word_types,
naive_baseline_success=self.naive_baseline_success,
less_naive_baseline_success=self.less_naive_baseline_success,
)
if self.input_success:
s += """
Input:
{input_success}
""".format(
input_success=self.input_success,
)
return s
def load_gold(gold_file):
lines = gold_file.readlines()
stanzas = []
stanzaschemes = []
for i, line in enumerate(lines):
line = line.split()
if i % 4 == 0:
stanzas.append(tuple(line[1:]))
elif i % 4 == 1:
stanzaschemes.append(tuple(int(i) for i in line))
return [stanzaschemes, stanzas]
def load_hypothesis(result_lines):
stanzas = []
schemes = []
for i, line in enumerate(result_lines):
line = line.split()
if i % 3 == 0:
stanzas.append(tuple(line))
elif i % 3 == 1:
schemes.append(tuple(int(i) for i in line))
return zip(stanzas, schemes)
def compare(stanzas, gold_schemes, found_schemes):
"""get accuracy and precision/recall"""
result = SuccessMeasure()
total = float(len(gold_schemes))
correct = 0.0
for (g, f) in zip(gold_schemes, found_schemes):
if g == f:
correct += 1
result.accuracy = correct / total
# for each word, let rhymeset[word] = set of words in rest of stanza rhyming with the word
# precision = # correct words in rhymeset[word]/# words in proposed rhymeset[word]
# recall = # correct words in rhymeset[word]/# words in reference words in rhymeset[word]
# total precision and recall = avg over all words over all stanzas
tot_p = 0.0
tot_r = 0.0
tot_words = 0.0
for (s, g, f) in zip(stanzas, gold_schemes, found_schemes):
stanzasize = len(s)
for wi, word in enumerate(s):
grhymeset_word = set(
map(lambda x: x[0], filter(lambda x: x[1] == g[wi], zip(range(wi + 1, stanzasize), g[wi + 1:]))))
frhymeset_word = set(
map(lambda x: x[0], filter(lambda x: x[1] == f[wi], zip(range(wi + 1, stanzasize), f[wi + 1:]))))
if len(grhymeset_word) == 0:
continue
tot_words += 1
if len(frhymeset_word) == 0:
continue
# find intersection
correct = float(len(grhymeset_word.intersection(frhymeset_word)))
precision = correct / len(frhymeset_word)
recall = correct / len(grhymeset_word)
tot_p += precision
tot_r += recall
precision = tot_p / tot_words
recall = tot_r / tot_words
result.precision = precision
result.recall = recall
if precision + recall > 0:
result.f_score = 2 * precision * recall / (precision + recall)
return result
def naive(gold_schemes):
"""find naive baseline (most common scheme of a given length)?"""
scheme_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_path, 'r') as f:
dist = json.loads(f.read())
best_schemes = {}
for i in dist.keys():
if not dist[i]:
continue
best_schemes[int(i)] = tuple(int(j) for j in (max(dist[i], key=lambda x: x[1])[0]).split())
naive_schemes = []
for g in gold_schemes:
naive_schemes.append(best_schemes[len(g)])
return naive_schemes
def less_naive(gold_schemes):
"""find 'less naive' baseline (most common scheme of a given length in subcorpus)"""
best_schemes = defaultdict(lambda: defaultdict(int))
for g in gold_schemes:
best_schemes[len(g)][tuple(g)] += 1
for i in best_schemes:
best_schemes[i] = tuple(max(best_schemes[i].items(), key=lambda x: x[1])[0])
naive_schemes = []
for g in gold_schemes:
naive_schemes.append(best_schemes[len(g)])
return naive_schemes
def evaluate(gstanzaschemes, gstanzas, hypothesis):
words = get_wordset(gstanzas)
result = EvaluationResult()
result.num_stanzas = len(gstanzas)
result.num_lines = sum(map(len, gstanzas))
result.num_end_word_types = len(words)
naive_schemes = naive(gstanzaschemes)
result.naive_baseline_success = compare(gstanzas, gstanzaschemes, naive_schemes)
less_naive_schemes = less_naive(gstanzaschemes)
result.less_naive_baseline_success = compare(gstanzas, gstanzaschemes, less_naive_schemes)
hstanzaschemes = [scheme for (stanza, scheme) in hypothesis]
result.input_success = compare(gstanzas, gstanzaschemes, hstanzaschemes)
return result
def main(args_list=None):
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Evaluate find_schemes output')
parser.add_argument(
'infile',
help='Output of find_schemes that is evaluated',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin,
)
parser.add_argument(
'gold_file',
help='Stanzas annotated with correct rhyme schemes',
type=argparse.FileType('r'),
)
args = parser.parse_args(args_list)
gstanzaschemes, gstanzas = load_gold(args.gold_file)
args.gold_file.close()
hypothesis_lines = args.infile.readlines()
hstanzaschemes = load_hypothesis(hypothesis_lines)
result = evaluate(gstanzaschemes, gstanzas, hstanzaschemes)
print(result)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/evaluate_schemes.py | compare | python | def compare(stanzas, gold_schemes, found_schemes):
result = SuccessMeasure()
total = float(len(gold_schemes))
correct = 0.0
for (g, f) in zip(gold_schemes, found_schemes):
if g == f:
correct += 1
result.accuracy = correct / total
# for each word, let rhymeset[word] = set of words in rest of stanza rhyming with the word
# precision = # correct words in rhymeset[word]/# words in proposed rhymeset[word]
# recall = # correct words in rhymeset[word]/# words in reference words in rhymeset[word]
# total precision and recall = avg over all words over all stanzas
tot_p = 0.0
tot_r = 0.0
tot_words = 0.0
for (s, g, f) in zip(stanzas, gold_schemes, found_schemes):
stanzasize = len(s)
for wi, word in enumerate(s):
grhymeset_word = set(
map(lambda x: x[0], filter(lambda x: x[1] == g[wi], zip(range(wi + 1, stanzasize), g[wi + 1:]))))
frhymeset_word = set(
map(lambda x: x[0], filter(lambda x: x[1] == f[wi], zip(range(wi + 1, stanzasize), f[wi + 1:]))))
if len(grhymeset_word) == 0:
continue
tot_words += 1
if len(frhymeset_word) == 0:
continue
# find intersection
correct = float(len(grhymeset_word.intersection(frhymeset_word)))
precision = correct / len(frhymeset_word)
recall = correct / len(grhymeset_word)
tot_p += precision
tot_r += recall
precision = tot_p / tot_words
recall = tot_r / tot_words
result.precision = precision
result.recall = recall
if precision + recall > 0:
result.f_score = 2 * precision * recall / (precision + recall)
return result | get accuracy and precision/recall | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/evaluate_schemes.py#L107-L155 | null | #!/usr/bin/env python
"""
Evaluate rhyme schemes against gold standard.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import os
import sys
from collections import defaultdict
from functools import reduce
class SuccessMeasure:
def __init__(self):
self.accuracy = None
self.precision = None
self.recall = None
self.f_score = None
def __str__(self):
return """\
Accuracy:\t{accuracy:.4f}
Precision:\t{precision:.4f}
Recall:\t\t{recall:.4f}
F-score:\t{f_score:.4f}""".format(
accuracy=self.accuracy,
precision=self.precision,
recall=self.recall,
f_score=self.f_score,
)
class EvaluationResult:
def __init__(self):
self.num_stanzas = None
self.num_lines = None
self.num_end_word_types = None
self.naive_baseline_success = None
self.less_naive_baseline_success = None
self.input_success = None
def __str__(self):
s = """\
Num of stanzas: {num_stanzas}
Num of lines: {num_lines}
Num of end word types: {num_end_word_types}
Naive baseline:
{naive_baseline_success}
Less naive baseline:
{less_naive_baseline_success}
""".format(
num_stanzas=self.num_stanzas,
num_lines=self.num_lines,
num_end_word_types=self.num_end_word_types,
naive_baseline_success=self.naive_baseline_success,
less_naive_baseline_success=self.less_naive_baseline_success,
)
if self.input_success:
s += """
Input:
{input_success}
""".format(
input_success=self.input_success,
)
return s
def get_wordset(poems):
"""get all words"""
words = sorted(list(set(reduce(lambda x, y: x + y, poems))))
return words
def load_gold(gold_file):
lines = gold_file.readlines()
stanzas = []
stanzaschemes = []
for i, line in enumerate(lines):
line = line.split()
if i % 4 == 0:
stanzas.append(tuple(line[1:]))
elif i % 4 == 1:
stanzaschemes.append(tuple(int(i) for i in line))
return [stanzaschemes, stanzas]
def load_hypothesis(result_lines):
stanzas = []
schemes = []
for i, line in enumerate(result_lines):
line = line.split()
if i % 3 == 0:
stanzas.append(tuple(line))
elif i % 3 == 1:
schemes.append(tuple(int(i) for i in line))
return zip(stanzas, schemes)
def naive(gold_schemes):
"""find naive baseline (most common scheme of a given length)?"""
scheme_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_path, 'r') as f:
dist = json.loads(f.read())
best_schemes = {}
for i in dist.keys():
if not dist[i]:
continue
best_schemes[int(i)] = tuple(int(j) for j in (max(dist[i], key=lambda x: x[1])[0]).split())
naive_schemes = []
for g in gold_schemes:
naive_schemes.append(best_schemes[len(g)])
return naive_schemes
def less_naive(gold_schemes):
"""find 'less naive' baseline (most common scheme of a given length in subcorpus)"""
best_schemes = defaultdict(lambda: defaultdict(int))
for g in gold_schemes:
best_schemes[len(g)][tuple(g)] += 1
for i in best_schemes:
best_schemes[i] = tuple(max(best_schemes[i].items(), key=lambda x: x[1])[0])
naive_schemes = []
for g in gold_schemes:
naive_schemes.append(best_schemes[len(g)])
return naive_schemes
def evaluate(gstanzaschemes, gstanzas, hypothesis):
words = get_wordset(gstanzas)
result = EvaluationResult()
result.num_stanzas = len(gstanzas)
result.num_lines = sum(map(len, gstanzas))
result.num_end_word_types = len(words)
naive_schemes = naive(gstanzaschemes)
result.naive_baseline_success = compare(gstanzas, gstanzaschemes, naive_schemes)
less_naive_schemes = less_naive(gstanzaschemes)
result.less_naive_baseline_success = compare(gstanzas, gstanzaschemes, less_naive_schemes)
hstanzaschemes = [scheme for (stanza, scheme) in hypothesis]
result.input_success = compare(gstanzas, gstanzaschemes, hstanzaschemes)
return result
def main(args_list=None):
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Evaluate find_schemes output')
parser.add_argument(
'infile',
help='Output of find_schemes that is evaluated',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin,
)
parser.add_argument(
'gold_file',
help='Stanzas annotated with correct rhyme schemes',
type=argparse.FileType('r'),
)
args = parser.parse_args(args_list)
gstanzaschemes, gstanzas = load_gold(args.gold_file)
args.gold_file.close()
hypothesis_lines = args.infile.readlines()
hstanzaschemes = load_hypothesis(hypothesis_lines)
result = evaluate(gstanzaschemes, gstanzas, hstanzaschemes)
print(result)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/evaluate_schemes.py | naive | python | def naive(gold_schemes):
scheme_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_path, 'r') as f:
dist = json.loads(f.read())
best_schemes = {}
for i in dist.keys():
if not dist[i]:
continue
best_schemes[int(i)] = tuple(int(j) for j in (max(dist[i], key=lambda x: x[1])[0]).split())
naive_schemes = []
for g in gold_schemes:
naive_schemes.append(best_schemes[len(g)])
return naive_schemes | find naive baseline (most common scheme of a given length)? | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/evaluate_schemes.py#L158-L172 | null | #!/usr/bin/env python
"""
Evaluate rhyme schemes against gold standard.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import os
import sys
from collections import defaultdict
from functools import reduce
class SuccessMeasure:
def __init__(self):
self.accuracy = None
self.precision = None
self.recall = None
self.f_score = None
def __str__(self):
return """\
Accuracy:\t{accuracy:.4f}
Precision:\t{precision:.4f}
Recall:\t\t{recall:.4f}
F-score:\t{f_score:.4f}""".format(
accuracy=self.accuracy,
precision=self.precision,
recall=self.recall,
f_score=self.f_score,
)
class EvaluationResult:
def __init__(self):
self.num_stanzas = None
self.num_lines = None
self.num_end_word_types = None
self.naive_baseline_success = None
self.less_naive_baseline_success = None
self.input_success = None
def __str__(self):
s = """\
Num of stanzas: {num_stanzas}
Num of lines: {num_lines}
Num of end word types: {num_end_word_types}
Naive baseline:
{naive_baseline_success}
Less naive baseline:
{less_naive_baseline_success}
""".format(
num_stanzas=self.num_stanzas,
num_lines=self.num_lines,
num_end_word_types=self.num_end_word_types,
naive_baseline_success=self.naive_baseline_success,
less_naive_baseline_success=self.less_naive_baseline_success,
)
if self.input_success:
s += """
Input:
{input_success}
""".format(
input_success=self.input_success,
)
return s
def get_wordset(poems):
"""get all words"""
words = sorted(list(set(reduce(lambda x, y: x + y, poems))))
return words
def load_gold(gold_file):
lines = gold_file.readlines()
stanzas = []
stanzaschemes = []
for i, line in enumerate(lines):
line = line.split()
if i % 4 == 0:
stanzas.append(tuple(line[1:]))
elif i % 4 == 1:
stanzaschemes.append(tuple(int(i) for i in line))
return [stanzaschemes, stanzas]
def load_hypothesis(result_lines):
stanzas = []
schemes = []
for i, line in enumerate(result_lines):
line = line.split()
if i % 3 == 0:
stanzas.append(tuple(line))
elif i % 3 == 1:
schemes.append(tuple(int(i) for i in line))
return zip(stanzas, schemes)
def compare(stanzas, gold_schemes, found_schemes):
"""get accuracy and precision/recall"""
result = SuccessMeasure()
total = float(len(gold_schemes))
correct = 0.0
for (g, f) in zip(gold_schemes, found_schemes):
if g == f:
correct += 1
result.accuracy = correct / total
# for each word, let rhymeset[word] = set of words in rest of stanza rhyming with the word
# precision = # correct words in rhymeset[word]/# words in proposed rhymeset[word]
# recall = # correct words in rhymeset[word]/# words in reference words in rhymeset[word]
# total precision and recall = avg over all words over all stanzas
tot_p = 0.0
tot_r = 0.0
tot_words = 0.0
for (s, g, f) in zip(stanzas, gold_schemes, found_schemes):
stanzasize = len(s)
for wi, word in enumerate(s):
grhymeset_word = set(
map(lambda x: x[0], filter(lambda x: x[1] == g[wi], zip(range(wi + 1, stanzasize), g[wi + 1:]))))
frhymeset_word = set(
map(lambda x: x[0], filter(lambda x: x[1] == f[wi], zip(range(wi + 1, stanzasize), f[wi + 1:]))))
if len(grhymeset_word) == 0:
continue
tot_words += 1
if len(frhymeset_word) == 0:
continue
# find intersection
correct = float(len(grhymeset_word.intersection(frhymeset_word)))
precision = correct / len(frhymeset_word)
recall = correct / len(grhymeset_word)
tot_p += precision
tot_r += recall
precision = tot_p / tot_words
recall = tot_r / tot_words
result.precision = precision
result.recall = recall
if precision + recall > 0:
result.f_score = 2 * precision * recall / (precision + recall)
return result
def less_naive(gold_schemes):
"""find 'less naive' baseline (most common scheme of a given length in subcorpus)"""
best_schemes = defaultdict(lambda: defaultdict(int))
for g in gold_schemes:
best_schemes[len(g)][tuple(g)] += 1
for i in best_schemes:
best_schemes[i] = tuple(max(best_schemes[i].items(), key=lambda x: x[1])[0])
naive_schemes = []
for g in gold_schemes:
naive_schemes.append(best_schemes[len(g)])
return naive_schemes
def evaluate(gstanzaschemes, gstanzas, hypothesis):
words = get_wordset(gstanzas)
result = EvaluationResult()
result.num_stanzas = len(gstanzas)
result.num_lines = sum(map(len, gstanzas))
result.num_end_word_types = len(words)
naive_schemes = naive(gstanzaschemes)
result.naive_baseline_success = compare(gstanzas, gstanzaschemes, naive_schemes)
less_naive_schemes = less_naive(gstanzaschemes)
result.less_naive_baseline_success = compare(gstanzas, gstanzaschemes, less_naive_schemes)
hstanzaschemes = [scheme for (stanza, scheme) in hypothesis]
result.input_success = compare(gstanzas, gstanzaschemes, hstanzaschemes)
return result
def main(args_list=None):
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Evaluate find_schemes output')
parser.add_argument(
'infile',
help='Output of find_schemes that is evaluated',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin,
)
parser.add_argument(
'gold_file',
help='Stanzas annotated with correct rhyme schemes',
type=argparse.FileType('r'),
)
args = parser.parse_args(args_list)
gstanzaschemes, gstanzas = load_gold(args.gold_file)
args.gold_file.close()
hypothesis_lines = args.infile.readlines()
hstanzaschemes = load_hypothesis(hypothesis_lines)
result = evaluate(gstanzaschemes, gstanzas, hstanzaschemes)
print(result)
if __name__ == '__main__':
main()
|
jvamvas/rhymediscovery | rhymediscovery/evaluate_schemes.py | less_naive | python | def less_naive(gold_schemes):
best_schemes = defaultdict(lambda: defaultdict(int))
for g in gold_schemes:
best_schemes[len(g)][tuple(g)] += 1
for i in best_schemes:
best_schemes[i] = tuple(max(best_schemes[i].items(), key=lambda x: x[1])[0])
naive_schemes = []
for g in gold_schemes:
naive_schemes.append(best_schemes[len(g)])
return naive_schemes | find 'less naive' baseline (most common scheme of a given length in subcorpus) | train | https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/evaluate_schemes.py#L175-L187 | null | #!/usr/bin/env python
"""
Evaluate rhyme schemes against gold standard.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import os
import sys
from collections import defaultdict
from functools import reduce
class SuccessMeasure:
def __init__(self):
self.accuracy = None
self.precision = None
self.recall = None
self.f_score = None
def __str__(self):
return """\
Accuracy:\t{accuracy:.4f}
Precision:\t{precision:.4f}
Recall:\t\t{recall:.4f}
F-score:\t{f_score:.4f}""".format(
accuracy=self.accuracy,
precision=self.precision,
recall=self.recall,
f_score=self.f_score,
)
class EvaluationResult:
def __init__(self):
self.num_stanzas = None
self.num_lines = None
self.num_end_word_types = None
self.naive_baseline_success = None
self.less_naive_baseline_success = None
self.input_success = None
def __str__(self):
s = """\
Num of stanzas: {num_stanzas}
Num of lines: {num_lines}
Num of end word types: {num_end_word_types}
Naive baseline:
{naive_baseline_success}
Less naive baseline:
{less_naive_baseline_success}
""".format(
num_stanzas=self.num_stanzas,
num_lines=self.num_lines,
num_end_word_types=self.num_end_word_types,
naive_baseline_success=self.naive_baseline_success,
less_naive_baseline_success=self.less_naive_baseline_success,
)
if self.input_success:
s += """
Input:
{input_success}
""".format(
input_success=self.input_success,
)
return s
def get_wordset(poems):
"""get all words"""
words = sorted(list(set(reduce(lambda x, y: x + y, poems))))
return words
def load_gold(gold_file):
lines = gold_file.readlines()
stanzas = []
stanzaschemes = []
for i, line in enumerate(lines):
line = line.split()
if i % 4 == 0:
stanzas.append(tuple(line[1:]))
elif i % 4 == 1:
stanzaschemes.append(tuple(int(i) for i in line))
return [stanzaschemes, stanzas]
def load_hypothesis(result_lines):
stanzas = []
schemes = []
for i, line in enumerate(result_lines):
line = line.split()
if i % 3 == 0:
stanzas.append(tuple(line))
elif i % 3 == 1:
schemes.append(tuple(int(i) for i in line))
return zip(stanzas, schemes)
def compare(stanzas, gold_schemes, found_schemes):
"""get accuracy and precision/recall"""
result = SuccessMeasure()
total = float(len(gold_schemes))
correct = 0.0
for (g, f) in zip(gold_schemes, found_schemes):
if g == f:
correct += 1
result.accuracy = correct / total
# for each word, let rhymeset[word] = set of words in rest of stanza rhyming with the word
# precision = # correct words in rhymeset[word]/# words in proposed rhymeset[word]
# recall = # correct words in rhymeset[word]/# words in reference words in rhymeset[word]
# total precision and recall = avg over all words over all stanzas
tot_p = 0.0
tot_r = 0.0
tot_words = 0.0
for (s, g, f) in zip(stanzas, gold_schemes, found_schemes):
stanzasize = len(s)
for wi, word in enumerate(s):
grhymeset_word = set(
map(lambda x: x[0], filter(lambda x: x[1] == g[wi], zip(range(wi + 1, stanzasize), g[wi + 1:]))))
frhymeset_word = set(
map(lambda x: x[0], filter(lambda x: x[1] == f[wi], zip(range(wi + 1, stanzasize), f[wi + 1:]))))
if len(grhymeset_word) == 0:
continue
tot_words += 1
if len(frhymeset_word) == 0:
continue
# find intersection
correct = float(len(grhymeset_word.intersection(frhymeset_word)))
precision = correct / len(frhymeset_word)
recall = correct / len(grhymeset_word)
tot_p += precision
tot_r += recall
precision = tot_p / tot_words
recall = tot_r / tot_words
result.precision = precision
result.recall = recall
if precision + recall > 0:
result.f_score = 2 * precision * recall / (precision + recall)
return result
def naive(gold_schemes):
"""find naive baseline (most common scheme of a given length)?"""
scheme_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_path, 'r') as f:
dist = json.loads(f.read())
best_schemes = {}
for i in dist.keys():
if not dist[i]:
continue
best_schemes[int(i)] = tuple(int(j) for j in (max(dist[i], key=lambda x: x[1])[0]).split())
naive_schemes = []
for g in gold_schemes:
naive_schemes.append(best_schemes[len(g)])
return naive_schemes
def evaluate(gstanzaschemes, gstanzas, hypothesis):
words = get_wordset(gstanzas)
result = EvaluationResult()
result.num_stanzas = len(gstanzas)
result.num_lines = sum(map(len, gstanzas))
result.num_end_word_types = len(words)
naive_schemes = naive(gstanzaschemes)
result.naive_baseline_success = compare(gstanzas, gstanzaschemes, naive_schemes)
less_naive_schemes = less_naive(gstanzaschemes)
result.less_naive_baseline_success = compare(gstanzas, gstanzaschemes, less_naive_schemes)
hstanzaschemes = [scheme for (stanza, scheme) in hypothesis]
result.input_success = compare(gstanzas, gstanzaschemes, hstanzaschemes)
return result
def main(args_list=None):
args_list = args_list or sys.argv[1:]
parser = argparse.ArgumentParser(description='Evaluate find_schemes output')
parser.add_argument(
'infile',
help='Output of find_schemes that is evaluated',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin,
)
parser.add_argument(
'gold_file',
help='Stanzas annotated with correct rhyme schemes',
type=argparse.FileType('r'),
)
args = parser.parse_args(args_list)
gstanzaschemes, gstanzas = load_gold(args.gold_file)
args.gold_file.close()
hypothesis_lines = args.infile.readlines()
hstanzaschemes = load_hypothesis(hypothesis_lines)
result = evaluate(gstanzaschemes, gstanzas, hstanzaschemes)
print(result)
if __name__ == '__main__':
main()
|
svven/summary | summary/filters.py | AdblockURLFilterMeta.load_raw_rules | python | def load_raw_rules(cls, url):
"Load raw rules from url or package file."
raw_rules = []
filename = url.split('/')[-1] # e.g.: easylist.txt
try:
with closing(request.get(url, stream=True)) as file:
file.raise_for_status()
# lines = 0 # to be removed
for rule in file.iter_lines():
raw_rules.append(rule.strip())
# lines += 1 # tbr
# if lines == 2500: break # tbr, only for windoze with no re2
logger.info("Adblock online %s: %d", filename, len(raw_rules))
except: # file server down or bad url
with open(resource_filename('summary', filename), 'r') as file:
for rule in file:
raw_rules.append(rule.strip())
logger.info("Adblock offline %s: %d", filename, len(raw_rules))
return raw_rules | Load raw rules from url or package file. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/filters.py#L38-L56 | null | class AdblockURLFilterMeta(type):
"""
Lazy loading Adblock rules.
First try to download easylist.txt, or load file from package.
The same for extralist.txt.
"""
def load_raw_rules(cls, url):
"Load raw rules from url or package file."
raw_rules = []
filename = url.split('/')[-1] # e.g.: easylist.txt
try:
with closing(request.get(url, stream=True)) as file:
file.raise_for_status()
# lines = 0 # to be removed
for rule in file.iter_lines():
raw_rules.append(rule.strip())
# lines += 1 # tbr
# if lines == 2500: break # tbr, only for windoze with no re2
logger.info("Adblock online %s: %d", filename, len(raw_rules))
except: # file server down or bad url
with open(resource_filename('summary', filename), 'r') as file:
for rule in file:
raw_rules.append(rule.strip())
logger.info("Adblock offline %s: %d", filename, len(raw_rules))
return raw_rules
def get_all_rules(cls):
"Load all available Adblock rules."
from adblockparser import AdblockRules
raw_rules = []
for url in [
config.ADBLOCK_EASYLIST_URL, config.ADBLOCK_EXTRALIST_URL]:
raw_rules.extend(cls.load_raw_rules(url))
rules = AdblockRules(raw_rules)
return rules
@property
def rules(cls):
if getattr(cls, '_rules', None) is None:
rules = cls.get_all_rules()
cls._rules = rules
return cls._rules
|
svven/summary | summary/filters.py | AdblockURLFilterMeta.get_all_rules | python | def get_all_rules(cls):
"Load all available Adblock rules."
from adblockparser import AdblockRules
raw_rules = []
for url in [
config.ADBLOCK_EASYLIST_URL, config.ADBLOCK_EXTRALIST_URL]:
raw_rules.extend(cls.load_raw_rules(url))
rules = AdblockRules(raw_rules)
return rules | Load all available Adblock rules. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/filters.py#L58-L68 | [
"def load_raw_rules(cls, url):\n \"Load raw rules from url or package file.\"\n raw_rules = []\n filename = url.split('/')[-1] # e.g.: easylist.txt\n try:\n with closing(request.get(url, stream=True)) as file:\n file.raise_for_status()\n # lines = 0 # to be removed\n for rule in file.iter_lines():\n raw_rules.append(rule.strip())\n # lines += 1 # tbr\n # if lines == 2500: break # tbr, only for windoze with no re2\n logger.info(\"Adblock online %s: %d\", filename, len(raw_rules))\n except: # file server down or bad url\n with open(resource_filename('summary', filename), 'r') as file:\n for rule in file:\n raw_rules.append(rule.strip())\n logger.info(\"Adblock offline %s: %d\", filename, len(raw_rules))\n return raw_rules\n"
class AdblockURLFilterMeta(type):
    """
    Metaclass providing lazily loaded Adblock rules.

    First tries to download easylist.txt / extralist.txt; when the
    download fails it falls back to the copy bundled with the package.
    """

    def load_raw_rules(cls, url):
        """Load raw rule lines from `url`, falling back to the packaged file.

        Returns a list of stripped rule strings.
        """
        raw_rules = []
        filename = url.split('/')[-1]  # e.g.: easylist.txt
        try:
            with closing(request.get(url, stream=True)) as file:
                file.raise_for_status()
                for rule in file.iter_lines():
                    raw_rules.append(rule.strip())
            logger.info("Adblock online %s: %d", filename, len(raw_rules))
        except Exception:  # was a bare except:; keep the best-effort fallback
            # File server down or bad url: use the bundled copy instead.
            # Discard any partially downloaded rules to avoid duplicates.
            raw_rules = []
            with open(resource_filename('summary', filename), 'r') as file:
                for rule in file:
                    raw_rules.append(rule.strip())
            logger.info("Adblock offline %s: %d", filename, len(raw_rules))
        return raw_rules

    def get_all_rules(cls):
        """Load rules from all configured lists and compile an AdblockRules."""
        from adblockparser import AdblockRules
        raw_rules = []
        for url in [
            config.ADBLOCK_EASYLIST_URL, config.ADBLOCK_EXTRALIST_URL]:
            raw_rules.extend(cls.load_raw_rules(url))
        return AdblockRules(raw_rules)

    @property
    def rules(cls):
        """Compiled rules, built on first access and cached on the class."""
        if getattr(cls, '_rules', None) is None:
            cls._rules = cls.get_all_rules()
        return cls._rules
|
svven/summary | summary/filters.py | NoImageFilter.get_image | python | def get_image(cls, url):
"""
Returned Image instance has response url.
This might be different than the url param because of redirects.
"""
from PIL.ImageFile import Parser as PILParser
length = 0
raw_image = None
with closing(request.get(url, stream=True)) as response:
response.raise_for_status()
response_url = response.url
parser = PILParser()
for chunk in response.iter_content(config.CHUNK_SIZE):
length += len(chunk)
if length > config.IMAGE_MAX_BYTESIZE:
del parser
raise cls.MaxBytesException
parser.feed(chunk)
# comment this to get the whole file
if parser.image and parser.image.size:
raw_image = parser.image
del parser # free some memory
break
# or this to get just the size and format
# raw_image = parser.close()
if length == 0:
raise cls.ZeroBytesException
if not raw_image:
raise cls.NoImageException
image = Image(response_url, raw_image.size, raw_image.format)
return image | Returned Image instance has response url.
This might be different than the url param because of redirects. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/filters.py#L112-L143 | null | class NoImageFilter(object): # AdblockURLFilter
"""
Retrieves actual image file, and returns `None` if it fails.
Otherwise it returns an instance of the `filters.Image` class containing
the URL, together with the size and format of the actual image.
Basically it hydrates this instance which is passed to following filters.
Worth mentioning again that it only gets first few chunks of the image file
until the PIL parser gets the size and format of the image.
"""
class MaxBytesException(Exception):
pass
class ZeroBytesException(Exception):
pass
class NoImageException(Exception):
pass
@classmethod
def get_image(cls, url):
    """
    Fetch `url` incrementally and return an Image(url, size, format).

    Streams the response and feeds chunks to PIL's incremental parser,
    stopping as soon as the size and format are known, so only the first
    few chunks of large files are downloaded. The returned Image carries
    the response URL, which may differ from `url` because of redirects.

    Raises MaxBytesException, ZeroBytesException or NoImageException
    (class attributes) when the download is too big, empty, or not a
    parseable image, respectively.
    """
    from PIL.ImageFile import Parser as PILParser
    length = 0
    raw_image = None
    with closing(request.get(url, stream=True)) as response:
        response.raise_for_status()
        response_url = response.url  # final URL after redirects
        parser = PILParser()
        for chunk in response.iter_content(config.CHUNK_SIZE):
            length += len(chunk)
            if length > config.IMAGE_MAX_BYTESIZE:
                del parser
                raise cls.MaxBytesException
            parser.feed(chunk)
            # comment this to get the whole file
            if parser.image and parser.image.size:
                # size/format known: stop downloading early
                raw_image = parser.image
                del parser # free some memory
                break
            # or this to get just the size and format
            # raw_image = parser.close()
    if length == 0:
        raise cls.ZeroBytesException
    if not raw_image:
        raise cls.NoImageException
    image = Image(response_url, raw_image.size, raw_image.format)
    return image
def __call__(self, url):
    """Return an Image for `url`, or None when it cannot be fetched/parsed."""
    # url = super(NoImageFilter, self).__call__(url)
    try:
        image = NoImageFilter.get_image(url)
        return image
    except Exception, e:  # Python 2 syntax; any failure means "no image"
        if url.startswith('data'):  # data URI: trim for readable logging
            url = url[:url.find(';')]
        logger.debug("Bad image (%s): %s", clsn(e), url)
        pass
    return None
|
svven/summary | summary/filters.py | MonoImageFilter.check_color | python | def check_color(cls, raw_image):
"""
Just check if raw_image is completely white.
http://stackoverflow.com/questions/14041562/python-pil-detect-if-an-image-is-completely-black-or-white
"""
# sum(img.convert("L").getextrema()) in (0, 2)
extrema = raw_image.convert("L").getextrema()
if extrema == (255, 255): # all white
raise cls.MonoImageException | Just check if raw_image is completely white.
http://stackoverflow.com/questions/14041562/python-pil-detect-if-an-image-is-completely-black-or-white | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/filters.py#L223-L231 | null | class MonoImageFilter(object): # SizeImageFilter
"""
Checks whether the image is plain white and returns `None`.
This filter retrieves the whole image file so it has an extra regex check
before. E.g.: rules out these URLs:
- http://wordpress.com/i/blank.jpg?m=1383295312g
- http://images.inc.com/leftnavmenu/inc-logo-white.png
"""
__metaclass__ = MonoImageFilterMeta
class MonoImageException(Exception):
pass
@classmethod
def check_color(cls, raw_image):
    """Raise MonoImageException when `raw_image` is entirely white.

    Converts to greyscale and inspects the (min, max) pixel extrema; see
    http://stackoverflow.com/questions/14041562/python-pil-detect-if-an-image-is-completely-black-or-white
    """
    lo, hi = raw_image.convert("L").getextrema()
    if (lo, hi) == (255, 255):  # every greyscale pixel is pure white
        raise cls.MonoImageException
def __call__(self, image):
    """Return `image` unless its URL matches the regex and it is all white."""
    # image = super(MonoImageFilter, self).__call__(image)
    try:
        if MonoImageFilter.regex.search(image.url):  # regex built by the metaclass
            content = request.get(image.url).content  # whole file this time
            pic = StringIO(content)
            raw_image = PIL.Image.open(pic)
            MonoImageFilter.check_color(raw_image)  # raises when all white
            del raw_image # more cleaning maybe
        logger.debug("Good image (%s): %s", clsn(self), image.url)
        return image
    except Exception, e:  # Python 2 syntax; any failure rejects the image
        logger.debug("Bad image (%s): %s", clsn(e), image.url)
        pass
    return None
|
svven/summary | summary/filters.py | FormatImageFilter.check_animated | python | def check_animated(cls, raw_image):
"Checks whether the gif is animated."
try:
raw_image.seek(1)
except EOFError:
isanimated= False
else:
isanimated= True
raise cls.AnimatedImageException | Checks whether the gif is animated. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/filters.py#L259-L267 | null | class FormatImageFilter(object): # MonoImageFilter
"""
Rules out animated gif images for the moment.
This can be extended to exclude other image formats based on file contents.
"""
class AnimatedImageException(Exception):
pass
@classmethod
def check_animated(cls, raw_image):
    """Raise AnimatedImageException when `raw_image` has a second frame.

    Seeking to frame 1 succeeds only for multi-frame (animated) images;
    a static image raises EOFError, meaning the image is acceptable.
    """
    try:
        raw_image.seek(1)
    except EOFError:
        pass  # single frame: not animated
    else:
        # dropped the unused `isanimated` locals from the original
        raise cls.AnimatedImageException
def __call__(self, image):
    """Return `image` unless it is an animated GIF, in which case None."""
    # image = super(FormatImageFilter, self).__call__(image)
    try:
        if image.format.lower() == "gif":
            content = request.get(image.url).content  # need the full file
            pic = StringIO(content)
            raw_image = PIL.Image.open(pic)
            FormatImageFilter.check_animated(raw_image)  # raises when animated
            del raw_image
        logger.debug("Good image (%s): %s", clsn(self), image.url)
        return image
    except Exception, e:  # Python 2 syntax; any failure rejects the image
        logger.debug("Bad image (%s): %s", clsn(e), image.url)
        pass
    return None
|
svven/summary | summary/request.py | get | python | def get(url, **kwargs):
headers = kwargs.get('headers', {})
headers['User-Agent'] = config.USER_AGENT # overwrite
kwargs['headers'] = headers
timeout = kwargs.get('timeout', config.TIMEOUT)
kwargs['timeout'] = timeout
kwargs['verify'] = False # no SSLError
logger.debug("Getting: %s", url)
return requests.get(url, **kwargs) | Wrapper for `request.get` function to set params. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/request.py#L11-L25 | null | """
Wrapper for `requests`.
"""
import requests
requests.packages.urllib3.disable_warnings()
import time
import config, logging
logger = logging.getLogger(__name__)
def phantomjs_get(url):
    """
    Fetch `url` with headless PhantomJS and return the rendered page source.

    Disables image loading, sets the configured User-Agent, and sleeps
    briefly so client-side redirects have a chance to run.
    """
    from selenium import webdriver
    from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
    dcap = dict(DesiredCapabilities.PHANTOMJS)
    dcap["phantomjs.page.settings.userAgent"] = config.USER_AGENT
    dcap["phantomjs.page.settings.loadImages"] = False
    driver = webdriver.PhantomJS(desired_capabilities=dcap, executable_path=config.PHANTOMJS_BIN)
    try:
        logger.debug("PhantomJS get: %s", url)
        driver.get(url)
        time.sleep(10) # to follow redirects
        response = driver.page_source
    finally:
        # always release the browser process (was leaked when get() raised)
        driver.quit()
    return response
|
svven/summary | summary/request.py | phantomjs_get | python | def phantomjs_get(url):
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = config.USER_AGENT
dcap["phantomjs.page.settings.loadImages"] = False
driver = webdriver.PhantomJS(desired_capabilities=dcap, executable_path=config.PHANTOMJS_BIN)
logger.debug("PhantomJS get: %s", url)
driver.get(url)
time.sleep(10) # to follow redirects
response = driver.page_source
driver.quit()
return response | Perform the request via PhantomJS. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/request.py#L27-L45 | null | """
Wrapper for `requests`.
"""
import requests
requests.packages.urllib3.disable_warnings()
import time
import config, logging
logger = logging.getLogger(__name__)
def get(url, **kwargs):
    """Fetch `url` via requests.get with project-wide defaults applied."""
    # Always overwrite the User-Agent; default the timeout if absent.
    kwargs.setdefault('headers', {})['User-Agent'] = config.USER_AGENT
    kwargs.setdefault('timeout', config.TIMEOUT)
    kwargs['verify'] = False  # skip certificate checks: no SSLError
    logger.debug("Getting: %s", url)
    return requests.get(url, **kwargs)
|
svven/summary | summary/techniques.py | HTTPEquivRefreshTags.parse_refresh_header | python | def parse_refresh_header(self, refresh):
ii = refresh.find(";")
if ii != -1:
pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
jj = newurl_spec.find("=")
key = None
if jj != -1:
key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
newurl = self.clean_refresh_url(newurl)
if key is None or key.strip().lower() != "url":
raise ValueError()
else:
pause, newurl = float(refresh), None
return pause, newurl | >>> parse_refresh_header("1; url=http://example.com/")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1; url='http://example.com/'")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1")
(1.0, None)
>>> parse_refresh_header("blah") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): blah | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/techniques.py#L29-L53 | null | class HTTPEquivRefreshTags(Technique):
"""
Extract http-equiv refresh url to follow.
<meta http-equiv="refresh"
content="0;url=http://www.quora.com/Startup-Ideas/What-are-the-best-ways-to-think-of-ideas-for-a-startup">
"""
key_attr = 'http-equiv'
val_attr = 'refresh' #
def clean_refresh_url(self, url):
    """Strip one pair of matching wrapping quotes from `url` (Firefox 1.5 style)."""
    for quote in ('"', "'"):
        if url.startswith(quote) and url.endswith(quote):
            return url[1:-1]
    return url
def extract(self, html):
    """Return {'urls': [target]} when a meta http-equiv refresh tag is found."""
    extracted = {}
    soup = BeautifulSoup(html, parser)
    for meta in soup.find_all('meta'):
        attrs = meta.attrs
        if self.key_attr not in attrs or 'content' not in attrs:
            continue
        if meta[self.key_attr].lower() != self.val_attr:
            continue
        try:
            pause, target = self.parse_refresh_header(attrs['content'])
            if target:
                extracted['urls'] = [target]
                break  # one is enough
        except:
            pass  # malformed refresh header: never mind
    return extracted
|
svven/summary | summary/techniques.py | HTTPEquivRefreshTags.extract | python | def extract(self, html):
"Extract http-equiv refresh url to follow."
extracted = {}
soup = BeautifulSoup(html, parser)
for meta_tag in soup.find_all('meta'):
if self.key_attr in meta_tag.attrs and 'content' in meta_tag.attrs and \
meta_tag[self.key_attr].lower() == self.val_attr:
refresh = meta_tag.attrs['content']
try:
pause, newurl = self.parse_refresh_header(refresh)
if newurl:
extracted['urls'] = [newurl]
break # one is enough
except:
pass # nevermind
return extracted | Extract http-equiv refresh url to follow. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/techniques.py#L55-L70 | null | class HTTPEquivRefreshTags(Technique):
"""
Extract http-equiv refresh url to follow.
<meta http-equiv="refresh"
content="0;url=http://www.quora.com/Startup-Ideas/What-are-the-best-ways-to-think-of-ideas-for-a-startup">
"""
key_attr = 'http-equiv'
val_attr = 'refresh' #
def clean_refresh_url(self, url):
    """Strip a single pair of matching wrapping quotes from `url`, if present."""
    # e.g. Firefox 1.5 does (something like) this
    if ((url.startswith('"') and url.endswith('"')) or
        (url.startswith("'") and url.endswith("'"))):
        url = url[1:-1]
    return url
def parse_refresh_header(self, refresh):
    """
    Parse a meta-refresh `content` value into (pause, url-or-None).

    >>> parse_refresh_header("1; url=http://example.com/")
    (1.0, 'http://example.com/')
    >>> parse_refresh_header("1; url='http://example.com/'")
    (1.0, 'http://example.com/')
    >>> parse_refresh_header("1")
    (1.0, None)
    >>> parse_refresh_header("blah") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ValueError: invalid literal for float(): blah

    Raises ValueError for malformed input.
    """
    ii = refresh.find(";")
    if ii != -1:
        pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
        jj = newurl_spec.find("=")
        key = None
        if jj != -1:
            key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
            newurl = self.clean_refresh_url(newurl)
        if key is None or key.strip().lower() != "url":
            # was: raise ValueError() with no message at all
            raise ValueError("invalid refresh spec: %r" % refresh)
    else:
        pause, newurl = float(refresh), None
    return pause, newurl
|
svven/summary | summarize.py | render | python | def render(template, **kwargs):
"""
Renders the HTML containing provided summaries.
The summary has to be an instance of summary.Summary,
or at least contain similar properties: title, image, url,
description and collections: titles, images, descriptions.
"""
import jinja2
import os.path as path
searchpath = path.join(path.dirname(__file__),
"templates")
loader = jinja2.FileSystemLoader(searchpath=searchpath)
env = jinja2.Environment(loader=loader)
temp = env.get_template(template)
return temp.render(**kwargs) | Renders the HTML containing provided summaries.
The summary has to be an instance of summary.Summary,
or at least contain similar properties: title, image, url,
description and collections: titles, images, descriptions. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summarize.py#L6-L23 | null | """
Parses a list of URLs, performs data extraction,
and renders the output in HTML format as news articles.
"""
def render(template, **kwargs):
    """
    Render the named Jinja2 template from the local "templates" directory.

    The summaries passed in kwargs should quack like summary.Summary:
    title, image, url, description plus the titles/images/descriptions
    collections.
    """
    import jinja2
    import os.path as path
    templates_dir = path.join(path.dirname(__file__), "templates")
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath=templates_dir))
    return env.get_template(template).render(**kwargs)
def summarize(urls):
    """
    Calls extract for each of the URLs,
    Returns the list of Extracted instances as summaries,
    the result of the process, and the speed (seconds per URL).

    NOTE(review): an empty `urls` list makes the final
    `duration/len(summaries)` raise ZeroDivisionError -- confirm callers
    never pass an empty list.
    """
    import time
    from summary import Summary
    fails = 0
    err = lambda e: e.__class__.__name__  # short exception class name
    summaries = []
    start = time.time()
    for url in urls:
        try:
            print "-> %s" % url
            summary = Summary(url)
            summary.extract()
        except KeyboardInterrupt:
            break  # let the operator stop early
        except Exception, e:  # Python 2 syntax; record the failure and go on
            fails += 1
            # stand-in dict mimicking the Summary fields for the template
            summary = {
                'titles': ["[%s]" % err(e)],
                'urls': [url],
                'descriptions': [str(e)],
                'source': url,
            }
            print "[%s] (%s): %s" % (err(e), e, url)
        summaries.append(summary)
    end = time.time()
    result = fails and "Fails: %s out of %s." % (fails, len(summaries)) \
        or "Success: %s." % len(summaries)
    print result
    duration = end - start
    speed = "%.2f" % (duration/len(summaries))
    return summaries, result, speed
if __name__ == '__main__':
    # Read target URLs, one per line; '#'-comment lines and blanks are skipped.
    urls = []
    with open('urls.txt', 'r') as file:
        urls.extend([url.strip() for url in file if not url.strip().startswith("#") \
            and url.strip() != ""])
    summaries, result, speed = summarize(urls)
    # Render the news-style report and write it out UTF-8 encoded.
    page = render(template="news.html",
        summaries=summaries, result=result, speed=speed)
    with open('news.html', 'w') as file:
        file.write(page.encode('utf-8'))
|
svven/summary | summarize.py | summarize | python | def summarize(urls):
"""
Calls extract for each of the URLs,
Returns the list of Extracted instances as summaries,
the result of the process, and the speed.
"""
import time
from summary import Summary
fails = 0
err = lambda e: e.__class__.__name__
summaries = []
start = time.time()
for url in urls:
try:
print "-> %s" % url
summary = Summary(url)
summary.extract()
except KeyboardInterrupt:
break
except Exception, e:
fails += 1
summary = {
'titles': ["[%s]" % err(e)],
'urls': [url],
'descriptions': [str(e)],
'source': url,
}
print "[%s] (%s): %s" % (err(e), e, url)
summaries.append(summary)
end = time.time()
result = fails and "Fails: %s out of %s." % (fails, len(summaries)) \
or "Success: %s." % len(summaries)
print result
duration = end - start
speed = "%.2f" % (duration/len(summaries))
return summaries, result, speed | Calls extract for each of the URLs,
Returns the list of Extracted instances as summaries,
the result of the process, and the speed. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summarize.py#L26-L67 | [
"def extract(self, check_url=None, http_equiv_refresh=True):\n \"\"\"\n Downloads HTML <head> tag first, extracts data from it using\n specific head techniques, loads it and checks if is complete. \n Otherwise downloads the HTML <body> tag as well and loads data \n extracted by using appropriate semantic techniques.\n\n Eagerly calls check_url(url) if any, before parsing the HTML.\n Provided function should raise an exception to break extraction.\n E.g.: URL has been summarized before; URL points to off limits\n websites like foursquare.com, facebook.com, bitly.com and so on.\n \"\"\"\n # assert self._is_clear()\n logger = logging.getLogger(__name__)\n logger.info(\"Extract: %s\", self.clean_url)\n with closing(request.get(self.clean_url, stream=True)) as response:\n response.raise_for_status()\n mime = response.headers.get('content-type')\n if mime and not ('html' in mime.lower()):\n raise HTMLParseError('Invalid Content-Type: %s' % mime)\n self.clean_url = self._clean_url(response.url)\n if self.clean_url is None:\n raise URLError('Bad url: %s' % response.url)\n if check_url is not None:\n check_url(url=self.clean_url)\n\n encoding = config.ENCODING or response.encoding\n\n self._html = \"\"\n if config.PHANTOMJS_BIN and \\\n site(self.clean_url) in config.PHANTOMJS_SITES:\n self._html = request.phantomjs_get(self.clean_url)\n response.consumed = True\n\n head = self._get_tag(response, tag_name=\"head\", encoding=encoding)\n\n if http_equiv_refresh:\n # Check meta http-equiv refresh tag\n html = head or decode(self._html, encoding)\n self._extract(html, self.clean_url, [\n \"summary.techniques.HTTPEquivRefreshTags\",\n ])\n new_url = self.urls and self.urls[0]\n if new_url and new_url != self.clean_url:\n logger.warning(\"Refresh: %s\", new_url)\n self._clear()\n self.clean_url = new_url\n return self.extract(check_url=check_url, http_equiv_refresh=False)\n\n if head:\n logger.debug(\"Got head: %s\", len(head))\n self._extract(head, self.clean_url, [\n 
\"extraction.techniques.FacebookOpengraphTags\",\n \"extraction.techniques.TwitterSummaryCardTags\",\n \"extraction.techniques.HeadTags\"\n ])\n else:\n logger.debug(\"No head: %s\", self.clean_url)\n\n if config.GET_ALL_DATA or not self._is_complete():\n body = self._get_tag(response, tag_name=\"body\", encoding=encoding)\n if body:\n logger.debug(\"Got body: %s\", len(body))\n self._extract(body, self.clean_url, [\n \"extraction.techniques.HTML5SemanticTags\",\n \"extraction.techniques.SemanticTags\" \n ])\n else:\n logger.debug(\"No body: %s\", self.clean_url)\n\n if not head and not body:\n raise HTMLParseError('No head nor body tags found.')\n\n del self._html # no longer needed\n",
"err = lambda e: e.__class__.__name__\n"
] | """
Parses a list of URLs, performs data extraction,
and renders the output in HTML format as news articles.
"""
def render(template, **kwargs):
"""
Renders the HTML containing provided summaries.
The summary has to be an instance of summary.Summary,
or at least contain similar properties: title, image, url,
description and collections: titles, images, descriptions.
"""
import jinja2
import os.path as path
searchpath = path.join(path.dirname(__file__),
"templates")
loader = jinja2.FileSystemLoader(searchpath=searchpath)
env = jinja2.Environment(loader=loader)
temp = env.get_template(template)
return temp.render(**kwargs)
def summarize(urls):
"""
Calls extract for each of the URLs,
Returns the list of Extracted instances as summaries,
the result of the process, and the speed.
"""
import time
from summary import Summary
fails = 0
err = lambda e: e.__class__.__name__
summaries = []
start = time.time()
for url in urls:
try:
print "-> %s" % url
summary = Summary(url)
summary.extract()
except KeyboardInterrupt:
break
except Exception, e:
fails += 1
summary = {
'titles': ["[%s]" % err(e)],
'urls': [url],
'descriptions': [str(e)],
'source': url,
}
print "[%s] (%s): %s" % (err(e), e, url)
summaries.append(summary)
end = time.time()
result = fails and "Fails: %s out of %s." % (fails, len(summaries)) \
or "Success: %s." % len(summaries)
print result
duration = end - start
speed = "%.2f" % (duration/len(summaries))
return summaries, result, speed
if __name__ == '__main__':
urls = []
with open('urls.txt', 'r') as file:
urls.extend([url.strip() for url in file if not url.strip().startswith("#") \
and url.strip() != ""])
summaries, result, speed = summarize(urls)
page = render(template="news.html",
summaries=summaries, result=result, speed=speed)
with open('news.html', 'w') as file:
file.write(page.encode('utf-8'))
|
svven/summary | summary/__init__.py | Summary._load | python | def _load(self, titles=[], descriptions=[], images=[], urls=[], **kwargs):
enough = lambda items: items # len(items) >= MAX_ITEMS
if config.GET_ALL_DATA or not enough(self.titles):
titles = filter(None, map(self._clean_text, titles))
self.titles.extend(titles)
if config.GET_ALL_DATA or not enough(self.descriptions):
descriptions = filter(None, map(self._clean_text, descriptions))
self.descriptions.extend(descriptions)
## Never mind the urls, they can be bad not worth it
# if config.GET_ALL_DATA or not enough(self.urls):
# # urls = [self._clean_url(u) for u in urls]
# urls = filter(None, map(self._clean_url, urls))
# self.urls.extend(urls)
if config.GET_ALL_DATA:
# images = [i for i in [self._filter_image(i) for i in images] if i]
images = filter(None, map(self._filter_image, images))
self.images.extend(images)
elif not enough(self.images):
for i in images:
image = self._filter_image(i)
if image:
self.images.append(image)
if enough(self.images):
break | Loads extracted data into Summary.
Performs validation and filtering on-the-fly, and sets the
non-plural fields to the best specific item so far.
If GET_ALL_DATA is False, it gets only the first valid item. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/__init__.py#L103-L136 | null | class Summary(object):
"Provides incremental load mechanism and validation."
def __init__(self, source_url=None):
    """
    Unlike Extracted ctor, this one just sets the source_url.
    Extracted data is loaded later gradually by calling extract.
    """
    self._html = ""           # buffered response body consumed so far
    self.titles = []          # candidate titles, best first
    self.descriptions = []    # candidate descriptions, best first
    self.images = []          # candidate images that passed the filters
    self.urls = []            # candidate canonical urls
    self.source_url = source_url
    self.clean_url = self.source_url  # canonicalized later by extract()
# Non-plural properties
@property
def title(self):
"Return the best title, if any."
if self.titles:
return self.titles[0]
else:
return None
@property
def description(self):
"Return the best description, if any."
if self.descriptions:
return self.descriptions[0]
else:
return None
@property
def image(self):
"Return the best image, if any."
if self.images:
return self.images[0]
else:
return None
@property
def url(self):
"Return the best canonical url, or the cleaned source url."
if self.urls:
return self.urls[0]
else:
return self.clean_url
def _is_clear(self):
return not (self.titles or self.descriptions or self.images or self.urls)
def _is_complete(self):
return self.titles and self.descriptions and self.images and self.urls and True
def _clear(self):
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
# Picking the best item by sorting
# self.titles = sorted(self.titles, key=len)
# self.descriptions = sorted(self.descriptions, key=len, reverse=True)
# self.images = sorted(self.images, key=lambda i: sum(i.size), reverse=True)
def _clean_text(self, text):
"""
Checks for bad text like "{{ metatags.title }}" and such
"""
if text.startswith('{{') and text.endswith('}}'):
return None
return text
def _clean_url(self, url):
    """
    Canonicalizes the url, as it is done in Scrapy.
    And keeps only USEFUL_QUERY_KEYS. It also strips the
    trailing slash to help identifying dupes.

    Returns None for urls that are obviously unusable: non-http,
    unresolved template placeholders, Twitter nojs_router links.
    """
    # TODO: Turn this into regex
    if not url.startswith('http') or url.endswith('}}') or 'nojs_router' in url:
        return None
    if site(norm(url).lower()) in config.NONCANONIC_SITES:
        # these sites need their query params to identify the page
        clean_url = canonicalize_url(url, keep_params=True)
    else:
        clean_url = canonicalize_url(url)
    return clean_url
def _filter_image(self, url):
    "The param is the image URL, which is returned if it passes all the filters."
    # Each filter maps its input to a value for the next filter, or None;
    # the `f and g(f)` reduction short-circuits at the first None.
    # NOTE(review): relies on the Python 2 builtin `reduce`.
    return reduce(lambda f, g: f and g(f),
        [
            filters.AdblockURLFilter()(url),
            filters.NoImageFilter(),
            filters.SizeImageFilter(),
            filters.MonoImageFilter(),
            filters.FormatImageFilter(),
        ])
def _get_tag(self, response, tag_name="html", encoding="utf-8"):
    """
    Iterates response content and returns the tag if found.
    If not found, the response content is fully consumed so
    self._html equals response.content, and it returns None.

    NOTE(review): the indentation of the last two statements is ambiguous
    in this copy; the `elif consumed` branch in find_tag suggests they
    belong at function level with a loop exit -- verify against upstream.
    """
    def find_tag(tag_name):
        # Scan the buffered html for "<tag ... </tag>"; returns the raw
        # slice, or None while the closing tag has not arrived yet.
        tag_start = tag_end = None
        found = lambda: \
            tag_start is not None and tag_end is not None
        html = self._html.lower()
        start = html.find("<%s" % tag_name)
        if start >= 0:
            tag_start = start
        else:
            return None # no tag
        end = html.find("</%s>" % tag_name)
        if end > tag_start:
            tag_end = end+len(tag_name)+3  # include the "</tag>" itself
        elif consumed:
            tag_end = -1 # till the end
        if found():
            return self._html[tag_start:tag_end]
        return None
    consumed = getattr(response, 'consumed', False)
    if not consumed:
        # Reuse one content iterator across repeated calls (head first,
        # then body) so no chunk is ever read twice.
        stream = getattr(response, 'stream', None)
        if stream is None:
            stream = response.iter_content(config.CHUNK_SIZE) # , decode_unicode=True
            response.stream = stream
        while True:
            try:
                chunk = next(stream)
                self._html += chunk
                tag = find_tag(tag_name)
                if tag:
                    return tag
                if len(self._html) > config.HTML_MAX_BYTESIZE:
                    raise HTMLParseError('Maximum response size reached.')
            except StopIteration:
                # content exhausted: remember that and do a final scan
                response.consumed = True
                tag = find_tag(tag_name)
                return decode(tag, encoding) # decode here
def _extract(self, html, url, techniques):
    """Run the given extraction techniques over `html` and load the results."""
    extractor = extraction.SvvenExtractor(techniques=techniques)
    extracted = extractor.extract(html, source_url=url)
    self._load(**extracted)
def extract(self, check_url=None, http_equiv_refresh=True):
    """
    Downloads HTML <head> tag first, extracts data from it using
    specific head techniques, loads it and checks if is complete.
    Otherwise downloads the HTML <body> tag as well and loads data
    extracted by using appropriate semantic techniques.

    Eagerly calls check_url(url) if any, before parsing the HTML.
    Provided function should raise an exception to break extraction.
    E.g.: URL has been summarized before; URL points to off limits
    websites like foursquare.com, facebook.com, bitly.com and so on.
    """
    # assert self._is_clear()
    logger = logging.getLogger(__name__)
    logger.info("Extract: %s", self.clean_url)
    with closing(request.get(self.clean_url, stream=True)) as response:
        response.raise_for_status()
        mime = response.headers.get('content-type')
        if mime and not ('html' in mime.lower()):
            raise HTMLParseError('Invalid Content-Type: %s' % mime)
        self.clean_url = self._clean_url(response.url)  # follow redirects
        if self.clean_url is None:
            raise URLError('Bad url: %s' % response.url)
        if check_url is not None:
            check_url(url=self.clean_url)  # caller's veto hook
        encoding = config.ENCODING or response.encoding
        self._html = ""
        if config.PHANTOMJS_BIN and \
            site(self.clean_url) in config.PHANTOMJS_SITES:
            # JS-heavy site: let PhantomJS render the page instead
            self._html = request.phantomjs_get(self.clean_url)
            response.consumed = True
        head = self._get_tag(response, tag_name="head", encoding=encoding)
        if http_equiv_refresh:
            # Check meta http-equiv refresh tag
            html = head or decode(self._html, encoding)
            self._extract(html, self.clean_url, [
                "summary.techniques.HTTPEquivRefreshTags",
            ])
            new_url = self.urls and self.urls[0]
            if new_url and new_url != self.clean_url:
                logger.warning("Refresh: %s", new_url)
                self._clear()
                self.clean_url = new_url
                # follow the refresh once; don't chase further refreshes
                return self.extract(check_url=check_url, http_equiv_refresh=False)
        if head:
            logger.debug("Got head: %s", len(head))
            self._extract(head, self.clean_url, [
                "extraction.techniques.FacebookOpengraphTags",
                "extraction.techniques.TwitterSummaryCardTags",
                "extraction.techniques.HeadTags"
            ])
        else:
            logger.debug("No head: %s", self.clean_url)
        if config.GET_ALL_DATA or not self._is_complete():
            body = self._get_tag(response, tag_name="body", encoding=encoding)
            if body:
                logger.debug("Got body: %s", len(body))
                self._extract(body, self.clean_url, [
                    "extraction.techniques.HTML5SemanticTags",
                    "extraction.techniques.SemanticTags"
                ])
            else:
                logger.debug("No body: %s", self.clean_url)
        # NOTE(review): `body` is referenced here even when the branch
        # above was skipped (GET_ALL_DATA falsy and data complete) --
        # looks like a potential NameError; confirm against upstream.
        if not head and not body:
            raise HTMLParseError('No head nor body tags found.')
    del self._html # no longer needed
svven/summary | summary/__init__.py | Summary._clean_url | python | def _clean_url(self, url):
# TODO: Turn this into regex
if not url.startswith('http') or url.endswith('}}') or 'nojs_router' in url:
return None
if site(norm(url).lower()) in config.NONCANONIC_SITES:
clean_url = canonicalize_url(url, keep_params=True)
else:
clean_url = canonicalize_url(url)
return clean_url | Canonicalizes the url, as it is done in Scrapy.
And keeps only USEFUL_QUERY_KEYS. It also strips the
trailing slash to help identifying dupes. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/__init__.py#L151-L164 | null | class Summary(object):
"Provides incremental load mechanism and validation."
def __init__(self, source_url=None):
"""
Unlike Extracted ctor, this one just sets the source_url.
Extracted data is loaded later gradually by calling extract.
"""
self._html = ""
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
self.source_url = source_url
self.clean_url = self.source_url
# Non-plural properties
@property
def title(self):
"Return the best title, if any."
if self.titles:
return self.titles[0]
else:
return None
@property
def description(self):
"Return the best description, if any."
if self.descriptions:
return self.descriptions[0]
else:
return None
@property
def image(self):
"Return the best image, if any."
if self.images:
return self.images[0]
else:
return None
@property
def url(self):
"Return the best canonical url, or the cleaned source url."
if self.urls:
return self.urls[0]
else:
return self.clean_url
def _is_clear(self):
return not (self.titles or self.descriptions or self.images or self.urls)
def _is_complete(self):
return self.titles and self.descriptions and self.images and self.urls and True
def _clear(self):
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
def _load(self, titles=[], descriptions=[], images=[], urls=[], **kwargs):
"""
Loads extracted data into Summary.
Performs validation and filtering on-the-fly, and sets the
non-plural fields to the best specific item so far.
If GET_ALL_DATA is False, it gets only the first valid item.
"""
enough = lambda items: items # len(items) >= MAX_ITEMS
if config.GET_ALL_DATA or not enough(self.titles):
titles = filter(None, map(self._clean_text, titles))
self.titles.extend(titles)
if config.GET_ALL_DATA or not enough(self.descriptions):
descriptions = filter(None, map(self._clean_text, descriptions))
self.descriptions.extend(descriptions)
## Never mind the urls, they can be bad not worth it
# if config.GET_ALL_DATA or not enough(self.urls):
# # urls = [self._clean_url(u) for u in urls]
# urls = filter(None, map(self._clean_url, urls))
# self.urls.extend(urls)
if config.GET_ALL_DATA:
# images = [i for i in [self._filter_image(i) for i in images] if i]
images = filter(None, map(self._filter_image, images))
self.images.extend(images)
elif not enough(self.images):
for i in images:
image = self._filter_image(i)
if image:
self.images.append(image)
if enough(self.images):
break
# Picking the best item by sorting
# self.titles = sorted(self.titles, key=len)
# self.descriptions = sorted(self.descriptions, key=len, reverse=True)
# self.images = sorted(self.images, key=lambda i: sum(i.size), reverse=True)
def _clean_text(self, text):
"""
Checks for bad text like "{{ metatags.title }}" and such
"""
if text.startswith('{{') and text.endswith('}}'):
return None
return text
def _filter_image(self, url):
"The param is the image URL, which is returned if it passes all the filters."
return reduce(lambda f, g: f and g(f),
[
filters.AdblockURLFilter()(url),
filters.NoImageFilter(),
filters.SizeImageFilter(),
filters.MonoImageFilter(),
filters.FormatImageFilter(),
])
def _get_tag(self, response, tag_name="html", encoding="utf-8"):
"""
Iterates response content and returns the tag if found.
If not found, the response content is fully consumed so
self._html equals response.content, and it returns None.
"""
def find_tag(tag_name):
tag_start = tag_end = None
found = lambda: \
tag_start is not None and tag_end is not None
html = self._html.lower()
start = html.find("<%s" % tag_name)
if start >= 0:
tag_start = start
else:
return None # no tag
end = html.find("</%s>" % tag_name)
if end > tag_start:
tag_end = end+len(tag_name)+3
elif consumed:
tag_end = -1 # till the end
if found():
return self._html[tag_start:tag_end]
return None
consumed = getattr(response, 'consumed', False)
if not consumed:
stream = getattr(response, 'stream', None)
if stream is None:
stream = response.iter_content(config.CHUNK_SIZE) # , decode_unicode=True
response.stream = stream
while True:
try:
chunk = next(stream)
self._html += chunk
tag = find_tag(tag_name)
if tag:
return tag
if len(self._html) > config.HTML_MAX_BYTESIZE:
raise HTMLParseError('Maximum response size reached.')
except StopIteration:
response.consumed = True
tag = find_tag(tag_name)
return decode(tag, encoding) # decode here
def _extract(self, html, url, techniques):
extractor = extraction.SvvenExtractor(techniques=techniques)
extracted = extractor.extract(html, source_url=url)
self._load(**extracted)
def extract(self, check_url=None, http_equiv_refresh=True):
"""
Downloads HTML <head> tag first, extracts data from it using
specific head techniques, loads it and checks if is complete.
Otherwise downloads the HTML <body> tag as well and loads data
extracted by using appropriate semantic techniques.
Eagerly calls check_url(url) if any, before parsing the HTML.
Provided function should raise an exception to break extraction.
E.g.: URL has been summarized before; URL points to off limits
websites like foursquare.com, facebook.com, bitly.com and so on.
"""
# assert self._is_clear()
logger = logging.getLogger(__name__)
logger.info("Extract: %s", self.clean_url)
with closing(request.get(self.clean_url, stream=True)) as response:
response.raise_for_status()
mime = response.headers.get('content-type')
if mime and not ('html' in mime.lower()):
raise HTMLParseError('Invalid Content-Type: %s' % mime)
self.clean_url = self._clean_url(response.url)
if self.clean_url is None:
raise URLError('Bad url: %s' % response.url)
if check_url is not None:
check_url(url=self.clean_url)
encoding = config.ENCODING or response.encoding
self._html = ""
if config.PHANTOMJS_BIN and \
site(self.clean_url) in config.PHANTOMJS_SITES:
self._html = request.phantomjs_get(self.clean_url)
response.consumed = True
head = self._get_tag(response, tag_name="head", encoding=encoding)
if http_equiv_refresh:
# Check meta http-equiv refresh tag
html = head or decode(self._html, encoding)
self._extract(html, self.clean_url, [
"summary.techniques.HTTPEquivRefreshTags",
])
new_url = self.urls and self.urls[0]
if new_url and new_url != self.clean_url:
logger.warning("Refresh: %s", new_url)
self._clear()
self.clean_url = new_url
return self.extract(check_url=check_url, http_equiv_refresh=False)
if head:
logger.debug("Got head: %s", len(head))
self._extract(head, self.clean_url, [
"extraction.techniques.FacebookOpengraphTags",
"extraction.techniques.TwitterSummaryCardTags",
"extraction.techniques.HeadTags"
])
else:
logger.debug("No head: %s", self.clean_url)
if config.GET_ALL_DATA or not self._is_complete():
body = self._get_tag(response, tag_name="body", encoding=encoding)
if body:
logger.debug("Got body: %s", len(body))
self._extract(body, self.clean_url, [
"extraction.techniques.HTML5SemanticTags",
"extraction.techniques.SemanticTags"
])
else:
logger.debug("No body: %s", self.clean_url)
if not head and not body:
raise HTMLParseError('No head nor body tags found.')
del self._html # no longer needed
|
svven/summary | summary/__init__.py | Summary._filter_image | python | def _filter_image(self, url):
"The param is the image URL, which is returned if it passes all the filters."
return reduce(lambda f, g: f and g(f),
[
filters.AdblockURLFilter()(url),
filters.NoImageFilter(),
filters.SizeImageFilter(),
filters.MonoImageFilter(),
filters.FormatImageFilter(),
]) | The param is the image URL, which is returned if it passes all the filters. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/__init__.py#L166-L175 | null | class Summary(object):
"Provides incremental load mechanism and validation."
def __init__(self, source_url=None):
"""
Unlike Extracted ctor, this one just sets the source_url.
Extracted data is loaded later gradually by calling extract.
"""
self._html = ""
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
self.source_url = source_url
self.clean_url = self.source_url
# Non-plural properties
@property
def title(self):
"Return the best title, if any."
if self.titles:
return self.titles[0]
else:
return None
@property
def description(self):
"Return the best description, if any."
if self.descriptions:
return self.descriptions[0]
else:
return None
@property
def image(self):
"Return the best image, if any."
if self.images:
return self.images[0]
else:
return None
@property
def url(self):
"Return the best canonical url, or the cleaned source url."
if self.urls:
return self.urls[0]
else:
return self.clean_url
def _is_clear(self):
return not (self.titles or self.descriptions or self.images or self.urls)
def _is_complete(self):
return self.titles and self.descriptions and self.images and self.urls and True
def _clear(self):
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
def _load(self, titles=[], descriptions=[], images=[], urls=[], **kwargs):
"""
Loads extracted data into Summary.
Performs validation and filtering on-the-fly, and sets the
non-plural fields to the best specific item so far.
If GET_ALL_DATA is False, it gets only the first valid item.
"""
enough = lambda items: items # len(items) >= MAX_ITEMS
if config.GET_ALL_DATA or not enough(self.titles):
titles = filter(None, map(self._clean_text, titles))
self.titles.extend(titles)
if config.GET_ALL_DATA or not enough(self.descriptions):
descriptions = filter(None, map(self._clean_text, descriptions))
self.descriptions.extend(descriptions)
## Never mind the urls, they can be bad not worth it
# if config.GET_ALL_DATA or not enough(self.urls):
# # urls = [self._clean_url(u) for u in urls]
# urls = filter(None, map(self._clean_url, urls))
# self.urls.extend(urls)
if config.GET_ALL_DATA:
# images = [i for i in [self._filter_image(i) for i in images] if i]
images = filter(None, map(self._filter_image, images))
self.images.extend(images)
elif not enough(self.images):
for i in images:
image = self._filter_image(i)
if image:
self.images.append(image)
if enough(self.images):
break
# Picking the best item by sorting
# self.titles = sorted(self.titles, key=len)
# self.descriptions = sorted(self.descriptions, key=len, reverse=True)
# self.images = sorted(self.images, key=lambda i: sum(i.size), reverse=True)
def _clean_text(self, text):
"""
Checks for bad text like "{{ metatags.title }}" and such
"""
if text.startswith('{{') and text.endswith('}}'):
return None
return text
def _clean_url(self, url):
"""
Canonicalizes the url, as it is done in Scrapy.
And keeps only USEFUL_QUERY_KEYS. It also strips the
trailing slash to help identifying dupes.
"""
# TODO: Turn this into regex
if not url.startswith('http') or url.endswith('}}') or 'nojs_router' in url:
return None
if site(norm(url).lower()) in config.NONCANONIC_SITES:
clean_url = canonicalize_url(url, keep_params=True)
else:
clean_url = canonicalize_url(url)
return clean_url
def _get_tag(self, response, tag_name="html", encoding="utf-8"):
"""
Iterates response content and returns the tag if found.
If not found, the response content is fully consumed so
self._html equals response.content, and it returns None.
"""
def find_tag(tag_name):
tag_start = tag_end = None
found = lambda: \
tag_start is not None and tag_end is not None
html = self._html.lower()
start = html.find("<%s" % tag_name)
if start >= 0:
tag_start = start
else:
return None # no tag
end = html.find("</%s>" % tag_name)
if end > tag_start:
tag_end = end+len(tag_name)+3
elif consumed:
tag_end = -1 # till the end
if found():
return self._html[tag_start:tag_end]
return None
consumed = getattr(response, 'consumed', False)
if not consumed:
stream = getattr(response, 'stream', None)
if stream is None:
stream = response.iter_content(config.CHUNK_SIZE) # , decode_unicode=True
response.stream = stream
while True:
try:
chunk = next(stream)
self._html += chunk
tag = find_tag(tag_name)
if tag:
return tag
if len(self._html) > config.HTML_MAX_BYTESIZE:
raise HTMLParseError('Maximum response size reached.')
except StopIteration:
response.consumed = True
tag = find_tag(tag_name)
return decode(tag, encoding) # decode here
def _extract(self, html, url, techniques):
extractor = extraction.SvvenExtractor(techniques=techniques)
extracted = extractor.extract(html, source_url=url)
self._load(**extracted)
def extract(self, check_url=None, http_equiv_refresh=True):
"""
Downloads HTML <head> tag first, extracts data from it using
specific head techniques, loads it and checks if is complete.
Otherwise downloads the HTML <body> tag as well and loads data
extracted by using appropriate semantic techniques.
Eagerly calls check_url(url) if any, before parsing the HTML.
Provided function should raise an exception to break extraction.
E.g.: URL has been summarized before; URL points to off limits
websites like foursquare.com, facebook.com, bitly.com and so on.
"""
# assert self._is_clear()
logger = logging.getLogger(__name__)
logger.info("Extract: %s", self.clean_url)
with closing(request.get(self.clean_url, stream=True)) as response:
response.raise_for_status()
mime = response.headers.get('content-type')
if mime and not ('html' in mime.lower()):
raise HTMLParseError('Invalid Content-Type: %s' % mime)
self.clean_url = self._clean_url(response.url)
if self.clean_url is None:
raise URLError('Bad url: %s' % response.url)
if check_url is not None:
check_url(url=self.clean_url)
encoding = config.ENCODING or response.encoding
self._html = ""
if config.PHANTOMJS_BIN and \
site(self.clean_url) in config.PHANTOMJS_SITES:
self._html = request.phantomjs_get(self.clean_url)
response.consumed = True
head = self._get_tag(response, tag_name="head", encoding=encoding)
if http_equiv_refresh:
# Check meta http-equiv refresh tag
html = head or decode(self._html, encoding)
self._extract(html, self.clean_url, [
"summary.techniques.HTTPEquivRefreshTags",
])
new_url = self.urls and self.urls[0]
if new_url and new_url != self.clean_url:
logger.warning("Refresh: %s", new_url)
self._clear()
self.clean_url = new_url
return self.extract(check_url=check_url, http_equiv_refresh=False)
if head:
logger.debug("Got head: %s", len(head))
self._extract(head, self.clean_url, [
"extraction.techniques.FacebookOpengraphTags",
"extraction.techniques.TwitterSummaryCardTags",
"extraction.techniques.HeadTags"
])
else:
logger.debug("No head: %s", self.clean_url)
if config.GET_ALL_DATA or not self._is_complete():
body = self._get_tag(response, tag_name="body", encoding=encoding)
if body:
logger.debug("Got body: %s", len(body))
self._extract(body, self.clean_url, [
"extraction.techniques.HTML5SemanticTags",
"extraction.techniques.SemanticTags"
])
else:
logger.debug("No body: %s", self.clean_url)
if not head and not body:
raise HTMLParseError('No head nor body tags found.')
del self._html # no longer needed
|
svven/summary | summary/__init__.py | Summary._get_tag | python | def _get_tag(self, response, tag_name="html", encoding="utf-8"):
def find_tag(tag_name):
tag_start = tag_end = None
found = lambda: \
tag_start is not None and tag_end is not None
html = self._html.lower()
start = html.find("<%s" % tag_name)
if start >= 0:
tag_start = start
else:
return None # no tag
end = html.find("</%s>" % tag_name)
if end > tag_start:
tag_end = end+len(tag_name)+3
elif consumed:
tag_end = -1 # till the end
if found():
return self._html[tag_start:tag_end]
return None
consumed = getattr(response, 'consumed', False)
if not consumed:
stream = getattr(response, 'stream', None)
if stream is None:
stream = response.iter_content(config.CHUNK_SIZE) # , decode_unicode=True
response.stream = stream
while True:
try:
chunk = next(stream)
self._html += chunk
tag = find_tag(tag_name)
if tag:
return tag
if len(self._html) > config.HTML_MAX_BYTESIZE:
raise HTMLParseError('Maximum response size reached.')
except StopIteration:
response.consumed = True
tag = find_tag(tag_name)
return decode(tag, encoding) | Iterates response content and returns the tag if found.
If not found, the response content is fully consumed so
self._html equals response.content, and it returns None. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/__init__.py#L177-L219 | null | class Summary(object):
"Provides incremental load mechanism and validation."
def __init__(self, source_url=None):
"""
Unlike Extracted ctor, this one just sets the source_url.
Extracted data is loaded later gradually by calling extract.
"""
self._html = ""
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
self.source_url = source_url
self.clean_url = self.source_url
# Non-plural properties
@property
def title(self):
"Return the best title, if any."
if self.titles:
return self.titles[0]
else:
return None
@property
def description(self):
"Return the best description, if any."
if self.descriptions:
return self.descriptions[0]
else:
return None
@property
def image(self):
"Return the best image, if any."
if self.images:
return self.images[0]
else:
return None
@property
def url(self):
"Return the best canonical url, or the cleaned source url."
if self.urls:
return self.urls[0]
else:
return self.clean_url
def _is_clear(self):
return not (self.titles or self.descriptions or self.images or self.urls)
def _is_complete(self):
return self.titles and self.descriptions and self.images and self.urls and True
def _clear(self):
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
def _load(self, titles=[], descriptions=[], images=[], urls=[], **kwargs):
"""
Loads extracted data into Summary.
Performs validation and filtering on-the-fly, and sets the
non-plural fields to the best specific item so far.
If GET_ALL_DATA is False, it gets only the first valid item.
"""
enough = lambda items: items # len(items) >= MAX_ITEMS
if config.GET_ALL_DATA or not enough(self.titles):
titles = filter(None, map(self._clean_text, titles))
self.titles.extend(titles)
if config.GET_ALL_DATA or not enough(self.descriptions):
descriptions = filter(None, map(self._clean_text, descriptions))
self.descriptions.extend(descriptions)
## Never mind the urls, they can be bad not worth it
# if config.GET_ALL_DATA or not enough(self.urls):
# # urls = [self._clean_url(u) for u in urls]
# urls = filter(None, map(self._clean_url, urls))
# self.urls.extend(urls)
if config.GET_ALL_DATA:
# images = [i for i in [self._filter_image(i) for i in images] if i]
images = filter(None, map(self._filter_image, images))
self.images.extend(images)
elif not enough(self.images):
for i in images:
image = self._filter_image(i)
if image:
self.images.append(image)
if enough(self.images):
break
# Picking the best item by sorting
# self.titles = sorted(self.titles, key=len)
# self.descriptions = sorted(self.descriptions, key=len, reverse=True)
# self.images = sorted(self.images, key=lambda i: sum(i.size), reverse=True)
def _clean_text(self, text):
"""
Checks for bad text like "{{ metatags.title }}" and such
"""
if text.startswith('{{') and text.endswith('}}'):
return None
return text
def _clean_url(self, url):
"""
Canonicalizes the url, as it is done in Scrapy.
And keeps only USEFUL_QUERY_KEYS. It also strips the
trailing slash to help identifying dupes.
"""
# TODO: Turn this into regex
if not url.startswith('http') or url.endswith('}}') or 'nojs_router' in url:
return None
if site(norm(url).lower()) in config.NONCANONIC_SITES:
clean_url = canonicalize_url(url, keep_params=True)
else:
clean_url = canonicalize_url(url)
return clean_url
def _filter_image(self, url):
"The param is the image URL, which is returned if it passes all the filters."
return reduce(lambda f, g: f and g(f),
[
filters.AdblockURLFilter()(url),
filters.NoImageFilter(),
filters.SizeImageFilter(),
filters.MonoImageFilter(),
filters.FormatImageFilter(),
])
# decode here
def _extract(self, html, url, techniques):
extractor = extraction.SvvenExtractor(techniques=techniques)
extracted = extractor.extract(html, source_url=url)
self._load(**extracted)
def extract(self, check_url=None, http_equiv_refresh=True):
"""
Downloads HTML <head> tag first, extracts data from it using
specific head techniques, loads it and checks if is complete.
Otherwise downloads the HTML <body> tag as well and loads data
extracted by using appropriate semantic techniques.
Eagerly calls check_url(url) if any, before parsing the HTML.
Provided function should raise an exception to break extraction.
E.g.: URL has been summarized before; URL points to off limits
websites like foursquare.com, facebook.com, bitly.com and so on.
"""
# assert self._is_clear()
logger = logging.getLogger(__name__)
logger.info("Extract: %s", self.clean_url)
with closing(request.get(self.clean_url, stream=True)) as response:
response.raise_for_status()
mime = response.headers.get('content-type')
if mime and not ('html' in mime.lower()):
raise HTMLParseError('Invalid Content-Type: %s' % mime)
self.clean_url = self._clean_url(response.url)
if self.clean_url is None:
raise URLError('Bad url: %s' % response.url)
if check_url is not None:
check_url(url=self.clean_url)
encoding = config.ENCODING or response.encoding
self._html = ""
if config.PHANTOMJS_BIN and \
site(self.clean_url) in config.PHANTOMJS_SITES:
self._html = request.phantomjs_get(self.clean_url)
response.consumed = True
head = self._get_tag(response, tag_name="head", encoding=encoding)
if http_equiv_refresh:
# Check meta http-equiv refresh tag
html = head or decode(self._html, encoding)
self._extract(html, self.clean_url, [
"summary.techniques.HTTPEquivRefreshTags",
])
new_url = self.urls and self.urls[0]
if new_url and new_url != self.clean_url:
logger.warning("Refresh: %s", new_url)
self._clear()
self.clean_url = new_url
return self.extract(check_url=check_url, http_equiv_refresh=False)
if head:
logger.debug("Got head: %s", len(head))
self._extract(head, self.clean_url, [
"extraction.techniques.FacebookOpengraphTags",
"extraction.techniques.TwitterSummaryCardTags",
"extraction.techniques.HeadTags"
])
else:
logger.debug("No head: %s", self.clean_url)
if config.GET_ALL_DATA or not self._is_complete():
body = self._get_tag(response, tag_name="body", encoding=encoding)
if body:
logger.debug("Got body: %s", len(body))
self._extract(body, self.clean_url, [
"extraction.techniques.HTML5SemanticTags",
"extraction.techniques.SemanticTags"
])
else:
logger.debug("No body: %s", self.clean_url)
if not head and not body:
raise HTMLParseError('No head nor body tags found.')
del self._html # no longer needed
|
svven/summary | summary/__init__.py | Summary.extract | python | def extract(self, check_url=None, http_equiv_refresh=True):
# assert self._is_clear()
logger = logging.getLogger(__name__)
logger.info("Extract: %s", self.clean_url)
with closing(request.get(self.clean_url, stream=True)) as response:
response.raise_for_status()
mime = response.headers.get('content-type')
if mime and not ('html' in mime.lower()):
raise HTMLParseError('Invalid Content-Type: %s' % mime)
self.clean_url = self._clean_url(response.url)
if self.clean_url is None:
raise URLError('Bad url: %s' % response.url)
if check_url is not None:
check_url(url=self.clean_url)
encoding = config.ENCODING or response.encoding
self._html = ""
if config.PHANTOMJS_BIN and \
site(self.clean_url) in config.PHANTOMJS_SITES:
self._html = request.phantomjs_get(self.clean_url)
response.consumed = True
head = self._get_tag(response, tag_name="head", encoding=encoding)
if http_equiv_refresh:
# Check meta http-equiv refresh tag
html = head or decode(self._html, encoding)
self._extract(html, self.clean_url, [
"summary.techniques.HTTPEquivRefreshTags",
])
new_url = self.urls and self.urls[0]
if new_url and new_url != self.clean_url:
logger.warning("Refresh: %s", new_url)
self._clear()
self.clean_url = new_url
return self.extract(check_url=check_url, http_equiv_refresh=False)
if head:
logger.debug("Got head: %s", len(head))
self._extract(head, self.clean_url, [
"extraction.techniques.FacebookOpengraphTags",
"extraction.techniques.TwitterSummaryCardTags",
"extraction.techniques.HeadTags"
])
else:
logger.debug("No head: %s", self.clean_url)
if config.GET_ALL_DATA or not self._is_complete():
body = self._get_tag(response, tag_name="body", encoding=encoding)
if body:
logger.debug("Got body: %s", len(body))
self._extract(body, self.clean_url, [
"extraction.techniques.HTML5SemanticTags",
"extraction.techniques.SemanticTags"
])
else:
logger.debug("No body: %s", self.clean_url)
if not head and not body:
raise HTMLParseError('No head nor body tags found.')
del self._html | Downloads HTML <head> tag first, extracts data from it using
specific head techniques, loads it and checks if is complete.
Otherwise downloads the HTML <body> tag as well and loads data
extracted by using appropriate semantic techniques.
Eagerly calls check_url(url) if any, before parsing the HTML.
Provided function should raise an exception to break extraction.
E.g.: URL has been summarized before; URL points to off limits
websites like foursquare.com, facebook.com, bitly.com and so on. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/__init__.py#L227-L300 | null | class Summary(object):
"Provides incremental load mechanism and validation."
def __init__(self, source_url=None):
"""
Unlike Extracted ctor, this one just sets the source_url.
Extracted data is loaded later gradually by calling extract.
"""
self._html = ""
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
self.source_url = source_url
self.clean_url = self.source_url
# Non-plural properties
@property
def title(self):
"Return the best title, if any."
if self.titles:
return self.titles[0]
else:
return None
@property
def description(self):
"Return the best description, if any."
if self.descriptions:
return self.descriptions[0]
else:
return None
@property
def image(self):
"Return the best image, if any."
if self.images:
return self.images[0]
else:
return None
@property
def url(self):
"Return the best canonical url, or the cleaned source url."
if self.urls:
return self.urls[0]
else:
return self.clean_url
def _is_clear(self):
return not (self.titles or self.descriptions or self.images or self.urls)
def _is_complete(self):
return self.titles and self.descriptions and self.images and self.urls and True
def _clear(self):
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
def _load(self, titles=[], descriptions=[], images=[], urls=[], **kwargs):
"""
Loads extracted data into Summary.
Performs validation and filtering on-the-fly, and sets the
non-plural fields to the best specific item so far.
If GET_ALL_DATA is False, it gets only the first valid item.
"""
enough = lambda items: items # len(items) >= MAX_ITEMS
if config.GET_ALL_DATA or not enough(self.titles):
titles = filter(None, map(self._clean_text, titles))
self.titles.extend(titles)
if config.GET_ALL_DATA or not enough(self.descriptions):
descriptions = filter(None, map(self._clean_text, descriptions))
self.descriptions.extend(descriptions)
## Never mind the urls, they can be bad not worth it
# if config.GET_ALL_DATA or not enough(self.urls):
# # urls = [self._clean_url(u) for u in urls]
# urls = filter(None, map(self._clean_url, urls))
# self.urls.extend(urls)
if config.GET_ALL_DATA:
# images = [i for i in [self._filter_image(i) for i in images] if i]
images = filter(None, map(self._filter_image, images))
self.images.extend(images)
elif not enough(self.images):
for i in images:
image = self._filter_image(i)
if image:
self.images.append(image)
if enough(self.images):
break
# Picking the best item by sorting
# self.titles = sorted(self.titles, key=len)
# self.descriptions = sorted(self.descriptions, key=len, reverse=True)
# self.images = sorted(self.images, key=lambda i: sum(i.size), reverse=True)
def _clean_text(self, text):
"""
Checks for bad text like "{{ metatags.title }}" and such
"""
if text.startswith('{{') and text.endswith('}}'):
return None
return text
def _clean_url(self, url):
"""
Canonicalizes the url, as it is done in Scrapy.
And keeps only USEFUL_QUERY_KEYS. It also strips the
trailing slash to help identifying dupes.
"""
# TODO: Turn this into regex
if not url.startswith('http') or url.endswith('}}') or 'nojs_router' in url:
return None
if site(norm(url).lower()) in config.NONCANONIC_SITES:
clean_url = canonicalize_url(url, keep_params=True)
else:
clean_url = canonicalize_url(url)
return clean_url
def _filter_image(self, url):
"The param is the image URL, which is returned if it passes all the filters."
return reduce(lambda f, g: f and g(f),
[
filters.AdblockURLFilter()(url),
filters.NoImageFilter(),
filters.SizeImageFilter(),
filters.MonoImageFilter(),
filters.FormatImageFilter(),
])
def _get_tag(self, response, tag_name="html", encoding="utf-8"):
"""
Iterates response content and returns the tag if found.
If not found, the response content is fully consumed so
self._html equals response.content, and it returns None.
"""
def find_tag(tag_name):
tag_start = tag_end = None
found = lambda: \
tag_start is not None and tag_end is not None
html = self._html.lower()
start = html.find("<%s" % tag_name)
if start >= 0:
tag_start = start
else:
return None # no tag
end = html.find("</%s>" % tag_name)
if end > tag_start:
tag_end = end+len(tag_name)+3
elif consumed:
tag_end = -1 # till the end
if found():
return self._html[tag_start:tag_end]
return None
consumed = getattr(response, 'consumed', False)
if not consumed:
stream = getattr(response, 'stream', None)
if stream is None:
stream = response.iter_content(config.CHUNK_SIZE) # , decode_unicode=True
response.stream = stream
while True:
try:
chunk = next(stream)
self._html += chunk
tag = find_tag(tag_name)
if tag:
return tag
if len(self._html) > config.HTML_MAX_BYTESIZE:
raise HTMLParseError('Maximum response size reached.')
except StopIteration:
response.consumed = True
tag = find_tag(tag_name)
return decode(tag, encoding) # decode here
def _extract(self, html, url, techniques):
extractor = extraction.SvvenExtractor(techniques=techniques)
extracted = extractor.extract(html, source_url=url)
self._load(**extracted)
# no longer needed
|
svven/summary | summary/url.py | unicode_to_str | python | def unicode_to_str(text, encoding=None, errors='strict'):
if encoding is None:
encoding = 'utf-8'
if isinstance(text, unicode):
return text.encode(encoding, errors)
elif isinstance(text, str):
return text
else:
raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__) | Return the str representation of text in the given encoding. Unlike
.encode(encoding) this function can be applied directly to a str
object without the risk of double-decoding problems (which can happen if
you don't use the default 'ascii' encoding)
This function has been copied from here:
https://github.com/scrapy/scrapy/blob/master/scrapy/utils/python.py | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/url.py#L55-L72 | null | """
Copyright (c) Scrapy developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Scrapy nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module has been copied from here:
https://github.com/scrapy/scrapy/blob/master/scrapy/utils/url.py
Hence the above copyright notice.
"""
import urllib
import urlparse
from urllib import quote
import config
import re
from urltools import extract, construct, normalize, URL
from urlnorm import norm
# scrapy.utils.url was moved to w3lib.url and import * ensures this move doesn't break old code
from w3lib.url import *
def url_is_from_any_domain(url, domains):
"""Return True if the url belongs to any of the given domains"""
host = parse_url(url).netloc.lower()
if host:
return any(((host == d.lower()) or (host.endswith('.%s' % d.lower())) for d in domains))
else:
return False
def url_has_any_extension(url, extensions):
return posixpath.splitext(parse_url(url).path)[1].lower() in extensions
def getFragment(url, keep_fragments):
fragment = extract(norm(url)).fragment
return fragment if fragment.startswith('!') or keep_fragments else ''
def canonicalize_url(url, keep_params=False, keep_fragments=False):
"""Canonicalize the given url by applying the following procedures:
# a sort query arguments, first by key, then by value
# b percent encode paths and query arguments. non-ASCII characters are
# c percent-encoded using UTF-8 (RFC-3986)
# d normalize all spaces (in query arguments) '+' (plus symbol)
# e normalize percent encodings case (%2f -> %2F)
# f remove query arguments with blank values (unless site in NONCANONIC_SITES)
# g remove fragments (unless #!)
# h remove username/password at front of domain
# i remove port if 80, keep if not
# k remove query arguments (unless site in USEFUL_QUERY_KEYS)
The url passed can be a str or unicode, while the url returned is always a
str.
"""
if keep_params:
# Preserve all query params
parsed = extract(norm(url))
else:
# Remove unwanted params
parsed = extract(url_query_cleaner(normalize(url), parameterlist=config.USEFUL_QUERY_KEYS))
# Sort params, remove blank if not wanted
query = urllib.urlencode(sorted(urlparse.parse_qsl(parsed.query, keep_blank_values=keep_params)))
fragment = getFragment(url, keep_fragments)
# The following is to remove orphaned '=' from query string params with no values
query = re.sub(r"=$", "", query.replace("=&", "&"))
# Reconstruct URL, escaping apart from safe chars
# See http://stackoverflow.com/questions/2849756/list-of-valid-characters-for-the-fragment-identifier-in-an-url
# http://stackoverflow.com/questions/4669692/valid-characters-for-directory-part-of-a-url-for-short-links
safe = "/.-_~!$&'()*+,;=:@"
newurl = construct(URL(parsed.scheme, '', '', parsed.subdomain, parsed.domain, parsed.tld, parsed.port, quote(parsed.path, safe=safe), query, quote(fragment, safe=safe), ''))
return newurl.rstrip('/')
def _unquotepath(path):
for reserved in ('2f', '2F', '3f', '3F'):
path = path.replace('%' + reserved, '%25' + reserved.upper())
return urllib.unquote(path)
def parse_url(url, encoding=None):
"""Return urlparsed url from the given argument (which could be an already
parsed url)
"""
return url if isinstance(url, urlparse.ParseResult) else \
urlparse.urlparse(unicode_to_str(url, encoding))
|
svven/summary | summary/url.py | url_is_from_any_domain | python | def url_is_from_any_domain(url, domains):
host = parse_url(url).netloc.lower()
if host:
return any(((host == d.lower()) or (host.endswith('.%s' % d.lower())) for d in domains))
else:
return False | Return True if the url belongs to any of the given domains | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/url.py#L75-L82 | [
"def parse_url(url, encoding=None):\n \"\"\"Return urlparsed url from the given argument (which could be an already\n parsed url)\n \"\"\"\n return url if isinstance(url, urlparse.ParseResult) else \\\n urlparse.urlparse(unicode_to_str(url, encoding))\n"
] | """
Copyright (c) Scrapy developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Scrapy nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module has been copied from here:
https://github.com/scrapy/scrapy/blob/master/scrapy/utils/url.py
Hence the above copyright notice.
"""
import urllib
import urlparse
from urllib import quote
import config
import re
from urltools import extract, construct, normalize, URL
from urlnorm import norm
# scrapy.utils.url was moved to w3lib.url and import * ensures this move doesn't break old code
from w3lib.url import *
def unicode_to_str(text, encoding=None, errors='strict'):
"""Return the str representation of text in the given encoding. Unlike
.encode(encoding) this function can be applied directly to a str
object without the risk of double-decoding problems (which can happen if
you don't use the default 'ascii' encoding)
This function has been copied from here:
https://github.com/scrapy/scrapy/blob/master/scrapy/utils/python.py
"""
if encoding is None:
encoding = 'utf-8'
if isinstance(text, unicode):
return text.encode(encoding, errors)
elif isinstance(text, str):
return text
else:
raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__)
def url_has_any_extension(url, extensions):
return posixpath.splitext(parse_url(url).path)[1].lower() in extensions
def getFragment(url, keep_fragments):
fragment = extract(norm(url)).fragment
return fragment if fragment.startswith('!') or keep_fragments else ''
def canonicalize_url(url, keep_params=False, keep_fragments=False):
"""Canonicalize the given url by applying the following procedures:
# a sort query arguments, first by key, then by value
# b percent encode paths and query arguments. non-ASCII characters are
# c percent-encoded using UTF-8 (RFC-3986)
# d normalize all spaces (in query arguments) '+' (plus symbol)
# e normalize percent encodings case (%2f -> %2F)
# f remove query arguments with blank values (unless site in NONCANONIC_SITES)
# g remove fragments (unless #!)
# h remove username/password at front of domain
# i remove port if 80, keep if not
# k remove query arguments (unless site in USEFUL_QUERY_KEYS)
The url passed can be a str or unicode, while the url returned is always a
str.
"""
if keep_params:
# Preserve all query params
parsed = extract(norm(url))
else:
# Remove unwanted params
parsed = extract(url_query_cleaner(normalize(url), parameterlist=config.USEFUL_QUERY_KEYS))
# Sort params, remove blank if not wanted
query = urllib.urlencode(sorted(urlparse.parse_qsl(parsed.query, keep_blank_values=keep_params)))
fragment = getFragment(url, keep_fragments)
# The following is to remove orphaned '=' from query string params with no values
query = re.sub(r"=$", "", query.replace("=&", "&"))
# Reconstruct URL, escaping apart from safe chars
# See http://stackoverflow.com/questions/2849756/list-of-valid-characters-for-the-fragment-identifier-in-an-url
# http://stackoverflow.com/questions/4669692/valid-characters-for-directory-part-of-a-url-for-short-links
safe = "/.-_~!$&'()*+,;=:@"
newurl = construct(URL(parsed.scheme, '', '', parsed.subdomain, parsed.domain, parsed.tld, parsed.port, quote(parsed.path, safe=safe), query, quote(fragment, safe=safe), ''))
return newurl.rstrip('/')
def _unquotepath(path):
for reserved in ('2f', '2F', '3f', '3F'):
path = path.replace('%' + reserved, '%25' + reserved.upper())
return urllib.unquote(path)
def parse_url(url, encoding=None):
"""Return urlparsed url from the given argument (which could be an already
parsed url)
"""
return url if isinstance(url, urlparse.ParseResult) else \
urlparse.urlparse(unicode_to_str(url, encoding))
|
svven/summary | summary/url.py | canonicalize_url | python | def canonicalize_url(url, keep_params=False, keep_fragments=False):
if keep_params:
# Preserve all query params
parsed = extract(norm(url))
else:
# Remove unwanted params
parsed = extract(url_query_cleaner(normalize(url), parameterlist=config.USEFUL_QUERY_KEYS))
# Sort params, remove blank if not wanted
query = urllib.urlencode(sorted(urlparse.parse_qsl(parsed.query, keep_blank_values=keep_params)))
fragment = getFragment(url, keep_fragments)
# The following is to remove orphaned '=' from query string params with no values
query = re.sub(r"=$", "", query.replace("=&", "&"))
# Reconstruct URL, escaping apart from safe chars
# See http://stackoverflow.com/questions/2849756/list-of-valid-characters-for-the-fragment-identifier-in-an-url
# http://stackoverflow.com/questions/4669692/valid-characters-for-directory-part-of-a-url-for-short-links
safe = "/.-_~!$&'()*+,;=:@"
newurl = construct(URL(parsed.scheme, '', '', parsed.subdomain, parsed.domain, parsed.tld, parsed.port, quote(parsed.path, safe=safe), query, quote(fragment, safe=safe), ''))
return newurl.rstrip('/') | Canonicalize the given url by applying the following procedures:
# a sort query arguments, first by key, then by value
# b percent encode paths and query arguments. non-ASCII characters are
# c percent-encoded using UTF-8 (RFC-3986)
# d normalize all spaces (in query arguments) '+' (plus symbol)
# e normalize percent encodings case (%2f -> %2F)
# f remove query arguments with blank values (unless site in NONCANONIC_SITES)
# g remove fragments (unless #!)
# h remove username/password at front of domain
# i remove port if 80, keep if not
# k remove query arguments (unless site in USEFUL_QUERY_KEYS)
The url passed can be a str or unicode, while the url returned is always a
str. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/url.py#L94-L130 | [
"def getFragment(url, keep_fragments):\n fragment = extract(norm(url)).fragment\n return fragment if fragment.startswith('!') or keep_fragments else ''\n"
] | """
Copyright (c) Scrapy developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Scrapy nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module has been copied from here:
https://github.com/scrapy/scrapy/blob/master/scrapy/utils/url.py
Hence the above copyright notice.
"""
import urllib
import urlparse
from urllib import quote
import config
import re
from urltools import extract, construct, normalize, URL
from urlnorm import norm
# scrapy.utils.url was moved to w3lib.url and import * ensures this move doesn't break old code
from w3lib.url import *
def unicode_to_str(text, encoding=None, errors='strict'):
"""Return the str representation of text in the given encoding. Unlike
.encode(encoding) this function can be applied directly to a str
object without the risk of double-decoding problems (which can happen if
you don't use the default 'ascii' encoding)
This function has been copied from here:
https://github.com/scrapy/scrapy/blob/master/scrapy/utils/python.py
"""
if encoding is None:
encoding = 'utf-8'
if isinstance(text, unicode):
return text.encode(encoding, errors)
elif isinstance(text, str):
return text
else:
raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__)
def url_is_from_any_domain(url, domains):
"""Return True if the url belongs to any of the given domains"""
host = parse_url(url).netloc.lower()
if host:
return any(((host == d.lower()) or (host.endswith('.%s' % d.lower())) for d in domains))
else:
return False
def url_has_any_extension(url, extensions):
return posixpath.splitext(parse_url(url).path)[1].lower() in extensions
def getFragment(url, keep_fragments):
fragment = extract(norm(url)).fragment
return fragment if fragment.startswith('!') or keep_fragments else ''
def _unquotepath(path):
for reserved in ('2f', '2F', '3f', '3F'):
path = path.replace('%' + reserved, '%25' + reserved.upper())
return urllib.unquote(path)
def parse_url(url, encoding=None):
"""Return urlparsed url from the given argument (which could be an already
parsed url)
"""
return url if isinstance(url, urlparse.ParseResult) else \
urlparse.urlparse(unicode_to_str(url, encoding))
|
svven/summary | summary/url.py | parse_url | python | def parse_url(url, encoding=None):
return url if isinstance(url, urlparse.ParseResult) else \
urlparse.urlparse(unicode_to_str(url, encoding)) | Return urlparsed url from the given argument (which could be an already
parsed url) | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/url.py#L141-L146 | null | """
Copyright (c) Scrapy developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Scrapy nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module has been copied from here:
https://github.com/scrapy/scrapy/blob/master/scrapy/utils/url.py
Hence the above copyright notice.
"""
import urllib
import urlparse
from urllib import quote
import config
import re
from urltools import extract, construct, normalize, URL
from urlnorm import norm
# scrapy.utils.url was moved to w3lib.url and import * ensures this move doesn't break old code
from w3lib.url import *
def unicode_to_str(text, encoding=None, errors='strict'):
"""Return the str representation of text in the given encoding. Unlike
.encode(encoding) this function can be applied directly to a str
object without the risk of double-decoding problems (which can happen if
you don't use the default 'ascii' encoding)
This function has been copied from here:
https://github.com/scrapy/scrapy/blob/master/scrapy/utils/python.py
"""
if encoding is None:
encoding = 'utf-8'
if isinstance(text, unicode):
return text.encode(encoding, errors)
elif isinstance(text, str):
return text
else:
raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__)
def url_is_from_any_domain(url, domains):
"""Return True if the url belongs to any of the given domains"""
host = parse_url(url).netloc.lower()
if host:
return any(((host == d.lower()) or (host.endswith('.%s' % d.lower())) for d in domains))
else:
return False
def url_has_any_extension(url, extensions):
return posixpath.splitext(parse_url(url).path)[1].lower() in extensions
def getFragment(url, keep_fragments):
fragment = extract(norm(url)).fragment
return fragment if fragment.startswith('!') or keep_fragments else ''
def canonicalize_url(url, keep_params=False, keep_fragments=False):
"""Canonicalize the given url by applying the following procedures:
# a sort query arguments, first by key, then by value
# b percent encode paths and query arguments. non-ASCII characters are
# c percent-encoded using UTF-8 (RFC-3986)
# d normalize all spaces (in query arguments) '+' (plus symbol)
# e normalize percent encodings case (%2f -> %2F)
# f remove query arguments with blank values (unless site in NONCANONIC_SITES)
# g remove fragments (unless #!)
# h remove username/password at front of domain
# i remove port if 80, keep if not
# k remove query arguments (unless site in USEFUL_QUERY_KEYS)
The url passed can be a str or unicode, while the url returned is always a
str.
"""
if keep_params:
# Preserve all query params
parsed = extract(norm(url))
else:
# Remove unwanted params
parsed = extract(url_query_cleaner(normalize(url), parameterlist=config.USEFUL_QUERY_KEYS))
# Sort params, remove blank if not wanted
query = urllib.urlencode(sorted(urlparse.parse_qsl(parsed.query, keep_blank_values=keep_params)))
fragment = getFragment(url, keep_fragments)
# The following is to remove orphaned '=' from query string params with no values
query = re.sub(r"=$", "", query.replace("=&", "&"))
# Reconstruct URL, escaping apart from safe chars
# See http://stackoverflow.com/questions/2849756/list-of-valid-characters-for-the-fragment-identifier-in-an-url
# http://stackoverflow.com/questions/4669692/valid-characters-for-directory-part-of-a-url-for-short-links
safe = "/.-_~!$&'()*+,;=:@"
newurl = construct(URL(parsed.scheme, '', '', parsed.subdomain, parsed.domain, parsed.tld, parsed.port, quote(parsed.path, safe=safe), query, quote(fragment, safe=safe), ''))
return newurl.rstrip('/')
def _unquotepath(path):
for reserved in ('2f', '2F', '3f', '3F'):
path = path.replace('%' + reserved, '%25' + reserved.upper())
return urllib.unquote(path)
|
svven/summary | summary/config.py | from_object | python | def from_object(updates):
"Update same name (or prefixed) settings."
import sys
config = sys.modules[__name__]
prefix = config.__name__.split('.')[0].upper()
keys = [k for k in config.__dict__ if \
k != from_object.__name__ and not k.startswith('_')]
get_value = lambda c, k: hasattr(c, k) and getattr(c, k) or None
for key in keys:
prefix_key = '%s_%s' % (prefix, key)
value = get_value(updates, prefix_key) or get_value(updates, key)
if value: setattr(config, key, value) | Update same name (or prefixed) settings. | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/config.py#L5-L17 | [
"get_value = lambda c, k: hasattr(c, k) and getattr(c, k) or None\n"
] | """
Summary config settings.
These can be overwritten after importing `summary` and before using it.
"""
def from_object(updates):
"Update same name (or prefixed) settings."
import sys
config = sys.modules[__name__]
prefix = config.__name__.split('.')[0].upper()
keys = [k for k in config.__dict__ if \
k != from_object.__name__ and not k.startswith('_')]
get_value = lambda c, k: hasattr(c, k) and getattr(c, k) or None
for key in keys:
prefix_key = '%s_%s' % (prefix, key)
value = get_value(updates, prefix_key) or get_value(updates, key)
if value: setattr(config, key, value)
### Package settings ###
USER_AGENT = 'summary-extraction 0.2'
# USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/53 (KHTML, like Gecko) Chrome/15.0.87"
ENCODING = 'utf-8' # None for autodetect
TIMEOUT = (10, 10) # (connect, read) # None for never
CHUNK_SIZE = 1024 # 1 KB
HTML_MAX_BYTESIZE = 2 * 1048576 # 1 MB
GET_ALL_DATA = False # for better performance
# MAX_ITEMS = 2 # to choose from
# URL query keys to keep
USEFUL_QUERY_KEYS = [
'id',
]
# PhantomJS
PHANTOMJS_BIN = '' # '/usr/local/bin/phantomjs'
PHANTOMJS_SITES = [
'readwrite.com',
]
# Noncanonic sites
NONCANONIC_SITES = [
'www.google.com',
]
### Filters settings ###
# AdblockURLFilter
ADBLOCK_EASYLIST_URL = 'easylist.txt'
# 'https://easylist-downloads.adblockplus.org/easylist.txt'
ADBLOCK_EXTRALIST_URL = 'extralist.txt'
# 'https://dl.dropboxusercontent.com/u/134594/svven/extralist.txt'
# NoImageFilter
IMAGE_MAX_BYTESIZE = 1 * 1048576 # 1 MB
# SizeImageFilter
IMAGE_LIMIT_RATIO = 3.6 # if js crop center square
IMAGE_MIN_IMGSIZE = (75, 75)
IMAGE_MAX_IMGSIZE = (2064, 2064)
# MonoImageFilter
IMAGE_MONO_RULE = r'((_|\b)(white|blank|black|overlay)(_|\b))'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.