repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
lightkurve
|
lightkurve-main/tests/io/test_pathos.py
|
import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal
from lightkurve import search_lightcurve
from lightkurve.io.pathos import read_pathos_lightcurve
from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
def test_detect_pathos():
    """Verify that a PATHOS HLSP file is auto-detected as 'PATHOS'."""
    pathos_url = "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/pathos/s0008/hlsp_pathos_tess_lightcurve_tic-0093270923-s0008_tess_v1_llc.fits"
    hdulist = fits.open(pathos_url)
    assert detect_filetype(hdulist) == "PATHOS"
@pytest.mark.remote_data
def test_read_pathos():
    """Verify that PATHOS files can be read with every flux extension."""
    url = "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/pathos/s0008/hlsp_pathos_tess_lightcurve_tic-0093270923-s0008_tess_v1_llc.fits"
    hdulist = fits.open(url)
    # Flux columns to exercise: the two PSF fluxes plus the raw/corrected
    # fluxes of the four photometric apertures.
    columns = ["PSF_FLUX_RAW", "PSF_FLUX_COR"]
    columns += [f"AP{n}_FLUX_RAW" for n in (1, 2, 3, 4)]
    columns += [f"AP{n}_FLUX_COR" for n in (1, 2, 3, 4)]
    all_fluxes = []
    for column in columns:
        lc = read_pathos_lightcurve(url, flux_column=column)
        assert type(lc).__name__ == "TessLightCurve"
        assert lc.meta["FLUX_ORIGIN"] == column.lower()
        # `time` and `flux` must match the FITS data after quality masking.
        quality_mask = lc.meta["QUALITY_MASK"]
        assert_array_equal(hdulist[1].data["TIME"][quality_mask], lc.time.value)
        assert_array_equal(hdulist[1].data[column][quality_mask], lc.flux.value)
        all_fluxes.append(lc.flux)
    # Adjacent flux columns must not contain identical values.
    for first, second in zip(all_fluxes, all_fluxes[1:]):
        assert not np.array_equal(first, second)
@pytest.mark.remote_data
def test_search_pathos():
    """Verify that a PATHOS light curve can be searched for and downloaded."""
    result = search_lightcurve("TIC 93270923", author="PATHOS", sector=8)
    # Exactly one PATHOS product exists for this target/sector combination.
    assert len(result) == 1
    assert result.table["author"][0] == "PATHOS"
    downloaded = result.download()
    assert type(downloaded).__name__ == "TessLightCurve"
    assert downloaded.sector == 8
| 2,016
| 35.672727
| 150
|
py
|
lightkurve
|
lightkurve-main/tests/io/test_eleanor.py
|
import pytest
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
import numpy as np
from numpy.testing import assert_array_equal
from lightkurve import search_lightcurve
from lightkurve.io.eleanor import read_eleanor_lightcurve
from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
def test_gsfc_eleanor_lite():
    """Verify that GSFC-ELEANOR-LITE light curves are detected and read."""
    url = (
        "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/gsfc-eleanor-lite/s0001/0000/"
        "0003/3673/2616/hlsp_gsfc-eleanor-lite_tess_ffi_s0001-0000000336732616_tess_v1.0_lc.fits"
    )
    with fits.open(url, mode="readonly") as hdulist:
        # The file should be auto-detected as an eleanor product.
        assert detect_filetype(hdulist) == "ELEANOR"
        # With quality filtering disabled, fluxes must match the FITS data.
        lc = read_eleanor_lightcurve(url, quality_bitmask=0)
        assert lc.meta["AUTHOR"] == "GSFC-ELEANOR-LITE"
        assert lc.meta["FLUX_ORIGIN"] == "corr_flux"
        assert_array_equal(lc.flux.value, hdulist[1].data["CORR_FLUX"])
    # Quality-flag handling: bits 17/18 survive the 'default' bitmask ...
    bit17, bit18 = 2 ** 17, 2 ** 18
    lc = read_eleanor_lightcurve(url, quality_bitmask='default')
    assert ((lc["quality"] & bit17) != 0).any()
    assert ((lc["quality"] & bit18) != 0).any()
    # ... but are fully removed by the 'hardest' bitmask.
    lc = read_eleanor_lightcurve(url, quality_bitmask='hardest')
    assert not (lc["quality"] & (bit17 | bit18)).any()
    # Cadence numbers must be integer-typed.
    assert np.issubdtype(lc["cadenceno"].dtype, np.integer)
@pytest.mark.parametrize(
    "url",
    [
        get_pkg_data_filename("../data/test-lc-tess-pimen_s1_eleanor_lite-100-cadences.fits"),
        # The "full" variant is readable too, even though its extra data
        # is not explicitly handled by the reader.
        get_pkg_data_filename("../data/test-lc-tess-pimen_s1_eleanor_full-100-cadences.fits"),
    ],
)
def test_vanilla_eleanor(url):
    """Verify that vanilla eleanor light curves are detected and read."""
    with fits.open(url, mode="readonly") as hdulist:
        # The file should be auto-detected as an eleanor product.
        assert detect_filetype(hdulist) == "ELEANOR"
        # With quality filtering disabled, fluxes must match the FITS data.
        lc = read_eleanor_lightcurve(url, quality_bitmask=0)
        assert lc.meta["AUTHOR"] == "ELEANOR"
        assert lc.meta["FLUX_ORIGIN"] == "corr_flux"
        assert_array_equal(lc.flux.value, hdulist[1].data["CORR_FLUX"])
        # Vanilla eleanor files additionally carry a PSF flux column.
        assert_array_equal(lc.psf_flux.value, hdulist[1].data["PSF_FLUX"])
        # FFIINDEX is stored as float in the FITS file (breaking convention);
        # the reader must cast cadence numbers back to an integer dtype.
        assert np.issubdtype(lc["cadenceno"].dtype, np.integer)
@pytest.mark.remote_data
def test_search_gsfc_eleanor_lite():
    """Verify GSFC-ELEANOR-LITE products can be searched and downloaded."""
    result = search_lightcurve("TIC 336732616", author="GSFC-ELEANOR-LITE", sector=1)
    # Exactly one product should match this target/author/sector.
    assert len(result) == 1
    assert result.table["author"][0] == "GSFC-ELEANOR-LITE"
    downloaded = result.download()
    assert type(downloaded).__name__ == "TessLightCurve"
    assert downloaded.sector == 1
    assert downloaded.author == "GSFC-ELEANOR-LITE"
| 3,179
| 41.972973
| 99
|
py
|
lightkurve
|
lightkurve-main/tests/io/test_tasoc.py
|
import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal
from lightkurve import search_lightcurve
from lightkurve.io.tasoc import read_tasoc_lightcurve
from lightkurve.io.detect import detect_filetype
# Target and light curve product shared by the TASOC tests below.
# The URL needs to be updated upon a new TASOC data release.
TEST_TIC_ID = 150441810
TEST_FIT_URL = "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/tasoc/s0001/c1800/0000/0001/5044/1810/hlsp_tasoc_tess_ffi_tic00150441810-s0001-cam4-ccd4-c1800_tess_v05_ens-lc.fits"
@pytest.mark.remote_data
def test_detect_tasoc():
    """Verify that a TASOC HLSP file is auto-detected as 'TASOC'."""
    hdulist = fits.open(TEST_FIT_URL)
    assert detect_filetype(hdulist) == "TASOC"
@pytest.mark.remote_data
def test_read_tasoc():
    """Verify that TASOC files can be read and expose the requested flux."""
    # Grab the reference fluxes straight from the FITS file.
    with fits.open(TEST_FIT_URL, mode="readonly") as hdulist:
        expected_flux = hdulist[1].data["FLUX_RAW"]
    lc = read_tasoc_lightcurve(TEST_FIT_URL, flux_column="FLUX_RAW")
    assert lc.meta["FLUX_ORIGIN"] == "flux_raw"
    assert_array_equal(expected_flux, lc.flux.value)
@pytest.mark.remote_data
def test_search_tasoc():
    """Verify that a TASOC light curve can be searched for and downloaded."""
    result = search_lightcurve(f"TIC {TEST_TIC_ID}", author="TASOC")
    # At least one TASOC product should be available for this target.
    assert len(result) >= 1
    assert result.table["author"][0] == "TASOC"
    downloaded = result.download()
    assert type(downloaded).__name__ == "TessLightCurve"
| 1,425
| 29.340426
| 192
|
py
|
lightkurve
|
lightkurve-main/tests/io/test_qlp.py
|
import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal
from lightkurve import search_lightcurve
from lightkurve.io.qlp import read_qlp_lightcurve
from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
def test_qlp():
    """Verify that QLP light curves are detected and read correctly."""
    url = "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/qlp/s0011/0000/0002/7755/4109/hlsp_qlp_tess_ffi_s0011-0000000277554109_tess_v01_llc.fits"
    with fits.open(url, mode="readonly") as hdulist:
        # The file should be auto-detected as a QLP product.
        assert detect_filetype(hdulist) == "QLP"
        # With quality filtering disabled, fluxes must match the FITS data.
        lc = read_qlp_lightcurve(url, quality_bitmask=0)
        assert lc.meta["FLUX_ORIGIN"] == "sap_flux"
        assert_array_equal(lc.flux.value, hdulist[1].data["SAP_FLUX"])
@pytest.mark.remote_data
def test_search_qlp():
    """Verify that QLP light curves can be searched and downloaded from MAST."""
    result = search_lightcurve("TIC 277554109", author="QLP", sector=11)
    # Exactly one QLP product should match this target/sector.
    assert len(result) == 1
    assert result.table["author"][0] == "QLP"
    downloaded = result.download()
    assert type(downloaded).__name__ == "TessLightCurve"
    assert downloaded.sector == 11
    assert downloaded.author == "QLP"
| 1,259
| 35
| 164
|
py
|
lightkurve
|
lightkurve-main/tests/io/__init__.py
| 0
| 0
| 0
|
py
|
|
lightkurve
|
lightkurve-main/tests/io/test_read.py
|
import os
import warnings
import tempfile
import pytest
from lightkurve.utils import LightkurveDeprecationWarning, LightkurveError
from lightkurve import (
PACKAGEDIR,
KeplerTargetPixelFile,
TessTargetPixelFile,
LightCurve,
)
from lightkurve.io import read
from .. import TESTDATA
def test_read():
    """Does `lightkurve.io.read()` return the right object for K2/TESS files?

    Also checks that unrecognized file types raise `LightkurveError` and
    that `quality_bitmask` is forwarded.
    """
    # Paths to a K2 target pixel file and a TESS target pixel file.
    k2_path = os.path.join(TESTDATA, "test-tpf-star.fits")
    tess_path = os.path.join(TESTDATA, "tess25155310-s01-first-cadences.fits.gz")
    # Ensure files are read in as the correct object
    k2tpf = read(k2_path)
    assert isinstance(k2tpf, KeplerTargetPixelFile)
    tesstpf = read(tess_path)
    assert isinstance(tesstpf, TessTargetPixelFile)
    # Open should fail if the filetype is not recognized.
    # BUGFIX: the previous `try/except LightkurveError: pass` idiom passed
    # silently when no exception was raised; `pytest.raises` enforces it.
    with pytest.raises(LightkurveError):
        read(os.path.join(PACKAGEDIR, "data", "lightkurve.mplstyle"))
    # Can you instantiate with a path?
    assert isinstance(KeplerTargetPixelFile(k2_path), KeplerTargetPixelFile)
    assert isinstance(TessTargetPixelFile(tess_path), TessTargetPixelFile)
    # Can open take a quality_bitmask argument?
    assert read(k2_path, quality_bitmask="hard").quality_bitmask == "hard"
def test_open():
    """Does the deprecated `open` function still work?"""
    from lightkurve.io import open
    with warnings.catch_warnings():  # lk.open is deprecated
        warnings.simplefilter("ignore", LightkurveDeprecationWarning)
        # Paths to a K2 target pixel file and a TESS target pixel file.
        k2_path = os.path.join(TESTDATA, "test-tpf-star.fits")
        tess_path = os.path.join(TESTDATA, "tess25155310-s01-first-cadences.fits.gz")
        # Ensure files are read in as the correct object
        k2tpf = open(k2_path)
        assert isinstance(k2tpf, KeplerTargetPixelFile)
        tesstpf = open(tess_path)
        assert isinstance(tesstpf, TessTargetPixelFile)
        # Open should fail if the filetype is not recognized.
        # BUGFIX: the previous `try/except LightkurveError: pass` idiom passed
        # silently when no exception was raised; `pytest.raises` enforces it.
        with pytest.raises(LightkurveError):
            open(os.path.join(PACKAGEDIR, "data", "lightkurve.mplstyle"))
        # Can you instantiate with a path?
        assert isinstance(KeplerTargetPixelFile(k2_path), KeplerTargetPixelFile)
        assert isinstance(TessTargetPixelFile(tess_path), TessTargetPixelFile)
        # Can open take a quality_bitmask argument?
        assert open(k2_path, quality_bitmask="hard").quality_bitmask == "hard"
def test_filenotfound():
    """Regression test for #540; ensure lk.read() yields `FileNotFoundError`."""
    missing_path = "some/path/DOESNOTEXIST"
    with pytest.raises(FileNotFoundError) as excinfo:
        read(missing_path)
    # The exception message must include the path that was requested.
    assert missing_path in str(excinfo.value)
@pytest.mark.filterwarnings("ignore:.*been truncated.*")  # ignore AstropyUserWarning: File may have been truncated
def test_file_corrupted():
    """Regression test for #1184; ensure lk.read() yields an error that includes the filename."""
    # fits source: mast:TESS/product/tess2018206045859-s0001-0000000261136679-0120-s_lc.fits
    corrupted_path = os.path.join(
        TESTDATA, "test-lc-tess-pimen-100-cadences-corrupted.fits"
    )
    with pytest.raises(BaseException) as excinfo:
        read(corrupted_path)
    # The error message must mention the offending file.
    assert corrupted_path in str(excinfo.value)
def test_basic_ascii_io():
    """Verify we do not break the basic ascii i/o functionality provided by AstroPy Table."""
    # Part I: read a LightCurve from a CSV file.
    # delete=False keeps the file usable on Windows; we remove it manually.
    csv_tmp = tempfile.NamedTemporaryFile(delete=False)
    try:
        csv_tmp.write(b"time,flux,flux_err,color\n1,2,3,red\n4,5,6,green\n7,8,9,blue")
        csv_tmp.flush()
        lc_from_csv = LightCurve.read(csv_tmp.name, format="ascii.csv")
        assert lc_from_csv.time[0].value == 1
        assert lc_from_csv.flux[1] == 5
        assert lc_from_csv.color[2] == "blue"
    finally:
        csv_tmp.close()
        os.remove(csv_tmp.name)
    # Part II: round-trip the light curve through a tab-separated file.
    tab_tmp = tempfile.NamedTemporaryFile(delete=False)
    try:
        lc_from_csv.write(tab_tmp.name, format="ascii.tab", overwrite=True)
        lc_roundtrip = LightCurve.read(tab_tmp.name, format="ascii.tab")
        assert lc_roundtrip.color[2] == "blue"
        assert (lc_from_csv == lc_roundtrip).all()
    finally:
        tab_tmp.close()
        os.remove(tab_tmp.name)
| 4,482
| 38.672566
| 115
|
py
|
lightkurve
|
lightkurve-main/tests/correctors/test_pldcorrector.py
|
import pytest
import matplotlib.pyplot as plt
from lightkurve import (
search_targetpixelfile,
search_tesscut,
KeplerLightCurve,
TessLightCurve,
)
from lightkurve.correctors import PLDCorrector
@pytest.mark.remote_data
def test_kepler_pld_corrector():
    """PLD on a K2 target: output type, diagnostics, sparse mode, CDPP gain."""
    tpf = search_targetpixelfile("K2-199")[0].download()
    corrector = PLDCorrector(tpf)
    corrected = corrector.correct()
    # K2 input must yield a KeplerLightCurve.
    assert isinstance(corrected, KeplerLightCurve)
    # Diagnostic plots should run without raising.
    corrector.diagnose()
    plt.close()
    corrector.diagnose_masks()
    plt.close()
    # The sparse design-matrix path should also run.
    corrector.correct(sparse=True)
    # The default correction should beat the raw threshold-aperture CDPP.
    baseline = tpf.to_lightcurve(aperture_mask="threshold")
    assert corrected.estimate_cdpp() < baseline.estimate_cdpp()
@pytest.mark.remote_data
def test_tess_pld_corrector():
    """PLD on a TESS target: output type, diagnostics, sparse mode, CDPP gain."""
    tpf = search_targetpixelfile("TOI 700")[0].download()
    corrector = PLDCorrector(tpf)
    corrected = corrector.correct()
    # TESS input must yield a TessLightCurve.
    assert isinstance(corrected, TessLightCurve)
    # Diagnostic plots should run without raising.
    corrector.diagnose()
    plt.close()
    corrector.diagnose_masks()
    plt.close()
    # The sparse design-matrix path should also run.
    corrector.correct(sparse=True)
    # The default correction should beat the raw threshold-aperture CDPP.
    baseline = tpf.to_lightcurve(aperture_mask="threshold")
    assert corrected.estimate_cdpp() < baseline.estimate_cdpp()
@pytest.mark.remote_data
def test_pld_aperture_mask():
    """Test for #523: does PLDCorrector.correct() accept a separate aperture
    mask for the PLD pixels?"""
    tpf = search_targetpixelfile("K2-205")[0].download()
    # Correct using only the pipeline-mask pixels ...
    pipeline_result = tpf.to_corrector("pld").correct(
        pld_aperture_mask="pipeline", restore_trend=False
    )
    # ... and again using every pixel in the TPF.
    all_pixels_result = tpf.to_corrector("pld").correct(
        pld_aperture_mask="all", restore_trend=False
    )
    # Using all pixels should improve the correction.
    assert all_pixels_result.estimate_cdpp() < pipeline_result.estimate_cdpp()
@pytest.mark.remote_data
def test_pld_corrector():
    """End-to-end PLD checks on K2 pixel data and a simulated TESS TPF."""
    # K2: correct a 500-cadence slice of EPIC 247887989.
    k2_target = "EPIC247887989"
    k2_tpf = search_targetpixelfile(k2_target).download()
    pld = PLDCorrector(k2_tpf[:500], aperture_mask="threshold")
    corrected_lc = pld.correct()
    # The corrector must reduce the CDPP relative to the raw light curve.
    assert corrected_lc.estimate_cdpp() < k2_tpf.to_lightcurve().estimate_cdpp()
    # K2 input must yield a KeplerLightCurve.
    assert isinstance(corrected_lc, KeplerLightCurve)
    # Re-running with defaults and with fewer principal components must work.
    corrected_lc = pld.correct()
    corrected_lc = pld.correct(pca_components=20)
    # TESS: correct a 500-cadence slice of a simulated observation.
    from lightkurve import TessTargetPixelFile
    from ..test_targetpixelfile import TESS_SIM
    tess_tpf = TessTargetPixelFile(TESS_SIM)
    pld = PLDCorrector(tess_tpf[:500], aperture_mask="pipeline")
    raw_lc = tess_tpf.to_lightcurve(aperture_mask="pipeline")
    corrected_lc = pld.correct(pca_components=20)
    # The corrected light curve should have higher precision.
    assert corrected_lc.estimate_cdpp() < raw_lc.estimate_cdpp()
    # TESS input must yield a TessLightCurve.
    assert isinstance(corrected_lc, TessLightCurve)
@pytest.mark.remote_data
def test_tpf_with_zero_flux_cadence():
    """Regression test for #873: PLD must run on this TESSCut cutout
    (which, per the test name, contains a zero-flux cadence)."""
    cutout = search_tesscut("TIC 123835353", sector=6).download(cutout_size=5)
    # Should complete without raising.
    cutout.to_corrector("pld").correct()
| 3,818
| 34.036697
| 79
|
py
|
lightkurve
|
lightkurve-main/tests/correctors/test_sparsedesignmatrix.py
|
import pytest
import warnings
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from scipy import sparse
from lightkurve.correctors import (
SparseDesignMatrix,
SparseDesignMatrixCollection,
DesignMatrix,
DesignMatrixCollection,
)
from lightkurve import LightkurveWarning
from lightkurve.correctors.designmatrix import (
create_sparse_spline_matrix,
create_spline_matrix,
)
def test_designmatrix_basics():
    """Can we create a SparseDesignMatrix from a dataframe and use its API?"""
    n_rows, label = 10, "testmatrix"
    cols = ["vector1", "vector2", "vector3"]
    frame = pd.DataFrame(
        {
            "vector1": np.ones(n_rows),
            "vector2": np.arange(n_rows),
            "vector3": np.arange(n_rows) ** 2,
        }
    )
    X = sparse.csr_matrix(np.asarray(frame))
    dm = SparseDesignMatrix(X, name=label, columns=cols)
    assert dm.columns == cols
    assert dm.name == label
    assert dm.shape == (n_rows, 3)
    # Plotting helpers should run without raising.
    dm.plot()
    dm.plot_priors()
    # Non-inplace transformations:
    assert dm.append_constant().shape == (n_rows, 4)  # one extra column
    assert dm.pca(nterms=2).shape == (n_rows, 2)  # one column fewer
    assert dm.split([5]).shape == (n_rows, 6)  # columns doubled
    dm.__repr__()
    # Inplace variants should mutate the matrix itself.
    dm = SparseDesignMatrix(X, name=label, columns=cols)
    dm.append_constant(inplace=True)
    assert dm.shape == (n_rows, 4)
    dm = SparseDesignMatrix(X, name=label, columns=cols)
    dm.split([5], inplace=True)
    assert dm.shape == (n_rows, 6)
def test_split():
    """Can we split a sparse design matrix correctly?"""
    matrix = sparse.csr_matrix(
        np.vstack([np.linspace(0, 9, 10), np.linspace(100, 109, 10)]).T
    )
    dm = SparseDesignMatrix(matrix, columns=["a", "b"])
    # Do we retrieve the correct shape?
    assert dm.shape == (10, 2)
    assert dm.split(2).shape == (10, 4)
    assert dm.split([2, 8]).shape == (10, 6)
    # The off-diagonal areas created by splitting must be zero-padded.
    twice_split = dm.split([2, 8])
    assert (twice_split.values[2:, 0:2] == 0).all()
    assert (twice_split.values[:8, 4:] == 0).all()
    # Column names must remain unique after splitting.
    assert len(set(dm.split(4).columns)) == 4
def test_standardize():
    """Verifies SparseDesignMatrix.standardize()."""
    # A constant column (zero standard deviation) must pass through unchanged.
    const_X = sparse.csr_matrix(np.vstack([np.ones(10)]).T)
    const_dm = SparseDesignMatrix(const_X, columns=["const"])
    assert (const_dm.standardize()["const"] == const_dm["const"]).all()
    # A normally distributed column should become approximately Normal(0, 1).
    norm_X = sparse.csr_matrix(np.vstack([np.random.normal(loc=5, scale=3, size=100)]).T)
    norm_dm = SparseDesignMatrix(norm_X, columns=["normal"])
    assert np.round(np.mean(norm_dm.standardize()["normal"]), 3) == 0
    assert np.round(np.std(norm_dm.standardize()["normal"]), 1) == 1
    # The in-place variant should also run.
    norm_dm.standardize(inplace=True)
def test_pca():
    """Verifies SparseDesignMatrix.pca()."""
    n_rows = 10
    dm = DesignMatrix(
        {
            "a": np.random.normal(10, 20, n_rows),
            "b": np.random.normal(40, 10, n_rows),
            "c": np.random.normal(60, 5, n_rows),
        }
    ).to_sparse()
    # Requesting k principal components must yield a (n_rows, k) matrix.
    for n_components in (1, 2, 3):
        assert dm.pca(nterms=n_components).shape == (n_rows, n_components)
def test_collection_basics():
    """Can design matrices be combined into (sparse) collections?"""
    n_rows = 5
    ones_dm = DesignMatrix(np.ones((n_rows, 1)), columns=["col1"], name="matrix1").to_sparse()
    zeros_dm = DesignMatrix(
        np.zeros((n_rows, 2)), columns=["col2", "col3"], name="matrix2"
    ).to_sparse()
    collection = SparseDesignMatrixCollection([ones_dm, zeros_dm])
    assert_array_equal(collection["matrix1"].values, ones_dm.values)
    assert_array_equal(collection["matrix2"].values, zeros_dm.values)
    assert_array_equal(collection.values, np.hstack((ones_dm.values, zeros_dm.values)))
    collection.plot()
    collection.__repr__()
    # `collect` should build an equivalent collection.
    collection = ones_dm.collect(zeros_dm)
    assert_array_equal(collection["matrix1"].values, ones_dm.values)
    assert_array_equal(collection["matrix2"].values, zeros_dm.values)
    assert_array_equal(collection.values, np.hstack((ones_dm.values, zeros_dm.values)))
    # Mixing a dense and a sparse matrix should warn and convert members.
    dense_dm = DesignMatrix(np.ones((n_rows, 1)), columns=["col1"], name="matrix1")
    sparse_dm = DesignMatrix(
        np.zeros((n_rows, 2)), columns=["col2", "col3"], name="matrix2"
    ).to_sparse()
    with warnings.catch_warnings():
        warnings.simplefilter("always")
        with pytest.warns(
            LightkurveWarning,
            match="Sparse matrices will be converted to dense matrices.",
        ):
            dmc = DesignMatrixCollection([dense_dm, sparse_dm])
        # A dense collection must not retain any sparse members.
        assert not np.any([sparse.issparse(d.X) for d in dmc])
    with warnings.catch_warnings():
        warnings.simplefilter("always")
        with pytest.warns(
            LightkurveWarning,
            match="Dense matrices will be converted to sparse matrices.",
        ):
            dmc = SparseDesignMatrixCollection([dense_dm, sparse_dm])
        # A sparse collection must have converted every member to sparse.
        assert np.all([sparse.issparse(d.X) for d in dmc])
    dmc.plot()
    dmc.__repr__()
    assert isinstance(dmc.to_designmatrix(), SparseDesignMatrix)
def test_designmatrix_rank():
    """Does DesignMatrix issue a low-rank warning when justified?"""
    warnings.simplefilter("always")
    # A full-rank matrix should validate without warning.
    full_rank = DesignMatrix({"a": [1, 2, 3]}).to_sparse()
    assert full_rank.rank == 1
    full_rank.validate(rank=True)  # Should not raise a warning
    # A rank-deficient matrix should warn on construction/validation ...
    with pytest.warns(LightkurveWarning, match="rank"):
        deficient = DesignMatrix(
            {
                "a": [1, 2, 3],
                "b": [1, 1, 1],
                "c": [1, 1, 1],
                "d": [1, 1, 1],
                "e": [3, 4, 5],
            }
        )
        deficient.validate(rank=True)  # Should raise a warning
    # ... and remains rank-deficient after conversion to sparse.
    deficient = deficient.to_sparse()
    assert deficient.rank == 2
    with pytest.warns(LightkurveWarning, match="rank"):
        deficient.validate(rank=True)
def test_splines():
    """Dense and sparse spline design matrices should agree."""
    grid = np.linspace(0, 1, 100)
    spline_knots = [0.1, 0.3, 0.6, 0.9]
    dense_spline = create_spline_matrix(grid, knots=spline_knots, degree=2)
    sparse_spline = create_sparse_spline_matrix(grid, knots=spline_knots, degree=2)
    # Both code paths must produce numerically identical values ...
    assert np.allclose(dense_spline.values, sparse_spline.values)
    # ... with the appropriate container type for each.
    assert isinstance(dense_spline, DesignMatrix)
    assert isinstance(sparse_spline, SparseDesignMatrix)
| 6,374
| 33.274194
| 88
|
py
|
lightkurve
|
lightkurve-main/tests/correctors/test_sffcorrector.py
|
"""Tests the `lightkurve.correctors.SFFCorrector` class."""
import pytest
import warnings
import numpy as np
from astropy.utils.data import get_pkg_data_filename
from numpy.testing import assert_array_equal
from lightkurve import (
LightCurve,
KeplerLightCurve,
TessLightCurve,
LightkurveWarning,
search_lightcurve,
)
from lightkurve.correctors import SFFCorrector
# Remote K2 Campaign 8 light curve used by the remote-data test below.
K2_C08 = (
    "https://archive.stsci.edu/missions/k2/lightcurves/c8/"
    "220100000/39000/ktwo220139473-c08_llc.fits"
)
@pytest.mark.remote_data
@pytest.mark.parametrize("path", [K2_C08])
def test_remote_data(path):
    """Can we correct a simple K2 light curve (dense and sparse modes)?"""
    lc = KeplerLightCurve.read(path, quality_bitmask=None)
    corrector = SFFCorrector(lc.remove_nans())
    # Both the dense and the sparse design-matrix paths should run.
    corrector.correct(windows=10, bins=5, timescale=0.5)
    corrector.correct(windows=10, bins=5, timescale=0.5, sparse=True)
def test_sff_knots():
    """Is SFF robust against gaps in time and irregular time sampling?

    The light curve built here has gaps between days 20-30 and days 78-80,
    and a different sampling rate between days 30 and 78. SFF should run
    without raising.
    """
    n_points = 300
    csv_path = get_pkg_data_filename("../../tests/data/ep60021426alldiagnostics.csv")
    table = np.genfromtxt(csv_path, delimiter=",", skip_header=1)
    flux = table[:, 1][:n_points]
    col = table[:, 3][:n_points]
    row = table[:, 4][:n_points]
    third = int(n_points / 3)
    # Three segments with gaps and differing cadence.
    time = np.concatenate(
        (
            np.linspace(0, 20, third),
            np.linspace(30, 78, third),
            np.linspace(80, 100, third),
        )
    )
    lc = KeplerLightCurve(
        time=time,
        flux=flux,
        flux_err=np.ones(n_points) * 0.0001,
        centroid_col=col,
        centroid_row=row,
    )
    # Neither entry point should raise an exception:
    SFFCorrector(lc).correct()
    lc.to_corrector(method="sff").correct()
def test_sff_corrector():
    """Does our code agree with the example presented in Vanderburg
    and Johnson (2014)?"""
    # The following csv file, provided by Vanderburg and Johnson
    # at https://www.cfa.harvard.edu/~avanderb/k2/ep60021426.html,
    # contains the results of applying SFF to EPIC 60021426.
    csv_path = get_pkg_data_filename("../../tests/data/ep60021426alldiagnostics.csv")
    table = np.genfromtxt(csv_path, delimiter=",", skip_header=1)
    mask = table[:, -2] == 0  # indicates whether the thrusters were on or off
    time, raw_flux, corrected_flux = table[:, 0], table[:, 1], table[:, 2]
    centroid_col, centroid_row = table[:, 3], table[:, 4]

    def _matches_reference(result):
        # Agreement with the published correction to within 1e-3.
        return np.isclose(corrected_flux, result.flux, atol=0.001).all()

    # NOTE: we need a small number of windows below because this test data
    # set is unusually short, i.e. has an unusually small number of cadences.
    lc = LightCurve(time=time, flux=raw_flux, flux_err=np.ones(len(raw_flux)) * 0.0001)
    sff = SFFCorrector(lc)
    result = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        restore_trend=True,
        windows=1,
    )
    assert _matches_reference(result)
    assert len(sff.window_points) == 0  # expect 0 break points for 1 window
    # Masking.
    result = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        windows=3,
        restore_trend=True,
        cadence_mask=mask,
    )
    assert _matches_reference(result)
    assert len(sff.window_points) == 2  # expect 2 break points for 3 windows
    # Masking and breakindex.
    result = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        windows=3,
        restore_trend=True,
        cadence_mask=mask,
    )
    assert _matches_reference(result)
    # Masking and breakindex and iters.
    result = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        windows=3,
        restore_trend=True,
        cadence_mask=mask,
        niters=3,
    )
    assert _matches_reference(result)
    # Masking and breakindex and bins.
    result = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        windows=3,
        restore_trend=True,
        cadence_mask=mask,
        bins=5,
    )
    assert _matches_reference(result)
    # Without error propagation the flux errors are unchanged.
    assert np.all((sff.lc.flux_err / sff.corrected_lc.flux_err) == 1)
    # Masking and breakindex and bins and propagate_errors.
    result = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        windows=3,
        restore_trend=True,
        cadence_mask=mask,
        bins=5,
        propagate_errors=True,
    )
    assert _matches_reference(result)
    # Error propagation should inflate the flux errors.
    assert np.all((sff.lc.flux_err / sff.corrected_lc.flux_err) < 1)
    # The KeplerLightCurve interface should behave the same way.
    klc = KeplerLightCurve(
        time=time,
        flux=raw_flux,
        flux_err=np.ones(len(raw_flux)) * 0.0001,
        centroid_col=centroid_col,
        centroid_row=centroid_row,
    )
    sff = klc.to_corrector("sff")
    klc = sff.correct(windows=3, restore_trend=True)
    assert np.isclose(corrected_flux, klc.flux, atol=0.001).all()
    # The diagnostic plot should run without raising.
    sff.diagnose()
def test_sff_priors():
    """SFF spline flux component should have mean ~1 (the normalized flux),
    while the arclength component should have mean ~0."""
    n_points = 300
    csv_path = get_pkg_data_filename("../../tests/data/ep60021426alldiagnostics.csv")
    table = np.genfromtxt(csv_path, delimiter=",", skip_header=1)
    flux = table[:, 1][:n_points]
    col = table[:, 3][:n_points]
    row = table[:, 4][:n_points]
    third = int(n_points / 3)
    time = np.concatenate(
        (
            np.linspace(0, 20, third),
            np.linspace(30, 78, third),
            np.linspace(80, 100, third),
        )
    )
    lc = KeplerLightCurve(
        time=time,
        flux=flux,
        flux_err=np.ones(n_points) * 0.0001,
        centroid_col=col,
        centroid_row=row,
    )
    sff = SFFCorrector(lc)
    sff.correct()  # should not raise an exception
    assert np.isclose(sff.diagnostic_lightcurves["spline"].flux.mean(), 1, atol=1e-3)
    assert np.isclose(sff.diagnostic_lightcurves["sff"].flux.mean(), 0, atol=1e-3)
def test_sff_breakindex():
    """Regression test for #616: user-supplied break indices are honored."""
    lc = LightCurve(flux=np.ones(20))
    with warnings.catch_warnings():
        # Ignore "LightkurveWarning: The design matrix has low rank".
        warnings.simplefilter("ignore", LightkurveWarning)
        corrector = SFFCorrector(lc)
        corrector.correct(
            breakindex=[5, 10],
            centroid_col=np.random.randn(20),
            centroid_row=np.random.randn(20),
        )
        # Both requested break points must appear among the window points.
        assert 5 in corrector.window_points
        assert 10 in corrector.window_points
        corrector.correct(
            breakindex=[5, 10],
            centroid_col=np.random.randn(20),
            centroid_row=np.random.randn(20),
            windows=1,
        )
        # With a single window the break indices are used verbatim.
        assert_array_equal(corrector.window_points, np.asarray([5, 10]))
def test_sff_tess_warning():
    """SFF is not designed for TESS, so instantiating the corrector on a
    TESS light curve must raise a LightkurveWarning."""
    lc = TessLightCurve(flux=[1, 2, 3], meta={"MISSION": "TESS"})
    with pytest.warns(LightkurveWarning, match="not suitable"):
        # The constructed corrector is intentionally discarded: we only
        # care about the warning (the previous `corr = ...` was unused).
        SFFCorrector(lc)
@pytest.mark.remote_data
def test_sff_nan_centroids():
    """Regression test for #827: SFF used to fail when the light curve
    contained NaNs in its `centroid_col` or `centroid_row` columns."""
    lc = search_lightcurve("EPIC 211083408", author="K2").download()
    # This previously raised a ValueError:
    lc[200:500].remove_nans().to_corrector("sff").correct()
def test_designmatrix_prior_type():
    """Regression test for #982: prior_mu and prior_sigma should not be
    Quantity objects."""
    n = 10
    lc = LightCurve(flux=np.random.normal(loc=1.0, scale=0.1, size=n))
    corrector = lc.to_corrector("sff")
    corrector.correct(
        centroid_col=np.random.normal(loc=1.0, scale=0.1, size=n),
        centroid_row=np.random.normal(loc=1.0, scale=0.1, size=n),
        windows=1,
    )
    dmc = corrector.design_matrix_collection
    assert "Quantity" not in str(type(dmc.prior_mu))
    assert "Quantity" not in str(type(dmc.prior_sigma))
| 8,492
| 32.972
| 92
|
py
|
lightkurve
|
lightkurve-main/tests/correctors/test_cbvcorrector.py
|
""" cbvcorrector.py module unit tests
"""
import pytest
from numpy.testing import (
assert_almost_equal,
assert_array_equal,
assert_allclose,
assert_raises,
)
import warnings
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import astropy.units as u
import pandas as pd
from astropy.table import Table
from astropy.time import Time
from lightkurve import TessLightCurve, KeplerLightCurve
from lightkurve import search_lightcurve
from lightkurve import LightkurveWarning
from lightkurve.correctors.designmatrix import DesignMatrix
from lightkurve.correctors.cbvcorrector import (
load_kepler_cbvs,
load_tess_cbvs,
CotrendingBasisVectors,
KeplerCotrendingBasisVectors,
TessCotrendingBasisVectors,
)
from lightkurve.correctors.cbvcorrector import CBVCorrector
from .. import TESTDATA
# *******************************************************************************
# *******************************************************************************
# *******************************************************************************
# CotrendingBasisVectors unit tests
def test_CotrendingBasisVectors_nonretrieval():
    """Tests CotrendingBasisVectors class without requiring remote data.

    Exercises the constructor, `to_designmatrix`, `plot`, `align` and
    `interpolate` on small hand-built basis-vector tables.
    """
    # ***
    # Constructor
    # Create some generic CotrendingBasisVectors objects
    # Generic CotrendingBasisVectors object
    dataTbl = Table(
        [[1, 2, 3], [False, True, False], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]],
        names=("CADENCENO", "GAP", "VECTOR_1", "VECTOR_3"),
    )
    cbvTime = Time([443.51090033, 443.53133457, 443.55176891], format="bkjd")
    cbvs = CotrendingBasisVectors(data=dataTbl, time=cbvTime)
    assert isinstance(cbvs, CotrendingBasisVectors)
    # CBV indices are parsed from the "VECTOR_<n>" column names
    assert cbvs.cbv_indices == [1, 3]
    assert np.all(cbvs.time.value == [443.51090033, 443.53133457, 443.55176891])
    # Auto-initiate 'GAP' and 'CADENCENO' when those columns are absent
    dataTbl = Table([[2.0, 3.0, 4.0], [3.0, 4.0, 5.0]], names=("VECTOR_3", "VECTOR_12"))
    cbvTime = Time([443.51090033, 443.53133457, 443.55176891], format="bkjd")
    cbvs = CotrendingBasisVectors(data=dataTbl, time=cbvTime)
    assert isinstance(cbvs, CotrendingBasisVectors)
    assert cbvs.cbv_indices == [3, 12]
    # Defaults: no gaps, cadence numbers 0..N-1
    assert np.all(cbvs.gap_indicators == [False, False, False])
    assert np.all(cbvs.cadenceno == [0, 1, 2])
    # ***
    # _to_designmatrix
    # Make sure CBVs are the columns in the returned 2-dim array
    dataTbl = Table(
        [
            [1, 2, 3],
            [False, True, False],
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
            [7.0, 8.0, 9.0],
        ],
        names=("CADENCENO", "GAP", "VECTOR_1", "VECTOR_2", "VECTOR_3"),
    )
    cbvTime = Time([1569.44053967, 1569.44192856, 1569.44331746], format="btjd")
    cbvs = CotrendingBasisVectors(dataTbl, cbvTime)
    cbv_dm_name = "test cbv set"
    # CBV index 5 does not exist and should be ignored
    cbv_designmatrix = cbvs.to_designmatrix(cbv_indices=[1, 3, 5], name=cbv_dm_name)
    assert cbv_designmatrix.shape == (3, 2)
    assert np.all(cbv_designmatrix["VECTOR_1"] == np.array([1.0, 2.0, 3.0]))
    assert np.all(cbv_designmatrix["VECTOR_3"] == np.array([7.0, 8.0, 9.0]))
    assert cbv_designmatrix.name == cbv_dm_name
    # CBV #2 was not requested, so make sure it is not present
    with pytest.raises(KeyError):
        cbv_designmatrix["VECTOR_2"]
    # ***
    # plot
    ax = cbvs.plot(cbv_indices=[1, 2], ax=None)
    assert isinstance(ax, matplotlib.axes.Axes)
    # There is no CBV # 5 so the third cbv_indices entry will be ignored
    ax = cbvs.plot(cbv_indices=[1, 2, 5], ax=ax)
    assert isinstance(ax, matplotlib.axes.Axes)
    # CBVs use 1-based indexing. Throw error if requesting CBV index 0
    with pytest.raises(ValueError):
        ax = cbvs.plot(cbv_indices=[0, 1, 2], ax=ax)
    # Only 'all' or specific CBV indices can be requested
    with pytest.raises(ValueError):
        ax = cbvs.plot("Doh!")
    # ***
    # align
    # Set up cadence numbers such that the CBV is both trimmed and has NaNs inserted
    sample_lc = TessLightCurve(
        time=[1, 2, 3, 4, 6, 7],
        flux=[1, 2, 3, 4, 6, 7],
        flux_err=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
        cadenceno=[1, 2, 3, 4, 6, 7],
    )
    dataTbl = Table(
        [
            [1, 2, 3, 5, 6],
            [False, True, False, False, False],
            [1.0, 2.0, 3.0, 5.0, 6.0],
        ],
        names=("CADENCENO", "GAP", "VECTOR_1"),
    )
    cbvTime = Time(
        [1569.43915078, 1569.44053967, 1569.44192856, 1569.44470635, 1569.44609524],
        format="btjd",
    )
    cbvs = CotrendingBasisVectors(dataTbl, cbvTime)
    cbvs = cbvs.align(sample_lc)
    # After alignment, the CBV cadence numbers match the light curve's
    assert np.all(sample_lc.cadenceno == cbvs.cadenceno)
    assert len(cbvs.cadenceno) == 6
    assert len(sample_lc.flux) == 6
    # Cadence 2 was already gapped in the CBV table; cadences 4 and 7 are
    # absent from it, so positions 1, 3 and 5 must all be flagged as gaps
    assert np.all(cbvs.gap_indicators.value[[1, 3, 5]])
    # Ignore the warning in to_designmatrix due to a low rank matrix
    with warnings.catch_warnings():
        # Instantiating light curves with NaN times will yield a warning
        warnings.simplefilter("ignore", LightkurveWarning)
        cbv_designmatrix = cbvs.to_designmatrix(cbv_indices=[1])
    # Gapped cadences become NaN in the resulting design matrix
    assert np.all(cbv_designmatrix["VECTOR_1"][[0, 1, 2, 4]] == [1.0, 2.0, 3.0, 6.0])
    assert np.all(np.isnan(cbv_designmatrix["VECTOR_1"][[3, 5]]))
    # ***
    # interpolate
    nLcCadences = 20
    xLc = np.linspace(0.0, 2 * np.pi, num=nLcCadences)
    sample_lc = TessLightCurve(
        time=xLc,
        flux=np.sin(xLc),
        flux_err=np.full(nLcCadences, 0.1),
        cadenceno=np.arange(nLcCadences),
    )
    nCbvCadences = 10
    xCbv = np.linspace(0.0, 2 * np.pi, num=nCbvCadences)
    dataTbl = Table(
        [
            np.arange(nCbvCadences),
            np.full(nCbvCadences, False),
            np.cos(xCbv),
            np.sin(xCbv + np.pi * 0.125),
        ],
        names=("CADENCENO", "GAP", "VECTOR_1", "VECTOR_2"),
    )
    cbvTime = Time(xCbv, format="btjd")
    cbvs = CotrendingBasisVectors(dataTbl, cbvTime)
    cbv_interpolated = cbvs.interpolate(sample_lc, extrapolate=False)
    # Interpolation resamples the CBVs onto the light curve's time grid
    assert np.all(cbv_interpolated.time.value == sample_lc.time.value)
    # Extrapolation test
    # If extrapolate=False then all outside values set to 0.0
    xCbv = np.linspace(0.0, 1.5 * np.pi, num=nCbvCadences)
    dataTbl = Table(
        [
            np.arange(nCbvCadences),
            np.full(nCbvCadences, False),
            np.cos(xCbv),
            np.sin(xCbv + np.pi * 0.125),
        ],
        names=("CADENCENO", "GAP", "VECTOR_1", "VECTOR_2"),
    )
    cbvTime = Time(xCbv, format="btjd")
    cbvs = CotrendingBasisVectors(dataTbl, cbvTime)
    cbv_interpolated = cbvs.interpolate(sample_lc, extrapolate=False)
    assert np.all(
        cbv_interpolated["VECTOR_1"].value[
            np.nonzero(cbv_interpolated.time.value > 1.5 * np.pi)[0]
        ] == 0.0
    )
    # With extrapolate=True the out-of-range values are filled in (non-zero)
    cbv_interpolated = cbvs.interpolate(sample_lc, extrapolate=True)
    assert np.all(
        cbv_interpolated["VECTOR_1"].value[
            np.nonzero(cbv_interpolated.time.value > 1.5 * np.pi)[0]
        ] != 0.0
    )
@pytest.mark.remote_data
def test_cbv_retrieval():
    """Tests reading in some CBVs from MAST.

    This indirectly tests the classes KeplerCotrendingBasisVectors and
    TessCotrendingBasisVectors.
    """
    # TESS single-scale CBVs
    cbvs = load_tess_cbvs(sector=10, camera=2, ccd=4, cbv_type="SingleScale")
    assert isinstance(cbvs, TessCotrendingBasisVectors)
    ax = cbvs.plot([1, 2, 4, 6, 8])
    assert isinstance(ax, matplotlib.axes.Axes)
    assert cbvs.mission == "TESS"
    assert cbvs.cbv_type == "SingleScale"
    assert cbvs.band is None
    assert cbvs.sector == 10
    assert cbvs.camera == 2
    assert cbvs.ccd == 4
    # TESS multi-scale CBVs (a band must be specified)
    cbvs = load_tess_cbvs(sector=10, camera=2, ccd=4, cbv_type="MultiScale", band=2)
    assert isinstance(cbvs, TessCotrendingBasisVectors)
    ax = cbvs.plot("all")
    assert isinstance(ax, matplotlib.axes.Axes)
    assert cbvs.band == 2
    # TESS spike CBVs
    cbvs = load_tess_cbvs(sector=8, camera=3, ccd=1, cbv_type="Spike")
    assert isinstance(cbvs, TessCotrendingBasisVectors)
    ax = cbvs.plot("all")
    assert isinstance(ax, matplotlib.axes.Axes)
    # No band specified for MultiScale, this should error
    with pytest.raises(AssertionError):
        cbvs = load_tess_cbvs(sector=10, camera=2, ccd=4, cbv_type="MultiScale")
    # Band specified for SingleScale, this should also error
    with pytest.raises(AssertionError):
        cbvs = load_tess_cbvs(
            sector=10, camera=2, ccd=4, cbv_type="SingleScale", band=2
        )
    # Improper CBV type request
    with pytest.raises(Exception):
        cbvs = load_tess_cbvs(
            sector=10, camera=2, ccd=4, cbv_type="SuperSingleScale"
        )
    # Kepler CBVs, requested by quarter/module/output
    cbvs = load_kepler_cbvs(mission="Kepler", quarter=8, module=16, output=4)
    assert isinstance(cbvs, KeplerCotrendingBasisVectors)
    ax = cbvs.plot("all")
    assert isinstance(ax, matplotlib.axes.Axes)
    assert cbvs.mission == "Kepler"
    assert cbvs.cbv_type == "SingleScale"
    assert cbvs.quarter == 8
    assert cbvs.campaign is None
    assert cbvs.module == 16
    assert cbvs.output == 4
    # K2 CBVs, requested by campaign/channel
    # (the asserts below imply channel 24 maps to module 8, output 4)
    cbvs = load_kepler_cbvs(mission="K2", campaign=15, channel=24)
    assert isinstance(cbvs, KeplerCotrendingBasisVectors)
    ax = cbvs.plot("all")
    assert isinstance(ax, matplotlib.axes.Axes)
    assert cbvs.mission == "K2"
    assert cbvs.cbv_type == "SingleScale"
    assert cbvs.quarter is None
    assert cbvs.campaign == 15
    assert cbvs.module == 8
    assert cbvs.output == 4
def test_cbv_local():
    """Tests loading local CBVs from disk.

    This indirectly tests the classes KeplerCotrendingBasisVectors and
    TessCotrendingBasisVectors. Every call passes `cbv_dir` so that the
    CBV files are read from the local test-data directory and this
    non-`remote_data` test never attempts a MAST download.
    """
    cbv_dir = TESTDATA
    # TESS single-scale CBVs
    cbvs = load_tess_cbvs(cbv_dir=cbv_dir, sector=10, camera=2, ccd=4, cbv_type="SingleScale")
    assert isinstance(cbvs, TessCotrendingBasisVectors)
    ax = cbvs.plot([1, 2, 4, 6, 8])
    assert isinstance(ax, matplotlib.axes.Axes)
    assert cbvs.mission == "TESS"
    assert cbvs.cbv_type == "SingleScale"
    assert cbvs.band is None
    assert cbvs.sector == 10
    assert cbvs.camera == 2
    assert cbvs.ccd == 4
    # TESS multi-scale CBVs (a band must be specified)
    cbvs = load_tess_cbvs(cbv_dir=cbv_dir, sector=10, camera=2, ccd=4, cbv_type="MultiScale", band=2)
    assert isinstance(cbvs, TessCotrendingBasisVectors)
    ax = cbvs.plot("all")
    assert isinstance(ax, matplotlib.axes.Axes)
    assert cbvs.band == 2
    # TESS spike CBVs
    cbvs = load_tess_cbvs(cbv_dir=cbv_dir, sector=10, camera=2, ccd=4, cbv_type="Spike")
    assert isinstance(cbvs, TessCotrendingBasisVectors)
    ax = cbvs.plot("all")
    assert isinstance(ax, matplotlib.axes.Axes)
    # No band specified for MultiScale, this should error.
    # Bug fix: these "expected failure" calls previously omitted `cbv_dir`,
    # which could have triggered remote access from a non-remote test.
    with pytest.raises(AssertionError):
        cbvs = load_tess_cbvs(
            cbv_dir=cbv_dir, sector=10, camera=2, ccd=4, cbv_type="MultiScale"
        )
    # Band specified for SingleScale, this should also error
    with pytest.raises(AssertionError):
        cbvs = load_tess_cbvs(
            cbv_dir=cbv_dir, sector=10, camera=2, ccd=4, cbv_type="SingleScale", band=2
        )
    # Improper CBV type request
    with pytest.raises(Exception):
        cbvs = load_tess_cbvs(
            cbv_dir=cbv_dir, sector=10, camera=2, ccd=4, cbv_type="SuperSingleScale"
        )
    # Kepler CBVs, requested by quarter/module/output
    cbvs = load_kepler_cbvs(cbv_dir=cbv_dir, mission="Kepler", quarter=8, module=16, output=4)
    assert isinstance(cbvs, KeplerCotrendingBasisVectors)
    ax = cbvs.plot("all")
    assert isinstance(ax, matplotlib.axes.Axes)
    assert cbvs.mission == "Kepler"
    assert cbvs.cbv_type == "SingleScale"
    assert cbvs.quarter == 8
    assert cbvs.campaign is None
    assert cbvs.module == 16
    assert cbvs.output == 4
    # K2 CBVs, requested by campaign/channel
    # (the asserts below imply channel 24 maps to module 8, output 4)
    cbvs = load_kepler_cbvs(cbv_dir=cbv_dir, mission="K2", campaign=15, channel=24)
    assert isinstance(cbvs, KeplerCotrendingBasisVectors)
    ax = cbvs.plot("all")
    assert isinstance(ax, matplotlib.axes.Axes)
    assert cbvs.mission == "K2"
    assert cbvs.cbv_type == "SingleScale"
    assert cbvs.quarter is None
    assert cbvs.campaign == 15
    assert cbvs.module == 8
    assert cbvs.output == 4
# *******************************************************************************
# *******************************************************************************
# *******************************************************************************
# CBVCorrector Unit Tests
def test_CBVCorrector():
    """Tests `CBVCorrector` without requiring remote data.

    Verifies NaN removal, the RegressionCorrector passthrough, the
    Gaussian-prior and ElasticNet fits, and expected failure modes.
    """
    # Create a CBVCorrector without reading CBVs from MAST
    sample_lc = TessLightCurve(
        time=[1, 2, 3, 4, 5],
        flux=[1, 2, np.nan, 4, 5],
        flux_err=[0.1, 0.1, 0.1, 0.1, 0.1],
        cadenceno=[1, 2, 3, 4, 5],
        flux_unit=u.Unit("electron / second"),
    )
    cbvCorrector = CBVCorrector(sample_lc, do_not_load_cbvs=True)
    # Check that Nan was removed
    assert len(cbvCorrector.lc.flux) == 4
    # Check that the median flux value is preserved
    assert_allclose(
        np.nanmedian(cbvCorrector.lc.flux).value, np.nanmedian(sample_lc.flux).value
    )
    # External design matrix with 4 cadences (matching the NaN-cleaned lc)
    dm = DesignMatrix(pd.DataFrame({"a": np.ones(4), "b": [1, 2, 4, 5]}))
    # ***
    # RegressionCorrector.correct passthrough method
    lc = cbvCorrector.correct_regressioncorrector(dm)
    # Check that returned lc is in absolute flux units
    assert isinstance(lc, TessLightCurve)
    # The design matrix should have completely zeroed the flux around the median
    lc_median = np.nanmedian(lc.flux)
    assert_allclose(lc.flux, lc_median)
    # ***
    # Gaussian Prior fit
    # A tiny alpha means essentially no regularization: the fit over-fits
    lc = cbvCorrector.correct_gaussian_prior(
        cbv_type=None, cbv_indices=None, alpha=1e-9, ext_dm=dm
    )
    assert isinstance(lc, TessLightCurve)
    # Check that returned lc is in absolute flux units
    assert lc.flux.unit == u.Unit("electron / second")
    # The design matrix should have completely zeroed the flux around the median
    lc_median = np.nanmedian(lc.flux)
    assert_allclose(lc.flux, lc_median)
    ax = cbvCorrector.diagnose()
    assert len(ax) == 2 and isinstance(ax[0], matplotlib.axes.Axes)
    # Now add a strong regularization term and under-fit the data
    lc = cbvCorrector.correct_gaussian_prior(
        cbv_type=None, cbv_indices=None, alpha=1e9, ext_dm=dm
    )
    # There should be virtually no change in the flux
    assert_allclose(lc.flux, sample_lc.remove_nans().flux)
    # This should error because the dm has incorrect number of cadences
    dm_err = DesignMatrix(pd.DataFrame({"a": np.ones(5), "b": [1, 2, 4, 5, 6]}))
    with pytest.raises(ValueError):
        lc = cbvCorrector.correct_gaussian_prior(
            cbv_type=None, cbv_indices=None, alpha=1e-2, ext_dm=dm_err
        )
    # ***
    # ElasticNet fit
    lc = cbvCorrector.correct_elasticnet(
        cbv_type=None, cbv_indices=None, alpha=1e-20, l1_ratio=0.5, ext_dm=dm
    )
    assert isinstance(lc, TessLightCurve)
    assert lc.flux.unit == u.Unit("electron / second")
    # The design matrix should have completely zeroed the flux around the median
    lc_median = np.nanmedian(lc.flux)
    assert_allclose(lc.flux, lc_median, rtol=1e-3)
    ax = cbvCorrector.diagnose()
    assert len(ax) == 2 and isinstance(ax[0], matplotlib.axes.Axes)
    # Now add a strong regularization term and under-fit the data
    lc = cbvCorrector.correct_elasticnet(
        cbv_type=None, cbv_indices=None, alpha=1e9, l1_ratio=0.5, ext_dm=dm
    )
    # There should be virtually no change in the flux
    assert_allclose(lc.flux, sample_lc.remove_nans().flux)
    # ***
    # Correction optimizer
    # The optimizer cannot be run without downloading targets from MAST for use
    # within the under-fitting metric.
    # So let's just verify it fails as expected (not much else we can do)
    dm_err = DesignMatrix(pd.DataFrame({"a": np.ones(5), "b": [1, 2, 4, 5, 6]}))
    with pytest.raises(ValueError):
        lc = cbvCorrector.correct(
            cbv_type=None,
            cbv_indices=None,
            alpha_bounds=[1e-4, 1e4],
            ext_dm=dm_err,
            target_over_score=0.5,
            target_under_score=0.8,
        )
@pytest.mark.remote_data
def test_CBVCorrector_retrieval():
    """Tests CBVCorrector by retrieving some sample Kepler/TESS light curves
    and correcting them.
    """
    # ***
    # A good TESS example of both over- and under-fitting
    # The "over-fitted" curve looks better to the eye, but eyes can be deceiving!
    lc = search_lightcurve(
        "TIC 357126143", mission="tess", author="spoc", sector=10
    ).download(flux_column="sap_flux")
    cbvCorrector = CBVCorrector(lc)
    assert isinstance(cbvCorrector, CBVCorrector)
    # Combine single-scale CBVs 1-8 with all spike CBVs
    cbv_type = ["SingleScale", "Spike"]
    cbv_indices = [np.arange(1, 9), "ALL"]
    # Gaussian Prior correction
    lc = cbvCorrector.correct_gaussian_prior(
        cbv_type=cbv_type, cbv_indices=cbv_indices, alpha=1e-2
    )
    assert isinstance(lc, TessLightCurve)
    # Check that returned lightcurve is in flux units
    assert lc.flux.unit == u.Unit("electron / second")
    ax = cbvCorrector.diagnose()
    assert len(ax) == 2 and isinstance(ax[0], matplotlib.axes.Axes)
    # ElasticNet corrections
    lc = cbvCorrector.correct_elasticnet(
        cbv_type=cbv_type, cbv_indices=cbv_indices, alpha=1e1, l1_ratio=0.5
    )
    assert isinstance(lc, TessLightCurve)
    assert lc.flux.unit == u.Unit("electron / second")
    ax = cbvCorrector.diagnose()
    assert len(ax) == 2 and isinstance(ax[0], matplotlib.axes.Axes)
    # Correction optimizer
    lc = cbvCorrector.correct(
        cbv_type=cbv_type,
        cbv_indices=cbv_indices,
        alpha_bounds=[1e-4, 1e4],
        target_over_score=0.5,
        target_under_score=0.8,
    )
    assert isinstance(lc, TessLightCurve)
    assert lc.flux.unit == u.Unit("electron / second")
    ax = cbvCorrector.diagnose()
    assert len(ax) == 2 and isinstance(ax[0], matplotlib.axes.Axes)
    # Goodness metric scan plot
    ax = cbvCorrector.goodness_metric_scan_plot(
        cbv_type=cbv_type, cbv_indices=cbv_indices
    )
    assert isinstance(ax, matplotlib.axes.Axes)
    # Try multi-scale basis vectors
    cbv_type = ["MultiScale.1", "MultiScale.2", "MultiScale.3"]
    cbv_indices = ["ALL", "ALL", "ALL"]
    lc = cbvCorrector.correct_gaussian_prior(
        cbv_type=cbv_type, cbv_indices=cbv_indices, alpha=1e-2
    )
    assert isinstance(lc, TessLightCurve)
    # ***
    # A Kepler and K2 example
    lc = search_lightcurve(
        "KIC 6508221", mission="kepler", author="kepler", quarter=5
    ).download(flux_column="sap_flux")
    cbvCorrector = CBVCorrector(lc)
    lc = cbvCorrector.correct_gaussian_prior(alpha=1.0)
    assert isinstance(lc, KeplerLightCurve)
    assert lc.flux.unit == u.Unit("electron / second")
    lc = search_lightcurve("EPIC 247887989", mission="k2", author="k2").download(
        flux_column="sap_flux"
    )
    cbvCorrector = CBVCorrector(lc)
    lc = cbvCorrector.correct_gaussian_prior(alpha=1.0)
    assert isinstance(lc, KeplerLightCurve)
    assert lc.flux.unit == u.Unit("electron / second")
    lc = cbvCorrector.correct()
    assert isinstance(lc, KeplerLightCurve)
    assert lc.flux.unit == u.Unit("electron / second")
    # ***
    # Try some expected failures
    # cbv_type and cbv_indices not the same list lengths
    with pytest.raises(AssertionError):
        lc = cbvCorrector.correct_gaussian_prior(
            cbv_type=["SingleScale", "Spike"], cbv_indices=["all"], alpha=1e-2
        )
    # cbv_type is not a list
    with pytest.raises(AssertionError):
        lc = cbvCorrector.correct_gaussian_prior(
            cbv_type="SingleScale", cbv_indices=["all"], alpha=1e-2
        )
    # cbv_indices is not a list
    with pytest.raises(AssertionError):
        lc = cbvCorrector.correct_gaussian_prior(
            cbv_type=["SingleScale"], cbv_indices="all", alpha=1e-2
        )
    #***
    # Test the need for extrapolation with Kepler data
    lc = search_lightcurve("KIC 2437317", mission="Kepler", author="kepler", cadence='long',
            quarter=6).download(flux_column="sap_flux")
    cbv_type = ['SingleScale']
    cbv_indices = [np.arange(1,9)]
    # This will generate a warning about the need for extrapolation
    cbvCorrector = CBVCorrector(lc, interpolate_cbvs=True, extrapolate_cbvs=False)
    # This will generate a light curve with the last value well below the median corrected flux
    cbvCorrector.correct_gaussian_prior(cbv_type=cbv_type, cbv_indices=cbv_indices, alpha=1e-4)
    assert (cbvCorrector.corrected_lc.flux[-1] - np.median(cbvCorrector.corrected_lc.flux)).value < -300
    # This will generate a light curve with the last value at about the median corrected flux
    cbvCorrector = CBVCorrector(lc, interpolate_cbvs=True, extrapolate_cbvs=True)
    cbvCorrector.correct_gaussian_prior(cbv_type=cbv_type, cbv_indices=cbv_indices, alpha=1e-4)
    assert ((cbvCorrector.corrected_lc.flux[-1] - np.median(cbvCorrector.corrected_lc.flux)).value > 0.0 and
            (cbvCorrector.corrected_lc.flux[-1] - np.median(cbvCorrector.corrected_lc.flux)).value < 20)
| 21,005
| 36.780576
| 110
|
py
|
lightkurve
|
lightkurve-main/tests/correctors/test_designmatrix.py
|
import pytest
import warnings
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from lightkurve.correctors import DesignMatrix, DesignMatrixCollection
from lightkurve import LightkurveWarning
def test_designmatrix_basics():
    """Can we create a design matrix from a dataframe?"""
    n_rows, label = 10, "testmatrix"
    frame = pd.DataFrame(
        {"vector1": np.ones(n_rows), "vector2": np.zeros(n_rows), "vector3": np.ones(n_rows)}
    )
    dm = DesignMatrix(frame, name=label)
    # Basic attributes reflect the dataframe contents.
    assert dm.columns == ["vector1", "vector2", "vector3"]
    assert dm.name == label
    assert dm.shape == (n_rows, 3)
    assert (dm["vector1"] == frame["vector1"]).all()
    # Plotting should not raise.
    dm.plot()
    dm.plot_priors()
    # Non-inplace transformations return new matrices of the expected shape.
    assert dm.append_constant().shape == (n_rows, 4)  # one extra column
    assert dm.pca(nterms=2).shape == (n_rows, 2)  # one column fewer
    assert dm.split([10]).shape == (n_rows, 6)  # columns doubled
    dm.__repr__()
    # In-place variants mutate the matrix itself.
    inplace_dm = DesignMatrix(frame, name=label)
    inplace_dm.append_constant(inplace=True)
    assert inplace_dm.shape == (n_rows, 4)
    inplace_dm = DesignMatrix(frame, name=label)
    inplace_dm.split([10], inplace=True)
    assert inplace_dm.shape == (n_rows, 6)
def test_designmatrix_from_numpy():
    """Can we create a design matrix from an ndarray?"""
    n_rows = 10
    dm = DesignMatrix(np.ones((n_rows, 2)))
    # Column labels default to integer indices; the name to a placeholder.
    assert dm.columns == [0, 1]
    assert dm.name == "unnamed_matrix"
    assert (dm[0] == np.ones(n_rows)).all()
def test_designmatrix_from_dict():
    """Can we create a design matrix from a dictionary?"""
    n_rows = 10
    columns = {"centroid_col": np.ones(n_rows), "centroid_row": np.ones(n_rows)}
    dm = DesignMatrix(columns, name="motion_systematics")
    assert dm.shape == (n_rows, 2)
    assert (dm["centroid_col"] == np.ones(n_rows)).all()
def test_split():
    """Can we split a design matrix correctly?"""
    matrix = DesignMatrix({"a": np.linspace(0, 9, 10), "b": np.linspace(100, 109, 10)})
    # Shape checks before and after splitting.
    assert matrix.shape == (10, 2)
    assert matrix.split(2).shape == (10, 4)
    two_way = matrix.split([2, 8])
    assert two_way.shape == (10, 6)
    # The off-segment regions must be zero-padded.
    assert (two_way.values[2:, 0:2] == 0).all()
    assert (two_way.values[:8, 4:] == 0).all()
    # Column names must remain unique after a split.
    assert len(set(matrix.split(2).columns)) == 4
def test_standardize():
    """Verifies DesignMatrix.standardize()"""
    # A constant column (zero standard deviation) must pass through unchanged.
    const_dm = DesignMatrix({"const": np.ones(10)})
    assert (const_dm.standardize()["const"] == const_dm["const"]).all()
    # A Gaussian column should be rescaled to roughly Normal(0, 1).
    noisy_dm = DesignMatrix({"normal": np.random.normal(loc=5, scale=3, size=100)})
    standardized = noisy_dm.standardize()["normal"]
    assert np.round(np.median(standardized), 3) == 0
    assert np.round(np.std(standardized), 1) == 1
    # The in-place variant should also run without error.
    noisy_dm.standardize(inplace=True)
def test_pca():
    """Verifies DesignMatrix.pca()"""
    n_rows = 10
    dm = DesignMatrix(
        {
            "a": np.random.normal(10, 20, n_rows),
            "b": np.random.normal(40, 10, n_rows),
            "c": np.random.normal(60, 5, n_rows),
        }
    )
    # Reducing to any term count yields a matrix with that many columns.
    for n_components in (1, 2, 3):
        reduced = dm.pca(nterms=n_components)
        assert reduced.shape == (n_rows, n_components)
def test_collection_basics():
    """Can we create a design matrix collection?"""
    n_rows = 5
    first = DesignMatrix(np.ones((n_rows, 1)), columns=["col1"], name="matrix1")
    second = DesignMatrix(np.zeros((n_rows, 2)), columns=["col2", "col3"], name="matrix2")
    stacked = np.hstack((first.values, second.values))

    def _verify(collection):
        # Members are retrievable by name; values are horizontally stacked.
        assert_array_equal(collection["matrix1"].values, first.values)
        assert_array_equal(collection["matrix2"].values, second.values)
        assert_array_equal(collection.values, stacked)

    dmc = DesignMatrixCollection([first, second])
    _verify(dmc)
    dmc.plot()
    dmc.__repr__()
    # `collect` builds an equivalent collection from two matrices.
    collected = first.collect(second)
    _verify(collected)
    assert isinstance(collected.to_designmatrix(), DesignMatrix)
def test_designmatrix_rank():
    """Does DesignMatrix issue a low-rank warning when justified?"""
    warnings.simplefilter("always")
    # A single independent column has full rank: no warning expected.
    full_rank_dm = DesignMatrix({"a": [1, 2, 3]})
    assert full_rank_dm.rank == 1
    full_rank_dm.validate(rank=True)  # Should not raise a warning
    # Constant/duplicate columns make the matrix rank-deficient.
    with pytest.warns(LightkurveWarning, match="rank"):
        deficient_dm = DesignMatrix(
            {
                "a": [1, 2, 3],
                "b": [1, 1, 1],
                "c": [1, 1, 1],
                "d": [1, 1, 1],
                "e": [3, 4, 5],
            }
        )
        assert deficient_dm.rank == 2
        deficient_dm.validate(rank=True)  # Should raise a warning
| 4,720
| 32.246479
| 87
|
py
|
lightkurve
|
lightkurve-main/tests/correctors/__init__.py
| 0
| 0
| 0
|
py
|
|
lightkurve
|
lightkurve-main/tests/correctors/test_metrics.py
|
import numpy as np
import pytest
from numpy.testing import assert_allclose
from lightkurve import LightCurve, search_lightcurve
from lightkurve.correctors.metrics import (
overfit_metric_lombscargle,
underfit_metric_neighbors,
_compute_correlation,
_align_to_lc,
)
def test_overfit_metric_lombscargle():
    """Sanity checks for `overfit_metric_lombscargle`"""
    # Build a flat light curve and a sinusoidal one over the same time grid.
    time = np.arange(1, 100, 0.1)
    lc_flat = LightCurve(time=time, flux=1, flux_err=0.0)
    lc_sine = LightCurve(time=time, flux=np.sin(time) + 1, flux_err=0.0)
    # An unchanged light curve scores a perfect 1.
    for unchanged in (lc_flat, lc_sine):
        assert overfit_metric_lombscargle(unchanged, unchanged) == 1.0
    # Removing the sinusoid introduces no noise, so the score stays perfect.
    assert overfit_metric_lombscargle(lc_sine, lc_flat) == 1.0
    # Injecting a sinusoid into a flat curve is maximally bad.
    assert overfit_metric_lombscargle(lc_flat, lc_sine) == 0.0
    # With substantial pre-existing noise, the penalty is softened.
    lc_flat.flux_err += 0.5
    lc_sine.flux_err += 0.5
    assert overfit_metric_lombscargle(lc_flat, lc_sine) > 0.5
@pytest.mark.remote_data
def test_underfit_metric_neighbors():
    """Sanity checks for `underfit_metric_neighbors`."""
    # PDCSAP_FLUX has a very good score (>0.99) because it has been corrected
    lc_pdcsap = search_lightcurve("Proxima Cen", sector=11, author="SPOC").download(
        flux_column="pdcsap_flux"
    )
    assert underfit_metric_neighbors(lc_pdcsap, min_targets=3, max_targets=3) > 0.99
    # SAP_FLUX has a worse score (<0.95) because it hasn't been corrected
    lc_sap = lc_pdcsap.copy()
    lc_sap.flux = lc_pdcsap.sap_flux
    lc_sap.flux_err = lc_pdcsap.sap_flux_err
    assert underfit_metric_neighbors(lc_sap, min_targets=3, max_targets=3) < 0.95
    # A flat light curve should have a perfect score (1)
    # (overwrite all finite flux values with ones, in place)
    notnan = ~np.isnan(lc_sap.flux)
    lc_sap.flux.value[notnan] = np.ones(notnan.sum())
    assert underfit_metric_neighbors(lc_sap, min_targets=3, max_targets=3) == 1.0
def test_compute_correlation():
    """Simple test to verify the correlation computation works."""
    # A matrix whose rows are all identical is perfectly correlated.
    flux_matrix = np.array([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
    assert np.all(_compute_correlation(flux_matrix) == 1.0)
    # A partially correlated matrix must reproduce the known answer.
    flux_matrix = np.array(
        [
            [1.0, -1.0, 1.0, -1.0],
            [-1.0, 1.0, 1.0, -1.0],
            [1.0, -1.0, 1.0, -1.0],
            [-1.0, 1.0, -1.0, 1.0],
        ]
    )
    expected = np.array(
        [
            [1.0, -1.0, 0.5, -0.5],
            [-1.0, 1.0, -0.5, 0.5],
            [0.5, -0.5, 1.0, -1.0],
            [-0.5, 0.5, -1.0, 1.0],
        ]
    )
    assert_allclose(_compute_correlation(flux_matrix), expected)
def test_align_to_lc():
    """Test to ensure we can properly align different light curves."""
    grid = np.arange(1, 100, 0.1)
    ref_lc = LightCurve(time=grid, flux=1, flux_err=0.0)
    ref_lc['cadenceno'] = np.arange(1, len(grid) + 1)
    other_lc = LightCurve(time=grid, flux=2, flux_err=0.0)
    other_lc['cadenceno'] = np.arange(1, len(grid) + 1)
    # Remove different cadence ranges from each curve, then re-align.
    ref_lc = ref_lc[0:10].append(ref_lc[20:100])
    other_lc = other_lc[0:50].append(other_lc[70:100])
    aligned = _align_to_lc(other_lc, ref_lc)
    assert np.all(ref_lc['cadenceno'] == aligned['cadenceno'])
| 3,718
| 35.106796
| 96
|
py
|
lightkurve
|
lightkurve-main/tests/correctors/test_regressioncorrector.py
|
"""Unit tests for the `RegressionCorrector` class."""
import warnings
import numpy as np
from numpy.testing import assert_almost_equal
import pandas as pd
import pytest
from lightkurve import LightCurve, LightkurveWarning
from lightkurve.correctors import RegressionCorrector, DesignMatrix
def test_regressioncorrector_priors():
    """This test will fit a design matrix containing the column vectors
    a=[1, 1] and b=[1, 2] to a light curve with flux=[5, 10].
    The best coefficients for this problem are [0, 5] because 0*a + 5*b == flux,
    however we will verify that changing the priors will yield different
    solutions.
    """
    lc1 = LightCurve(flux=[5, 10])
    lc2 = LightCurve(flux=[5, 10], flux_err=[1, 1])
    design_matrix = DesignMatrix(pd.DataFrame({"a": [1, 1], "b": [1, 2]}))
    # Run the same scenario for dense and sparse matrices, with and without flux errors
    for dm in [design_matrix, design_matrix.to_sparse()]:
        for lc in [lc1, lc2]:
            rc = RegressionCorrector(lc)
            # No prior
            # NOTE(review): `dm` is mutated below, so on iterations after the
            # first this is really "wide prior from the previous pass", which
            # still recovers the data-driven solution.
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [0, 5])
            # Strict prior centered on correct solution
            dm.prior_mu = [0, 5]
            dm.prior_sigma = [1e-6, 1e-6]
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [0, 5])
            # Strict prior centered on incorrect solution
            # (the tight prior overrides the data)
            dm.prior_mu = [99, 99]
            dm.prior_sigma = [1e-6, 1e-6]
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [99, 99])
            # Wide prior centered on incorrect solution
            # (the data overrides the loose prior)
            dm.prior_mu = [9, 9]
            dm.prior_sigma = [1e6, 1e6]
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [0, 5])
def test_sinusoid_noise():
    """Can we remove simple sinusoid noise added to a flat light curve?"""
    size = 100
    time = np.linspace(1, 100, size)
    true_flux = np.ones(size)
    noise = np.sin(time / 5)
    # True light curve is flat, i.e. flux=1 at all time steps
    true_lc = LightCurve(time=time, flux=true_flux, flux_err=0.1 * np.ones(size))
    # Noisy light curve has a sinusoid signal added
    noisy_lc = LightCurve(time=time, flux=true_flux + noise, flux_err=true_lc.flux_err)
    # The design matrix contains the exact noise component plus a constant offset
    design_matrix = DesignMatrix(
        {"noise": noise, "offset": np.ones(len(time))}, name="noise_model"
    )
    # Repeat for both the dense and the sparse representation of the matrix
    for dm in [design_matrix, design_matrix.to_sparse()]:
        # Can we recover the true light curve?
        rc = RegressionCorrector(noisy_lc)
        corrected_lc = rc.correct(dm)
        assert_almost_equal(corrected_lc.normalize().flux, true_lc.flux)
        # Can we produce the diagnostic plot?
        rc.diagnose()
        # Does it work when we set priors?
        dm.prior_mu = [0.1, 0.1]
        dm.prior_sigma = [1e6, 1e6]
        corrected_lc = RegressionCorrector(noisy_lc).correct(dm)
        assert_almost_equal(corrected_lc.normalize().flux, true_lc.flux)
        # Does it work when `flux_err` isn't available?
        noisy_lc = LightCurve(time=time, flux=true_flux + noise)
        corrected_lc = RegressionCorrector(noisy_lc).correct(dm)
        assert_almost_equal(corrected_lc.normalize().flux, true_lc.flux)
def test_nan_input():
    """NaNs in flux or flux_err (except an all-NaN flux_err) must be rejected."""
    with warnings.catch_warnings():
        # Instantiating light curves containing NaNs emits a warning; silence it.
        warnings.simplefilter("ignore", LightkurveWarning)
        bad_lcs = [
            LightCurve(flux=[5, 10], flux_err=[np.nan, 1]),
            LightCurve(flux=[np.nan, 10], flux_err=[1, 1]),
        ]
        # Each of these must be rejected by RegressionCorrector.
        for bad_lc in bad_lcs:
            with pytest.raises(ValueError):
                RegressionCorrector(bad_lc)
    # An entirely-NaN flux_err column is tolerated, because it is common
    # for uncertainties to be missing altogether.
    RegressionCorrector(LightCurve(flux=[5, 10], flux_err=[np.nan, np.nan]))
def test_zero_fluxerr():
    """Regression test for #668.

    Flux uncertainties smaller than or equal to zero (`lc.flux_err <= 0`)
    trigger an invalid or non-finite matrix; `RegressionCorrector` must
    detect this and raise a graceful `ValueError`."""
    for bad_errors in ([1, 0], [1, -10]):
        bad_lc = LightCurve(flux=[5, 10], flux_err=bad_errors)
        with pytest.raises(ValueError):
            RegressionCorrector(bad_lc)
| 4,463
| 36.512605
| 87
|
py
|
lightkurve
|
lightkurve-main/tests/prf/__init__.py
| 0
| 0
| 0
|
py
|
|
lightkurve
|
lightkurve-main/tests/prf/test_tpfmodel.py
|
"""Test the features of the lightkurve.prf.tpfmodels module."""
import os
import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import mode
from lightkurve.prf import FixedValuePrior, GaussianPrior, UniformPrior
from lightkurve.prf import StarPrior, BackgroundPrior, FocusPrior, MotionPrior
from lightkurve.prf import TPFModel, PRFPhotometry
from lightkurve.prf import SimpleKeplerPRF, KeplerPRF
from .. import TESTDATA
def test_fixedvalueprior():
    """A FixedValuePrior stores its mean and evaluates to 0 at that value."""
    prior = FixedValuePrior(1.5)
    assert prior.mean == 1.5
    assert prior(1.5) == 0
def test_starprior():
    """Tests the StarPrior class."""
    col, row, flux = 1, 2, 3
    star_prior = StarPrior(
        col=GaussianPrior(mean=col, var=0.1),
        row=GaussianPrior(mean=row, var=0.1),
        flux=GaussianPrior(mean=flux, var=0.1),
    )
    # The component priors keep their configured means.
    assert star_prior.col.mean == col
    assert star_prior.row.mean == row
    assert star_prior.flux.mean == flux
    # Evaluating at the mean yields zero negative log likelihood.
    assert star_prior.evaluate(col, row, flux) == 0
    # Calling the object is equivalent to calling evaluate().
    assert star_prior(col, row, flux + 0.1) == star_prior.evaluate(col, row, flux + 0.1)
    # Moving away from the mean increases the negative log likelihood.
    assert star_prior.evaluate(col, row, flux) < star_prior.evaluate(col, row, flux + 0.1)
    # The string representation should identify the class.
    assert "StarPrior" in str(star_prior)
def test_backgroundprior():
    """Tests the BackgroundPrior class."""
    bg_flux = 2.0
    prior = BackgroundPrior(flux=bg_flux)
    assert prior.flux.mean == bg_flux
    # Zero cost at the fixed value; non-finite cost anywhere else.
    assert prior(bg_flux) == 0.0
    assert not np.isfinite(prior(bg_flux + 0.1))
@pytest.mark.remote_data  # PRF relies on calibration files on stsci.edu
def test_tpf_model_simple():
    """A TPFModel built from a SimpleKeplerPRF retains the PRF's channel."""
    simple_prf = SimpleKeplerPRF(channel=16, shape=[10, 10], column=15, row=15)
    tpf_model = TPFModel(prfmodel=simple_prf)
    assert tpf_model.prfmodel.channel == 16
@pytest.mark.remote_data  # PRF relies on calibration files on stsci.edu
def test_tpf_model():
    """Verifies TPFModel construction, initial guesses, prediction shape,
    and __repr__ for a single-star model with background fitting enabled."""
    col, row, flux, bgflux = 1, 2, 3, 4
    shape = (7, 8)
    model = TPFModel(
        star_priors=[
            StarPrior(
                col=GaussianPrior(mean=col, var=2 ** 2),
                row=GaussianPrior(mean=row, var=2 ** 2),
                flux=UniformPrior(lb=flux - 0.5, ub=flux + 0.5),
                targetid="TESTSTAR",
            )
        ],
        background_prior=BackgroundPrior(flux=GaussianPrior(mean=bgflux, var=bgflux)),
        focus_prior=FocusPrior(
            scale_col=GaussianPrior(mean=1, var=0.0001),
            scale_row=GaussianPrior(mean=1, var=0.0001),
            rotation_angle=UniformPrior(lb=-3.1415, ub=3.1415),
        ),
        motion_prior=MotionPrior(
            shift_col=GaussianPrior(mean=0.0, var=0.01),
            shift_row=GaussianPrior(mean=0.0, var=0.01),
        ),
        prfmodel=KeplerPRF(channel=40, shape=shape, column=30, row=20),
        fit_background=True,
        fit_focus=False,
        fit_motion=False,
    )
    # Sanity checks
    assert model.star_priors[0].col.mean == col
    assert model.star_priors[0].targetid == "TESTSTAR"
    # Test initial guesses: they should be derived from the priors above
    params = model.get_initial_guesses()
    assert params.stars[0].col == col
    assert params.stars[0].row == row
    assert params.stars[0].flux == flux
    assert params.background.flux == bgflux
    # Only star col/row/flux and the background flux are free
    # (focus and motion fitting are disabled above)
    assert len(params.to_array()) == 4  # The model has 4 free parameters
    assert_allclose([col, row, flux, bgflux], params.to_array(), rtol=1e-5)
    # Predict should return an image
    assert model.predict().shape == shape
    # Test __repr__
    assert "TESTSTAR" in str(model)
# Tagging the test below as `remote_data` because AppVeyor hangs on this test;
# at present we don't understand why.
@pytest.mark.remote_data
def test_tpf_model_fitting():
    # Is the PRF photometry result consistent with simple aperture photometry?
    tpf_fn = os.path.join(TESTDATA, "ktwo201907706-c01-first-cadence.fits.gz")
    tpf = fits.open(tpf_fn)
    col, row = 173, 526
    fluxsum = np.sum(tpf[1].data)
    # Use the modal pixel value as the background estimate.
    # NOTE(review): positional `axis=None` here; newer scipy versions changed
    # `scipy.stats.mode`'s signature/defaults — verify against the pinned scipy.
    bkg = mode(tpf[1].data, None)[0]
    prfmodel = KeplerPRF(
        channel=tpf[0].header["CHANNEL"], column=col, row=row, shape=tpf[1].data.shape
    )
    # Broad uniform priors: position anywhere on the stamp,
    # flux within 50% of the aperture sum
    star_priors = [
        StarPrior(
            col=UniformPrior(lb=prfmodel.col_coord[0], ub=prfmodel.col_coord[-1]),
            row=UniformPrior(lb=prfmodel.row_coord[0], ub=prfmodel.row_coord[-1]),
            flux=UniformPrior(lb=0.5 * fluxsum, ub=1.5 * fluxsum),
        )
    ]
    background_prior = BackgroundPrior(flux=UniformPrior(lb=0.5 * bkg, ub=1.5 * bkg))
    model = TPFModel(
        star_priors=star_priors, background_prior=background_prior, prfmodel=prfmodel
    )
    # Does fitting run without errors?
    result = model.fit(tpf[1].data)
    # Can we change model parameters? Motion is off by default and can be enabled
    assert result.motion.fitted == False
    model.fit_motion = True
    result = model.fit(tpf[1].data)
    assert result.motion.fitted == True
    # Does fitting via the PRFPhotometry class run without errors?
    phot = PRFPhotometry(model)
    phot.run([tpf[1].data])
@pytest.mark.remote_data  # PRF relies on calibration files on stsci.edu
def test_empty_model():
    """A star-less model should recover the background flux of a flat image."""
    true_bg = 1.23
    model = TPFModel(
        background_prior=BackgroundPrior(flux=UniformPrior(lb=0, ub=10)),
        fit_background=True,
    )
    flat_image = true_bg * np.ones(shape=(4, 3))
    fit_result = model.fit(flat_image)
    assert np.isclose(fit_result.background.flux, true_bg, rtol=1e-2)
@pytest.mark.remote_data  # PRF relies on calibration files on stsci.edu
def test_model_with_one_star():
    """Can we fit the background flux in a model with one star?"""
    channel = 42
    shape = (10, 12)
    starflux, col, row = 1000.0, 60.0, 70.0
    bgflux = 10.0
    scale_col, scale_row, rotation_angle = 1.2, 1.3, 0.2
    prf = KeplerPRF(channel=channel, shape=shape, column=col, row=row)
    # Star is placed 6 pixels inside the PRF window; tight Gaussian priors
    # pin its position while the flux prior stays loosely bounded.
    star_prior = StarPrior(
        col=GaussianPrior(col + 6, 0.01),
        row=GaussianPrior(row + 6, 0.01),
        flux=UniformPrior(lb=0.5 * starflux, ub=1.5 * starflux),
    )
    background_prior = BackgroundPrior(flux=UniformPrior(lb=0, ub=100))
    focus_prior = FocusPrior(
        scale_col=UniformPrior(lb=0.5, ub=1.5),
        scale_row=UniformPrior(lb=0.5, ub=1.5),
        rotation_angle=UniformPrior(lb=0.0, ub=0.5),
    )
    model = TPFModel(
        star_priors=[star_prior],
        background_prior=background_prior,
        focus_prior=focus_prior,
        prfmodel=prf,
        fit_background=True,
        fit_focus=True,
    )
    # Generate and fit fake data
    fake_data = bgflux + prf(
        col + 6,
        row + 6,
        starflux,
        scale_col=scale_col,
        scale_row=scale_row,
        rotation_angle=rotation_angle,
    )
    results = model.fit(fake_data, tol=1e-12, options={"maxiter": 100})
    # Do the results match the input?
    assert np.isclose(results.stars[0].col, col + 6)
    assert np.isclose(results.stars[0].row, row + 6)
    assert np.isclose(results.stars[0].flux, starflux)
    assert np.isclose(results.background.flux, bgflux)
    assert np.isclose(results.focus.scale_col, scale_col)
    assert np.isclose(results.focus.scale_row, scale_row)
    assert np.isclose(results.focus.rotation_angle, rotation_angle)
| 7,272
| 35.547739
| 86
|
py
|
lightkurve
|
lightkurve-main/tests/prf/test_prfmodel.py
|
"""Test the features of the lightkurve.prf.prfmodels module."""
from __future__ import division, print_function
from collections import OrderedDict
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
import numpy as np
from numpy.testing import assert_allclose
import pytest
from lightkurve.prf import KeplerPRF, SimpleKeplerPRF
from lightkurve.targetpixelfile import KeplerTargetPixelFile
@pytest.mark.remote_data  # PRF relies on calibration files on stsci.edu
def test_prf_normalization():
    """Does the PRF model integrate to the requested flux across the focal plane?"""
    window = (18, 14)
    requested_flux = 100
    for channel in [1, 20, 40, 60, 84]:
        for column in [123, 678]:
            for row in [234, 789]:
                prf = KeplerPRF(
                    channel=channel, column=column, row=row, shape=window
                )
                # Evaluate a star centred in the window and sum all pixels.
                image = prf.evaluate(
                    column + window[0] / 2, row + window[1] / 2, requested_flux, 1, 1, 0
                )
                assert np.isclose(image.sum(), requested_flux, rtol=0.1)
@pytest.mark.remote_data  # PRF relies on calibration files on stsci.edu
def test_simple_kepler_prf():
    """Ensures that concentric PRFs have the same values."""
    # prf_2 is a 10x10 window centered inside the 20x20 prf_1, so both must
    # produce identical pixel values on the overlapping region.
    prf_1 = SimpleKeplerPRF(channel=16, shape=[20, 20], column=0, row=0)
    prf_2 = SimpleKeplerPRF(channel=16, shape=[10, 10], column=5, row=5)
    # NOTE(review): these nested loops iterate the full cross product of the
    # two lists (visiting c == 10 twice); the values look like they were meant
    # as coordinate *pairs* (10,10), (8,10), (10,7), (7,7) — confirm whether
    # `zip` was intended. The cross product is harmless but redundant.
    for c in [10, 8, 10, 7]:
        for r in [10, 10, 7, 7]:
            assert_allclose(
                prf_2(center_col=c, center_row=r, flux=1),
                prf_1(center_col=c, center_row=r, flux=1)[5:15, 5:15],
                rtol=1e-5,
            )
@pytest.mark.remote_data
def test_simple_kepler_prf_interpolation_consistency():
    """Ensures that the interpolated prf is consistent with calibration files."""
    sprf = SimpleKeplerPRF(channel=56, shape=[15, 15], column=0, row=0)
    cal_prf = fits.open(
        "http://archive.stsci.edu/missions/kepler/fpc/prf/" "kplr16.4_2011265_prf.fits"
    )
    # The calibration PRF is 50x super-sampled; take the sub-pixel centers
    # and normalize by the total (each super-sample cell covers 0.02 pixels).
    cal_prf_subsampled = cal_prf[-1].data[25::50, 25::50]
    cal_prf_subsampled_normalized = cal_prf_subsampled / (
        cal_prf[-1].data.sum() * 0.02 ** 2
    )
    sprf_data = sprf(center_col=7.5, center_row=7.5, flux=1)
    # BUGFIX: the original computed np.isclose(...) but never asserted the
    # result, so this comparison could never fail. Assert it, with a small
    # absolute tolerance on the summed absolute difference.
    assert np.isclose(
        np.sum(np.abs(sprf_data - cal_prf_subsampled_normalized)), 0, atol=1e-4
    )
@pytest.mark.remote_data  # PRF relies on calibration files on stsci.edu
def test_get_model_prf():
    """``KeplerTargetPixelFile.get_prf_model()`` should return a KeplerPRF
    configured from the TPF's own channel, shape, column, and row."""
    tpf_fn = get_pkg_data_filename("../../tests/data/test-tpf-star.fits")
    tpf = KeplerTargetPixelFile(tpf_fn)
    prf = KeplerPRF(
        channel=tpf.channel, shape=tpf.shape[1:], column=tpf.column, row=tpf.row
    )
    prf_from_tpf = tpf.get_prf_model()
    # Idiom fix: identity comparison of type objects uses `is`, not `==`.
    assert type(prf) is type(prf_from_tpf)
    assert prf.channel == prf_from_tpf.channel
    assert prf.shape == prf_from_tpf.shape
    assert prf.column == prf_from_tpf.column
    assert prf.row == prf_from_tpf.row
@pytest.mark.remote_data  # PRF relies on calibration files on stsci.edu
def test_keplerprf_gradient_against_simplekeplerprf():
    """The position/flux components of KeplerPRF's gradient should match
    SimpleKeplerPRF's gradient (the last 3 components — scale_col,
    scale_row, rotation_angle — exist only in the full model)."""
    init_kwargs = {"channel": 56, "shape": [15, 15], "column": 0, "row": 0}
    eval_point = {"center_col": 7, "center_row": 7, "flux": 1.0}
    simple = SimpleKeplerPRF(**init_kwargs)
    full = KeplerPRF(**init_kwargs)
    full_grad = full.gradient(
        rotation_angle=0.0, scale_col=1.0, scale_row=1.0, **eval_point
    )
    assert_allclose(full_grad[:-3], simple.gradient(**eval_point))
@pytest.mark.remote_data  # PRF relies on calibration files on stsci.edu
@pytest.mark.parametrize(
    "param_to_test",
    [
        ("center_col"),
        ("center_row"),
        ("flux"),
        ("scale_col"),
        ("scale_row"),
        ("rotation_angle"),
    ],
)
def test_keplerprf_gradient_against_calculus(param_to_test):
    """is the gradient of KeplerPRF consistent with Calculus?"""
    # Evaluation point; the insertion order must match the component order
    # returned by `prf.gradient`, hence the OrderedDict + param_order map.
    params = OrderedDict(
        [
            ("center_col", 7),
            ("center_row", 7),
            ("flux", 1000.0),
            ("scale_col", 1.0),
            ("scale_row", 1.0),
            ("rotation_angle", 0),
        ]
    )
    param_order = OrderedDict(zip(params.keys(), range(0, 6)))
    kwargs = {"channel": 56, "shape": [15, 15], "column": 0, "row": 0}
    prf = KeplerPRF(**kwargs)
    h = 1e-8  # forward-difference step size
    f = prf.evaluate
    inc_params = params.copy()
    # increment the parameter under test for later finite difference computation
    inc_params[param_to_test] += h
    # compute finite differences
    diff_prf = (f(**inc_params) - f(**params)) / h
    # compute analytical gradient
    prf_grad = prf.gradient(**params)
    # assert that the average absolute/relative error is less than 1e-5
    assert (
        np.max(
            np.abs(prf_grad[param_order[param_to_test]] - diff_prf)
            / (1.0 + np.abs(diff_prf))
        )
        < 1e-5
    )
| 4,845
| 35.164179
| 87
|
py
|
lightkurve
|
lightkurve-main/tests/seismology/test_butler.py
|
import pytest
from astropy import units as u
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import unit_impulse as deltafn
from lightkurve.search import search_lightcurve
from lightkurve.periodogram import Periodogram
from lightkurve.periodogram import SNRPeriodogram
@pytest.mark.remote_data
def test_asteroseismology():
    """End-to-end numax estimation on real data for KIC 11615890."""
    datalist = search_lightcurve("KIC11615890")
    data = datalist.download_all()
    lc = data[0].normalize().flatten()
    # NOTE(review): the slice starts at index 0, so data[0] is appended onto
    # itself a second time here — `data[1:5]` may have been intended; verify.
    for nlc in data[0:5]:
        lc = lc.append(nlc.normalize().flatten())
    lc = lc.remove_nans()
    pg = lc.to_periodogram(normalization="psd")
    snr = pg.flatten()
    snr.to_seismology().estimate_numax()
def generate_test_spectrum():
    """Generate a simple synthetic solar-like oscillator power spectrum.

    Returns
    -------
    (frequency, power, numax, deltanu) : tuple
        Frequency grid in [0, 4000) with 0.4 spacing, the power values
        (a comb of delta-function modes under a Gaussian envelope, on a
        flat unit background), the true numax (2500), and the true large
        separation deltanu from the scaling relation 0.294 * numax**0.772.
    """
    freq = np.arange(0, 4000.0, 0.4)
    numax_true = 2500.0
    deltanu_true = 0.294 * numax_true ** 0.772
    bin_width = freq.max() / len(freq)
    # Gaussian envelope centred on numax (FWHM = 0.25 * numax).
    envelope_std = 0.25 * numax_true / 2.335
    power = 10 * np.exp(-0.5 * (freq - numax_true) ** 2 / envelope_std ** 2)
    # Comb of delta-function modes spaced by deltanu/2, within 50%-150% numax.
    comb = np.zeros(len(freq))
    first_bin = int(np.floor(0.5 * numax_true / bin_width))
    last_bin = int(np.floor(1.5 * numax_true / bin_width))
    for loc in np.arange(first_bin, last_bin, deltanu_true / 2, dtype=int):
        comb += deltafn(len(freq), loc)
    power = power * comb + 1
    return freq, power, numax_true, deltanu_true
def test_estimate_numax_basics():
    """Test if we can estimate a numax."""
    f, p, true_numax, _ = generate_test_spectrum()
    snr = SNRPeriodogram(f * u.microhertz, u.Quantity(p, None))
    numax = snr.to_seismology().estimate_numax()
    # Assert recovers numax within 10%
    assert np.isclose(true_numax, numax.value, atol=0.1 * true_numax)
    # Assert numax has unit equal to input frequency unit
    assert numax.unit == u.microhertz
    # Assert you can recover numax with a chopped periodogram
    rsnr = snr[(snr.frequency.value > 1600) & (snr.frequency.value < 3200)]
    numax = rsnr.to_seismology().estimate_numax()
    assert np.isclose(true_numax, numax.value, atol=0.1 * true_numax)
    # Assert numax estimator works when input frequency is not in microhertz
    fday = u.Quantity(f * u.microhertz, 1 / u.day)
    snr = SNRPeriodogram(fday, u.Quantity(p, None))
    numax = snr.to_seismology().estimate_numax()
    nmxday = u.Quantity(true_numax * u.microhertz, 1 / u.day)
    assert np.isclose(nmxday, numax, atol=0.1 * nmxday)
    # Assert numax estimator fails when frequencies are not uniform:
    # jittering the grid must raise a ValueError.
    f, p, true_numax, _ = generate_test_spectrum()
    f += np.random.uniform(size=len(f))
    snr = SNRPeriodogram(f * u.microhertz, u.Quantity(p, None))
    with pytest.raises(ValueError) as exc:
        numax = snr.to_seismology().estimate_numax()
    assert "uniformly spaced" in str(exc.value)
def test_estimate_numax_kwargs():
    """Test if we can estimate a numax using its various keyword arguments."""
    f, p, true_numax, _ = generate_test_spectrum()
    std = 0.25 * true_numax / 2.335  # The standard deviation of the mode envelope
    snr = SNRPeriodogram(f * u.microhertz, u.Quantity(p, None))
    butler = snr.to_seismology()
    # Custom trial grid spanning +/- 2 sigma around the true numax.
    numaxs = np.linspace(true_numax - 2 * std, true_numax + 2 * std, 500)
    numax = butler.estimate_numax(numaxs=numaxs)
    # Assert we can recover numax using a custom numax
    assert np.isclose(numax.value, true_numax, atol=0.1 * true_numax)
    # Assert we can't pass custom numaxs outside a functional range
    with pytest.raises(ValueError):
        numax = butler.estimate_numax(numaxs=np.linspace(-5, 5.0))
    with pytest.raises(ValueError):
        numax = butler.estimate_numax(numaxs=np.linspace(1.0, 5000.0))
    # Assert we can pass a custom window in microhertz or days
    numax = butler.estimate_numax(window_width=200.0)
    assert np.isclose(numax.value, true_numax, atol=0.1 * true_numax)
    numax = butler.estimate_numax(
        window_width=u.Quantity(200.0, u.microhertz).to(1 / u.day)
    )
    assert np.isclose(numax.value, true_numax, atol=0.1 * true_numax)
    # Assert we can't pass in window_widths outside functional range
    with pytest.raises(ValueError):
        numax = butler.estimate_numax(window_width=-5)
    with pytest.raises(ValueError):
        numax = butler.estimate_numax(window_width=1e6)
    with pytest.raises(ValueError):
        numax = butler.estimate_numax(window_width=0.001)
    # Assert we can pass a custom spacing in microhertz or days
    numax = butler.estimate_numax(spacing=15.0)
    assert np.isclose(numax.value, true_numax, atol=0.1 * true_numax)
    numax = butler.estimate_numax(spacing=u.Quantity(15.0, u.microhertz).to(1 / u.day))
    assert np.isclose(numax.value, true_numax, atol=0.1 * true_numax)
    # Assert we can't pass in spacing outside functional range
    with pytest.raises(ValueError):
        numax = butler.estimate_numax(spacing=-5)
    with pytest.raises(ValueError):
        numax = butler.estimate_numax(spacing=1e6)
    with pytest.raises(ValueError):
        numax = butler.estimate_numax(spacing=0.001)
    # Assert it doesn't matter what units of frequency numaxs are passed in as
    # Assert the output is still in the same units as the object frequencies
    daynumaxs = u.Quantity(numaxs * u.microhertz, 1 / u.day)
    numax = butler.estimate_numax(numaxs=daynumaxs)
    assert np.isclose(numax.value, true_numax, atol=0.1 * true_numax)
    assert numax.unit == u.microhertz
def test_plot_numax_diagnostics():
    """Test if we can estimate numax using the diagnostics function, and that
    it returns a correct metric when requested
    """
    f, p, true_numax, _ = generate_test_spectrum()
    std = 0.25 * true_numax / 2.335  # The standard deviation of the mode envelope
    snr = SNRPeriodogram(f * u.microhertz, u.Quantity(p, None))
    butler = snr.to_seismology()
    numaxs = np.linspace(true_numax - 2 * std, true_numax + 2 * std, 500)
    butler.estimate_numax(numaxs=numaxs, window_width=250.0, spacing=10.0)
    butler.diagnose_numax()
    # Note: checks on the `numaxs` kwarg in `estimate_numax_kwargs` also apply
    # to this function, no need to check them twice.
    # Assert recovers numax within 10%
    assert np.isclose(true_numax, butler.numax.value, atol=0.1 * true_numax)
    # Assert numax has unit equal to input frequency unit
    assert butler.numax.unit == u.microhertz
    # Sanity check that plotting works under all conditions
    # (with/without an explicit result, day-based units, custom window).
    numax = butler.estimate_numax()
    butler.diagnose_numax(numax)
    numax = butler.estimate_numax(numaxs=numaxs)
    butler.diagnose_numax(numax)
    daynumaxs = u.Quantity(numaxs * u.microhertz, 1 / u.day)
    numax = butler.estimate_numax(numaxs=daynumaxs)
    butler.diagnose_numax(numax)
    numax = butler.estimate_numax(window_width=100.0)
    butler.diagnose_numax(numax)
    # Check plotting works when periodogram is sliced
    rsnr = snr[(snr.frequency.value > 1600) & (snr.frequency.value < 3200)]
    butler = rsnr.to_seismology()
    butler.estimate_numax()
    butler.diagnose_numax()
    # Check metric of appropriate length is returned
    numax = butler.estimate_numax(numaxs=numaxs)
    assert len(numax.diagnostics["metric"]) == len(numaxs)
def test_estimate_deltanu_basics():
    """Test if we can estimate a deltanu (full spectrum, sliced spectrum,
    non-microhertz frequencies, and failure on a non-uniform grid)."""
    f, p, _, true_deltanu = generate_test_spectrum()
    snr = SNRPeriodogram(f * u.microhertz, u.Quantity(p, None))
    butler = snr.to_seismology()
    butler.estimate_numax()
    deltanu = butler.estimate_deltanu()
    # Assert recovers deltanu within 25%
    assert np.isclose(true_deltanu, deltanu.value, atol=0.25 * true_deltanu)
    # Assert deltanu has unit equal to input frequency unit
    assert deltanu.unit == u.microhertz
    # Assert you can recover deltanu with a sliced periodogram
    rsnr = snr[(snr.frequency.value > 1600) & (snr.frequency.value < 3200)]
    butler = rsnr.to_seismology()
    butler.estimate_numax()
    # BUGFIX: this result was previously assigned to `numax`, so the
    # assertion below re-checked the earlier full-spectrum `deltanu`
    # instead of the sliced-periodogram estimate.
    deltanu = butler.estimate_deltanu()
    assert np.isclose(true_deltanu, deltanu.value, atol=0.25 * true_deltanu)
    # Assert deltanu estimator works when input frequency is not in microhertz
    fday = u.Quantity(f * u.microhertz, 1 / u.day)
    daysnr = SNRPeriodogram(fday, u.Quantity(p, None))
    butler = daysnr.to_seismology()
    butler.estimate_numax()
    deltanu = butler.estimate_deltanu()
    deltanuday = u.Quantity(true_deltanu * u.microhertz, 1 / u.day)
    assert np.isclose(deltanuday.value, deltanu.value, atol=0.25 * deltanuday.value)
    # Assert deltanu estimator fails when frequencies are not uniform
    f, p, true_numax, _ = generate_test_spectrum()
    f += np.random.uniform(size=len(f))
    snr = SNRPeriodogram(f * u.microhertz, u.Quantity(p, None))
    with pytest.raises(ValueError) as exc:
        deltanu = snr.to_seismology().estimate_deltanu(numax=100)
    assert "uniformly spaced" in str(exc.value)
def test_estimate_deltanu_kwargs():
    """Test if we can estimate a deltanu using its various keyword arguments"""
    f, p, _, true_deltanu = generate_test_spectrum()
    snr = SNRPeriodogram(f * u.microhertz, u.Quantity(p, None))
    butler = snr.to_seismology()
    # Assert custom numax works
    numax = butler.estimate_numax()
    deltanu = butler.estimate_deltanu(numax=numax)
    assert np.isclose(deltanu.value, true_deltanu, atol=0.25 * true_deltanu)
    # Assert you can't pass custom numax outside of appropriate range
    with pytest.raises(ValueError):
        deltanu = butler.estimate_deltanu(numax=-5.0)
    with pytest.raises(ValueError):
        deltanu = butler.estimate_deltanu(numax=5000)
    # Assert it doesn't matter what units of frequency numax is passed in as;
    # the result must come back in the periodogram's own frequency unit.
    daynumax = u.Quantity(numax.value * u.microhertz, 1 / u.day)
    deltanu = butler.estimate_deltanu(numax=daynumax)
    assert np.isclose(deltanu.value, true_deltanu, atol=0.25 * true_deltanu)
    assert deltanu.unit == u.microhertz
def test_plot_deltanu_diagnostics():
    """Test if we can estimate numax using the diagnostics function, and that
    it returns a correct metric when requested
    """
    f, p, _, true_deltanu = generate_test_spectrum()
    snr = SNRPeriodogram(f * u.microhertz, u.Quantity(p, None))
    butler = snr.to_seismology()
    butler.estimate_numax()
    deltanu = butler.estimate_deltanu()
    ax = butler.diagnose_deltanu()
    assert np.isclose(deltanu.value, true_deltanu, atol=0.25 * true_deltanu)
    assert deltanu.unit == u.microhertz
    plt.close("all")
    # Note: checks on the `numax` kwarg in `estimate_deltanu_kwargs` also apply
    # to this function, no need to check them twice.
    # Sanity check that plotting works under all conditions
    # (implicit/explicit deltanu, day-based numax units).
    numax = butler.estimate_numax()
    butler.diagnose_deltanu()
    deltanu = butler.estimate_deltanu(numax=numax)
    butler.diagnose_deltanu(deltanu)
    daynumax = u.Quantity(numax.value * u.microhertz, 1 / u.day)
    deltanu = butler.estimate_deltanu(numax=daynumax)
    butler.diagnose_deltanu(deltanu)
    plt.close("all")
    # Check plotting works when periodogram is sliced
    rsnr = snr[(snr.frequency.value > 1600) & (snr.frequency.value < 3200)]
    butler = rsnr.to_seismology()
    butler.estimate_numax()
    butler.estimate_deltanu()
    ax = butler.diagnose_deltanu()
    plt.close("all")
    # Check it plots when frequency is in days
    fday = u.Quantity(f * u.microhertz, 1 / u.day)
    daysnr = SNRPeriodogram(fday, u.Quantity(p, None))
    butler = daysnr.to_seismology()
    butler.estimate_deltanu(numax=daynumax)
    butler.diagnose_deltanu()
    plt.close("all")
def test_stellar_estimator_calls():
    """estimate_mass/radius/logg should pick up TEFF from the periodogram's
    meta, accept an explicit teff argument, and raise ValueError when no
    effective temperature is available."""
    f, p, _, true_deltanu = generate_test_spectrum()
    snr = SNRPeriodogram(f * u.microhertz, u.Quantity(p, None))
    snr.meta = {"TEFF": 3000}
    butler = snr.to_seismology()
    butler.estimate_numax()
    deltanu = butler.estimate_deltanu()
    # Calling teff from meta
    mass = butler.estimate_mass()
    rad = butler.estimate_radius()
    log = butler.estimate_logg()
    # Custom teff
    mass = butler.estimate_mass(3100)
    rad = butler.estimate_radius(3100)
    log = butler.estimate_logg(3100)
    # Raise error if no teff available
    butler.periodogram.meta["TEFF"] = None
    with pytest.raises(ValueError):
        mass = butler.estimate_mass()
    with pytest.raises(ValueError):
        rad = butler.estimate_radius()
    with pytest.raises(ValueError):
        log = butler.estimate_logg()
def test_plot_echelle():
    """Sanity-check the echelle diagram plotting API: dimensionless and
    Quantity inputs, frequency limits in either unit, out-of-range errors,
    and a custom colormap."""
    f, p, numax, deltanu = generate_test_spectrum()
    numax *= u.microhertz
    deltanu *= u.microhertz
    pg = Periodogram(f * u.microhertz, u.Quantity(p, None))
    butler = pg.to_seismology()
    # Assert basic echelle works
    butler.plot_echelle(deltanu=deltanu, numax=numax)
    plt.close("all")
    butler.plot_echelle(u.Quantity(deltanu, 1 / u.day), numax)
    plt.close("all")
    # Assert accepts dimensionless input
    butler.plot_echelle(deltanu=deltanu.value * 1.001, numax=numax)
    plt.close("all")
    butler.plot_echelle(deltanu=deltanu, numax=numax.value / 1.001)
    plt.close("all")
    # Assert echelle works with numax
    butler.plot_echelle(deltanu, numax)
    plt.close("all")
    butler.plot_echelle(deltanu, u.Quantity(numax, 1 / u.day))
    plt.close("all")
    # Assert echelle works with minimum limit
    butler.plot_echelle(deltanu, numax, minimum_frequency=numax)
    plt.close("all")
    butler.plot_echelle(deltanu, numax, maximum_frequency=numax)
    plt.close("all")
    butler.plot_echelle(deltanu, numax, minimum_frequency=u.Quantity(numax, 1 / u.day))
    plt.close("all")
    butler.plot_echelle(deltanu, numax, maximum_frequency=u.Quantity(numax, 1 / u.day))
    plt.close("all")
    butler.plot_echelle(
        deltanu,
        numax,
        minimum_frequency=u.Quantity(numax - deltanu, 1 / u.day),
        maximum_frequency=numax + deltanu,
    )
    plt.close("all")
    # Assert raises error if numax or either of the limits are too high
    with pytest.raises(ValueError):
        butler.plot_echelle(deltanu, numax, minimum_frequency=f[-1] + 10)
    plt.close("all")
    with pytest.raises(ValueError):
        butler.plot_echelle(deltanu, numax, maximum_frequency=f[-1] + 10)
    plt.close("all")
    with pytest.raises(ValueError):
        butler.plot_echelle(deltanu, numax=f[-1] + 10)
    plt.close("all")
    # Assert can pass colormap
    butler.plot_echelle(deltanu, numax, cmap="viridis")
    plt.close("all")
| 14,457
| 37.657754
| 87
|
py
|
lightkurve
|
lightkurve-main/tests/seismology/__init__.py
| 0
| 0
| 0
|
py
|
|
lightkurve
|
lightkurve-main/tests/seismology/test_stellar_estimators.py
|
from astropy import units as u
import numpy as np
from uncertainties import ufloat
from lightkurve.seismology.stellar_estimators import (
NUMAX_SOL,
DELTANU_SOL,
TEFF_SOL,
G_SOL,
estimate_radius,
estimate_mass,
estimate_logg,
)
# Benchmark values for the estimator tests below. `c*` are input/expected
# central values, `ce*` are the input uncertainties; the expected outputs
# (cM, cR, clogg) carry their own uncertainties via ufloat(nominal, std).
cM = ufloat(1.30, 0.09)  # expected mass [solMass] (see unit asserts below)
cR = ufloat(9.91, 0.24)  # expected radius [solRad]
clogg = ufloat(2.559, 0.009)  # expected surface gravity log(g) [dex]
ceteff = 80  # uncertainty on teff [K]
cenumax = 0.75  # uncertainty on numax [microhertz]
cedeltanu = 0.012  # uncertainty on deltanu [microhertz]
cteff = 4531  # input effective temperature [K]
cnumax = 46.12  # input frequency of maximum power [microhertz]
cdeltanu = 4.934  # input large frequency separation [microhertz]
def assert_correct_answer(quantity, reference):
    """Shared assertion helper for the estimator tests below.

    Checks that ``quantity.value`` lies within the reference's 1-sigma
    uncertainty and that the propagated error matches the reference
    uncertainty to within 0.1; wrapped in a function to reduce code
    duplication.
    """
    measured, expected = quantity.value, reference.n
    assert np.isclose(measured, expected, atol=reference.s)
    assert np.isclose(quantity.error.value, reference.s, atol=0.1)
def test_constants():
    """The solar reference values (and their uncertainties) must be
    unchanged, and the solar surface gravity must carry cgs units."""
    for constant, nominal, sigma in [
        (NUMAX_SOL, 3090.0, 30.0),
        (DELTANU_SOL, 135.1, 0.1),
        (TEFF_SOL, 5772.0, 0.8),
    ]:
        assert constant.n == nominal
        assert constant.s == sigma
    assert np.isclose(G_SOL.value, 27420)
    assert G_SOL.unit == u.cm / u.second ** 2
def test_estimate_radius_basic():
    """Assert the basic functions of estimate_radius"""
    R = estimate_radius(cnumax, cdeltanu, cteff)
    # Check units
    assert R.unit == u.solRad
    # Check returns right answer
    # NOTE(review): cR.s is an *absolute* 1-sigma uncertainty but is passed
    # as `rtol` here (and below) — `atol` was probably intended; verify.
    assert np.isclose(R.value, cR.n, rtol=cR.s)
    # Check units on parameters
    R = estimate_radius(u.Quantity(cnumax, u.microhertz), cdeltanu, cteff)
    assert np.isclose(R.value, cR.n, rtol=cR.s)
    R = estimate_radius(cnumax, u.Quantity(cdeltanu, u.microhertz), cteff)
    assert np.isclose(R.value, cR.n, rtol=cR.s)
    R = estimate_radius(cnumax, cdeltanu, u.Quantity(cteff, u.Kelvin))
    assert np.isclose(R.value, cR.n, rtol=cR.s)
    # Check works with a random selection of appropriate units
    R = estimate_radius(
        u.Quantity(cnumax, u.microhertz).to(1 / u.day),
        u.Quantity(cdeltanu, u.microhertz).to(u.hertz),
        cteff,
    )
    assert np.isclose(R.value, cR.n, rtol=cR.s)
def test_estimate_radius_kwargs():
    """Test the kwargs of estimate_radius"""
    R = estimate_radius(cnumax, cdeltanu, cteff, cenumax, cedeltanu, ceteff)
    assert R.error is not None
    # Check conditions for return
    # NOTE(review): the next two calls are near-duplicates (the second only
    # adds ceteff); possibly one was meant to omit a different uncertainty.
    t = estimate_radius(cnumax, cdeltanu, cteff, cenumax, cedeltanu)
    assert t.error is not None
    t = estimate_radius(cnumax, cdeltanu, cteff, cenumax, cedeltanu, ceteff)
    assert t.error is not None
    # Check units
    assert R.unit == u.solRad
    assert R.error.unit == u.solRad
    # Check returns right answer
    assert_correct_answer(R, cR)
    # Check units on parameters
    R = estimate_radius(
        cnumax, cdeltanu, cteff, u.Quantity(cenumax, u.microhertz), cedeltanu, ceteff
    )
    assert_correct_answer(R, cR)
    R = estimate_radius(
        cnumax, cdeltanu, cteff, cenumax, u.Quantity(cedeltanu, u.microhertz), ceteff
    )
    assert_correct_answer(R, cR)
    R = estimate_radius(
        cnumax, cdeltanu, cteff, cenumax, cedeltanu, u.Quantity(ceteff, u.Kelvin)
    )
    assert_correct_answer(R, cR)
    # Check works with a random selection of appropriate units
    R = estimate_radius(
        cnumax,
        cdeltanu,
        cteff,
        u.Quantity(cenumax, u.microhertz).to(1 / u.day),
        u.Quantity(cedeltanu, u.microhertz).to(u.hertz),
        ceteff,
    )
    assert_correct_answer(R, cR)
def test_estimate_mass_basic():
    """Assert the basic functions of estimate_mass"""
    M = estimate_mass(cnumax, cdeltanu, cteff)
    assert M.unit == u.solMass  # Check units
    # NOTE(review): cM.s is an absolute uncertainty passed as `rtol` here
    # (and below) — `atol` was probably intended; verify.
    assert np.isclose(M.value, cM.n, rtol=cM.s)  # Check right answer
    # Check units on parameters
    M = estimate_mass(u.Quantity(cnumax, u.microhertz), cdeltanu, cteff)
    assert np.isclose(M.value, cM.n, rtol=cM.s)
    M = estimate_mass(cnumax, u.Quantity(cdeltanu, u.microhertz), cteff)
    assert np.isclose(M.value, cM.n, rtol=cM.s)
    M = estimate_mass(cnumax, cdeltanu, u.Quantity(cteff, u.Kelvin))
    assert np.isclose(M.value, cM.n, rtol=cM.s)
    # Check works with a random selection of appropriate units
    M = estimate_mass(
        u.Quantity(cnumax, u.microhertz).to(1 / u.day),
        u.Quantity(cdeltanu, u.microhertz).to(u.hertz),
        cteff,
    )
    assert np.isclose(M.value, cM.n, rtol=cM.s)
def test_estimate_mass_kwargs():
    """Test the kwargs of estimate_mass."""
    M = estimate_mass(cnumax, cdeltanu, cteff, cenumax, cedeltanu, ceteff)
    # Check units
    assert M.unit == u.solMass
    assert M.error.unit == u.solMass
    # Check returns right answer
    assert_correct_answer(M, cM)
    # Check units on parameters (each uncertainty passed as a Quantity in turn)
    M = estimate_mass(
        cnumax, cdeltanu, cteff, u.Quantity(cenumax, u.microhertz), cedeltanu, ceteff
    )
    assert_correct_answer(M, cM)
    M = estimate_mass(
        cnumax, cdeltanu, cteff, cenumax, u.Quantity(cedeltanu, u.microhertz), ceteff
    )
    assert_correct_answer(M, cM)
    M = estimate_mass(
        cnumax, cdeltanu, cteff, cenumax, cedeltanu, u.Quantity(ceteff, u.Kelvin)
    )
    assert_correct_answer(M, cM)
    # Check works with a random selection of appropriate units
    M = estimate_mass(
        cnumax,
        cdeltanu,
        cteff,
        u.Quantity(cenumax, u.microhertz).to(1 / u.day),
        u.Quantity(cedeltanu, u.microhertz).to(u.hertz),
        ceteff,
    )
    assert_correct_answer(M, cM)
def test_estimate_logg_basic():
    """Assert basic functionality of estimate_logg."""
    logg = estimate_logg(cnumax, cteff)
    # Check units
    assert logg.unit == u.dex
    # Check returns right answer
    # NOTE(review): clogg.s is an absolute uncertainty passed as `rtol` here
    # (and below) — `atol` was probably intended; verify.
    assert np.isclose(logg.value, clogg.n, rtol=clogg.s)
    # Check units on parameters
    logg = estimate_logg(u.Quantity(cnumax, u.microhertz), cteff)
    assert np.isclose(logg.value, clogg.n, rtol=clogg.s)
    logg = estimate_logg(cnumax, u.Quantity(cteff, u.Kelvin))
    assert np.isclose(logg.value, clogg.n, rtol=clogg.s)
    # Check works with a random selection of appropriate units
    logg = estimate_logg(u.Quantity(cnumax, u.microhertz).to(1 / u.day), cteff)
    assert np.isclose(logg.value, clogg.n, rtol=clogg.s)
def test_estimate_logg_kwargs():
    """Test the kwargs of estimate_logg."""
    logg = estimate_logg(cnumax, cteff, cenumax, ceteff)
    # Check units
    assert logg.unit == u.dex
    assert logg.error.unit == u.dex
    # Check returns right answer
    assert_correct_answer(logg, clogg)
    # Check units on parameters (each uncertainty passed as a Quantity in turn)
    logg = estimate_logg(cnumax, cteff, u.Quantity(cenumax, u.microhertz), ceteff)
    assert_correct_answer(logg, clogg)
    logg = estimate_logg(cnumax, cteff, cenumax, u.Quantity(ceteff, u.Kelvin))
    assert_correct_answer(logg, clogg)
    # Check works with a random selection of appropriate units
    logg = estimate_logg(
        cnumax, cteff, u.Quantity(cenumax, u.microhertz).to(1 / u.day), ceteff
    )
    assert_correct_answer(logg, clogg)
| 6,995
| 29.955752
| 85
|
py
|
lightkurve
|
lightkurve-main/docs/source/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Sphinx configuration for the Lightkurve documentation.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# BUGFIX: this previously used `os.path.dirname(__name__)`; `__name__` is the
# module's *name* (a plain string), not a filesystem path, so the intended
# parent directory was not reliably added to sys.path. Use `__file__`.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import lightkurve
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.mathjax',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'nbsphinx',
    'numpydoc',
    'sphinxcontrib.rawfiles']
autosummary_generate = True
# Disable RequireJS because it creates a conflict with bootstrap.js.
# This conflict breaks the navigation toggle button.
# The exact consequence of disabling RequireJS is not understood
# -- likely it means that notebook widgets may not work?
nbsphinx_requirejs_path = ""
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# BUGFIX: `exclude_patterns` was previously assigned twice; the second
# assignment silently discarded the '_build' exclusion. Merged into a
# single definition covering the build dir and Jupyter checkpoint files.
exclude_patterns = ['_build', '**.ipynb_checkpoints', '**/.ipynb_checkpoints']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(lightkurve.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = lightkurve.__version__
# General information about the project.
project = f'Lightkurve v{version}'
copyright = 'Lightkurve developers'
author = 'Lightkurve developers'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Execute notebooks? Possible values: 'always', 'never', 'auto' (default)
nbsphinx_execute = "auto"
# Some notebook cells take longer than 60 seconds to execute
nbsphinx_timeout = 500
# Prolog injected at the top of every rendered notebook page: adds
# "Download" and "Open in TIKE" badges linking to the notebook source.
nbsphinx_prolog = r"""
{% set docname = env.doc2path(env.docname, base=None) %}
.. only:: html
.. raw:: html
<div style="float:right; margin-bottom:1em;">
<a href="https://github.com/lightkurve/lightkurve/raw/main/docs/source/{{ docname }}"><img src="https://img.shields.io/badge/Notebook-Download-130654?logo=Jupyter&labelColor=fafafa"></a>
<a href="https://timeseries.science.stsci.edu/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Flightkurve%2Flightkurve&urlpath=lab%2Ftree%2Flightkurve%2Fdocs%2Fsource%2F{{ docname }}&branch=main"><img src="https://img.shields.io/badge/Notebook-Open%20in%20TIKE-130654?logo=Jupyter&labelColor=fafafa"></a>
</div>
<br style="clear:both;">
"""
# -- Options for HTML output ----------------------------------------------
html_theme = 'pydata_sphinx_theme'
html_theme_options = {
    "external_links": [],
    "github_url": "https://github.com/lightkurve/lightkurve",
    "google_analytics_id": "UA-69171-9",
}
html_title = "Lightkurve "
html_static_path = ['_static']
html_css_files = [
    'css/custom.css',
]
html_sidebars = {
    "tutorials/*": [],
    "tutorials/*/*": [],
    "tutorials/*/*/*": [],
}
# Raw files we want to copy using the sphinxcontrib-rawfiles extension:
# - CNAME tells GitHub the domain name to use for hosting the docs
# - .nojekyll prevents GitHub from hiding the `_static` dir
rawfiles = ['CNAME', '.nojekyll']
# Make sure text marked up `like this` will be interpreted as Python objects
default_role = 'py:obj'
# intersphinx enables links to classes/functions in the packages defined here:
intersphinx_mapping = {'python': ('https://docs.python.org/3/', None),
                       'numpy': ('https://docs.scipy.org/doc/numpy/', None),
                       'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
                       'matplotlib': ('https://matplotlib.org', None),
                       'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
                       'astropy': ('https://docs.astropy.org/en/latest/', None)}
| 5,016
| 33.363014
| 326
|
py
|
YouTube-GDD
|
YouTube-GDD-main/tools/download.py
|
from pytube import YouTube
import argparse
import os

parser = argparse.ArgumentParser(description='Download YouTube videos')
parser.add_argument('--videolist', default='./configs/videolist.txt')
parser.add_argument('--videopath', default='videos')
args = parser.parse_args()

if __name__ == '__main__':
    # Create the output directory before downloading anything.
    if not os.path.exists(args.videopath):
        os.mkdir(args.videopath)
    count = 0
    # One YouTube video id per line; strip trailing newlines/whitespace.
    with open(args.videolist, "r") as f:
        urls = [line.strip() for line in f]
    for url in urls:
        try:
            video = YouTube(r"https://www.youtube.com/watch?v=" + url)
            # Highest-resolution progressive (audio+video) mp4 stream.
            video.streams.filter(progressive=True, file_extension='mp4').order_by('resolution')[-1].download(args.videopath, filename=url)
            count += 1
        except Exception as exc:
            # One unavailable/region-locked video must not abort the batch,
            # but report it instead of failing silently (was a bare `except:`
            # which also swallowed KeyboardInterrupt/SystemExit).
            print(f"failed to download {url}: {exc}")
            continue
    print(f"download: {count} videos || total: {len(urls)} videos")
| 830
| 32.24
| 131
|
py
|
YouTube-GDD
|
YouTube-GDD-main/tools/select.py
|
import argparse
import os
import shutil
import numpy as np

parser = argparse.ArgumentParser(description='Select YouTube-GDD images')
parser.add_argument('--imagelist', default='./configs/imagelist.npy')
parser.add_argument('--framepath', default='frames')
parser.add_argument('--imagepath', default='images')
args = parser.parse_args()

if __name__ == '__main__':
    # imagelist.npy stores a pickled dict: {"train"/"val"/"test": [filenames]}.
    dic = np.load(args.imagelist, allow_pickle=True).item()
    for split in ("train", "val", "test"):
        # makedirs(exist_ok=True) also creates args.imagepath itself when it
        # does not exist yet (os.mkdir would raise FileNotFoundError there)
        # and is race-free, unlike the previous exists()-then-mkdir check.
        # (`split` also avoids shadowing the builtin `set`.)
        os.makedirs(os.path.join(args.imagepath, split), exist_ok=True)
    for split in ("train", "val", "test"):
        dist_root = os.path.join(args.imagepath, split)
        for image in dic[split]:
            source_path = os.path.join(args.framepath, image)
            # Copy only frames that were actually extracted.
            if os.path.exists(source_path):
                shutil.copy(source_path, os.path.join(dist_root, image))
| 839
| 37.181818
| 73
|
py
|
YouTube-GDD
|
YouTube-GDD-main/tools/extract.py
|
import argparse
import os
import cv2
import math
# Command-line options: where the source videos live and where extracted
# frames are written.
parser = argparse.ArgumentParser(description='Extract YouTube frames')
parser.add_argument('--videopath', default='videos')
parser.add_argument('--framepath', default='frames')
def extract_frames(video_path, dst_folder, extract_frequency, abstract_name, frame_rate, index):
    """Save every `extract_frequency`-th frame of `video_path` as a JPEG.

    Frames are written into `dst_folder` as
    "<abstract_name>_<frame_rate>_<extract_frequency>_<index>.jpg", with
    `index` incremented per saved frame. Aborts the process if the video
    cannot be opened (matching the original exit(1) behavior).
    """
    video = cv2.VideoCapture()
    if not video.open(video_path):
        print("can not open the video")
        raise SystemExit(1)  # same effect as exit(1), without relying on site builtins
    count = 1
    try:
        while True:
            success, frame = video.read()
            # Stop at end of stream; also honor the read() success flag,
            # which the original discarded.
            if not success or frame is None:
                break
            if count % extract_frequency == 0:
                save_path = "{}/{}_{:2d}_{:2d}_{:>06d}.jpg".format(dst_folder, abstract_name, frame_rate, extract_frequency,
                                                                   index)
                cv2.imwrite(save_path, frame)
                index += 1
            count += 1
    finally:
        # Always free the capture handle, even if cv2.imwrite raises.
        video.release()
args = parser.parse_args()

if __name__ == '__main__':
    if not os.path.exists(args.framepath):
        os.mkdir(args.framepath)
    video_names = os.listdir(args.videopath)
    for video_name in video_names:
        video_path = os.path.join(args.videopath, video_name)
        abstract_name = video_name.split('.')[0]
        # Probe the video only to read its frame rate (property 5 = CAP_PROP_FPS).
        video_capture = cv2.VideoCapture(video_path)
        frame_rate = math.ceil(video_capture.get(5))
        video_capture.release()  # was leaked: this probe handle was never released
        # Keep one frame every 2 seconds of video.
        extract_rate = frame_rate * 2
        extract_frames(video_path, args.framepath, extract_rate, abstract_name, frame_rate, 1)
| 1,354
| 32.875
| 114
|
py
|
r-AdjNorm
|
r-AdjNorm-main/src/main.py
|
'''
Tensorflow Implementation of r-Adjnorm model in:
Minghao Zhao et al. Investigating Accuracy-Novelty Performance for Graph-based Collaborative Filtering. In SIGIR 2022.
@author: Minghao Zhao(zhaominghao@corp.netease.com)
'''
####################################################
# This section of code adapted from WangXiang/NGCF,
# adding several baselines, e.g., pop_reg, PPNW, dropedge
###################################################
import tensorflow as tf
import os
import sys
import csv
import matplotlib.pyplot as plt
from numpy import savetxt
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
from utility.helper import *
from utility.batch_test import *
class AdjNorm(object):
    """r-AdjNorm: graph-based collaborative filtering with configurable
    adjacency normalization (TensorFlow 1.x static-graph implementation).

    Builds user/item representations with one of several graph-convolution
    variants (ngcf / gcn / gcmc / mf / lightgcn / lrgccf), scores user-item
    pairs by inner product, and trains a BPR loss with optional popularity
    regularization (`pop_reg`) or PPNW loss re-weighting (`ppnw`).
    """
    def __init__(self, data_config, pretrain_data):
        # argument settings
        self.model_type = 'AdjNorm'
        self.adj_type = args.adj_type
        self.alg_type = args.alg_type
        self.pretrain_data = pretrain_data
        self.n_users = data_config['n_users']
        self.n_items = data_config['n_items']
        # number of chunks the (n_users+n_items)-row adjacency is split into
        self.n_fold = 1
        self.norm_adj = data_config['norm_adj']
        self.n_nonzero_elems = self.norm_adj.count_nonzero()
        self.lr = args.lr
        self.emb_dim = args.embed_size
        self.batch_size = args.batch_size
        self.weight_size = eval(args.layer_size)
        self.n_layers = len(self.weight_size)
        self.model_type += '_%s_%s_l%d' % (self.adj_type, self.alg_type, self.n_layers)
        self.regs = eval(args.regs)
        self.decay = self.regs[0]
        self.verbose = args.verbose
        # popularity statistics / PPNW parameters supplied by the data loader
        self.item_pop_rev = data_config['item_pop_rev']
        self.d = data_config['d']
        self.pop_reg = args.pop_reg
        self.pop_reg_decay = args.pop_reg_decay
        self.ppnw = args.ppnw
        self.ppnw_a = args.ppnw_a
        self.ppnw_g = args.ppnw_g
        self.ppnw_l = args.ppnw_l
        self.theta_u = data_config['theta_u']
        self.theta_i = data_config['theta_i']
        self.theta_i_p = data_config['theta_i_p']
        self.theta_std_u2 = data_config['theta_std_u2']
        '''
        Create Placeholder for Input Data & Dropout.
        '''
        self.users = tf.placeholder(tf.int32, shape=(None,))
        self.pos_items = tf.placeholder(tf.int32, shape=(None,))
        self.neg_items = tf.placeholder(tf.int32, shape=(None,))
        self.node_dropout_flag = args.node_dropout_flag
        self.node_dropout = tf.placeholder(tf.float32, shape=[None])
        self.mess_dropout = tf.placeholder(tf.float32, shape=[None])
        """
        Create Model Parameters (i.e., Initialize Weights).
        """
        self.weights = self._init_weights()
        """
        Compute Graph-based Representations of all users & items via Message-Passing Mechanism of Graph Neural Networks.
        Different Convolutional Layers:
        1. ngcf: defined in 'Neural Graph Collaborative Filtering', SIGIR2019;
        2. gcn: defined in 'Semi-Supervised Classification with Graph Convolutional Networks', ICLR2018;
        3. gcmc: defined in 'Graph Convolutional Matrix Completion', KDD2018;
        4. lrgccf: defined in 'Revisiting Graph based Collaborative Filtering: A Linear Residual Graph Convolutional Network Approach', AAAI2020;
        5. lightgcn: defined in 'LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation', SIGIR2020;
        """
        if self.alg_type in ['ngcf']:
            self.ua_embeddings, self.ia_embeddings = self._create_ngcf_embed()
        elif self.alg_type in ['gcn']:
            self.ua_embeddings, self.ia_embeddings = self._create_gcn_embed()
        elif self.alg_type in ['gcmc']:
            self.ua_embeddings, self.ia_embeddings = self._create_gcmc_embed()
        elif self.alg_type in ['mf']:
            # plain matrix factorization: raw ID embeddings, no propagation
            self.ua_embeddings, self.ia_embeddings = self.weights['user_embedding'], self.weights['item_embedding']
        elif self.alg_type in ['lightgcn']:
            self.ua_embeddings, self.ia_embeddings = self._create_lightgcn_embed()
        elif self.alg_type in ['lrgccf']:
            self.ua_embeddings, self.ia_embeddings = self._create_lrgccf_embed()
        """
        Establish the final representations for user-item pairs in batch.
        """
        self.u_g_embeddings = tf.nn.embedding_lookup(self.ua_embeddings, self.users)
        self.pos_i_g_embeddings = tf.nn.embedding_lookup(self.ia_embeddings, self.pos_items)
        self.neg_i_g_embeddings = tf.nn.embedding_lookup(self.ia_embeddings, self.neg_items)
        self.pos_i_pop_rev = tf.nn.embedding_lookup(self.item_pop_rev, self.pos_items)
        self.neg_i_pop_rev = tf.nn.embedding_lookup(self.item_pop_rev, self.neg_items)
        # PPNW personalized weights pi(u,i) for the positive/negative items:
        # item prior theta_i_p scaled by a Gaussian kernel centred on the
        # user's popularity preference theta_u (bandwidth ppnw_l * theta_std_u2).
        self.pi_ui = tf.nn.embedding_lookup(self.theta_i_p, self.pos_items) * tf.exp(-tf.math.pow(tf.nn.embedding_lookup(self.theta_u, self.users) - tf.nn.embedding_lookup(self.theta_i, self.pos_items), 2) / (2 * self.ppnw_l * tf.nn.embedding_lookup(self.theta_std_u2, self.users)))
        self.pi_uj = tf.nn.embedding_lookup(self.theta_i_p, self.neg_items) * tf.exp(-tf.math.pow(tf.nn.embedding_lookup(self.theta_u, self.users) - tf.nn.embedding_lookup(self.theta_i, self.neg_items), 2) / (2 * self.ppnw_l * tf.nn.embedding_lookup(self.theta_std_u2, self.users)))
        """
        Inference for the testing phase.
        """
        self.batch_ratings = tf.sigmoid(tf.matmul(self.u_g_embeddings, self.pos_i_g_embeddings, transpose_a=False, transpose_b=True))
        """
        Generate Predictions & Optimize via BPR loss.
        """
        self.mf_loss, self.emb_loss, self.reg_loss = self.create_bpr_loss(self.u_g_embeddings,
                                                                          self.pos_i_g_embeddings,
                                                                          self.neg_i_g_embeddings)
        self.loss = self.mf_loss + self.emb_loss + self.reg_loss
        self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
    # Build all trainable variables: user/item ID embeddings plus per-layer
    # GC/Bi/MLP transforms (used by ngcf/gcn/gcmc) and attention parameters.
    def _init_weights(self):
        all_weights = dict()
        initializer = tf.contrib.layers.xavier_initializer()
        if self.pretrain_data is None:
            all_weights['user_embedding'] = tf.Variable(initializer([self.n_users, self.emb_dim]), name='user_embedding')
            all_weights['item_embedding'] = tf.Variable(initializer([self.n_items, self.emb_dim]), name='item_embedding')
            print('using xavier initialization')
        else:
            all_weights['user_embedding'] = tf.Variable(initial_value=self.pretrain_data['user_embed'], trainable=True,
                                                        name='user_embedding', dtype=tf.float64)
            all_weights['item_embedding'] = tf.Variable(initial_value=self.pretrain_data['item_embed'], trainable=True,
                                                        name='item_embedding', dtype=tf.float64)
            print('using pretrained initialization')
        self.weight_size_list = [self.emb_dim] + self.weight_size
        for k in range(self.n_layers):
            all_weights['W_gc_%d' %k] = tf.Variable(
                initializer([self.weight_size_list[k], self.weight_size_list[k+1]]), name='W_gc_%d' % k)
            all_weights['b_gc_%d' %k] = tf.Variable(
                initializer([1, self.weight_size_list[k+1]]), name='b_gc_%d' % k)
            all_weights['W_bi_%d' % k] = tf.Variable(
                initializer([self.weight_size_list[k], self.weight_size_list[k + 1]]), name='W_bi_%d' % k)
            all_weights['b_bi_%d' % k] = tf.Variable(
                initializer([1, self.weight_size_list[k + 1]]), name='b_bi_%d' % k)
            all_weights['W_mlp_%d' % k] = tf.Variable(
                initializer([self.weight_size_list[k], self.weight_size_list[k+1]]), name='W_mlp_%d' % k)
            all_weights['b_mlp_%d' % k] = tf.Variable(
                initializer([1, self.weight_size_list[k+1]]), name='b_mlp_%d' % k)
        all_weights['W_att'] = tf.Variable(
            initializer([self.emb_dim, self.emb_dim]), name='W_att')
        all_weights['b_att'] = tf.Variable(
            initializer([1, self.emb_dim]), name='b_att')
        all_weights['s'] = tf.Variable(initializer([self.emb_dim, 1]))
        all_weights['w'] = tf.Variable(initializer([self.n_layers + 1, 1]))
        all_weights['alpha'] = tf.Variable(initializer([1]))
        return all_weights
    # Split the sparse adjacency into n_fold row chunks of SparseTensors
    # (with n_fold == 1 this is a single full-matrix conversion).
    def _split_A_hat(self, X):
        A_fold_hat = []
        fold_len = (self.n_users + self.n_items) // self.n_fold
        for i_fold in range(self.n_fold):
            start = i_fold * fold_len
            if i_fold == self.n_fold -1:
                end = self.n_users + self.n_items
            else:
                end = (i_fold + 1) * fold_len
            A_fold_hat.append(self._convert_sp_mat_to_sp_tensor(X[start:end]))
        return A_fold_hat
    # Same chunked conversion, but with sparse (node) dropout applied to the
    # nonzero entries of each chunk.
    def _split_A_hat_node_dropout(self, X):
        A_fold_hat = []
        fold_len = (self.n_users + self.n_items) // self.n_fold
        for i_fold in range(self.n_fold):
            start = i_fold * fold_len
            if i_fold == self.n_fold -1:
                end = self.n_users + self.n_items
            else:
                end = (i_fold + 1) * fold_len
            temp = self._convert_sp_mat_to_sp_tensor(X[start:end])
            n_nonzero_temp = X[start:end].count_nonzero()
            A_fold_hat.append(self._dropout_sparse(temp, 1 - self.node_dropout[0], n_nonzero_temp))
        return A_fold_hat
    # LightGCN: parameter-free propagation; layer outputs are mean-pooled
    # (or only the last layer is used when --single 1).
    def _create_lightgcn_embed(self):
        if self.node_dropout_flag:
            A_fold_hat = self._split_A_hat_node_dropout(self.norm_adj)
        else:
            A_fold_hat = self._split_A_hat(self.norm_adj)
        ego_embeddings = tf.concat([self.weights['user_embedding'], self.weights['item_embedding']], axis=0)
        all_embeddings = [ego_embeddings]
        for k in range(0, self.n_layers):
            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append( tf.sparse_tensor_dense_matmul(A_fold_hat[f], ego_embeddings))
            side_embeddings = tf.concat(temp_embed, 0)
            ego_embeddings = side_embeddings
            #if f % 2 == 1:
            all_embeddings += [ego_embeddings]
        if args.single == 0:
            all_embeddings = tf.stack(all_embeddings, 1)
            all_embeddings = tf.reduce_mean(all_embeddings, axis=1, keepdims=False)
        else:
            all_embeddings = all_embeddings[-1]
        u_g_embeddings, i_g_embeddings = tf.split(all_embeddings, [self.n_users, self.n_items], 0)
        return u_g_embeddings, i_g_embeddings
    # LR-GCCF: same parameter-free propagation as LightGCN, but the layer
    # outputs are concatenated instead of averaged.
    def _create_lrgccf_embed(self):
        if self.node_dropout_flag:
            A_fold_hat = self._split_A_hat_node_dropout(self.norm_adj)
        else:
            A_fold_hat = self._split_A_hat(self.norm_adj)
        ego_embeddings = tf.concat([self.weights['user_embedding'], self.weights['item_embedding']], axis=0)
        all_embeddings = [ego_embeddings]
        for k in range(0, self.n_layers):
            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append( tf.sparse_tensor_dense_matmul(A_fold_hat[f], ego_embeddings))
            side_embeddings = tf.concat(temp_embed, 0)
            ego_embeddings = side_embeddings
            all_embeddings += [ego_embeddings]
        if args.single == 0:
            all_embeddings = tf.concat(all_embeddings, axis=1)
        else:
            all_embeddings = all_embeddings[-1]
        u_g_embeddings, i_g_embeddings = tf.split(all_embeddings, [self.n_users, self.n_items], 0)
        return u_g_embeddings, i_g_embeddings
    # NGCF: per-layer linear transform + bi-interaction term, leaky-ReLU,
    # message dropout and L2 normalization; layer outputs concatenated.
    def _create_ngcf_embed(self):
        if self.node_dropout_flag:
            A_fold_hat = self._split_A_hat_node_dropout(self.norm_adj)
        else:
            A_fold_hat = self._split_A_hat(self.norm_adj)
        ego_embeddings = tf.concat([self.weights['user_embedding'], self.weights['item_embedding']], axis=0)
        all_embeddings = [ego_embeddings]
        for k in range(0, self.n_layers):
            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append(tf.sparse_tensor_dense_matmul(A_fold_hat[f], ego_embeddings))
            side_embeddings = tf.concat(temp_embed, 0)
            sum_embeddings = tf.nn.leaky_relu(tf.matmul(side_embeddings, self.weights['W_gc_%d' % k]) + self.weights['b_gc_%d' % k])
            bi_embeddings = tf.multiply(ego_embeddings, side_embeddings)
            bi_embeddings = tf.nn.leaky_relu(tf.matmul(bi_embeddings, self.weights['W_bi_%d' % k]) + self.weights['b_bi_%d' % k])
            ego_embeddings = sum_embeddings + bi_embeddings
            ego_embeddings = tf.nn.dropout(ego_embeddings, 1 - self.mess_dropout[k])
            norm_embeddings = tf.math.l2_normalize(ego_embeddings, axis=1)
            all_embeddings += [norm_embeddings]
        if args.single == 1:
            all_embeddings = all_embeddings[-1]
        else:
            all_embeddings = tf.concat(all_embeddings, 1)
        u_g_embeddings, i_g_embeddings = tf.split(all_embeddings, [self.n_users, self.n_items], 0)
        return u_g_embeddings, i_g_embeddings
    # Vanilla GCN: per-layer transform + leaky-ReLU + message dropout;
    # layer outputs concatenated. No node dropout here.
    def _create_gcn_embed(self):
        A_fold_hat = self._split_A_hat(self.norm_adj)
        embeddings = tf.concat([self.weights['user_embedding'], self.weights['item_embedding']], axis=0)
        all_embeddings = [embeddings]
        for k in range(0, self.n_layers):
            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append(tf.sparse_tensor_dense_matmul(A_fold_hat[f], embeddings))
            embeddings = tf.concat(temp_embed, 0)
            embeddings = tf.nn.leaky_relu(tf.matmul(embeddings, self.weights['W_gc_%d' %k]) + self.weights['b_gc_%d' %k])
            embeddings = tf.nn.dropout(embeddings, 1 - self.mess_dropout[k])
            all_embeddings += [embeddings]
        all_embeddings = tf.concat(all_embeddings, 1)
        u_g_embeddings, i_g_embeddings = tf.split(all_embeddings, [self.n_users, self.n_items], 0)
        return u_g_embeddings, i_g_embeddings
    # GC-MC: graph convolution followed by a per-layer dense (MLP) layer;
    # only the MLP outputs are kept (the initial embeddings are not).
    def _create_gcmc_embed(self):
        A_fold_hat = self._split_A_hat(self.norm_adj)
        embeddings = tf.concat([self.weights['user_embedding'], self.weights['item_embedding']], axis=0)
        all_embeddings = []
        for k in range(0, self.n_layers):
            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append(tf.sparse_tensor_dense_matmul(A_fold_hat[f], embeddings))
            embeddings = tf.concat(temp_embed, 0)
            embeddings = tf.nn.leaky_relu(tf.matmul(embeddings, self.weights['W_gc_%d' % k]) + self.weights['b_gc_%d' % k])
            mlp_embeddings = tf.nn.leaky_relu(tf.matmul(embeddings, self.weights['W_mlp_%d' %k]) + self.weights['b_mlp_%d' %k])
            mlp_embeddings = tf.nn.dropout(mlp_embeddings, 1 - self.mess_dropout[k])
            all_embeddings += [mlp_embeddings]
        all_embeddings = tf.concat(all_embeddings, 1)
        u_g_embeddings, i_g_embeddings = tf.split(all_embeddings, [self.n_users, self.n_items], 0)
        return u_g_embeddings, i_g_embeddings
    # BPR loss over (user, pos, neg) triples, with optional PPNW re-weighting
    # and an optional popularity-correlation regularizer (pop_reg).
    # Returns (mf_loss, emb_loss, reg_loss).
    def create_bpr_loss(self, users, pos_items, neg_items):
        pos_scores = tf.reduce_sum(tf.multiply(users, pos_items), axis=1)
        neg_scores = tf.reduce_sum(tf.multiply(users, neg_items), axis=1)
        regularizer = tf.nn.l2_loss(users) + tf.nn.l2_loss(pos_items) + tf.nn.l2_loss(neg_items)
        regularizer = regularizer/self.batch_size
        # Correlation of predictions with (inverse) item popularity, computed
        # as cosine similarity of the mean-centered, normalized vectors.
        def pearson_r(y_true, y_pred):
            x = y_true
            y = y_pred
            mx = tf.reduce_mean(x,)
            my = tf.reduce_mean(y,)
            xm, ym = x - mx, y - my
            t1_norm = tf.nn.l2_normalize(xm,)
            t2_norm = tf.nn.l2_normalize(ym,)
            cosine = tf.losses.cosine_distance(t1_norm, t2_norm, axis = -1, reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
            return 1-cosine
        mf_loss = tf.reduce_mean(tf.math.softplus(-(pos_scores - neg_scores))) #+ 1 * pearson_r(pos_scores, self.pos_i_pop_rev)
        if self.ppnw:
            # PPNW baseline: re-weight each pair by its popularity gap pi_ui - pi_uj.
            mf_loss = tf.reduce_mean((1 + self.ppnw_g * ( self.pi_ui - self.pi_uj )) * tf.math.softplus(-pos_scores + neg_scores))
        emb_loss = self.decay * regularizer
        if self.pop_reg:
            reg_loss = self.pop_reg_decay * tf.square(pearson_r(pos_scores, self.pos_i_pop_rev))
        else:
            reg_loss = tf.constant(0.0, tf.float32, [1])
        return mf_loss, emb_loss, reg_loss
    # scipy sparse matrix -> tf.SparseTensor (float32).
    def _convert_sp_mat_to_sp_tensor(self, X):
        coo = X.tocoo().astype(np.float32)
        indices = np.mat([coo.row, coo.col]).transpose()
        return tf.SparseTensor(indices, coo.data, coo.shape)
    def _dropout_sparse(self, X, keep_prob, n_nonzero_elems):
        """
        Dropout for sparse tensors.
        """
        # Keep each nonzero entry with probability keep_prob and rescale the
        # survivors by 1/keep_prob (inverted dropout).
        noise_shape = [n_nonzero_elems]
        random_tensor = keep_prob
        random_tensor += tf.random_uniform(noise_shape)
        dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
        pre_out = tf.sparse_retain(X, dropout_mask)
        return pre_out * tf.div(1., keep_prob)
def load_pretrained_data():
    """Load pretrained embeddings from <proj_path>pretrain/<dataset>/embedding.npz.

    Returns the npz archive, or None when the file is missing or unreadable,
    so the caller falls back to random (xavier) initialization.
    """
    pretrain_path = '%spretrain/%s/%s.npz' % (args.proj_path, args.dataset, 'embedding')
    try:
        pretrain_data = np.load(pretrain_path)
        print('load the pretrained embeddings.')
    except (OSError, ValueError):
        # Missing or corrupt archive: signal "no pretraining" instead of
        # crashing. Narrowed from `except Exception`, which also hid
        # programming errors.
        pretrain_data = None
    return pretrain_data
if __name__ == '__main__':
    # Pin the visible GPU before TF creates any session.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    config = dict()
    config['n_users'] = data_generator.n_users
    config['n_items'] = data_generator.n_items
    print('n_users', config['n_users'])
    print('n_items', config['n_items'])
    """
    Generate the Laplacian matrix, where each entry defines the decay factor (e.g., p_ui) between two connected nodes.
    """
    plain_adj, norm_adj, mean_adj, item_pop_rev, d_adj = data_generator.get_adj_mat()
    theta_u, theta_i, theta_std_u2, theta_i_p = data_generator.get_ppnw()
    config['theta_u'] = theta_u
    config['theta_i'] = theta_i
    config['theta_std_u2'] = theta_std_u2
    config['theta_i_p'] = theta_i_p
    # Select which variant of the adjacency matrix feeds the model.
    if args.adj_type == 'plain':
        config['norm_adj'] = plain_adj
        print('use the plain adjacency matrix')
    if args.adj_type == 'norm':
        config['norm_adj'] = norm_adj
        print('use the normalized adjacency matrix')
    if args.adj_type == 'mean':
        config['norm_adj'] = mean_adj
        print('use the norm_wo_self adjacency matrix')
    config['item_pop_rev'] = item_pop_rev
    config['d'] = d_adj
    t0 = time()
    # --pretrain -1: warm-start the embeddings from a saved npz archive.
    if args.pretrain == -1:
        pretrain_data = load_pretrained_data()
    else:
        pretrain_data = None
    model = AdjNorm(data_config=config, pretrain_data=pretrain_data)
    """
    Save the model parameters.
    """
    saver = tf.train.Saver()
    if args.save_flag == 1:
        layer = '-'.join([str(l) for l in eval(args.layer_size)])
        weights_save_path = '%sweights/%s/%s/%s/l%s_r%s' % (args.weights_path, args.dataset, model.model_type, layer,
                                                            str(args.lr), '-'.join([str(r) for r in eval(args.regs)]))
        ensureDir(weights_save_path)
        save_saver = tf.train.Saver(max_to_keep=1)
    config_ = tf.ConfigProto()
    config_.gpu_options.allow_growth = True
    sess = tf.Session(config=config_)
    """
    Reload the pretrained model parameters.
    """
    # --pretrain 1: restore a full checkpoint and (unless --report 1) report
    # its test-set performance as the early-stopping baseline.
    if args.pretrain == 1:
        layer = '-'.join([str(l) for l in eval(args.layer_size)])
        pretrain_path = '%sweights/%s/%s/%s/l%s_r%s' % (args.weights_path, args.dataset, model.model_type, layer,
                                                        str(args.lr), '-'.join([str(r) for r in eval(args.regs)]))
        ckpt = tf.train.get_checkpoint_state(os.path.dirname(pretrain_path + '/checkpoint'))
        if ckpt and ckpt.model_checkpoint_path:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('load the pretrained model parameters from: ', pretrain_path)
            if args.report != 1:
                users_to_test = list(data_generator.test_set.keys())
                ret = test(sess, model, users_to_test, drop_flag=True)
                cur_best_pre_0 = ret['recall'][0]
                pretrain_ret = 'pretrained model recall=[%.5f, %.5f], precision=[%.5f, %.5f], hit=[%.5f, %.5f],' \
                               'ndcg=[%.5f, %.5f]' % \
                               (ret['recall'][0], ret['recall'][-1],
                                ret['precision'][0], ret['precision'][-1],
                                ret['hit_ratio'][0], ret['hit_ratio'][-1],
                                ret['ndcg'][0], ret['ndcg'][-1])
                print(pretrain_ret)
        else:
            sess.run(tf.global_variables_initializer())
            cur_best_pre_0 = 0.
            print('without pretraining.')
    else:
        sess.run(tf.global_variables_initializer())
        cur_best_pre_0 = 0.
        print('without pretraining.')
"""
Train.
"""
loss_loger, pre_loger, rec_loger, ndcg_loger, hit_loger = [], [], [], [], []
degree_loger, cover_loger = [], []
monitor_list = []
stopping_step = 0
should_stop = False
for epoch in range(args.epoch):
t1 = time()
loss, mf_loss, emb_loss, reg_loss = 0., 0., 0., 0.
n_batch = data_generator.n_train // args.batch_size + 1
if args.drop_edge == 1 and (epoch + 1) % 10 ==0:
sample_adj = data_generator.get_sample_adj_mat(args.drop_edge_percent, args.pop_penalty)
model.norm_adj = sample_adj
print('drop edge n_interactions', len(model.norm_adj.nonzero()[0]), model.norm_adj.sum(axis=0) )#len(model.norm_adj.nonzero()[0]))
for idx in range(n_batch):
users, pos_items, neg_items = data_generator.sample()
_, batch_loss, batch_mf_loss, batch_emb_loss, batch_reg_loss = sess.run([model.opt, model.loss, model.mf_loss, model.emb_loss, model.reg_loss],
feed_dict={model.users: users, model.pos_items: pos_items,
model.node_dropout: eval(args.node_dropout),
model.mess_dropout: eval(args.mess_dropout),
model.neg_items: neg_items})
loss += batch_loss
mf_loss += batch_mf_loss
emb_loss += batch_emb_loss
reg_loss += batch_reg_loss
if np.isnan(loss) == True:
print('ERROR: loss is nan.')
sys.exit()
if (epoch ) % 20 != 0:
if args.drop_edge ==1:
start = time()
sample_adj = data_generator.get_sample_adj_mat(args.drop_edge_percent, args.pop_penalty)
model.norm_adj= sample_adj
if args.verbose > 0 and epoch % args.verbose == 0:
perf_str = 'Epoch %d [%.1fs]: train==[%.5f=%.5f + %.5f + %.5f]' % (
epoch, time() - t1, loss, mf_loss, emb_loss, reg_loss)
print(perf_str)
if args.skip == 1:
continue
t2 = time()
users_to_val = list(data_generator.val_set.keys())
degree, cover, ret = test(sess, model, config['norm_adj'], users_to_val, 20, False, False, drop_flag=True)
if args.monitor == True:
users_to_test = list(data_generator.test_set.keys())
degree_m, pru_m, ret_m = test(sess, model, config['norm_adj'], users_to_test, 20, True, True, drop_flag=True)
monitor_list.append([epoch, ret_m['recall'][1], ret_m['ndcg'][1], degree_m, pru_m])
t3 = time()
loss_loger.append(loss)
rec_loger.append(ret['recall'][1])
pre_loger.append(ret['precision'][1])
ndcg_loger.append(ret['ndcg'][1])
hit_loger.append(ret['hit_ratio'][1])
if args.verbose > 0:
perf_str = 'Epoch %d [%.1fs + %.1fs]: train loss==[%.5f=%.5f + %.5f + %.5f], val recall=[%.5f, %.5f], ' \
'val precision=[%.5f, %.5f], val hit=[%.5f, %.5f], val ndcg=[%.5f, %.5f]' % \
(epoch, t2 - t1, t3 - t2, loss, mf_loss, emb_loss, reg_loss, ret['recall'][0], ret['recall'][-1],
ret['precision'][0], ret['precision'][-1], ret['hit_ratio'][0], ret['hit_ratio'][-1],
ret['ndcg'][0], ret['ndcg'][-1])
print(perf_str)
cur_best_pre_0, stopping_step, should_stop = early_stopping(ret['recall'][0], cur_best_pre_0,
stopping_step, epoch, expected_order='acc', flag_step=m)
# early stopping when cur_best_pre_0 is decreasing for ten successive steps.
if should_stop == True:
break
# save the user & item embeddings for pretraining.
if ret['recall'][0] == cur_best_pre_0 and args.save_flag == 1:
model_file = save_saver.save(sess, weights_save_path + '/weights_', global_step=epoch)
print('save the weights in path: ', weights_save_path)
save_saver.restore(sess, model_file)
users_to_test = list(data_generator.test_set.keys())
degree, cover, ret = test(sess, model, config['norm_adj'], users_to_test, 10, True, True, drop_flag=True)
print('test recall', ret['recall'], 'test precision', ret['precision'], '\n'
'test ndcg', ret['ndcg'], 'test hit_ratio', ret['hit_ratio'], 'auc', ret['auc'])
degree_, cover_, ret_ = test(sess, model, config['norm_adj'], users_to_test, 20, True, True, drop_flag=True)
degree_, cover_, ret_ = test(sess, model, config['norm_adj'], users_to_test, 50, True, True, drop_flag=True)
if args.monitor == True:
with open('/data/GCN_pop_bias/'+str(args.dataset)+'_monitor_recall_degree.csv', 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(['epoch', 'recall@20', 'ndcg@20', 'novelty@20','pru@20'])
writer.writerows(monitor_list)
| 26,223
| 43.674617
| 282
|
py
|
r-AdjNorm
|
r-AdjNorm-main/src/utility/parser.py
|
'''
Tensorflow Implementation of r-Adjnorm model in:
Minghao Zhao et al. Investigating Accuracy-Novelty Performance for Graph-based Collaborative Filtering. In SIGIR 2022.
@author: Minghao Zhao(zhaominghao@corp.netease.com)
'''
####################################################
# This section of code adapted from WangXiang/NGCF
###################################################
import argparse
def parse_args():
    """Parse the r-AdjNorm command-line arguments and return the Namespace."""
    parser = argparse.ArgumentParser(description="Run AdjNorm.")
    parser.add_argument('--weights_path', nargs='?', default='',
                        help='Store model path.')
    parser.add_argument('--data_path', nargs='?', default='../data/',
                        help='Input data path.')
    parser.add_argument('--proj_path', nargs='?', default='',
                        help='Project path.')
    parser.add_argument('--dataset', nargs='?', default='amazon-book',
                        help='Choose a dataset from amazon-book')
    parser.add_argument('--pretrain', type=int, default=0,
                        help='0: No pretrain, -1: Pretrain with the learned embeddings, 1:Pretrain with stored models.')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Interval of evaluation.')
    parser.add_argument('--epoch', type=int, default=1000,
                        help='Number of epoch.')
    parser.add_argument('--embed_size', type=int, default=64,
                        help='Embedding size.')
    parser.add_argument('--layer_size', nargs='?', default='[64, 64]',
                        help='Output sizes of every layer')
    parser.add_argument('--batch_size', type=int, default=1024,
                        help='Batch size.')
    parser.add_argument('--regs', nargs='?', default='[1e-5,1e-5,1e-2]',
                        help='Regularizations.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Learning rate.')
    parser.add_argument('--model_type', nargs='?', default='AdjNorm',
                        help='Specify the name of model (AdjNorm).')
    parser.add_argument('--adj_type', nargs='?', default='mean',
                        help='Specify the type of the adjacency (laplacian) matrix from {plain, norm, mean}.')
    parser.add_argument('--alg_type', nargs='?', default='lightgcn',
                        help='Specify the type of the graph convolutional layer from {ngcf, gcn, gcmc, lightgcn, lrgccf}.')
    parser.add_argument('--single', type=int, default=0,
                        help='single')
    parser.add_argument('--pop_reg', type=int, default=0,
                        help='whether to enable pop_reg')
    parser.add_argument('--pop_reg_decay', type=float, default=1.,
                        help='the hyperparameter for pop_reg')
    parser.add_argument('--drop_edge', type=int, default=0,
                        help='whether to turn on DropEdge')
    parser.add_argument('--drop_edge_percent', type=float, default=0.5,
                        help='the percent of DropEdge')
    parser.add_argument('--pop_penalty', type=float, default=0,
                        help='whether to enable pop_penalty(degree penalty for popoluar nodes) when conducting DropEdge ')
    parser.add_argument('--gpu_id', type=int, default=0,
                        help='0 for NAIS_prod, 1 for NAIS_concat')
    parser.add_argument('--node_dropout_flag', type=int, default=0,
                        help='0: Disable node dropout, 1: Activate node dropout')
    parser.add_argument('--node_dropout', nargs='?', default='[0.1]',
                        help='Keep probability w.r.t. node dropout (i.e., 1-dropout_ratio) for each deep layer. 1: no dropout.')
    parser.add_argument('--mess_dropout', nargs='?', default='[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]',
                        help='Keep probability w.r.t. message dropout (i.e., 1-dropout_ratio) for each deep layer. 1: no dropout.')
    parser.add_argument('--Ks', nargs='?', default='[10, 20, 30, 40, 50]',
                        help='Output sizes of every layer')
    parser.add_argument('--save_flag', type=int, default=1,
                        help='0: Disable model saver, 1: Activate model saver')
    parser.add_argument('--test_flag', nargs='?', default='part',
                        help='Specify the test type from {part, full}, indicating whether the reference is done in mini-batch')
    parser.add_argument('--report', type=int, default=0,
                        help='0: Disable performance report w.r.t. sparsity levels, 1: Show performance report w.r.t. sparsity levels')
    parser.add_argument('--skip', type=int, default=0,
                        help='skip the test')
    parser.add_argument('--r', type=float, default=0.5,
                        help='normalization coefficient')

    def _str2bool(v):
        # BUGFIX: the original used type=bool, for which argparse converts any
        # non-empty string (including "False") to True. Parse common textual
        # booleans explicitly instead; the flag still accepts a value, so the
        # command-line interface is unchanged.
        return str(v).lower() in ('1', 'true', 'yes', 'y')
    parser.add_argument('--monitor', type=_str2bool, default=False,
                        help='monitor the test evaluation')
    parser.add_argument('--negative_sample', type=int, default=0)
    parser.add_argument('--positive_sample', type=int, default=0)
    parser.add_argument('--ns', type=float, default=0,
                        help='the hyperparameter for negative sampling')
    parser.add_argument('--ppnw', type=int, default=0,
                        help='the hyperparameter for ppnw')
    parser.add_argument('--ppnw_a', type=float, default=1,
                        help='the hyperparameter for ppnw')
    parser.add_argument('--ppnw_g', type=float, default=1,
                        help='the hyperparameter for ppnw')
    parser.add_argument('--ppnw_l', type=float, default=1,
                        help='the hyperparameter for ppnw')
    parser.add_argument('--pc', type=int, default=0,
                        help='the hyperparameter for pc')
    parser.add_argument('--pc_a', type=float, default=1,
                        help='the hyperparameter for pc')
    parser.add_argument('--pc_b', type=float, default=0.5,
                        help='the hyperparameter for pc')
    return parser.parse_args()
| 6,115
| 52.182609
| 135
|
py
|
r-AdjNorm
|
r-AdjNorm-main/src/utility/helper.py
|
'''
Tensorflow Implementation of r-Adjnorm model in:
Minghao Zhao et al. Investigating Accuracy-Novelty Performance for Graph-based Collaborative Filtering. In SIGIR 2022.
@author: Minghao Zhao(zhaominghao@corp.netease.com)
'''
####################################################
# This section of code adapted from WangXiang/NGCF
###################################################
import os
import re
def txt2list(file_src):
    """Return all lines of `file_src` as a list (newline characters kept)."""
    # The original never closed the file handle; `with` guarantees closure
    # even if readlines() raises.
    with open(file_src, "r") as orig_file:
        return orig_file.readlines()
def ensureDir(dir_path):
    """Create the parent directory of the path `dir_path` if it is missing."""
    d = os.path.dirname(dir_path)
    # exist_ok avoids the check-then-create race of the original
    # exists()/makedirs pair; the `if d` guard skips bare filenames, where
    # dirname is '' and makedirs('') would raise FileNotFoundError.
    if d:
        os.makedirs(d, exist_ok=True)
def uni2str(unicode_str):
    """Collapse `unicode_str` to the str() of its ASCII-only encoding,
    removing literal newlines and surrounding whitespace."""
    ascii_bytes = unicode_str.encode('ascii', 'ignore')
    flattened = str(ascii_bytes).replace('\n', '')
    return flattened.strip()
def hasNumbers(inputString):
    """Return True when `inputString` contains at least one decimal digit."""
    return re.search(r'\d', inputString) is not None
def delMultiChar(inputString, chars):
    """Return `inputString` with every character occurring in `chars` removed."""
    result = inputString
    for unwanted in chars:
        result = result.replace(unwanted, '')
    return result
def merge_two_dicts(x, y):
    """Return a new dict holding x's entries, overridden by y's on key clashes;
    neither input is modified."""
    return {**x, **y}
def early_stopping(log_value, best_value, stopping_step, epoch, expected_order='acc', flag_step=50):
    """Track a monitored metric and decide whether training should stop.

    With expected_order='acc' the metric should increase; with 'dec' it
    should decrease. Returns (best_value, stopping_step, should_stop),
    where should_stop becomes True after `flag_step` non-improving steps.
    """
    # early stopping strategy:
    assert expected_order in ['acc', 'dec']
    improved = (log_value >= best_value) if expected_order == 'acc' else (log_value <= best_value)
    if improved:
        best_value = log_value
        stopping_step = 0
    else:
        stopping_step += 1
    should_stop = stopping_step >= flag_step
    if should_stop:
        print("Early stopping is trigger at step: {} log:{}".format(epoch, log_value))
    return best_value, stopping_step, should_stop
| 1,743
| 30.142857
| 119
|
py
|
r-AdjNorm
|
r-AdjNorm-main/src/utility/batch_test.py
|
'''
Tensorflow Implementation of r-Adjnorm model in:
Minghao Zhao et al. Investigating Accuracy-Novelty Performance for Graph-based Collaborative Filtering. In SIGIR 2022.
@author: Minghao Zhao(zhaominghao@corp.netease.com)
'''
####################################################
# This section of code adapted from WangXiang/NGCF,
# adding Novelty@K and PRU@K.
###################################################
import utility.metrics as metrics
from utility.parser import parse_args
from utility.load_data import *
import multiprocessing
import heapq
import numpy as np
cores = multiprocessing.cpu_count()
args = parse_args()
# Evaluation cutoffs, e.g. [10, 20, 30, 40, 50].
Ks = eval(args.Ks)
data_generator = Data(path=args.data_path + args.dataset, batch_size=args.batch_size)
USR_NUM, ITEM_NUM = data_generator.n_users, data_generator.n_items
N_TRAIN, N_TEST = data_generator.n_train, data_generator.n_test
BATCH_SIZE = args.batch_size
train_items = data_generator.train_items
# Flatten the per-user training lists to compute popularity statistics:
# pop_user[u] = number of training items of user u.
train_items_list = []
pop_user = np.zeros(USR_NUM)
for each in train_items.keys():
    train_items_list += train_items[each]
    pop_user[each] = len(train_items[each])
degree_rev = []
from collections import Counter
# item_cnt[i] = number of training interactions of item i.
item_cnt = Counter(train_items_list)
item_sum = len(train_items_list)
item_dis = len(set(train_items_list))+1
# pop_item[i] = max(interaction count, 1): items never seen in training get a
# floor of 1 (e.g. so later popularity-based divisions/logs stay defined).
pop_item = []
for i in range(ITEM_NUM):
    if item_cnt[i] == 0:
        pop_item.append(item_cnt[i]+1)
    else:
        pop_item.append(item_cnt[i])
pop_item = np.array(pop_item)
def ranklist_by_heapq(user_pos_test, test_items, rating, Ks):
    """Rank candidate items by score and return the binary hit vector.

    Args:
        user_pos_test: ground-truth positive items for this user.
        test_items: candidate item ids to rank.
        rating: score container indexable by item id.
        Ks: cutoff list; the top max(Ks) items are ranked.

    Returns:
        (r, auc): r[i] is 1 when the i-th ranked item is a ground-truth
        positive, 0 otherwise; auc is a 0. placeholder (not computed here).
    """
    scores = {item: rating[item] for item in test_items}
    top_items = heapq.nlargest(max(Ks), scores, key=scores.get)
    r = [1 if item in user_pos_test else 0 for item in top_items]
    auc = 0.
    return r, auc
def get_auc(item_score, user_pos_test):
    """Compute AUC from a {item: score} mapping.

    Items are sorted by score descending (sort ascending then reverse, to
    keep the original tie ordering), turned into a binary relevance vector
    against *user_pos_test*, and scored with metrics.auc.
    """
    ranked = sorted(item_score.items(), key=lambda kv: kv[1])
    ranked.reverse()
    item_sort = [pair[0] for pair in ranked]
    posterior = [pair[1] for pair in ranked]
    r = [1 if item in user_pos_test else 0 for item in item_sort]
    auc = metrics.auc(ground_truth=r, prediction=posterior)
    return auc
def ranklist_by_sorted(user_pos_test, test_items, rating, Ks):
    """Rank candidate items by score and return the hit vector plus AUC.

    Same ranking as ranklist_by_heapq, but additionally computes the AUC
    over the full candidate score dict via get_auc.
    """
    scores = {item: rating[item] for item in test_items}
    top_items = heapq.nlargest(max(Ks), scores, key=scores.get)
    r = [1 if item in user_pos_test else 0 for item in top_items]
    auc = get_auc(scores, user_pos_test)
    return r, auc
def get_performance(user_pos_test, r, auc, Ks):
    """Assemble per-cutoff metric arrays for one user.

    Returns a dict with 'precision', 'recall', 'ndcg', 'hit_ratio'
    (each a numpy array aligned with Ks) plus the scalar 'auc'.
    """
    n_pos = len(user_pos_test)
    precision = [metrics.precision_at_k(r, K) for K in Ks]
    recall = [metrics.recall_at_k(r, K, n_pos) for K in Ks]
    ndcg = [metrics.ndcg_at_k(r, K, n_pos) for K in Ks]
    hit_ratio = [metrics.hit_at_k(r, K) for K in Ks]
    return {'recall': np.array(recall), 'precision': np.array(precision),
            'ndcg': np.array(ndcg), 'hit_ratio': np.array(hit_ratio), 'auc': auc}
def test_one_user(x):
    """Evaluate ranking metrics for a single user (multiprocessing worker).

    Args:
        x: tuple (rating_row, user_id, is_test_flag) as produced by test().

    Candidates exclude the user's training items (and, for the test split,
    the validation items too). Users absent from the train/val dicts are
    treated as having no history.
    """
    rating, u, is_test_flag = x[0], x[1], x[2]
    try:
        training_items = data_generator.train_items[u]
        val_items = data_generator.val_set[u]
    except Exception:
        training_items = []
        val_items = []
    all_items = set(range(ITEM_NUM))
    if is_test_flag:
        user_pos_test = data_generator.test_set[u]
        test_items = list(all_items - set(training_items) - set(val_items))
    else:
        user_pos_test = data_generator.val_set[u]
        test_items = list(all_items - set(training_items))
    ranker = ranklist_by_heapq if args.test_flag == 'part' else ranklist_by_sorted
    r, auc = ranker(user_pos_test, test_items, rating, Ks)
    return get_performance(user_pos_test, r, auc, Ks)
def test(sess, model, adj, users_to_test, topk, flag, is_test, drop_flag=False, batch_test_flag=False):
    """Evaluate the model on `users_to_test` and aggregate ranking metrics.

    Args:
        sess: TensorFlow session used to run `model.batch_ratings`.
        model: model exposing `batch_ratings` plus feed placeholders.
        adj: adjacency matrix assigned to `model.norm_adj` before scoring.
        users_to_test: sequence of user ids to evaluate.
        topk: cutoff for the novelty / PRU / coverage statistics.
        flag: when truthy, also compute novelty@topk and PRU@topk on
            train/val-masked scores.
        is_test: True -> evaluate against the test split, False -> validation.
        drop_flag: feed zero node/message dropout when True.
        batch_test_flag: score items in chunks of BATCH_SIZE to save memory.

    Returns:
        (novelty_mean, PRU_mean, result) when `flag` is truthy, otherwise
        (0, 0, result); `result` holds precision/recall/ndcg/hit_ratio/auc
        averaged over all evaluated users.
    """
    result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),
              'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}
    model.norm_adj = adj
    if is_test and topk==20:
        print('test adj nonzero', len(model.norm_adj.nonzero()[0]))
    pool = multiprocessing.Pool(cores)
    u_batch_size = BATCH_SIZE * 2
    i_batch_size = BATCH_SIZE
    test_users = users_to_test
    n_test_users = len(test_users)
    n_user_batchs = n_test_users // u_batch_size + 1
    count = 0
    rate_test = np.array([])
    for u_batch_id in range(n_user_batchs):
        start = u_batch_id * u_batch_size
        end = (u_batch_id + 1) * u_batch_size
        user_batch = test_users[start: end]
        if batch_test_flag:
            # Score the full item set in chunks to bound memory usage.
            n_item_batchs = ITEM_NUM // i_batch_size + 1
            rate_batch = np.zeros(shape=(len(user_batch), ITEM_NUM))
            i_count = 0
            for i_batch_id in range(n_item_batchs):
                i_start = i_batch_id * i_batch_size
                i_end = min((i_batch_id + 1) * i_batch_size, ITEM_NUM)
                item_batch = range(i_start, i_end)
                if drop_flag == False:
                    i_rate_batch = sess.run(model.batch_ratings, {model.users: user_batch,
                                                              model.pos_items: item_batch})
                else:
                    i_rate_batch = sess.run(model.batch_ratings, {model.users: user_batch,
                                                              model.pos_items: item_batch,
                                                              model.node_dropout: [0.]*len(eval(args.layer_size)),
                                                              model.mess_dropout: [0.]*len(eval(args.layer_size))})
                rate_batch[:, i_start: i_end] = i_rate_batch
                i_count += i_rate_batch.shape[1]
            assert i_count == ITEM_NUM
        else:
            # Score all items for this user batch in one session run.
            item_batch = range(ITEM_NUM)
            if drop_flag == False:
                rate_batch = sess.run(model.batch_ratings, {model.users: user_batch,
                                                              model.pos_items: item_batch})
            else:
                rate_batch = sess.run(model.batch_ratings, {model.users: user_batch,
                                                              model.pos_items: item_batch,
                                                              model.node_dropout: [0.] * len(eval(args.layer_size)),
                                                              model.mess_dropout: [0.] * len(eval(args.layer_size))})
        if is_test:
            test_flag = True
        else:
            test_flag = False
        test_flag_list = [test_flag for i in range(rate_batch.shape[0])]
        user_batch_rating_uid = zip(rate_batch, user_batch, test_flag_list)
        # NOTE(review): this first pool.map result is always overwritten by
        # the second pool.map below, so the work here appears redundant.
        batch_result = pool.map(test_one_user, user_batch_rating_uid)
        #topk = 10
        if flag:
            # Mask out already-seen (train + val) items before computing the
            # novelty/PRU statistics so the top-k only contains candidates.
            for i in range(len(user_batch)):
                for item in data_generator.train_items[user_batch[i]]:
                    rate_batch[i, item] = -np.inf
                for item_ in data_generator.val_set[user_batch[i]]:
                    rate_batch[i, item_] = -np.inf
            if args.pc == 1:
                # Popularity-compensation re-ranking controlled by pc_a/pc_b.
                def norm(user_predict, M, user_item_cnt):
                    # L2 norm of each user's scores, averaged over unseen items.
                    user_predict = user_predict.copy()
                    user_predict /= (M - user_item_cnt).reshape(-1, 1)
                    user_predict[user_predict==-np.inf]=0
                    return np.linalg.norm(user_predict, axis=1)
                n = norm(rate_batch, ITEM_NUM, pop_user[user_batch])
                c = 1 / pop_item * (rate_batch * args.pc_b + 1 - args.pc_b)
                m = norm(c, ITEM_NUM, pop_user[user_batch])
                rate_batch += args.pc_a * c * (n / m).reshape(-1, 1)
        if is_test:
            test_flag = True
        else:
            test_flag = False
        test_flag_list = [test_flag for i in range(rate_batch.shape[0])]
        user_batch_rating_uid = zip(rate_batch, user_batch, test_flag_list)
        batch_result = pool.map(test_one_user, user_batch_rating_uid)
        # Collect each user's top-k item ids for the coverage/novelty stats.
        rate_test = np.append(rate_test, (-rate_batch).argsort()[:,:topk])
        count += len(batch_result)
        for re in batch_result:
            result['precision'] += re['precision']/n_test_users
            result['recall'] += re['recall']/n_test_users
            result['ndcg'] += re['ndcg']/n_test_users
            result['hit_ratio'] += re['hit_ratio']/n_test_users
            result['auc'] += re['auc']/n_test_users
    if flag:
        rate_test = rate_test.reshape(-1, topk)
        novelty = []
        degree = []
        error_cnt = 0
        # Novelty of an item is -log2(popularity / USR_NUM) / log2(USR_NUM);
        # items never seen in training are counted in error_cnt instead.
        for i in rate_test:
            for j in i:
                if j in item_cnt:
                    novelty.append(-np.log2(item_cnt[j] / USR_NUM) / np.log2(USR_NUM))
                    degree.append(item_cnt[j])
                else:
                    error_cnt += 1
        cover_ratio = len(set(rate_test.flatten()))/ITEM_NUM
        print('cover_ratio@' + str(topk), cover_ratio)
        print('degree_mean@' + str(topk), np.mean(degree))
        print('novelty@'+ str(topk), np.mean(novelty), 'error cnt', error_cnt)
        from scipy import stats
        PRU = []
        # PRU@k: negative Spearman correlation between rank position and
        # item popularity; constant popularity lists get a tiny perturbation
        # so spearmanr is defined.
        for i in rate_test:
            pop = []
            for j in i:
                pop.append(item_cnt[j])
            if sum(np.array(pop)==pop[0])==len(pop):
                pop[-1] += 1e-15
            PRU.append(-stats.spearmanr(range(topk), pop)[0])
        print('PRU@'+ str(topk), np.mean(PRU), 'length', len(PRU))
    assert count == n_test_users
    pool.close()
    if not flag:
        return 0, 0, result
    else:
        return np.mean(novelty), np.mean(PRU), result
| 10,127
| 34.044983
| 119
|
py
|
r-AdjNorm
|
r-AdjNorm-main/src/utility/metrics.py
|
'''
Tensorflow Implementation of r-Adjnorm model in:
Minghao Zhao et al. Investigating Accuracy-Novelty Performance for Graph-based Collaborative Filtering. In SIGIR 2022.
@author: Minghao Zhao(zhaominghao@corp.netease.com)
'''
####################################################
# This section of code adapted from WangXiang/NGCF,
# fixing ndcg_at_k
###################################################
import numpy as np
from sklearn.metrics import roc_auc_score
def recall(rank, ground_truth, N):
    """Fraction of ground-truth items that appear in the top-N of *rank*."""
    retrieved = set(rank[:N])
    relevant = set(ground_truth)
    return len(retrieved & relevant) / float(len(relevant))
def precision_at_k(r, k):
    """Precision@k for a binary relevance vector *r*.

    Averages the first k entries of r (nonzero means relevant).
    Asserts k >= 1; callers must supply len(r) >= k for a meaningful score.
    """
    assert k >= 1
    top = np.asarray(r)[:k]
    return np.mean(top)
def average_precision(r, cut):
    """Average precision (area under the PR curve) for binary relevance *r*.

    Precision@i is accumulated at every relevant rank i <= cut; the sum is
    normalized by min(cut, number of relevant items). Returns 0. when no
    relevant item occurs within the cutoff.
    """
    r = np.asarray(r)
    hits = [precision_at_k(r, idx + 1) for idx in range(cut) if r[idx]]
    if not hits:
        return 0.
    return np.sum(hits) / float(min(cut, np.sum(r)))
def mean_average_precision(rs):
    """Mean average precision over a collection of relevance vectors.

    Relevance is binary (nonzero is relevant).

    Bug fix: `average_precision` requires a *cut* argument, but the original
    call omitted it and raised TypeError. Each vector is now evaluated at
    its own full length, which matches the original (single-argument) intent.

    Returns:
        Mean average precision
    """
    return np.mean([average_precision(r, len(r)) for r in rs])
def dcg_at_k(r, k, method=1):
    """Discounted cumulative gain of the first k entries of *r*.

    method 0: r[0] + sum(r[i] / log2(i + 1)) for i >= 1.
    method 1: sum(r[i] / log2(i + 2)) — the standard formulation.
    Returns 0. for an empty prefix; raises ValueError for other methods.
    """
    gains = np.asfarray(r)[:k]
    if not gains.size:
        return 0.
    if method == 0:
        return gains[0] + np.sum(gains[1:] / np.log2(np.arange(2, gains.size + 1)))
    if method == 1:
        return np.sum(gains / np.log2(np.arange(2, gains.size + 2)))
    raise ValueError('method must be 0 or 1.')
def ndcg_at_k(r, k, user_test, method=1):
    """Normalized DCG@k for binary relevance *r*.

    The ideal DCG is that of a ranking with min(k, user_test) relevant
    items placed first, where *user_test* is the number of ground-truth
    positives. Returns 0. when the ideal DCG is zero.
    """
    ideal = [1 if pos < user_test else 0 for pos in range(k)]
    dcg_max = dcg_at_k(ideal, k, method)
    if not dcg_max:
        return 0.
    return dcg_at_k(r, k, method) / dcg_max
def recall_at_k(r, k, all_pos_num):
    """Recall@k: hits in the top-k divided by the total positive count."""
    top = np.asfarray(r)[:k]
    return top.sum() / all_pos_num
def hit_at_k(r, k):
    """1. when the top-k contains at least one relevant item, else 0."""
    top = np.array(r)[:k]
    return 1. if np.sum(top) > 0 else 0.
def F1(pre, rec):
    """Harmonic mean of precision and recall; 0. when both are zero."""
    denom = pre + rec
    if denom > 0:
        return (2.0 * pre * rec) / denom
    return 0.
def auc(ground_truth, prediction):
    """ROC-AUC via sklearn; falls back to 0. when the score is undefined
    (e.g. the ground truth contains only a single class)."""
    try:
        return roc_auc_score(y_true=ground_truth, y_score=prediction)
    except Exception:
        return 0.
| 2,935
| 26.185185
| 119
|
py
|
r-AdjNorm
|
r-AdjNorm-main/src/utility/load_data.py
|
'''
Tensorflow Implementation of r-Adjnorm model in:
Minghao Zhao et al. Investigating Accuracy-Novelty Performance for Graph-based Collaborative Filtering. In SIGIR 2022.
@author: Minghao Zhao(zhaominghao@corp.netease.com)
'''
####################################################
# This section of code adapted from WangXiang/NGCF
# adding dropedge and PPNW
###################################################
import numpy as np
import random as rd
import scipy.sparse as sp
from time import time
from scipy.sparse.linalg import norm
from sklearn.metrics.pairwise import cosine_similarity
from utility.parser import parse_args
# Parsed CLI options (r, ns, positive_sample, negative_sample, ppnw_a, ...)
# consumed throughout the Data class below.
args = parse_args()
class Data(object):
    """Dataset wrapper for r-AdjNorm: loads train/val/test splits and builds
    the user-item interaction matrix plus the normalized adjacency matrices
    consumed by the model.

    Expects `<path>/train.txt`, `<path>/val.txt` and `<path>/test.txt`,
    each line formatted as `uid item1 item2 ...`.
    """
    def __init__(self, path, batch_size):
        self.path = path
        self.batch_size = batch_size
        train_file = path + '/train.txt'
        test_file = path + '/test.txt'
        val_file = path + '/val.txt'
        #get number of users and items
        self.n_users, self.n_items = 0, 0
        self.n_train, self.n_test = 0, 0
        self.n_val = 0
        self.neg_pools = {}
        self.exist_users = []
        # First pass: scan all three splits to find the maximum user/item
        # ids and count interactions per split.
        with open(train_file) as f:
            for l in f.readlines():
                if len(l) > 0:
                    l = l.strip('\n').split(' ')
                    items = [int(i) for i in l[1:]]
                    uid = int(l[0])
                    self.exist_users.append(uid)
                    self.n_items = max(self.n_items, max(items))
                    self.n_users = max(self.n_users, uid)
                    self.n_train += len(items)
        with open(test_file) as f:
            for l in f.readlines():
                if len(l) > 0:
                    l = l.strip('\n')
                    try:
                        items = [int(i) for i in l.split(' ')[1:]]
                    except Exception:
                        continue
                    self.n_items = max(self.n_items, max(items))
                    self.n_test += len(items)
        with open(val_file) as f:
            for l in f.readlines():
                if len(l) > 0:
                    l = l.strip('\n')
                    try:
                        items = [int(i) for i in l.split(' ')[1:]]
                    except Exception:
                        continue
                    self.n_items = max(self.n_items, max(items))
                    self.n_val += len(items)
        # Ids are 0-based, so counts are max id + 1.
        self.n_items += 1
        self.n_users += 1
        #self.print_statistics()
        # R: binary user-item interaction matrix (train split only).
        self.R = sp.dok_matrix((self.n_users, self.n_items), dtype=np.float32)
        self.train_items, self.test_set = {}, {}
        self.val_set = {}
        # Second pass: populate R and the per-user item dictionaries.
        with open(train_file) as f_train:
            with open(test_file) as f_test:
                for l in f_train.readlines():
                    if len(l) == 0: break
                    l = l.strip('\n')
                    items = [int(i) for i in l.split(' ')]
                    uid, train_items = items[0], items[1:]
                    for i in train_items:
                        self.R[uid, i] = 1.
                        # self.R[uid][i] = 1
                    self.train_items[uid] = train_items
                for l in f_test.readlines():
                    if len(l) == 0: break
                    l = l.strip('\n')
                    try:
                        items = [int(i) for i in l.split(' ')]
                    except Exception:
                        continue
                    uid, test_items = items[0], items[1:]
                    self.test_set[uid] = test_items
        with open(val_file) as f_val:
            for l in f_val.readlines():
                if len(l) == 0: break
                l = l.strip('\n')
                try:
                    items = [int(i) for i in l.split(' ')]
                except Exception:
                    continue
                uid, val_items = items[0], items[1:]
                self.val_set[uid] = val_items
        # Item popularity counts and the popularity-weighted negative
        # sampling list (each item repeated count**args.ns times).
        train_items_list = []
        for each in self.train_items.keys():
            train_items_list += self.train_items[each]
        from collections import Counter
        self.item_cnt = Counter(train_items_list)
        self.sample_list = []
        for i in range(self.n_items):
            self.sample_list += [i] * int(self.item_cnt[i]**args.ns)
    def get_adj_mat(self):
        """Return the adjacency matrices built by create_adj_mat()."""
        adj_mat, norm_adj_mat, mean_adj_mat, item_pop_rev, d_adj = self.create_adj_mat()
        return adj_mat, norm_adj_mat, mean_adj_mat, item_pop_rev, d_adj
    def get_sample_adj_mat(self, percent, pop_penalty):
        """Build a normalized adjacency matrix from an edge-dropped copy of R.

        `percent` of the edges are preserved; when `pop_penalty` is truthy
        edges are sampled with probability proportional to the bi-normalized
        edge weights, otherwise uniformly at random (dropedge).
        """
        adj_mat = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)
        adj_mat = adj_mat.tolil()
        R = self.R.tocoo()#self.R.tolil()
        def normalized_adj_single(adj, bi=False):
            # bi=False: symmetric D^-1/2 A D^-1/2 normalization.
            # bi=True: row exponent is -0.0 (all ones), so effectively only
            # column (item-degree) normalization is applied.
            # NOTE(review): -0. may be a typo for a nonzero exponent — confirm.
            if not bi:
                rowsum = np.array(adj.sum(1))
                rowsum[rowsum==0.] = np.inf
                d_inv = np.power(rowsum, -0.5).flatten()
                d_mat_inv = sp.diags(d_inv)
                norm_adj = d_mat_inv.dot(adj).dot(d_mat_inv)
                return norm_adj.tocoo()
            else:
                rowsum = np.array(adj.sum(1))
                colsum = np.array(adj.sum(0))
                rowsum[rowsum==0.] = np.inf
                colsum[colsum==0.] = np.inf
                d_inv = np.power(rowsum, -0.).flatten()
                d_mat_inv = sp.diags(d_inv)
                d_inv_ = np.power(colsum, -1.).flatten()
                d_mat_inv_ = sp.diags(d_inv_)
                norm_adj = d_mat_inv.dot(adj).dot(d_mat_inv_)
                return norm_adj.tocoo()
        def randomedge_sampler(train_adj, percent):
            """
            Randomly drop edge and preserve percent% edges.
            """
            "Opt here"
            nnz = train_adj.nnz
            #perm = np.random.permutation(nnz)
            preserve_nnz = int(nnz*percent)
            if pop_penalty:
                # Popularity-penalized sampling: keep edges with probability
                # proportional to their bi-normalized weight.
                weights = normalized_adj_single(train_adj, bi=True).data
                norm_weights = weights / weights.sum()
                nnz = len(norm_weights)
                preserve_nnz = int(nnz*percent)
                perm = np.random.choice(nnz, preserve_nnz, replace=False, p=norm_weights)
            else:
                perm = np.random.permutation(nnz)
                perm = perm[:preserve_nnz]
            r_adj = sp.coo_matrix((train_adj.data[perm],
                                   (train_adj.row[perm],
                                    train_adj.col[perm])),
                                  shape=train_adj.shape)
            return r_adj
        R = randomedge_sampler(R, percent)
        adj_mat[:self.n_users, self.n_users:] = R
        adj_mat[self.n_users:, :self.n_users] = R.T
        adj_mat = adj_mat.todok()
        mean_adj_mat = normalized_adj_single(adj_mat)
        return mean_adj_mat
    def create_adj_mat(self):
        """Build the full bipartite adjacency and its r-AdjNorm variants.

        Returns (adj, norm_adj, mean_adj, item_pop_rev, D^-1) as CSR
        matrices, where the normalization uses D^(r-1) A D^(-r) with the
        exponent r taken from args.r.
        """
        t1 = time()
        adj_mat = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)
        adj_mat = adj_mat.tolil()
        R = self.R.tolil()
        print('n_interactions', len(R.nonzero()[0]))
        adj_mat[:self.n_users, self.n_users:] = R
        adj_mat[self.n_users:, :self.n_users] = R.T
        adj_mat = adj_mat.todok()
        print('already create adjacency matrix', adj_mat.shape, time() - t1)
        # Despite the name, item_pop_rev is the raw item degree (the inverse
        # form is commented out).
        item_pop = np.array(adj_mat.sum(1)).flatten()[self.n_users:]
        item_pop_rev = item_pop#1 / (item_pop+1)
        t2 = time()
        def normalized_adj_single(adj, verbose= False):
            # r-AdjNorm: D^(r-1) A D^(-r); r=0.5 recovers the symmetric norm.
            rowsum = np.array(adj.sum(1))
            r = args.r
            d_inv = np.power(rowsum, r-1).flatten()
            d_inv[np.isinf(d_inv)] = 0.
            d_mat_inv = sp.diags(d_inv)
            d_inv_ = np.power(rowsum, -r).flatten()
            d_inv_[np.isinf(d_inv_)] = 0.
            d_mat_inv_ = sp.diags(d_inv_)
            norm_adj = d_mat_inv.dot(adj).dot(d_mat_inv_)
            return norm_adj.tocoo()
        def check_adj_if_equal(adj):
            # Debug helper: dense D^-1 A for manual comparison (unused here).
            dense_A = np.array(adj.todense())
            degree = np.sum(dense_A, axis=1, keepdims=False)
            temp = np.dot(np.diag(np.power(degree, -1)), dense_A)
            print('check normalized adjacency matrix whether equal to this laplacian matrix.')
            return temp
        norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))
        mean_adj_mat = normalized_adj_single(adj_mat, verbose=True)
        print('already normalize adjacency matrix', time() - t2)
        rowsum = np.array(adj_mat.sum(1))
        d_inv = np.power(rowsum, -1.).flatten()
        d_inv[np.isinf(d_inv)] = 0.
        d_mat_inv = sp.diags(d_inv)
        return adj_mat.tocsr(), norm_adj_mat.tocsr(), mean_adj_mat.tocsr(), item_pop_rev, d_mat_inv.tocsr()
    def negative_pool(self):
        """Refresh the 100-item random negative pool for every train user."""
        t1 = time()
        for u in self.train_items.keys():
            neg_items = list(set(range(self.n_items)) - set(self.train_items[u]))
            pools = [rd.choice(neg_items) for _ in range(100)]
            self.neg_pools[u] = pools
        print('refresh negative pools', time() - t1)
    def sample(self):
        """Draw one BPR training batch: (users, pos_items, neg_items).

        Users are sampled without replacement when batch_size <= n_users.
        Positive/negative item sampling optionally applies popularity-based
        schemes controlled by args.positive_sample / args.negative_sample.
        """
        if self.batch_size <= self.n_users:
            users = rd.sample(self.exist_users, self.batch_size)
        else:
            users = [rd.choice(self.exist_users) for _ in range(self.batch_size)]
        def sample_pos_items_for_u(u, num):
            # Rejection-sample positives; with args.positive_sample == 1 an
            # item survives with probability sqrt(1/count), down-weighting
            # popular items.
            pos_items = self.train_items[u]
            n_pos_items = len(pos_items)
            pos_batch = []
            while True:
                if len(pos_batch) == num: break
                if args.positive_sample == 1:
                    while True:
                        tmp = np.random.randint(low=0, high=n_pos_items, size=1)[0]
                        if np.random.rand() > 1- np.sqrt(1/self.item_cnt[pos_items[tmp]]):
                            pos_i_id = pos_items[tmp]
                            break
                else:
                    pos_id = np.random.randint(low=0, high=n_pos_items, size=1)[0]
                    pos_i_id = pos_items[pos_id]
                if pos_i_id not in pos_batch:
                    pos_batch.append(pos_i_id)
            return pos_batch
        def sample_neg_items_for_u(u, num):
            # Negatives come from the popularity-weighted sample_list when
            # args.negative_sample == 1, otherwise uniformly at random.
            neg_items = []
            while True:
                if len(neg_items) == num: break
                if args.negative_sample == 1:
                    neg_id = self.sample_list[np.random.randint(low=0, high=len(self.sample_list), size=1)[0]]
                    assert neg_id <= self.n_items
                else:
                    neg_id = np.random.randint(low=0, high=self.n_items,size=1)[0]
                if neg_id not in self.train_items[u] and neg_id not in neg_items:
                    neg_items.append(neg_id)
            return neg_items
        def choice(probs):
            # Roulette-wheel selection over a probability vector (unused here).
            x = np.random.rand()
            cum = 0
            for i,p in enumerate(probs):
                cum += p
                if x < cum:
                    break
            return i
        def sample_neg_items_for_u_from_pools(u, num):
            # Alternative negative sampler backed by neg_pools (unused here).
            neg_items = list(set(self.neg_pools[u]) - set(self.train_items[u]))
            return rd.sample(neg_items, num)
        pos_items, neg_items = [], []
        for u in users:
            pos_items += sample_pos_items_for_u(u, 1)
            neg_items += sample_neg_items_for_u(u, 1)
        return users, pos_items, neg_items
    def get_num_users_items(self):
        """Return (n_users, n_items)."""
        return self.n_users, self.n_items
    def print_statistics(self):
        """Print dataset size and sparsity statistics."""
        print('n_users=%d, n_items=%d' % (self.n_users, self.n_items))
        # NOTE(review): the next line is a bare expression — the original
        # print() call appears to have been dropped, so it has no effect.
        ('n_interactions=%d' % (self.n_train + self.n_test))
        print('n_train=%d, n_val=%d, n_test=%d, sparsity=%.5f' % (self.n_train, self.n_val, self.n_test, (self.n_train + self.n_val + self.n_test)/(self.n_users * self.n_items)))
    def get_sparsity_split(self):
        """Load the cached user sparsity split, creating and persisting it
        (as `<path>/sparsity.split`) on the first call."""
        try:
            split_uids, split_state = [], []
            lines = open(self.path + '/sparsity.split', 'r').readlines()
            for idx, line in enumerate(lines):
                if idx % 2 == 0:
                    split_state.append(line.strip())
                    print(line.strip())
                else:
                    split_uids.append([int(uid) for uid in line.strip().split(' ')])
            print('get sparsity split.')
        except Exception:
            split_uids, split_state = self.create_sparsity_split()
            f = open(self.path + '/sparsity.split', 'w')
            for idx in range(len(split_state)):
                f.write(split_state[idx] + '\n')
                f.write(' '.join([str(uid) for uid in split_uids[idx]]) + '\n')
            print('create sparsity split.')
        return split_uids, split_state
    def create_sparsity_split(self):
        """Partition test users into (nominally four) groups of roughly
        equal interaction mass, ordered by per-user interaction count."""
        all_users_to_test = list(self.test_set.keys())
        user_n_iid = dict()
        # generate a dictionary to store (key=n_iids, value=a list of uid).
        for uid in all_users_to_test:
            train_iids = self.train_items[uid]
            test_iids = self.test_set[uid]
            n_iids = len(train_iids) + len(test_iids)
            if n_iids not in user_n_iid.keys():
                user_n_iid[n_iids] = [uid]
            else:
                user_n_iid[n_iids].append(uid)
        split_uids = list()
        # split the whole user set into four subset.
        temp = []
        count = 1
        fold = 4
        n_count = (self.n_train + self.n_test)
        n_rates = 0
        split_state = []
        for idx, n_iids in enumerate(sorted(user_n_iid)):
            temp += user_n_iid[n_iids]
            n_rates += n_iids * len(user_n_iid[n_iids])
            n_count -= n_iids * len(user_n_iid[n_iids])
            if n_rates >= count * 0.25 * (self.n_train + self.n_test):
                split_uids.append(temp)
                state = '#inter per user<=[%d], #users=[%d], #all rates=[%d]' %(n_iids, len(temp), n_rates)
                split_state.append(state)
                print(state)
                temp = []
                n_rates = 0
                fold -= 1
            if idx == len(user_n_iid.keys()) - 1 or n_count == 0:
                split_uids.append(temp)
                state = '#inter per user<=[%d], #users=[%d], #all rates=[%d]' % (n_iids, len(temp), n_rates)
                split_state.append(state)
                print(state)
        return split_uids, split_state
    def cosin(self):
        """Dense item-item cosine similarity matrix from R (diagonal forced
        to 1). NOTE(review): O(n_items^2) memory — confirm dataset scale."""
        feature_vectors = self.R.T
        similarities = cosine_similarity(feature_vectors)
        for i in range(similarities.shape[0]):
            similarities[i,i] = 1.
        return similarities
    def get_ppnw(self):
        """Compute PPNW statistics from the interaction matrix.

        Returns (theta_u, theta_i, theta_std_u2, theta_i_p): per-user mean
        and variance of the log-popularity terms, the per-item theta, and
        its min-max-normalized power theta_i**args.ppnw_a.
        """
        ui_mat = self.R.tocoo()
        colsum = np.array(ui_mat.sum(0)).flatten()
        colsum[colsum==0.]=1#divide 0 error
        rowsum = np.array(ui_mat.sum(1)).flatten()
        rowsum[rowsum==0.]=1#divide 0 error
        all_sum = ui_mat.sum()
        theta_i = np.log(all_sum) - np.log(colsum)
        d_inv = np.power(colsum, -1)
        d_inv[np.isinf(d_inv)] = 0.
        d_inv_ = np.power(rowsum, -1)
        d_inv_[np.isinf(d_inv_)] = 0.
        # log1p trick: stores log(all_sum / colsum) on the nonzero entries.
        theta_i_mat = all_sum * ui_mat.dot(sp.diags(d_inv))
        theta_i_mat.data -= 1
        theta_i_mat = theta_i_mat.log1p()
        theta_u = np.array((sp.diags(d_inv_).dot(theta_i_mat)).sum(1)).flatten()
        theta_i_mat.data -= theta_u[np.nonzero(theta_i_mat)[0]]
        theta_i_mat.data = np.power(theta_i_mat.data, 2)
        theta_std_u2 = np.array((sp.diags(d_inv_).dot(theta_i_mat)).sum(1)).flatten()
        theta_i_z = (theta_i - np.min(theta_i)) / (np.max(theta_i)-np.min(theta_i))
        theta_i_p = np.power(theta_i_z, args.ppnw_a)
        return theta_u, theta_i, theta_std_u2, theta_i_p
| 15,834
| 36.434988
| 178
|
py
|
ec-local-rings
|
ec-local-rings-main/zfx_fast.py
|
from itertools import combinations_with_replacement
from time import time
from math import factorial
class Monom:
    """A monomial in the Weierstrass coefficients a_i and the variables X, Z.

    `a` holds the six exponents of (a0, a1, a2, a3, a4, a6); `x` and `z`
    are the exponents of X and Z. Equality and hashing ignore the a0
    exponent (only a[1:] is compared).
    """
    def __init__(self, a=(0, 0, 0, 0, 0, 0), x=0, z=0):
        self.a = a  # len(a) == 6 !!!
        self.x = x
        self.z = z
    def get_deg(self):
        """Total degree, counting Z with weight 3: deg(X) + 3*deg(Z)."""
        return self.x + 3 * self.z
    def get_sign(self):
        """Sign of the monomial: -1 when the a1+a3 exponent sum is odd."""
        return -1 if (self.a[1] + self.a[3]) % 2 else 1
    def step(self):
        """Substitute one factor of Z via
        z = a0*x^3 - a1*xz + a2*x^2*z - a3*z^2 + a4*xz^2 + a6*z^3,
        returning a list of (Monom, coefficient) pairs (coefficient 1;
        signs are tracked separately by get_sign)."""
        if self.z == 0:
            return [(self, 1)]
        # (x, z) degree increments contributed by each substitution term.
        increments = [(3, 0), (1, 1), (2, 1), (0, 2), (1, 2), (0, 3)]
        out = []
        for idx, (dx, dz) in enumerate(increments):
            bumped = tuple(e + 1 if j == idx else e for j, e in enumerate(self.a))
            out.append((Monom(a=bumped, x=self.x + dx, z=self.z - 1 + dz), 1))
        return out
    def __str__(self):
        labels = ['a0', 'a1', 'a2', 'a3', 'a4', 'a6']  # magma
        # labels = ['a_0','a_1','a_2','a_3','a_4','a_6']  # latex
        pieces = []
        for i in range(1, 6):
            exp = self.a[i]
            if exp == 1:
                pieces.append(labels[i])
            elif exp > 1:
                pieces.append(f'{labels[i]}^{exp}')
        if self.x == 1:
            pieces.append('X')
        elif self.x > 1:
            pieces.append(f'X^{self.x}')
        if self.z == 1:
            pieces.append('Z')
        elif self.z > 1:
            pieces.append(f'Z^{self.z}')
        return '*'.join(pieces)
    def __eq__(self, other):
        return (self.a[1:], self.x, self.z) == (other.a[1:], other.x, other.z)
    def __hash__(self):
        return hash((self.a[1:], self.x, self.z))
def main(k: int = 30):
    """Expand z as a power series in x (mod X^k) by repeated substitution
    and write the result as a Magma-loadable file `zfx_stored_{k}.magma`.

    k: nilpotence degree of eps — monomials of total degree > k are pruned.
    """
    final = {}
    m = Monom(z=1)
    q = {}
    q[m] = 1
    # NOTE(review): this is an f-string, so `{-1}` is evaluated and renders
    # as `pi^-1(0)` (without braces) in the generated header — confirm that
    # is the intended output.
    intro = f"""// z coordinate as a function of x in pi^{-1}(0)
// Usage:
/*
k := {k}; // Nilpotence degree of eps
R<a1, a2, a3, a4, a6, X> := PolynomialRing(Integers(), 6);
I := ideal<R | X^k>;
Rk := R/I;
load "zfx_stored.magma";
F := Rk!F;
*/
"""
    cnt = 0
    t1 = time()
    # Worklist loop: pop a monomial still containing Z, substitute once,
    # and either finalize (z exponent 0) or re-queue the results.
    while q != {}:
        # pop_m, pop_c = q.popitem() # DFS - No pruning!!
        pop_m = next(iter(q)) # BFS - Much faster
        pop_c = q.pop(pop_m)
        cnt += 1
        if cnt == 10000:
            # Periodic progress logging.
            t2 = time()
            print(f'{len(q) = } {len(final) = } t = {round(t2-t1, 2)}')
            # print(f'deg = {pop_m.get_deg()} {pop_c = } sample = {str(pop_m)}')
            cnt = 0
        # Compute the step
        new_mon = pop_m.step()
        for m, c in new_mon:
            # m is the monomial, c the coefficient
            # High degree
            if m.get_deg() > k:
                continue
            # No z
            if m.z == 0:
                final[m] = final.get(m, 0) + pop_c * c
                continue
            q[m] = q.get(m, 0) + pop_c * c
    # Order printable strings by degree
    degs = {}
    for i in final:
        assert i.z == 0
        deg = i.get_deg()
        sig = {1:'+',-1:'-'}[i.get_sign()]
        coeff = final[i]
        mon = str(i)
        degs[deg] = degs.get(deg, '') + f'{sig}{coeff}*{mon}'
    # Write on file
    with open(f'zfx_stored_{k}.magma', 'w') as fh:
        fh.write(intro)
        fh.write('F := \n')
        for d in degs:
            fh.write(f'\t{degs[d]}\n')
        fh.write(';')
# Script entry point: generates zfx_stored_30.magma.
if __name__ == '__main__':
    k = 30 # Nilpotence degree of eps
    main(k=k)
| 3,079
| 20.690141
| 78
|
py
|
ec-local-rings
|
ec-local-rings-main/zfxred_fast.py
|
from itertools import combinations_with_replacement
from time import time
from math import factorial
class Monom:
    """A monomial in (a0, A, B) and the variables X, Z for the reduced
    Weierstrass form z = a0*x^3 + A*x*z^2 + B*z^3.

    `a` holds the three exponents of (a0, A, B); `x` and `z` are the
    exponents of X and Z. Equality and hashing ignore the a0 exponent.
    """
    def __init__(self, a=(0, 0, 0), x=0, z=0):
        # a = (a_0, A, B)
        self.a = a  # len(a) == 3 !!!
        self.x = x
        self.z = z
    def get_deg(self):
        """Total degree, counting Z with weight 3: deg(X) + 3*deg(Z)."""
        return self.x + 3 * self.z
    def step(self):
        """Substitute one factor of Z via z = a0*x^3 + A*x*z^2 + B*z^3,
        returning a list of (Monom, coefficient) pairs (coefficient 1)."""
        if self.z == 0:
            return [(self, 1)]
        # (x, z) degree increments contributed by each substitution term.
        increments = [(3, 0), (1, 2), (0, 3)]
        out = []
        for idx, (dx, dz) in enumerate(increments):
            bumped = tuple(e + 1 if j == idx else e for j, e in enumerate(self.a))
            out.append((Monom(a=bumped, x=self.x + dx, z=self.z - 1 + dz), 1))
        return out
    def __str__(self):
        labels = ['a0', 'A', 'B']  # magma
        # labels = ['a_0','A','B']  # latex
        pieces = []
        for i in range(1, 3):
            exp = self.a[i]
            if exp == 1:
                pieces.append(labels[i])
            elif exp > 1:
                pieces.append(f'{labels[i]}^{exp}')
        if self.x == 1:
            pieces.append('X')
        elif self.x > 1:
            pieces.append(f'X^{self.x}')
        if self.z == 1:
            pieces.append('Z')
        elif self.z > 1:
            pieces.append(f'Z^{self.z}')
        return '*'.join(pieces)
    def __eq__(self, other):
        return (self.a[1:], self.x, self.z) == (other.a[1:], other.x, other.z)
    def __hash__(self):
        return hash((self.a[1:], self.x, self.z))
def main(k: int = 250):
    """Expand z as a power series in x (mod X^k) for the reduced form and
    write the result as a Magma-loadable file `zfxred_stored_{k}.magma`.

    k: nilpotence degree of eps — monomials of total degree > k are pruned.
    All coefficients are positive in the reduced form, so no sign tracking
    is needed when printing.
    """
    final = {}
    m = Monom(z=1)
    q = {}
    q[m] = 1
    # NOTE(review): f-string — `{-1}` is evaluated and renders as `pi^-1(0)`
    # in the generated header; confirm that is the intended output.
    intro = f"""// z coordinate as a function of x in pi^{-1}(0) in reduced form
// Usage:
/*
k := {k}; // Nilpotence degree of eps
R<A, B, X> := PolynomialRing(Integers(), 3);
I := ideal<R | X^k>;
Rk := R/I;
load "zfx_reduced.magma";
F := Rk!F;
*/
"""
    cnt = 0
    t1 = time()
    # Worklist loop: pop a monomial still containing Z, substitute once,
    # and either finalize (z exponent 0) or re-queue the results.
    while q != {}:
        # pop_m, pop_c = q.popitem() # DFS - No pruning!!
        pop_m = next(iter(q)) # BFS - Much faster
        pop_c = q.pop(pop_m)
        cnt += 1
        if cnt == 10000:
            # Logging stuff
            t2 = time()
            print(f'{len(q) = } {len(final) = } t = {round(t2-t1, 2)}')
            # print(f'deg = {pop_m.get_deg()} {pop_c = } sample = {str(pop_m)}')
            cnt = 0
        # Compute the step
        new_mon = pop_m.step()
        for m, c in new_mon:
            # m is the monomial, c the coefficient
            # High degree
            if m.get_deg() > k:
                continue
            # No z
            if m.z == 0:
                final[m] = final.get(m, 0) + pop_c * c
                continue
            q[m] = q.get(m, 0) + pop_c * c
    # Order printable strings by degree
    degs = {}
    for i in final:
        assert i.z == 0
        deg = i.get_deg()
        # sig = '+' # <-- always positive
        coeff = final[i]
        mon = str(i)
        if coeff != 1:
            degs[deg] = degs.get(deg, '') + f'+{coeff}*{mon}'
        else:
            degs[deg] = degs.get(deg, '') + f'+{mon}'
    debug_step = 200
    next_debug = 200 # Log every time 200 degrees are loaded in magma
    # Write on file; split F into chunks with progress prints so Magma can
    # report loading progress every `debug_step` degrees.
    with open(f'zfxred_stored_{k}.magma', 'w') as fh:
        fh.write(intro)
        fh.write('F := \n')
        for d in degs:
            if d > next_debug:
                fh.write(f';\nprint "Loaded {next_debug} degrees";\nF +:= \n')
                next_debug += debug_step
            fh.write(f'\t{degs[d]}\n')
        fh.write(';')
# Script entry point: generates zfxred_stored_300.magma.
if __name__ == '__main__':
    k = 300 # Nilpotence degree of eps
    main(k=k)
| 3,213
| 20.284768
| 78
|
py
|
ChromaStarPy
|
ChromaStarPy-master/CSGasEst.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 7 11:25:12 2019
@author: Philip D. Bennett
Port from FORTRAN to Python: Ian Short
"""
import math
import numpy
#from scipy.linalg.blas import daxpy
#from scipy.linalg.blas import ddot
#from scipy.linalg.blas import dscal
#from scipy.linalg.blas import idamax
#import Documents.ChromaStarPy.GAS.BlockData
#from Documents.ChromaStarPy.GAS.GsRead import gsread
import CSBlockData
#import GsRead
import CSGsRead2
def ten(xdum):
    """Return 10**xdum computed as exp(ln(10) * xdum).

    Port of the FORTRAN TEN helper; keeps the original truncated ln(10)
    constant (2.302585093) so results match the legacy code bit-for-bit.
    """
    return math.exp(2.302585093e0 * xdum)
def isign(a, b):
    """FORTRAN ISIGN: the magnitude of *a* carrying the sign of *b*.

    Matches the original branch table exactly: b == 0 counts as
    non-negative (isign(a, 0) == |a|), and a == 0 is returned unchanged
    (avoiding a float -0.0).
    """
    if b >= 0:
        return abs(a)
    if a == 0:
        return a
    return -abs(a)
#def gasest(isolv, temp, pt, peIn):
def gasest(isolv, temp, pt):
"""
#c
#c cis: Inputs: isolv, temp, pt, pe
#c cis: Ouput: p, neq ??
#
#c
#c GASEST: Returns an estimate of the fractional abundances of
#c each chemical species for a given T, P, and composition.
#c ISOLV=1: Calculate initial estimates only for species with
#c IPR=1, ie. major species.
#c =2: Calculate initial estimates for species with IPR=1
#c or 2, ie. major and minor constituents.
#c Initial estimates are not calculated for IPR=3 species since
#c these are never needed.
#c
"""
#Try this:
#global pi, sbcon, kbol, cvel, gcon, hpl, hmass, t0, everg # /consts/
global kbol, hmass, t0 # /consts/
global name, ip, comp, awt, nspec, natom, itab, ntab, indx, iprint, gsinit, print0 #/gasp/
global ipr, nch, nel, ntot, nat, zat, neut, idel, indsp, indzat, iat, natsp, iatsp #/gasp2/
global nlin1, lin1, linv1, nlin2, lin2, linv2 #/lin/
global logk, logwt, it, kt, type0 #equil
#c
#c
t0 = CSBlockData.t0
#ip = [0.0e0 for i in range(150)]
#ip = GsRead.ip
ip = CSGsRead2.ip
#comp = [0.0e0 for i in range(40)]
#comp = GsRead.comp
comp = CSGsRead2.comp
#awt = [0.0e0 for i in range(150)]
#itab = [0 for i in range(83)]
itab = CSBlockData.itab
#ntab = [0 for i in range(5)]
#indx = [ [ [ [ [0 for i in range(2)] for j in range(5) ] for k in range(7) ] for l in range(26) ] for m in range(4) ]
#indx = GsRead.indx
indx = CSGsRead2.indx
#name = [' ' for i in range(150)]
#gsinit = False
#print0 = False
#c
#ipr = [0 for i in range(150)]
#ipr = GsRead.ipr
ipr = CSGsRead2.ipr
#nch = [0 for i in range(150)]
#nch = GsRead.nch
nch = CSGsRead2.nch
#nel = [0 for i in range(150)]
#ntot = [0 for i in range(150)]
#nat = [ [0 for i in range(150)] for j in range(5) ]
#zat = [ [0 for i in range(150)] for j in range(5) ]
#zat = GsRead.zat
zat = CSGsRead2.zat
#neut = [0 for i in range(150)]
#neut = GsRead.neut
neut = CSGsRead2.neut
#idel = [0 for i in range(150)]
#idel = GsRead.idel
idel = CSGsRead2.idel
#indsp = [0 for i in range(40)]
#indsp = GsRead.indsp
indsp = CSGsRead2.indsp
#indzat = [0 for i in range(100)]
#iat = [0 for i in range(150)]
#iat = GsRead.iat
iat = CSGsRead2.iat
#natsp = [0 for i in range(40)]
#iatsp = [ [0 for i in range(40)] for j in range(40) ]
#c
#lin1 = [0 for i in range(40)]
#lin2 = [0 for i in range(40)]
#linv1 = [0 for i in range(40)]
#linv2 = [0 for i in range(40)]
#natom = GsRead.natom
natom = CSGsRead2.natom
#nspec = GsRead.nspec
nspec = CSGsRead2.nspec
#nlin1 = GsRead.nlin1
nlin1 = CSGsRead2.nlin1
#nlin2 = GsRead.nlin2
nlin2 = CSGsRead2.nlin2
#c
#logk = [ [0.0e0 for i in range(150)] for j in range(5) ]
#logwt = [0.0e0 for i in range(150)]
#logk = GsRead.logk
logk = CSGsRead2.logk
#logwt = GsRead.logwt
logwt = CSGsRead2.logwt
it = [0.0e0 for i in range(150)]
kt = [0.0e0 for i in range(150)]
#type0 = [0 for i in range(150)]
#type0 = GsRead.type0
type0 = CSGsRead2.type0
#c
p = [0.0e0 for i in range(40)]
logt = 0.0e0
logit = 0.0e0
logkt = 0.0e0
ipeff = 0.0e0
imp = 0.0e0
ihp = 0.0e0
ihm = 0.0e0
icp = 0.0e0
inp = 0.0e0
iop = 0.0e0
isip = 0.0e0
isp = 0.0e0
iclm = 0.0e0
iscp = 0.0e0
itip = 0.0e0
ivp = 0.0e0
iyp = 0.0e0
izrp = 0.0e0
kh2 = 0.0e0
kch = 0.0e0
koh = 0.0e0
knh = 0.0e0
kco = 0.0e0
kn2 = 0.0e0
kh2o = 0.0e0
ksio = 0.0e0
ksis = 0.0e0
ksih = 0.0e0
khs = 0.0e0
kh2s = 0.0e0
khcl = 0.0e0
ksco = 0.0e0
ksco2 = 0.0e0
ktio = 0.0e0
kvo = 0.0e0
kyo = 0.0e0
kyo2 = 0.0e0
kzro = 0.0e0
kzro2 = 0.0e0
#izmet = [1, 2, 6, 11, 12, 13, 14, 19, 20, 26]
izmet = [0, 1, 5, 10, 11, 12, 13, 18, 19, 25]
nummet = 10
mxspec = 150
#c
#c Calculate equilibrium constants for each species in table
#c N.B. Freeze the chemical equilibrium for T < 1200K.
#c
t = temp
if (t < 1200.0e0):
t = 1200.0e0
th = t0/t
logt = 2.5e0*math.log10(t)
for n in range(nspec):
if (ipr[n] <= 2):
ityp = type0[n]
nq = nch[n]
ich = isign(1, nq)
if ( (ityp == 3) or (ityp == 4) ):
kt[n] = kt[neut[n]]
if ( ((nch[n] - nch[n-1]) != ich) or (nch[n-1] == 0) ):
logit = 0.0e0
logit = logit + ich*(-th*ip[n] + logt + logwt[n] - 0.48e0)
it[n] = ten(logit)
elif (ityp == 2):
logkt = (((logk[4][n]*th + logk[3][n])*th + logk[2][n])*th + logk[1][n])*th + logk[0][n]
kt[n] = ten(logkt)
it[n] = 1.0e0
else:
kt[n] = 1.0e0
it[n] = 1.0e0
kt[mxspec-1] = 1.0e0
it[mxspec-1] = 1.0e0
#c
#c ISOLV=1: Calculate initial estimates of major species
#c and for a fictitous electron donor Z as well as Pe
#c ISOLV=2: Calculate initial estimates of both major and minor
#c species as well as for pe.
#c
jh = iat[indx[1][1][0][0][0]]
comph = comp[jh]
ihp = it[indx[2][1][0][0][0]]
dhp = idel[indx[2][1][0][0][0]]
kh2 = kt[indx[1][1][1][0][0]]
dh2 = idel[indx[1][1][1][0][0]]
#print("jh ", jh, " comph ", comph, " kh2 ", kh2, " dh2 ", dh2)
peh = 0.0e0
if (dhp != 0.0e0):
term = (1.0e0 + comph)*ihp
rat = -4.0e0*comph*ihp*pt/term/term
omrat = 1.0e0 - rat
if (omrat < 0.0e0):
omrat = 0.0e0
if (abs(rat) >= 1.0e-10):
peh = (-term + abs(term)*math.sqrt(omrat))/2.0e0
else:
peh = comph*ihp*pt/term
ipeff = 7.3e0
imp = ten(-ipeff*th + logt - 0.48e0)
#c
#c Estimate PH2 since Pd = PH + PH2 in the cool temperature
#c limit where the metals provide most of the electrons. We
#c then use this Pd value to estimate this electron pressure.
#c
ph2 = 0.0e0
if (dh2 != 0.0e0):
fact = 2.0e0 - comph
terma = fact*fact
termb = 2.0e0*comph*pt*fact + kh2
fact2 = comph*pt
termc = fact2*fact2
rat = 4.0e0*terma*termc/termb/termb
omrat = 1.0e0 - rat
if (omrat < 0.0e0):
omrat = 0.0e0
ph2 = termb*(1.0e0 - math.sqrt(omrat))/2.0e0/terma
#c
#c Include metals with low ionization potential in initial guess
#c Na (Z=11), Mg (Z=12), Al (Z=13), K (Z=19), Ca (Z=20), Fe(Z=26)
#c also Si (Z=14)
#c
compm = 0.0e0
for i in range(2, nummet):
ind = itab[izmet[i]]
j = iat[indx[1][ind][0][0][0]]
compm = compm + comp[j]*idel[indx[2][ind][0][0][0]]
pem2 = imp*imp + 4.0e0*compm*(pt + ph2)*imp
if (pem2 < 0.0e0):
pem2 = 0.0e0
pem = (math.sqrt(pem2) - imp)/2.0e0
#c
#c Estimate total electron pressure
#c
pe0 = max(peh, pem)
#print("peh ", peh, " pem ", pem, " pe0 ", pe0)
#c
#c Having obtained a crude estimate of electron pressure,
#c we now use a linearization approach to obtain a good value.
#c
firstTime = True
neit = 0
#215
#sum1 = 0.0e0
#sum2 = 0.0e0
#pd = pt + ph2 - pe0
#dpe = (pd*sum1 - pe0)/(1.0e0 + sum1 + pd*sum2)
#pe0 = pe0 + dpe
dpe = 1.1e-3 * pe0 #initial dummy value
#print("pt ", pt, " pe0 ", pe0, " peh ", peh, " pem ", pem)
while( ( (neit <= 15) and (abs(pe0/pt) > 1.0e-20) and (abs(dpe/pe0) > 1.0e-3) ) or firstTime == True):
firstTime = False
neit = neit + 1
sum1 = 0.0e0
sum2 = 0.0e0
#c
#c Consider H, He, C, Na, Mg, Al, Si, K, Ca and Fe as electron donors
#c
for i in range(nummet):
ind = itab[izmet[i]]
j = iat[indx[1][ind][0][0][0]]
ii = indx[2][ind][0][0][0]
#print("i ", i, " ind ", ind, " j ", j, " ii ", ii, " idel ", idel[ii])
if (idel[ii] == 1):
fact3 = it[ii] + pe0
#print("it ", it[ii], " fact3 ", fact3)
sum1 = sum1 + comp[j]*it[ii]/fact3
sum2 = sum2 + comp[j]*it[ii]/fact3/fact3
pd = pt + ph2 - pe0
dpe = (pd*sum1 - pe0)/(1.0e0 + sum1 + pd*sum2)
#print("sum1 ", sum1, " sum2 ", sum2, " pd ", pd)
pe0 = pe0 + dpe
#print("neit ", neit, " dpe ", dpe, " pe0 ", pe0)
#Original FORTRAN go to logic replaced by while condition above
#if (neit .le. 15 .AND. dabs(pe0/pt) .gt. 1.0d-20
# .AND. dabs(dpe/pe0) .gt. 1.0e-3) go to 215
pe = pe0
#print("Final pe0 ", pe0)
if (abs(pe/pt) < 1.0e-20):
pe = pt*1.0e-20
#c
#c Estimate partial pressures of major atomic species, ie.
#c H, C, N, O, S, and Si.
#c These are the only initial estimates required if ISOLV=1.
#c
#c First estimate partial pressure of atomic hydrogen
#c
ihm = it[indx[0][1][0][0][0]]
dhm = idel[indx[0][1][0][0][0]]
terma = (2.0e0 - comph)*dh2/kh2
termb = 1.0e0
if (pe > 0.0e0):
#print("dhp ", dhp, " ihp ", ihp, " dhm ", dhm, " ihm ", ihm, " pe ", pe)
termb = 1.0e0 + dhp*ihp/pe + dhm*ihm*pe
termc = -(pt - pe)*comph
rat = 4.0e0*terma*termc/termb/termb
omrat = 1.0e0 - rat
if (omrat < 0.0e0):
omrat = 0.0e0
#print("abs(rat) ", abs(rat))
if (abs(rat) >= 1.0e-10):
ph = ( (-1.0*termb) + abs(termb)*math.sqrt(omrat))/2.0e0/terma
#print("terma ", terma, " termb ", termb, " omrat ", omrat, " ph ", ph)
else:
ph = -1.0*termc/termb
#print(" termb ", termb, " termc ", termc, " ph ", ph)
ph2 = dh2*ph*ph/kh2
pd = pt + ph2 - pe
#c
#c Now that Pd, the total fictitious pressure is known, we can
#c estimate the partial pressure of the other major
#c atomic species C,N,O,Si,S
#c
jc = iat[indx[1][2][0][0][0]]
jn = iat[indx[1][3][0][0][0]]
jo = iat[indx[1][4][0][0][0]]
jsi = iat[indx[1][12][0][0][0]]
js = iat[indx[1][5][0][0][0]]
compc = comp[jc]
compn = comp[jn]
compo = comp[jo]
compsi = comp[jsi]
comps = comp[js]
icp = it[indx[2][2][0][0][0]]
inp = it[indx[2][3][0][0][0]]
iop = it[indx[2][4][0][0][0]]
isip = it[indx[2][12][0][0][0]]
isp = it[indx[2][5][0][0][0]]
kch = kt[indx[1][2][1][0][0]]
koh = kt[indx[1][4][1][0][0]]
knh = kt[indx[1][3][1][0][0]]
kco = kt[indx[1][4][2][0][0]]
kn2 = kt[indx[1][3][3][0][0]]
kh2o = kt[indx[1][4][1][1][0]]
ksio = kt[indx[1][12][4][0][0]]
ksis = kt[indx[1][12][5][0][0]]
#c ksih = kt[indx[1][12][1][0][0]]
khs = kt[indx[1][5][1][0][0]]
kh2s = kt[indx[1][5][1][1][0]]
dcp = idel[indx[2][2][0][0][0]]
dnp = idel[indx[2][3][0][0][0]]
dop = idel[indx[2][4][0][0][0]]
dsip = idel[indx[2][12][0][0][0]]
dsp = idel[indx[2][5][0][0][0]]
dch = idel[indx[1][2][1][0][0]]
doh = idel[indx[1][4][1][0][0]]
dnh = idel[indx[1][3][1][0][0]]
dco = idel[indx[1][4][2][0][0]]
dn2 = idel[indx[1][3][3][0][0]]
dh2o = idel[indx[1][4][1][1][0]]
dsio = idel[indx[1][12][4][0][0]]
dsis = idel[indx[1][12][5][0][0]]
#c dsih = idel[indx[1][12][1][0][0]]
dhs = idel[indx[1][5][1][0][0]]
dh2s = idel[indx[1][5][1][1][0]]
ksih = 1.0e0
dsih = 0.0e0
#c
#c Estimate C and O partial pressures
#c
fact1 = 1.0e0 + doh*ph/koh + dh2o*ph*ph/kh2o + dop*iop/pe
fact2 = 1.0e0 + dch*ph/kch + dcp*icp/pe
terma = fact1*dco/kco
termb = fact1*fact2 + (compc - compo)*pd*dco/kco
termc = -compo*pd*fact2
rat = 4.0e0*terma*termc/termb/termb
omrat = 1.0e0 - rat
if (omrat < 0.0e0):
omrat = 0.0e0
if (abs(rat) >= 1.0e-10):
po = (-termb + abs(termb)*math.sqrt(omrat))/(2.0e0*terma)
else:
if (termb <= 0.0e0):
po = -termb/terma
else:
po = -termc/termb
pc = compc*pd/(fact2 + dco*po/kco)
#c
#c Estimate N partial pressure
#c
terma = 2.0e0*dn2/kn2
termb = 1.0e0 + dnh*ph/knh + dnp*inp/pe
termc = -compn*pd
pn = compn*pd/termb
if ( (dn2 != 0.0e0) and (kn2 < 1.0e6) ):
pnnn = termb*termb - 4.0e0*terma*termc
if (pnnn < 0.0e0):
pnnn = 0.0e0
pn = (-termb + math.sqrt(pnnn))/2.0e0/terma
#c
#c Estimate Si and S partial pressures
#c
fact1 = 1.0e0 + dsio*po/ksio + dsih*ph/ksih + dsip*isip/pe
fact2 = 1.0e0 + dhs*ph/khs + dh2s*ph*ph/kh2s + dsp*isp/pe
terma = fact1*dsis/ksis
termb = fact1*fact2 + (comps - compsi)*pd*dsis/ksis
termc = -compsi*pd*fact2
rat = 4.0e0*terma*termc/termb/termb
omrat = 1.0e0 - rat
if (omrat < 0.0e0):
omrat = 0.0e0
if (abs(rat) >= 1.0e-10):
psi = (-termb + abs(termb)*math.sqrt(omrat))/2.0e0/terma
else:
if (termb <= 0.0e0):
psi = -termb/terma
else:
psi = -termc/termb
ps = comps*pd/(fact2 + dsis*psi/ksis)
#c
#c Fill array of initial partial pressure estimates for H, C, N, O
#c
p[jh] = ph
p[jc] = pc
p[jn] = pn
p[jo] = po
p[jsi] = psi
p[js] = ps
#print("jh ", jh, " p[jh] ", p[jh])
#c
#c Make initial estimates for any other elements to be
#c included in linearizaton.
#c
for j in range(natom):
n = indsp[j]
if (ipr[n] > 2):
p[j] = 0.0e0
else:
#iz = zat[0][indsp[j]]
iz = zat[0][indsp[j]]-1
#Original FORTRAN "computed go to":
# go to (230, 400, 400, 400, 400, 230, 230, 230, 400, 400,
# 400, 400, 400, 230, 400, 230, 317, 400, 400, 400,
# 321, 322, 323, 400, 400, 400, 400, 400, 400, 400,
# 400, 400, 400, 400, 400, 400, 400, 400, 339, 340), iz
if ( iz==1 or iz==2 or iz==3 or iz==4\
or iz==8 or iz==9 or iz==10 or iz==11 or iz==12\
or iz==14 or iz==17 or iz==18 or iz==19\
or (iz>=23 and iz<=37) ):
#c
#c Estimate partial pressure of neutral atomic species considering all
#c atoms are present only as neutral atoms or singly charged ions.
#c Elements for which the above statement is inaccurate
#c (eg., molecular association is appreciable) are treated
#c separately below. These elements are He,Ne,Cl,Sc,Ti,V,Y,Zr.
#c
#400
n = indx[2][itab[iz]][0][0][0]
p[j] = pd*comp[j]/(1.0e0 + idel[n]*it[n]/pe)
#go to 230
elif(iz == 16):
#c
#c Estimate Cl partial pressure
#c
#317
jcl = iat[indx[1][6][0][0][0]]
iclm = it[indx[0][6][0][0][0]]
khcl = kt[indx[1][6][1][0][0]]
dclm = idel[indx[0][6][0][0][0]]
dhcl = idel[indx[1][6][1][0][0]]
p[jcl] = comp[jcl]*pd/(1.0e0 + dhcl*ph/khcl + dclm*iclm*pe)
#go to 230
#c
#c Estimate Sc partial pressure
#c
# 321
elif(iz == 20):
jsc = iat[indx[1][15][0][0][0]]
iscp = it[indx[2][15][0][0][0]]
dscp = idel[indx[2][15][0][0][0]]
ksco = kt[indx[1][15][4][0][0]]
dsco = idel[indx[1][15][4][0][0]]
ksco2 = kt[indx[1][15][4][4][0]]
dsco2 = idel[indx[1][15][4][4][0]]
p[jsc] = comp[jsc]*pd/(1.0e0 + dsco*po/ksco + dsco2*po*po/ksco2 + dscp*iscp/pe)
#go to 230
#c
#c Estimate Ti partial pressure
#c
#322
elif(iz == 21):
jti = iat[indx[1][16][0][0][0]]
itip = it[indx[2][16][0][0][0]]
dtip = idel[indx[2][16][0][0][0]]
ktio = kt[indx[1][16][4][0][0]]
dtio = idel[indx[1][16][4][0][0]]
p[jti] = comp[jti]*pd/(1.0e0 + dtio*po/ktio + dtip*itip/pe)
#go to 230
#c
#c Estimate V partial pressure
#c
#323
elif(iz == 21):
jv = iat[indx[1][17][0][0][0]]
ivp = it[indx[2][17][0][0][0]]
dvp = idel[indx[2][17][0][0][0]]
kvo = kt[indx[1][17][4][0][0]]
dvo = idel[indx[1][17][4][0][0]]
p[jv] = comp[jv]*pd/(1.0e0 + dvo*po/kvo + dvp*ivp/pe)
#go to 230
#c
#c Estimate Y partial pressure
#c
#339
elif(iz == 38):
jy = iat[indx[1][24][0][0][0]]
iyp = it[indx[2][24][0][0][0]]
dyp = idel[indx[2][24][0][0][0]]
kyo = kt[indx[1][24][4][0][0]]
dyo = idel[indx[1][24][4][0][0]]
kyo2 = kt[indx[1][24][4][4][0]]
dyo2 = idel[indx[1][24][4][4][0]]
p[jy] = comp[jy]*pd/(1.0e0 + dyo*po/kyo + dyo2*po*po/kyo2 + dyp*iyp/pe)
#go to 230
#c
#c Estimate Zr partial pressure
#c
#340
elif(iz == 39):
jzr = iat[indx[1][25][0][0][0]]
izrp = it[indx[2][25][0][0][0]]
dzrp = idel[indx[2][25][0][0][0]]
kzro = kt[indx[1][25][4][0][0]]
dzro = idel[indx[1][25][4][0][0]]
kzro2 = kt[indx[1][25][4][4][0]]
dzro2 = idel[indx[1][25][4][4][0]]
p[jzr] = comp[jzr]*pd/(1.0e0 + dzro*po/kzro + dzro2*po*po/kzro2 + dzrp*izrp/pe)
if (isolv == 0):
#neq = 1
neq = 1 + 1
elif (isolv == 1):
neq = nlin1 + 2
#neq = nlin1 + 2 + 1
elif (isolv == 2):
neq = nlin2 + 1
#neq = nlin2 + 1 + 1
#print("GasEst: isolv ", isolv, " nlin2 ", nlin2, " neq ", neq)
#Try returning a tuple:
return pe, p, neq
| 19,667
| 28.443114
| 122
|
py
|
ChromaStarPy
|
ChromaStarPy-master/vegatest.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 10:54:21 2017

@author: ishort

Compare an observed spectrum of Vega (HD 172167, STELIB FITS format) with a
synthetic spectrum pre-computed by ChromaStarPy.  Both spectra are continuum
rectified, the model is interpolated onto a uniform wavelength grid and
degraded to the instrumental resolution with a Gaussian kernel, the first
"H I" line identification is over-plotted, and the figure is saved as .eps.
"""
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import pylab
from astropy.io import fits
import numpy
import Gauss2
#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')

#Get the observed data
dataPath = "VegaAtlas/"
#outPath = absPath + "Outputs/"

#If reading FITS file (from STELIB)
#http://www.ast.obs-mip.fr/users/leborgne/stelib/fits_files.html
#A&A 402, 433–442 (2003) J.-F. Le Borgne1, G. Bruzual2, R. Pell´o1, A. Lanc¸on3, B. Rocca-Volmerange4 , B. Sanahuja5, D. Schaerer1,
#C. Soubiran6, and R. V´ılchez-G´omez
wav = 0.0
inFile = dataPath + "HD172167_V3.2.fits"
hdulist = fits.open(inFile)
#Get the coefficients for the linear wavelength array from the FITS header
naxis1 = hdulist[0].header['NAXIS1']
crval1 = hdulist[0].header['CRVAL1']
cdelt = hdulist[0].header['CDELT1']
vhelio = hdulist[0].header['VHELIO']
vlsr = hdulist[0].header['VLSR']
radvel = hdulist[0].header['RADVEL']
flux = hdulist[0].data
#Continuum rectification - here a divisor:
cy0 = 1.3e-8
wave = [0.0 for i in range(naxis1)]
for i in range(naxis1):
    ii = float(i)
    wav = crval1 + cdelt*ii
    #0.1 factor: header wavelengths presumably in Angstroms -> nm (TODO confirm)
    wave[i] = 0.1 * wav
    flux[i] = flux[i] / cy0
""" If reading ascii data
#with open("", 'r', encoding='utf-8') as inputHandle:
numStr = ""
num = 0.0
wav = 0.0
flx = 0.0
wavStr = ""
flxStr = ""
inLine = ""
fields = [" " for i in range(2)]
inFile = dataPath + "vega.dat"
#Continuum rectification - here a factor:
cy0 = 0.65
with open(inFile, 'r') as inputHandle:
    #No header - we'll figure out number of records on fly
    wave = []
    flux = []
    #for i in range(num):
    inLine = inputHandle.readline()
    while (inLine != ""):
        inLine = inputHandle.readline()
        #print(inLine)
        if not inLine:
            break
        inLine = inputHandle.readline()
        fields = inLine.split()
        wavStr = fields[0].strip(); flxStr = fields[1].strip()
        wav = 0.1 * float(wavStr)
        wave.append(wav)
        flx = cy0 * float(flxStr)
        flux.append(flx)
"""
#Plot the observed spectrum first (black)
pylab.plot(wave, flux, color='black')

#Now get the synthetic spectrum pre-computed with ChromaStarPy
modelPath = "Outputs/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = " "
#fields = [" " for i in range(2)]
"""
runVers = "pyLoop"
#Model atmosphere
teffStr = "9550.0"
loggStr = "3.95"
logZStr = "-0.5"
massStarStr = "2.0"
xiTStr = "2.0"
logHeFeStr = "0.0"
logCOStr = "0.0"
logAlphaFeStr = "0.0"
#Spectrum synthesis
lambdaStartStr = "429.0"
lambdaStopStr = "439.0"
lineThreshStr = "-3.0"
voigtThreshStr = "-3.0"
logGammaColStr = "0.0"
logKapFudgeStr = "0.0"
macroVStr = "2.0"
#rotVStr = "275.0"
#rotIStr = "5.0"
rotVStr = "20.0"
rotIStr = "0.0"
RVStr = "0.0"
strucStem = "Teff" + teffStr + "Logg" + loggStr + "Z" + logZStr + "M" + massStarStr+"xiT"+xiTStr + \
"HeFe" + logHeFeStr + "CO" + logCOStr + "AlfFe" + logAlphaFeStr + "v" + runVers
strucFile = "struc." + strucStem + ".out"
specFile = "spec." + strucStem + "L"+lambdaStartStr+"-"+lambdaStopStr+"xiT"+xiTStr+"LThr"+lineThreshStr+ \
"GamCol"+logGammaColStr+"Mac"+macroVStr+"Rot"+rotVStr+"-"+rotIStr+"RV"+RVStr + ".out"
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = modelPath + specFile;
"""
#Model run parameters used to reconstruct the output file name:
project = "Project"
runVers = "Run"
teff = 9550.0
logg = 3.95
log10ZScale = -0.5
lambdaStart = 429.0
lambdaStop = 439.0
fileStem = project + "-"\
    + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
    + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
    + "-" + runVers
inFile = modelPath + fileStem + ".spec.txt"
invnAir = 1.0 / 1.000277 #// reciprocal of refractive index of air at STP
#numStr = fields[0].strip() #first field is number of following records
#num = int(numStr)
waveMod = []
fluxMod = []
wav = 0.0 #//initialization
wavStr = ""
lblStr = ""
with open(inFile, 'r') as inputHandle:
    #Expects number of records on first lines, then white space delimited columns of
    #wavelengths in nm and continuum rectified fluxes
    inLine = inputHandle.readline() #line of header
    print(inLine)
    inLine = inputHandle.readline()
    print(inLine)
    fields = inLine.split()
    #number of line IDs is last field:
    numLineIdsStr = fields[len(fields)-1]
    numLineIds = int(numLineIdsStr) - 1 # to be on safe side
    print("Recovered that there are " + numLineIdsStr + " lines to ID")
    inLine = inputHandle.readline()
    print(inLine)
    fields = inLine.split()
    #number of wavelengths in spectrum is last field:
    numWavsStr = fields[len(fields)-1]
    numWavs = int(numWavsStr) # to be on safe side
    print("Recovered that there are " + numWavsStr + " wavelengths")
    #One more line of header
    inLine = inputHandle.readline() #line of header
    print(inLine)
    waveMod = [0.0 for i in range(numWavs)]
    fluxMod = [0.0 for i in range(numWavs)]
    #Get the synthetic spectrum; convert vacuum wavelengths to air with invnAir
    for i in range(numWavs):
        inLine = inputHandle.readline()
        fields = inLine.split()
        wavStr = fields[0].strip(); flxStr = fields[1].strip()
        wav = invnAir * float(wavStr)
        waveMod[i] = wav
        fluxMod[i] = float(flxStr)
    waveIds = [0.0 for i in range(numLineIds)]
    lblIds = ["" for i in range(numLineIds)]
    #Get the line IDs
    #Expects four white-space-delimited fields:
    # wavelength, element, ion. stage, and rounded wavelength
    #Another line of header for line id section
    inLine = inputHandle.readline() #line of header
    print(inLine)
    for i in range(numLineIds):
        inLine = inputHandle.readline()
        fields = inLine.split()
        wavStr = fields[0].strip()
        wav = invnAir * float(wavStr)
        waveIds[i] = wav
        lblStr = fields[1].strip() + " " + fields[2].strip() + " " + fields[3].strip()
        lblIds[i] = lblStr
"""
#If we do NOT know number of records:
#for i in inputHandle: #doesn't work - 0 iterations
while (inLine != ""):
    inLine = inputHandle.readline()
    if not inLine:
        break
    #print(inLine)
    fields = inLine.split()
    wavStr = fields[0].strip(); flxStr = fields[1].strip()
    wav = invnAir * float(wavStr)
    waveMod.append(wav)
    fluxMod.append(float(flxStr))
"""
#Interpolate synthetic spectrum onto uniform wavelength grid and convolve with
#instrumental profile accounting for finite spectral resolving power, R
delLam = 0.01 #sampling in nm
numWavs2 = (waveMod[numWavs-1] - waveMod[0]) / delLam
numWavs2 = int(numWavs2)
wave2 = [0.0 for i in range(numWavs2)]
for i in range(numWavs2):
    ii = float(i)
    wave2[i] = waveMod[0] + ii*delLam
#necessary?? flux2 = [0.0 for i in range(numWavs2)]
#interpolate the flux onto the new wavelength scale
flux2 = numpy.interp(wave2, waveMod, fluxMod)
specR = 2000 #approximate STELIB value
midWave = (waveMod[numWavs-1] + waveMod[0]) / 2.0
deltaR = midWave / specR #resolution element in nm
sigma = deltaR /delLam #resolution element in array elements
#NOTE(review): fwhm = 2*sigma; a Gaussian FWHM is 2.355*sigma - semantics
#depend on what Gauss2.gauss2 expects; confirm against Gauss2.
fwhm = 2.0 * sigma
#length of array holding Gaussian in array element space if computing Gaussian from -3.5 to +3.5 sigma
length = int(7.0 * sigma)
#Make a Gaussian instrumental profile
gaussian = Gauss2.gauss2(fwhm, length)
#Convolve the uniformly sampled synthetic spectrum with the instrumental profile
flux2s = numpy.convolve(flux2, gaussian, mode='same')
#plot the spectrum
#plt.title('Synthetic spectrum')
#Raw strings: '\l' is an invalid escape sequence (SyntaxWarning, future error);
#the rendered mathtext labels are unchanged.
plt.ylabel(r'$F_\lambda/F^C_\lambda$')
plt.xlabel(r'$\lambda$ (nm)')
xMin = min(waveMod)
xMax = max(waveMod)
pylab.xlim(xMin, xMax)
pylab.ylim(0.0, 1.2)
#pylab.plot(waveMod, fluxMod, color="gray")
pylab.plot(wave2, flux2, color=(0.6, 0.6, 0.6))
pylab.plot(wave2, flux2s, color=(0.3, 0.3, 0.3))
#add the line IDs (label only the first H I entry)
foundOne = False # work around H I line components being multiply labeled in line list
for i in range(numLineIds):
    if "H I" in lblIds[i] and foundOne == False:
        foundOne = True
        thisLam = waveIds[i]
        thisLbl = lblIds[i]
        xPoint = [thisLam, thisLam]
        yPoint = [0.75, 0.8]
        pylab.plot(xPoint, yPoint, color='black')
        pylab.text(thisLam, 1.1, thisLbl, rotation=270)
#Save as encapsulated postscript (eps) for LaTex
epsName = fileStem + ".eps"
plt.savefig(epsName, format='eps', dpi=1000)
| 8,521
| 27.693603
| 131
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Flux.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 29 17:42:58 2017
@author: Ian
"""
import math
import random
import numpy
import Useful
import ToolBox
def flux(intens, cosTheta):
    """Integrate the emergent specific intensity over angle to get the
    monochromatic surface flux.

    intens   -- specific intensity at each quadrature angle (length numThetas)
    cosTheta -- 2 x numThetas array: row 0 holds the Gaussian quadrature
                weights, row 1 holds the cos(theta) values

    Returns a 2-element list: element 0 is the linear flux (cgs units),
    element 1 is its natural logarithm.
    """
    weights = cosTheta[0]
    mus = cosTheta[1]
    nAngles = len(weights)
    # Quadrature sum of I(mu) * mu * w over all angles:
    angleSum = sum(intens[k] * mus[k] * weights[k] for k in range(nAngles))
    # F = 2*pi * sum( I * mu * w ):
    linFlux = 2.0 * math.pi * angleSum
    return [linFlux, math.log(linFlux)]
#}
#//
def flux3(intens, lambdas, cosTheta, phi,
              radius, omegaSini, macroV):
    """Monochromatic surface flux with rotational and macroturbulent
    Doppler broadening, integrated over a (theta, phi) tiling of the
    projected stellar disk.

    intens    -- numLams x numThetas specific intensity array
    lambdas   -- wavelengths, length numLams (cm, per the Useful.c() usage)
    cosTheta  -- 2 x numThetas: row 0 quadrature weights, row 1 cos(theta)
    phi       -- azimuthal angles of the disk tiling (radians)
    radius    -- stellar radius (cm -- so that vRad comes out in cm/s)
    omegaSini -- projected angular rotation rate (makes x*omegaSini a cm/s
                 radial velocity)
    macroV    -- macroturbulent velocity dispersion (cm/s)

    Returns fluxSurfSpec, a 2 x numLams array: row 0 linear flux,
    row 1 natural log of flux.

    NOTE: uses random macroturbulent shifts, so results are stochastic
    unless the random module is seeded by the caller.
    """
    numLams = len(lambdas)
    numThetas = len(cosTheta[0])
    fluxSurfSpec = [ [ 0.0 for i in range(numLams) ] for j in range(2) ]
    # phi = 0 is the direction of the positive x-axis of a right-handed 2D
    # Cartesian system in the plane of the sky, origin at the sub-stellar
    # point (phi increases CCW).
    numPhi = len(phi)
    delPhi = 2.0 * math.pi / numPhi
    thisIntens = [0.0 for i in range(numLams)]
    intensLam = [0.0 for i in range(numLams)]
    shiftedLamV = [0.0 for i in range(numLams)]
    # Radial velocity of each (theta, phi) tile: rotation + macroturbulence.
    vRad = [ [ 0.0 for i in range(numPhi) ] for j in range(numThetas) ]
    for it in range(numThetas):
        # sin(theta) from cos(theta) without calling acos:
        sinTheta = math.sqrt( 1.0 - (cosTheta[1][it]*cosTheta[1][it]) )
        opposite = radius * sinTheta
        for ip in range(numPhi):
            # x-position of the tile; rotational radial velocity is x*omega*sin(i):
            x = opposite * math.cos(phi[ip])
            vRad[it][ip] = x * omegaSini # cm/s
            # Macroturbulence: Gaussian deviate via the polar form of the
            # Box-Muller transformation (Everett Carter, Taygeta Scientific).
            # Draw pairs of uniform deviates on [-1, 1] until they fall
            # inside the unit circle:
            ww = 2.0 # guarantees at least one pass
            while (ww >= 1.0):
                uRnd1 = (2.0 * random.random()) - 1.0
                uRnd2 = (2.0 * random.random()) - 1.0
                ww = (uRnd1 * uRnd1) + (uRnd2 * uRnd2)
            # BUG FIX: the transform is y1 = x1 * sqrt(-2 ln(w) / w); the
            # original code omitted the square root, biasing the deviates.
            arg = math.sqrt( (-2.0 * math.log(ww)) / ww )
            gRnd1 = macroV * arg * uRnd1
            # (the second deviate, macroV*arg*uRnd2, is not needed here)
            vRad[it][ip] = vRad[it][ip] + gRnd1 # cm/s
    # Accumulate the flux, Doppler-shifting the intensity spectrum of each
    # tile and resampling it back onto the rest wavelength grid.
    flx = [0.0 for i in range(numLams)]
    for it in range(numThetas):
        # quadrature factor mu * w for this ring:
        thisThetFctr = cosTheta[1][it] * cosTheta[0][it]
        for il in range(numLams):
            intensLam[il] = intens[il][it]
        for ip in range(numPhi):
            # First-order Doppler shift: lambda' = lambda * (1 + v/c)
            for il in range(numLams):
                shiftedLamV[il] = lambdas[il] * ( (vRad[it][ip]/Useful.c()) + 1.0 )
            thisIntens = numpy.interp(lambdas, shiftedLamV, intensLam)
            for il in range(numLams):
                flx[il] = flx[il] + ( thisIntens[il] * thisThetFctr * delPhi )
    for il in range(numLams):
        fluxSurfSpec[0][il] = flx[il] # non-axi-symmetric version
        fluxSurfSpec[1][il] = math.log(fluxSurfSpec[0][il])
    return fluxSurfSpec
| 11,496
| 38.373288
| 195
|
py
|
ChromaStarPy
|
ChromaStarPy-master/FormalSoln.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 29 17:29:05 2017
@author: Ian
"""
import math
import Planck
def formalSoln(numDeps, cosTheta, lambda2, tau, temp, lineMode):
    """Formal solution of the LTE radiative transfer equation for the
    monochromatic *surface* intensity I_lambda(tau=0, theta).

    numDeps  -- number of depth points
    cosTheta -- 2 x numThetas: row 0 quadrature weights, row 1 cos(theta)
    lambda2  -- wavelength (passed to Planck.planck / LineProf.lineSource)
    tau      -- optical depth scale; row 0 holds the linear values
    temp     -- temperature structure; row 0 holds the linear values
    lineMode -- True: line source fn (ETLA + coherent scattering);
                False: continuum source fn S_lambda = B_lambda

    Returns the emergent intensity at each angle (length numThetas).
    """
    # Tolerance for truncating the depth integration once deeper layers
    # stop contributing:
    cutoff = 0.001
    numThetas = len(cosTheta[0])
    intens = [0.0 for k in range(numThetas)]
    # NOTE(review): LineProf is not imported in this module as shown -
    # lineMode=True would raise NameError here; verify imports.
    lineSourceVec = [0.0 for k in range(numDeps)]
    if (lineMode):
        lineSourceVec = LineProf.lineSource(numDeps, tau, temp, lambda2)
    for it in range(numThetas):
        invMu = 1.0 / cosTheta[1][it]
        emergent = 0
        # Extended rectangle rule over depth, starting from the second point:
        for iDep in range(1, numDeps):
            if (lineMode == True):
                # Line mode: S_lambda = (1-eps)*J_lambda + eps*B_lambda
                logS = lineSourceVec[iDep]
            else:
                # Continuum mode: S_lambda = B_lambda
                logS = Planck.planck(temp[0][iDep], lambda2)
            # integrand = S * exp(-tau/mu) / mu, evaluated in log space:
            integrand = math.exp(logS - (tau[0][iDep] * invMu)) * invMu
            step = integrand * (tau[0][iDep] - tau[0][iDep - 1])
            emergent = emergent + step
            # Below tau ~ 2/3, stop once the relative contribution of a
            # layer drops under the cutoff:
            if (tau[0][iDep] > 2.0 / 3.0) and (emergent > 0) \
                    and (step / emergent < cutoff):
                break
        intens[it] = emergent
    return intens
#}
| 4,506
| 35.942623
| 106
|
py
|
ChromaStarPy
|
ChromaStarPy-master/CSGasData.py
|
# Species data tables for the GAS chemical/ionization equilibrium package.
# Mirrors the COMMON-block layout of the original Fortran GAS routines:
# the shared arrays live in CSBlockData and are filled in, one species per
# line, below.
import CSBlockData
# Fortran COMMON /gasp/ and /gasp2/ equivalents shared across the GAS modules
# (module-level `global` statements are no-ops; kept for documentation value)
global name, ip, comp, awt, nspec, natom, itab, ntab, indx, iprint, gsinit, print0 #/gasp/
global ipr, nch, nel, ntot, nat, zat, neut, idel, indsp, indzat, iat, natsp, iatsp #/gasp2/
# Bind the shared arrays allocated in CSBlockData
ip = CSBlockData.ip
comp = CSBlockData.comp
awt = CSBlockData.awt
name = CSBlockData.name
ipr = CSBlockData.ipr
nch = CSBlockData.nch
nel = CSBlockData.nel
nat = CSBlockData.nat
zat = CSBlockData.zat
# logk: 5 fit coefficients per species (up to 150 species) for the molecular
# equilibrium constants; logwt: log10 statistical-weight factors for ions
logk = [ [0.0e0 for i in range(150)] for j in range(5) ]
logwt = [0.0e0 for i in range(150)]
# Atomic and ionic species.  Per-species fields (index i = species number):
#   name  - species label            ipr      - priority/group flag (1..3)
#   nch   - net charge               nel      - number of distinct elements
#   nat[k][i]/zat[k][i] - count and atomic number of the k-th constituent
#   awt   - atomic weight            comp     - abundance (neutral atoms only)
#   ip    - potential in eV (ions)   logwt    - log weight factor (ions)
# NOTE(review): field meanings inferred from the values (e.g. ip[1] = 13.598 eV
# matches the H ionization energy) - confirm against the GAS documentation.
name[0] = "H"; ipr[0] = 1; nch[0] = 0; nel[0] = 1; nat[0][0] = 1; zat[0][0] = 1; awt[0] = 1.008; comp[0] = 9.32e-01
name[1] = "H+"; ipr[1] = 1; nch[1] = +1; ip[1] = 13.598; logwt[1] = 0.000
name[2] = "H-"; ipr[2] = 1; nch[2] = -1; ip[2] = 0.754; logwt[2] = 0.600
name[3] = "He"; ipr[3] = 2; nch[3] = 0; nel[3] = 1; nat[0][3] = 1; zat[0][3] = 2; awt[3] = 4.003; comp[1] = 6.53e-02
name[4] = "He+"; ipr[4] = 2; nch[4] = +1; ip[4] = 24.587; logwt[4] = 0.600
name[5] = "C"; ipr[5] = 1; nch[5] = 0; nel[5] = 1; nat[0][5] = 1; zat[0][5] = 6; awt[5] = 12.011; comp[2] = 4.94e-04
name[6] = "C+"; ipr[6] = 1; nch[6] = +1; ip[6] = 11.260; logwt[6] = 0.100
name[7] = "N"; ipr[7] = 1; nch[7] = 0; nel[7] = 1; nat[0][7] = 1; zat[0][7] = 7; awt[7] = 14.007; comp[3] = 8.95e-04
name[8] = "N+"; ipr[8] = 1; nch[8] = +1; ip[8] = 14.534; logwt[8] = 0.650
name[9] = "O"; ipr[9] = 1; nch[9] = 0; nel[9] = 1; nat[0][9] = 1; zat[0][9] = 8; awt[9] = 16.000; comp[4] = 8.48e-04
name[10] = "O+"; ipr[10] = 1; nch[10] = +1; ip[10] = 13.618; logwt[10] = -0.050
name[11] = "Ne"; ipr[11] = 2; nch[11] = 0; nel[11] = 1; nat[0][11] = 1; zat[0][11] = 10; awt[11] = 20.179; comp[5] = 7.74e-05
name[12] = "Ne+"; ipr[12] = 2; nch[12] = +1; ip[12] = 21.564; logwt[12] = 1.080
name[13] = "Na"; ipr[13] = 2; nch[13] = 0; nel[13] = 1; nat[0][13] = 1; zat[0][13] = 11; awt[13] = 22.990; comp[6] = 1.68e-06
name[14] = "Na+"; ipr[14] = 2; nch[14] = +1; ip[14] = 5.139; logwt[14] = 0.000
name[15] = "Mg"; ipr[15] = 2; nch[15] = 0; nel[15] = 1; nat[0][15] = 1; zat[0][15] = 12; awt[15] = 24.305; comp[7] = 2.42e-05
name[16] = "Mg+"; ipr[16] = 2; nch[16] = +1; ip[16] = 7.644; logwt[16] = 0.600
name[17] = "Mg++"; ipr[17] = 2; nch[17] = +2; ip[17] = 15.031; logwt[17] = 0.000
name[18] = "Al"; ipr[18] = 2; nch[18] = 0; nel[18] = 1; nat[0][18] = 1; zat[0][18] = 13; awt[18] = 26.982; comp[8] = 2.24e-06
name[19] = "Al+"; ipr[19] = 2; nch[19] = +1; ip[19] = 5.984; logwt[19] = -0.480
name[20] = "Si"; ipr[20] = 1; nch[20] = 0; nel[20] = 1; nat[0][20] = 1; zat[0][20] = 14; awt[20] = 28.086; comp[9] = 3.08e-05
name[21] = "Si+"; ipr[21] = 1; nch[21] = +1; ip[21] = 8.149; logwt[21] = 0.120
name[22] = "S"; ipr[22] = 1; nch[22] = 0; nel[22] = 1; nat[0][22] = 1; zat[0][22] = 16; awt[22] = 32.060; comp[10] = 1.49e-05
name[23] = "S+"; ipr[23] = 1; nch[23] = +1; ip[23] = 10.360; logwt[23] = -0.050
name[24] = "Cl"; ipr[24] = 3; nch[24] = 0; nel[24] = 1; nat[0][24] = 1; zat[0][24] = 17; awt[24] = 35.453; comp[11] = 3.73e-07
name[25] = "Cl-"; ipr[25] = 3; nch[25] = -1; ip[25] = 3.613; logwt[25] = 1.080
name[26] = "K"; ipr[26] = 2; nch[26] = 0; nel[26] = 1; nat[0][26] = 1; zat[0][26] = 19; awt[26] = 39.102; comp[12] = 8.30e-08
name[27] = "K+"; ipr[27] = 2; nch[27] = +1; ip[27] = 4.339; logwt[27] = 0.000
name[28] = "Ca"; ipr[28] = 2; nch[28] = 0; nel[28] = 1; nat[0][28] = 1; zat[0][28] = 20; awt[28] = 40.080; comp[13] = 1.86e-06
name[29] = "Ca+"; ipr[29] = 2; nch[29] = +1; ip[29] = 6.111; logwt[29] = 0.600
name[30] = "Ca++"; ipr[30] = 2; nch[30] = +2; ip[30] = 11.868; logwt[30] = 0.000
name[31] = "Sc"; ipr[31] = 3; nch[31] = 0; nel[31] = 1; nat[0][31] = 1; zat[0][31] = 21; awt[31] = 44.956; comp[14] = 1.49e-09
name[32] = "Sc+"; ipr[32] = 3; nch[32] = +1; ip[32] = 6.540; logwt[32] = 0.480
name[33] = "Ti"; ipr[33] = 3; nch[33] = 0; nel[33] = 1; nat[0][33] = 1; zat[0][33] = 22; awt[33] = 47.900; comp[15] = 1.21e-07
name[34] = "Ti+"; ipr[34] = 3; nch[34] = +1; ip[34] = 6.820; logwt[34] = 0.430
name[35] = "V"; ipr[35] = 3; nch[35] = 0; nel[35] = 1; nat[0][35] = 1; zat[0][35] = 23; awt[35] = 50.941; comp[16] = 2.33e-08
name[36] = "V+"; ipr[36] = 3; nch[36] = +1; ip[36] = 6.740; logwt[36] = 0.250
name[37] = "Cr"; ipr[37] = 3; nch[37] = 0; nel[37] = 1; nat[0][37] = 1; zat[0][37] = 24; awt[37] = 51.996; comp[17] = 6.62e-07
name[38] = "Cr+"; ipr[38] = 3; nch[38] = +1; ip[38] = 6.766; logwt[38] = 0.230
name[39] = "Mn"; ipr[39] = 3; nch[39] = 0; nel[39] = 1; nat[0][39] = 1; zat[0][39] = 25; awt[39] = 54.938; comp[18] = 2.33e-07
name[40] = "Mn+"; ipr[40] = 3; nch[40] = +1; ip[40] = 7.435; logwt[40] = 0.370
name[41] = "Fe"; ipr[41] = 2; nch[41] = 0; nel[41] = 1; nat[0][41] = 1; zat[0][41] = 26; awt[41] = 55.847; comp[19] = 3.73e-05
name[42] = "Fe+"; ipr[42] = 2; nch[42] = +1; ip[42] = 7.870; logwt[42] = 0.380
name[43] = "Co"; ipr[43] = 3; nch[43] = 0; nel[43] = 1; nat[0][43] = 1; zat[0][43] = 27; awt[43] = 58.933; comp[20] = 1.12e-07
name[44] = "Co+"; ipr[44] = 3; nch[44] = +1; ip[44] = 7.860; logwt[44] = 0.180
name[45] = "Ni"; ipr[45] = 2; nch[45] = 0; nel[45] = 1; nat[0][45] = 1; zat[0][45] = 28; awt[45] = 58.710; comp[21] = 1.86e-06
name[46] = "Ni+"; ipr[46] = 2; nch[46] = +1; ip[46] = 7.635; logwt[46] = -0.020
name[47] = "Sr"; ipr[47] = 3; nch[47] = 0; nel[47] = 1; nat[0][47] = 1; zat[0][47] = 38; awt[47] = 87.620; comp[22] = 6.62e-10
name[48] = "Sr+"; ipr[48] = 3; nch[48] = +1; ip[48] = 5.695; logwt[48] = 0.500
name[49] = "Y"; ipr[49] = 3; nch[49] = 0; nel[49] = 1; nat[0][49] = 1; zat[0][49] = 39; awt[49] = 88.906; comp[23] = 5.87e-11
name[50] = "Y+"; ipr[50] = 3; nch[50] = +1; ip[50] = 6.380; logwt[50] = 0.500
name[51] = "Zr"; ipr[51] = 3; nch[51] = 0; nel[51] = 1; nat[0][51] = 1; zat[0][51] = 40; awt[51] = 91.220; comp[24] = 2.98e-10
name[52] = "Zr+"; ipr[52] = 3; nch[52] = +1; ip[52] = 6.840; logwt[52] = 0.420
# Molecular species.  Fields as in the atomic table above, plus
# logk[0..4][i]: polynomial fit coefficients for the molecule's equilibrium
# (dissociation) constant as a function of temperature.
# NOTE(review): the fit variable (e.g. theta = 5040/T) is not visible here -
# confirm against the routine that evaluates logk before relying on it.
name[53] = "H2"; ipr[53] = 1; nch[53] = 0; nel[53] = 1; nat[0][53] = 2; zat[0][53] = 1; logk[0][53] = 12.739; logk[1][53] = -5.1172; logk[2][53] = 0.12572; logk[3][53] = -1.4149e-02; logk[4][53] = 6.3021e-04
name[54] = "H2+"; ipr[54] = 1; nch[54] = +1; ip[54] = 15.422; logwt[54] = 0.600
name[55] = "C2"; ipr[55] = 1; nch[55] = 0; nel[55] = 1; nat[0][55] = 2; zat[0][55] = 6; logk[0][55] = 12.804; logk[1][55] = -6.5178; logk[2][55] = .097719; logk[3][55] = -1.2739e-02; logk[4][55] = 6.2603e-04
name[56] = "C3"; ipr[56] = 1; nch[56] = 0; nel[56] = 1; nat[0][56] = 3; zat[0][56] = 6; logk[0][56] = 25.230; logk[1][56] = -14.445; logk[2][56] = 0.12547; logk[3][56] = -1.7390e-02; logk[4][56] = 8.8594e-04
name[57] = "N2"; ipr[57] = 1; nch[57] = 0; nel[57] = 1; nat[0][57] = 2; zat[0][57] = 7; logk[0][57] = 13.590; logk[1][57] = -10.585; logk[2][57] = 0.22067; logk[3][57] = -2.9997e-02; logk[4][57] = 1.4993e-03
name[58] = "O2"; ipr[58] = 1; nch[58] = 0; nel[58] = 1; nat[0][58] = 2; zat[0][58] = 8; logk[0][58] = 13.228; logk[1][58] = -5.5181; logk[2][58] = .069935; logk[3][58] = -8.1511e-03; logk[4][58] = 3.7970e-04
name[59] = "CH"; ipr[59] = 1; nch[59] = 0; nel[59] = 2; nat[0][59] = 1; zat[0][59] = 6; nat[1][59] = 1; zat[1][59] = 1; nat[2][59] = 0; zat[2][59] = 0; logk[0][59] = 12.135; logk[1][59] = -4.0760; logk[2][59] = 0.12768; logk[3][59] = -1.5473e-02; logk[4][59] = 7.2661e-04
name[60] = "C2H2"; ipr[60] = 1; nch[60] = 0; nel[60] = 2; nat[0][60] = 2; zat[0][60] = 6; nat[1][60] = 2; zat[1][60] = 1; nat[2][60] = 0; zat[2][60] = 0; logk[0][60] = 38.184; logk[1][60] = -17.365; logk[2][60] = .021512; logk[3][60] = -8.8961e-05; logk[4][60] = -2.8720e-05
name[61] = "NH"; ipr[61] = 1; nch[61] = 0; nel[61] = 2; nat[0][61] = 1; zat[0][61] = 7; nat[1][61] = 1; zat[1][61] = 1; nat[2][61] = 0; zat[2][61] = 0; logk[0][61] = 12.033; logk[1][61] = -3.8435; logk[2][61] = 0.13629; logk[3][61] = -1.6643e-02; logk[4][61] = 7.8691e-04
name[62] = "NH2"; ipr[62] = 1; nch[62] = 0; nel[62] = 2; nat[0][62] = 1; zat[0][62] = 7; nat[1][62] = 2; zat[1][62] = 1; nat[2][62] = 0; zat[2][62] = 0; logk[0][62] = 24.603; logk[1][62] = -8.6300; logk[2][62] = 0.20048; logk[3][62] = -2.4124e-02; logk[4][62] = 1.1484e-03
name[63] = "NH3"; ipr[63] = 1; nch[63] = 0; nel[63] = 2; nat[0][63] = 1; zat[0][63] = 7; nat[1][63] = 3; zat[1][63] = 1; nat[2][63] = 0; zat[2][63] = 0; logk[0][63] = 37.554; logk[1][63] = -13.059; logk[2][63] = 0.12910; logk[3][63] = -1.2338e-02; logk[4][63] = 5.3429e-04
name[64] = "OH"; ipr[64] = 1; nch[64] = 0; nel[64] = 2; nat[0][64] = 1; zat[0][64] = 8; nat[1][64] = 1; zat[1][64] = 1; nat[2][64] = 0; zat[2][64] = 0; logk[0][64] = 12.371; logk[1][64] = -5.0578; logk[2][64] = 0.13822; logk[3][64] = -1.6547e-02; logk[4][64] = 7.7224e-04
name[65] = "H2O"; ipr[65] = 1; nch[65] = 0; nel[65] = 2; nat[0][65] = 1; zat[0][65] = 8; nat[1][65] = 2; zat[1][65] = 1; nat[2][65] = 0; zat[2][65] = 0; logk[0][65] = 25.420; logk[1][65] = -10.522; logk[2][65] = 0.16939; logk[3][65] = -1.8368e-02; logk[4][65] = 8.1730e-04
name[66] = "MgH"; ipr[66] = 2; nch[66] = 0; nel[66] = 2; nat[0][66] = 1; zat[0][66] = 12; nat[1][66] = 1; zat[1][66] = 1; nat[2][66] = 0; zat[2][66] = 0; logk[0][66] = 11.285; logk[1][66] = -2.7164; logk[2][66] = 0.19658; logk[3][66] = -2.7310e-02; logk[4][66] = 1.3816e-03
name[67] = "AlH"; ipr[67] = 2; nch[67] = 0; nel[67] = 2; nat[0][67] = 1; zat[0][67] = 13; nat[1][67] = 1; zat[1][67] = 1; nat[2][67] = 0; zat[2][67] = 0; logk[0][67] = 12.191; logk[1][67] = -3.7636; logk[2][67] = 0.25557; logk[3][67] = -3.7261e-02; logk[4][67] = 1.9406e-03
name[68] = "SiH"; ipr[68] = 1; nch[68] = 0; nel[68] = 2; nat[0][68] = 1; zat[0][68] = 14; nat[1][68] = 1; zat[1][68] = 1; nat[2][68] = 0; zat[2][68] = 0; logk[0][68] = 11.852; logk[1][68] = -3.7418; logk[2][68] = 0.15999; logk[3][68] = -2.0629e-02; logk[4][68] = 9.9897e-04
name[69] = "HS"; ipr[69] = 1; nch[69] = 0; nel[69] = 2; nat[0][69] = 1; zat[0][69] = 16; nat[1][69] = 1; zat[1][69] = 1; nat[2][69] = 0; zat[2][69] = 0; logk[0][69] = 12.019; logk[1][69] = -4.2922; logk[2][69] = 0.14913; logk[3][69] = -1.8666e-02; logk[4][69] = 8.9438e-04
name[70] = "H2S"; ipr[70] = 1; nch[70] = 0; nel[70] = 2; nat[0][70] = 1; zat[0][70] = 16; nat[1][70] = 2; zat[1][70] = 1; nat[2][70] = 0; zat[2][70] = 0; logk[0][70] = 24.632; logk[1][70] = -8.4616; logk[2][70] = 0.17014; logk[3][70] = -2.0236e-02; logk[4][70] = 9.5782e-04
name[71] = "HCl"; ipr[71] = 3; nch[71] = 0; nel[71] = 2; nat[0][71] = 1; zat[0][71] = 17; nat[1][71] = 1; zat[1][71] = 1; nat[2][71] = 0; zat[2][71] = 0; logk[0][71] = 12.528; logk[1][71] = -5.1827; logk[2][71] = 0.18117; logk[3][71] = -2.4014e-02; logk[4][71] = 1.1994e-03
name[72] = "CaH"; ipr[72] = 3; nch[72] = 0; nel[72] = 2; nat[0][72] = 1; zat[0][72] = 20; nat[1][72] = 1; zat[1][72] = 1; nat[2][72] = 0; zat[2][72] = 0; logk[0][72] = 11.340; logk[1][72] = -3.0144; logk[2][72] = 0.42349; logk[3][72] = -6.1467e-02; logk[4][72] = 3.1639e-03
name[73] = "CN"; ipr[73] = 1; nch[73] = 0; nel[73] = 2; nat[0][73] = 1; zat[0][73] = 7; nat[1][73] = 1; zat[1][73] = 6; nat[2][73] = 0; zat[2][73] = 0; logk[0][73] = 12.805; logk[1][73] = -8.2793; logk[2][73] = .064162; logk[3][73] = -7.3627e-03; logk[4][73] = 3.4666e-04
name[74] = "NO"; ipr[74] = 1; nch[74] = 0; nel[74] = 2; nat[0][74] = 1; zat[0][74] = 8; nat[1][74] = 1; zat[1][74] = 7; nat[2][74] = 0; zat[2][74] = 0; logk[0][74] = 12.831; logk[1][74] = -7.1964; logk[2][74] = 0.17349; logk[3][74] = -2.3065e-02; logk[4][74] = 1.1380e-03
name[75] = "CO"; ipr[75] = 1; nch[75] = 0; nel[75] = 2; nat[0][75] = 1; zat[0][75] = 8; nat[1][75] = 1; zat[1][75] = 6; nat[2][75] = 0; zat[2][75] = 0; logk[0][75] = 13.820; logk[1][75] = -11.795; logk[2][75] = 0.17217; logk[3][75] = -2.2888e-02; logk[4][75] = 1.1349e-03
name[76] = "CO2"; ipr[76] = 1; nch[76] = 0; nel[76] = 2; nat[0][76] = 2; zat[0][76] = 8; nat[1][76] = 1; zat[1][76] = 6; nat[2][76] = 0; zat[2][76] = 0; logk[0][76] = 27.478; logk[1][76] = -17.098; logk[2][76] = .095012; logk[3][76] = -1.2579e-02; logk[4][76] = 6.4058e-04
name[77] = "MgO"; ipr[77] = 3; nch[77] = 0; nel[77] = 2; nat[0][77] = 1; zat[0][77] = 12; nat[1][77] = 1; zat[1][77] = 8; nat[2][77] = 0; zat[2][77] = 0; logk[0][77] = 11.702; logk[1][77] = -5.0326; logk[2][77] = 0.29641; logk[3][77] = -4.2811e-02; logk[4][77] = 2.2023e-03
name[78] = "AlO"; ipr[78] = 2; nch[78] = 0; nel[78] = 2; nat[0][78] = 1; zat[0][78] = 13; nat[1][78] = 1; zat[1][78] = 8; nat[2][78] = 0; zat[2][78] = 0; logk[0][78] = 12.739; logk[1][78] = -5.2534; logk[2][78] = 0.18218; logk[3][78] = -2.5793e-02; logk[4][78] = 1.3185e-03
name[79] = "SiO"; ipr[79] = 1; nch[79] = 0; nel[79] = 2; nat[0][79] = 1; zat[0][79] = 14; nat[1][79] = 1; zat[1][79] = 8; nat[2][79] = 0; zat[2][79] = 0; logk[0][79] = 13.413; logk[1][79] = -8.8710; logk[2][79] = 0.15042; logk[3][79] = -1.9581e-02; logk[4][79] = 9.4828e-04
name[80] = "SO"; ipr[80] = 1; nch[80] = 0; nel[80] = 2; nat[0][80] = 1; zat[0][80] = 16; nat[1][80] = 1; zat[1][80] = 8; nat[2][80] = 0; zat[2][80] = 0; logk[0][80] = 12.929; logk[1][80] = -6.0100; logk[2][80] = 0.16253; logk[3][80] = -2.1665e-02; logk[4][80] = 1.0676e-03
name[81] = "CaO"; ipr[81] = 2; nch[81] = 0; nel[81] = 2; nat[0][81] = 1; zat[0][81] = 20; nat[1][81] = 1; zat[1][81] = 8; nat[2][81] = 0; zat[2][81] = 0; logk[0][81] = 12.260; logk[1][81] = -6.0525; logk[2][81] = 0.58284; logk[3][81] = -8.5805e-02; logk[4][81] = 4.4425e-03
name[82] = "ScO"; ipr[82] = 3; nch[82] = 0; nel[82] = 2; nat[0][82] = 1; zat[0][82] = 21; nat[1][82] = 1; zat[1][82] = 8; nat[2][82] = 0; zat[2][82] = 0; logk[0][82] = 13.747; logk[1][82] = -8.6420; logk[2][82] = 0.48072; logk[3][82] = -6.9670e-02; logk[4][82] = 3.5747e-03
name[83] = "ScO2"; ipr[83] = 3; nch[83] = 0; nel[83] = 2; nat[0][83] = 1; zat[0][83] = 21; nat[1][83] = 2; zat[1][83] = 8; nat[2][83] = 0; zat[2][83] = 0; logk[0][83] = 26.909; logk[1][83] = -15.824; logk[2][83] = 0.39999; logk[3][83] = -5.9363e-02; logk[4][83] = 3.0875e-03
name[84] = "TiO"; ipr[84] = 2; nch[84] = 0; nel[84] = 2; nat[0][84] = 1; zat[0][84] = 22; nat[1][84] = 1; zat[1][84] = 8; nat[2][84] = 0; zat[2][84] = 0; logk[0][84] = 13.398; logk[1][84] = -8.5956; logk[2][84] = 0.40873; logk[3][84] = -5.7937e-02; logk[4][84] = 2.9287e-03
name[85] = "VO"; ipr[85] = 3; nch[85] = 0; nel[85] = 2; nat[0][85] = 1; zat[0][85] = 23; nat[1][85] = 1; zat[1][85] = 8; nat[2][85] = 0; zat[2][85] = 0; logk[0][85] = 13.811; logk[1][85] = -7.7520; logk[2][85] = 0.37056; logk[3][85] = -5.1467e-02; logk[4][85] = 2.5861e-03
name[86] = "VO2"; ipr[86] = 3; nch[86] = 0; nel[86] = 2; nat[0][86] = 1; zat[0][86] = 23; nat[1][86] = 2; zat[1][86] = 8; nat[2][86] = 0; zat[2][86] = 0; logk[0][86] = 27.754; logk[1][86] = -14.040; logk[2][86] = 0.33613; logk[3][86] = -4.8215e-02; logk[4][86] = 2.4780e-03
name[87] = "YO"; ipr[87] = 3; nch[87] = 0; nel[87] = 2; nat[0][87] = 1; zat[0][87] = 39; nat[1][87] = 1; zat[1][87] = 8; nat[2][87] = 0; zat[2][87] = 0; logk[0][87] = 13.514; logk[1][87] = -8.7775; logk[2][87] = 0.40700; logk[3][87] = -5.8053e-02; logk[4][87] = 2.9535e-03
name[88] = "YO2"; ipr[88] = 3; nch[88] = 0; nel[88] = 2; nat[0][88] = 1; zat[0][88] = 39; nat[1][88] = 2; zat[1][88] = 8; nat[2][88] = 0; zat[2][88] = 0; logk[0][88] = 26.764; logk[1][88] = -16.447; logk[2][88] = 0.39991; logk[3][88] = -5.8916e-02; logk[4][88] = 3.0506e-03
name[89] = "ZrO"; ipr[89] = 3; nch[89] = 0; nel[89] = 2; nat[0][89] = 1; zat[0][89] = 40; nat[1][89] = 1; zat[1][89] = 8; nat[2][89] = 0; zat[2][89] = 0; logk[0][89] = 13.296; logk[1][89] = -9.0129; logk[2][89] = 0.19562; logk[3][89] = -2.9892e-02; logk[4][89] = 1.6010e-03
name[90] = "ZrO2"; ipr[90] = 3; nch[90] = 0; nel[90] = 2; nat[0][90] = 1; zat[0][90] = 40; nat[1][90] = 2; zat[1][90] = 8; nat[2][90] = 0; zat[2][90] = 0; logk[0][90] = 26.793; logk[1][90] = -16.151; logk[2][90] = 0.46988; logk[3][90] = -6.4636e-02; logk[4][90] = 3.2277e-03
name[91] = "CS"; ipr[91] = 1; nch[91] = 0; nel[91] = 2; nat[0][91] = 1; zat[0][91] = 16; nat[1][91] = 1; zat[1][91] = 6; nat[2][91] = 0; zat[2][91] = 0; logk[0][91] = 13.436; logk[1][91] = -8.5574; logk[2][91] = 0.18754; logk[3][91] = -2.5507e-02; logk[4][91] = 1.2735e-03
name[92] = "SiS"; ipr[92] = 1; nch[92] = 0; nel[92] = 2; nat[0][92] = 1; zat[0][92] = 14; nat[1][92] = 1; zat[1][92] = 16; nat[2][92] = 0; zat[2][92] = 0; logk[0][92] = 13.182; logk[1][92] = -7.1147; logk[2][92] = 0.19300; logk[3][92] = -2.5826e-02; logk[4][92] = 1.2648e-03
name[93] = "TiS"; ipr[93] = 2; nch[93] = 0; nel[93] = 2; nat[0][93] = 1; zat[0][93] = 22; nat[1][93] = 1; zat[1][93] = 16; nat[2][93] = 0; zat[2][93] = 0; logk[0][93] = 13.316; logk[1][93] = -6.2216; logk[2][93] = 0.45829; logk[3][93] = -6.4903e-02; logk[4][93] = 3.2788e-03
name[94] = "SiC"; ipr[94] = 1; nch[94] = 0; nel[94] = 2; nat[0][94] = 1; zat[0][94] = 14; nat[1][94] = 1; zat[1][94] = 6; nat[2][94] = 0; zat[2][94] = 0; logk[0][94] = 12.327; logk[1][94] = -5.0419; logk[2][94] = 0.13941; logk[3][94] = -1.9363e-02; logk[4][94] = 9.6202e-04
name[95] = "SiC2"; ipr[95] = 1; nch[95] = 0; nel[95] = 2; nat[0][95] = 1; zat[0][95] = 14; nat[1][95] = 2; zat[1][95] = 6; nat[2][95] = 0; zat[2][95] = 0; logk[0][95] = 25.623; logk[1][95] = -13.085; logk[2][95] = -.055227; logk[3][95] = 9.3363e-03; logk[4][95] = -4.9876e-04
name[96] = "NaCl"; ipr[96] = 2; nch[96] = 0; nel[96] = 2; nat[0][96] = 1; zat[0][96] = 11; nat[1][96] = 1; zat[1][96] = 17; nat[2][96] = 0; zat[2][96] = 0; logk[0][96] = 11.768; logk[1][96] = -4.9884; logk[2][96] = 0.23975; logk[3][96] = -3.4837e-02; logk[4][96] = 1.8034e-03
name[97] = "MgCl"; ipr[97] = 2; nch[97] = 0; nel[97] = 2; nat[0][97] = 1; zat[0][97] = 12; nat[1][97] = 1; zat[1][97] = 17; nat[2][97] = 0; zat[2][97] = 0; logk[0][97] = 11.318; logk[1][97] = -4.2224; logk[2][97] = 0.21137; logk[3][97] = -3.0174e-02; logk[4][97] = 1.5480e-03
name[98] = "AlCl"; ipr[98] = 2; nch[98] = 0; nel[98] = 2; nat[0][98] = 1; zat[0][98] = 13; nat[1][98] = 1; zat[1][98] = 17; nat[2][98] = 0; zat[2][98] = 0; logk[0][98] = 11.976; logk[1][98] = -5.2228; logk[2][98] = -.010263; logk[3][98] = 3.9344e-03; logk[4][98] = -2.6236e-04
name[99] = "CaCl"; ipr[99] = 2; nch[99] = 0; nel[99] = 2; nat[0][99] = 1; zat[0][99] = 20; nat[1][99] = 1; zat[1][99] = 17; nat[2][99] = 0; zat[2][99] = 0; logk[0][99] = 12.314; logk[1][99] = -5.1814; logk[2][99] = 0.56532; logk[3][99] = -8.2868e-02; logk[4][99] = 4.2822e-03
name[100] = "HCN"; ipr[100] = 1; nch[100] = 0; nel[100] = 3; nat[0][100] = 1; zat[0][100] = 7; nat[1][100] = 1; zat[1][100] = 6; nat[2][100] = 1; zat[2][100] = 1; logk[0][100] = 25.635; logk[1][100] = -13.833; logk[2][100] = 0.13827; logk[3][100] = -1.8122e-02; logk[4][100] = 9.1645e-04
name[101] = "HCO"; ipr[101] = 1; nch[101] = 0; nel[101] = 3; nat[0][101] = 1; zat[0][101] = 8; nat[1][101] = 1; zat[1][101] = 6; nat[2][101] = 1; zat[2][101] = 1; logk[0][101] = 25.363; logk[1][101] = -13.213; logk[2][101] = 0.18451; logk[3][101] = -2.2973e-02; logk[4][101] = 1.1114e-03
name[102] = "MgOH"; ipr[102] = 2; nch[102] = 0; nel[102] = 3; nat[0][102] = 1; zat[0][102] = 12; nat[1][102] = 1; zat[1][102] = 8; nat[2][102] = 1; zat[2][102] = 1; logk[0][102] = 24.551; logk[1][102] = -9.3818; logk[2][102] = 0.19666; logk[3][102] = -2.7178e-02; logk[4][102] = 1.3887e-03
name[103] = "AlOH"; ipr[103] = 2; nch[103] = 0; nel[103] = 3; nat[0][103] = 1; zat[0][103] = 13; nat[1][103] = 1; zat[1][103] = 8; nat[2][103] = 1; zat[2][103] = 1; logk[0][103] = 25.707; logk[1][103] = -10.624; logk[2][103] = .097901; logk[3][103] = -1.1835e-02; logk[4][103] = 5.8121e-04
name[104] = "CaOH"; ipr[104] = 2; nch[104] = 0; nel[104] = 3; nat[0][104] = 1; zat[0][104] = 20; nat[1][104] = 1; zat[1][104] = 8; nat[2][104] = 1; zat[2][104] = 1; logk[0][104] = 24.611; logk[1][104] = -10.910; logk[2][104] = 0.60803; logk[3][104] = -8.7197e-02; logk[4][104] = 4.4736e-03
| 20,312
| 160.214286
| 295
|
py
|
ChromaStarPy
|
ChromaStarPy-master/PostProcess.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 3 12:20:48 2017
@author: ishort
"""
"""/**** Routines for client side post-processing of raw model atmosphere/spectrum
* synthesis output from server to produce synthetic observables
*/"""
import math
import numpy
import Useful
import ToolBox
nm2cm = 1.0e-7
def UBVRIraw(lambdaScale, flux):
    """
    Integrate the model SED through each photometric filter.

    Parameters:
      lambdaScale : model wavelength grid
      flux        : 2-row flux array; row 1 (the log flux) is interpolated here

    Returns a list of raw band-integrated fluxes, one per filter in filterSet().
    """
    transmissionCurves = filterSet()
    bandFlux = []
    for curve in transmissionCurves:
        filtLams = curve[0]
        filtTrans = curve[1]
        integral = 0.0
        # Rectangular (picket) integration over the *filter's* wavelength grid;
        # the model SED is interpolated onto that grid point by point.
        for k in range(1, len(filtLams)):
            step = filtLams[k] - filtLams[k - 1]  # nm
            # hand log flux (row 1) to the interpolation routine, then
            # linearize: fluxes add *linearly*
            sedHere = math.exp(ToolBox.interpol(lambdaScale, flux[1], filtLams[k]))
            integral = integral + (filtTrans[k] * sedHere * step)
        bandFlux.append(integral)
    return bandFlux
#//
#//
def UBVRI(bandFlux):
    """
    Convert band-integrated fluxes from UBVRIraw() into photometric colours.

    Each colour index is 2.5*log10(flux ratio), single-point calibrated by
    subtracting Vega colours computed self-consistently with the stellar
    parameters of Castelli & Kurucz (1994, A&A, 281, 817):
    Teff = 9550 K, log(g) = 3.95, [Fe/H] = -0.5.

    Band index convention (must be consistent with UBVRIraw()/filterSet()):
      0 Ux, 1 Bx, 2 B, 3 V, 4 R, 5 I, 6 H, 7 J, 8 K

    Returns [Ux-Bx, B-V, V-R, V-I, R-I, V-K, J-K].
    """
    # Vega calibration colours (lburns, June 2017)
    vegaColors = [0.0528, -0.6093, 0.3241, -1.1729, -0.6501, -3.2167, -1.5526]
    # (numerator band, denominator band) for each raw colour, in output order:
    # Ux-Bx, B-V, V-R, V-I, R-I, V-K, J-K
    pairs = [(1, 0), (3, 2), (4, 3), (5, 3), (5, 4), (8, 3), (8, 7)]
    colors = []
    for (num, den), vega in zip(pairs, vegaColors):
        # BUGFIX: V-R and V-I previously used math.log (natural log) instead
        # of math.log10; magnitude-based colours require base-10 logs, and the
        # JavaScript original used logTen for every colour.
        raw = 2.5 * math.log10(bandFlux[num] / bandFlux[den])
        colors.append(raw - vega)
    return colors
#//
#//
def iColors(lambdaScale, intens, numThetas, numLams):
    """
    Band-integrate the specific intensity I_lambda(theta) through each
    photometric filter, for every theta (limb) angle.

    Parameters:
      lambdaScale : model wavelength grid
      intens      : intens[lambda][theta] specific intensity array
      numThetas   : number of theta angles
      numLams     : number of model wavelengths

    Returns bandIntens[band][theta] - band-integrated intensities
    (despite the historical name, no colours are formed here).
    """
    filters = filterSet()
    nBands = len(filters)
    # All filter curves share the same number of sample points
    nFiltLams = len(filters[0][0])
    bandIntens = [[0.0 for _ in range(numThetas)] for _ in range(nBands)]
    for it in range(numThetas):
        # Pull out the intensity spectrum at this theta
        # (caution: this loop is over *model SED* lambdas)
        thisIntens = [intens[jl][it] for jl in range(numLams)]
        for ib in range(nBands):
            total = 0.0
            # Wavelength loop is over the photometric filter data wavelengths
            for il in range(1, nFiltLams):
                dLam = filters[ib][0][il] - filters[ib][0][il - 1]  # nm
                # Interpolate the model spectrum onto the filter's grid
                onGrid = ToolBox.interpol(lambdaScale, thisIntens, filters[ib][0][il])
                # Rectangular picket integration
                total = total + (filters[ib][1][il] * onGrid * dLam)
            bandIntens[ib][it] = total
    return bandIntens
#//Create area normalized Gaussian appropriate for interpolating onto high resolution wavelength gid
def gaussian(lambdaScale, numLams, lambdaIn, sigmaIn, lamUV, lamIR):
    """
    Build a Gaussian filter kernel centred on lambdaIn (nm) with width
    sigmaIn (nm), sampled every 0.001 nm out to +/- 2.5 sigma, for
    interpolation onto a high-resolution wavelength grid.

    lambdaScale and numLams are unused; kept for interface compatibility.

    Returns gauss[2][numGauss]:
      row 0 - kernel wavelengths in cm (for reference to lambdaScale)
      row 1 - Gaussian transmission, zeroed outside the [lamUV, lamIR]
              limits (nm) of the treated SED
    NOTE(review): despite the "area normalized" description upstream, no
    normalisation prefactor is applied (it was commented out in the original).
    """
    step = 0.001  # wavelength sampling interval for interpolation, nm
    sigmaPix = sigmaIn / step  # sigma of the Gaussian in pixels
    halfWidthSigmas = 2.5  # kernel extends +/- 2.5 sigma
    nPts = int(math.ceil(2.0 * halfWidthSigmas * sigmaPix))
    if nPts % 2 == 0:
        nPts += 1  # force an odd-length kernel so there is a centre pixel
    centre = math.floor(nPts / 2)
    gauss = [[0.0 for _ in range(nPts)] for _ in range(2)]
    # Row 1: Gaussian shape constructed in pixel space
    for i in range(nPts):
        z = (i - centre) / sigmaPix
        gauss[1][i] = math.exp(-0.5 * z * z)
    # Row 0: wavelength scale (nm -> cm); clamp transmission outside SED limits
    blueEdge = lambdaIn - (halfWidthSigmas * sigmaIn)  # nm
    for i in range(nPts):
        lamNm = blueEdge + ((1.0 * i) * step)  # nm
        gauss[0][i] = nm2cm * lamNm  # cm for reference to lambdaScale
        # Keep within limits of the treated SED
        if gauss[0][i] < nm2cm * lamUV:
            gauss[1][i] = 0.0
        if gauss[0][i] > nm2cm * lamIR:
            gauss[1][i] = 0.0
    return gauss
#//var tuneColor = function(lambdaScale, intens, numThetas, numLams, diskLambda, diskSigma, lamUV, lamIR) {
def tuneColor(lambdaScale, intens, numThetas, numLams, gaussian, lamUV, lamIR):
    """
    Integrate the specific intensity through a tunable (Gaussian) filter.

    Parameters:
      lambdaScale : model wavelength grid (same units as gaussian row 0)
      intens      : intens[lambda][theta] specific intensity array
      numThetas   : number of theta angles
      numLams     : number of model wavelengths
      gaussian    : 2-row kernel from gaussian(): row 0 wavelengths,
                    row 1 transmission; assumed evenly sampled
      lamUV, lamIR: unused here (the kernel is already clamped);
                    kept for interface compatibility

    Returns bandIntens[theta]: one band-integrated intensity per theta.
    """
    kernLams = gaussian[0]
    kernTrans = gaussian[1]
    nKern = len(kernLams)
    dLam = kernLams[1] - kernLams[0]  # uniform kernel sampling step
    bandIntens = [0.0 for _ in range(numThetas)]
    for it in range(numThetas):
        # Intensity spectrum at this theta, on the *model* wavelength grid
        spec = [intens[jl][it] for jl in range(numLams)]
        # Interpolate the model spectrum onto the kernel's wavelength grid
        onKern = numpy.interp(kernLams, lambdaScale, spec)
        # Rectangular picket integration (first picket skipped, as upstream)
        total = 0.0
        for il in range(1, nKern):
            total = total + (kernTrans[il] * onKern[il] * dLam)
        bandIntens[it] = total
    return bandIntens
#//
#//
def filterSet():
    """Return the photometric filter transmission curves used for synthetic
    photometry: Bessell-Johnson UxBxBVRI plus Johnson HJK.

    Returns
    -------
    filterCurves : list
        filterCurves[band][0][il] = wavelength (converted from nm to cm)
        filterCurves[band][1][il] = transmission (0.0 - 1.0)
        with numLambdaFilt = 25 samples per band.  Band order:
        0 Ux, 1 Bx, 2 B, 3 V, 4 R, 5 I, 6 H, 7 J, 8 K.

    Data sources:
      UxBxBVRI: Bessell, M. S., 1990, PASP, 102, 1181
        (http://ulisse.pd.astro.it/Astro/ADPS/Systems/Sys_136/index_136.html)
      HJK: Johnson, H. L., 1965, ApJ, 141, 923
        (http://ulisse.pd.astro.it/Astro/ADPS/Systems/Sys_033/index_033.html)
    """
    numBands = 9       # Bessell-Johnson UxBxBVRI + Johnson HJK
    numLambdaFilt = 25
    # Each band is a (wavelengths in nm, transmissions) pair.  Bands whose
    # published tables are shorter than numLambdaFilt are padded by repeating
    # the last wavelength with zero transmission, exactly as in the original
    # element-by-element initialization.
    bandUx = (
        [300.0, 305.0, 310.0, 315.0, 320.0, 325.0, 330.0, 335.0, 340.0,
         345.0, 350.0, 355.0, 360.0, 365.0, 370.0, 375.0, 380.0, 385.0,
         390.0, 395.0, 400.0, 405.0, 410.0, 415.0, 420.0],
        [0.000, 0.016, 0.068, 0.167, 0.287, 0.423, 0.560, 0.673, 0.772,
         0.841, 0.905, 0.943, 0.981, 0.993, 1.000, 0.989, 0.916, 0.804,
         0.625, 0.423, 0.238, 0.114, 0.051, 0.019, 0.000])
    bandBx = (
        [360.0, 370.0, 380.0, 390.0, 400.0, 410.0, 420.0, 430.0, 440.0,
         450.0, 460.0, 470.0, 480.0, 490.0, 500.0, 510.0, 520.0, 530.0,
         540.0, 550.0, 560.0, 560.0, 560.0, 560.0, 560.0],
        [0.000, 0.026, 0.120, 0.523, 0.875, 0.956, 1.000, 0.998, 0.972,
         0.901, 0.793, 0.694, 0.587, 0.470, 0.362, 0.263, 0.169, 0.107,
         0.049, 0.010, 0.000, 0.000, 0.000, 0.000, 0.000])
    bandB = (
        [360.0, 370.0, 380.0, 390.0, 400.0, 410.0, 420.0, 430.0, 440.0,
         450.0, 460.0, 470.0, 480.0, 490.0, 500.0, 510.0, 520.0, 530.0,
         540.0, 550.0, 560.0, 560.0, 560.0, 560.0, 560.0],
        [0.000, 0.030, 0.134, 0.567, 0.920, 0.978, 1.000, 0.978, 0.935,
         0.853, 0.740, 0.640, 0.536, 0.424, 0.325, 0.235, 0.150, 0.095,
         0.043, 0.009, 0.000, 0.000, 0.000, 0.000, 0.000])
    bandV = (
        [470.0, 480.0, 490.0, 500.0, 510.0, 520.0, 530.0, 540.0, 550.0,
         560.0, 570.0, 580.0, 590.0, 600.0, 610.0, 620.0, 630.0, 640.0,
         650.0, 660.0, 670.0, 680.0, 690.0, 700.0, 700.0],
        [0.000, 0.030, 0.163, 0.458, 0.780, 0.967, 1.000, 0.973, 0.898,
         0.792, 0.684, 0.574, 0.461, 0.359, 0.270, 0.197, 0.135, 0.081,
         0.045, 0.025, 0.017, 0.013, 0.009, 0.000, 0.000])
    bandR = (
        [550.0, 560.0, 570.0, 580.0, 590.0, 600.0, 610.0, 620.0, 630.0,
         640.0, 650.0, 660.0, 670.0, 680.0, 690.0, 700.0, 710.0, 720.0,
         730.0, 740.0, 750.0, 800.0, 850.0, 900.0, 900.0],
        [0.00, 0.23, 0.74, 0.91, 0.98, 1.00, 0.98, 0.96, 0.93,
         0.90, 0.86, 0.81, 0.78, 0.72, 0.67, 0.61, 0.56, 0.51,
         0.46, 0.40, 0.35, 0.14, 0.03, 0.00, 0.000])
    bandI = (
        [700.0, 710.0, 720.0, 730.0, 740.0, 750.0, 760.0, 770.0, 780.0,
         790.0, 800.0, 810.0, 820.0, 830.0, 840.0, 850.0, 860.0, 870.0,
         880.0, 890.0, 900.0, 910.0, 920.0, 920.0, 920.0],
        [0.000, 0.024, 0.232, 0.555, 0.785, 0.910, 0.965, 0.985, 0.990,
         0.995, 1.000, 1.000, 0.990, 0.980, 0.950, 0.910, 0.860, 0.750,
         0.560, 0.330, 0.150, 0.030, 0.000, 0.000, 0.000])
    # H band digitized by lburns /06:
    bandH = (
        [1460, 1480, 1500, 1520, 1540, 1550, 1560, 1580, 1600,
         1610, 1620, 1640, 1660, 1670, 1680, 1690, 1700, 1710,
         1720, 1740, 1760, 1780, 1800, 1820, 1840],
        [0.000, 0.150, 0.440, 0.860, 0.940, 0.960, 0.980, 0.950, 0.990,
         0.990, 0.990, 0.990, 0.990, 0.990, 0.990, 0.990, 0.990, 0.970,
         0.950, 0.870, 0.840, 0.710, 0.520, 0.020, 0.000])
    # J band digitized by lburns /06:
    bandJ = (
        [1040, 1060, 1080, 1100, 1120, 1140, 1160, 1180, 1190,
         1200, 1210, 1220, 1230, 1240, 1250, 1260, 1280, 1300,
         1320, 1340, 1360, 1380, 1400, 1420, 1440],
        [0.000, 0.020, 0.110, 0.420, 0.320, 0.470, 0.630, 0.730, 0.750,
         0.770, 0.790, 0.810, 0.820, 0.830, 0.850, 0.880, 0.940, 0.910,
         0.790, 0.680, 0.040, 0.110, 0.070, 0.030, 0.000])
    # K band digitized by lburns /06:
    bandK = (
        [1940, 1960, 1980, 2000, 2020, 2040, 2060, 2080, 2100,
         2120, 2140, 2160, 2180, 2200, 2220, 2240, 2260, 2280,
         2300, 2320, 2340, 2380, 2400, 2440, 2480],
        [0.000, 0.120, 0.200, 0.300, 0.550, 0.740, 0.550, 0.770, 0.850,
         0.900, 0.940, 0.940, 0.950, 0.940, 0.960, 0.980, 0.970, 0.960,
         0.910, 0.880, 0.840, 0.750, 0.640, 0.010, 0.000])
    bands = [bandUx, bandBx, bandB, bandV, bandR, bandI, bandH, bandJ, bandK]
    # Copy into the mutable [band][2][numLambdaFilt] structure callers expect:
    filterCurves = [[list(waves), list(trans)] for (waves, trans) in bands]
    # Convert wavelengths from nm to cm (nm2cm is a module-level constant):
    for ib in range(numBands):
        for il in range(numLambdaFilt):
            filterCurves[ib][0][il] = filterCurves[ib][0][il] * nm2cm
    return filterCurves
#}; //filterSet
#/* In case it's ever needed again...
#//General convolution method
#// ***** Function to be convolved and kernel function are expected to *already* be
#// interpolated onto same abssica grid!
#//
def convol(x, yFunction, kernel):
    """General discrete convolution of yFunction with kernel.

    ***** The function to be convolved and the kernel are expected to
    *already* be interpolated onto the same abscissa grid!

    x         : abscissa grid - currently unused; we integrate in pixel
                space, not physical wavelength space, so deltaX is
                implicitly unity (power is conserved if the kernel is
                area-normalized)
    yFunction : sampled function to be convolved
    kernel    : convolution kernel

    Returns a new list of len(yFunction).  The first and last
    ceil(len(kernel)/2) elements cannot be convolved and are copied through
    unchanged.
    """
    ySize = len(yFunction)
    kernelSize = len(kernel)
    halfKernelSize = math.ceil(kernelSize / 2)
    yFuncConv = [0.0 for i in range(ySize)]
    # First halfKernelSize elements of yFunction cannot be convolved:
    for i in range(halfKernelSize):
        yFuncConv[i] = yFunction[i]
    # Convolution proper.
    # NOTE(review): for odd kernels the window y[offset .. offset+kernelSize-1]
    # is centred one pixel to the left of i - preserved from the original;
    # confirm whether that shift is intended.
    offset = 0
    for i in range(halfKernelSize, ySize - halfKernelSize):
        accum = 0.0
        for j in range(kernelSize):
            accum = accum + (kernel[j] * yFunction[j + offset])
        yFuncConv[i] = accum
        offset += 1
    # Last halfKernelSize elements of yFunction cannot be convolved.
    # BUGFIX: this loop previously started at (ySize - halfKernelSize - 1),
    # which overwrote the last validly convolved element with the raw input.
    for i in range(ySize - halfKernelSize, ySize):
        yFuncConv[i] = yFunction[i]
    return yFuncConv
#}; //end method convol
"""
*/
/**
*
* THIS VERSION works with spectrum synthesis output returned from server
* in GrayStarServer
*"""
def eqWidthSynth(flux, linePoints):
    """Return the equivalent width, in pm (picometers), of EVERYTHING in the
    synthesis region as a single value.

    Isolate the synthesis region to a single line to get a clean result for
    that line.  The spectrum is assumed to be continuum-rectified before
    this is called.

    flux       : 2 x (numPoints+1) array; row 0 holds the rectified flux -
                 the numPoints+1st value is the line-centre monochromatic
                 continuum flux
    linePoints : wavelength points of the synthesis region, in cm
    """
    Wlambda = 0.0  # equivalent width accumulator, in nm until final conversion
    numPoints = len(linePoints)
    # Extended trapezoid rule over the line depression (1 - rectified flux).
    # (Removed the unused logE constant and the unused logDelta=log(delta),
    # which could raise on a non-increasing grid despite never being used.)
    lastInteg = 1.0 - flux[0][0]
    # NOTE(review): the final interval [numPoints-2, numPoints-1] is not
    # accumulated (the range stops at numPoints-1) - preserved as-is; confirm
    # whether this edge trimming is deliberate.
    for il in range(1, numPoints-1):
        delta = linePoints[il] - linePoints[il - 1]
        delta = delta * 1.0E+7  # cm to nm - W_lambda accumulated in nm
        integ = 1.0 - flux[0][il]
        integ2 = 0.5 * (lastInteg + integ)  # trapezoid panel mean
        term = integ2 * delta
        Wlambda = Wlambda + term
        lastInteg = integ
    # Convert area from nm to pm - picometers:
    Wlambda = Wlambda * 1.0E3
    return Wlambda
#};
def fourier(numThetas, cosTheta, filtIntens):
    """Discrete cosine and sine Fourier transform of an input narrow-band
    specific-intensity limb profile.

    We interpret theta/(pi/2), with theta measured from the local surface
    normal, as the spatial-domain "x" coordinate - this is INDEPENDENT of
    the distance to, and linear radius of, the star.

    Parameters
    ----------
    numThetas  : number of angle samples in the input half-profile
    cosTheta   : cosTheta[1][i] holds cos(theta_i) (row 0 is not used here)
    filtIntens : band intensity at each angle; filtIntens[0] (disc centre)
                 is used to normalize the profile

    Returns
    -------
    ft : 3 x (numX-1) list -
         row 0: wavenumber, spatial frequency k = 2*pi/lambda (radians)
         row 1: cosine transform (real component)
         row 2: sine transform (imaginary component)
    """
    pi = math.pi #a handy enough wee quantity
    halfPi = pi / 2.0
    #Number of sample points in the full intensity profile I(theta),
    #theta = -pi/2 to pi/2 RAD:
    numX0 = 2 * numThetas - 1
    #We have as input the intensity half-profile I(cos(theta)), cos(theta)
    #running from 1 to 0.  Create the doubled root-intensity profile
    #sqrt(I(theta/halfPi)), theta/halfPi = -1 to 1.  This approach assumes
    #the real (cosine) and imaginary (sine) components are in phase.
    rootIntens2 = [0.0 for i in range(numX0)]
    x0 = [0.0 for i in range(numX0)]
    #Negative-x half of the doubled profile (limb to centre):
    j = 0
    for i in range(numThetas-1, 0, -1):
        x0[j] = -1.0*math.acos(cosTheta[1][i]) / halfPi
        normIntens = filtIntens[i] / filtIntens[0] #normalize to disc centre
        rootIntens2[j] = math.sqrt(normIntens)
        j+=1
    #NOTE(review): the centre element (index numThetas-1, x0 = 0) is never
    #assigned by either loop, so rootIntens2 stays 0.0 there instead of the
    #normalized centre value 1.0 - confirm whether this is intended.
    #Positive-x half of the doubled profile (centre to limb):
    for i in range(numThetas, numX0):
        j = i - (numThetas-1)
        x0[i] = math.acos(cosTheta[1][j]) / halfPi
        normIntens = filtIntens[j] / filtIntens[0] #normalize to disc centre
        #NOTE(review): the negative half stores sqrt(normIntens) but this
        #half stores normIntens directly (the sqrt below is commented out) -
        #asymmetric; confirm which form is intended.
        #rootIntens2[i] = Math.sqrt(normIntens);
        rootIntens2[i] = normIntens
    #Create the uniformly sampled spatial domain ("x") and the complementary
    #spatial-frequency ("k") domain.  We're interpreting theta/halfPi with
    #respect to the local surface normal at the surface of the star as the
    #spatial domain, "x".
    #NOTE(review): the x grid spans [-2, 1] although the doubled profile
    #spans [-1, 1] - confirm the asymmetric lower limit is intended.
    minX = -2.0
    maxX = 1.0
    numX = 100 #(is also the number of "k" samples)
    deltaX = (maxX - minX) / numX
    #Complementary limits in the "k" domain; k = 2pi/lambda (radians)
    # - the lowest k corresponds to one half spatial wavelength (lambda) = 2
    #   (i.e. 1.0 - (-1.0)):
    maxLambda = 2.0 * 2.0
    # - the highest k has to do with the number of points sampling x:
    #   several x points per shortest lambda:
    minLambda = 8.0 * 2.0 * deltaX
    maxK = 2.0 * pi / minLambda #"right-going" waves
    minK = -1.0 * maxK #"left-going" waves
    deltaK = (maxK - minK) / numX
    x = [0.0 for i in range(numX)]
    k = [0.0 for i in range(numX)]
    for i in range(numX):
        ii = 1.0 * i
        x[i] = minX + ii*deltaX
        k[i] = minK + ii*deltaK
    #Interpolate the rootIntens2(theta/halfPi) signal onto the uniform
    #spatial sampling, pointwise (the vectorized interpolV didn't work here):
    rootIntens3 = [0.0 for i in range(numX)]
    for i in range(numX):
        rootIntens3[i] = ToolBox.interpol(x0, rootIntens2, x[i])
    #Returned variable ft:
    # Row 0: wavenumber, spatial frequency, k (radians)
    # Row 1: cosine transform (real component)
    # Row 2: sine transform (imaginary component)
    ft = [ [ 0.0 for i in range(numX-1) ] for j in range(3) ]
    #Outer loop is over the elements of the vector holding the transform at
    #each spatial frequency "k":
    for ik in range(numX-1):
        #initialize ft at this frequency
        ft[0][ik] = k[ik]
        rootFtCos = 0.0
        rootFtSin = 0.0
        ft[1][ik] = 0.0
        ft[2][ik] = 0.0
        #Inner loop is a cumulative summation over spatial positions "x" -
        #the Fourier cosine and sine series:
        for ix in range(numX-1):
            argument = -1.0 * k[ik] * x[ix]
            #cosine series (real part):
            rootFtCos = rootFtCos + rootIntens3[ix] * math.cos(argument)
            #sine series (imaginary part):
            rootFtSin = rootFtSin + rootIntens3[ix] * math.sin(argument);
        ft[1][ik] = rootFtCos #amplitude, not power (no squaring applied)
        ft[2][ik] = rootFtSin
    return ft
#}; //end method fourier
| 40,742
| 34.305893
| 178
|
py
|
ChromaStarPy
|
ChromaStarPy-master/VegaHgamma.py
|
#
#
# Custom filename tags to distinguish this run's output from other runs
project = "Project"
runVers = "Run"

# Default plot - select ONE of:
#   "structure", "sed", "spectrum", "ldc", "ft", "tlaLine"
makePlot = "spectrum"

# Spectrum synthesis mode
#  - uses the model in Restart.py with a minimal structure calculation
specSynMode = False

# Castelli & Kurucz model atmosphere
teff = 9550.0        # K
logg = 3.95          # cgs
log10ZScale = -0.5   # [A/H]
massStar = 2.0       # solar masses
xiT = 2.0            # km/s, microturbulence
logHeFe = 0.0        # [He/Fe]
logCO = 0.0          # [C/O]
logAlphaFe = 0.0     # [alpha-elements/Fe]

# Spectrum synthesis region
lambdaStart = 429.0  # nm
lambdaStop = 439.0   # nm

# Output file name stem encoding the key run parameters:
fileStem = (f"{project}"
            f"-{round(teff, 7)}-{round(logg, 3)}-{round(log10ZScale, 3)}"
            f"-{round(lambdaStart, 5)}-{round(lambdaStop, 5)}"
            f"-{runVers}")

lineThresh = -3.0    # min log(kapLine/kapCnt) for inclusion at all - currently also used as "lineVoigt"
voigtThresh = -3.0   # min log(kapLine/kapCnt) for Voigt treatment - currently unused; all lines get Voigt
logGammaCol = 0.0
logKapFudge = 0.0
macroV = 2.0         # km/s, macroturbulent broadening
rotV = 275.0         # km/s, equatorial rotational velocity
rotI = 5.0           # degrees, inclination of the rotation axis
RV = 0.0             # km/s, radial velocity
vacAir = "vacuum"
sampling = "fine"

# Performance vs realism
nOuterIter = 12      # no. of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12      # no. of inner (ion fraction) - Pe iterations
ifTiO = 1            # whether to include TiO JOLA bands in synthesis

# Gaussian filter for limb-darkening curve and fourier transform
diskLambda = 500.0   # nm
diskSigma = 0.01     # nm

# Two-level atom and spectral line
userLam0 = 589.592   # nm
userA12 = 6.24       # A_12 logarithmic abundance = log_10(N/N_H) + 12
userLogF = -0.495    # log(f) oscillator strength - saturated line
userStage = 0        # ionization stage of user species (0 (I) - 3 (IV))
userChiI1 = 5.139    # ground state chi_I, eV
userChiI2 = 47.29    # 1st ionized state chi_I, eV
userChiI3 = 71.62    # 2nd ionized state chi_I, eV
userChiI4 = 98.94    # 3rd ionized state chi_I, eV
userChiL = 0.0       # lower atomic E-level, eV
userGw1 = 2          # ground state stat. weight or partition fn (stage I) - unitless
userGw2 = 1          # ground state stat. weight or partition fn (stage II) - unitless
userGw3 = 1          # ground state stat. weight or partition fn (stage III) - unitless
userGw4 = 1          # ground state stat. weight or partition fn (stage IV) - unitless
userGwL = 2          # lower E-level stat. weight - unitless
userMass = 22.9      # amu
userLogGammaCol = 1.0  # log_10 Lorentzian broadening enhancement factor
| 2,690
| 33.063291
| 116
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Input.py
|
#
#
# Custom filename tags to distinguish this run's output from other runs
project = "SunEarth"
runVers = "Test"

# Project-specific notes - test case:
#   Star: Sun
#   Spectrum: Na I D region
#   Lightcurve: Earth, in the plane of the ecliptic

# Default plots - select the ones wanted if plotting 'inline':
makePlotStruc = True
makePlotSED = True
makePlotSpec = True
makePlotLDC = True
makePlotFT = True
makePlotTLA = True
makePlotTrans = True
makePlotPPress = False
# Chemical species for the partial-pressure plot:
plotSpec = "H"

# Spectrum synthesis mode
#  - uses the model in Restart.py with a minimal structure calculation
specSynMode = True
if specSynMode:
    runVers += "SS"

# Model atmosphere
teff = 5777.0        # K
logg = 4.44          # cgs
log10ZScale = -0.0   # [A/H]
massStar = 1.00      # solar masses
xiT = 1.0            # km/s, microturbulence
logHeFe = 0.0        # [He/Fe]
logCO = 0.0          # [C/O]
logAlphaFe = 0.0     # [alpha-elements/Fe]

# Spectrum synthesis region
lambdaStart = 588.5  # nm
lambdaStop = 589.5   # nm

# Output file name stem encoding the key run parameters:
fileStem = (f"{project}"
            f"-{round(teff, 7)}-{round(logg, 3)}-{round(log10ZScale, 3)}"
            f"-{round(lambdaStart, 5)}-{round(lambdaStop, 5)}"
            f"-{runVers}")

lineThresh = -3.0    # min log(kapLine/kapCnt) for inclusion at all - currently also used as "lineVoigt"
voigtThresh = -3.0   # min log(kapLine/kapCnt) for Voigt treatment - currently unused; all lines get Voigt
logGammaCol = 0.5    # logarithmic VdW damping enhancement
logKapFudge = 0.0    # continuum opacity fudge factor
macroV = 1.0         # km/s, macroscopic broadening dispersion
rotV = 2.0           # km/s, equatorial surface rotational velocity
rotI = 90.0          # degrees, inclination of rotational axis AND orbital axis
RV = 0.0             # km/s, system radial velocity
vacAir = "vacuum"    # wavelength scale ('air' OR 'vacuum')
sampling = "fine"    # density of freq points ('fine' is useful, 'coarse' for quick checking)

# Performance vs realism
nOuterIter = 12      # no. of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12      # no. of inner (ion fraction) - Pe iterations
ifMols = 1           # whether to include TiO JOLA bands in synthesis

# Gaussian filter for limb-darkening curve (LDC) and fourier transform (FT)
diskLambda = 500.0   # band centre wavelength, nm
diskSigma = 0.01     # band dispersion, nm

# Two-level atom and spectral line
# Example: Na I D lambda 5896:
userLam0 = 589.592   # nm
userA12 = 6.24       # A_12 logarithmic abundance = log_10(N/N_H) + 12
userLogF = -0.495    # log(f) oscillator strength - saturated line
userStage = 0        # ionization stage of user species (0 (I) - 3 (IV))
userChiI1 = 5.139    # ground state chi_I, eV
userChiI2 = 47.29    # 1st ionized state chi_I, eV
userChiI3 = 71.62    # 2nd ionized state chi_I, eV
userChiI4 = 98.94    # 3rd ionized state chi_I, eV
userChiL = 0.0       # lower atomic E-level, eV
userGw1 = 2          # ground state stat. weight or partition fn (stage I) - unitless
userGw2 = 1          # ground state stat. weight or partition fn (stage II) - unitless
userGw3 = 1          # ground state stat. weight or partition fn (stage III) - unitless
userGw4 = 1          # ground state stat. weight or partition fn (stage IV) - unitless
userGwL = 2          # lower E-level stat. weight - unitless
userMass = 22.9      # amu
userLogGammaCol = 1.0  # log_10 Lorentzian broadening enhancement factor

# Planetary transit parameters for transit light-curve modelling.
# ** Also depends on the rotI chosen above!
# The orbital period is not a free parameter - it is set by the size of the
# orbit and the planet's mass through the basic form of Kepler's 3rd law.
rJupiter = 11.21     # Earth radii - handy reference
ifTransit = True     # set to True for an exoplanet lightcurve
# Data source: Wikipedia for now...
rOrbit = 1.00        # AU
rPlanet = 1.00       # Earth radii
#mPlanet = 1.0       # Earth masses - not needed (yet?)
| 3,920
| 35.990566
| 116
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Gauss2.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 16:29:34 2017
@author: ishort
"""
import math
""" procedure to generate Gaussian of unit area when passed a FWHM"""
#IDL: PRO GAUSS2,FWHM,LENGTH,NGAUS
def gauss2(fwhm, length):
    """Generate a discretely sampled Gaussian of unit area.

    Port of the IDL procedure GAUSS2.

    fwhm   : full width at half maximum, in sample units
    length : number of samples; the profile is centred at index length/2

    Returns a list of `length` Gaussian samples.
    """
    # Shape constant fixed by requiring f(x = 0.5*FWHM) = 0.5 * f(x = 0):
    char = -1.0 * math.log(0.5) / (0.5 * 0.5 * fwhm * fwhm)
    # Amplitude fixed by requiring unit area under the Gaussian:
    amp = math.sqrt(char / math.pi)
    profile = []
    for idx in range(length):
        x = 1.0 * (idx - length / 2)
        profile.append(amp * math.exp(-1.0 * char * x * x))
    return profile
| 955
| 21.761905
| 72
|
py
|
ChromaStarPy
|
ChromaStarPy-master/CSGas.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 2 10:00:46 2019
@author: Philip D. Bennett
Port from FORTRAN to Python: Ian Short
"""
"""
This is the main source file for GAS.
"""
"""
/*
* The openStar project: stellar atmospheres and spectra
*
* ChromaStarPy/GAS
*
* Version 2019-05-02
* Use date based versioning with ISO 8601 date (YYYY-MM-DD)
*
* May 2019
*
* C. Ian Short
* Philip D. Bennett
*
* Saint Mary's University
* Department of Astronomy and Physics
* Institute for Computational Astrophysics (ICA)
* Halifax, NS, Canada
* * ian.short@smu.ca
* www.ap.smu.ca/~ishort/
*
*
* Ported from FORTRAN77
*
*
* Code provided "as is" - there is no formal support
*
*/
"""
"""/*
* The MIT License (MIT)
* Copyright (c) 2019 C. Ian Short
*
* Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
*
* The above copyright notice and this permission notice shall
be included in all copies or substantial portions of the
Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
*
*/
"""
#from decimal import Decimal as D
#plotting:
#import matplotlib
#import matplotlib.pyplot as plt
#%matplotlib inline
import math
import numpy
#from scipy.linalg.blas import daxpy
#from scipy.linalg.blas import ddot
#from scipy.linalg.blas import dscal
#from scipy.linalg.blas import idamax
#from Documents.ChromaStarPy.linpack.Dgesl import dgesl
#from Documents.ChromaStarPy.linpack.Dgefa import dgefa
import Dgesl
import Dgefa
#from Documents.ChromaStarPy.GAS.blas.Daxpy import daxpy
#from Documents.ChromaStarPy.GAS.blas.Ddot import ddot
#from Documents.ChromaStarPy.GAS.blas.Dscal import dscal
#from Documents.ChromaStarPy.GAS.blas.Idamax import idamax
#from Documents.ChromaStarPy.GAS.BlockData import *
#from Documents.ChromaStarPy.GAS.GsRead2 import
import CSBlockData
#import GsRead
import CSGsRead2
def ten(xdum):
    """Return 10**xdum (antilog base 10).

    FORTRAN-port helper originally computed as exp(2.302585093 * x); that
    truncated ln(10) constant (full value 2.302585092994046...) introduced a
    ~1e-9 relative error on every call. Using the power operator directly is
    both simpler and as accurate as the platform pow().
    """
    return 10.0 ** xdum
def isign(a, b):
    """Port of the FORTRAN ISIGN intrinsic: transfer the sign of b onto |a|.

    Returns |a| when b >= 0 and -|a| when b < 0 (FORTRAN treats b == 0 as
    positive). The original port enumerated three numpy.sign branch cases;
    this closed form is equivalent for every sign combination of a and b
    and avoids a numpy call on scalars.
    """
    return abs(a) if b >= 0 else -abs(a)
def gas(isolv, temp, pt, pe0, p0, neq, tol, maxit):
#Returned structure
# a, ngit, pe, pd, pp, ppix, gmu, rho
#c cis: parameter tol is argument GTOL
#c cis: parameter * is argument PRINT - !!??
#c cis: INPUT: ISOLV,T,P, PE0,P0,NEQ, GTOL,MAXGIT, PRINT (??)
#c cis: OUTPUT: A, NGIT, PE,PD,PP,PPIX,GMU,RHO (??)
#c
#c GAS: Calculates the equilibrium abundances of each molecular and ionic
#c species specified in "gsread", at the given temperature T and
#c pressure P.
#c
#FORTRAN commons - needed
#common /consts/ pi,sbcon,kbol,cvel,gcon,hpl,hmass,t0,everg
#common /gasp/ name,ip,comp,awt,nspec,natom,itab,ntab,indx,
# iprint,gsinit,print1
#common /gasp2/ ipr,nch,nel,ntot,nat,zat,neut,idel,indsp,
# indzat,iat,natsp,iatsp
#common /lin/ nlin1,lin1,linv1,nlin2,lin2,linv2
#common /equil/ logk,logwt,it,kt,type
#common /opacty/ chix,nix,nopac,ixa,ixn,opinit,opflag,opchar,iopt
#Try this:
#global pi, sbcon, kbol, cvel, gcon, hpl, hmass, t0, everg # /consts/
global kbol, hmass, t0 # /consts/
global name, ip, comp, awt, nspec, natom, itab, ntab, indx, iprint, gsinit, print0 #/gasp/
global ipr, nch, nel, ntot, nat, zat, neut, idel, indsp, indzat, iat, natsp, iatsp #/gasp2/
global nlin1, lin1, linv1, nlin2, lin2, linv2 #/lin/
global logk, logwt, it, kt, type0 #equil
#global chix, nix, nopac, ixa, ixn, opinit, opflag, opchar, iopt #/opacty/
global chix, nix, ixa, ixn #/opacty/
#c
outString=""
kbol = CSBlockData.kbol
hmass = CSBlockData.hmass
t0 = CSBlockData.t0
#c
#ip = [0.0e0 for i in range(150)]
#ip = GsRead.ip
ip = CSGsRead2.ip
#comp = [0.0e0 for i in range(40)]
#comp = GsRead.comp
comp = CSGsRead2.comp
#awt = [0.0e0 for i in range(150)]
#awt = GsRead.awt
awt = CSGsRead2.awt
#itab = [0 for i in range(83)]
itab = CSBlockData.itab
#ntab = [0 for i in range(5)]
#indx = [ [ [ [ [0 for i in range(2)] for j in range(5) ] for k in range(7) ] for l in range(26) ] for m in range(4) ]
#indx = GsRead.indx
indx = CSGsRead2.indx
#name = [' ' for i in range(150)]
#name = GsRead.name
name = CSGsRead2.name
#gsinit = False
#print0 = False
print0 = CSBlockData.print0
#iprint = GsRead.iprint
iprint = CSGsRead2.iprint
#ipr = [0 for i in range(150)]
#ipr = GsRead.ipr
ipr = CSGsRead2.ipr
#nch = [0 for i in range(150)]
#nch = GsRead.nch
nch = CSGsRead2.nch
#nel = [0 for i in range(150)]
#nel = GsRead.nel
nel = CSGsRead2.nel
#ntot = [0 for i in range(150)]
#ntot = GsRead.ntot
ntot = CSGsRead2.ntot
#nat = [ [0 for i in range(150)] for j in range(5) ]
#nat = GsRead.nat
nat = CSGsRead2.nat
#zat = [ [0 for i in range(150)] for j in range(5) ]
#zat = GsRead.zat
zat = CSGsRead2.zat
#neut = [0 for i in range(150)]
#neut = GsRead.neut
neut = CSGsRead2.neut
#idel = [0 for i in range(150)]
#indsp = [0 for i in range(40)]
#indsp = GsRead.indsp
indsp = CSGsRead2.indsp
#indzat = [0 for i in range(100)]
#indzat = GsRead.indzat
indzat = CSGsRead2.indzat
#iat = [0 for i in range(150)]
#iat = GsRead.iat
iat = CSGsRead2.iat
#natsp = [0 for i in range(40)]
#natsp = GsRead.natsp
natsp = CSGsRead2.natsp
#iatsp = [ [0 for i in range(40)] for j in range(40) ]
#iatsp = GsRead.iatsp
iatsp = CSGsRead2.iatsp
#c
#lin1 = [0 for i in range(40)]
#lin1 = GsRead.lin1
lin1 = CSGsRead2.lin1
#lin2 = [0 for i in range(40)]
#lin2 = GsRead.lin2
lin2 = CSGsRead2.lin2
#linv1 = [0 for i in range(40)]
#linv1 = GsRead.linv1
linv1 = CSGsRead2.linv1
#linv2 = [0 for i in range(40)]
#linv2 = GsRead.linv2
linv2 = CSGsRead2.linv2
#nlin1 = GsRead.nlin1
nlin1 = CSGsRead2.nlin1
#nlin2 = GsRead.nlin2
nlin2 = CSGsRead2.nlin2
#c
#logk = [ [0.0e0 for i in range(150)] for j in range(5) ]
#logwt = [0.0e0 for i in range(150)]
it = [0.0e0 for i in range(150)]
kt = [0.0e0 for i in range(150)]
#type0 = [0 for i in range(150)]
#type0 = GsRead.type0
type0 = CSGsRead2.type0
#c
#ixa = [ [0 for i in range(70)] for j in range(5) ]
#ixn = [0 for i in range(70)]
#ixn = GsRead.ixn
ixn = CSGsRead2.ixn
#chix = [' ' for i in range(70)]
#opchar = [' ' for i in range(25)]
#opflag = [False for i in range(25)]
#opinit = False
nix = CSBlockData.nix
#natom = GsRead.natom
natom = CSGsRead2.natom
#nspec = GsRead.nspec
nspec = CSGsRead2.nspec
#logk = GsRead.logk
logk = CSGsRead2.logk
#logwt = GsRead.logwt
logwt = CSGsRead2.logwt
#c
#print("GAS: neq ", neq, " nlin1 ", nlin1, " nlin2 ", nlin2)
a = [ [0.0e0 for i in range(neq)] for j in range(neq) ]
b = [0.0e0 for i in range(40)]
p = [0.0e0 for i in range(40)]
pp = [0.0e0 for i in range(150)]
pp0 = [0.0e0 for i in range(150)]
al = [0.0e0 for i in range(25)]
ppix = [0.0e0 for i in range(70)]
#p0 = [0.0e0 for i in range(40)]
nd = 0.0e0
logt = 0.0e0
logit = 0.0e0
logkt = 0.0e0
namet = ''
namemx = ''
iperm = [0 for i in range(180)]
metals = 'Z'
ename = 'e-'
blank = ' '
rhs = 'rhs'
job = 0
#c
#c Calculate equilibrium constants for each species in table.
#c N.B. Freeze the chemical equilibrium for T < 1200K.
#c
t = temp
if (t < 1200.0e0):
t = 1200.0e0
th = t0/t
logt = 2.5e0*math.log10(t)
for n in range(0, nspec):
ityp = type0[n]
nq = nch[n]
ich = isign(1, nq)
if (ityp == 3 or ityp == 4):
kt[n] = kt[neut[n]]
if ( ((nch[n] - nch[n-1]) != ich) or (nch[n-1] == 0) ):
logit = 0.0e0
logit = logit + ich*(-th*ip[n] + logt + logwt[n] - 0.48e0)
it[n] = ten(logit)
elif (ityp == 2):
logkt = ( ((logk[4][n]*th + logk[3][n])*th + logk[2][n])*th + logk[1][n] )*th + logk[0][n]
kt[n] = ten(logkt)
it[n] = 1.0e0
else:
kt[n] = 1.0e0
it[n] = 1.0e0
#c
#c Update main arrays
#c
pe = pe0
#print("pe0 ", pe0)
for j in range(0, natom):
p[j] = p0[j]
#print("j ", j, " p0 ", p0[j])
ngit = 0
namemx = blank
delmax = 0.0e0
if (isolv != 0):
if (isolv == 1):
compz = 0.0e0
pzs = 0.0e0
for j in range(natom):
nn = indsp[j]
if (ipr[nn] == 2):
nnp = indx[2][itab[zat[0][nn]-1]][0][0][0]
compz = compz + comp[j]
if (pe > 0.0e0):
pzs = pzs + (1.0e0 + it[nnp]/pe) * p[j]
else:
pzs = pzs + p[j]
#print("print0 ", print0)
if (print0):
#print("T ", "P ", t, pt)
if (isolv == 1):
print("0 # Name Delmax ")
for k in range(0, nlin1):
print(name[indsp[linv1[k]]])
print("ngit ", "namemx ", "delmax ", ngit, namemx, delmax)
for k in range(0, nlin1):
print(p[linv1[k]])
elif (isolv == 2):
print("0 # Name Delmax ")
for k in range(0, nlin2):
print(name[indsp[linv2[k]]])
print("ngit ", "namemx ", "delmax ", ngit, namemx, delmax)
for k in range(0, nlin2):
print(p[linv2[k]])
"""
c
c Main loop: fill linearized coefficient matrix and rhs vector, and
c solve system for partial pressure corrections.
c ISOLV = 1: Linearize only the partial pressures of the neutral atoms
c for which IPR(j) = 1 (major species). The electron pressure Pe is
c assumed to be given in this case, and so is not included in the
c linearization. this is necessary since most of these electrons
c (at cool temps.) originate from elements not considered in the
c linearization. In order to obtain a good value for Pe in the first
c place, it is necessary to call GAS with ISOLV = 2.
c ISOLV = 2: This linearizes the partial pressures of the neutral atoms
c for which IPR(j) = 1 OR 2. This list of elements should include all
c the significant contributors to the total pressure Pt, as well as the
c electon pressure Pe. Any element (IPR(j) = 3) not included is assumed
c to have a negligible effect on both P and Pe.
c In both cases, the partial pressures of the neutral atoms for elements
c not included in the linearization are calculated directly from the now
c determined pressures of the linearized elements.
c
"""
#316
firstTime = True
while( (delmax > tol) or (firstTime == True) ):
firstTime = False
if (ngit >= maxit):
print('(" *15 Error: Too many iterations in routine GAS")')
print('(" for Isolv, T, P, Pe0= " ')
print(isolv, t, pt, pe0)
#return 1
ngit = ngit + 1
#c
#c Zero coefficient matrix and rhs vector
#c
if (isolv == 1):
nlin = nlin1
elif (isolv == 2):
nlin = nlin2
for jj in range(neq):
for j in range(neq):
a[j][jj] = 0.0e0
b[jj] = 0.0e0
if (isolv == 2):
#c
#c Here the isolv = 2 case is handled. This includes linearization of Pe.
#c
#a[neq][neq] = -1.0e0
a[neq-1][neq-1] = -1.0e0
b[0] = pt
#b[neq] = pe
b[neq-1] = pe
for n in range(nspec):
if (ipr[n] <= 2):
nq = nch[n]
pf = 1.0e0
nelt = nel[n]
for i in range(nelt):
j = indzat[zat[i][n]-1]
pf = pf * p[j]**nat[i][n]
penq = 1.0e0
if (pe > 0.0e0):
penq = pe**nq
pn = it[n]*pf/kt[n]/penq
#c
#c Now fill the matrix and rhs vector of linearized equations
#c
for i in range(nelt):
jj = indzat[zat[i][n]-1]
at = pn*nat[i][n]/p[jj]
kk = lin2[jj]
#if (kk == 0):
if (kk < 0):
print('(" *16 Error: Inconsistency in priority ", "tables")')
print('(" for Isolv, T, P, Pe0= ")')
print(isolv, t, pt, pe0)
#return 1
a[0][kk] = a[0][kk] + (nq + 1)*at
#print("n ", n, " i ", i, " jj ", jj, " kk ", kk)
#print("zat ", zat[i][n]-1, " nat ", nat[i][n], " p ", p[jj], " at ", at, " nq ", nq)
#print("a ", a[0][kk])
if (nlin2 >= 1):
#for k in range(1, nlin2+1):
for k in range(1, nlin2):
j = linv2[k]
a[k][kk] = a[k][kk] + comp[j]*ntot[n]*at
#print("n ", n, " k ", k, " j ", j, " comp ", comp[j], " ntot ", ntot[n], " at ", at)
#print("a ", a[k][kk])
for ii in range(nelt):
jjj = indzat[zat[ii][n]-1]
kkk = lin2[jjj]
if (kkk != 0):
a[kkk][kk] = a[kkk][kk] - nat[ii][n]*at
#print("n ", n, " kk ", kk, " ii ", ii, " jjj ", jjj, " kkk ", kkk, " nat ", nat[ii][n], " at ", at)
#print("a ", a[kkk][kk])
#a[neq][kk] = a[neq][kk] + nq*at
a[neq-1][kk] = a[neq-1][kk] + nq*at
at = 0.0e0
if (pe > 0.0e0):
at = nq*pn/pe
a[0][neq-1] = a[0][neq-1] - (nq + 1)*at
b[0] = b[0] - (nq + 1)*pn
if (nlin2 >= 1):
#for k in range(1, nlin2+1):
for k in range(1, nlin2):
j = linv2[k]
a[k][neq-1] = a[k][neq-1] - comp[j]*ntot[n]*at
b[k] = b[k] - comp[j]*ntot[n]*pn
#print("b ", b[k])
for ii in range(nelt):
jjj = indzat[zat[ii][n]-1]
kkk = lin2[jjj]
if (kkk != 0):
a[kkk][neq-1] = a[kkk][neq-1] + nat[ii][n]*at
b[kkk] = b[kkk] + nat[ii][n]*pn
#print("b ", b[kkk])
#a[neq][neq] = a[neq][neq] - nq*at
a[neq-1][neq-1] = a[neq-1][neq-1] - nq*at
b[neq-1] = b[neq-1] - nq*pn
#print("a ", a[neq-1][neq-1], " b ", b[neq-1])
else:
#c
#c Here the isolv = 1 case is treated. the electron pressure Pe
#c is assumed gven and is not included in the linearization.
#c
#print("****** isolve ne 2 brnach! isolv ", isolv)
sum1 = 0.0e0
sum2 = 0.0e0
for j in range(natom):
nn = indsp[j]
#print("j ", j, " nn ", nn)
if (ipr[nn] == 2):
nnp = indx[2][itab[zat[0][nn]-1]][0][0][0]
#print("zat ", zat[0][nn]-1, " itab ", itab[zat[0][nn]-1],\
# " nnp ", nnp)
fact = it[nnp] + pe
sum1 = sum1 + comp[j]*it[nnp]/fact
sum2 = sum2 + comp[j]*it[nnp]/fact/fact
#print("comp ", comp[j], " it ", it[nnp],\
# " fact ", fact, " sum1 ", sum1)
b[0] = pt - pzs - pe
a[0][nlin1] = 1.0e0
a[0][nlin1+1] = 1.0e0
#print("pt ", pt, " pzs ", pzs, " pe ", pe)
#print("nlin1 ", nlin1, " b[0] ", b[0], " a[0][] ", a[0][nlin1+1], a[0][nlin1+2])
if (nlin1 >= 1):
#for k in range(1, nlin1+1):
for k in range(1, nlin1):
j = linv1[k]
a[k][nlin1] = comp[j]
b[k] = -1.0*comp[j]*pzs
#print("k ", k, " j ", j, " comp ", comp[j], " a () ", a[k][nlin1+1])
pzsrat = 0.0e0
if (compz > 0.0e0):
pzsrat = pzs/compz
a[nlin1][nlin1] = compz - 1.0e0
b[nlin1] = (1.0e0 - compz)*pzs
a[nlin1+1][nlin1] = 0.0e0
if (compz > 0.0e0):
a[nlin1+1][nlin1] = sum1/compz
a[nlin1+1][nlin1+1] = -1.0e0 - sum2*pzsrat
b[nlin1+1] = pe - sum1*pzsrat
#print("compz ", compz, " sum1 ", sum1, " sum2 ", sum2,\
# " pzsrat ", pzsrat)
#print("nlin1+1 ", nlin1+1, " nlin1+2 ", nlin1+2)
#print("a(nlin1+1,nlin1+1) ", a[nlin1+1][nlin1+1],\
# " b(nlin1+1) ", b[nlin1+1],\
# " a(nlin1+2,nlin1+1) ", a[nlin1+2][nlin1+1],\
# " a(nlin1+2,nlin1+2) ", a[nlin1+2][nlin1+2],\
# " b(nlin1+2) ", b[nlin1+2])
for n in range(nspec):
if (ipr[n] <= 1):
nq = nch[n]
pf = 1.0e0
nelt = nel[n]
for i in range(nelt):
j = indzat[zat[i][n]-1]
pf = pf*p[j]**nat[i][n]
penq = 1.0e0
if (pe > 0.0e0):
penq = pe**nq
pn = it[n]*pf/kt[n]/penq
#c
#c Fill the coefficient matrix and rhs vector of linearized eqns
#c
for i in range(nelt):
jj = indzat[zat[i][n]-1]
#print("GAS: n ", n, " name ", name[n], " i ", i," jj ", jj, " p ", p[jj])
at = pn*nat[i][n]/p[jj]
kk = lin1[jj]
#print("i ", i, " jj ", jj, " kk ", kk, " at ", at)
#if (kk == 0):
if (kk < 0):
print('(" *17 Error: Inconsistency in priority tables")')
print('(" for Isolv, T, P, Pe0 = ")', isolv, t, pt, pe0)
#return 1
#print("Before: n ", n, " i ", i, " kk ", kk, " a[0][kk] ", a[0][kk])
a[0][kk] = a[0][kk] + at
#print("a[0][kk] ", a[0][kk])
#print("n ", n, " ntot[n] ", ntot[n])
if (nlin1 >= 1):
#for k in range(1,nlin1+1):
for k in range(1,nlin1):
j = linv1[k]
a[k][kk] = a[k][kk] + comp[j]*ntot[n]*at
for ii in range(nelt):
jjj = indzat[zat[ii][n]-1]
kkk = lin1[jjj]
if (kkk != 0):
a[kkk][kk] = a[kkk][kk] - nat[ii][n]*at
a[nlin1][kk] = a[nlin1][kk] + compz*ntot[n]*at
a[nlin1+1][kk] = a[nlin1+1][kk] + nq*at
at = 0.0e0
if (pe > 0.0e0):
at = nq*pn/pe
a[0][nlin1+1] = a[0][nlin1+1] - at
b[0] = b[0] - pn
if (nlin1 >= 1):
#for k in range(1, nlin1+1):
for k in range(1, nlin1):
j = linv1[k]
a[k][nlin1+1] = a[k][nlin1+1] - comp[j]*ntot[n]*at
b[k] = b[k] - comp[j]*ntot[n]*pn
for ii in range(nelt):
jjj = indzat[zat[ii][n]-1]
kkk = lin1[jjj]
if (kkk != 0):
a[kkk][nlin1+1] = a[kkk][nlin1+1] + nat[ii][n]*at
b[kkk] = b[kkk] + nat[ii][n]*pn
a[nlin1][nlin1+1] = a[nlin1][nlin1+1] - compz*ntot[n]*at
b[nlin1] = b[nlin1] - compz*ntot[n]*pn
a[nlin1+1][nlin1+1] = a[nlin1+1][nlin1+1] - nq*at
b[nlin1+1] = b[nlin1+1] - nq*pn
if (print0):
print('("0 Log of coefficient matrix at iteration #")', ngit)
if (isolv == 1):
for k in range(nlin1):
print(name[indsp[linv1[k]]])
print(metals, ename, rhs)
if (isolv == 2):
# (name(indsp(linv2(k))),k = 1,nlin2),ename,rhs
for k in range(nlin2):
print(name[indsp[linv2[k]]])
print(ename, rhs)
print('(" ")')
neq1 = neq + 1
for i in range(neq):
for j in range(neq):
al[j] = math.log10(abs(a[j][i]) + 1.0e-70)
al[neq1] = math.log10(abs(b[i]) + 1.0e-70)
if (isolv == 1):
if (i <= nlin1):
namet = name[indsp[linv1[i]]]
if (i == nlin1+1):
namet = metals
if (i == nlin1+2):
namet = ename
if (isolv == 2):
if (i <= nlin2):
namet = name[indsp[linv2[i]]]
if (i == nlin2+1):
namet = ename
#print('(" ")', namet)
#for j in range(neq1):
#print(al[j])
#print('(" ")')
#c
#c Now solve the linearized equations.
#c
#FORTRAN subroutine dgefa(a, neq, neq, iperm, info)
#pythonized dgefa returns a tuple:
#print("Before dgefa, a is:")
#for idum in range(neq):
#print("idum ", idum, [a[idum][jdum] for jdum in range(neq)])
#print("b ", [b[kk] for kk in range(neq)])
dgefaReturn = Dgefa.dgefa(a, neq, neq)
a = dgefaReturn[0]
iperm = dgefaReturn[1]
info = dgefaReturn[2]
#print("After dgefa, a is:")
#for idum in range(neq):
#print("idum ", idum, [a[idum][jdum] for jdum in range(neq)])
#print("b ", [b[kk] for kk in range(neq)])
#print("iperm ", [iperm[kk] for kk in range(neq)])
#print("info ", info, " iperm ", iperm)
if (info != 0):
print('(" Info = ",i5," returned from DGEFA in GAS")', info)
#return 1
#Fortanized call call dgesl(a,neq,neq,iperm,b,job)
#print("Before ddgesl, b is:")
#print("b ", b)
b = Dgesl.dgesl(a, neq, neq, iperm, b, job)
#print("After dgesl, a is:")
#for idum in range(neq):
#print("idum ", idum, [a[idum][jdum] for jdum in range(neq)])
#print("b ", [b[kk] for kk in range(neq)])
#print("iperm ", [iperm[kk] for kk in range(neq)])
#print("After ddgesl, b is:")
#print("b ", b)
delmax = 0.0e0
#c
#c First, update the partial pressures for the major species by adding
#c the pressure corrections obtained for each atom from the linearization
#c procedure.
#c
for k in range(nlin):
if (isolv == 1):
j = linv1[k]
if (isolv == 2):
j = linv2[k]
n = indsp[j]
pnew = p[j] + b[k]
if (pnew < 0.0e0):
pnew = abs(pnew)
dp = pnew - p[j]
#print("GAS: k ", k, " j ", j, " n ", n,\
# " b ", b[k], " pnew ", pnew, " p ", p[j], " dp ", dp)
p[j] = pnew
#print("j ", j, " p ", p[j])
if (abs(p[j]/pt) >= 1.0e-15):
delp = abs(dp/p[j])
if (delp > delmax):
namemx = name[n]
delmax = delp
if (isolv == 2):
penew = pe + b[nlin2]
if (penew < 0.0e0):
penew = abs(penew)
dpe = penew - pe
pe = penew
if (abs(pe/pt) >= 1.0e-15):
delpe = abs(dpe/pe)
if (delpe > delmax):
namemx = ename
delmax = delpe
elif (isolv == 1):
pznew = pzs + b[nlin1]
if (pznew < 0.0e0):
pznew = abs(pznew)
dpz = pznew - pzs
pzs = pznew
if (abs(pzs/pt) >= 1.0e-15):
delpz = abs(dpz/pzs)
if (delpz > delmax):
namemx = metals
delmax = delpz
penew = pe + b[nlin1+1]
if (penew < 0.0e0):
penew = abs(penew)
dpe = penew - pe
pe = penew
if (abs(pe/pt) >= 1.0e-15):
delpe = abs(dpe/pe)
if (delpe > delmax):
namemx = ename
delmax = delpe
#c
#c Print out summary line for each iteration
#c
if (print0):
if (isolv == 1):
print('(" ",)', ngit, namemx, delmax, pzs, pe)
for k in range(nlin1):
print(p[linv1[k]])
if (isolv == 2):
print('(" ",)', ngit, namemx, delmax, pe)
for k in range(nlin2):
print(p[linv2[k]])
#print("firstTime ", firstTime)
#print("*** !!! *** ngit ", ngit, " delmax ", delmax, " tol ", tol)
#End while loop 316
#c
#c Calculate the partial pressures of the species included in the above
#c linearization, and also the fictitious total pressure Pd of the gas.
#c
if (isolv == 1):
for j in range(natom):
n = indsp[j]
if (ipr[n] == 2):
np = indx[2][itab[zat[0][n]-1]][0][0][0]
p[j] = comp[j]*pzs*pe/compz/(it[np] + pe)
#print("GAS: j ", j, " n ", n, " np ", np, " comp ", comp[j],\
# " pzs ", pzs, " pe ", pe, " compz ", compz, " it ", it[np])
# I *think* this ends the (isolv != 0) condition on line 290
pd = 0.0e0
pu = 0.0e0
ptot = pe
#print("GAS: pe ", pe)
for n in range(nspec):
ppt = 0.0e0
if (ipr[n] <= 2):
nelt = nel[n]
nq = nch[n]
pf = 1.0e0
for i in range(nelt):
j = indzat[zat[i][n]-1]
pf = pf*p[j]**nat[i][n]
penq = 1.0e0
if (pe > 0.0e0):
penq = pe**nq
ppt = it[n]*pf/kt[n]/penq
#print("1: n ", n, " it ", it[n], " kt ", kt[n], " penq ", penq, " pf ", pf, " ppt ", ppt)
ptot = ptot + ppt
pd = pd + ntot[n]*ppt
pu = pu + awt[n]*ppt
#print("GAS: 1st pp: n ", n, " name ", name[n], " ppt ", ppt, " it ", it[n], " pf ", pf, " kt ", kt[n], " penq ", penq)
pp[n] = ppt
gmu = pu/ptot
nd = ptot/kbol/t
rho = nd*gmu*hmass
"""
c
c return
c
c The following ENTRY point has been removed for the time being,
c so that the partial pressures of all species are always
c calculated automatically, as needed for opacity calculations.
c 29 June/90
c PDB
c
c Entry point "GASPP" calculates partial pressures of all
c species present in the gas.
c
c entry gaspp(pp)
c cis
c entry gaspp(pp)
c
c Now calculate the partial pressure of the remaining atomic
c species. some restrictions apply here. these are:
c 1) Each element being considered here is restricted to a
c single atom per species.
c 2) The other elements appearing in a given species must all
c be major elements, that is, the partial pressure for each
c has already been found by the preceding linearization
c procedure.
c
"""
for j in range(natom):
n = indsp[j]
#print("j ", j, " n ", n, " ipr ", ipr[n])
if (ipr[n] >= 3):
nsp = natsp[j]
#print("nsp ", nsp)
denom = 0.0e0
for k in range(nsp+1):
nn = iatsp[j][k]
nq = nch[nn]
nelt = nel[nn]
pfp = 1.0e0
#print(" k ", k, " nn ", nn, " nq ", nq, " nelt ", nelt)
for i in range(nelt):
jj = indzat[zat[i][nn]-1]
#print(" i ", i, " zat ", zat[i][nn]-1, " jj ", jj)
if (jj == j):
#print("jj == j")
if (nat[i][nn] > 1):
print('(" *18 Error: 2 or more atoms of same element in species")')
print('(" for Isolv, T, P, Pe0= ",i3,2x,1p3d12.4)', isolv, t, pt, pe0)
#return 1
else:
#print("jj !=j")
#if (ipr[indsp[jj]] >= 3):
#print("Going to 363")
if (ipr[indsp[jj]] < 3):
#print("pfp=")
pfp = pfp*p[jj]**nat[i][nn]
#print(" nat ", nat[i][nn], " p ", p[jj])
#print("jj ", jj, " indsp ", indsp[jj], " ipr ", ipr[indsp[jj]])
if ( (ipr[indsp[jj]] < 3) or (jj == j) ):
#print("penq, psp denom=")
penq = 1.0e0
if (pe > 0.0e0):
penq = pe**nq
psp = it[nn]*pfp/kt[nn]/penq
denom = denom + psp
#print("FINAL: j ", j, " comp ", comp[j], " pd ", pd, " denom ", denom)
p[j] = comp[j]*pd/denom
#print("GAS 2: n ", n, " name ", name[n], " j ", j, " comp ", comp[j], " pd ", pd, " denom ", denom, " p ", p[j])
#print("pfp ", pfp, " psp ", psp)
#c
#c Calculate final partial pressures after convergence obtained
#c
ptot = pe
pd = 0.0e0
pu = 0.0e0
pq = 0.0e0
for n in range(nspec):
nelt = nel[n]
nq = nch[n]
pf0 = 1.0e0
pf = 1.0e0
for i in range(nelt):
j = indzat[zat[i][n]-1]
pf0 = pf0*p0[j]**nat[i][n]
pf = pf*p[j]**nat[i][n]
#print("GAS 2: n ", n, " j ", j, " p ", p[j], " i ", i, " nat ", nat[i][n])
penq = 1.0e0
if (pe > 0.0e0):
penq = pe**nq
pp[n] = it[n]*pf/kt[n]/penq
#print("GAS: 2nd pp: n ", n, " name ", name[n], " pp ", pp[n], " it ", it[n], " pf ", pf, " kt ", kt[n], " penq ", penq)
penq = 1.0e0
if (pe0 > 0.0e0):
penq = pe0**nq
pp0[n] = it[n]*pf0/kt[n]/penq
ptot = ptot + pp[n]
pd = pd + ntot[n]*pp[n]
pq = pq + nq*pp[n]
pu = pu + awt[n]*pp[n]
pdtot = pd + pe
dptot = abs(ptot - pt)/pt
dpq = abs(pq - pe)/pt
gmu = pu/ptot
nd = ptot/kbol/t
rho = nd*gmu*hmass
#c
#c Fill the array "PPIX" with the partial pressures of the
#c specified species.
#c
if (nix > 0):
for i in range(nix):
ppix[i] = 0.0e0
ii = ixn[i]
#print("i ", i, " ixn ", ixn[i])
if (ii < 150):
ppix[i] = pp[ixn[i]]
#c
#c Write out final partial pressures
#c
"""
print0 = True
if (print0):
#print('("1After ",i3," iterations, with ISOLV =",i2,":", "0T="," P=", " Pdtot="," dPtot="," dPq="," Number Dens.="," /cm**3 Mean At.Wt.="," Density="," g/cm**3"/, "0 # Species Abundance Initial P Final P", " iT kT "//)',\
# ngit, isolv, t, pt, pdtot, dptot, dpq, nd, gmu, rho)
outString = ("%6s %4d %25s %2d %1s\n"\
%("1After ", ngit, " iterations, with ISOLV =", isolv, ":"))
outFile.write(outString)
outString =("%3s %12.3e %3s %12.3e %7s %12.3e %7s %12.3e %5s %10.3e\n"\
%("0T=", t, " P=", pt, " Pdtot=", pdtot, " dPtot=", dptot, " dPq=", dpq))
outFile.write(outString)
outString = ("%14s %10.3e %24s %8.3f %9s %10.3e %8s\n"\
%(" Number Dens.=", nd, " /cm**3 Mean At.Wt.=", gmu, " Density=", rho, "g/cm**3"))
outFile.write(outString)
nsp1 = nspec + 1
outString = ("%4s %14s %12s %11s %13s %12s %10s\n"\
%("0 #", " Species ", " Abundance ", " Initial P ", " Final P ", " iT ", " kT "))
outFile.write(outString)
for n in range(nspec):
#if (pp[n] <= 0.0e0):
# pp[n] = 1.0e-19
if (type0[n] != 1):
#print(n, name[n], pp0[n], math.log10(abs(pp[n])/pt) ,it[n], kt[n])
outString = ("%4d %14s %24.3e %12.3e %12.3e %12.3e\n"\
%(n, name[n], pp0[n], pp[n] ,it[n], kt[n]))
outFile.write(outString)
else:
j = iat[n]
#print(n, name[n], comp[j], pp0[n], math.log10(abs(pp[n])/pt), it[n], kt[n])
outString = ("%4d %14s %12.3e %12.3e %12.3e %12.3e %12.3e\n"\
%(n, name[n], comp[j], pp0[n], pp[n], it[n], kt[n]))
outFile.write(outString)
if (iprint < 0):
print0 = False
#print(nsp1, ename, pe0, pe)
outString = ("%4d %14s %24.3e %12.3e\n" %(nsp1, ename, pe0, pe))
outFile.write(outString)
"""
#Try returning a tuple:
return a, ngit, pe, pd, pp, ppix, gmu, rho
| 38,285
| 34.614884
| 275
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Kappas.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 17:12:02 2017
@author: ishort
"""
import math
import Planck
import Useful
def kappas2(numDeps, pe, zScale, temp, rho, numLams, lambdas, logAHe, \
logNH1, logNH2, logNHe1, logNHe2, Ne, teff, logKapFudge):
"""/* Compute opacities properly from scratch with real physical cross-sections
*/ // *** CAUTION:
//
// This returns "kappa" as defined by Gray 3rd Ed. - cm^2 per *relevant particle*, where the "relevant particle"
// depends on *which* kappa """
#//
#// *** CAUTION:
#//
#// This returns "kappa" as defined by Gray 3rd Ed. - cm^2 per *relevant particle*, where the "relevant particle"
#// depends on *which* kappa
log10E = math.log10(math.e) #//needed for g_ff
logLog10E = math.log(log10E)
logE10 = math.log(10.0)
logNH = [0.0 for i in range(numDeps)] #//Total H particle number density cm^-3
#double logPH1, logPH2, logPHe1, logPHe2;
for i in range(numDeps):
logNH[i] = math.exp(logNH1[i]) + math.exp(logNH2[i])
logNH[i] = math.log(logNH[i])
#//System.out.println("i " + i + " logNH1 " + log10E*logNH1[i] + " logNH2 " + log10E*logNH2[i]
#//+ " logNHe1 " + log10E*logNHe1[i] + " logNHe2 " + log10E*logNHe2[i] + " logPe " + log10E*pe[1][i]);
#// logPH1 = logNH1[i] + temp[1][i] + Useful.logK();
#// logPH2 = logNH2[i] + temp[1][i] + Useful.logK();
#// logPHe1 = logNHe1[i] + temp[1][i] + Useful.logK();
#// logPHe2 = logNHe2[i] + temp[1][i] + Useful.logK();
#//System.out.println("i " + i + " logPH1 " + log10E*logPH1 + " logPH2 " + log10E*logPH2
#//+ " logPHe1 " + log10E*logPHe1 + " logPHe2 " + log10E*logPHe2 + " logPe " + log10E*pe[1][i]);
#double[][] logKappa = new double[numLams][numDeps];
logKappa = [ [0.0 for i in range(numDeps)] for j in range(numLams) ]
#double kappa; //helper
#double stimEm; //temperature- and wavelength-dependent stimulated emission correction
#double stimHelp, logStimEm;
#double ii; //useful for converting integer loop counter, i, to float
#//
#//
#//Input data and variable declarations:
#//
#//
#// H I b-f & f-f
chiIH = 13.598433 #//eV
Rydberg = 1.0968e-2 #// "R" in nm^-1
#//Generate threshold wavelengths and b-f Gaunt (g_bf) helper factors up to n=10:
#double n; //principle quantum number of Bohr atom E-level
numHlevs = 10
#double logChiHlev;
invThresh = [0.0 for i in range(numHlevs)] #//also serves as g_bf helper factor
threshLambs = [0.0 for i in range(numHlevs)]
chiHlev = [0.0 for i in range(numHlevs)]
for i in range(numHlevs):
n = 1.0 + float(i)
invThresh[i] = Rydberg / n / n #//nm^-1; also serves as g_bf helper factor
threshLambs[i] = 1.0 / invThresh[i] #//nm
logChiHlev = Useful.logH() + Useful.logC() + math.log(invThresh[i]) + 7.0*logE10 #// ergs
chiHlev[i] = math.exp(logChiHlev - Useful.logEv()) #//eV
chiHlev[i] = chiIH - chiHlev[i]
#// System.out.println("i " + i + " n " + n + " invThresh " + invThresh[i] + " threshLambs[i] " + threshLambs[i] + " chiHlev " + chiHlev[i]);
logGauntPrefac = math.log(0.3456) - 0.333333*math.log(Rydberg)
#// **** Caution: this will require lamba in A!:
a0 = 1.0449e-26 #//if lambda in A
logA0 = math.log(a0)
#// Boltzmann const "k" in eV/K - needed for "theta"
logKeV = Useful.logK() - Useful.logEv()
#//g_bf Gaunt factor - depends on lower E-level, n:
loggbf = [0.0 for i in range(numHlevs)]
#//initialize quantities that depend on lowest E-level contributing to opacity at current wavelength:
for iThresh in range(numHlevs):
loggbf[iThresh] = 0.0
#double logGauntHelp, gauntHelp;
#double gbf, gbfHelp, loggbfHelp;
#double gff, gffHelp, loggffHelp, logffHelp, loggff;
#double help, logHelp3;
#double chiLambda, logChiLambda;
#double bfTerm, logbfTerm, bfSum, logKapH1bf, logKapH1ff;
#//initial defaults:
gbf = 1.0
gff = 1.0
loggff = 0.0
logChiFac = math.log(1.2398e3) #// eV per lambda, for lambda in nm
#// Needed for kappa_ff:
#double ffBracket;
logffHelp = logLog10E - math.log(chiIH) - math.log(2.0)
#//logHelp = logffHelp - math.log(2.0)
#//
#//Hminus:
#//
#// H^- b-f
#//This is for the sixth order polynomial fit to the cross-section's wavelength dependence
numHmTerms = 7
logAHm = [0.0 for i in range(numHmTerms)]
signAHm = [0.0 for i in range(numHmTerms)]
aHmbf = 4.158e-10
#//double logAHmbf = Math.log(aHmbf);
#//Is the factor of 10^-18cm^2 from the polynomial fit to alpha_Hmbf missing in Eq. 8.12 on p. 156 of Gray 3rd Ed??
logAHmbf = math.log(aHmbf) - 18.0*logE10
#double alphaHmbf, logAlphaHmbf, logTermHmbf, logKapHmbf;
#//Computing each polynomial term logarithmically
logAHm[0] = math.log(1.99654)
signAHm[0] = 1.0
logAHm[1] = math.log(1.18267e-5)
signAHm[1] = -1.0
logAHm[2] = math.log(2.64243e-6)
signAHm[2] = 1.0
logAHm[3] = math.log(4.40524e-10)
signAHm[3] = -1.0
logAHm[4] = math.log(3.23992e-14)
signAHm[4] = 1.0
logAHm[5] = math.log(1.39568e-18)
signAHm[5] = -1.0
logAHm[6] = math.log(2.78701e-23)
signAHm[6] = 1.0
alphaHmbf = math.exp(logAHm[0]) #//initialize accumulator
#// H^- f-f:
logAHmff = -26.0*logE10
numHmffTerms = 5
#double fPoly, logKapHmff, logLambdaAFac;
fHmTerms = [ [ 0.0 for i in range(numHmffTerms) ] for j in range(3) ]
fHm = [0.0 for i in range(3)]
fHmTerms[0][0] = -2.2763
fHmTerms[0][1] = -1.6850
fHmTerms[0][2] = 0.76661
fHmTerms[0][3] = -0.053346
fHmTerms[0][4] = 0.0
fHmTerms[1][0] = 15.2827
fHmTerms[1][1] = -9.2846
fHmTerms[1][2] = 1.99381
fHmTerms[1][3] = -0.142631
fHmTerms[1][4] = 0.0
fHmTerms[2][0] = -197.789
fHmTerms[2][1] = 190.266
fHmTerms[2][2] = -67.9775
fHmTerms[2][3] = 10.6913
fHmTerms[2][4] = -0.625151
#//
#//H_2^+ molecular opacity - cool stars
#// scales with proton density (H^+)
#//This is for the third order polynomial fit to the "sigma_l(lambda)" and "U_l(lambda)"
#//terms in the cross-section
numH2pTerms = 4
sigmaH2pTerm = [0.0 for i in range(numH2pTerms)]
UH2pTerm = [0.0 for i in range(numH2pTerms)]
#double logSigmaH2p, sigmaH2p, UH2p, logKapH2p;
aH2p = 2.51e-42
logAH2p = math.log(aH2p)
sigmaH2pTerm[0] = -1040.54
sigmaH2pTerm[1] = 1345.71
sigmaH2pTerm[2] = -547.628
sigmaH2pTerm[3] = 71.9684
#//UH2pTerm[0] = 54.0532
#//UH2pTerm[1] = -32.713
#//UH2pTerm[2] = 6.6699
#//UH2pTerm[3] = -0.4574
#//Reverse signs on U_1 polynomial expansion co-efficients - Dave Gray private communcation
#//based on Bates (1952)
UH2pTerm[0] = -54.0532
UH2pTerm[1] = 32.713
UH2pTerm[2] = -6.6699
UH2pTerm[3] = 0.4574
#// He I b-f & ff:
#double totalH1Kap, logTotalH1Kap, helpHe, logKapHe;
#//
#//He^- f-f
AHe = math.exp(logAHe)
#double logKapHemff, nHe, logNHe, thisTerm, thisLogTerm, alphaHemff, log10AlphaHemff;
#// Gray does not have this pre-factor, but PHOENIX seems to and without it
#// the He opacity is about 10^26 too high!:
logAHemff = -26.0*logE10
numHemffTerms = 5
logC0HemffTerm = [0.0 for i in range(numHemffTerms)]
logC1HemffTerm = [0.0 for i in range(numHemffTerms)]
logC2HemffTerm = [0.0 for i in range(numHemffTerms)]
logC3HemffTerm = [0.0 for i in range(numHemffTerms)]
signC0HemffTerm = [0.0 for i in range(numHemffTerms)]
signC1HemffTerm = [0.0 for i in range(numHemffTerms)]
signC2HemffTerm = [0.0 for i in range(numHemffTerms)]
signC3HemffTerm = [0.0 for i in range(numHemffTerms)]
#//we'll be evaluating the polynominal in theta logarithmically by adding logarithmic terms -
logC0HemffTerm[0] = math.log(9.66736)
signC0HemffTerm[0] = 1.0
logC0HemffTerm[1] = math.log(71.76242)
signC0HemffTerm[1] = -1.0
logC0HemffTerm[2] = math.log(105.29576)
signC0HemffTerm[2] = 1.0
logC0HemffTerm[3] = math.log(56.49259)
signC0HemffTerm[3] = -1.0
logC0HemffTerm[4] = math.log(10.69206)
signC0HemffTerm[4] = 1.0
logC1HemffTerm[0] = math.log(10.50614)
signC1HemffTerm[0] = -1.0
logC1HemffTerm[1] = math.log(48.28802)
signC1HemffTerm[1] = 1.0
logC1HemffTerm[2] = math.log(70.43363)
signC1HemffTerm[2] = -1.0
logC1HemffTerm[3] = math.log(37.80099)
signC1HemffTerm[3] = 1.0
logC1HemffTerm[4] = math.log(7.15445)
signC1HemffTerm[4] = -1.0
logC2HemffTerm[0] = math.log(2.74020)
signC2HemffTerm[0] = 1.0
logC2HemffTerm[1] = math.log(10.62144)
signC2HemffTerm[1] = -1.0
logC2HemffTerm[2] = math.log(15.50518)
signC2HemffTerm[2] = 1.0
logC2HemffTerm[3] = math.log(8.33845)
signC2HemffTerm[3] = -1.0
logC2HemffTerm[4] = math.log(1.57960)
signC2HemffTerm[4] = 1.0
logC3HemffTerm[0] = math.log(0.19923)
signC3HemffTerm[0] = -1.0
logC3HemffTerm[1] = math.log(0.77485)
signC3HemffTerm[1] = 1.0
logC3HemffTerm[2] = math.log(1.13200)
signC3HemffTerm[2] = -1.0
logC3HemffTerm[3] = math.log(0.60994)
signC3HemffTerm[3] = 1.0
logC3HemffTerm[4] = math.log(0.11564)
signC3HemffTerm[4] = -1.0
# //initialize accumulators:
cHemff = [0.0 for i in range(4)]
cHemff[0] = signC0HemffTerm[0] * math.exp(logC0HemffTerm[0]);
cHemff[1] = signC1HemffTerm[0] * math.exp(logC1HemffTerm[0]);
cHemff[2] = signC2HemffTerm[0] * math.exp(logC2HemffTerm[0]);
cHemff[3] = signC3HemffTerm[0] * math.exp(logC3HemffTerm[0]);
#//
#//Should the polynomial expansion for the Cs by in 10g10Theta?? No! Doesn't help:
#//double[] C0HemffTerm = new double[numHemffTerms];
#//double[] C1HemffTerm = new double[numHemffTerms];
#//double[] C2HemffTerm = new double[numHemffTerms];
#//double[] C3HemffTerm = new double[numHemffTerms];
#//
#//C0HemffTerm[0] = 9.66736;
#//C0HemffTerm[1] = -71.76242;
#//C0HemffTerm[2] = 105.29576;
#//C0HemffTerm[3] = -56.49259;
#//C0HemffTerm[4] = 10.69206;
#//C1HemffTerm[0] = -10.50614;
#//C1HemffTerm[1] = 48.28802;
#//C1HemffTerm[2] = -70.43363;
#//C1HemffTerm[3] = 37.80099;
#//C1HemffTerm[4] = -7.15445;
#//C2HemffTerm[0] = 2.74020;
#//C2HemffTerm[1] = -10.62144;
#//C2HemffTerm[2] = 15.50518;
#//C2HemffTerm[3] = -8.33845;
#//C2HemffTerm[4] = 1.57960;
#//C3HemffTerm[0] = -0.19923;
#//C3HemffTerm[1] = 0.77485;
#//C3HemffTerm[2] = -1.13200;
#//C3HemffTerm[3] = 0.60994;
#//C3HemffTerm[4] = -0.11564;
#//initialize accumulators:
#// double[] cHemff = new double[4];
#// cHemff[0] = C0HemffTerm[0];
#// cHemff[1] = C1HemffTerm[0];
#// cHemff[2] = C2HemffTerm[0];
#// cHemff[3] = C3HemffTerm[0];
#//
#// electron (e^-1) scattering (Thomson scattering)
#double kapE, logKapE;
alphaE = 0.6648e-24 #//cm^2/e^-1
logAlphaE = math.log(0.6648e-24)
#//Universal:
#//
# double theta, logTheta, log10Theta, log10ThetaFac;
# double logLambda, lambdaA, logLambdaA, log10LambdaA, lambdanm, logLambdanm;
#//Okay - here we go:
#//Make the wavelength loop the outer loop - lots of depth-independnet lambda-dependent quantities:
#//
#//
# //System.out.println("Kappas called...");
#//
#// **** START WAVELENGTH LOOP iLam
#//
#//
#//
for iLam in range(numLams):
#//
#//Re-initialize all accumulators to be on safe side:
kappa = 0.0
logKapH1bf = -99.0
logKapH1ff = -99.0
logKapHmbf = -99.0
logKapHmff = -99.0
logKapH2p = -99.0
logKapHe = -99.0
logKapHemff = -99.0
logKapE = -99.0
#//
#//*** CAUTION: lambda MUST be in nm here for consistency with Rydbeg
logLambda = math.log(lambdas[iLam]) #//log cm
lambdanm = 1.0e7 * lambdas[iLam]
logLambdanm = math.log(lambdanm)
lambdaA = 1.0e8 * lambdas[iLam] #//Angstroms
logLambdaA = math.log(lambdaA)
log10LambdaA = log10E * logLambdaA
logChiLambda = logChiFac - logLambdanm
chiLambda = math.exp(logChiLambda) #//eV
#// Needed for both g_bf AND g_ff:
logGauntHelp = logGauntPrefac - 0.333333*logLambdanm #//lambda in nm here
gauntHelp = math.exp(logGauntHelp)
#// if (iLam == 142){
#// System.out.println("lambdaA " + lambdaA);
#// }
#//HI b-f depth independent factors:
#//Start at largest threshold wavelength and break out of loop when next threshold lambda is less than current lambda:
#for (iThresh = numHlevs-1; iThresh >= 0; iThresh--){
for iThresh in range(0, numHlevs-1, -1):
if (threshLambs[iThresh] < lambdanm):
break
if (lambdanm <= threshLambs[iThresh]):
#//this E-level contributes
loggbfHelp = logLambdanm + math.log(invThresh[iThresh]) # //lambda in nm here; invThresh here as R/n^2
gbfHelp = math.exp(loggbfHelp)
gbf = 1.0 - (gauntHelp * (gbfHelp - 0.5))
#// if (iLam == 1){}
#// System.out.println("iThresh " + iThresh + " threshLambs " + threshLambs[iThresh] + " gbf " + gbf);
#// }
loggbf[iThresh] = math.log(gbf)
#//end iThresh loop
#//HI f-f depth independent factors:
# //logChi = logLog10E + logLambdanm - logChiFac; //lambda in nm here
# //chi = Math.exp(logChi);
loggffHelp = logLog10E - logChiLambda
#//
#//
#//
#// ****** Start depth loop iTau ******
#//
#//
#//
#//
for iTau in range(numDeps):
#//
# //Re-initialize all accumulators to be on safe side:
kappa = 0.0
logKapH1bf = -99.0
logKapH1ff = -99.0
logKapHmbf = -99.0
logKapHmff = -99.0
logKapH2p = -99.0
logKapHe = -99.0
logKapHemff = -99.0
logKapE = -99.0
#//
#//
#//if (iTau == 36 && iLam == 142){
#// System.out.println("lambdanm[142] " + lambdanm + " temp[0][iTau=36] " + temp[0][iTau=36]);
#// }
#//This is "theta" ~ 5040/T:
logTheta = logLog10E - logKeV - temp[1][iTau]
log10Theta = log10E * logTheta
theta = math.exp(logTheta)
#//System.out.println("theta " + theta + " logTheta " + logTheta);
#// temperature- and wavelength-dependent stimulated emission coefficient:
stimHelp = -1.0 * theta * chiLambda * logE10
stimEm = 1.0 - math.exp(stimHelp)
logStimEm = math.log(stimEm)
# // if (iTau == 36 && iLam == 142){
# // System.out.println("stimEm " + stimEm);
# //}
ffBracket = math.exp(loggffHelp - logTheta) + 0.5
gff = 1.0 + (gauntHelp*ffBracket)
#//if (iTau == 36 && iLam == 1){
#// System.out.println("gff " + gff);
#// }
loggff = math.log(gff)
#//H I b-f:
#//Start at largest threshold wavelength and break out of loop when next threshold lambda is less than current lambda:
bfSum = 0.0 #//initialize accumulator
logHelp3 = logA0 + 3.0*logLambdaA #//lambda in A here
#for (int iThresh = numHlevs-1; iThresh >= 0; iThresh--){
for iThresh in range(0, numHlevs-1, -1):
if (threshLambs[iThresh] < lambdanm):
break
n = 1.0 + float(iThresh)
if (lambdanm <= threshLambs[iThresh]):
#//this E-level contributes
logbfTerm = loggbf[iThresh] - 3.0*math.log(n)
logbfTerm = logbfTerm - (theta*chiHlev[iThresh])*logE10
bfSum = bfSum + math.exp(logbfTerm)
#//if (iTau == 36 && iLam == 142){
# //System.out.println("lambdanm " + lambdanm + " iThresh " + iThresh + " threshLambs[iThresh] " + threshLambs[iThresh]);
# //System.out.println("loggbf " + loggbf[iThresh] + " theta " + theta + " chiHlev " + chiHlev[iThresh]);
# //System.out.println("bfSum " + bfSum + " logbfTerm " + logbfTerm);
#// }
#//end iThresh loop
#// cm^2 per *neutral* H atom
logKapH1bf = logHelp3 + math.log(bfSum)
#//Stimulated emission correction
logKapH1bf = logKapH1bf + logStimEm
#//System.out.println("lambda " + lambdas[iLam] + "iTau " + iTau + " sigma " + Math.exp(logKapH1bf));
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapH1bf = logKapH1bf + logNH1[iTau]
#//System.out.println(" aH1 " + Math.exp(logKapH1bf));
#////Nasty fix to make Balmer lines show up in A0 stars!
#// if (teff > 8000){
#// logKapH1bf = logKapH1bf - logE10*1.5;
#//
kappa = math.exp(logKapH1bf)
#//System.out.println("HIbf " + log10E*logKapH1bf);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("lambdaA " + lambdaA + " logKapH1bf " + log10E*(logKapH1bf)); //-rho[1][iTau]));
#//}
#//H I f-f:
#// cm^2 per *neutral* H atom
logKapH1ff = logHelp3 + loggff + logffHelp - logTheta - (theta*chiIH)*logE10
#//Stimulated emission correction
logKapH1ff = logKapH1ff + logStimEm
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapH1ff = logKapH1ff + logNH1[iTau]
#////Nasty fix to make Balmer lines show up in A0 stars!
#// if (teff > 8000){
#// logKapH1ff = logKapH1ff - logE10*1.5;
#//
kappa = kappa + math.exp(logKapH1ff);
#//System.out.println("HIff " + log10E*logKapH1ff);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logKapH1ff " + log10E*(logKapH1ff)); //-rho[1][iTau]));
#//}
#//
#//Hminus:
#//
#// H^- b-f:
#//if (iTau == 36 && iLam == 142){
# // System.out.println("temp " + temp[0][iTau] + " lambdanm " + lambdanm);
# // }
logKapHmbf = -99.0 #//initialize default
#//if ( (temp[0][iTau] > 2500.0) && (temp[0][iTau] < 10000.0) ){
#//if ( (temp[0][iTau] > 2500.0) && (temp[0][iTau] < 8000.0) ){
#//Try lowering lower Teff limit to avoid oapcity collapse in outer layers of late-type stars
if ( (temp[0][iTau] > 1000.0) and (temp[0][iTau] < 10000.0) ):
if ((lambdanm > 225.0) and (lambdanm < 1500.0) ): # //nm
#//if (iTau == 36 && iLam == 142){
# // System.out.println("In KapHmbf condition...");
#//}
ii = 0.0
alphaHmbf = signAHm[0]*math.exp(logAHm[0]) #//initialize accumulator
#for (int i = 1; i < numHmTerms; i++){
for i in range(1, numHmTerms):
ii = float(i)
#//if (iTau == 36 && iLam == 142){
#// System.out.println("ii " + ii);
#//}
logTermHmbf = logAHm[i] + ii*logLambdaA
alphaHmbf = alphaHmbf + signAHm[i]*math.exp(logTermHmbf)
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logTermHmbf " + log10E*logTermHmbf + " i " + i + " logAHm " + log10E*logAHm[i]);
#//}
logAlphaHmbf = math.log(alphaHmbf)
#// cm^2 per neutral H atom
logKapHmbf = logAHmbf + logAlphaHmbf + pe[1][iTau] + 2.5*logTheta + (0.754*theta)*logE10
#//Stimulated emission correction
logKapHmbf = logKapHmbf + logStimEm
#//if (iTau == 36 && iLam == 142){
#// System.out.println("alphaHmbf " + alphaHmbf);
#// System.out.println("logKapHmbf " + log10E*logKapHmbf + " logAHmbf " + log10E*logAHmbf + " logAlphaHmbf " + log10E*logAlphaHmbf);
#// }
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapHmbf = logKapHmbf + logNH1[iTau]
kappa = kappa + math.exp(logKapHmbf)
#//System.out.println("Hmbf " + log10E*logKapHmbf);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logKapHmbf " + log10E*(logKapHmbf)); //-rho[1][iTau]));
#//}
#//wavelength condition
#// temperature condition
#// H^- f-f:
logKapHmff = -99.0 #//initialize default
#//if ( (temp[0][iTau] > 2500.0) && (temp[0][iTau] < 10000.0) ){
#//Try lowering lower Teff limit to avoid oapcity collapse in outer layers of late-type stars
#//if ( (temp[0][iTau] > 2500.0) && (temp[0][iTau] < 8000.0) ){
if ( (temp[0][iTau] > 1000.0) and (temp[0][iTau] < 10000.0) ):
if ((lambdanm > 260.0) and (lambdanm < 11390.0) ): #//nm
#//construct "f_n" polynomials in log(lambda)
for j in range(3):
fHm[j] = fHmTerms[j][0] #//initialize accumulators
ii = 0.0
for i in range(1, numHmffTerms):
ii = float(i)
logLambdaAFac = math.pow(log10LambdaA, ii)
for j in range(3):
fHm[j] = fHm[j] + (fHmTerms[j][i]*logLambdaAFac)
#} #// i
#} #// j
#//
fPoly = fHm[0] + fHm[1]*log10Theta + fHm[2]*log10Theta*log10Theta
#// In cm^2 per neutral H atom:
#// Stimulated emission alreadya ccounted for
logKapHmff = logAHmff + pe[1][iTau] + fPoly*logE10
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapHmff = logKapHmff + logNH1[iTau]
kappa = kappa + math.exp(logKapHmff)
#//System.out.println("Hmff " + log10E*logKapHmff);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logKapHmff " + log10E*(logKapHmff)); //-rho[1][iTau]));
#//}
#//wavelength condition
#// temperature condition
#// H^+_2:
#//
logKapH2p = -99.0 #//initialize default
if ( temp[0][iTau] < 4000.0 ):
if ((lambdanm > 380.0) and (lambdanm < 2500.0) ): # //nm
sigmaH2p = sigmaH2pTerm[0] #//initialize accumulator
UH2p = UH2pTerm[0] #//initialize accumulator
ii = 0.0#
for i in range(1, numH2pTerms):
ii = float(i)
logLambdaAFac = math.pow(log10LambdaA, ii)
#// kapH2p way too large with lambda in A - try cm: No! - leads to negative logs
#//logLambdaAFac = Math.pow(logLambda, ii);
sigmaH2p = sigmaH2p + sigmaH2pTerm[i] * logLambdaAFac
UH2p = UH2p + UH2pTerm[i] * logLambdaAFac
logSigmaH2p = math.log(sigmaH2p)
logKapH2p = logAH2p + logSigmaH2p - (UH2p*theta)*logE10 + logNH2[iTau]
#//Stimulated emission correction
logKapH2p = logKapH2p + logStimEm
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapH2p = logKapH2p + logNH1[iTau]
kappa = kappa + math.exp(logKapH2p)
#//System.out.println("H2p " + log10E*logKapH2p);
#//if (iTau == 16 && iLam == 142){
# //System.out.println("logKapH2p " + log10E*(logKapH2p-rho[1][iTau]) + " logAH2p " + log10E*logAH2p
#// + " logSigmaH2p " + log10E*logSigmaH2p + " (UH2p*theta)*logE10 " + log10E*((UH2p*theta)*logE10) + " logNH2[iTau] " + log10E*logNH2[iTau]);
#//}
#//wavelength condition
#// temperature condition
#//He I
#//
#// HeI b-f + f-f
#//Scale sum of He b-f and f-f with sum of HI b-f and f-f
#//wavelength condition comes from requirement that lower E level be greater than n=2 (edge at 22.78 nm)
logKapHe = -99.0 #//default intialization
if ( temp[0][iTau] > 10000.0 ):
if (lambdanm > 22.8): #//nm
totalH1Kap = math.exp(logKapH1bf) + math.exp(logKapH1ff)
logTotalH1Kap = math.log(totalH1Kap)
helpHe = Useful.k() * temp[0][iTau]
#// cm^2 per neutral H atom (after all, it's scaled wrt kappHI
#// Stimulated emission already accounted for
#//
#// *** CAUTION: Is this *really* the right thing to do???
#// - we're re-scaling the final H I kappa in cm^2/g corrected for stim em, NOT the raw cross section
logKapHe = math.log(4.0) - (10.92 / helpHe) + logTotalH1Kap
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapHe = logKapHe + logNH1[iTau]
kappa = kappa + math.exp(logKapHe)
#//System.out.println("He " + log10E*logKapHe);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logKapHe " + log10E*(logKapHe)); //-rho[1][iTau]));
#//}
#//wavelength condition
#// temperature condition
#//
#//He^- f-f:
logKapHemff = -99.0 #//default initialization
if ( (theta > 0.5) and (theta < 2.0) ):
if ((lambdanm > 500.0) and (lambdanm < 15000.0) ):
#// initialize accumulators:
cHemff[0] = signC0HemffTerm[0]*math.exp(logC0HemffTerm[0]);
#//System.out.println("C0HemffTerm " + signC0HemffTerm[0]*Math.exp(logC0HemffTerm[0]));
cHemff[1] = signC1HemffTerm[0]*math.exp(logC1HemffTerm[0]);
#//System.out.println("C1HemffTerm " + signC1HemffTerm[0]*Math.exp(logC1HemffTerm[0]));
cHemff[2] = signC2HemffTerm[0]*math.exp(logC2HemffTerm[0]);
#//System.out.println("C2HemffTerm " + signC2HemffTerm[0]*Math.exp(logC2HemffTerm[0]));
cHemff[3] = signC3HemffTerm[0]*math.exp(logC3HemffTerm[0]);
#//System.out.println("C3HemffTerm " + signC3HemffTerm[0]*Math.exp(logC3HemffTerm[0]));
#//build the theta polynomial coefficients
ii = 0.0
for i in range(1, numHemffTerms):
ii = float(i)
thisLogTerm = ii*logTheta + logC0HemffTerm[i]
cHemff[0] = cHemff[0] + signC0HemffTerm[i]*math.exp(thisLogTerm)
#//System.out.println("i " + i + " ii " + ii + " C0HemffTerm " + signC0HemffTerm[i]*Math.exp(logC0HemffTerm[i]));
thisLogTerm = ii*logTheta + logC1HemffTerm[i]
cHemff[1] = cHemff[1] + signC1HemffTerm[i]*math.exp(thisLogTerm)
#//System.out.println("i " + i + " ii " + ii + " C1HemffTerm " + signC1HemffTerm[i]*Math.exp(logC1HemffTerm[i]));
thisLogTerm = ii*logTheta + logC2HemffTerm[i]
cHemff[2] = cHemff[2] + signC2HemffTerm[i]*math.exp(thisLogTerm)
#//System.out.println("i " + i + " ii " + ii + " C2HemffTerm " + signC2HemffTerm[i]*Math.exp(logC2HemffTerm[i]));
thisLogTerm = ii*logTheta + logC3HemffTerm[i]
cHemff[3] = cHemff[3] + signC3HemffTerm[i]*math.exp(thisLogTerm)
#//System.out.println("i " + i + " ii " + ii + " C3HemffTerm " + signC3HemffTerm[i]*Math.exp(logC3HemffTerm[i]));
#//// Should polynomial expansion for Cs be in log10Theta??: - No! Doesn't help
#// initialize accumulators:
#// cHemff[0] = C0HemffTerm[0];
#// cHemff[1] = C1HemffTerm[0];
#// cHemff[2] = C2HemffTerm[0];
#// cHemff[3] = C3HemffTerm[0];
#// ii = 0.0;
#// for (int i = 1; i < numHemffTerms; i++){
#// ii = (double) i;
#// log10ThetaFac = Math.pow(log10Theta, ii);
#// thisTerm = log10ThetaFac * C0HemffTerm[i];
#// cHemff[0] = cHemff[0] + thisTerm;
#// thisTerm = log10ThetaFac * C1HemffTerm[i];
#// cHemff[1] = cHemff[1] + thisTerm;
#// thisTerm = log10ThetaFac * C2HemffTerm[i];
#// cHemff[2] = cHemff[2] + thisTerm;
#// thisTerm = log10ThetaFac * C3HemffTerm[i];
#// cHemff[3] = cHemff[3] + thisTerm;
#// }
#//Build polynomial in logLambda for alpha(He^1_ff):
log10AlphaHemff = cHemff[0] #//initialize accumulation
#//System.out.println("cHemff[0] " + cHemff[0]);
ii = 0.0
for i in range(1, 3+1):
#//System.out.println("i " + i + " cHemff[i] " + cHemff[i]);
ii = float(i)
thisTerm = cHemff[i] * math.pow(log10LambdaA, ii)
log10AlphaHemff = log10AlphaHemff + thisTerm
#//System.out.println("log10AlphaHemff " + log10AlphaHemff);
alphaHemff = math.pow(10.0, log10AlphaHemff) #//gives infinite alphas!
#// alphaHemff = log10AlphaHemff; // ?????!!!!!
#//System.out.println("alphaHemff " + alphaHemff);
#// Note: this is the extinction coefficient per *Hydrogen* particle (NOT He- particle!)
# //nHe = Math.exp(logNHe1[iTau]) + Math.exp(logNHe2[iTau]);
# //logNHe = Math.log(nHe);
# //logKapHemff = Math.log(alphaHemff) + Math.log(AHe) + pe[1][iTau] + logNHe1[iTau] - logNHe;
logKapHemff = logAHemff + math.log(alphaHemff) + pe[1][iTau] + logNHe1[iTau] - logNH[iTau]
#//Stimulated emission already accounted for
#//Add it in to total - opacity per H particle, so multiply by logNH
#// This is now linear opacity in cm^-1
logKapHemff = logKapHemff + logNH[iTau]
kappa = kappa + math.exp(logKapHemff)
#//System.out.println("Hemff " + log10E*logKapHemff);
#//if (iTau == 36 && iLam == 155){
#//if (iLam == 155){
#// System.out.println("logKapHemff " + log10E*(logKapHemff)); //-rho[1][iTau]));
#//}
#//wavelength condition
#// temperature condition
#//
#// electron (e^-1) scattering (Thomson scattering)
#//coefficient per *"hydrogen atom"* (NOT per e^-!!) (neutral or total H??):
logKapE = logAlphaE + Ne[1][iTau] - logNH[iTau]
#//Stimulated emission not relevent
#//Add it in to total - opacity per H particle, so multiply by logNH
#// This is now linear opacity in cm^-1
#//I know, we're adding logNH right back in after subtracting it off, but this is for dlarity and consistency for now... :
logKapE = logKapE + logNH[iTau]
kappa = kappa + math.exp(logKapE)
#//System.out.println("E " + log10E*logKapE);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logKapE " + log10E*(logKapE)); //-rho[1][iTau]));
#//}
#//Metal b-f
#//Fig. 8.6 Gray 3rd Ed.
#//
#//
#// This is now linear opacity in cm^-1
#// Divide by mass density
#// This is now mass extinction in cm^2/g
#//
logKappa[iLam][iTau] = math.log(kappa) - rho[1][iTau]
#// Fudge is in cm^2/g: Converto to natural log:
logEKapFudge = logE10 * logKapFudge
logKappa[iLam][iTau] = logKappa[iLam][iTau] + logEKapFudge
#//if (iTau == 36 && iLam == 142){
#//System.out.println(" " + log10E*(logKappa[iLam][iTau]+rho[1][iTau]));
#//}
#// close iTau depth loop
#//
#//close iLam wavelength loop
return logKappa
#} //end method kappas2
def kapRos(numDeps, numLams, lambdas, logKappa, temp):
    """Compute the Rosseland mean extinction coefficient at each depth.

    The Rosseland mean is the harmonic mean of the monochromatic extinction
    weighted by the temperature derivative of the Planck function, dB/dT:
        1/kappa_Ros = Sum( (1/kappa_lam) * dB/dT * dLambda ) / Sum( dB/dT * dLambda )
    The sums are accumulated linearly from log-space terms for numerical safety.

    Parameters:
        numDeps  -- number of depth points
        numLams  -- number of wavelength points
        lambdas  -- wavelengths (cm, ascending; presumably — confirm with caller)
        logKappa -- 2D [numLams][numDeps] natural-log mass extinction (cm^2/g)
        temp     -- temperature structure; temp[0][iTau] is linear T (K)

    Returns:
        2 x numDeps array: row 0 linear kappa_Ros, row 1 natural log kappa_Ros.
    """
    kappaRos = [[0.0] * numDeps for _ in range(2)]
    for iTau in range(numDeps):
        num = 0.0    # accumulates (1/kappa) * dB/dT * dLambda
        denom = 0.0  # accumulates dB/dT * dLambda
        # Trapezoid-like sum starting at the second wavelength point:
        for iLam in range(1, numLams):
            deltaLam = lambdas[iLam] - lambdas[iLam - 1]  # lambda in cm
            logWeight = Planck.dBdT(temp[0][iTau], lambdas[iLam]) + math.log(deltaLam)
            denom = denom + math.exp(logWeight)
            num = num + math.exp(logWeight - logKappa[iLam][iTau])
        logInvKapRos = math.log(num) - math.log(denom)
        kappaRos[1][iTau] = -1.0 * logInvKapRos  # logarithmic
        kappaRos[0][iTau] = math.exp(kappaRos[1][iTau])
    return kappaRos
#} //end method kapRos
| 32,644
| 39.857322
| 142
|
py
|
ChromaStarPy
|
ChromaStarPy-master/LineGrid.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 29 13:31:10 2017
@author: Ian
"""
import math
import Useful
"""/**
* Line profile, phi_lambda(lambda): Assume Voigt function profile - need H(a,v)
* Assumes CRD, LTE, ??? Input parameters: lam0 - line center wavelength in nm
* mass - mass of absorbing particle (amu) logGammaCol - log_10(gamma) - base 10
* logarithmic collisional (pressure) damping co-efficient (s^-1) epsilon -
* convective microturbulence- non-thermal broadening parameter (km/s) Also
* needs atmospheric structure information: numDeps WON'T WORK - need observer's
* frame fixed lambda at all depths: temp structure for depth-dependent thermal
* line broadening Teff as typical temp instead of above pressure structure,
* pGas, if scaling gamma
*/"""
def lineGridDelta(lam0In, massIn, xiTIn, numDeps, teff):
    """Build a one-point (delta-function) line wavelength grid at line centre.

    Computes the depth-independent Doppler width from the thermal speed at
    Teff (Maxwell-Boltzmann mode speed) added in quadrature with the
    microturbulent speed, then returns a single grid point at line centre.

    Parameters:
        lam0In  -- line centre wavelength (cm — NOT converted here)
        massIn  -- absorber mass (amu)
        xiTIn   -- microturbulence (km/s); converted to cm/s internally
        numDeps -- number of depth points (unused here; kept for API symmetry)
        teff    -- effective temperature (K), used as the typical temperature

    Returns:
        2 x 1 array: row 0 Delta-lambda in cm, row 1 Delta-lambda in
        Doppler widths.  Both entries are 0.0 (line centre).
    """
    logC = Useful.logC()
    logK = Useful.logK()
    amu = Useful.amu()
    ln2 = math.log(2.0)
    # Inputs in linear cgs units:
    logTeff = math.log(teff)
    xiT = 1.0E5 * xiTIn          # km/s -> cm/s
    logLam0 = math.log(lam0In)   # lam0 assumed already in cm
    logMass = math.log(massIn * amu)  # amu -> g
    # Depth-independent Doppler width Delta_lambda_D:
    logVModeSq = ln2 + logK + logTeff - logMass       # M-B dist, square of v_mode
    vSq = math.exp(logVModeSq) + xiT * xiT            # quadratic sum: thermal + turbulent
    logDopp = 0.5 * math.log(vSq) + logLam0 - logC
    doppler = math.exp(logDopp)                       # cm
    # Single grid point at line centre:
    # Row 0: Delta-lambda in cm; Row 1: Delta-lambda in Doppler widths (Voigt "v")
    linePoints = [[0.0], [0.0]]
    v0 = 0.0
    linePoints[0][0] = doppler * v0
    linePoints[1][0] = v0
    return linePoints
#} //end method lineGridDelta
#//
#//
#//
def lineGridGauss(lam0In, massIn, xiTIn, numDeps, teff, numCore):
    """Build a symmetric, linearly-spaced (Gaussian core) line wavelength grid.

    The red half-profile has numCore points spaced linearly from line centre
    out to 3.5 Doppler widths; the blue half is its mirror image, sharing the
    centre point.

    Parameters:
        lam0In  -- line centre wavelength (cm — NOT converted here)
        massIn  -- absorber mass (amu)
        xiTIn   -- microturbulence (km/s); converted to cm/s internally
        numDeps -- number of depth points (unused here; kept for API symmetry)
        teff    -- effective temperature (K), used as the typical temperature
        numCore -- number of points in the red half-profile (must be >= 2)

    Returns:
        2 x (2*numCore - 1) array: row 0 Delta-lambda in cm, row 1
        Delta-lambda in Doppler widths; blue wing first, negative values.
    """
    logC = Useful.logC()
    logK = Useful.logK()
    amu = Useful.amu()
    ln2 = math.log(2.0)
    # Inputs in linear cgs units:
    logTeff = math.log(teff)
    xiT = 1.0E5 * xiTIn          # km/s -> cm/s
    logLam0 = math.log(lam0In)   # lam0 assumed already in cm
    logMass = math.log(massIn * amu)  # amu -> g
    # Depth-independent Doppler width Delta_lambda_D:
    logVModeSq = ln2 + logK + logTeff - logMass       # M-B dist, square of v_mode
    vSq = math.exp(logVModeSq) + xiT * xiT            # quadratic sum: thermal + turbulent
    logDopp = 0.5 * math.log(vSq) + logLam0 - logC
    doppler = math.exp(logDopp)                       # cm
    maxCoreV = 3.5  # core half-width in Doppler widths
    # Red half-profile: Voigt "v" parameter, spaced linearly, v >= 0:
    vRed = [float(il) * maxCoreV / (numCore - 1) for il in range(numCore)]
    # Mirror about line centre to get the full profile (centre point shared):
    numPoints2 = (2 * numCore) - 1
    # Row 0: Delta-lambda in cm; Row 1: Delta-lambda in Doppler widths
    linePoints2 = [[0.0 for _ in range(numPoints2)] for _ in range(2)]
    for il2 in range(numPoints2):
        if il2 < numCore - 1:
            # Blue (negative Delta-lambda) half, reversed order:
            il = (numCore - 1) - il2
            linePoints2[0][il2] = -1.0 * doppler * vRed[il]
            linePoints2[1][il2] = -1.0 * vRed[il]
        else:
            # Red (positive Delta-lambda) half:
            il = il2 - (numCore - 1)
            linePoints2[0][il2] = doppler * vRed[il]
            linePoints2[1][il2] = vRed[il]
    return linePoints2
#} //end method lineGridGauss
#//
#//
#//
def lineGridVoigt(lam0In, massIn, xiTIn, numDeps, teff, numCore, numWing, species):
    """Build a symmetric Voigt-profile line wavelength grid: linear core plus
    logarithmically spaced wing.

    The red half-profile has numCore points spaced linearly out to 3.5
    Doppler widths, then numWing points spaced logarithmically in v out to
    the wing limit.  For hydrogen lines in hot stars (species "HI" and
    teff >= 7000) the wing extends 12 e-folds instead of 9.  The blue half
    is the mirror image, sharing the centre point.

    Parameters:
        lam0In  -- line centre wavelength (cm — NOT converted here)
        massIn  -- absorber mass (amu)
        xiTIn   -- microturbulence (km/s); converted to cm/s internally
        numDeps -- number of depth points (unused here; kept for API symmetry)
        teff    -- effective temperature (K), used as the typical temperature
        numCore -- points in the linear core (must be >= 2)
        numWing -- points in the logarithmic wing
        species -- species tag (e.g. "HI"), used only for the hot-HI wing limit

    Returns:
        2 x (2*(numCore+numWing) - 1) array: row 0 Delta-lambda in cm,
        row 1 Delta-lambda in Doppler widths; blue wing first, negative.
    """
    logC = Useful.logC()
    logK = Useful.logK()
    amu = Useful.amu()
    ln2 = math.log(2.0)
    # Inputs in linear cgs units:
    logTeff = math.log(teff)
    xiT = 1.0E5 * xiTIn          # km/s -> cm/s
    logLam0 = math.log(lam0In)   # lam0 assumed already in cm
    logMass = math.log(massIn * amu)  # amu -> g
    # Depth-independent Doppler width Delta_lambda_D:
    logVModeSq = ln2 + logK + logTeff - logMass       # M-B dist, square of v_mode
    vSq = math.exp(logVModeSq) + xiT * xiT            # quadratic sum: thermal + turbulent
    logDopp = 0.5 * math.log(vSq) + logLam0 - logC
    doppler = math.exp(logDopp)                       # cm
    numPoints = numCore + numWing
    maxCoreV = 3.5  # core half-width in Doppler widths
    # Wing spans from just outside the core, logarithmically in v:
    minWingDeltaLogV = math.log(maxCoreV + 1.5)
    wingSpanLogV = 9.0  # e-folds of wing extent
    if species == "HI" and teff >= 7000:
        # Broad hydrogen wings in hot stars need a wider grid:
        wingSpanLogV = 12.0
    maxWingDeltaLogV = wingSpanLogV + minWingDeltaLogV
    # Red half-profile in Voigt "v" units (v >= 0):
    vRed = [0.0 for _ in range(numPoints)]
    for il in range(numPoints):
        if il < numCore:
            # Linear spacing in the core:
            vRed[il] = float(il) * maxCoreV / (numCore - 1)
        else:
            # Logarithmic spacing in the wing
            # (NOTE(review): divisor is numPoints-1, not numWing-1 — matches
            #  the original translation; confirm against the Java source):
            jj = float(il) - numCore
            logV = (jj * (maxWingDeltaLogV - minWingDeltaLogV) / (numPoints - 1)) + minWingDeltaLogV
            vRed[il] = math.exp(logV)
    # Mirror about line centre to get the full profile (centre point shared):
    numPoints2 = (2 * numPoints) - 1
    # Row 0: Delta-lambda in cm; Row 1: Delta-lambda in Doppler widths
    linePoints2 = [[0.0 for _ in range(numPoints2)] for _ in range(2)]
    for il2 in range(numPoints2):
        if il2 < numPoints - 1:
            # Blue (negative Delta-lambda) half, reversed order:
            il = (numPoints - 1) - il2
            linePoints2[0][il2] = -1.0 * doppler * vRed[il]
            linePoints2[1][il2] = -1.0 * vRed[il]
        else:
            # Red (positive Delta-lambda) half:
            il = il2 - (numPoints - 1)
            linePoints2[0][il2] = doppler * vRed[il]
            linePoints2[1][il2] = vRed[il]
    return linePoints2
| 10,935
| 31.35503
| 114
|
py
|
ChromaStarPy
|
ChromaStarPy-master/MolecData.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 28 15:53:32 2017
@author: ishort
"""
#//Various diatomic molecular transition data needed for the
#//Just-overlapping-line-approximation (JOLA)
#// to molecular band opacity
#//Input SYSTEM is a string with both the molecular species AND the band "system"
import math
# Square electronic transition moments |R_e|^2 per band system.
# Band-head values R_00^2 from Allen's Astrophysical Quantities, 4th ed.,
# sections 4.12.2-4.13.1 and Table 4.17; CH value is the mean of the two
# values given there.
_SQ_TRANS_MOMENT = {
    "TiO_C3Delta_X3Delta": 0.84,   # TiO alpha system
    "TiO_c1Phi_a1Delta": 4.63,     # TiO beta system
    "TiO_A3Phi_X3Delta": 5.24,     # TiO gamma system
    "CH_A2Delta_X2Pi": 0.081,      # CH A^2Delta-X^2Pi "G band" at 4300 A
}

def getSqTransMoment(system):
    """Return the square electronic transition moment |R_e|^2 for a band system.

    Needed for the JOLA "line strength", S = |R_e|^2 * q_v'v" (or just |R_e|^2).

    Parameters:
        system -- string naming both the molecular species AND the band
                  system, e.g. "TiO_C3Delta_X3Delta"

    Returns:
        |R_e|^2 for the system, or 0.0 if the system is not tabulated.
    """
    return _SQ_TRANS_MOMENT.get(system, 0.0)
# Electronic oscillator strengths f_el(nu_00) per band system, from
# Jorgensen 1994, A&A, 284, 179, Table 1, column 4.
_F_EL = {
    "TiO_C3Delta_X3Delta": 0.17,   # TiO alpha system
    "TiO_c1Phi_a1Delta": 0.28,     # TiO beta system
    "TiO_A3Phi_X3Delta": 0.15,     # TiO gamma system
    "TiO_B3Pi_X3Delta": 0.14,      # TiO gamma prime system
    "TiO_E3Pi_X3Delta": 0.014,     # TiO epsilon system
    "TiO_b1Pi_a1Delta": 0.048,     # TiO delta system
    "TiO_b1Pi_d1Sigma": 0.052,     # TiO phi system
}

def getFel(system):
    """Return the electronic oscillator strength f_el(nu_00) for a band system.

    Parameters:
        system -- string naming both the molecular species AND the band
                  system, e.g. "TiO_A3Phi_X3Delta"

    Returns:
        f_el for the system, or 0.0 if the system is not tabulated.
    """
    return _F_EL.get(system, 0.0)
#} //end of method getFel
# Rotational constants per band system as (lower level B, upper level B),
# in cm^-1.  Values follow Allen's Astrophysical Quantities, 4th ed.,
# Table 4.17, supplemented by Jorgensen 1994, A&A, 284, 179 (Table 2); the
# CH value is a band average from Allen's Table 3.12 applied to both levels.
# The beta-system upper value (0.500000) is a placeholder — no datum in Allen.
# Generally, higher vibrational states have *smaller* B values.
_ROT_CONST = {
    "TiO_C3Delta_X3Delta": (0.535431, 0.489888),  # TiO alpha
    "TiO_c1Phi_a1Delta": (0.537602, 0.500000),    # TiO beta (upper is made-up)
    "TiO_A3Phi_X3Delta": (0.535431, 0.507390),    # TiO gamma
    "TiO_B3Pi_X3Delta": (0.535431, 0.507812),     # TiO gamma prime
    "TiO_E3Pi_X3Delta": (0.535431, 0.5173),       # TiO epsilon
    "TiO_b1Pi_a1Delta": (0.53760, 0.51334),       # TiO delta
    "TiO_b1Pi_d1Sigma": (0.5490, 0.51334),        # TiO phi
    "CH_A2Delta_X2Pi": (14.46, 14.46),            # CH "G band" at 4300 A
}

def getRotConst(system):
    """Return the rotational constants B (cm^-1) for a band system.

    NOTE(review): Eq. 1 of Zeidler & Koester 1982 (1982A&A...113..173Z)
    suggests "B" is a vibrational E-level constant, but Allen's Astrophysical
    Quantities, 4th ed., p. 45 lists B_e and alpha_e as *rotational* constants.
    Allen's B_e values from Table 4.17 are assumed here — confirm which
    constant Zeidler & Koester actually intend.  (An earlier experiment using
    Allen's omega_e vibrational constants instead was abandoned.)

    Parameters:
        system -- string naming both the molecular species AND the band
                  system, e.g. "TiO_C3Delta_X3Delta"

    Returns:
        Two-element list: [0] = lower-level B, [1] = upper-level B (cm^-1);
        [0.0, 0.0] if the system is not tabulated.
    """
    lower, upper = _ROT_CONST.get(system, (0.0, 0.0))
    return [lower, upper]
#} //end of method getRotConst
def getWaveRange(system):
    """Return [blue, red] approximate wavelength limits (nm) of the named
    molecular band system.

    Ranges follow Allen's Astrophysical Quantities; for the systems taken
    from Jorgensen (1994, A&A, 284, 179) the range is guessed as
    lambda_00 +/- 120 nm (+/- 150 nm for the TiO phi system).
    Unrecognized systems yield [0.0, 0.0].
    """
    # (blue limit, red limit) in nm, keyed by band-system designation.
    limits_by_system = {
        "TiO_C3Delta_X3Delta": (405.0, 630.0),                    # TiO alpha
        "TiO_c1Phi_a1Delta": (490.0, 580.0),                      # TiO beta
        "TiO_A3Phi_X3Delta": (570.0, 865.0),                      # TiO gamma
        "TiO_B3Pi_X3Delta": (619.2 - 120.0, 619.2 + 120.0),       # TiO gamma prime
        "TiO_E3Pi_X3Delta": (840.8 - 120.0, 840.8 + 120.0),       # TiO epsilon
        "TiO_b1Pi_a1Delta": (887.1 - 120.0, 887.1 + 120.0),       # TiO delta
        "TiO_b1Pi_d1Sigma": (1104.5 - 150.0, 1104.5 + 150.0),     # TiO phi
        "CH_A2Delta_X2Pi": (430.0 - 120.0, 430.0 + 120.0),        # CH G band (4300 A)
    }
    blue, red = limits_by_system.get(system, (0.0, 0.0))
    return [blue, red]
#} //end of method getWaveRange
def getQuantumS(system):
    """Return the multiplicative line-strength factor ("script S" of Allen,
    4th Ed., p. 88) for the named molecular band system.

    These are hand-tuned values chosen to make the band strengths look
    right (the proper Wigner 6-j computation is not available here).
    Unrecognized systems get a neutral factor of 1.0.
    """
    # Hand-tuned strength factor per band system.
    strength_by_system = {
        "TiO_C3Delta_X3Delta": 1.0e-15,  # TiO alpha
        "TiO_c1Phi_a1Delta": 1.0e-15,    # TiO beta
        "TiO_A3Phi_X3Delta": 1.5e-14,    # TiO gamma
        "TiO_B3Pi_X3Delta": 1.0e-15,     # TiO gamma prime (Jorgensen 1994)
        "TiO_E3Pi_X3Delta": 1.0e-14,     # TiO epsilon
        "TiO_b1Pi_a1Delta": 5.0e-15,     # TiO delta
        "TiO_b1Pi_d1Sigma": 1.0e-14,     # TiO phi
        "CH_A2Delta_X2Pi": 1.0e-14,      # CH G band - who really knows...
    }
    return strength_by_system.get(system, 1.0)
#} //end method getQuantumS
def getOrigin(system):
    """Return the wavenumber of the band origin, nu_00 (cm^-1), for the
    named molecular band system.

    Values from Allen's Astrophysical Quantities (Table 4.18); the gamma
    prime, epsilon, delta and phi systems are from Jorgensen (1994, A&A,
    284, 179).  Unrecognized systems return 0.0.
    """
    origin_by_system = {
        "TiO_C3Delta_X3Delta": 19341.7,  # TiO alpha
        "TiO_c1Phi_a1Delta": 17840.6,    # TiO beta
        "TiO_A3Phi_X3Delta": 14095.9,    # TiO gamma
        "TiO_B3Pi_X3Delta": 16148.5,     # TiO gamma prime
        "TiO_E3Pi_X3Delta": 11894.0,     # TiO epsilon
        "TiO_b1Pi_a1Delta": 11272.8,     # TiO delta
        "TiO_b1Pi_d1Sigma": 9054.0,      # TiO phi
        "CH_A2Delta_X2Pi": 23217.5,      # CH G band (4300 A)
    }
    # Returned as a wavenumber, not a frequency (no factor of c applied).
    return origin_by_system.get(system, 0.0)
#} //end of method getOrigin
def getDeltaLambda(system):
    """Return the change in electronic orbital angular momentum quantum
    number, Delta-Lambda, for the named molecular band system.

    Lambda by state letter: Sigma=0, Pi=1, Delta=2, Phi=3.  The upper
    state appears first in the system designation (believed - to be
    confirmed).  Unrecognized systems return 0.
    """
    delta_lambda_by_system = {
        "TiO_C3Delta_X3Delta": 0,   # TiO alpha (Delta -> Delta)
        "TiO_c1Phi_a1Delta": 1,     # TiO beta
        "TiO_A3Phi_X3Delta": 1,     # TiO gamma
        "TiO_B3Pi_X3Delta": -1,     # TiO gamma prime (Jorgensen 1994)
        "TiO_E3Pi_X3Delta": -1,     # TiO epsilon
        "TiO_b1Pi_a1Delta": -1,     # TiO delta
        "TiO_b1Pi_d1Sigma": 1,      # TiO phi
        "CH_A2Delta_X2Pi": 1,       # CH G band (4300 A)
    }
    return delta_lambda_by_system.get(system, 0)
| 11,055
| 27.791667
| 99
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Dgefa.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 12:09:11 2019
@author:
"""
import math
import numpy
#from scipy.linalg.blas import daxpy
#from scipy.linalg.blas import ddot
#from scipy.linalg.blas import dscal
#from scipy.linalg.blas import idamax
#from Documents.ChromaStarPy.GAS.blas.Ddot import ddot
#from Documents.ChromaStarPy.GAS.blas.Dscal import dscal
#from Documents.ChromaStarPy.GAS.blas.Idamax import idamax
#from Documents.ChromaStarPy.GAS.blas.Daxpy import daxpy
import Ddot
import Dscal
import Idamax
import Daxpy
def dgefa(a, lda, n):
    """Factor the n x n matrix `a` by Gaussian elimination with partial
    pivoting (Python port of LINPACK DGEFA).

    Parameters:
      a   - n x n matrix as a list of row lists; mutated in place so that
            the upper triangle holds U and the entries below the diagonal
            hold the (negated) elimination multipliers of L
      lda - leading dimension of `a` in the FORTRAN original; accepted for
            signature compatibility but unused in this port
      n   - order of the matrix
    Returns a tuple (a, ipvt, info):
      a    - the factored matrix (same object that was passed in)
      ipvt - pivot index vector (0-based in this port)
      info - 0 for a normal exit, or k if u[k][k] == 0.0; not an error for
             this routine, but dgesl/dgedi would divide by zero if called
    """
    info = 0
    ipvt = [0 for i in range(n)]
    """
    c
    c     dgefa factors a double precision matrix by gaussian elimination.
    c
    c     dgefa is usually called by dgeco, but it can be called
    c     directly with a saving in time if  rcond  is not needed.
    c     (time for dgeco) = (1 + 9/n)*(time for dgefa) .
    c
    c     on entry
    c
    c        a       double precision(lda, n)
    c                the matrix to be factored.
    c
    c        lda     integer
    c                the leading dimension of the array  a .
    c
    c        n       integer
    c                the order of the matrix  a .
    c
    c     on return
    c
    c        a       an upper triangular matrix and the multipliers
    c                which were used to obtain it.
    c                the factorization can be written  a = l*u  where
    c                l  is a product of permutation and unit lower
    c                triangular matrices and  u  is upper triangular.
    c
    c        ipvt    integer(n)
    c                an integer vector of pivot indices.
    c
    c        info    integer
    c                = 0  normal value.
    c                = k  if  u(k,k) .eq. 0.0 .  this is not an error
    c                     condition for this subroutine, but it does
    c                     indicate that dgesl or dgedi will divide by zero
    c                     if called.  use  rcond  in dgeco for a reliable
    c                     indication of singularity.
    c
    c     linpack. this version dated 08/14/78 .
    c     cleve moler, university of new mexico, argonne national lab.
    c
    c     subroutines and functions
    c
    c     blas daxpy,dscal,idamax
    c
    c     internal variables
    c
    """
    """
    Port to python by Ian Short
    Saint Mary's University
    May 2019
    """
    # Gaussian elimination with partial pivoting:
    info = 0
    nm1 = n - 1
    if (nm1 >= 1):
        for k in range(nm1):
            kp1 = k + 1
            # Find l = pivot index: row of the largest-magnitude element in
            # column k among rows k..n-1 (idamax returns a 0-based offset
            # into the extracted sub-column, hence the "+ k").
            l = Idamax.idamax(n-k, [a[kk][k] for kk in range(k, n)], 1) + k
            ipvt[k] = l
            # A zero pivot implies this column is already triangularized.
            if (a[l][k] != 0.0e0):
                # Interchange the pivot element if necessary.
                if (l != k):
                    t = a[l][k]
                    a[l][k] = a[k][k]
                    a[k][k] = t
                # Compute the elimination multipliers: scale column k below
                # the diagonal by -1/pivot (FORTRAN dscal worked on the
                # array in place; here the scaled sub-column is copied back).
                t = -1.0e0/a[k][k]
                dscalOut = Dscal.dscal(n-k-1, t, [a[kk][k] for kk in range(k+1, n)], 1)
                dscalCount = 0
                for kk in range(k+1, n):
                    a[kk][k] = dscalOut[dscalCount]
                    dscalCount+=1
                # Row elimination with column indexing: add t times the
                # multiplier column into each remaining column j.
                for j in range(kp1, n):
                    t = a[l][j]
                    if (l != k):
                        a[l][j] = a[k][j]
                        a[k][j] = t
                    daxpyOut = Daxpy.daxpy(n-k-1, t, [a[kk][k] for kk in range(k+1, n)], 1, [a[kk][j] for kk in range(k+1, n)], 1)
                    daxpyCount = 0
                    for kk in range(k+1, n):
                        a[kk][j] = daxpyOut[daxpyCount]
                        daxpyCount+=1
            if (a[l][k] == 0.0e0):
                info = k
    # Last pivot is trivially the last row (0-based index n-1).
    ipvt[n-1] = n-1
    if (a[n-1][n-1] == 0.0e0):
        info = n-1
    # Return a tuple; `a` is also mutated in place.
    return a, ipvt, info
| 6,906
| 31.125581
| 119
|
py
|
ChromaStarPy
|
ChromaStarPy-master/LevelPopsGasServer.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 14:13:47 2017
@author: ishort
"""
import math
import Useful
import ToolBox
#import numpy
#JB#
#from matplotlib.pyplot import plot, title, show, scatter
#storage for fits (not all may be used)
uw = []
uwa = []
uwb = []
uwStage = []
uwbStage = []
uwu = []
uwl = []
uua=[]
uub=[]
"""
#a function to create a cubic function fit extrapolation
def cubicFit(x,y):
coeffs = numpy.polyfit(x,y,3)
#returns an array of coefficents for the cubic fit of the form
#Ax^3 + Bx^2 + Cx + D as [A,B,C,D]
return coeffs
#this will work for any number of data points!
def valueFromFit(fit,x):
#return the value y for a given fit, at point x
return (fit[0]*(x**3)+fit[1]*(x**2)+fit[2]*x+fit[3])
#holds the five temperature at which we have partition function data
"""
masterTemp = [130, 500, 3000, 8000, 10000]
#JB#
#def levelPops(lam0In, logNStage, chiL, log10UwStage, gwL, numDeps, temp):
def levelPops(lam0In, logNStage, chiL, logUw, gwL, numDeps, temp):
    """ Returns depth distribution of occupation numbers in lower level of b-b transition,
    // Input parameters:
    // lam0In - line centre wavelength in nm
    // logNStage - depth array of log_e number density of absorbers in the
    //    relevant ion stage (cm^-3)
    // chiL - energy of lower atomic E-level of b-b transition in eV
    // logUw - 5-element array of log_e partition function values tabulated
    //    at the temperatures in module-level masterTemp
    // gwL - statistical weight of the lower level
    // Also needs atmospheric structure information:
    // numDeps - number of depth points
    // temp - temp[0][id] is T in K (temp[1][id] is unused here; presumably
    //    log_e T - TODO confirm against callers)
    // Returns: depth array of log_e lower-level population (cm^-3).
    // Side effect: appends the cubic partition-function fit to the
    // module-level list `uw`. """
    # Physical constants / their logs (several of these are unused below
    # but kept from the original Java port).
    c = Useful.c()
    logC = Useful.logC()
    k = Useful.k()
    logK = Useful.logK()
    logH = Useful.logH()
    logEe = Useful.logEe()
    logMe = Useful.logMe()
    ln10 = math.log(10.0)
    logE = math.log10(math.e); #// for debug output
    log2pi = math.log(2.0 * math.pi)
    log2 = math.log(2.0)
    #// Partition functions passed in are temperature-dependent log_e Us.
    thisLogUw = 0.0 # //default initialization
    logE10 = math.log(10.0)
    logGwL = math.log(gwL)
    #//Log of line-center wavelength in cm
    logLam0 = math.log(lam0In) #// * 1.0e-7);
    #// energy of b-b transition (computed but unused below)
    logTransE = logH + logC - logLam0 #//ergs
    # Guard against log(0) / log(negative) for a ground-level transition:
    if (chiL <= 0.0):
        chiL = 1.0e-49
    logChiL = math.log(chiL) + Useful.logEv() #// Convert lower E-level from eV to ergs
    logBoltzFacL = logChiL - Useful.logK() #// Pre-factor for exponent of excitation Boltzmann factor
    boltzFacL = math.exp(logBoltzFacL)
    boltzFacGround = 0.0 / k #//I know - its zero, but let's do it this way anyway'
    #// return a 1D numDeps array of logarithmic number densities
    #// level population of lower level of bb transition (could be in either stage I or II!)
    logNums = [ 0.0 for i in range(numDeps)]
    #JB#
    # Fit a cubic in T to the five tabulated partition-function values.
    logUwFit = ToolBox.cubicFit(masterTemp,logUw)#u(T) fit
    uw.append(logUwFit)
    #JB#
    for id in range(numDeps):
        #//Determine temperature dependent partition function Uw at this depth:
        thisTemp = temp[0][id]
        """
        if (Ttheta >= 1.0):
            thisLogUw = logUw[0]
        if (Ttheta <= 0.5):
            thisLogUw = logUw[1]
        if (Ttheta > 0.5 and Ttheta < 1.0):
            thisLogUw = ( logUw[1] * (Ttheta - 0.5)/(1.0 - 0.5) ) \
                      + ( logUw[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
        """
        #JB#
        thisLogUw = ToolBox.valueFromFit(logUwFit,thisTemp)#u(T) value extrapolated
        #JB#
        # Clamp to the tabulated endpoints outside [130 K, 10000 K] so the
        # cubic fit is never extrapolated beyond the data:
        if (thisTemp >= 10000.0):
            thisLogUw = logUw[4]
        if (thisTemp <= 130.0):
            thisLogUw = logUw[0]
        """
        if (thisTemp > 130 and thisTemp <= 500):
            thisLogUw = logUw[1] * (thisTemp - 130)/(500 - 130) \
                      + logUw[0] * (500 - thisTemp)/(500 - 130)
        if (thisTemp > 500 and thisTemp <= 3000):
            thisLogUw = logUw[2] * (thisTemp - 500)/(3000 - 500) \
                      + logUw[1] * (3000 - thisTemp)/(3000 - 500)
        if (thisTemp > 3000 and thisTemp <= 8000):
            thisLogUw = logUw[3] * (thisTemp - 3000)/(8000 - 3000) \
                      + logUw[2] * (8000 - thisTemp)/(8000 - 3000)
        if (thisTemp > 8000 and thisTemp < 10000):
            thisLogUw = logUw[4] * (thisTemp - 8000)/(10000 - 8000) \
                      + logUw[3] * (10000 - thisTemp)/(10000 - 8000)
        """
        #// Boltzmann excitation in the log domain:
        #// log n_l = log N_stage - chi_L/kT + log g_L - log U(T)
        logNums[id] = logNStage[id] - boltzFacL / temp[0][id] + logGwL - thisLogUw #// lower level of b-b transition
    #//id loop
    return logNums
#//This version - ionization equilibrium *WITHOUT* molecules - logNum is TOTAL element population
#def stagePops2(logNum, Ne, chiIArr, log10UwAArr, \
# numMols, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# numDeps, temp):
def stagePops(logNum, Ne, chiIArr, logUw, \
              numDeps, temp):
    """Ionization equilibrium routine WITHOUT molecule formation:
    // Returns depth distribution of ionization stage populations
    // Input parameters:
    // logNum - array with depth-dependent total element number densities (cm^-3)
    // Ne - Ne[1][id] is log_e electron number density (cm^-3)
    // chiIArr - ground state ionization energy of each stage (eV); its
    //    length sets the number of stages populated
    // logUw - per-stage arrays of log_e partition function values tabulated
    //    at the temperatures in module-level masterTemp
    // Also needs atmospheric structure information:
    // numDeps
    // temp structure - temp[0][id] is T (K); temp[1][id] is presumably
    //    log_e T (used directly in the Saha exponent) - TODO confirm
    // Returns logNums[numStages][numDeps]: log_e population of each stage.
    """
    ln10 = math.log(10.0)
    logE = math.log10(math.e) #// for debug output
    log2pi = math.log(2.0 * math.pi)
    log2 = math.log(2.0)
    numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
    #//Default initializations:
    #//We need one more stage in size of saha factor than number of stages we're actualy populating
    thisLogUw = [ 0.0 for i in range(numStages+1) ]
    for i in range(numStages+1):
        thisLogUw[i] = 0.0
    logE10 = math.log(10.0)
    #//atomic ionization stage Boltzmann factors (chi_I/k, in K):
    boltzFacI = [ 0.0 for i in range(numStages) ]
    for i in range(numStages):
        logChiI = math.log(chiIArr[i]) + Useful.logEv()
        logBoltzFacI = logChiI - Useful.logK()
        boltzFacI[i] = math.exp(logBoltzFacI)
    # Temperature-independent part of the Saha factor, 2*(2 pi m_e k / h^2)^(3/2):
    logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
    #// return a 2D numStages x numDeps array of logarithmic number densities
    #// Row 0: neutral stage, Row 1: singly ionized, etc.
    logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
    #//We need one more stage in size of saha factor than number of stages we're actualy populating
    #// for index accounting purposes
    #// For atomic ionization stages:
    logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
    saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
    #//
    logIonFrac = [ 0.0 for i in range(numStages) ]
    #// Now - molecular variables (vestigial here; this variant has no molecules):
    thisLogUwA = 0.0 #// element A
    logUwA = [ 0.0 for i in range(5) ]
    #JB#
    # Cubic u(T) fit per stage from the five tabulated values:
    uua=[]
    for iStg in range(numStages):
        currentUwArr=list(logUw[iStg])#u(T) determined values
        UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
        uua.append(UwFit)
    for id in range(numDeps):
        #//Row 1 of Ne is log_e Ne in cm^-3
        logNe = Ne[1][id]
        #//Determine temperature dependent partition functions Uw:
        thisTemp = temp[0][id]
        #JB#
        #use temps and partition values to create a function
        #then use said function to extrapolate values for all points
        # Partition function of the stage above the highest populated one is
        # taken as unity (log = 0):
        thisLogUw[numStages] = 0.0
        for iStg in range(numStages):
            thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
        #JB#
        #// Clamp to the tabulated endpoints outside [130 K, 10000 K]: lburns
        if (thisTemp <= 130.0):
            for iStg in range(numStages):
                thisLogUw[iStg] = logUw[iStg][0]
        if (thisTemp >= 10000.0):
            for iStg in range(numStages):
                thisLogUw[iStg] = logUw[iStg][4]
        #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
        thisLogUwA = thisLogUw[0];
        #//Ionization stage Saha factors, N_(i+1)*Ne/N_i:
        for iStg in range(numStages):
            logSaha[iStg+1][iStg] = logSahaFac - logNe - (boltzFacI[iStg] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg+1] - thisLogUw[iStg]
            saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
        #//Compute denominator of ionization fraction, f_stage:
        denominator = 1.0 #//default initialization - leading term is always unity
        #//ion stage contributions:
        for jStg in range(1, numStages+1):
            addend = 1.0 #//default initialization for product series
            for iStg in range(jStg):
                # Product of Saha factors up the ladder to stage jStg:
                addend = addend * saha[iStg+1][iStg]
            denominator = denominator + addend
        #//
        logDenominator = math.log(denominator)
        logIonFrac[0] = -1.0 * logDenominator #// log ionization fraction in stage I
        for jStg in range(1, numStages):
            addend = 0.0 #//default initialization for product series
            for iStg in range(jStg):
                # Sum of log Saha factors = log of the product series:
                addend = addend + logSaha[iStg+1][iStg]
            logIonFrac[jStg] = addend - logDenominator
        for iStg in range(numStages):
            logNums[iStg][id] = logNum[id] + logIonFrac[iStg]
    #//id loop
    return logNums;
#//end method stagePops
#def stagePops2(logNum, Ne, chiIArr, log10UwAArr, \
# numMols, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# numDeps, temp):
def stagePops2(logNum, Ne, chiIArr, logUw, \
               numMols, logNumB, dissEArr, logUwB, logQwABArr, logMuABArr, \
               numDeps, temp):
    #line 1: //species A data - ionization equilibrium of A
    #line 2: //data for set of species "B" - molecular equilibrium for set {AB}
    """Ionization equilibrium routine that accounts for molecule formation:
    // Returns depth distribution of ionization stage populations
    // Input parameters:
    // logNum - array with depth-dependent total element number densities (cm^-3)
    // Ne - Ne[1][id] is log_e electron number density (cm^-3)
    // chiIArr - ground state ionization energy of each stage (eV)
    // logUw - per-stage log_e partition function values at masterTemp
    // numMols - number of molecular species B that A can bind into
    // logNumB - logNumB[iMol][id]: log_e number density of partner B (cm^-3)
    // dissEArr - molecular dissociation energies (eV)
    // logUwB - per-molecule-partner log_e partition functions at masterTemp
    // logQwABArr - per-molecule log_e molecular partition-function values
    //    at masterTemp (interpolated piecewise-linearly in T below)
    // logMuABArr - log_e reduced mass of each molecule AB
    // Also needs atmospheric structure information:
    // numDeps
    // temp structure - temp[0][id] is T (K); temp[1][id] presumably log_e T
    // Atomic element A is the one whose ionization fractions are being computed
    // Element B refers to array of other species with which A forms molecules AB """
    ln10 = math.log(10.0)
    logE = math.log10(math.e) #// for debug output
    log2pi = math.log(2.0 * math.pi)
    log2 = math.log(2.0)
    numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
    #//Default initializations:
    #//We need one more stage in size of saha factor than number of stages we're actualy populating
    thisLogUw = [ 0.0 for i in range(numStages+1) ]
    for i in range(numStages+1):
        thisLogUw[i] = 0.0
    logE10 = math.log(10.0)
    #//atomic ionization stage Boltzmann factors (chi_I/k, in K):
    boltzFacI = [ 0.0 for i in range(numStages) ]
    for i in range(numStages):
        logChiI = math.log(chiIArr[i]) + Useful.logEv()
        logBoltzFacI = logChiI - Useful.logK()
        boltzFacI[i] = math.exp(logBoltzFacI)
    # Temperature-independent part of the Saha factor, 2*(2 pi m_e k / h^2)^(3/2):
    logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
    #// return a 2D numStages x numDeps array of logarithmic number densities
    #// Row 0: neutral stage, Row 1: singly ionized, etc.
    logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
    #// For atomic ionization stages:
    logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
    saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
    #//
    logIonFrac = [ 0.0 for i in range(numStages) ]
    #// Now - molecular variables:
    #//Treat at least one molecule - if there are really no molecules for an atomic species,
    #//there will be one phantom molecule in the denominator of the ionization fraction
    #//with an impossibly high dissociation energy
    ifMols = True
    if (numMols == 0):
        ifMols = False
        numMols = 1
        #//This should be inherited, but let's make sure:
        dissEArr[0] = 19.0 #//eV
    #//Molecular partition functions - default initialization:
    thisLogUwB = [ 0.0 for i in range(numMols) ]
    for iMol in range(numMols):
        thisLogUwB[iMol] = 0.0 #// variable for temp-dependent computed partn fn of array element B
    thisLogUwA = 0.0 #// element A
    thisLogQwAB = math.log(300.0)
    #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
    #// for molecule formation:
    logUwA = [ 0.0 for i in range(5) ]
    if (numMols > 0):
        for kk in range(len(logUwA)):
            logUwA[kk] = logUw[0][kk]
    #// lburns
    #//Molecular dissociation Boltzmann factors (D/k, in K):
    boltzFacIAB = [ 0.0 for i in range(numMols) ]
    logMolSahaFac = [ 0.0 for i in range(numMols) ]
    for iMol in range(numMols):
        logDissE = math.log(dissEArr[iMol]) + Useful.logEv()
        logBoltzFacIAB = logDissE - Useful.logK()
        boltzFacIAB[iMol] = math.exp(logBoltzFacIAB)
        # (2 pi mu_AB k / h^2)^(3/2) per molecule:
        logMolSahaFac[iMol] = (3.0 / 2.0) * (log2pi + logMuABArr[iMol] + Useful.logK() - 2.0 * Useful.logH())
    #// For molecular species:
    logSahaMol = [ 0.0 for i in range(numMols) ]
    invSahaMol = [ 0.0 for i in range(numMols) ]
    #JB#
    # Cubic u(T) fits from the five tabulated values, per stage and per
    # molecular partner:
    uua=[]
    uub=[]
    qwab=[]
    for iStg in range(numStages):
        currentUwArr=list(logUw[iStg])#u(T) determined values
        UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
        uua.append(UwFit)
    for iMol in range(numMols):
        currentUwBArr=list(logUwB[iMol])#u(T) determined values
        UwBFit = ToolBox.cubicFit(masterTemp,currentUwBArr)#u(T) fit
        uub.append(UwBFit)
    for id in range(numDeps):
        #//Row 1 of Ne is log_e Ne in cm^-3
        logNe = Ne[1][id]
        #//Determine temperature dependent partition functions Uw:
        thisTemp = temp[0][id]
        #JB#
        #use temps and partition values to create a function
        #then use said function to extrapolate values for all points
        # Partition function of the stage above the highest populated one is
        # taken as unity (log = 0):
        thisLogUw[numStages] = 0.0
        for iStg in range(numStages):
            thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
        for iMol in range(numMols):
            thisLogUwB[iMol] = ToolBox.valueFromFit(uub[iMol],thisTemp)#u(T) value extrapolated
        #JB#
        #// Clamp to the tabulated endpoints outside [130 K, 10000 K]: lburns
        if (thisTemp <= 130.0):
            for iStg in range(numStages):
                thisLogUw[iStg] = logUw[iStg][0]
            for iMol in range(numMols):
                thisLogUwB[iMol] = logUwB[iMol][0]
        if (thisTemp >= 10000.0):
            for iStg in range(numStages):
                thisLogUw[iStg] = logUw[iStg][4]
            for iMol in range(numMols):
                thisLogUwB[iMol] = logUwB[iMol][4]
        # Piecewise-linear interpolation of the molecular partition function
        # QwAB in T over the masterTemp nodes (500/3000/8000/10000 K):
        for iMol in range(numMols):
            if (thisTemp < 3000.0):
                thisLogQwAB = ( logQwABArr[iMol][1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
                            + ( logQwABArr[iMol][2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
            if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
                thisLogQwAB = ( logQwABArr[iMol][2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
                            + ( logQwABArr[iMol][3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
            if ( thisTemp > 8000.0 ):
                thisLogQwAB = ( logQwABArr[iMol][3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
                            + ( logQwABArr[iMol][4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
        #// iMol loop
        #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
        thisLogUwA = thisLogUw[0];
        #//Ionization stage Saha factors, N_(i+1)*Ne/N_i:
        for iStg in range(numStages):
            logSaha[iStg+1][iStg] = logSahaFac - logNe - (boltzFacI[iStg] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg+1] - thisLogUw[iStg]
            saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
        #//Molecular Saha factors:
        for iMol in range(numMols):
            logSahaMol[iMol] = logMolSahaFac[iMol] - logNumB[iMol][id] - (boltzFacIAB[iMol] / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUwB[iMol] + thisLogUwA - thisLogQwAB
            #//For denominator of ionization fraction, we need *inverse* molecular Saha factors (N_AB/NI):
            logSahaMol[iMol] = -1.0 * logSahaMol[iMol]
            invSahaMol[iMol] = math.exp(logSahaMol[iMol])
        #//Compute denominator of ionization fraction, f_stage:
        denominator = 1.0 #//default initialization - leading term is always unity
        #//ion stage contributions:
        for jStg in range(1, numStages+1):
            addend = 1.0 #//default initialization for product series
            for iStg in range(jStg):
                # Product of Saha factors up the ladder to stage jStg:
                addend = addend * saha[iStg+1][iStg]
            denominator = denominator + addend
        #//molecular contribution
        if (ifMols == True):
            for iMol in range(numMols):
                denominator = denominator + invSahaMol[iMol]
        #//
        logDenominator = math.log(denominator)
        logIonFrac[0] = -1.0 * logDenominator #// log ionization fraction in stage I
        for jStg in range(1, numStages):
            addend = 0.0 #//default initialization for product series
            for iStg in range(jStg):
                # Sum of log Saha factors = log of the product series:
                addend = addend + logSaha[iStg+1][iStg]
            logIonFrac[jStg] = addend - logDenominator
        for iStg in range(numStages):
            logNums[iStg][id] = logNum[id] + logIonFrac[iStg]
    #//id loop
    return logNums;
#//end method stagePops2
def stagePops3(logNum, Ne, chiIArr, logUw, numDeps, temp):
#Version for ChromaStarPyGas: logNum is now *neutral stage* population from Phil
# Bennett's GAS package
#line 1: //species A data - ionization equilibrium of A
#line 2: //data for set of species "B" - molecular equlibrium for set {AB}
"""Ionization equilibrium routine that accounts for molecule formation:
// Returns depth distribution of ionization stage populations
// Input parameters:
// logNum - array with depth-dependent neutral stage number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
// Also needs atsmopheric structure information:
// numDeps
// temp structure
// rho structure
// Atomic element A is the one whose ionization fractions are being computed
// Element B refers to array of other species with which A forms molecules AB """
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
#// var numMols = dissEArr.length;
#// Parition functions passed in are 2-element vectore with remperature-dependent base 10 log Us
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
#//We need one more stage in size of saha factor than number of stages we're actualy populating
thisLogUw = [ 0.0 for i in range(numStages+1) ]
for i in range(numStages+1):
thisLogUw[i] = 0.0
logE10 = math.log(10.0)
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
boltzFacI = [ 0.0 for i in range(numStages) ]
#print("numStages ", numStages, " Useful.logEv ", Useful.logEv())
for i in range(numStages):
#print("i ", i, " chiIArr ", chiIArr[i])
logChiI = math.log(chiIArr[i]) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI[i] = math.exp(logBoltzFacI)
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
#// return a 2D 5 x numDeps array of logarithmic number densities
#// Row 0: neutral stage ground state population
#// Row 1: singly ionized stage ground state population
#// Row 2: doubly ionized stage ground state population
#// Row 3: triply ionized stage ground state population
#// Row 4: quadruply ionized stage ground state population
#double[][] logNums = new double[numStages][numDeps];
logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
#//We need one more stage in size of saha factor than number of stages we're actualy populating
#// for index accounting pirposes
#// For atomic ionization stages:
#logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#//
#logIonFrac = [ 0.0 for i in range(numStages) ]
#double expFac, logNe;
#JB#
uua=[]
uub=[]
qwab=[]
for iStg in range(numStages):
currentUwArr=list(logUw[iStg])#u(T) determined values
UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
uua.append(UwFit)
#print(logUw[iStg])
for id in range(numDeps):
#//// reduce or enhance number density by over-all Rosseland opcity scale parameter
#//
#//Row 1 of Ne is log_e Ne in cm^-3
logNe = Ne[1][id]
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
#Ttheta = 5040.0 / thisTemp
#JB#
#use temps and partition values to create a function
#then use said function to extrapolate values for all points
thisLogUw[numStages] = 0.0
for iStg in range(numStages):
thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
#JB#
#// NEW Determine temperature dependent partition functions Uw: lburns
if (thisTemp <= 130.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][0]
if (thisTemp >= 10000.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][4]
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
#thisLogUwA = thisLogUw[0];
#//Ionization stage Saha factors:
logNums[0][id] = logNum[id]
for iStg in range(1, numStages):
#print("iStg ", iStg)
thisLogSaha = logSahaFac - logNe - (boltzFacI[iStg-1] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg] - thisLogUw[iStg-1]
#saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
logNums[iStg][id] = logNums[iStg-1][id] + thisLogSaha
#//id loop
return logNums;
#//end method stagePops
#def sahaRHS(chiI, log10UwUArr, log10UwLArr, temp):
def sahaRHS(chiI, logUwU, logUwL, temp):
    """RHS of partial pressure formulation of Saha equation in standard form (N_U*P_e/N_L on LHS)
    // Returns depth distribution of LHS: Phi(T) === N_U*P_e/N_L (David Gray notation)
    // Input parameters:
    // chiI - ground state ionization energy of lower stage (eV)
    // logUwU, logUwL - arrays of temperature-dependent partition functions for upper and lower ionization stage
    // Also needs atmospheric structure information:
    // numDeps
    // temp structure (temp[0] = T in K, temp[1] = ln T)
    //
    // Atomic element "A" is the one whose ionization fractions are being computed
    // Element "B" refers to array of other species with which A forms molecules "AB" """
    ln10 = math.log(10.0)
    logE = math.log10(math.e) #// for debug output
    log2pi = math.log(2.0 * math.pi)
    log2 = math.log(2.0)
    #// Partition functions passed in are temperature-dependent natural-log Us
    #//Default initializations:
    thisLogUwU = 0.0
    thisLogUwL = 0.0
    logE10 = math.log(10.0)
    # NOTE(review): the loop below overwrites the caller's upper-stage partition-function
    # array with the lower-stage values AND mutates the `logUwU` argument in place, which
    # makes thisLogUwU and thisLogUwL largely cancel in logSaha below.  This looks like a
    # leftover from the log10->ln conversion (see commented line) — TODO confirm intended.
    for kk in range(len(logUwL)):
        logUwU[kk] = logUwL[kk]
        # logUwL[kk] = logE10*log10UwLArr[kk]
    #//System.out.println("chiL before: " + chiL);
    #// If we need to subtract chiI from chiL, do so *before* converting to tiny numbers in ergs!
    #//atomic ionization stage Boltzmann factors:
    # chi_I (eV) -> ergs -> divide by k to get the exponent's temperature scale (K)
    logChiI = math.log(chiI) + Useful.logEv()
    logBoltzFacI = logChiI - Useful.logK()
    boltzFacI = math.exp(logBoltzFacI)
    #//Extra factor of k to get k^5/2 in the P_e formulation of Saha Eq.
    logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH()) + Useful.logK()
    #//double[] logLHS = new double[numDeps];
    #// For atomic ionization stages:
    #//
    #//Determine temperature dependent partition functions Uw:
    thisTemp = temp[0]
    #Ttheta = 5040.0 / thisTemp
    """
    if (Ttheta >= 1.0):
        thisLogUwU = logUwU[0]
        thisLogUwL = logUwL[0]
    if (Ttheta <= 0.5):
        thisLogUwU = logUwU[1]
        thisLogUwL = logUwL[1]
    if (Ttheta > 0.5 and Ttheta < 1.0):
        thisLogUwU = ( logUwU[1] * (Ttheta - 0.5)/(1.0 - 0.5) )
                   + ( logUwU[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
        thisLogUwL = ( logUwL[1] * (Ttheta - 0.5)/(1.0 - 0.5) )
                   + ( logUwL[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
    """
    #JB#
    # Cubic fit of U(T) over the master temperature grid, then evaluate at this depth's T
    currentUwUArr=list(logUwU)#u(T) determined values
    UwUFit = ToolBox.cubicFit(masterTemp,currentUwUArr)#u(T) fit
    thisLogUwU = ToolBox.valueFromFit(UwUFit,thisTemp)#u(T) value extrapolated
    currentUwLArr=list(logUwL)#u(T) determined values
    UwLFit = ToolBox.cubicFit(masterTemp,currentUwLArr)#u(T) fit
    thisLogUwL = ToolBox.valueFromFit(UwLFit,thisTemp)#u(T) value extrapolated
    #JB#
    #will need to do this one in Main as it goes through its own loop of temp
    #if thisTemp == superTemp[0][len(superTemp[0])]:
    #    uwu.append(UwUFit)
    #    uwl.append(UwLFit)
    #
    #JB#
    # Clamp to the tabulated endpoints outside the fitted temperature range
    if (thisTemp <= 130.0):
        thisLogUwU = logUwU[0]
        thisLogUwL = logUwL[0]
    if (thisTemp >= 10000.0):
        thisLogUwU = logUwU[4]
        thisLogUwL = logUwL[4]
    """
    if (thisTemp > 130 and thisTemp <= 500):
        thisLogUwU = logUwU[1] * (thisTemp - 130)/(500 - 130) \
                   + logUwU[0] * (500 - thisTemp)/(500 - 130)
        thisLogUwL = logUwL[1] * (thisTemp - 130)/(500 - 130) \
                   + logUwL[0] * (500 - thisTemp)/(500 - 130)
    if (thisTemp > 500 and thisTemp <= 3000):
        thisLogUwU = logUwU[2] * (thisTemp - 500)/(3000 - 500) \
                   + logUwU[1] * (3000 - thisTemp)/(3000 - 500)
        thisLogUwL = logUwL[2] * (thisTemp - 500)/(3000 - 500) \
                   + logUwL[1] * (3000 - thisTemp)/(3000 - 500)
    if (thisTemp > 3000 and thisTemp <= 8000):
        thisLogUwU = logUwU[3] * (thisTemp - 3000)/(8000 - 3000) \
                   + logUwU[2] * (8000 - thisTemp)/(8000 - 3000)
        thisLogUwL = logUwL[3] * (thisTemp - 3000)/(8000 - 3000) \
                   + logUwL[2] * (8000 - thisTemp)/(8000 - 3000)
    if (thisTemp > 8000 and thisTemp < 10000):
        thisLogUwU = logUwU[4] * (thisTemp - 8000)/(10000 - 8000) \
                   + logUwU[3] * (10000 - thisTemp)/(10000 - 8000)
        thisLogUwL = logUwL[4] * (thisTemp - 8000)/(10000 - 8000) \
                   + logUwL[3] * (10000 - thisTemp)/(10000 - 8000)
    if (thisTemp >= 10000):
        thisLogUwU = logUwU[4]
        thisLogUwL = logUwL[4]
    """
    #//Ionization stage Saha factors:
    #//Need T_kin^5/2 in the P_e formulation of Saha Eq.
    logSaha = logSahaFac - (boltzFacI /temp[0]) + (5.0 * temp[1] / 2.0) + thisLogUwU - thisLogUwL
    #// saha = Math.exp(logSaha);
    #//logLHS[id] = logSaha;
    logLHS = logSaha;
    #// } //id loop
    return logLHS;
#JB
#return [logLHS,[[UwUFit,thisLogUwU],[UwLFit,thisLogUwL]]]
#//
# } //end method sahaRHS
#def molPops(nmrtrLogNumB, nmrtrDissE, log10UwA, nmrtrLog10UwB, nmrtrLogQwAB, nmrtrLogMuAB, \
# numMolsB, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# logGroundRatio, numDeps, temp):
def molPops(nmrtrLogNumB, nmrtrDissE, logUwA, nmrtrLogUwB, nmrtrLogQwAB, nmrtrLogMuAB, \
            numMolsB, logNumB, dissEArr, logUwB, logQwABArr, logMuABArr, \
            logGroundRatio, numDeps, temp):
    # line 1: //species A data - ionization equilibrium of A
    # //data for set of species "B" - molecular equlibrium for set {AB}
    """Diatomic molecular equilibrium routine that accounts for molecule formation:
    // Returns depth distribution of molecular population
    // Input parameters:
    // logNum - array with depth-dependent total element number densities (cm^-3)
    // chiI1 - ground state ionization energy of neutral stage
    // chiI2 - ground state ionization energy of singly ionized stage
    // Also needs atmospheric structure information:
    // numDeps
    // temp structure
    // rho structure
    //
    // Atomic element "A" is the one kept on the LHS of the master fraction, whose ionization fractions are included
    // in the denominator of the master fraction
    // Element "B" refers to array of other species with which A forms molecules "AB" """
    logE = math.log10(math.e) #// for debug output
    #//System.out.println("molPops: nmrtrDissE " + nmrtrDissE + " log10UwA " + log10UwA[0] + " " + log10UwA[1] + " nmrtrLog10UwB " +
    #//  nmrtrLog10UwB[0] + " " + nmrtrLog10UwB[1] + " nmrtrLog10QwAB " + logE*nmrtrLogQwAB[2] + " nmrtrLogMuAB " + logE*nmrtrLogMuAB
    #//  + " numMolsB " + numMolsB + " dissEArr " + dissEArr[0] + " log10UwBArr " + log10UwBArr[0][0] + " " + log10UwBArr[0][1] + " log10QwABArr " +
    #//  logE*logQwABArr[0][2] + " logMuABArr " + logE*logMuABArr[0]);
    #//System.out.println("Line: nmrtrLog10UwB[0] " + logE*nmrtrLog10UwB[0] + " nmrtrLog10UwB[1] " + logE*nmrtrLog10UwB[1]);
    ln10 = math.log(10.0)
    log2pi = math.log(2.0 * math.pi)
    log2 = math.log(2.0)
    logE10 = math.log(10.0)
    #// Convert to natural logs:
    #//Treat at least one molecule - if there are really no molecules for an atomic species,
    #//there will be one phantom molecule in the denominator of the ionization fraction
    #//with an impossibly high dissociation energy
    # NOTE(review): this branch mutates the caller's dissEArr in place — confirm callers expect that.
    if (numMolsB == 0):
        numMolsB = 1
        #//This should be inherited, but let's make sure:
        dissEArr[0] = 29.0 #//eV
    #//var molPops = function(logNum, numeratorLogNumB, numeratorDissE, numeratorLog10UwA, numeratorLog10QwAB, numeratorLogMuAB, //species A data - ionization equilibrium of A
    #//Molecular partition functions - default initialization:
    thisLogUwB = [0.0 for i in range(numMolsB)]
    for iMol in range(numMolsB):
        thisLogUwB[iMol] = 0.0 #// variable for temp-dependent computed partn fn of array element B
    thisLogUwA = 0.0 #// element A
    nmrtrThisLogUwB = 0.0 #// numerator element B
    thisLogQwAB = math.log(300.0)
    nmrtrThisLogQwAB = math.log(300.0)
    #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
    #// for molecule formation:
    #logUwA = [0.0 for i in range(5)]
    #nmrtrLogUwB = [0.0 for i in range(5)]
    #for kk in range(len(logUwA)):
        #logUwA[kk] = logE10*log10UwA[kk]
        #nmrtrLogUwB[kk] = logE10*nmrtrLog10UwB[kk]
    #// lburns
    #// Array of elements B for all molecular species AB:
    #double[][] logUwB = new double[numMolsB][2];
    #logUwB = [ [ 0.0 for i in range(5) ] for j in range(numMolsB) ]
    #//if (numMolsB > 0){
    #for iMol in range(numMolsB):
    #    for kk in range(5):
    #        logUwB[iMol][kk] = logE10*log10UwBArr[iMol][kk]
    #    // lburns new loop
    #//}
    #// Molecular partition functions:
    #//       double nmrtrLogQwAB = logE10*nmrtrLog10QwAB;
    #//       double[] logQwAB = new double[numMolsB];
    #//       //if (numMolsB > 0){
    #//       for (int iMol = 0; iMol < numMolsB; iMol++){
    #//          logQwAB[iMol] = logE10*log10QwABArr[iMol];
    #//       }
    #       //}
    #//Molecular dissociation Boltzmann factors:
    # Numerator molecule: dissociation energy (eV) -> ergs -> exponent scale (K)
    nmrtrBoltzFacIAB = 0.0
    nmrtrLogMolSahaFac = 0.0
    logDissE = math.log(nmrtrDissE) + Useful.logEv()
    #//System.out.println("logDissE " + logE*logDissE)
    logBoltzFacIAB = logDissE - Useful.logK()
    #//System.out.println("logBoltzFacIAB " + logE*logBoltzFacIAB);
    nmrtrBoltzFacIAB = math.exp(logBoltzFacIAB)
    nmrtrLogMolSahaFac = (3.0 / 2.0) * (log2pi + nmrtrLogMuAB + Useful.logK() - 2.0 * Useful.logH())
    #//System.out.println("nmrtrLogMolSahaFac " + logE*nmrtrLogMolSahaFac);
    #//System.out.println("nmrtrDissE " + nmrtrDissE + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " nmrtrBoltzFacIAB " + nmrtrBoltzFacIAB + " nmrtrLogMuAB " + logE*nmrtrLogMuAB + " nmrtrLogMolSahaFac " + logE*nmrtrLogMolSahaFac);
    # Same Boltzmann/Saha prefactors for each denominator molecule AB:
    boltzFacIAB = [0.0 for i in range(numMolsB)]
    logMolSahaFac = [0.0 for i in range(numMolsB)]
    #//if (numMolsB > 0){
    for iMol in range(numMolsB):
        logDissE = math.log(dissEArr[iMol]) + Useful.logEv()
        logBoltzFacIAB = logDissE - Useful.logK()
        boltzFacIAB[iMol] = math.exp(logBoltzFacIAB)
        logMolSahaFac[iMol] = (3.0 / 2.0) * (log2pi + logMuABArr[iMol] + Useful.logK() - 2.0 * Useful.logH())
        #//System.out.println("logMolSahaFac[iMol] " + logE*logMolSahaFac[iMol]);
        #//System.out.println("iMol " + iMol + " dissEArr[iMol] " + dissEArr[iMol] + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " logMuABArr " + logE*logMuABArr[iMol] + " logMolSahaFac " + logE*logMolSahaFac[iMol]);
    #//double[] logNums = new double[numDeps]
    #//}
    #// For molecular species:
    #double nmrtrSaha, nmrtrLogSahaMol, nmrtrLogInvSahaMol; //, nmrtrInvSahaMol;
    logMolFrac = [0.0 for i in range(numDeps)]
    logSahaMol = [0.0 for i in range(numMolsB)]
    invSahaMol = [0.0 for i in range(numMolsB)]
    #JB#
    # Pre-fit all partition functions U(T) over the master temperature grid once,
    # then evaluate the fits per depth inside the id loop below.
    currentUwAArr=list(logUwA)#u(T) determined values
    UwAFit = ToolBox.cubicFit(masterTemp, currentUwAArr)#u(T) fit
    nmrtrLogUwBArr=list(nmrtrLogUwB)#u(T) determined values
    nmrtrLogUwBFit = ToolBox.cubicFit(masterTemp, nmrtrLogUwBArr)#u(T) fit
    #uwa.append(UwAFit)
    #uwb.append(nmrtrLogUwBFit)
    uwbFits=[]
    qwabFit = []
    for iMol in range(numMolsB):
        currentUwBArr=list(logUwB[iMol])
        UwBFit = ToolBox.cubicFit(masterTemp, currentUwBArr)
        uwbFits.append(UwBFit)
        currentLogQwABArr=list(logQwABArr[iMol])#u(T) determined values
        QwABFit = ToolBox.cubicFit(masterTemp, currentLogQwABArr)#u(T) fit
        qwabFit.append(QwABFit)
    #nmrtrQwABArr=list(nmrtrLogQwAB)#u(T) determined values
    #nmrtrQwABFit = ToolBox.cubicFit(masterTemp, nmrtrQwABArr)#u(T) fit
    #for Mols in range(numMolsB):
    #    currentLogUwBArr=list(logUwB[Mols])#u(T) determined values
    #    UwBFit=cubicFit(masterTemp,currentLogUwBArr)#u(T) fit
    #JB#
    #//
    temps=[]
    #valb=[]
    #vala=[]
    #valnb=[]
    #valqab=[]
    #valnmrtrqwb=[]
    #// System.out.println("molPops: id nmrtrLogNumB logNumBArr[0] logGroundRatio");
    for id in range(numDeps):
        #//System.out.format("%03d, %21.15f, %21.15f, %21.15f, %n", id, logE*nmrtrLogNumB[id], logE*logNumB[0][id], logE*logGroundRatio[id]);
        #//// reduce or enhance number density by over-all Rosseland opacity scale parameter
        #//Determine temperature dependent partition functions Uw:
        thisTemp = temp[0][id]
        temps.append(thisTemp)
        #Ttheta = 5040.0 / thisTemp
        """
        if (Ttheta >= 1.0):
            thisLogUwA = logUwA[0]
            nmrtrThisLogUwB = nmrtrLogUwB[0]
            for iMol in range(numMolsB):
                thisLogUwB[iMol] = logUwB[iMol][0]
        if (Ttheta <= 0.5):
            thisLogUwA = logUwA[1]
            nmrtrThisLogUwB = nmrtrLogUwB[1]
            for iMol in range(numMolsB):
                thisLogUwB[iMol] = logUwB[iMol][1]
        if (Ttheta > 0.5 and Ttheta < 1.0):
            thisLogUwA = ( logUwA[1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
                       + ( logUwA[0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
            nmrtrThisLogUwB = ( nmrtrLogUwB[1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
                            + ( nmrtrLogUwB[0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
            for iMol in range(numMolsB):
                thisLogUwB[iMol] = ( logUwB[iMol][1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
                                 + ( logUwB[iMol][0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
        """
        #JB#
        thisLogUwA = float(ToolBox.valueFromFit(UwAFit,thisTemp))#u(T) value extrapolated
        #vala.append(thisLogUwA)
        nmrtrThisLogUwB = float(ToolBox.valueFromFit(nmrtrLogUwBFit,thisTemp))#u(T) value extrapolated
        #valnb.append(nmrtrThisLogUwB)
        #for iMol in range(numMolsB):
        #    thisLogUwB[iMol]=logUwB[iMol]
        for iMol in range(numMolsB):
            thisLogUwB[iMol] = ToolBox.valueFromFit(uwbFits[iMol],thisTemp)#u(T) value extrapolated
            #valb.append(thisLogUwB[iMol])
        #// NEW Determine temperature dependent partition functions Uw: lburns
        # Clamp U(T) to tabulated endpoints outside the fitted temperature range
        thisTemp = temp[0][id]
        if (thisTemp <= 130.0):
            thisLogUwA = logUwA[0]
            nmrtrThisLogUwB = nmrtrLogUwB[0]
            for iMol in range(numMolsB):
                thisLogUwB[iMol] = logUwB[iMol][0]
        if (thisTemp >= 10000.0):
            thisLogUwA = logUwA[4]
            nmrtrThisLogUwB = nmrtrLogUwB[4]
            for iMol in range(numMolsB):
                thisLogUwB[iMol] = logUwB[iMol][4]
        """
        if (thisTemp > 130 and thisTemp <= 500):
            thisLogUwA = logUwA[1] * (thisTemp - 130)/(500 - 130) \
                       + logUwA[0] * (500 - thisTemp)/(500 - 130)
            nmrtrThisLogUwB = nmrtrLogUwB[1] * (thisTemp - 130)/(500 - 130) \
                            + nmrtrLogUwB[0] * (500 - thisTemp)/(500 - 130)
            for iMol in range(numMolsB):
                thisLogUwB[iMol] = logUwB[iMol][1] * (thisTemp - 130)/(500 - 130) \
                                 + logUwB[iMol][0] * (500 - thisTemp)/(500 - 130)
        if (thisTemp > 500 and thisTemp <= 3000):
            thisLogUwA = logUwA[2] * (thisTemp - 500)/(3000 - 500) \
                       + logUwA[1] * (3000 - thisTemp)/(3000 - 500)
            nmrtrThisLogUwB = nmrtrLogUwB[2] * (thisTemp - 500)/(3000 - 500) \
                            + nmrtrLogUwB[1] * (3000 - thisTemp)/(3000 - 500)
            for iMol in range(numMolsB):
                thisLogUwB[iMol] = logUwB[iMol][2] * (thisTemp - 500)/(3000 - 500) \
                                 + logUwB[iMol][1] * (3000 - thisTemp)/(3000 - 500)
        if (thisTemp > 3000 and thisTemp <= 8000):
            thisLogUwA = logUwA[3] * (thisTemp - 3000)/(8000 - 3000) \
                       + logUwA[2] * (8000 - thisTemp)/(8000 - 3000)
            nmrtrThisLogUwB = nmrtrLogUwB[3] * (thisTemp - 3000)/(8000 - 3000) \
                            + nmrtrLogUwB[2] * (8000 - thisTemp)/(8000 - 3000)
            for iMol in range(numMolsB):
                thisLogUwB[iMol] = logUwB[iMol][3] * (thisTemp - 3000)/(8000 - 3000) \
                                 + logUwB[iMol][2] * (8000 - thisTemp)/(8000 - 3000)
        if (thisTemp > 8000 and thisTemp < 10000):
            thisLogUwA = logUwA[4] * (thisTemp - 8000)/(10000 - 8000) \
                       + logUwA[3] * (10000 - thisTemp)/(10000 - 8000)
            nmrtrThisLogUwB = nmrtrLogUwB[4] * (thisTemp - 8000)/(10000 - 8000) \
                            + nmrtrLogUwB[3] * (10000 - thisTemp)/(10000 - 8000)
            for iMol in range(numMolsB):
                thisLogUwB[iMol] = logUwB[iMol][4] * (thisTemp - 8000)/(10000 - 8000) \
                                 + logUwB[iMol][3] * (10000 - thisTemp)/(10000 - 8000)
        if (thisTemp >= 10000):
            thisLogUwA = logUwA[4]
            nmrtrThisLogUwB = nmrtrLogUwB[4]
            for iMol in range(numMolsB):
                thisLogUwB[iMol] = logUwB[iMol][4]
        """
        #iMol loops for Q's
        # Piecewise-linear interpolation of the molecular Q partition functions in T
        for iMol in range(numMolsB):
            if (thisTemp < 3000.0):
                thisLogQwAB = ( logQwABArr[iMol][1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
                            + ( logQwABArr[iMol][2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
            if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
                thisLogQwAB = ( logQwABArr[iMol][2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
                            + ( logQwABArr[iMol][3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
            if ( thisTemp > 8000.0 ):
                thisLogQwAB = ( logQwABArr[iMol][3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
                            + ( logQwABArr[iMol][4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
        if (thisTemp < 3000.0):
            nmrtrThisLogQwAB = ( nmrtrLogQwAB[1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
                             + ( nmrtrLogQwAB[2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
        if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
            nmrtrThisLogQwAB = ( nmrtrLogQwAB[2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
                             + ( nmrtrLogQwAB[3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
        if ( thisTemp > 8000.0 ):
            nmrtrThisLogQwAB = ( nmrtrLogQwAB[3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
                             + ( nmrtrLogQwAB[4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
        #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
        #// for molecule formation:
        #   //Ionization stage Saha factors:
        #//System.out.println("id " + id + " nmrtrLogNumB[id] " + logE*nmrtrLogNumB[id]);
        # //    if (id == 16){
        # //    System.out.println("id " + id + " nmrtrLogNumB[id] " + logE*nmrtrLogNumB[id] + " pp nmrtB " + (logE*(nmrtrLogNumB[id]+temp[1][id]+Useful.logK())) + " nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrLogQwAB " + logE*nmrtrThisLogQwAB);
        # //System.out.println("nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrThisLogQwAB " + logE*nmrtrThisLogQwAB);
        # //   }
        nmrtrLogSahaMol = nmrtrLogMolSahaFac - nmrtrLogNumB[id] - (nmrtrBoltzFacIAB / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + nmrtrThisLogUwB + thisLogUwA - nmrtrThisLogQwAB
        nmrtrLogInvSahaMol = -1.0 * nmrtrLogSahaMol
        #//System.out.println("nmrtrLogInvSahaMol " + logE*nmrtrLogInvSahaMol);
        #//nmrtrInvSahaMol = Math.exp(nmrtrLogSahaMol);
        #//  if (id == 16){
        #//      System.out.println("nmrtrLogInvSahaMol " + logE*nmrtrLogInvSahaMol);
        #//  }
        #//  if (id == 16){
        #//     System.out.println("nmrtrBoltzFacIAB " + nmrtrBoltzFacIAB + " nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrThisLogQwAB " + nmrtrThisLogQwAB);
        #//     System.out.println("nmrtrLogSahaMol " + logE*nmrtrLogSahaMol); // + " nmrtrInvSahaMol " + nmrtrInvSahaMol);
        #//  }
        #//Molecular Saha factors:
        # NOTE(review): thisLogQwAB below is whatever value the *last* iMol iteration of the
        # Q-interpolation loop above left behind; it looks like it should be re-evaluated
        # per molecule here — TODO confirm against the upstream ChromaStarServer source.
        for iMol in range(numMolsB):
            #//System.out.println("iMol " + iMol + " id " + id + " logNumB[iMol][id] " + logE*nmrtrLogNumB[id]);
            #//System.out.println("iMol " + iMol + " thisLogUwB[iMol] " + logE*thisLogUwB[iMol] + " thisLogUwA " + logE*thisLogUwA + " thisLogQwAB " + logE*thisLogQwAB);
            logSahaMol[iMol] = logMolSahaFac[iMol] - logNumB[iMol][id] - (boltzFacIAB[iMol] / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + float(thisLogUwB[iMol]) + thisLogUwA - thisLogQwAB
            #//For denominator of ionization fraction, we need *inverse* molecular Saha factors (N_AB/NI):
            logSahaMol[iMol] = -1.0 * logSahaMol[iMol]
            invSahaMol[iMol] = math.exp(logSahaMol[iMol])
            #//TEST invSahaMol[iMol] = 1.0e-99; //test
            #// if (id == 16){
            #//     System.out.println("iMol " + iMol + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " thisLogUwB[iMol] " + logE*thisLogUwB[iMol] + " logQwAB[iMol] " + logE*thisLogQwAB + " logNumB[iMol][id] " + logE*logNumB[iMol][id] + " logMolSahaFac[iMol] " + logE*logMolSahaFac[iMol]);
            #//     System.out.println("iMol " + iMol + " logSahaMol " + logE*logSahaMol[iMol] + " invSahaMol[iMol] " + invSahaMol[iMol]);
            #// }
        #//Compute log of denominator is ionization fraction, f_stage
        #        //default initialization
        #       //  - ratio of total atomic particles in all ionization stages to number in ground state:
        denominator = math.exp(logGroundRatio[id]) #//default initialization - ratio of total atomic particles in all ionization stages to number in ground state
        #//molecular contribution
        for iMol in range(numMolsB):
            #//  if (id == 16){
            #//      System.out.println("invSahaMol[iMol] " + invSahaMol[iMol] + " denominator " + denominator);
            #//  }
            denominator = denominator + invSahaMol[iMol]
        #//
        logDenominator = math.log(denominator)
        #//System.out.println("logGroundRatio[id] " + logE*logGroundRatio[id] + " logDenominator " + logE*logDenominator);
        #//  if (id == 16){
        #//      System.out.println("id " + id + " logGroundRatio " + logGroundRatio[id] + " logDenominator " + logDenominator);
        #//  }
        #//if (id == 36){
        #//    System.out.println("logDenominator " + logE*logDenominator);
        #//   }
        #//var logDenominator = Math.log( 1.0 + saha21 + (saha32 * saha21) + (saha43 * saha32 * saha21) + (saha54 * saha43 * saha32 * saha21) );
        logMolFrac[id] = nmrtrLogInvSahaMol - logDenominator
        #//  if (id == 16){
        #//       System.out.println("id " + id + " logMolFrac[id] " + logE*logMolFrac[id]);
        #//  }
        #//logNums[id] = logNum[id] + logMolFrac;
    #} //id loop
    #JB - check (never used)#
    #print(uwa)
    #print(uwb)
    #title("logUwA")
    """
    plot(temps,vala)
    tempT=[]
    for t in masterTemp:
        tempT.append(valueFromFit(UwAFit,t))
    scatter(masterTemp,(tempT))
    show()
    #title("nmrtrlogUwB")
    plot(temps,valnb)
    tempT=[]
    for t in masterTemp:
        tempT.append(valueFromFit(nmrtrLogUwBFit,t))
    scatter(masterTemp,(tempT))
    show()
    #title("logUwB")
    plot(temps,valb)
    tempT=[]
    for t in masterTemp:
        tempT.append(valueFromFit(UwBFit,t))
    scatter(masterTemp,(tempT))
    show()
    #title("logQwAB")
    plot(temps,valqab)
    tempT=[]
    for t in masterTemp:
        tempT.append(valueFromFit(QwABFit,t))
    scatter(masterTemp,(tempT))
    show()
    #title("nmrtrlogQwAB")
    plot(temps,valnmrtrqwb)
    tempT=[]
    for t in masterTemp:
        tempT.append(valueFromFit(nmrtrQwABFit,t))
    scatter(masterTemp,(tempT))
    show()
    """
    #JB#
    return logMolFrac
#//end method stagePops
| 54,663
| 39.977511
| 287
|
py
|
ChromaStarPy
|
ChromaStarPy-master/TauScale.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 09:46:06 2017
Create the standard optical depth scale sampling the model vertically - interpreted as the
Rosseland optical depth scale
Uniformly spaced in log(tau)
@author: ishort
"""
import math
def tauScale(numDeps, log10MinDepth, log10MaxDepth):
    """Create the standard optical depth scale sampling the model vertically,
    interpreted as the Rosseland optical depth scale.

    Uniformly spaced in log(tau).

    Parameters:
      numDeps       - number of depth points (must be >= 2)
      log10MinDepth - log_10 of the shallowest optical depth
      log10MaxDepth - log_10 of the deepest optical depth

    Returns a 2 x numDeps list:
      row 0 - linear tau_Ros values
      row 1 - natural-log tau_Ros values
    """
    ln10 = math.log(10.0)
    # Row 0: linear tau; row 1: ln(tau)
    tauRos = [[0.0 for i in range(numDeps)] for j in range(2)]
    # Convert the log_10 bounds to natural logs and space depths evenly in ln(tau)
    logMinDepth = log10MinDepth * ln10
    logMaxDepth = log10MaxDepth * ln10
    deltaLogTau = (logMaxDepth - logMinDepth) / (numDeps - 1.0)
    for i in range(numDeps):
        tauRos[1][i] = logMinDepth + float(i) * deltaLogTau
        tauRos[0][i] = math.exp(tauRos[1][i])
    return tauRos
| 1,367
| 26.918367
| 113
|
py
|
ChromaStarPy
|
ChromaStarPy-master/FluxTrans.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 29 17:42:58 2017
@author: Ian
"""
import math
import numpy
import random
import numpy
import Useful
import ToolBox
#Returns a vector of reduced fluxes for each angle theta being tannsited by planet
#//
def fluxTrans(intens, flx, lambdas, cosTheta, radius,
              iFirstTheta, numTransThetas, rPlanet):
    """Return the spectrum of reduced fluxes for each disk angle theta being
    transited by the planet.

    Parameters:
      intens         - specific intensity, indexed [lambda][theta]
      flx            - flux structure; flx[1][il] is ln(flux) at wavelength il
      lambdas        - wavelength grid (used only for its length)
      cosTheta       - 2 x numThetas angle grid; len(cosTheta[0]) gives numThetas
      radius         - stellar radius (solar radii)
      iFirstTheta    - first theta index the planet occults
      numTransThetas - number of occulted theta annuli
      rPlanet        - planet radius in *Earth* radii (converted internally)

    Returns a 2 x numLams x numTransThetas list:
      row 0 - linear reduced flux
      row 1 - natural-log reduced flux
    """
    logPi = math.log(math.pi)
    numLams = len(lambdas)
    numThetas = len(cosTheta[0])
    fluxTransSpec = [ [ [ numpy.double(0.0) for i in range(numTransThetas) ] for k in range(numLams) ] for j in range(2) ]
    # Convert planet radius from Earth radii to solar radii so it is commensurate
    # with the stellar radius
    rPlanet = numpy.double(rPlanet) * Useful.rEarth() / Useful.rSun()
    # Solid angle subtended by the planet relative to the stellar disk:
    # omega = pi * (rPlanet/radius)^2, kept in log form
    logOmega = math.log(math.pi) + ( numpy.double(2.0) * ( math.log(rPlanet) - math.log(radius) ) )
    for it in range(iFirstTheta, numThetas):
        for il in range(numLams):
            # Subtracting the very small (blocked intensity) from the very large (total
            # flux): compute the ratio in log space first for numerical stability
            logHelper = logPi + math.log(intens[il][it]) + logOmega - flx[1][il]
            helper = numpy.double(1.0) - math.exp(logHelper)
            fluxTransSpec[1][il][it-iFirstTheta] = flx[1][il] + math.log(helper)
            fluxTransSpec[0][il][it-iFirstTheta] = math.exp(fluxTransSpec[1][il][it-iFirstTheta])
    return fluxTransSpec
| 2,264
| 31.357143
| 122
|
py
|
ChromaStarPy
|
ChromaStarPy-master/T4300g45CaIIHK.py
|
#
# Input parameter file for a ChromaStarPy run:
# Teff = 4300 K, log g = 4.5 model, synthesizing the Ca II H & K region (390-400 nm).
#
#Custom filename tags to distinguish from other runs
project = "Project"
runVers = "Run"
#Default plot
#Select ONE only:
#makePlot = "structure"
#makePlot = "sed"
makePlot = "spectrum"
#makePlot = "ldc"
#makePlot = "ft"
#makePlot = "tlaLine"
#Spectrum synthesis mode
# - uses model in Restart.py with minimal structure calculation
specSynMode = False
#Model atmosphere
teff = 4300.0  #, K
logg = 4.5  #, cgs
log10ZScale = 0.0  # [A/H]
massStar = 0.75  #, solar masses
xiT = 2.0  #, km/s microturbulence
logHeFe = 0.0  #, [He/Fe]
logCO = 0.0  #, [C/O]
logAlphaFe = 0.0  #, [alpha-elements/Fe]
#Spectrum synthesis wavelength range
lambdaStart = 390.0  #, nm
lambdaStop = 400.0  #, nm
# Output filename stem encodes the key model and synthesis parameters
fileStem = project + "-"\
    + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
    + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
    + "-" + runVers
lineThresh = -3.0  #, min log(KapLine/kapCnt) for inclusion at all - really, being used as "lineVoigt" for now
voigtThresh = -3.0  #, min log(KapLine/kapCnt) for treatment as Voigt - currently not used - all lines get Voigt
logGammaCol = 0.5  # log_10 Lorentzian collisional broadening enhancement
logKapFudge = 0.0  # log_10 opacity fudge factor
macroV = 1.0  #, km/s macroturbulence
rotV = 8.0  #, km/s equatorial rotation speed
rotI = 90.0  #, degrees inclination
RV = 0.0  #, km/s radial velocity
vacAir = "vacuum"  # wavelength scale: "vacuum" or "air"
sampling = "fine"  # spectral sampling mode
#Performance vs realism
nOuterIter = 12  #, no of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12  #, no of inner (ion fraction) - Pe iterations
ifTiO = 1  #, whether to include TiO JOLA bands in synthesis
#Gaussian filter for limb darkening curve, fourier transform
diskLambda = 500.0  #, nm
diskSigma = 0.01  #, nm
#Two-level atom and spectral line (Na D line defaults)
userLam0 = 589.592  #, nm
userA12 = 6.24  #, A_12 logarithmic abundance = log_10(N/H_H) = 12
userLogF = -0.495  #, log(f) oscillator strength // saturated line
userStage = 0  #, ionization stage of user species (0 (I) - 3 (IV)
userChiI1 = 5.139  #, ground state chi_I, eV
userChiI2 = 47.29  #, 1st ionized state chi_I, eV
userChiI3 = 71.62  #, 2nd ionized state chi_I, eV
userChiI4 = 98.94  #, 3rd ionized state chi_I, eV
userChiL = 0.0  #, lower atomic E-level, eV
userGw1 = 2  #, ground state state. weight or partition fn (stage I) - unitless
userGw2 = 1  #, ground state state. weight or partition fn (stage II) - unitless
userGw3 = 1  #, ground state state. weight or partition fn (stage III) - unitless
userGw4 = 1  #, ground state state. weight or partition fn (stage IV) - unitless
userGwL = 2  #, lower E-level state. weight - unitless
userMass = 22.9  #, amu
userLogGammaCol = 1.0  #, log_10 Lorentzian broadening enhancement factor
| 33.662338
| 116
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Hydrostat.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 16:07:56 2017
@author: ishort
"""
import math
import Useful
def hydroFormalSoln(numDeps, grav, tauRos, kappa, temp, guessPGas):
    """Integrate the formal solution of the hydrostatic equilibrium equation
    on the optical depth (tau) scale.

    Advantage over direct integration: it makes better use of the initial
    guess at Pgas.

    Takes in *gas* pressure, converts to *total* pressure (gas + radiation),
    then returns *gas* pressure.

    Parameters:
      numDeps   - number of depth points
      grav      - surface gravity (linear cgs, NOT log g)
      tauRos    - 2 x numDeps Rosseland tau scale (row 1 = ln tau)
      kappa     - 2 x numDeps opacity structure (row 1 = ln kappa)
      temp      - 2 x numDeps temperature structure (row 1 = ln T)
      guessPGas - 2 x numDeps initial gas-pressure guess (row 0 linear, row 1 ln)

    Returns a 2 x numDeps gas-pressure structure (row 0 linear, row 1 ln).
    """
    press = [ [0.0 for i in range(numDeps)] for j in range(2)]
    logC = Useful.logC()
    logSigma = Useful.logSigma()
    # ln(4*sigma/(3*c)) -- prefactor of the P_rad = (4 sigma / 3 c) T^4 law
    radFac = math.log(4.0) + logSigma - math.log(3.0) - logC
    logEg = math.log(grav)  #//Natural log g!!
    log1p5 = math.log(1.5)
    #//Compute radiation pressure for this temperature structure and add it to Pgas
    logPRad = [0.0 for i in range(numDeps)]
    logPTot = [0.0 for i in range(numDeps)]
    for i in range(numDeps):
        logPRad[i] = radFac + 4.0 * temp[1][i]
        pRad = math.exp(logPRad[i])
        pT = guessPGas[0][i] + pRad
        logPTot[i] = math.log(pT)
    # Basic integrated quantity; exponentiating by 2/3 at each tau yields pressure
    # (renamed from `sum`, which shadowed the builtin)
    cumInteg = [0.0 for i in range(numDeps)]
    #//Upper boundary - inherit from initial guess:
    #//Careful here - P at upper boundary can be an underestimate, but it must not be
    #//greater than the value at the next depth in!
    press[0][0] = guessPGas[0][0]
    press[1][0] = math.log(press[0][0])
    #//Corresponding value of basic integrated quantity at top of atmosphere:
    logSum = 1.5 * press[1][0] + math.log(0.666667) - logEg
    cumInteg[0] = math.exp(logSum)
    #// Integrate inward on logTau scale
    #// CAUTION: This is not an integral for Delta P, but for P once the integral at
    #// each tau is exponentiated by 2/3!
    #//Jump start integration with an Euler step:
    deltaLogT = tauRos[1][1] - tauRos[1][0]
    logInteg = tauRos[1][1] + 0.5*logPTot[1] - kappa[1][1]
    lastInteg = math.exp(logInteg)
    cumInteg[1] = cumInteg[0] + lastInteg * deltaLogT
    #// Continue with extended trapezoid rule:
    for i in range(2, numDeps):
        deltaLogT = tauRos[1][i] - tauRos[1][i-1]
        logInteg = tauRos[1][i] + 0.5*logPTot[i] - kappa[1][i]
        integ = math.exp(logInteg)
        term = 0.5 * (integ + lastInteg) * deltaLogT
        cumInteg[i] = cumInteg[i-1] + term  #//accumulate basic integrated quantity
        lastInteg = integ
    for i in range(1, numDeps):
        #//Evaluate total pressure from the basic integrated quantity at each depth;
        #//the integration variable is the natural log, so no 1/log(e) factor needed
        logPress = 0.666667 * (log1p5 + logEg + math.log(cumInteg[i]))
        #//Subtract radiation pressure (ratio renamed from `help`, which shadowed the builtin):
        logHelp = logPRad[i] - logPress
        pRadFrac = math.exp(logHelp)
        # For hot, low-g stars: limit Prad to 50% of Ptot so we don't get negative
        # Pgas and rho values
        if (pRadFrac > 0.5):
            pRadFrac = 0.5
        press[1][i] = logPress + math.log(1.0 - pRadFrac)
        press[0][i] = math.exp(press[1][i])
    return press  #//*Gas* pressure
#// Compute radiation pressure
def radPress(numDeps, temp):
    """Compute the radiation pressure P_rad = (4*sigma / 3*c) * T^4 at each depth.

    Parameters:
      numDeps -- number of depth points
      temp    -- 2 x numDeps array; temp[1][i] is ln(T) at depth i

    Returns a 2 x numDeps array: row 0 holds P_rad in cgs units,
    row 1 holds ln(P_rad).
    """
    # Depth-independent prefactor ln(4*sigma / (3*c)):
    logRadFac = math.log(4.0) + Useful.logSigma() - math.log(3.0) - Useful.logC()
    pRad = [[0.0 for _ in range(numDeps)] for _ in range(2)]
    for iD in range(numDeps):
        logPRad = logRadFac + 4.0 * temp[1][iD]
        pRad[1][iD] = logPRad
        pRad[0][iD] = math.exp(logPRad)
    return pRad
#//end method radPress
| 4,462
| 33.068702
| 119
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Jola.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 28 16:10:08 2017
@author: ishort
"""
#/**
# * Collection of methods for computing molecular band opacity in the
# * Just-overlapping-line approximation (JOLA)
# * Just-overlapping line approximation treats molecular ro-vibrational bands as pseudo-continuum
# * opacity sources by "smearing" out the individual rotational fine-structure lines
# *See 1982A&A...113..173Z, Zeidler & Koestler, 1982
# */
import math
import Useful
def jolaGrid(jolaLambda, jolaNumPoints):
    """Build a linearly spaced wavelength grid across a JOLA molecular band.

    Parameters:
      jolaLambda    -- two-element sequence [lambdaMin, lambdaMax] in nm
      jolaNumPoints -- number of sample points

    Returns a list of jolaNumPoints wavelengths (nm), starting at
    jolaLambda[0] with step (lambdaMax - lambdaMin) / jolaNumPoints.
    Note the grid stops one step short of jolaLambda[1].
    """
    step = (jolaLambda[1] - jolaLambda[0]) / jolaNumPoints
    return [jolaLambda[0] + float(k) * step for k in range(jolaNumPoints)]
#} //end method jolaGrid
def jolaProfilePR(omega0, logf, vibConst,
                  jolaPoints, alphP, alphR, numDeps, temp):
    """JOLA profile for the P (Delta J = +1) and R (Delta J = -1) branches.

    Implements Eq. 19 of Zeidler & Koestler (1982A&A...113..173Z), which
    smears the rotational fine structure of a ro-vibrational band into a
    pseudo-continuous differential oscillator strength.

    Parameters:
      omega0     -- band-origin wavenumber (cm^-1)
      logf       -- ln of the vibrational band oscillator strength f_vv'
      vibConst   -- [B', B''] rotational constants of the two vib. states
      jolaPoints -- wavelength grid across the band (nm)
      alphP      -- fractional strength of the P branch
      alphR      -- fractional strength of the R branch
      numDeps    -- number of atmospheric depth points
      temp       -- 2 x numDeps array; temp[1][i] is ln(T)

    Returns dfBydw[numPoints][numDeps]: d f / d omega at each wavelength
    and depth (zero where the band-head condition is not satisfied).
    """
    nm2cm = 1.0e-7
    numPoints = len(jolaPoints)
    # derivative of rotational-line oscillator strength w.r.t. wavenumber:
    dfBydw = [[0.0 for i in range(numDeps)] for j in range(numPoints)]
    fvv = math.exp(logf)
    # ln(h*c*B/k) -- depth-independent part of the Boltzmann exponent:
    logHcBbyK = Useful.logH() + Useful.logC() + math.log(vibConst[0]) \
                - Useful.logK()
    Bsum = vibConst[1] + vibConst[0]
    Bdiff = vibConst[1] - vibConst[0]
    # value of the J-related variable "m" at the band head (Eq. 14):
    mH = -1.0 * Bsum / (2.0 * Bdiff)
    # wavenumber at the band head (Eq. 15):
    wH = (-1.0 * Bdiff * mH * mH) + omega0
    mTheta1 = 1.0  # R branch?
    mTheta2 = 1.0  # P branch?
    # BUG FIX: the loop previously stopped at iW == 1, skipping iW == 0;
    # the Java original (see history) ran "for (iW = numPoints-1; iW >= 0; iW--)".
    for iW in range(numPoints - 1, -1, -1):
        logW = 0.0 - math.log(nm2cm * jolaPoints[iW])  # wavenumber in cm^-1
        w = math.exp(logW)
        wMinuswHOverBDiff = (w - wH) / Bdiff
        mHelp = math.sqrt(abs(wMinuswHOverBDiff))  # Eq. 17
        m1 = mH + mHelp
        m2 = mH - mHelp  # Eq. 18
        m1Fctr = (m1 * m1 - m1)
        m2Fctr = (m2 * m2 - m2)
        # NOTE(review): the association between the sign of m1/m2 and the
        # P vs R branch might be backwards (inherited from the original).
        alpha1 = alphP if m1 < 0 else alphR
        alpha2 = alphP if m2 < 0 else alphR
        denom1 = abs(Bsum + 2.0 * m1 * Bdiff)
        denom2 = abs(Bsum + 2.0 * m2 * Bdiff)
        for iD in range(numDeps):
            if (wMinuswHOverBDiff > 0):
                logHcBbyKt = logHcBbyK - temp[1][iD]
                hcBbyKt = math.exp(logHcBbyKt)
                m1Term = alpha1 * mTheta1 * math.exp(-1.0 * hcBbyKt * m1Fctr) / denom1
                m2Term = alpha2 * mTheta2 * math.exp(-1.0 * hcBbyKt * m2Fctr) / denom2
                dfBydw[iW][iD] = fvv * hcBbyKt * (m1Term + m2Term)  # Eq. 19
            else:
                dfBydw[iW][iD] = 0.0
    return dfBydw
#} //end method jolaProfilePR
#//
def jolaProfileQ(omega0, logf, vibConst,
                 jolaPoints, alphQ, numDeps, temp):
    """JOLA profile for the Q (Delta J = 0) branch.

    Implements Eq. 24 of Zeidler & Koestler (1982A&A...113..173Z).

    Parameters:
      omega0     -- band-origin wavenumber (cm^-1)
      logf       -- ln of the vibrational band oscillator strength f_vv'
      vibConst   -- [B', B''] rotational constants of the two vib. states
      jolaPoints -- wavelength grid across the band (nm)
      alphQ      -- fractional strength of the Q branch
      numDeps    -- number of atmospheric depth points
      temp       -- 2 x numDeps array; temp[1][i] is ln(T)

    Returns dfBydw[numPoints][numDeps]: d f / d omega at each wavelength
    and depth (zero where the branch condition is not satisfied).
    """
    nm2cm = 1.0e-7
    numPoints = len(jolaPoints)
    # derivative of rotational-line oscillator strength w.r.t. wavenumber:
    dfBydw = [[0.0 for i in range(numDeps)] for j in range(numPoints)]
    fvv = math.exp(logf)
    # ln(h*c*B/k) -- depth-independent part of the Boltzmann exponent:
    logHcBbyK = Useful.logH() + Useful.logC() + math.log(vibConst[0]) \
                - Useful.logK()
    Bsum = vibConst[1] + vibConst[0]
    Bdiff = vibConst[1] - vibConst[0]
    # BUG FIX: the loop previously stopped at iW == 1, skipping iW == 0;
    # the Java original ran "for (iW = numPoints-1; iW >= 0; iW--)".
    for iW in range(numPoints - 1, -1, -1):
        logW = 0.0 - math.log(nm2cm * jolaPoints[iW])  # wavenumber in cm^-1
        w = math.exp(logW)
        wMinusw0OverBDiff = (w - omega0) / Bdiff
        # BUG FIX: math.abs() does not exist in Python (AttributeError);
        # use the builtin abs() instead (two occurrences below).
        mHelp = math.sqrt(0.25 + abs(wMinusw0OverBDiff))  # Eq. 17
        mQ = -0.5 + mHelp
        mQFctr = (mQ * mQ - mQ)
        denom = abs(Bdiff)
        for iD in range(numDeps):
            if (wMinusw0OverBDiff > 0):
                logHcBbyKt = logHcBbyK - temp[1][iD]
                hcBbyKt = math.exp(logHcBbyKt)
                mQTerm = math.exp(-1.0 * hcBbyKt * mQFctr) / denom
                dfBydw[iW][iD] = alphQ * fvv * hcBbyKt * mQTerm  # Eq. 24
            else:
                dfBydw[iW][iD] = 0.0
    return dfBydw
#} //end method jolaProfileQ
# //
def jolaKap(jolaLogNums, dfBydw, jolaPoints,
            numDeps, temp, rho):
    """Convert JOLA df/d(omega) profiles into a mass extinction coefficient.

    For each wavelength and depth, integrates the differential oscillator
    strength over a wavenumber step, converts it to a cross-section
    sigma = f * pi * e^2 / (m_e * c), applies the LTE stimulated-emission
    correction, and divides by the mass density.

    Parameters:
      jolaLogNums -- ln of the absorber number density at each depth
      dfBydw      -- [numPoints][numDeps] df/d(omega) from the profile methods
      jolaPoints  -- wavelength grid across the band (nm)
      numDeps     -- number of depth points
      temp        -- 2 x numDeps array; temp[1][i] is ln(T)
      rho         -- 2 x numDeps array; rho[1][i] is ln(density)

    Returns logKappaJola[numPoints][numDeps]: ln(kappa) in cm^2 g^-1
    (-999.0 marks "no opacity" entries).
    """
    nm2cm = 1.0e-7
    numPoints = len(jolaPoints)
    # Initialize to the "no opacity" sentinel:
    logKappaJola = [[-999.0 for i in range(numDeps)] for j in range(numPoints)]
    # Wavenumber (cm^-1) at the long-wavelength band edge, where the
    # oscillator strength is assumed to start at f = 0:
    lastW0 = math.exp(0.0 - math.log(nm2cm * jolaPoints[numPoints - 1]))
    for iD in range(numDeps):
        # BUG FIX: the previous-wavenumber tracker must restart for every
        # depth; it was initialized once (and from the wrong band edge).
        lastW = lastW0
        # Loop in order of increasing wavenumber (decreasing wavelength index).
        # BUG FIX: the loop previously stopped at iW == 2, skipping iW == 1;
        # the Java original ran "for (iW = numPoints-1; iW >= 1; iW--)".
        for iW in range(numPoints - 1, 0, -1):
            logFreq = Useful.logC() - math.log(nm2cm * jolaPoints[iW])
            logW = 0.0 - math.log(nm2cm * jolaPoints[iW])  # wavenumber, cm^-1
            w = math.exp(logW)
            deltaW = abs(w - lastW)
            # h*nu/k for the LTE stimulated-emission correction:
            stimEmLogExpHelp = Useful.logH() + logFreq - Useful.logK()
            thisDeltaF = deltaW * dfBydw[iW][iD]
            if (thisDeltaF > 0.0):
                # non-cumulative: each point uses only its own df contribution
                logSigma = math.log(thisDeltaF) + math.log(math.pi) \
                    + 2.0 * Useful.logEe() - Useful.logMe() - Useful.logC()
            else:
                logSigma = -999.0
            # LTE stimulated emission correction (1 - exp(-h*nu/kT)):
            stimEmLogExp = stimEmLogExpHelp - temp[1][iD]
            stimEm = 1.0 - math.exp(-1.0 * math.exp(stimEmLogExp))
            # extinction coefficient in cm^2 g^-1:
            logKappaJola[iW][iD] = logSigma + jolaLogNums[iD] - rho[1][iD] \
                + math.log(stimEm)
            # BUG FIX: advance the previous-wavenumber tracker; the original
            # updated the unused leftover variable lastFreq instead, so every
            # deltaW was measured from one fixed wavenumber.
            lastW = w
    return logKappaJola
#} //end method jolaKap
| 11,506
| 34.736025
| 245
|
py
|
ChromaStarPy
|
ChromaStarPy-master/A0Halpha.py
|
#
#
# Run-configuration constants for an A0-star H-alpha synthesis
# (read as module-level settings by the main ChromaStarPy driver).
# Custom filename tags to distinguish this run's output from other runs:
project = "Project"
runVers = "Run"
# Default plot.
# Select ONE only:
#makePlot = "structure"
#makePlot = "sed"
makePlot = "spectrum"
#makePlot = "ldc"
#makePlot = "ft"
#makePlot = "tlaLine"
# Spectrum synthesis mode:
# - uses model in Restart.py with minimal structure calculation
specSynMode = False
# Model atmosphere parameters:
teff = 9550.0 # effective temperature, K
logg = 4.0 # log surface gravity, cgs
log10ZScale = 0.0 # metallicity scale, [A/H]
massStar = 1.0 # stellar mass, solar masses
xiT = 1.0 # microturbulent velocity, km/s
logHeFe = 0.0 # He enhancement, [He/Fe]
logCO = 0.0 # carbon-to-oxygen ratio, [C/O]
logAlphaFe = 0.0 # [alpha-elements/Fe]
# Spectrum synthesis wavelength window (brackets H-alpha at 656.28 nm):
lambdaStart = 655.7 # nm
lambdaStop = 657.0 # nm
# Output filename stem encodes the key run parameters:
fileStem = project + "-"\
 + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
 + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
 + "-" + runVers
lineThresh = -3.0 # min log(kapLine/kapCnt) for inclusion at all - currently also used as "lineVoigt" threshold
voigtThresh = -3.0 # min log(kapLine/kapCnt) for Voigt treatment - currently not used - all lines get Voigt
logGammaCol = 0.0 # log collisional broadening enhancement
logKapFudge = 0.0 # log opacity fudge factor
macroV = 1.0 # macroturbulent velocity, km/s
rotV = 1.0 # equatorial rotation velocity, km/s
rotI = 90.0 # rotation-axis inclination, degrees
RV = 0.0 # radial velocity, km/s
vacAir = "vacuum" # wavelength scale: "vacuum" or "air"
sampling = "fine" # spectral sampling density
# Performance vs realism:
nOuterIter = 12 # no. of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12 # no. of inner (ion fraction) - Pe iterations
ifTiO = 0 # whether to include TiO JOLA bands in synthesis
# Gaussian filter for limb-darkening curve / Fourier transform:
diskLambda = 500.0 # filter central wavelength, nm
diskSigma = 0.01 # filter width, nm
# Two-level atom and spectral line (defaults describe the Na D2-like test line):
userLam0 = 589.592 # line center, nm
userA12 = 6.24 # A_12 logarithmic abundance = log_10(N/N_H) + 12
userLogF = -0.495 # log(f) oscillator strength // saturated line
userStage = 0 # ionization stage of user species (0 (I) - 3 (IV))
userChiI1 = 5.139 # ground state chi_I, eV
userChiI2 = 47.29 # 1st ionized state chi_I, eV
userChiI3 = 71.62 # 2nd ionized state chi_I, eV
userChiI4 = 98.94 # 3rd ionized state chi_I, eV
userChiL = 0.0 # lower atomic E-level, eV
userGw1 = 2 # ground state stat. weight or partition fn (stage I) - unitless
userGw2 = 1 # ground state stat. weight or partition fn (stage II) - unitless
userGw3 = 1 # ground state stat. weight or partition fn (stage III) - unitless
userGw4 = 1 # ground state stat. weight or partition fn (stage IV) - unitless
userGwL = 2 # lower E-level stat. weight - unitless
userMass = 22.9 # atomic mass, amu
userLogGammaCol = 1.0 # log_10 Lorentzian broadening enhancement factor
| 2,668
| 33.217949
| 116
|
py
|
ChromaStarPy
|
ChromaStarPy-master/PartitionFn.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 17:12:02 2017
@author: ishort
"""
import math
def getPartFn(species):
    """Return partition-function values for an atomic species at two temperatures.

    Data are from Allen's Astrophysical Quantities, 4th Ed.

    Parameters:
      species -- chemical symbol plus Roman-numeral ionization stage,
                 e.g. "HI", "FeII", "CaIII"

    Returns a 2-element list of log10(Q):
      [0] -- for theta = 5040/T = 1.0 (T = 5040 K)
      [1] -- for theta = 5040/T = 0.5 (T = 10080 K)
    Ionization stages that don't exist (e.g. "HIII") and any species not
    in the table get the dummy value [0.0, 0.0].

    CAUTION: values are base-10 logarithms.

    BUG FIX: the original if-chain tested the misspelled key "CoII)" (stray
    parenthesis inside the string literal), so Co II never matched; the
    table below uses the correct "CoII".  All entries equal to 0.0 (dummies
    and log10(1.0) stages) are omitted -- the default covers them.
    """
    lg = math.log10
    # (log10 Q at theta=1.0, log10 Q at theta=0.5); stage III values are
    # log10 of the ground-state statistical weight.
    table = {
        "HI": (0.30, 0.30),
        "HeII": (0.30, 0.30),
        "LiI": (0.32, 0.49), "LiIII": (lg(2.0), lg(2.0)),
        "BeI": (0.01, 0.13), "BeII": (0.30, 0.30),
        "BI": (0.78, 0.78), "BIII": (lg(2.0), lg(2.0)),
        "CI": (0.97, 1.0), "CII": (0.78, 0.78),
        "NI": (0.61, 0.66), "NII": (0.95, 0.97), "NIII": (lg(6.0), lg(6.0)),
        "OI": (0.94, 0.97), "OII": (0.60, 0.61), "OIII": (lg(9.0), lg(9.0)),
        "FI": (0.75, 0.77), "FII": (0.92, 0.94), "FIII": (lg(4.0), lg(4.0)),
        "NeII": (0.73, 0.75), "NeIII": (lg(9.0), lg(9.0)),
        "NaI": (0.31, 0.60), "NaIII": (lg(6.0), lg(6.0)),
        "MgI": (0.01, 0.15), "MgII": (0.31, 0.31),
        "AlI": (0.77, 0.81), "AlII": (0.00, 0.01), "AlIII": (lg(2.0), lg(2.0)),
        "SiI": (0.98, 1.04), "SiII": (0.76, 0.77),
        "PI": (0.65, 0.79), "PII": (0.91, 0.94), "PIII": (lg(6.0), lg(6.0)),
        "SI": (0.91, 0.94), "SII": (0.62, 0.72), "SIII": (lg(9.0), lg(9.0)),
        "ClI": (0.72, 0.75), "ClII": (0.89, 0.92), "ClIII": (lg(4.0), lg(4.0)),
        "ArII": (0.69, 0.71), "ArIII": (lg(9.0), lg(9.0)),
        "KI": (0.34, 0.60), "KIII": (lg(6.0), lg(6.0)),
        "CaI": (0.07, 0.55), "CaII": (0.34, 0.54),
        "ScI": (1.08, 1.49), "ScII": (1.36, 1.52), "ScIII": (lg(10.0), lg(10.0)),
        "TiI": (1.48, 1.88), "TiII": (1.75, 1.92), "TiIII": (lg(21.0), lg(21.0)),
        # NOTE: "VI"/"VII"/"VIII" are vanadium I/II/III, not stages of "V".
        "VI": (1.62, 2.03), "VII": (1.64, 1.89), "VIII": (lg(28.0), lg(28.0)),
        "CrI": (1.02, 1.51), "CrII": (0.86, 1.22), "CrIII": (lg(25.0), lg(25.0)),
        "MnI": (0.81, 1.16), "MnII": (0.89, 1.13), "MnIII": (lg(6.0), lg(6.0)),
        "FeI": (1.43, 1.74), "FeII": (1.63, 1.80), "FeIII": (lg(25.0), lg(25.0)),
        "CoI": (1.52, 1.76), "CoII": (1.46, 1.66), "CoIII": (lg(28.0), lg(28.0)),
        "NiI": (1.47, 1.60), "NiII": (1.02, 1.28), "NiIII": (lg(21.0), lg(21.0)),
        "CuI": (0.36, 0.58), "CuII": (0.01, 0.18), "CuIII": (lg(10.0), lg(10.0)),
        "ZnI": (0.00, 0.03), "ZnII": (0.30, 0.30),
        "GaI": (0.73, 0.77), "GaIII": (lg(2.0), lg(2.0)),
        "KrII": (0.62, 0.66), "KrIII": (lg(9.0), lg(9.0)),
        "RbI": (0.36, 0.70), "RbIII": (lg(6.0), lg(6.0)),
        "SrI": (0.10, 0.70), "SrII": (0.34, 0.53),
        "YI": (1.08, 1.50), "YII": (1.18, 1.41), "YIII": (lg(10.0), lg(10.0)),
        "ZrI": (1.53, 1.99), "ZrII": (1.66, 1.91), "ZrIII": (lg(21.0), lg(21.0)),
        "BaI": (0.36, 0.92), "BaII": (0.62, 0.85),
        "LaI": (1.41, 1.85), "LaII": (1.47, 1.71), "LaIII": (lg(10.0), lg(10.0)),
    }
    q = table.get(species, (0.0, 0.0))
    return [q[0], q[1]]
#} //end of method getPartFn
def getMolPartFn(species):
    """// Diatomic Partition fn values, QAB, from
    //http://vizier.cfa.harvard.edu/viz-bin/VizieR?-source=J/A+A/588/A96
    //See: Barklem, P. S.; Collet, R., 2016, Astronomy & Astrophysics, Volume 588, id.A96 """
    #//Just do linear piecewise interpolation in log of the hottest five values for now.
    # Tabulated partition function Q for each supported diatomic molecule at the
    # five temperature nodes used throughout: T = 130, 500, 3000, 8000, 10000 K.
    molQ = {
        "H2":  (8.83429e-01, 3.12970e+00, 2.22684e+01, 1.24852e+02, 1.94871e+02),
        "C2":  (2.53157e+01, 2.08677e+02, 6.75852e+03, 6.15554e+04, 1.07544e+05),
        "N2":  (2.28805e+01, 8.76988e+01, 7.89979e+02, 4.32734e+03, 6.68047e+03),
        "O2":  (9.78808e+01, 3.70966e+02, 4.34427e+03, 3.30098e+04, 5.76869e+04),
        "H2+": (3.40918e+00, 1.21361e+01, 1.16205e+02, 7.56297e+02, 1.18728e+03),
        "CH":  (3.13181e+01, 1.03985e+02, 9.04412e+02, 6.99662e+03, 1.22732e+04),
        "NH":  (1.76430e+01, 6.50991e+01, 5.20090e+02, 3.35774e+03, 5.85785e+03),
        "OH":  (2.54704e+01, 8.07652e+01, 5.77700e+02, 3.11647e+03, 5.02698e+03),
        "MgH": (3.22349e+01, 1.24820e+02, 1.69231e+03, 1.72862e+04, 3.16394e+04),
        "CaH": (4.34133e+01, 1.69692e+02, 2.33105e+03, 2.24220e+04, 4.33139e+04),
        "CN":  (9.62592e+01, 3.69706e+02, 3.65207e+03, 2.59277e+04, 4.43257e+04),
        "CO":  (4.73391e+01, 1.81659e+02, 1.71706e+03, 9.67381e+03, 1.50689e+04),
        "NO":  (1.38024e+02, 7.06108e+02, 8.21159e+03, 4.97309e+04, 7.94214e+04),
        "FeO": (1.85254e+03, 7.52666e+03, 1.23649e+05, 9.55089e+05, 1.58411e+06),
        "SiO": (1.25136e+02, 4.95316e+02, 6.63653e+03, 4.56577e+04, 8.57529e+04),
        "CaO": (2.03667e+02, 8.94430e+02, 2.08874e+04, 5.21424e+05, 1.08355e+06),
        "TiO": (5.04547e+02, 3.27426e+03, 6.43969e+04, 5.28755e+05, 9.61395e+05),
        "VO":  (6.62935e+02, 2.70111e+03, 4.15856e+04, 3.57467e+05, 6.53298e+05),
    }
    qValues = molQ.get(species)
    if qValues is None:
        #//default initialization: unknown species keeps ln(Q) = 0 at all five nodes,
        # exactly as the original if-chain left the list untouched.
        return [0.0 for _ in range(5)]
    # Natural log of each tabulated Q, in node order (130 K ... 10000 K).
    return [math.log(q) for q in qValues]
# } //end of method getMolPartFn
def getPartFn2(species):
"""// Diatomic Partition fn values, QAB, from
//http://vizier.cfa.harvard.edu/viz-bin/VizieR?-source=J/A+A/588/A96
//See: Barklem, P. S.; Collet, R., 2016, Astronomy & Astrophysics, Volume 588, id.A96"""
#//Just do linear piecewise interpolation in log of to hottest five values for now:
logPartFn = [0.0 for i in range(5)]
#//default initialization
logPartFn[0] = 0.0 #//for T = 130 K
logPartFn[1] = 0.0 #//for T = 500 K
logPartFn[2] = 0.0 #//for T = 3000 K
logPartFn[3] = 0.0 #//for T = 8000 K
logPartFn[4] = 0.0 #//for T = 10000 K
if ("HI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00001e+00)
logPartFn[4] = math.log(2.00015e+00)
if ("HII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
#//dummy
if ("HIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
#//dummy
if ("HIV" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("HII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("DI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00001e+00)
logPartFn[4] = math.log(2.00014e+00)
if ("DII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
#//dummy
if ("DIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
#//dummy
if ("DIV" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("HeI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("HeII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00000e+00)
logPartFn[4] = math.log(2.00000e+00)
if ("HeIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
#//dummy
if ("HeIV" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("LiI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00473e+00)
logPartFn[3] = math.log(2.70188e+00)
logPartFn[4] = math.log(3.86752e+00)
if ("LiII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("LiIII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00000e+00)
logPartFn[4] = math.log(2.00000e+00)
#//dummy
if ("LiIV" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("BeI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00024e+00)
logPartFn[3] = math.log(1.17655e+00)
logPartFn[4] = math.log(1.41117e+00)
if ("BeII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.01924e+00)
logPartFn[4] = math.log(2.06070e+00)
if ("BeIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("BI" == species):
logPartFn[0] = math.log(5.37746e+00)
logPartFn[1] = math.log(5.82788e+00)
logPartFn[2] = math.log(5.97080e+00)
logPartFn[3] = math.log(6.06978e+00)
logPartFn[4] = math.log(6.27955e+00)
if ("BII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.01090e+00)
logPartFn[4] = math.log(1.04184e+00)
if ("BIII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00100e+00)
logPartFn[4] = math.log(2.00569e+00)
if ("CI" == species):
logPartFn[0] = math.log(6.59516e+00)
logPartFn[1] = math.log(8.27478e+00)
logPartFn[2] = math.log(8.91124e+00)
logPartFn[3] = math.log(9.78474e+00)
logPartFn[4] = math.log(1.02090e+01)
if ("CII" == species):
logPartFn[0] = math.log(3.98273e+00)
logPartFn[1] = math.log(5.33283e+00)
logPartFn[2] = math.log(5.88018e+00)
logPartFn[3] = math.log(5.95988e+00)
logPartFn[4] = math.log(5.98845e+00)
if ("CIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00073e+00)
logPartFn[4] = math.log(1.00478e+00)
if ("NI" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00100e+00)
logPartFn[3] = math.log(4.34860e+00)
logPartFn[4] = math.log(4.72409e+00)
if ("NII" == species):
logPartFn[0] = math.log(3.92596e+00)
logPartFn[1] = math.log(7.03961e+00)
logPartFn[2] = math.log(8.63000e+00)
logPartFn[3] = math.log(9.17980e+00)
logPartFn[4] = math.log(9.45305e+00)
if ("NIII" == species):
logPartFn[0] = math.log(2.58062e+00)
logPartFn[1] = math.log(4.42179e+00)
logPartFn[2] = math.log(5.67908e+00)
logPartFn[3] = math.log(5.87690e+00)
logPartFn[4] = math.log(5.90406e+00)
if ("OI" == species):
logPartFn[0] = math.log(5.60172e+00)
logPartFn[1] = math.log(7.42310e+00)
logPartFn[2] = math.log(8.68009e+00)
logPartFn[3] = math.log(9.16637e+00)
logPartFn[4] = math.log(9.41864e+00)
if ("OII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00003e+00)
logPartFn[3] = math.log(4.08460e+00)
logPartFn[4] = math.log(4.22885e+00)
if ("OIII" == species):
logPartFn[0] = math.log(2.02626e+00)
logPartFn[1] = math.log(5.23819e+00)
logPartFn[2] = math.log(8.15906e+00)
logPartFn[3] = math.log(8.80275e+00)
logPartFn[4] = math.log(9.00956e+00)
if ("FI" == species):
logPartFn[0] = math.log(4.02285e+00)
logPartFn[1] = math.log(4.62529e+00)
logPartFn[2] = math.log(5.64768e+00)
logPartFn[3] = math.log(5.85982e+00)
logPartFn[4] = math.log(5.88706e+00)
if ("FII" == species):
logPartFn[0] = math.log(5.07333e+00)
logPartFn[1] = math.log(6.36892e+00)
logPartFn[2] = math.log(8.33830e+00)
logPartFn[3] = math.log(8.85472e+00)
logPartFn[4] = math.log(9.03812e+00)
if ("FIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00000e+00)
logPartFn[3] = math.log(4.02228e+00)
logPartFn[4] = math.log(4.07763e+00)
if ("NeI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("NeII" == species):
logPartFn[0] = math.log(4.00036e+00)
logPartFn[1] = math.log(4.21176e+00)
logPartFn[2] = math.log(5.37562e+00)
logPartFn[3] = math.log(5.73812e+00)
logPartFn[4] = math.log(5.78760e+00)
if ("NeIII" == species):
logPartFn[0] = math.log(5.00248e+00)
logPartFn[1] = math.log(5.54261e+00)
logPartFn[2] = math.log(7.84726e+00)
logPartFn[3] = math.log(8.56792e+00)
logPartFn[4] = math.log(8.73276e+00)
if ("NaI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00178e+00)
logPartFn[3] = math.log(3.40984e+00)
logPartFn[4] = math.log(7.08960e+00)
if ("NaII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("NaIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.03921e+00)
logPartFn[2] = math.log(5.03856e+00)
logPartFn[3] = math.log(5.56425e+00)
logPartFn[4] = math.log(5.64305e+00)
if ("MgI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00025e+00)
logPartFn[3] = math.log(1.21285e+00)
logPartFn[4] = math.log(1.64434e+00)
if ("MgII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00976e+00)
logPartFn[4] = math.log(2.03571e+00)
if ("MgIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("AlI" == species):
logPartFn[0] = math.log(3.15743e+00)
logPartFn[1] = math.log(4.89757e+00)
logPartFn[2] = math.log(5.79075e+00)
logPartFn[3] = math.log(6.19328e+00)
logPartFn[4] = math.log(7.05012e+00)
if ("AlII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.01064e+00)
logPartFn[4] = math.log(1.04138e+00)
if ("AlIII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00037e+00)
logPartFn[4] = math.log(2.00260e+00)
if ("SiI" == species):
logPartFn[0] = math.log(2.70106e+00)
logPartFn[1] = math.log(6.03405e+00)
logPartFn[2] = math.log(8.62816e+00)
logPartFn[3] = math.log(1.04988e+01)
logPartFn[4] = math.log(1.13575e+01)
if ("SiII" == species):
logPartFn[0] = math.log(2.16657e+00)
logPartFn[1] = math.log(3.75040e+00)
logPartFn[2] = math.log(5.48529e+00)
logPartFn[3] = math.log(5.80440e+00)
logPartFn[4] = math.log(5.86668e+00)
if ("SiIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00066e+00)
logPartFn[4] = math.log(1.00443e+00)
if ("PI" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.04361e+00)
logPartFn[3] = math.log(5.50312e+00)
logPartFn[4] = math.log(6.38380e+00)
if ("PII" == species):
logPartFn[0] = math.log(1.51156e+00)
logPartFn[1] = math.log(4.16319e+00)
logPartFn[2] = math.log(7.83534e+00)
logPartFn[3] = math.log(9.54223e+00)
logPartFn[4] = math.log(1.00500e+01)
if ("PIII" == species):
logPartFn[0] = math.log(2.00822e+00)
logPartFn[1] = math.log(2.80054e+00)
logPartFn[2] = math.log(5.05924e+00)
logPartFn[3] = math.log(5.61779e+00)
logPartFn[4] = math.log(5.69424e+00)
if ("SI" == species):
logPartFn[0] = math.log(5.03922e+00)
logPartFn[1] = math.log(6.15186e+00)
logPartFn[2] = math.log(8.30016e+00)
logPartFn[3] = math.log(9.66532e+00)
logPartFn[4] = math.log(1.01385e+01)
if ("SII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00804e+00)
logPartFn[3] = math.log(4.76202e+00)
logPartFn[4] = math.log(5.35265e+00)
if ("SIII" == species):
logPartFn[0] = math.log(1.11055e+00)
logPartFn[1] = math.log(2.72523e+00)
logPartFn[2] = math.log(6.97489e+00)
logPartFn[3] = math.log(8.80785e+00)
logPartFn[4] = math.log(9.31110e+00)
if ("ClI" == species):
logPartFn[0] = math.log(4.00011e+00)
logPartFn[1] = math.log(4.15794e+00)
logPartFn[2] = math.log(5.31000e+00)
logPartFn[3] = math.log(5.70664e+00)
logPartFn[4] = math.log(5.76344e+00)
if ("ClII" == species):
logPartFn[0] = math.log(5.00137e+00)
logPartFn[1] = math.log(5.46184e+00)
logPartFn[2] = math.log(7.78751e+00)
logPartFn[3] = math.log(9.10464e+00)
logPartFn[4] = math.log(9.53390e+00)
if ("ClIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00171e+00)
logPartFn[3] = math.log(4.41428e+00)
logPartFn[4] = math.log(4.82231e+00)
if ("ArI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00007e+00)
if ("ArII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.03252e+00)
logPartFn[2] = math.log(5.00667e+00)
logPartFn[3] = math.log(5.54606e+00)
logPartFn[4] = math.log(5.62775e+00)
if ("ArIII" == species):
logPartFn[0] = math.log(5.00001e+00)
logPartFn[1] = math.log(5.13320e+00)
logPartFn[2] = math.log(7.23696e+00)
logPartFn[3] = math.log(8.61527e+00)
logPartFn[4] = math.log(9.02887e+00)
if ("KI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.01222e+00)
logPartFn[3] = math.log(4.77353e+00)
logPartFn[4] = math.log(9.82105e+00)
if ("KII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("KIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00394e+00)
logPartFn[2] = math.log(4.70805e+00)
logPartFn[3] = math.log(5.35493e+00)
logPartFn[4] = math.log(5.46467e+00)
if ("CaI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00701e+00)
logPartFn[3] = math.log(2.60365e+00)
logPartFn[4] = math.log(5.69578e+00)
if ("CaII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.01415e+00)
logPartFn[3] = math.log(2.91713e+00)
logPartFn[4] = math.log(3.56027e+00)
if ("CaIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("ScI" == species):
logPartFn[0] = math.log(4.93134e+00)
logPartFn[1] = math.log(7.69658e+00)
logPartFn[2] = math.log(9.68986e+00)
logPartFn[3] = math.log(2.16410e+01)
logPartFn[4] = math.log(3.31527e+01)
if ("ScII" == species):
logPartFn[0] = math.log(6.34225e+00)
logPartFn[1] = math.log(1.13155e+01)
logPartFn[2] = math.log(1.78090e+01)
logPartFn[3] = math.log(2.94840e+01)
logPartFn[4] = math.log(3.36439e+01)
if ("ScIII" == species):
logPartFn[0] = math.log(4.67343e+00)
logPartFn[1] = math.log(7.39773e+00)
logPartFn[2] = math.log(9.45747e+00)
logPartFn[3] = math.log(9.81083e+00)
logPartFn[4] = math.log(9.88331e+00)
if ("TiI" == species):
logPartFn[0] = math.log(6.18965e+00)
logPartFn[1] = math.log(1.22473e+01)
logPartFn[2] = math.log(2.08195e+01)
logPartFn[3] = math.log(5.53232e+01)
logPartFn[4] = math.log(8.32038e+01)
if ("TiII" == species):
logPartFn[0] = math.log(6.90468e+00)
logPartFn[1] = math.log(1.72793e+01)
logPartFn[2] = math.log(4.40264e+01)
logPartFn[3] = math.log(7.23680e+01)
logPartFn[4] = math.log(8.37248e+01)
if ("TiIII" == species):
logPartFn[0] = math.log(5.99049e+00)
logPartFn[1] = math.log(1.17969e+01)
logPartFn[2] = math.log(1.89121e+01)
logPartFn[3] = math.log(2.32253e+01)
logPartFn[4] = math.log(2.49249e+01)
if ("VI" == species):
logPartFn[0] = math.log(5.55703e+00)
logPartFn[1] = math.log(1.32751e+01)
logPartFn[2] = math.log(3.47920e+01)
logPartFn[3] = math.log(7.90427e+01)
logPartFn[4] = math.log(1.11459e+02)
if ("VII" == species):
logPartFn[0] = math.log(5.45407e+00)
logPartFn[1] = math.log(1.46216e+01)
logPartFn[2] = math.log(3.18263e+01)
logPartFn[3] = math.log(6.43796e+01)
logPartFn[4] = math.log(8.08903e+01)
if ("VIII" == species):
logPartFn[0] = math.log(5.39755e+00)
logPartFn[1] = math.log(1.28067e+01)
logPartFn[2] = math.log(2.40588e+01)
logPartFn[3] = math.log(3.19510e+01)
logPartFn[4] = math.log(3.59622e+01)
if ("CrI" == species):
logPartFn[0] = math.log(7.00000e+00)
logPartFn[1] = math.log(7.00000e+00)
logPartFn[2] = math.log(7.65435e+00)
logPartFn[3] = math.log(2.01376e+01)
logPartFn[4] = math.log(3.31787e+01)
if ("CrII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00000e+00)
logPartFn[2] = math.log(6.08747e+00)
logPartFn[3] = math.log(1.21840e+01)
logPartFn[4] = math.log(1.84825e+01)
if ("CrIII" == species):
logPartFn[0] = math.log(3.31635e+00)
logPartFn[1] = math.log(1.06851e+01)
logPartFn[2] = math.log(2.12330e+01)
logPartFn[3] = math.log(2.71108e+01)
logPartFn[4] = math.log(3.11257e+01)
if ("MnI" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00000e+00)
logPartFn[2] = math.log(6.01140e+00)
logPartFn[3] = math.log(9.82265e+00)
logPartFn[4] = math.log(1.53539e+01)
if ("MnII" == species):
logPartFn[0] = math.log(7.00000e+00)
logPartFn[1] = math.log(7.00000e+00)
logPartFn[2] = math.log(7.07640e+00)
logPartFn[3] = math.log(1.07144e+01)
logPartFn[4] = math.log(1.45638e+01)
if ("MnIII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00000e+00)
logPartFn[2] = math.log(6.00011e+00)
logPartFn[3] = math.log(6.46711e+00)
logPartFn[4] = math.log(7.39061e+00)
if ("FeI" == species):
logPartFn[0] = math.log(9.07242e+00)
logPartFn[1] = math.log(1.20678e+01)
logPartFn[2] = math.log(2.19554e+01)
logPartFn[3] = math.log(4.28266e+01)
logPartFn[4] = math.log(5.96627e+01)
if ("FeII" == species):
logPartFn[0] = math.log(1.01172e+01)
logPartFn[1] = math.log(1.40327e+01)
logPartFn[2] = math.log(3.43147e+01)
logPartFn[3] = math.log(5.64784e+01)
logPartFn[4] = math.log(6.69023e+01)
if ("FeIII" == species):
logPartFn[0] = math.log(9.05759e+00)
logPartFn[1] = math.log(1.18492e+01)
logPartFn[2] = math.log(2.07199e+01)
logPartFn[3] = math.log(2.52719e+01)
logPartFn[4] = math.log(2.81882e+01)
if ("CoI" == species):
logPartFn[0] = math.log(1.00010e+01)
logPartFn[1] = math.log(1.08918e+01)
logPartFn[2] = math.log(2.44719e+01)
logPartFn[3] = math.log(4.80929e+01)
logPartFn[4] = math.log(6.08394e+01)
if ("CoII" == species):
logPartFn[0] = math.log(9.00019e+00)
logPartFn[1] = math.log(9.50563e+00)
logPartFn[2] = math.log(2.09531e+01)
logPartFn[3] = math.log(4.21891e+01)
logPartFn[4] = math.log(5.04464e+01)
if ("CoIII" == species):
logPartFn[0] = math.log(1.00007e+01)
logPartFn[1] = math.log(1.08219e+01)
logPartFn[2] = math.log(1.99830e+01)
logPartFn[3] = math.log(2.65869e+01)
logPartFn[4] = math.log(2.93889e+01)
if ("NiI" == species):
logPartFn[0] = math.log(9.72623e+00)
logPartFn[1] = math.log(1.34631e+01)
logPartFn[2] = math.log(2.63546e+01)
logPartFn[3] = math.log(3.63831e+01)
logPartFn[4] = math.log(4.15802e+01)
if ("NiII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.05237e+00)
logPartFn[2] = math.log(8.29948e+00)
logPartFn[3] = math.log(1.57985e+01)
logPartFn[4] = math.log(1.94018e+01)
if ("NiIII" == species):
logPartFn[0] = math.log(9.00000e+00)
logPartFn[1] = math.log(9.14687e+00)
logPartFn[2] = math.log(1.43380e+01)
logPartFn[3] = math.log(1.87862e+01)
logPartFn[4] = math.log(2.01688e+01)
if ("CuI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.03485e+00)
logPartFn[3] = math.log(3.25011e+00)
logPartFn[4] = math.log(4.17708e+00)
if ("CuII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00032e+00)
logPartFn[3] = math.log(1.30264e+00)
logPartFn[4] = math.log(1.69815e+00)
if ("CuIII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.01031e+00)
logPartFn[2] = math.log(7.48119e+00)
logPartFn[3] = math.log(8.75641e+00)
logPartFn[4] = math.log(8.97397e+00)
if ("ZnI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.02806e+00)
logPartFn[4] = math.log(1.11187e+00)
if ("ZnII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00099e+00)
logPartFn[4] = math.log(2.00625e+00)
if ("ZnIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00001e+00)
logPartFn[4] = math.log(1.00021e+00)
if ("GaI" == species):
logPartFn[0] = math.log(2.00043e+00)
logPartFn[1] = math.log(2.37127e+00)
logPartFn[2] = math.log(4.69154e+00)
logPartFn[3] = math.log(5.64961e+00)
logPartFn[4] = math.log(6.47300e+00)
if ("GaII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00154e+00)
logPartFn[4] = math.log(1.00881e+00)
if ("GaIII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00004e+00)
logPartFn[4] = math.log(2.00043e+00)
if ("GeI" == species):
logPartFn[0] = math.log(1.00630e+00)
logPartFn[1] = math.log(1.69040e+00)
logPartFn[2] = math.log(6.00402e+00)
logPartFn[3] = math.log(9.09691e+00)
logPartFn[4] = math.log(1.01931e+01)
if ("GeII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.02475e+00)
logPartFn[2] = math.log(3.71392e+00)
logPartFn[3] = math.log(4.91199e+00)
logPartFn[4] = math.log(5.10944e+00)
if ("GeIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00010e+00)
logPartFn[4] = math.log(1.00101e+00)
if ("AsI" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.05774e+00)
logPartFn[3] = math.log(5.65799e+00)
logPartFn[4] = math.log(6.57374e+00)
if ("AsII" == species):
logPartFn[0] = math.log(1.00002e+00)
logPartFn[1] = math.log(1.14402e+00)
logPartFn[2] = math.log(4.31914e+00)
logPartFn[3] = math.log(7.47497e+00)
logPartFn[4] = math.log(8.25460e+00)
if ("AsIII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00085e+00)
logPartFn[2] = math.log(2.97673e+00)
logPartFn[3] = math.log(4.35751e+00)
logPartFn[4] = math.log(4.62049e+00)
if ("SeI" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.01048e+00)
logPartFn[2] = math.log(6.50285e+00)
logPartFn[3] = math.log(8.64654e+00)
logPartFn[4] = math.log(9.28469e+00)
if ("SeII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.01539e+00)
logPartFn[3] = math.log(4.96394e+00)
logPartFn[4] = math.log(5.62894e+00)
if ("SeIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.02009e+00)
logPartFn[2] = math.log(3.06837e+00)
logPartFn[3] = math.log(6.14277e+00)
logPartFn[4] = math.log(6.95690e+00)
if ("BrI" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00005e+00)
logPartFn[2] = math.log(4.34162e+00)
logPartFn[3] = math.log(5.03126e+00)
logPartFn[4] = math.log(5.18274e+00)
if ("BrII" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00038e+00)
logPartFn[2] = math.log(5.84067e+00)
logPartFn[3] = math.log(7.78362e+00)
logPartFn[4] = math.log(8.38287e+00)
if ("BrIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00537e+00)
logPartFn[3] = math.log(4.62671e+00)
logPartFn[4] = math.log(5.14171e+00)
if ("KrI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00001e+00)
logPartFn[4] = math.log(1.00044e+00)
if ("KrII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.15228e+00)
logPartFn[3] = math.log(4.76145e+00)
logPartFn[4] = math.log(4.92367e+00)
if ("KrIII" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00001e+00)
logPartFn[2] = math.log(5.42146e+00)
logPartFn[3] = math.log(7.07047e+00)
logPartFn[4] = math.log(7.64176e+00)
if ("RbI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.01473e+00)
logPartFn[3] = math.log(5.41664e+00)
logPartFn[4] = math.log(1.13631e+01)
if ("RbII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("RbIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.05824e+00)
logPartFn[3] = math.log(4.53101e+00)
logPartFn[4] = math.log(4.69229e+00)
if ("SrI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.01064e+00)
logPartFn[3] = math.log(2.98824e+00)
logPartFn[4] = math.log(6.20304e+00)
if ("SrII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00865e+00)
logPartFn[3] = math.log(2.78698e+00)
logPartFn[4] = math.log(3.40185e+00)
if ("SrIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("YI" == species):
logPartFn[0] = math.log(4.01695e+00)
logPartFn[1] = math.log(5.30447e+00)
logPartFn[2] = math.log(8.85992e+00)
logPartFn[3] = math.log(2.22176e+01)
logPartFn[4] = math.log(3.32369e+01)
if ("YII" == species):
logPartFn[0] = math.log(1.00032e+00)
logPartFn[1] = math.log(1.62308e+00)
logPartFn[2] = math.log(1.09488e+01)
logPartFn[3] = math.log(2.25715e+01)
logPartFn[4] = math.log(2.70276e+01)
if ("YIII" == species):
logPartFn[0] = math.log(4.00199e+00)
logPartFn[1] = math.log(4.74694e+00)
logPartFn[2] = math.log(8.29546e+00)
logPartFn[3] = math.log(9.79259e+00)
logPartFn[4] = math.log(1.01030e+01)
if ("ZrI" == species):
logPartFn[0] = math.log(5.01271e+00)
logPartFn[1] = math.log(6.60966e+00)
logPartFn[2] = math.log(1.99689e+01)
logPartFn[3] = math.log(6.46409e+01)
logPartFn[4] = math.log(9.29133e+01)
if ("ZrII" == species):
logPartFn[0] = math.log(4.18616e+00)
logPartFn[1] = math.log(7.54284e+00)
logPartFn[2] = math.log(2.91432e+01)
logPartFn[3] = math.log(6.73523e+01)
logPartFn[4] = math.log(8.12269e+01)
if ("ZrIII" == species):
logPartFn[0] = math.log(5.00371e+00)
logPartFn[1] = math.log(6.10992e+00)
logPartFn[2] = math.log(1.49741e+01)
logPartFn[3] = math.log(2.35798e+01)
logPartFn[4] = math.log(2.64105e+01)
if ("NbI" == species):
logPartFn[0] = math.log(2.80828e+00)
logPartFn[1] = math.log(8.30968e+00)
logPartFn[2] = math.log(3.50009e+01)
logPartFn[3] = math.log(9.41753e+01)
logPartFn[4] = math.log(1.32663e+02)
if ("NbII" == species):
logPartFn[0] = math.log(1.55657e+00)
logPartFn[1] = math.log(5.28597e+00)
logPartFn[2] = math.log(2.62767e+01)
logPartFn[3] = math.log(7.22928e+01)
logPartFn[4] = math.log(9.34755e+01)
if ("NbIII" == species):
logPartFn[0] = math.log(4.01990e+00)
logPartFn[1] = math.log(5.66842e+00)
logPartFn[2] = math.log(1.76187e+01)
logPartFn[3] = math.log(3.32180e+01)
logPartFn[4] = math.log(3.96549e+01)
if ("MoI" == species):
logPartFn[0] = math.log(7.00000e+00)
logPartFn[1] = math.log(7.00000e+00)
logPartFn[2] = math.log(7.13826e+00)
logPartFn[3] = math.log(1.94435e+01)
logPartFn[4] = math.log(3.41087e+01)
if ("MoII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00000e+00)
logPartFn[2] = math.log(6.10323e+00)
logPartFn[3] = math.log(1.57858e+01)
logPartFn[4] = math.log(2.53531e+01)
if ("MoIII" == species):
logPartFn[0] = math.log(1.20909e+00)
logPartFn[1] = math.log(3.47425e+00)
logPartFn[2] = math.log(1.49713e+01)
logPartFn[3] = math.log(2.84296e+01)
logPartFn[4] = math.log(3.53071e+01)
if ("TcI" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00698e+00)
logPartFn[2] = math.log(1.26513e+01)
logPartFn[3] = math.log(4.01671e+01)
logPartFn[4] = math.log(5.99532e+01)
if ("TcII" == species):
logPartFn[0] = math.log(7.00000e+00)
logPartFn[1] = math.log(7.00047e+00)
logPartFn[2] = math.log(1.05476e+01)
logPartFn[3] = math.log(2.00471e+01)
logPartFn[4] = math.log(2.31663e+01)
if ("TcIII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00000e+00)
logPartFn[2] = math.log(6.00000e+00)
logPartFn[3] = math.log(6.00000e+00)
logPartFn[4] = math.log(6.00000e+00)
if ("RuI" == species):
logPartFn[0] = math.log(1.10000e+01)
logPartFn[1] = math.log(1.13122e+01)
logPartFn[2] = math.log(2.23319e+01)
logPartFn[3] = math.log(5.81063e+01)
logPartFn[4] = math.log(7.89315e+01)
if ("RuII" == species):
logPartFn[0] = math.log(1.00000e+01)
logPartFn[1] = math.log(1.01050e+01)
logPartFn[2] = math.log(1.71302e+01)
logPartFn[3] = math.log(3.64331e+01)
logPartFn[4] = math.log(4.65540e+01)
if ("RuIII" == species):
logPartFn[0] = math.log(9.00002e+00)
logPartFn[1] = math.log(9.28086e+00)
logPartFn[2] = math.log(1.64154e+01)
logPartFn[3] = math.log(2.09762e+01)
logPartFn[4] = math.log(2.17901e+01)
if ("RhI" == species):
logPartFn[0] = math.log(1.00000e+01)
logPartFn[1] = math.log(1.01020e+01)
logPartFn[2] = math.log(1.86621e+01)
logPartFn[3] = math.log(3.88108e+01)
logPartFn[4] = math.log(4.81045e+01)
if ("RhII" == species):
logPartFn[0] = math.log(9.00000e+00)
logPartFn[1] = math.log(9.00716e+00)
logPartFn[2] = math.log(1.22675e+01)
logPartFn[3] = math.log(2.08582e+01)
logPartFn[4] = math.log(2.51822e+01)
if ("RhIII" == species):
logPartFn[0] = math.log(1.00000e+01)
logPartFn[1] = math.log(1.00168e+01)
logPartFn[2] = math.log(1.45746e+01)
logPartFn[3] = math.log(2.50952e+01)
logPartFn[4] = math.log(2.95669e+01)
if ("PdI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.46384e+00)
logPartFn[3] = math.log(5.77132e+00)
logPartFn[4] = math.log(7.96499e+00)
if ("PdII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00015e+00)
logPartFn[2] = math.log(6.73288e+00)
logPartFn[3] = math.log(8.40680e+00)
logPartFn[4] = math.log(9.23953e+00)
if ("PdIII" == species):
logPartFn[0] = math.log(9.00000e+00)
logPartFn[1] = math.log(9.00065e+00)
logPartFn[2] = math.log(1.10655e+01)
logPartFn[3] = math.log(1.69387e+01)
logPartFn[4] = math.log(1.89830e+01)
if ("AgI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00001e+00)
logPartFn[3] = math.log(2.07520e+00)
logPartFn[4] = math.log(2.29282e+00)
if ("AgII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.01183e+00)
logPartFn[4] = math.log(1.05173e+00)
if ("AgIII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00001e+00)
logPartFn[2] = math.log(6.43868e+00)
logPartFn[3] = math.log(7.74647e+00)
logPartFn[4] = math.log(8.06409e+00)
if ("CdI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.03706e+00)
logPartFn[4] = math.log(1.13787e+00)
if ("CdII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00166e+00)
logPartFn[4] = math.log(2.00881e+00)
if ("CdIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00001e+00)
logPartFn[4] = math.log(1.00013e+00)
if ("InI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00688e+00)
logPartFn[2] = math.log(3.38443e+00)
logPartFn[3] = math.log(4.93682e+00)
logPartFn[4] = math.log(5.96634e+00)
if ("InII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00309e+00)
logPartFn[4] = math.log(1.01538e+00)
if ("InIII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00013e+00)
logPartFn[4] = math.log(2.00111e+00)
if ("SnI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.02333e+00)
logPartFn[2] = math.log(3.37985e+00)
logPartFn[3] = math.log(7.09384e+00)
logPartFn[4] = math.log(8.36474e+00)
if ("SnII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00002e+00)
logPartFn[2] = math.log(2.52079e+00)
logPartFn[3] = math.log(3.86439e+00)
logPartFn[4] = math.log(4.18355e+00)
if ("SnIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00033e+00)
logPartFn[4] = math.log(1.00255e+00)
if ("SbI" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.12205e+00)
logPartFn[3] = math.log(6.14860e+00)
logPartFn[4] = math.log(7.20611e+00)
if ("SbII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00046e+00)
logPartFn[2] = math.log(2.03582e+00)
logPartFn[3] = math.log(5.05509e+00)
logPartFn[4] = math.log(5.97984e+00)
if ("SbIII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.17082e+00)
logPartFn[3] = math.log(3.22630e+00)
logPartFn[4] = math.log(3.55562e+00)
if ("TeI" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00000e+00)
logPartFn[2] = math.log(5.44375e+00)
logPartFn[3] = math.log(7.47891e+00)
logPartFn[4] = math.log(8.22338e+00)
if ("TeII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.04540e+00)
logPartFn[3] = math.log(5.38198e+00)
logPartFn[4] = math.log(6.15477e+00)
if ("TeIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.40826e+00)
logPartFn[3] = math.log(3.64876e+00)
logPartFn[4] = math.log(4.47090e+00)
if ("II" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.05220e+00)
logPartFn[3] = math.log(4.51156e+00)
logPartFn[4] = math.log(4.69382e+00)
if ("III" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00000e+00)
logPartFn[2] = math.log(5.15261e+00)
logPartFn[3] = math.log(6.58097e+00)
logPartFn[4] = math.log(7.18642e+00)
if ("IIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.01931e+00)
logPartFn[3] = math.log(4.94321e+00)
logPartFn[4] = math.log(5.56234e+00)
if ("XeI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00015e+00)
logPartFn[4] = math.log(1.00319e+00)
if ("XeII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.01278e+00)
logPartFn[3] = math.log(4.30069e+00)
logPartFn[4] = math.log(4.43930e+00)
if ("XeIII" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00000e+00)
logPartFn[2] = math.log(5.04902e+00)
logPartFn[3] = math.log(5.97976e+00)
logPartFn[4] = math.log(6.47644e+00)
if ("CsI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.03442e+00)
logPartFn[3] = math.log(7.88683e+00)
logPartFn[4] = math.log(1.69354e+01)
if ("CsII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00001e+00)
if ("CsIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00261e+00)
logPartFn[3] = math.log(4.16580e+00)
logPartFn[4] = math.log(4.27282e+00)
if ("BaI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.21060e+00)
logPartFn[3] = math.log(8.29000e+00)
logPartFn[4] = math.log(1.66116e+01)
if ("BaII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.78137e+00)
logPartFn[3] = math.log(5.96568e+00)
logPartFn[4] = math.log(6.97202e+00)
if ("BaIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("LaI" == species):
logPartFn[0] = math.log(4.00005e+00)
logPartFn[1] = math.log(4.29315e+00)
logPartFn[2] = math.log(1.42312e+01)
logPartFn[3] = math.log(5.77343e+01)
logPartFn[4] = math.log(8.81122e+01)
if ("LaII" == species):
logPartFn[0] = math.log(5.00009e+00)
logPartFn[1] = math.log(5.51405e+00)
logPartFn[2] = math.log(2.03638e+01)
logPartFn[3] = math.log(4.28241e+01)
logPartFn[4] = math.log(5.27740e+01)
if ("LaIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.05954e+00)
logPartFn[2] = math.log(7.09841e+00)
logPartFn[3] = math.log(1.19933e+01)
logPartFn[4] = math.log(1.34792e+01)
if ("CeI" == species):
logPartFn[0] = math.log(9.39731e+00)
logPartFn[1] = math.log(1.20470e+01)
logPartFn[2] = math.log(7.59152e+01)
logPartFn[3] = math.log(4.66858e+02)
logPartFn[4] = math.log(7.08652e+02)
if ("CeII" == species):
logPartFn[0] = math.log(8.00018e+00)
logPartFn[1] = math.log(8.83511e+00)
logPartFn[2] = math.log(8.20718e+01)
logPartFn[3] = math.log(3.70729e+02)
logPartFn[4] = math.log(4.91152e+02)
if ("CeIII" == species):
logPartFn[0] = math.log(9.00000e+00)
logPartFn[1] = math.log(9.13795e+00)
logPartFn[2] = math.log(2.59288e+01)
logPartFn[3] = math.log(7.43305e+01)
logPartFn[4] = math.log(9.01542e+01)
if ("PrI" == species):
logPartFn[0] = math.log(1.00000e+01)
logPartFn[1] = math.log(1.02326e+01)
logPartFn[2] = math.log(3.50798e+01)
logPartFn[3] = math.log(3.18645e+02)
logPartFn[4] = math.log(5.02583e+02)
if ("PrII" == species):
logPartFn[0] = math.log(9.08268e+00)
logPartFn[1] = math.log(1.22739e+01)
logPartFn[2] = math.log(5.41964e+01)
logPartFn[3] = math.log(2.74460e+02)
logPartFn[4] = math.log(3.80580e+02)
if ("PrIII" == species):
logPartFn[0] = math.log(1.00000e+01)
logPartFn[1] = math.log(1.02182e+01)
logPartFn[2] = math.log(2.19864e+01)
logPartFn[3] = math.log(6.64136e+01)
logPartFn[4] = math.log(9.66706e+01)
if ("NdI" == species):
logPartFn[0] = math.log(9.00004e+00)
logPartFn[1] = math.log(9.44310e+00)
logPartFn[2] = math.log(2.95270e+01)
logPartFn[3] = math.log(3.13305e+02)
logPartFn[4] = math.log(5.42293e+02)
if ("NdII" == species):
logPartFn[0] = math.log(8.03412e+00)
logPartFn[1] = math.log(1.05550e+01)
logPartFn[2] = math.log(4.95287e+01)
logPartFn[3] = math.log(3.20614e+02)
logPartFn[4] = math.log(5.04711e+02)
if ("NdIII" == species):
logPartFn[0] = math.log(9.00004e+00)
logPartFn[1] = math.log(9.43038e+00)
logPartFn[2] = math.log(2.35346e+01)
logPartFn[3] = math.log(4.66105e+01)
logPartFn[4] = math.log(5.66490e+01)
if ("PmI" == species):
logPartFn[0] = math.log(6.00110e+00)
logPartFn[1] = math.log(6.86120e+00)
logPartFn[2] = math.log(2.40772e+01)
logPartFn[3] = math.log(7.47623e+01)
logPartFn[4] = math.log(1.13054e+02)
if ("PmII" == species):
logPartFn[0] = math.log(5.05009e+00)
logPartFn[1] = math.log(7.39584e+00)
logPartFn[2] = math.log(4.17360e+01)
logPartFn[3] = math.log(1.35782e+02)
logPartFn[4] = math.log(1.78314e+02)
if ("PmIII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00000e+00)
logPartFn[2] = math.log(6.00000e+00)
logPartFn[3] = math.log(6.00000e+00)
logPartFn[4] = math.log(6.00000e+00)
if ("SmI" == species):
logPartFn[0] = math.log(1.11838e+00)
logPartFn[1] = math.log(2.88715e+00)
logPartFn[2] = math.log(1.84133e+01)
logPartFn[3] = math.log(1.09023e+02)
logPartFn[4] = math.log(1.89582e+02)
if ("SmII" == species):
logPartFn[0] = math.log(2.10827e+00)
logPartFn[1] = math.log(4.26988e+00)
logPartFn[2] = math.log(3.12536e+01)
logPartFn[3] = math.log(1.19821e+02)
logPartFn[4] = math.log(1.75950e+02)
if ("SmIII" == species):
logPartFn[0] = math.log(1.11724e+00)
logPartFn[1] = math.log(2.88059e+00)
logPartFn[2] = math.log(1.77904e+01)
logPartFn[3] = math.log(3.49109e+01)
logPartFn[4] = math.log(4.10217e+01)
if ("EuI" == species):
logPartFn[0] = math.log(8.00000e+00)
logPartFn[1] = math.log(8.00000e+00)
logPartFn[2] = math.log(8.15223e+00)
logPartFn[3] = math.log(2.69921e+01)
logPartFn[4] = math.log(5.13327e+01)
if ("EuII" == species):
logPartFn[0] = math.log(9.00000e+00)
logPartFn[1] = math.log(9.05745e+00)
logPartFn[2] = math.log(1.24460e+01)
logPartFn[3] = math.log(2.38118e+01)
logPartFn[4] = math.log(3.15054e+01)
if ("EuIII" == species):
logPartFn[0] = math.log(8.00000e+00)
logPartFn[1] = math.log(8.00000e+00)
logPartFn[2] = math.log(8.00004e+00)
logPartFn[3] = math.log(8.65078e+00)
logPartFn[4] = math.log(1.03670e+01)
if ("GdI" == species):
logPartFn[0] = math.log(5.67234e+00)
logPartFn[1] = math.log(1.14248e+01)
logPartFn[2] = math.log(3.48245e+01)
logPartFn[3] = math.log(1.36394e+02)
logPartFn[4] = math.log(2.17943e+02)
if ("GdII" == species):
logPartFn[0] = math.log(6.45033e+00)
logPartFn[1] = math.log(1.18671e+01)
logPartFn[2] = math.log(4.85326e+01)
logPartFn[3] = math.log(1.46650e+02)
logPartFn[4] = math.log(1.95931e+02)
if ("GdIII" == species):
logPartFn[0] = math.log(5.32234e+00)
logPartFn[1] = math.log(9.64026e+00)
logPartFn[2] = math.log(3.56490e+01)
logPartFn[3] = math.log(6.82437e+01)
logPartFn[4] = math.log(7.69994e+01)
if ("TbI" == species):
logPartFn[0] = math.log(1.67331e+01)
logPartFn[1] = math.log(2.93841e+01)
logPartFn[2] = math.log(8.97544e+01)
logPartFn[3] = math.log(3.47204e+02)
logPartFn[4] = math.log(5.12797e+02)
if ("TbII" == species):
logPartFn[0] = math.log(1.70002e+01)
logPartFn[1] = math.log(1.78116e+01)
logPartFn[2] = math.log(5.25407e+01)
logPartFn[3] = math.log(1.68157e+02)
logPartFn[4] = math.log(2.17753e+02)
if ("TbIII" == species):
logPartFn[0] = math.log(1.60000e+01)
logPartFn[1] = math.log(1.60044e+01)
logPartFn[2] = math.log(2.36107e+01)
logPartFn[3] = math.log(7.90783e+01)
logPartFn[4] = math.log(1.08698e+02)
if ("DyI" == species):
logPartFn[0] = math.log(1.70000e+01)
logPartFn[1] = math.log(1.70001e+01)
logPartFn[2] = math.log(2.11524e+01)
logPartFn[3] = math.log(1.37365e+02)
logPartFn[4] = math.log(2.61442e+02)
if ("DyII" == species):
logPartFn[0] = math.log(1.80017e+01)
logPartFn[1] = math.log(1.94761e+01)
logPartFn[2] = math.log(3.37600e+01)
logPartFn[3] = math.log(1.26585e+02)
logPartFn[4] = math.log(2.08424e+02)
if ("DyIII" == species):
logPartFn[0] = math.log(1.70000e+01)
logPartFn[1] = math.log(1.70000e+01)
logPartFn[2] = math.log(1.70000e+01)
logPartFn[3] = math.log(1.70000e+01)
logPartFn[4] = math.log(1.70000e+01)
if ("HoI" == species):
logPartFn[0] = math.log(1.60000e+01)
logPartFn[1] = math.log(1.60000e+01)
logPartFn[2] = math.log(1.87758e+01)
logPartFn[3] = math.log(9.97150e+01)
logPartFn[4] = math.log(1.71521e+02)
if ("HoII" == species):
logPartFn[0] = math.log(1.70130e+01)
logPartFn[1] = math.log(1.93968e+01)
logPartFn[2] = math.log(3.03102e+01)
logPartFn[3] = math.log(5.61173e+01)
logPartFn[4] = math.log(7.13807e+01)
if ("HoIII" == species):
logPartFn[0] = math.log(1.60000e+01)
logPartFn[1] = math.log(1.60000e+01)
logPartFn[2] = math.log(1.73144e+01)
logPartFn[3] = math.log(3.55564e+01)
logPartFn[4] = math.log(5.14625e+01)
if ("ErI" == species):
logPartFn[0] = math.log(1.30000e+01)
logPartFn[1] = math.log(1.30000e+01)
logPartFn[2] = math.log(1.62213e+01)
logPartFn[3] = math.log(1.03737e+02)
logPartFn[4] = math.log(1.94418e+02)
if ("ErII" == species):
logPartFn[0] = math.log(1.40917e+01)
logPartFn[1] = math.log(1.73794e+01)
logPartFn[2] = math.log(2.71056e+01)
logPartFn[3] = math.log(9.05747e+01)
logPartFn[4] = math.log(1.40942e+02)
if ("ErIII" == species):
logPartFn[0] = math.log(1.30000e+01)
logPartFn[1] = math.log(1.30000e+01)
logPartFn[2] = math.log(1.52920e+01)
logPartFn[3] = math.log(3.23775e+01)
logPartFn[4] = math.log(4.33307e+01)
if ("TmI" == species):
logPartFn[0] = math.log(8.00000e+00)
logPartFn[1] = math.log(8.00000e+00)
logPartFn[2] = math.log(8.16070e+00)
logPartFn[3] = math.log(2.89498e+01)
logPartFn[4] = math.log(5.79555e+01)
if ("TmII" == species):
logPartFn[0] = math.log(9.50853e+00)
logPartFn[1] = math.log(1.25401e+01)
logPartFn[2] = math.log(1.54699e+01)
logPartFn[3] = math.log(3.09619e+01)
logPartFn[4] = math.log(4.97225e+01)
if ("TmIII" == species):
logPartFn[0] = math.log(8.00000e+00)
logPartFn[1] = math.log(8.00000e+00)
logPartFn[2] = math.log(8.08981e+00)
logPartFn[3] = math.log(1.09856e+01)
logPartFn[4] = math.log(1.49391e+01)
if ("YbI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00151e+00)
logPartFn[3] = math.log(2.45238e+00)
logPartFn[4] = math.log(5.30693e+00)
if ("YbII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00045e+00)
logPartFn[3] = math.log(2.90499e+00)
logPartFn[4] = math.log(4.84517e+00)
if ("YbIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.10983e+00)
logPartFn[4] = math.log(1.44387e+00)
if ("LuI" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.01936e+00)
logPartFn[2] = math.log(6.69800e+00)
logPartFn[3] = math.log(1.39052e+01)
logPartFn[4] = math.log(2.05472e+01)
if ("LuII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.03231e+00)
logPartFn[3] = math.log(2.81256e+00)
logPartFn[4] = math.log(4.21040e+00)
if ("LuIII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.35394e+00)
logPartFn[3] = math.log(4.70352e+00)
logPartFn[4] = math.log(5.50348e+00)
if ("HfI" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00797e+00)
logPartFn[2] = math.log(8.96794e+00)
logPartFn[3] = math.log(2.73353e+01)
logPartFn[4] = math.log(4.03170e+01)
if ("HfII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00104e+00)
logPartFn[2] = math.log(7.28122e+00)
logPartFn[3] = math.log(2.20042e+01)
logPartFn[4] = math.log(2.94434e+01)
if ("HfIII" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00000e+00)
logPartFn[2] = math.log(5.00000e+00)
logPartFn[3] = math.log(5.00000e+00)
logPartFn[4] = math.log(5.00000e+00)
if ("TaI" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.01856e+00)
logPartFn[2] = math.log(8.90727e+00)
logPartFn[3] = math.log(3.81227e+01)
logPartFn[4] = math.log(5.94676e+01)
if ("TaII" == species):
logPartFn[0] = math.log(3.00006e+00)
logPartFn[1] = math.log(3.26126e+00)
logPartFn[2] = math.log(1.20456e+01)
logPartFn[3] = math.log(4.24778e+01)
logPartFn[4] = math.log(5.72237e+01)
if ("TaIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00000e+00)
logPartFn[3] = math.log(4.00000e+00)
logPartFn[4] = math.log(4.00000e+00)
if ("WI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.02634e+00)
logPartFn[2] = math.log(6.30546e+00)
logPartFn[3] = math.log(2.85590e+01)
logPartFn[4] = math.log(4.57837e+01)
if ("WII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.05127e+00)
logPartFn[2] = math.log(6.98039e+00)
logPartFn[3] = math.log(2.94443e+01)
logPartFn[4] = math.log(4.35189e+01)
if ("WIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00456e+00)
logPartFn[2] = math.log(3.26242e+00)
logPartFn[3] = math.log(1.74093e+01)
logPartFn[4] = math.log(2.62418e+01)
if ("ReI" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00000e+00)
logPartFn[2] = math.log(6.10431e+00)
logPartFn[3] = math.log(1.55905e+01)
logPartFn[4] = math.log(2.56949e+01)
if ("ReII" == species):
logPartFn[0] = math.log(7.00000e+00)
logPartFn[1] = math.log(7.00000e+00)
logPartFn[2] = math.log(7.02641e+00)
logPartFn[3] = math.log(1.17977e+01)
logPartFn[4] = math.log(1.72060e+01)
if ("ReIII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00000e+00)
logPartFn[2] = math.log(6.00000e+00)
logPartFn[3] = math.log(6.00000e+00)
logPartFn[4] = math.log(6.00000e+00)
if ("OsI" == species):
logPartFn[0] = math.log(9.00000e+00)
logPartFn[1] = math.log(9.00193e+00)
logPartFn[2] = math.log(1.28046e+01)
logPartFn[3] = math.log(3.57251e+01)
logPartFn[4] = math.log(5.01909e+01)
if ("OsII" == species):
logPartFn[0] = math.log(1.00000e+01)
logPartFn[1] = math.log(1.00003e+01)
logPartFn[2] = math.log(1.29335e+01)
logPartFn[3] = math.log(2.68382e+01)
logPartFn[4] = math.log(3.34231e+01)
if ("OsIII" == species):
logPartFn[0] = math.log(7.00000e+00)
logPartFn[1] = math.log(7.00000e+00)
logPartFn[2] = math.log(7.00000e+00)
logPartFn[3] = math.log(7.00000e+00)
logPartFn[4] = math.log(7.00000e+00)
if ("IrI" == species):
logPartFn[0] = math.log(1.00000e+01)
logPartFn[1] = math.log(1.00029e+01)
logPartFn[2] = math.log(1.43208e+01)
logPartFn[3] = math.log(3.28930e+01)
logPartFn[4] = math.log(4.25998e+01)
if ("IrII" == species):
logPartFn[0] = math.log(1.10000e+01)
logPartFn[1] = math.log(1.10141e+01)
logPartFn[2] = math.log(1.64858e+01)
logPartFn[3] = math.log(3.43934e+01)
logPartFn[4] = math.log(4.27953e+01)
if ("IrIII" == species):
logPartFn[0] = math.log(1.00000e+01)
logPartFn[1] = math.log(1.00000e+01)
logPartFn[2] = math.log(1.00000e+01)
logPartFn[3] = math.log(1.00000e+01)
logPartFn[4] = math.log(1.00000e+01)
if ("PtI" == species):
logPartFn[0] = math.log(7.00192e+00)
logPartFn[1] = math.log(8.37770e+00)
logPartFn[2] = math.log(1.68661e+01)
logPartFn[3] = math.log(2.39027e+01)
logPartFn[4] = math.log(2.70210e+01)
if ("PtII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00001e+00)
logPartFn[2] = math.log(7.18367e+00)
logPartFn[3] = math.log(1.45322e+01)
logPartFn[4] = math.log(1.81439e+01)
if ("PtIII" == species):
logPartFn[0] = math.log(9.00000e+00)
logPartFn[1] = math.log(9.00000e+00)
logPartFn[2] = math.log(9.00000e+00)
logPartFn[3] = math.log(9.00000e+00)
logPartFn[4] = math.log(9.00000e+00)
if ("AuI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.07431e+00)
logPartFn[3] = math.log(3.26015e+00)
logPartFn[4] = math.log(3.89945e+00)
if ("AuII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00623e+00)
logPartFn[3] = math.log(1.73082e+00)
logPartFn[4] = math.log(2.36680e+00)
if ("AuIII" == species):
logPartFn[0] = math.log(6.00000e+00)
logPartFn[1] = math.log(6.00000e+00)
logPartFn[2] = math.log(6.00000e+00)
logPartFn[3] = math.log(6.00000e+00)
logPartFn[4] = math.log(6.00000e+00)
if ("HgI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00627e+00)
logPartFn[4] = math.log(1.03521e+00)
if ("HgII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.01083e+00)
logPartFn[4] = math.log(2.04111e+00)
if ("HgIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00459e+00)
logPartFn[4] = math.log(1.02282e+00)
if ("TlI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.09532e+00)
logPartFn[3] = math.log(3.13616e+00)
logPartFn[4] = math.log(4.01172e+00)
if ("TlII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00046e+00)
logPartFn[4] = math.log(1.00317e+00)
if ("TlIII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00000e+00)
logPartFn[3] = math.log(2.00006e+00)
logPartFn[4] = math.log(2.00068e+00)
if ("PbI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.10102e+00)
logPartFn[3] = math.log(2.61747e+00)
logPartFn[4] = math.log(3.50725e+00)
if ("PbII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00467e+00)
logPartFn[3] = math.log(2.31815e+00)
logPartFn[4] = math.log(2.52964e+00)
if ("PbIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00005e+00)
logPartFn[4] = math.log(1.00051e+00)
if ("BiI" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.02047e+00)
logPartFn[3] = math.log(4.95911e+00)
logPartFn[4] = math.log(5.65786e+00)
if ("BiII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00645e+00)
logPartFn[3] = math.log(1.51854e+00)
logPartFn[4] = math.log(1.91272e+00)
if ("BiIII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.00019e+00)
logPartFn[3] = math.log(2.09519e+00)
logPartFn[4] = math.log(2.20117e+00)
if ("PoI" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00000e+00)
logPartFn[2] = math.log(5.02832e+00)
logPartFn[3] = math.log(5.51747e+00)
logPartFn[4] = math.log(5.89033e+00)
if ("PoII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00000e+00)
logPartFn[3] = math.log(4.00000e+00)
logPartFn[4] = math.log(4.00000e+00)
if ("PoIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("AtI" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00000e+00)
logPartFn[3] = math.log(4.00297e+00)
logPartFn[4] = math.log(4.01505e+00)
if ("AtII" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00000e+00)
logPartFn[2] = math.log(5.00000e+00)
logPartFn[3] = math.log(5.00000e+00)
logPartFn[4] = math.log(5.00000e+00)
if ("AtIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00000e+00)
logPartFn[3] = math.log(4.00000e+00)
logPartFn[4] = math.log(4.00000e+00)
if ("RnI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00086e+00)
logPartFn[4] = math.log(1.00996e+00)
if ("RnII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00000e+00)
logPartFn[3] = math.log(4.00773e+00)
logPartFn[4] = math.log(4.02348e+00)
if ("RnIII" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00000e+00)
logPartFn[2] = math.log(5.00000e+00)
logPartFn[3] = math.log(5.00000e+00)
logPartFn[4] = math.log(5.00000e+00)
if ("FrI" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.01509e+00)
logPartFn[3] = math.log(4.72683e+00)
logPartFn[4] = math.log(8.72909e+00)
if ("FrII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("FrIII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.00000e+00)
logPartFn[3] = math.log(4.00000e+00)
logPartFn[4] = math.log(4.00000e+00)
if ("RaI" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.02509e+00)
logPartFn[3] = math.log(3.46852e+00)
logPartFn[4] = math.log(5.89341e+00)
if ("RaII" == species):
logPartFn[0] = math.log(2.00000e+00)
logPartFn[1] = math.log(2.00000e+00)
logPartFn[2] = math.log(2.02050e+00)
logPartFn[3] = math.log(3.04735e+00)
logPartFn[4] = math.log(3.76227e+00)
if ("RaIII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("AcI" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00977e+00)
logPartFn[2] = math.log(6.29803e+00)
logPartFn[3] = math.log(1.61130e+01)
logPartFn[4] = math.log(2.21832e+01)
if ("AcII" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.99066e+00)
logPartFn[3] = math.log(9.40315e+00)
logPartFn[4] = math.log(1.34413e+01)
if ("AcIII" == species):
logPartFn[0] = math.log(2.00057e+00)
logPartFn[1] = math.log(2.39921e+00)
logPartFn[2] = math.log(5.52353e+00)
logPartFn[3] = math.log(8.45689e+00)
logPartFn[4] = math.log(9.28067e+00)
if ("ThI" == species):
logPartFn[0] = math.log(5.00000e+00)
logPartFn[1] = math.log(5.00263e+00)
logPartFn[2] = math.log(1.10507e+01)
logPartFn[3] = math.log(4.50659e+01)
logPartFn[4] = math.log(5.99031e+01)
if ("ThII" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.09432e+00)
logPartFn[2] = math.log(1.45463e+01)
logPartFn[3] = math.log(6.18533e+01)
logPartFn[4] = math.log(8.01502e+01)
if ("ThIII" == species):
logPartFn[0] = math.log(9.00000e+00)
logPartFn[1] = math.log(9.00000e+00)
logPartFn[2] = math.log(9.00000e+00)
logPartFn[3] = math.log(9.00000e+00)
logPartFn[4] = math.log(9.00000e+00)
if ("PaI" == species):
logPartFn[0] = math.log(1.20011e+01)
logPartFn[1] = math.log(1.30217e+01)
logPartFn[2] = math.log(4.32464e+01)
logPartFn[3] = math.log(1.22610e+02)
logPartFn[4] = math.log(1.49295e+02)
if ("PaII" == species):
logPartFn[0] = math.log(9.00122e+00)
logPartFn[1] = math.log(1.01871e+01)
logPartFn[2] = math.log(4.27330e+01)
logPartFn[3] = math.log(9.03874e+01)
logPartFn[4] = math.log(1.01197e+02)
if ("PaIII" == species):
logPartFn[0] = math.log(1.20000e+01)
logPartFn[1] = math.log(1.20000e+01)
logPartFn[2] = math.log(1.20000e+01)
logPartFn[3] = math.log(1.20000e+01)
logPartFn[4] = math.log(1.20000e+01)
if ("UI" == species):
logPartFn[0] = math.log(1.30115e+01)
logPartFn[1] = math.log(1.48466e+01)
logPartFn[2] = math.log(3.35353e+01)
logPartFn[3] = math.log(1.07772e+02)
logPartFn[4] = math.log(1.36160e+02)
if ("UII" == species):
logPartFn[0] = math.log(1.04902e+01)
logPartFn[1] = math.log(1.60511e+01)
logPartFn[2] = math.log(5.15324e+01)
logPartFn[3] = math.log(1.55945e+02)
logPartFn[4] = math.log(1.91265e+02)
if ("UIII" == species):
logPartFn[0] = math.log(9.00000e+00)
logPartFn[1] = math.log(9.00000e+00)
logPartFn[2] = math.log(9.00000e+00)
logPartFn[3] = math.log(9.00000e+00)
logPartFn[4] = math.log(9.00000e+00)
if ("H-" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("C-" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.08591e+00)
logPartFn[3] = math.log(5.67986e+00)
logPartFn[4] = math.log(6.40004e+00)
if ("O-" == species):
logPartFn[0] = math.log(4.28183e+00)
logPartFn[1] = math.log(5.20160e+00)
logPartFn[2] = math.log(5.83718e+00)
logPartFn[3] = math.log(5.93732e+00)
logPartFn[4] = math.log(5.94969e+00)
if ("F-" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
if ("Si-" == species):
logPartFn[0] = math.log(4.00000e+00)
logPartFn[1] = math.log(4.00000e+00)
logPartFn[2] = math.log(4.38825e+00)
logPartFn[3] = math.log(7.70408e+00)
logPartFn[4] = math.log(8.92238e+00)
if ("S-" == species):
logPartFn[0] = math.log(4.00949e+00)
logPartFn[1] = math.log(4.49753e+00)
logPartFn[2] = math.log(5.58609e+00)
logPartFn[3] = math.log(5.83344e+00)
logPartFn[4] = math.log(5.86560e+00)
if ("Cl-" == species):
logPartFn[0] = math.log(1.00000e+00)
logPartFn[1] = math.log(1.00000e+00)
logPartFn[2] = math.log(1.00000e+00)
logPartFn[3] = math.log(1.00000e+00)
logPartFn[4] = math.log(1.00000e+00)
return logPartFn
# } //end of method getPartFn2
| 107,648
| 29.677971
| 93
|
py
|
ChromaStarPy
|
ChromaStarPy-master/TransitLightCurveAnlytc2.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 12 13:06:03 2020
@author: iansh
"""
import math
import numpy
import Useful
#import sys
import matplotlib.pyplot as plt
"""
#
# Compute the analytic light curve in the small-planet approximation of Mandel
# and Agol 2002, Eq. Section 5 ("Analytic Lightcurves for Planetary Transit Searches")
#
# Reads in planetary system parameters for a single planet around a single star
#and computes the impact parameter and time coordinate of snapshots
# corresponding to the Gauss-Legendre quadrature as the planet transits the star
#in the stellar atmosphere coordinate system
#Integrated transit solution with stellar atmosphere and radiative transfer code
# :: transit light curve entirely due to specific intensity variation across
# projected disk of background star
# - ie. No limb darkening coefficient (LDC) parameterization!
Assumptions:
o Planet's transit path is a chord (not an arc)
:: equal intervals of length along chord --> equal intervals of transit time
- okay if r_orbit >> R_star
o Eclipsed intensity (I) does not vary as a function of position across the
projected disk of the planet
- okay if R_planet << R_star
o Planet's orbit is circular and centered on centre of star
o Planet's intensity is 0
o Ingress and egress excluded
- All light variation will be from background stellar intensity
variation across projected stellar disk
- Okay if r_planet << R_star - ??
#Input:
o radius = radius of star (R_Sun)
- NOTE: already fixed by stellar parameters massStar and grav
o cosTheta - 2D array [2 x numThetas] - 2nd row is grid of cosTheta values in
stellar atmosphere coord system from main program
o vTrans - velocity of planet's transit motion projected to surface of star
o Input parameter "p" defined in Mandel & Agol (2002) Section 2
o intensLam is the 1D monochromatic I_lambda (theta) distribution "sliced" from the
2D I(lambda, cosTheta) "intens" array in CSPy
#Output:
o 1D vector of transit times corresponding to theta values transited along transit chord
"""
def transLightAnlytc2(intensLam, radius, p, cosTheta, vTrans, iFirstTheta, numTransThetas, impct):
    """Analytic transit light curve in the small-planet approximation.

    Implements the relative-flux formula of Mandel & Agol (2002,
    "Analytic Lightcurves for Planetary Transit Searches"), Sect. 5,
    with I(mu) supplied by the Claret (2000, A&A 363, 1081)
    four-parameter non-linear limb-darkening law.

    Parameters
    ----------
    intensLam : 1-D monochromatic I_lambda(theta) sliced from the 2-D
        intens array in CSPy.  Currently unused (the CSPy I(theta) test
        path was disabled in favour of the Claret law); kept for
        interface compatibility.
    radius : stellar radius (R_Sun)
    p : planet-to-star radius ratio, M&A (2002) Sect. 2
    cosTheta : 2 x numThetas array; row 1 holds the cos(theta) grid of
        the stellar-atmosphere coordinate system
    vTrans : transit velocity projected onto the stellar surface
        (unused here; the time mapping is done elsewhere)
    iFirstTheta : index of the first theta on the transit chord
    numTransThetas : number of thetas transited
    impct : impact parameter (same units as radius)

    Returns
    -------
    list of relative fluxes F (length numTransThetas), or None when
    impct >= radius (no transit occurs) -- callers must handle None.
    """
    if (impct >= radius):
        # There is no eclipse (transit); preserve the original
        # implicit-None return so existing callers behave the same.
        return None
    # Claret (2000) four-parameter limb-darkening coefficients, in order
    # of *increasing* power: c_0 .. c_4 (Claret's a_1 .. a_4).
    # For 5750/4.5/0.0/1.0 ATLAS9, V band
    # (http://vizier.u-strasbg.fr/viz-bin/VizieR?-source=J/A+A/363/1081):
    c = [0.0, 0.5169, -0.0211, 0.6944, -0.3892]
    # Normalization: the coefficients must sum to 1 at disk centre (mu=1).
    c[0] = 1.0 - c[1] - c[2] - c[3] - c[4]
    # Mandel & Agol (2002) flux normalization: Omega = sum_n c_n / (n+4)
    omegaMA = 0.0
    for n in range(5):
        omegaMA += c[n] / (n + 4)
    # M&A 2002 seems to have a curve with more flux removed than we have:
    fudge = 2.0
    # Mandel & Agol (2002)'s "F" - "relative flux", one per transit theta.
    F = [0.0 for i in range(numTransThetas)]
    # Small-planet case (p < z): I is constant over (z-p, z+p), so the
    # M&A Sect. 5 integral Istar(z) reduces to I(z) itself.
    for i in range(numTransThetas):
        mu = cosTheta[1][i + iFirstTheta]
        # Claret (2000) four-parameter law:
        I = 1.0 - c[1]*(1.0 - math.sqrt(mu)) - c[2]*(1.0 - mu)\
            - c[3]*(1.0 - math.pow(mu, 1.5)) - c[4]*(1.0 - mu**2)
        # M&A Sect. 5 relative flux, small-planet approximation:
        F[i] = 1.0 - fudge * (p**2 * I / (4.0 * omegaMA))
    return F
| 4,989
| 34.899281
| 98
|
py
|
ChromaStarPy
|
ChromaStarPy-master/TransitLightCurve2.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 12 13:06:03 2020
@author: iansh
"""
import math
import numpy
import Useful
#import sys
import matplotlib.pyplot as plt
"""
# Reads in planetary system parameters for a single planet around a single star
#and computes the impact parameter and time coordinate of snapshots
# corresponding to the Gauss-Legendre quadrature as the planet transits the star
#in the stellar atmosphere coordinate system
#Integrated transit solution with stellar atmosphere and radiative transfer code
# :: transit light curve entirely due to specific intensity variation across
# projected disk of background star
# - ie. No limb darkening coefficient (LDC) parameterization!
Assumptions:
o Planet's transit path is a chord (not an arc)
:: equal intervals of length along chord --> equal intervals of transit time
- okay if r_orbit >> R_star
o Eclipsed intensity (I) does not vary as a function of position across the
projected disk of the planet
- okay if R_planet << R_star
o Planet's orbit is circular and centered on centre of star
o Planet's intensity is 0
o Ingress and egress excluded
- All light variation will be from background stellar intensity
variation across projected stellar disk
- Okay if r_planet << R_star - ??
#Input:
o radius = radius of star (R_Sun)
- NOTE: already fixed by stellar parameters massStar and grav
o cosTheta - 2D array [2 x numThetas] - 2nd row is grid of cosTheta values in
stellar atmosphere coord system from main program
o vTrans - velocity of planet's transit motion projected to surface of star
#Output:
o 1D vector of transit times corresponding to theta values transited along transit chord
"""
def TransLight2(radius, cosTheta, vTrans, iFirstTheta, numTransThetas, impct):
    """Transit-time coordinate for each stellar-atmosphere theta crossed.

    For each theta on the transit chord, computes the linear distance
    travelled from the mid-transit point (minimum impact parameter) and
    converts it to a time via the projected transit velocity.  The
    ephemeris zero point is mid-transit.

    Parameters
    ----------
    radius : stellar radius (R_Sun)
    cosTheta : 2 x numThetas array; row 1 holds the cos(theta) grid
    vTrans : transit velocity projected onto the stellar surface (cm/s)
    iFirstTheta : index of the first theta on the transit chord
    numTransThetas : number of thetas transited
    impct : impact parameter (R_Sun)

    Returns
    -------
    list of times (s), length numTransThetas, or None when
    impct >= radius (no transit) -- callers must handle None.

    Assumes the selected thetas satisfy thisB >= impct so the sqrt
    argument is non-negative (guaranteed when iFirstTheta picks the
    first annulus outside the minimum impact parameter).
    """
    if (impct >= radius):
        # There is no eclipse (transit)
        return None
    transit = [0.0 for i in range(numTransThetas)]
    for i in range(numTransThetas):
        thisTheta = math.acos(cosTheta[1][i + iFirstTheta])
        # Impact parameter corresponding to this theta (R_Sun):
        thisB = radius * math.sin(thisTheta)
        # Linear distance travelled along the transit semi-chord,
        # converted from R_Sun to cm:
        transit[i] = math.sqrt(thisB**2 - impct**2) * Useful.rSun()
        # Time at which this annulus is eclipsed (s), measured from
        # mid-transit:
        transit[i] = transit[i] / vTrans
    return transit
| 3,370
| 33.050505
| 100
|
py
|
ChromaStarPy
|
ChromaStarPy-master/State.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 15:10:06 2017
@author: ishort
"""
import math
import Useful
import AtomicMass
def massDensity(numDeps, temp, press, mmw, zScale):
    """Solve the ideal-gas equation of state for mass density rho.

    Given the total *gas* pressure from the HSE solution and an assumed
    mean-molecular-weight structure mu(tau), computes
    rho = P_gas * mu * amu / (k T) at each depth, in the log domain.

    Parameters
    ----------
    numDeps : number of depth points
    temp : 2 x numDeps array; row 1 holds ln(T)
    press : 4 x numDeps array; rows 0 & 1 are linear and ln *gas*
        pressure, rows 2 & 3 linear and ln *radiation* pressure
    mmw : mean molecular weight in amu at each depth
    zScale : metallicity scale (unused; kept for interface compatibility)

    Returns
    -------
    rho : 2 x numDeps array; row 0 linear, row 1 ln of mass density (cgs)
    """
    logK = Useful.logK()
    logAmu = Useful.logAmu()
    rho = [[0.0 for i in range(numDeps)] for j in range(2)]
    for i in range(numDeps):
        logMuAmu = math.log(mmw[i]) + logAmu
        # ln rho = ln Pgas - ln T + ln(mu * amu / k)   (ideal gas law)
        rho[1][i] = press[1][i] - temp[1][i] + (logMuAmu - logK)
        rho[0][i] = math.exp(rho[1][i])
    return rho
def mmwFn(numDeps, temp, zScale):
    """Mean molecular weight (amu) at each depth point.

    Interpolates log(mu) linearly in ln(T) between a fully neutral
    value (mu_N = 1.3 for T <= 4000 K) and a fully ionized value
    (mu_I = 0.62 for T >= 10000 K), after Carroll & Ostlie
    (2nd ed., p. 293).

    Parameters
    ----------
    numDeps : number of depth points
    temp : 2 x numDeps array; row 1 holds ln(T)
    zScale : metallicity scale (unused; kept for interface compatibility)

    Returns
    -------
    list of mean molecular weights in amu, length numDeps
    """
    mmw = [0.0 for i in range(numDeps)]
    logMuN = math.log(1.3)
    logMuI = math.log(0.62)
    logTempN = math.log(4000.0)   # Teff in K for fully neutral gas?
    logTempI = math.log(10000.0)  # Teff in K for *Hydrogen* fully ionized?
    for i in range(numDeps):
        if (temp[1][i] <= logTempN):
            # Fix: include T == 4000 K in the neutral branch.  The
            # original used strict < and > on both branches, so the
            # exact boundary fell through to the fully ionized value.
            mmw[i] = math.exp(logMuN)
        elif (temp[1][i] < logTempI):
            # Linear interpolation of log(mu) in ln(T):
            logMu = logMuN + ((temp[1][i] - logTempN) / (logTempI - logTempN)) * (logMuI - logMuN)
            mmw[i] = math.exp(logMu)
        else:
            mmw[i] = math.exp(logMuI)
    return mmw
def getNz(numDeps, temp, pGas, pe, ATot, nelemAbnd, logAz):
    """Per-element number densities ln N_z at each depth.

    Hydrogen is computed from the gas and electron pressures,
    N_H = (Pg - Pe) / (k T A_Tot); every other element scales off
    hydrogen via its abundance, N_z = A_z * N_H.

    Side effect: pe is clamped IN PLACE so that Pe <= 0.5 * Pgas
    (the maximum physical value, complete ionization of pure H),
    to keep logNz positive as Pg and Pe converge.

    Parameters
    ----------
    numDeps : number of depth points
    temp : 2 x numDeps array; row 1 holds ln(T)
    pGas, pe : 2 x numDeps arrays; rows 0/1 linear/ln pressure
    ATot : total abundance normalization
    nelemAbnd : number of elements
    logAz : ln abundances A_z relative to H (logAz[0] expected == 0)

    Returns
    -------
    logNz : nelemAbnd x numDeps array of ln number densities
    """
    logNz = [[0.0 for i in range(numDeps)] for j in range(nelemAbnd)]
    logATot = math.log(ATot)
    for i in range(numDeps):
        # Initial safety check to avoid negative logNz as Pg and Pe
        # each converge -- clamp Pe to its physical maximum:
        if (pe[0][i] > 0.5 * pGas[0][i]):
            pe[0][i] = 0.5 * pGas[0][i]
            pe[1][i] = math.log(pe[0][i])
        # H (Z=1) is a special case: N_H = (Pg - Pe) / (k T A_Tot)
        logHelp = pe[1][i] - pGas[1][i]
        gasFrac = 1.0 - math.exp(logHelp)
        logHelp = math.log(gasFrac)
        logNumerator = pGas[1][i] + logHelp
        logNz[0][i] = logNumerator - Useful.logK() - temp[1][i] - logATot
        # Remaining elements: N_z = A_z * N_H.  Start at j = 1: the
        # original also iterated j = 0, silently re-scaling N_H by A_H,
        # which is a no-op only when logAz[0] == 0.
        for j in range(1, nelemAbnd):
            logNz[j][i] = logAz[j] + logNz[0][i]
    return logNz
def massDensity2(numDeps, nelemAbnd, logNz, cname):
    """Mass density from per-element number densities.

    Sums rho = sum_z N_z * m_z * amu over all elements at each depth.

    Parameters
    ----------
    numDeps : number of depth points
    nelemAbnd : number of elements
    logNz : nelemAbnd x numDeps array of ln number densities
    cname : element name strings, looked up in AtomicMass

    Returns
    -------
    rho : 2 x numDeps array; row 0 linear, row 1 ln mass density (cgs)
    """
    lAmu = Useful.logAmu()
    # Pre-compute the ln atomic mass of each element once:
    logAMass = [math.log(AtomicMass.getMass(cname[j])) for j in range(nelemAbnd)]
    rho = [[0.0 for i in range(numDeps)] for j in range(2)]
    for i in range(numDeps):
        total = 0.0
        for j in range(nelemAbnd):
            # Contribution of element j: N_z * amu * m_z
            total += math.exp(logNz[j][i] + lAmu + logAMass[j])
        rho[0][i] = total
        rho[1][i] = math.log(total)
    return rho
| 4,649
| 32.695652
| 127
|
py
|
ChromaStarPy
|
ChromaStarPy-master/AlfBooCaIIHK.py
|
#
#
# Custom filename tags to distinguish this run from others
project = "Project"
runVers = "Run"

# Default plot -- select exactly ONE of:
#   "structure", "sed", "spectrum", "ldc", "ft", "tlaLine"
makePlot = "spectrum"

# Spectrum-synthesis mode: uses the model in Restart.py with minimal
# structure calculation
specSynMode = False

# References for the adopted stellar parameters:
#   Griffin, R. E. M., Lynas-Gray, A. E., 1999, \aj, 117, 2998
#   Decin, L., Vandenbussche, B., Waelkens, C., Decin, G., Eriksson, K., Gustafsson, B., Plez, B., Sauval, A. J., 2003a, \aap, 400, 709

# --- Model atmosphere ---
teff = 4300.0         # effective temperature, K
logg = 2.0            # surface gravity, cgs
log10ZScale = -0.7    # [A/H]
massStar = 0.75       # stellar mass, solar masses
xiT = 2.0             # microturbulence, km/s
logHeFe = 0.0         # [He/Fe]
logCO = 0.0           # [C/O]
logAlphaFe = 0.0      # [alpha-elements/Fe]

# --- Spectrum synthesis ---
lambdaStart = 390.0   # nm
lambdaStop = 400.0    # nm
fileStem = f"{project}-{round(teff, 7)}-{round(logg, 3)}-{round(log10ZScale, 3)}" \
    f"-{round(lambdaStart, 5)}-{round(lambdaStop, 5)}-{runVers}"
lineThresh = -3.0     # min log(kapLine/kapCnt) for inclusion at all - currently also used as "lineVoigt"
voigtThresh = -3.0    # min log(kapLine/kapCnt) for Voigt treatment - currently unused; all lines get Voigt
logGammaCol = 0.5
logKapFudge = 0.0
macroV = 1.0          # macroturbulence, km/s
rotV = 1.0            # rotational velocity, km/s
rotI = 90.0           # inclination, degrees
RV = 0.0              # radial velocity, km/s
vacAir = "vacuum"
sampling = "fine"

# --- Performance vs realism ---
nOuterIter = 12       # no. of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12       # no. of inner (ion fraction) - Pe iterations
ifTiO = 1             # whether to include TiO JOLA bands in synthesis

# --- Gaussian filter for limb-darkening curve / Fourier transform ---
diskLambda = 500.0    # nm
diskSigma = 0.01      # nm

# --- Two-level atom and spectral line ---
userLam0 = 589.592    # line centre, nm
userA12 = 6.24        # A_12 logarithmic abundance = log_10(N/N_H) + 12
userLogF = -0.495     # log(f) oscillator strength (saturated line)
userStage = 0         # ionization stage of user species (0 (I) - 3 (IV))
userChiI1 = 5.139     # ground state chi_I, eV
userChiI2 = 47.29     # 1st ionized state chi_I, eV
userChiI3 = 71.62     # 2nd ionized state chi_I, eV
userChiI4 = 98.94     # 3rd ionized state chi_I, eV
userChiL = 0.0        # lower atomic E-level, eV
userGw1 = 2           # ground-state stat. weight or partition fn (stage I)
userGw2 = 1           # ground-state stat. weight or partition fn (stage II)
userGw3 = 1           # ground-state stat. weight or partition fn (stage III)
userGw4 = 1           # ground-state stat. weight or partition fn (stage IV)
userGwL = 2           # lower E-level stat. weight
userMass = 22.9       # amu
userLogGammaCol = 1.0 # log_10 Lorentzian broadening enhancement factor
| 2,865
| 34.382716
| 132
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Useful.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 12:00:20 2017
linear and logarithmic physical constant and conversion factors cgs units
@author: ishort
"""
import math
def c():
    """Speed of light in vacuum, cm/s."""
    return 2.9979249e10

def sigma():
    """Stefan-Boltzmann constant, erg/s/cm^2/K^4."""
    return 5.670373e-5

def k():
    """Boltzmann constant, erg/K."""
    return 1.3806488e-16

def h():
    """Planck's constant, erg s."""
    return 6.62606957e-27

def ee():
    """Fundamental charge unit, statcoulombs (cgs)."""
    return 4.80320425e-10

def mE():
    """Electron mass, g."""
    return 9.10938291e-28

def GConst():
    """Newton's gravitational constant (cgs)."""
    return 6.674e-8

# Conversion factors
def amu():
    """Atomic mass unit, g."""
    return 1.66053892e-24

def eV():
    """Electron-volt, erg."""
    return 1.602176565e-12

def rSun():
    """Solar radius, cm."""
    return 6.955e10

def mSun():
    """Solar mass, g."""
    return 1.9891e33

def lSun():
    """Solar bolometric luminosity, erg/s."""
    return 3.846e33

# Natural logs are more useful than base-10 logs here (e.g. in the
# formal-solution module).
# Fundamental constants:
def logC():
    """ln of the speed of light."""
    return math.log(c())

def logSigma():
    """ln of the Stefan-Boltzmann constant."""
    return math.log(sigma())

def logK():
    """ln of the Boltzmann constant."""
    return math.log(k())

def logH():
    """ln of Planck's constant."""
    return math.log(h())

def logEe():
    """ln of the fundamental charge."""
    return math.log(ee())

def logMe():
    """ln of the electron mass.  Named so it won't clash with log_10(e)."""
    return math.log(mE())

def logGConst():
    """ln of the gravitational constant."""
    return math.log(GConst())

# Conversion factors:
def logAmu():
    """ln of the atomic mass unit."""
    return math.log(amu())

def logEv():
    """ln of the electron-volt in ergs."""
    return math.log(eV())

def logRSun():
    """ln of the solar radius in cm."""
    return math.log(rSun())

def logMSun():
    """ln of the solar mass in g."""
    return math.log(mSun())

def logLSun():
    """ln of the solar luminosity in erg/s."""
    return math.log(lSun())
| 1,792
| 18.922222
| 76
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Restart.py
|
# Teff 3750.0 logg 2.0 [Fe/H] 0.0 massStar 1.0 xiT 1.0 HeFe 0.0 CO 0.0 AlfFe 0.0 lineThresh -3.0 voigtThresh -3.0 lambda0 5.15e-05 lambda1 5.1899999999999994e-05 logGamCol 0.0 logKapFudge 0.0 macroV 1.0 rotV 1.0 rotI 90.0 RV 0.0 nInner 12 nOuter 12 ifMols 1 sampling fine
teffRS = 3750.0 # K
loggRS = 2.0 #log (cm/^2)
log10ZScaleRS = 0.0
xiTRS = 1.0 # (km/s)
logKapFudgeRS = 0.0
logHeFeRS = 0.0
logCORS = 0.0
logAlphaFeRS = 0.0
numDeps = 48
tauRosRS = [ [ 0.0 for i in range(48) ] for j in range(2) ]
tauRosRS[0] = [\
9.999999999999987e-07, 1.479833198237528e-06, 2.1899062946059137e-06, 3.2406960357871624e-06, 4.795689579154601e-06, 7.0968206476747486e-06, 1.0502110796366661e-05, 1.5541372208032142e-05, 2.299863853961207e-05, 3.4034148825183046e-05, 5.03648633052627e-05, 7.453159674382292e-05, 0.00011029433117916116, 0.00016321721285632738, 0.00024153425010859465, 0.0003574304018221048, 0.0005289373746757308, 0.0007827390868337495, 0.00115832328625471, 0.0017141252532913136, 0.0025366194557577986, 0.0037537736819256025, 0.005554958913183833, 0.008220412614574905, 0.012164839490258518, 0.01800193332891547, 0.026639858572587716, 0.03942254711206796, 0.05833879397552123, 0.08633168407011593, 0.12775649214671145, 0.1890582983690759, 0.2797747463288548, 0.41401995764592286, 0.6126804780573327, 0.9066649113412799, 1.3417128354799128, 1.9855111964445844, 2.9382253839710137, 4.348083467104512, 6.434438263328999, 9.52189535408409, 14.090816855117312, 20.852058572487586, 30.857568527160552, 45.66405432338183, 67.57518355386232, 100.00000000000004\
]
tauRosRS[1] = [\
-13.815510557964275, -13.423581180433374, -13.031651802902472, -12.639722425371572, -12.24779304784067, -11.855863670309768, -11.463934292778866, -11.072004915247966, -10.680075537717064, -10.288146160186162, -9.89621678265526, -9.504287405124359, -9.112358027593459, -8.720428650062557, -8.328499272531655, -7.936569895000754, -7.544640517469852, -7.15271113993895, -6.760781762408049, -6.368852384877147, -5.9769230073462465, -5.5849936298153455, -5.193064252284444, -4.801134874753542, -4.40920549722264, -4.017276119691738, -3.625346742160838, -3.2334173646299362, -2.8414879870990344, -2.4495586095681325, -2.0576292320372325, -1.6656998545063306, -1.2737704769754288, -0.8818410994445269, -0.4899117219136251, -0.09798234438272502, 0.29394703314817683, 0.6858764106790787, 1.0778057882099805, 1.4697351657408806, 1.8616645432717824, 2.2535939208026843, 2.6455232983335843, 3.037452675864488, 3.429382053395388, 3.8213114309262917, 4.213240808457192, 4.605170185988092\
]
tempRS = [ [ 0.0 for i in range(48) ] for j in range(2) ]
tempRS[0] = [\
2251.5634364803777, 2282.988371246748, 2319.1367324814332, 2358.4176875244616, 2398.574647262628, 2437.842667276012, 2475.6624080836486, 2511.756196421699, 2546.455189633573, 2580.0523085502573, 2612.6411794087235, 2644.4090517534783, 2675.5033789886315, 2705.9311551275987, 2735.74164679706, 2765.034404334214, 2793.8076931097353, 2822.0967394700065, 2850.0459929138424, 2877.7862432645843, 2905.5791934441763, 2933.735906651785, 2962.7073066708545, 2993.31518005942, 3025.833105370122, 3060.9925819031346, 3100.5829000114654, 3144.8113178389926, 3194.794775760339, 3254.445640302385, 3324.470935841555, 3407.8023554452047, 3514.6053555505637, 3649.097281329877, 3823.966512959054, 4032.0114470582885, 4301.244801356288, 4622.905635288828, 4988.327668459459, 5441.580799835303, 5960.05968952008, 6424.599937901092, 6686.160087719801, 6877.765989264577, 7037.2327254439315, 7177.509429425402, 7308.426775995669, 7431.488997142502\
]
tempRS[1] = [\
7.71938011455252, 7.733240552888592, 7.748950297346344, 7.765746201765258, 7.7826299429354675, 7.798868774437926, 7.814263278823451, 7.828737467313283, 7.842457549324584, 7.855564952342018, 7.868116934778022, 7.880202898443081, 7.891892820687819, 7.903201366843011, 7.9141578469436435, 7.924808356676208, 7.935160708926189, 7.94523541228941, 7.955090410996989, 7.964776612022035, 7.974388027534755, 7.984031943285535, 7.993858759930425, 8.004136808041624, 8.014941739281394, 8.026494515519783, 8.039345405059152, 8.053509173006985, 8.069278131809751, 8.087777229939281, 8.109065823389361, 8.133822891973225, 8.164682523560462, 8.202235095804262, 8.24904351706157, 8.30202064879933, 8.366659748475398, 8.43877871177229, 8.514855995993479, 8.601824885842433, 8.692835775029392, 8.767889641350244, 8.807795010414576, 8.836049167334695, 8.858970293104207, 8.878707725742235, 8.896783314259613, 8.913481521017646\
]
pGasRS = [ [ 0.0 for i in range(48) ] for j in range(2) ]
pGasRS[0] = [\
4.3079702288150075e-12, 5.844376175496002, 10.340585815694489, 15.922567511067644, 22.731165910687007, 30.96429408584286, 40.88750490542381, 52.84038381770614, 67.16069086872933, 84.34178065760274, 105.03856444020131, 129.96825389035521, 159.99235781496168, 196.15320030254668, 239.71592862722585, 292.21097348085164, 355.49511755855036, 431.82623046913284, 523.9348305284568, 635.1113886035603, 769.2949094156568, 931.1566373169975, 1126.1650203964846, 1360.5278962848886, 1641.2767908597957, 1976.2450788425758, 2373.2900541925032, 2840.188699999715, 3384.2932301560586, 4009.053265849281, 4712.631676712049, 5486.263261130342, 6304.225637360232, 7127.0932186717755, 7914.195920117954, 8654.5998174795, 9375.110495562103, 10152.66232906745, 11117.994427584783, 12227.204036144427, 13177.324359937658, 13854.416177123658, 14437.077399921322, 15066.777460631723, 15801.889882204438, 16691.89894083353, 17789.616284394237, 19157.11766025673\
]
pGasRS[1] = [\
-26.17055426733261, 1.7654798613404385, 2.336076522765035, 2.7677374432335218, 3.1237369304639224, 3.4328347369670085, 3.7108245128381743, 3.9672757433996235, 4.207088119064252, 4.434877360943569, 4.654327563109463, 4.86729021979028, 5.075126050436618, 5.278895988110262, 5.479454591578019, 5.677476053302923, 5.8735115151364665, 6.068023263037762, 6.261367307366047, 6.453800398632975, 6.645474393278894, 6.836427509437808, 7.026573352487755, 7.215628062448875, 7.4032297489322465, 7.588953898371347, 7.772032479978595, 7.951625772609841, 8.126900368909421, 8.296310399121785, 8.458001773373757, 8.610002658157024, 8.748975423625634, 8.871658747067206, 8.976413377782226, 9.065846229392813, 9.145813636962446, 9.225491248506005, 9.316320194306668, 9.411418587348086, 9.486252779855684, 9.536359318345273, 9.577554995834625, 9.620247430706927, 9.667884824659767, 9.722678787345735, 9.786370211253246, 9.860429604939894\
]
peRS = [ [ 0.0 for i in range(48) ] for j in range(2) ]
peRS[0] = [\
4.890810455543413e-16, 1.6307468874791344e-05, 2.872602331858042e-05, 4.5357038997587745e-05, 6.722972952744894e-05, 9.539895155380024e-05, 0.00013115376829335657, 0.0001760135383741337, 0.00023177257128322043, 0.00030087629358862113, 0.0003864071461274035, 0.0004920109304293629, 0.0006221410302264567, 0.0007820887650820286, 0.0009783613702154207, 0.0012190115545842942, 0.001513668938934944, 0.0018741659726048676, 0.0023153685345266247, 0.0028555614665400656, 0.00351808609087619, 0.004332529782430744, 0.005337494682572588, 0.006586634792881788, 0.008144026115882929, 0.010100893466320376, 0.01260492443262349, 0.015831809400456285, 0.020065285245440354, 0.025887724381115126, 0.03412404794371244, 0.04639736406547916, 0.06685736808601031, 0.10294202692885193, 0.1715480636767287, 0.2907821633854285, 0.49623358278014174, 0.7737095952311563, 1.1219523582552045, 2.225725581240935, 6.824058194141577, 18.517543557283986, 31.48493623310624, 45.829630761813554, 62.23319374563227, 81.23443321124941, 104.04709333651566, 131.40662026956883\
]
peRS[1] = [\
-35.254003460820925, -11.023887341921775, -10.457707110196273, -10.000945178506425, -9.60739500458455, -9.257442969572518, -8.939140119366062, -8.644949643304207, -8.369753963295539, -8.108811362448192, -7.858618961678387, -7.617009625398902, -7.382343754227492, -7.153542313525699, -6.929631456979703, -6.709714949787734, -6.493218814367352, -6.2795915331298655, -6.068186409934021, -5.858486794424749, -5.649838161426628, -5.441603662207547, -5.232998896667218, -5.022712714479614, -4.810470612415491, -4.595131397035538, -4.373667713392077, -4.145734109634059, -3.9087640592945134, -3.6539863847513003, -3.3777529249515497, -3.0705126302972547, -2.7051937633176317, -2.2735892945603586, -1.762891796965345, -1.2351808715676504, -0.7007085300868852, -0.25655867575727237, 0.11507034475086739, 0.8000829663863844, 1.9204543380166703, 2.9187185831252482, 3.449509216628388, 3.8249308417967502, 4.130888518903816, 4.397339211607738, 4.644843617192509, 4.878296487350368\
]
pRadRS = [ [ 0.0 for i in range(48) ] for j in range(2) ]
pRadRS[0] = [\
0.06481363292064833, 0.06850849273751716, 0.0729516357043252, 0.07802117893642481, 0.0834723366482764, 0.08907427794833939, 0.0947316940721814, 0.10037823258722396, 0.10604098866598642, 0.1117490035825186, 0.11750292732879264, 0.12332302117386186, 0.12922651443121663, 0.13520619383794866, 0.14126349359965193, 0.14741163835980833, 0.15364401617526155, 0.1599621391237675, 0.1663937806165732, 0.17296719559623533, 0.17974651668298158, 0.18681583289480955, 0.19430528327081537, 0.20246008171170116, 0.2114021967296465, 0.22140056443852602, 0.23307892446881975, 0.24666526118772325, 0.2627250465248368, 0.282903097502616, 0.3080489814944873, 0.34011611550269405, 0.38480072459779135, 0.4471686832219754, 0.5392447642181718, 0.6665253029376774, 0.8631897172395774, 1.1518335634974688, 1.561527037370953, 2.2112097677647475, 3.182233563610934, 4.296491648669162, 5.04007150899102, 5.643118837007505, 6.184965334338419, 6.693059751800814, 7.19490751113811, 7.69188835112445\
]
pRadRS[1] = [\
-2.7362393131737335, -2.6807975598294433, -2.617958581998437, -2.5507749643227804, -2.483239999641942, -2.418284673632108, -2.356706656090008, -2.2988099021306816, -2.2439295740854774, -2.1914999620157403, -2.141292032271725, -2.092948177611486, -2.0461884886325343, -2.000954304011767, -1.9571283836092377, -1.914526344678979, -1.8731169356790573, -1.832818122226172, -1.7933981273958572, -1.7546533232956705, -1.7162076612447912, -1.6776319982416723, -1.638324731662113, -1.5972125392173169, -1.5539928142582369, -1.5077817093046804, -1.4563781511472058, -1.3997230793558728, -1.3366472441448067, -1.262650851626688, -1.1774964778263666, -1.0784682034909139, -0.9550296771419653, -0.8048193881667629, -0.6175857031375287, -0.4056771761864937, -0.14712077748222185, 0.14135507570534855, 0.4456642125901027, 0.7935397719859196, 1.1575833287337574, 1.4577987940171653, 1.6174202702744935, 1.7304368979549665, 1.8221214010330158, 1.9010711315851267, 1.9733734856546405, 2.0401663126867717\
]
rhoRS = [ [ 0.0 for i in range(48) ] for j in range(2) ]
rhoRS[0] = [\
2.8513229502558984e-23, 4.0260551242140064e-11, 7.079228012422651e-11, 1.0739505765790436e-10, 1.5046952984028515e-10, 2.0103538098121602e-10, 2.6053966811446825e-10, 3.308743956357852e-10, 4.137054223489997e-10, 5.115701583527999e-10, 6.279157614868502e-10, 7.663417558349897e-10, 9.311299231985806e-10, 1.127503323955344e-09, 1.361745072159217e-09, 1.6413589923849332e-09, 1.9754961954047115e-09, 2.3752041893443767e-09, 2.8535898682360526e-09, 3.4262894057175584e-09, 4.1114676833480475e-09, 4.930018389468273e-09, 5.9051442276374495e-09, 7.060264799261462e-09, 8.42184435307942e-09, 1.0014969989098841e-08, 1.1853269070142148e-08, 1.395207113328115e-08, 1.6313831594389246e-08, 1.8892865551841263e-08, 2.1638289516667193e-08, 2.4451638627424697e-08, 2.7100123632365768e-08, 2.937105990489173e-08, 3.100959884299593e-08, 3.208726356520355e-08, 3.2533728760503553e-08, 3.2750801395682606e-08, 3.322309977834231e-08, 3.348645285058567e-08, 3.2936300087260257e-08, 3.209792806462718e-08, 3.21120622075836e-08, 3.255090260600465e-08, 3.3335434007153583e-08, 3.4492598431743606e-08, 3.606682069748461e-08, 3.815732444364475e-08\
]
rhoRS[1] = [\
-51.91167405921452, -23.935649003755493, -23.37127115896734, -22.954506952995477, -22.617260511770304, -22.327540198577847, -22.068265989497093, -21.8292822819465, -21.60586693558793, -21.393536377962654, -21.188615096217415, -20.989392889243604, -20.794622296080178, -20.603260096480366, -20.414498818427166, -20.227741284361137, -20.04244623196875, -19.858182428694608, -19.674688032410195, -19.491787967256183, -19.309485771612202, -19.1279231187835, -18.94744196293195, -18.768783279167085, -18.592436988370157, -18.419184864428342, -18.250662136512577, -18.087637871363803, -17.931252524887686, -17.784481470175407, -17.64880142876004, -17.526568603744664, -17.423727546991245, -17.343256004375743, -17.288969040293225, -17.254806659215614, -17.240988477745663, -17.23433840506736, -17.220020426508096, -17.212124872423928, -17.22869044129672, -17.254474355166938, -17.25403410768754, -17.240460738845126, -17.216644921397513, -17.18252107413604, -17.137892488486177, -17.08154810698469\
]
kappa500RS = [ [ 0.0 for i in range(48) ] for j in range(2) ]
kappa500RS[0] = [\
0.0005719905072357202, 0.0005386151853403772, 0.0005832423389394874, 0.0006377324050900673, 0.0007023475122123608, 0.0007773706020613326, 0.0008637358268097783, 0.0009628120168755467, 0.001076133197846917, 0.001206057729318816, 0.0013556768457611175, 0.0015282152909054777, 0.001727422210204247, 0.00195760661230646, 0.0022238821181049633, 0.0025322873832611214, 0.002889805766554352, 0.0033046704247221307, 0.003786631194644532, 0.004347034472884874, 0.004999320998517218, 0.0057592571916385135, 0.006645610675384097, 0.007681504099985087, 0.008892562438188265, 0.010311793238249082, 0.011986160371548692, 0.013966811358791423, 0.01633253664449869, 0.019237194867202583, 0.02288325197044339, 0.027650915803122657, 0.03443406635835833, 0.044409962810404886, 0.05941604172920986, 0.07879698495246697, 0.1003347706670502, 0.11392686624712071, 0.11952456082846699, 0.16571441713023527, 0.3542183810196648, 0.7218591567231797, 1.0582610677213296, 1.3892628470327186, 1.7366955612974322, 2.1126145276199364, 2.5382861232288123, 3.0234865432280476\
]
kappa500RS[1] = [\
-7.466388162463935, -7.526509183887717, -7.44690778227204, -7.357591790362425, -7.261082244784665, -7.159593355978533, -7.054243591920318, -6.94565237095798, -6.834381035083317, -6.7203983133992375, -6.603454432200766, -6.483654700973436, -6.361125033608848, -6.236032668079737, -6.108500908447794, -5.978632280642828, -5.846565987921132, -5.712418530855982, -5.576278522012597, -5.438261396736654, -5.298453176066288, -5.1569467723753455, -5.013798691056915, -4.868939904645725, -4.722540032722596, -4.574467064142748, -4.424002597130039, -4.271071380957816, -4.114596047596677, -3.950909641296924, -3.7773499909949524, -3.5880964302920315, -3.368708903512378, -3.1142914470933634, -2.8231910262926156, -2.5408805448748444, -2.2992429774187633, -2.1721985606003713, -2.124233398783627, -1.7974893509100727, -1.0378416605144183, -0.3259252329092874, 0.05662705887715636, 0.3287732805943541, 0.5519842049419225, 0.7479262930376102, 0.9314890986467553, 1.1064106499946005\
]
kappaRosRS = [ [ 0.0 for i in range(48) ] for j in range(2) ]
kappaRosRS[0] = [\
0.00012300015004865705, 1.4713399060788344e-05, 1.7256778269113525e-05, 2.0752321434000524e-05, 2.5305303771541744e-05, 3.102867981933107e-05, 3.8118008246115586e-05, 4.681910680499437e-05, 5.805776544664166e-05, 7.131542735244091e-05, 8.760455630297348e-05, 0.0001076296640253383, 0.000132252818777172, 0.00016248269999292306, 0.00019956225681052093, 0.00024503601209102807, 0.0003007261580165399, 0.00036886741661484244, 0.0004522901797043358, 0.0005544714129308269, 0.0006799118802703104, 0.0008343798094090317, 0.0010255537804216275, 0.0012644984448568568, 0.0015642770859872084, 0.0019439534837889197, 0.002435437756969271, 0.003075234416043505, 0.0039220757275651805, 0.005096547152904082, 0.006757734561801292, 0.009208221243685949, 0.013194288524466568, 0.01989106245418726, 0.031578628849088804, 0.049234218300709516, 0.07304759765544797, 0.09333774603229136, 0.1054358589150194, 0.15119906135588837, 0.32258129273395675, 0.6464785002960725, 0.9369099280033946, 1.2191774843764738, 1.512701025315841, 1.8277735169015068, 2.182148049912581, 2.583624669409529\
]
kappaRosRS[1] = [\
-9.00332498268482, -11.126751978610214, -10.967305548531826, -10.782852441207883, -10.584496548962392, -10.380598625709382, -10.17482373012057, -9.969219173294196, -9.754072087076368, -9.548397881544656, -9.342677548783515, -9.136814260263707, -8.930795173447924, -8.72493902344393, -8.519384306103127, -8.314105370094598, -8.109310481342396, -7.905073283055922, -7.701186593623467, -7.497495307181241, -7.29354735602913, -7.088821852306541, -6.882522538704915, -6.673079721703285, -6.460331487597417, -6.243031501322291, -6.017628761616097, -5.784374147769556, -5.541134242989191, -5.279191997381716, -4.997067569087751, -4.687658580517807, -4.327971230566488, -3.917484771042753, -3.455274689428821, -3.0111664033108276, -2.6166440276308087, -2.3715306866812025, -2.2496524833256437, -1.8891580232196752, -1.1314001040178496, -0.43621533685905334, -0.06516812942899364, 0.19817643824073627, 0.4138968113909307, 0.6030985686939374, 0.7803097356648543, 0.9491933235937964\
]
# Restart mean molecular weight (g per particle) at each of the 48 depths.
# Fix: the original first built a 2 x 48 zero array here and then
# immediately rebound the name to the 1-D data list -- the dead
# initialization has been removed.
mmwRS = [\
2.057441247350461e-24, 2.171274619778677e-24, 2.191980097521377e-24, 2.196147032428716e-24, 2.19204560534862e-24, 2.185176312573441e-24, 2.1779312548140926e-24, 2.1714217360569926e-24, 2.165616737257968e-24, 2.1605307531380053e-24, 2.156263743909528e-24, 2.152699190924526e-24, 2.149740590594403e-24, 2.1473789150080867e-24, 2.145574063166658e-24, 2.144258335077512e-24, 2.1434264027307785e-24, 2.1430551052953983e-24, 2.1430665717294382e-24, 2.143394859981897e-24, 2.1439085130476804e-24, 2.144451959356247e-24, 2.1447996652765236e-24, 2.1445466407357967e-24, 2.1435808173217896e-24, 2.141611240359696e-24, 2.1379629864478454e-24, 2.1328268180150415e-24, 2.1261821602816206e-24, 2.1173982358874274e-24, 2.1074219085854646e-24, 2.0968880680215813e-24, 2.0858622211027654e-24, 2.076166375656989e-24, 2.0685830699386922e-24, 2.063844552368294e-24, 2.060730243911066e-24, 2.0588587680675617e-24, 2.0579660692531483e-24, 2.057484656171585e-24, 2.056684679081627e-24, 2.054964740324279e-24, 2.0532185803637832e-24, 2.05144344771912e-24, 2.0495970039220943e-24, 2.0476862210288786e-24, 2.0456655904985552e-24, 2.043586554491407e-24\
]
| 18,260
| 245.77027
| 1,126
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Thetas.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 27 17:28:47 2017
@author: Ian
"""
import math
def thetas():
    """Return the angular grid used for disk integration of emergent intensity.

    Builds a 2 x 11 array in which row 0 holds Gaussian quadrature weights and
    row 1 holds the corresponding cos(theta) values.  The tabulated abscissae
    are *fractional* angles theta/(pi/2) on [0, 1]: the x = 0 point plus the
    ten positive abscissae of a 21-point Gauss-Legendre rule on [-1, 1]
    (values from http://pomax.github.io/bezierinfo/legendre-gauss.html), so
    the quadrature sum represents the integral over one hemisphere.

    Returns:
        list[list[float]]: [weights, cos(theta)], each row of length 11.
    """
    # (quadrature weight, fractional angle theta/(pi/2)) pairs,
    # ordered from disk centre to limb.
    nodes = [
        (0.1460811336496904, 0.0000000000000000),  # disk centre
        (0.1445244039899700, 0.1455618541608951),
        (0.1398873947910731, 0.2880213168024011),
        (0.1322689386333375, 0.4243421202074388),
        (0.1218314160537285, 0.5516188358872198),
        (0.1087972991671484, 0.6671388041974123),
        (0.0934444234560339, 0.7684399634756779),
        (0.0761001136283793, 0.8533633645833173),
        (0.0571344254268572, 0.9200993341504008),
        (0.0369537897708525, 0.9672268385663063),
        (0.0160172282577743, 0.9937521706203895),  # limb
    ]
    # Convert the fractional angles to radians (multiply by pi/2), then take
    # the cosine; the weights pass through unchanged.
    halfPi = math.pi / 2.0
    quadWeights = [w for w, _ in nodes]
    cosValues = [math.cos(frac * halfPi) for _, frac in nodes]
    # Row 0: Gaussian quadrature weights; row 1: cos(theta) values.
    return [quadWeights, cosValues]
| 7,386
| 35.751244
| 97
|
py
|
ChromaStarPy
|
ChromaStarPy-master/alfbootest.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 10:54:21 2017
@author: ishort
"""
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import pylab
#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')
#Get the data
# --- Part 1: read the observed/ATLAS reference spectrum from AlfBooAtlas/ar3900 ---
dataPath = "AlfBooAtlas/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wav = 0.0
wavStr = ""
flxStr = ""
inLine = ""
fields = [" " for i in range(2)]
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = dataPath + "ar3900"
with open(inFile, 'r') as inputHandle:
    #No header - we'll figure out number of records on fly
    wave = []
    flux = []
    #for i in range(num):
    inLine = inputHandle.readline()
    # NOTE(review): each loop iteration reads TWO lines and parses only the
    # second, i.e. every other line of "ar3900" is used -- presumably the file
    # carries two-line records; confirm the format.  Also, if the file ends
    # between the two reads, fields will be empty and fields[0] will raise
    # IndexError -- confirm the line count is always even.
    while (inLine != ""):
        inLine = inputHandle.readline()
        #print(inLine)
        if not inLine:
            break
        inLine = inputHandle.readline()
        fields = inLine.split()
        wavStr = fields[0].strip(); flxStr = fields[1].strip()
        # 0.1 factor: presumably converts Angstroms to nm -- TODO confirm units
        wav = 0.1 * float(wavStr)
        wave.append(wav)
        flux.append(float(flxStr))
pylab.plot(wave, flux, color='black')
#Now get the synthetic spectrum pre-computed with ChromaStarPy
# --- Part 2: read the ChromaStarPy synthetic spectrum + line IDs ---
modelPath = "Outputs/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = " "
#fields = [" " for i in range(2)]
"""
runVers = "pyLoop"
#Model atmosphere
teffStr = "4300.0"
loggStr = "2.0"
logZStr = "-0.7"
massStarStr = "0.75"
xiTStr = "2.0"
logHeFeStr = "0.0"
logCOStr = "0.0"
logAlphaFeStr = "0.3"
#Spectrum synthesis
lambdaStartStr = "390.0"
lambdaStopStr = "400.0"
lineThreshStr = "-3.0"
voigtThreshStr = "-3.0"
logGammaColStr = "0.5"
logKapFudgeStr = "0.0"
macroVStr = "1.0"
rotVStr = "1.0"
rotIStr = "90.0"
RVStr = "0.0"
strucStem = "Teff" + teffStr + "Logg" + loggStr + "Z" + logZStr + "M" + massStarStr+"xiT"+xiTStr + \
    "HeFe" + logHeFeStr + "CO" + logCOStr + "AlfFe" + logAlphaFeStr + "v" + runVers
strucFile = "struc." + strucStem + ".out"
specFile = "spec." + strucStem + "L"+lambdaStartStr+"-"+lambdaStopStr+"xiT"+xiTStr+"LThr"+lineThreshStr+ \
    "GamCol"+logGammaColStr+"Mac"+macroVStr+"Rot"+rotVStr+"-"+rotIStr+"RV"+RVStr + ".out"
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = modelPath + specFile;
"""
# Model parameters used to reconstruct the CSPy output file name
# (must match the parameters of the pre-computed run in Outputs/).
project = "Project"
runVers = "Run"
teff = 4300.0
logg = 2.0
log10ZScale = -0.7
lambdaStart = 390.0
lambdaStop = 400.0
fileStem = project + "-"\
    + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
    + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
    + "-" + runVers
inFile = modelPath + fileStem + ".spec.txt"
invnAir = 1.0 / 1.000277 #// reciprocal of refractive index of air at STP
#numStr = fields[0].strip() #first field is number of following records
#num = int(numStr)
waveMod = []
fluxMod = []
wav = 0.0 #//initialization
wavStr = ""
lblStr = ""
with open(inFile, 'r') as inputHandle:
    #Expects number of records on first lines, then white space delimited columns of
    #wavelengths in nm and continuum rectified fluxes
    inLine = inputHandle.readline() #line of header
    print(inLine)
    inLine = inputHandle.readline()
    print(inLine)
    fields = inLine.split()
    #number of line IDs is last field:
    numLineIdsStr = fields[len(fields)-1]
    numLineIds = int(numLineIdsStr) - 1 # to be on safe side
    print("Recovered that there are " + numLineIdsStr + " lines to ID")
    inLine = inputHandle.readline()
    print(inLine)
    fields = inLine.split()
    #number of wavelengths in spectrum is last field:
    numWavsStr = fields[len(fields)-1]
    numWavs = int(numWavsStr) # to be on safe side
    print("Recovered that there are " + numWavsStr + " wavelengths")
    #One more line of header
    inLine = inputHandle.readline() #line of header
    print(inLine)
    waveMod = [0.0 for i in range(numWavs)]
    fluxMod = [0.0 for i in range(numWavs)]
    #Get the synthetic spectrum
    # Wavelengths are multiplied by invnAir (vacuum -> air correction,
    # per the refractive-index comment above).
    for i in range(numWavs):
        inLine = inputHandle.readline()
        fields = inLine.split()
        wavStr = fields[0].strip(); flxStr = fields[1].strip()
        wav = invnAir * float(wavStr)
        waveMod[i] = wav
        fluxMod[i] = float(flxStr)
    waveIds = [0.0 for i in range(numLineIds)]
    lblIds = ["" for i in range(numLineIds)]
    #Get the line IDs
    #Expects four white-space-delimited fields:
    # wavelength, element, ion. stage, and rounded wavelength
    #Another line of header for line id section
    inLine = inputHandle.readline() #line of header
    print(inLine)
    for i in range(numLineIds):
        inLine = inputHandle.readline()
        fields = inLine.split()
        wavStr = fields[0].strip()
        wav = invnAir * float(wavStr)
        waveIds[i] = wav
        lblStr = fields[1].strip() + " " + fields[2].strip() + " " + fields[3].strip()
        lblIds[i] = lblStr
    """
    #If we do NOT know number of records:
    #for i in inputHandle: #doesn't work - 0 iterations
    while (inLine != ""):
        inLine = inputHandle.readline()
        if not inLine:
            break
        #print(inLine)
        fields = inLine.split()
        wavStr = fields[0].strip(); flxStr = fields[1].strip()
        wav = invnAir * float(wavStr)
        waveMod.append(wav)
        fluxMod.append(float(flxStr))
    """
#plot the spectrum
# --- Part 3: overplot synthetic spectrum, mark Ca II line IDs, save .eps ---
#plt.title('Synthetic spectrum')
# NOTE(review): '\l' in the TeX labels below is an invalid string escape
# (DeprecationWarning on modern Python); raw strings r'...' would be safer.
# Left unchanged here because the rendered label text is behavior.
plt.ylabel('$F_\lambda/F^C_\lambda$')
plt.xlabel('$\lambda$ (nm)')
xMin = min(waveMod)
xMax = max(waveMod)
pylab.xlim(xMin, xMax)
pylab.ylim(0.0, 1.6)
pylab.plot(waveMod, fluxMod, color="gray")
#add the line IDs
# Only Ca II identifications are marked: a short tick at y = 1.05-1.1
# and a rotated label near the top of the axes.
for i in range(numLineIds):
    if "Ca II" in lblIds[i]:
        thisLam = waveIds[i]
        thisLbl = lblIds[i]
        xPoint = [thisLam, thisLam]
        yPoint = [1.05, 1.1]
        pylab.plot(xPoint, yPoint, color='black')
        pylab.text(thisLam, 1.5, thisLbl, rotation=270)
#Save as encapsulated postscript (eps) for LaTex
epsName = fileStem + '.eps'
plt.savefig(epsName, format='eps', dpi=1000)
| 6,107
| 27.409302
| 106
|
py
|
ChromaStarPy
|
ChromaStarPy-master/CSGsRead2.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 8 11:32:00 2019
@author: Phil Bennett
- ported to python and integrated with ChromaStarPy (CSPy) by Ian Short
"""
#FORTRAN unit 4 with data about species to include:
# No! import Fort4
#import Documents.ChromaStarPy.GAS.BlockData
import CSBlockData
import CSGasData # with H2O - causes problems
#import GasData2 # Without H2O
#from ..GAS.BlockData import *
#from ..GAS.GasData import *
def gsread(cname, eheu):
    """Initialize the GAS chemical-equilibrium species tables.

    Python port of the FORTRAN routine GSREAD from Phil Bennett's GAS
    package.  Instead of reading the FORTRAN unit-4 input file (fort.4),
    the per-species data (names, classes, charges, compositions,
    equilibrium-constant fits, ...) are taken from the CSGasData and
    CSBlockData modules, and the module-global bookkeeping arrays
    (species types, element cross-indices, the indx[] formula->species
    lookup, molecular weights, ...) are filled in.

    Args:
        cname: element name strings supplied by ChromaStarPy (CSPy).
        eheu:  matching CSPy/Phoenix abundances on the A_12 scale,
               eheu[i] = log_10(N_i/N_H) + 12; used to overwrite the GAS
               comp[] abundances (see "Replace Gas abundances" below).

    Returns:
        None.  All results are communicated through the module-level
        globals declared below (the former FORTRAN COMMON blocks /gasp/,
        /gasp2/, /lin/, equil and /opacty/).
    """
    #print("GsRead called")
    #Try this:
    # Former FORTRAN COMMON blocks, kept as module globals:
    global name, ip, comp, awt, nspec, natom, itab, ntab, indx, iprint, gsinit, print0 #/gasp/
    global ipr, nch, nel, ntot, nat, zat, neut, idel, indsp, indzat, iat, natsp, iatsp #/gasp2/
    global nlin1, lin1, linv1, nlin2, lin2, linv2 #/lin/
    global logk, logwt, it, kt, type0 #equil
    #global chix, nix, nopac, ixa, ixn, opinit, opflag, opchar, iopt #/opacty/
    global chix, nix, ixa, ixn #/opacty/
    outString = ""
    #BlockData.block_data()
    # Bind the input data tables from the data modules (this replaces
    # reading the FORTRAN unit-4 / fort.4 input file):
    #ip = [0.0e0 for i in range(150)]
    ip = CSGasData.ip
    #ip = GasData2.ip
    #comp = [0.0e0 for i in range(40)]
    comp = CSGasData.comp
    #comp = GasData2.comp
    name = CSGasData.name
    #name = GasData2.name
    #awt = [0.0e0 for i in range(150)]
    awt = CSGasData.awt
    #awt = GasData2.awt
    #itab = [0 for i in range(83)]
    itab = CSBlockData.itab
    #ntab = [0 for i in range(5)]
    ntab = CSBlockData.ntab
    #indx = [ [ [ [ [0 for i in range(2)] for j in range(5) ] for k in range(7) ] for l in range(26) ] for m in range(4) ]
    indx = CSBlockData.indx
    #name = [' ' for i in range(150)]
    #name = BlockData.name
    #gsinit = False
    #print0 = False
    print0 = CSBlockData.print0
    #ipr = [0 for i in range(150)]
    ipr = CSGasData.ipr
    #ipr = GasData2.ipr
    #nch = [0 for i in range(150)]
    nch = CSGasData.nch
    #nch = GasData2.nch
    #nel = [0 for i in range(150)]
    nel = CSGasData.nel
    #nel = GasData2.nel
    #ntot = [0 for i in range(150)]
    ntot = CSBlockData.ntot
    #nat = [ [0 for i in range(150)] for j in range(5) ]
    nat = CSGasData.nat
    #nat = GasData2.nat
    #zat = [ [0 for i in range(150)] for j in range(5) ]
    zat = CSGasData.zat
    #zat = GasData2.zat
    #neut = [0 for i in range(150)]
    neut = CSBlockData.neut
    #idel = [0 for i in range(150)]
    idel = CSBlockData.idel
    #indsp = [0 for i in range(40)]
    indsp = CSBlockData.indsp
    #indzat = [0 for i in range(100)]
    indzat = CSBlockData.indzat
    #iat = [0 for i in range(150)]
    iat = CSBlockData.iat
    #natsp = [0 for i in range(40)]
    natsp = CSBlockData.natsp
    #iatsp = [ [0 for i in range(40)] for j in range(40) ]
    iatsp = CSBlockData.iatsp
    #lin1 = [0 for i in range(40)]
    lin1 = CSBlockData.lin1
    #lin2 = [0 for i in range(40)]
    lin2 = CSBlockData.lin2
    #linv1 = [0 for i in range(40)]
    linv1 = CSBlockData.linv1
    #linv2 = [0 for i in range(40)]
    linv2 = CSBlockData.linv2
    #logk = [ [0.0e0 for i in range(150)] for j in range(5) ]
    #logwt = [0.0e0 for i in range(150)]
    logk = CSGasData.logk
    #logk = GasData2.logk
    logwt = CSGasData.logwt
    #logwt = GasData2.logwt
    # Scratch arrays sized for the maximum species count (mxspec = 150):
    it = [0.0e0 for i in range(150)]
    kt = [0.0e0 for i in range(150)]
    type0 = [0 for i in range(150)]
    #ixa = [ [0 for i in range(70)] for j in range(5) ]
    ixa = CSBlockData.ixa
    #ixn = [0 for i in range(70)]
    ixn = CSBlockData.ixn
    #chix = [' ' for i in range(70)]
    chix = CSBlockData.chix
    #opchar = [' ' for i in range(25)]
    #opflag = [False for i in range(25)]
    #opinit = False
    nix = CSBlockData.nix
    #gsline = ""
    #namet = ""
    iprt = 0
    ncht = 0
    #nnz = [0 for i in range(4)]
    ix = [0 for i in range(5)]
    #blank = ' '
    ename = 'e-'
    mxatom = 30
    mxspec = 150
    #c
    #c The first line specifies whether intermediate results are outputted.
    #c iprint .eq. 0 - No.
    #c iprint .ne. 0 - Yes.
    #c
    #with open("", 'r', encoding='utf-8') as inputHandle:
    #dataPath = "./"
    #inFile = dataPath + "fort.4"
    n = 0 #record counter
    np = 0
    natom = -1 #neutral atomic species counter
    nlin1 = -1
    nlin2 = -1
    tcomp = 0.0e0
    """
    with open(inFile, 'r') as inputHandle:
        gsline = inputHandle.readline()
        lineLength = len(gsline)
        #Should be only onen field, but to be on the safe side - assume iprint is first field:
        fields = gsline.split()
        iprint = int(fields[0].strip())
        #iprint = gsline.strip()
    """
    iprint = 0
    #c
    #c The first line specifies whether intermediate results are outputted.
    #c iprint .eq. 0 - No.
    #c iprint .ne. 0 - Yes.
    #c
    if (iprint == 0):
        print0 = False
    else:
        print0 = True
    """
    lineLength = 1 #initialization
    #Get first line of data:
    gsline = inputHandle.readline()
    lineLength = len(gsline)
    """
    #nspec = len(name)
    #print("nspec ", nspec)
    # Main loop over species: classify each one (neutral atom, neutral
    # molecule, or ion) and build the element/formula cross-index tables.
    # NOTE(review): the loop ends at the first single-space entry in
    # CSGasData.name -- assumes the table is sentinel-terminated; an
    # unterminated table would raise IndexError.  Confirm.
    while (name[n] != ' '):
    #for n in range(nspec):
        #c
        #c Each following input line specifies a distinct chemical species.
        #c
        #1
        """
        #print("1: gsline: ", gsline)
        namet = gsline[0:4].strip()
        iprt = int(gsline[4:6].strip())
        ncht = int(gsline[6:9].strip())
        #print("1: namet ", namet, " iprt ", iprt, " ncht ", ncht)
        """
        #if (namet != blank):
        """
        n = n + 1
        if (n >= mxspec-1):
            print('(" *19 Error: Too many species specified. Limit is")', mxspec)
        name[n] = namet
        ipr[n] = iprt #class (1, 2, or 3, p. 34-35 of P. Bennett M.Sc. thesis)
        nch[n] = ncht #electronic charge in fcu
        """
        #namet = name[n]
        iprt = ipr[n]
        ncht = nch[n]
        idel[n] = 1
        #print("iprt ", iprt, " ncht ", ncht)
        #c
        #c Determine the species type:
        #c TYPE(N) = 1 --> Neutral atom
        #c = 2 --> Neutral molecule
        #c = 3 --> Negative ion
        #c = 4 --> Positive ion
        #c
        if (nch[n] == 0):
            #c
            #c Species is neutral
            #c
            np = n
            """
            nelt = int(gsline[9:11].strip())
            nat1 = int(gsline[11:13].strip())
            zat1 = int(gsline[13:16].strip())
            #print("2: nelt ", nelt, " nat1 ", nat1, " zat1 ", zat1)
            nel[n] = nelt #no. of distinct elements in species
            nat[0][n] = nat1 #no. of recurrences of most numerous element in species
            zat[0][n] = zat1 #atomic number of heaviest element in species
            #print("GsRead: n ", n, " zat[0] ", zat[0][n])
            """
            nelt = nel[n]
            nat1 = nat[0][n]
            #zat1 = zat[0][n]
            #print("nch = 0 nelt ", nelt, " nat1 ", nat1)
            if (nelt <= 1 and nat1 <= 1):
                #c
                #c Neutral atom (one atom of single element Z present)
                #c
                type0[n] = 1
                natom = natom + 1
                if (natom >= mxatom):
                    print('(" *20 Error: Too many elements specified.", " Limit is")', mxatom)
                iat[n] = natom
                #print("Setting indsp, n: ", n, " natom ", natom)
                indsp[natom] = n #pointer to iat[], etc....
                indzat[zat[0][n]-1] = natom #indzat's index is atomic number - 1
                ntot[n] = 1
                neut[n] = n
                #awt[n] = float(gsline[16:23].strip()) #atomic weight in amu
                #comp[natom] = float(gsline[23:32].strip()) #abundance as N/N_H
                #print("3: n ", n, " awt ", awt[n], " comp ", comp[natom])
                tcomp = tcomp + comp[natom]
                iprt = ipr[n]
                # Class-1 (and class-2) elements are entered into the
                # /lin/ linearization index tables:
                if (iprt == 1):
                    nlin1 = nlin1 + 1
                    lin1[natom] = nlin1
                    linv1[nlin1] = natom
                if ( (iprt == 1) or (iprt == 2) ):
                    nlin2 = nlin2 + 1
                    lin2[natom] = nlin2
                    linv2[nlin2] = natom
            else:
                #c
                #c Neutral molecule ( >1 atom present in species)
                #c
                type0[n] = 2
                ntot[n] = nat1
                neut[n] = n
                nleft = (nelt - 1)*2
                #print("Neutral mol: n ", n, " name ", name[n], " nelt ", nelt, " nleft ", nleft)
                if (nleft > 0):
                    """
                    nnz[0] = int(gsline[16:18].strip())
                    nnz[1] = int(gsline[18:21].strip())
                    nnzTest = gsline[21:23].strip()
                    if (nnzTest != ''):
                        nnz[2] = int(nnzTest)
                    else:
                        nnz[2] = 0
                    nnzTest = gsline[23:26].strip()
                    if (nnzTest != ''):
                        nnz[3] = int(nnzTest)
                    else:
                        nnz[3] = 0
                    #print("4: nnz ", nnz[0], " ", nnz[1], " ", nnz[2], " ", nnz[3])
                    for i in range(0, nleft, 2):
                        ii = int((i + 1)/2 + 1)
                        nat[ii][n] = nnz[i]
                        zat[ii][n] = nnz[i+1]
                        print("i ", i, " ii ", ii, " nat ", nat[ii][n], " zat ", zat[ii][n])
                        ntot[n] = ntot[n] + nat[ii][n]
                    """
                    # Total atom count: add in the contributions of the
                    # 2nd and 3rd constituent elements (rows 1 and 2 of nat):
                    for ii in range(1, 3):
                        ntot[n] = ntot[n] + nat[ii][n]
                    """
                    logk[0][n] = float(gsline[26:33].strip())
                    logk[1][n] = float(gsline[33:41].strip())
                    logk[2][n] = float(gsline[41:50].strip())
                    logk[3][n] = float(gsline[50:62].strip())
                    logk[4][n] = float(gsline[62:74].strip())
                    """
                #print("5: n ", n, " logk ", logk[0][n], " ", logk[1][n], " ", logk[2][n], " ", logk[3][n], " ", logk[4][n])
        else:
            #c
            #c Ionic species (nch .ne. 0)
            #c
            # Ions inherit their composition from the most recent neutral
            # species (index np), which must therefore precede them.
            if (np <= -1):
                print('(" *** error: ionic species encountered out of", " sequence")')
            if (ncht < 0):
                type0[n] = 3
            elif (ncht > 0):
                type0[n] = 4
            neut[n] = np
            nel[n] = nel[np]
            nelt = nel[n]
            for i in range(nelt):
                nat[i][n] = nat[i][np]
                zat[i][n] = zat[i][np]
            ntot[n] = ntot[np]
            #ip[n] = float(gsline[9:16].strip()) #ground state ionization potential (eV)
            #logwt[n] = float(gsline[16:23].strip()) #log partition fn??
            #print("6: n ", n, " ip ", ip[n], " logwt ", logwt[n])
        #c
        #c Generate master array tying chemical formula of species to
        #c its table index. A unique index is generated for a given
        #c (possibly charged) species containing up to 4 atoms.
        #c
        #c Index #1 <-- Ionic charge + 2 (dim. 4, allows chg -1 to +2)
        #c #2 <--> Index to Z of 1st atom in species (23 allowed Z)
        #c #3 <--> " 2nd " ( 6 allowed Z)
        #c #4 <--> " 3rd " ( 4 allowed Z)
        #c #5 <--> " 4th " ( 1 allowed Z)
        #c
        #ix[0] = nch[n] + 2
        ix[0] = nch[n] + 1
        nelt = nel[n]
        #k = 1
        k = 0
        #print("n ", n, " name ", name[n])
        for i in range(nelt):
            nats = nat[i][n]
            for j in range(nats):
                k = k + 1
                if (k > 4):
                    print('(" *21 Error: species ",a8," contains > 4 atoms")', name[n])
                ix[k] = itab[zat[i][n]-1]
                #print("i ", i, " j ", j, " k ", k, " ix ", ix[k], "ntab ", ntab[k])
                #print("zat-1 ", zat[i][n]-1, "itab ", itab[zat[i][n]-1])
                if ( (ix[k] <= 0) or (ix[k] > ntab[k]) ):
                    print('(" *22 Error: species atom z= not in allowed element list")', name[n], zat[i][n]-1)
        # Pad the unused formula slots with zeros before registering:
        if (k < 4):
            kp = k + 1
            for kk in range(kp, 5):
                ix[kk] = 0
                #print("kk ", kk, " ix ", ix[kk])
        indx[ix[0]][ix[1]][ix[2]][ix[3]][ix[4]] = n
        n = n + 1
        #print("n ", n, " name ", name[n], " ix ", ix[0], ix[1], ix[2], ix[3], ix[4],\
        # " indx ", indx[ix[0]][ix[1]][ix[2]][ix[3]][ix[4]])
        #go to 1
        #Ends if namet != ''??
        #Get next line of data and test of end-of-file:
        #gsline = inputHandle.readline()
        #lineLength = len(gsline)
        #print("lineLength = ", lineLength)
    #Ends file read loop "with open(infile...??)
    #After read loop:
    #print("tcomp ", tcomp)
    # Replace Gas abundances with the ones from CSPy
    #CSPy/Phoenix eheu[] values on A_12 scale where
    # eheu[i] = log_10(N_i/N_H) + 12
    # I *think* GAS comp[] value are comp[i] = N_i/N_tot
    #print("n ", n)
    CSNiOverNH = 0.0
    convTerm = 0.0
    invComp = 1.0
    #skip Hydrogen
    for i in range(n):
        #if (name[i].strip() != 'H'):
        #print("element: name ", name[i], " comp[] ", comp[iat[i]])
        for j in range(len(cname)):
            #print("element: name ", name[i], " cname ", cname[j])
            if (name[i].strip() == cname[j].strip()):
                # Convert the CSPy A_12 abundance to N_i/N_H, then to the
                # GAS fractional composition 1 / (N_H/N_i + sum_k N_k/N_i):
                CSNiOverNH = 10.0**(eheu[j]-12.0)
                #Assumes 1st GAS element is H
                for k in range(1, n):
                    convTerm += comp[iat[k]]/comp[iat[i]]
                invComp = 1.0/CSNiOverNH + convTerm
                comp[iat[i]] = 1.0 / invComp
                #print("Abundance fix element: name ", name[i], " cname ", cname[j], " newComp ", comp[iat[i]])
                convTerm = 0.0 #reset accumulator
    #c
    #c Normalize abundances such that SUM(COMP) = 1
    #c
    # Append the free electron as the final pseudo-species:
    nspec = n
    #name[nspec+1] = ename
    name[nspec] = ename
    iat[mxspec-1] = mxatom
    comp[mxatom-1] = 0.0e0
    neut[mxspec-1] = mxspec
    nsp1 = nspec + 1
    for n in range(nsp1-1, mxspec):
        idel[n] = 0
    #print("GsRead: nspec ", nspec, " natom ", natom)
    if (nspec != 0):
        for j in range(natom):
            natsp[j] = -1
            comp[j] = comp[j]/tcomp
        #c
        #c Calculate the atomic (molecular) weight of each constituent
        #c
        for n in range(nspec):
            #print("name ", name[n], " nel ", nel[n])
            nelt = nel[n]
            sum0 = 0.0e0
            iprt = ipr[n]
            for i in range(nelt):
                #print("i ", i, " n ", n, " zat ", zat[i][n]-1, " indzat ", indzat[zat[i][n]-1])
                j = indzat[zat[i][n]-1]
                #print("j ", j)
                nn = indsp[j]
                #print(" nn ", nn)
                natsp[j] = natsp[j] + 1
                iatsp[j][natsp[j]] = n
                sum0 = sum0 + nat[i][n]*awt[nn]
                # A species inherits the highest (least linearized) class
                # of any of its constituent elements:
                if (ipr[nn] > iprt):
                    iprt = ipr[nn]
            awt[n] = sum0
            ipr[n] = iprt
        #c
        #c Fill array of direct indices of species needed for opacity
        #c calculations.
        #c
        if (nix > 0):
            for i in range(nix):
                ixn[i] = indx[ixa[0][i]][ixa[1][i]][ixa[2][i]][ixa[3][i]][ixa[4][i]]
                if (ixn[i] == 149):
                    print('("0*** Warning: Opacity source ", " not included in GAS data tables")', chix[i])
    """
    #c
    #c Output species table
    #c
    #print("I am here!")
    for j in range(1, 5):
        #outString = "j " + str(j) + "\n"
        #outFile.write(outString)
        if (j == 1):
            outString = "1 %5s %10s %8s %5s %7s\n" %("#", "Name", "At.Weight", "Z", "Abundance")
            #outFile.write("1 # Name At.Weight Z Abundance\n")
            outFile.write(outString)
        elif (j == 2):
            outString = "0 %5s %10s %8s %5s" %("#", "Name", "At.Weight", "Nel")
            outString = outString + " n1 Z1 n2 Z2 ...\n"
            #outFile.write("0 # Name At.Weight Nel n1 Z1 n2 Z2 ...\n")
            outFile.write(outString)
        elif (j == 3):
            outFile.write("0 # Name At.Weight Chg Natom I.P. Log(2*g1/g0)\n")
        for i in range(nspec):
            ityp = type0[i]
            #outString = "i " + str(i) + " type " + str(type0[i]) + "\n"
            #outFile.write(outString)
            if (ityp == j):
                if (ityp == 1):
                    ii = iat[i]
                    #print("i ", i, " iat ", iat[i])
                    #outString = str(i) + " " + str(name[i]) + " " + str(awt[i]) + " " + str(zat[0][i]) + " " + str(comp[ii]) + "\n"
                    outString = "%5d %10s %8.3f %5d %7.2e\n" %(i, name[i], awt[i], zat[0][i], comp[ii])
                    outFile.write(outString)
                elif (ityp == 2):
                    nelt = nel[i]
                    #outString = str(i) + " " + str(name[i]) + " " + str(awt[i]) + " " + str(nelt) + "\n"
                    outString = "%5d %10s %8.3f %5d" %(i, name[i], awt[i], nelt)
                    #outFile.write(outString)
                    for k in range(nelt):
                        outString = outString + " " + str(nat[k][i]) + " " + str(zat[k][i])
                    outString += "\n"
                    outFile.write(outString)
                else:
                    outString = "%5d %10s %8.3f %6d %6d %10.3f %7.3f\n" %(i, name[i], awt[i], nch[i], neut[i], ip[i], logwt[i])
                    #outString = str(i) + " " + str(name[i]) + " " + str(awt[i]) + " " + str(nch[i]) + " " + str(neut[i]) + " " + str(ip[i]) + " " + str(logwt[i]) + "\n"
                    outFile.write(outString)
    """
    #cis: Try this:
    # NOTE(review): nlin1/nlin2/natom were used above as 0-based highest
    # indices; these increments presumably convert them to element/species
    # *counts* for the callers -- confirm against the consumers of /lin/.
    nlin1+=1
    nlin2+=1
    natom+=1
    return
| 18,369
| 32.278986
| 173
|
py
|
ChromaStarPy
|
ChromaStarPy-master/KappasMetal.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 14:48:00 2017
@author: ishort
"""
import math
import Useful
import PartitionFn
import ToolBox
#import numpy
#JB#
"""
#a function to create a cubic function fit extrapolation
def cubicFit(x,y):
coeffs = numpy.polyfit(x,y,3)
#returns an array of coefficents for the cubic fit of the form
#Ax^3 + Bx^2 + Cx + D as [A,B,C,D]
return coeffs
#this will work for any number of data points!
def valueFromFit(fit,x):
#return the value y for a given fit, at point x
return (fit[0]*(x**3)+fit[1]*(x**2)+fit[2]*x+fit[3])
"""
masterTemp=[130,500,3000,8000,10000]
#JB#
def masterMetal(numDeps, numLams, temp, lambdaScale, stagePops):
"""/* Metal b-f opacity routines taken from Moog (moogjul2014/, MOOGJUL2014.tar)
Chris Sneden (Universtiy of Texas at Austin) and collaborators
http://www.as.utexas.edu/~chris/moog.html
//From Moog source file Opacitymetals.f
*/"""
#//System.out.println("masterMetal called...");
#//From Moog source file Opacitymetals.f
#// From how values such as aC1[] are used in Moog file Opacit.f to compute the total opacity
#// and then the optical depth scale, I infer that they are extinction coefficients
#// in cm^-1
#// There does not seem to be any correction for stimulated emission
logE = math.log10(math.e)
masterBF = [ [ 0.0 for i in range(numDeps) ] for j in range(numLams) ]
logUC1 = [0.0 for i in range(5)]
logUMg1 = [0.0 for i in range(5)]
logUMg2 = [0.0 for i in range(5)]
logUAl1 = [0.0 for i in range(5)]
logUSi1 = [0.0 for i in range(5)]
logUSi2 = [0.0 for i in range(5)]
logUFe1 = [0.0 for i in range(5)]
logStatWC1 = 0.0
logStatWMg1 = 0.0
logStatWMg2 = 0.0
logStatWAl1 = 0.0
logStatWSi1 = 0.0
logStatWSi2 = 0.0
logStatWFe1 = 0.0
theta = 1.0
species = ""
logGroundPopsC1 = [0.0 for i in range(numDeps)]
logGroundPopsMg1 = [0.0 for i in range(numDeps)]
logGroundPopsMg2 = [0.0 for i in range(numDeps)]
logGroundPopsAl1 = [0.0 for i in range(numDeps)]
logGroundPopsSi1 = [0.0 for i in range(numDeps)]
logGroundPopsSi2 = [0.0 for i in range(numDeps)]
logGroundPopsFe1 = [0.0 for i in range(numDeps)]
#//
#// C I: Z=6 --> iZ=5:
aC1 = [0.0 for i in range(numDeps)]
#// Mg I: Z=12 --> iZ=11:
aMg1 = [0.0 for i in range(numDeps)]
#// Mg II: Z=12 --> iZ=11:
aMg2 = [0.0 for i in range(numDeps)]
#// Al I: Z=13 --> iZ=12:
aAl1 = [0.0 for i in range(numDeps)]
#// Si I: Z=14 --> iZ=13:
aSi1 = [0.0 for i in range(numDeps)]
#// Si II: Z=14 --> iZ =13:
aSi2 = [0.0 for i in range(numDeps)]
#// Fe I: Z=26 --> iZ=25
aFe1 = [0.0 for i in range(numDeps)]
species = "CI"
logUC1 = PartitionFn.getPartFn2(species)
species = "MgI"
logUMg1 = PartitionFn.getPartFn2(species)
species = "MgII"
logUMg2 = PartitionFn.getPartFn2(species)
species = "AlI"
logUAl1 = PartitionFn.getPartFn2(species)
species = "SiI"
logUSi1 = PartitionFn.getPartFn2(species)
species = "SiII"
logUSi2 = PartitionFn.getPartFn2(species)
species = "FeI"
logUFe1 = PartitionFn.getPartFn2(species)
#//System.out.println("iD PpC1 PpMg1 PpMg2 PpAl1 PpSi1 PpSi2 PpFe1");
for iD in range(numDeps):
#//neutral stage
#//Assumes ground state stat weight, g_1, is 1.0
#theta = 5040.0 / temp[0][iD]
#// U[0]: theta = 1.0, U[1]: theta = 0.5
"""
if (theta <= 0.5):
logStatWC1 = logUC1[1]
logStatWMg1 = logUMg1[1]
logStatWMg2 = logUMg2[1]
logStatWAl1 = logUAl1[1]
logStatWSi1 = logUSi1[1]
logStatWSi2 = logUSi2[1]
logStatWFe1 = logUFe1[1]
elif ( (theta < 1.0) and (theta > 0.5) ):
logStatWC1 = ( (theta-0.5) * logUC1[0] ) + ( (1.0-theta) * logUC1[1] )
#//divide by common factor of interpolation interval of 0.5 = (1.0 - 0.5):
logStatWC1 = 2.0 * logStatWC1
logStatWMg1 = ( (theta-0.5) * logUMg1[0] ) + ( (1.0-theta) * logUMg1[1] );
logStatWMg1 = 2.0 * logStatWMg1;
logStatWMg2 = ( (theta-0.5) * logUMg2[0] ) + ( (1.0-theta) * logUMg2[1] );
logStatWMg2 = 2.0 * logStatWMg2;
logStatWAl1 = ( (theta-0.5) * logUAl1[0] ) + ( (1.0-theta) * logUAl1[1] );
logStatWAl1 = 2.0 * logStatWAl1;
logStatWSi1 = ( (theta-0.5) * logUSi1[0] ) + ( (1.0-theta) * logUSi1[1] );
logStatWSi1 = 2.0 * logStatWSi1;
logStatWSi2 = ( (theta-0.5) * logUSi2[0] ) + ( (1.0-theta) * logUSi2[1] );
logStatWSi2 = 2.0 * logStatWSi2;
logStatWFe1 = ( (theta-0.5) * logUFe1[0] ) + ( (1.0-theta) * logUFe1[1] );
logStatWFe1 = 2.0 * logStatWFe1;
else:
logStatWC1 = logUC1[0]
logStatWMg1 = logUMg1[0]
logStatWMg2 = logUMg2[0]
logStatWAl1 = logUAl1[0]
logStatWSi1 = logUSi1[0]
logStatWSi2 = logUSi2[0]
logStatWFe1 = logUFe1[0]
"""
thisTemp = temp[0][iD]
#JB#
logWC1Fit = ToolBox.cubicFit(masterTemp,logUC1)
logStatWC1 = ToolBox.valueFromFit(logWC1Fit,thisTemp)
logWMg1Fit = ToolBox.cubicFit(masterTemp,logUMg1)
logStatWMg1 = ToolBox.valueFromFit(logWMg1Fit,thisTemp)
logWSi1Fit = ToolBox.cubicFit(masterTemp,logUSi1)
logStatWSi1 = ToolBox.valueFromFit(logWSi1Fit,thisTemp)
logWMg2Fit = ToolBox.cubicFit(masterTemp,logUMg2)
logStatWMg2 = ToolBox.valueFromFit(logWMg2Fit,thisTemp)
logWSi2Fit = ToolBox.cubicFit(masterTemp,logUSi2)
logStatWSi2 = ToolBox.valueFromFit(logWSi2Fit,thisTemp)
logWFe1Fit = ToolBox.cubicFit(masterTemp,logUFe1)
logStatWFe1 = ToolBox.valueFromFit(logWFe1Fit,thisTemp)
logWAl1Fit = ToolBox.cubicFit(masterTemp,logUAl1)
logStatWAl1 = ToolBox.valueFromFit(logWAl1Fit,thisTemp)
#logStatWC1Fun = spline(masterTemp,logUC1)
#logStatWC1=logStatWC1Fun(thisTemp)
#logStatWMg1Fun = spline(masterTemp,logUMg1)
#logStatWMg1=logStatWMg1Fun(thisTemp)
#logStatWMg2Fun = spline(masterTemp,logUMg2)
#logStatWMg2=logStatWMg2Fun(thisTemp)
#logStatWAl1Fun = spline(masterTemp,logUAl1)
#logStatWAl1=logStatWAl1Fun(thisTemp)
#logStatWSi1Fun = spline(masterTemp,logUSi1)
#logStatWSi1=logStatWSi1Fun(thisTemp)
#logStatWSi2Fun = spline(masterTemp,logUSi2)
#logStatWSi2=logStatWSi2Fun(thisTemp)
#logStatWFe1Fun = spline(masterTemp,logUFe1)
#logStatWFe1=logStatWFe1Fun(thisTemp)
#JB#
#// NEW Interpolation involving temperature for new partition function: lburns
thisTemp = temp[0][iD]
if (thisTemp <= 130.0):
logStatWC1 = logUC1[0]
logStatWMg1 = logUMg1[0]
logStatWMg2 = logUMg2[0]
logStatWAl1 = logUAl1[0]
logStatWSi1 = logUSi1[0]
logStatWSi2 = logUSi2[0]
logStatWFe1 = logUFe1[0]
if (thisTemp >= 10000.0):
logStatWC1 = logUC1[4]
logStatWMg1 = logUMg1[4]
logStatWMg2 = logUMg2[4]
logStatWAl1 = logUAl1[4]
logStatWSi1 = logUSi1[4]
logStatWSi2 = logUSi2[4]
logStatWFe1 = logUFe1[4]
"""
elif (thisTemp > 130 and thisTemp <= 500):
#// Add in interpolation here lburns
logStatWC1 = logUC1[1] * (thisTemp - 130)/(500 - 130) \
+ logUC1[0] * (500 - thisTemp)/(500 - 130)
logStatWMg1 = logUMg1[1] * (thisTemp - 130)/(500 - 130) \
+ logUMg1[0] * (500 - thisTemp)/(500 - 130)
logStatWMg2 = logUMg2[1] * (thisTemp - 130)/(500 - 130) \
+ logUMg2[0] * (500 - thisTemp)/(500 - 130)
logStatWAl1 = logUAl1[1] * (thisTemp - 130)/(500 - 130) \
+ logUAl1[0] * (500 - thisTemp)/(500 - 130)
logStatWSi1 = logUSi1[1] * (thisTemp - 130)/(500 - 130) \
+ logUSi1[0] * (500 - thisTemp)/(500 - 130)
logStatWSi2 = logUSi2[1] * (thisTemp - 130)/(500 - 130) \
+ logUSi2[0] * (500 - thisTemp)/(500 - 130)
logStatWFe1 = logUFe1[1] * (thisTemp - 130)/(500 - 130) \
+ logUFe1[0] * (500 - thisTemp)/(500 - 130)
elif (thisTemp > 500 and thisTemp <= 3000):
logStatWC1 = logUC1[2] * (thisTemp - 500)/(3000 - 500) \
+ logUC1[1] * (3000 - thisTemp)/(3000 - 500)
logStatWMg1 = logUMg1[2] * (thisTemp - 500)/(3000 - 500) \
+ logUMg1[1] * (3000 - thisTemp)/(3000 - 500)
logStatWMg2 = logUMg2[2] * (thisTemp - 500)/(3000 - 500) \
+ logUMg2[1] * (3000 - thisTemp)/(3000 - 500)
logStatWAl1 = logUAl1[2] * (thisTemp - 500)/(3000 - 500) \
+ logUAl1[1] * (3000 - thisTemp)/(3000 - 500)
logStatWSi1 = logUSi1[2] * (thisTemp - 500)/(3000 - 500) \
+ logUSi1[1] * (3000 - thisTemp)/(3000 - 500)
logStatWSi2 = logUSi2[2] * (thisTemp - 500)/(3000 - 500) \
+ logUSi2[1] * (3000 - thisTemp)/(3000 - 500)
logStatWFe1 = logUFe1[2] * (thisTemp - 500)/(3000 - 500) \
+ logUFe1[1] * (3000 - thisTemp)/(3000 - 500)
elif (thisTemp > 3000 and thisTemp <= 8000):
logStatWC1 = logUC1[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUC1[2] * (8000 - thisTemp)/(8000 - 3000)
logStatWMg1 = logUMg1[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUMg1[2] * (8000 - thisTemp)/(8000 - 3000)
logStatWMg2 = logUMg2[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUMg2[2] * (8000 - thisTemp)/(8000 - 3000)
logStatWAl1 = logUAl1[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUAl1[2] * (8000 - thisTemp)/(8000 - 3000)
logStatWSi1 = logUSi1[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUSi1[2] * (8000 - thisTemp)/(8000 - 3000)
logStatWSi2 = logUSi2[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUSi2[2] * (8000 - thisTemp)/(8000 - 3000)
logStatWFe1 = logUFe1[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUFe1[2] * (8000 - thisTemp)/(8000 - 3000)
elif (thisTemp > 8000 and thisTemp < 10000):
logStatWC1 = logUC1[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUC1[3] * (10000 - thisTemp)/(10000 - 8000)
logStatWMg1 = logUMg1[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUMg1[3] * (10000 - thisTemp)/(10000 - 8000)
logStatWMg2 = logUMg2[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUMg2[3] * (10000 - thisTemp)/(10000 - 8000)
logStatWAl1 = logUAl1[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUAl1[3] * (10000 - thisTemp)/(10000 - 8000)
logStatWSi1 = logUSi1[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUSi1[3] * (10000 - thisTemp)/(10000 - 8000)
logStatWSi2 = logUSi2[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUSi2[3] * (10000 - thisTemp)/(10000 - 8000)
logStatWFe1 = logUFe1[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUFe1[3] * (10000 - thisTemp)/(10000 - 8000)
else:
#// for temperatures greater than or equal to 10000
logStatWC1 = logUC1[4]
logStatWMg1 = logUMg1[4]
logStatWMg2 = logUMg2[4]
logStatWAl1 = logUAl1[4]
logStatWSi1 = logUSi1[4]
logStatWSi2 = logUSi2[4]
logStatWFe1 = logUFe1[4]
"""
logGroundPopsC1[iD] = stagePops[5][0][iD] - logStatWC1
logGroundPopsMg1[iD] = stagePops[11][0][iD] - logStatWMg1
logGroundPopsMg2[iD] = stagePops[11][1][iD] - logStatWMg2
logGroundPopsAl1[iD] = stagePops[12][0][iD] - logStatWAl1
logGroundPopsSi1[iD] = stagePops[13][0][iD] - logStatWSi1
logGroundPopsSi2[iD] = stagePops[13][1][iD] - logStatWSi2
logGroundPopsFe1[iD] = stagePops[25][0][iD] - logStatWFe1
#// if (iD%5 == 1){
#// System.out.format("%03d, %21.15f, %21.15f, %21.15f, %21.15f, %21.15f, %21.15f, %21.15f %n",
#// iD, logE*(logGroundPopsC1[iD]+temp[1][iD]+Useful.logK()),
#// logE*(logGroundPopsMg1[iD]+temp[1][iD]+Useful.logK()),
#// logE*(logGroundPopsMg2[iD]+temp[1][iD]+Useful.logK()),
#// logE*(logGroundPopsAl1[iD]+temp[1][iD]+Useful.logK()),
#// logE*(logGroundPopsSi1[iD]+temp[1][iD]+Useful.logK()),
#// logE*(logGroundPopsSi2[iD]+temp[1][iD]+Useful.logK()),
#// logE*(logGroundPopsFe1[iD]+temp[1][iD]+Useful.logK()));
#id loop// }
#double waveno; //cm??
#double freq, logFreq, kapBF;
#double stimEmExp, stimEmLogExp, stimEmLogExpHelp, stimEm;
#//System.out.println("iD iL lambda stimEm aC1 aMg1 aMg2 aAl1 aSi1 aSi2 aFe1 ");
for iL in range(numLams):
#print("iL ", iL)
#//
#//initialization:
for i in range(numDeps):
aC1[i] = 0.0
aMg1[i] = 0.0
aMg2[i] = 0.0
aAl1[i] = 0.0
aSi1[i] = 0.0
aSi2[i] = 0.0
aFe1[i] = 0.0
waveno = 1.0 / lambdaScale[iL] #//cm^-1??
logFreq = Useful.logC() - math.log(lambdaScale[iL])
freq = math.exp(logFreq)
#if (iL%20 == 1):
# print("freq ", freq)
stimEmLogExpHelp = Useful.logH() + logFreq - Useful.logK()
#//System.out.println("Calling opacC1 from masterMetal...");
if (freq >= 2.0761e15):
aC1 = opacC1(numDeps, temp, lambdaScale[iL], logGroundPopsC1)
if (freq >= 2.997925e+14):
#print("opacMg1 called")
aMg1 = opacMg1(numDeps, temp, lambdaScale[iL], logGroundPopsMg1)
if (freq >= 2.564306e15):
aMg2 = opacMg2(numDeps, temp, lambdaScale[iL], logGroundPopsMg2)
if (freq >= 1.443e15):
aAl1 = opacAl1(numDeps, temp, lambdaScale[iL], logGroundPopsAl1)
if (freq >= 2.997925e+14):
#print("opacSi1 called")
aSi1 = opacSi1(numDeps, temp, lambdaScale[iL], logGroundPopsSi1)
if (freq >= 7.6869872e14):
aSi2 = opacSi2(numDeps, temp, lambdaScale[iL], logGroundPopsSi2)
if (waveno >= 21000.0):
aFe1 = opacFe1(numDeps, temp, lambdaScale[iL], logGroundPopsFe1)
for iD in range(numDeps):
kapBF = 1.0e-99 #minimum safe value
stimEmLogExp = stimEmLogExpHelp - temp[1][iD]
stimEmExp = -1.0 * math.exp(stimEmLogExp)
stimEm = ( 1.0 - math.exp(stimEmExp) ) #//LTE correction for stimulated emission
kapBF = kapBF + aC1[iD] + aMg1[iD] + aMg2[iD] + aAl1[iD] + aSi1[iD] + aSi2[iD] + aFe1[iD]
#kapBF = aC1[iD] + aMg2[iD] + aAl1[iD] + aSi2[iD] + aFe1[iD]
#if ( (iL%20 == 1) and (iD%10 == 1) ):
#print("iL ", iL, " iD ", iD, " stimEm ", stimEm, " kapBF ", kapBF)
# print("aMg1 ", aMg1[iD], " aSi1 ", aSi1[iD])
masterBF[iL][iD] = math.log(kapBF) + math.log(stimEm)
#// if ( (iD%10 == 0) && (iL%10 == 0) ) {
#// System.out.format("%03d, %03d, %21.15f, %21.15f, %21.15f, %21.15f, %21.15f, %21.15f, %21.15f, %21.15f, %21.15f, %n",
#// iD, iL, lambdaScale[iL], Math.log10(stimEm), Math.log10(aC1[iD]), Math.log10(aMg1[iD]), Math.log10(aMg2[iD]), Math.log10(aAl1[iD]), Math.log10(aSi1[iD]), Math.log10(aSi2[iD]), Math.log10(aFe1[iD]));
#// }
#} //iD
#} //iL
return masterBF
#} //end method masterMetal
def opacC1(numDeps, temp, lambda2, logGroundPops):
    """Compute the bound-free absorption coefficient due to C I at each depth.

    Parameters
    ----------
    numDeps : int
        Number of atmospheric depth points.
    temp : 2 x numDeps array
        temp[0][i] is T in K; temp[1][i] is ln(T).
    lambda2 : float
        Wavelength in cm.
    logGroundPops : list of float
        ln of the ground-state C I number density at each depth.

    Returns
    -------
    list of float
        Absorption coefficient at each depth (zero below the 2.0761e15 Hz
        threshold).

    Cross-section data follow Luo & Pradhan (1989, J. Phys. B 22, 3377)
    and Burke & Taylor (1979, J. Phys. B 12, 2971), as in the original
    MOOG-style routine.
    """
    #//cross-section is zero below threshold, so initialize:
    aC1 = [0.0 for i in range(numDeps)]
    waveno = 1.0 / lambda2  #//wavenumber, cm^-1
    # FIX: Useful.c is a function (called as Useful.c() elsewhere in this
    # file, e.g. opacMg1/opacSi1); the original bare `Useful.c / lambda2`
    # divided by the function object itself and would raise a TypeError.
    freq = Useful.c() / lambda2

    # kT in eV at each depth:
    tkev = [0.0 for i in range(numDeps)]
    for i in range(numDeps):
        logTkev = temp[1][i] + Useful.logK() - Useful.logEv()
        tkev[i] = math.exp(logTkev)

    #//c initialize some quantities for each new model atmosphere:
    # Boltzmann factors for the excited 1D (1240 A edge, g=5) and
    # 1S (1444 A edge, g=1) parent terms:
    c1240 = [0.0 for i in range(numDeps)]
    c1444 = [0.0 for i in range(numDeps)]
    for i in range(numDeps):
        c1240[i] = 5.0 * math.exp(-1.264/tkev[i])
        c1444[i] = math.exp(-2.683/tkev[i])

    ryd = 109732.298  #//Rydberg constant in cm^-1

    # Per-edge cross-sections (depend on frequency only):
    xs0 = 0.0
    xs1 = 0.0
    xd0 = 0.0
    xd1 = 0.0
    xd2 = 0.0
    x1444 = 0.0
    x1240 = 0.0
    x1100 = 0.0

    #//c P2 3P 1 edge (1100 A)
    #//c NOTE (from the original): the call to seaton here is unverified,
    #//c but it only affects wavelengths below 1100 A.
    if (freq >= 2.7254e15):
        arg = -16.80 - ( (waveno-90777.000)/3.0/ryd )
        x1100 = math.pow(10.0, arg) * seaton(2.7254e15, 1.219e-17, 2.0e0, 3.317e0, freq)

    #//c P2 1D 2 edge (1240 A): background power law plus two resonance terms
    if (freq >= 2.4196e15):
        arg = -16.80 - ( (waveno-80627.760)/3.0/ryd )
        xd0 = math.pow(10.0, arg)
        eeps = (waveno-93917.0) * 2.0/9230.0
        aa = 22.0e-18
        bb = 26.0e-18
        xd1 = ((aa*eeps) + bb) / (math.pow(eeps, 2)+1.0)
        eeps = (waveno-111130.0) * 2.0/2743.0
        aa = -10.5e-18
        bb = 46.0e-18
        xd2 = ( (aa*eeps) + bb) / (math.pow(eeps, 2)+1.0)
        x1240 = xd0 + xd1 + xd2

    #//c P2 1S 3 edge (1444 A): background power law plus one resonance term
    if (freq >= 2.0761e15):
        arg = -16.80 - ( (waveno-69172.400)/3.0/ryd )
        xs0 = math.pow(10.0, arg)
        eeps = (waveno-97700.0) * 2.0/2743.0
        aa = 68.0e-18
        bb = 118.0e-18
        xs1 = ( (aa*eeps) + bb) / (math.pow(eeps, 2)+1.0)
        x1444 = xs0 + xs1

    # Combine the edges, weighting each by its parent-level population:
    for i in range(numDeps):
        if (freq >= 2.0761e15):
            sigma = (x1100*9.0 + x1240*c1240[i] + x1444*c1444[i])
            aC1[i] = sigma * math.exp(logGroundPops[i])

    return aC1
def seaton(freq0, xsect, power, a, freq):
    """Generic Seaton-style representation of a bound-free cross-section
    above an ionization edge.

    freq0 : threshold frequency of the edge
    xsect : cross-section at the threshold
    power : exponent controlling the frequency fall-off (the effective
            exponent applied to freq0/freq is floor(2*power + 0.01))
    a     : shape parameter
    freq  : frequency at which to evaluate the cross-section
    """
    ratio = freq0 / freq
    # Integer exponent, with a small epsilon to guard against float
    # round-off before truncation:
    exponent = int(math.floor(2.0 * power + 0.01))
    shape = a + ratio * (1.0 - a)
    return xsect * shape * math.sqrt(math.pow(ratio, exponent))
def opacMg1(numDeps, temp, lambda2, logGroundPops):
    """Compute the bound-free absorption due to Mg I at each depth.

    Log cross-sections ("peach" tables) are tabulated on a grid of 7
    temperatures (4000-10000 K) x 15 wavelength intervals and bilinearly
    interpolated in ln(T) and ln(frequency).

    numDeps: number of depth points; temp[0][i] is T (K) and temp[1][i]
    is ln(T); lambda2 is the wavelength in cm; logGroundPops is ln of the
    ground-state Mg I population per depth.  Returns one absorption
    coefficient per depth (zero below the 2.997925e14 Hz threshold).
    """
    #//System.out.println("opacMg1 called...");
    sigma = 0.0
    aMg1 = [0.0 for i in range(numDeps)]
    #//cross-section is zero below threshold, so initialize:
    for i in range(numDeps):
        aMg1[i] = 0.0
    freq = Useful.c() / lambda2
    #//System.out.println("opacMg1: lambda, freq " + lambda + " " + freq);
    freqlg = math.log(freq) #//base e?
    #// include 'Atmos.com'
    #// include 'Kappa.com'
    #// real*8 flog(9), freqMg(7), peach(7,15), xx(7), tlg(7)
    #// real*8 dt(100)
    #// integer nt(100)
    xx = [0.0 for i in range(7)]
    dt = [0.0 for i in range(100)]
    nt = [0 for i in range(100)]
    #// data peach/
    # //double[][] peach = new double[7][15];
    # log10 cross-section table: one row per temperature, 15 wavelength bins
    #//c 4000 K
    peach0 = [ -42.474, -41.808, -41.273, -45.583, -44.324, -50.969, -50.633, -53.028, -51.785, -52.285, -52.028, -52.384, -52.363, -54.704, -54.359 ]
    #//c 5000 K
    peach1 = [ -42.350, -41.735, -41.223, -44.008, -42.747, -48.388, -48.026, -49.643, -48.352, -48.797, -48.540, -48.876, -48.856, -50.772, -50.349 ]
    #//c 6000 K
    peach2 = [ -42.109, -41.582, -41.114, -42.957, -41.694, -46.630, -46.220, -47.367, -46.050, -46.453, -46.196, -46.513, -46.493, -48.107, -47.643 ]
    #//c 7000 K
    peach3 = [ -41.795, -41.363, -40.951, -42.205, -40.939, -45.344, -44.859, -45.729, -44.393, -44.765, -44.507, -44.806, -44.786, -46.176, -45.685 ]
    #//c 8000 K
    peach4 = [ -41.467, -41.115, -40.755, -41.639, -40.370, -44.355, -43.803, -44.491, -43.140, -43.486, -43.227, -43.509, -43.489, -44.707, -44.198 ]
    #//c 9000 K
    peach5 = [ -41.159, -40.866, -40.549, -41.198, -39.925, -43.568, -42.957, -43.520, -42.157, -42.480, -42.222, -42.488, -42.467, -43.549, -43.027 ]
    #//c 10000 K
    peach6 = [ -40.883, -40.631, -40.347, -40.841, -39.566, -42.924, -42.264, -42.736, -41.363, -41.668, -41.408, -41.660, -41.639, -42.611, -42.418 ]
    peach = [ peach0, peach1, peach2, peach3, peach4, peach5, peach6 ]
    #// double[] freqMg = new double[7];
    # edge frequencies separating the wavelength intervals:
    freqMg = [ 1.9341452e15, 1.8488510e15, 1.1925797e15, \
               7.9804046e14, 4.5772110e14, 4.1440977e14, \
               4.1113514e14 ]
    #// double[] flog = new double[9];
    # ln(frequency) grid points used for the frequency interpolation:
    flog = [ 35.23123, 35.19844, 35.15334, 34.71490, 34.31318, \
             33.75728, 33.65788, 33.64994, 33.43947 ]
    #// double[] tlg = new double[7];
    # ln(T) at the 7 tabulated temperatures (4000-10000 K):
    tlg = [ 8.29405, 8.51719, 8.69951, 8.85367, 8.98720, 9.10498, \
            9.21034 ] #//base e?
    freq1 = 0.0
    #//modcount/0/
    #int thelp, nn;
    #double dd, dd1;
    #//double log10E = Math.log10(Math.E);
    #//c initialize some quantities for each new model atmosphere
    #// if (modelnum .ne. modcount) then
    #// modcount = modelnum
    # //System.out.println("opacMg1 call, lambda " + lambda);
    # Per-depth temperature bracketing: nt[i] is the lower table row,
    # dt[i] the fractional position between rows nt[i] and nt[i]+1:
    for i in range(numDeps):
        thelp = int(math.floor((temp[0][i]/1000.0))) - 3
        #//System.out.println("i " + i + " temp[0] " + temp[0][i] + " thelp " + thelp);
        #//n = Math.max( Math.min(6, thelp-3), 1 );
        #// -1 term to adjust from FORTRAN to Java subscripting
        nn = max( min(6, thelp), 1 ) - 1 #// -1 term to adjust from FORTRAN to Java subscripting
        nt[i] = nn
        dt[i] = (temp[1][i]-tlg[nn]) / (tlg[nn+1]-tlg[nn]) #//base e?
        #//System.out.println(" nn " + nn + " temp[1] " + temp[1][i] + " tlg[nn+1] " + tlg[nn+1] + " tlg[nn] " + tlg[nn] + " dt[i] " + dt[i]);
    #// endif
    #//c initialize some quantities for each new model atmosphere or new frequency;
    #//if (modelnum.ne.modcount .or. freq.ne.freq1) then
    freq1 = freq
    #// do n=1,7
    #// if (freq .gt. freqMg(n)) go to 23
    #// enddo
    #//n = 7;
    #// n = 0;
    #// while ( (freq <= freqMg[n]) && (n < 6) ) {
    #// n++;
    #// }
    # Locate the wavelength interval: nn counts edges with freq <= freqMg[n]
    nn = 0
    for n in range(7):
        #//System.out.println("freq " + freq + " n " + n + " freqMg[n] " + freqMg[n]);
        if (freq > freqMg[n]):
            break
        nn+=1
    if (freq <= freqMg[6]):
        nn = 7
    #//System.out.println("nn " + nn + " flog[nn+1] " + flog[nn+1] + " flog[nn] " + flog[nn]);
    # dd is the fractional position in ln(frequency) between grid points:
    dd = (freqlg-flog[nn]) / (flog[nn+1]-flog[nn])
    #//System.out.println("dd " + dd + " freqlg " + freqlg);
    #//if (n .gt. 2) n = 2*n -2
    #// -1 term to adjust from FORTRAN to Java subscripting
    #//if (n > 2){
    if (nn > 1):
        #// -1 term to adjust from FORTRAN to Java subscripting
        #//n = 2*n - 2 - 1;
        nn = 2*nn - 2 #// - 1;
        dd1 = 1.0 - dd
        #//do it=1,7
        #//System.out.println("nn " + nn + " dd1 " + dd1);
        # xx[it]: frequency-interpolated log cross-section at each tabulated T
        for it in range(7):
            xx[it] = peach[it][nn+1]*dd + peach[it][nn]*dd1
            #//System.out.println("it " + it + " peach[it][nn+1] " + peach[it][nn+1] + " peach[it][nn] " + peach[it][nn] + " xx[it] " + xx[it]);
        #//enddo
    #//endif
    #//do i=1,ntau
    # Temperature interpolation at each depth, then weight by population:
    for i in range(numDeps):
        #//if (freq .ge. 2.997925d+14) then
        if (freq >= 2.997925e+14):
            nn = nt[i]
            sigma = math.exp( (xx[nn]*(1.0e0-dt[i])) + (xx[nn+1]*dt[i]) )
            aMg1[i] = sigma * math.exp(logGroundPops[i])
            #//System.out.println("i " + i + " sigma " + sigma + " aMg1 " + aMg1[i]);
        #//endif
    #//enddo
    return aMg1;
def opacMg2(numDeps, temp, lambda2, logGroundPops):
    """Compute the bound-free absorption due to Mg II at each depth.

    There are two photoionization edges: one at 824 A (Seaton-style
    cross-section, ground state, g=2) and one at 1169 A (excited 3p
    state, g=6, chi=4.43 eV, weighted by a Boltzmann factor).

    Parameters
    ----------
    numDeps : int
        Number of atmospheric depth points.
    temp : 2 x numDeps array
        temp[0][i] is T in K; temp[1][i] is ln(T).
    lambda2 : float
        Wavelength in cm.
    logGroundPops : list of float
        ln of the ground-state Mg II number density at each depth.

    Returns
    -------
    list of float
        Absorption coefficient at each depth (zero below threshold).
    """
    #//cross-section is zero below threshold, so initialize:
    aMg2 = [0.0 for i in range(numDeps)]
    # FIX: Useful.c is a function (called as Useful.c() elsewhere in this
    # file); the original bare `Useful.c / lambda2` divided by the function
    # object itself and would raise a TypeError.
    freq = Useful.c() / lambda2

    # kT in eV at each depth:
    tkev = [0.0 for i in range(numDeps)]
    for i in range(numDeps):
        logTkev = temp[1][i] + Useful.logK() - Useful.logEv()
        tkev[i] = math.exp(logTkev)

    # Boltzmann factor for the excited state behind the 1169 A edge.
    # FIX: sized by numDeps instead of the original hard-coded 100 so
    # models with more than 100 depth points do not overrun the list.
    c1169 = [6.0 * math.exp(-4.43e+0/tkev[i]) for i in range(numDeps)]

    #//c there are two edges, one at 824 A and the other at 1169 A
    if (freq >= 3.635492e15):
        x824 = seaton(3.635492e15, 1.40e-19, 4.0e0, 6.7e0, freq)
    else:
        x824 = 1.0e-99
    if (freq >= 2.564306e15):
        x1169 = 5.11e-19 * math.pow( (2.564306e15/freq), 3.0)
    else:
        x1169 = 1.0e-99

    # Combine the edges, weighting each by its level population:
    for i in range(numDeps):
        if (x1169 >= 1.0e-90):
            sigma = (x824*2.0 + x1169*c1169[i])
            aMg2[i] = sigma * math.exp(logGroundPops[i])

    return aMg2
def opacAl1(numDeps, temp, lambda2, logGroundPops):
    """Compute the bound-free absorption due to Al I at each depth.

    Single ground-state edge at 1.443e15 Hz (~2078 A); above threshold
    the cross-section falls off as (nu0/nu)^5 with statistical weight 6.

    Parameters
    ----------
    numDeps : int
        Number of atmospheric depth points.
    temp : 2 x numDeps array
        Unused here, but kept for interface consistency with the other
        opacXxx routines.
    lambda2 : float
        Wavelength in cm.
    logGroundPops : list of float
        ln of the ground-state Al I number density at each depth.

    Returns
    -------
    list of float
        Absorption coefficient at each depth (zero below threshold).
    """
    #//cross-section is zero below threshold, so initialize:
    aAl1 = [0.0 for i in range(numDeps)]
    # FIX: Useful.c is a function (called as Useful.c() elsewhere in this
    # file); the original bare `Useful.c / lambda2` divided by the function
    # object itself and would raise a TypeError.
    freq = Useful.c() / lambda2
    if (freq >= 1.443e15):
        # The cross-section is depth-independent, so compute it once
        # instead of once per depth point:
        sigma = 6.0 * 6.5e-17 * math.pow((1.443e15/freq), 5.0)
        for i in range(numDeps):
            aAl1[i] = sigma * math.exp(logGroundPops[i])
    return aAl1
def opacSi1(numDeps, temp, lambda2, logGroundPops):
    """Compute the bound-free absorption due to Si I at each depth.

    Log cross-sections ("peach" tables) are tabulated on a grid of 9
    temperatures (4000-12000 K) x 19 wavelength intervals and bilinearly
    interpolated in ln(T) and ln(frequency).

    numDeps: number of depth points; temp[0][i] is T (K) and temp[1][i]
    is ln(T); lambda2 is the wavelength in cm; logGroundPops is ln of the
    ground-state Si I population per depth.  Returns one absorption
    coefficient per depth (zero below the 2.997925e14 Hz threshold).
    """
    #//System.out.println("opacSi1 called...");
    sigma = 0.0;
    aSi1 = [0.0 for i in range(numDeps)]
    #//cross-section is zero below threshold, so initialize:
    for i in range(numDeps):
        aSi1[i] = 0.0
    freq = Useful.c() / lambda2
    freqlg = math.log(freq) #//base e?
    #// include 'Atmos.com'
    #// include 'Kappa.com'
    xx = [0.0 for i in range(9)]
    dt = [0.0 for i in range(100)]
    nt = [0 for i in range(100)]
    #// save
    # log cross-section table: one row per temperature, 19 wavelength bins
    #//c 4000
    peach0 = [ 38.136, 37.834, 37.898, 40.737, 40.581, 45.521, 45.520, 55.068, 53.868, 54.133, 54.051, 54.442, 54.320, 55.691, 55.661, 55.973, 55.922, 56.828, 56.657 ]
    #//c 5000
    peach1 = [ 38.138, 37.839, 37.898, 40.319, 40.164, 44.456, 44.455, 51.783, 50.369, 50.597, 50.514, 50.854, 50.722, 51.965, 51.933, 52.193, 52.141, 52.821, 52.653 ]
    #//c 6000
    peach2 = [ 38.140, 37.843, 37.897, 40.047, 39.893, 43.753, 43.752, 49.553, 48.031, 48.233, 48.150, 48.455, 48.313, 49.444, 49.412, 49.630, 49.577, 50.110, 49.944 ]
    #//c 7000
    peach3 = [ 38.141, 37.847, 37.897, 39.855, 39.702, 43.254, 43.251, 47.942, 46.355, 46.539, 46.454, 46.733, 46.583, 47.615, 47.582, 47.769, 47.715, 48.146, 47.983 ]
    #//c 8000
    peach4 = [ 38.143, 37.850, 37.897, 39.714, 39.561, 42.878, 42.871, 46.723, 45.092, 45.261, 45.176, 45.433, 45.277, 46.221, 46.188, 46.349, 46.295, 46.654, 46.491 ]
    #//c 9000
    peach5 = [ 38.144, 37.853, 37.896, 39.604, 39.452, 42.580, 42.569, 45.768, 44.104, 44.262, 44.175, 44.415, 44.251, 45.119, 45.085, 45.226, 45.172, 45.477, 45.315 ]
    #//c 10000
    peach6 = [ 38.144, 37.855, 37.895, 39.517, 39.366, 42.332, 42.315, 44.997, 43.308, 43.456, 43.368, 43.592, 43.423, 44.223, 44.189, 44.314, 44.259, 44.522, 44.360 ]
    #//c 11000
    peach7 = [ 38.145, 37.857, 37.895, 39.445, 39.295, 42.119, 42.094, 44.360, 42.652, 42.790, 42.702, 42.912, 42.738, 43.478, 43.445, 43.555, 43.500, 43.730, 43.569 ]
    #//c 12000
    peach8 = [ 38.145, 37.858, 37.894, 39.385, 39.235, 41.930, 41.896, 43.823, 42.100, 42.230, 42.141, 42.340, 42.160, 42.848, 42.813, 42.913, 42.858, 43.061, 42.901 ]
    #// real*8 peach(9,19)
    # //double[][] peach = new double[9][19];
    peach = [ peach0, peach1, peach2, peach3, peach4, peach5, peach6, peach7, peach8 ]
    #//c 3P, 1D, 1S, 1D, 3D, 3F, 1D, 3P
    # edge frequencies separating the wavelength intervals:
    freqSi = [ 2.1413750e15, 1.9723165e15, 1.7879689e15, \
               1.5152920e15, 5.5723927e14, 5.3295914e14, \
               4.7886458e14, 4.7216422e14, 4.6185133e14 ]
    #//double[] flog = new double[11];
    # ln(frequency) grid points used for the frequency interpolation:
    flog = [ 35.45438, 35.30022, 35.21799, 35.11986, 34.95438, \
             33.95402, 33.90947, 33.80244, 33.78835, 33.76626, \
             33.70518 ]
    #//double[] tlg = new double[9];
    # ln(T) at the 9 tabulated temperatures (4000-12000 K):
    tlg = [ 8.29405, 8.51719, 8.69951, 8.85367, 8.98720, 9.10498, \
            9.21034, 9.30565, 9.39266 ]
    freq1 = 0.0
    #//, modcount/0/
    # int thelp, nn;
    # double dd, dd1;
    #//c initialize some quantities for each new model atmosphere
    #// if (modelnum .ne. modcount) then
    #// modcount = modelnum
    # //do i=1,ntau
    # Per-depth temperature bracketing: nt[i] is the lower table row,
    # dt[i] the fractional position between rows nt[i] and nt[i]+1:
    for i in range(numDeps):
        thelp = int(math.floor(temp[0][i]/1000.0)) - 3
        #// -1 term to adjust from FORTRAN to Java subscripting
        #//n = Math.max( Math.min(8, thelp-3), 1 );
        nn = max( min(8, thelp), 1 ) - 1
        nt[i] = nn
        dt[i] = (temp[1][i]-tlg[nn]) / (tlg[nn+1]-tlg[nn])
    #// endif
    #// initialize some quantities for each new model atmosphere or new frequency
    #//if (modelnum.ne.modcount .or. freq.ne.freq1) then
    freq1 = freq
    #// do n=1,9
    #// if (freq .gt. freqSi(n)) go to 23
    #// enddo
    #// n = 9;
    # // n = 0;
    # // while ( (freq <= freqSi[n]) && (n < 8) ) {
    # // n++;
    # // }
    # Locate the wavelength interval: nn counts edges with freq <= freqSi[n]
    nn = 0
    for n in range(9):
        if (freq > freqSi[n]):
            break
        nn+=1
    if (freq <= freqSi[8]):
        nn = 9
    #//
    # dd is the fractional position in ln(frequency) between grid points:
    dd = (freqlg-flog[nn]) / (flog[nn+1]-flog[nn])
    #// -1 term to adjust from FORTRAN to Java subscripting
    #//if (n > 2) {
    if (nn > 1):
        #// -1 term to adjust from FORTRAN to Java subscripting
        nn = 2*nn - 2; #// - 1 #// n already adjusted by this point?
        dd1 = 1.0 - dd
        # xx[it]: frequency-interpolated log cross-section at each tabulated T
        for it in range(9):
            xx[it] = peach[it][nn+1]*dd + peach[it][nn]*dd1
    #//endif
    # Temperature interpolation at each depth, then weight by population:
    for i in range(numDeps):
        if (freq >= 2.997925e+14):
            nn = nt[i]
            sigma = ( 9.0 * math.exp( -(xx[nn]*(1.-dt[i]) + xx[nn+1]*dt[i]) ) )
            aSi1[i] = sigma * math.exp(logGroundPops[i])
            #//System.out.println("i " + i + " sigma " + sigma + " aSi1 " + aSi1[i]);
    return aSi1
def opacSi2(numDeps, temp, lambda2, logGroundPops):
    """Compute the bound-free absorption due to Si II at each depth.

    Log cross-sections ("peach" tables) are tabulated on a grid of 6
    temperatures (10000-20000 K) x 14 wavelength intervals and bilinearly
    interpolated in ln(T) and ln(frequency).

    numDeps: number of depth points; temp[0][i] is T (K) and temp[1][i]
    is ln(T); lambda2 is the wavelength in cm; logGroundPops is ln of the
    ground-state Si II population per depth.  Returns one absorption
    coefficient per depth (zero below the 7.6869872e14 Hz threshold).
    """
    #//System.out.println("opacSi2 called...");
    sigma = 0.0;
    aSi2 = [0.0 for i in range(numDeps)]
    #//cross-section is zero below threshold, so initialize:
    for i in range(numDeps):
        aSi2[i] = 0.0
    freq = Useful.c() / lambda2
    freqlg = math.log(freq) #//base e?
    #int thelp, nn;
    #double dd, dd1;
    #// include 'Atmos.com'
    #// include 'Kappa.com'
    xx = [0.0 for i in range(6)]
    dt = [0.0 for i in range(100)]
    nt = [0 for i in range(100)]
    #/double peach = new double[6][14];
    # log cross-section table: one row per temperature, 14 wavelength bins
    #//c 10000
    peach0 = [ -43.8941, -42.2444, -40.6054, -54.2389, -50.4108, -52.0936, -51.9548, -54.2407, -52.7355, -53.5387, -53.2417, -53.5097, -54.0561, -53.8469 ]
    #//c 12000
    peach1 = [ -43.8941, -42.2444, -40.6054, -52.2906, -48.4892, -50.0741, -49.9371, -51.7319, -50.2218, -50.9189, -50.6234, -50.8535, -51.2365, -51.0256 ]
    #//c 14000
    peach2 = [ -43.8941, -42.2444, -40.6054, -50.8799, -47.1090, -48.5999, -48.4647, -49.9178, -48.4059, -49.0200, -48.7252, -48.9263, -49.1980, -48.9860 ]
    #//c 16000
    peach3 = [ -43.8941, -42.2444, -40.6054, -49.8033, -46.0672, -47.4676, -47.3340, -48.5395, -47.0267, -47.5750, -47.2810, -47.4586, -47.6497, -47.4368 ]
    #//c 18000
    peach4 = [ -43.8941, -42.2444, -40.6054, -48.9485, -45.2510, -46.5649, -46.4333, -47.4529, -45.9402, -46.4341, -46.1410, -46.2994, -46.4302, -46.2162 ]
    #//c 20000
    peach5 = [ -43.8941, -42.2444, -40.6054, -48.2490, -44.5933, -45.8246, -45.6947, -46.5709, -45.0592, -45.5082, -45.2153, -45.3581, -45.4414, -45.2266 ]
    peach = [ peach0, peach1, peach2, peach3, peach4, peach5 ]
    #//double[] freqSi = new double[7];
    # edge frequencies separating the wavelength intervals:
    freqSi = [ 4.9965417e15, 3.9466738e15, 1.5736321e15, \
               1.5171539e15, 9.2378947e14, 8.3825004e14, \
               7.6869872e14]
    #//c 2P, 2D, 2P, 2D, 2P
    # //double[] flog = new double[9];
    # ln(frequency) grid points used for the frequency interpolation:
    flog = [ 36.32984, 36.14752, 35.91165, 34.99216, 34.95561, \
             34.45951, 34.36234, 34.27572, 34.20161 ]
    #//double[] tlg = new double[6];
    # ln(T) at the 6 tabulated temperatures (10000-20000 K):
    tlg = [ 9.21034, 9.39266, 9.54681, 9.68034, 9.79813, 9.90349 ]
    freq1 = 0.0
    # // modcount/0/
    #//c set up some data upon first entrance with a new model atmosphere
    #// if (modelnum .ne. modcount) then
    #// modcount = modelnum
    # Per-depth temperature bracketing: nt[i] is the lower table row,
    # dt[i] the fractional position between rows nt[i] and nt[i]+1:
    for i in range(numDeps):
        thelp = int(math.floor(temp[0][i]/2000.0)) - 4
        #// -1 term to adjust from FORTRAN to Java subscripting
        #//n = Math.max( Math.min(5, thelp-4), 1 );
        nn = max( min(5, thelp), 1 ) - 1
        nt[i] = nn
        dt[i] = (temp[1][i]-tlg[nn]) / (tlg[nn+1]-tlg[nn])
    #// endif
    #//c initialize some quantities for each new model atmosphere or new frequency
    #// if (modelnum.ne.modcount .or. freq.ne.freq1) then
    freq1 = freq
    #// do n=1,7
    #// if (freq .gt. freqSi(n)) go to 23
    #// enddo
    #// n = 8
    # //n = 0;
    # //while ( (freq <= freqSi[n]) && (n < 6) ) {
    # // n++;
    # // }
    # Locate the wavelength interval: nn counts edges with freq <= freqSi[n]
    nn = 0
    for n in range(7):
        if (freq > freqSi[n]):
            break
        nn+=1
    if (freq <= freqSi[6]):
        nn = 7
    #//
    #//
    # dd is the fractional position in ln(frequency) between grid points:
    dd = (freqlg-flog[nn]) / (flog[nn+1]-flog[nn])
    #// -1 term to adjust from FORTRAN to Java subscripting
    #//if (n > 2){
    if (nn > 1):
        #// -1 term to adjust from FORTRAN to Java subscripting
        #//n = 2*n - 2;
        nn = 2*nn - 2 #// - 1; //n already adjusted by this point?
        #// -1 term to adjust from FORTRAN to Java subscripting
        #//if (n == 14){
        if (nn == 13):
            #// -1 term to adjust from FORTRAN to Java subscripting
            #//n = 13;
            nn = 12
        dd1 = 1.0 - dd
        # xx[it]: frequency-interpolated log cross-section at each tabulated T
        for it in range(6):
            xx[it] = peach[it][nn+1]*dd + peach[it][nn]*dd1
    #// endif
    # Temperature interpolation at each depth, then weight by population:
    for i in range(numDeps):
        if (freq >= 7.6869872e14):
            nn = nt[i]
            sigma = ( 6.0 * math.exp(xx[nn]*(1.0-dt[i]) + xx[nn+1]*dt[i]) )
            aSi2[i] = sigma * math.exp(logGroundPops[i])
            #//System.out.println("i " + i + " sigma " + sigma + " aSi2 " + aSi2[i]);
    return aSi2
def opacFe1(numDeps, temp, lambda2, logGroundPops):
    """Compute the bound-free absorption due to Fe I at each depth.

    Sums contributions from 48 tabulated Fe I levels, each with
    statistical weight gg[k], excitation energy ee[k] (cm^-1), and edge
    wavenumber wno[k] (cm^-1).  Absorption begins longward of 4762 A
    (wavenumber >= 21000 cm^-1).

    numDeps: number of depth points; temp[0][i] is T (K) and temp[1][i]
    is ln(T); lambda2 is the wavelength in cm; logGroundPops is ln of
    the Fe I population per depth.  Returns one coefficient per depth.

    NOTE(review): the accumulator resets flagged in the inline comments
    below make the sum over the 48 levels NON-cumulative (only level
    k=47 survives); this was a deliberate workaround by the original
    author ("my aFe is blowing up") -- confirm before changing.
    """
    #//System.out.println("opacFe1 called...");
    sigma = 0.0
    aFe1 = [0.0 for i in range(numDeps)]
    #//cross-section is zero below threshold, so initialize:
    for i in range(numDeps):
        aFe1[i] = 0.0
    waveno = 1.0 / lambda2 #//cm^-1??
    freq = Useful.c() / lambda2
    #// include 'Atmos.com'
    #// include 'Kappa.com'
    # //real*8 bolt(48,100), gg(48), ee(48), wno(48), xsect(48)
    #// double[] gg = new double[48];
    # bolt[k][i]: Boltzmann population factor of level k at depth i
    bolt = [ [ 0.0 for i in range(100) ] for j in range(48) ]
    xsect = [ 0.0 for i in range(48) ]
    # statistical weights of the 48 levels:
    gg = [25.0, 35.0, 21.0, 15.0, 9.0, 35.0, 33.0, 21.0, 27.0, 49.0, 9.0, 21.0, 27.0, 9.0, 9.0, \
          25.0, 33.0, 15.0, 35.0, 3.0, 5.0, 11.0, 15.0, 13.0, 15.0, 9.0, 21.0, 15.0, 21.0, 25.0, 35.0, \
          9.0, 5.0, 45.0, 27.0, 21.0, 15.0, 21.0, 15.0, 25.0, 21.0, 35.0, 5.0, 15.0, 45.0, 35.0, 55.0, 25.0]
    #// double[] ee = new double[48];
    # excitation energies of the levels, cm^-1:
    ee = [500.0, 7500.0, 12500.0, 17500.0, 19000.0, 19500.0, 19500.0, 21000.0,
          22000.0, 23000.0, 23000.0, 24000.0, 24000.0, 24500.0, 24500.0, 26000.0, 26500.0,
          26500.0, 27000.0, 27500.0, 28500.0, 29000.0, 29500.0, 29500.0, 29500.0, 30000.0,
          31500.0, 31500.0, 33500.0, 33500.0, 34000.0, 34500.0, 34500.0, 35000.0, 35500.0,
          37000.0, 37000.0, 37000.0, 38500.0, 40000.0, 40000.0, 41000.0, 41000.0, 43000.0,
          43000.0, 43000.0, 43000.0, 44000.0]
    #// double[] wno = new double[48];
    # photoionization edge wavenumbers of the levels, cm^-1:
    wno = [63500.0, 58500.0, 53500.0, 59500.0, 45000.0, 44500.0, 44500.0, 43000.0,
           58000.0, 41000.0, 54000.0, 40000.0, 40000.0, 57500.0, 55500.0, 38000.0, 57500.0,
           57500.0, 37000.0, 54500.0, 53500.0, 55000.0, 34500.0, 34500.0, 34500.0, 34000.0,
           32500.0, 32500.0, 32500.0, 32500.0, 32000.0, 29500.0, 29500.0, 31000.0, 30500.0,
           29000.0, 27000.0, 54000.0, 27500.0, 24000.0, 47000.0, 23000.0, 44000.0, 42000.0,
           42000.0, 21000.0, 42000.0, 42000.0]
    #//data freq1, modcount/0., 0/
    freq1 = 0.0
    #double hkt;
    #//c set up some data upon first entrance with a new model atmosphere
    #// if (modelnum .ne. modcount) then
    #// modcount = modelnum
    # Boltzmann factors g*exp(-E*h*c/kT) for each level at each depth:
    for i in range(numDeps):
        hkt = 6.6256e-27 / (1.38054e-16*temp[0][i])  # h/kT in s
        #//do k=1,48
        for k in range(48):
            bolt[k][i] = gg[k] * math.exp(-ee[k]*Useful.c()*hkt)
    #// endif
    #//c initialize some quantities for each new model atmosphere or new frequency;
    #//c the absorption begins at 4762 A.
    #// if (modelnum.ne.modcount .or. freq.ne.freq1) then
    freq1 = freq;
    #//waveno = freq/2.99792458d10
    #//if (waveno .ge. 21000.) then
    # Per-level cross-sections at this wavenumber (zero below each edge):
    if (waveno >= 21000.0):
        #//do k=1,48
        for k in range(48):
            xsect[k] = 0.0
            #//if (wno(k) .lt. waveno){
            if (wno[k] < waveno):
                xsect[k]= 3.0e-18 / ( 1.0 + math.pow( ( (wno[k]+3000.0-waveno)/wno[k]/0.1 ), 4 ) )
    #// endif
    # //do i=1,ntau
    for i in range(numDeps):
        #//aFe1 seems to be cumulative. Moog does not seem to have this reset for each depth, but my aFe is blowing up, so let's try it...
        aFe1[i] = 0.0 #//reset accumulator each depth- ???
        #//if (waveno .ge. 21000.) then
        if (waveno >= 21000.0):
            #//do k=1,48
            for k in range(48):
                aFe1[i] = 0.0 #//reset accumulator each 'k' - ??? (like removing aFe1 term in expression below...
                sigma = aFe1[i] + xsect[k]*bolt[k][i]
                aFe1[i] = sigma * math.exp(logGroundPops[i])
                #//System.out.println("i " + i + " sigma " + sigma + " aFe1 " + aFe1[i]);
    return aFe1
| 42,559
| 37.62069
| 220
|
py
|
ChromaStarPy
|
ChromaStarPy-master/SunNaID.py
|
#
# SunNaID: run-configuration constants for a solar Na I D synthesis run.
#

# Custom filename tags used to distinguish this run's output from other runs
project = "Project"
runVers = "Run"

# Default plot -- select exactly ONE of:
#   "structure", "sed", "spectrum", "ldc", "ft", "tlaLine"
makePlot = "spectrum"

# Spectrum synthesis mode:
#  - uses the model in Restart.py with minimal structure calculation
specSynMode = False

# --- Model atmosphere ---
teff = 5777.0         # effective temperature, K
logg = 4.44           # surface gravity, cgs
log10ZScale = 0.0     # metallicity, [A/H]
massStar = 1.0        # stellar mass, solar masses
xiT = 1.0             # microturbulence, km/s
logHeFe = 0.0         # [He/Fe]
logCO = 0.0           # [C/O]
logAlphaFe = 0.0      # [alpha-elements/Fe]

# --- Spectrum synthesis ---
lambdaStart = 588.0   # nm
lambdaStop = 592.0    # nm
# Output filename stem encodes the key run parameters:
fileStem = (
    f"{project}-{round(teff, 7)}-{round(logg, 3)}-{round(log10ZScale, 3)}"
    f"-{round(lambdaStart, 5)}-{round(lambdaStop, 5)}-{runVers}"
)
lineThresh = -3.0     # min log(kapLine/kapCnt) for inclusion at all - areally, being used as "lineVoigt" for now
voigtThresh = -3.0    # min log(kapLine/kapCnt) for Voigt treatment - currently not used - all lines get Voigt
logGammaCol = 0.5
logKapFudge = 0.0
macroV = 1.0          # macroturbulence, km/s
rotV = 2.0            # rotational velocity, km/s
rotI = 90.0           # inclination, degrees
RV = 0.0              # radial velocity, km/s
vacAir = "vacuum"
sampling = "fine"

# --- Performance vs realism ---
nOuterIter = 12       # no of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12       # no of inner (ion fraction) - Pe iterations
ifTiO = 1             # whether to include TiO JOLA bands in synthesis

# Gaussian filter for limb-darkening curve / Fourier transform
diskLambda = 500.0    # nm
diskSigma = 0.01      # nm

# --- Two-level atom and spectral line (Na I D2-like defaults) ---
userLam0 = 589.592    # line center, nm
userA12 = 6.24        # A_12 logarithmic abundance = log_10(N/N_H) + 12
userLogF = -0.495     # log(f) oscillator strength // saturated line
userStage = 0         # ionization stage of user species (0 (I) - 3 (IV))
userChiI1 = 5.139     # ground state chi_I, eV
userChiI2 = 47.29     # 1st ionized state chi_I, eV
userChiI3 = 71.62     # 2nd ionized state chi_I, eV
userChiI4 = 98.94     # 3rd ionized state chi_I, eV
userChiL = 0.0        # lower atomic E-level, eV
userGw1 = 2           # ground state stat. weight or partition fn (stage I)
userGw2 = 1           # ground state stat. weight or partition fn (stage II)
userGw3 = 1           # ground state stat. weight or partition fn (stage III)
userGw4 = 1           # ground state stat. weight or partition fn (stage IV)
userGwL = 2           # lower E-level stat. weight
userMass = 22.9       # amu
userLogGammaCol = 1.0 # log_10 Lorentzian broadening enhancement factor
| 2,669
| 33.230769
| 116
|
py
|
ChromaStarPy
|
ChromaStarPy-master/MulGrayTCorr.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 11 12:25:33 2017
@author: ishort
"""
import math
import Useful
import ToolBox
def mgTCorr(numDeps, teff, tauRos, temp, rho, kappa):
    """Multi-gray lambda-iteration temperature correction.

    Computes a first-order temperature correction at each depth from the net
    radiative cooling/heating imbalance, accumulated over a small set of
    "gray" opacity bins whose break-points/levels come from grayLevEps().

    Parameters:
      numDeps : int, number of depth points
      teff    : float, effective temperature (K); selects the cool vs. hot
                star bin set via the isCool boundary
      tauRos  : [2][numDeps] Rosseland optical depth (row 0 linear, row 1 ln)
      temp    : [2][numDeps] temperature (row 0 linear K, row 1 ln)
      rho     : density structure; unused in this routine but kept for
                interface compatibility with the other TCorr routines
      kappa   : [2][numDeps] opacity; NOTE: rescaled *in place* by the
                overall gray level graySet

    Returns:
      newTemp : [2][numDeps] corrected temperature (row 0 linear, row 1 ln)

    Fixes vs. the original Java translation: math.E -> math.e,
    math.abs -> abs, and `sign` is reset for each depth point (it
    previously remained -1.0 for all subsequent depths once set).
    """

    #// updated temperature structure
    newTemp = [ [ 0.0 for i in range(numDeps) ] for j in range(2) ]

    #//Teff boundary between early and late-type stars:
    isCool = 6500.0

    #//Set up multi-gray opacity:
    #// lambda break-points and gray levels:
    #// No. multi-gray bins = num lambda breakpoints + 1
    minLambda = 30.0   #//nm
    maxLambda = 1.0e6  #//nm
    maxNumBins = 11
    grayLams = [0.0 for i in range(maxNumBins + 1)]
    grayLevel = [0.0 for i in range(maxNumBins)]
    epsilon = [0.0 for i in range(maxNumBins)]

    #//initialize everything first:
    for iB in range(maxNumBins):
        grayLams[iB] = maxLambda
        grayLevel[iB] = 1.0
        epsilon[iB] = 0.99
    grayLams[maxNumBins] = maxLambda  #//Set final wavelength

    grayLevelsEpsilons = grayLevEps(maxNumBins, minLambda, maxLambda, teff, isCool)

    #//Find actual number of multi-gray bins:
    numBins = 0
    for i in range(maxNumBins):
        if (grayLevelsEpsilons[0][i] < maxLambda):
            numBins += 1

    for iB in range(numBins):
        grayLams[iB] = grayLevelsEpsilons[0][iB]
        grayLevel[iB] = grayLevelsEpsilons[1][iB]
        epsilon[iB] = grayLevelsEpsilons[2][iB]
    grayLams[numBins] = grayLevelsEpsilons[0][numBins]  #//Get final wavelength

    #//Set overall gray-level - how emissive and absorptive the gas is overall
    #// a necessary "fudge" because our kappa values are arbitrary rather than "in situ"
    graySet = 1.0
    #// no damping - Lambda iteration is slow rather than oscillatory:
    tcDamp = 1.0
    logE = math.log10(math.e)  #// for debug output

    planckBol = [0.0 for i in range(numDeps)]   #//for reference only
    jayBol = [0.0 for i in range(numDeps)]      #//for reference only
    dBdTBol = [0.0 for i in range(numDeps)]     #//for reference only
    cool = [0.0 for i in range(numDeps)]        #// cooling term in Stromgren equation
    heat = [0.0 for i in range(numDeps)]        #// heating term in Stromgren equation
    corrDenom = [0.0 for i in range(numDeps)]   #//denominator in 1st order temp correction

    #//CAUTION: planckBin: row 0 is bin-integrated B_lambda; row 1 is bin-integrated dB/dT_lambda
    planckBin = [ [ 0.0 for i in range(numDeps) ] for j in range(2) ]
    jayBin = [0.0 for i in range(numDeps)]

    #// initialize accumulators & set overall gray kappa level:
    for iTau in range(numDeps):
        planckBol[iTau] = 0.0
        jayBol[iTau] = 0.0
        dBdTBol[iTau] = 0.0
        cool[iTau] = 0.0
        heat[iTau] = 0.0
        corrDenom[iTau] = 0.0
        kappa[1][iTau] = kappa[1][iTau] + math.log(graySet)
        kappa[0][iTau] = math.exp(kappa[1][iTau])

    for iB in range(numBins):
        planckBin = planckBinner(numDeps, temp, grayLams[iB], grayLams[iB + 1])
        #// We lambda-operate on a wavelength-integrated B_lambda for each multi-gray bin:
        jayBin = jayBinner(numDeps, tauRos, temp, planckBin, grayLevel[iB])
        for iTau in range(numDeps):
            #//Net LTE volume cooling rate:
            #// deltaE = Integ(4*pi*kappa*rho*B_lam)dlam - Integ(4*pi*kappa*rho*J_lam)dlam
            #// where J_lam = LambdaO[B_lam] - Rutten Eq. 7.32, 7.33
            #// CAUTION: the 4pi and rho factors cancel when dividing the B-J term by dB/dT
            planckBol[iTau] = planckBol[iTau] + planckBin[0][iTau]
            #//thermal and scattering parts of the cooling term:
            logCoolTherm = math.log(grayLevel[iB]) + math.log(epsilon[iB]) + kappa[1][iTau] + math.log(planckBin[0][iTau])
            logCoolScat = math.log(grayLevel[iB]) + math.log((1.0 - epsilon[iB])) + kappa[1][iTau] + math.log(jayBin[iTau])
            cool[iTau] = cool[iTau] + math.exp(logCoolTherm) + math.exp(logCoolScat)
            jayBol[iTau] = jayBol[iTau] + jayBin[iTau]
            logHeat = math.log(grayLevel[iB]) + kappa[1][iTau] + math.log(jayBin[iTau])
            heat[iTau] = heat[iTau] + math.exp(logHeat)
            dBdTBol[iTau] = dBdTBol[iTau] + planckBin[1][iTau]
            logCorrDenom = math.log(grayLevel[iB]) + kappa[1][iTau] + math.log(planckBin[1][iTau])
            corrDenom[iTau] = corrDenom[iTau] + math.exp(logCorrDenom)

    for iTau in range(numDeps):
        #// FIX: reset sign for each depth point (previously stuck at -1.0 once set)
        sign = 1.0
        #// 1st order T correction: form the cool/heat ratio so DeltaT < 0 if J < B;
        #// avoids direct subtraction of two large, nearly equal numbers:
        logRatio = math.log(cool[iTau]) - math.log(heat[iTau])
        ratio = math.exp(logRatio)
        deltaE = heat[iTau] * (1.0 - ratio)
        if (deltaE < 0.0):
            sign = -1.0
        #//CHEAT: tau-dependent deltaE damping - things are flaky at depths where T(tau) steepens
        deltaE = deltaE * math.exp(1.0 * (tauRos[0][0] - tauRos[0][iTau]))
        #// DeltaE/DeltaT ~ dB/dT_Bol  (FIX: was math.abs - no such function)
        logDeltaTemp = math.log(abs(deltaE)) - math.log(corrDenom[iTau])
        deltaTemp = sign * math.exp(logDeltaTemp) * tcDamp
        newTemp[0][iTau] = temp[0][iTau] + deltaTemp
        newTemp[1][iTau] = math.log(newTemp[0][iTau])

    return newTemp
def jayBinner(numDeps, tauRos, temp, planckBin, grayLevel):
    """Compute the bin-integrated, angle-averaged mean intensity J for one
    multi-gray opacity bin via a Lambda operation (Schwarzschild equation):

        J(Tau) = 1/2 * Integral_0^infty { E_1(|t - Tau|) * B_bin(t) } dt

    Parameters:
      numDeps   : int, number of depth points
      tauRos    : [2][numDeps] Rosseland optical depth (linear, ln)
      temp      : temperature structure (unused here; kept for interface
                  compatibility)
      planckBin : [2][numDeps]; row 0 is the bin-integrated Planck fn B_bin
      grayLevel : float, kappa_bin/kappa_Ros ratio for this bin

    Returns:
      jayBin [numDeps]: mean intensity J_bin; defaults to B_bin where the
      atmosphere is deep enough to be thermalized.

    Fixes vs. the original Java translation: numInteg cast to int for
    range(), math.abs -> abs, Math.exp -> math.exp.
    """
    logE = math.log10(math.e)  #// for debug output

    #//Set up the local optical depth scale for this bin:
    tauBin = [ [ 0.0 for i in range(numDeps) ] for j in range(2) ]
    tauBin[0][0] = tauRos[0][0] * grayLevel  #// Is this a good idea??
    tauBin[1][0] = math.log(tauBin[0][0])
    for iTau in range(1, numDeps):
        deltaTauRos = tauRos[0][iTau] - tauRos[0][iTau - 1]
        #//grayLevel *is*, by definition, the ratio kappa_Bin/kappaRos needed here:
        tauBin[0][iTau] = tauBin[0][iTau - 1] + grayLevel * deltaTauRos
        tauBin[1][iTau] = math.log(tauBin[0][iTau])

    accum = 0.0  #//accumulator
    jayBin = [0.0 for i in range(numDeps)]

    #// If E_1(t-Tau) evaluated at Tau = bottom of atmosphere is negligible,
    #// just set J = B there - we're deep enough to be thermalized, and we
    #// don't want the lambda operation to implicitly include depths below
    #// the bottom of the model where B=0 implicitly.
    tiny = 1.0e-14  #//tuned to around maxTauDiff at Tau_Ros ~ 3

    #//Stipulate the {|t-Tau|} grid at which E_1(x)B is evaluated - necessary
    #// to sample the sharply peaked integrand properly.
    #// CAUTION: these are tau *offsets* from the centre of the J integration,
    #// NOT the optical depth scale of the atmosphere.
    fineFac = 3.0   #// integrate E1 on a grid fineFac x finer in logTau space
    E1Range = 36.0  #// number of master tauBin intervals within which to integrate J
    #// FIX: range() requires an integer bound:
    numInteg = int(E1Range * fineFac)
    deltaLogTauE1 = (tauBin[1][numDeps - 1] - tauBin[1][0]) / numDeps
    deltaLogTauE1 = deltaLogTauE1 / fineFac

    #//Prepare 1D vectors for ToolBox.interpol:
    logTauBin = [0.0 for i in range(numDeps)]
    logPlanck = [0.0 for i in range(numDeps)]
    for k in range(numDeps):
        logTauBin[k] = tauBin[1][k]
        logPlanck[k] = math.log(planckBin[0][k])

    #//Outer loop over the Taus where Jay(Tau) is being computed:
    #// Start at the top; below around tau=1 assume thermalized with J=B.
    for iTau in range(numDeps):
        jayBin[iTau] = planckBin[0][iTau]  #//default initialization J_bin = B_bin
        if (tauRos[0][iTau] < 66.67):
            #// Don't let the E_1(x) factor in the integrand run off the bottom
            #// of the atmosphere - there is no emissivity down there and J
            #// would drop below B again, like at the surface:
            maxTauDiff = abs(tauBin[0][numDeps - 1] - tauBin[0][iTau])
            if (expOne(maxTauDiff) < tiny):
                #// Above thermalization depth: J may not equal B.
                #// Inner loop over depths contributing to each Jay(iTau),
                #// working outward from t=Tau piece-wise.

                #// Contribution from depths above Tau:
                #// start at offset 1 instead of 0 - cuts out the troublesome
                #// central cusp of E_1(x), slightly underestimating J
                accum = 0.0
                logThisTau1 = tauBin[1][iTau] - deltaLogTauE1
                thisTau1 = math.exp(logThisTau1)
                deltaTauE1 = tauBin[0][iTau] - thisTau1
                E1 = expOne(deltaTauE1)
                logE1 = math.log(E1)
                logThisPlanck = ToolBox.interpol(logTauBin, logPlanck, logThisTau1)
                logInteg1 = logE1 + logThisPlanck
                integ1 = math.exp(logInteg1)  #// FIX: was Math.exp
                for i in range(2, numInteg - 1):
                    iFloat = float(i)
                    #// Extended trapezoidal rule for non-uniform abscissae -
                    #// this is an exponential integrand, and E_1(x) is
                    #// singular at x=0 so it is never evaluated there.
                    logThisTau2 = tauBin[1][iTau] - iFloat * deltaLogTauE1
                    thisTau2 = math.exp(logThisTau2)
                    #// Make sure we're still in the atmosphere!
                    if (logThisTau2 > tauBin[1][0]):
                        deltaTauE1 = tauBin[0][iTau] - thisTau2
                        E1 = expOne(deltaTauE1)
                        logE1 = math.log(E1)
                        #// interpolate log(B(log(Tau))) to the integration abscissa:
                        logThisPlanck = ToolBox.interpol(logTauBin, logPlanck, logThisTau2)
                        logInteg2 = logE1 + logThisPlanck
                        integ2 = math.exp(logInteg2)
                        logDeltaTau = math.log(thisTau1 - thisTau2)  #// *NOT* the same as deltaLogTau!!
                        meanInteg = 0.5 * (integ1 + integ2)  #//Trapezoid rule
                        logMeanInteg = math.log(meanInteg)
                        logTerm = logMeanInteg + logDeltaTau
                        term = math.exp(logTerm)
                        accum = accum + term
                        integ1 = integ2
                        thisTau1 = thisTau2
                jayBin[iTau] = 0.5 * accum  #//store what we have

                #// Contribution from depths below Tau; expOne() protects
                #// itself from the x=0 singularity using its 'tiny' floor.
                accum = 0.0
                logThisTau1 = tauBin[1][iTau] + deltaLogTauE1
                thisTau1 = math.exp(logThisTau1)
                deltaTauE1 = thisTau1 - tauBin[0][iTau]
                E1 = expOne(deltaTauE1)
                logE1 = math.log(E1)
                logThisPlanck = ToolBox.interpol(logTauBin, logPlanck, logThisTau1)
                logInteg1 = logE1 + logThisPlanck
                integ1 = math.exp(logInteg1)
                for i in range(2, numInteg - 1):
                    iFloat = float(i)
                    logThisTau2 = tauBin[1][iTau] + iFloat * deltaLogTauE1
                    thisTau2 = math.exp(logThisTau2)
                    #// make sure we're still in the atmosphere!
                    if (logThisTau2 < tauBin[1][numDeps - 1]):
                        deltaTauE1 = thisTau2 - tauBin[0][iTau]
                        E1 = expOne(deltaTauE1)
                        logE1 = math.log(E1)
                        logThisPlanck = ToolBox.interpol(logTauBin, logPlanck, logThisTau2)
                        logInteg2 = logE1 + logThisPlanck
                        integ2 = math.exp(logInteg2)
                        logDeltaTau = math.log(thisTau2 - thisTau1)  #// *NOT* the same as deltaLogTau!!
                        meanInteg = 0.5 * (integ1 + integ2)  #//Trapezoid rule
                        logMeanInteg = math.log(meanInteg)
                        logTerm = logMeanInteg + logDeltaTau
                        term = math.exp(logTerm)
                        accum = accum + term
                        integ1 = integ2
                        thisTau1 = thisTau2
                jayBin[iTau] = jayBin[iTau] + 0.5 * accum

    return jayBin
def planckBinner(numDeps, temp, lamStart, lamStop):
    """Compute the wavelength-bin-integrated Planck function AND its T
    derivative at all depths, on a log-uniform lambda grid within the bin.

    Parameters:
      numDeps  : int, number of depth points
      temp     : [2][numDeps] temperature (row 0 linear K)
      lamStart : float, bin start wavelength
      lamStop  : float, bin stop wavelength

    Returns:
      planckBin [2][numDeps]: row 0 is B_bin(tau), row 1 is dB/dT_bin(tau).

    Fix vs. the original Java translation: math.E -> math.e.
    """
    #// NOTE(review): Planck is not imported at the top of this file chunk -
    #// imported locally here; confirm a module-level `import Planck` exists.
    import Planck

    planckBin = [ [ 0.0 for i in range(numDeps) ] for j in range(2) ]
    logE = math.log10(math.e)  #// for debug output

    #// Parameters of the lambda grid for this bin (log-uniform spacing):
    log10LamStart = math.log10(lamStart)
    log10LamStop = math.log10(lamStop)
    deltaLog10Lam = 0.1
    numLamAll = int((log10LamStop - log10LamStart) / deltaLog10Lam)

    #//Generate the lambda grid separately to avoid duplicate lambda generation:
    lambda2 = [0.0 for i in range(numLamAll)]
    for i in range(numLamAll):
        iFloat = float(i)
        thisLogLam = log10LamStart + iFloat * deltaLog10Lam
        lambda2[i] = math.pow(10.0, thisLogLam)

    #//trapezoid rule integration over lambda at each depth:
    for iTau in range(numDeps):
        #//reset accumulators for the new depth
        accum = 0.0   #// B_bin accumulator
        accum2 = 0.0  #// dB/dT_bin accumulator
        #//initial integrands (Planck.planck and Planck.dBdT return logs):
        logPlanck1 = Planck.planck(temp[0][iTau], lambda2[0])
        planck1 = math.exp(logPlanck1)
        logdBdT1 = Planck.dBdT(temp[0][iTau], lambda2[0])
        dBdT1 = math.exp(logdBdT1)
        for i in range(1, numLamAll - 1):
            deltaLam = lambda2[i + 1] - lambda2[i]
            logPlanck2 = Planck.planck(temp[0][iTau], lambda2[i])
            planck2 = math.exp(logPlanck2)
            #//trapezoid rule integration
            integ = 0.5 * (planck1 + planck2) * deltaLam
            accum = accum + integ
            planck1 = planck2
            #//Now do the same for dB/dT:
            logdBdT2 = Planck.dBdT(temp[0][iTau], lambda2[i])
            dBdT2 = math.exp(logdBdT2)
            integ = 0.5 * (dBdT1 + dBdT2) * deltaLam
            accum2 = accum2 + integ
            dBdT1 = dBdT2
        planckBin[0][iTau] = accum
        planckBin[1][iTau] = accum2

    return planckBin
def grayLevEps(maxNumBins, minLambda, maxLambda, teff, isCool):
    """Return the multi-gray opacity bin definitions for the T-correction.

    Parameters:
      maxNumBins : int, maximum number of multi-gray bins (11 in mgTCorr)
      minLambda  : float, shortest break-point wavelength (nm)
      maxLambda  : float, longest break-point wavelength (nm)
      teff       : float, effective temperature (K)
      isCool     : float, Teff boundary (K) between the late-type and
                   early-type break-point sets

    Returns grayLevelsEpsilons[3][maxNumBins + 1]:
      Row 0: wavelength break-points, converted nm -> cm
      Row 1: relative opacity gray levels (unitless)
      Row 2: thermal photon creation fractions, epsilon

    Fixes vs. the original: result array dimensions were transposed
    ([maxNumBins+1][3], causing an IndexError when filling bins past
    index 2), the nm->cm factor was an undefined name `nm2cm`, and two
    Java-style `//` trailing comments were Python syntax errors.
    """
    nm2cm = 1.0e-7  #// nm -> cm conversion for the wavelength break-points

    grayLevelsEpsilons = [ [ 0.0 for i in range(maxNumBins + 1) ] for j in range(3) ]

    #//initialize everything first:
    for iB in range(maxNumBins):
        grayLevelsEpsilons[0][iB] = maxLambda
        grayLevelsEpsilons[1][iB] = 1.0
        grayLevelsEpsilons[2][iB] = 0.99
    grayLevelsEpsilons[0][maxNumBins] = maxLambda  #//Set final wavelength

    if (teff < isCool):
        #// *** Late-type stars: physically based wavelength break-points and gray
        #// levels for the Sun from Rutten Fig. 8.6:
        #// H I Balmer, Lyman, and Paschen jumps for lambda <= 3640 A, H^- b-f
        #// opacity hump in the visible & hole at 1.6 microns, increasing f-f beyond
        lamSet = [minLambda, 91.1, 158.5, 364.0, 820.4, 1600.0, 3.0e3, 1.0e4, 3.3e4, 1.0e5, 3.3e5, maxLambda]  #//nm
        levelSet = [1000.0, 100.0, 5.0, 1.0, 0.5, 0.1, 3.0, 10.0, 30.0, 100.0, 1000.0]
        #//photon *thermal* destruction and creation probability (as opposed to scattering)
        #//WARNING: these cannot be set exactly = 1.0 or a math.log() will blow up!!
        epsilonSet = [0.50, 0.50, 0.90, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99]
    else:
        #// *** Early-type stars, Teff > isCool:
        #// It's all about H I b-f: Lyman, Balmer, Paschen, Brackett jumps
        #//What about He I features and Thomson scattering (gray)?
        lamSet = [minLambda, 91.1, 364.0, 820.4, 1458.0, maxLambda]  #//nm
        levelSet = [100.0, 10.0, 2.0, 1.0, 1.0]  #//???
        epsilonSet = [0.5, 0.6, 0.7, 0.8, 0.5]

    numBins = len(levelSet)
    for iB in range(numBins):
        grayLevelsEpsilons[0][iB] = lamSet[iB] * nm2cm  #//cm
        grayLevelsEpsilons[1][iB] = levelSet[iB]
        grayLevelsEpsilons[2][iB] = epsilonSet[iB]
    grayLevelsEpsilons[0][numBins] = lamSet[numBins] * nm2cm  #//final wavelength

    return grayLevelsEpsilons
def expOne(x):
    """Approximate the first exponential integral function E_1(x) = -Ei(-x).

    From http://en.wikipedia.org/wiki/Exponential_integral - series expansion:
        E_1(x) = -gamma - ln(|x|) - Sigma_k=1^infty { (-x)^k / (k * k!) }
    where gamma = Euler-Mascheroni constant = 0.577215665...

    The argument is taken as |x|; values below a small floor are clamped to
    avoid the x=0 singularity.  For x > 3.0 the large-x asymptotic form
    exp(-x)/x is used, since the 11th-order series starts to diverge there.

    Fixes vs. the original Java translation: math.abs -> abs,
    Math.pow -> math.pow, Math.log -> math.log.
    """
    x = abs(x)  #// x must be positive
    #// E1(x) undefined at x=0 - singular:
    tiny = 1.0e-6
    if (x < tiny):
        x = tiny
    #// Even at 11th order accuracy (k=11), the series starts to diverge for x > 3.0:
    if (x > 3.0):
        E1 = math.exp(-1.0 * x) / x  #// large x approximation
    else:
        gamma = 0.577215665  #//Euler-Mascheroni constant
        kTerm = 0.0
        order = 11  #//order of approximation
        accum = 0.0  #//accumulator
        kFac = 1.0  #// initialize k! (k factorial)
        for k in range(1, order + 1):
            kFloat = float(k)
            kFac = kFac * kFloat
            accum = accum + math.pow((-1.0 * x), kFloat) / (k * kFac)
        kTerm = accum
        E1 = -1.0 * gamma - math.log(abs(x)) - kTerm

    return E1
| 31,141
| 45.550075
| 212
|
py
|
ChromaStarPy
|
ChromaStarPy-master/SpecSyn.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 28 17:03:30 2017
@author: ishort
"""
#/**
# *
# * Create master kappa_lambda(lambda) and tau_lambda(lambda) for
# * FormalSoln.formalSoln()
# *
# * @author Ian
# */
import math
import ToolBox
#plotting:
import matplotlib
import pylab
def masterLambda(numLams, numMaster, numNow, masterLams, numPoints, listLineLambdas):
    """Merge one line's wavelength grid into the current master (continuum)
    wavelength scale, producing a single sorted scale.

    Expects *pure* line opacity grids - no continuum opacity pre-added.
    The last point of the line grid holds the line-centre wavelength and is
    deliberately never merged in.

    Parameters:
      numLams, numMaster : unused; kept for interface compatibility
      numNow             : int, current dynamic length of masterLams
      masterLams         : current master wavelength scale (sorted)
      numPoints          : int, number of points in the line's grid
      listLineLambdas    : this line's wavelength grid (sorted)

    Returns a merged, sorted wavelength list of length numNow + numPoints.
    """
    totalLen = numNow + numPoints
    merged = [0.0 for _ in range(totalLen)]

    #//seed the merge with the first continuum wavelength:
    merged[0] = masterLams[0]
    cntIdx = 1   #// next unconsumed continuum point
    linIdx = 0   #// next unconsumed line point

    for outIdx in range(1, totalLen):
        #//once the continuum points are exhausted, only the final-point
        #//fix-up below writes anything further:
        if cntIdx < numNow:
            takeCnt = (masterLams[cntIdx] <= listLineLambdas[linIdx]) \
                      or (linIdx >= numPoints - 1)
            if takeCnt:
                #//next point comes from the continuum scale:
                merged[outIdx] = masterLams[cntIdx]
                cntIdx += 1
            elif (listLineLambdas[linIdx] < masterLams[cntIdx]) \
                    and (linIdx < numPoints - 1):
                #//next point comes from the line's scale:
                merged[outIdx] = listLineLambdas[linIdx]
                linIdx += 1

    #//make sure the final wavelength point is secured:
    merged[totalLen - 1] = masterLams[numNow - 1]
    return merged
def masterKappa(numDeps, numLams, numMaster, numNow, masterLams, masterLamsOut, logMasterKaps, \
                numPoints, listLineLambdas, listLogKappaL):
    """Interpolate the continuum and one line's opacity onto the merged
    wavelength scale and add them wavelength-wise at every depth.

    Parameters:
      numDeps          : int, number of depth points
      numLams, numMaster : unused; kept for interface compatibility
      numNow           : int, current dynamic length of masterLams
      masterLams       : current master wavelength scale
      masterLamsOut    : merged wavelength scale (from masterLambda())
      logMasterKaps    : [numNow][numDeps] log continuum+line-so-far opacity
      numPoints        : int, number of points in the line's grid
      listLineLambdas  : this line's wavelength grid
      listLogKappaL    : [numPoints][numDeps] log *pure* line opacity

    Returns logMasterKapsOut [numNow+numPoints][numDeps]: log of the total
    (continuum plus line) opacity on the merged scale.
    """
    totalLen = numNow + numPoints
    logMasterKapsOut = [ [ 0.0 for _ in range(numDeps) ] for _ in range(totalLen) ]

    #//1D copies for ToolBox.interpol:
    cntLams = [masterLams[k] for k in range(numNow)]
    lineLamLo = listLineLambdas[0]
    lineLamHi = listLineLambdas[numPoints - 1]

    for iD in range(numDeps):
        #//extract this depth's 1D log-opacity vectors:
        logCntKap = [logMasterKaps[k][iD] for k in range(numNow)]
        logLinKap = [listLogKappaL[k][iD] for k in range(numPoints)]
        for iL in range(totalLen):
            lam = masterLamsOut[iL]
            logCnt2 = ToolBox.interpol(cntLams, logCntKap, lam)
            #//negligible line opacity outside the line's own grid:
            logLin2 = -49.0
            if lineLamLo <= lam <= lineLamHi:
                logLin2 = ToolBox.interpol(listLineLambdas, logLinKap, lam)
            #//add continuum and line opacity in linear space:
            logMasterKapsOut[iL][iD] = math.log(math.exp(logCnt2) + math.exp(logLin2))

    return logMasterKapsOut
| 6,444
| 41.682119
| 151
|
py
|
ChromaStarPy
|
ChromaStarPy-master/LineKappa.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 29 14:03:52 2017
@author: Ian
"""
"""// Assumes CRD, LTE, ???
// Input parameters:
// lam0 - line centre wavelength in nm
// logNl - log_10 column density of absorbers in lower E-level, l (cm^-2)
// logFlu - log_10 oscillator strength (unitless)
// chiL - energy of lower atomic E-level of b-b transition in eV
// chiI - ground state ionization energy to the next higher stage (eV)
//
// * PROBLEM: line kappaL values converted to mass extinction by division by rho() are
// * not consistent with fake Kramer's Law based scaling of kappa_Ros with g.
//* Try leaving kappaLs as linear extinctions and converting the scaled kappa_Ros back to linear units
// * with solar rho() in LineTau2
//
// Also needs atmospheric structure information:
// numDeps
// tauRos structure
// temp structure
// rho structure
// Level population now computed in LevelPops.levelPops()"""
import math
import numpy
import Useful
import ToolBox
def lineKap(lam0In, logNums, logFluIn, linePoints, lineProf,
            numDeps, zScale, tauRos, temp, rho, logFudgeTune):
    """
    Build the 2-D grid of monochromatic *line* extinction coefficients.

    Assumes CRD and LTE.  Following Rutten, "Radiative Transfer in Stellar
    Atmospheres" (pp. 23 & 31), the oscillator strength is converted to a
    volume extinction coefficient per absorber, corrected for stimulated
    emission, scaled by the lower-level column density and the normalized
    line profile, and finally divided by the mass density (in log space).

    Parameters:
      lam0In       - line-centre wavelength (cm)
      logNums      - ln N_l per depth: lower-level absorber population
      logFluIn     - log_10 g_l*f_lu oscillator strength (unitless)
      linePoints   - 2-D wavelength sampling of the profile; [0] holds the
                     lambda offsets (its length sets numPoints)
      lineProf     - normalized line profile, [numPoints][numDeps]
      numDeps      - number of depth points
      zScale       - linear metallicity scale factor (kept for interface
                     parity; only its log is computed here)
      tauRos, temp, rho - atmospheric structure; temp[1]/rho[1] are ln values
      logFudgeTune - base-10 ad hoc opacity enhancement ("Opacity problem #2")

    Returns:
      numPoints x numDeps array of ln(line mass extinction).
    """
    # Fundamental constants (mostly natural logs).  Several are fetched for
    # parity with the original routine even though the final expression does
    # not use them directly.
    logE10 = math.log(10.0)  # natural log of 10
    c = Useful.c()
    logC = Useful.logC()
    k = Useful.k()
    logK = Useful.logK()
    logH = Useful.logH()
    logEe = Useful.logEe()
    logMe = Useful.logMe()
    ln10 = math.log(10.0)
    logE = math.log10(math.e)  # for debug output
    log2pi = math.log(2.0 * math.pi)
    log2 = math.log(2.0)

    lam0 = lam0In  # already in cm
    logLam0 = math.log(lam0)
    logFlu = logFluIn * ln10  # oscillator strength: base 10 -> base e
    logKScale = math.log10(zScale)

    numPoints = len(linePoints[0])

    # f_lu -> volume extinction coefficient per particle (Rutten, p. 23):
    logPreFac = logFlu + math.log(math.pi) + 2.0 * logEe - logMe - logC
    # Stimulated-emission exponent h*c/(lambda*k); lambda is taken to be
    # constant (= lam0) across the whole profile for this correction:
    logExpFac = logH + logC - logK - logLam0

    # numPoints x numDeps grid of ln(line extinction):
    logKappaL = [[0.0 for _ in range(numDeps)] for _ in range(numPoints)]

    for iDep in range(numDeps):
        # Stimulated-emission correction 1 - exp(-h*c/(lambda*k*T)):
        stimEm = 1.0 - math.exp(-1.0 * math.exp(logExpFac - temp[1][iDep]))
        logStimEm = math.log(stimEm)
        logNum = logNums[iDep]
        for iLam in range(numPoints):
            # Volume coefficient alpha_lambda (cm^-1), Rutten p. 31:
            thisKap = logPreFac + logStimEm + logNum + math.log(lineProf[iLam][iDep])
            # Convert to a mass coefficient (g/cm^2) by dividing by rho in
            # log space.  (Original author notes this is not fully consistent
            # with the fake-Kramers scaling of kappa_Ros with g.)
            thisKap = thisKap - rho[1][iDep]
            # Opacity problem #2: apply the same ad hoc enhancement factor
            # as the continuum opacity (logFudgeTune is base 10):
            thisKap = thisKap + logE10 * logFudgeTune
            logKappaL[iLam][iDep] = thisKap

    return logKappaL
#//Create total extinction throughout line profile:
def lineTotalKap(linePoints, logKappaL, numDeps, kappa,
                 numLams, lambdaScale):
    """
    Combine line and continuum extinction across a line profile.

    The continuum opacity (on the coarse `lambdaScale` grid) is interpolated
    onto the fine line-profile wavelength grid at each depth, then added
    linearly to the line extinction, wavelength-by-wavelength.

    Parameters:
      linePoints  - fine wavelength sampling of the line profile
      logKappaL   - ln(line extinction), [numPoints][numDeps], from lineKap()
      numDeps     - number of depth points
      kappa       - continuum extinction per (lambda, depth); treated as
                    ln values below (exp() is applied) -- TODO confirm units
                    match logKappaL at the call site
      numLams     - number of continuum wavelengths
      lambdaScale - continuum wavelength grid

    Returns:
      numPoints x numDeps array of ln(total extinction).
    """
    logE = math.log10(math.e) #// for debug output
    # NOTE(review): lineKap() sizes its grid with len(linePoints[0]); here the
    # outer length is used -- confirm linePoints has the expected orientation
    # at this call site.
    numPoints = len(linePoints)
    #// return a 2D numPoints x numDeps array of monochromatic *TOTAL* extinction line profiles
    logTotKappa = [ [ 0.0 for i in range(numDeps) ] for j in range(numPoints) ]
    #double kappaL, logKappaC;
    #//Interpolate continuum opacity onto onto line-blanketed opacity lambda array:
    #//
    kappaC = [0.0 for i in range(numLams)]    # continuum kappa at one depth
    kappaC2 = [0.0 for i in range(numPoints)] # same, interpolated to line grid
    kappa2 = [ [ 0.0 for i in range(numDeps) ] for j in range(numPoints) ]
    # NOTE(review): this loop starts at depth index 1, so kappa2[il][0] keeps
    # its 0.0 initialization and exp(0.0) = 1.0 is added at the surface depth
    # below -- confirm this is intended.
    for id in range(1, numDeps):
        for il in range(numLams):
            kappaC[il] = kappa[il][id]
        #kappaC2 = ToolBox.interpolV(kappaC, lambdaScale, linePoints);
        kappaC2 = numpy.interp(linePoints, lambdaScale, kappaC);
        for il in range(numPoints):
            kappa2[il][id] = kappaC2[il]
    # Add line and continuum extinction linearly, then return to log space:
    for id in range(numDeps):
        for il in range(numPoints):
            #//Both kappaL and kappa (continuum) are *mass* extinction (cm^2/g) at this point:
            #//logKappaC = kappa[1][id];
            #//kappaL = Math.exp(logKappaL[il][id]) + Math.exp(logKappaC);
            kappaL = math.exp(logKappaL[il][id]) + math.exp(kappa2[il][id])
            logTotKappa[il][id] = math.log(kappaL)
            #//logTotKappa[il][id] = kappa[1][id]; //test - no line opacity
    return logTotKappa
| 7,712
| 41.379121
| 241
|
py
|
ChromaStarPy
|
ChromaStarPy-master/ChromaStarGasPy.py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is the main source file for ChromaStarPy. We start here.
"""
"""
/*
* The openStar project: stellar atmospheres and spectra
*
* ChromaStarPy
*
* Version 2020-05-15
* Use date based versioning with ISO 8601 date (YYYY-MM-DD)
*
* December 2017
*
* C. Ian Short
* Saint Mary's University
* Department of Astronomy and Physics
* Institute for Computational Astrophysics (ICA)
* Halifax, NS, Canada
* * ian.short@smu.ca
* www.ap.smu.ca/~ishort/
*
*
* Philip D. Bennett
* Saint Mary's University
* Department of Astronomy and Physics
* Eureka Scientific
* Halifax, NS, Canada
*
*
* * Co-developers:
* *
* * Lindsey Burns (SMU) - 2017 - "lburns"
* * Jason Bayer (SMU) - 2017 - "JB"
*
*
* Open source pedagogical computational stellar astrophysics
*
* 1D, static, plane-parallel, LTE stellar atmospheric model
* Voigt spectral line profile
*
 * July 2019 - Equations of state (EOS) and chemical/ionization equilibrium now computed
* with Phil Bennett's "GAS" package. Includes 51 molecules, including 16 polyatomic
* molecules
*
*
*
* Suitable for pedagogical purposes only
*
*
* python V. 3
*
* System requirements for Java version: Java run-time environment (JRE)
 * System requirements for JavaScript version: JavaScript interpretation enabled in WWW browser (usually by default)
*
* Code provided "as is" - there is no formal support
*
*/
"""
"""/*
* The MIT License (MIT)
* Copyright (c) 2016 C. Ian Short
*
* Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
*
* The above copyright notice and this permission notice shall
be included in all copies or substantial portions of the
Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
*
*/"""
#from decimal import Decimal as D
import Input
import Restart
import Useful
import LamGrid
import TauScale
import ScaleSolar
import State
import Hydrostat
import ScaleT4250g20
import ScaleT5000
import ScaleT10000
import LevelPopsGasServer
import Kappas
import KappasMetal
import KappasRaylGas
import DepthScale
import IonizationEnergy
import PartitionFn
import AtomicMass
import ToolBox
import Thetas
import MolecData
import Jola
import SpecSyn
import SpecSyn2
import HjertingComponents
import LineGrid
import LineProf
import LineKappa
import LineTau2
import FormalSoln
import Flux
import FluxTrans
import LDC
import PostProcess
#Integrated Planetary transit light curve modelling
import TransitLightCurve2
#No - only valid for ingress and egress
import TransitLightCurveAnlytc2
# GAS EOS/chemical equilibrium package, ported from Phil Bennett/Athena
#Requires special python ports of some blas and lapack routines - part of CSPy distribution
#import CSBlockData
#import GasData
#import GasData2
import CSGsRead2
import CSGasEst
import CSGas
#from Documents.ChromaStarPy.GAS import BlockData
#from Documents.ChromaStarPy.GAS.GsRead2 import gsread
#from Documents.ChromaStarPy.GAS.GasEst import gasest
#from Documents.ChromaStarPy.GAS.Gas import gas
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#Type %matplotlib qt5 at python IDE prompt before running to get multiple plots in different qt windows.
import math
import numpy
from functools import reduce
import subprocess
import os
import sys
#debugging in spyder
import pdb
#############################################
#
#
#
# Initial set-up:
# - import all python modules
# - set input parameters for *everything* (atmospheric modeling
# spectrum synthesis, user-defined 2-level atom & line,
# post-processing, ...)
# - prepare reference solar model and template models
# for re-scaling to initial guess
#
#
#
##############################################
#Detect python version
pythonV = sys.version_info
if pythonV[0] != 3:
print("")
print("")
print(" ********************************************* ")
print("")
print("WARNING!! WARNING!! WARNING!!")
print("")
print("")
print("ChromaStarPy developed for python V. 3!!" )
print("")
print("May not work in other version")
print("")
print("")
print("*********************************************** ")
print("")
print("")
thisOS = "unknown" #default
myOS= ""
#returns 'posix' form unix-like OSes and 'nt' for Windows??
thisOS = os.name
print("")
print("Running on OS: ", thisOS)
print("")
absPath0 = "./" #default
if thisOS == "nt":
#windows
absPath0 = subprocess.check_output("cd", shell=True)
backSpace = 2
elif thisOS == "posix":
absPath0 = subprocess.check_output("pwd", shell=True)
backSpace = 1
absPath0 = bytes.decode(absPath0)
#remove OS_dependent trailing characters 'r\n'
nCharsPath = len(absPath0)
nCharsPath -= backSpace
absPath0 = absPath0[0: nCharsPath]
slashIndex = absPath0.find('\\') #The first backslash is the escape character!
while slashIndex != -1:
#python strings are immutable:
absPathCopy = absPath0[0: slashIndex]
absPathCopy += '/'
absPathCopy += absPath0[slashIndex+1: len(absPath0)]
absPath0 = absPathCopy
#print(absPathCopy, absPath0)
slashIndex = absPath0.find('\\')
absPath = absPath0 + '/'
#Deprecated
#makePlot = Input.makePlot
#print("")
#print("Will make plot: ", makePlot)
#print("")
makePlotStruc = Input.makePlotStruc
makePlotSED = Input.makePlotSED
makePlotSpec = Input.makePlotSpec
makePlotLDC = Input.makePlotLDC
makePlotFT = Input.makePlotFT
makePlotTLA = Input.makePlotTLA
makePlotTrans = Input.makePlotTrans
makePlotPPress = Input.makePlotPPress
print("")
print("Type %matplotlib qt5 at python IDE prompt before running to get multiple plots in different qt windows")
print("")
#stop
#color platte for plt plotting
#palette = ['black', 'brown','red','orange','yellow','green','blue','indigo','violet']
#grayscale
#stop
#Grayscale:
numPal = 12
palette = ['0.0' for i in range(numPal)]
delPal = 0.04
#for i in range(numPal):
# ii = float(i)
# helpPal = 0.481 - ii*delPal
# palette[i] = str(helpPal)
palette = [ str( 0.481 - float(i)*delPal ) for i in range(numPal) ]
numClrs = len(palette)
#General file for printing ad hoc quantities
dbgHandle = open("debug.out", 'w')
outPath = absPath + "/Outputs/"
fileStem = Input.fileStem+"Gas"
#Not usedoutFileString = outPath+fileStem+"Gas.*"
print(" ")
print("Writing to files ", outPath + fileStem)
print(" ")
""" test
#// Representative spectral line and associated atomic parameters
#//NaID
userLam0 = 589.592 #nm
userA12 = 6.24 #// A_12 logarithmic abundance = log_10(N/H_H) = 12
userLogF = math.log10(math.e)*math.log(0.320) #// log(f) oscillaotr strength // saturated line
testAij = math.log(6.14e+07)
userStage = 0 #//ionization stage of user species (0 (I) - 3 (IV)
userChiI1 = 5.5 #// ground state chi_I, eV
userChiI2 = 8.0 #// 1st ionized state chi_I, eV
userChiI3 = 20.0 #// 2nd ionized state chi_I, eV
userChiI4 = 40.0 #// 3rd ionized state chi_I, eV
userChiL = 0.0 #// lower atomic E-level, eV
userGw1 = 2 #// ground state state. weight or partition fn (stage I) - unitless
userGw2 = 1 #// ground state state. weight or partition fn (stage II) - unitless
userGw3 = 1 #// ground state state. weight or partition fn (stage III) - unitless
userGw4 = 1 #// ground state state. weight or partition fn (stage IV) - unitless
userGwL = 2 #// lower E-level state. weight - unitless
userMass = 22.0 #//amu
userLogGammaCol = 1.0 #log_10 Lorentzian broadening enhancement factor
"""
#True for Voigt computed with true convolution instead of power-law expansion approx - probably not working well right now
ifVoigt = False
#Scattering term in line source fn - not yet enabled
ifScatt = False
#// Argument 0: Effective temperature, Teff, in K:
#teff = float(teffStr)
teff = Input.teff
#print(type(teff))
#// Argument 1: Logarithmic surface gravity, g, in cm/s/s:
#logg = float(loggStr)
logg = Input.logg
#//Argument 2: Linear scale factor for solar Rosseland opacity distribution
#log10ZScale = float(logZStr)
log10ZScale = Input.log10ZScale
#//Argument 3: Stellar mass, M, in solar masses
#massStar = float(massStarStr)
massStar = Input.massStar
#// Sanity check:
F0Vtemp = 7300.0; #// Teff of F0 V star (K)
if (teff < 3000.0):
teff = 3000.0
# teffStr = "3000"
if (teff > 50000.0):
teff = 50000.0
# teffStr = "50000"
#//logg limit is strongly Teff-dependent:
minLogg = 3.0; #//safe initialization
minLoggStr = "3.0";
if (teff <= 4000.0):
minLogg = 0.0
# minLoggStr = "0.0"
elif ((teff > 4000.0) and (teff <= 5000.0)):
minLogg = 0.5
# minLoggStr = "0.5"
elif ((teff > 5000.0) and (teff <= 6000.0)):
minLogg = 1.5
# minLoggStr = "1.5"
elif ((teff > 6000.0) and (teff <= 7000.0)):
minLogg = 2.0
# minLoggStr = "2.0"
elif ((teff > 7000.0) and (teff < 9000.0)):
minLogg = 2.5
# minLoggStr = "2.5"
elif (teff >= 9000.0):
minLogg = 3.0
# minLoggStr = "3.0"
if (logg < minLogg):
logg = minLogg
# loggStr = minLoggStr
if (logg > 7.0):
logg = 7.0
# loggStr = "7.0"
if (log10ZScale < -3.0):
log10ZScale = -3.0
# logZStr = "-3.0"
if (log10ZScale > 1.0):
log10ZScale = 1.0
# logZStr = "1.0"
if (massStar < 0.1):
massStar = 0.1
# massStarStr = "0.1"
if (massStar > 20.0):
massStar = 20.0
# massStarStr = "20.0"
grav = math.pow(10.0, logg)
zScale = math.pow(10.0, log10ZScale)
#// Argument 5: microturbulence, xi_T, in km/s:
#xiT = float(xiTStr)
xiT = Input.xiT
if (xiT < 0.0):
xiT = 0.0
# xitStr = "0.0"
if (xiT > 8.0):
xiT = 8.0
# xitStr = "8.0"
#// Add new variables to hold values for new metallicity controls lburns
#logHeFe = float(logHeFeStr) #// lburns
#logCO = float(logCOStr) #// lburns
#logAlphaFe = float(logAlphaFeStr) #// lburns
logHeFe = Input.logHeFe
logCO = Input.logCO
logAlphaFe = Input.logAlphaFe
#// For new metallicity commands lburns
#// For logHeFe: (lburns)
if (logHeFe < -1.0):
logHeFe = -1.0;
# logHeFeStr = "-1.0";
if (logHeFe > 1.0):
logHeFe = 1.0
# logHeFeStr = "1.0"
#// For logCO: (lburns)
if (logCO < -2.0):
logCO = -2.0
# logCOStr = "-2.0"
if (logCO > 2.0):
logCO = 2.0
# logCOStr = "2.0"
#// For logAlphaFe: (lburns)
if (logAlphaFe < -0.5):
logAlphaFe = -0.5
# logAlphaFeStr = "-0.5"
if (logAlphaFe > 0.5):
logAlphaFe = 0.5
# logAlphaFeStr = "0.5"
#// Argument 6: minimum ratio of monochromatic line center to background continuous
#// extinction for inclusion of linein spectrum
#lineThreshStr = args[5];
#lineThreshStr = "-3.0"; #//test
#lineThresh = float(lineThreshStr)
lineThresh = Input.lineThresh
if (lineThresh < -4.0):
lineThresh = -4.0
# lineThreshStr = "-4.0"
if (lineThresh > 6.0):
lineThresh = 6.0
# lineThreshStr = "6.0"
#// Argument 7: minimum ratio of monochromatic line center to background continuous
#voigtThresh = float(voigtThreshStr);
voigtThresh = Input.voigtThresh
if (voigtThresh < lineThresh):
voigtThresh = lineThresh
# voigtThreshStr = lineThreshStr
if (voigtThresh > 6.0):
voigtThresh = 6.0
# voigtThreshStr = "6.0"
#//User defined spetrum synthesis region:
lamUV = numpy.double(260.0);
lamIR = numpy.double(2600.0);
#// Argument 8: starting wavelength for spectrum synthesis
#lambdaStart = float(lambdaStartStr)
lambdaStart = Input.lambdaStart
if (lambdaStart < lamUV):
lambdaStart = lamUV
# lambdaStartStr = str(lamUV)
if (lambdaStart > lamIR - 1.0):
lambdaStart = lamIR - 1.0
# lambdaStartStr = str(lamIR - 1.0)
#// Argument 9: stopping wavelength for spectrum synthesis
#lambdaStop = float(lambdaStopStr)
lambdaStop = Input.lambdaStop
if (lambdaStop < lamUV + 1.0):
lambdaStop = lamUV + 1.0
# lambdaStartStr = str(lamUV + 1.0)
if (lambdaStop > lamIR):
lambdaStop = lamIR
# lambdaStartStr = str(lamIR)
#//Prevent negative or zero lambda range:
if (lambdaStop <= lambdaStart):
lambdaStop = lambdaStart + 0.5 #//0.5 nm = 5 A
# lambdaStopStr = str(lambdaStop)
"""
#//limit size of synthesis region (nm):
maxSynthRange = 5.0 #//set default to minimum value //nm
#//if we're not in the blue we can get away wth more:
if (lambdaStart > 350.0):
maxSynthRange = 10.0
if (lambdaStart > 550.0):
maxSynthRange = 20.0
if (lambdaStart > 700.0):
maxSynthRange = 50.0
if (lambdaStart > 1000.0):
maxSynthRange = 100.0
if (lambdaStart > 1600.0):
maxSynthRange = 200.0
#//console.log("maxSynthRange " + maxSynthRange + " lambdaStop " + lambdaStop);
if (lambdaStop > (lambdaStart+maxSynthRange)):
#//console.log("lambdaStop > (lambdaStart+maxSynthRange) condition");
lambdaStop = lambdaStart + maxSynthRange #//10 nm = 100 A
lambdaStopStr = str(lambdaStop)
#//console.log("lambdaStop " + lambdaStop);
"""
if (lambdaStop > lamIR):
#//console.log("lambdaStop > lamIR condition");
lambdaStop = lamIR
# lambdaStopStr = str(lamIR)
#//console.log("lambdaStop " + lambdaStop);
nm2cm = numpy.double(1.0e-7)
cm2nm = numpy.double(1.0e7)
lambdaStart = nm2cm * lambdaStart #//nm to cm
lambdaStop = nm2cm * lambdaStop #//nm to cm
lamUV = nm2cm * lamUV
lamIR = nm2cm * lamIR
#//argument 10: line sampling selection (fine or coarse)
#sampling = "fine"
sampling = Input.sampling
vacAir = Input.vacAir
#// Argument 11: Lorentzian line broadening enhancement
#logGammaCol = float(logGammaColStr)
logGammaCol = Input.logGammaCol
if (logGammaCol < 0.0):
logGammaCol = 0.0
# logGammaColStr = "0.0"
if (logGammaCol > 1.0):
logGammaCol = 1.0
# logGammaColStr = "1.0"
#// Argument 12: log_10 gray mass extinction fudge
#logKapFudge = float(logKapFudgeStr)
logKapFudge = Input.logKapFudge
if (logKapFudge < -2.0):
logKapFudge = -2.0
# logKapFudgeStr = "-2.0"
if (logKapFudge > 2.0):
logKapFudge = 2.0
# logKapFudgeStr = "2.0"
#// Argument 13: macroturbulent velocity broadening parameter (sigma) (km/s)
#macroV = float(macroVStr)
macroV = Input.macroV
#// Argument 14: surface equatorial linear rotational velocity (km/s)
#rotV = float(rotVStr)
rotV = Input.rotV
#// Argument 15: inclination of rotation axis wrt line-of-sight (degrees)
#rotI = float(rotIStr)
rotI = Input.rotI
#print("Before test rotI ", rotI, " rotIStr ", rotIStr)
#// Argument 16: number of outer HSE-EOS-Opac iterations
#nOuterIter = int(nOuterIterStr)
nOuterIter = Input.nOuterIter
#// Argument 17: number of inner Pe-IonFrac iterations
#nInnerIter = int(nInnerIterStr)
nInnerIter = Input.nInnerIter
#//Argument 18: If TiO JOLA bands should be included:
#ifTiO = int(ifTiOStr)
ifMols = Input.ifMols
if (macroV < 0.0):
macroV = 0.0
# macroVStr = "0.0"
if (macroV > 8.0):
macroV = 8.0
# macroVStr = "8.0"
if (rotV < 0.0):
rotV = 0.0
# rotVStr = "0.0"
if (rotV > 300.0):
rotV = 300.0
# rotVStr = "300.0"
if (rotI < 0.0):
rotI = 0.0
# rotIStr = "0.0"
if (rotI > 90.0):
rotI = 90.0
# rotIStr = "90.0"
if (nOuterIter < 1):
nOuterIter = 1
# nOuterIterStr = "1"
if (nOuterIter > 30):
nOuterIter = 30
# nOuterIterStr = "12"
if (nInnerIter < 1):
nInnerIter = 1
# nInnerIterStr = "1"
if (nInnerIter > 30):
nInnerIter = 30
# nInnerIterStr = "12"
#print("After test rotI ", rotI, " rotIStr ", rotIStr)
#//For rotation:
inclntn = math.pi * rotI / 180.0 #//degrees to radians
vsini = rotV * math.sin(inclntn)
#// Argument 19: wavelength of narrow Gaussian filter in nm
#diskLambda = float(diskLambdaStr) #//nm
diskLambda = Input.diskLambda
#// Argument 20: bandwidth, sigma, of narrow Gaussian filter in nm
#diskSigma = float(diskSigmaStr) #//nm
diskSigma = Input.diskSigma
#// Argument 21: radial velocity of star in km/s
#RV = float(RVStr) #//nm
RV = Input.RV
#// Argument 22: Spectrum synthesis wavelength scale options:
if (diskLambda < lamUV):
diskLambda = lamUV
# diskLambdaStr = str(lamUV)
if (diskLambda > lamIR):
diskLambda = lamIR
# diskLambdaStr = str(lamIR)
if (diskSigma < 0.005):
diskSigma = 0.005
# diskSigmaStr = "0.005";
if (diskSigma > 10.0):
diskSigma = 10.0
# diskSigmaStr = "10"
if (RV < -200.0):
RV = -200.0
# RVStr = "-200"
if (RV > 200.0):
RV = 200.0
# RVStr = "200"
#vacAir = "vacuum" #//test
#// Representative spectral line and associated atomic parameters
#//
"""
userLam0 = float(userLam0Str)
userA12 = float(userA12Str)
userLogF = float(userLogFStr)
userStage = float(userStageStr)
userChiI1 = float(userChiI1Str)
userChiI2 = float(userChiI2Str)
userChiI3 = float(userChiI3Str)
userChiI4 = float(userChiI4Str)
userChiL = float(userChiLStr)
userGw1 = float(userGw1Str)
userGw2 = float(userGw2Str)
userGw3 = float(userGw3Str)
userGw4 = float(userGw4Str)
userGwL = float(userGwLStr)
userMass = float(userMassStr)
userLogGammaCol = float(userGammaColStr)
"""
userLam0 = Input.userLam0
userA12 = Input.userA12
userLogF = Input.userLogF
userStage = Input.userStage
userChiI1 = Input.userChiI1
userChiI2 = Input.userChiI2
userChiI3 = Input.userChiI3
userChiI4 = Input.userChiI4
userChiL = Input.userChiL
userGw1 = Input.userGw1
userGw2 = Input.userGw2
userGw3 = Input.userGw3
userGw4 = Input.userGw4
userGwL = Input.userGwL
userMass = Input.userMass
userLogGammaCol = Input.userLogGammaCol
if (userLam0 < 260.0):
userLam0 = 260.0
# userLamStr = "260"
if (userLam0 > 2600.0):
userLam0 = 2600.0
# userLamStr = "2600"
if (userA12 < 2.0):
userA12 = 2.0
# userNStr = "2.0"
#//Upper limit set high to accomodate Helium!:
if (userA12 > 11.0):
userA12 = 11.0
# userNStr = "11.0"
if (userLogF < -6.0):
userLogF = -6.0
# userFStr = "-6.0"
if (userLogF > 1.0):
userLogF = 1.0
# userFStr = "1.0"
if ( (userStage != 0) and (userStage != 1) and (userStage != 2) and (userStage != 3) ):
userStage = 0
userStageStr = "I"
if (userChiI1 < 3.0):
userChiI1 = 3.0
# userIonStr = "3.0"
if (userChiI1 > 25.0):
userChiI1 = 25.0
# userIonStr = "25.0"
if (userChiI2 < 5.0):
userChiI2 = 5.0
# userIonStr = "5.0"
if (userChiI2 > 55.0):
userChiI2 = 55.0
# userIonStr = "55.0"
if (userChiI3 < 5.0):
userChiI3 = 5.0
# userIonStr = "5.0"
if (userChiI3 > 55.0):
userChiI3 = 55.0
# userIonStr = "55.0"
if (userChiI4 < 5.0):
userChiI4 = 5.0
# userIonStr = "5.0"
if (userChiI4 > 55.0):
userChiI4 = 55.0
# userIonStr = "55.0"
#// Note: Upper limit of chiL depends on value of chiI1 above!
if (userChiL < 0.0):
userChiL = 0.0 #// Ground state case!
# userExcStr = "0.0"
if ( (userStage == 0) and (userChiL >= userChiI1) ):
#//ionized = false;
userChiL = 0.9 * userChiI1
# userExcStr = userIonStr
if ( (userStage == 1) and (userChiL >= userChiI2) ):
#//ionized = false;
userChiL = 0.9 * userChiI2
# userExcStr = userIonStr
if ( (userStage == 2) and (userChiL >= userChiI3) ):
#//ionized = false;
userChiL = 0.9 * userChiI3
# userExcStr = userIonStr
if ( (userStage == 3) and (userChiL >= userChiI4) ):
#//ionized = false;
userChiL = 0.9 * userChiI4
# userExcStr = userIonStr
if (userGw1 < 1.0):
userGw1 = 1.0
# userWghtStr = "1"
if (userGw1 > 100.0):
userGw1 = 100.0
# userWghtStr = "100"
if (userGw2 < 1.0):
userGw2 = 1.0
# userWghtStr = "1";
if (userGw2 > 100.0):
userGw2 = 100.0
# userWghtStr = "100";
if (userGw3 < 1.0):
userGw3 = 1.0
# userWghtStr = "1"
if (userGw3 > 100.0):
userGw3 = 100.0
# userWghtStr = "100"
if (userGw4 < 1.0):
userGw4 = 1.0
# userWghtStr = "1"
if (userGw4 > 100.0):
userGw4 = 100.0
# userWghtStr = "100"
if (userGwL < 1.0):
userGwL = 1.0
# userLWghtStr = "1"
if (userGwL > 100.0):
userGwL = 100.0
# userLWghtStr = "100"
if (userMass < 1.0):
userMass = 1.0
# userMassStr = "1.0"
if (userMass > 200.0):
userMass = 200.0
# userMassStr = "200"
if (userLogGammaCol < 0.0):
userLogGammaCol = 0.0
# useLogGammaColStr = "0.0"
if (userLogGammaCol > 1.0):
userLogGammaCol = 1.0
# useLogGammaColStr = "1.0"
userLam0 = userLam0 * nm2cm #// line centre lambda from nm to cm
#
#Planetary transit light-curve modelling input:
# Oribital period is not a free parameter - it is set by the
# size of the orbit and the planet's mass by basic
#Kepler's 3rd law
#Sanity checks on inputs occur after stellar 'radius' defined below...
#For development - plantary transit light curve stuff
#TransLight(rotI, radius, rOrbit, rPlanet, cosTheta, phi):
#ifTransit = True
#rOrbit = 1.0 # AU
#rPlanet = 1.0 #Earth radii
#mPlanet = 1.0 #Earth masses
ifTransit = Input.ifTransit
rOrbit = Input.rOrbit # AU
rPlanet = Input.rPlanet #Earth radii
#mPlanet = Input.mPlanet #Earth masses (not needed (yet??))
#Upper limit ensures Kepler III is valid
#if (mPlanet > 0.1*massStar):
# mPlanet = 0.1*massStar
#
#if (mPlanet <= 0.0):
# mPlanet = 0.001*massStar
#stop
#Create output file
"""
#File for structure output:
strucStem = "Teff" + teffStr + "Logg" + loggStr + "Z" + logZStr + "M" + massStarStr+"xiT"+xiTStr + \
"HeFe" + logHeFeStr + "CO" + logCOStr + "AlfFe" + logAlphaFeStr + "v" + runVers
strucFile = "struc." + strucStem + ".out"
specFile = "spec." + strucStem + "L"+lambdaStartStr+"-"+lambdaStopStr+"xiT"+xiTStr+"LThr"+lineThreshStr+ \
"GamCol"+logGammaColStr+"Mac"+macroVStr+"Rot"+rotVStr+"-"+rotIStr+"RV"+RVStr + ".out"
sedFile = "sed." + strucStem + "L"+lambdaStartStr+"-"+lambdaStopStr+"xiT"+xiTStr+"lThr"+lineThreshStr+ \
"Mac" + macroVStr + "Rot"+rotVStr+"-"+rotIStr+"RV"+ RVStr + ".out"
ldcFile = "ldc." + strucStem + "L" + diskLambdaStr + "S" + diskSigmaStr + ".out"
lineFile = "line." + strucStem + "L0" + userLam0Str + ".out"
"""
#Echo input parameters *actually used* to console:
inputParamString = "Teff " + str(teff) + " logg " + str(logg) + " [Fe/H] " + str(log10ZScale) + " massStar " + \
str(massStar) + " xiT " + str(xiT) + " HeFe " + str(logHeFe) + " CO " + str(logCO) + " AlfFe " + str(logAlphaFe) + \
" lineThresh " + str(lineThresh) + " voigtThresh " + \
str(voigtThresh) + " lambda0 " + str(lambdaStart) + " lambda1 " + str(lambdaStop) + " logGamCol " + \
str(logGammaCol) + " logKapFudge " + str(logKapFudge) + " macroV " + str(macroV) + " rotV " + str(rotV) + \
" rotI " + str(rotI) + " RV " + str(RV) + " nInner " + str(nInnerIter) + " nOuter " + str(nOuterIter) + \
" ifMols " + str(ifMols) + " sampling " + sampling
print(inputParamString)
#stop
#// Wavelengths in Air :
# if ($("#air").is(":checked")) {
# vacAir = $("#air").val(); // radio
# }
#// Wavelengths in vacuum: (default)
# if ($("#vacuum").is(":checked")) {
# vacAir = $("#vacuum").val(); // radio
# }
#//
#// ************************
#//
#// OPACITY PROBLEM #1 - logFudgeTune: late-type star continuous opacity needs to be multiplied
#// by 10.0^0.5 = 3.0 for T_kin(tau~1) to fall around Teff and SED to look like B_lambda(Trad=Teff).
#// - related to Opacity problem #2 in LineKappa.lineKap() - ??
#//
logFudgeTune = 0.0
#//sigh - don't ask me - makes the Balmer lines show up around A0:
if (teff <= F0Vtemp):
logFudgeTune = 0.5
#logFudgeTune = 0.0
if (teff > F0Vtemp):
logFudgeTune = 0.0
logTotalFudge = logKapFudge + logFudgeTune
logE = math.log10(math.e) #// for debug output
logE10 = math.log(10.0) #//natural log of 10
tiny = numpy.double(1.0e-49)
logTiny = math.log(tiny)
#//Gray structure and Voigt line code code begins here:
#// Initial set-up:
#// optical depth grid
numDeps = 48
log10MinDepth = numpy.double(-6.0)
log10MaxDepth = numpy.double(2.0)
#//int numThetas = 10; #// Guess
#//wavelength grid (cm):
lamSetup = [ numpy.double(0.0) for i in range(3) ]
#for i in range(3):
# lamSetup.append(0.0)
lamSetup[0] = numpy.double(260.0) * nm2cm #// test Start wavelength, cm
#lamSetup[0] = numpy.double(100.0) * 1.0e-7; // test Start wavelength, cm
lamSetup[1] = numpy.double(2600.0) * nm2cm #// test End wavelength, cm
lamSetup[2] = numpy.double(250); #// test number of lambda
#//int numLams = (int) (( lamSetup[1] - lamSetup[0] ) / lamSetup[2]) + 1;
numLams = int(lamSetup[2])
#//CONTINUUM lambda scale (nm)
lambdaScale = LamGrid.lamgrid(numLams, lamSetup) #//cm
#// Solar parameters:
teffSun = 5778.0
loggSun = 4.44
gravSun = math.pow(10.0, loggSun)
log10ZScaleSun = 0.0
zScaleSun = math.exp(log10ZScaleSun)
#//Solar units:
massSun = 1.0
radiusSun = 1.0
#//double massStar = 1.0; //solar masses // test
logRadius = 0.5 * (math.log(massStar) + math.log(gravSun) - math.log(grav))
radius = math.exp(logRadius); #//solar radii
#//double radius = Math.sqrt(massStar * gravSun / grav); // solar radii
logLum = 2.0 * math.log(radius) + 4.0 * math.log(teff / teffSun)
bolLum = math.exp(logLum) #// L_Bol in solar luminosities
#//cgs units:
#rSun = 6.955e10 #// solar radii to cm
cgsRadius = radius * Useful.rSun()
omegaSini = (1.0e5 * vsini) / cgsRadius #// projected rotation rate in 1/sec
macroVkm = macroV * 1.0e5 #//km/s to cm/s
#//Composition by mass fraction - needed for opacity approximations
#// and interior structure
massX = 0.70 #//Hydrogen
massY = 0.28 #//Helium
massZSun = 0.02 #// "metals"
massZ = massZSun * zScale #//approximation
#For planetary transit light curve:
#Now we can do sanity checks on these inputs:
# Clamp the planet radius into (0, 0.1*R_star] and the orbital radius into
# [R_star, 100 AU] so the transit geometry stays physically sensible.
rPlanetSol = rPlanet * Useful.rEarth() / Useful.rSun() #Earth radii to solar radii
if (rPlanetSol > 0.1*radius):
    # Planet too large relative to star - cap at 10% of the stellar radius
    rPlanetSol = 0.1*radius
    rPlanet = rPlanetSol * Useful.rSun() / Useful.rEarth()
if (rPlanet <= 0.0):
    # Non-positive input - fall back to a tiny but non-zero planet
    rPlanetSol = 0.001*radius
    rPlanet = rPlanetSol * Useful.rSun() / Useful.rEarth()
rOrbitSol = rOrbit * Useful.AU2cm() / Useful.rSun()
if (rOrbitSol < radius):
    # Orbit cannot be inside the star
    rOrbitSol = radius
    rOrbit = rOrbitSol * Useful.rSun() / Useful.AU2cm()
if (rOrbit > 100.0):
    rOrbit = 100.0  # AU - hard upper limit
logMassStar = math.log(massStar) + Useful.logMSun() #MSun to g
#print("MassStar ", math.exp(logMassStar))
logROrbCm = math.log(rOrbit) + Useful.logAU2cm() #AU to cm
#print("ROrbCm ", math.exp(logROrbCm))
#linear velocity of planetary orbit from Kepler's 3rd law
#Assumes planet at same distance as stellar surface
# v^2 = G*M/r (circular orbit), computed in logs to avoid overflow
logVtransSq = Useful.logGConst() + logMassStar - logROrbCm
logVtrans = 0.5*logVtransSq
vTrans = math.exp(logVtrans) #cm/s approximately at star's surface
#print("vTrans ", vTrans)
#For period calculation only:
#angular velocity of planetary orbit from Kepler's 3rd law
# Omega^2 = G*M/r^3
logOmegaSq = Useful.logGConst() + logMassStar - 3*logROrbCm
logOmega = 0.5 * logOmegaSq # RAD/s
#print("Omega ", math.exp(logOmega))
#Orbital period - for interest: P = 2*pi/Omega
logPplanet = math.log(2.0) + math.log(math.pi) - logOmega
pPlanet = math.exp(logPplanet)
print("Planetary orbital period (s) ", pPlanet) # in s
#Establish ephemeris with zero epoch (phase = 0) at mid-transit
#time interval should be equal to or less than time taken for plane to
#move through its own diameter - time interval of ingress or egress
#ingressT = ( 2.0*rPlanet*Useful.rEarth() ) / vTrans
#print("Old ingressT ", ingressT)
#//double logNH = 17.0
#//
#////Detailed chemical composition:
#//Abundance table adapted from PHOENIX V. 15 input bash file
#// Grevesse Asplund et al 2010
#//Solar abundances:
#// c='abundances, Anders & Grevesse',
#--- Chemical composition tables ----------------------------------------------
# nelemAbnd elements, numStages ionization stages per element.
nelemAbnd = 41
numStages = 7
nome = [0 for i in range(nelemAbnd)]   # Kurucz element codes
eheu = [0.0 for i in range(nelemAbnd)] #log_10 "A_12" values
logAz = [0.0 for i in range(nelemAbnd)] #N_z/N_H for element z (natural log)
cname = ["" for i in range(nelemAbnd)] # element symbols
logNH = [0.0 for i in range(numDeps)]  # log total H number density per depth
#double[][] logNz = new double[nelemAbnd][numDeps]; //N_z for element z
#logNz *normally* holds total population of that element over all ionization stages
logNz = [ [ 0.0 for i in range(numDeps) ] for j in range(nelemAbnd) ] #N_z for element z
#double[][][] masterStagePops = new double[nelemAbnd][numStages][numDeps];
# masterStagePops[element][ionization stage][depth] - log populations
masterStagePops = [ [ [ 0.0 for i in range(numDeps) ] for j in range(numStages) ] for k in range(nelemAbnd) ]
#//nome is the Kurucz code - in case it's ever useful
nome[0]= 100
nome[1]= 200
nome[2]= 300
nome[3]= 400
nome[4]= 500
nome[5]= 600
nome[6]= 700
nome[7]= 800
nome[8]= 900
nome[9]= 1000
nome[10]= 1100
nome[11]= 1200
nome[12]= 1300
nome[13]= 1400
nome[14]= 1500
nome[15]= 1600
nome[16]= 1700
nome[17]= 1800
nome[18]= 1900
nome[19]= 2000
nome[20]= 2100
nome[21]= 2200
nome[22]= 2300
nome[23]= 2400
nome[24]= 2500
nome[25]= 2600
nome[26]= 2700
nome[27]= 2800
nome[28]= 2900
nome[29]= 3000
nome[30]= 3100
# NOTE: from index 31 on the codes are NOT in atomic-number order (see cname)
nome[31]= 3600
nome[32]= 3700
nome[33]= 3800
nome[34]= 3900
nome[35]= 4000
nome[36]= 4100
nome[37]= 5600
nome[38]= 5700
nome[39]= 5500
nome[40]= 3200
"""
#//log_10 "A_12" values:
eheu[0]= 12.00
eheu[1]= 10.93
eheu[2]= 1.05
eheu[3]= 1.38
eheu[4]= 2.70
eheu[5]= 8.43
eheu[6]= 7.83
eheu[7]= 8.69
eheu[8]= 4.56
eheu[9]= 7.93
eheu[10]= 6.24
eheu[11]= 7.60
eheu[12]= 6.45
eheu[13]= 7.51
eheu[14]= 5.41
eheu[15]= 7.12
eheu[16]= 5.50
eheu[17]= 6.40
eheu[18]= 5.03
eheu[19]= 6.34
eheu[20]= 3.15
eheu[21]= 4.95
eheu[22]= 3.93
eheu[23]= 5.64
eheu[24]= 5.43
eheu[25]= 7.50
eheu[26]= 4.99
eheu[27]= 6.22
eheu[28]= 4.19
eheu[29]= 4.56
"""
#Reset 1st 30 element abundances to Phoenix values for
#comparison of GAS package molecular equilibrium with PPRESS
# (the disabled string above holds the original Grevesse/Asplund values)
eheu[0]= 12.00
eheu[1]= 10.99
eheu[2]= 1.16
eheu[3]= 1.15
eheu[4]= 2.60
eheu[5]= 8.55
eheu[6]= 7.97
eheu[7]= 8.87
eheu[8]= 4.56
eheu[9]= 8.08
eheu[10]= 6.33
eheu[11]= 7.58
eheu[12]= 6.47
eheu[13]= 7.55
eheu[14]= 5.45
eheu[15]= 7.21
eheu[16]= 5.50
eheu[17]= 6.52
eheu[18]= 5.12
eheu[19]= 6.36
eheu[20]= 3.17
eheu[21]= 5.02
eheu[22]= 4.00
eheu[23]= 5.67
eheu[24]= 5.39
eheu[25]= 7.50
eheu[26]= 4.92
eheu[27]= 6.25
eheu[28]= 4.21
eheu[29]= 4.60
# Remainder are original CSPy values as of Jan 2020
eheu[30]= 3.04
eheu[31]= 3.25
eheu[32]= 2.52
eheu[33]= 2.87
eheu[34]= 2.21
eheu[35]= 2.58
eheu[36]= 1.46
eheu[37]= 2.18
eheu[38]= 1.10
eheu[39]= 1.12
eheu[40]= 3.65 #// Ge - out of sequence
# Element symbols, index-aligned with nome[] and eheu[]:
cname[0]="H";
cname[1]="He";
cname[2]="Li";
cname[3]="Be";
cname[4]="B";
cname[5]="C";
cname[6]="N";
cname[7]="O";
cname[8]="F";
cname[9]="Ne";
cname[10]="Na";
cname[11]="Mg";
cname[12]="Al";
cname[13]="Si";
cname[14]="P";
cname[15]="S";
cname[16]="Cl";
cname[17]="Ar";
cname[18]="K";
cname[19]="Ca";
cname[20]="Sc";
cname[21]="Ti";
cname[22]="V";
cname[23]="Cr";
cname[24]="Mn";
cname[25]="Fe";
cname[26]="Co";
cname[27]="Ni";
cname[28]="Cu";
cname[29]="Zn";
cname[30]="Ga";
cname[31]="Kr";
cname[32]="Rb";
cname[33]="Sr";
cname[34]="Y";
cname[35]="Zr";
cname[36]="Nb";
cname[37]="Ba";
cname[38]="La";
cname[39]="Cs";
cname[40]="Ge";
#--- Load Phil Bennett's GAS chemical-equilibrium package data ----------------
CSGsRead2.gsread(cname, eheu)
gsNspec = CSGsRead2.nspec  # total number of species (atoms + ions + molecules)
gsName = CSGsRead2.name
#GAS composition should be corrected to CSPy values at this point:
gsComp = CSGsRead2.comp
# Number of atomic elements in GAS package:
gsNumEls = len(gsComp)
#Array of pointers FROM CSPy elements TO GAS elements
#CAUTION: elements are not contiguous in GAS' species array (are
# NOT the first gsNumEls entries!)
#Default value of -1 means CSPy element NOT in GAS package
csp2gas = [-1 for i in range(nelemAbnd)]
csp2gasIon1 = [-1 for i in range(nelemAbnd)]  # singly ionized stage ("X+")
csp2gasIon2 = [-1 for i in range(nelemAbnd)]  # doubly ionized stage ("X++")
#gas2csp = [-1 for i in range(gsNspec)]
# Match CSPy element symbols against GAS species names; ions are identified
# by the "+"/"++" suffix convention in the GAS name list.
for i in range(nelemAbnd):
    for j in range(gsNspec):
        #print("i ", i, " j ", j, " cname ", cname[i], " gsName ", gsName[j]);
        #Captures neutral stages only in gsName[]
        if (cname[i].strip() == gsName[j].strip()):
            csp2gas[i] = j
        if (cname[i].strip()+"+" == gsName[j].strip()):
            csp2gasIon1[i] = j
        if (cname[i].strip()+"++" == gsName[j].strip()):
            csp2gasIon2[i] = j
#for i in range(gsNspec):
#    for j in range(nelemAbnd):
#        if (gsName[i].strip() == cname[j].strip()):
#            gas2csp[i] = j
#print("csp2gas ", csp2gas)
gsLogk = CSGsRead2.logk
gsFirstMol = -1 # index of 1st molecular species in Gas' species list
# Atomic species have gsLogk[0] == 0; the first non-zero entry marks the
# start of the molecular block, so gsFirstMol == i at the break.
for i in range(gsNspec):
    gsFirstMol+=1
    if (gsLogk[0][i] != 0.0):
        break
# Number of molecular species in GAS package:
gsNumMols = gsNspec - gsFirstMol
# Number of ionic species in GAS package:
gsNumIons = gsNspec - gsNumEls - gsNumMols
#print("gsNspec ", gsNspec, " gsFirstMol ", gsFirstMol, " gsNumMols ",
#gsNumMols, " gsNumIon ", gsNumIons)
#//Set up for molecules with JOLA bands:
# JOLA (just-overlapping-line-approximation) molecular bands are only added
# for cool stars (teff <= jolaTeff); currently all seven systems are TiO.
jolaTeff = 5000.0
numJola = 7 #//for now
#//int numJola = 1; //for now
jolaSpecies = ["" for i in range(numJola)] #molecule name
jolaSystem = ["" for i in range(numJola)] #band system
jolaDeltaLambda = [0 for i in range(numJola)]
jolaWhichF = ["" for i in range(numJola)] #which oscillator-strength source to use
if (teff <= jolaTeff):
    jolaSpecies[0] = "TiO" #// molecule name
    jolaSystem[0] = "TiO_C3Delta_X3Delta" #//band system
    jolaWhichF[0] = "Jorgensen"
    #jolaDeltaLambda[0] = 0
    jolaSpecies[1] = "TiO" #// molecule name
    jolaSystem[1] = "TiO_c1Phi_a1Delta" #//band system
    jolaWhichF[1] = "Jorgensen"
    #jolaDeltaLambda[1] = 1
    jolaSpecies[2] = "TiO" #// molecule name
    jolaSystem[2] = "TiO_A3Phi_X3Delta" #//band system
    jolaWhichF[2] = "Jorgensen"
    #jolaDeltaLambda[2] = 1
    jolaSpecies[3] = "TiO" #// molecule name
    jolaSystem[3] = "TiO_B3Pi_X3Delta" #//band system
    jolaWhichF[3] = "Jorgensen"
    jolaSpecies[4] = "TiO" #// molecule name
    jolaSystem[4] = "TiO_E3Pi_X3Delta" #//band system
    jolaWhichF[4] = "Jorgensen"
    jolaSpecies[5] = "TiO" #// molecule name
    jolaSystem[5] = "TiO_b1Pi_a1Delta" #//band system
    jolaWhichF[5] = "Jorgensen"
    jolaSpecies[6] = "TiO" #// molecule name
    jolaSystem[6] = "TiO_b1Pi_d1Sigma" #//band system
    jolaWhichF[6] = "Jorgensen"
    #"G-band" at 4300 A - MK classification diagnostic:
    #Needs Allen's approach to getting f
    #jolaSpecies[7] = "CH" #// molecule name
    #jolaSystem[7] = "CH_A2Delta_X2Pi" #//band system
    #jolaWhichF[7] = "Allen"
#--- Apply non-solar abundance-ratio options to the A_12 values ---------------
ATot = 0.0
thisAz = 0.0
eheuScale = 0.0
#// Set value of eheuScale for new metallicity options. 06/17 lburns
if (logHeFe != 0.0):
    # [He/Fe] enhancement: He is eheu[1]
    eheu[1] = eheu[1] + logHeFe
if (logAlphaFe != 0.0):
    # [alpha/Fe] enhancement: O, Ne, Mg, Si, S, Ar, Ca, Ti
    eheu[7] = eheu[7] + logAlphaFe
    eheu[9] = eheu[9] + logAlphaFe
    eheu[11] = eheu[11] + logAlphaFe
    eheu[13] = eheu[13] + logAlphaFe
    eheu[15] = eheu[15] + logAlphaFe
    eheu[17] = eheu[17] + logAlphaFe
    eheu[19] = eheu[19] + logAlphaFe
    eheu[21] = eheu[21] + logAlphaFe
# [C/O] ratio option: positive values boost C, negative values boost O
if (logCO > 0.0):
    eheu[5] = eheu[5] + logCO
    #//console.log("logCO " + logCO);
if (logCO < 0.0):
    # BUG FIX: was math.abs(logCO) - the math module has no abs();
    # the builtin abs() is the correct call and avoids an AttributeError.
    eheu[7] = eheu[7] + abs(logCO)
    #//console.log("logCO " + logCO);
#//console.log("logCO " + logCO);
#for i in range(nelemAbnd):
#    eheuScale = eheu[i] #//default initialization //still base 10
#    if (i > 1): #//if not H or He
#        eheuScale = eheu[i] + log10ZScale #//still base 10
#
#    #//logAz[i] = logE10 * (eheu[i] - 12.0); //natural log
#    logAz[i] = logE10 * (eheuScale - 12.0) #//natural log
#    thisAz = math.exp(logAz[i])
#    ATot = ATot + thisAz;
#//System.out.println("i " + i + " logAz " + logE*logAz[i]);
# Convert A_12 abundances (log10, H = 12) to natural-log number fractions
# relative to H, applying the global metallicity scaling to metals only.
#H and He do NOT get re-scaled with metallicity parameter:
logAz[0:2] = [ logE10 * (x - 12.0) for x in eheu[0:2] ]
#Everything else does:
logAz[2:] = [ logE10 * (x + log10ZScale - 12.0) for x in eheu[2:] ]
#print("logAz ", [logE*x for x in logAz] )
# ATot = sum over z of N_z/N_H - normalization for number fractions
expAz = [ math.exp(x) for x in logAz ]
ATot = sum(expAz)
logATot = math.log(ATot) #//natural log
#print("logATot ", logATot)
"""//Apr 2016: Replace the following initial guesses with the following PSEUDOCODE:
//
// PHOENIX models at Teff0=5000 K, log(g0)=4.5, M0=0.0 (linear "zscl" = 10.0^M)
// Teff0=10000 K, log(g0)=4.0, M0=0.0 (linear "zscl" = 10.0^M)
// --> Tk0(tau), Pe0(tau), Pg0(tau)
//
//From Gray 3rd Ed. Ch.9, esp p. 189, 196
// 1) Tk(tau)=Teff/Teff0*tk0(tau)
// 2) Pg(tau)=(g/g0)^exp * Pg0(tau); exp = 0.64(bottom) - 0.54(top) for "cool" models
// exp = 0.85(bottom) - 0.53(top) for "hotter" models
// Pg(tau)= zscl^-0.333*Pg0(tau) if metals neutral - cooler models
// Pg(tau)= zscl^-0.5*Pg0(tau) if metals ionized - hotter models
// Pg(tau) = {(1+4A_He)/(1+4A_He0)}^2/3 * Pg0(tau)
// 3) Pe(tau)=(g/g0)^exp * Pe0(tau); exp = 0.33(bottom) - 0.48(top) for "cool" models
// exp = 0.82(bottom) - 0.53(top) for "hotter" models
// Pe(tau)=exp(omega*Teff)/exp(omega*Teff0)* Pe0(tau), Teff < 10000 K
// - omega = 0.0015@log(tau)=1.0 & 0.0012@log(tau)=-1 to -3
// Pe(tau)= zscl^+0.333*Pe0(tau) if metals neutral - cooler models
// Pe(tau)= zscl^+0.5*Pe0(tau) if metals ionized - hotter models
// Pe(tau) = {(1+4A_He)/(1+4A_He0)}^1/3 * Pe0(tau)"""
#//
#// END Initial guess for Sun section:
#//
#//Rescaled kinetic temperature structure:
#//double F0Vtemp = 7300.0; // Teff of F0 V star (K)
tauRos = [ [numpy.double(0.0) for i in range(numDeps)] for j in range(2) ]
temp = [ [numpy.double(0.0) for i in range(numDeps)] for j in range(2) ]
guessPGas = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
guessPe = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
guessNe = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
kappaRos = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
kappa500 = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
pGas = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
newPe = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
pRad = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
rho = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
newNe = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
mmw = [ numpy.double(0.0) for i in range(numDeps) ]
depths = [ 0.0 for i in range(numDeps) ]
# Spectrum-synthesis mode: read a previously converged model from Restart and
# check it is consistent with the requested parameters (hard-exit on teff,
# logg, or metallicity mismatch; warn-only on the softer parameters).
# Otherwise (structure mode): build a first-guess structure by rescaling a
# PHOENIX reference model appropriate for the teff/logg regime.
if Input.specSynMode == True:
    #ensure self-consistency between parameters and model being read in:
    print(" ")
    print(" !!!!!!!!!! ALERT !!!!!!!!!!!!!!!!! ")
    print(" ")
    print("Spectrum synthesis mode - structure will NOT be re-converged")
    print(" ")
    print(" !!!!!!!!!! ALERT !!!!!!!!!!!!!!!!! ")
    print(" ")
    # Fatal mismatches - the restart model cannot be rescaled:
    if (Restart.teffRS != teff):
        print(" ")
        print(" !!!!!!!!!! BOOM !!!!!!!!!!!!!!!!! ")
        print(" ")
        print("Input.teff = ", teff, " BUT Restart.teff =", Restart.teffRS)
        print(" ")
        print(" Calling sys.exit() ")
        print(" ")
        print(" !!!!!!!!!! BOOM !!!!!!!!!!!!!!!!! ")
        print(" ")
        sys.exit()
    if (Restart.loggRS != logg):
        print(" ")
        print(" !!!!!!!!!! BOOM !!!!!!!!!!!!!!!!! ")
        print(" ")
        print("Input.logg = ", logg, " BUT Restart.logg =", Restart.loggRS)
        print(" ")
        print(" Calling sys.exit() ")
        print(" ")
        print(" !!!!!!!!!! BOOM !!!!!!!!!!!!!!!!! ")
        print(" ")
        sys.exit()
    if (Restart.log10ZScaleRS != log10ZScale):
        print(" ")
        print(" !!!!!!!!!! BOOM !!!!!!!!!!!!!!!!! ")
        print(" ")
        print("Input.log10ZScale = ", log10ZScale, " BUT Restart.log10ZScale =", Restart.log10ZScaleRS)
        print(" ")
        print(" Calling sys.exit() ")
        print(" ")
        print(" !!!!!!!!!! BOOM !!!!!!!!!!!!!!!!! ")
        print(" ")
        sys.exit()
    # Non-fatal mismatches - warn but continue with the restart values:
    if (Restart.massStarRS != massStar):
        print(" ")
        print(" !!!!!!!!!! WARNING !!!!!!!!!!!!!!!!! ")
        print(" ")
        print("Input.massStar = ", massStar, " BUT Restart.massStar =", Restart.massStarRS)
        print(" ")
        print(" !!!!!!!!!! Warning !!!!!!!!!!!!!!!!! ")
        print(" ")
    if (Restart.logKapFudgeRS != logKapFudge):
        print(" ")
        print(" !!!!!!!!!! WARNING !!!!!!!!!!!!!!!!! ")
        print(" ")
        print("Input.logKapFudge = ", logKapFudge, " BUT Restart.logKapFudge =", Restart.logKapFudgeRS)
        print(" ")
        print(" !!!!!!!!!! Warning !!!!!!!!!!!!!!!!! ")
        print(" ")
    if (Restart.logHeFeRS != logHeFe):
        print(" ")
        print(" !!!!!!!!!! WARNING !!!!!!!!!!!!!!!!! ")
        print(" ")
        print("Input.logHeFe = ", logHeFe, " BUT Restart.logHeFe =", Restart.logHeFeRS)
        print(" ")
        print(" !!!!!!!!!! Warning !!!!!!!!!!!!!!!!! ")
        print(" ")
    if (Restart.logCORS != logCO):
        print(" ")
        print(" !!!!!!!!!! WARNING !!!!!!!!!!!!!!!!! ")
        print(" ")
        print("Input.logCO = ", logCO, " BUT Restart.logCO =", Restart.logCORS)
        print(" ")
        print(" !!!!!!!!!! Warning !!!!!!!!!!!!!!!!! ")
        print(" ")
    if (Restart.logAlphaFeRS != logAlphaFe):
        print(" ")
        print(" !!!!!!!!!! WARNING !!!!!!!!!!!!!!!!! ")
        print(" ")
        print("Input.logAlphaFe = ", logAlphaFe, " BUT Restart.logAlphaFe =", Restart.logAlphaFeRS)
        print(" ")
        print(" !!!!!!!!!! Warning !!!!!!!!!!!!!!!!! ")
        print(" ")
    # Adopt the restart model's parameters and structure wholesale:
    teff = Restart.teffRS
    logg = Restart.loggRS
    log10ZScale = Restart.log10ZScaleRS
    massStar = Restart.massStarRS
    xiT = Restart.xiTRS
    logKapFudge = Restart.logKapFudgeRS
    logHeFe = Restart.logHeFeRS
    logCO = Restart.logCORS
    logAlphaFe = Restart.logAlphaFeRS
    tauRos[0] = [ numpy.double(x) for x in Restart.tauRosRS[0] ]
    tauRos[1] = [ numpy.double(x) for x in Restart.tauRosRS[1] ]
    temp[0] = [ numpy.double(x) for x in Restart.tempRS[0] ]
    temp[1] = [ numpy.double(x) for x in Restart.tempRS[1] ]
    pGas[0] = [ numpy.double(x) for x in Restart.pGasRS[0] ]
    pGas[1] = [ numpy.double(x) for x in Restart.pGasRS[1] ]
    newPe[0] = [ numpy.double(x) for x in Restart.peRS[0] ]
    newPe[1] = [ numpy.double(x) for x in Restart.peRS[1] ]
    # set up everything as in normal structure mode:
    guessPGas[0] = [ numpy.double(x) for x in Restart.pGasRS[0] ]
    guessPGas[1] = [ numpy.double(x) for x in Restart.pGasRS[1] ]
    guessPe[0] = [ numpy.double(x) for x in Restart.peRS[0] ]
    guessPe[1] = [ numpy.double(x) for x in Restart.peRS[1] ]
    # Electron number density from ideal gas law: log Ne = log Pe - log T - log k
    guessNe[1] = [newPe[1][iD] - temp[1][iD] - Useful.logK() for iD in range(numDeps)]
    guessNe[0] = [math.exp(guessNe[1][iD]) for iD in range(numDeps)]
    pRad[0] = [ numpy.double(x) for x in Restart.pRadRS[0] ]
    pRad[1] = [ numpy.double(x) for x in Restart.pRadRS[1] ]
    rho[0] = [ numpy.double(x) for x in Restart.rhoRS[0] ]
    rho[1] = [ numpy.double(x) for x in Restart.rhoRS[1] ]
    kappa500[0] = [ numpy.double(x) for x in Restart.kappa500RS[0] ]
    kappa500[1] = [ numpy.double(x) for x in Restart.kappa500RS[1] ]
    kappaRos[0] = [ numpy.double(x) for x in Restart.kappaRosRS[0] ]
    kappaRos[1] = [ numpy.double(x) for x in Restart.kappaRosRS[1] ]
    mmw = [ numpy.double(x) for x in Restart.mmwRS ]
    #We are reading in a converged model - minimal processing:
    nOuterIter = 1
    nInnerIter = 1
else:
    # Structure mode: build the tau grid and first-guess T, Pg, Pe, Ne by
    # rescaling the PHOENIX reference model for this teff/logg regime.
    tauRos = TauScale.tauScale(numDeps, log10MinDepth, log10MaxDepth)
    if (teff <= F0Vtemp):
        if (logg > 3.5):
            #//We're a cool dwarf! - rescale from Teff=5000 reference model!
            #print("cool star branch")
            temp = ScaleT5000.phxRefTemp(teff, numDeps, tauRos)
        else:
            #We're a cool giant - rescale from teff=4250, log(g) = 2.0 model
            temp = ScaleT4250g20.phxRefTemp(teff, numDeps, tauRos)
    elif (teff > F0Vtemp):
        #//We're a HOT star! - rescale from Teff=10000 reference model!
        temp = ScaleT10000.phxRefTemp(teff, numDeps, tauRos)
    #//Scaled from Phoenix solar model:
    #//double[][] guessKappa = new double[2][numDeps];
    if (teff <= F0Vtemp):
        if (logg > 3.5):
            #//We're a cool dwarf - rescale from Teff=5000 reference model!
            #// logAz[1] = log_e(N_He/N_H)
            guessPGas = ScaleT5000.phxRefPGas(grav, zScale, logAz[1], numDeps, tauRos)
            guessPe = ScaleT5000.phxRefPe(teff, grav, numDeps, tauRos, zScale, logAz[1])
            guessNe = ScaleT5000.phxRefNe(numDeps, temp, guessPe)
            #//Ne = ScaleSolar.phxSunNe(grav, numDeps, tauRos, temp, kappaScale);
            #//guessKappa = ScaleSolar.phxSunKappa(numDeps, tauRos, kappaScale);
        else:
            #We're a cool giant - rescale from teff=4250, log(g) = 2.0 model
            guessPGas = ScaleT4250g20.phxRefPGas(grav, zScale, logAz[1], numDeps, tauRos)
            guessPe = ScaleT4250g20.phxRefPe(teff, grav, numDeps, tauRos, zScale, logAz[1])
            guessNe = ScaleT4250g20.phxRefNe(numDeps, temp, guessPe)
    elif (teff > F0Vtemp):
        #//We're a HOT star!! - rescale from Teff=10000 reference model
        #// logAz[1] = log_e(N_He/N_H)
        guessPGas = ScaleT10000.phxRefPGas(grav, zScale, logAz[1], numDeps, tauRos)
        guessPe = ScaleT10000.phxRefPe(teff, grav, numDeps, tauRos, zScale, logAz[1])
        guessNe = ScaleT10000.phxRefNe(numDeps, temp, guessPe)
    #//logKapFudge = -1.5; //sigh - don't ask me - makes the Balmer lines show up around A0
    ##In every case - initialization:
    #newNe[0] = [ guessNe[0][i] for i in range(numDeps)]
    #newNe[1] = [ guessNe[1][i] for i in range(numDeps)]
#//Now do the same for the Sun, for reference:
tempSun = ScaleSolar.phxSunTemp(teffSun, numDeps, tauRos)
#//Now do the same for the Sun, for reference:
pGasSunGuess = ScaleSolar.phxSunPGas(gravSun, numDeps, tauRos)
NeSun = ScaleSolar.phxSunNe(gravSun, numDeps, tauRos, tempSun, zScaleSun)
kappaSun = ScaleSolar.phxSunKappa(numDeps, tauRos, zScaleSun)
mmwSun = State.mmwFn(numDeps, tempSun, zScaleSun)
rhoSun = State.massDensity(numDeps, tempSun, pGasSunGuess, mmwSun, zScaleSun)
# Enforce hydrostatic equilibrium on the solar guess structure:
pGasSun = Hydrostat.hydroFormalSoln(numDeps, gravSun, tauRos, kappaSun, tempSun, pGasSunGuess)
#Total population of element over all ionization stages:
logNz = State.getNz(numDeps, temp, guessPGas, guessPe, ATot, nelemAbnd, logAz)
# Hydrogen totals, used throughout the opacity calculations:
logNH = [ x for x in logNz[0] ]
#//set the initial guess H^+ number density to the e^- number density
masterStagePops[0][1] = [ numpy.double(x) for x in guessPe[1] ] #//iElem = 0: H; iStage = 1: II
#//Load the total no. density of each element into the neutral stage slots of the masterStagePops array as a first guess at "species B" neutral
#//populations for the molecular Saha eq. - Reasonable first guess at low temp where molecules form
# BUG FIX: the original wrote `masterStagePops[:][0][:] = ...`.  Because
# `masterStagePops[:]` is only a *shallow copy* of the outer list,
# `masterStagePops[:][0]` is the very same object as `masterStagePops[0]`,
# so the slice-assign replaced hydrogen's per-stage array with one row per
# element (wrong shape) and clobbered the H II guess set just above.
# The explicit per-element loop below implements the stated intent:
# masterStagePops[iElem][0][iD] = logNz[iElem][iD] for every element.
for iElem in range(nelemAbnd):
    masterStagePops[iElem][0] = [ logNz[iElem][j] for j in range(numDeps) ]
warning = "";
if (teff < F0Vtemp):
#//warning = "<span style='color:red'><em>T</em><sub>eff</sub> < 6000 K <br />Cool star mode";
warning = "Cool star mode"
print(warning)
else:
#//warning = "<span style='color:blue'><em>T</em><sub>eff</sub> > 6000 K <br />Hot star mode</span>";
warning = "Hot star mode"
print(warning)
#//Add subclass to each spectral class (lburns)
spectralClass = " "
subClass = " " #//Create a variable for the subclass of the star. lburns
luminClass = "V" #//defaults to V
#//Determine the spectralClass and subClass of main sequence stars, subdwarfs and white dwarfs
#//var luminClass = "V" or luminClass = "VI" or luminClass = "WD"
#// Based on the data in Appendix G of An Introduction to Modern Astrophysics, 2nd Ed. by
#// Carroll & Ostlie
if ((logg >= 4.0) and (logg <= 6.0)):
    if (teff < 3000.0):
        spectralClass = "L";
    elif ((teff >= 3000.0) and (teff < 3900.0)):
        spectralClass = "M";
        if ((teff >= 3000.0) and (teff <= 3030.0)):
            subClass = "6";
        elif ((teff > 3030.0) and (teff <= 3170.0)):
            subClass = "5";
        elif ((teff > 3170.0) and (teff <= 3290.0)):
            subClass = "4";
        elif ((teff > 3290.0) and (teff <= 3400.0)):
            subClass = "3";
        elif ((teff > 3400.0) and (teff <= 3520.0)):
            subClass = "2";
        elif ((teff > 3520.0) and (teff <= 3660.0)):
            subClass = "1";
        elif ((teff > 3660.0) and (teff < 3900.0)):
            subClass = "0";
    elif ((teff >= 3900.0) and (teff < 5200.0)):
        spectralClass = "K";
        if ((teff >= 3900.0) and (teff <= 4150.0)):
            subClass = "7";
        elif ((teff > 4150.0) and (teff <= 4410.0)):
            subClass = "5";
        elif ((teff > 4410.0) and (teff <= 4540.0)):
            subClass = "4";
        elif ((teff > 4540.0) and (teff <= 4690.0)):
            subClass = "3";
        elif ((teff > 4690.0) and (teff <= 4990.0)):
            subClass = "1";
        elif ((teff > 4990.0) and (teff < 5200.0)):
            subClass = "0";
    elif ((teff >= 5200.0) and (teff < 5950.0)):
        spectralClass = "G";
        if ((teff >= 5200.0) and (teff <= 5310.0)):
            subClass = "8";
        elif ((teff > 5310.0) and (teff <= 5790.0)):
            subClass = "2";
        elif ((teff > 5790.0) and (teff < 5950.0)):
            subClass = "0";
    elif ((teff >= 5950.0) and (teff < 7300.0)):
        spectralClass = "F";
        if ((teff >= 5950.0) and (teff <= 6250.0)):
            subClass = "8";
        elif ((teff > 6250.0) and (teff <= 6650.0)):
            subClass = "5";
        elif ((teff > 6650.0) and (teff <= 7050.0)):
            subClass = "2";
        elif ((teff > 7050.0) and (teff < 7300.0)):
            subClass = "0";
    elif ((teff >= 7300.0) and (teff < 9800.0)):
        spectralClass = "A";
        if ((teff >= 7300.0) and (teff <= 7600.0)):
            subClass = "8";
        elif ((teff > 7600.0) and (teff <= 8190.0)):
            subClass = "5";
        elif ((teff > 8190.0) and (teff <= 9020.0)):
            subClass = "2";
        elif ((teff > 9020.0) and (teff <= 9400.0)):
            subClass = "1";
        elif ((teff > 9400.0) and (teff < 9800.0)):
            subClass = "0";
    elif ((teff >= 9800.0) and (teff < 30000.0)):
        spectralClass = "B";
        # NOTE(review): the 9300.0 lower bound below does not match the 9800.0
        # class boundary above (lowest reachable value here is 9800) - looks
        # like a transcription slip from the source table; harmless as written.
        if ((teff >= 9300.0) and (teff <= 10500.0)):
            subClass = "9";
        elif ((teff > 10500.0) and (teff <= 11400.0)):
            subClass = "8";
        elif ((teff > 11400.0) and (teff <= 12500.0)):
            subClass = "7";
        elif ((teff > 12500.0) and (teff <= 13700.0)):
            subClass = "6";
        elif ((teff > 13700.0) and (teff <= 15200.0)):
            subClass = "5";
        elif ((teff > 15200.0) and (teff <= 18800.0)):
            subClass = "3";
        elif ((teff > 18800.0) and (teff <= 20900.0)):
            subClass = "2";
        elif ((teff > 20900.0) and (teff <= 25400.0)):
            subClass = "1";
        elif ((teff > 25400.0) and (teff < 30000.0)):
            subClass = "0";
    elif (teff >= 30000.0):
        spectralClass = "O";
        if ((teff >= 30000.0) and (teff <= 35800.0)):
            subClass = "8";
        elif ((teff > 35800.0) and (teff <= 37500.0)):
            subClass = "7";
        elif ((teff > 37500.0) and (teff <= 39500.0)):
            subClass = "6";
        elif ((teff > 39500.0) and (teff <= 42000.0)):
            subClass = "5";
#//Determine the spectralClass and subClass of giants and subgiants. lburns
#//var luminClass = "III" or luminClass = "IV"
#//Determine the spectralClass and subClass of giants and subgiants. lburns
#//var luminClass = "III" or luminClass = "IV"
# Boundaries from Appendix G of Carroll & Ostlie (as for the dwarf chain above).
if ((logg >= 1.5) and (logg < 4.0)):
    if (teff < 3000.0):
        spectralClass = "L";
    elif ((teff >= 3000.0) and (teff < 3700.0)):
        spectralClass = "M";
        if ((teff >= 3000.0) and (teff <= 3330.0)):
            subClass = "6";
        elif ((teff > 3330.0) and (teff <= 3380.0)):
            # BUG FIX: original assigned to lowercase `subclass`, a typo that
            # silently created a new variable and left `subClass` stale for
            # giants with 3330 < teff <= 3380.
            subClass = "5";
        elif ((teff > 3380.0) and (teff <= 3440.0)):
            subClass = "4";
        elif ((teff > 3440.0) and (teff <= 3480.0)):
            subClass = "3";
        elif ((teff > 3480.0) and (teff <= 3540.0)):
            subClass = "2";
        elif ((teff > 3540.0) and (teff <= 3600.0)):
            subClass = "1";
        elif ((teff > 3600.0) and (teff < 3700.0)):
            subClass = "0";
    elif ((teff >= 3700.0) and (teff < 4700.0)):
        spectralClass = "K"
        if ((teff >= 3700.0) and (teff <= 3870.0)):
            subClass = "7";
        elif ((teff > 3870.0) and (teff <= 4050.0)):
            subClass = "5";
        elif ((teff > 4050.0) and (teff <= 4150.0)):
            subClass = "4";
        elif ((teff > 4150.0) and (teff <= 4260.0)):
            subClass = "3";
        elif ((teff > 4260.0) and (teff <= 4510.0)):
            subClass = "1";
        elif ((teff > 4510.0) and (teff < 4700.0)):
            subClass = "0";
    elif ((teff >= 4700.0) and (teff < 5500.0)):
        spectralClass = "G";
        if ((teff >= 4700.0) and (teff <= 4800.0)):
            subClass = "8";
        elif ((teff > 4800.0) and (teff <= 5300.0)):
            subClass = "2";
        elif ((teff > 5300.0) and (teff < 5500.0)):
            subClass = "0";
    elif ((teff >= 5500.0) and (teff < 7500.0)):
        spectralClass = "F";
        if ((teff >= 5500.0) and (teff <= 6410.0)):
            subClass = "5";
        elif ((teff > 6410.0) and (teff <= 7000.0)):
            subClass = "2";
        elif ((teff > 7000.0) and (teff < 7500.0)):
            subClass = "0";
    elif ((teff >= 7500.0) and (teff < 10300.0)):
        spectralClass = "A";
        if ((teff >= 7500.0) and (teff <= 7830.0)):
            subClass = "8";
        elif ((teff > 7830.0) and (teff <= 8550.0)):
            subClass = "5";
        elif ((teff > 8550.0) and (teff <= 9460.0)):
            subClass = "2";
        elif ((teff > 9460.0) and (teff <= 9820.0)):
            subClass = "1";
        elif ((teff > 9820.0) and (teff < 10300.0)):
            subClass = "0";
    elif ((teff >= 10300.0) and (teff < 29300.0)):
        spectralClass = "B";
        if ((teff >= 10300.0) and (teff <= 10900.0)):
            subClass = "9";
        elif ((teff > 10900.0) and (teff <= 11700.0)):
            subClass = "8";
        elif ((teff > 11700.0) and (teff <= 12700.0)):
            subClass = "7";
        elif ((teff > 12700.0) and (teff <= 13800.0)):
            subClass = "6";
        elif ((teff > 13800.0) and (teff <= 15100.0)):
            subClass = "5";
        elif ((teff > 15100.0) and (teff <= 18300.0)):
            subClass = "3";
        elif ((teff > 18300.0) and (teff <= 20200.0)):
            subClass = "2";
        elif ((teff > 20200.0) and (teff <= 24500.0)):
            subClass = "1";
        elif ((teff > 24500.0) and (teff < 29300.0)):
            subClass = "0";
    elif ((teff >= 29300.0) and (teff < 40000.0)):
        spectralClass = "O";
        if ((teff >= 29300.0) and (teff <= 35000.0)):
            subClass = "8";
        elif ((teff > 35000.0) and (teff <= 36500.0)):
            subClass = "7";
        elif ((teff > 36500.0) and (teff <= 37800.0)):
            subClass = "6";
        elif ((teff > 37800.0) and (teff < 40000.0)):
            subClass = "5";
#//Determine the spectralClass and subClass of supergiants and bright giants. lburns
#//var luminClass = "I" or luminClass = "II"
if ((logg >= 0.0) and (logg < 1.5)):
    if (teff < 2700.0):
        spectralClass = "L";
    elif ((teff >= 2700.0) and (teff < 3650.0)):
        spectralClass = "M";
        if ((teff >= 2700.0) and (teff <= 2710.0)):
            subClass = "6";
        elif ((teff > 2710.0) and (teff <= 2880.0)):
            subClass = "5";
        elif ((teff > 2880.0) and (teff <= 3060.0)):
            subClass = "4";
        elif ((teff > 3060.0) and (teff <= 3210.0)):
            subClass = "3";
        elif ((teff > 3210.0) and (teff <= 3370.0)):
            subClass = "2";
        elif ((teff > 3370.0) and (teff <= 3490.0)):
            subClass = "1";
        elif ((teff > 3490.0) and (teff < 3650.0)):
            subClass = "0";
    elif ((teff >= 3650.0) and (teff < 4600.0)):
        spectralClass = "K";
        if ((teff >= 3650.0) and (teff <= 3830.0)):
            subClass = "7";
        elif ((teff > 3830.0) and (teff <= 3990.0)):
            subClass = "5";
        elif ((teff > 3990.0) and (teff <= 4090.0)):
            subClass = "4";
        elif ((teff > 4090.0) and (teff <= 4190.0)):
            subClass = "3";
        elif ((teff > 4190.0) and (teff <= 4430.0)):
            subClass = "1";
        elif ((teff > 4430.0) and (teff < 4600.0)):
            subClass = "0";
    elif ((teff >= 4600.0) and (teff < 5500.0)):
        spectralClass = "G";
        if ((teff >= 4600.0) and (teff <= 4700.0)):
            subClass = "8";
        elif ((teff > 4700.0) and (teff <= 5190.0)):
            subClass = "2";
        elif ((teff > 5190.0) and (teff < 5500.0)):
            subClass = "0";
    elif ((teff >= 5500.0) and (teff < 7500.0)):
        spectralClass = "F";
        if ((teff >= 5500.0) and (teff <= 5750.0)):
            subClass = "8";
        elif ((teff > 5750.0) and (teff <= 6370.0)):
            subClass = "5";
        elif ((teff > 6370.0) and (teff <= 7030.0)):
            subClass = "2";
        elif ((teff > 7030.0) and (teff < 7500.0)):
            subClass = "0";
    elif ((teff >= 7500.0) and (teff < 10000.0)):
        spectralClass = "A";
        if ((teff >= 7500.0) and (teff <= 7910.0)):
            subClass = "8";
        elif ((teff > 7910.0) and (teff <= 8610.0)):
            subClass = "5";
        elif ((teff > 8610.0) and (teff <= 9380.0)):
            subClass = "2";
        elif ((teff > 9380.0) and (teff < 10000.0)):
            subClass = "0";
    elif ((teff >= 10000.0) and (teff < 27000.0)):
        spectralClass = "B";
        if ((teff >= 10000.0) and (teff <= 10500.0)):
            subClass = "9";
        elif ((teff > 10500.0) and (teff <= 11100.0)):
            subClass = "8";
        elif ((teff > 11100.0) and (teff <= 11800.0)):
            subClass = "7";
        elif ((teff > 11800.0) and (teff <= 12600.0)):
            subClass = "6";
        elif ((teff > 12600.0) and (teff <= 13600.0)):
            subClass = "5";
        elif ((teff > 13600.0) and (teff <= 16000.0)):
            subClass = "3";
        elif ((teff > 16000.0) and (teff <= 17600.0)):
            subClass = "2";
        elif ((teff > 17600.0) and (teff <= 21400.0)):
            subClass = "1";
        elif ((teff > 21400.0) and (teff < 27000.0)):
            subClass = "0";
    elif ((teff >= 27000.0) and (teff < 42000.0)):
        spectralClass = "O";
        if ((teff >= 27000.0) and (teff <= 34000.0)):
            subClass = "8";
        elif ((teff > 34000.0) and (teff <= 36200.0)):
            subClass = "7";
        elif ((teff > 36200.0) and (teff <= 38500.0)):
            subClass = "6";
        elif ((teff > 38500.0) and (teff < 42000.0)):
            subClass = "5";
#//Determine luminClass based on logg
if ((logg >= 0.5) and (logg < 1.0)):
    luminClass = "I";
elif ((logg >= 1.0) and (logg < 1.5)):
    luminClass = "II";
elif ((logg >= 1.5) and (logg < 3.0)):
    luminClass = "III";
elif ((logg >= 3.0) and (logg < 4.0)):
    luminClass = "IV";
elif ((logg >= 4.0) and (logg < 5.0)):
    luminClass = "V";
elif ((logg >= 5.0) and (logg < 6.0)):
    luminClass = "VI";
elif ((logg >= 5.0)):
    # NOTE(review): because the previous branch already captures [5.0, 6.0),
    # this branch only fires for logg >= 6.0 (white dwarfs) - presumably the
    # intended condition; confirm against the source table.
    luminClass = "WD";
spectralType = spectralClass + subClass + " " + luminClass
print("Spectral type: ", spectralType)
#Initial guess atmospheric structure output:
#Convert everything to log_10 OR re-scaled units for plotting, printing, etc:
log10e = math.log10(math.e)
#
#
#// END initial guess for Sun section
#
#
###################################################################
#
#
#
# Converge atmospheric structure
#
# - Includes *initial* ionization equilibrium *without* molecules (for now)
#
#
#
###################################################################
#log10mmw = [0.0 for i in range(numDeps)]
#//
#// *********************
#//Jul 2016: Replace the following procedure for model building with the following PSEUDOCODE:
#//
#// 1st guess Tk(tau), Pe(tau), Pg(tau) from rescaling reference hot or cool model above
#// 1) Converge Pg-Pe relation for Az abundance distribution and T_Kin(Tau)
#// assuming all free e^-s from single ionizations - *inner* convergence
#// 2) Get Ne, rho, mu from Phil Bennet's GAS apckage
#// 3) kappa(tau) from Gray Ch. 8 sources - H, He, and e^- oapcity sources only
#// 4) P_Tot(tau) from HSE on tau scale with kappa from 4)
#// - PRad(tau) from Tk(tau)
#// - New Pg(tau) from P_Tot(tau)-PRad(tau)
#// 5) Iterate Pg - kappa relation to convergence - *outer* convergence
#// 6)Get rho(tau) = Sigma_z(m_z*N_z(tau)) and mu(tau) = rho(tau) / N(tau)
#// and depth scale
#//
#// ** Atmospheric structure converged **
#//
#// THEN for spectrum synthesis:
#//
#// 1) number densities of absorpbers from partial pressures (pps) from Phil
# Bennett's GAS package)
#// 2) Temp correction??
#/ **** STOP **** No - do we *really* need N_HI, ... for kappa if we use rho in HSE? - Yes - needed even if kappa
#// is in cm^-1 instead of cm^2/g - sigh
species = " " #; //default initialization
# double rho[][] = new double[2][numDeps];
# double[][] tauOneStagePops = new double[nelemAbnd][numStages];
tauOneStagePops = [ [ numpy.double(0.0) for i in range(numStages) ] for j in range(nelemAbnd) ]
unity = numpy.double(1.0)
zScaleList = 1.0 #//initialization
numAtmPrtTmps = 5
numMolPrtTmps = 5
# double[][] log10UwAArr = new double[numStages][2];
log10UwAArr = [ [ 0.0 for k in range(numAtmPrtTmps) ] for j in range(numStages) ]
#for i in range(numStages):
# for k in range(len(log10UwAArr[0])):
# log10UwAArr[i][k] = 0.0 #//lburns default initialization - logarithmic
#//Ground state ionization E - Stage I (eV)
# double[] chiIArr = new double[numStages]
chiIArr = [ 999999.0 for i in range(numStages) ]
#// //Ground state ionization E - Stage II (eV)
#//
""" now GAS
#//For diatomic molecules:
speciesAB = " "
speciesA = " "
speciesB = " "
# double massA, massB, logMuAB;
"""
# double[][] masterMolPops = new double[nMols][numDeps];
#masterMolPops = [ [ -49.0 for i in range(numDeps) ] for j in range(nMols) ]
#Now with GAS:
#//initialize masterMolPops for mass density (rho) calculation:
masterMolPops = [ [ logTiny for i in range(numDeps) ] for j in range(gsNumMols) ]
# Overwrite the logTiny fill with the historical -49.0 default (both are
# "effectively zero" log populations; -49.0 matches the original CSPy value):
for i in range(gsNumMols):
    for j in range(numDeps):
        masterMolPops[i][j] = -49.0 #//these are logarithmic
Ng = [ numpy.double(0.0) for i in range(numDeps) ]  # total particle number density
#double logMmw;
# Monochromatic opacity arrays, [wavelength][depth]:
logKappa = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(numLams) ]
logKappaHHe = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(numLams) ]
logKappaMetalBF = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(numLams) ]
logKappaRayl = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(numLams) ]
newTemp = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
#//
#//
#//We converge the Pgas - Pe relation first under the assumption that all free e^-s are from single ionizations
#// a la David Gray Ch. 9.
#// This approach separates converging ionization fractions and Ne for spectrum synthesis purposes from
#// converging the Pgas-Pe-N_H-N_He relation for computing the mean opacity for HSE
#//
thisTemp = [ 0.0 for i in range(2) ]
log10UwUArr = [ 0.0 for i in range(numAtmPrtTmps) ]
log10UwLArr = [ 0.0 for i in range(numAtmPrtTmps) ]
#double chiI, peNumerator, peDenominator, logPhi, logPhiOverPe, logOnePlusPhiOverPe, logPeNumerTerm, logPeDenomTerm;
log300 = math.log(300.0)
log2 = math.log(2.0)
#GAS package parameters:
isolv = 1
tol = 1.0e-2
maxit = 10
#GAS package interface variables:
gsP0 = [0.0e0 for i in range(40)]
topP0 = [0.0e0 for i in range(40)]
gsPp = [0.0e0 for i in range(150)]
#For reporting purposes only:
log10MasterGsPp = [ [logTiny for iD in range(numDeps)] for iSpec in range(gsNspec) ]
#ppix = [0.0e0 for i in range(30)]
#a = [0.0e0 for i in range(625)]
#//Begin Pgas-kapp iteration
#Test:
GAStemp = 6000.0
#GAStemp = 100000.0
# ==== Outer HSE iteration: converge the Pgas <-> kappa(tau) relation ====
# Each pass: (1) chemical/ionization equilibrium (GAS package for cool
# stars, Gray-style Saha for hot stars), (2) continuum opacities,
# (3) hydrostatic equilibrium for a new Pgas(tau).
for pIter in range(nOuterIter):
    #//
    #Try making return value a tuple:
    # Cool stars: use Phil Bennett's GAS package depth-by-depth.
    if (teff <= GAStemp):
    #if (teff <= 100000.0): #test
        for iD in range(numDeps):
            #print("isolv ", isolv, " temp ", temp[0][iD], " guessPGas ", guessPGas[0][iD])
            # First-guess electron pressure and partial pressures at this depth:
            gasestReturn = CSGasEst.gasest(isolv, temp[0][iD], guessPGas[0][iD])
            gsPe0 = gasestReturn[0]
            gsP0 = gasestReturn[1]
            neq = gasestReturn[2]
            # Remember the guess from the 2nd depth point for later re-use:
            if (iD == 1):
                topP0 = [ (0.5 * gsP0[iSpec]) for iSpec in range(40) ]
            #Upper boundary causes problems:
            # After the first outer pass, seed the top depth from depth 1:
            if (pIter > 0 and iD == 0):
                gsPe0 = 0.5 * newPe[0][1]
                gsP0 = [ topP0[iSpec] for iSpec in range(40) ]
            #print("Calling GAS 1 iD ", iD, " temp ", temp[0][iD])
            #print("iD ", iD, " gsPe0 ", gsPe0, " gsP0[0] ", gsP0[0], " neq ", neq)
            # Converge chemical equilibrium at this depth:
            gasReturn = CSGas.gas(isolv, temp[0][iD], guessPGas[0][iD], gsPe0, gsP0, neq, tol, maxit)
            #a = gasReturn[0]
            #nit = gasReturn[1]
            gsPe = gasReturn[2]
            #pd = gasReturn[3]
            gsPp = gasReturn[4]
            #Can't pythonize this - gsPp padded at end with 0.0s
            #log10MasterGsPp[:][iD] = [math.log10(x) for x in gsPp]
            #for iSpec in range(gsNspec):
            #    log10MasterGsPp[iSpec][iD] = math.log10(gsPp[iSpec])
            #print("1: ", gsPp[0]/guessPGas[0][iD])
            #ppix = gasReturn[5]
            gsMu = gasReturn[6]
            gsRho = gasReturn[7]
            #print("iD ", iD, " gsPe ", gsPe, " gsPp[0] ", gsPp[0], " gsMu ", gsMu, " gsRho ", gsRho)
            # Store converged Pe, Ne, rho, and mean molecular weight;
            # index [0] is linear, [1] is log_e (file-wide convention):
            newPe[0][iD] = gsPe
            newPe[1][iD] = math.log(gsPe)
            newNe[0][iD] = gsPe / Useful.k() / temp[0][iD]
            newNe[1][iD] = math.log(newNe[0][iD])
            guessPe[0][iD] = newPe[0][iD]
            guessPe[1][iD] = newPe[1][iD]
            guessNe[0][iD] = newNe[0][iD]
            guessNe[1][iD] = newNe[1][iD]
            rho[0][iD] = gsRho
            rho[1][iD] = math.log(gsRho)
            mmw[iD] = gsMu * Useful.amu()
            #Take neutral stage populations for atomic species from GAS:
            for iElem in range(nelemAbnd):
                if (csp2gas[iElem] != -1):
                    #element is in GAS package:
                    #Neutral stage only:
                    thisN = gsPp[csp2gas[iElem]] / Useful.k() / temp[0][iD]
                    masterStagePops[iElem][0][iD] = math.log(thisN)
            #print("iD ", iD, cname[19], gsName[csp2gas[19]], " logNCaI ", logE*masterStagePops[19][0][iD])
            # Molecular number densities from GAS partial pressures:
            for i in range(gsNumMols):
                thisN = gsPp[i+gsFirstMol] / Useful.k() / temp[0][iD]
                masterMolPops[i][iD] = math.log(thisN)
            #Needed now GAS??
            for iA in range(nelemAbnd):
                if (csp2gas[iA] != -1):
                    #element is in GAS package:
                    #Captures neutral stage only
                    logNz[iA][iD] = math.log10(gsPp[csp2gas[iA]]) - Useful.logK() - temp[1][iD]
        # Ionization-stage populations for the first 26 elements (H..Fe),
        # anchored on the GAS-package neutral-stage populations:
        for iElem in range(26):
            species = cname[iElem] + "I"
            chiIArr[0] = IonizationEnergy.getIonE(species)
            #//The following is a vector of temperature-dependent partition fns, U,
            #// that are base e log_e U
            log10UwAArr[0] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "II"
            chiIArr[1] = IonizationEnergy.getIonE(species)
            log10UwAArr[1] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "III"
            chiIArr[2] = IonizationEnergy.getIonE(species)
            log10UwAArr[2] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "IV"
            chiIArr[3] = IonizationEnergy.getIonE(species)
            log10UwAArr[3] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "V"
            chiIArr[4] = IonizationEnergy.getIonE(species)
            log10UwAArr[4] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "VI"
            chiIArr[5] = IonizationEnergy.getIonE(species)
            log10UwAArr[5] = PartitionFn.getPartFn2(species) #//base e log_e U
            #//double logN = (eheu[iElem] - 12.0) + logNH;
            #Neeed? Now GAS:
            logNums = LevelPopsGasServer.stagePops3(masterStagePops[iElem][0], guessNe, chiIArr, log10UwAArr, \
            #thisNumMols, logNumBArr, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
                               numDeps, temp)
            #for iStage in range(numStages):
            #    for iTau in range(numDeps):
            #
            #        masterStagePops[iElem][iStage][iTau] = logNums[iStage][iTau]
            #masterStagePops[iElem][:][:] = [ [logNums[iStage][iTau] for iTau in range(numDeps)] for iStage in range(numStages) ]
            masterStagePops[iElem][:] = [x for x in logNums[:]]
    # Hot stars: Gray 3rd Ed. Eq. 9.8 - all free e^- from single ionizations.
    if (teff > GAStemp): #teff > FoVtemp:
    #// Converge Pg-Pe relation starting from intital guesses at Pg and Pe
    #// - assumes all free electrons are from single ionizations
    #// - David Gray 3rd Ed. Eq. 9.8:
        #print("guessPe[1] ", [logE*x for x in guessPe[1]] )
        for neIter in range(nInnerIter):
            for iD in range(numDeps):
                #//System.out.println("iD logE*newPe[1][iD] logE*guessPe[1] logE*guessPGas[1]");
                #//re-initialize accumulators:
                thisTemp[0] = temp[0][iD]
                thisTemp[1] = temp[1][iD]
                peNumerator = 0.0
                peDenominator = 0.0
                # Accumulate the Gray Eq. 9.8 sums over all elements:
                for iElem in range(nelemAbnd):
                    species = cname[iElem] + "I"
                    chiI = IonizationEnergy.getIonE(species)
                    #//The following is a vector of temperature-dependent partition fns, U,
                    #// that are base e log_e U
                    log10UwLArr = PartitionFn.getPartFn2(species) #//base e log_e U
                    species = cname[iElem] + "II"
                    log10UwUArr = PartitionFn.getPartFn2(species) #//base e log_e U
                    # Right-hand side of the Saha equation (log Phi):
                    logPhi = LevelPopsGasServer.sahaRHS(chiI, log10UwUArr, log10UwLArr, thisTemp)
                    #if (iD%10 == 0):
                    #    print("iD ", iD, " iElem ", iElem, " logPhi ", logE*logPhi)
                    logPhiOverPe = logPhi - guessPe[1][iD]
                    logOnePlusPhiOverPe = math.log(1.0 + math.exp(logPhiOverPe))
                    logPeNumerTerm = logAz[iElem] + logPhiOverPe - logOnePlusPhiOverPe
                    peNumerator = peNumerator + math.exp(logPeNumerTerm)
                    logPeDenomTerm = logAz[iElem] + math.log(1.0 + math.exp(logPeNumerTerm))
                    peDenominator = peDenominator + math.exp(logPeDenomTerm)
                    #if (iD%10 == 0):
                    #    print("iD ", iD, " iElem ", iElem, " peNum ", peNumerator, " peDen ", peDenominator)
                #print("peNum ", math.log10(peNumerator), " peDen ", math.log10(peDenominator))
                #//iElem chemical element loop
                newPe[1][iD] = guessPGas[1][iD] + math.log(peNumerator) - math.log(peDenominator)
                newPe[0][iD] = math.exp(newPe[1][iD])
                guessPe[1][iD] = newPe[1][iD]
                guessPe[0][iD] = math.exp(guessPe[1][iD])
            # Ideal gas law: Ne = Pe / (k T), in log form:
            newNe[1] = [newPe[1][iD] - temp[1][iD] - Useful.logK() for iD in range(numDeps)]
            newNe[0] = [math.exp(newNe[1][iD]) for iD in range(numDeps)]
            #guessNe[1][:] = [newNe[1][iD] for iD in range(numDeps)]
            #guessNe[0][:] = [newNe[0][iD] for iD in range(numDeps)]
            guessNe[1][:] = [ x for x in newNe[1][:] ]
            guessNe[0][:] = [ x for x in newNe[0][:] ]
        #print("newPe ", [logE*x for x in newPe[1]])
        #print("guessNe ", [logE*x for x in guessNe[1]])
        #print("iD ", iD, " logT ", logE*temp[1][iD], " logNe ", logE*newNe[1][iD], " logRho ", logE*rho[1][iD], " mmw ", logE*math.log(mmw[iD]*Useful.amu()) )
        #if (teff > 100000.0): #test
        # Per-element number densities from the converged Pe:
        logNz = State.getNz(numDeps, temp, guessPGas, guessPe, ATot, nelemAbnd, logAz)
        #for i in range(numDeps):
        #    logNH[i] = logNz[0][i]
        #logNH[:] = [ logNz[0][i] for i in range(numDeps) ]
        logNH = [ x for x in logNz[0] ]
        zScaleList = 1.0 #//initialization
        # Ionization-stage populations for H..Fe via pure Saha:
        for iElem in range(26):
            species = cname[iElem] + "I"
            chiIArr[0] = IonizationEnergy.getIonE(species)
            #//The following is a vector of temperature-dependent partition fns, U,
            #// that are base e log_e U
            log10UwAArr[0] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "II"
            chiIArr[1] = IonizationEnergy.getIonE(species)
            log10UwAArr[1] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "III"
            chiIArr[2] = IonizationEnergy.getIonE(species)
            log10UwAArr[2] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "IV"
            chiIArr[3] = IonizationEnergy.getIonE(species)
            log10UwAArr[3] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "V"
            chiIArr[4] = IonizationEnergy.getIonE(species)
            log10UwAArr[4] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "VI"
            chiIArr[5] = IonizationEnergy.getIonE(species)
            log10UwAArr[5] = PartitionFn.getPartFn2(species) #//base e log_e U
            logNums = LevelPopsGasServer.stagePops(logNz[iElem], guessNe, chiIArr, log10UwAArr,\
                               numDeps, temp)
            #for iStage in range(numStages):
            #    for iTau in range(numDeps):
            #
            #        masterStagePops[iElem][iStage][iTau] = logNums[iStage][iTau]
            #masterStagePops[iElem][:][:] = [ [logNums[iStage][iTau] for iTau in range(numDeps)] for iStage in range(numStages) ]
            masterStagePops[iElem][:] = [x for x in logNums[:]]
        #print("logNz[0] ", [logE*x for x in logNz[0]])
        #print("masterStagePops[0][0] ", [logE*x for x in masterStagePops[0][0]])
        #//double logN = (eheu[iElem] - 12.0) + logNH;
        #//Get mass density from chemical composition:
        rho = State.massDensity2(numDeps, nelemAbnd, logNz, cname)
        #//Total number density of gas particles: nuclear species + free electrons:
        #//AND
        # //Compute mean molecular weight, mmw ("mu"):
        #for i in range(numDeps):
        #    Ng[i] = newNe[0][i] #//initialize accumulation with Ne
        #Ng[:] = [ newNe[0][i] for i in range(numDeps) ]
        Ng[:] = [ x for x in newNe[0] ]
        #Seems like this can't be "de-looped" without resorting to cryptic black boxes in python, like zip()
        for i in range(numDeps):
            for j in range(nelemAbnd):
                Ng[i] = Ng[i] + math.exp(logNz[j][i]) #//initialize accumulation
            #logMmw = rho[1][i] - math.log(Ng[i]) # // in g
            #mmw[i] = math.exp(logMmw)
        # Mean molecular weight mu = rho / Ng (grams):
        mmw = [ rho[1][i] - math.log(Ng[i]) for i in range(numDeps) ]
        mmw = [ math.exp(x) for x in mmw ]
    #//
    #//Refine the number densities of the chemical elements at all depths
    #logNz = State.getNz(numDeps, temp, guessPGas, guessPe, ATot, nelemAbnd, logAz)
    #for i in range(numDeps):
    #    logNH[i] = logNz[0][i]
    #logNH[:] = [ logNz[0][i] for i in range(numDeps) ]
    #Needed now GAS??
    #logNH = [ x for x in logNz[0] ]
    #//System.out.println("i " + i + " logNH[i] " + logE*logNH[i]);
    #//
    #// Compute ionization fractions of H & He for kappa calculation
    #//
    #// Default inializations:
    #//these 2-element temperature-dependent partition fns are logarithmic
    #//
    #////H & He only for now... we only compute H, He, and e^- opacity sources:
    # //for (int iElem = 0; iElem < 2; iElem++){
    #//H to Fe only for now... we only compute opacity sources for elements up to Fe:
    #for iD in range(numDeps):
        #print("CSGPy: iD ", iD, cname[0], " logNCaI ", logE*masterStagePops[0][0][iD])
        #print("Ne ", newPe[0][iD], " logNe ", newPe[1][iD])
    #//save ion stage populations at tau = 1:
    #//iTau loop
    #//iStage loop
    #//iElem loop
    #//Get mass density from chemical composition:
    #Needed? Now Gas package
    #rho = State.massDensity2(numDeps, nelemAbnd, logNz, cname)
    #Needed? Now Gas package
    #//Total number density of gas particles: nuclear species + free electrons:
    #//AND
    # //Compute mean molecular weight, mmw ("mu"):
    #for i in range(numDeps):
    #    Ng[i] = newNe[0][i] #//initialize accumulation with Ne
    #Ng[:] = [ newNe[0][i] for i in range(numDeps) ]
    #Ng[:] = [ x for x in newNe[0] ]
    #Seems like this can't be "de-looped" without resorting to cryptic black boxes in python, like zip()
    #for i in range(numDeps):
    #    for j in range(nelemAbnd):
    #        Ng[i] = Ng[i] + math.exp(logNz[j][i]) #//initialize accumulation
        #logMmw = rho[1][i] - math.log(Ng[i]) # // in g
        #mmw[i] = math.exp(logMmw)
    #mmw = [ rho[1][i] - math.log(Ng[i]) for i in range(numDeps) ]
    #mmw = [ math.exp(x) for x in mmw ]
    #//H & He only for now... we only compute H, He, and e^- opacity sources:
    logKappaHHe = Kappas.kappas2(numDeps, newPe, zScale, temp, rho, \
                     numLams, lambdaScale, logAz[1], \
                     masterStagePops[0][0], masterStagePops[0][1], \
                     masterStagePops[1][0], masterStagePops[1][1], newNe, \
                     teff, logTotalFudge)
    #//Add in metal b-f opacity from adapted Moog routines:
    logKappaMetalBF = KappasMetal.masterMetal(numDeps, numLams, temp, lambdaScale, masterStagePops)
    #//Add in Rayleigh scattering opacity from adapted Moog routines:
    logKappaRayl = KappasRaylGas.masterRayl(numDeps, numLams, temp, lambdaScale, masterStagePops, gsName, gsFirstMol, masterMolPops)
    #print("logKappaHHe ", [logKappaHHe[:][36]])
    #print("logKappaMetalBF ", [logKappaMetalBF[:][36]])
    #print("logKappaRayl ", [logKappaRayl[:][36]])
    #for i in range(numLams):
    #    print("logKappaHHe " , logE*logKappaHHe[i][36]);
    #for i in range(numLams):
    #    print("logKappaMetalBF " , logE*logKappaMetalBF[i][36]);
    #for i in range(numLams):
    #    print("logKappaRayl " , logE*logKappaRayl[i][36]);
    #//Convert metal b-f & Rayleigh scattering opacities to cm^2/g and sum up total opacities
    #double logKapMetalBF, logKapRayl, kapContTot;
    #for iL in range(numLams):
    #    for iD in range(numDeps):
    #        logKapMetalBF = logKappaMetalBF[iL][iD] - rho[1][iD]
    #        logKapRayl = logKappaRayl[iL][iD] - rho[1][iD]
    #        kapContTot = math.exp(logKappaHHe[iL][iD]) + math.exp(logKapMetalBF) + math.exp(logKapRayl)
    #        logKappa[iL][iD] = math.log(kapContTot)
    # Total continuum opacity (cm^2/g): HHe already mass opacity; metal b-f
    # and Rayleigh divided by rho to convert from cm^-1:
    logKappa = [ [ math.exp(logKappaHHe[iL][iD]) + \
                   math.exp(logKappaMetalBF[iL][iD] - rho[1][iD]) +\
                   math.exp(logKappaRayl[iL][iD] - rho[1][iD]) for iD in range(numDeps)] for iL in range(numLams) ]
    logKappa = [ [math.log(logKappa[iL][iD]) for iD in range(numDeps)] for iL in range(numLams) ]
    kappaRos = Kappas.kapRos(numDeps, numLams, lambdaScale, logKappa, temp);
    #//Extract the "kappa_500" monochromatic continuum opacity scale
    #// - this means we'll try interpreting the prescribed tau grid (still called "tauRos")as the "tau500" scale
    it500 = ToolBox.lamPoint(numLams, lambdaScale, 500.0e-7)
    #for i in range(numDeps):
    #    kappa500[1][i] = logKappa[it500][i]
    #    kappa500[0][i] = math.exp(kappa500[1][i])
    kappa500[1] = [ x for x in logKappa[it500] ]
    kappa500[0] = [ math.exp(x) for x in logKappa[it500] ]
    # Hydrostatic equilibrium on the tau grid gives total P; radiation
    # pressure is subtracted implicitly via the guess update below:
    pGas = Hydrostat.hydroFormalSoln(numDeps, grav, tauRos, kappaRos, temp, guessPGas)
    pRad = Hydrostat.radPress(numDeps, temp)
    #//Update Pgas guess for iteration:
    #for i in range(numDeps):
    #// Now we can update guessPGas:
    #    guessPGas[0][i] = pGas[0][i]
    #    guessPGas[1][i] = pGas[1][i]
    #    log10pgas[i] = log10e * pGas[1][i]
    #    log10pe[i] = log10e * (newNe[1][i] + Useful.logK() + temp[1][i])
    #    pe[i] = newNe[1][i] + Useful.logK() + temp[1][i]
    #    log10prad[i] = log10e * pRad[1][i]
    #    log10ne[i] = log10e * newNe[1][i]
    guessPGas[0] = [ x for x in pGas[0] ]
    guessPGas[1] = [ x for x in pGas[1] ]
    log10pgas = [ log10e * x for x in pGas[1] ]
    log10pe = [ log10e * (newNe[1][i] + Useful.logK() + temp[1][i]) for i in range(numDeps) ]
    pe = [ newNe[1][i] + Useful.logK() + temp[1][i] for i in range(numDeps) ]
    log10prad = [ log10e * x for x in pRad[1] ]
    log10ne = [ log10e * x for x in newNe[1] ]
    #Uncomment this block to inspect iteration-by-iteration convergence
    #Graphically inspect convergence: Issue 'matplotlib qt5' in console before running code
    thisClr = palette[pIter%numClrs]
    #plt.plot(log10tauRos, log10pgas, color=thisClr)
    #plt.plot(log10tauRos, log10pgas, color=thisClr)
    #plt.plot(log10tauRos, log10pe, color=thisClr, linestyle='--')
    #plt.plot(tauRos[1][:], newNe[1][:], thisClr)
    #print("logKappa ", logKappa[:][36])
#//end Pgas-kappa iteration, nOuter
#Save as encapsulated postscript (eps) for LaTex
#plt.savefig('PConverge.eps', format='eps', dpi=1000)
#//diagnostic
#// int tauKapPnt01 = ToolBox.tauPoint(numDeps, tauRos, 0.01);
#// System.out.println("logTauRos " + logE*tauRos[1][tauKapPnt01] + " temp " + temp[0][tauKapPnt01] + " pGas " + logE*pGas[1][tauKapPnt01]);
#// System.out.println("tau " + " temp " + " logPgas " + " logPe " + " logRho ");
#// for (int iD = 1; iD < numDeps; iD+=5){
#// System.out.println(" " + tauRos[0][iD] + " " + temp[0][iD] + " " + logE*pGas[1][iD] + " " + logE*newPe[1][iD] + " " + logE*rho[1][iD]);
#// }
#// for (int iL = 0; iL < numLams; iL++){
#// //System.out.println(" " + lambdaScale[iL] + " " + logE*logKappa[iL][tauKapPnt01]);
#// System.out.println(" " + lambdaScale[iL]);
#// for (int iD = 1; iD < numDeps; iD+=5){
#// System.out.println(" " + logE*(logKappa[iL][iD]+rho[1][iD])); //cm^-1
#// }
#// }
# //int tauKapPnt1 = ToolBox.tauPoint(numDeps, tauRos, 1.0);
# //System.out.println("logTauRos " + logE*tauRos[1][tauKapPnt1] + " temp " + temp[0][tauKapPnt1] + " pGas " + logE*pGas[1][tauKapPnt1]);
# //for (int iL = 0; iL < numLams; iL++){
# // //System.out.println(" " + lambdaScale[iL] + " " + logE*logKappa[iL][tauKapPnt1]);
# //}
# // Then construct geometric depth scale from tau, kappa and rho
#for iD in range(numDeps):
#    print("2 : ", (10.0**log10MasterGsPp[0][iD])/pGas[0][iD])
# Geometric depth scale (cm) from the converged tau/kappa/rho structure:
depths = DepthScale.depthScale(numDeps, tauRos, kappaRos, rho)
# Temperature-correction and convection are currently disabled:
ifTcorr = False
ifConvec = False
#//int numTCorr = 10; //test
numTCorr = 0
# NOTE(review): with numTCorr = 0 this loop body never executes; it is
# retained as scaffolding for a future temperature-correction scheme.
for i in range(numTCorr):
    #//newTemp = TCorr.tCorr(numDeps, tauRos, temp);
    #Not yet newTemp = MulGrayTCorr.mgTCorr(numDeps, teff, tauRos, temp, rho, kappaRos)
    #//newTemp = MulGrayTCorr.mgTCorr(numDeps, teff, tauRos, temp, rho, kappa500);
    #for iTau in range(numDeps):
    #    temp[1][iTau] = newTemp[1][iTau]
    #    temp[0][iTau] = newTemp[0][iTau]
    temp[1] = [ x for x in newTemp[1] ]
    temp[0] = [ x for x in newTemp[0] ]
"""/*
//Convection:
// Teff below which stars are convective.
// - has to be finessed because Convec.convec() does not work well :-(
double convTeff = 6500.0;
double[][] convTemp = new double[2][numDeps];
if (teff < convTeff) {
    convTemp = Convec.convec(numDeps, tauRos, depths, temp, press, rho, kappaRos, kappaSun, zScale, teff, logg);
    for (int iTau = 0; iTau < numDeps; iTau++) {
        temp[1][iTau] = convTemp[1][iTau];
        temp[0][iTau] = convTemp[0][iTau];
    }
}
*/"""
#if ((ifTcorr == True) or (ifConvec == True)):
    #//Recall hydrostat with updates temps
    #//Recall state withupdated Press
    #//recall kappas withupdates rhos
    #//Recall depths with re-updated kappas
###################################################
#
#
#
# Re-converge Ionization/chemical equilibrium WITH molecules
#
#
#
####################################################
#//
#// Now that the atmospheric structure is settled:
#// Separately converge the Ne-ionization-fractions-molecular equilibrium for
#// all elements and populate the ionization stages of all the species for spectrum synthesis:
#//
#//stuff to save ion stage pops at tau=1:
# Depth index closest to tau = 1:
iTauOne = ToolBox.tauPoint(numDeps, tauRos, unity)
#//
#// Default inializations:
zScaleList = 1.0 #/initialization
#//these 2-element temperature-dependent partition fns are logarithmic
#//Default initialization:
#for i in range(numAssocMols):
#    for j in range(numDeps):
#        logNumBArr[i][j] = -49.0
#
#    for k in range(numAtmPrtTmps):
#        log10UwBArr[i][k] = 0.0 #// default initialization lburns
#
#
#    dissEArr[i] = 29.0 #//eV
#    for kk in range(numMolPrtTmps):
#        logQwABArr[i][kk] = math.log(300.0)
#
#    logMuABArr[i] = math.log(2.0) + Useful.logAmu() #//g
#    mname_ptr[i] = 0
#    specB_ptr[i] = 0
#Iterations needed? Now ga?
#for neIter2 in range(nInnerIter):
#Final run through Phil's GAS EOS/Chemic equil. for consistency with last HSE call above:
# Cool stars: one last GAS-package pass so the populations used for
# spectrum synthesis are consistent with the final HSE structure.
if (teff <= GAStemp):
    for iD in range(numDeps):
        #print("isolv ", isolv, " temp ", temp[0][iD], " guessPGas ", guessPGas[0][iD])
        gasestReturn = CSGasEst.gasest(isolv, temp[0][iD], guessPGas[0][iD])
        gsPe0 = gasestReturn[0]
        gsP0 = gasestReturn[1]
        neq = gasestReturn[2]
        #print("iD ", iD, " gsPe0 ", gsPe0, " gsP0 ", gsP0, " neq ", neq)
        gasReturn = CSGas.gas(isolv, temp[0][iD], guessPGas[0][iD], gsPe0, gsP0, neq, tol, maxit)
        #a = gasReturn[0]
        #nit = gasReturn[1]
        gsPe = gasReturn[2]
        #pd = gasReturn[3]
        gsPp = gasReturn[4]
        #Can't pythonize this - gsPp padded at end with 0.0s
        #log10MasterGsPp[:][iD] = [math.log10(x) for x in gsPp]
        # Record all partial pressures (log10) for the PP report:
        for iSpec in range(gsNspec):
            log10MasterGsPp[iSpec][iD] = math.log10(gsPp[iSpec])
        #print("1: ", gsPp[0]/guessPGas[0][iD])
        #ppix = gasReturn[5]
        gsMu = gasReturn[6]
        gsRho = gasReturn[7]
        # Store converged Pe/Ne/rho/mmw; [0] linear, [1] log_e:
        newPe[0][iD] = gsPe
        newPe[1][iD] = math.log(gsPe)
        newNe[0][iD] = gsPe / Useful.k() / temp[0][iD]
        newNe[1][iD] = math.log(newNe[0][iD])
        guessPe[0][iD] = newPe[0][iD]
        guessPe[1][iD] = newPe[1][iD]
        rho[0][iD] = gsRho
        rho[1][iD] = math.log(gsRho)
        mmw[iD] = gsMu * Useful.amu()
        #print("iD ", iD, " logT ", logE*temp[1][iD], " logNe ", logE*newNe[1][iD], " logRho ", logE*rho[1][iD], " mmw ", logE*math.log(mmw[iD]*Useful.amu()) )
        #Take neutral stage populations for atomic species from GAS:
        for iElem in range(nelemAbnd):
            if (csp2gas[iElem] != -1):
                #element is in GAS package:
                #neutral stage only
                thisN = gsPp[csp2gas[iElem]] / Useful.k() / temp[0][iD]
                masterStagePops[iElem][0][iD] = math.log(thisN)
        #print("iD ", iD, cname[19], gsName[csp2gas[19]], " logNCaI ", logE*masterStagePops[19][0][iD])
        for i in range(gsNumMols):
            thisN = gsPp[i+gsFirstMol] / Useful.k() / temp[0][iD]
            masterMolPops[i][iD] = math.log(thisN)
        #Needed now GAS??
        for iA in range(nelemAbnd):
            if (csp2gas[iA] != -1):
                #element is in GAS package:
                #neutral stage only
                logNz[iA][iD] = math.log10(gsPp[csp2gas[iA]]) - Useful.logK() - temp[1][iD]
    #end iD loop
    #Catch species NOT in Phil's GAS Chem. Equil. package
    for iElem in range(nelemAbnd):
        if (csp2gas[iElem] == -1):
            species = cname[iElem] + "I"
            chiIArr[0] = IonizationEnergy.getIonE(species)
            #//The following is a vector of temperature-dependent partition fns, U,
            #// that are base e log_e U
            log10UwAArr[0] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "II"
            chiIArr[1] = IonizationEnergy.getIonE(species)
            log10UwAArr[1] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "III"
            chiIArr[2] = IonizationEnergy.getIonE(species)
            log10UwAArr[2] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "IV"
            chiIArr[3] = IonizationEnergy.getIonE(species)
            log10UwAArr[3]= PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "V"
            chiIArr[4] = IonizationEnergy.getIonE(species)
            log10UwAArr[4]= PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "VI"
            chiIArr[5] = IonizationEnergy.getIonE(species)
            log10UwAArr[5]= PartitionFn.getPartFn2(species) #//base e log_e U
            #Element NOT in GAS package - compute ionization equilibrium:
            logNums = LevelPopsGasServer.stagePops(logNz[iElem], guessNe, chiIArr, log10UwAArr, \
            #thisNumMols, logNumBArr, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
                  numDeps, temp);
            #for iStage in range(numStages):
            #    for iTau in range(numDeps):
            #        masterStagePops[iElem][iStage][iTau] = logNums[iStage][iTau]
            #        #//save ion stage populations at tau = 1:
            #    #} //iTau loop
            #    tauOneStagePops[iElem][iStage] = logNums[iStage][iTauOne]
            #} //iStage loop
            masterStagePops[iElem] = [ [ logNums[iStage][iTau] for iTau in range(numDeps) ] for iStage in range(numStages) ]
            tauOneStagePops[iElem] = [ logNums[iStage][iTauOne] for iStage in range(numStages) ]
#} //iElem loop
# Hot stars: final ionization equilibrium via pure Saha (no molecules),
# iterated with the Ne update until the inner loop count is exhausted.
if (teff > GAStemp):
    for neIter2 in range(nInnerIter):
        for iElem in range(nelemAbnd):
            species = cname[iElem] + "I"
            chiIArr[0] = IonizationEnergy.getIonE(species)
            #//The following is a vector of temperature-dependent partition fns, U,
            #// that are base e log_e U
            log10UwAArr[0] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "II"
            chiIArr[1] = IonizationEnergy.getIonE(species)
            log10UwAArr[1] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "III"
            chiIArr[2] = IonizationEnergy.getIonE(species)
            log10UwAArr[2] = PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "IV"
            chiIArr[3] = IonizationEnergy.getIonE(species)
            log10UwAArr[3]= PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "V"
            chiIArr[4] = IonizationEnergy.getIonE(species)
            log10UwAArr[4]= PartitionFn.getPartFn2(species) #//base e log_e U
            species = cname[iElem] + "VI"
            chiIArr[5] = IonizationEnergy.getIonE(species)
            log10UwAArr[5]= PartitionFn.getPartFn2(species) #//base e log_e U
            #} //end Ne - ionzation fraction -molecular equilibrium iteration neIter2
            logNums = LevelPopsGasServer.stagePops(logNz[iElem], guessNe, chiIArr, log10UwAArr, \
                  numDeps, temp);
            #for iStage in range(numStages):
            #    for iTau in range(numDeps):
            #        masterStagePops[iElem][iStage][iTau] = logNums[iStage][iTau]
            #        #//save ion stage populations at tau = 1:
            #    #} //iTau loop
            #    tauOneStagePops[iElem][iStage] = logNums[iStage][iTauOne]
            #} //iStage loop
            masterStagePops[iElem] = [ [ logNums[iStage][iTau] for iTau in range(numDeps) ] for iStage in range(numStages) ]
            tauOneStagePops[iElem] = [ logNums[iStage][iTauOne] for iStage in range(numStages) ]
            #Fill in in PP report:
            # Partial pressures (log10) for neutral and first two ions,
            # where a GAS-package slot exists for the species:
            if (csp2gas[iElem] != -1):
                log10MasterGsPp[csp2gas[iElem]] = [ logE*(logNums[0][iTau] + temp[1][iTau] + Useful.logK())\
                          for iTau in range(numDeps) ]
            if (csp2gasIon1[iElem] != -1):
                log10MasterGsPp[csp2gasIon1[iElem]] = [ logE*(logNums[1][iTau] + temp[1][iTau] + Useful.logK())\
                          for iTau in range(numDeps) ]
            if (csp2gasIon2[iElem] != -1):
                log10MasterGsPp[csp2gasIon2[iElem]] = [ logE*(logNums[2][iTau] + temp[1][iTau] + Useful.logK())\
                          for iTau in range(numDeps) ]
        log10UwA = [0.0 for i in range(numAtmPrtTmps)]
        newNe[0] = [ 0.0 for iTau in range(numDeps) ]
        #This is cumulative and not trivially pythonizable
        # Re-accumulate Ne from the ion populations: 1 e^- per first ion,
        # 2 e^- per second ion (higher stages neglected):
        for iTau in range(numDeps):
            for iElem in range(nelemAbnd):
                #//1 e^- per ion, #//2 e^- per ion
                newNe[0][iTau] = newNe[0][iTau] \
                    + math.exp(masterStagePops[iElem][1][iTau]) \
                    + 2.0 * math.exp(masterStagePops[iElem][2][iTau])
                #//+ 3.0 * Math.exp(masterStagePops[iElem][3][iTau]) #//3 e^- per ion
                #//+ 4.0 * Math.exp(masterStagePops[iElem][4][iTau]); #//3 e^- per ion
            #}
        #    newNe[1][iTau] = math.log(newNe[0][iTau])
        #    #// Update guess for iteration:
        #    guessNe[0][iTau] = newNe[0][iTau]
        #    guessNe[1][iTau] = newNe[1][iTau]
        #newNe[0] = [ [ newNe[0][iTau] \
        #        + math.exp(masterStagePops[iElem][1][iTau]) \
        #        + 2.0 * math.exp(masterStagePops[iElem][2][iTau]) \
        #        for iElem in range(nelemAbnd) ] for iTau in range(numDeps) ]
        newNe[1] = [ math.log(x) for x in newNe[0] ]
        guessNe[0] = [ x for x in newNe[0][:] ]
        guessNe[1] = [ x for x in newNe[1][:] ]
        log10pe = [ log10e * (newNe[1][i] + Useful.logK() + temp[1][i]) for i in range(numDeps) ]
        log10ne = [ log10e * x for x in newNe[1] ]
    #//
    #Some atmospheric structure output AGAIN after chemical equilibrium:
    #Convert everything to log_10 OR re-scaled units for plotting, printing, etc:
    #log10mmw = [0.0 for i in range(numDeps)]
    #for i in range(numDeps):
    #    log10pe[i] = log10e * (newNe[1][i] + Useful.logK() + temp[1][i])
    #    log10ne[i] = log10e * newNe[1][i]
    log10pe = [ log10e * (newNe[1][i] + Useful.logK() + temp[1][i]) for i in range(numDeps) ]
    log10ne = [ log10e * x for x in newNe[1] ]
# Create a restart module for use in spectrum synthesis mode:
# The converged vertical structure is printed as an importable python
# source file (a "restart" module) so a later run can skip convergence
# and do an almost pure spectrum-synthesis pass.
#outFile = outPath + strucFile
outFile = outPath + fileStem + "_restart.py"

def _writeRsVector(handle, lhs, vec, numPts):
    """Write the 1D list 'vec' as python source 'lhs = [...]'.

    One value per line prefix-joined with ', ', using escaped
    backslash-newline continuations, exactly matching the legacy
    restart-file format.
    """
    handle.write(lhs + " = [\\\n")  # continuation '\' must be escaped
    for i in range(numPts-1):
        handle.write(str(vec[i]) + ', ')
    handle.write(str(vec[numPts-1]) + '\\\n]\n')

def _writeRsPair(handle, name, arr, numPts):
    """Write a [2][numPts] (linear, log) structure: declaration + both rows."""
    handle.write(name + " = [ [ 0.0 for i in range(" + str(numPts) + ") ] for j in range(2) ]\n")
    _writeRsVector(handle, name + "[0]", arr[0], numPts)
    _writeRsVector(handle, name + "[1]", arr[1], numPts)

#with open(outFile, 'w', encoding='utf-8') as strucHandle:
with open(outFile, 'w') as restartHandle:
    # Header: the full input-parameter record as a comment line.
    headerString = "# " + inputParamString
    restartHandle.write(headerString + "\n")
    restartHandle.write("\n")
    # Scalar fundamental parameters:
    restartHandle.write("teffRS = " + str(teff) + " # K\n")
    restartHandle.write("loggRS = " + str(logg) + " #log (cm/^2)\n")
    restartHandle.write("log10ZScaleRS = " + str(log10ZScale) + "\n")
    restartHandle.write("massStarRS = " + str(massStar) + " # (M_Sun) \n")
    restartHandle.write("xiTRS = " + str(xiT) + " # (km/s) \n")
    restartHandle.write("logKapFudgeRS = " + str(logKapFudge) + "\n")
    restartHandle.write("logHeFeRS = " + str(logHeFe) + "\n")
    restartHandle.write("logCORS = " + str(logCO) + "\n")
    restartHandle.write("logAlphaFeRS = " + str(logAlphaFe) + "\n")
    restartHandle.write("\n")
    restartHandle.write("numDeps = " + str(numDeps) + "\n")
    restartHandle.write("\n")
    # Depth-dependent [linear, log] structure arrays:
    _writeRsPair(restartHandle, "tauRosRS", tauRos, numDeps)
    _writeRsPair(restartHandle, "tempRS", temp, numDeps)
    _writeRsPair(restartHandle, "pGasRS", pGas, numDeps)
    _writeRsPair(restartHandle, "peRS", newPe, numDeps)
    _writeRsPair(restartHandle, "pRadRS", pRad, numDeps)
    _writeRsPair(restartHandle, "rhoRS", rho, numDeps)
    _writeRsPair(restartHandle, "kappa500RS", kappa500, numDeps)
    _writeRsPair(restartHandle, "kappaRosRS", kappaRos, numDeps)
    # mmw is 1D (one value per depth).  The legacy writer first emitted a
    # bogus 2D "mmwRS = [ [ 0.0 ... ] for j in range(2) ]" declaration that
    # the very next assignment overwrote; that dead, misleading line is
    # dropped here - the final imported value of mmwRS is unchanged.
    _writeRsVector(restartHandle, "mmwRS", mmw, numDeps)
############################################################
#
# Surface radiation field
#
#  - flux distribution (SED)
#  - high resolution synthetic spectrum
#
###############################################################
# Angular quadrature grid for the emergent intensity integration.
# cosTheta is a 2 x numThetas array:
#   row 0: Gaussian-quadrature weights
#   row 1: cos(theta) abscissae
# The number of angles is fixed by Thetas.thetas() itself.
cosTheta = Thetas.thetas()
numThetas = len(cosTheta[0])
# Azimuthal (phi) grid for non-axisymmetric situations (spots,
# in-situ rotation, ...).  Geometry convention: phi = 0 points along
# the positive x-axis of a right-handed 2D Cartesian system in the
# plane of the sky, origin at the sub-stellar point, phi increasing CCW.
numPhiPerQuad = 6              # phi samples per quadrant of the unit circle
numPhi = 4 * numPhiPerQuad     # full 0 - 2*pi circle
numPhiD = numpy.double(numPhi)
phi = [numpy.double(0.0)] * numPhi   # placeholder; filled in below
# Uniform phi spacing over the whole range (0 - 2*pi radians):
delPhi = numpy.double(2.0 * math.pi / numPhiD)
phi = [numpy.double(k) * delPhi for k in range(numPhi)]
#Planetary transit quantities
#Angle of orbital axis wrt plane-of-sky
iPrime = numpy.double(90.0) - rotI
#Degrees to RAD
iPrimeRad = iPrime * math.pi / numpy.double(180.0)
#Right angle triangle with hypoteneuse = rOrbit
# angle = rotIRad,
# and opposite = planet's minimum impact parameters wrt substellar point
#impact parameter (minimum offset from substellar point)in AU
impct = rOrbit * math.sin(iPrimeRad)
#impact parameter in solar radii
impct = impct * Useful.AU2cm() / Useful.rSun()
if ( impct >= (radius-(2*rPlanetSol)) ):
#There is no eclipse (transit)
ifTransit = False
print("TRANSIT FALSE triggered:")
print("radius ", radius, " rPlanetSol ", rPlanetSol, " impct ", impct)
#print("rotI ", rotI, " iPrime ", iPrime, " iPrimeRad ", iPrimeRad,\
# " impct/radius ", impct/radius)
#thetaMinRad is also the minimum theta of the eclipse path chord, in RAD
#Initialization: defaults used when no transit occurs (ifTransit == False);
#the transit branch below overwrites all of these.
#Duration of ingress (and egress) from 1st contact to planetary mid-point contact
ingressT1 = 0.0
#Duration of ingress (and egress) from planetary mid-point contact to 2nd contact
ingressT2 = 0.0
#Number of theta annuli crossed by the transit chord.  This is a COUNT used
#below to size lists via range() (transit, transit2), so it must be an int:
#the previous float default (0.0) made range(numTransThetas) and
#range(2*numTransThetas + 4) raise TypeError whenever ifTransit was False.
numTransThetas = 0
thetaMinRad = 1.0
iFirstTheta = 0
contact1x = 0.0
contact2x = 0.0
contactMidx = 0.0
cosThetaMax = 0.0
midContAngle = 0.0
halfHelpAngle = 0.0
#log of occulted solid-angle fraction; logTiny = "effectively zero"
logOmegaLens = logTiny
#omegaLens = 0.0
if (ifTransit):
    #Transit geometry: contact positions along the transit chord, measured
    #from the point of minimum impact parameter, all in solar radii.
    #First contact position along cord, in solar radii:
    contact1x = math.sqrt( (radius + rPlanetSol)**2 - impct**2 )
    #Planetary mid-point contact
    contactMidx = math.sqrt(radius**2 - impct**2)
    #Second contact position along cord in solar radii:
    contact2x = math.sqrt( (radius - rPlanetSol)**2 - impct**2 )
    #ingressT = ( (contact1x - contact2x)*Useful.rSun() ) / vTrans
    #Convert the two ingress half-segments from solar radii to durations (s)
    #using the transit velocity vTrans:
    ingressT1 = ( (contact1x - contactMidx)*Useful.rSun() ) / vTrans
    ingressT2 = ( (contactMidx - contact2x)*Useful.rSun() ) / vTrans
    #print("New ingressT1 ", ingressT1, " ingressT2 ", ingressT2)
    #cos(theta) *decreases* with increasing theta in Quadrant I:
    thetaMinRad = math.asin(impct/radius)
    cosThetaMax = math.cos(thetaMinRad)
    #print(" thetaMinRad ", thetaMinRad,\
    #  " cosThetaMax ", cosThetaMax)
    #quantities for computing the blocking factor at planetary mid-point contact
    #Angle at planet's centre of lens-like occultation sector area:
    midContAngle = 2.0 * math.atan(radius/rPlanetSol)
    #Area of lens-shaped area occulted at planetary mid-point contact in solar-radii^2
    # - (2*angle/2*Pi) * Pi*rPlanet^2 = angle*rPlanet^2
    logOmegaLens = math.log(midContAngle) + 2.0*math.log(rPlanetSol)
    #As fraction of host star projected radius
    logOmegaLens = logOmegaLens - math.log(math.pi) - 2.0*math.log(radius)
    #print("midContAngle ", midContAngle, " logOmegaLens ", logOmegaLens)
    #omegaLens = math.exp(logOmegaLens)
#Count how many quadrature theta annuli lie on the transit chord:
#cosTheta[1] decreases (theta increases) with index, so advance i past every
#annulus with cos(theta) >= cosThetaMax.
i = 0
#ifFirst = False
#for i in range(numThetas):
#cosTheta[1] *decreases* (ie. theta increases) with increasing array number
if (ifTransit):
    #NOTE(review): '&' does not short-circuit, so cosTheta[1][i] is evaluated
    #before the i < numThetas guard; an IndexError is possible if every
    #annulus is transited - confirm this cannot happen in practice.
    while ( (cosTheta[1][i] >= cosThetaMax)
        & (i < numThetas) ):
    #print("In while loop: i ", i, " cosTheta[1] ", cosTheta[1][i])
    #if (ifFirst == False):
    #    iFirstTheta = i
    #    ifFirst = True
    # We are on the eclipse semi-chord:
        i+=1
    iFirstTheta = i
    numTransThetas = numThetas - i
#transit2 will hold the full (mirrored) light-curve time axis: two points per
#transited annulus plus four ingress/egress contact points.
#NOTE(review): if ifTransit is False, numTransThetas keeps its earlier
#default - that default must be an int for the range() calls below to work.
numTransThetas2 = (2*numTransThetas + 4)
#print("iFirstTheta ", iFirstTheta, " numTransThetas ", numTransThetas, " numTransThetas2 ", numTransThetas2)
transit = [0.0 for i in range(numTransThetas)]
transit2 = [0.0 for i in range(numTransThetas2)]
#Defaults for the no-transit case; overwritten below when ifTransit is True:
transDuration = 0.0
transTime0 = 0.0
transTime1 = 0.0
totalDuration = 0.0
deltaT = 0.0
numEpochs = 0
ephemT = [ 0.0 for x in range(numEpochs) ]
#Mandel & Agol (2002)'s "p" parameter
pMA = rPlanetSol / radius
# blocking factor should be projected planet area over that annulus area
#transit[][] is array of distances traveled, r, along semi-chord from position of
#minimum impact parameter, and corresponding theta values:
# 2D array of 2 x numThetas
#row 0 is log_e of ratio of projected planet area to area of annulus for each theta being transited
#row 1 is times corresponding to linear distance travelled along transit
# semi-path at surface of star in solar radii
#transit = [[numpy.double(0.0) for i in range(numTransThetas)] for j in range(2)] # Default
#Row 0 is logarithmic ratio of planet area to annulus area
# - set default value to log of neglible value:
#transit[0] = [logTiny for i in range(numThetas)]
print("ifTransit ", ifTransit)
if (ifTransit):
    #Half-transit crossing times per transited annulus (project helper):
    transit = TransitLightCurve2.transLight2(radius, cosTheta, vTrans, iFirstTheta, numTransThetas, impct)
    #print("numTransThetas ", numTransThetas)
    #reflect the half-transit profile and add the first and last points just before
    #ingress and just after egress
    #First half: mirrored (negative) times in reversed order:
    for i in range(numTransThetas):
        transit2[2+i] = -1.0 * transit[(numTransThetas-1)-i]
        #print("1st half: i ", i, " (numTransThetas-1)-i ", (numTransThetas-1)-i)
    #Second half: the original half-profile, in order:
    for i in range(numTransThetas):
        transit2[2+(numTransThetas+i)] = transit[i]
        #print("2nd half: i ", i)
    #Prepend/append the four contact times from the ingress/egress durations:
    transit2[1] = transit2[2] - ingressT2
    transit2[0] = transit2[1] - ingressT1
    transit2[numTransThetas2-2] = transit2[numTransThetas2-3] + ingressT2
    transit2[numTransThetas2-1] = transit2[numTransThetas2-2] + ingressT1
    transDuration = transit2[numTransThetas2-1] - transit2[0]
    #print("transit2[0] ", transit2[0], " transit2[numTransThetas2-1] ", transit2[numTransThetas2-1])
    #Pad the sampled interval by a quarter transit duration on each side:
    transTime0 = transit2[0] - transDuration/4
    transTime1 = transit2[numTransThetas2-1] + transDuration/4
    totalDuration = transTime1 - transTime0
    #print("transTime0 ", transTime0, " transTime1 ", transTime1)
    #print("transDuration ", transDuration, " totalDuration ", totalDuration)
    #numEpochs = 200
    #deltaT = transDuration / numEpochs
    #Make time sampling interval equal to the time of ingress/egress
    deltaT = min(ingressT1, ingressT2) #/ 2.0
    numEpochs = int(totalDuration / deltaT)
    #print("deltaT ", deltaT, " numEpochs ", numEpochs)
    #ephemeris in time units (s)
    ephemT = [ ((x*deltaT)+transTime0) for x in range(numEpochs) ]
#boolean lineMode;
#//
#// ************
#//
#// Spectrum synthesis section:
#// Set up continuum info:
isCool = 7300.0 #//Class A0
#//Set up opacity:
#// lambda break-points and gray levels:
#// No. multi-gray bins = num lambda breakpoints +1
#Overall wavelength limits for the opacity/SED treatment:
minLambda = 30.0 #//nm
maxLambda = 1.0e6 #//nm
#// JOLA molecular bands here:
#// Just-overlapping line approximation treats molecular ro-vibrational bands as pseudo-continuum
#//opacity sources by "smearing" out the individual rotational fine-structure lines
#//See 1982A&A...113..173Z, Zeidler & Koester, 1982
#double jolaOmega0; //band origin ?? //Hz OR waveno in cm^-1 ??
#//double[] jolaLogF; //total vibrational band oscillator strength (f_v'v")
#double jolaRSqu; //needed for total vibrational band oscillator strength (f_v'v")
#Per-band working variables, filled per JOLA band later:
jolaB = [0.0 for i in range(2)] #// B' value of upper vibational state (energy in cm^-1)??
jolaLambda = [0.0 for i in range(2)]
jolaAlphP = 0.0 #// alpha_P - weight of P branch (Delta J = -1)
jolaAlphR = 0.0 #// alpha_R - weight of R branch (Delta J = 1)
jolaAlphQ = 0.0 #// alpha_Q - weight of Q branch (Delta J = 0)
#//Allen's Astrophysical quantities, 4th Ed., 4.12.2 - 4.13.1:
#// Electronic transition moment, Re
#//"Line strength", S = |R_e|^2*q_v'v" or just |R_e|^2 (R_00 is for the band head)
#//Section 4.4.2 - for atoms or molecules:
#// then: gf = (8pi^2m_e*nu/3he^2) * S
#//
#// ^48Ti^16O systems: Table 4.18, p. 91
#// C^3Delta - X^3Delta ("alpha system") (Delta Lambda = 0??, p. 84 - no Q branch??)
#// c^1Phi - a^1Delta ("beta system") (Delta Lambda = 1??, p. 84)
#// A^3Phi - X^3Delta ("gamma system") (Delta Lambda = 0??, p. 84 - no Q branch??)
#// //
#// Rotational & vibrational constants for TiO states:, p. 87, Table 4.17
#// C^3Delta, X^3Delta a^1Delta, -- No "c^1Phi" - ??
#//
#//General TiO molecular rotational & vibrational constants - Table 3.12, p. 47
#//Zeidler & Koester 1982 p. 175, Sect vi):
#//If Q branch (deltaLambda = +/-1): alpP = alpR = 0.25, alpQ = 0.5
#//If NO Q branch (deltaLambda = 0): alpP = alpR = 0.5, alpQ = 0.0
#//number of wavelength point sampling a JOLA band
# ---- JOLA (just-overlapping line approximation) sampling setup ----
# Number of wavelength samples taken across each JOLA molecular band:
jolaNumPoints = 30
# Branch weights for transitions with DeltaLambda = +/-1 (Q branch present,
# so it carries half the total weight; Zeidler & Koester 1982, Sect. vi):
jolaAlphP_DL1 = 0.25
jolaAlphR_DL1 = 0.25
jolaAlphQ_DL1 = 0.5
# Branch weights for DeltaLambda = 0 transitions: no Q branch, so the
# P and R branches split the weight evenly:
jolaAlphP_DL0 = 0.5
jolaAlphR_DL0 = 0.5
jolaAlphQ_DL0 = 0.0  # no Q branch in this case
# log of the constant factor in gf = (8 pi^2 m_e nu / 3 h e^2) * S
# (Allen's Astrophysical Quantities, 4th Ed., Sect. 4.4.2):
logSTofHelp = math.log(8.0/3.0) + 2.0*math.log(math.pi) + Useful.logMe() - Useful.logH() - 2.0*Useful.logEe()
# Hand-tuned multiplicative strength factor - possibly the "script S" factor
# of Allen 4th Ed., p. 88 (S = |R|^2 * q_v'v" * scriptS):
jolaQuantumS = 1.0
# Work arrays: band number densities per depth, and the band profiles
# (one row per JOLA wavelength sample, one column per depth point):
logNumJola = [0.0] * numDeps
jolaProfPR = [[0.0] * numDeps for _ in range(jolaNumPoints)]  # unified P & R branch
jolaProfQ = [[0.0] * numDeps for _ in range(jolaNumPoints)]   # Q branch
# Differential cross-section - the main "product" of the JOLA approximation:
dfBydv = [[0.0] * numDeps for _ in range(jolaNumPoints)]
#//
#Input-data directory, relative to the installation root (absPath):
dataPath = "InputData/"
#//
#//
#// ************** Atomic line list:
#//
#//NIST Atomic Spectra Database Lines Data
#//Kramida, A., Ralchenko, Yu., Reader, J., and NIST ASD Team (2015). NIST Atomic Spectra Database (ver. 5.3), [Online]. Available: http://physics.nist.gov/asd [2017, January 30]. National Institute of Standards and Technology, Gaithersburg, MD.
#//
#//Stuff for byte file method:
#//
#// *** NOTE: bArrSize must have been noted from the stadout of LineListServer and be consistent
#// with whichever line list is linked to gsLineListBytes.dat, and be st manually here:
#Pre-packed byte-format atomic line list shipped with the input data:
lineListBytes = absPath + dataPath + "atomLineListFeb2017Bytes.dat"
#lineListBytes = "gsLineListBytes.dat"
#//
#//System.out.println(" *********************************************** ");
#//System.out.println(" ");
#//System.out.println(" ");
print("READING LINE LIST")
#//System.out.println(" ");
#//System.out.println(" ");
#//System.out.println(" *********************************************** ");
#Read the whole byte-packed line list and decode it to text.
#Records are separated by "%%".
with open(lineListBytes, 'rb') as fHandle:
    #Java: barray = ByteFileRead.readFileBytes(lineListBytes, bArrSize);
    barray = fHandle.read()
#fHandle closed automatically when with: exited
#Java: String decoded = new String(barray, 0, bArrSize); // example for one encoding type
decoded = barray.decode('utf-8')
#//System.out.println(" *********************************************** ");
#//System.out.println(" ");
#//System.out.println(" ");
print("LINE LIST READ")
#//System.out.println(" ");
#//System.out.println(" ");
#//System.out.println(" *********************************************** ");
arrayLineString = decoded.split("%%")
#//Number of lines MUST be the ONLY entry on the first line
#The trailing "%%"-delimited fragment is not a record, hence the -1:
numLineList = len(arrayLineString) - 1
#numLineList = 1 #//test
# ---- Parallel per-line storage for the NIST atomic line list ----
# Every array below has one slot per list entry and is indexed by the same
# line pointer while the list is parsed.
list2Lam0 = [0.0] * numLineList          # line-centre wavelength (nm)
list2Element = [""] * numLineList        # chemical element symbol
list2StageRoman = [""] * numLineList     # ionization stage as Roman numeral
list2Stage = [0] * numLineList           # ionization stage as 0-based int
list2Mass = [0.0] * numLineList          # atomic mass (amu)
list2LogGammaCol = [0.0] * numLineList   # log collisional broadening parameter
# Einstein coefficient for spontaneous de-excitation (log base 10):
list2LogAij = [0.0] * numLineList
# Log of the unitless oscillator strength f:
list2Logf = [0.0] * numLineList
# Ground-state ionization energies (eV), stages I through VI:
list2ChiI1 = [0.0] * numLineList
list2ChiI2 = [0.0] * numLineList
list2ChiI3 = [0.0] * numLineList
list2ChiI4 = [0.0] * numLineList
list2ChiI5 = [0.0] * numLineList
list2ChiI6 = [0.0] * numLineList
# Excitation energy of the lower level of the b-b transition (eV):
list2ChiL = [0.0] * numLineList
# Statistical weight of the transition's lower level (the upper-level weight
# GwU is simply taken as 1.0 for now):
list2GwL = [0.0] * numLineList
list2_ptr = 0       # write pointer into the list2* arrays while populating
numFields = 7       # nominal field count, used only to pre-size thisRecord
# field layout: 0: element, 1: ion stage, 2: lambda_0, 3: log Aij,
# 4: log f, 5: chi_l
thisRecord = [""] * numFields
#String myString; //useful helper
#Parse each "%%"-separated record into the parallel list2* arrays.
#NOTE(review): list2_ptr is incremented exactly once per iteration, so
#(assuming it enters at 0) it tracks iLine throughout this loop.
for iLine in range(numLineList):
    #// "|" turns out to mean something in regexp, so we need to escape with '\\':
    thisRecord = arrayLineString[iLine].split("|")
    #//System.out.println("thisRecord[0] " + thisRecord[0]
    #//   + "thisRecord[1] " + thisRecord[1]
    #//   + "thisRecord[2] " + thisRecord[2]
    #//   + "thisRecord[3] " + thisRecord[3]
    #//   + "thisRecord[4] " + thisRecord[4]
    #//   + "thisRecord[5] " + thisRecord[5]);
    myString = thisRecord[0].strip()
    list2Element[iLine] = myString
    myString = thisRecord[1].strip()
    list2StageRoman[iLine] = myString
    myString = thisRecord[2].strip()
    list2Lam0[iLine] = float(myString)
    myString = thisRecord[3].strip()
    list2LogAij[iLine] = float(myString)
    myString = thisRecord[4].strip()
    list2Logf[iLine] = float(myString)
    myString = thisRecord[5].strip()
    list2ChiL[iLine] = float(myString)
    #//// Currently not used
    #//    myString = thisRecord[6].trim();
    #//    list2ChiU = Double.parseDouble(myString);
    #//    myString = thisRecord[7].trim();
    #//    list2Jl = Double.parseDouble(myString);
    #//    myString = thisRecord[8].trim();
    #//    list2Ju = Double.parseDouble(myString);
    #NOTE(review): field 9 is read even though numFields is 7 - the record
    #evidently has more fields than the stale numFields comment suggests.
    myString = thisRecord[9].strip()
    list2GwL[iLine] = float(myString)
    #//// Currently not used
    #//    myString = thisRecord[10].trim();
    #//    list2GwU = Double.parseDouble(myString);
    #//Get the chemical element symbol - we don't know if it's one or two characters
    #Map the Roman-numeral ionization stage to a 0-based integer (I -> 0 ...):
    if (list2StageRoman[list2_ptr] == "I"):
        list2Stage[list2_ptr] = 0
    if (list2StageRoman[list2_ptr] == "II"):
        list2Stage[list2_ptr] = 1
    if (list2StageRoman[list2_ptr] == "III"):
        list2Stage[list2_ptr] = 2
    if (list2StageRoman[list2_ptr] == "IV"):
        list2Stage[list2_ptr] = 3
    if (list2StageRoman[list2_ptr] == "V"):
        list2Stage[list2_ptr] = 4
    if (list2StageRoman[list2_ptr] == "VI"):
        list2Stage[list2_ptr] = 5
    if (list2StageRoman[list2_ptr] == "VII"):
        list2Stage[list2_ptr] = 6
    #//wavelength in nm starts at position 23 and is in %8.3f format - we're not expecting anything greater than 9999.999 nm
    #// Some more processing:
    #Look up atomic mass and the first six ionization energies for this element:
    list2Mass[list2_ptr] = AtomicMass.getMass(list2Element[list2_ptr])
    species = list2Element[list2_ptr] + "I"
    list2ChiI1[list2_ptr] = IonizationEnergy.getIonE(species)
    species = list2Element[list2_ptr] + "II"
    list2ChiI2[list2_ptr] = IonizationEnergy.getIonE(species)
    species = list2Element[list2_ptr] + "III"
    list2ChiI3[list2_ptr] = IonizationEnergy.getIonE(species)
    species = list2Element[list2_ptr] + "IV"
    list2ChiI4[list2_ptr] = IonizationEnergy.getIonE(species)
    species = list2Element[list2_ptr] + "V"
    list2ChiI5[list2_ptr] = IonizationEnergy.getIonE(species)
    species = list2Element[list2_ptr] + "VI"
    list2ChiI6[list2_ptr] = IonizationEnergy.getIonE(species)
    #//We're going to have to fake the ground state statistical weight for now - sorry:
    #//list2Gw1[list2_ptr] = 1.0;
    #//list2Gw2[list2_ptr] = 1.0;
    #//list2Gw3[list2_ptr] = 1.0;
    #//list2Gw4[list2_ptr] = 1.0;
    #Same user-supplied collisional broadening parameter for every line:
    list2LogGammaCol[list2_ptr] = logGammaCol
    #//We've gotten everything we need from the NIST line list:
    list2_ptr+=1
#} //iLine loop
#Number of lines actually read:
numLines2 = list2_ptr
#numLines2 = 0 #test
#
# ================= END FILE I/O SECTION =================
#
# Line triage: decide, for each line read from the list, whether it is strong
# enough to synthesize with a Voigt profile or can be neglected.
gaussLineCntr = 0                 # accumulator: how many lines survive triage
# Indices (into the master line list) of the lines that make the cut:
gaussLine_ptr = [0] * numLines2
isFirstLine = True                # tracks the first surviving line
firstLine = 0                     # fallback line index if none survive
# Temperature-dependent base-10 logarithmic partition-function samples for
# the species currently being processed (one per partition temperature):
thisUwV = [0.0] * numAtmPrtTmps
#Triage loop: for every parsed line, build its depth-dependent level
#populations, evaluate a delta-function line-centre opacity, and keep the
#line only if it is strong enough relative to the local continuum inside
#the requested synthesis window [lambdaStart, lambdaStop].
for iLine in range(numLines2):
    #//No! ifThisLine[iLine] = false;
    #//if H or He, make sure kappaScale is unity:
    if ((list2Element[iLine] == "H") \
        or (list2Element[iLine] == "He")):
        zScaleList = 1.0
        #//list2Gw1[iLine] = 2.0; //fix for H lines
        #Wavelengths are still in nm at this point (657.0 nm ~ H-alpha):
        if (list2Lam0[iLine] <= 657.0):
            list2GwL[iLine] = 8.0 #//fix for Balmer lines
        else:
            list2GwL[iLine] = 18.0 #//fix for Paschen lines
    else:
        zScaleList = zScale
    #Convert line-centre wavelength to cm for all downstream use:
    list2Lam0[iLine] = list2Lam0[iLine] * nm2cm #// nm to cm
    iAbnd = 0 #//initialization
    logNums_ptr = 0
    #//System.out.println("iLine " + iLine + " list2Element[iLine] " + list2Element[iLine]);
    #Not trivially pythonizable:
    #Find this line's element in the abundance table; on a match, pick the
    #row of the stage-population array that corresponds to its ion stage.
    for jj in range(nelemAbnd):
        #//System.out.println("jj " + jj + " cname[jj]" + cname[jj]+"!");
        if (list2Element[iLine] == cname[jj]):
            if (list2Stage[iLine] == 0):
                species = cname[jj] + "I"
                logNums_ptr = 0
            if (list2Stage[iLine] == 1):
                species = cname[jj] + "II"
                logNums_ptr = 1
            if (list2Stage[iLine] == 2):
                species = cname[jj] + "III"
                logNums_ptr = 4
            if (list2Stage[iLine] == 3):
                species = cname[jj] + "IV"
                logNums_ptr = 5
            if (list2Stage[iLine] == 4):
                species = cname[jj] + "V"
                logNums_ptr = 6
            if (list2Stage[iLine] == 5):
                species = cname[jj] + "VI"
                logNums_ptr = 7
            thisUwV = PartitionFn.getPartFn2(species) #//base e log_e U
            break #//we found it
        iAbnd+=1
    #} //jj loop
    #Rows 0,1,4,5,6,7: stage populations I..VI; rows 2,3 are filled below
    #with the transition's lower-level populations:
    list2LogNums = [ [ 0.0 for i in range(numDeps) ] for j in range(numStages+2) ]
    #for iTau in range(numDeps):
    #    list2LogNums[0][iTau] = masterStagePops[iAbnd][0][iTau]
    #    list2LogNums[1][iTau] = masterStagePops[iAbnd][1][iTau]
    #    list2LogNums[4][iTau] = masterStagePops[iAbnd][2][iTau]
    #    list2LogNums[5][iTau] = masterStagePops[iAbnd][3][iTau]
    #    list2LogNums[6][iTau] = masterStagePops[iAbnd][4][iTau]
    #    list2LogNums[7][iTau] = masterStagePops[iAbnd][5][iTau]
    list2LogNums[0] = [ x for x in masterStagePops[iAbnd][0] ]
    list2LogNums[1] = [ x for x in masterStagePops[iAbnd][1] ]
    list2LogNums[4] = [ x for x in masterStagePops[iAbnd][2] ]
    list2LogNums[5] = [ x for x in masterStagePops[iAbnd][3] ]
    list2LogNums[6] = [ x for x in masterStagePops[iAbnd][4] ]
    list2LogNums[7] = [ x for x in masterStagePops[iAbnd][5] ]
    #if ( ((list2Lam0[iLine]) > lambdaStart) and ((list2Lam0[iLine]) < lambdaStop) and species=="CaI"):
    #    print("iLine ", iLine, " species ", species, " logNums_ptr ", logNums_ptr, " list2Lam0 ", list2Lam0[iLine], \
    #          " list2Logf[iLine] ", list2Logf[iLine] , " list2ChiL ", list2ChiL[iLine], " thisUwV ", thisUwV, \
    #          " list2GwL ", list2GwL[iLine])
    #Boltzmann/Saha level populations of the transition's lower level:
    numHelp = LevelPopsGasServer.levelPops(list2Lam0[iLine], list2LogNums[logNums_ptr], list2ChiL[iLine], thisUwV, \
            list2GwL[iLine], numDeps, temp)
    #for iTau in range(numDeps):
    #    list2LogNums[2][iTau] = numHelp[iTau]
    #    list2LogNums[3][iTau] = numHelp[iTau] / 2.0 #//fake for testing with gS3 line treatment
    list2LogNums[2] = [ x for x in numHelp ]
    list2LogNums[3] = [ x/2.0 for x in numHelp ]
    #if ( ((list2Lam0[iLine]) > lambdaStart) and ((list2Lam0[iLine]) < lambdaStop) and species=="CaI"):
    #    print("list2LogNums[2] ", list2LogNums[2])
    #//linePoints: Row 0 in cm (will need to be in nm for Plack.planck), Row 1 in Doppler widths
    #//For now - initial strength check with delta fn profiles at line centre for triage:
    listNumPointsDelta = 1
    listLinePointsDelta = LineGrid.lineGridDelta(list2Lam0[iLine], list2Mass[iLine], xiT, numDeps, teff)
    listLineProfDelta = LineProf.delta(listLinePointsDelta, list2Lam0[iLine], numDeps, tauRos, list2Mass[iLine], xiT, teff)
    listLogKappaLDelta = LineKappa.lineKap(list2Lam0[iLine], list2LogNums[2], list2Logf[iLine], listLinePointsDelta, listLineProfDelta,
            numDeps, zScaleList, tauRos, temp, rho, logFudgeTune)
    """/* Let's not do this - too slow:
    // Low resolution SED lines and high res spectrum synthesis region lines are mutually
    // exclusive sets in wavelength space:
    //Does line qualify for inclusion in SED as low res line at all??
    // Check ratio of line centre opacity to continuum at log(TauRos) = -5, -3, -1
    if ( (logE*(listLogKappaLDelta[0][6] - kappa[1][6]) > sedThresh)
      || (logE*(listLogKappaLDelta[0][18] - kappa[1][18]) > sedThresh)
      || (logE*(listLogKappaLDelta[0][30] - kappa[1][30]) > sedThresh) ){
         if ( ( list2Stage[iLine] == 0) || (list2Stage[iLine] == 1)
           || ( list2Stage[iLine] == 2) || (list2Stage[iLine] == 3) ){
            if ( (list2Lam0[iLine] > lamUV) and (list2Lam0[iLine] < lamIR) ){
               if ( (list2Lam0[iLine] < lambdaStart) || (list2Lam0[iLine] > lambdaStop) ){
    //No! ifThisLine[iLine] = true;
                  sedLine_ptr[sedLineCntr] = iLine;
                  sedLineCntr++;
    //System.out.println("SED passed, iLine= " + iLine + " sedLineCntr " + sedLineCntr
    //        + " list2Lam0[iLine] " + list2Lam0[iLine]
    //        + " list2Element[iLine] " + list2Element[iLine]
    //        + " list2Stage[iLine] " + list2Stage[iLine]);
               }
            }
         }
    }
    */"""
    #//Does line qualify for inclusion in high res spectrum synthesis region??
    #// Check ratio of line centre opacity to continuum at log(TauRos) = -5, -3, -1
    #//Find local value of lambda-dependent continuum kappa - list2Lam0 & lambdaScale both in cm here:
    thisLambdaPtr = ToolBox.lamPoint(numLams, lambdaScale, list2Lam0[iLine])
    #Keep the line if its centre opacity beats the continuum by lineThresh
    #(in log10) at any of three sample depths (indices 6, 18, 30):
    if ( (logE*(listLogKappaLDelta[0][6] - logKappa[thisLambdaPtr][6]) > lineThresh)
        or (logE*(listLogKappaLDelta[0][18] - logKappa[thisLambdaPtr][18]) > lineThresh)
        or (logE*(listLogKappaLDelta[0][30] - logKappa[thisLambdaPtr][30]) > lineThresh) ):
        if ( ( list2Stage[iLine] == 0) or (list2Stage[iLine] == 1)
            or ( list2Stage[iLine] == 2) or (list2Stage[iLine] == 3)
            or ( list2Stage[iLine] == 4) or (list2Stage[iLine] == 5) ):
            if ( (list2Lam0[iLine] > lambdaStart) and (list2Lam0[iLine] < lambdaStop) ):
                #special test condition
                #if (list2Element[iLine] == "Na"):
                #//No! ifThisLine[iLine] = true;
                gaussLine_ptr[gaussLineCntr] = iLine
                gaussLineCntr+=1
                if (isFirstLine == True):
                    firstLine = iLine
                    isFirstLine = False
# Guarantee at least one line in the synthesis region: if triage rejected
# everything, fall back to the first surviving candidate (index 0 when no
# line ever passed) and flag the situation for downstream code.
areNoLines = (gaussLineCntr == 0)
if areNoLines:
    gaussLine_ptr[0] = firstLine
    gaussLineCntr = 1
numGaussLines = gaussLineCntr
# ---- Wavelength sampling of each synthesized line profile ----
# (H and He lines had their kappa scale forced to unity during triage.)
# Defaults; both are always overwritten by the sampling switch below:
listNumCore = 3     # points in the half-core
listNumWing = 1     # points per wing
if sampling == "coarse":
    listNumCore, listNumWing = 3, 3
else:
    listNumCore, listNumWing = 5, 10
# Full Voigt profile: core + wing points, mirrored about line centre:
listNumPoints = 2 * (listNumCore + listNumWing) - 1
# Dynamic counter of how many master-array elements are currently in use:
numNow = numLams
# Total master-grid size: continuum wavelengths plus every line's sampling
# points, plus the JOLA molecular-band points when molecules are enabled:
if ifMols == 1:
    numMaster = numLams + numGaussLines * listNumPoints + numJola * jolaNumPoints
else:
    numMaster = numLams + numGaussLines * listNumPoints
#Master wavelength grid and line-blanketed opacity array:
masterLams = [0.0 for i in range(numMaster)]
#//Line blanketed opacity array:
logMasterKaps = [ [ 0.0 for i in range(numDeps) ] for j in range(numMaster) ]
#//seed masterLams and logMasterKaps with continuum SED lambdas and kappas:
#//This just initializes the first numLams of the numMaster elements
#//Initialize monochromatic line blanketed opacity array:
#// Seed first numLams wavelengths with continuum wavelength and kappa values
for iL in range(numLams):
    masterLams[iL] = lambdaScale[iL]
    for iD in range(numDeps):
        logMasterKaps[iL][iD] = logKappa[iL][iD]
#This pythonization will not work
#masterLams[0: numLams] = [ lambdaScale[iL] for iL in range(numLams) ]
#logMasterKaps[0: numLams][:] = [ [ logKappa[iL][iD] for iD in range(numDeps) ] for iL in range(numLams) ]
#//initialize the remainder with dummy values - these values will be clobbered as line wavelengths are inserted,
#// and don't matter
for iL in range(numLams, numMaster):
    masterLams[iL] = lambdaScale[numLams - 1]
    for iD in range(numDeps):
        logMasterKaps[iL][iD] = logKappa[numLams-1][iD]
#This pythonization will not work
#masterLams[numLams: numMaster-1] = [ lambdaScale[numLams - 1] for iL in range(numLams, numMaster) ]
#logMasterKaps[numLams: numMaster-1][:] = [ [ logKappa[numLams-1][iD] for iD in range(numDeps) ] for iL in range(numLams, numMaster) ]
#stop
#//Stuff for the the Teff recovery test:
#double lambda1, lambda2, fluxSurfBol, logFluxSurfBol;
fluxSurfBol = 0
#//Get the components for the power series expansion approximation of the Hjerting function
#//for treating Voigt profiles:
hjertComp = HjertingComponents.hjertingComponents()
#// This holds 2-element temperature-dependent base 10 logarithmic parition fn:
#for k in range(numAtmPrtTmps):
#    thisUwV[k] = 0.0 #//default initialization
thisUwV = [ 0.0 for i in range(numAtmPrtTmps) ]
#Per-line work array: profile value per wavelength sample per depth point:
listLineProf = [ [ 0.0 for i in range(numDeps) ] for j in range(listNumPoints) ]
print("Beginning spectrum synthesis, numVoigtLines ", numGaussLines)
#// Put in high res spectrum synthesis lines:
#Main synthesis loop over the lines that survived triage: build the full
#Voigt (or Stark, for H I) profile for each line and merge its opacity into
#the master wavelength/opacity arrays.
#NOTE(review): this loop body continues beyond the end of this excerpt.
for iLine in range(numGaussLines):
    #//if H or He, make sure kappaScale is unity:
    if ((list2Element[gaussLine_ptr[iLine]] == "H")
        or (list2Element[gaussLine_ptr[iLine]] == "He")):
        zScaleList = 1.0
        #//list2Gw1[gaussLine_ptr[iLine]] = 2.0; //fix for H lines
        #Wavelengths were converted to cm during triage, hence the threshold
        #here is 657.0e-7 cm (the same 657.0 nm cut used earlier):
        if (list2Lam0[gaussLine_ptr[iLine]] <= 657.0e-7):
            list2GwL[gaussLine_ptr[iLine]] = 8.0 #//fix for Balmer lines
        else:
            list2GwL[gaussLine_ptr[iLine]] = 18.0 #//fix for Paschen lines
    else:
        zScaleList = zScale;
    #//
    iAbnd = 0 #//initialization
    logNums_ptr = 0
    #Same element/stage lookup as in the triage loop:
    for jj in range(nelemAbnd):
        if (list2Element[gaussLine_ptr[iLine]] == cname[jj]):
            if (list2Stage[gaussLine_ptr[iLine]] == 0):
                species = cname[jj] + "I"
                logNums_ptr = 0
            if (list2Stage[gaussLine_ptr[iLine]] == 1):
                species = cname[jj] + "II"
                logNums_ptr = 1
            if (list2Stage[gaussLine_ptr[iLine]] == 2):
                species = cname[jj] + "III"
                logNums_ptr = 4
            if (list2Stage[gaussLine_ptr[iLine]] == 3):
                species = cname[jj] + "IV"
                logNums_ptr = 5
            if (list2Stage[gaussLine_ptr[iLine]] == 4):
                species = cname[jj] + "V"
                logNums_ptr = 6
            if (list2Stage[gaussLine_ptr[iLine]] == 5):
                species = cname[jj] + "VI"
                logNums_ptr = 7
            thisUwV = PartitionFn.getPartFn2(species) #//base e log_e U
            break #//we found it
            #}
        iAbnd+=1
    #} //jj loop
    #Rows 0,1,4,5,6,7: stage populations I..VI; rows 2,3 filled below:
    list2LogNums = [ [ 0.0 for i in range(numDeps) ] for j in range(numStages+2) ]
    #for iTau in range(numDeps):
    #    list2LogNums[0][iTau] = masterStagePops[iAbnd][0][iTau]
    #    list2LogNums[1][iTau] = masterStagePops[iAbnd][1][iTau]
    #    list2LogNums[4][iTau] = masterStagePops[iAbnd][2][iTau]
    #    list2LogNums[5][iTau] = masterStagePops[iAbnd][3][iTau]
    #    list2LogNums[6][iTau] = masterStagePops[iAbnd][4][iTau]
    #    list2LogNums[7][iTau] = masterStagePops[iAbnd][5][iTau]
    list2LogNums[0] = [ masterStagePops[iAbnd][0][iTau] for iTau in range(numDeps) ]
    list2LogNums[1] = [ masterStagePops[iAbnd][1][iTau] for iTau in range(numDeps) ]
    list2LogNums[4] = [ masterStagePops[iAbnd][2][iTau] for iTau in range(numDeps) ]
    list2LogNums[5] = [ masterStagePops[iAbnd][3][iTau] for iTau in range(numDeps) ]
    list2LogNums[6] = [ masterStagePops[iAbnd][4][iTau] for iTau in range(numDeps) ]
    list2LogNums[7] = [ masterStagePops[iAbnd][5][iTau] for iTau in range(numDeps) ]
    #Lower-level populations of this transition at every depth:
    numHelp = LevelPopsGasServer.levelPops(list2Lam0[gaussLine_ptr[iLine]], list2LogNums[logNums_ptr], list2ChiL[gaussLine_ptr[iLine]], thisUwV,
            list2GwL[gaussLine_ptr[iLine]], numDeps, temp)
    #for iTau in range(numDeps):
    #    list2LogNums[2][iTau] = numHelp[iTau]
    #    list2LogNums[3][iTau] = -19.0 #//upper E-level - not used - fake for testing with gS3 line treatment
    list2LogNums[2] = [ x for x in numHelp ]
    list2LogNums[3] = [ -19.0 for i in range(numDeps) ] #//upper E-level - not used - fake for testing with gS3 line treatment
    #print("iLine ", iLine, " iAbnd ", iAbnd)
    #print("list2LogNums ", list2LogNums[2])
    #if ( (list2Element[gaussLine_ptr[iLine]] == "Na") and (list2Stage[gaussLine_ptr[iLine]] == 0) ):
    #if (iTau%5 == 1):
    #    outline = ("iTau "+ str(iTau)+ " Na I list2LogNums[2]: "+ str(log10e*list2LogNums[2][iTau]) + "\n")
    #    outHandle.write(outline)
    #if ( ((list2Lam0[gaussLine_ptr[iLine]]) > lambdaStart) and ((list2Lam0[gaussLine_ptr[iLine]]) < lambdaStop) and species=="CaI"):
    #    print("iLine ", iLine , " gaussLine_ptr ", gaussLine_ptr[iLine] ," list2Lam0 ", list2Lam0[gaussLine_ptr[iLine]], " list2LogAij ", list2LogAij[gaussLine_ptr[iLine]], " list2Logf ", list2Logf[gaussLine_ptr[iLine]])
    #    print("list2Mass ", list2Mass[gaussLine_ptr[iLine]], " list2LogGammaCol ", list2LogGammaCol[gaussLine_ptr[iLine]])
    #if ( ((list2Lam0[gaussLine_ptr[iLine]]) > lambdaStart) and ((list2Lam0[gaussLine_ptr[iLine]]) < lambdaStop) and species=="CaI"):
    #    print("list2LogNums[2] ", list2LogNums[2])
    #//Proceed only if line strong enough:
    #//
    #//ifThisLine[gaussLine_ptr[iLine]] = true; //for testing
    #//No! if (ifThisLine[gaussLine_ptr[iLine]] == true){
    #// Gaussian only approximation to profile (voigt()):
    #//    double[][] listLinePoints = LineGrid.lineGridGauss(list2Lam0[gaussLine_ptr[iLine]], list2Mass[gaussLine_ptr[iLine]], xiT, numDeps, teff, listNumCore);
    #//    double[][] listLineProf = LineProf.gauss(listLinePoints, list2Lam0[gaussLine_ptr[iLine]],
    #//            numDeps, teff, tauRos, temp, tempSun);
    #// Gaussian + Lorentzian approximation to profile (voigt()):
    #Wavelength sampling grid for this line (row 0: delta-lambda in cm):
    listLinePoints = LineGrid.lineGridVoigt(list2Lam0[gaussLine_ptr[iLine]], list2Mass[gaussLine_ptr[iLine]], xiT,
            numDeps, teff, listNumCore, listNumWing, species)
    #print("species: ", species)
    #if ( (list2Element[gaussLine_ptr[iLine]] == "Na") and (list2Stage[gaussLine_ptr[iLine]] == 0) ):
    #    outline = ("iLine "+ str(iLine)+ " gaussLine_ptr "+ str(gaussLine_ptr[iLine])+ " list2Lam0 "+ str(list2Lam0[gaussLine_ptr[iLine]])+ " list2LogAij "+
    #               str(list2LogAij[gaussLine_ptr[iLine]])+ " list2LogGammaCol "+ str(list2LogGammaCol[gaussLine_ptr[iLine]])+ " list2Logf "+
    #               str(list2Logf[gaussLine_ptr[iLine]]) + "\n")
    #    outHandle.write(outline)
    #H I lines get the Stark-broadened treatment; everything else gets Voigt:
    if (species == "HI"):
        #//System.out.println("Calling Stark...");
        listLineProf = LineProf.stark(listLinePoints, list2Lam0[gaussLine_ptr[iLine]], list2LogAij[gaussLine_ptr[iLine]],
                list2LogGammaCol[gaussLine_ptr[iLine]],
                numDeps, teff, tauRos, temp, pGas, newNe, tempSun, pGasSun, hjertComp, species)
    else:
        #print("voigt branch called")
        listLineProf = LineProf.voigt(listLinePoints, list2Lam0[gaussLine_ptr[iLine]], list2LogAij[gaussLine_ptr[iLine]],
                list2LogGammaCol[gaussLine_ptr[iLine]],
                numDeps, teff, tauRos, temp, pGas, tempSun, pGasSun, hjertComp, dbgHandle)
    #Monochromatic line opacity at every sample point and depth:
    listLogKappaL = LineKappa.lineKap(list2Lam0[gaussLine_ptr[iLine]], list2LogNums[2], list2Logf[gaussLine_ptr[iLine]], listLinePoints, listLineProf,
            numDeps, zScaleList, tauRos, temp, rho, logFudgeTune)
    #print("listLogKappaL ", listLogKappaL[:][16])
    #stop
    #if ( (list2Element[gaussLine_ptr[iLine]] == "Na") and (list2Stage[gaussLine_ptr[iLine]] == 0) ):
    #    for iTau in range(numDeps):
    #        if (iTau%5 == 1):
    #            for iL in range(listNumPoints):
    #                if (iL%2 == 0):
    #                    print("iTau ", iTau, " iL ", iL, " listLinePoints[0]&[1] ", listLinePoints[0][iL], " ", listLinePoints[1][iL],
    #                          " listLineProf ", listLineProf[iL][iTau], " listLogKappaL ", log10e*listLogKappaL[iL][iTau])
    listLineLambdas = [0.0 for i in range(listNumPoints)]
    #for il in range(listNumPoints):
    #    #// // lineProf[gaussLine_ptr[iLine]][*] is DeltaLambda from line centre in cm
    #    listLineLambdas[il] = listLinePoints[0][il] + list2Lam0[gaussLine_ptr[iLine]]
    #Absolute wavelengths of this line's sample points (cm):
    listLineLambdas = [ x + list2Lam0[gaussLine_ptr[iLine]] for x in listLinePoints[0] ]
    #Merge this line's wavelengths and opacities into the master arrays:
    masterLamsOut = SpecSyn.masterLambda(numLams, numMaster, numNow, masterLams, listNumPoints, listLineLambdas)
    logMasterKapsOut = SpecSyn2.masterKappa(numDeps, numLams, numMaster, numNow, masterLams, masterLamsOut, \
            logMasterKaps, listNumPoints, listLineLambdas, listLogKappaL)
    numNow = numNow + listNumPoints
    #numNow = numNow + listNumPoints
    #plt.plot(masterLamsOut, [logMasterKapsOut[i][12] for i in range(numNow)])
    #plt.plot(masterLamsOut, [logMasterKapsOut[i][12] for i in range(numNow)], '.')
    #//update masterLams and logMasterKaps:
    for iL in range(numNow):
        masterLams[iL] = masterLamsOut[iL]
        for iD in range(numDeps):
            #//Still need to put in multi-Gray levels here:
            logMasterKaps[iL][iD] = logMasterKapsOut[iL][iD]
    #This pythoniztion does not work:
#masterLams[0: numNow] = [ masterLamsOut[iL] for iL in range(numNow) ]
#logMasterKaps[0: numNow][:] = [ [ logMasterKapsOut[iL][iD] for iD in range(numDeps) ] for iL in range(numNow) ]
#print("iLine ", iLine, " gaussLine_ptr ", gaussLine_ptr[iLine])
#//No! } //ifThisLine strength condition
#//numLines loop
    print("End spectrum synthesis")
    #print("logMasterKaps ", logMasterKaps[:][16])
    #////
    # --- JOLA molecular bands ---
    # Only insert "just overlapping line approximation" (JOLA) band opacity for
    # cool models (teff at or below the jolaTeff threshold).
    if (teff <= jolaTeff):
    #//Begin loop over JOLA bands - insert JOLA opacity into opacity spectrum...
        helpJolaSum = 0.0
        if (ifMols == 1):
            for iJola in range(numJola):
                #//Find species in molecule set:
                for iMol in range(gsFirstMol, gsNspec):
                    if (gsName[iMol] == jolaSpecies[iJola]):
                        #//System.out.println("mname " + mname[iMol]);
                        #for iTau in range(numDeps):
                        #    logNumJola[iTau] = masterMolPops[iMol][iTau]
                        # Depth-wise log number density of this molecule:
                        logNumJola = [ x for x in masterMolPops[iMol-gsFirstMol] ]
                        #}
                #}
                #}
                # Band parameters looked up per electronic system:
                jolaOmega0 = MolecData.getOrigin(jolaSystem[iJola]) #//band origin ?? //Freq in Hz OR waveno in cm^-1 ??
                jolaB = MolecData.getRotConst(jolaSystem[iJola]) #// B' and b" values of upper and lower vibational state
                jolaLambda = MolecData.getWaveRange(jolaSystem[iJola]) #//approx wavelength range of band
jolaDeltaLambda = MolecData.getDeltaLambda
                jolaLogF = logTiny #Default band oscillator strength (effectively zero)
                # Band oscillator strength, method 1: Allen's Astrophysical Quantities
                if (jolaWhichF[iJola] == "Allen"):
                    #Band strength: Allen's Astrophysical Quantities approach
                    jolaRSqu = MolecData.getSqTransMoment(jolaSystem[iJola]) #//needed for total vibrational band oscillator strength (f_v'v")
                    #//Line strength factor from Allen's 4th Ed., p. 88, "script S":
                    #This is practically the astrophysical tuning factor:
                    jolaQuantumS = MolecData.getQuantumS(jolaSystem[iJola])
                    #//Compute line strength, S, Allen, p. 88:
                    jolaS = jolaRSqu * jolaQuantumS #//may not be this simple (need q?)
                    #//Compute logf , Allen, p. 61 Section 4.4.2 - for atoms or molecules - assumes g=1 so logGf = logF:
                    #//jolaLogF = logSTofHelp + Math.log(jolaOmega0) + Math.log(jolaS); //if omega0 is a freq in Hz
                    #//Gives wrong result?? jolaLogF = logSTofHelp + Useful.logC() + Math.log(jolaOmega0) + Math.log(jolaS); //if omega0 is a waveno in cm^-1
                    checkgf = 303.8*jolaS/(10.0*jolaLambda[0]) #//"Numerical relation", Allen 4th, p. 62 - lambda in A
                    jolaLogF = math.log(checkgf) #//better??
                    #print("iJola ", iJola, " logF ", 10.0**(logE*jolaLogF+14) )
                # Band oscillator strength, method 2: direct f-values (Jorgensen 1994)
                if (jolaWhichF[iJola] == "Jorgensen"):
                    #Band strength: Jorgensen, 1994, A&A, 284, 179 approach - we have the f values directly:
                    #This is practically the astrophysical tuning factor:
                    jolaQuantumS = MolecData.getQuantumS(jolaSystem[iJola])
                    jolaRawF = MolecData.getFel(jolaSystem[iJola])
                    jolaF = jolaRawF * jolaQuantumS
                    #print(iJola, " jQS ", jolaQuantumS, " jRF ", jolaRawF, " jF ", jolaF)
                    jolaLogF = math.log(jolaF)
                    #print("iJola ", iJola, " logF ", 10.0**(logE*jolaLogF+14) )
                # Branch weights depend on whether the electronic transition changes Lambda:
                if (jolaDeltaLambda == 0):
                    jolaAlphP = jolaAlphP_DL0 #// alpha_P - weight of P branch (Delta J = 1)
                    jolaAlphR = jolaAlphR_DL0 #// alpha_R - weight of R branch (Delta J = -1)
                    jolaAlphQ = jolaAlphQ_DL0 #// alpha_Q - weight of Q branch (Delta J = 0)
                if (jolaDeltaLambda != 0):
                    jolaAlphP = jolaAlphP_DL1 #// alpha_P - weight of P branch (Delta J = 1)
                    jolaAlphR = jolaAlphR_DL1 #// alpha_R - weight of R branch (Delta J = -1)
                    jolaAlphQ = jolaAlphQ_DL1 #// alpha_Q - weight of Q branch (Delta J = 0)
                jolaPoints = Jola.jolaGrid(jolaLambda, jolaNumPoints)
                #//This sequence of methods might not be the best way, but it's based on the procedure for atomic lines
                #// Put in JOLA bands:
                #//P & R branches in every case:
                dfBydv = Jola.jolaProfilePR(jolaOmega0, jolaLogF, jolaB,
                        jolaPoints, jolaAlphP, jolaAlphR, numDeps, temp)
                jolaLogKappaL = Jola.jolaKap(logNumJola, dfBydv, jolaPoints,
                        numDeps, temp, rho)
                #////Q branch if DeltaLambda not equal to 0
                #//   if (jolaDeltaLambda != 0){
                #//      dfBydv = Jola.jolaProfileQ(jolaOmega0, jolaLogF, jolaB,
                #//              jolaPoints, jolaAlphQ, numDeps, temp);
                #//      //
                #//      double[][] jolaLogKappaQL = Jola.jolaKap(logNumJola, dfBydv, jolaPoints,
                #//              numDeps, temp, rho);
                #//      //Now add it to the P & R branch opacity:
                #//      for (int iW = 0; iW < jolaNumPoints; iW++){
                #//         for (int iD = 0; iD < numDeps; iD++){
                #//    // //        if (iD%10 == 1){
                #//    // //System.out.println("iW " + iW + " iD " + iD + " jolaLogKappaL " + jolaLogKappaL[iW][iD]);
                #//    // //        }
                #//             helpJolaSum = Math.exp(jolaLogKappaL[iW][iD]) + Math.exp(jolaLogKappaQL[iW][iD]);
                #//             jolaLogKappaL[iW][iD] = Math.log(helpJolaSum);
                #//           } //iD loop
                #//        } //iW loop
                #//   } //Q-branch if
                jolaLambdas = [0.0 for i in range(jolaNumPoints)]
                #for il in range(jolaNumPoints):
                #    #// // lineProf[gaussLine_ptr[iLine]][*] is DeltaLambda from line centre in cm
                #    jolaLambdas[il] = nm2cm * jolaPoints[il]
                # Band grid is in nm; master grid is in cm:
                jolaLambdas = [ nm2cm * x for x in jolaPoints ]
                #print("jolaLambdas[0] ", jolaLambdas[0], " jolaLambdas[jolaNumPoints] ", jolaLambdas[jolaNumPoints-1])
                # Splice the band opacity into the master opacity spectrum (same
                # merge procedure as for atomic lines above):
                masterLamsOut = SpecSyn.masterLambda(numLams, numMaster, numNow, masterLams, jolaNumPoints, jolaLambdas)
                logMasterKapsOut = SpecSyn2.masterKappa(numDeps, numLams, numMaster, numNow, masterLams, masterLamsOut, \
                          logMasterKaps, jolaNumPoints, jolaLambdas, jolaLogKappaL)
                numNow = numNow + jolaNumPoints
                #numNow = numNow + jolaNumPoints
                #//update masterLams and logMasterKaps:
                for iL in range(numNow):
                    masterLams[iL] = masterLamsOut[iL]
                    for iD in range(numDeps):
                        #//Still need to put in multi-Gray levels here:
                        logMasterKaps[iL][iD] = logMasterKapsOut[iL][iD]
                #This pythoniztion does not work:
                #masterLams[0: numNow] = [ masterLamsOut[iL] for iL in range(numNow) ]
                #logMasterKaps[0: numNow][:] = [ [ logMasterKapsOut[iL][iD] for iD in range(numDeps) ] for iL in range(numNow) ]
                #plt.xlim(500.0e-7, 820.0e-7)
                #plt.plot([masterLams[i] for i in range(numNow)],\
                #     [logMasterKaps[i][20] for i in range(numNow)] )
            #} //iJola JOLA band loop
        #} //ifTiO condition
    #} //jolaTeff condition
    #//
    #//Sweep the wavelength grid for line-specific wavelength points that are closer together than needed for
    #//critical sampling:
    #//equivalent spectral resolution of wavelength-dependent critical sampling interval
    sweepRes = numpy.double(500000.0) #//equivalent spectral resolution of wavelength-dependent critical sampling interval
    #//cm //use shortest wavelength to avoid under-sampling:
    sweepDelta = lambdaStart / sweepRes #//cm //use shortest wavelength to avoid under-sampling
    sweepHelp = [ numpy.double(0.0) for i in range(numMaster) ] #//to be truncated later
    #//Initialize sweepHelp
    #for iSweep in range(numMaster):
    #    sweepHelp[iSweep] = 0.0
    sweepHelp = [ numpy.double(0.0) for iSweep in range(numMaster) ]
    #//
    sweepHelp[0] = masterLams[0] #//An auspicious start :-)
    lastLam = 0 #//index of last masterLam wavelength NOT swept out
    iSweep = 1 #//current sweepHelp index
    #//
    # Keep a wavelength only if it is at least sweepDelta from the last kept one:
    for iLam in range(1, numMaster):
        #print ( "In sweeping loop: ", (masterLams[iLam] - masterLams[lastLam]) )
        if ( (masterLams[iLam] - masterLams[lastLam]) >= sweepDelta):
            #//Kept - ie. NOT swept out:
            sweepHelp[iSweep] = masterLams[iLam]
            lastLam = iLam
            iSweep+=1
            #print("Kept condition passed, iSweep ", iSweep)
    # NOTE(review): iSweep entries of sweepHelp have been filled at this point, so
    # "iSweep-1" discards the last kept wavelength — possibly a deliberate safety
    # margin inherited from the original code; confirm before changing.
    numKept = iSweep-1
    #sweptLams = [x for x in sweepHelp]
    sweptLams = [numpy.double(0.0) for i in range(numKept)]
    #for iKept in range(numKept):
    #    sweptLams[iKept] = sweepHelp[iKept]
    sweptLams = [ sweepHelp[iKept] for iKept in range(numKept) ]
    #stop
    #//Interpolate the total extinction array onto the swept wavelength grid:
    keptHelp = [numpy.double(0.0) for i in range(numKept)]
    logSweptKaps = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(numKept) ]
    logMasterKapsId = [numpy.double(0.0) for i in range(numMaster)]
    #Not trivially pythonizable:
    # Interpolate log kappa(lambda) onto the swept grid, one depth at a time:
    for iD in range(numDeps):
        #//extract 1D kappa vs lambda at each depth:
        for iL in range(numMaster):
            logMasterKapsId[iL] = logMasterKaps[iL][iD]
        #keptHelp = ToolBox.interpolV(logMasterKapsId, masterLams, sweptLams)
        keptHelp = numpy.interp(sweptLams, masterLams, logMasterKapsId)
        for iL in range(numKept):
            logSweptKaps[iL][iD] = keptHelp[iL]
    #Won't work logSweptKaps = [ [ ToolBox.interpolV(logMasterKaps[iL][iD], masterLams, sweptLams) for iL in range()] ]
    #} //iD loop
    #Special code to test sweeper by forcing it to NOT sweep anything:
    # - IF this is uncommented, then sweeper above should be commented
    """for iLam in range(1, numMaster):
        #//Kept - ie. NOT swept out:
        sweepHelp[iSweep] = masterLams[iLam]
        iSweep+=1
    numKept = iSweep-1
    sweptLams = [0.0 for i in range(numKept)]
    for iKept in range(numKept):
        sweptLams[iKept] = sweepHelp[iKept]
    #//Interpolate the total extinction array onto the swept wavelength grid:
    logSweptKaps = [ [ 0.0 for i in range(numDeps) ] for j in range(numKept) ]
    for iD in range(numDeps):
        for iL in range(numKept):
            logSweptKaps[iL][iD] = logMasterKaps[iL][iD]
    #end special sweeper test block"""
# doesn't work ipdb.set_trace(pdb debug command
##Debug:
#print("numLams ", numLams, " numKept ", numKept)
#print("logKappa[:][36]")
#for iL in range(0, numLams, 10):
# print(lambdaScale[iL], logKappa[iL][36])
#print("logSweptKaps[:][36]")
#for iL in range(0, numKept, 10):
# print(sweptLams[iL], logSweptKaps[iL][36])
#plt.figure()
#plt.subplot(1,1,1)
#plt.plot(lambdaScale, [logKappa[iL][36] for iL in range(numLams) ], linewidth=3 )
#plt.plot(sweptLams, [logSweptKaps[iL][36] for iL in range(numKept) ], '--' )
#//
#////
#//Continuum monochromatic optical depth array:
logTauCont = LineTau2.tauLambdaCont(numLams, logKappa,
kappa500, numDeps, tauRos, logTotalFudge)
#//Evaluate formal solution of rad trans eq at each lambda
#// Initial set to put lambda and tau arrays into form that formalsoln expects
#Untransited I_lambda(cosTheta)
contIntens = [ [ numpy.double(0.0) for i in range(numThetas) ] for j in range(numLams) ]
contIntensLam = [numpy.double(0.0) for i in range(numThetas)]
contFlux = [ [ numpy.double(0.0) for i in range(numLams) ] for j in range(2) ]
contFluxLam = [numpy.double(0.0) for i in range(2)]
thisTau = [ [ numpy.double(0.0) for i in range(numDeps) ] for j in range(2) ]
lineMode = False #//no scattering for overall SED
##For testing:
#thisR = [0.0 for i in range(numThetas)]
#for iT in range(numThetas):
# thisTheta = math.acos(cosTheta[1][iT])
# thisR[iT] = math.sin(thisTheta)
    # Formal solution of the continuum radiative transfer at each wavelength:
    for il in range(numLams):
        #for id in range(numDeps):
        #    thisTau[1][id] = logTauCont[il][id]
        #    thisTau[0][id] = math.exp(logTauCont[il][id])
        thisTau[1] = [ numpy.double(x) for x in logTauCont[il] ]
        thisTau[0] = [ numpy.double(math.exp(x)) for x in logTauCont[il] ]
        #} // id loop
        contIntensLam = FormalSoln.formalSoln(numDeps,
                cosTheta, lambdaScale[il], thisTau, temp, lineMode)
        #        if (il == 87):
        #            print("plot lambda = ", 1.0e7*lambdaScale[il])
        #            plt.plot(thisR, contIntensLam/contIntensLam[0])
        contIntens[il] = [ numpy.double(x) for x in contIntensLam ]
        #//// Teff test - Also needed for convection module!:
        if (il > 1):
            lambda2 = lambdaScale[il] #// * 1.0E-7; // convert nm to cm
            lambda1 = lambdaScale[il - 1] #// * 1.0E-7; // convert nm to cm
            # NOTE(review): contFluxLam is still its all-zero initial value inside
            # this loop (it is never assigned here), so this accumulation adds
            # nothing; the effective Teff check later uses masterFlux — confirm.
            fluxSurfBol = fluxSurfBol + contFluxLam[0] * (lambda2 - lambda1)
    #//il loop
    #Untransited flux
    contFlux = Flux.flux3(contIntens, lambdaScale, cosTheta, phi, cgsRadius, omegaSini, macroVkm)
    # Transited-flux arrays: [linear,log] x wavelength x transit epoch
    contFluxTrans = [ [ [ numpy.double(0.0) for i in range(numTransThetas) ] for k in range(numLams) ] for j in range(2) ]
    contFluxTrans2 = [ [ [ numpy.double(0.0) for i in range(numTransThetas2) ] for k in range(numLams) ] for j in range(2) ]
    #fluxTransMA hold the monochromatic "F(z)" lightcurve from Mandel & Agol (2002) for test comparison
    # at arbitrary wavelength of choice
    fluxTransMA = [0.0 for i in range(numTransThetas)]
    fluxTransMA2 = [0.0 for i in range(numTransThetas2)]
    #Index of comparison wavelength
    ilMA = ToolBox.lamPoint(numLams, lambdaScale, 5.51e-5)
print("MA lamba ", lambdaScale[ilMA])
    #In Mandel & Agol (2002), "I" is intensity relative to disk centre:
    relContIntens = [x/contIntens[ilMA][0] for x in contIntens[ilMA]]
    if (ifTransit):
        # Continuum flux during transit, per wavelength and per transit position:
        contFluxTrans = FluxTrans.fluxTrans(contIntens, contFlux, lambdaScale, cosTheta,
                   radius, iFirstTheta, numTransThetas, rPlanet)
        fluxTransMA = TransitLightCurveAnlytc2.transLightAnlytc2(relContIntens, radius, pMA, cosTheta, vTrans, iFirstTheta, numTransThetas, impct)
        #reflect the half-transit profile and add the first and last points for
        #ingress and egress
        for j in range(numLams):
            #lens-shaped occultation area at planetary mid-point contact:
            #Ingress:
            #Subtracting the very small from the very large - let's be sophisticated about it:
            logHelper = math.log(math.pi) + math.log(contIntens[j][numThetas-1]) + logOmegaLens - contFlux[1][j]
            helper = numpy.double(1.0) - math.exp(logHelper)
            # Epoch 0: out of transit; epoch 1: mid-point contact (lens area blocked)
            contFluxTrans2[1][j][0] = contFlux[1][j]
            contFluxTrans2[0][j][0] = contFlux[0][j]
            contFluxTrans2[1][j][1] = contFlux[1][j] + math.log(helper)
            contFluxTrans2[0][j][1] = math.exp(contFluxTrans2[1][j][1])
            #Full occultation:
            #Ingress to minimum impact parameter
            for i in range(numTransThetas):
                indx = ( (numTransThetas-1)-i )
                contFluxTrans2[1][j][2+i] = contFluxTrans[1][j][indx]
                contFluxTrans2[0][j][2+i] = contFluxTrans[0][j][indx]
            #Minimum impact parameter to egress
            for i in range(numTransThetas):
                contFluxTrans2[1][j][2+(numTransThetas+i)] = contFluxTrans[1][j][i]
                contFluxTrans2[0][j][2+(numTransThetas+i)] = contFluxTrans[0][j][i]
            #Egress:
            contFluxTrans2[1][j][numTransThetas2-2] = contFlux[1][j] + math.log(helper)
            contFluxTrans2[0][j][numTransThetas2-2] = math.exp(contFluxTrans2[1][j][numTransThetas2-2])
            contFluxTrans2[1][j][numTransThetas2-1] = contFlux[1][j]
            contFluxTrans2[0][j][numTransThetas2-1] = contFlux[0][j]
        #Comparison light curve from Mandel & Agol formula
        #Ingress
        fluxTransMA2[0] = 1.0
        fluxTransMA2[1] = 1.0
        #Full occultation:
        #Ingress to minimum impact parameter
        for i in range(numTransThetas):
            indx = ( (numTransThetas-1)-i )
            fluxTransMA2[2+i] = fluxTransMA[indx]
            # NOTE(review): the next line is an exact duplicate of the previous one (harmless):
            fluxTransMA2[2+i] = fluxTransMA[indx]
            #print("2+i ", 2+i, " fluxTransMA2 ", fluxTransMA2[2+i])
        #Minimum impact parameter to egress
        for i in range(numTransThetas):
            fluxTransMA2[2+(numTransThetas+i)] = fluxTransMA[i]
            # NOTE(review): exact duplicate of the previous line (harmless):
            fluxTransMA2[2+(numTransThetas+i)] = fluxTransMA[i]
            #print("2+(numTransThetas+i) ", 2+(numTransThetas+i), " fluxTransMA2 ", fluxTransMA2[2+(numTransThetas+i)])
        #Egress
        fluxTransMA2[numTransThetas2-2] = 1.0
        fluxTransMA2[numTransThetas2-1] = 1.0
    # --- Line-blanketed radiative transfer on the swept wavelength grid ---
    logTauMaster = LineTau2.tauLambda(numKept, sweptLams, logSweptKaps,
             numDeps, kappa500, tauRos, logTotalFudge)
    #//Line blanketed formal Rad Trans solution:
    #//Evaluate formal solution of rad trans eq at each lambda throughout line profile
    #// Initial set to put lambda and tau arrays into form that formalsoln expects
    #Untransited I_lambda(cosTheta)
    masterIntens = [ [ numpy.double(0.0) for i in range(numThetas) ] for j in range(numKept) ]
    #Transited I_lambda(cosTheta)
    #masterIntensTrans = [ [ numpy.double(0.0) for i in range(numThetas) ] for j in range(numKept) ]
    masterIntensLam = [numpy.double(0.0) for i in range(numThetas)]
    masterFlux = [ [ numpy.double(0.0) for i in range(numKept) ] for j in range(2) ]
    masterFluxLam = [numpy.double(0.0) for i in range(2)]
    lineMode = False #//no scattering for overall SED
    for il in range(numKept):
        #// }
        #for id in range(numDeps):
        #    thisTau[1][id] = logTauMaster[il][id]
        #    thisTau[0][id] = math.exp(logTauMaster[il][id])
        #} // id loop
        thisTau[1] = [ numpy.double(x) for x in logTauMaster[il] ]
        thisTau[0] = [ numpy.double(math.exp(x)) for x in logTauMaster[il] ]
        masterIntensLam = FormalSoln.formalSoln(numDeps,
                cosTheta, sweptLams[il], thisTau, temp, lineMode)
        #for it in range(numThetas):
        #    masterIntens[il][it] = masterIntensLam[it]
        masterIntens[il] = [ numpy.double(x) for x in masterIntensLam ]
        #} //it loop - thetas
    #} //il loop
    #Untransited flux
    masterFlux = Flux.flux3(masterIntens, sweptLams, cosTheta, phi, cgsRadius, omegaSini, macroVkm)
    masterFluxTrans = [ [ [ numpy.double(0.0) for i in range(numTransThetas) ] for k in range(numKept) ] for j in range(2) ]
    masterFluxTrans2 = [ [ [ numpy.double(0.0) for i in range(numTransThetas2) ] for k in range(numKept) ] for j in range(2) ]
    if (ifTransit):
        masterFluxTrans = FluxTrans.fluxTrans(masterIntens, masterFlux, sweptLams, cosTheta,
                   radius, iFirstTheta, numTransThetas, rPlanet)
        #reflect the half-transit profile and add the first and last points just before
        #ingress and just after egress
        for j in range(numKept):
            #lens-shaped occultation area at planetary mid-point contact:
            #Ingress:
            #Subtracting the very small from the very large - let's be sophisticated about it:
            # NOTE(review): unlike the continuum version above, this omits the
            # math.log(math.pi) term — confirm which form is intended.
            logHelper = math.log(masterIntens[j][numThetas-1]) + logOmegaLens - masterFlux[1][j]
            helper = numpy.double(1.0) - math.exp(logHelper)
            masterFluxTrans2[1][j][0] = masterFlux[1][j]
            masterFluxTrans2[0][j][0] = masterFlux[0][j]
            masterFluxTrans2[1][j][1] = masterFlux[1][j] + math.log(helper)
            masterFluxTrans2[0][j][1] = math.exp(masterFluxTrans2[1][j][1])
            #Full occultation:
            #Ingress to minimum impact parameter
            for i in range(numTransThetas):
                masterFluxTrans2[1][j][2+i] = masterFluxTrans[1][j][(numTransThetas-1)-i]
                masterFluxTrans2[0][j][2+i] = masterFluxTrans[0][j][(numTransThetas-1)-i]
            #Minimum impact parameter to egress
            for i in range(numTransThetas):
                masterFluxTrans2[1][j][2+(numTransThetas+i)] = masterFluxTrans[1][j][i]
                masterFluxTrans2[0][j][2+(numTransThetas+i)] = masterFluxTrans[0][j][i]
            #Egress:
            masterFluxTrans2[1][j][numTransThetas2-2] = masterFlux[1][j] + math.log(helper)
            masterFluxTrans2[0][j][numTransThetas2-2] = math.exp(masterFluxTrans2[1][j][numTransThetas2-2])
            masterFluxTrans2[1][j][numTransThetas2-1] = masterFlux[1][j]
            masterFluxTrans2[0][j][numTransThetas2-1] = masterFlux[0][j]
    #pltb.plot(sweptLams, masterFlux[0])
    #plt.plot(sweptLams, masterFlux[0], '.')
    #Can we find a pythonic way to accumulate instead of this for loop??
    # Bolometric surface flux by trapezoid-style accumulation -> effective Teff check:
    for il in range(numKept):
        #//// Teff test - Also needed for convection module!:
        if (il > 1):
            lambda2 = sweptLams[il] #// * 1.0E-7; // convert nm to cm
            lambda1 = sweptLams[il - 1] #// * 1.0E-7; // convert nm to cm
            fluxSurfBol = fluxSurfBol + masterFlux[0][il] * (lambda2 - lambda1)
    #}
    logFluxSurfBol = math.log(fluxSurfBol)
    logTeffFlux = (logFluxSurfBol - Useful.logSigma()) / 4.0
    teffFlux = math.exp(logTeffFlux)
    print("Recovered Teff = %9.2f" % (teffFlux))
    #//Extract linear monochromatic continuum limb darkening coefficients (LDCs) ("epsilon"s):
    ldc = [0.0 for i in range(numLams)]
    ldc = LDC.ldc(numLams, lambdaScale, numThetas, cosTheta, contIntens)
    #
    #
    #
    #
    # Post-processing
    #
    # ***** Post-processing ported from ChromaStarServerUI *****
    #
    #
    #
    #
    #
    #
    #logContFluxI = ToolBox.interpolV(contFlux[1], lambdaScale, sweptLams)
    # Interpolate the log continuum flux onto the swept (line-blanketed) grid:
    logContFluxI = numpy.interp(sweptLams, lambdaScale, contFlux[1])
    #//Quality control:
    #iStart = ToolBox.lamPoint(numMaster, masterLams, (nm2cm*lambdaStart))
    #iStop = ToolBox.lamPoint(numMaster, masterLams, (nm2cm*lambdaStop))
    iStart = ToolBox.lamPoint(numKept, sweptLams, lambdaStart);
    iStop = ToolBox.lamPoint(numKept, sweptLams, lambdaStop);
    #//Continuum rectification
    numSpecSyn = iStop - iStart + 1
    specSynLams = [0.0 for i in range(numSpecSyn)]
    specSynFlux = [ [ 0.0 for i in range(numSpecSyn) ] for j in range(2) ]
    #js specSynFlux.length = 2;
    #specSynFlux[0] = [];
    #specSynFlux[1] = [];
    #specSynFlux[0].length = numSpecSyn;
    #specSynFlux[1].length = numSpecSyn;
    #for iCount in range(numSpecSyn):
    #    specSynLams[iCount] = sweptLams[iStart+iCount]
    #    specSynFlux[1][iCount] = masterFlux[1][iStart+iCount] - logContFluxI[iStart+iCount]
    #    specSynFlux[0][iCount] = math.exp(specSynFlux[1][iCount])
    # Rectified spectrum in the synthesis window: log(F_line) - log(F_continuum)
    specSynLams = [ x for x in sweptLams[iStart: iStart+numSpecSyn] ]
    specSynFlux[1] = [ (masterFlux[1][iStart+iCount] - logContFluxI[iStart+iCount]) for iCount in range(numSpecSyn) ]
    #print("log masterFlux")
    #print([masterFlux[1][iStart+iCount] for iCount in range(numSpecSyn)])
    #print("logContFluxI")
    #print([logContFluxI[iStart+iCount] for iCount in range(numSpecSyn)])
    specSynFlux[0] = [math.exp(x) for x in specSynFlux[1] ]
    #//
    #// * eqWidthSynth will try to return the equivalenth width of EVERYTHING in the synthesis region
    #// * as one value!  Isolate the synthesis region to a single line to a clean result
    #// * for that line!
    #// *
    Wlambda = PostProcess.eqWidthSynth(specSynFlux, specSynLams)
    #//
    #//Radial velocity correction:
    #//We have to correct both masterLams AND specSynLams to correct both the overall SED and the spectrum synthesis region:
    masterLams2 = [ 0.0 for i in range(numKept) ]
    specSynLams2 = [ 0.0 for i in range(numSpecSyn) ]
    #//refresh default each run:
    #for i in range(numKept):
    #    masterLams2[i] = sweptLams[i]
    masterLams2 = [ x for x in sweptLams ]
    #for i in range(numSpecSyn):
    #    specSynLams2[i] = specSynLams[i]
    specSynLams2 = [ x for x in specSynLams ]
    deltaLam = 0.0
    c = 2.9979249E+10 #// light speed in vaccuum in cm/s
    RVfac = RV / (1.0e-5*c) #// RV is in km/s; convert to fraction of c
    # Doppler-shift both wavelength grids by the radial velocity:
    if (RV != 0.0):
        #for i in range(numKept):
        #    deltaLam = RVfac * sweptLams[i]
        #    masterLams2[i] = masterLams2[i] + deltaLam
        masterLams2 = [ masterLams2[i] + (RVfac * sweptLams[i]) for i in range(numKept) ]
        #for i in range(numSpecSyn):
        #    deltaLam = RVfac * specSynLams[i]
        #    specSynLams2[i] = specSynLams2[i] + deltaLam
specSynLams2 = [ specSynLams2[i] + (RVfac * specSynLams[i]) ]
    invnAir = 1.0 / 1.000277 #// reciprocal of refractive index of air at STP
    # Optionally convert vacuum wavelengths to air wavelengths:
    if (vacAir == "air"):
        #for i in range(numKept):
        #    masterLams2[i] = invnAir * masterLams2[i]
        masterLams2 = [ invnAir * x for x in masterLams2 ]
        #for i in range(numSpecSyn):
        #    specSynLams2[i] = invnAir * specSynLams2[i]
        specSynLams2 = [ invnAir * x for x in specSynLams2 ]
    # Broad-band photometry from the (shifted) SED:
    bandFlux = PostProcess.UBVRIraw(masterLams2, masterFlux)
    colors = PostProcess.UBVRI(bandFlux)
    #print("U-V: ", colors[0], " B-V: ", colors[1], " V-R ", colors[2], " V-I: ", colors[3],\
    #      " R-I ", colors[4], " V- K ", colors[5], " J-K: ", colors[6])
    print("U-B: %6.2f B-V: %6.2f V-R: %6.2f V-I: %6.2f R-I: %6.2f V-K: %6.2f J-K: %6.2f" %\
          (colors[0], colors[1], colors[2], colors[3], colors[4], colors[5], colors[6]))
    #// UBVRI band intensity annuli - for disk rendering:
    bandIntens = PostProcess.iColors(masterLams2, masterIntens, numThetas, numKept)
    gaussFilter = PostProcess.gaussian(masterLams2, numKept, diskLambda, diskSigma, lamUV, lamIR)
    #//Use *shifted* wavelength scale (masterLams2) for user-filter integration of spectrum:
    tuneBandIntens = PostProcess.tuneColor(masterLams2, masterIntens, numThetas, numKept, \
          gaussFilter, lamUV, lamIR)
    #//Fourier transform of narrow band image:
    ft = PostProcess.fourier(numThetas, cosTheta, tuneBandIntens)
    numK = len(ft[0])
    if ifTransit:
        #Planetary transit light curves as seen through photometric filters:
        numBands = len(bandFlux)
        bandFluxTransit = [[0.0 for i in range(numTransThetas2)] for j in range(numBands)]
        #Sign - I don't know how to directly assign a 2D list column slice (2nd index):
        helpBandFlux = [0.0 for i in range(numBands)]
        helpMasterFlux = [[0.0 for i in range(numKept)] for j in range(2)]
        # At each transit epoch, extract the monochromatic fluxes and integrate
        # through the photometric bands:
        for iEpoch in range(numTransThetas2):
            for il in range(numKept):
                helpMasterFlux[1][il] = masterFluxTrans2[1][il][iEpoch]
                helpMasterFlux[0][il] = masterFluxTrans2[0][il][iEpoch]
            helpBandFlux = PostProcess.UBVRIraw(masterLams2, helpMasterFlux)
            for iBand in range(numBands):
                bandFluxTransit[iBand][iEpoch] = helpBandFlux[iBand]
        bandFluxTransit2 = [[0.0 for i in range(numEpochs)] for j in range(numBands)]
        #Interpolate transit light curves onto total duration we are following:
        for iBand in range(numBands):
            bandFluxTransit2[iBand] = ToolBox.interpolV(bandFluxTransit[iBand], transit2, ephemT)
        #Interpolate reference light curve from Mandel & Agol (2002) too:
        fluxTransMA2i = ToolBox.interpolV(fluxTransMA2, transit2, ephemT)
        #for i in range(numEpochs):
        #    print("i ", i, " fluxTransMA2i ", fluxTransMA2i[i])
log10temp = [0.0 for i in range(numDeps)]
log10rho = [0.0 for i in range(numDeps)]
log10kappaRos = [0.0 for i in range(numDeps)]
log10kappa500 = [0.0 for i in range(numDeps)]
mmwAmu = [0.0 for i in range(numDeps)]
depthsKm = [0.0 for i in range(numDeps)]
#log10mmw = [0.0 for i in range(numDeps)]
#for i in range(numDeps):
# log10tauRos[i] = log10e * tauRos[1][i]
# log10temp[i] = log10e * temp[1][i]
# log10pgas[i] = log10e * pGas[1][i]
# log10pe[i] = log10e * (newNe[1][i] + Useful.logK() + temp[1][i])
# log10prad[i] = log10e * pRad[1][i]
# log10ne[i] = log10e * newNe[1][i]
# log10rho[i] = log10e * rho[1][i]
# log10NH[i] = log10e * logNH[i]
# log10kappaRos[i] = log10e * kappaRos[1][i]
# log10kappa500[i] = log10e * kappa500[1][i]
# mmwAmu[i] = mmw[i] / Useful.amu()
# depthsKm[i] = 1.0e-5 * depths[i]
log10tauRos = [ round(log10e * x, 4) for x in tauRos[1] ]
log10temp = [ round(log10e * x, 4) for x in temp[1] ]
log10pgas = [ round(log10e * x, 4) for x in pGas[1] ]
log10pe = [ round(log10e * (newNe[1][i] + Useful.logK() + temp[1][i]), 4) for i in range(numDeps) ]
log10prad = [ round(log10e * x, 4) for x in pRad[1] ]
log10ne = [ round(log10e * x, 4) for x in newNe[1] ]
log10rho = [ round(log10e * x, 4) for x in rho[1] ]
log10NH = [ round(log10e * x, 4) for x in logNH ]
log10kappaRos = [ round(log10e * x, 4) for x in kappaRos[1] ]
log10kappa500 = [ round(log10e * x, 4) for x in kappa500[1] ]
mmwAmu = [ round(x / Useful.amu(), 4) for x in mmw ]
depthsKm = [ round(1.0e-5 * x, 4) for x in depths ]
#outFile = outPath + strucFile
outFile = outPath + fileStem + ".struc.txt"
#print vertical atmospheric structure
#with open(outFile, 'w', encoding='utf-8') as strucHandle:
with open(outFile, 'w') as strucHandle:
#with open(strucFile, 'w') as strucHandle:
strucHandle.write(inputParamString + "\n")
strucHandle.write("cgs units, unless otherwise noted" + "\n")
strucHandle.write("logTauRos depth temp logPgas logPe logPRad logNe logNH logRho mu(amu) logKapRos logKap500" + "\n")
#NOt trivially pythonizable - each time through it writes a line to an output file
for i in range(numDeps):
outLine = str(log10tauRos[i]) + " " + str(depthsKm[i]) + " " + str(round(temp[0][i], 4)) + " " + str(log10pgas[i]) +\
" " + str(log10pe[i]) + " " + str(log10prad[i]) + " " + str(log10ne[i]) + " " + str(log10NH[i]) + " " + str(log10rho[i]) +\
" " + str(mmwAmu[i]) + " " + str(log10kappaRos[i]) + " " + str(log10kappa500[i]) + "\n"
strucHandle.write(outLine)
#This doesn't work...
#outLine = ""
#outLine = [ outLine + str(log10tauRos[i]) + " " + str(depthsKm[i]) + " " + str(temp[0][i]) + " " + str(log10pgas[i]) + " " + str(log10pe[i]) + \
# " " + str(log10prad[i]) + " " + str(log10ne[i]) + " " + str(log10NH[i]) + " " + str(log10rho[i]) + " " + str(mmwAmu[i]) + \
# str(log10kappaRos[i]) + " " + str(log10kappa500[i]) + "\n" for i in range(numDeps) ]
#strucHandle.write(outLine)
if makePlotStruc:
plt.figure()
plt.subplot(1, 1, 1)
#Initialplot set-up
plt.title = "T_kin vs log(tau)"
plt.xlabel(r'$\log_{10} \tau_{\rm ROs}$')
plt.ylabel(r'$T_{\rm kin}$ (K)')
xMin = -6.5
xMax = 2.5
plt.xlim(xMin, xMax)
yMax = max(temp[0]) + 1000.0
yMin = min(temp[0]) - 500.0
plt.ylim(yMin, yMax)
plt.plot(log10tauRos, temp[0])
    #
    #
    # Report 2:
    #
    #
    #Print absolute spectral energy distribution (SED)
    numWave = numKept
    wave = [0.0 for i in range(numWave)]
    log10Wave = [0.0 for i in range(numWave)]
    log10Flux = [0.0 for i in range(numWave)]
    #for i in range(numWave):
    #    wave[i] = cm2nm * masterLams2[i]
    #    log10Wave[i] = math.log10(masterLams2[i])
    #    log10Flux[i] = log10e * masterFlux[1][i]
    # Wavelengths in nm and log10 flux, rounded for tabulation:
    wave = [ round(cm2nm * x, 4) for x in masterLams2 ]
    log10Wave = [ round(math.log10(x), 4) for x in masterLams2 ]
    log10Flux = [ round(log10e * x, 4) for x in masterFlux[1] ]
    #Debug
    #log10WaveC = [ round(math.log10(x), 4) for x in lambdaScale ]
    #log10FluxC = [ round(log10e * x, 4) for x in contFlux[1] ]
    #log10WaveCI = [ round(math.log10(x), 4) for x in sweptLams ]
    #log10FluxCI = [ round(log10e * x, 4) for x in logContFluxI ]
    if makePlotSED:
        plt.figure()
        plt.subplot(1, 1, 1)
        #Initial plt plot set-up
plt.title = "Spectral energy distribution (SED)"
        plt.xlabel(r'$\log_{10} \lambda$ (cm)')
        plt.ylabel(r'$\log_{10} F_\lambda$ (erg s$^{-1}$ cm$^{-2}$ cm$^{-1}$')
        xMin = min(log10Wave) - 0.1
        xMax = max(log10Wave) + 0.1
        plt.xlim(xMin, xMax)
        yMax = max(log10Flux) + 0.5
        yMin = min(log10Flux) - 0.5
        plt.ylim(yMin, yMax)
        plt.plot(log10Wave, log10Flux, linewidth=3)
        #Debug
        #plt.plot(log10WaveC, log10FluxC, "--", linewidth = 2)
        #plt.plot(log10WaveCI, log10FluxCI, "-.")
    #outFile = outPath + sedFile
    outFile = outPath + fileStem + ".sed.txt"
    # Write the SED table: wavelength (nm) vs log10 absolute flux (cgs)
    #with open(outFile, 'w', encoding='utf-8') as sedHandle:
    with open(outFile, 'w') as sedHandle:
    #with open(sedFile, 'w') as sedHandle:
        sedHandle.write(inputParamString)
        sedHandle.write("Number of lines treated with Voigt profiles: " + str(numGaussLines) + "\n")
        sedHandle.write("Number of wavelength points: " + str(numKept) + "\n")
        sedHandle.write("wave (nm)   log10(flux) (cgs) \n")
        for i in range(numKept):
            flux = log10Flux[i]
            outLine = str(wave[i]) + "   " + str(flux) + "\n"
            sedHandle.write(outLine)
    #
    #
    # Report 3:
    #synthetic spectrum quantities
    #
    #
    waveSS = [0.0 for i in range(numSpecSyn)]
    #for i in range(numSpecSyn):
    #    waveSS[i] = cm2nm * specSynLams2[i]
    # Synthesis-window wavelengths in nm, rounded for tabulation:
    waveSS = [ round(cm2nm * x, 4) for x in specSynLams2 ]
    print("Number of lines treated with Voigt profiles: ", numGaussLines)
    #Print rectified high resolution spectrum of synthesis region
    #outFile = outPath + specFile
    outFile = outPath + fileStem + ".spec.txt"
    #with open(outFile, 'w', encoding='utf-8') as specHandle:
    with open(outFile, 'w') as specHandle:
    #with open(specFile, 'w') as specHandle:
        specHandle.write(inputParamString + "\n")
        specHandle.write("Number of lines treated with Voigt profiles: " + str(numGaussLines) + "\n")
        specHandle.write("Number of wavelength points: " + str(numSpecSyn) + "\n")
        specHandle.write("wave (nm)   normalized flux \n")
        for i in range(numSpecSyn):
            outLine = str(waveSS[i]) + "   " + str(round(specSynFlux[0][i], 4)) + "\n"
            specHandle.write(outLine)
        #With line ID labels:
        specHandle.write("   ")
        specHandle.write("lambda_0  species\n")
        # Append a line-identification table (rest wavelength + species label):
        for i in range(numGaussLines):
            thisLam = cm2nm * list2Lam0[gaussLine_ptr[i]]
            thisLam = round(thisLam, 2)
            thisLbl = list2Element[gaussLine_ptr[i]] + " " + \
                      list2StageRoman[gaussLine_ptr[i]] + " " + str(thisLam)
            outLine = str(thisLam) + "   " + thisLbl + "\n"
            specHandle.write(outLine)
    if makePlotSpec:
        plt.figure()
        plt.subplot(1, 1, 1)
        plt.xlabel(r'$\lambda$ (nm)')
        plt.ylabel(r'$F_\lambda/F^C_\lambda$')
        plt.xlim(-6.5, 2.5)
plt.title = "Synthetic spectrum"
        xMin = min(waveSS)
        xMax = max(waveSS)
        plt.xlim(xMin, xMax)
        plt.ylim(0.0, 2.0)
        plt.plot(waveSS, specSynFlux[0])
        #Add spectral line labels:
        # Tick marks at each identified line plus a rotated species label:
        for i in range(numGaussLines):
            thisLam = cm2nm * list2Lam0[gaussLine_ptr[i]]
            thisLam = round(thisLam, 2)
            thisLbl = list2Element[gaussLine_ptr[i]] + " " + \
                      list2StageRoman[gaussLine_ptr[i]] + " " + str(thisLam)
            xPoint = [thisLam, thisLam]
            yPoint = [1.05, 1.1]
            plt.plot(xPoint, yPoint, color='black')
            plt.text(thisLam, 1.7, thisLbl, rotation=270)
#
#
# Report 4:
#
#
#Print narrow band Gaussian filter quantities:
# limb darkening curve (LDC) and discrete fourier cosine transform of LDC
# Normalize band-integrated intensities to the disk-centre (mu = 1) value.
normTuneBandIntens = [ x / tuneBandIntens[0] for x in tuneBandIntens ]
#outFile = outPath + ldcFile
outFile = outPath + fileStem + ".ldc.txt"
#with open(outFile, 'w', encoding='utf-8') as ldcHandle:
with open(outFile, 'w') as ldcHandle:
    #with open(ldcFile, 'w') as ldcHandle:
    ldcHandle.write(inputParamString)
    # Section 1: normalized limb darkening curve vs cos(theta).
    ldcHandle.write("Narrow band limb darkening curve (LDC) \n")
    ldcHandle.write("cos(theta)   I(mu)/I(0) \n")
    for i in range(numThetas):
        outLine = str(round(cosTheta[1][i], 4)) + "   " + str(round(normTuneBandIntens[i], 4)) + "\n"
        ldcHandle.write(outLine)
    ldcHandle.write("\n  ")
    # Section 2: discrete Fourier cosine transform of the LDC.
    ldcHandle.write("Discrete fourier cosine transform of LDC \n")
    ldcHandle.write("k (RAD/RAD)   I(k) \n")
    for i in range(numK):
        outLine = str(round(ft[0][i], 4)) + "   " + str(round(ft[1][i], 4)) + "\n"
        ldcHandle.write(outLine)
    ldcHandle.write("\n  ")
    # Section 3: monochromatic continuum linear limb darkening coefficients.
    ldcHandle.write("Monochromatic continuum linear limb darkening coefficients (LDCs) \n")
    ldcHandle.write("Wavelength (nm)   LDC \n")
    # NOTE(review): this loop runs over numK (the FT frequency count) while
    # indexing the wavelength-sized arrays wave[] and ldc[] -- confirm that
    # numK does not exceed the wavelength grid length.
    for i in range(numK):
        outLine = str(wave[i]) + "   " + str(round(ldc[i], 4)) + "\n"
        ldcHandle.write(outLine)
#narrow band limb darkening curve (LDC)
if makePlotLDC:
    plt.figure()
    plt.subplot(1, 1, 1)
    #BUGFIX: call pyplot.title(); the original assigned a string to plt.title,
    #clobbering the function and setting no title.
    plt.title("Narrow band limb darkening")
    plt.xlabel(r'$cos\theta$ (RAD)')
    plt.ylabel(r'$I^{\rm C}_{\rm band}/I^{\rm C}_{\rm band}(0)$')
    plt.xlim(-0.1, 1.1)
    plt.ylim(0, 1.1)
    plt.plot(cosTheta[1], normTuneBandIntens)
#discrete fourier cosine transform of LDC
if makePlotFT:
    plt.figure()
    plt.subplot(1, 1, 1)
    #BUGFIX: same plt.title assignment-vs-call defect as above.
    plt.title("Fourier cosine transform of I_lambda(theta)")
    plt.xlabel('Angular frequency (RAD/RAD)')
    plt.ylabel(r'$I^{\rm C}_{\rm band}(\theta)$')
    #Pad the axis limits by 10% on each side of the data range.
    xMin = 0.9 * min(ft[0])
    xMax = 1.1 * max(ft[0])
    plt.xlim(xMin, xMax)
    yMin = 0.9 * min(ft[1])
    yMax = 1.1 * max(ft[1])
    plt.ylim(yMin, yMax)
    plt.plot(ft[0], ft[1])
#
#
# Report 6:
#
#
#//
#//"""
#Print partial pressures of atomic and molecular species
#Mostly now from Phil Bennett's GAS package
#outFile = outPath + lineFile
#print(" **** Report 6!!!! **** ")
outFile = outPath + fileStem + ".ppress.txt"
#with open(outFile, 'w', encoding='utf-8') as tlaHandle:
with open(outFile, 'w') as ppHandle:
    #with open(tlaFile, 'w') as tlaHandle:
    ppHandle.write(inputParamString + "\n")
    # NOTE(review): the header claims "every 10th depth" but the loop below
    # writes every depth -- confirm which is intended.
    ppHandle.write("Log_10 partial pressures every 10th depth: \n")
    # Two physical lines per depth point:
    #  (1) depth and thermodynamic state (tau, T, P_gas, P_e),
    #  (2) log10 partial pressure of every species, space-delimited pairs.
    for iD in range(0, numDeps):
        ppHandle.write("log_10(Tau_Ros) " + str(log10tauRos[iD]) + " T_Kin " + str(log10temp[iD]) +\
                       " (K) log_10(P_Gas) " + str(log10pgas[iD]) +\
                       " (dynes/cm^2) log_10(P_e) " + str(log10pe[iD]) + "\n" )
        for iSpec in range(gsNspec):
            ppHandle.write(gsName[iSpec] + " " + \
                           str(round(log10MasterGsPp[iSpec][iD], 4)) + " ")
        ppHandle.write("\n")
        #print("R6 ", (10.0**log10MasterGsPp[0][iD])/(10.0**log10pgas[iD]))
#Depth profile of one species' log10 partial pressure, chosen by name.
if makePlotPPress:
    plt.figure()
    plt.subplot(1, 1, 1)
    whichSpec = Input.plotSpec
    #Linear scan for the species whose (stripped) name matches the request;
    #thisSpec keeps the last index if no match is found.
    for thisSpec in range(gsNspec):
        if (gsName[thisSpec].strip() == whichSpec.strip()):
            break
    #BUGFIX: call pyplot.title(); the original assigned the string to plt.title,
    #clobbering the function and setting no title.
    plt.title("Log_10 Partial pressure: " + gsName[thisSpec])
    plt.xlabel(r'$\log\tau$')
    plt.ylabel(r'$\log P$ (dynes cm$^{-2}$')
    xMin = logE * min(tauRos[1])
    xMax = logE * max(tauRos[1])
    yMin = min(log10MasterGsPp[thisSpec])
    yMax = max(log10MasterGsPp[thisSpec])
    plt.xlim(xMin, xMax)
    plt.ylim(yMin, yMax)
    plt.plot(log10tauRos, log10MasterGsPp[thisSpec])
    #print(log10tauRos)
    #print(log10MasterGsPp[thisSpec])
#
#
# Report 7:
#
#Exo-planet transit light curves through the UBVRIK photometric bands.
if (ifTransit and makePlotTrans):
    #
    plt.figure()
    plt.subplot(1, 1, 1)
    #Initial plot set-up
    #BUGFIX: call pyplot.title(); the original assigned a string to plt.title,
    #clobbering the function and setting no title.
    plt.title("Exoplanet transit light curve")
    plt.xlabel(r'Time (hrs)')
    plt.ylabel(r'Relative flux')
    #xMin =
    #xMax =
    #plt.xlim(xMin, xMax)
    yMin = 0.0
    yMax = 1.0
    yMinUV = min(bandFluxTransit2[0])/bandFluxTransit2[0][0] #minimum UV flux during transit
    yMinIR = min(bandFluxTransit2[numBands-1])/bandFluxTransit2[numBands-1][0] #minimum IR flux during transit
    yMinMA = min(fluxTransMA2i)
    yMin = min([yMinUV, yMinIR, yMinMA]) # minimum of the three
    #Mirror the transit depth above 1.0 so the curve sits centred in the frame.
    yMax = 1.0 + (1.0 - yMin)
    #NOTE(review): xMin/xMax are not set in this block -- they are leftovers
    #from an earlier plot section, so the label spacing depends on which plots
    #ran before this one. Confirm the intended spacing basis.
    textStep = (xMax-xMin)/10.0
    #print("yminUV ", yMinUV, " yminIR ", yMinIR, " ymin ", yMin, " yMax ", yMax, " textStep ", textStep)
    plt.ylim(yMin, yMax)
    #transit2Hrs = [x/3600.0 for x in transit2] #s to hours
    ephemTHrs = [x/3600.0 for x in ephemT] #s to hours
    #Photometric-band indices to plot (U, B, V, R, I, K) and their styling.
    whichBands = [0, 1, 3, 4, 5, 8]
    numPlotBands = len(whichBands)
    #numPlotBands = 1
    bandLbls = ["U", "B", "V", "R", "I", "K"]
    transPalette = ['violet', 'blue', 'green', 'red', 'brown', 'black']
    #transPalette = ['violet', 'blue', 'blue', 'green', 'orange', 'red', 'brown', 'gray', 'black', '']
    #Normalize each plotted band by its own untransited (epoch 0) flux.
    normFluxTransit = [[0.0 for i in range(numEpochs)] for j in range(numPlotBands)]
    for iB in range(numPlotBands):
        for iE in range(numEpochs):
            normFluxTransit[iB][iE] = bandFluxTransit2[whichBands[iB]][iE]/bandFluxTransit2[whichBands[iB]][0]
        plt.plot(ephemTHrs, normFluxTransit[iB], color = transPalette[iB])
        plt.text(ephemTHrs[10]+(textStep*iB), 1.0, bandLbls[iB], color = transPalette[iB])
    #Overplot "F(z)" lightcurve from Mandel & Agol (2002) Section 5 Eq.
    plt.plot(ephemTHrs, fluxTransMA2i, '--', color='black')
    #plt.plot(ephemTHrs, normFluxTransit[0], 'o')
    #for i in range(numEpochs):
    #    print("i ", i, " normFluxTransit[0] ", normFluxTransit[0][i], " fluxTransMA2i ", fluxTransMA2i[i])
#Write the normalized transit light curves (6 bands + Mandel & Agol model)
#to <fileStem>.trans.txt, one row per epoch.
if (ifTransit):
    outFile = outPath + fileStem + ".trans.txt"
    #NOTE(review): bandLbls and normFluxTransit are defined only inside the
    #plotting block above, which is additionally guarded by makePlotTrans --
    #this block raises NameError when ifTransit is True but makePlotTrans is
    #False. Confirm whether the normalization should be hoisted out of the
    #plotting guard.
    #with open(outFile, 'w', encoding='utf-8') as strucHandle:
    with open(outFile, 'w') as strucHandle:
        #with open(strucFile, 'w') as strucHandle:
        strucHandle.write(inputParamString + "\n")
        strucHandle.write("numBands " + str(7) + " numEpochs " + str(numEpochs) + "\n")
        strucHandle.write("t (s), F^transit_band(t)/F_band" + "\n")
        #Column header: time plus one column per plotted band plus the M&A model.
        outLine = "t(s) "\
                  + " " + str(bandLbls[0]) + " " + str(bandLbls[1])\
                  + " " + str(bandLbls[2]) + " " + str(bandLbls[3])\
                  + " " + str(bandLbls[4]) + " " + str(bandLbls[5])\
                  + " " + "M&A02\n"
        strucHandle.write(outLine)
        for iE in range(numEpochs):
            #Transited flux already normalized to untransited flux above.
            #CONSISTENCY FIX: normFluxTransit is indexed by plot-band index
            #(0..5); the original used normFluxTransit[whichBands[0]] here,
            #mixing in the photometric-band index. whichBands[0] == 0, so the
            #value is identical, but the indexing now matches columns 1..5.
            outLine = str(round(ephemT[iE], 4)) + " "\
                      + str(normFluxTransit[0][iE]) + " "\
                      + str(normFluxTransit[1][iE]) + " "\
                      + str(normFluxTransit[2][iE]) + " "\
                      + str(normFluxTransit[3][iE]) + " "\
                      + str(normFluxTransit[4][iE]) + " "\
                      + str(normFluxTransit[5][iE]) + " "\
                      + str(fluxTransMA2i[iE]) + "\n"
            strucHandle.write(outLine)
#print(" ")
#print(" ************** ")
#print(" ")
#print("STOP!!!!")
#print(" ")
#print(" ************** ")
#print(" ")
#// *****************************
#//
#//
#//
#// User-defined two-level atom and line profile section:
#//
#//
#//
#//
#// Set up grid of line lambda points sampling entire profile (cm):
numCore = 5 #//half-core
numWing = 10 #//per wing
numPoints = 2 * (numCore + numWing) - 1 #// + 1; //Extra wavelength point at end for monochromatic continuum tau scale
#//linePoints: Row 0 in cm (will need to be in nm for Planck.planck), Row 1 in Doppler widths
species = "Ca" #Anything but Hydrogen - doesn't matter for now - ??
linePoints = LineGrid.lineGridVoigt(userLam0, userMass, xiT, numDeps, teff, numCore, numWing, species) #//cm
#// Get Einstein coefficient for spontaneous de-excitation from f_ij to compute natural
#// (radiation) broadening: Assumes ratio of statistical weights, g_j/g_i is unity
#logAij = math.log(6.67e13) + math.log(10.0)*userLogF - 2.0*math.log(cm2nm*userLam0)
log10Aij = math.log10(6.67e13) + userLogF - 2.0*math.log10(cm2nm*userLam0)
#////
#//Compute area-normalized depth-independent line profile "phi_lambda(lambda)"
#// voigt2() is the alternative Voigt implementation; voigt() additionally
#// takes the Hjerting-function table and the debug handle.
if (ifVoigt == True):
    lineProf = LineProf.voigt2(linePoints, userLam0, log10Aij, userLogGammaCol,
                               numDeps, teff, tauRos, temp, pGas, tempSun, pGasSun)
else:
    lineProf = LineProf.voigt(linePoints, userLam0, log10Aij, userLogGammaCol, \
                              numDeps, teff, tauRos, temp, pGas, tempSun, pGasSun, hjertComp, dbgHandle)
#//
#// Level population now computed in LevelPops.levelPops()
#//
#// This holds 2-element temperature-dependent base 10 logarithmic partition fn:
#for k in range(len(thisUwV)):
#    thisUwV[k] = 0.0 #//default initialization
thisUwV = [ 0.0 for i in range(numAtmPrtTmps) ]
# logNums rows: 0..1 = stage I/II ground, 2 = lower E-level, 3 = upper E-level,
# 4..6 = stages III-V (see assignments below).
logNums = [ [ 0.0 for i in range(numDeps) ] for j in range(numStages+2) ]
thisLogN = [0.0 for i in range(numDeps)]
#for i in range(numDeps):
#    thisLogN[i] = logE10*(userA12 - 12.0) + logNH[i]
# Total number density of the user species from its A12 abundance and n(H).
thisLogN = [ logE10*(userA12 - 12.0) + x for x in logNH ]
#//load arrays for stagePops2():
#//Default is to set both temperature-dependent values to the user-input value:
chiIArr[0] = userChiI1
chiIArr[1] = userChiI2
chiIArr[2] = userChiI3
chiIArr[3] = userChiI4
log10UwAArr[0][0] = math.log10(userGw1)
log10UwAArr[0][1] = math.log10(userGw1)
log10UwAArr[1][0] = math.log10(userGw2)
log10UwAArr[1][1] = math.log10(userGw2)
log10UwAArr[2][0] = math.log10(userGw3)
log10UwAArr[2][1] = math.log10(userGw3)
log10UwAArr[3][0] = math.log10(userGw4)
log10UwAArr[3][1] = math.log10(userGw4)
#//One phantom molecule:
#// stagePops2() requires molecular inputs; supply one dummy molecule with a
#// negligible number density (log N = -49) and a huge dissociation energy so
#// it cannot affect the ionization equilibrium.
fakeNumMols = 1
fakeLogNumB = [ [ 0.0 for i in range(numDeps) ] for j in range(1) ]
#for i in range(numDeps):
#    fakeLogNumB[0][i] = -49.0
fakeLogNumB[0] = [ -49.0 for i in range(numDeps) ]
fakeDissEArr = [ 0.0 for i in range(1) ]
fakeDissEArr[0] = 29.0 #//eV
fakeLog10UwBArr = [ [ 0.0 for i in range(numAtmPrtTmps) ] for j in range(1) ]
#for kk in range(len(fakeLog10UwBArr)):
#    fakeLog10UwBArr[0][kk] = 0.0
fakeLogQwABArr = [ [ 0.0 for i in range(numMolPrtTmps) ] for j in range(fakeNumMols) ]
#for im in range(fakeNumMols):
#    for kk in range(numMolPrtTmps):
#        fakeLogQwABArr[im][kk] = math.log(300.0)
fakeLogQwABArr = [ [ log300 for kk in range(numMolPrtTmps) ] for im in range(fakeNumMols) ]
fakeLogMuABArr = [0.0 for i in range(1)]
fakeLogMuABArr[0] = math.log(2.0) + Useful.logAmu() #//g
# Solve the ionization equilibrium for the user species across all depths.
logN = LevelPopsGasServer.stagePops2(thisLogN, newNe, chiIArr, log10UwAArr, \
                                     fakeNumMols, fakeLogNumB, fakeDissEArr, fakeLog10UwBArr, fakeLogQwABArr, fakeLogMuABArr, \
                                     numDeps, temp)
#for iTau in range(numDeps):
#    logNums[0][iTau] = logN[0][iTau]
#    logNums[1][iTau] = logN[1][iTau]
#    logNums[4][iTau] = logN[2][iTau]
#    logNums[5][iTau] = logN[3][iTau]
#    logNums[6][iTau] = logN[4][iTau]
#//logNums[6][iTau] = logN[4][iTau];
# Copy stage populations into the logNums layout (rows 2-3 reserved for the
# b-b transition's lower/upper level populations, filled below).
logNums[0] = [ x for x in logN[0] ]
logNums[1] = [ x for x in logN[1] ]
logNums[4] = [ x for x in logN[2] ]
logNums[5] = [ x for x in logN[3] ]
logNums[6] = [ x for x in logN[4] ]
# Map the user's ionization stage (0=I .. 3=IV) onto the logN row index.
stage_ptr = 0 #//default initialization is neutral stage
if (userStage == 0):
    stage_ptr = 0
if (userStage == 1):
    stage_ptr = 1
if (userStage == 2):
    stage_ptr = 4
if (userStage == 3):
    stage_ptr = 5
# Boltzmann population of the transition's lower E-level at each depth.
numHelp = LevelPopsGasServer.levelPops(userLam0, logN[stage_ptr], userChiL, thisUwV, \
                                       userGwL, numDeps, temp);
#for iTau in range(numDeps):
#    logNums[2][iTau] = numHelp[iTau]
#//Log of line-center wavelength in cm
logNums[2] = [ x for x in numHelp ]
logLam0 = math.log(userLam0)
#// energy of b-b transition
logTransE = Useful.logH() + Useful.logC() - logLam0 - Useful.logEv() #// last term converts back to cgs units
#// Energy of upper E-level of b-b transition
chiU = userChiL + math.exp(logTransE)
numHelp = LevelPopsGasServer.levelPops(userLam0, logN[stage_ptr], chiU, thisUwV, userGwL, \
                                       numDeps, temp)
#for iTau in range(numDeps):
#    logNums[3][iTau] = numHelp[iTau] #//upper E-level - not used - fake for testing with gS3 line treatment
logNums[3] = [ x for x in numHelp ] #//upper E-level - not used - fake for testing with gS3 line treatment
#//
#//Compute depth-dependent logarithmic monochromatic extinction co-efficient, kappa_lambda(lambda, tauRos):
# Shift the line-profile wavelength offsets onto absolute wavelengths (cm).
lineLambdas = [0.0 for i in range(numPoints)]
#for il in range(numPoints):
#    lineLambdas[il] = linePoints[0][il] + userLam0
lineLambdas = [ x + userLam0 for x in linePoints[0] ]
logKappaL = LineKappa.lineKap(userLam0, logNums[2], userLogF, linePoints, lineProf, \
                              numDeps, zScale, tauRos, temp, rho, logFudgeTune)
# Total (line + continuum) extinction on the line wavelength grid.
logTotKappa = LineKappa.lineTotalKap(lineLambdas, logKappaL, numDeps, logKappa, \
                                     numLams, lambdaScale)
#//
#//Compute monochromatic optical depth scale, Tau_lambda throughout line profile
#//CAUTION: Returns numPoints+1 x numDeps array: the numPoints+1st row holds the line centre continuum tau scale
logTauL = LineTau2.tauLambda(numPoints, lineLambdas, logTotKappa, \
                             numDeps, kappa500, tauRos, logTotalFudge)
#//Evaluate formal solution of rad trans eq at each lambda throughout line profile
#// Initial set to put lambda and tau arrays into form that formalsoln expects
lineIntens = [ [ 0.0 for i in range(numThetas) ] for j in range(numPoints) ]
lineIntensLam = [0.0 for i in range(numThetas)]
lineFlux = [ [ 0.0 for i in range(numPoints) ] for j in range(2) ]
lineFluxLam = [0.0 for i in range(2)]
# lineMode tells formalSoln whether to include the scattering treatment.
if (ifScatt == True):
    lineMode = True
else:
    lineMode = False
# Formal solution at each wavelength of the line profile.
for il in range(numPoints):
    #for id in range(numDeps):
    #    thisTau[1][id] = logTauL[il][id]
    #    thisTau[0][id] = math.exp(logTauL[il][id])
    #//console.log("il " + il + " id " + id + " logTauL[il][id] " + logE*logTauL[il][id]);
    # Row 1: log tau_lambda; row 0: linear tau_lambda, as formalSoln expects.
    thisTau[1] = [ x for x in logTauL[il] ]
    thisTau[0] = [ math.exp(x) for x in logTauL[il] ]
    lineIntensLam = FormalSoln.formalSoln(numDeps, \
                                          cosTheta, lineLambdas[il], thisTau, temp, lineMode)
    #//lineFluxLam = flux2(lineIntensLam, cosTheta);
    #for it in range(numThetas):
    #    lineIntens[il][it] = lineIntensLam[it]
    lineIntens[il] = [ x for x in lineIntensLam ]
    #//console.log("il " + il + " it " + it + "lineIntensLam[it] " + lineIntensLam[it]);
    #} //it loop - thetas
    #} //il loop
# Disk-integrated flux including rotation and macroturbulence.
lineFlux = Flux.flux3(lineIntens, lineLambdas, cosTheta, phi, cgsRadius, omegaSini, macroVkm)
#//Continuum rectify line spectrum:
#//
#contFlux2 = ToolBox.interpolV(contFlux[0], lambdaScale, lineLambdas)
# Interpolate the continuum flux onto the line wavelength grid.
contFlux2 = numpy.interp(lineLambdas, lambdaScale, contFlux[0])
lineFlux2 = [ [ 0.0 for i in range(numPoints) ] for j in range(2) ]
#for i in range(numPoints):
#    lineFlux2[0][i] = lineFlux[0][i] / contFlux2[i]
#    lineFlux2[1][i] = math.log(lineFlux2[0][i])
# Row 0: rectified flux F/F_C; row 1: its natural log.
lineFlux2[0] = [ lineFlux[0][i] / contFlux2[i] for i in range(numPoints) ]
lineFlux2[1] = [ math.log(x) for x in lineFlux2[0] ]
#//Get equivalent width, W_lambda, in pm - picometers:
#//Wlambda = eqWidth(lineFlux2, linePoints, lam0); //, fluxCont);
WlambdaLine = PostProcess.eqWidthSynth(lineFlux2, lineLambdas)
#
#
# Report 5:
#
#
#//
#//"""
#Print rectified two-level-atom line profile and level populations.
lineWave = [0.0 for i in range(numPoints)]
#outFile = outPath + lineFile
outFile = outPath + fileStem + ".tla.txt"
#with open(outFile, 'w', encoding='utf-8') as tlaHandle:
with open(outFile, 'w') as tlaHandle:
    #with open(tlaFile, 'w') as tlaHandle:
    tlaHandle.write(inputParamString + "\n")
    tlaHandle.write("User-defined two-level atom and line: Equivalent width: " + str(WlambdaLine) + " pm \n")
    tlaHandle.write("wave (nm)   normalized flux \n")
    #One record per profile point: wavelength (nm) and rectified flux.
    for i in range(numPoints):
        lineWave[i] = cm2nm*lineLambdas[i]
        outLine = str(round(lineWave[i], 4)) + "   " + str(round(lineFlux2[0][i], 4)) + "\n"
        tlaHandle.write(outLine)
    tlaHandle.write("\n")
    tlaHandle.write("log_10 energy level populations (cm^-3) \n")
    #BUGFIX: terminate the column-header line; it previously lacked the
    #trailing "\n", so the first data row was appended to the header.
    tlaHandle.write("tauRos     n_l     n_I     n_II     N_III     N_IV\n")
    #One record per depth: log10 tau_Ros and log10 level/stage populations.
    for i in range(numDeps):
        nI = round(log10e * logNums[0][i], 4)
        nII = round(log10e * logNums[1][i], 4)
        nl = round(log10e * logNums[2][i], 4)
        nIII = round(log10e * logNums[4][i], 4)
        nIV = round(log10e * logNums[5][i], 4)
        outLine = str(log10tauRos[i]) + " " + str(nl) + " " + str(nI) + " " + str(nII) + " " + str(nIII) + " " + str(nIV) + "\n"
        tlaHandle.write(outLine)
#spectral line of user-defined 2-level atom
if makePlotTLA:
    plt.figure()
    plt.subplot(1, 1, 1)
    #BUGFIX: call pyplot.title() (the original assigned a string to plt.title,
    #clobbering the function), and replace the title text, which was
    #copy-pasted from the Fourier-transform plot.
    plt.title("Two-level atom line profile")
    plt.xlabel(r'$\lambda$ (nm)')
    plt.ylabel(r'$F_\lambda/F^{\rm C}_\lambda$')
    xMin = min(lineWave)
    xMax = max(lineWave)
    plt.xlim(xMin, xMax)
    plt.ylim(0, 1.2)
    plt.plot(lineWave, lineFlux2[0])
#Close the ad-hoc debug output file opened at the top of the run.
dbgHandle.close()
| 197,848
| 37.128541
| 245
|
py
|
ChromaStarPy
|
ChromaStarPy-master/PPressPlotMols.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 10:54:21 2017
@author: ishort
"""
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import pylab
from functools import reduce
import subprocess
import os
import sys
#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')
#Determine the host OS and the absolute path of the working directory,
#normalized to forward slashes so it composes portably into file paths.
thisOS = "unknown" #default
myOS= ""
#returns 'posix' for unix-like OSes and 'nt' for Windows
thisOS = os.name
print("")
print("Running on OS: ", thisOS)
print("")
absPath0 = "./" #default
if thisOS == "nt":
    #windows: 'cd' with no argument echoes the current directory
    absPath0 = subprocess.check_output("cd", shell=True)
    backSpace = 2  # strip trailing '\r\n'
elif thisOS == "posix":
    absPath0 = subprocess.check_output("pwd", shell=True)
    backSpace = 1  # strip trailing '\n'
absPath0 = bytes.decode(absPath0)
#remove OS-dependent trailing characters
absPath0 = absPath0[0: len(absPath0) - backSpace]
#IDIOM: str.replace does in one pass what the original find/splice while-loop
#did -- convert every Windows backslash separator to a forward slash.
absPath0 = absPath0.replace('\\', '/')
absPath = absPath0 + '/'
#Now get the synthetic spectrum pre-computed with ChromaStarPy
modelPath = absPath + "Outputs/"
#outPath = absPath + "Outputs/"
# Stellar parameters and wavelength range; these must match the values used
# for the ChromaStarPy run that produced the .ppress.txt file being read.
project = "Project"
runVers = "RunGas"
teff = 3600.0
logg = 1.0
log10ZScale = 0.0
lambdaStart = 695.0
lambdaStop = 700.0
# File-name stem convention shared with the main ChromaStarPy scripts.
fileStem = project + "-"\
    + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
    + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
    + "-" + runVers
inFile = modelPath + fileStem + ".ppress.txt"
#whichSpec = "Ca+"
#whichSpec = ["C", "N", "O", "Na", "Mg", "Si", "S", "K", "Ca", "Fe"]
#colrSpec = ["black", "brown", "red", "orange", "yellow", "green", "blue", "indigo", "violet", "gray"]
#whichIon = ["Na+", "Mg+", "Si+", "S+", "K+", "Ca+", "Fe+"]
#colrIon = ["orange", "yellow", "green", "blue", "indigo", "violet", "gray"]
# Species plotted with solid lines and their colours (index-aligned lists).
whichSpec = ["H2", "C2", "O2", "N2", "CO", "OH", "CN", "TiO", "CaH", "CaO"]
colrSpec = ["black", "brown", "red", "orange", "yellow", "green", "blue", "indigo", "violet", "gray"]
# Species plotted with dashed lines and their colours.
whichIon = ["H2+", "H2O", "CaOH"]
colrIon = ["black", "brown", "red", "orange", "yellow", "green", "blue", "indigo", "violet", "gray"]
thisSpec = 0 #default initialization (H)
# NOTE(review): numSampleDepths/numSpecies are hard-coded and must match the
# counts written into the .ppress.txt file -- confirm against the producing run.
numSampleDepths = 48
#numSampleDepths = 2 #debug
numSpecies = 105
#numSpecies = 3 #debug
#numStr = fields[0].strip() #first field is number of following records
#num = int(numStr)
species = [0.0 for i in range(numSpecies)]
logTau = [0.0 for i in range(numSampleDepths)]
logTkin = [0.0 for i in range(numSampleDepths)]
logPGas = [0.0 for i in range(numSampleDepths)]
logPe = [0.0 for i in range(numSampleDepths)]
logPP = [ [ 0.0 for j in range(numSpecies) ] for i in range(numSampleDepths)]
fileTeff = 0.0
fileLogg = 0.0
fileLogZ = 0.0
#Read the .ppress.txt file: a parameter header line, a column-header line,
#then two lines per depth (state variables, then species partial pressures).
with open(inFile, 'r') as inputHandle:
    inLine = inputHandle.readline() #line of header
    print(inLine)
    fields = inLine.split()
    fileTeff = float(fields[1].strip())
    fileLogg = float(fields[3].strip())
    #BUGFIX: the original assigned this to 'fileZ', leaving 'fileLogZ' at its
    #0.0 initialization, so the parameter-mismatch check below could never
    #detect a metallicity mismatch (and compared an uninitialized value).
    fileLogZ = float(fields[5].strip())
    if ( (fileTeff != teff) or
         (fileLogg != logg) or
         (fileLogZ != log10ZScale) ):
        print(" ")
        print(" !!!!!!!!!!!!!!!!!!!!!!")
        print(" ")
        print("Mismatch between input file name and stellar paramters in input file!")
        print(" ")
        print(" !!!!!!!!!!!!!!!!!!!!!!")
        print(" ")
    #Header line
    inLine = inputHandle.readline()
    print(inLine)
    #Get the partial pressures, depth by depth.
    for i in range(numSampleDepths):
        #Each depthwise record is two lines:
        #line 1 has depth and environmental parameters
        #line 2 has specieswise partial pressures
        inLine1 = inputHandle.readline()
        #print(inLine1)
        fields = inLine1.split()
        logTau[i] = float(fields[1].strip())
        logTkin[i] = float(fields[3].strip())
        logPGas[i] = float(fields[6].strip())
        logPe[i] = float(fields[9].strip())
        #Relative to total gas pressure for plot:
        logPe[i] = logPe[i] - logPGas[i]
        inLine2 = inputHandle.readline()
        #print(inLine2)
        fields = inLine2.split()
        #Species names and values alternate: name at 2*j, log10 P at 2*j+1.
        for j in range(numSpecies):
            species[j] = fields[2*j].strip()
            #if (species[j] == whichSpec):
            #    thisSpec = j
            logPP[i][j] = float(fields[(2*j) + 1].strip())
            #Relative to total gas pressure for plot:
            logPP[i][j] = logPP[i][j] - logPGas[i]
            #print("j ", j, " 2*j ", 2*j, " 2*j+1 ", (2*j)+1, " species ", species[j], " pp ", logPP[i][j])
#plot some partial pressures
#plt.title('Synthetic spectrum')
plt.figure()
plt.subplot(1, 1, 1)
#plt.ylabel(r'$\log P$ dynes cm$^{\rm -2}$', fontsize=14)
plt.ylabel(r'$\log_{10} (P/P_{\rm H})$', fontsize=14)
plt.xlabel(r'$\log_{10}\tau_{\rm Ros}$')
xMin = min(logTau)
xMax = max(logTau)
pylab.xlim(xMin, xMax)
pylab.ylim(-12.0, 0.0)
#thisSpec = 3
# Solid curves: each requested neutral/molecular species, one colour each.
colr = 0
for wS in whichSpec:
    # Find the column index of this species name; thisSpec keeps its previous
    # value if the name is absent from the file.
    for i in range(numSpecies):
        if (species[i] == wS):
            thisSpec = i
    print("Species: ", species[thisSpec])
    #print("At plot:")
    #print ("logPP ", [logPP[i][0] for i in range(numSampleDepths)])
    # skip first depth point [i=0] - upper boundary condition:
    pylab.plot( [logTau[i] for i in range(1, numSampleDepths)],\
                [logPP[i][thisSpec] for i in range(1, numSampleDepths)],\
                color=colrSpec[colr], linewidth=2)
    pylab.text(logTau[4], logPP[4][thisSpec], species[thisSpec],\
               color=colrSpec[colr], fontsize=13, weight='bold')
    colr+=1
#pylab.plot(logTau, logPe, 'o', color='black')
#pylab.text(logTau[numSampleDepths-3], logPe[numSampleDepths-3], 'Pe',\
#           color='black', fontsize=13, weight='bold')
# Dashed curves: the second species list, labelled deeper in the atmosphere.
colr = 0
for wI in whichIon:
    for i in range(numSpecies):
        if (species[i] == wI):
            thisSpec = i
    print("Species: ", species[thisSpec])
    pylab.plot([logTau[i] for i in range(1, numSampleDepths)],\
               [logPP[i][thisSpec] for i in range(1, numSampleDepths)],\
               '--', color=colrIon[colr], linewidth=2)
    pylab.text(logTau[numSampleDepths-8], logPP[numSampleDepths-8][thisSpec],\
               species[thisSpec], color=colrIon[colr], fontsize=13, weight='bold')
    colr+=1
#Save as encapsulated postscript (eps) for LaTex
epsName = fileStem + "Mols.eps"
plt.savefig(epsName, format='eps', dpi=1000)
| 6,942
| 31.143519
| 107
|
py
|
ChromaStarPy
|
ChromaStarPy-master/VegaHalpha.py
|
#
# Input configuration for a ChromaStarPy run: Vega (A0 V standard) around
# the H-alpha line. All names here are read by the main modelling script.
#
#Custom filename tags to distinguish from other runs
project = "Project"
runVers = "Run"
#Default plot
#Select ONE only:
#makePlot = "structure"
#makePlot = "sed"
makePlot = "spectrum"
#makePlot = "ldc"
#makePlot = "ft"
#makePlot = "tlaLine"
#Spectrum synthesis mode
# - uses model in Restart.py with minimal structure calculation
specSynMode = False
#Castelli & Kurucz
#Model atmosphere
teff = 9550.0  #, K
logg = 3.95  #, cgs
log10ZScale = -0.5  # [A/H]
massStar = 2.0  #, solar masses
xiT = 2.0  #, km/s microturbulence
logHeFe = 0.0  #, [He/Fe]
logCO = 0.0  #, [C/O]
logAlphaFe = 0.0  #, [alpha-elements/Fe]
#Spectrum synthesis window (brackets H-alpha at 656.3 nm)
lambdaStart = 651.0  #, nm
lambdaStop = 661.0  #, nm
fileStem = project + "-"\
    + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
    + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
    + "-" + runVers
lineThresh = -3.0  #, min log(KapLine/kapCnt) for inclusion at all - areally, being used as "lineVoigt" for now
voigtThresh = -3.0  #, min log(KapLine/kapCnt) for treatment as Voigt - currently not used - all lines get Voigt
logGammaCol = 0.0
logKapFudge = 0.0
macroV = 2.0  #, km/s macroturbulence
rotV = 275.0  #, km/s equatorial rotation speed
rotI = 5.0  #, degrees inclination of rotation axis
RV = 0.0  #, km/s radial velocity
vacAir = "vacuum"
sampling = "fine"
#Performance vs realism
nOuterIter = 12  #, no of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12  #, no of inner (ion fraction) - Pe iterations
ifTiO = 0  #, whether to include TiO JOLA bands in synthesis
#Gaussian filter for limb darkening curve, fourier transform
diskLambda = 500.0  #, nm
diskSigma = 0.01  #, nm
#Two-level atom and spectral line (defaults describe the Na D2 line)
userLam0 = 589.592  #, nm
userA12 = 6.24  #, A_12 logarithmic abundance = log_10(N/N_H) + 12
userLogF = -0.495  #, log(f) oscillator strength // saturated line
userStage = 0  #, ionization stage of user species (0 (I) - 3 (IV))
userChiI1 = 5.139  #, ground state chi_I, eV
userChiI2 = 47.29  #, 1st ionized state chi_I, eV
userChiI3 = 71.62  #, 2nd ionized state chi_I, eV
userChiI4 = 98.94  #, 3rd ionized state chi_I, eV
userChiL = 0.0  #, lower atomic E-level, eV
userGw1 = 2  #, ground state stat. weight or partition fn (stage I) - unitless
userGw2 = 1  #, ground state stat. weight or partition fn (stage II) - unitless
userGw3 = 1  #, ground state stat. weight or partition fn (stage III) - unitless
userGw4 = 1  #, ground state stat. weight or partition fn (stage IV) - unitless
userGwL = 2  #, lower E-level stat. weight - unitless
userMass = 22.9  #, amu
userLogGammaCol = 1.0  #, log_10 Lorentzian broadening enhancement factor
| 2,691
| 32.65
| 116
|
py
|
ChromaStarPy
|
ChromaStarPy-master/solartest.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 10:54:21 2017
@author: ishort
"""
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import pylab
#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')
#Get the data
# Read the observed solar flux atlas (2005 spliced version) and plot it as
# the black reference curve; the synthetic spectrum is overplotted later.
dataPath = "SolFluxAtlas2005/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = ""
fields = [" " for i in range(2)]
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = dataPath + "fluxspliced.2005"
with open(inFile, 'r') as inputHandle:
    #Expects number of records on first line, then white space delimited columns of
    #wavelengths in nm and continuum rectified fluxes
    inLine = inputHandle.readline() #Special one-line header
    print(inLine)
    fields = inLine.split()
    numStr = fields[0].strip() #first field is number of following records
    num = int(numStr)
    waveSun = [0.0 for i in range(num)]
    fluxSun = [0.0 for i in range(num)]
    for i in range(num):
        inLine = inputHandle.readline()
        fields = inLine.split()
        wavStr = fields[0].strip(); flxStr = fields[1].strip()
        waveSun[i] = float(wavStr); fluxSun[i] = float(flxStr)
pylab.plot(waveSun, fluxSun, color='black')
#Now get the synthetic spectrum pre-computed with ChromaStarPy
modelPath = "Outputs/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = " "
#fields = [" " for i in range(2)]
"""
runVers = "pyLoop"
#Model atmosphere
teffStr = "5777.0"
loggStr = "4.44"
logZStr = "0.0"
massStarStr = "1.0"
xiTStr = "1.0"
logHeFeStr = "0.0"
logCOStr = "0.0"
logAlphaFeStr = "0.0"
#Spectrum synthesis
lambdaStartStr = "390.0"
lambdaStopStr = "400.0"
lineThreshStr = "-3.0"
voigtThreshStr = "-3.0"
logGammaColStr = "0.5"
logKapFudgeStr = "0.0"
macroVStr = "1.0"
rotVStr = "2.0"
rotIStr = "90.0"
RVStr = "0.0"
strucStem = "Teff" + teffStr + "Logg" + loggStr + "Z" + logZStr + "M" + massStarStr+"xiT"+xiTStr + \
"HeFe" + logHeFeStr + "CO" + logCOStr + "AlfFe" + logAlphaFeStr + "v" + runVers
strucFile = "struc." + strucStem + ".out"
specFile = "spec." + strucStem + "L"+lambdaStartStr+"-"+lambdaStopStr+"xiT"+xiTStr+"LThr"+lineThreshStr+ \
"GamCol"+logGammaColStr+"Mac"+macroVStr+"Rot"+rotVStr+"-"+rotIStr+"RV"+RVStr + ".out"
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = modelPath + specFile;
"""
# Parameters of the ChromaStarPy run whose output is compared to the atlas;
# they determine the file-name stem of the .spec.txt file read below.
project = "Project"
runVers = "Run"
teff = 5777.0
logg = 4.44
log10ZScale = 0.0
lambdaStart = 390.0
lambdaStop = 400.0
fileStem = project + "-"\
    + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
    + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
    + "-" + runVers
inFile = modelPath + fileStem + ".spec.txt"
# Vacuum -> air wavelength conversion factor.
invnAir = 1.0 / 1.000277 #// reciprocal of refractive index of air at STP
#numStr = fields[0].strip() #first field is number of following records
#num = int(numStr)
waveMod = []
fluxMod = []
wav = 0.0 #//initialization
wavStr = ""
lblStr = ""
# Read the synthetic .spec.txt file: headers give the number of line IDs and
# wavelengths, followed by the spectrum and a line-identification table.
with open(inFile, 'r') as inputHandle:
    #Expects counts in the header lines, then white space delimited columns of
    #wavelengths in nm and continuum rectified fluxes
    inLine = inputHandle.readline() #line of header
    print(inLine)
    inLine = inputHandle.readline()
    print(inLine)
    fields = inLine.split()
    #number of line IDs is last field:
    numLineIdsStr = fields[len(fields)-1]
    numLineIds = int(numLineIdsStr) - 1 # to be on safe side
    print("Recovered that there are " + numLineIdsStr + " lines to ID")
    inLine = inputHandle.readline()
    print(inLine)
    fields = inLine.split()
    #number of wavelengths in spectrum is last field:
    numWavsStr = fields[len(fields)-1]
    numWavs = int(numWavsStr) # to be on safe side
    print("Recovered that there are " + numWavsStr + " wavelengths")
    #One more line of header
    inLine = inputHandle.readline() #line of header
    print(inLine)
    waveMod = [0.0 for i in range(numWavs)]
    fluxMod = [0.0 for i in range(numWavs)]
    #Get the synthetic spectrum; wavelengths converted from vacuum to air
    #to match the observed atlas.
    for i in range(numWavs):
        inLine = inputHandle.readline()
        fields = inLine.split()
        wavStr = fields[0].strip(); flxStr = fields[1].strip()
        wav = invnAir * float(wavStr)
        waveMod[i] = wav
        fluxMod[i] = float(flxStr)
    waveIds = [0.0 for i in range(numLineIds)]
    lblIds = ["" for i in range(numLineIds)]
    #Get the line IDs
    #Expects four white-space-delimited fields:
    # wavelength, element, ion. stage, and rounded wavelength
    #Another line of header for line id section
    inLine = inputHandle.readline() #line of header
    print(inLine)
    for i in range(numLineIds):
        inLine = inputHandle.readline()
        fields = inLine.split()
        wavStr = fields[0].strip()
        wav = invnAir * float(wavStr)
        waveIds[i] = wav
        lblStr = fields[1].strip() + " " + fields[2].strip() + " " + fields[3].strip()
        lblIds[i] = lblStr
"""
#If we do NOT know number of records:
#for i in inputHandle: #doesn't work - 0 iterations
while (inLine != ""):
inLine = inputHandle.readline()
if not inLine:
break
#print(inLine)
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
wav = invnAir * float(wavStr)
waveMod.append(wav)
fluxMod.append(float(flxStr))
"""
#plot the spectrum
#plt.title('Synthetic spectrum')
#FIX: use raw strings for the TeX labels -- '\l' in a plain string literal is
#an invalid escape sequence (DeprecationWarning, SyntaxWarning in newer
#Python); the rendered label text is unchanged.
plt.ylabel(r'$F_\lambda/F^C_\lambda$')
plt.xlabel(r'$\lambda$ (nm)')
xMin = min(waveMod)
xMax = max(waveMod)
pylab.xlim(xMin, xMax)
pylab.ylim(0.0, 1.6)
pylab.plot(waveMod, fluxMod, color="gray")
#add the line IDs: tick marks and rotated labels for the Ca II lines only
for i in range(numLineIds):
    if "Ca II" in lblIds[i]:
        thisLam = waveIds[i]
        thisLbl = lblIds[i]
        xPoint = [thisLam, thisLam]
        yPoint = [1.05, 1.1]
        pylab.plot(xPoint, yPoint, color='black')
        pylab.text(thisLam, 1.5, thisLbl, rotation=270)
#Save as encapsulated postscript (eps) for LaTex
epsName = fileStem + ".eps"
plt.savefig(epsName, format='eps', dpi=1000)
| 6,253
| 28.780952
| 106
|
py
|
ChromaStarPy
|
ChromaStarPy-master/LineTau2.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 29 15:33:20 2017
@author: Ian
"""
import math
import Useful
"""
/* This might be the wrong approach - using the *local* monochromatic continuum optical depth and extinction
* scale for reference at each wavelength - the alternative is to use a universal tau and kappa scale
* for reference, like Rosseland tau and kappa (or those at 500 nm)*/"""
def tauLambda(numMaster, masterLams, logKappaL,
numDeps, logKappaRef, tauRef, logTotalFudge):
"""/* This version is for computing the monochromatic optical depth distribution from a line blanketed
* and a continuum monochromatic extinction distribution */
/* logTauCont is the optical depth scale corresponding to the continuum extinction logKappa*/"""
#//No monochromatic optical depth can be less than the Rosseland optical depth,
#// so prevent zero tau_lambda values by setting each tau_lambda(lambda) at the
#//top of the atmosphere to the tau_Ross value at the top
#// - prevents trying to take a log of zero!
logE = math.log10(math.e) #// for debug output
logE10 = math.log(10.0)
minTauL = tauRef[0][0]
minLogTauL = tauRef[1][0]
#//int numPoints = linePoints[0].length;
logTauL = [ [ 0.0 for i in range(numDeps) ] for j in range(numMaster) ]
#double tau1, tau2, delta, tauL, thisTau, lastTau,
# integ, logKapRat, lastLogKapRat, kapTot;
#//Interpolate continuum opacity and corresponding optical depth scale onto onto line-blanketed opacity lambda array:
#//
"""#/*
logKappaC = [0.0 for i in range(numLams)]
logKappaC2 = [0.0 for i in range(numMaster)]
logKappa2 = [ [ 0.0 for i in range(numDeps) ] for j in range(numMaster) ]
logTauC = [0.0 for i in range(numLams)]
logTauC2 = [0.0 for in range(numMaster)]
logTau2 = [ [ 0.0 for i in range(numDeps) ] for j in range(numMaster) ]
for id in range(numDeps):
for il in range(numLams):
logKappaC[il] = logKappa[il][id]
logTauC[il] = logTauCont[il][id]
logKappaC2 = ToolBox.interpolV(logKappaC, lambdaScale, masterLams);
logTauC2 = ToolBox.interpolV(logTauC, lambdaScale, masterLams);
for il in range(numMaster):
logKappa2[il][id] = logKappaC2[il]
logTau2[il][id] = logTauC2[il]
*/"""
for il in range(numMaster):
tau1 = minTauL #//initialize accumulator
logTauL[il][0] = minLogTauL #// Set upper boundary TauL
#//System.out.println("LineTau: minTauL: " + minTauL);
#//Trapezoid method: first integrand:
#//total extinction co-efficient
#//// With local monochromatic optical depth scale as reference scale:
#//lastLogKapRat = logKappaL[il][0] - logKappa2[il][0];
#//With Rosseland optical depth scale as reference scale:
#//lastLogKapRat = Math.log(kapTot) - kappaRef[1][0];
lastLogKapRat = logKappaL[il][0] - logKappaRef[1][0]
lastLogKapRat = lastLogKapRat + logE10*logTotalFudge
for id in range(1, numDeps):
#// With local monochromatic optical depth scale as reference scale:
#//thisTau = Math.exp(logTau2[il][id]);
#//lastTau = Math.exp(logTau2[il][id - 1]);
#////With Rosseland optical depth scale as reference scale:
thisTau = tauRef[0][id]
lastTau = tauRef[0][id-1]
#//
delta = thisTau - lastTau
#// With local monochromatic optical depth scale as reference scale:
#//logKapRat = Math.log(kapTot) - logKappa2[il][id];
#//logKapRat = logKappaL[il][id] - logKappa2[il][id];
#////With Rosseland optical depth scale as reference scale:
logKapRat = logKappaL[il][id] - logKappaRef[1][id]
logKapRat = logKapRat + logE10*logTotalFudge
#//opacity being handed in is now total opacity: line plux continuum:
#//trapezoid rule:
integ = 0.5 * (math.exp(logKapRat) + math.exp(lastLogKapRat))
tau2 = tau1 + (integ * delta)
logTauL[il][id] = math.log(tau2)
tau1 = tau2
lastLogKapRat = logKapRat
#} //id loop
#} //il loop
return logTauL
#} //end method tauLambda
#//
def tauLambdaCont(numCont, logKappaCont, logKappaRef,
numDeps, tauRef, logTotalFudge):
"""/* This version is for computing the monochromatic optical depth distribution from a continuum monochromatic extinction
* distribution and a reference extinction scale */ """
#// kappaRef is usual 2 x numDeps array with linear (row 0) and logarithmic (row 1) reference extinction coefficient
#// values
#// tauRef is the optical depth distribution corresponding to the extinction distribution kappaRef
#//No monochromatic optical depth can be less than the Rosseland optical depth,
#// so prevent zero tau_lambda values by setting each tau_lambda(lambda) at the
#//top of the atmosphere to the tau_Ross value at the top
#// - prevents trying to take a log of zero!
logE = math.log10(math.e) #// for debug output
logE10 = math.log(10.0)
minTauC = tauRef[0][0]
minLogTauC = tauRef[1][0]
#//int numPoints = linePoints[0].length;
#// returns numPoints+1 x numDeps array: the numPoints+1st row holds the line centre continuum tau scale
logTauC = [ [ 0.0 for i in range(numDeps) ] for j in range(numCont) ]
#double tau1, tau2, delta, tauL,
# integ, logKapRat, lastLogKapRat;
#//Interpolate continuum opacity onto onto line-blanketed opacity lambda array:
#//
for il in range(numCont):
tau1 = minTauC #//initialize accumulator
logTauC[il][0] = minLogTauC #// Set upper boundary TauL
#//System.out.println("LineTau: minTauL: " + minTauL);
#//Trapezoid method: first integrand:
#//total extinction co-efficient
#// Convert kappa_Ros to cm^-1 for consistency with kappaL:
#//logKappaC = kappa[1][0] + rhoSun[1][0]; // + logg;
#//WRONG! kappa is now wavelength dependent!
#//logKappaC = kappa[1][0];
#//delta = tauRos[0][1] - tauRos[0][0];
#//logKapRat = logKappaL[il][0] - kappa[1][0];
lastLogKapRat = logKappaCont[il][0] - logKappaRef[1][0]
lastLogKapRat = lastLogKapRat + logE10*logTotalFudge
#//tau2 = tau1 + ((Math.exp(logKapRat) + 1.0) * delta);
#//opacity being handed in is now total oapcity: line plux continuum:
#//tau2 = tau1 + (Math.exp(logKapRat) * delta);
#//logTauL[il][1] = Math.log(tau2);
#//tau1 = tau2;
for id in range(1, numDeps):
#// To test: continue with Euler's method:
#// Convert kappa_Ros to cm^-1 for consistency with kappaL:
#//logKappaC = kappa[1][id] + rhoSun[1][id]; // - logg;
#//logKappaC = kappa[1][id];
delta = tauRef[0][id] - tauRef[0][id - 1]
#//logKapRat = logKappaL[il][id] - kappa[1][id];
#//logKapRat = logKappaL[il][id] - logKappaC;
logKapRat = logKappaCont[il][id] - logKappaRef[1][id]
logKapRat = logKapRat + logE10*logTotalFudge
#// if (id == 36){
#//System.out.println("il " + il + " masterLams " + masterLams[il] + " logKappaL " + logE*logKappaL[il][id] + " kappa2 " + logE*kappa2[il][id]
#// + " logKapRat " + logKapRat);
#//}
#//tau2 = tau1 + ((Math.exp(logKapRat) + 1.0) * delta);
#//opacity being handed in is now total oppcity: line plux continuum:
#//trapezoid rule:
integ = 0.5 * (math.exp(logKapRat) + math.exp(lastLogKapRat))
tau2 = tau1 + (integ * delta)
logTauC[il][id] = math.log(tau2)
tau1 = tau2
lastLogKapRat = logKapRat
#//if (id == 12) {
#// System.out.println("il " + il + " id " + id + " logTauL[il][id] " + logE * logTauL[il][id]);
#// System.out.println("tauLambda: il, id, masterLams, logKappaL, logKappa2, logKapRat, logTauL : "
#// + il + " " + id + " " + masterLams[il] + " " + logE*logKappaL[il][id] + " " + logE*kappa2[il][id] + " " + logE*logKapRat + " " + logE*logTauL[il][id] );
#//}
#} //id loop
#} //il loop
"""/* No!
//This is probably superfluous here, but let's do it this way for consistency with code that was
// dependent on Method 1:
//Now compute the monochromatic line centre continuum optical depth scale and store it in an numPoints+1st column of
// logTauL array:
for (int id = 0; id < numDeps; id++) {
logTauL[numPoints - 1][id] = tauRos[1][id];
}
*/"""
return logTauC
#} //end method tauLambda
| 8,909
| 42.043478
| 172
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Dgesl.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 6 11:42:29 2019
@author:
"""
import math
import numpy
#from scipy.linalg.blas import daxpy
#from scipy.linalg.blas import ddot
#from scipy.linalg.blas import dscal
#from scipy.linalg.blas import idamax
#from Documents.ChromaStarPy.GAS.blas.Daxpy import daxpy
#from Documents.ChromaStarPy.GAS.blas.Ddot import ddot
#from Documents.ChromaStarPy.GAS.blas.Dscal import dscal
#from Documents.ChromaStarPy.GAS.blas.Idamax import idamax
import Daxpy
import Ddot
import Dscal
import Idamax
def dgesl(a, lda, n, ipvt, b, job):
    """Solve the double-precision system a*x = b or trans(a)*x = b.

    Python translation of LINPACK dgesl (C. Moler, 08/14/78), using the
    factors produced by dgeco/dgefa.

    Parameters:
        a    -- the LU-factored matrix output from dgeco or dgefa
        lda  -- leading dimension of a (unused in this Python version; kept
                for signature compatibility with the FORTRAN original)
        n    -- order of the matrix a
        ipvt -- pivot index vector from dgeco or dgefa
        b    -- right-hand-side vector; overwritten with the solution x
        job  -- 0 to solve a*x = b; nonzero to solve trans(a)*x = b

    Returns:
        b, now holding the solution vector x (also modified in place).

    A division by zero occurs if the factorization has a zero on the
    diagonal; that indicates singularity and will not happen when dgeco
    reported rcond > 0.0 or dgefa reported info == 0.
    """
    nm1 = n - 1
    if job == 0:
        # --- Solve a*x = b ----------------------------------------------
        # First solve l*y = b: apply the row interchanges recorded in ipvt
        # and eliminate with the stored multipliers (BLAS daxpy updates).
        if nm1 >= 1:
            for k in range(nm1):
                l = ipvt[k]
                t = b[l]
                if l != k:
                    b[l] = b[k]
                    b[k] = t
                # b[k+1:n] += t * a[k+1:n][k]  (column k below the diagonal)
                updated = Daxpy.daxpy(n - k - 1, t,
                                      [a[row][k] for row in range(k + 1, n)], 1,
                                      [b[row] for row in range(k + 1, n)], 1)
                for offset, row in enumerate(range(k + 1, n)):
                    b[row] = updated[offset]
        # Now solve u*x = y by back substitution (k runs n-1 down to 0;
        # this replaces the FORTRAN "go to 100" that skipped the transpose
        # branch below).
        for kb in range(n):
            k = (n - 1) - kb
            b[k] = b[k] / a[k][k]
            t = -b[k]
            # b[0:k+1] += t * a[0:k+1][k]  (column k above the diagonal)
            updated = Daxpy.daxpy(k, t,
                                  [a[row][k] for row in range(0, k + 1)], 1,
                                  [b[row] for row in range(0, k + 1)], 1)
            for row in range(0, k + 1):
                b[row] = updated[row]
    if job != 0:
        # --- Solve trans(a)*x = b ---------------------------------------
        # First solve trans(u)*y = b with forward substitution (ddot of
        # column k above the diagonal against the partial solution).
        for k in range(n):
            t = Ddot.ddot(k, [a[row][k] for row in range(0, k)], 1,
                          [b[row] for row in range(0, k)], 1)
            b[k] = (b[k] - t) / a[k][k]
        # Now solve trans(l)*x = y, applying the pivots in reverse order.
        if nm1 >= 1:
            for kb in range(nm1):
                k = n - kb - 1
                b[k] = b[k] + Ddot.ddot(n - k, [a[row][k] for row in range(k, n)], 1,
                                        [b[row] for row in range(k, n)], 1)
                l = ipvt[k]
                if l != k:
                    t = b[l]
                    b[l] = b[k]
                    b[k] = t
    return b
| 5,968
| 31.440217
| 112
|
py
|
ChromaStarPy
|
ChromaStarPy-master/CSBlockData.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 12:04:33 2019
@author:
"""
# Module-level initialization translated from a FORTRAN BLOCK DATA unit.
# NOTE(review): "global" statements at module scope are no-ops in Python;
# they are kept here only to document the original COMMON-block groupings.
#Try this
#global pi, sbcon, kbol, cvel, gcon, hpl, hmass, t0, everg #/consts/
global kbol, hmass, t0 #/consts/
global name, ip, comp, awt, nspec, natom, itab, ntab, indx, iprint, gsinit, print0 #/gasp/
global ipr, nch, nel, ntot, nat, zat, neut, idel, indsp, indzat, iat, natsp, iatsp #/gasp2/
global nlin1, lin1, linv1, nlin2, lin2, linv2 #/lin/
#global xg,wtrp,dfflag,fqhead,nxg,ixgp,nxgp,iper #/fqgrid/
#global msol,lsol,rsol #/solar/
#global nit,nmit,itcon,mitcon,jtcorr,outfg #/iters/
#global nblock,nline #/output/
#global epsper,ftlim,fplim,frlim,restol,updatj,updatx, aconv,taups,irfact,ipverb,itaups #/flags/
#global chix, nix, nopac, ixa, ixn, opinit, opflag, opchar, iopt #/opacty
global chix, nix, ixa, ixn #/opacty
# ---- /consts/: physical constants (CGS, per the values below) ----
#pi = 3.1415926536e0
#sbcon = 5.66956e-5
kbol = 1.3806e-16
#cvel = 2.997925e+10
#gcon = 6.670e-8
#hpl = 6.62620e-27
hmass = 1.66053e-24
t0 = 5039.93e0
#everg = 1.60219e-12
#c
# ---- /gasp/: per-species arrays (150 species slots, 40 atom slots) ----
ip = [0.0e0 for i in range(150)]
comp = [0.0e0 for i in range(40)]
awt = [0.0e0 for i in range(150)]
#itab = [0 for i in range(83)]
#ntab = [0 for i in range(5)]
# indx is filled with 149 -- presumably an out-of-range/"last slot"
# sentinel for the 150-element species arrays; TODO confirm with users.
indx = [ [ [ [ [149 for i in range(2)] for j in range(5) ] for k in range(7) ] for l in range(26) ] for m in range(4) ]
name = ['        ' for i in range(150)]
#gsinit = False
#print0 = False
gsinit = True
print0 = False
#common /gasp/ name,ip,comp,awt,nspec,natom,itab,ntab,indx,
# iprint,gsinit,print0
# The docstring below preserves the original 1-based FORTRAN itab table;
# the live table that follows is the same data shifted to 0-based indices.
"""
itab = [2, 8, 0, 0, 0, 3, 4, 5, 0, 9,\
        10, 11, 12, 13, 0, 6, 7, 0, 14, 15,\
        16, 17, 18, 19, 20, 21, 22, 23, 0, 0,\
        0, 0, 0, 0, 0, 0, 0, 24, 25, 26,\
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
        0, 0, 0]
"""
itab = [1, 7, 0, 0, 0, 2, 3, 4, 0, 8,\
        9, 10, 11, 12, 0, 5, 6, 0, 13, 14,\
        15, 16, 17, 18, 19, 20, 21, 22, 0, 0,\
        0, 0, 0, 0, 0, 0, 0, 23, 24, 25,\
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
        0, 0, 0]
#ntab = [4, 26, 7, 5, 2]
ntab = [3, 25, 6, 4, 1]
#c
# ---- /gasp2/: species bookkeeping arrays ----
ipr = [0 for i in range(150)]
nch = [0 for i in range(150)]
nel = [0 for i in range(150)]
ntot = [0 for i in range(150)]
nat = [ [0 for i in range(150)] for j in range(5) ]
zat = [ [0 for i in range(150)] for j in range(5) ]
neut = [0 for i in range(150)]
idel = [0 for i in range(150)]
#indsp = [0 for i in range(40)]
#indzat = [0 for i in range(100)]
#iat = [0 for i in range(150)]
natsp = [0 for i in range(40)]
iatsp = [ [0 for i in range(40)] for j in range(40) ]
#common /gasp2/ ipr,nch,nel,ntot,nat,zat,neut,idel,indsp,
# indzat,iat,natsp,iatsp
#iat = 150*40
#indsp = 40*150
#indzat = 100*40
# Index maps initialized to their maximum valid index (39/149/39) --
# presumably sentinels replacing the FORTRAN out-of-range fills commented
# out above; TODO confirm against the consumers of these arrays.
iat = [39 for i in range(150)]
indsp = [149 for i in range(40)]
indzat = [39 for i in range(100)]
#c
# ---- /lin/: line-list index arrays ----
lin1 = [0 for i in range(40)]
lin2 = [0 for i in range(40)]
linv1 = [0 for i in range(40)]
linv2 = [0 for i in range(40)]
#common /lin/ nlin1,lin1,linv1,nlin2,lin2,linv2
#c
#dfflag = [0.0e0 for i in range(120)]
#wtrp = [0.0e0 for i in range(120)]
#xg = [0.0e0 for i in range(120)]
#ixgp = [0 for i in range(120)]
# The docstring below preserves disabled /fqgrid/, /solar/, /iters/,
# /output/ and /flags/ initializations from the FORTRAN original.
"""
fqhead = ['' for i in range(120)]
#common /fqgrid/ xg,wtrp,dfflag,fqhead,nxg,ixgp,nxgp,iper
nxg = 116
xg = [7.550, 7.050, 6.547, 6.545, 6.350, 6.159, 6.157, 5.940, 5.938, 5.600,\
      5.300, 5.000, 4.831, 4.829, 4.600, 4.400, 4.200, 3.972, 3.970, 3.800,\
      3.600, 3.401, 3.399, 3.200, 3.000, 2.896, 2.894, 2.800, 2.700, 2.600,\
      2.500, 2.450, 2.400, 2.350, 2.300, 2.250, 2.200, 2.150, 2.100, 2.050,\
      2.000, 1.960, 1.920, 1.880, 1.840, 1.800, 1.760, 1.720, 1.680, 1.640,\
      1.600, 1.560, 1.520, 1.480, 1.440, 1.400, 1.360, 1.330, 1.300, 1.270,\
      1.240, 1.210, 1.180, 1.150, 1.120, 1.009, 1.006, 1.003, 1.000, 0.980,\
      0.960, 0.940, 0.920, 0.900, 0.880, 0.860, 0.840, 0.820, 0.800, 0.780,\
      0.760, 0.740, 0.720, 0.700, 0.680, 0.660, 0.640, 0.620, 0.600, 0.580,\
      0.560, 0.540, 0.520, 0.500, 0.480, 0.460, 0.440, 0.420, 0.400, 0.380,\
      0.360, 0.340, 0.320, 0.300, 0.280, 0.260, 0.240, 0.220, 0.200, 0.180,\
      0.160, 0.140, 0.120, 0.100, 0.080, 0.060]
#c
#common /solar/ msol,lsol,rsol
msol = 1.989e+33
lsol = 3.826e+33
rsol = 6.9599e+10
#c
#common /iters/ nit,nmit,itcon,mitcon,jtcorr,outfg
nit = 0
nmit = 0
itcon = False
mitcon = False
jtcorr = False
outfg = False
#c
#common /output/ nblock,nline
nblock = 10
nline = 1
#c
#common /flags/ epsper,ftlim,fplim,frlim,restol,updatj,updatx,
# aconv,taups,irfact,ipverb,itaups
irfact = 0
epsper = 1.0e-4
restol = 1.0e-6
updatj = 0.0e0
updatx = 0.0e0
ftlim = 0.10e0
fplim = 0.10e0
frlim = 0.10e0
aconv = 1.6e0
taups = 6.66667e-01
ipverb = 2
"""
#c
# ---- /opacty/: species list for explicit partial-pressure bookkeeping ----
#chix = ['' for i in range(70)]
#opchar = ['' for i in range(25)]
#opch1 = ['' for i in range(18)]
#opch2 = ['' for i in range(7)]
#******
#
#I don't know what we do about this FORTRAN "equivalence"...
#equivalence (opch1(1),opchar(1)),(opch2(1),opchar(19))
#
#
#*******
#opflag = [False for i in range(25)]
#common /opacty/ chix,nix,nopac,ixa,ixn,opinit,opflag,opchar,iopt
#c
#c Initialize block of flags and control parameters.
#c
#c The list of species for which partial pressures are
#c explicitly referenced (for opacity calculations and general
#c interest) are defined by the array IXA below. These are:
#c
#c 1. H 2. H+ 3. H- 4. H2 5. H2+ 6. He
#c 7. He+ 8. C 9. C+ 10. N 11. N+ 12. O
#c 13. O+ 14. Ne 15. Na 16. Na+ 17. Mg 18. Mg+
#c 19. Mg++ 20. Al 21. Al+ 22. Si 23. Si+ 24. S
#c 25. S+ 26. K 27. K+ 28. Ca 29. Ca+ 30. Ca++
#c 31. Ti 32. Ti+ 33. V 34. V+ 35. Fe 36. Fe+
#c 37. CO 38. N2 39. OH 40. H2O 41. SiO 42. TiO
#c 43. VO 44. CN 45. CH 46. NH 47. HCO 48. HCN
#c 49. C2H2 50. HS 51. MgH 52. AlH 53. SiH 54. CaH
#c 55. C2 56. C3 57. CS 58. SiS 59. SiC 60. SiC2
#c
nix = 60
#nopac = 23
#opinit = True
ixa = [ [0 for i in range(70)] for j in range(5) ]
ixn = [0 for i in range(70)]
# The docstring below preserves the original 1-based FORTRAN ixa table;
# the live ixaTranspose that follows is the same data shifted to 0-based.
"""
ixaTranspose = [[2, 2,1,1,1], [3, 2,1,1,1], [1, 2,1,1,1], [2, 2,2,1,1],\
                [3, 2,2,1,1], [2, 8,1,1,1], [3, 8,1,1,1], [2, 3,1,1,1],\
                [3, 3,1,1,1], [2, 4,1,1,1], [3, 4,1,1,1], [2, 5,1,1,1],\
                [3, 5,1,1,1], [2, 9,1,1,1], [2,10,1,1,1], [3,10,1,1,1],\
                [2,11,1,1,1], [3,11,1,1,1], [4,11,1,1,1], [2,12,1,1,1],\
                [3,12,1,1,1], [2,13,1,1,1], [3,13,1,1,1], [2, 6,1,1,1],\
                [3, 6,1,1,1], [2,14,1,1,1], [3,14,1,1,1], [2,15,1,1,1],\
                [3,15,1,1,1], [4,15,1,1,1], [2,17,1,1,1], [3,17,1,1,1],\
                [2,18,1,1,1], [3,18,1,1,1], [2,21,1,1,1], [3,21,1,1,1],\
                [2, 5,3,1,1], [2, 4,4,1,1], [2, 5,2,1,1], [2, 5,2,2,1],\
                [2,13,5,1,1], [2,17,5,1,1], [2,18,5,1,1], [2, 4,3,1,1],\
                [2, 3,2,1,1], [2, 4,2,1,1], [2, 5,3,2,1], [2, 4,3,2,1],\
                [2, 3,3,2,2], [2, 6,2,1,1], [2,11,2,1,1], [2,12,2,1,1],\
                [2,13,2,1,1], [2,15,2,1,1], [2, 3,3,1,1], [2, 3,3,3,1],\
                [2, 6,3,1,1], [2,13,6,1,1], [2,13,3,1,1], [2,13,3,3,1]]
"""
ixaTranspose = [[1, 1,0,0,0], [2, 1,0,0,0], [0, 1,0,0,0], [1, 1,1,0,0],\
                [2, 1,1,0,0], [1, 7,0,0,0], [2, 7,0,0,0], [1, 2,0,0,0],\
                [2, 2,0,0,0], [1, 3,0,0,0], [2, 3,0,0,0], [1, 4,0,0,0],\
                [2, 4,0,0,0], [1, 8,0,0,0], [1, 9,0,0,0], [2, 9,0,0,0],\
                [1,10,0,0,0], [2,10,0,0,0], [3,10,0,0,0], [1,11,0,0,0],\
                [2,11,0,0,0], [1,12,0,0,0], [2,12,0,0,0], [1, 5,0,0,0],\
                [2, 5,0,0,0], [1,13,0,0,0], [2,13,0,0,0], [1,14,0,0,0],\
                [2,14,0,0,0], [3,14,0,0,0], [1,16,0,0,0], [2,16,0,0,0],\
                [1,17,0,0,0], [2,17,0,0,0], [1,20,0,0,0], [2,20,0,0,0],\
                [1, 4,2,0,0], [1, 3,3,0,0], [1, 4,1,0,0], [1, 4,1,1,0],\
                [1,12,4,0,0], [1,16,4,0,0], [1,17,4,0,0], [1, 3,2,0,0],\
                [1, 2,1,0,0], [1, 3,1,0,0], [1, 4,2,1,0], [1, 3,2,1,0],\
                [1, 2,2,1,1], [1, 5,1,0,0], [1,10,1,0,0], [1,11,1,0,0],\
                [1,12,1,0,0], [1,14,1,0,0], [1, 2,2,0,0], [1, 2,2,2,0],\
                [1, 5,2,0,0], [1,12,5,0,0], [1,12,2,0,0], [1,12,2,2,0]]
# Transpose the 60 x 5 table into the 5 x 70 ixa array (last 10 columns
# stay at their zero fill).
for i in range(5):
    for j in range(60):
        ixa[i][j] = ixaTranspose[j][i]
# Species names corresponding to the 60 ixa columns (8-char fields).
chix = ['H       ','H+      ','H-      ','H2      ',\
        'H2+     ','He      ','He+     ','C       ',\
        'C+      ','N       ','N+      ','O       ',\
        'O+      ','Ne      ','Na      ','Na+     ',\
        'Mg      ','Mg+     ','Mg++    ','Al      ',\
        'Al+     ','Si      ','Si+     ','S       ',\
        'S+      ','K       ','K+      ','Ca      ',\
        'Ca+     ','Ca++    ','Ti      ','Ti+     ',\
        'V       ','V+      ','Fe      ','Fe+     ',\
        'CO      ','N2      ','OH      ','H2O     ',\
        'SiO     ','TiO     ','VO      ','CN      ',\
        'CH      ','NH      ','HCO     ','HCN     ',\
        'C2H2    ','HS      ','MgH     ','AlH     ',\
        'SiH     ','CaH     ','C2      ','C3      ',\
        'CS      ','SiS     ','SiC     ','SiC2    ']
# The docstring below preserves the disabled opacity-source name tables.
"""
opch1 = ['Neutral H bound-free and free-free      ',\
         'H- ion bound-free and free-free         ',\
         'He- ion free-free                       ',\
         'H2- free-free                           ',\
         'H2+ bound-free and free-free            ',\
         'Neutral Si bound-free                   ',\
         'Neutral Mg bound-free                   ',\
         'Neutral Ca bound-free                   ',\
         'Neutral Al bound-free                   ',\
         'Neutral Na bound-free                   ',\
         'Neutral K bound-free                    ',\
         'Neutral C bound-free                    ',\
         'Neutral H Rayleigh scattering           ',\
         'Molecular H2 Rayleigh scattering        ',\
         'Neutral He Rayleigh scattering          ',\
         'Free electron scattering                ',\
         'Analytic opacity defined by TESTOPAC cmd',\
         'Grey hydrogen test opacity              ']
opch2 = ['CN red system (straight mean)           ',\
         'CO vibration-rotation                   ',\
         'H2O vibration-rotation (straight mean)  ',\
         'H2O vibration-rotation (harmonic mean)  ',\
         'TiO electronic (straight mean)          ']
"""
| 10,838
| 35.867347
| 123
|
py
|
ChromaStarPy
|
ChromaStarPy-master/ScaleT5000.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 16:39:41 2017
/**
* Initializes and re-scales a Phoenix LTE spherical reference model of
* Teff=5000K, log(g)=4.5, [Fe/H]=0.0, xi=1.0 km/s, l=1.0H_p, M=1M_Sun, R=6.4761D+10cm
*
* @author Ian
*/
@author: ishort
"""
import math
import ToolBox
import Useful
def phxRefTeff():
    """Effective temperature (K) of the Phoenix reference model."""
    return 5000.0
def phxRefLogEg():
    """Surface gravity of the Phoenix reference model as a natural log.

    The reference model has log10(g) = 4.5; the ln(10) factor converts
    that to base e.
    """
    return 4.5 * math.log(10.0)
#//He abundance from Grevesse Asplund et al 2010
def phxRefLogAHe():
    """Natural-log He abundance of the Phoenix reference model.

    A_He = 10.93 on the logarithmic "A_12" scale (H = 12.0), from
    Grevesse, Asplund et al. 2010; the ln(10) factor converts the
    base-10 offset (10.93 - 12.0) to base e.
    """
    return (10.93 - 12.0) * math.log(10.0)
def getphxRefTau64():
    """Return the Phoenix reference model's 64-point optical-depth grid.

    Per the original comment this is tau at a reference wavelength of
    1200 nm ("Tau_12000").  The first entry is exactly 0.0 (top of the
    atmosphere); the remaining values increase monotonically to 100,
    roughly logarithmically spaced.
    """
    #//Corresponding Tau_12000 grid (ie. lambda_0 = 1200 nm):
    phxRefTau64 = [
        0.00000000000000000e+00, 9.99999999999999955e-07, 1.34596032415536424e-06,
        1.81160919420041334e-06, 2.43835409826882661e-06, 3.28192787251147086e-06,
        4.41734470314007309e-06, 5.94557070854439435e-06, 8.00250227816105150e-06,
        1.07710505603676914e-05, 1.44974067037263169e-05, 1.95129342263596216e-05,
        2.62636352765333530e-05, 3.53498110503010939e-05, 4.75794431400941383e-05,
        6.40400427119728238e-05, 8.61953566475303262e-05, 1.16015530173997159e-04,
        1.56152300600049659e-04, 2.10174801133248699e-04, 2.82886943462596935e-04,
        3.80754602122237182e-04, 5.12480587696093125e-04, 6.89778537938765847e-04,
        9.28414544519474451e-04, 1.24960914129198684e-03, 1.68192432488086874e-03,
        2.26380340952144670e-03, 3.04698957090350801e-03, 4.10112707055130046e-03,
        5.51995432128156785e-03, 7.42963950759494875e-03, 1.00000000000000002e-02,
        1.34596032415536422e-02, 1.81160919420041318e-02, 2.43835409826882663e-02,
        3.28192787251147047e-02, 4.41734470314006436e-02, 5.94557070854439401e-02,
        8.00250227816105275e-02, 1.07710505603676912e-01, 1.44974067037263149e-01,
        1.95129342263596212e-01, 2.62636352765332981e-01, 3.53498110503010221e-01,
        4.75794431400941464e-01, 6.40400427119728333e-01, 8.61953566475303190e-01,
        1.16015530173997150e+00, 1.56152300600049654e+00, 2.10174801133248712e+00,
        2.82886943462596641e+00, 3.80754602122236818e+00, 5.12480587696092638e+00,
        6.89778537938765801e+00, 9.28414544519474383e+00, 1.24960914129198670e+01,
        1.68192432488086894e+01, 2.26380340952144650e+01, 3.04698957090350540e+01,
        4.10112707055129562e+01, 5.51995432128157333e+01, 7.42963950759495049e+01,
        1.00000000000000000e+02]
    return phxRefTau64
def getLogPhxRefTau64():
    """Natural log of the Phoenix reference tau grid.

    The grid's first entry is exactly zero, so it cannot be logged;
    instead the top point is extrapolated downward from the second entry
    by one average logarithmic step of the grid.
    """
    tauGrid = getphxRefTau64()
    nDep = len(tauGrid)
    # Element 0 is a placeholder until the extrapolation below.
    logTau = [0.0] + [math.log(tau) for tau in tauGrid[1:]]
    logTau[0] = logTau[1] - (logTau[nDep - 1] - logTau[1]) / nDep
    return logTau
def phxRefTemp(teff, numDeps, tauRos):
    """Scaled kinetic temperature structure T(tau) for the model.

    Interpolates the Phoenix V15 reference model's T(tau_12000) onto the
    caller's tauRos grid (using the natural-log row, tauRos[1]), then
    scales every depth point by the ratio teff / phxRefTeff().

    Parameters:
        teff    -- target effective temperature (K)
        numDeps -- number of depth points in tauRos
        tauRos  -- 2 x numDeps optical-depth grid; row 0 linear, row 1
                   natural log (row 1 is what is interpolated against)

    Returns:
        scaleTemp -- 2 x numDeps array; row 0 is T (K), row 1 is ln(T)
    """
    logE = math.log10(math.e)  # conversion factor for base-10 debug output (unused here)
    #//Theoretical radiative/convective model from Phoenix V15:
    phxRefTemp64 = [
        3.15213572679982190e+03, 3.15213572679982190e+03, 3.17988621810632685e+03,
        3.21012887128011243e+03, 3.24126626267038500e+03, 3.27276078893546673e+03,
        3.30435725697820226e+03, 3.33589185632140106e+03, 3.36724151725549154e+03,
        3.39831714195318273e+03, 3.42906935013664861e+03, 3.45949368388945595e+03,
        3.48962758169505923e+03, 3.51953742647688796e+03, 3.54929791042697934e+03,
        3.57896962155466872e+03, 3.60858205550851335e+03, 3.63812646699481775e+03,
        3.66755983657917068e+03, 3.69681905522719444e+03, 3.72583932497757132e+03,
        3.75457006928661031e+03, 3.78298372918123914e+03, 3.81109104721021231e+03,
        3.83893072914395862e+03, 3.86656355962043835e+03, 3.89408059675027425e+03,
        3.92160316230741546e+03, 3.94927225929978204e+03, 3.97726284805320847e+03,
        4.00584847611869327e+03, 4.03531360317989993e+03, 4.06591896438200047e+03,
        4.09802860937899732e+03, 4.13221207874272022e+03, 4.16915227717330799e+03,
        4.20937593060261861e+03, 4.25369220113429128e+03, 4.30330739566306784e+03,
        4.36035870964639616e+03, 4.42601579216115442e+03, 4.50281614584142153e+03,
        4.59386420090837146e+03, 4.70448179136501403e+03, 4.83727710376560208e+03,
        4.99516189027659129e+03, 5.19102132587796405e+03, 5.40505223548941285e+03,
        5.67247302987449984e+03, 5.95695843497286933e+03, 6.27957483223234703e+03,
        6.71365960956718118e+03, 7.06828382342861460e+03, 7.34157936910693206e+03,
        7.56939938735570740e+03, 7.77138428264261165e+03, 7.95656000812699585e+03,
        8.13006721530056711e+03, 8.29523535580475982e+03, 8.45429779465689171e+03,
        8.60879260449185131e+03, 8.75981713693203528e+03, 8.90838141718757288e+03,
        9.05361290415211806e+03]
    logPhxRefTau64 = getLogPhxRefTau64();
    #// interpolate onto gS3 tauRos grid and re-scale with Teff:
    phxRefTemp = [0.0 for i in range(numDeps)]
    scaleTemp = [ [0.0 for i in range(numDeps)] for j in range(2)]
    # Vectorized replacement for the commented-out per-depth loop:
    #for i in range(numDeps):
    #    phxRefTemp[i] = ToolBox.interpol(logPhxRefTau64, phxRefTemp64, tauRos[1][i])
    #    scaleTemp[0][i] = teff * phxRefTemp[i] / phxRefTeff()
    #    scaleTemp[1][i] = math.log(scaleTemp[0][i]);
    phxRefTemp = [ ToolBox.interpol(logPhxRefTau64, phxRefTemp64, x) for x in tauRos[1] ]
    scaleTemp[0] = [ teff * x / phxRefTeff() for x in phxRefTemp ]
    scaleTemp[1] = [ math.log(x) for x in scaleTemp[0] ]
    return scaleTemp
def phxRefPGas(grav, zScale, logAHe, numDeps, tauRos):
    """Scaled gas-pressure structure P_gas(tau) for the model.

    Interpolates the Phoenix V15 reference model's P_gas(tau) onto the
    caller's tauRos grid, then re-scales it for surface gravity, overall
    metallicity (zScale) and He abundance.  The original comments cite
    Gray, 3rd Ed., Ch. 9 (esp. pp. 189, 196) for the scaling relations.

    Parameters:
        grav    -- target surface gravity (linear; presumably CGS -- confirm with caller)
        zScale  -- linear metallicity scaling factor
        logAHe  -- natural-log He abundance (see phxRefLogAHe for the scale)
        numDeps -- number of depth points
        tauRos  -- 2 x numDeps optical-depth grid; row 0 linear, row 1 natural log

    Returns:
        scalePGas -- 2 x numDeps array; row 0 is P_gas, row 1 is ln(P_gas)
    """
    #//System.out.println("ScaleT5000.phxRefPGas called");
    logE = math.log10(math.e)
    logEg = math.log(grav) #//base e!
    AHe = math.exp(logAHe)
    refAHe = math.exp(phxRefLogAHe())
    logZScale = math.log(zScale)
    #//Theoretical radiative/convective model from Phoenix V15:
    phxRefPGas64 = [
        1.00000000000000005e-04, 1.03770217591881035e+02, 1.24242770084417913e+02,
        1.47686628640383276e+02, 1.74578854906314291e+02, 2.05506972274478784e+02,
        2.41168221287605292e+02, 2.82385081738383917e+02, 3.30127686150304896e+02,
        3.85540773715381306e+02, 4.49974446823229414e+02, 5.25018679681323647e+02,
        6.12542265074691159e+02, 7.14737800095933608e+02, 8.34175243666085407e+02,
        9.73867213356324669e+02, 1.13734973870022168e+03, 1.32878148706864113e+03,
        1.55306409432270971e+03, 1.81598529465124716e+03, 2.12438618583220841e+03,
        2.48635477283421324e+03, 2.91145034581766595e+03, 3.41095942605562823e+03,
        3.99819276314161607e+03, 4.68883438023894087e+03, 5.50134310662684311e+03,
        6.45741052408807354e+03, 7.58249196327514983e+03, 8.90641248566333525e+03,
        1.04639741154490002e+04, 1.22956502717452295e+04, 1.44484787849992390e+04,
        1.69769301182948657e+04, 1.99435621814443475e+04, 2.34195796692420117e+04,
        2.74860930366683497e+04, 3.22351125605895031e+04, 3.77699103578024442e+04,
        4.42033085085744533e+04, 5.16616495136288213e+04, 6.02879692077906366e+04,
        7.02475218656768702e+04, 8.17365047611011832e+04, 9.50146489805318997e+04,
        1.10441316485543124e+05, 1.28451318144638804e+05, 1.49415613553191157e+05,
        1.72877372164747008e+05, 1.96852852539717947e+05, 2.18808320050485723e+05,
        2.35794833242603316e+05, 2.48716041541587241e+05, 2.59902150512206339e+05,
        2.70560370352023339e+05, 2.81251297069544089e+05, 2.92310802132537181e+05,
        3.03988239352240635e+05, 3.16495216131040419e+05, 3.30029076402488339e+05,
        3.44786943994771456e+05, 3.60975297786138486e+05, 3.78815092131546407e+05,
        3.98560549755298765e+05]
    numPhxDeps = len(phxRefPGas64) #//yeah, I know, 64, but that could change!
    logPhxRefPGas64 = [0.0 for i in range(numPhxDeps)]
    #for i in range(numPhxDeps):
    #    logPhxRefPGas64[i] = math.log(phxRefPGas64[i])
    logPhxRefPGas64 = [ math.log(x) for x in phxRefPGas64 ]
    logPhxRefTau64 = getLogPhxRefTau64();
    #// interpolate onto gS3 tauRos grid and re-scale with grav, metallicity and He abundance
    #// From Gray 3rd Ed. Ch.9, esp p. 189, 196:
    phxRefPGas = [0.0 for i in range(numDeps)]
    logPhxRefPGas = [0.0 for i in range(numDeps)]
    scalePGas = [ [0.0 for i in range(numDeps)] for j in range(2) ]
    #//exponents in scaling with g:
    gexpTop = 0.54 #//top of model
    gexpBottom = 0.64 #//bottom of model
    gexpRange = (gexpBottom - gexpTop)
    tauLogRange = tauRos[1][numDeps-1] - tauRos[1][0]
    #double thisGexp;
    #// factor for scaling with A_He:
    logHeDenom = 0.666667 * math.log(1.0 + 4.0*refAHe)
    logPhxRefPGas = [ ToolBox.interpol(logPhxRefTau64, logPhxRefPGas64, x) for x in tauRos[1] ]
    for i in range(numDeps):
        # Gravity exponent varies linearly in log(tau) from the top of the
        # model (0.54) to the bottom (0.64).
        thisGexp = gexpTop + gexpRange * (tauRos[1][i] - tauRos[1][0]) / tauLogRange
        #//scaling with g
        scalePGas[1][i] = thisGexp*logEg + logPhxRefPGas[i] - thisGexp*phxRefLogEg()
    # Metallicity and He-abundance scalings applied to the whole column at
    # once (vectorized forms of the per-depth statements the original
    # translation left commented out inside the loop):
    #//scaling with zscl:
    scalePGas[1] = [ -0.333333*logZScale + x for x in scalePGas[1] ]
    #//scaling with A_He:
    scalePGas[1] = [ 0.666667 * math.log(1.0 + 4.0*AHe) + x - logHeDenom for x in scalePGas[1] ]
    scalePGas[0] = [ math.exp(x) for x in scalePGas[1] ]
    #Carefull here - P at upper boundary can be an underestimate, but it must not be greater than value at next depth in!
    if (scalePGas[0][0] >= scalePGas[0][1]):
        scalePGas[0][0] = 0.5 * scalePGas[0][1];
        scalePGas[1][0] = math.log(scalePGas[0][0]);
    return scalePGas
def phxRefPe(teff, grav, numDeps, tauRos, zScale, logAHe):
    """Scale the Phoenix V15 reference electron-pressure structure onto a
    target model.

    Interpolates ln(Pe) from the reference tau grid onto the tauRos grid,
    then rescales with surface gravity, Teff (exponents only valid for
    Teff < 10000 K), metallicity, and He abundance.

    Parameters:
      teff    -- target effective temperature (K)
      grav    -- target surface gravity (linear, cgs)
      numDeps -- number of depth points in tauRos
      tauRos  -- 2-row depth grid: row 0 linear tau, row 1 ln(tau)
      zScale  -- linear metallicity scaling factor
      logAHe  -- natural-log He abundance
    Returns scalePe: row 0 = Pe (linear), row 1 = ln(Pe), length numDeps.
    """
    logEg = math.log(grav)  # base e!
    AHe = math.exp(logAHe)
    refAHe = math.exp(phxRefLogAHe())
    logZScale = math.log(zScale)
    # Theoretical radiative/convective model from Phoenix V15:
    phxRefPe64 = [
        1.17858427569630401e-08, 1.73073837795169436e-03, 2.13762360059438538e-03,
        2.64586145846806451e-03, 3.26749020460433354e-03, 4.02219945676032288e-03,
        4.93454747856481805e-03, 6.03357965110110344e-03, 7.35319802933484621e-03,
        8.93306098318919460e-03, 1.08200092390451780e-02, 1.30700158082515377e-02,
        1.57505131367194594e-02, 1.89428593874781982e-02, 2.27446519479000651e-02,
        2.72716961646799864e-02, 3.26596927620770305e-02, 3.90659173672675136e-02,
        4.66713907010225318e-02, 5.56843086932707065e-02, 6.63452384304821230e-02,
        7.89341909634427297e-02, 9.37792909747245523e-02, 1.11270186635302790e-01,
        1.31870014183696899e-01, 1.56130489360824298e-01, 1.84715397349025645e-01,
        2.18428766543559472e-01, 2.58245610307223983e-01, 3.05363622257444900e-01,
        3.61311333509324040e-01, 4.27990544717643029e-01, 5.07743853690445168e-01,
        6.03604039632526179e-01, 7.19674246257567152e-01, 8.61422066803848585e-01,
        1.03568172049434559e+00, 1.25187412720684454e+00, 1.52336996895144261e+00,
        1.87078029858400652e+00, 2.31893413667797388e+00, 2.90597658045488094e+00,
        3.68566481623166187e+00, 4.74110273402785865e+00, 6.16546324347510222e+00,
        8.08486709272609971e+00, 1.07959796585076546e+01, 1.46390000057528482e+01,
        2.17273927465764913e+01, 3.56194058574816239e+01, 6.57361652682183575e+01,
        1.48468954779851543e+02, 2.80489497081349555e+02, 4.46587250419467807e+02,
        6.46784311972032128e+02, 8.86744838282462069e+02, 1.17244960918767083e+03,
        1.51089748714632174e+03, 1.91050957850908458e+03, 2.38115682377229541e+03,
        2.93426662234414562e+03, 3.58305801646245618e+03, 4.34379670059742239e+03,
        5.22642525609140284e+03]
    logPhxRefTau64 = getLogPhxRefTau64()
    logPhxRefPe64 = [math.log(x) for x in phxRefPe64]
    # Interpolate onto the gS3 tauRos grid and re-scale with Teff:
    scalePe = [[0.0 for i in range(numDeps)] for j in range(2)]
    # Exponents in scaling with Teff -- ONLY VALID FOR Teff < 10000 K:
    omegaTaum1 = 0.0012  # log_10(tau) < 0.1
    omegaTaup1 = 0.0015  # log_10(tau) > 1.0
    omegaRange = (omegaTaup1 - omegaTaum1)
    lonOfM1 = math.log(0.1)
    # Exponents in scaling with g:
    gexpTop = 0.48      # top of model
    gexpBottom = 0.33   # bottom of model
    gexpRange = (gexpBottom - gexpTop)
    tauLogRange = tauRos[1][numDeps-1] - tauRos[1][0]
    thisOmega = omegaTaum1  # default initialization
    # Factor for scaling with A_He:
    logHeDenom = 0.333333 * math.log(1.0 + 4.0*refAHe)
    logPhxRefPe = [ToolBox.interpol(logPhxRefTau64, logPhxRefPe64, x) for x in tauRos[1]]
    for i in range(numDeps):
        # Gravity exponent varies linearly with ln(tau) depth:
        thisGexp = gexpTop + gexpRange * (tauRos[1][i] - tauRos[1][0]) / tauLogRange
        # Teff exponent: constant outside 0.1 <= tau <= 10, linear inside:
        if (tauRos[0][i] < 0.1):
            thisOmega = omegaTaum1
        if (tauRos[0][i] > 10.0):
            thisOmega = omegaTaup1
        if ((tauRos[0][i] >= 0.1) and (tauRos[0][i] <= 10.0)):
            thisOmega = omegaTaum1 + omegaRange * (tauRos[1][i] - lonOfM1) / tauLogRange
        # Scaling with g:
        scalePe[1][i] = thisGexp*logEg + logPhxRefPe[i] - thisGexp*phxRefLogEg()
        # Scaling with Teff:
        scalePe[1][i] = thisOmega*teff + scalePe[1][i] - thisOmega*phxRefTeff()
    # Scaling with metallicity and He abundance, applied to the whole column:
    scalePe[1] = [0.333333*logZScale + x for x in scalePe[1]]
    scalePe[1] = [0.333333 * math.log(1.0 + 4.0*AHe) + x - logHeDenom for x in scalePe[1]]
    scalePe[0] = [math.exp(x) for x in scalePe[1]]
    return scalePe
def phxRefNe(numDeps, scaleTemp, scalePe):
    """Derive electron number density from electron pressure and temperature.

    Computes ln(Ne) = ln(Pe) - ln(T) - Useful.logK() at every depth
    (presumably ln of Boltzmann's constant, i.e. Ne = Pe/(kT) -- confirm
    against the Useful module).

    Parameters:
      numDeps   -- number of depth points
      scaleTemp -- [linear, natural-log] temperature structure
      scalePe   -- [linear, natural-log] electron-pressure structure
    Returns scaleNe: row 0 = Ne (linear), row 1 = ln(Ne).
    """
    scaleNe = [[0.0 for i in range(numDeps)] for j in range(2)]
    scaleNe[1] = [scalePe[1][i] - scaleTemp[1][i] - Useful.logK() for i in range(numDeps)]
    scaleNe[0] = [math.exp(x) for x in scaleNe[1]]
    return scaleNe
| 16,606
| 46.99711
| 121
|
py
|
ChromaStarPy
|
ChromaStarPy-master/LDC.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 1 12:58:43 2017
@author: ishort
"""
#JB#
import numpy as np
from sklearn.metrics import r2_score
#returns the "experimental" data
#takes a coefficent episilon(lambda) and a cosine value.
def func(coeff, ctheta):
    """Linear limb-darkening model: 1 - coeff + coeff * ctheta."""
    model_value = 1 - coeff + coeff * ctheta
    return model_value
#returns the mean^2 "experimental" value
def meansq(mean, val):
    """Squared deviation of *val* from *mean*."""
    deviation = val - mean
    return deviation ** 2
#returns the mean^2 "real" value
def residsq(mean, funVal):
    """Squared deviation of the model value *funVal* from *mean*."""
    deviation = funVal - mean
    return deviation ** 2
# Grid of candidate linear limb-darkening coefficients epsilon in [0, 1],
# searched exhaustively by ldc() below; 750 samples sets the resolution.
epi=np.linspace(0,1,num=750)#can increase to get a better solution, but 750 seems fine
#JB#
def ldc(numLams, lambdaScale, numThetas, cosTheta, contIntens):
    """Fit a linear limb-darkening coefficient (LDC) per wavelength.

    For each wavelength, every candidate epsilon in the module-level `epi`
    grid is tried: the model I/I0 = 1 - eps + eps*cos(theta) is evaluated at
    each disk position and compared (via R^2 score) against the observed
    intensity ratios; the epsilon with the highest R^2 wins.

    Parameters:
      numLams     -- number of wavelengths
      lambdaScale -- wavelength grid (unused here; kept for interface parity)
      numThetas   -- number of disk positions
      cosTheta    -- 2-row grid; row 1 holds cos(theta) values
      contIntens  -- continuum intensities, indexed [wavelength][theta]
    Returns a list of numLams best-fit coefficients.
    """
    ldc = [0.0 for i in range(numLams)]
    for iL in range(numLams):
        R2 = []  # one R^2 score per candidate epsilon
        for epiI in epi:
            modelY = []       # model I/I0 for this epsilon
            intensities = []  # observed I/I0
            I0 = contIntens[iL][0]
            # iT = 0 is the normalization point; the last theta is skipped
            # (preserved from the original loop bounds).
            for iT in range(1, numThetas-1):
                intensities.append(contIntens[iL][iT] / I0)
                modelY.append(func(epiI, cosTheta[1][iT]))
            # NOTE(review): r2_score expects (y_true, y_pred); the original
            # passes the model as y_true and the data as y_pred. Argument
            # order preserved here, but it looks swapped -- verify.
            R2.append(r2_score(modelY, intensities))
        # Keep the epsilon whose model best matches the data:
        ldc[iL] = epi[R2.index(max(R2))]
    return ldc
| 2,694
| 27.072917
| 100
|
py
|
ChromaStarPy
|
ChromaStarPy-master/ScaleT4250g20.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 16:39:41 2017
/**
* Initializes and re-scales a Phoenix LTE spherical reference model of
* Teff=4250K, log(g)=2.0, [Fe/H]=0.0, xi=1.0 km/s, l=1.0H_p, M=1M_Sun, R=6.4761e+10cm
*
* @author Ian
*/
@author: ishort
"""
import math
import ToolBox
import Useful
def phxRefTeff():
    """Effective temperature (K) of the Phoenix V15 reference model."""
    ref_teff = 4250.0
    return ref_teff
def phxRefLogEg():
    """Natural log of the reference surface gravity (log10 g = 2.0)."""
    log10_g = 2.0
    return math.log(10.0) * log10_g  # convert base-10 exponent to base e
#//He abundance from Grevesse Asplund et al 2010
def phxRefLogAHe():
    """Natural-log He abundance: A(He) = 10.93 on the logarithmic A_12
    scale (Grevesse, Asplund et al. 2010), converted to base e."""
    a12_he = 10.93 - 12.0
    return math.log(10.0) * a12_he
def getphxRefTau64():
    """Return the fixed 64-point tau_12000 optical-depth grid
    (lambda_0 = 1200 nm) of the Phoenix reference model.

    The first point is exactly 0.0 (the surface); the rest span
    1e-6 .. 100 roughly logarithmically.
    """
    #//Corresponding Tau_12000 grid (ie. lambda_0 = 1200 nm):
    phxRefTau64 = [
        0.00000000000000000e+00, 9.99999999999999955e-07, 1.34596032415536424e-06,
        1.81160919420041334e-06, 2.43835409826882661e-06, 3.28192787251147086e-06,
        4.41734470314007309e-06, 5.94557070854439435e-06, 8.00250227816105150e-06,
        1.07710505603676914e-05, 1.44974067037263169e-05, 1.95129342263596216e-05,
        2.62636352765333530e-05, 3.53498110503010939e-05, 4.75794431400941383e-05,
        6.40400427119728238e-05, 8.61953566475303262e-05, 1.16015530173997159e-04,
        1.56152300600049659e-04, 2.10174801133248699e-04, 2.82886943462596935e-04,
        3.80754602122237182e-04, 5.12480587696093125e-04, 6.89778537938765847e-04,
        9.28414544519474451e-04, 1.24960914129198684e-03, 1.68192432488086874e-03,
        2.26380340952144670e-03, 3.04698957090350801e-03, 4.10112707055130046e-03,
        5.51995432128156785e-03, 7.42963950759494875e-03, 1.00000000000000002e-02,
        1.34596032415536422e-02, 1.81160919420041318e-02, 2.43835409826882663e-02,
        3.28192787251147047e-02, 4.41734470314006436e-02, 5.94557070854439401e-02,
        8.00250227816105275e-02, 1.07710505603676912e-01, 1.44974067037263149e-01,
        1.95129342263596212e-01, 2.62636352765332981e-01, 3.53498110503010221e-01,
        4.75794431400941464e-01, 6.40400427119728333e-01, 8.61953566475303190e-01,
        1.16015530173997150e+00, 1.56152300600049654e+00, 2.10174801133248712e+00,
        2.82886943462596641e+00, 3.80754602122236818e+00, 5.12480587696092638e+00,
        6.89778537938765801e+00, 9.28414544519474383e+00, 1.24960914129198670e+01,
        1.68192432488086894e+01, 2.26380340952144650e+01, 3.04698957090350540e+01,
        4.10112707055129562e+01, 5.51995432128157333e+01, 7.42963950759495049e+01,
        1.00000000000000000e+02]
    return phxRefTau64
def getLogPhxRefTau64():
    """Natural log of the reference tau grid from getphxRefTau64().

    The surface point tau[0] is exactly 0, so ln is undefined there; it is
    replaced by a value extrapolated slightly below ln(tau[1]).
    """
    phxRefTau64 = getphxRefTau64()
    numPhxDep = len(phxRefTau64)
    logPhxRefTau64 = [0.0 for i in range(numPhxDep)]
    for i in range(1, numPhxDep):
        logPhxRefTau64[i] = math.log(phxRefTau64[i])
    # Extrapolate a finite log value for the tau = 0 surface point:
    logPhxRefTau64[0] = logPhxRefTau64[1] - (logPhxRefTau64[numPhxDep - 1] - logPhxRefTau64[1]) / numPhxDep
    return logPhxRefTau64
def phxRefTemp(teff, numDeps, tauRos):
    """Interpolate the Phoenix V15 reference T(tau) structure onto the
    tauRos grid and rescale it linearly by teff / reference Teff.

    Parameters:
      teff    -- target effective temperature (K)
      numDeps -- number of depth points in tauRos
      tauRos  -- 2-row depth grid; row 1 is ln(tau)
    Returns scaleTemp: row 0 = T (K), row 1 = ln(T), length numDeps.
    """
    #//Theoretical radiative/convective model from Phoenix V15:
    phxRefTemp64 = [
        2.55177189467776134e+03, 2.55177189467776134e+03, 2.57792125655286191e+03,
        2.60758002435901381e+03, 2.64012742898025908e+03, 2.67432875475946230e+03,
        2.70893891273552163e+03, 2.74308870598832664e+03, 2.77632383320681129e+03,
        2.80848811483166946e+03, 2.83959882227651724e+03, 2.86975658734314266e+03,
        2.89908813206188461e+03, 2.92771359779851400e+03, 2.95573102630948051e+03,
        2.98321198037901013e+03, 3.01020749755630777e+03, 3.03675606855582828e+03,
        3.06288810300240721e+03, 3.08862982043339889e+03, 3.11400422160098924e+03,
        3.13903037085205142e+03, 3.16372090154785838e+03, 3.18810905092703479e+03,
        3.21223709737028685e+03, 3.23616013487906321e+03, 3.25997011697764810e+03,
        3.28379847029234497e+03, 3.30779792957306154e+03, 3.33215641253420881e+03,
        3.35718199939999613e+03, 3.38317056293436599e+03, 3.41035215459581877e+03,
        3.43904544965103469e+03, 3.46977882782005645e+03, 3.50316432370656776e+03,
        3.53952093749718961e+03, 3.57938756282554868e+03, 3.62358876591721355e+03,
        3.67390373594475159e+03, 3.73057002905895024e+03, 3.79526239264127798e+03,
        3.87014190881368677e+03, 3.96034605960104500e+03, 4.06785077337546363e+03,
        4.19530126018311603e+03, 4.35807101922509446e+03, 4.53128152603078979e+03,
        4.75647877892966608e+03, 4.99815140831592271e+03, 5.29640554819846147e+03,
        5.60111278999269234e+03, 6.01099379170666271e+03, 6.36043009619938857e+03,
        6.87521615935859882e+03, 7.26044412747461593e+03, 7.50448146936407466e+03,
        7.68547260686038317e+03, 7.83660366095023619e+03, 7.97031461253075395e+03,
        8.09288613904806061e+03, 8.20796577074222841e+03, 8.31800144917403668e+03,
        8.42235419676150195e+03]
    logPhxRefTau64 = getLogPhxRefTau64()
    #// interpolate onto gS3 tauRos grid and re-scale with Teff:
    phxRefTemp = [ToolBox.interpol(logPhxRefTau64, phxRefTemp64, x) for x in tauRos[1]]
    scaleTemp = [[0.0 for i in range(numDeps)] for j in range(2)]
    scaleTemp[0] = [teff * x / phxRefTeff() for x in phxRefTemp]
    scaleTemp[1] = [math.log(x) for x in scaleTemp[0]]
    return scaleTemp
def phxRefPGas(grav, zScale, logAHe, numDeps, tauRos):
    """Scale the Phoenix V15 reference gas-pressure structure onto a
    target model.

    Interpolates ln(Pgas) onto the tauRos grid, then rescales with surface
    gravity (depth-dependent exponent), metallicity, and He abundance,
    following Gray 3rd Ed. Ch. 9 (esp. pp. 189, 196).

    Parameters:
      grav    -- target surface gravity (linear, cgs)
      zScale  -- linear metallicity scaling factor
      logAHe  -- natural-log He abundance
      numDeps -- number of depth points in tauRos
      tauRos  -- 2-row depth grid; row 1 is ln(tau)
    Returns scalePGas: row 0 = Pgas (linear), row 1 = ln(Pgas).
    """
    logEg = math.log(grav)  # base e!
    AHe = math.exp(logAHe)
    refAHe = math.exp(phxRefLogAHe())
    logZScale = math.log(zScale)
    #//Theoretical radiative/convective model from Phoenix V15:
    phxRefPGas64 = [
        1.00000000000000005e-04, 4.30797022881529035e+00, 5.23633209862413107e+00,
        6.35110679837766412e+00, 7.68415255320091806e+00, 9.26944725328996455e+00,
        1.11437944000671951e+01, 1.33486175933519231e+01, 1.59320732110696728e+01,
        1.89510405786159843e+01, 2.24729535630686001e+01, 2.65776121702747155e+01,
        3.13591521574494898e+01, 3.69283427891380711e+01, 4.34153596323810476e+01,
        5.09731596421035889e+01, 5.97815115471393099e+01, 7.00515965533140843e+01,
        8.20320988196306047e+01, 9.60157114466986172e+01, 1.12346894715709283e+02,
        1.31431137235228107e+02, 1.53746059612369663e+02, 1.79853925611152988e+02,
        2.10416068512255976e+02, 2.46209759085281831e+02, 2.88146638387011080e+02,
        3.37292455489998588e+02, 3.94889455574428723e+02, 4.62381218232488322e+02,
        5.41431239027500510e+02, 6.33944346680425951e+02, 7.42101436711750239e+02,
        8.68388258638872003e+02, 1.01559408923251908e+03, 1.18679360231291594e+03,
        1.38539626183849145e+03, 1.61516883770226173e+03, 1.88020120421177171e+03,
        2.18463232976707104e+03, 2.53293658371589027e+03, 2.93000255565556563e+03,
        3.38117288723951515e+03, 3.89194415900473678e+03, 4.46986439959823201e+03,
        5.12776215162745939e+03, 5.88482548647403291e+03, 6.77685699601984379e+03,
        7.85508152689431518e+03, 9.16793034379196797e+03, 1.06499829584269464e+04,
        1.20573723930509423e+04, 1.31104385587741126e+04, 1.38632723386838443e+04,
        1.43359336214159030e+04, 1.46332769212074945e+04, 1.48729928730357842e+04,
        1.50999078995603995e+04, 1.53309455833121519e+04, 1.55753421735992888e+04,
        1.58400487938745937e+04, 1.61313349003330495e+04, 1.64553234176194455e+04,
        1.68190844909027946e+04]
    logPhxRefPGas64 = [math.log(x) for x in phxRefPGas64]
    logPhxRefTau64 = getLogPhxRefTau64()
    #// interpolate onto gS3 tauRos grid and re-scale with grav, metallicity and He abundance
    #// From Gray 3rd Ed. Ch.9, esp p. 189, 196:
    scalePGas = [[0.0 for i in range(numDeps)] for j in range(2)]
    # Exponents in scaling with g:
    gexpTop = 0.54      # top of model
    gexpBottom = 0.64   # bottom of model
    gexpRange = (gexpBottom - gexpTop)
    tauLogRange = tauRos[1][numDeps-1] - tauRos[1][0]
    # Factor for scaling with A_He:
    logHeDenom = 0.666667 * math.log(1.0 + 4.0*refAHe)
    logPhxRefPGas = [ToolBox.interpol(logPhxRefTau64, logPhxRefPGas64, x) for x in tauRos[1]]
    # NOTE(review): the original carried a "SOMETHING WRONG!!" debug marker
    # around this scaling; re-verify the scaled pressures against a
    # reference run.
    for i in range(numDeps):
        # Gravity exponent varies linearly with ln(tau) depth:
        thisGexp = gexpTop + gexpRange * (tauRos[1][i] - tauRos[1][0]) / tauLogRange
        # Scaling with g:
        scalePGas[1][i] = thisGexp*logEg + logPhxRefPGas[i] - thisGexp*phxRefLogEg()
    # Scaling with metallicity and He abundance, applied to the whole column:
    scalePGas[1] = [-0.333333*logZScale + x for x in scalePGas[1]]
    scalePGas[1] = [0.666667 * math.log(1.0 + 4.0*AHe) + x - logHeDenom for x in scalePGas[1]]
    scalePGas[0] = [math.exp(x) for x in scalePGas[1]]
    # Careful here - P at upper boundary can be an underestimate, but it
    # must not be greater than the value at the next depth in!
    if (scalePGas[0][0] >= scalePGas[0][1]):
        scalePGas[0][0] = 0.5 * scalePGas[0][1]
        scalePGas[1][0] = math.log(scalePGas[0][0])
    return scalePGas
def phxRefPe(teff, grav, numDeps, tauRos, zScale, logAHe):
    """Scale the Phoenix V15 reference (Teff=4250 K, log g=2.0) electron
    pressure structure onto a target model.

    Interpolates ln(Pe) onto the tauRos grid, then rescales with surface
    gravity, Teff (exponents only valid for Teff < 10000 K), metallicity,
    and He abundance.

    Parameters:
      teff    -- target effective temperature (K)
      grav    -- target surface gravity (linear, cgs)
      numDeps -- number of depth points in tauRos
      tauRos  -- 2-row depth grid: row 0 linear tau, row 1 ln(tau)
      zScale  -- linear metallicity scaling factor
      logAHe  -- natural-log He abundance
    Returns scalePe: row 0 = Pe (linear), row 1 = ln(Pe), length numDeps.
    """
    logEg = math.log(grav)  # base e!
    AHe = math.exp(logAHe)
    refAHe = math.exp(phxRefLogAHe())
    logZScale = math.log(zScale)
    #//Theoretical radiative/convective model from Phoenix V15:
    phxRefPe64 = [
        8.82107332460937786e-09, 2.78937385278448721e-05, 3.47576301038577686e-05,
        4.35832572659463317e-05, 5.49376619037973015e-05, 6.94409656870532439e-05,
        8.77707903792445917e-05, 1.10693651821575575e-04, 1.39104108185757799e-04,
        1.74066374927776200e-04, 2.16859921028963076e-04, 2.69028731246404777e-04,
        3.32432061837431244e-04, 4.09296056659801003e-04, 5.02267654306449442e-04,
        6.14473664793957226e-04, 7.49597452349998888e-04, 9.11974871131562793e-04,
        1.10671154087655646e-03, 1.33981929191927902e-03, 1.61837137445693482e-03,
        1.95067571498008536e-03, 2.34645801822358068e-03, 2.81730972458459810e-03,
        3.37701527100230971e-03, 4.04206883240371042e-03, 4.83264944868273382e-03,
        5.77375902899801303e-03, 6.89635454260574543e-03, 8.23939377538898503e-03,
        9.85478499694628605e-03, 1.18084450314643753e-02, 1.41821928408006545e-02,
        1.70844502273588689e-02, 2.06675266544882504e-02, 2.51401461738746321e-02,
        3.07601652353682656e-02, 3.78883555684116358e-02, 4.70434551509077148e-02,
        5.90737609051253110e-02, 7.49278881219457016e-02, 9.61506501246208595e-02,
        1.25001054449255633e-01, 1.65469764331727026e-01, 2.21877141054763416e-01,
        2.99471993844184214e-01, 4.09463450124863737e-01, 5.44807309494293679e-01,
        7.32622090968494066e-01, 1.00147011151172505e+00, 1.61234892801148755e+00,
        3.05928587566573817e+00, 7.65951702544590596e+00, 1.63896549667325182e+01,
        4.47471354140328827e+01, 8.76262151753557248e+01, 1.30080006341931238e+02,
        1.72013432855285373e+02, 2.15462867458483203e+02, 2.61494582627386649e+02,
        3.10959654478989705e+02, 3.64658419590852475e+02, 4.23480004219150715e+02,
        4.87036243289867400e+02]
    logPhxRefTau64 = getLogPhxRefTau64()
    logPhxRefPe64 = [math.log(x) for x in phxRefPe64]
    #// interpolate onto gS3 tauRos grid and re-scale with Teff:
    scalePe = [[0.0 for i in range(numDeps)] for j in range(2)]
    # Exponents in scaling with Teff -- ONLY VALID FOR Teff < 10000 K:
    omegaTaum1 = 0.0012  # log_10(tau) < 0.1
    omegaTaup1 = 0.0015  # log_10(tau) > 1.0
    omegaRange = (omegaTaup1 - omegaTaum1)
    lonOfM1 = math.log(0.1)
    # Exponents in scaling with g:
    gexpTop = 0.48      # top of model
    gexpBottom = 0.33   # bottom of model
    gexpRange = (gexpBottom - gexpTop)
    tauLogRange = tauRos[1][numDeps-1] - tauRos[1][0]
    thisOmega = omegaTaum1  # default initialization
    # Factor for scaling with A_He:
    logHeDenom = 0.333333 * math.log(1.0 + 4.0*refAHe)
    logPhxRefPe = [ToolBox.interpol(logPhxRefTau64, logPhxRefPe64, x) for x in tauRos[1]]
    for i in range(numDeps):
        # Gravity exponent varies linearly with ln(tau) depth:
        thisGexp = gexpTop + gexpRange * (tauRos[1][i] - tauRos[1][0]) / tauLogRange
        # Teff exponent: constant outside 0.1 <= tau <= 10, linear inside:
        if (tauRos[0][i] < 0.1):
            thisOmega = omegaTaum1
        if (tauRos[0][i] > 10.0):
            thisOmega = omegaTaup1
        if ((tauRos[0][i] >= 0.1) and (tauRos[0][i] <= 10.0)):
            thisOmega = omegaTaum1 + omegaRange * (tauRos[1][i] - lonOfM1) / tauLogRange
        # Scaling with g:
        scalePe[1][i] = thisGexp*logEg + logPhxRefPe[i] - thisGexp*phxRefLogEg()
        # Scaling with Teff:
        scalePe[1][i] = thisOmega*teff + scalePe[1][i] - thisOmega*phxRefTeff()
    # Scaling with metallicity and He abundance, applied to the whole column:
    scalePe[1] = [0.333333*logZScale + x for x in scalePe[1]]
    scalePe[1] = [0.333333 * math.log(1.0 + 4.0*AHe) + x - logHeDenom for x in scalePe[1]]
    scalePe[0] = [math.exp(x) for x in scalePe[1]]
    return scalePe
def phxRefNe(numDeps, scaleTemp, scalePe):
    """Derive electron number density from electron pressure and temperature.

    Computes ln(Ne) = ln(Pe) - ln(T) - Useful.logK() at every depth
    (presumably ln of Boltzmann's constant, i.e. Ne = Pe/(kT) -- confirm
    against the Useful module).

    Parameters:
      numDeps   -- number of depth points
      scaleTemp -- [linear, natural-log] temperature structure
      scalePe   -- [linear, natural-log] electron-pressure structure
    Returns scaleNe: row 0 = Ne (linear), row 1 = ln(Ne).
    """
    scaleNe = [[0.0 for i in range(numDeps)] for j in range(2)]
    scaleNe[1] = [scalePe[1][i] - scaleTemp[1][i] - Useful.logK() for i in range(numDeps)]
    scaleNe[0] = [math.exp(x) for x in scaleNe[1]]
    return scaleNe
| 17,414
| 44.708661
| 122
|
py
|
ChromaStarPy
|
ChromaStarPy-master/ScaleSolar.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 09:59:40 2017
Routines to read in a standard solar atmosphere model computed with Phoenix V15 and
calculate associated quantities. Needed for physical treatments normalized with
solar quantities
@author: ishort
"""
import math
import ToolBox
import Useful
def phxSunTeff():
    """Solar effective temperature (K) of the Phoenix reference model."""
    sun_teff = 5777.0
    return sun_teff
def phxSunLogEg():
    """Natural log of the solar surface gravity (log10 g = 4.44)."""
    log10_g = 4.44
    return math.log(10.0) * log10_g  # convert base-10 exponent to base e
def getPhxSunTau64():
    """Return the fixed 64-point optical-depth grid of the Phoenix solar
    reference model.

    The first point is exactly 0.0 (the surface); the rest span
    1e-6 .. 100 roughly logarithmically.
    """
    phxSunTau64 = [
        0.00000000000000000e+00, 9.99999999999999955e-07, 1.34596032415536424e-06,
        1.81160919420041334e-06, 2.43835409826882661e-06, 3.28192787251147086e-06,
        4.41734470314007309e-06, 5.94557070854439435e-06, 8.00250227816105150e-06,
        1.07710505603676912e-05, 1.44974067037263168e-05, 1.95129342263596224e-05,
        2.62636352765333536e-05, 3.53498110503010944e-05, 4.75794431400941376e-05,
        6.40400427119728256e-05, 8.61953566475303296e-05, 1.16015530173997152e-04,
        1.56152300600049664e-04, 2.10174801133248704e-04, 2.82886943462596928e-04,
        3.80754602122237184e-04, 5.12480587696093120e-04, 6.89778537938765824e-04,
        9.28414544519474432e-04, 1.24960914129198688e-03, 1.68192432488086880e-03,
        2.26380340952144672e-03, 3.04698957090350784e-03, 4.10112707055130048e-03,
        5.51995432128156800e-03, 7.42963950759494912e-03, 1.00000000000000002e-02,
        1.34596032415536416e-02, 1.81160919420041312e-02, 2.43835409826882656e-02,
        3.28192787251147072e-02, 4.41734470314006464e-02, 5.94557070854439424e-02,
        8.00250227816105216e-02, 1.07710505603676912e-01, 1.44974067037263136e-01,
        1.95129342263596224e-01, 2.62636352765332992e-01, 3.53498110503010240e-01,
        4.75794431400941440e-01, 6.40400427119728384e-01, 8.61953566475303168e-01,
        1.16015530173997152e+00, 1.56152300600049664e+00, 2.10174801133248704e+00,
        2.82886943462596640e+00, 3.80754602122236800e+00, 5.12480587696092608e+00,
        6.89778537938765824e+00, 9.28414544519474432e+00, 1.24960914129198672e+01,
        1.68192432488086880e+01, 2.26380340952144640e+01, 3.04698957090350528e+01,
        4.10112707055129536e+01, 5.51995432128157312e+01, 7.42963950759495040e+01,
        1.00000000000000000e+02]
    return phxSunTau64
def getLogPhxSunTau64():
    """Natural log of the solar reference tau grid from getPhxSunTau64().

    The surface point tau[0] is exactly 0, so ln is undefined there; it is
    replaced by a value extrapolated slightly below ln(tau[1]).
    """
    phxSunTau64 = getPhxSunTau64()
    numPhxDep = len(phxSunTau64)
    logPhxSunTau64 = [0.0 for i in range(numPhxDep)]
    for i in range(1, numPhxDep):
        logPhxSunTau64[i] = math.log(phxSunTau64[i])
    # Extrapolate a finite log value for the tau = 0 surface point:
    logPhxSunTau64[0] = logPhxSunTau64[1] - (logPhxSunTau64[numPhxDep - 1] - logPhxSunTau64[1]) / numPhxDep
    return logPhxSunTau64
def phxSunTemp(teff, numDeps, tauRos):
    """Interpolate the Phoenix solar T(tau) structure onto the tauRos grid
    and rescale it linearly by teff / Teff_sun.

    Parameters:
      teff    -- target effective temperature (K)
      numDeps -- number of depth points in tauRos
      tauRos  -- 2-row depth grid; row 1 is ln(tau)
    Returns scaleTemp: row 0 = T (K), row 1 = ln(T), length numDeps.
    """
    #//Theoretical radiative/convective model from Phoenix V15:
    phxSunTemp64 = [
        3.75778887392339840e+03, 3.75778887392339840e+03, 3.78480175327941504e+03,
        3.81385432525541760e+03, 3.84360130602512768e+03, 3.87340585446516608e+03,
        3.90300184305606656e+03, 3.93231689265254528e+03, 3.96137919852984000e+03,
        3.99027119028325824e+03, 4.01910484194699648e+03, 4.04798292490651008e+03,
        4.07699548886169152e+03, 4.10623218035810816e+03, 4.13574364539801920e+03,
        4.16548101060783104e+03, 4.19541371831173824e+03, 4.22551121760088000e+03,
        4.25571229065970624e+03, 4.28594188575783232e+03, 4.31613168919769152e+03,
        4.34620698440244928e+03, 4.37603327507328960e+03, 4.40564394765877952e+03,
        4.43507740841559296e+03, 4.46439148496796224e+03, 4.49375530130093952e+03,
        4.52341166116436480e+03, 4.55357281866347264e+03, 4.58446079852491520e+03,
        4.61663974201107520e+03, 4.65052341797810624e+03, 4.68623381803595456e+03,
        4.72408924142126144e+03, 4.76494152329308416e+03, 4.80984310271200128e+03,
        4.85897778977827584e+03, 4.91315894280032960e+03, 4.97390461818851328e+03,
        5.04531167969494336e+03, 5.12680296183560704e+03, 5.22061204180252480e+03,
        5.32918534350649152e+03, 5.46202432323604352e+03, 5.61966782651567040e+03,
        5.80986721241013376e+03, 6.03911828822760320e+03, 6.23433005487621120e+03,
        6.53458311644527488e+03, 6.87429103746811904e+03, 7.29999981509928192e+03,
        7.66682942009826304e+03, 7.94223816217841024e+03, 8.16133659245977728e+03,
        8.35020013757955200e+03, 8.52047273964030720e+03, 8.67812135633704064e+03,
        8.82687568743616768e+03, 8.96926538519515648e+03, 9.10706359999037824e+03,
        9.24154121553023488e+03, 9.37363000902155008e+03, 9.50427569030960000e+03,
        9.63219702937432192e+03]
    #// interpolate onto gS3 tauRos grid and re-scale with Teff:
    phxSunTemp = [0.0 for i in range(numDeps)]
    scaleTemp = [[0.0 for i in range(numDeps)] for j in range(2)]
    logPhxSunTau64 = getLogPhxSunTau64()
    for i in range(numDeps):
        phxSunTemp[i] = ToolBox.interpol(logPhxSunTau64, phxSunTemp64, tauRos[1][i])
        scaleTemp[0][i] = teff * phxSunTemp[i] / phxSunTeff()
        scaleTemp[1][i] = math.log(scaleTemp[0][i])
    return scaleTemp
def phxSunPGas(grav, numDeps, tauRos):
    """Interpolate the Phoenix solar gas-pressure structure onto the tauRos
    grid and rescale for surface gravity:
    ln(Pgas) = ln(Pgas_sun) + ln(g) - ln(g_sun).

    Parameters:
      grav    -- target surface gravity (linear, cgs)
      numDeps -- number of depth points in tauRos
      tauRos  -- 2-row depth grid; row 1 is ln(tau)
    Returns scalePGas: row 0 = Pgas (linear), row 1 = ln(Pgas).
    """
    logEg = math.log(grav)  # base e!
    #//Theoretical radiative/convective model from Phoenix V15:
    phxSunPGas64 = [
        1.00000000000000005e-04, 7.28828683006412544e+01, 8.61732126528505984e+01,
        1.01843641855932976e+02, 1.20317369304629504e+02, 1.42093296011949696e+02,
        1.67758727999644384e+02, 1.98004769223716256e+02, 2.33644726494082176e+02,
        2.75635953319757664e+02, 3.25104809120938880e+02, 3.83378880706399168e+02,
        4.52022443862726592e+02, 5.32877321364649344e+02, 6.28113128741022208e+02,
        7.40284569989930496e+02, 8.72399144145001216e+02, 1.02799724165148560e+03,
        1.21124571517496000e+03, 1.42704756928025427e+03, 1.68117132827309248e+03,
        1.98040330055171296e+03, 2.33272402439094176e+03, 2.74752260927171264e+03,
        3.23584954067544384e+03, 3.81071167175796544e+03, 4.48742481283848128e+03,
        5.28403135449994368e+03, 6.22178478543013120e+03, 7.32571484052561408e+03,
        8.62531818498740864e+03, 1.01553497268350960e+04, 1.19567104697253520e+04,
        1.40775115384306991e+04, 1.65743702896828832e+04, 1.95139034178162464e+04,
        2.29742653550211872e+04, 2.70468752817440448e+04, 3.18381441391788864e+04,
        3.74704748898233472e+04, 4.40799661582952512e+04, 5.18080650391892096e+04,
        6.07793647633492224e+04, 7.10351288049853440e+04, 8.24259773567987968e+04,
        9.44866985169806080e+04, 1.06329924298695632e+05, 1.17862219382348656e+05,
        1.28295128203359424e+05, 1.36933948396180352e+05, 1.43493910023715958e+05,
        1.48487688700034048e+05, 1.52795575243316608e+05, 1.56932489940248512e+05,
        1.61140965195830048e+05, 1.65564070780028256e+05, 1.70312554701480352e+05,
        1.75486986284790656e+05, 1.81187218697219744e+05, 1.87518050413513344e+05,
        1.94593473563783808e+05, 2.02540389901047584e+05, 2.11500759107428064e+05,
        2.21643078023966592e+05]
    logPhxSunPGas64 = [math.log(p) for p in phxSunPGas64]
    logPhxSunTau64 = getLogPhxSunTau64()
    #// interpolate onto gS3 tauRos grid and re-scale with Teff:
    logPhxSunPGas = [0.0 for i in range(numDeps)]
    scalePGas = [[0.0 for i in range(numDeps)] for j in range(2)]
    for i in range(numDeps):
        logPhxSunPGas[i] = ToolBox.interpol(logPhxSunTau64, logPhxSunPGas64, tauRos[1][i])
        # Rescale for gravity in log space:
        scalePGas[1][i] = logEg + logPhxSunPGas[i] - phxSunLogEg()
        scalePGas[0][i] = math.exp(scalePGas[1][i])
    return scalePGas
def phxSunNe(grav, numDeps, tauRos, scaleTemp, kappaScale):
    """Electron number density from the Phoenix solar Pe(tau) structure.

    Interpolates ln(Pe) onto the tauRos grid, rescales it with surface
    gravity and the opacity scale factor, then converts to number density:
    ln(Ne) = ln(Pe) - ln(T) - Useful.logK() (presumably ln of Boltzmann's
    constant, i.e. Ne = Pe/(kT) -- confirm against the Useful module).

    Parameters:
      grav       -- target surface gravity (linear, cgs)
      numDeps    -- number of depth points in tauRos
      tauRos     -- 2-row depth grid; row 1 is ln(tau)
      scaleTemp  -- [linear, natural-log] temperature structure
      kappaScale -- linear opacity scaling factor
    Returns scaleNe: row 0 = Ne (linear), row 1 = ln(Ne).
    """
    logEg = math.log(grav)  # base e!
    logEkappaScale = math.log(kappaScale)
    #//Theoretical radiative/convective model from Phoenix V15:
    phxSunPe64 = [
        1.53086468021591745e-07, 5.66518458165471424e-03, 6.72808433760886656e-03,
        8.00271552708326656e-03, 9.51809762875982208e-03, 1.13117438884935648e-02,
        1.34299756939525680e-02, 1.59287848014678144e-02, 1.88751877391284448e-02,
        2.23491173128862976e-02, 2.64457686695698400e-02, 3.12779350532322240e-02,
        3.69791374171045888e-02, 4.37078139287801024e-02, 5.16503829681397248e-02,
        6.10221573903118336e-02, 7.20768505868849536e-02, 8.51123959415642752e-02,
        1.00475763241309840e-01, 1.18571138726675232e-01, 1.39870552376136714e-01,
        1.64923053015554560e-01, 1.94357063774820192e-01, 2.28928720249475840e-01,
        2.69525262128246720e-01, 3.17192228891198592e-01, 3.73192988074577856e-01,
        4.39058414038311360e-01, 5.16615873984964544e-01, 6.08066526878471680e-01,
        7.16264581324812416e-01, 8.44657163125294336e-01, 9.97267452897639808e-01,
        1.17915717019238848e+00, 1.39715732004723136e+00, 1.66026825646718432e+00,
        1.97886823850223904e+00, 2.36716912384854112e+00, 2.84540915928013805e+00,
        3.44853013665125120e+00, 4.21529199485384704e+00, 5.21488490421314560e+00,
        6.56660005867586432e+00, 8.55643059606379776e+00, 1.16931723772200080e+01,
        1.71629079266534368e+01, 2.75152019254691616e+01, 4.18720694941323264e+01,
        7.66283674228108288e+01, 1.45995186997127872e+02, 3.04766672331673792e+02,
        5.44151864837275328e+02, 8.17181982032739072e+02, 1.11216222784450608e+03,
        1.43633935534913856e+03, 1.79603721463325728e+03, 2.19692608617747040e+03,
        2.64548745663525184e+03, 3.14931730610757952e+03, 3.71721361233669376e+03,
        4.35932065708395904e+03, 5.08736399892079296e+03, 5.91634943413070720e+03,
        6.85104524590000384e+03]
    logPhxSunPe64 = [math.log(p) for p in phxSunPe64]
    logPhxSunTau64 = getLogPhxSunTau64()
    #// interpolate onto gS3 tauRos grid and re-scale with Teff:
    logPhxSunPe = [0.0 for i in range(numDeps)]
    logScalePe = [0.0 for i in range(numDeps)]
    scaleNe = [[0.0 for i in range(numDeps)] for j in range(2)]
    for i in range(numDeps):
        logPhxSunPe[i] = ToolBox.interpol(logPhxSunTau64, logPhxSunPe64, tauRos[1][i])
        # Rescale Pe with gravity and opacity scaling in log space:
        logScalePe[i] = logEg + logPhxSunPe[i] - phxSunLogEg() - logEkappaScale
        scaleNe[1][i] = logScalePe[i] - scaleTemp[1][i] - Useful.logK()
        scaleNe[0][i] = math.exp(scaleNe[1][i])
    return scaleNe
#//Try to recover the opacity as lambda_0 = 1200 nm:
def phxSunKappa(numDeps, tauRos, kappaScale):
    """Solar reference mass extinction coefficient (kappa) on the model tau grid.

    Recovers kappa at the reference wavelength lambda_0 = 1200 nm from the
    Phoenix V15 solar model by finite-differencing the relation
    dTau = kappa * rho * dr on the 64-point Phoenix grid, then interpolates
    ln(kappa) onto the caller's tauRos grid.

    Parameters:
    numDeps -- number of depth points in the target model grid
    tauRos -- 2 x numDeps structure; tauRos[1][i] is ln(tau) at depth i
    kappaScale -- linear opacity scaling factor (its log is subtracted out)

    Returns:
    2 x numDeps list: element [0] holds linear kappa, element [1] ln(kappa).
    """
    logEkappaScale = math.log(kappaScale)
    #//Theoretical radiative/convective model from Phoenix V15:
    # Mass density (g cm^-3) on the 64-point Phoenix depth grid:
    phxSunRho64 = [
        4.13782346832222649e-16, 3.02095569469690462e-10, 3.54633225055968270e-10,
        4.15928280610231993e-10, 4.87569895799879155e-10, 5.71381142733345291e-10,
        6.69468927495419999e-10, 7.84278468388299388e-10, 9.18654436245877140e-10,
        1.07590983297567878e-09, 1.25990158939278389e-09, 1.47513757382262481e-09,
        1.72688539188771193e-09, 2.02128936476074103e-09, 2.36554000030610158e-09,
        2.76809615861929229e-09, 3.23884396019102352e-09, 3.78934920783997866e-09,
        4.43317360103421215e-09, 5.18621173362546736e-09, 6.06707380164391496e-09,
        7.09757215466433105e-09, 8.30337600953291647e-09, 9.71426731449415417e-09,
        1.13650770268615465e-08, 1.32964932176367733e-08, 1.55557163673284530e-08,
        1.81974840999693492e-08, 2.12855768344032029e-08, 2.48940684847852482e-08,
        2.91068454381155637e-08, 3.40213170202104799e-08, 3.97519122004400661e-08,
        4.64290866159173997e-08, 5.41967343519845744e-08, 6.32144869975830899e-08,
        7.36729431582295057e-08, 8.57774421976652924e-08, 9.97399445761737017e-08,
        1.15721981027072251e-07, 1.33967659681056212e-07, 1.54620178670780798e-07,
        1.77690495649821781e-07, 2.02608223525831620e-07, 2.28481547026651195e-07,
        2.53309018291389784e-07, 2.74195019891415717e-07, 2.94373976046088894e-07,
        3.05614181338722779e-07, 3.09912387277346887e-07, 3.05484245799381785e-07,
        3.00519445088246902e-07, 2.98007120264342719e-07, 2.97336159154754909e-07,
        2.97854109132361140e-07, 2.99327766949861546e-07, 3.01691329467384893e-07,
        3.04944348605014908e-07, 3.09125225055924192e-07, 3.14302162196028050e-07,
        3.20569231575000568e-07, 3.28044919674719785e-07, 3.36858977566225440e-07,
        3.47271781807407172e-07]
    # Radius (cm) on the same grid; decreases with index (index runs inward):
    phxSunRadius64 = [
        9.98760000000000000e+10, 9.98660572490945152e+10, 9.98645871807186304e+10,
        9.98631098643980160e+10, 9.98616245003269760e+10, 9.98601306458076032e+10,
        9.98586280682994048e+10, 9.98571166428681216e+10, 9.98555962828737792e+10,
        9.98540668955362944e+10, 9.98525283799022080e+10, 9.98509805586940416e+10,
        9.98494232096872704e+10, 9.98478561022866944e+10, 9.98462789875034752e+10,
        9.98446916403608064e+10, 9.98430938763377024e+10, 9.98414855440511616e+10,
        9.98398665329129600e+10, 9.98382367818977152e+10, 9.98365962762478464e+10,
        9.98349450434777856e+10, 9.98332831693342848e+10, 9.98316107506358144e+10,
        9.98299278514395904e+10, 9.98282344996977408e+10, 9.98265306530218624e+10,
        9.98248161610832000e+10, 9.98230907740896512e+10, 9.98213541602841472e+10,
        9.98196058171550848e+10, 9.98178450629064448e+10, 9.98160711682936960e+10,
        9.98142833645708160e+10, 9.98124806655167488e+10, 9.98106617425436544e+10,
        9.98088251712680448e+10, 9.98069695137641728e+10, 9.98050931816160256e+10,
        9.98031941655960192e+10, 9.98012715884401664e+10, 9.97993275763000704e+10,
        9.97973698841808128e+10, 9.97954186071983616e+10, 9.97935139683624704e+10,
        9.97917197632915456e+10, 9.97901090800493440e+10, 9.97886636105590528e+10,
        9.97874361487011200e+10, 9.97864521731274880e+10, 9.97857037165240960e+10,
        9.97851119909964288e+10, 9.97845890321633152e+10, 9.97840832513957504e+10,
        9.97835682848038784e+10, 9.97830286360697344e+10, 9.97824528501662336e+10,
        9.97818311493811456e+10, 9.97811545315540224e+10, 9.97804143368400768e+10,
        9.97796020159347072e+10, 9.97787090120484864e+10, 9.97777268638511104e+10,
        9.97766460582020224e+10]
    numPhxDeps = len(phxSunRadius64)
    phxSunKappa64 = [0.0 for i in range(numPhxDeps)]
    logPhxSunKappa64 = [0.0 for i in range(numPhxDeps)]
    #//double[] logPhxSunRho64 = new double[numPhxDeps];
    #//double[] logPhxSunRadius64 = new double[numPhxDeps];
    phxSunTau64 = getPhxSunTau64()
    logPhxSunTau64 = getLogPhxSunTau64()
    #//Fix to get right depth scale and right line strengths:
    #// Yeah - everywhere ya go - opacity fudge
    # Empirical correction factor applied (in the log) to every kappa value:
    fudge = 0.25
    logFudge = math.log(fudge)
    #double deltaRho, deltaRadius, deltaTau, logDeltaRho, logDeltaRadius, logDeltaTau;
    logE = math.log10(math.e)
    # Finite differences between adjacent Phoenix depths; each quantity is
    # re-normalized before subtracting to limit loss of precision, then
    # restored to CGS units:
    for i in range(1, numPhxDeps):
        #//Renormalize radii before taking difference
        #//Caution: Radius *decreases* with increasing i (inward) and we'll be taking the log:
        deltaRadius = (1.0e-11 * phxSunRadius64[i - 1]) - (1.0e-11 * phxSunRadius64[i])
        deltaRadius = abs(deltaRadius)
        #//restore to cm:
        deltaRadius = 1.0e11 * deltaRadius
        #//Renormalize before taking rho difference
        deltaRho = (1.0e9 * phxSunRho64[i]) - (1.0e9 * phxSunRho64[i - 1])
        deltaRho = abs(deltaRho)
        #//Restore g/cm^3:
        deltaRho = 1.0e-9 * deltaRho
        #//Renormalize before taking rho difference
        deltaTau = (1.0e2 * phxSunTau64[i]) - (1.0e2 * phxSunTau64[i - 1])
        deltaTau = abs(deltaTau)
        deltaTau = 1.0e-2 * deltaTau
        logDeltaRadius = math.log(deltaRadius)
        logDeltaRho = math.log(deltaRho)
        logDeltaTau = math.log(deltaTau)
        # ln(kappa) = ln(dTau) - ln(dRho) - ln(dr), minus the opacity scale,
        # plus the empirical fudge:
        logPhxSunKappa64[i] = logDeltaTau - logDeltaRho - logDeltaRadius - logEkappaScale + logFudge
        phxSunKappa64[i] = math.exp(logPhxSunKappa64[i])
        #//System.out.println("logPhxSunKappa64[i] " + logE*logPhxSunKappa64[i]);
    # First point has no inward difference; copy the second point:
    logPhxSunKappa64[0] = logPhxSunKappa64[1]
    phxSunKappa64[0] = phxSunKappa64[1]
    #// interpolate onto gS3 tauRos grid and re-scale with Teff:
    phxSunKappa = [ [0.0 for i in range(numDeps)] for j in range(2)]
    for i in range(numDeps):
        phxSunKappa[1][i] = ToolBox.interpol(logPhxSunTau64, logPhxSunKappa64, tauRos[1][i])
        phxSunKappa[0][i] = math.exp(phxSunKappa[1][i])
        #//System.out.println("phxSunKappa[1][i], i= " + i + " " + logE * phxSunKappa[1][i]);
    return phxSunKappa
| 17,041
| 51.76161
| 116
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Daxpy.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 17 15:38:28 2019
@author:
"""
import math
"""
*> \brief \b DAXPY
*
* =========== DOCUMENTATION ===========
*
* Online html documentation available at
* http://www.netlib.org/lapack/explore-html/
*
* Definition:
* ===========
*
* SUBROUTINE DAXPY(N,DA,DX,INCX,DY,INCY)
*
* .. Scalar Arguments ..
* DOUBLE PRECISION DA
* INTEGER INCX,INCY,N
* ..
* .. Array Arguments ..
* DOUBLE PRECISION DX(*),DY(*)
* ..
*
*
*> \par Purpose:
* =============
*>
*> \verbatim
*>
*> DAXPY constant times a vector plus a vector.
*> uses unrolled loops for increments equal to one.
*> \endverbatim
*
* Arguments:
* ==========
*
*> \param[in] N
*> \verbatim
*> N is INTEGER
*> number of elements in input vector(s)
*> \endverbatim
*>
*> \param[in] DA
*> \verbatim
*> DA is DOUBLE PRECISION
*> On entry, DA specifies the scalar alpha.
*> \endverbatim
*>
*> \param[in] DX
*> \verbatim
*> DX is DOUBLE PRECISION array, dimension ( 1 + ( N - 1
*)*abs( INCX ) )
*> \endverbatim
*>
*> \param[in] INCX
*> \verbatim
*> INCX is INTEGER
*> storage spacing between elements of DX
*> \endverbatim
*>
*> \param[in,out] DY
*> \verbatim
*> DY is DOUBLE PRECISION array, dimension ( 1 + ( N - 1)*abs( INCY ) )
*> \endverbatim
*>
*> \param[in] INCY
*> \verbatim
*> INCY is INTEGER
*> storage spacing between elements of DY
*> \endverbatim
*
* Authors:
* ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date November 2017
*
*> \ingroup double_blas_level1
*
*> \par Further Details:
* =====================
*>
*> \verbatim
*>
*> jack dongarra, linpack, 3/11/78.
*> modified 12/3/93, array(1) declarations changed to array(*)
*> \endverbatim
*>
* =====================================================================
"""
#SUBROUTINE daxpy(N,DA,DX,INCX,DY,INCY)
def daxpy(n, da, dx, incx, dy, incy):
    """Compute dy := da*dx + dy in place (BLAS level-1 DAXPY).

    Python port of the reference Fortran DAXPY (jack dongarra, linpack,
    3/11/78), adjusted for 0-based indexing.

    Parameters:
    n -- number of elements to process
    da -- scalar multiplier alpha
    dx -- input vector (list); not modified
    incx -- storage spacing between elements of dx (may be negative)
    dy -- input/output vector (list); updated in place
    incy -- storage spacing between elements of dy (may be negative)

    Returns:
    dy -- the same list object, mutated.
    """
    # Quick returns, as in the Fortran original:
    if n <= 0 or da == 0.0e0:
        return dy
    if (incx == 1) and (incy == 1):
        # Code for both increments equal to 1: clean-up loop for the
        # remainder, then a 4-way unrolled main loop (mirrors the Fortran).
        m = n % 4
        for i in range(m):
            dy[i] = dy[i] + da*dx[i]
        for i in range(m, n, 4):
            dy[i] = dy[i] + da*dx[i]
            dy[i+1] = dy[i+1] + da*dx[i+1]
            dy[i+2] = dy[i+2] + da*dx[i+2]
            dy[i+3] = dy[i+3] + da*dx[i+3]
    else:
        # Code for unequal increments or equal increments not equal to 1.
        # BUGFIX: the Fortran original starts at index 1 (and uses
        # (-n+1)*inc + 1 for negative increments); Python lists are 0-based,
        # so start at 0 and drop the "+ 1", otherwise the first element is
        # skipped and the walk overruns the end of the vector.
        ix = 0
        iy = 0
        if incx < 0:
            ix = (1 - n)*incx  # start at the far end when stepping backwards
        if incy < 0:
            iy = (1 - n)*incy
        for i in range(n):
            dy[iy] = dy[iy] + da*dx[ix]
            ix = ix + incx
            iy = iy + incy
    return dy
| 5,189
| 24.441176
| 90
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Ddot.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 17 17:11:14 2019
@author:
"""
import math
"""
*> \brief \b DDOT
*
* =========== DOCUMENTATION ===========
*
* Online html documentation available at
* http://www.netlib.org/lapack/explore-html/
*
* Definition:
* ===========
*
* DOUBLE PRECISION FUNCTION DDOT(N,DX,INCX,DY,INCY)
*
* .. Scalar Arguments ..
* INTEGER INCX,INCY,N
* ..
* .. Array Arguments ..
* DOUBLE PRECISION DX(*),DY(*)
* ..
*
*
*> \par Purpose:
* =============
*>
*> \verbatim
*>
*> DDOT forms the dot product of two vectors.
*> uses unrolled loops for increments equal to one.
*> \endverbatim
*
* Arguments:
* ==========
*
*> \param[in] N
*> \verbatim
*> N is INTEGER
*> number of elements in input vector(s)
*> \endverbatim
*>
*> \param[in] DX
*> \verbatim
*> DX is DOUBLE PRECISION array, dimension ( 1 + ( N - 1
*)*abs( INCX ) )
*> \endverbatim
*>
*> \param[in] INCX
*> \verbatim
*> INCX is INTEGER
*> storage spacing between elements of DX
*> \endverbatim
*>
*> \param[in] DY
*> \verbatim
*> DY is DOUBLE PRECISION array, dimension ( 1 + ( N - 1
*)*abs( INCY ) )
*> \endverbatim
*>
*> \param[in] INCY
*> \verbatim
*> INCY is INTEGER
*> storage spacing between elements of DY
*> \endverbatim
*
* Authors:
* ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date November 2017
*
*> \ingroup double_blas_level1
*
*> \par Further Details:
* =====================
*>
*> \verbatim
*>
*> jack dongarra, linpack, 3/11/78.
*> modified 12/3/93, array(1) declarations changed to array(*)
*> \endverbatim
*>
* =====================================================================
"""
#DOUBLE PRECISION FUNCTION ddot(N,DX,INCX,DY,INCY)
def ddot(n, dx, incx, dy, incy):
    """Return the dot product of vectors dx and dy (BLAS level-1 DDOT).

    Python port of the reference Fortran DDOT (jack dongarra, linpack,
    3/11/78), adjusted for 0-based indexing.

    Parameters:
    n -- number of elements to process
    dx -- first input vector (list)
    incx -- storage spacing between elements of dx (may be negative)
    dy -- second input vector (list)
    incy -- storage spacing between elements of dy (may be negative)

    Returns:
    The scalar sum of dx[i]*dy[i] over the selected elements (0.0 if n <= 0).
    """
    dtemp = 0.0e0
    if n <= 0:
        return dtemp
    if (incx == 1) and (incy == 1):
        # Code for both increments equal to 1: clean-up loop for the
        # remainder, then a 5-way unrolled main loop (mirrors the Fortran).
        m = n % 5
        for i in range(m):
            dtemp = dtemp + dx[i]*dy[i]
        for i in range(m, n, 5):
            dtemp = dtemp + dx[i]*dy[i] + dx[i+1]*dy[i+1] +\
            dx[i+2]*dy[i+2] + dx[i+3]*dy[i+3] + dx[i+4]*dy[i+4]
    else:
        # Code for unequal increments or equal increments not equal to 1.
        # BUGFIX: the negative-increment start formula was left in its
        # 1-based Fortran form ((-n+1)*inc + 1), which is off by one in
        # Python and indexes past the end of the list (e.g. n=3, incx=-1
        # gave start index 3). Use the 0-based form (1-n)*inc.
        ix = 0
        iy = 0
        if incx < 0:
            ix = (1 - n)*incx  # start at the far end when stepping backwards
        if incy < 0:
            iy = (1 - n)*incy
        for i in range(n):
            dtemp = dtemp + dx[ix]*dy[iy]
            ix = ix + incx
            iy = iy + incy
    return dtemp
| 4,425
| 21.353535
| 77
|
py
|
ChromaStarPy
|
ChromaStarPy-master/ScaleT10000.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 17:28:32 2017
Initializes and re-scales a Phoenix LTE spherical reference model of
* Teff=10000K, log(g)=4.0, [Fe/H]=0.0, xi=1.0 km/s, l=1.0H_p, M=1M_Sun, R=1.1613E+11cm
@author: ishort
"""
import math
import ToolBox
import Useful
def phxRefTeff():
    """Effective temperature (K) of the Phoenix reference model."""
    return 1.0e4
def phxRefLogEg():
    """Natural log of the reference surface gravity (log10 g = 4.0)."""
    return 4.0 * math.log(10.0)  # convert base-10 exponent to base e
#//He abundance from Grevesse Asplund et al 2010
def phxRefLogAHe():
    """Natural log of the reference He abundance on the A_12 scale.

    Value 10.93 is from Grevesse, Asplund et al. (2010).
    """
    return (10.93 - 12.0) * math.log(10.0)  # base-10 log abundance to base e
def getPhxRefTau64():
    """Return the 64-point Phoenix reference optical-depth grid.

    Optical depths tau_12000 at the reference wavelength
    lambda_0 = 1200 nm; the first point is 0.0 (the surface).
    """
    # //Corresponding Tau_12000 grid (ie. lambda_0 = 1200 nm):
    phxRefTau64 = [
        0.00000000000000000E+00, 9.99999999999999955E-07, 1.34596032415536424E-06,
        1.81160919420041334E-06, 2.43835409826882661E-06, 3.28192787251147086E-06,
        4.41734470314007309E-06, 5.94557070854439435E-06, 8.00250227816105150E-06,
        1.07710505603676914E-05, 1.44974067037263169E-05, 1.95129342263596216E-05,
        2.62636352765333530E-05, 3.53498110503010939E-05, 4.75794431400941383E-05,
        6.40400427119728238E-05, 8.61953566475303262E-05, 1.16015530173997159E-04,
        1.56152300600049659E-04, 2.10174801133248699E-04, 2.82886943462596935E-04,
        3.80754602122237182E-04, 5.12480587696093125E-04, 6.89778537938765847E-04,
        9.28414544519474451E-04, 1.24960914129198684E-03, 1.68192432488086874E-03,
        2.26380340952144670E-03, 3.04698957090350801E-03, 4.10112707055130046E-03,
        5.51995432128156785E-03, 7.42963950759494875E-03, 1.00000000000000002E-02,
        1.34596032415536422E-02, 1.81160919420041318E-02, 2.43835409826882663E-02,
        3.28192787251147047E-02, 4.41734470314006436E-02, 5.94557070854439401E-02,
        8.00250227816105275E-02, 1.07710505603676912E-01, 1.44974067037263149E-01,
        1.95129342263596212E-01, 2.62636352765332981E-01, 3.53498110503010221E-01,
        4.75794431400941464E-01, 6.40400427119728333E-01, 8.61953566475303190E-01,
        1.16015530173997150E+00, 1.56152300600049654E+00, 2.10174801133248712E+00,
        2.82886943462596641E+00, 3.80754602122236818E+00, 5.12480587696092638E+00,
        6.89778537938765801E+00, 9.28414544519474383E+00, 1.24960914129198670E+01,
        1.68192432488086894E+01, 2.26380340952144650E+01, 3.04698957090350540E+01,
        4.10112707055129562E+01, 5.51995432128157333E+01, 7.42963950759495049E+01,
        1.00000000000000000E+02]
    return phxRefTau64
def getLogPhxRefTau64():
    """Return the natural log of the Phoenix reference tau_12000 grid.

    The first grid point is tau = 0 and has no logarithm; it is
    extrapolated below the second point using the grid's average
    per-point log spacing.
    """
    tauGrid = getPhxRefTau64()
    nDep = len(tauGrid)
    logTauGrid = [0.0] + [math.log(tau) for tau in tauGrid[1:]]
    # Extrapolate the surface point from the mean log step of the grid:
    logTauGrid[0] = logTauGrid[1] - (logTauGrid[nDep - 1] - logTauGrid[1]) / nDep
    return logTauGrid
def phxRefTemp(teff, numDeps, tauRos):
    """Kinetic temperature structure re-scaled from the Phoenix reference model.

    Interpolates the Phoenix V15 (Teff = 10000 K) temperature structure onto
    the caller's tau grid, then scales it linearly by teff / phxRefTeff().

    Parameters:
    teff -- target effective temperature (K)
    numDeps -- number of depth points in the target grid
    tauRos -- 2 x numDeps structure; tauRos[1][i] is ln(tau) at depth i

    Returns:
    2 x numDeps list scaleTemp: element [0] linear T (K), element [1] ln(T).
    """
    logE = math.log10(math.e)
    #//Theoretical radiative/convective model from Phoenix V15:
    # Temperature (K) on the 64-point Phoenix depth grid:
    phxRefTemp64 = [
        6.07574016685149309E+03, 6.07574016685149309E+03, 6.13264671606194861E+03,
        6.20030362747541585E+03, 6.27534705504544127E+03, 6.35396254937768026E+03,
        6.43299900128272293E+03, 6.51018808525609893E+03, 6.58411555606889124E+03,
        6.65406717610081068E+03, 6.71983498258185136E+03, 6.78154367852633823E+03,
        6.83954193198123903E+03, 6.89437231818902364E+03, 6.94676889243451842E+03,
        6.99759489202792247E+03, 7.04769490055547158E+03, 7.09773520027041195E+03,
        7.14812062339764907E+03, 7.19901426577775601E+03, 7.25041827414427917E+03,
        7.30225171801659872E+03, 7.35440093819652611E+03, 7.40675066225539558E+03,
        7.45920456139609178E+03, 7.51166464185182758E+03, 7.56404228766520191E+03,
        7.61627005664532771E+03, 7.66833575187113820E+03, 7.72034173334201841E+03,
        7.77258785750414881E+03, 7.82555139374063583E+03, 7.87986936059489017E+03,
        7.93639246968124371E+03, 7.99620846303960116E+03, 8.06052820253916161E+03,
        8.13047124123426238E+03, 8.20741189262034641E+03, 8.29307358429898159E+03,
        8.38980788216330802E+03, 8.49906053657168923E+03, 8.62314483632361771E+03,
        8.76456384216990409E+03, 8.92693370905029224E+03, 9.11177170396923248E+03,
        9.32167977041711492E+03, 9.56236981551314602E+03, 9.82432656703466455E+03,
        1.01311427939962559E+04, 1.04299661074183350E+04, 1.08355089220389909E+04,
        1.12094886773674716E+04, 1.16360710406256258E+04, 1.20991237739366334E+04,
        1.25891111265208237E+04, 1.31070008299570563E+04, 1.36522498965801387E+04,
        1.42233473670298790E+04, 1.48188302103200131E+04, 1.54423659243804523E+04,
        1.60892587452310745E+04, 1.67828517694842230E+04, 1.74930217234773954E+04,
        1.82922661949382236E+04]
    logPhxRefTau64 = getLogPhxRefTau64()
    #// interpolate onto gS3 tauRos grid and re-scale with Teff:
    # NOTE: this initial allocation is immediately replaced by the
    # comprehension below; it is kept only as a size declaration.
    phxRefTemp = [ 0.0 for i in range(numDeps)]
    scaleTemp = [ [ 0.0 for i in range(numDeps)] for j in range(2) ]
    #for i in range(numDeps):
    #    phxRefTemp[i] = ToolBox.interpol(logPhxRefTau64, phxRefTemp64, tauRos[1][i])
    #    scaleTemp[0][i] = teff * phxRefTemp[i] / phxRefTeff()
    #    scaleTemp[1][i] = math.log(scaleTemp[0][i])
    phxRefTemp = [ ToolBox.interpol(logPhxRefTau64, phxRefTemp64, x) for x in tauRos[1] ]
    scaleTemp[0] = [ teff * x / phxRefTeff() for x in phxRefTemp ]
    scaleTemp[1] = [ math.log(x) for x in scaleTemp[0] ]
    #//System.out.println("tauRos[1][i] " + logE * tauRos[1][i] + " scaleTemp[1][i] " + logE * scaleTemp[1][i]);
    return scaleTemp
def phxRefPGas(grav, zScale, logAHe, numDeps, tauRos):
    """Gas pressure structure re-scaled from the Phoenix reference model.

    Interpolates the Phoenix V15 reference gas-pressure structure onto the
    caller's tau grid and re-scales it with surface gravity (depth-dependent
    exponent), metallicity, and He abundance.

    Parameters:
    grav -- target surface gravity (linear, CGS)
    zScale -- linear metallicity scale factor
    logAHe -- natural log of target He abundance (A_12 scale offset)
    numDeps -- number of depth points in the target grid
    tauRos -- 2 x numDeps structure; tauRos[1][i] is ln(tau) at depth i

    Returns:
    2 x numDeps list scalePGas: element [0] linear P_gas, element [1] ln(P_gas).
    """
    logE = math.log10(math.e)
    logEg = math.log(grav) #//base e!
    AHe = math.exp(logAHe)
    refAHe = math.exp(phxRefLogAHe())
    logZScale = math.log(zScale)
    #//Theoretical radiative/convective model from Phoenix V15:
    # Gas pressure (dyn cm^-2) on the 64-point Phoenix depth grid:
    phxRefPGas64 = [
        1.00000000000000005E-04, 8.32127743125684882E-02, 1.29584527404206007E-01,
        1.94435381478779895E-01, 2.81524759872055830E-01, 3.94850766488002047E-01,
        5.39098197994885120E-01, 7.20109114447812781E-01, 9.45331395103965466E-01,
        1.22424260721948497E+00, 1.56877812718506826E+00, 1.99379948180689048E+00,
        2.51761637911653136E+00, 3.16251087800302599E+00, 3.95513966878889667E+00,
        4.92671520309637767E+00, 6.11303768406991388E+00, 7.55464145673977505E+00,
        9.29736005428628154E+00, 1.13934670418806956E+01, 1.39033471883101818E+01,
        1.68975909460311797E+01, 2.04594801623940121E+01, 2.46878880919212804E+01,
        2.97005964646718432E+01, 3.56383534114781142E+01, 4.26698208468708984E+01,
        5.09974403334007107E+01, 6.08640463074419387E+01, 7.25594340179816726E+01,
        8.64248329112294158E+01, 1.02854593091977520E+02, 1.22294652156180661E+02,
        1.45234045163109670E+02, 1.72184927273123520E+02, 2.03652334583264832E+02,
        2.40105656346438934E+02, 2.81936164286554344E+02, 3.29393094590693863E+02,
        3.82482413201705356E+02, 4.40963324580460835E+02, 5.04333229685725428E+02,
        5.71827998329611432E+02, 6.42424030136117835E+02, 7.15115448265608620E+02,
        7.89188190751975185E+02, 8.64179477829598227E+02, 9.41037808653716070E+02,
        1.02093026109089942E+03, 1.10808816566702853E+03, 1.20591338801728261E+03,
        1.32157321934523725E+03, 1.46400967396971282E+03, 1.64395527893530380E+03,
        1.87431044562489683E+03, 2.16986659968736876E+03, 2.54753164223200429E+03,
        3.02667796755900645E+03, 3.62964225373483487E+03, 4.38288420138537458E+03,
        5.31730879832813844E+03, 6.47251190142057658E+03, 7.89413608165941059E+03,
        9.64747840003540659E+03]
    logPhxRefTau64 = getLogPhxRefTau64()
    numPhxDeps = len(phxRefPGas64) #//yeah, I know, 64, but that could change!
    logPhxRefPGas64 = [ 0.0 for i in range(numPhxDeps) ]
    #for i in range(numPhxDeps):
    #    logPhxRefPGas64[i] = math.log(phxRefPGas64[i])
    logPhxRefPGas64 = [ math.log(x) for x in phxRefPGas64 ]
    #// interpolate onto gS3 tauRos grid and re-scale with Teff:
    phxRefPGas = [0.0 for i in range(numDeps)]
    logPhxRefPGas = [0.0 for i in range(numDeps)]
    scalePGas = [ [ 0.0 for i in range(numDeps)] for j in range(2) ]
    #//exponents in scaling with g:
    gexpTop = 0.53 #//top of model
    gexpBottom = 0.85 #//bottom of model
    gexpRange = (gexpBottom - gexpTop)
    tauLogRange = tauRos[1][numDeps-1] - tauRos[1][0]
    #double thisGexp;
    #// factor for scaling with A_He:
    logHeDenom = 0.666667 * math.log(1.0 + 4.0*refAHe)
    logPhxRefPGas = [ ToolBox.interpol(logPhxRefTau64, logPhxRefPGas64, x) for x in tauRos[1] ]
    for i in range(numDeps):
        # NOTE(review): this repeats the interpolation already done by the
        # comprehension above; the result is identical, just redundant work.
        logPhxRefPGas[i] = ToolBox.interpol(logPhxRefTau64, logPhxRefPGas64, tauRos[1][i])
        # Gravity-scaling exponent varies linearly in ln(tau), from gexpTop
        # at the surface to gexpBottom at depth:
        thisGexp = gexpTop + gexpRange * (tauRos[1][i] - tauRos[1][0]) / tauLogRange
        #//scaling with g
        scalePGas[1][i] = thisGexp*logEg + logPhxRefPGas[i] - thisGexp*phxRefLogEg()
        #//scaling with zscl:
        #scalePGas[1][i] = -0.5*logZScale + scalePGas[1][i]
        ##//scaling with A_He:
        #scalePGas[1][i] = 0.666667 * math.log(1.0 + 4.0*AHe) + scalePGas[1][i] - logHeDenom
        #scalePGas[0][i] = math.exp(scalePGas[1][i])
        #//System.out.println("scalePGas[1][i] " + logE * scalePGas[1][i])
    # Metallicity and He-abundance corrections applied vectorized after the loop:
    scalePGas[1] = [ -0.5*logZScale + x for x in scalePGas[1] ]
    scalePGas[1] = [ 0.666667 * math.log(1.0 + 4.0*AHe) + x - logHeDenom for x in scalePGas[1] ]
    scalePGas[0] = [ math.exp(x) for x in scalePGas[1] ]
    #Carefull here - P at upper boundary can be an underestimate, but it must not be greater than value at next depth in!
    if (scalePGas[0][0] >= scalePGas[0][1]):
        scalePGas[0][0] = 0.5 * scalePGas[0][1];
        scalePGas[1][0] = math.log(scalePGas[0][0]);
    return scalePGas
def phxRefPe(teff, grav, numDeps, tauRos, zScale, logAHe):
    """Electron pressure structure re-scaled from the Phoenix reference model.

    Interpolates the Phoenix V15 reference electron-pressure structure onto
    the caller's tau grid and re-scales it with surface gravity.

    Parameters:
    teff -- target effective temperature (K)
    grav -- target surface gravity (linear, CGS)
    numDeps -- number of depth points in the target grid
    tauRos -- 2 x numDeps structure; tauRos[0] linear tau, tauRos[1] ln(tau)
    zScale -- linear metallicity scale factor
    logAHe -- natural log of target He abundance

    Returns:
    2 x numDeps list scalePe: element [0] linear P_e, element [1] ln(P_e).

    NOTE(review): the final list rebuild below ("scalePe[1] = [ logEg + x
    - phxRefLogEg() for x in logPhxRefPe ]") replaces everything computed
    in the loop and in the two preceding comprehensions, so the Teff-,
    zScale- and A_He-dependent scaling is effectively discarded and only a
    simple gravity scaling survives. This mirrors commented-out lines in
    the loop, so it may be deliberate -- confirm against the original Java.
    """
    logE = math.log10(math.e)
    logEg = math.log(grav) #//base e!
    AHe = math.exp(logAHe)
    refAHe = math.exp(phxRefLogAHe())
    logZScale = math.log(zScale)
    #//Theoretical radiative/convective model from Phoenix V15:
    # Electron pressure (dyn cm^-2) on the 64-point Phoenix depth grid:
    phxRefPe64 = [
        4.77258390479251340E-05, 1.54333794509103339E-02, 2.24384775218179552E-02,
        3.24056217848841463E-02, 4.62639509784656192E-02, 6.49897301016105072E-02,
        8.96001972148401798E-02, 1.21161157265374353E-01, 1.60825358340301261E-01,
        2.09891146620685975E-01, 2.69867426146356171E-01, 3.42538888354808724E-01,
        4.30045384007358256E-01, 5.35006986797593842E-01, 6.60704782988379868E-01,
        8.11262305821688567E-01, 9.91741961224463009E-01, 1.20813527252446407E+00,
        1.46731521914247520E+00, 1.77705126262480850E+00, 2.14614122290851617E+00,
        2.58462667298359561E+00, 3.10405210627260297E+00, 3.71777653138435804E+00,
        4.44135288803457673E+00, 5.29279499891786465E+00, 6.29303772366266312E+00,
        7.46652989782078702E+00, 8.84221515332682451E+00, 1.04552216626003140E+01,
        1.23496848557054300E+01, 1.45813048229500843E+01, 1.72206436663779385E+01,
        2.03589457441922157E+01, 2.41156208954111868E+01, 2.86442876094033458E+01,
        3.41355927487861948E+01, 4.08398462152914732E+01, 4.90908766488638761E+01,
        5.93486059459067832E+01, 7.21405304518226842E+01, 8.81824952094146681E+01,
        1.08367129768339950E+02, 1.33856171619767082E+02, 1.65693080738235807E+02,
        2.04943252558813072E+02, 2.52705001145053956E+02, 3.07224623951268654E+02,
        3.70334137141753217E+02, 4.33722318385145741E+02, 5.08910395587106336E+02,
        5.82220694357564639E+02, 6.65278728107771599E+02, 7.62124991657425880E+02,
        8.79654481582760809E+02, 1.02622262715821921E+03, 1.21099204341081804E+03,
        1.44432886438589208E+03, 1.73838904022049860E+03, 2.10808802008476914E+03,
        2.57102379769462232E+03, 3.14976025581092108E+03, 3.86645770963505538E+03,
        4.75493678618616923E+03]
    logPhxRefTau64 = getLogPhxRefTau64();
    numPhxDeps = len(phxRefPe64) #//yeah, I know, 64, but that could change!
    logPhxRefPe64 = [0.0 for i in range(numPhxDeps)]
    #for i in range(numPhxDeps):
    #    logPhxRefPe64[i] = math.log(phxRefPe64[i])
    logPhxRefPe64 = [ math.log(x) for x in phxRefPe64 ]
    #// interpolate onto gS3 tauRos grid and re-scale with Teff:
    phxRefPe = [0.0 for i in range(numDeps)]
    logPhxRefPe = [0.0 for i in range(numDeps)]
    scalePe = [ [0.0 for i in range(numDeps)] for j in range(2) ]
    #//exponents in scaling with Teff ONLY VALID FOR Teff < 10000K:
    omegaTaum1 = 0.0012 #//log_10(tau) < 0.1
    omegaTaup1 = 0.0015 #//log_10(tau) > 1.0
    omegaRange = (omegaTaup1-omegaTaum1)
    lonOfM1 = math.log(0.1)
    #//exponents in scaling with g:
    gexpTop = 0.53 #//top of model
    gexpBottom = 0.82 #//bottom of model
    gexpRange = (gexpBottom - gexpTop)
    tauLogRange = tauRos[1][numDeps-1] - tauRos[1][0]
    #double thisGexp;
    thisOmega = omegaTaum1 #//default initialization
    #// factor for scaling with A_He:
    logHeDenom = 0.333333 * math.log(1.0 + 4.0*refAHe)
    logPhxRefPe = [ ToolBox.interpol(logPhxRefTau64, logPhxRefPe64, x) for x in tauRos[1] ]
    for i in range(numDeps):
        #logPhxRefPe[i] = ToolBox.interpol(logPhxRefTau64, logPhxRefPe64, tauRos[1][i])
        # Gravity-scaling exponent varies linearly in ln(tau):
        thisGexp = gexpTop + gexpRange * (tauRos[1][i] - tauRos[1][0]) / tauLogRange
        #//scaling with g
        scalePe[1][i] = thisGexp*logEg + logPhxRefPe[i] - thisGexp*phxRefLogEg()
        #//scale with Teff:
        if (teff < 10000.0):
            if (tauRos[0][i] < 0.1):
                thisOmega = omegaTaum1
            if (tauRos[0][i] > 10.0):
                thisOmega = omegaTaup1
            if ( (tauRos[0][i] >= 0.1) and (tauRos[0][i] <= 10.0) ):
                thisOmega = omegaTaum1 + omegaRange * (tauRos[1][i] - lonOfM1) / tauLogRange
            scalePe[1][i] = thisOmega*teff + scalePe[1][i] - thisOmega*phxRefTeff()
        #//scaling with zscl:
        #scalePe[1][i] = 0.5*logZScale + scalePe[1][i]
        #//scaling with A_He:
        #scalePe[1][i] = 0.333333 * math.log(1.0 + 4.0*AHe) + scalePe[1][i] - logHeDenom
        #scalePe[1][i] = logEg + logPhxRefPe[i] - phxRefLogEg()
        #scalePe[0][i] = math.exp(scalePe[1][i])
    scalePe[1] = [ 0.5*logZScale + x for x in scalePe[1] ]
    scalePe[1] = [ 0.333333 * math.log(1.0 + 4.0*AHe) + x - logHeDenom for x in scalePe[1] ]
    # See NOTE(review) in the docstring: this rebuild overwrites all of the
    # scaling above with a plain gravity-only re-scaling of logPhxRefPe.
    scalePe[1] = [ logEg + x - phxRefLogEg() for x in logPhxRefPe ]
    scalePe[0] = [ math.exp(x) for x in scalePe[1] ]
    #//System.out.println("scaleNe[1][i] " + logE * scaleNe[1][i]);
    return scalePe
def phxRefNe(numDeps, scaleTemp, scalePe):
    """Electron number density from electron pressure and temperature.

    Applies the ideal-gas relation n_e = P_e / (k T) in log form,
    depth point by depth point.

    Parameters:
    numDeps -- number of depth points
    scaleTemp -- 2 x numDeps temperature structure; [1] holds ln(T)
    scalePe -- 2 x numDeps electron-pressure structure; [1] holds ln(P_e)

    Returns 2 x numDeps list scaleNe: [0] linear n_e, [1] ln(n_e).
    """
    linNe = []
    logNe = []
    for iDep in range(numDeps):
        lnVal = scalePe[1][iDep] - scaleTemp[1][iDep] - Useful.logK()
        logNe.append(lnVal)
        linNe.append(math.exp(lnVal))
    return [linNe, logNe]
| 14,914
| 49.731293
| 121
|
py
|
ChromaStarPy
|
ChromaStarPy-master/CSGasData2.py
|
# CSGasData2: block-data style initialization of the chemical-species tables
# used by the GAS chemical-equilibrium solver (translated from the FORTRAN
# block data that filled common /gasp/ and /gasp2/).
import BlockData
# NOTE(review): `global` at module top level is a no-op in Python; these two
# lines only document which names belonged to the original FORTRAN commons.
global name, ip, comp, awt, nspec, natom, itab, ntab, indx, iprint, gsinit, print0 #/gasp/
global ipr, nch, nel, ntot, nat, zat, neut, idel, indsp, indzat, iat, natsp, iatsp #/gasp2/
# Bind the arrays allocated in BlockData; the assignments below fill them
# in place (aliasing, not copying).
ip = BlockData.ip
comp = BlockData.comp
awt = BlockData.awt
name = BlockData.name
ipr = BlockData.ipr
nch = BlockData.nch
nel = BlockData.nel
nat = BlockData.nat
zat = BlockData.zat
# logk: five polynomial coefficients per molecular species for the fit to
# the log of the equilibrium constant; logwt: log statistical-weight factor
# used for the charged (ionized) species.
logk = [ [0.0e0 for i in range(150)] for j in range(5) ]
logwt = [0.0e0 for i in range(150)]
# Atomic species (neutrals and ions), indices 0-52. Fields per species k:
#   name[k]           species label
#   ipr[k]            print/selection group
#   nch[k]            net charge
#   nel[k]            number of distinct elements (neutral entries only)
#   nat[j][k],zat[j][k]  count and atomic number of element j in species k
#   awt[k]            atomic weight (amu, neutral entries)
#   comp[m]           fractional number abundance of element m (neutrals)
#   ip[k]             ionization energy / electron affinity in eV (ions)
#   logwt[k]          log partition-weight factor (ions)
name[0] = "H"; ipr[0] = 1; nch[0] = 0; nel[0] = 1; nat[0][0] = 1; zat[0][0] = 1; awt[0] = 1.008; comp[0] = 9.32e-01
name[1] = "H+"; ipr[1] = 1; nch[1] = +1; ip[1] = 13.598; logwt[1] = 0.000
name[2] = "H-"; ipr[2] = 1; nch[2] = -1; ip[2] = 0.754; logwt[2] = 0.600
name[3] = "He"; ipr[3] = 2; nch[3] = 0; nel[3] = 1; nat[0][3] = 1; zat[0][3] = 2; awt[3] = 4.003; comp[1] = 6.53e-02
name[4] = "He+"; ipr[4] = 2; nch[4] = +1; ip[4] = 24.587; logwt[4] = 0.600
name[5] = "C"; ipr[5] = 1; nch[5] = 0; nel[5] = 1; nat[0][5] = 1; zat[0][5] = 6; awt[5] = 12.011; comp[2] = 4.94e-04
name[6] = "C+"; ipr[6] = 1; nch[6] = +1; ip[6] = 11.260; logwt[6] = 0.100
name[7] = "N"; ipr[7] = 1; nch[7] = 0; nel[7] = 1; nat[0][7] = 1; zat[0][7] = 7; awt[7] = 14.007; comp[3] = 8.95e-04
name[8] = "N+"; ipr[8] = 1; nch[8] = +1; ip[8] = 14.534; logwt[8] = 0.650
name[9] = "O"; ipr[9] = 1; nch[9] = 0; nel[9] = 1; nat[0][9] = 1; zat[0][9] = 8; awt[9] = 16.000; comp[4] = 8.48e-04
name[10] = "O+"; ipr[10] = 1; nch[10] = +1; ip[10] = 13.618; logwt[10] = -0.050
name[11] = "Ne"; ipr[11] = 2; nch[11] = 0; nel[11] = 1; nat[0][11] = 1; zat[0][11] = 10; awt[11] = 20.179; comp[5] = 7.74e-05
name[12] = "Ne+"; ipr[12] = 2; nch[12] = +1; ip[12] = 21.564; logwt[12] = 1.080
name[13] = "Na"; ipr[13] = 2; nch[13] = 0; nel[13] = 1; nat[0][13] = 1; zat[0][13] = 11; awt[13] = 22.990; comp[6] = 1.68e-06
name[14] = "Na+"; ipr[14] = 2; nch[14] = +1; ip[14] = 5.139; logwt[14] = 0.000
name[15] = "Mg"; ipr[15] = 2; nch[15] = 0; nel[15] = 1; nat[0][15] = 1; zat[0][15] = 12; awt[15] = 24.305; comp[7] = 2.42e-05
name[16] = "Mg+"; ipr[16] = 2; nch[16] = +1; ip[16] = 7.644; logwt[16] = 0.600
name[17] = "Mg++"; ipr[17] = 2; nch[17] = +2; ip[17] = 15.031; logwt[17] = 0.000
name[18] = "Al"; ipr[18] = 2; nch[18] = 0; nel[18] = 1; nat[0][18] = 1; zat[0][18] = 13; awt[18] = 26.982; comp[8] = 2.24e-06
name[19] = "Al+"; ipr[19] = 2; nch[19] = +1; ip[19] = 5.984; logwt[19] = -0.480
name[20] = "Si"; ipr[20] = 1; nch[20] = 0; nel[20] = 1; nat[0][20] = 1; zat[0][20] = 14; awt[20] = 28.086; comp[9] = 3.08e-05
name[21] = "Si+"; ipr[21] = 1; nch[21] = +1; ip[21] = 8.149; logwt[21] = 0.120
name[22] = "S"; ipr[22] = 1; nch[22] = 0; nel[22] = 1; nat[0][22] = 1; zat[0][22] = 16; awt[22] = 32.060; comp[10] = 1.49e-05
name[23] = "S+"; ipr[23] = 1; nch[23] = +1; ip[23] = 10.360; logwt[23] = -0.050
name[24] = "Cl"; ipr[24] = 3; nch[24] = 0; nel[24] = 1; nat[0][24] = 1; zat[0][24] = 17; awt[24] = 35.453; comp[11] = 3.73e-07
name[25] = "Cl-"; ipr[25] = 3; nch[25] = -1; ip[25] = 3.613; logwt[25] = 1.080
name[26] = "K"; ipr[26] = 2; nch[26] = 0; nel[26] = 1; nat[0][26] = 1; zat[0][26] = 19; awt[26] = 39.102; comp[12] = 8.30e-08
name[27] = "K+"; ipr[27] = 2; nch[27] = +1; ip[27] = 4.339; logwt[27] = 0.000
name[28] = "Ca"; ipr[28] = 2; nch[28] = 0; nel[28] = 1; nat[0][28] = 1; zat[0][28] = 20; awt[28] = 40.080; comp[13] = 1.86e-06
name[29] = "Ca+"; ipr[29] = 2; nch[29] = +1; ip[29] = 6.111; logwt[29] = 0.600
name[30] = "Ca++"; ipr[30] = 2; nch[30] = +2; ip[30] = 11.868; logwt[30] = 0.000
name[31] = "Sc"; ipr[31] = 3; nch[31] = 0; nel[31] = 1; nat[0][31] = 1; zat[0][31] = 21; awt[31] = 44.956; comp[14] = 1.49e-09
name[32] = "Sc+"; ipr[32] = 3; nch[32] = +1; ip[32] = 6.540; logwt[32] = 0.480
name[33] = "Ti"; ipr[33] = 3; nch[33] = 0; nel[33] = 1; nat[0][33] = 1; zat[0][33] = 22; awt[33] = 47.900; comp[15] = 1.21e-07
name[34] = "Ti+"; ipr[34] = 3; nch[34] = +1; ip[34] = 6.820; logwt[34] = 0.430
name[35] = "V"; ipr[35] = 3; nch[35] = 0; nel[35] = 1; nat[0][35] = 1; zat[0][35] = 23; awt[35] = 50.941; comp[16] = 2.33e-08
name[36] = "V+"; ipr[36] = 3; nch[36] = +1; ip[36] = 6.740; logwt[36] = 0.250
name[37] = "Cr"; ipr[37] = 3; nch[37] = 0; nel[37] = 1; nat[0][37] = 1; zat[0][37] = 24; awt[37] = 51.996; comp[17] = 6.62e-07
name[38] = "Cr+"; ipr[38] = 3; nch[38] = +1; ip[38] = 6.766; logwt[38] = 0.230
name[39] = "Mn"; ipr[39] = 3; nch[39] = 0; nel[39] = 1; nat[0][39] = 1; zat[0][39] = 25; awt[39] = 54.938; comp[18] = 2.33e-07
name[40] = "Mn+"; ipr[40] = 3; nch[40] = +1; ip[40] = 7.435; logwt[40] = 0.370
name[41] = "Fe"; ipr[41] = 2; nch[41] = 0; nel[41] = 1; nat[0][41] = 1; zat[0][41] = 26; awt[41] = 55.847; comp[19] = 3.73e-05
name[42] = "Fe+"; ipr[42] = 2; nch[42] = +1; ip[42] = 7.870; logwt[42] = 0.380
name[43] = "Co"; ipr[43] = 3; nch[43] = 0; nel[43] = 1; nat[0][43] = 1; zat[0][43] = 27; awt[43] = 58.933; comp[20] = 1.12e-07
name[44] = "Co+"; ipr[44] = 3; nch[44] = +1; ip[44] = 7.860; logwt[44] = 0.180
name[45] = "Ni"; ipr[45] = 2; nch[45] = 0; nel[45] = 1; nat[0][45] = 1; zat[0][45] = 28; awt[45] = 58.710; comp[21] = 1.86e-06
name[46] = "Ni+"; ipr[46] = 2; nch[46] = +1; ip[46] = 7.635; logwt[46] = -0.020
name[47] = "Sr"; ipr[47] = 3; nch[47] = 0; nel[47] = 1; nat[0][47] = 1; zat[0][47] = 38; awt[47] = 87.620; comp[22] = 6.62e-10
name[48] = "Sr+"; ipr[48] = 3; nch[48] = +1; ip[48] = 5.695; logwt[48] = 0.500
name[49] = "Y"; ipr[49] = 3; nch[49] = 0; nel[49] = 1; nat[0][49] = 1; zat[0][49] = 39; awt[49] = 88.906; comp[23] = 5.87e-11
name[50] = "Y+"; ipr[50] = 3; nch[50] = +1; ip[50] = 6.380; logwt[50] = 0.500
name[51] = "Zr"; ipr[51] = 3; nch[51] = 0; nel[51] = 1; nat[0][51] = 1; zat[0][51] = 40; awt[51] = 91.220; comp[24] = 2.98e-10
name[52] = "Zr+"; ipr[52] = 3; nch[52] = +1; ip[52] = 6.840; logwt[52] = 0.420
# Molecular species, indices 53-103. Instead of ip/logwt these carry the
# polynomial coefficients logk[0..4][k] of the equilibrium-constant fit;
# nat/zat rows list the constituent elements and their multiplicities.
name[53] = "H2"; ipr[53] = 1; nch[53] = 0; nel[53] = 1; nat[0][53] = 2; zat[0][53] = 1; logk[0][53] = 12.739; logk[1][53] = -5.1172; logk[2][53] = 0.12572; logk[3][53] = -1.4149e-02; logk[4][53] = 6.3021e-04
name[54] = "H2+"; ipr[54] = 1; nch[54] = +1; ip[54] = 15.422; logwt[54] = 0.600
name[55] = "C2"; ipr[55] = 1; nch[55] = 0; nel[55] = 1; nat[0][55] = 2; zat[0][55] = 6; logk[0][55] = 12.804; logk[1][55] = -6.5178; logk[2][55] = .097719; logk[3][55] = -1.2739e-02; logk[4][55] = 6.2603e-04
name[56] = "C3"; ipr[56] = 1; nch[56] = 0; nel[56] = 1; nat[0][56] = 3; zat[0][56] = 6; logk[0][56] = 25.230; logk[1][56] = -14.445; logk[2][56] = 0.12547; logk[3][56] = -1.7390e-02; logk[4][56] = 8.8594e-04
name[57] = "N2"; ipr[57] = 1; nch[57] = 0; nel[57] = 1; nat[0][57] = 2; zat[0][57] = 7; logk[0][57] = 13.590; logk[1][57] = -10.585; logk[2][57] = 0.22067; logk[3][57] = -2.9997e-02; logk[4][57] = 1.4993e-03
name[58] = "O2"; ipr[58] = 1; nch[58] = 0; nel[58] = 1; nat[0][58] = 2; zat[0][58] = 8; logk[0][58] = 13.228; logk[1][58] = -5.5181; logk[2][58] = .069935; logk[3][58] = -8.1511e-03; logk[4][58] = 3.7970e-04
name[59] = "CH"; ipr[59] = 1; nch[59] = 0; nel[59] = 2; nat[0][59] = 1; zat[0][59] = 6; nat[1][59] = 1; zat[1][59] = 1; nat[2][59] = 0; zat[2][59] = 0; logk[0][59] = 12.135; logk[1][59] = -4.0760; logk[2][59] = 0.12768; logk[3][59] = -1.5473e-02; logk[4][59] = 7.2661e-04
name[60] = "C2H2"; ipr[60] = 1; nch[60] = 0; nel[60] = 2; nat[0][60] = 2; zat[0][60] = 6; nat[1][60] = 2; zat[1][60] = 1; nat[2][60] = 0; zat[2][60] = 0; logk[0][60] = 38.184; logk[1][60] = -17.365; logk[2][60] = .021512; logk[3][60] = -8.8961e-05; logk[4][60] = -2.8720e-05
name[61] = "NH"; ipr[61] = 1; nch[61] = 0; nel[61] = 2; nat[0][61] = 1; zat[0][61] = 7; nat[1][61] = 1; zat[1][61] = 1; nat[2][61] = 0; zat[2][61] = 0; logk[0][61] = 12.033; logk[1][61] = -3.8435; logk[2][61] = 0.13629; logk[3][61] = -1.6643e-02; logk[4][61] = 7.8691e-04
name[62] = "NH2"; ipr[62] = 1; nch[62] = 0; nel[62] = 2; nat[0][62] = 1; zat[0][62] = 7; nat[1][62] = 2; zat[1][62] = 1; nat[2][62] = 0; zat[2][62] = 0; logk[0][62] = 24.603; logk[1][62] = -8.6300; logk[2][62] = 0.20048; logk[3][62] = -2.4124e-02; logk[4][62] = 1.1484e-03
name[63] = "NH3"; ipr[63] = 1; nch[63] = 0; nel[63] = 2; nat[0][63] = 1; zat[0][63] = 7; nat[1][63] = 3; zat[1][63] = 1; nat[2][63] = 0; zat[2][63] = 0; logk[0][63] = 37.554; logk[1][63] = -13.059; logk[2][63] = 0.12910; logk[3][63] = -1.2338e-02; logk[4][63] = 5.3429e-04
name[64] = "OH"; ipr[64] = 1; nch[64] = 0; nel[64] = 2; nat[0][64] = 1; zat[0][64] = 8; nat[1][64] = 1; zat[1][64] = 1; nat[2][64] = 0; zat[2][64] = 0; logk[0][64] = 12.371; logk[1][64] = -5.0578; logk[2][64] = 0.13822; logk[3][64] = -1.6547e-02; logk[4][64] = 7.7224e-04
name[65] = "MgH"; ipr[65] = 2; nch[65] = 0; nel[65] = 2; nat[0][65] = 1; zat[0][65] = 12; nat[1][65] = 1; zat[1][65] = 1; nat[2][65] = 0; zat[2][65] = 0; logk[0][65] = 11.285; logk[1][65] = -2.7164; logk[2][65] = 0.19658; logk[3][65] = -2.7310e-02; logk[4][65] = 1.3816e-03
name[66] = "AlH"; ipr[66] = 2; nch[66] = 0; nel[66] = 2; nat[0][66] = 1; zat[0][66] = 13; nat[1][66] = 1; zat[1][66] = 1; nat[2][66] = 0; zat[2][66] = 0; logk[0][66] = 12.191; logk[1][66] = -3.7636; logk[2][66] = 0.25557; logk[3][66] = -3.7261e-02; logk[4][66] = 1.9406e-03
name[67] = "SiH"; ipr[67] = 1; nch[67] = 0; nel[67] = 2; nat[0][67] = 1; zat[0][67] = 14; nat[1][67] = 1; zat[1][67] = 1; nat[2][67] = 0; zat[2][67] = 0; logk[0][67] = 11.852; logk[1][67] = -3.7418; logk[2][67] = 0.15999; logk[3][67] = -2.0629e-02; logk[4][67] = 9.9897e-04
name[68] = "HS"; ipr[68] = 1; nch[68] = 0; nel[68] = 2; nat[0][68] = 1; zat[0][68] = 16; nat[1][68] = 1; zat[1][68] = 1; nat[2][68] = 0; zat[2][68] = 0; logk[0][68] = 12.019; logk[1][68] = -4.2922; logk[2][68] = 0.14913; logk[3][68] = -1.8666e-02; logk[4][68] = 8.9438e-04
name[69] = "H2S"; ipr[69] = 1; nch[69] = 0; nel[69] = 2; nat[0][69] = 1; zat[0][69] = 16; nat[1][69] = 2; zat[1][69] = 1; nat[2][69] = 0; zat[2][69] = 0; logk[0][69] = 24.632; logk[1][69] = -8.4616; logk[2][69] = 0.17014; logk[3][69] = -2.0236e-02; logk[4][69] = 9.5782e-04
name[70] = "HCl"; ipr[70] = 3; nch[70] = 0; nel[70] = 2; nat[0][70] = 1; zat[0][70] = 17; nat[1][70] = 1; zat[1][70] = 1; nat[2][70] = 0; zat[2][70] = 0; logk[0][70] = 12.528; logk[1][70] = -5.1827; logk[2][70] = 0.18117; logk[3][70] = -2.4014e-02; logk[4][70] = 1.1994e-03
name[71] = "CaH"; ipr[71] = 3; nch[71] = 0; nel[71] = 2; nat[0][71] = 1; zat[0][71] = 20; nat[1][71] = 1; zat[1][71] = 1; nat[2][71] = 0; zat[2][71] = 0; logk[0][71] = 11.340; logk[1][71] = -3.0144; logk[2][71] = 0.42349; logk[3][71] = -6.1467e-02; logk[4][71] = 3.1639e-03
name[72] = "CN"; ipr[72] = 1; nch[72] = 0; nel[72] = 2; nat[0][72] = 1; zat[0][72] = 7; nat[1][72] = 1; zat[1][72] = 6; nat[2][72] = 0; zat[2][72] = 0; logk[0][72] = 12.805; logk[1][72] = -8.2793; logk[2][72] = .064162; logk[3][72] = -7.3627e-03; logk[4][72] = 3.4666e-04
name[73] = "NO"; ipr[73] = 1; nch[73] = 0; nel[73] = 2; nat[0][73] = 1; zat[0][73] = 8; nat[1][73] = 1; zat[1][73] = 7; nat[2][73] = 0; zat[2][73] = 0; logk[0][73] = 12.831; logk[1][73] = -7.1964; logk[2][73] = 0.17349; logk[3][73] = -2.3065e-02; logk[4][73] = 1.1380e-03
name[74] = "CO"; ipr[74] = 1; nch[74] = 0; nel[74] = 2; nat[0][74] = 1; zat[0][74] = 8; nat[1][74] = 1; zat[1][74] = 6; nat[2][74] = 0; zat[2][74] = 0; logk[0][74] = 13.820; logk[1][74] = -11.795; logk[2][74] = 0.17217; logk[3][74] = -2.2888e-02; logk[4][74] = 1.1349e-03
name[75] = "CO2"; ipr[75] = 1; nch[75] = 0; nel[75] = 2; nat[0][75] = 2; zat[0][75] = 8; nat[1][75] = 1; zat[1][75] = 6; nat[2][75] = 0; zat[2][75] = 0; logk[0][75] = 27.478; logk[1][75] = -17.098; logk[2][75] = .095012; logk[3][75] = -1.2579e-02; logk[4][75] = 6.4058e-04
name[76] = "MgO"; ipr[76] = 3; nch[76] = 0; nel[76] = 2; nat[0][76] = 1; zat[0][76] = 12; nat[1][76] = 1; zat[1][76] = 8; nat[2][76] = 0; zat[2][76] = 0; logk[0][76] = 11.702; logk[1][76] = -5.0326; logk[2][76] = 0.29641; logk[3][76] = -4.2811e-02; logk[4][76] = 2.2023e-03
name[77] = "AlO"; ipr[77] = 2; nch[77] = 0; nel[77] = 2; nat[0][77] = 1; zat[0][77] = 13; nat[1][77] = 1; zat[1][77] = 8; nat[2][77] = 0; zat[2][77] = 0; logk[0][77] = 12.739; logk[1][77] = -5.2534; logk[2][77] = 0.18218; logk[3][77] = -2.5793e-02; logk[4][77] = 1.3185e-03
name[78] = "SiO"; ipr[78] = 1; nch[78] = 0; nel[78] = 2; nat[0][78] = 1; zat[0][78] = 14; nat[1][78] = 1; zat[1][78] = 8; nat[2][78] = 0; zat[2][78] = 0; logk[0][78] = 13.413; logk[1][78] = -8.8710; logk[2][78] = 0.15042; logk[3][78] = -1.9581e-02; logk[4][78] = 9.4828e-04
name[79] = "SO"; ipr[79] = 1; nch[79] = 0; nel[79] = 2; nat[0][79] = 1; zat[0][79] = 16; nat[1][79] = 1; zat[1][79] = 8; nat[2][79] = 0; zat[2][79] = 0; logk[0][79] = 12.929; logk[1][79] = -6.0100; logk[2][79] = 0.16253; logk[3][79] = -2.1665e-02; logk[4][79] = 1.0676e-03
name[80] = "CaO"; ipr[80] = 2; nch[80] = 0; nel[80] = 2; nat[0][80] = 1; zat[0][80] = 20; nat[1][80] = 1; zat[1][80] = 8; nat[2][80] = 0; zat[2][80] = 0; logk[0][80] = 12.260; logk[1][80] = -6.0525; logk[2][80] = 0.58284; logk[3][80] = -8.5805e-02; logk[4][80] = 4.4425e-03
name[81] = "ScO"; ipr[81] = 3; nch[81] = 0; nel[81] = 2; nat[0][81] = 1; zat[0][81] = 21; nat[1][81] = 1; zat[1][81] = 8; nat[2][81] = 0; zat[2][81] = 0; logk[0][81] = 13.747; logk[1][81] = -8.6420; logk[2][81] = 0.48072; logk[3][81] = -6.9670e-02; logk[4][81] = 3.5747e-03
name[82] = "ScO2"; ipr[82] = 3; nch[82] = 0; nel[82] = 2; nat[0][82] = 1; zat[0][82] = 21; nat[1][82] = 2; zat[1][82] = 8; nat[2][82] = 0; zat[2][82] = 0; logk[0][82] = 26.909; logk[1][82] = -15.824; logk[2][82] = 0.39999; logk[3][82] = -5.9363e-02; logk[4][82] = 3.0875e-03
name[83] = "TiO"; ipr[83] = 2; nch[83] = 0; nel[83] = 2; nat[0][83] = 1; zat[0][83] = 22; nat[1][83] = 1; zat[1][83] = 8; nat[2][83] = 0; zat[2][83] = 0; logk[0][83] = 13.398; logk[1][83] = -8.5956; logk[2][83] = 0.40873; logk[3][83] = -5.7937e-02; logk[4][83] = 2.9287e-03
name[84] = "VO"; ipr[84] = 3; nch[84] = 0; nel[84] = 2; nat[0][84] = 1; zat[0][84] = 23; nat[1][84] = 1; zat[1][84] = 8; nat[2][84] = 0; zat[2][84] = 0; logk[0][84] = 13.811; logk[1][84] = -7.7520; logk[2][84] = 0.37056; logk[3][84] = -5.1467e-02; logk[4][84] = 2.5861e-03
name[85] = "VO2"; ipr[85] = 3; nch[85] = 0; nel[85] = 2; nat[0][85] = 1; zat[0][85] = 23; nat[1][85] = 2; zat[1][85] = 8; nat[2][85] = 0; zat[2][85] = 0; logk[0][85] = 27.754; logk[1][85] = -14.040; logk[2][85] = 0.33613; logk[3][85] = -4.8215e-02; logk[4][85] = 2.4780e-03
name[86] = "YO"; ipr[86] = 3; nch[86] = 0; nel[86] = 2; nat[0][86] = 1; zat[0][86] = 39; nat[1][86] = 1; zat[1][86] = 8; nat[2][86] = 0; zat[2][86] = 0; logk[0][86] = 13.514; logk[1][86] = -8.7775; logk[2][86] = 0.40700; logk[3][86] = -5.8053e-02; logk[4][86] = 2.9535e-03
name[87] = "YO2"; ipr[87] = 3; nch[87] = 0; nel[87] = 2; nat[0][87] = 1; zat[0][87] = 39; nat[1][87] = 2; zat[1][87] = 8; nat[2][87] = 0; zat[2][87] = 0; logk[0][87] = 26.764; logk[1][87] = -16.447; logk[2][87] = 0.39991; logk[3][87] = -5.8916e-02; logk[4][87] = 3.0506e-03
name[88] = "ZrO"; ipr[88] = 3; nch[88] = 0; nel[88] = 2; nat[0][88] = 1; zat[0][88] = 40; nat[1][88] = 1; zat[1][88] = 8; nat[2][88] = 0; zat[2][88] = 0; logk[0][88] = 13.296; logk[1][88] = -9.0129; logk[2][88] = 0.19562; logk[3][88] = -2.9892e-02; logk[4][88] = 1.6010e-03
name[89] = "ZrO2"; ipr[89] = 3; nch[89] = 0; nel[89] = 2; nat[0][89] = 1; zat[0][89] = 40; nat[1][89] = 2; zat[1][89] = 8; nat[2][89] = 0; zat[2][89] = 0; logk[0][89] = 26.793; logk[1][89] = -16.151; logk[2][89] = 0.46988; logk[3][89] = -6.4636e-02; logk[4][89] = 3.2277e-03
name[90] = "CS"; ipr[90] = 1; nch[90] = 0; nel[90] = 2; nat[0][90] = 1; zat[0][90] = 16; nat[1][90] = 1; zat[1][90] = 6; nat[2][90] = 0; zat[2][90] = 0; logk[0][90] = 13.436; logk[1][90] = -8.5574; logk[2][90] = 0.18754; logk[3][90] = -2.5507e-02; logk[4][90] = 1.2735e-03
name[91] = "SiS"; ipr[91] = 1; nch[91] = 0; nel[91] = 2; nat[0][91] = 1; zat[0][91] = 14; nat[1][91] = 1; zat[1][91] = 16; nat[2][91] = 0; zat[2][91] = 0; logk[0][91] = 13.182; logk[1][91] = -7.1147; logk[2][91] = 0.19300; logk[3][91] = -2.5826e-02; logk[4][91] = 1.2648e-03
name[92] = "TiS"; ipr[92] = 2; nch[92] = 0; nel[92] = 2; nat[0][92] = 1; zat[0][92] = 22; nat[1][92] = 1; zat[1][92] = 16; nat[2][92] = 0; zat[2][92] = 0; logk[0][92] = 13.316; logk[1][92] = -6.2216; logk[2][92] = 0.45829; logk[3][92] = -6.4903e-02; logk[4][92] = 3.2788e-03
name[93] = "SiC"; ipr[93] = 1; nch[93] = 0; nel[93] = 2; nat[0][93] = 1; zat[0][93] = 14; nat[1][93] = 1; zat[1][93] = 6; nat[2][93] = 0; zat[2][93] = 0; logk[0][93] = 12.327; logk[1][93] = -5.0419; logk[2][93] = 0.13941; logk[3][93] = -1.9363e-02; logk[4][93] = 9.6202e-04
name[94] = "SiC2"; ipr[94] = 1; nch[94] = 0; nel[94] = 2; nat[0][94] = 1; zat[0][94] = 14; nat[1][94] = 2; zat[1][94] = 6; nat[2][94] = 0; zat[2][94] = 0; logk[0][94] = 25.623; logk[1][94] = -13.085; logk[2][94] = -.055227; logk[3][94] = 9.3363e-03; logk[4][94] = -4.9876e-04
name[95] = "NaCl"; ipr[95] = 2; nch[95] = 0; nel[95] = 2; nat[0][95] = 1; zat[0][95] = 11; nat[1][95] = 1; zat[1][95] = 17; nat[2][95] = 0; zat[2][95] = 0; logk[0][95] = 11.768; logk[1][95] = -4.9884; logk[2][95] = 0.23975; logk[3][95] = -3.4837e-02; logk[4][95] = 1.8034e-03
name[96] = "MgCl"; ipr[96] = 2; nch[96] = 0; nel[96] = 2; nat[0][96] = 1; zat[0][96] = 12; nat[1][96] = 1; zat[1][96] = 17; nat[2][96] = 0; zat[2][96] = 0; logk[0][96] = 11.318; logk[1][96] = -4.2224; logk[2][96] = 0.21137; logk[3][96] = -3.0174e-02; logk[4][96] = 1.5480e-03
name[97] = "AlCl"; ipr[97] = 2; nch[97] = 0; nel[97] = 2; nat[0][97] = 1; zat[0][97] = 13; nat[1][97] = 1; zat[1][97] = 17; nat[2][97] = 0; zat[2][97] = 0; logk[0][97] = 11.976; logk[1][97] = -5.2228; logk[2][97] = -.010263; logk[3][97] = 3.9344e-03; logk[4][97] = -2.6236e-04
name[98] = "CaCl"; ipr[98] = 2; nch[98] = 0; nel[98] = 2; nat[0][98] = 1; zat[0][98] = 20; nat[1][98] = 1; zat[1][98] = 17; nat[2][98] = 0; zat[2][98] = 0; logk[0][98] = 12.314; logk[1][98] = -5.1814; logk[2][98] = 0.56532; logk[3][98] = -8.2868e-02; logk[4][98] = 4.2822e-03
name[99] = "HCN"; ipr[99] = 1; nch[99] = 0; nel[99] = 3; nat[0][99] = 1; zat[0][99] = 7; nat[1][99] = 1; zat[1][99] = 6; nat[2][99] = 1; zat[2][99] = 1; logk[0][99] = 25.635; logk[1][99] = -13.833; logk[2][99] = 0.13827; logk[3][99] = -1.8122e-02; logk[4][99] = 9.1645e-04
name[100] = "HCO"; ipr[100] = 1; nch[100] = 0; nel[100] = 3; nat[0][100] = 1; zat[0][100] = 8; nat[1][100] = 1; zat[1][100] = 6; nat[2][100] = 1; zat[2][100] = 1; logk[0][100] = 25.363; logk[1][100] = -13.213; logk[2][100] = 0.18451; logk[3][100] = -2.2973e-02; logk[4][100] = 1.1114e-03
name[101] = "MgOH"; ipr[101] = 2; nch[101] = 0; nel[101] = 3; nat[0][101] = 1; zat[0][101] = 12; nat[1][101] = 1; zat[1][101] = 8; nat[2][101] = 1; zat[2][101] = 1; logk[0][101] = 24.551; logk[1][101] = -9.3818; logk[2][101] = 0.19666; logk[3][101] = -2.7178e-02; logk[4][101] = 1.3887e-03
name[102] = "AlOH"; ipr[102] = 2; nch[102] = 0; nel[102] = 3; nat[0][102] = 1; zat[0][102] = 13; nat[1][102] = 1; zat[1][102] = 8; nat[2][102] = 1; zat[2][102] = 1; logk[0][102] = 25.707; logk[1][102] = -10.624; logk[2][102] = .097901; logk[3][102] = -1.1835e-02; logk[4][102] = 5.8121e-04
name[103] = "CaOH"; ipr[103] = 2; nch[103] = 0; nel[103] = 3; nat[0][103] = 1; zat[0][103] = 20; nat[1][103] = 1; zat[1][103] = 8; nat[2][103] = 1; zat[2][103] = 1; logk[0][103] = 24.611; logk[1][103] = -10.910; logk[2][103] = 0.60803; logk[3][103] = -8.7197e-02; logk[4][103] = 4.4736e-03
| 19,996
| 158.976
| 295
|
py
|
ChromaStarPy
|
ChromaStarPy-master/AtomicMass.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 15:33:22 2017
//Atomic AND molecular masses in atomic mass units (amu. "mu")
@author: ishort
"""
# Atomic masses in atomic mass units (amu), from CIAAW:
# "Atomic weights of the elements 2015", ciaaw.org/atomic-weights.htm.
# Heaviest element treated is La (Z = 57).
_ATOMIC_MASS = {
    "H": 1.007, "He": 4.002, "Li": 6.938, "Be": 9.012, "B": 10.806,
    "C": 12.0096, "N": 14.006, "O": 15.999, "F": 18.998, "Ne": 20.1797,
    "Na": 22.989, "Mg": 24.304, "Al": 26.981, "Si": 28.084, "P": 30.973,
    "S": 32.059, "Cl": 35.446, "Ar": 39.948, "K": 39.0983, "Ca": 40.078,
    "Sc": 44.955, "Ti": 47.867, "V": 50.9415, "Cr": 51.9961, "Mn": 54.938,
    "Fe": 55.845, "Co": 58.933, "Ni": 58.6934, "Cu": 63.546, "Zn": 65.38,
    "Ga": 69.723, "Ge": 72.630, "As": 74.921, "Se": 78.971, "Br": 79.901,
    "Kr": 83.798, "Rb": 85.4678, "Sr": 87.62, "Y": 88.905, "Zr": 91.224,
    "Nb": 92.906, "Mo": 95.95, "Ru": 101.07, "Rh": 102.905, "Pd": 106.42,
    "Ag": 107.8682, "Cd": 112.414, "In": 114.818, "Sn": 118.710,
    "Sb": 121.760, "Te": 127.60, "I": 126.904, "Xe": 131.293,
    "Cs": 132.905, "Ba": 137.327, "La": 138.905,
}
# Bug fix: the original keyed vanadium as "Va", so getMass("V") fell through
# to the 1.0 default even though "V" is the correct symbol and is what the
# rest of the code base uses (e.g. the CSGasData2 species table).
# "Va" is retained as an alias for backward compatibility.
_ATOMIC_MASS["Va"] = _ATOMIC_MASS["V"]
def getMass(elName):
    """Return the atomic mass in amu for element symbol elName.
    Unknown symbols fall back to the original default of 1.0 amu."""
    return _ATOMIC_MASS.get(elName, 1.0)
#// end of getMass method
#//Molecular masses in atomic mass units (amu. "mu")
def getMolMass(molName):
    """Return the molecular mass in amu for molecule molName.
    Only TiO is currently tabulated; every other name falls back to the
    default of 2.0 amu (molecular hydrogen, H_2)."""
    if "TiO" == molName:
        return getMass("O") + getMass("Ti")
    return 2.0
| 3,547
| 16.564356
| 77
|
py
|
ChromaStarPy
|
ChromaStarPy-master/LamGrid.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 20 16:33:44 2017
Create the wavelength grid that samples the overall spectral energy distribution
@author: ishort
"""
import math
def lamgrid(numLams, lamSetup):
    """Build a logarithmically spaced wavelength grid.

    Parameters:
        numLams  -- number of grid points
        lamSetup -- sequence whose first two entries are the start and
                    stop wavelengths (only those two are used here)

    Returns a list of numLams wavelengths, log-uniform from lamSetup[0]
    toward lamSetup[1]. Note the stop value itself is excluded: the step
    is range/numLams rather than range/(numLams-1).
    """
    logStart = math.log10(lamSetup[0])
    logStop = math.log10(lamSetup[1])
    step = (logStop - logStart) / numLams
    return [math.pow(10.0, logStart + float(j) * step) for j in range(numLams)]
| 631
| 20.066667
| 80
|
py
|
ChromaStarPy
|
ChromaStarPy-master/GsCalc.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 10:09:02 2019
@author:
"""
#
#
#his module can serve as the main method
#
#
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import math
import numpy
#from scipy.linalg.blas import daxpy
#from scipy.linalg.blas import ddot
#from scipy.linalg.blas import dscal
#from scipy.linalg.blas import idamax
"""
from Documents.ChromaStarPy.GAS.blas.Daxpy import daxpy
from Documents.ChromaStarPy.GAS.blas.Ddot import ddot
from Documents.ChromaStarPy.GAS.blas.Dscal import dscal
from Documents.ChromaStarPy.GAS.blas.Idamax import idamax
from Documents.ChromaStarPy.GAS.linpack.Dgesl import dgesl
from Documents.ChromaStarPy.GAS.linpack.Dgefa import dgefa
"""
from functools import reduce
import subprocess
import os
import sys
#import CSBlockData
#import GsRead
import CSGsRead2
#import CSGsTabl
import CSGasEst
import CSGas
"""
import Documents.ChromaStarPy.GAS.BlockData
#from Documents.ChromaStarPy.GAS.GsRead import gsread
#from Documents.ChromaStarPy.GAS.GasEst import gasest
#from Documents.ChromaStarPy.GAS.Gas import gas
import Documents.ChromaStarPy.GAS.GsRead
import Documents.ChromaStarPy.GAS.GasEst
import Documents.ChromaStarPy.GAS.Gas
"""
#############################################
#
#
#
# Initial set-up:
# - import all python modules
# - set input parameters
#
#
#
##############################################
#Detect python version
# Warn (but do not abort) when not running under Python 3.
pythonV = sys.version_info
if pythonV[0] != 3:
    print("")
    print("")
    print(" ********************************************* ")
    print("")
    print("WARNING!! WARNING!! WARNING!!")
    print("")
    print("")
    print("ChromaStarPy/GAS developed for python V. 3!!" )
    print("")
    print("May not work in other version")
    print("")
    print("")
    print("*********************************************** ")
    print("")
    print("")
thisOS = "unknown" #default
myOS= ""
#returns 'posix' form unix-like OSes and 'nt' for Windows??
thisOS = os.name
print("")
print("Running on OS: ", thisOS)
print("")
absPath0 = "./" #default
# Ask the shell for the current working directory; the trailing newline
# differs by platform ('\r\n' on Windows vs '\n' on POSIX), hence the
# different backSpace trim counts used below.
if thisOS == "nt":
    #windows
    absPath0 = subprocess.check_output("cd", shell=True)
    backSpace = 2
elif thisOS == "posix":
    absPath0 = subprocess.check_output("pwd", shell=True)
    backSpace = 1
absPath0 = bytes.decode(absPath0)
#remove OS_dependent trailing characters 'r\n'
nCharsPath = len(absPath0)
nCharsPath -= backSpace
absPath0 = absPath0[0: nCharsPath]
# Normalize Windows backslash separators to forward slashes, one at a time.
slashIndex = absPath0.find('\\') #The first backslash is the escape character!
while slashIndex != -1:
    #python strings are immutable:
    absPathCopy = absPath0[0: slashIndex]
    absPathCopy += '/'
    absPathCopy += absPath0[slashIndex+1: len(absPath0)]
    absPath0 = absPathCopy
    #print(absPathCopy, absPath0)
    slashIndex = absPath0.find('\\')
absPath = absPath0 + '/'
##makePlot = Input.makePlot
#makePlot = "yes"
#print("")
#print("Will make plot: ", makePlot)
#print("")
#stop
#color platte for plt plotting
#palette = ['black', 'brown','red','orange','yellow','green','blue','indigo','violet']
#grayscale
#stop
#Grayscale:
# Grayscale plotting palette: 12 gray levels from 0.481 darkening in
# steps of 0.04 (matplotlib accepts gray levels as strings).
numPal = 12
palette = ['0.0' for i in range(numPal)]
delPal = 0.04
#for i in range(numPal):
# ii = float(i)
# helpPal = 0.481 - ii*delPal
# palette[i] = str(helpPal)
palette = [ str( 0.481 - float(i)*delPal ) for i in range(numPal) ]
numClrs = len(palette)
#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')
outPath = absPath + "/Outputs/"
#fileStem = Input.fileStem
fileStem = "GsCalc"
#Set input pressure and isolv here for file naming:
#pt = 1.0e5
pt = 96.0
isolv = 1
# Encode the run parameters (log10 total pressure and solver choice)
# into the output file names.
fileStem = fileStem + ".Read2." + "pt" + str(int(math.log10(pt))).strip() + "is" + str(isolv)
outFileString = outPath+fileStem+".out" #Report for humans
outFileString2 = outPath+fileStem+".2.out" #PPs for plotting
print(" ")
print("Writing to files ", outFileString)
print(" ")
outFile = open(outFileString, "w")
outFile2 = open(outFileString2, "w")
#program gcalc
"""
#The seven universal FORTRAN "commons"
common /consts/ pi,sbcon,kbol,cvel,gcon,hpl,hmass,t0,everg
common /gasp/ name,ip,comp,awt,nspec,natom,itab,ntab,indx,
# iprint,gsinit,print0
common /gasp2/ ipr,nch,nel,ntot,nat,zat,neut,idel,indsp,
# indzat,iat,natsp,iatsp
common /lin/ nlin1,lin1,linv1,nlin2,lin2,linv2
common /equil/ logk,logwt,it,kt,type
common /opacty/ chix,nix,nopac,ixa,ixn,opinit,opflag,opchar,iopt
common /stellr/ mstar,lstar,rstar,ms,ls,rs,teff,logg,mbol,comt
"""
"""
#Try this:
#global pi, sbcon, kbol, cvel, gcon, hpl, hmass, t0, everg # /consts/
global kbol, hmass, t0 # /consts/
global name, ip, comp, awt, nspec, natom, itab, ntab, indx, iprint, gsinit, print0 #/gasp/
global ipr, nch, nel, ntot, nat, zat, neut, idel, indsp, indzat, iat, natsp, iatsp #/gasp2/
global nlin1, lin1, linv1, nlin2, lin2, linv2 #/lin/
global logk, logwt, it, kt, type0 #equil
#global chix, nix, nopac, ixa, ixn, opinit, opflag, opchar, iopt #/opacty/
global chix, nix, ixa, ixn #/opacty/
#global mstar, lstar, rstar, ms, ls, rs, teff, logg, mbol, comt #/stellr/
"""
outString = ""
print0 = False
# Working arrays sized to the original FORTRAN fixed dimensions:
p0 = [0.0e0 for i in range(40)]
pp = [0.0e0 for i in range(150)]
p = [0.0e0 for i in range(40)]
ppix = [0.0e0 for i in range(30)]
a = [0.0e0 for i in range(625)]
#c cis:
fp = [0.0e0 for i in range(150)]
#name = [0.0e0 for i in range(150)]
#ip = [0.0e0 for i in range(150)]
#comp = [0.0e0 for i in range(40)]
#awt = [0.0e0 for i in range(150)]
#itab = [0 for i in range(83)]
#ntab = [0 for i in range(5)]
#indx = [ [ [ [ [0 for i in range(2)] for j in range(5) ] for k in range(7) ] for l in range(26) ] for m in range(4) ]
#common /gasp/ name,ip,comp,awt,nspec,natom,gsinit,itab,ntab,indx
#BlockData.block_data()
#print("Calling GsRead:")
#GsRead.gsread(outFile)
# Elemental abundance table: eheu holds log_10 "A_12" abundances
# (H = 12.00 scale), cname the matching element symbols.
nelemAbnd = 41
eheu = [0.0 for i in range(nelemAbnd)] #log_10 "A_12" values
cname = ["" for i in range(nelemAbnd)]
#//log_10 "A_12" values:
eheu[0]= 12.00
eheu[1]= 10.93
eheu[2]= 1.05
eheu[3]= 1.38
eheu[4]= 2.70
eheu[5]= 8.43
eheu[6]= 7.83
eheu[7]= 8.69
eheu[8]= 4.56
eheu[9]= 7.93
eheu[10]= 6.24
eheu[11]= 7.60
eheu[12]= 6.45
eheu[13]= 7.51
eheu[14]= 5.41
eheu[15]= 7.12
eheu[16]= 5.50
eheu[17]= 6.40
eheu[18]= 5.03
eheu[19]= 6.34
eheu[20]= 3.15
eheu[21]= 4.95
eheu[22]= 3.93
eheu[23]= 5.64
eheu[24]= 5.43
eheu[25]= 7.50
eheu[26]= 4.99
eheu[27]= 6.22
eheu[28]= 4.19
eheu[29]= 4.56
eheu[30]= 3.04
eheu[31]= 3.25
eheu[32]= 2.52
eheu[33]= 2.87
eheu[34]= 2.21
eheu[35]= 2.58
eheu[36]= 1.46
eheu[37]= 2.18
eheu[38]= 1.10
eheu[39]= 1.12
eheu[40]= 3.65 #// Ge - out of sequence
cname[0]="H";
cname[1]="He";
cname[2]="Li";
cname[3]="Be";
cname[4]="B";
cname[5]="C";
cname[6]="N";
cname[7]="O";
cname[8]="F";
cname[9]="Ne";
cname[10]="Na";
cname[11]="Mg";
cname[12]="Al";
cname[13]="Si";
cname[14]="P";
cname[15]="S";
cname[16]="Cl";
cname[17]="Ar";
cname[18]="K";
cname[19]="Ca";
cname[20]="Sc";
cname[21]="Ti";
cname[22]="V";
cname[23]="Cr";
cname[24]="Mn";
cname[25]="Fe";
cname[26]="Co";
cname[27]="Ni";
cname[28]="Cu";
cname[29]="Zn";
cname[30]="Ga";
cname[31]="Kr";
cname[32]="Rb";
cname[33]="Sr";
cname[34]="Y";
cname[35]="Zr";
cname[36]="Nb";
cname[37]="Ba";
cname[38]="La";
cname[39]="Cs";
cname[40]="Ge";
# Load the species data tables given the element list and abundances.
CSGsRead2.gsread(cname, eheu)
#GsTabl.gstabl() #necessary??
#nspec = GsRead.nspec
#name = GsRead.name
nspec = CSGsRead2.nspec
name = CSGsRead2.name
#print("GsCalc: nspec: ", nspec)
# Write the species-name header line to the plotting output file.
outString = ""
for k in range(nspec):
    outString = outString + " " + name[k]
outString+="\n"
outFile2.write(outString)
# Input parameters are now command line arguments
#c call time(1,0,mscpu)
#c cpu1=mscpu/1000.0
#c write(7,100)
# write(6,100)
# 100 format('enter: t ,p, and pe')
#c call fread(5,'3r*8:',t,pt,pe0)
# read(5, *) t, pt, pe0
#c write(7,150)
# write(6,150)
# 150 format('enter: tolerance, max # iter. and isolv')
#c call fread(5,'r*8,2i:',tol,maxit,isolv)
# read(5, *) tol, maxit, isolv
#Get and parse the command line arguments:
# sys.argv[0] is the name of the script
"""
t = float(sys.argv[1])
pt = float(sys.argv[2])
pe0 = float(sys.argv[3])
tol = float(sys.argv[4])
maxit = int(sys.argv[5])
isolv = int(sys.argv[6])
print("t ", t, " pt ", pt, " pe0 ", pe0, " tol ", tol, " maxit ", maxit, " isolv ", isolv)
"""
#For now:
#t = 6000.0
# Temperature sweep: t1..t2 (K) in steps of dt at fixed total pressure pt.
t1 = 3600.0
t2 = 4500.0
dt = 100.0
# Set this above: pt = 100000.0
#pe0 = 100.0
# Convergence tolerance and iteration cap for the equilibrium solver.
tol = 1.0e-4
maxit = 100
# Set this above: isolv = 1
nt = (t2 - t1) / dt + 1
nt = int(nt)
# Initial electron-pressure guess: 1% of the total pressure.
pe0 = 0.01 * pt
outString = ("%4s %12.3e\n" %("PT ", pt))
outFile.write(outString)
outFile2.write(outString)
outStringHead = ("%11s %8s %5s %4s %4s %8s\n"\
    %("t ", " rholog ", " gmu ", " fd ", " fe ", " fp(k) "))
outFile2.write(outStringHead)
#testing:
#tol = 1.0e-1
#maxit = 1
original = sys.stdout
#sys.stdout = open('./redirect.txt', 'w')
# Main loop: at each temperature, estimate then solve the gas-phase
# chemical equilibrium and log the results to both output files.
for k in range(nt):
    k = float(k)
    t = t1 + dt*k
    #Try making return value a tuple:
    #print("Before GasEst pe0 ", pe0)
    #gasestReturn = GasEst.gasest(isolv, t, pt, pe0)
    gasestReturn = CSGasEst.gasest(isolv, t, pt)
    pe0 = gasestReturn[0]
    p0 = gasestReturn[1]
    neq = gasestReturn[2]
    #print("GsCalc: pe0 ", pe0, " p0 ", p0, " neq ", neq)
    #print("Before gas pe0 ", pe0)
    gasReturn = CSGas.gas(isolv, t, pt, pe0, p0, neq, tol, maxit)
    a = gasReturn[0]
    nit = gasReturn[1]
    pe = gasReturn[2]
    pd = gasReturn[3]
    pp = gasReturn[4]
    ppix = gasReturn[5]
    gmu = gasReturn[6]
    rho = gasReturn[7]
    #print("GsCalc: rho ", rho)
    #print("GsCalc: gmu ", gmu)
    rholog= math.log10(rho)
    # Log partial-pressure fractions; -99 flags a zero/negative ratio.
    fd= -99.0e0
    if(pd/pt > 0.0e0):
        fd = math.log10(pd/pt)
    fe= -99.0e0
    if(pe/pt > 0.0e0):
        fe = math.log10(pe/pt)
    for n in range(nspec):
        fp[n]= -99.0e0
        if (pp[n]/pt > 0.0e0):
            fp[n]= math.log10( pp[n]/pt)
    #c write(7,200) t,rholog,gmu,fd,fe,(fp(k),k=1,nspec)
    #print("t ", t, " pt ", pt, " rholog ", rholog, " gmu ", gmu, " fd ", fd, " fe ",fe)
    #print("pp, fp:")
    #for k in range(nspec):
    # print("k ", k, pp[k], fp[k])
    # 200 format(1x,f10.2,160f10.5)
    outFile.write(outStringHead)
    outString = ("%11.2f %10.5f %10.5f %10.5f %10.5f\n"\
        %(t, rholog, gmu, fd, fe))
    outFile.write(outString)
    outFile2.write(outString)
    outString = ""
    for j in range(nspec):
        outString = outString + " " + str(fp[j])
    outString+="\n"
    outFile.write(outString)
    outFile2.write(outString)
#sys.stdout = original
outFile.close()
outFile2.close()
| 10,658
| 22.426374
| 118
|
py
|
ChromaStarPy
|
ChromaStarPy-master/KappasRaylGas.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 17:18:39 2017
@author: ishort
"""
import math
import Useful
import PartitionFn
import ToolBox
#import numpy
#/* Rayleigh scattering opacity routines taken from Moog (moogjul2014/, MOOGJUL2014.tar)
#Chris Sneden (University of Texas at Austin) and collaborators
#http://www.as.utexas.edu/~chris/moog.html
#//From Moog source file Opacscat.f
#*/
"""
#JB#
#a function to create a cubic function fit extrapolation
def cubicFit(x,y):
coeffs = numpy.polyfit(x,y,3)
#returns an array of coefficents for the cubic fit of the form
#Ax^3 + Bx^2 + Cx + D as [A,B,C,D]
return coeffs
#this will work for any number of data points!
def valueFromFit(fit,x):
#return the value y for a given fit, at point x
return (fit[0]*(x**3)+fit[1]*(x**2)+fit[2]*x+fit[3])
"""
# Temperature grid (K) at which the partition-function tables logUH1/logUHe1
# are tabulated; used as the abscissa for the cubic fits below.
masterTemp=[130,500,3000,8000,10000]
#JB#
def masterRayl(numDeps, numLams, temp, lambdaScale, stagePops, gsName, gsFirstMol, molPops):
    """ /*c******************************************************************************
c The subroutines needed to calculate the opacities from scattering by
c H I, H2, He I, are in this file. These are from ATLAS9.
c******************************************************************************
*/"""
    # Build the total Rayleigh-scattering extinction, masterRScat[iL][iD]
    # (natural log), summed over H I, He I and H2 at every wavelength iL
    # and depth iD.
    #
    #//System.out.println("masterRayl called...");
    #//From Moog source file Opacitymetals.f
    #// From how values such as aC1[] are used in Moog file Opacit.f to compute the total opacity
    #// and then the optical depth scale, I infer that they are extinction coefficients
    #// in cm^-1
    #//
    #// There does not seem to be any correction for stimulated emission
    logE = math.log10(math.e)  # only used by commented-out debug output
    masterRScat = [ [ 0.0 for i in range(numDeps) ] for j in range(numLams) ]
    # Partition-function samples on the masterTemp grid:
    logUH1 = [0.0 for i in range(5)]
    logUHe1 = [0.0 for i in range(5)]
    logStatWH1 = 0.0
    logStatWHe1 = 0.0
    theta = 1.0
    species = ""
    # Natural-log ground-state populations per depth for each scatterer:
    logGroundPopsH1 = [0.0 for i in range(numDeps)]
    logGroundPopsHe1 = [0.0 for i in range(numDeps)]
    logH2Pops = [0.0 for i in range(numDeps)]
    #//
    #// H I: Z=1 --> iZ=0:
    sigH1 = [0.0 for i in range(numDeps)]
    #// He I: Z=2 --> iZ=1:
    sigHe1 = [0.0 for i in range(numDeps)]
    species = "HI"
    logUH1 = PartitionFn.getPartFn2(species)
    species = "HeI"
    logUHe1 = PartitionFn.getPartFn2(species)
    sigH2 = [0.0 for i in range(numDeps)]
    #Find index of H2 in molPops array
    # NOTE(review): if "H2" is absent from gsName, the loop falls through and
    # iH2 silently keeps the last index — confirm H2 is always present.
    for iH2 in range(len(gsName)):
        if (gsName[iH2].strip() == "H2"):
            break;
    #print("iH2 ", iH2, " iH2-gsFirstMol ", (iH2-gsFirstMol))
    #//System.out.println("iD PopsH1 PopsHe1");
    for iD in range(numDeps):
        #//neutral stage
        #//Assumes ground state stat weight, g_1, is 1.0
        #theta = 5040.0 / temp[0][iD]
        #// U[0]: theta = 1.0, U[1]: theta = 0.5
        # Disabled: old two-point theta interpolation of the partition function.
        """
if (theta <= 0.5):
    logStatWH1 = logUH1[1]
    logStatWHe1 = logUHe1[1]
elif ( (theta < 1.0) and (theta > 0.5) ):
    logStatWH1 = ( (theta-0.5) * logUH1[0] ) + ( (1.0-theta) * logUH1[1] )
    logStatWHe1 = ( (theta-0.5) * logUHe1[0] ) + ( (1.0-theta) * logUHe1[1] )
    #//divide by common factor of interpolation interval of 0.5 = (1.0 - 0.5):
    logStatWH1 = 2.0 * logStatWH1
    logStatWHe1 = 2.0 * logStatWHe1
else:
    logStatWH1 = logUH1[0]
    logStatWHe1 = logUHe1[0]
        """
        thisTemp = temp[0][iD];
        #JB#
        # Cubic fit of the tabulated partition functions vs temperature:
        logWH1Fit = ToolBox.cubicFit(masterTemp,logUH1)
        logStatWH1 = ToolBox.valueFromFit(logWH1Fit,thisTemp)
        logWHe1Fit = ToolBox.cubicFit(masterTemp,logUHe1)
        logStatWHe1 = ToolBox.valueFromFit(logWHe1Fit,thisTemp)
        #logStatWH1Fun = spline(masterTemp,logUH1)
        #logStatWH1=logStatWH1Fun(thisTemp)
        #logStatWHe1Fun = spline(masterTemp,logUHe1)
        #logStatWHe1=logStatWHe1Fun(thisTemp)
        #JB#
        #// NEW Interpolation with temperature for new partition function: lburns
        thisTemp = temp[0][iD];
        # Clamp to the table endpoints outside [130 K, 10000 K]:
        if (thisTemp <= 130.0):
            logStatWH1 = logUH1[0]
            logStatWHe1 = logUHe1[0]
        if (thisTemp >= 10000.0):
            logStatWH1 = logUH1[4]
            logStatWHe1 = logUHe1[4]
        # Disabled: piecewise-linear interpolation superseded by the cubic fit.
        """
elif (thisTemp > 130 and thisTemp <= 500):
    logStatWH1 = logUH1[1] * (thisTemp - 130)/(500 - 130) \
               + logUH1[0] * (500 - thisTemp)/(500 - 130)
    logStatWHe1 = logUHe1[1] * (thisTemp - 130)/(500 - 130) \
                + logUHe1[0] * (500 - thisTemp)/(500 - 130)
elif (thisTemp > 500 and thisTemp <= 3000):
    logStatWH1 = logUH1[2] * (thisTemp - 500)/(3000 - 500) \
               + logUH1[1] * (3000 - thisTemp)/(3000 - 500)
    logStatWHe1 = logUHe1[2] * (thisTemp - 500)/(3000 - 500) \
                + logUHe1[1] * (3000 - thisTemp)/(3000 - 500)
elif (thisTemp > 3000 and thisTemp <= 8000):
    logStatWH1 = logUH1[3] * (thisTemp - 3000)/(8000 - 3000) \
               + logUH1[2] * (8000 - thisTemp)/(8000 - 3000)
    logStatWHe1 = logUHe1[3] * (thisTemp - 3000)/(8000 - 3000) \
                + logUHe1[2] * (8000 - thisTemp)/(8000 - 3000)
elif (thisTemp > 8000 and thisTemp < 10000):
    logStatWH1 = logUH1[4] * (thisTemp - 8000)/(10000 - 8000) \
               + logUH1[3] * (10000 - thisTemp)/(10000 - 8000)
    logStatWHe1 = logUHe1[4] * (thisTemp - 8000)/(10000 - 8000) \
                + logUHe1[3] * (10000 - thisTemp)/(10000 - 8000)
else:
    #// for temperatures of greater than or equal to 10000K lburns
    logStatWH1 = logUH1[4]
    logStatWHe1 = logUHe1[4]
        """
        # Ground-state population = total neutral-stage population / partition fn
        # (all in natural-log space).
        logGroundPopsH1[iD] = stagePops[0][0][iD] - logStatWH1
        logGroundPopsHe1[iD] = stagePops[1][0][iD] - logStatWHe1
        logH2Pops[iD] = molPops[iH2-gsFirstMol][iD]
        #print("iD " , iD , " logH2 " , logH2Pops[iD])
        #// if (iD%10 == 1){
        #// System.out.format("%03d, %21.15f, %21.15f %n",
        #// iD, logE*logGroundPopsH1[iD], logE*logGroundPopsHe1[iD]);
        #// }
    kapRScat = 0.0
    #//System.out.println("iD iL lambda sigH1 sigHe1 ");
    for iL in range(numLams):
        #//
        # Reset the per-species cross-section arrays for this wavelength:
        for i in range(numDeps):
            sigH1[i] = 0.0
            sigHe1[i] = 0.0
            sigH2[i] = 0.0
        #//System.out.println("Calling opacH1 from masterMetal...");
        sigH1 = opacHscat(numDeps, temp, lambdaScale[iL], logGroundPopsH1)
        sigHe1 = opacHescat(numDeps, temp, lambdaScale[iL], logGroundPopsHe1)
        sigH2 = opacH2scat(numDeps, temp, lambdaScale[iL], logH2Pops)
        # Sum the three scatterers and store the natural log of the total:
        for iD in range(numDeps):
            kapRScat = sigH1[iD] + sigHe1[iD] + sigH2[iD]
            masterRScat[iL][iD] = math.log(kapRScat)
            #if ( (iD%10 == 0) and (iL%10 == 0) ):
            #    print("iD ", iD, " iL ", iL, " lambda ", lambdaScale[iL], math.log10(sigH1[iD]), math.log10(sigHe1[iD]), math.log10(sigH2[iD]) )
        #} //iD
    #} //iL
    return masterRScat
#} //end method masterRayl
def opacHscat(numDeps, temp, lambda2, logGroundPops):
    """Rayleigh-scattering extinction (cm^-1) by neutral H I at one wavelength.

    Ported from ATLAS9 via Moog's Opacscat.f.  lambda2 is a wavelength in cm;
    logGroundPops holds the natural log of the H I ground-state population at
    each of the numDeps depths.  'temp' is accepted for interface symmetry but
    is not used in this routine.
    """
    freq = Useful.c() / lambda2
    # Convert to Angstroms; frequencies above 2.463e15 Hz are clamped to the
    # threshold value (the fit is not extended blueward of it).
    angstroms = 2.997925e18 / min(freq, 2.463e15)
    ww = math.pow(angstroms, 2)
    # Per-atom cross-section: polynomial fit in inverse even powers of lambda.
    xsec = (5.799e-13 + (1.422e-6 / ww) + (2.784 / (ww * ww))) / (ww * ww)
    # Scale by the local ground-state population at every depth.
    return [xsec * 2.0 * math.exp(logGroundPops[iD]) for iD in range(numDeps)]
#} //end method opacHscat
def opacHescat(numDeps, temp, lambda2, logGroundPops):
    """Rayleigh-scattering extinction (cm^-1) by neutral He I at one wavelength.

    Ported from ATLAS9 via Moog's Opacscat.f.  lambda2 is a wavelength in cm;
    logGroundPops holds the natural log of the He I ground-state population at
    each of the numDeps depths.  'temp' is accepted for interface symmetry but
    is not used in this routine.
    """
    freq = Useful.c() / lambda2
    # Convert to Angstroms; frequencies above 5.15e15 Hz are clamped to the
    # threshold value.
    angstroms = 2.997925e18 / min(freq, 5.15e15)
    ww = math.pow(angstroms, 2)
    # Per-atom cross-section (ATLAS9 fit with a resonance-like correction term).
    xsec = (5.484e-14 / ww / ww) * math.pow((1.0 + ((2.44e5 + (5.94e10 / (ww - 2.90e5))) / ww)), 2)
    # Scale by the local ground-state population at every depth.
    return [xsec * math.exp(logGroundPops[iD]) for iD in range(numDeps)]
#} //end method opacHescat
def opacH2scat(numDeps, temp, lambda2, molPops):
    """Rayleigh-scattering extinction (cm^-1) by molecular H2 at one wavelength.

    Ported from ATLAS9 via Moog's Opacscat.f.  lambda2 is a wavelength in cm;
    molPops holds the natural log of the H2 number density at each of the
    numDeps depths.  'temp' is accepted for interface symmetry but is not used
    in this routine.
    """
    freq = Useful.c() / lambda2
    # Convert to Angstroms; frequencies above 2.463e15 Hz are clamped to the
    # threshold value.
    angstroms = 2.997925e18 / min(freq, 2.463e15)
    ww = angstroms ** 2
    # Per-molecule cross-section: polynomial fit in inverse even powers of lambda.
    xsec = (8.14e-13 + (1.28e-6 / ww) + (1.61 / (ww * ww))) / (ww * ww)
    # Scale by the local H2 population at every depth.
    return [xsec * math.exp(molPops[iD]) for iD in range(numDeps)]
| 10,089
| 33.087838
| 149
|
py
|
ChromaStarPy
|
ChromaStarPy-master/AlfBooTiO516.py
|
#
#
#Custom filename tags to distinguish from other runs
# ChromaStarPy input file: Alpha Boo (Arcturus)-like model, TiO region 512-526 nm.
project = "Project"  #custom filename tag to distinguish from other runs
runVers = "Run"      #custom filename tag: run version
#Default plot
#Select ONE only:
#makePlot = "structure"
#makePlot = "sed"
makePlot = "spectrum"
#makePlot = "ldc"
#makePlot = "ft"
#makePlot = "tlaLine"
#Spectrum synthesis mode
# - uses model in Restart.py with minimal structure calculation
specSynMode = False
#Stellar parameter sources:
#Griffin, R. E. M., Lynas-Gray, A. E., 1999, \aj, 117, 2998
#Decin, L., Vandenbussche, B., Waelkens, C., Decin, G., Eriksson, K., Gustafsson, B., Plez, B., Sauval, A. J., 2003a, \aap, 400, 709
#Model atmosphere
teff = 4300.0  #, effective temperature, K
logg = 2.0  #, log surface gravity, cgs
log10ZScale = -0.7  # [A/H], overall metallicity scaling
massStar = 0.75  #, solar masses
xiT = 2.0  #, microturbulence, km/s
logHeFe = 0.0  #, [He/Fe]
logCO = 0.0  #, [C/O]
logAlphaFe = 0.0  #, [alpha-elements/Fe]
#Spectrum synthesis wavelength range
lambdaStart = 512.0  #, nm
lambdaStop = 526.0  #, nm
#Output filename stem built from the main run parameters:
fileStem = project + "-"\
 + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
 + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
 + "-" + runVers
lineThresh = -3.0  #, min log(KapLine/kapCnt) for inclusion at all - really, being used as "lineVoigt" for now
voigtThresh = -3.0  #, min log(KapLine/kapCnt) for treatment as Voigt - currently not used - all lines get Voigt
logGammaCol = 0.5  #, log_10 collisional broadening enhancement factor
logKapFudge = 0.0  #, log_10 opacity fudge factor
macroV = 1.0  #, macroturbulence, km/s
rotV = 2.0  #, equatorial rotation speed, km/s
rotI = 90.0  #, rotation-axis inclination, degrees
RV = 0.0  #, radial velocity, km/s
vacAir = "vacuum"  # wavelength scale: "vacuum" or "air"
sampling = "fine"  # spectral sampling density
#Performance vs realism
nOuterIter = 12  #, no of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12  #, no of inner (ion fraction) - Pe iterations
ifTiO = 1  #, whether to include TiO JOLA bands in synthesis
#Gaussian filter for limb darkening curve, fourier transform
diskLambda = 500.0  #, nm
diskSigma = 0.01  #, nm
#Two-level atom and spectral line (user-defined test line)
userLam0 = 589.592  #, line center, nm
userA12 = 6.24  #, A_12 logarithmic abundance = log_10(N/N_H) + 12
userLogF = -0.495  #, log(f) oscillator strength // saturated line
userStage = 0  #, ionization stage of user species (0 (I) - 3 (IV))
userChiI1 = 5.139  #, ground state chi_I, eV
userChiI2 = 47.29  #, 1st ionized state chi_I, eV
userChiI3 = 71.62  #, 2nd ionized state chi_I, eV
userChiI4 = 98.94  #, 3rd ionized state chi_I, eV
userChiL = 0.0  #, lower atomic E-level, eV
userGw1 = 2  #, ground state stat. weight or partition fn (stage I) - unitless
userGw2 = 1  #, ground state stat. weight or partition fn (stage II) - unitless
userGw3 = 1  #, ground state stat. weight or partition fn (stage III) - unitless
userGw4 = 1  #, ground state stat. weight or partition fn (stage IV) - unitless
userGwL = 2  #, lower E-level stat. weight - unitless
userMass = 22.9  #, amu
userLogGammaCol = 1.0  #, log_10 Lorentzian broadening enhancement factor
| 2,864
| 34.37037
| 132
|
py
|
ChromaStarPy
|
ChromaStarPy-master/GsCalcMaster.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 10:09:02 2019
@author:
"""
#
#
#This module can serve as the main method
#
#
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import math
import numpy
#from scipy.linalg.blas import daxpy
#from scipy.linalg.blas import ddot
#from scipy.linalg.blas import dscal
#from scipy.linalg.blas import idamax
"""
from Documents.ChromaStarPy.GAS.blas.Daxpy import daxpy
from Documents.ChromaStarPy.GAS.blas.Ddot import ddot
from Documents.ChromaStarPy.GAS.blas.Dscal import dscal
from Documents.ChromaStarPy.GAS.blas.Idamax import idamax
from Documents.ChromaStarPy.GAS.linpack.Dgesl import dgesl
from Documents.ChromaStarPy.GAS.linpack.Dgefa import dgefa
"""
from functools import reduce
import subprocess
import os
import sys
from Documents.ChromaStarPy.GAS.BlockData import *
from Documents.ChromaStarPy.GAS.GsRead2 import gsread
from Documents.ChromaStarPy.GAS.GasEst import gasest
from Documents.ChromaStarPy.GAS.Gas import gas
#import GAS
#############################################
#
#
#
# Initial set-up:
# - import all python modules
# - set input parameters
#
#
#
##############################################
#Detect python version
pythonV = sys.version_info
if pythonV[0] != 3:
print("")
print("")
print(" ********************************************* ")
print("")
print("WARNING!! WARNING!! WARNING!!")
print("")
print("")
print("ChromaStarPy/GAS developed for python V. 3!!" )
print("")
print("May not work in other version")
print("")
print("")
print("*********************************************** ")
print("")
print("")
# ---- Operating-system detection and working-directory discovery ----
thisOS = "unknown" #default
myOS= ""
#returns 'posix' from unix-like OSes and 'nt' for Windows??
thisOS = os.name
print("")
print("Running on OS: ", thisOS)
print("")
absPath0 = "./" #default
if thisOS == "nt":
    #windows: 'cd' with no args prints the current directory
    absPath0 = subprocess.check_output("cd", shell=True)
    backSpace = 2  # strip trailing '\r\n'
elif thisOS == "posix":
    absPath0 = subprocess.check_output("pwd", shell=True)
    backSpace = 1  # strip trailing '\n'
absPath0 = bytes.decode(absPath0)
#remove OS-dependent trailing characters 'r\n'
nCharsPath = len(absPath0)
nCharsPath -= backSpace
absPath0 = absPath0[0: nCharsPath]
# Normalize Windows path separators to '/':
slashIndex = absPath0.find('\\') #The first backslash is the escape character!
while slashIndex != -1:
    #python strings are immutable: rebuild with '/' substituted
    absPathCopy = absPath0[0: slashIndex]
    absPathCopy += '/'
    absPathCopy += absPath0[slashIndex+1: len(absPath0)]
    absPath0 = absPathCopy
    #print(absPathCopy, absPath0)
    slashIndex = absPath0.find('\\')
absPath = absPath0 + '/'
# ---- Plotting palette and output-file setup ----
##makePlot = Input.makePlot
#makePlot = "yes"
#print("")
#print("Will make plot: ", makePlot)
#print("")
#stop
#color palette for plt plotting
#palette = ['black', 'brown','red','orange','yellow','green','blue','indigo','violet']
#grayscale
#stop
#Grayscale: numPal evenly spaced gray levels, darkening by delPal per step
numPal = 12
palette = ['0.0' for i in range(numPal)]
delPal = 0.04
#for i in range(numPal):
#    ii = float(i)
#    helpPal = 0.481 - ii*delPal
#    palette[i] = str(helpPal)
palette = [ str( 0.481 - float(i)*delPal ) for i in range(numPal) ]
numClrs = len(palette)
#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')
outPath = absPath + "/Outputs/"
#fileStem = Input.fileStem
fileStem = "GsCalc"
#Set input pressure and isolv here for file naming:
pt = 1.0e5   # total gas pressure, encoded into the filename as its log10
isolv = 1    # solver selector, also encoded into the filename
fileStem = fileStem + "pt" + str(int(math.log10(pt))).strip() + "is" + str(isolv)
outFileString = outPath+fileStem+".out" #Report for humans
outFileString2 = outPath+fileStem+".2.out" #PPs for plotting
print(" ")
print("Writing to files ", outFileString)
print(" ")
outFile = open(outFileString, "w")
outFile2 = open(outFileString2, "w")
#program gcalc
# Python port of the FORTRAN gcalc driver; the original's COMMON blocks are
# listed below for reference.
"""
#The seven universal FORTRAN "commons"
common /consts/ pi,sbcon,kbol,cvel,gcon,hpl,hmass,t0,everg
common /gasp/ name,ip,comp,awt,nspec,natom,itab,ntab,indx,
# iprint,gsinit,print0
common /gasp2/ ipr,nch,nel,ntot,nat,zat,neut,idel,indsp,
# indzat,iat,natsp,iatsp
common /lin/ nlin1,lin1,linv1,nlin2,lin2,linv2
common /equil/ logk,logwt,it,kt,type
common /opacty/ chix,nix,nopac,ixa,ixn,opinit,opflag,opchar,iopt
common /stellr/ mstar,lstar,rstar,ms,ls,rs,teff,logg,mbol,comt
"""
#Try this: module-level stand-ins for the FORTRAN commons
# (note: 'global' at module scope is a no-op; kept from the port)
#global pi, sbcon, kbol, cvel, gcon, hpl, hmass, t0, everg # /consts/
global kbol, hmass, t0 # /consts/
global name, ip, comp, awt, nspec, natom, itab, ntab, indx, iprint, gsinit, print0 #/gasp/
global ipr, nch, nel, ntot, nat, zat, neut, idel, indsp, indzat, iat, natsp, iatsp #/gasp2/
global nlin1, lin1, linv1, nlin2, lin2, linv2 #/lin/
global logk, logwt, it, kt, type0 #equil
#global chix, nix, nopac, ixa, ixn, opinit, opflag, opchar, iopt #/opacty/
global chix, nix, ixa, ixn #/opacty/
#global mstar, lstar, rstar, ms, ls, rs, teff, logg, mbol, comt #/stellr/
# Work arrays sized as in the FORTRAN original:
outString = ""
print0 = False
p0 = [0.0e0 for i in range(40)]
pp = [0.0e0 for i in range(150)]
p = [0.0e0 for i in range(40)]
ppix = [0.0e0 for i in range(30)]
a = [0.0e0 for i in range(625)]
#c cis:
fp = [0.0e0 for i in range(150)]
#name = [0.0e0 for i in range(150)]
#ip = [0.0e0 for i in range(150)]
#comp = [0.0e0 for i in range(40)]
#awt = [0.0e0 for i in range(150)]
#itab = [0 for i in range(83)]
#ntab = [0 for i in range(5)]
#indx = [ [ [ [ [0 for i in range(2)] for j in range(5) ] for k in range(7) ] for l in range(26) ] for m in range(4) ]
#common /gasp/ name,ip,comp,awt,nspec,natom,gsinit,itab,ntab,indx
#BlockData.block_data()
#print("Calling GsRead:")
# NOTE(review): 'GAS' is not visibly imported above ('#import GAS' is commented
# out; only 'from Documents.ChromaStarPy.GAS.* import ...' appear). Unless the
# star import from BlockData provides GAS, these three lines raise NameError
# - confirm against the package layout.
GAS.GsRead2.gsread(outFile)
#GsTabl.gstabl() #necessary??
nspec = GAS.GsRead2.nspec
name = GAS.GsRead2.name
#print("GsCalc: nspec: ", nspec)
# Write the species-name header row to the plotting output file:
outString = ""
for k in range(nspec):
    outString = outString + " " + name[k]
outString+="\n"
outFile2.write(outString)
# Input parameters are now command line arguments
#c call time(1,0,mscpu)
#c cpu1=mscpu/1000.0
#c write(7,100)
# write(6,100)
# 100 format('enter: t ,p, and pe')
#c call fread(5,'3r*8:',t,pt,pe0)
# read(5, *) t, pt, pe0
#c write(7,150)
# write(6,150)
# 150 format('enter: tolerance, max # iter. and isolv')
#c call fread(5,'r*8,2i:',tol,maxit,isolv)
# read(5, *) tol, maxit, isolv
#Get and parse the command line arguments:
# sys.argv[0] is the name of the script
# Disabled: command-line parsing (parameters are hard-coded below instead).
"""
t = float(sys.argv[1])
pt = float(sys.argv[2])
pe0 = float(sys.argv[3])
tol = float(sys.argv[4])
maxit = int(sys.argv[5])
isolv = int(sys.argv[6])
print("t ", t, " pt ", pt, " pe0 ", pe0, " tol ", tol, " maxit ", maxit, " isolv ", isolv)
"""
#For now:
#t = 6000.0
# Temperature grid for the equilibrium sweep, K:
t1 = 1500.0
t2 = 6500.0
dt = 100.0
# Set this above: pt = 100000.0
#pe0 = 100.0
tol = 1.0e-4   # convergence tolerance for the gas solver
maxit = 100    # max solver iterations
# Set this above: isolv = 1
nt = (t2 - t1) / dt + 1
nt = int(nt)
pe0 = 0.01 * pt  # initial electron-pressure guess: 1% of total pressure
outString = ("%4s %12.3e\n" %("PT ", pt))
outFile.write(outString)
outFile2.write(outString)
outStringHead = ("%11s %8s %5s %4s %4s %8s\n"\
 %("t ", " rholog ", " gmu ", " fd ", " fe ", " fp(k) "))
outFile2.write(outStringHead)
#testing:
#tol = 1.0e-1
#maxit = 1
original = sys.stdout  # saved so stdout can be restored if redirection is re-enabled
#sys.stdout = open('./redirect.txt', 'w')
# Main loop: solve the chemical/ionization equilibrium at each temperature.
for k in range(nt):
    k = float(k)
    t = t1 + dt*k
    #Try making return value a tuple:
    #print("Before GasEst pe0 ", pe0)
    #gasestReturn = GasEst.gasest(isolv, t, pt, pe0)
    # First estimate of the solution at this temperature:
    gasestReturn = GAS.GasEst.gasest(isolv, t, pt)
    pe0 = gasestReturn[0]
    p0 = gasestReturn[1]
    neq = gasestReturn[2]
    #print("GsCalc: pe0 ", pe0, " p0 ", p0, " neq ", neq)
    #print("Before gas pe0 ", pe0)
    # Full iterative solution seeded with the estimate:
    gasReturn = GAS.Gas.gas(isolv, t, pt, pe0, p0, neq, tol, maxit, outFile)
    a = gasReturn[0]
    nit = gasReturn[1]
    pe = gasReturn[2]
    pd = gasReturn[3]
    pp = gasReturn[4]
    ppix = gasReturn[5]
    gmu = gasReturn[6]
    rho = gasReturn[7]
    #print("GsCalc: rho ", rho)
    #print("GsCalc: gmu ", gmu)
    rholog= math.log10(rho)
    # log partial-pressure fractions; -99 flags non-positive values:
    fd= -99.0e0
    if(pd/pt > 0.0e0):
        fd = math.log10(pd/pt)
    fe= -99.0e0
    if(pe/pt > 0.0e0):
        fe = math.log10(pe/pt)
    for n in range(nspec):
        fp[n]= -99.0e0
        if (pp[n]/pt > 0.0e0):
            fp[n]= math.log10( pp[n]/pt)
    #c write(7,200) t,rholog,gmu,fd,fe,(fp(k),k=1,nspec)
    #print("t ", t, " pt ", pt, " rholog ", rholog, " gmu ", gmu, " fd ", fd, " fe ",fe)
    #print("pp, fp:")
    #for k in range(nspec):
    #    print("k ", k, pp[k], fp[k])
    # 200 format(1x,f10.2,160f10.5)
    # Write one summary row plus one row of per-species fractions:
    outFile.write(outStringHead)
    outString = ("%11.2f %10.5f %10.5f %10.5f %10.5f\n"\
 %(t, rholog, gmu, fd, fe))
    outFile.write(outString)
    outFile2.write(outString)
    outString = ""
    for j in range(nspec):
        outString = outString + " " + str(fp[j])
    outString+="\n"
    outFile.write(outString)
    outFile2.write(outString)
#sys.stdout = original
outFile.close()
outFile2.close()
| 8,875
| 24.287749
| 118
|
py
|
ChromaStarPy
|
ChromaStarPy-master/DepthScale.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 26 16:10:24 2017
@author: ishort
"""
import math
def depthScale(numDeps, tauRos, kappa, rho):
    """Return numDeps linear geometric depths below the top of the atmosphere.

    Depths are in cm (cgs) for consistency with log(g) units.  tauRos, kappa
    and rho are 2-element structures whose [1] rows hold natural-log values
    per depth; the integrand dz/dtau = 1/(kappa*rho) is therefore assembled
    in log space as exp(tau - log(kappa) - log(rho)) and integrated with the
    trapezoid rule.
    """
    START = 1  # topmost layer skipped: its tiny rho maps to a huge geometric depth
    depths = [0.0] * numDeps
    # Effective zero point at the top; log(z) cannot correspond to exactly zero.
    zPrev = 1.0e-19  # cm
    for idx in range(START + 1):
        depths[idx] = zPrev
    # Integrand at the starting layer (avoids the upper-boundary rho):
    integrandPrev = math.exp(tauRos[1][START] - kappa[1][START] - rho[1][START])
    for idx in range(START + 1, numDeps):
        dTau = tauRos[1][idx] - tauRos[1][idx - 1]
        integrand = math.exp(tauRos[1][idx] - kappa[1][idx] - rho[1][idx])
        # Trapezoid step:
        zHere = zPrev + dTau * (0.5 * (integrandPrev + integrand))
        depths[idx] = zHere
        zPrev = zHere
        integrandPrev = integrand
    return depths
| 2,903
| 34.851852
| 184
|
py
|
ChromaStarPy
|
ChromaStarPy-master/SunCaIIHK.py
|
#
#
#Custom filename tags to distinguish from other runs
# ChromaStarPy input file: solar model, Ca II H & K region 390-400 nm.
project = "Project"  #custom filename tag to distinguish from other runs
runVers = "Run"      #custom filename tag: run version
#Default plot
#Select ONE only:
#makePlot = "structure"
#makePlot = "sed"
makePlot = "spectrum"
#makePlot = "ldc"
#makePlot = "ft"
#makePlot = "tlaLine"
#Spectrum synthesis mode
# - uses model in Restart.py with minimal structure calculation
specSynMode = False
#Model atmosphere
teff = 5777.0  #, effective temperature, K
logg = 4.44  #, log surface gravity, cgs
log10ZScale = 0.0  # [A/H], overall metallicity scaling
massStar = 1.0  #, solar masses
xiT = 1.0  #, microturbulence, km/s
logHeFe = 0.0  #, [He/Fe]
logCO = 0.0  #, [C/O]
logAlphaFe = 0.0  #, [alpha-elements/Fe]
#Spectrum synthesis wavelength range
lambdaStart = 390.0  #, nm
lambdaStop = 400.0  #, nm
#Output filename stem built from the main run parameters:
fileStem = project + "-"\
 + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
 + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
 + "-" + runVers
lineThresh = -3.0  #, min log(KapLine/kapCnt) for inclusion at all - really, being used as "lineVoigt" for now
voigtThresh = -3.0  #, min log(KapLine/kapCnt) for treatment as Voigt - currently not used - all lines get Voigt
logGammaCol = 0.5  #, log_10 collisional broadening enhancement factor
logKapFudge = 0.0  #, log_10 opacity fudge factor
macroV = 1.0  #, macroturbulence, km/s
rotV = 2.0  #, equatorial rotation speed, km/s
rotI = 90.0  #, rotation-axis inclination, degrees
RV = 0.0  #, radial velocity, km/s
vacAir = "vacuum"  # wavelength scale: "vacuum" or "air"
sampling = "fine"  # spectral sampling density
#Performance vs realism
nOuterIter = 12  #, no of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12  #, no of inner (ion fraction) - Pe iterations
ifTiO = 1  #, whether to include TiO JOLA bands in synthesis
#Gaussian filter for limb darkening curve, fourier transform
diskLambda = 500.0  #, nm
diskSigma = 0.01  #, nm
#Two-level atom and spectral line (user-defined test line)
userLam0 = 589.592  #, line center, nm
userA12 = 6.24  #, A_12 logarithmic abundance = log_10(N/N_H) + 12
userLogF = -0.495  #, log(f) oscillator strength // saturated line
userStage = 0  #, ionization stage of user species (0 (I) - 3 (IV))
userChiI1 = 5.139  #, ground state chi_I, eV
userChiI2 = 47.29  #, 1st ionized state chi_I, eV
userChiI3 = 71.62  #, 2nd ionized state chi_I, eV
userChiI4 = 98.94  #, 3rd ionized state chi_I, eV
userChiL = 0.0  #, lower atomic E-level, eV
userGw1 = 2  #, ground state stat. weight or partition fn (stage I) - unitless
userGw2 = 1  #, ground state stat. weight or partition fn (stage II) - unitless
userGw3 = 1  #, ground state stat. weight or partition fn (stage III) - unitless
userGw4 = 1  #, ground state stat. weight or partition fn (stage IV) - unitless
userGwL = 2  #, lower E-level stat. weight - unitless
userMass = 22.9  #, amu
userLogGammaCol = 1.0  #, log_10 Lorentzian broadening enhancement factor
| 2,669
| 33.230769
| 116
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Planck.py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import math
import Useful
def planck(temp, lambda2):
    """Return the natural log of the Planck function B_lambda.

    Inputs: temp - a single scalar temperature in K; lambda2 - a single
    scalar wavelength in cm.  The result is log of the B_lambda distribution
    in pure cgs units: ergs/s/cm^2/ster/cm.  Everything is assembled in log
    space to avoid overflow in the lambda^-5 prefactor.
    """
    lnLam = math.log(lambda2)
    # log of the prefactor 2*h*c^2 / lambda^5:
    lnNumer = math.log(2.0) + Useful.logH() + 2.0 * Useful.logC() - 5.0 * lnLam
    # log of the exponent argument hc/(k*lambda*T) - subtle: subtract log(T)
    # in log space before exponentiating.
    lnArg = Useful.logH() + Useful.logC() - Useful.logK() - lnLam - math.log(temp)
    hcOverKTLam = math.exp(lnArg)
    # log of the denominator (e^{hc/kTlambda} - 1):
    lnDenom = math.log(math.exp(hcOverKTLam) - 1.0)
    return lnNumer - lnDenom
#} //end method planck()
def dBdT(temp, lambda2):
    """Return the natural log of dB_lambda/dT, the first partial derivative
    of the Planck function with respect to temperature.

    Inputs: temp - scalar temperature in K; lambda2 - scalar wavelength in cm.
    Assembled in log space to avoid overflow in the lambda^-6 prefactor.
    """
    lnLam = math.log(lambda2)
    lnT = math.log(temp)
    # log of 2*h*c^2 and of hc/k:
    lnTwoHC2 = math.log(2.0) + Useful.logH() + 2.0 * Useful.logC()
    lnHCoverK = Useful.logH() + Useful.logC() - Useful.logK()
    # log of the prefactor (2*h*c^2)*(hc/k) / (lambda^6 * T^2):
    lnPrefactor = lnTwoHC2 + lnHCoverK - 6.0 * lnLam - 2.0 * lnT
    # x = hc/(k*lambda*T), exponentiated from log space:
    x = math.exp(lnHCoverK - lnLam - lnT)
    # log of (e^x - 1):
    lnDenom = math.log(math.exp(x) - 1.0)
    return lnPrefactor + x - 2.0 * lnDenom
#} //end method dBdT
| 4,329
| 35.694915
| 104
|
py
|
ChromaStarPy
|
ChromaStarPy-master/RGBNaID.py
|
#
#
#Custom filename tags to distinguish from other runs
# ChromaStarPy input file: red-giant-branch model, Na I D region 588.5-590.5 nm.
project = "Project"  #custom filename tag to distinguish from other runs
runVers = "Run"      #custom filename tag: run version
#Default plot
#Select ONE only:
#makePlot = "structure"
#makePlot = "sed"
makePlot = "spectrum"
#makePlot = "ldc"
#makePlot = "ft"
#makePlot = "tlaLine"
#Spectrum synthesis mode
# - uses model in Restart.py with minimal structure calculation
specSynMode = False
#Model atmosphere
teff = 3750.0  #, effective temperature, K
logg = 1.5  #, log surface gravity, cgs
log10ZScale = 0.0  # [A/H], overall metallicity scaling
massStar = 0.5  #, solar masses
xiT = 2.0  #, microturbulence, km/s
logHeFe = 0.0  #, [He/Fe]
logCO = 0.0  #, [C/O]
logAlphaFe = 0.0  #, [alpha-elements/Fe]
#Spectrum synthesis wavelength range
lambdaStart = 588.5  #, nm
lambdaStop = 590.5  #, nm
#Output filename stem built from the main run parameters:
fileStem = project + "-"\
 + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
 + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
 + "-" + runVers
lineThresh = -3.0  #, min log(KapLine/kapCnt) for inclusion at all - really, being used as "lineVoigt" for now
voigtThresh = -3.0  #, min log(KapLine/kapCnt) for treatment as Voigt - currently not used - all lines get Voigt
logGammaCol = 0.0  #, log_10 collisional broadening enhancement factor
logKapFudge = 0.0  #, log_10 opacity fudge factor
macroV = 4.0  #, macroturbulence, km/s
rotV = 1.0  #, equatorial rotation speed, km/s
rotI = 90.0  #, rotation-axis inclination, degrees
RV = 0.0  #, radial velocity, km/s
vacAir = "vacuum"  # wavelength scale: "vacuum" or "air"
sampling = "fine"  # spectral sampling density
#Performance vs realism
nOuterIter = 12  #, no of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12  #, no of inner (ion fraction) - Pe iterations
ifTiO = 1  #, whether to include TiO JOLA bands in synthesis
#Gaussian filter for limb darkening curve, fourier transform
diskLambda = 500.0  #, nm
diskSigma = 0.01  #, nm
#Two-level atom and spectral line (user-defined test line)
userLam0 = 589.592  #, line center, nm
userA12 = 6.24  #, A_12 logarithmic abundance = log_10(N/N_H) + 12
userLogF = -0.495  #, log(f) oscillator strength // saturated line
userStage = 0  #, ionization stage of user species (0 (I) - 3 (IV))
userChiI1 = 5.139  #, ground state chi_I, eV
userChiI2 = 47.29  #, 1st ionized state chi_I, eV
userChiI3 = 71.62  #, 2nd ionized state chi_I, eV
userChiI4 = 98.94  #, 3rd ionized state chi_I, eV
userChiL = 0.0  #, lower atomic E-level, eV
userGw1 = 2  #, ground state stat. weight or partition fn (stage I) - unitless
userGw2 = 1  #, ground state stat. weight or partition fn (stage II) - unitless
userGw3 = 1  #, ground state stat. weight or partition fn (stage III) - unitless
userGw4 = 1  #, ground state stat. weight or partition fn (stage IV) - unitless
userGwL = 2  #, lower E-level stat. weight - unitless
userMass = 22.9  #, amu
userLogGammaCol = 1.0  #, log_10 Lorentzian broadening enhancement factor
| 32.797468
| 116
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Convec.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 11 14:40:19 2017
@author: ishort
"""
import math
import Useful
def convec(numDeps, tauRos, depths, temp, press, rho, kappa, kappaSun,
           zScale, teff, logg, mmw):
    """Replace the radiative-equilibrium T(tau) with a (super-)adiabatic
    temperature structure below the top of the convection zone.

    Parameters
    ----------
    numDeps : int
        Number of depth points.
    tauRos, temp, press, rho, kappa : 2 x numDeps lists
        Row 0 holds linear values, row 1 holds natural-log values
        (convention evident from the math.log/exp usage below).
    depths, kappaSun, zScale : unused here; kept for interface compatibility.
    teff, logg : float
        Effective temperature (K) and log_10 surface gravity (cgs).
    mmw : list
        Mean molecular weight in amu at each depth.

    Returns
    -------
    convTemp : 2 x numDeps list
        Row 0 linear temperature, row 1 its natural log; equal to ``temp``
        above the convective boundary, (super-)adiabatic below it.
    """
    logE = math.log10(math.E)  # for debug output
    ln10 = math.log(10.0)  # to convert logg from base 10 to base e
    convTemp = [[0.0 for i in range(numDeps)] for j in range(2)]

    # Schwarzschild criterion for convective instability:
    gamma = 5.0 / 3.0  # adiabatic gamma for ideal monatomic gas - photon gas negligible in stars w/ convection
    gammaFac = gamma / (gamma - 1.0)  # yes, it's 2.5 - kept symbolic to show where it comes from
    invGamFac = 1.0 / gammaFac

    # Pressure scale height as a reality check (HpSun approximately as
    # computed by GrayStar at depth index = 36):
    HpSun = 2.0e7  # cm
    HpRefDep = 36  # index of reference depth for computing pressure scale height
    logHp = press[1][HpRefDep] - rho[1][HpRefDep] - ln10 * logg
    Hp = math.exp(logHp)

    # Search for the top of the convection zone, scanning upward from the
    # second-deepest layer.  Uses a "2nd order" finite difference with
    # non-uniform spacing in deltaT (1st order proved erratic).
    isStable = False
    iBound = numDeps - 1  # depth index where convection begins; default: bottom of atmosphere
    for i in range(numDeps - 2, 0, -1):
        deltaP = press[1][i + 1] - press[1][i - 1]
        deltaT = temp[1][i + 1] - temp[1][i - 1]
        deltaMu = (mmw[i + 1] - mmw[i]) * Useful.amu
        dlnTdlnP = deltaT / deltaP
        dlnMudlnP = deltaMu / deltaP
        if (dlnTdlnP < invGamFac + dlnMudlnP):
            # Convectively stable layer
            if (isStable == False):
                # BUG FIX: was `isStable = true` (lowercase) - a NameError
                # the first time any stable layer was found.
                isStable = True
                iBound = i

    # Radiative zone - leave temperatures alone:
    for i in range(iBound):
        convTemp[0][i] = temp[0][i]
        convTemp[1][i] = temp[1][i]

    baseTemp = temp[0][iBound]
    baseLogTemp = temp[1][iBound]
    baseTau = tauRos[0][iBound]
    baseLogTau = tauRos[1][iBound]

    # Project helpers - presumably natural logs of the Stefan-Boltzmann
    # constant, Boltzmann constant, and atomic mass unit (cgs); confirm in Useful.py.
    logSigma = Useful.logSigma()
    logK = Useful.logK()
    logAmu = Useful.logAmu()

    mixLSun = 1.0  # convective mixing length in pressure scale heights (H_P)
    betaSun = 0.5  # factor for square of convective bubble velocity (range: 0.0 - 1.0)
    mixL = mixLSun  # initialization
    beta = betaSun  # initialization
    teffSun = 5778.0
    loggSun = 4.44
    # "Shameless fix": mixL and beta seem to need temp and press dependence.
    if (teff < teffSun):
        mixL = mixLSun * math.pow(teff / teffSun, 4.0)  # lower teff -> smaller mixL -> steeper SAdGrad
        beta = betaSun * math.pow(teff / teffSun, 4.0)  # lower teff -> smaller beta -> steeper SAdGrad
    # NOTE(review): source indentation was lost in transit; per the comment
    # sign convention these logg scalings appear unconditional - confirm
    # against upstream Convec.py.
    mixL = mixL * math.pow(loggSun / logg, 2.0)  # lower logg -> larger mixL -> smaller sAdGrad
    beta = beta * math.pow(loggSun / logg, 2.0)  # lower logg -> larger beta -> smaller sAdGrad

    logMixL = math.log(mixL)
    logBeta = math.log(beta)
    logFluxSurfBol = logSigma + 4.0 * math.log(teff)
    logInvGamFac = math.log(invGamFac)

    lastTemp = baseTemp
    lastLogTau = baseLogTau

    # Build T(tau) in the convection zone, working in log_e(tau) space
    # (numerically safer).  logTau increases inward, hence no minus signs
    # on the gradient magnitudes.
    for i in range(iBound, numDeps):
        mu = mmw[i]
        logMu = math.log(mu)
        logFctr1 = logMu + logAmu - logK
        # Ideal monatomic gas - underestimate that neglects partial ionization:
        logCp = math.log(5.0 / 2.0) - logFctr1

        # ** Caution: these are log_e of the *magnitude* of the T gradients.
        # Adiabatic dT/dr:
        logAdGradRMag = logInvGamFac + logFctr1 + ln10 * logg  # logg is in base 10
        # Adiabatic dT/dtau: divide dT/dr by rho and kappa
        # (caveat from original author: the tauRos scale is not consistent
        # with these kappa values).
        logAdGradTauMag = logAdGradRMag - rho[1][i] - kappa[1][i]
        # Adiabatic dT/dln(tau):
        logAdGradLogTauMag = tauRos[1][i] + logAdGradTauMag
        adGradLogTau = math.exp(logAdGradLogTauMag)
        # Purely adiabatic alternative, kept for reference (unused):
        nextTemp = lastTemp + adGradLogTau * (tauRos[1][i] - lastLogTau)

        # Difference between superadiabatic and adiabatic dT/dr,
        # Carroll & Ostlie 2nd Ed. p. 328:
        logFctr2 = rho[1][i] + logCp + 2.0 * logMixL
        # ** NOTE ** should temp here be the *convective* temp of the last depth?
        logFctr3 = 3.0 * (ln10 * logg - math.log(lastTemp)) / 2.0
        logDeltaR = logFluxSurfBol - logFctr2 + 2.0 * logFctr1 + logFctr3 - 0.5 * logBeta
        logDeltaR = 2.0 * logDeltaR / 3.0  # DeltaR is above formula to the 2/3 power
        logDeltaTau = logDeltaR - rho[1][i] - kappa[1][i]
        logDeltaLogTau = tauRos[1][i] + logDeltaTau

        sAdGradLogTau = adGradLogTau + math.exp(logDeltaLogTau)
        nextTemp2 = lastTemp + sAdGradLogTau * (tauRos[1][i] - lastLogTau)

        # Use nextTemp2 for superadiabatic dT/dr (nextTemp for pure adiabatic):
        convTemp[0][i] = nextTemp2
        convTemp[1][i] = math.log(nextTemp2)
        lastTemp = nextTemp2
        lastLogTau = tauRos[1][i]

    return convTemp
| 10,216
| 44.008811
| 222
|
py
|
ChromaStarPy
|
ChromaStarPy-master/PPressPlot.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 10:54:21 2017

@author: ishort

Plot depth-wise partial pressures of selected chemical species (relative
to total gas pressure) against Rosseland optical depth, read from a
ChromaStarPy ".ppress.txt" output file, and save the figure as EPS.
"""

#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import pylab
from functools import reduce
import subprocess
import os
import sys

#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')

# --- Recover the current working directory portably ---
thisOS = "unknown"  #default
myOS = ""
#os.name returns 'posix' for unix-like OSes and 'nt' for Windows
thisOS = os.name
print("")
print("Running on OS: ", thisOS)
print("")

absPath0 = "./"  #default
if thisOS == "nt":
    #windows
    absPath0 = subprocess.check_output("cd", shell=True)
    backSpace = 2
elif thisOS == "posix":
    absPath0 = subprocess.check_output("pwd", shell=True)
    backSpace = 1

absPath0 = bytes.decode(absPath0)
#remove OS-dependent trailing characters ('\r\n' on Windows, '\n' on posix)
nCharsPath = len(absPath0)
nCharsPath -= backSpace
absPath0 = absPath0[0: nCharsPath]

#Convert any Windows backslashes to forward slashes
slashIndex = absPath0.find('\\')  #The first backslash is the escape character!
while slashIndex != -1:
    #python strings are immutable:
    absPathCopy = absPath0[0: slashIndex]
    absPathCopy += '/'
    absPathCopy += absPath0[slashIndex+1: len(absPath0)]
    absPath0 = absPathCopy
    slashIndex = absPath0.find('\\')

absPath = absPath0 + '/'

#Now get the partial-pressure file pre-computed with ChromaStarPy
modelPath = absPath + "Outputs/"

# Run parameters - must match the file name of the run being plotted:
project = "Project"
runVers = "RunGas"
teff = 3600.0
logg = 1.0
log10ZScale = 0.0
lambdaStart = 695.0
lambdaStop = 700.0

fileStem = project + "-"\
    + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
    + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
    + "-" + runVers
inFile = modelPath + fileStem + ".ppress.txt"

#Neutral species to plot (solid lines) and their colours:
whichSpec = ["C", "N", "O", "Na", "Mg", "Si", "S", "K", "Ca", "Fe"]
colrSpec = ["black", "brown", "red", "orange", "yellow", "green", "blue", "indigo", "violet", "gray"]
#Ionic species to plot (dashed lines) and their colours:
whichIon = ["H-", "Na+", "Mg+", "Si+", "S+", "K+", "Ca+", "Fe+"]
colrIon = ["black", "orange", "yellow", "green", "blue", "indigo", "violet", "gray"]

thisSpec = 0  #default initialization (H)
numSampleDepths = 48
#numSampleDepths = 2 #debug
numSpecies = 105
#numSpecies = 3 #debug

species = [0.0 for i in range(numSpecies)]
logTau = [0.0 for i in range(numSampleDepths)]
logTkin = [0.0 for i in range(numSampleDepths)]
logPGas = [0.0 for i in range(numSampleDepths)]
logPe = [0.0 for i in range(numSampleDepths)]
logPP = [[0.0 for j in range(numSpecies)] for i in range(numSampleDepths)]

fileTeff = 0.0
fileLogg = 0.0
fileLogZ = 0.0

with open(inFile, 'r') as inputHandle:
    #First line is a header carrying the stellar parameters of the run
    inLine = inputHandle.readline()
    print(inLine)
    fields = inLine.split()
    fileTeff = float(fields[1].strip())
    fileLogg = float(fields[3].strip())
    #BUG FIX: this value was assigned to an unused variable 'fileZ',
    #leaving fileLogZ at 0.0 so the metallicity mismatch check below
    #could never trigger.
    fileLogZ = float(fields[5].strip())
    if ((fileTeff != teff) or
            (fileLogg != logg) or
            (fileLogZ != log10ZScale)):
        print(" ")
        print(" !!!!!!!!!!!!!!!!!!!!!!")
        print(" ")
        print("Mismatch between input file name and stellar paramters in input file!")
        print(" ")
        print(" !!!!!!!!!!!!!!!!!!!!!!")
        print(" ")

    #Column-header line
    inLine = inputHandle.readline()
    print(inLine)

    #Each depthwise record is two lines:
    # line 1 has depth and environmental parameters
    # line 2 has specieswise partial pressures
    for i in range(numSampleDepths):
        inLine1 = inputHandle.readline()
        fields = inLine1.split()
        logTau[i] = float(fields[1].strip())
        logTkin[i] = float(fields[3].strip())
        logPGas[i] = float(fields[6].strip())
        logPe[i] = float(fields[9].strip())
        #Relative to total gas pressure for plot:
        logPe[i] = logPe[i] - logPGas[i]
        inLine2 = inputHandle.readline()
        fields = inLine2.split()
        for j in range(numSpecies):
            species[j] = fields[2*j].strip()
            logPP[i][j] = float(fields[(2*j) + 1].strip())
            #Relative to total gas pressure for plot:
            logPP[i][j] = logPP[i][j] - logPGas[i]

# --- Plot the partial pressures ---
plt.figure()
plt.subplot(1, 1, 1)
plt.ylabel(r'$\log_{10} (P/P_{\rm H})$', fontsize=14)
plt.xlabel(r'$\log_{10}\tau_{\rm Ros}$', fontsize=14)
xMin = min(logTau)
xMax = max(logTau)
pylab.xlim(xMin, xMax)
pylab.ylim(-10.0, -1.0)

#Neutral species - solid lines, labelled near the top of the atmosphere:
colr = 0
for wS in whichSpec:
    for i in range(numSpecies):
        if (species[i] == wS):
            thisSpec = i
    print("Species: ", species[thisSpec])
    # skip first depth point [i=0] - upper boundary condition:
    pylab.plot([logTau[i] for i in range(1, numSampleDepths)],
               [logPP[i][thisSpec] for i in range(1, numSampleDepths)],
               color=colrSpec[colr], linewidth=2)
    pylab.text(logTau[4], logPP[4][thisSpec], species[thisSpec],
               color=colrSpec[colr], fontsize=13, weight='bold')
    colr += 1

#Electron pressure - black circles:
# skip first depth point [i=0] - upper boundary condition:
pylab.plot([logTau[i] for i in range(1, numSampleDepths)],
           [logPe[i] for i in range(1, numSampleDepths)],
           'o', color='black')
pylab.text(logTau[numSampleDepths-8], logPe[numSampleDepths-8], 'e-',
           color='black', fontsize=13, weight='bold')

#Ions - dashed lines, labelled near the bottom of the atmosphere:
colr = 0
for wI in whichIon:
    for i in range(numSpecies):
        if (species[i] == wI):
            thisSpec = i
    print("Species: ", species[thisSpec])
    # skip first depth point [i=0] - upper boundary condition:
    pylab.plot([logTau[i] for i in range(1, numSampleDepths)],
               [logPP[i][thisSpec] for i in range(1, numSampleDepths)],
               '--', color=colrIon[colr], linewidth=2)
    pylab.text(logTau[numSampleDepths-4], logPP[numSampleDepths-4][thisSpec],
               species[thisSpec], color=colrIon[colr], fontsize=13, weight='bold')
    colr += 1

#Save as encapsulated postscript (eps) for LaTex
epsName = fileStem + ".eps"
plt.savefig(epsName, format='eps', dpi=1000)
| 6,870
| 30.810185
| 107
|
py
|
ChromaStarPy
|
ChromaStarPy-master/IonizationEnergy.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 17:12:02 2017
@author: ishort
"""
# Ground-state ionization energies in eV, keyed by species label
# (element symbol + Roman-numeral stage, e.g. "CaII").
# From the NIST Atomic Spectra Database - Ionization Energies Data:
# Kramida, A., Ralchenko, Yu., Reader, J., and NIST ASD Team (2014).
# NIST Atomic Spectra Database (ver. 5.2), http://physics.nist.gov/asd
# [2015, November 23]. NIST, Gaithersburg, MD.
# Heaviest element treatable: La.  Stages that don't exist (e.g. "HIII")
# are omitted and fall through to the very large default below.
_ION_E_EV = {
    "HI": 13.598434005136,
    "HeI": 24.587387936, "HeII": 54.417763110,
    "LiI": 5.391714761, "LiII": 75.6400937, "LiIII": 122.45435380,
    "BeI": 9.3226990, "BeII": 18.211153, "BeIII": 153.8961980, "BeIV": 217.7185766,
    "BI": 8.2980190, "BII": 25.154830, "BIII": 37.930580, "BIV": 259.3715, "BV": 340.2260080,
    "CI": 11.260300, "CII": 24.38450, "CIII": 47.88778, "CIV": 64.49351, "CV": 392.090500, "CVI": 489.9931770,
    "NI": 14.534130, "NII": 29.601250, "NIII": 47.4453, "NIV": 77.47350, "NV": 97.89013, "NVI": 552.067310,
    "OI": 13.6180540, "OII": 35.121110, "OIII": 54.93554, "OIV": 77.41350, "OV": 113.89890, "OVI": 138.1189,
    "FI": 17.422820, "FII": 34.97081, "FIII": 62.70800, "FIV": 87.175, "FV": 114.2490, "FVI": 157.16310,
    "NeI": 21.5645400, "NeII": 40.962960, "NeIII": 63.42331, "NeIV": 97.1900, "NeV": 126.247, "NeVI": 157.9340,
    "NaI": 5.13907670, "NaII": 47.28636, "NaIII": 71.6200, "NaIV": 98.936, "NaV": 138.400, "NaVI": 172.228,
    "MgI": 7.6462350, "MgII": 15.0352670, "MgIII": 80.14360, "MgIV": 109.2654, "MgV": 141.335, "MgVI": 186.760,
    "AlI": 5.9857684, "AlII": 18.828550, "AlIII": 28.447640, "AlIV": 119.9924, "AlV": 153.8252, "AlVI": 190.490,
    "SiI": 8.151683, "SiII": 16.345845, "SiIII": 33.493000, "SiIV": 45.141790, "SiV": 166.7670, "SiVI": 205.267,
    "PI": 10.486686, "PII": 19.769490, "PIII": 30.202640, "PIV": 51.44387, "PV": 65.02511, "PVI": 220.4304,
    "SI": 10.36001, "SII": 23.33788, "SIII": 34.856, "SIV": 47.222, "SV": 72.59449, "SVI": 88.05292,
    "ClI": 12.967632, "ClII": 23.81364, "ClIII": 39.80, "ClIV": 53.24, "ClV": 67.68, "ClVI": 96.940,
    "ArI": 15.75961120, "ArII": 27.62967, "ArIII": 40.735, "ArIV": 59.58, "ArV": 74.84, "ArVI": 91.290,
    "KI": 4.340663540, "KII": 31.62500, "KIII": 45.8031, "KIV": 60.917, "KV": 82.66, "KVI": 99.40,
    "CaI": 6.11315520, "CaII": 11.8717180, "CaIII": 50.91315, "CaIV": 67.273, "CaV": 84.338, "CaVI": 108.78,
    "ScI": 6.561490, "ScII": 12.79977, "ScIII": 24.756838, "ScIV": 73.48940, "ScV": 91.949, "ScVI": 110.680,
    "TiI": 6.828120, "TiII": 13.5755, "TiIII": 27.49171, "TiIV": 43.26717, "TiV": 99.300, "TiVI": 119.530,
    "VI": 6.746187, "VII": 14.6200, "VIII": 29.3110, "VIV": 46.7090, "VV": 65.28165, "VVI": 128.130,
    "CrI": 6.766510, "CrII": 16.486305, "CrIII": 30.960, "CrIV": 49.160, "CrV": 69.460, "CrVI": 90.63500,
    "MnI": 7.4340377, "MnII": 15.639990, "MnIII": 33.668, "MnIV": 51.20, "MnV": 72.40, "MnVI": 95.600,
    "FeI": 7.9024678, "FeII": 16.199200, "FeIII": 30.651, "FeIV": 54.910, "FeV": 75.00, "FeVI": 98.985,
    # BUG FIX: the original tested the misspelled key "CoII)" (stray
    # parenthesis), so Co II always fell through to the default.
    "CoI": 7.88101, "CoII": 17.0844, "CoIII": 33.500, "CoIV": 51.27, "CoV": 79.50, "CoVI": 102.00,
    "NiI": 7.639877, "NiII": 18.168837, "NiIII": 35.190, "NiIV": 54.90, "NiV": 76.060, "NiVI": 108.0,
    "CuI": 7.7263800, "CuII": 20.292390, "CuIII": 36.841, "CuIV": 57.380, "CuV": 79.80, "CuVI": 103.0,
    "ZnI": 9.3941970, "ZnII": 17.96439, "ZnIII": 39.72300, "ZnIV": 59.573, "ZnV": 82.60, "ZnVI": 108.0,
    "GaI": 5.9993018, "GaII": 20.51514, "GaIII": 30.72600, "GaIV": 63.2410, "GaV": 86.01, "GaVI": 112.7,
    "GeI": 7.899435, "GeII": 15.934610, "GeIII": 34.0576, "GeIV": 45.7150, "GeV": 90.500, "GeVI": 115.90,
    "KrI": 13.9996049, "KrII": 24.35984, "KrIII": 35.838, "KrIV": 50.85, "KrV": 64.69, "KrVI": 78.49,
    "RbI": 4.1771280, "RbII": 27.289540, "RbIII": 39.2470, "RbIV": 52.20, "RbV": 68.40, "RbVI": 82.9,
    "SrI": 5.69486720, "SrII": 11.0302760, "SrIII": 42.88353, "SrIV": 56.2800, "SrV": 71.00, "SrVI": 88.0,
    "YI": 6.21726, "YII": 12.22400, "YIII": 20.52441, "YIV": 60.6070, "YV": 74.97, "YVI": 91.390,
    "ZrI": 6.633900, "ZrII": 13.13, "ZrIII": 23.1700, "ZrIV": 34.418360, "ZrV": 80.3480, "ZrVI": 96.383,
    "NbI": 6.758850, "NbII": 14.32, "NbIII": 25.0, "NbIV": 37.611, "NbV": 50.5728, "NbVI": 102.0690,
    "CsI": 3.893905548, "CsII": 23.157450, "CsIII": 33.1950, "CsIV": 43.0, "CsV": 56.0, "CsVI": 69.1,
    "BaI": 5.2116640, "BaII": 10.003826, "BaIII": 35.8400, "BaIV": 47.03, "BaV": 58.0, "BaVI": 71.0,
    "LaI": 5.57690, "LaII": 11.184920, "LaIII": 19.17730, "LaIV": 49.950, "LaV": 61.60, "LaVI": 74.0,
}


def getIonE(species):
    """Return the ground-state ionization energy of *species* in eV.

    Parameters
    ----------
    species : str
        Species label: element symbol + Roman-numeral ionization stage
        (e.g. "HI", "CaII", "FeVI").

    Returns
    -------
    float
        Ionization energy in eV, or the sentinel 999999.0 for unknown
        species and for ionization stages that don't exist (e.g. "HIII").
    """
    return _ION_E_EV.get(species, 999999.0)
# end of method getIonE
# Molecular dissociation energies in eV, keyed by molecule name.
# From Allen's Astrophysical Quantities, 4th Ed.
_DISS_E_EV = {
    "H2": 4.4781,
    "H2+": 2.6507,
    "C2": 6.296,
    "CH": 3.465,
    "CO": 11.092,
    "CN": 7.76,
    "N2": 9.759,
    "NH": 3.47,
    "NO": 6.497,
    "O2": 5.116,
    "OH": 4.392,
    "MgH": 1.34,
    "SiO": 8.26,
    "CaH": 1.70,
    "CaO": 4.8,
    "TiO": 6.87,
    "VO": 6.4,
    "FeO": 4.20,
}


def getDissE(species):
    """Return the dissociation energy of molecule *species* in eV.

    Unrecognized molecules get the default of 8.0 eV.
    """
    return _DISS_E_EV.get(species, 8.0)
# end of method getDissE
| 14,900
| 16.953012
| 246
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Dscal.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 17 16:20:21 2019
@author:
"""
import math
"""
*> \brief \b DSCAL
*
* =========== DOCUMENTATION ===========
*
* Online html documentation available at
* http://www.netlib.org/lapack/explore-html/
*
* Definition:
* ===========
*
* SUBROUTINE DSCAL(N,DA,DX,INCX)
*
* .. Scalar Arguments ..
* DOUBLE PRECISION DA
* INTEGER INCX,N
* ..
* .. Array Arguments ..
* DOUBLE PRECISION DX(*)
* ..
*
*
*> \par Purpose:
* =============
*>
*> \verbatim
*>
*> DSCAL scales a vector by a constant.
*> uses unrolled loops for increment equal to 1.
*> \endverbatim
*
* Arguments:
* ==========
*
*> \param[in] N
*> \verbatim
*> N is INTEGER
*> number of elements in input vector(s)
*> \endverbatim
*>
*> \param[in] DA
*> \verbatim
*> DA is DOUBLE PRECISION
*> On entry, DA specifies the scalar alpha.
*> \endverbatim
*>
*> \param[in,out] DX
*> \verbatim
*> DX is DOUBLE PRECISION array, dimension ( 1 + ( N - 1)*abs( INCX ) )
*> \endverbatim
*>
*> \param[in] INCX
*> \verbatim
*> INCX is INTEGER
*> storage spacing between elements of DX
*> \endverbatim
*
* Authors:
* ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date November 2017
*
*> \ingroup double_blas_level1
*
*> \par Further Details:
* =====================
*>
*> \verbatim
*>
*> jack dongarra, linpack, 3/11/78.
*> modified 3/93 to return if incx .le. 0.
*> modified 12/3/93, array(1) declarations changed to array(*)
*> \endverbatim
*>
* =====================================================================
"""
#SUBROUTINE dscal(N,DA,DX,INCX)
def dscal(n, da, dx, incx):
    """Scale a vector by a constant, in place (reference BLAS DSCAL).

    Multiplies every strided element of ``dx`` by ``da``:
    ``dx[0], dx[incx], dx[2*incx], ...`` for ``n`` elements.

    Parameters
    ----------
    n : int
        Number of elements to scale; nothing happens if n <= 0.
    da : float
        The scalar multiplier.
    dx : list of float
        The vector, modified in place.
    incx : int
        Storage spacing between elements; nothing happens if incx <= 0.

    Returns
    -------
    The same ``dx`` list (mutated in place).

    Note: the Fortran original unrolls the incx == 1 case in groups of
    five; element-wise the result is identical to the single strided
    loop used here.
    """
    if n > 0 and incx > 0:
        for idx in range(0, n * incx, incx):
            dx[idx] = da * dx[idx]
    return dx
| 4,433
| 22.967568
| 80
|
py
|
ChromaStarPy
|
ChromaStarPy-master/LineListPy.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 1 16:07:45 2017
@author: ishort
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 C. Ian Short
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/"""
import math
#// Argument 0: Name of ascii input line list
#// Argument 1: Name of byte data output line list
#asciiListStr = args[0]
#byteListStr = args[1]
# Base names of the ASCII input line list and the byte-encoded output list.
asciiListStr = "atomLineListFeb2017"
byteListStr = "atomLineListFeb2017Bytes"
# Conversion constants between natural and base-10 logarithms.
logE = math.log10(math.e) #// for debug output
logE10 = math.log(10.0) #//natural log of 10
#//
#//These atomic and molecular data are really just here for the human reader's
#// reference - they're not actually used by the code:
#////
#////Abundance table adapted from PHOENIX V. 15 input bash file
#////Solar abundances:
#//// c='abundances, Anders & Grevesse',
#//
# Number of chemical elements in the reference tables below.
# NOTE: as stated above, these data are kept for the human reader's
# reference only - they are not used elsewhere in this script.
nelemAbnd = 41

# Kurucz species codes (atomic number * 100); entry i corresponds to the
# element symbol cname[i].  The last few entries are intentionally out of
# atomic-number order (Ba, La, Cs, Ge were appended later).
nome = [
    100, 200, 300, 400, 500, 600, 700, 800, 900, 1000,
    1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000,
    2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900, 3000,
    3100, 3600, 3700, 3800, 3900, 4000, 4100, 5600, 5700, 5500,
    3200,
]

# Chemical element symbols, index-aligned with nome.
cname = [
    "H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne",
    "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar", "K", "Ca",
    "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn",
    "Ga", "Kr", "Rb", "Sr", "Y", "Zr", "Nb", "Ba", "La", "Cs",
    "Ge",
]
#String species;
#//
#// FILE I/O Section
#//
#//External line list input file approach:
# Location of the ASCII NIST line list dump to be converted.
dataPath = "./InputData/"
lineListFile = dataPath + asciiListStr + ".dat"

# Slurp the entire line list into one big string - we'll sort it out later.
# A context manager replaces the original try/finally pair, which (a) raised
# NameError instead of the real error when open() itself failed (fHandle was
# never bound before the finally block ran) and (b) rebuilt the string with
# quadratic line-by-line concatenation.  read() returns the identical string:
# the old loop appended every readline() result, newlines included, until EOF.
with open(lineListFile, 'r', encoding='utf-8') as fHandle:
    masterLineString = fHandle.read()
#System.out.println(" *********************************************** ");
#System.out.println(" ");
#System.out.println(" ");
#System.out.println("AFTER FILE READ");
#System.out.println(" ");
#System.out.println(" ");
#System.out.println(" *********************************************** ");
#Split string on new line character "\n":
# Break the slurped file into individual lines for indexed access.
arrayLineString = masterLineString.split("\n")
#//Number of lines MUST be the ONLY entry on the first line
numLineList = int(arrayLineString[0])
#//System.out.println("arrayLineString[0] " + arrayLineString[0]);
list2Length = len(arrayLineString) - 1 #//useful for checking if something's wrong?
#System.out.println("numLineList " + numLineList + " list2Length " + list2Length);
#// for (int i = 0; i < 5; i++){
#// System.out.println(arrayLineString[i]);
#// }
#// In general there will be header information. The first block of six lines (blank separartor line
#//followed by five data lines) must be immediately preceded by a line whose first six columns contain
#//the string "START:", followed by the correct pipe symbol ("|") separators
startKey = "START:"
testField= ""
startLine = 1 #//initialization
# Scan past the header until the sentinel "START:" line; startLine ends up
# as the index of that sentinel line (the loop breaks before incrementing).
for i in range(1, list2Length):
    #print("i " + i + " arrayLineString[i] " + arrayLineString[i]);
    # NOTE(review): echoes every header line scanned - presumably leftover
    # debug output; confirm it is wanted before removing.
    print(arrayLineString[i])
    #testField = arrayLineString[i].substring(0, 6);
    testField = arrayLineString[i][0:6]
    if (testField == startKey):
        break #//We found it
    startLine+=1
#// startLine++; //one more
#System.out.println("list2Length " + list2Length + " numLineList " + numLineList + " startLine " + startLine);
#//// Find seven field separators ("|"):
#//int lastBound = 0; //initialization
#//int[] bounds = new int[7];
#//for (int i = 0; i < 7; i++){
#// bounds[i] = arrayLineString[startLine].indexOf("|", lastBound);
#// lastBound = bounds[i]+1;
#// //System.out.println("i " + i + " bounds[i] " + bounds[i]);
#// }
#//Okay, here we go:
# --- Set-up for the main NIST-line-list parsing loop below ---------------
# The commented-out "String ..."/"double ..." lines are remnants of the
# original Java version, kept as historical notes only.
print("numLineList ", numLineList)
#String list2Element; // = new String[numLineList]; //element
#String list2LogGammaCol; // = new double[numLineList];
#//Log of unitless oscillator strength, f
#double list2Logf; // = new double[numLineList];
#//Einstein coefficinet for spontaneous de-excitation
#double list2LogAij; // = new double[numLineList];
#//Unitless statisital weight, lower E-level of b-b transition
#double list2GwL; // = new double[numLineList];
#//Atomic Data sources:
# Count of spectral-line records actually processed so far.
list2_ptr = 0 #//pointer into line list2 that we're populating
#int array_ptr; //pointer into array containing line list2 data file line-by-line Strings
#//First line in block of six is always a blank separator line:
# Each NIST block spans 6 input lines (1 blank separator + 5 data lines).
# NOTE(review): the trailing "- 1" drops the last block - presumably because
# it may be incomplete; confirm against a real NIST ascii dump.
numBlocks = int((list2Length - (startLine+1))/6) - 1
#// int rmndr = (list2Length - (startLine+1)) % 6
rmndr = 0 #//for now - something's wrong
#System.out.println("numBlocks " + numBlocks + " rmndr " + rmndr);
#String myString, myStringUp, elName; //useful helper
#double log10gf, Jnumer, Jdenom, Jfinal;
#int testLength, thisUpperBound;
#boolean blankFlag;
# Output field/record separators for the re-emitted stream.
newField = " | " #//field separator - consistent with NIST ascii output
newRecord = "%%" #//record separator
masterStringOut = "" #//initialize master string for output
numFields = 12 #//number of "|"-separated INPUT fields in NIST ascii dump
#// Input filds:
#// 0: element + ion stage, 1: lambda_0, 2: A_ij, 3: f, 4: log(gf), 5: "Acc." - ??, 6: E_i - E_j, 7: J_i, 8: J_j
thisRecord = ["" for i in range(numFields)]
subFields = ["" for i in range(2)]
# Main loop: walk the fixed-size NIST blocks and re-emit the fields we need
# as a " | "-separated, "%%"-terminated record stream in masterStringOut.
for iBlock in range(numBlocks):
    offset = startLine + 6 * iBlock + 1
    # Line 0 of each block is the blank separator; lines 1..5 carry data.
    for i in range(1, 6):
        array_ptr = offset + i
        #//System.out.println("i " + i + " array_ptr " + array_ptr);
        #//System.out.println("arrayLineString " + arrayLineString[array_ptr]);
        #//"|" turns out to mean something in regexp, so we need to escape with '\\':
        #//Get the chemical element symbol - we don't know if it's one or two characters
        thisRecord = arrayLineString[array_ptr].split("|")
        #//
        #// "|"-separated field [0] is the species - element AND ion. stage
        #//
        testField = thisRecord[0]
        #//Contains both chemical symbol and ionization stage, so have to "sub-split":
        testField = testField.strip()
        subFields = testField.split(" ")
        myString = subFields[0]
        #//System.out.println("element " + myString);
        list2Element = myString.strip()
        masterStringOut = masterStringOut + list2Element + newField
        myString = subFields[1]
        #//System.out.println("ion " + myString.trim());
        #//list2StageRoman[list2_ptr] = myString.trim();
        masterStringOut = masterStringOut + myString.strip() + newField
        #//
        #// "|"-separated field [1] is wavelength in nm
        #//
        myString = thisRecord[1]
        #//We need to be ready for blank fields - checking for this in Java is hard!
        #//testLength = bounds[1] - bounds[0];
        blankFlag = True
        if (len(myString.strip()) > 0):
            blankFlag = False
        if (blankFlag):
            # NOTE(review): " ".strip() below emits an EMPTY wavelength field
            # for blank input - presumably the downstream reader tolerates it.
            myString = " "
        #// else {
        #// myString.trim();
        #// list2Lam0[list2_ptr] = Double.parseDouble(myString);
        #// }
        #//System.out.println("lambda " + myString.trim());
        masterStringOut = masterStringOut + myString.strip() + newField
        #//
        #// "|"-separated field [2] is Einstein A_ij coeffcient for spontaneous de-excitation:
        #//
        myString = thisRecord[2];
        #//We need to be ready for blank fields - checking for this in Java is hard!
        #//testLength = bounds[1] - bounds[0];
        blankFlag = True
        if (len(myString.strip()) > 0):
            blankFlag = False
        if (blankFlag):
            # Sentinel log10(A_ij) marking a missing A-value.
            myString = "-19.0"
        else:
            # NOTE(review): bare myString.strip() discards its result (str is
            # immutable) - harmless, since float() ignores surrounding blanks.
            myString.strip()
            list2LogAij = math.log10(float(myString)) #//careful - base 10 log of f
            myString = str(list2LogAij)
        #//System.out.println("logAji " + myString.trim());
        masterStringOut = masterStringOut + myString.strip() + newField;
        #//
        #// "|"-separated field [3] is oscillator strength f_ij:
        #//
        myString = thisRecord[3]
        #//We need to be ready for blank fields - checking for this in Java is hard!
        #//testLength = bounds[3] - bounds[2];
        blankFlag = True
        if (len(myString.strip()) > 0):
            blankFlag = False
        if (blankFlag == True):
            list2Logf = -9.0 #//careful - base 10 log of f
            myString = "-9.0"
        else:
            myString.strip()
            list2Logf = math.log10(float(myString)) #//careful - base 10 log of f
            myString = str(list2Logf)
        #//System.out.println("log(f) " + myString.trim());
        masterStringOut = masterStringOut + myString.strip() + newField
        #////
        #//// "|"-separated field [4] is log_10 gf_ij ("log gf") -
        #////
        #//// Originally needed to recover "g_i" from f_ij and log(gf)
        #//// - We may not need this anymore - latest line list has "g_i" values
        #////
        #// //process this so we can back out the statistical weight, g_l of the lower E-level (heh-heh!)
        #// myString = thisRecord[4];
        #// //We need to be ready for blank fields - checking for this in Java is hard!
        #// //testLength = bounds[4] - bounds[3];
        #// blankFlag = true;
        #// if (myString.trim().length() > 0){
        #// blankFlag = false;
        #// }
        #// if (blankFlag){
        #// list2GwL = 1.0;
        #// myString = "1.0";
        #// } else {
        #// myString.trim();
        #// log10gf = Double.parseDouble(myString); // log_10 of gf
        #// //Lower E level statistical weight
        #// list2GwL = 2.0 * ( Math.exp(log10gf - list2Logf) );
        #// list2GwL = (double) ( (int) list2GwL );
        #// myString = Double.toString(list2GwL);
        #// }
        #// //System.out.println("g_i " + myString.trim());
        #// masterStringOut = masterStringOut + myString.trim() + newField;
        #//
        #// "|"-separated field [5] is a quality control indicator
        #//
        #//
        #// "|"-separated field [6] is BOTH the Lower & Upper E-level excitation energy in eV
        #//
        testField = thisRecord[6]
        #//System.out.println("list2Element " + list2Element + " testField " + testField + " testField.trim().length() " + testField.trim().length());
        #//testLength = bounds[6] - bounds[5];
        blankFlag = True
        #//for (int kk = 0; kk < testLength-2; kk++){
        #//testChar = testField.substring(kk, kk+2);
        testField = testField.strip()
        if (len(testField) > 0):
            blankFlag = False
        #// }
        if (blankFlag):
            myString = "0.0"
            myStringUp = "0.0"
            #//System.out.println("blankFlag triggered, myString = " + myString);
        else:
            #// chi_L and chi_U separated by "-" - revise upper boundary to isolate chi_L:
            subFields = testField.split("-")
            myString = subFields[0].strip() #//lower E level
            myStringUp = subFields[1].strip() #//upper E level
            # Strip the various NIST annotations ([ ], ( ), "+x", "?") so
            # the bare numeric energies remain.
            #//Some values are in square brackets ("[ ]"):
            sqbr1 = myString.find("[")
            if (sqbr1 != -1):
                sqbr2 = myString.find("]")
                myString = myString[sqbr1+1: sqbr2]
            sqbr1 = myStringUp.find("[")
            if (sqbr1 != -1):
                sqbr2 = myStringUp.find("]")
                myStringUp = myStringUp[sqbr1+1: sqbr2]
            #//Or it could be round brackets ("( )"):
            sqbr1 = myString.find("(")
            if (sqbr1 != -1):
                sqbr2 = myString.find(")")
                myString = myString[sqbr1+1: sqbr2]
            sqbr1 = myStringUp.find("(")
            if (sqbr1 != -1):
                sqbr2 = myStringUp.find(")")
                myStringUp = myStringUp[sqbr1+1: sqbr2]
            #//**Or** Some values have "+x" appended (NIST code):
            plusX = myString.find("+x")
            if (plusX != -1):
                myString = myString[0: plusX]
            plusX = myStringUp.find("+x")
            if (plusX != -1):
                myStringUp = myStringUp[0: plusX]
            #//**Or** Some values have "?" appended (NIST code):
            questn = myString.find("?")
            if (questn != -1):
                myString = myString[0: questn]
            questn = myStringUp.find("?")
            if (questn != -1):
                myStringUp = myStringUp[0: questn]
        #//myString.trim();
        #//list2ChiL[list2_ptr] = Double.parseDouble(myString);
        #//System.out.println("final myString = " + myString);
        #//System.out.println("loggf " + myString.trim());
        #//System.out.println("chi_i " + myString.trim() + " chi_j " + myStringUp.trim());
        masterStringOut = masterStringOut + myString.strip() + newField + myStringUp.strip() + newField
        #//
        #// "|"-separated field [7] is the term designation of the lower level - needed?
        #//
        #//
        #// "|"-separated field [8] is Lower E-level J quantum number
        #//
        testField = thisRecord[8]
        #//System.out.println("list2Element " + list2Element + " testField " + testField + " testField.trim().length() " + testField.trim().length());
        #//testLength = bounds[6] - bounds[5];
        blankFlag = True
        #//for (int kk = 0; kk < testLength-2; kk++){
        #//testChar = testField.substring(kk, kk+2);
        testField = testField.strip()
        if (len(testField) > 0):
            blankFlag = False
        #// }
        #//initialize subfields so we're ready for both whole and rational number Js
        subFields[0] = "1"
        subFields[1] = "1"
        if (blankFlag):
            # Defaults are parseable strings: float(...) below runs either way.
            myString = "1"
            myStringUp = "1"
            #//System.out.println("blankFlag triggered, myString = " + myString);
        else:
            #// chi_L and chi_U separated by "-" - revise upper boundary to isolate chi_L:
            slash = testField.find("/")
            if (slash != -1):
                subFields = testField.split("/")
                myString = subFields[0].strip() #//numerator OR entire value, as case may be
                myStringUp = subFields[1].strip() #//denominator OR default value of unity as case may be
            else:
                myString = testField
                myStringUp = "1"
        # Convert a possibly-fractional J (e.g. "3/2") to a decimal value.
        Jnumer = float(myString) #// log_10 of gf
        Jdenom = float(myStringUp) #// log_10 of gf
        #//Lower E level statistical weight
        Jfinal = Jnumer / Jdenom
        myString = str(Jfinal)
        #//System.out.println("J_i " + myString.trim());
        masterStringOut = masterStringOut + myString.strip() + newField;
        #//
        #// "|"-separated field [9] is the term designation of the upper level - needed?
        #//
        #//
        #// "|"-separated field [10] is Upper E-level J quantum number
        #//
        #//Upper J quantum number
        testField = thisRecord[10]
        #//System.out.println("list2Element " + list2Element + " testField " + testField + " testField.trim().length() " + testField.trim().length());
        #//testLength = bounds[6] - bounds[5];
        blankFlag = True
        #//for (int kk = 0; kk < testLength-2; kk++){
        #//testChar = testField.substring(kk, kk+2);
        testField = testField.strip()
        if (len(testField) > 0):
            blankFlag = False
        #// }
        #//initialize subfields so we're ready for both whole and rational number Js
        subFields[0] = "1"
        subFields[1] = "1"
        if (blankFlag):
            myString = "1"
            myStringUp = "1";
            #//System.out.println("blankFlag triggered, myString = " + myString);
        else:
            #// chi_L and chi_U separated by "-" - revise upper boundary to isolate chi_L:
            slash = testField.find("/")
            if (slash != -1):
                subFields = testField.split("/")
                myString = subFields[0].strip() #//numerator OR entire value, as case may be
                myStringUp = subFields[1].strip() #//denominator OR default value of unity as case may be
            else:
                myString = testField
                myStringUp = "1.0"
        Jnumer = float(myString) #// log_10 of gf
        Jdenom = float(myStringUp) #// log_10 of gf
        #//Lower E level statistical weight
        Jfinal = Jnumer / Jdenom
        myString = str(Jfinal)
        #//System.out.println("J_j " + myString.trim());
        masterStringOut = masterStringOut + myString.strip() + newField
        #//
        #// "|"-separated field [11] is the statistical weight, g_i of BOTH the lower and upper level
        #//
        testField = thisRecord[11]
        #//System.out.println("list2Element " + list2Element + " testField " + testField + " testField.trim().length() " + testField.trim().length());
        #//testLength = bounds[6] - bounds[5];
        blankFlag = True
        #//for (int kk = 0; kk < testLength-2; kk++){
        #//testChar = testField.substring(kk, kk+2);
        testField = testField.strip()
        if (len(testField) > 0):
            blankFlag = False
        #// }
        if (blankFlag):
            myString = "0.0"
            myStringUp = "0.0"
            #//System.out.println("blankFlag triggered, myString = " + myString);
        else:
            #// chi_L and chi_U separated by "-" - revise upper boundary to isolate chi_L:
            subFields = testField.split("-")
            myString = subFields[0].strip() #//lower E level
            myStringUp = subFields[1].strip() #//upper E level
        #//myString.trim()
        #//list2ChiL[list2_ptr] = Double.parseDouble(myString);
        #//System.out.println("final myString = " + myString);
        #//System.out.println("loggf " + myString.trim());
        #//System.out.println("chi_i " + myString.trim() + " chi_j " + myStringUp.trim());
        # Last field of the record: terminate with newRecord, not newField.
        masterStringOut = masterStringOut + myString.strip() + newField + myStringUp.strip() + newRecord
        #//
        #//We've gotten everything we need from the closed blocks of the NIST line list:
        list2_ptr+=1
#} //i loop
#} //iBlock loop
#//now get the remaining lines:
# NOTE(review): rmndr is hard-wired to 0 above ("something's wrong"), so this
# remainder loop never executes - trailing partial blocks are dropped.
iBlock = numBlocks
offset = startLine + 6 * iBlock + 1
for i in range(1, rmndr):
    pass
####
# Total number of spectral-line records emitted into masterStringOut.
numLines2 = list2_ptr
#//check:
#//System.out.println("masterStringOut " + masterStringOut);
#
#//Okay - what kind of mess did we make...
#// System.out.println("We processed " + numLines2 + " lines");
#// System.out.println("list2Element list2Stage list2Lam0 list2Logf list2GwL list2ChiL list2ChiI1 list2ChiI2 list2Mass");
#
#// WARNING: The line list is expected to be in the format printed out by the NIST Atomic Spectra Database (ver. 5.3), [Online].
#//Available: http://physics.nist.gov/asd [2015, November 21] * when ascii output is selected *
#// Ie. blocks of five lines sepeareted by a lineof blank fields, fields separated by '|', etc.
#// NOTE: "START:" MUST be added by hand after retrieving a NIST list
#//NIST database Print-out options MUST be selected so as to produce the following header, headings and sample data lines:
#//117
#Spectrum | Ritz | Aki | fik | log_gf | Acc. | Ei Ek | Lower level | Upper level | gi gk |Type|
# | Wavelength | s^-1 | | | | (eV) (eV) |--------------|----------------| | |
# | Vac (nm) | | | | | | Term | J | Term | J | | |
#
#Java: byte[] barray = masterStringOut.getBytes();
# Encode the assembled record stream to UTF-8 bytes for the binary output file.
barray = masterStringOut.encode('utf-8')
#//byte[] barray = masterStringOut.getBytes("UTF-8")
#// what do I do with this?? throws UnsupportedEncodingException;
#System.out.println(" ");
#System.out.println("*************************");
#System.out.println(" ");
#System.out.println("This needs to be detected by GrayStar3Server.java: ");
#System.out.println(" ");
#System.out.println("size of barray " + barray.length);
#System.out.println(" ");
#System.out.println("*************************");
#System.out.println(" ");
# // System.out.println("barray " + barray);
# //
#Java: ByteFileWrite.writeFileBytes(byteListStr, barray);
# Write the byte-encoded line list to the current working directory.
with open(byteListStr, 'wb') as fHandle:
    fHandle.write(barray)
#fHandle closed automatically upon exit from with:
#//
# } // end main()
| 23,868
| 37.685575
| 463
|
py
|
ChromaStarPy
|
ChromaStarPy-master/Idamax.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 17 16:41:34 2019
@author:
"""
import math
"""
*> \brief \b IDAMAX
*
* =========== DOCUMENTATION ===========
*
* Online html documentation available at
* http://www.netlib.org/lapack/explore-html/
*
* Definition:
* ===========
*
* INTEGER FUNCTION IDAMAX(N,DX,INCX)
*
* .. Scalar Arguments ..
* INTEGER INCX,N
* ..
* .. Array Arguments ..
* DOUBLE PRECISION DX(*)
* ..
*
*
*> \par Purpose:
* =============
*>
*> \verbatim
*>
*> IDAMAX finds the index of the first element having maximum
*absolute value.
*> \endverbatim
*
* Arguments:
* ==========
*
*> \param[in] N
*> \verbatim
*> N is INTEGER
*> number of elements in input vector(s)
*> \endverbatim
*>
*> \param[in] DX
*> \verbatim
*> DX is DOUBLE PRECISION array, dimension ( 1 + ( N - 1
*)*abs( INCX ) )
*> \endverbatim
*>
*> \param[in] INCX
*> \verbatim
*> INCX is INTEGER
*> storage spacing between elements of SX
*> \endverbatim
*
* Authors:
* ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date November 2017
*
*> \ingroup aux_blas
*
*> \par Further Details:
* =====================
*>
*> \verbatim
*>
*> jack dongarra, linpack, 3/11/78.
*> modified 3/93 to return if incx .le. 0.
*> modified 12/3/93, array(1) declarations changed to array(*)
*> \endverbatim
*>
* =====================================================================
"""
#INTEGER FUNCTION idamax(N,DX,INCX)
def idamax(n, dx, incx):
    """Return the 0-based index of the first element of maximum absolute value.

    Python port of the reference BLAS IDAMAX (netlib, Nov 2017).

    Parameters
    ----------
    n : int
        Number of vector elements to examine.
    dx : sequence of float
        Vector storage; element i of the logical vector lives at dx[i*incx].
    incx : int
        Storage spacing between elements; must be positive.

    Returns
    -------
    int
        Index (element number, 0-based) of the first element with the largest
        absolute value; 0 for degenerate input (n < 1 or incx <= 0), matching
        the Fortran routine's early return.  Ties keep the earliest index
        because the comparison is strictly greater-than.
    """
    # Degenerate cases: the reference routine returns without scanning.
    if n < 1 or incx <= 0:
        return 0
    if n == 1:
        return 0

    best = 0
    dmax = abs(dx[0])
    if incx == 1:
        # Contiguous storage: scan elements 1..n-1 directly.
        for i in range(1, n):
            if abs(dx[i]) > dmax:
                best = i
                dmax = abs(dx[i])
    else:
        # Strided storage: element i is at dx[i * incx].
        # (The leftover debug print "Road not taken, right?" was removed.)
        ix = incx
        for i in range(1, n):
            if abs(dx[ix]) > dmax:
                best = i
                dmax = abs(dx[ix])
            ix = ix + incx
    return best
| 3,993
| 22.356725
| 80
|
py
|
ChromaStarPy
|
ChromaStarPy-master/MStarSpecTest.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 10:54:21 2017
@author: ishort
Compare arbitrary phoenix spectra to CSGPy spectra
- mainly to "astrophyscially tune" molecular band oscillator strenths
"""
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import pylab
#From: https://www.geeksforgeeks.org/python-sort-values-first-list-using-second-list/
def sort_list(list1, list2):
    """Return the elements of list1 reordered by ascending values of list2.

    list2 supplies the sort keys; list1 supplies the payload values.  The two
    lists must have equal length.  Sorting is done on the key alone via a key
    function: this keeps the sort stable (ties preserve input order) and never
    compares payload elements - the original tuple sort fell back to comparing
    payloads on key ties, which can raise TypeError for incomparable payloads.
    """
    pairs = zip(list2, list1)
    ordered = sorted(pairs, key=lambda kv: kv[0])
    return [payload for _, payload in ordered]
#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')
#Get the data
dataPath = "PHX/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wav = 0.0
wavStr = ""
flxStr = ""
inLine = ""
fields = [" " for i in range(2)]
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = dataPath + "ltePy-3750-2.0-0.0.sph.ames.spec.7"
with open(inFile, 'r') as inputHandle:
#No header - we'll figure out number of records on fly
wave = []
flux = []
#for i in range(num):
inLine = inputHandle.readline()
while (inLine != ""):
inLine = inputHandle.readline()
#print(inLine)
if not inLine:
break
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
wav = 0.1 * float(wavStr) # A to nm
wave.append(wav)
flx = 10.0**float(flxStr)
flx = 1.0e8 * flx # erg/s/cm^2/cm to ergs/s/cm^2/nm
flx = 1.5e-22 * flx # crude normalization
flux.append(flx)
print("wave ", [wave[x] for x in range(10)])
print("flux ", [flux[x] for x in range(10)])
#; Parallel version produces unsorted wavelengths!! ;MPI
flux2 = sort_list(flux, wave) #This *first!!*
wave.sort()
pylab.plot(wave, flux2, color='black')
#Now get the synthetic spectrum pre-computed with ChromaStarPy
modelPath = "Outputs/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = " "
#fields = [" " for i in range(2)]
"""
runVers = "pyLoop"
#Model atmosphere
teffStr = "4300.0"
loggStr = "2.0"
logZStr = "-0.7"
massStarStr = "0.75"
xiTStr = "2.0"
logHeFeStr = "0.0"
logCOStr = "0.0"
logAlphaFeStr = "0.3"
#Spectrum synthesis
lambdaStartStr = "390.0"
lambdaStopStr = "400.0"
lineThreshStr = "-3.0"
voigtThreshStr = "-3.0"
logGammaColStr = "0.5"
logKapFudgeStr = "0.0"
macroVStr = "1.0"
rotVStr = "1.0"
rotIStr = "90.0"
RVStr = "0.0"
strucStem = "Teff" + teffStr + "Logg" + loggStr + "Z" + logZStr + "M" + massStarStr+"xiT"+xiTStr + \
"HeFe" + logHeFeStr + "CO" + logCOStr + "AlfFe" + logAlphaFeStr + "v" + runVers
strucFile = "struc." + strucStem + ".out"
specFile = "spec." + strucStem + "L"+lambdaStartStr+"-"+lambdaStopStr+"xiT"+xiTStr+"LThr"+lineThreshStr+ \
"GamCol"+logGammaColStr+"Mac"+macroVStr+"Rot"+rotVStr+"-"+rotIStr+"RV"+RVStr + ".out"
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = modelPath + specFile;
"""
project = "Project"
runVers = "RunGas"
teff = 3750.0
logg = 2.0
log10ZScale = 0.0
#TiO alpha system
#lambdaStart = 515.0
#lambdaStop = 519.0
#TiO beta system
#lambdaStart = 560.0
#lambdaStop = 564.0
#TiO gamma system
#lambdaStart = 715.0
#lambdaStop = 719.0
#TiO gamma prime system
#lambdaStart = 617.0
#lambdaStop = 621.0
#TiO epsilon system
#lambdaStart = 839.0
#lambdaStop = 843.0
#TiO delta system
#lambdaStart = 882.0
#lambdaStop = 892.0
#TiO phi system
#lambdaStart = 1100.0
#lambdaStop = 1110.0
#CH A2Delta_X2Pi ("G-band" at 4314 A)
lambdaStart = 430.5
lambdaStop = 431.5
fileStem = project + "-"\
+ str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
+ "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
+ "-" + runVers
inFile = modelPath + fileStem + ".spec.txt"
invnAir = 1.0 / 1.000277 #// reciprocal of refractive index of air at STP
#numStr = fields[0].strip() #first field is number of following records
#num = int(numStr)
waveMod = []
fluxMod = []
wav = 0.0 #//initialization
wavStr = ""
lblStr = ""
with open(inFile, 'r') as inputHandle:
#Expects number of records on first lines, then white space delimited columns of
#wavelengths in nm and continuum rectified fluxes
inLine = inputHandle.readline() #line of header
print(inLine)
inLine = inputHandle.readline()
print(inLine)
fields = inLine.split()
#number of line IDs is last field:
numLineIdsStr = fields[len(fields)-1]
numLineIds = int(numLineIdsStr) - 1 # to be on safe side
print("Recovered that there are " + numLineIdsStr + " lines to ID")
inLine = inputHandle.readline()
print(inLine)
fields = inLine.split()
#number of wavelengths in spectrum is last field:
numWavsStr = fields[len(fields)-1]
numWavs = int(numWavsStr) # to be on safe side
print("Recovered that there are " + numWavsStr + " wavelengths")
#One more line of header
inLine = inputHandle.readline() #line of header
print(inLine)
waveMod = [0.0 for i in range(numWavs)]
fluxMod = [0.0 for i in range(numWavs)]
#Get the synthetic spectrum
for i in range(numWavs):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
wav = invnAir * float(wavStr)
waveMod[i] = wav
fluxMod[i] = float(flxStr)
waveIds = [0.0 for i in range(numLineIds)]
lblIds = ["" for i in range(numLineIds)]
#Get the line IDs
#Expects four white-space-delimited fields:
# wavelength, element, ion. stage, and rounded wavelength
#Another line of header for line id section
inLine = inputHandle.readline() #line of header
print(inLine)
for i in range(numLineIds):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip()
wav = invnAir * float(wavStr)
waveIds[i] = wav
lblStr = fields[1].strip() + " " + fields[2].strip() + " " + fields[3].strip()
lblIds[i] = lblStr
"""
#If we do NOT know number of records:
#for i in inputHandle: #doesn't work - 0 iterations
while (inLine != ""):
inLine = inputHandle.readline()
if not inLine:
break
#print(inLine)
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
wav = invnAir * float(wavStr)
waveMod.append(wav)
fluxMod.append(float(flxStr))
"""
#plot the spectrum
#plt.title('Synthetic spectrum')
plt.ylabel('$F_\lambda/F^C_\lambda$')
plt.xlabel('$\lambda$ (nm)')
xMin = min(waveMod)
xMax = max(waveMod)
pylab.xlim(xMin, xMax)
print(xMin, xMax)
#pylab.xlim(708, xMax)
pylab.ylim(0.0, 1.6)
pylab.plot(waveMod, fluxMod, color="gray")
#add the line IDs
for i in range(numLineIds):
if "Ca II" in lblIds[i]:
thisLam = waveIds[i]
thisLbl = lblIds[i]
xPoint = [thisLam, thisLam]
yPoint = [1.05, 1.1]
pylab.plot(xPoint, yPoint, color='black')
pylab.text(thisLam, 1.5, thisLbl, rotation=270)
#Save as encapsulated postscript (eps) for LaTex
epsName = fileStem + '.eps'
plt.savefig(epsName, format='eps', dpi=1000)
| 7,356
| 26.973384
| 106
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.