metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "numfig.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/docs/exts/numfig.py",
"type": "Python"
}
|
from docutils.nodes import figure, caption, Text, reference, raw, SkipNode, Element
from sphinx.roles import XRefRole
# Element classes
class page_ref(reference):
    """Reference node for the ``:page:`` role; rendered as ``\\pageref`` in LaTeX
    and skipped entirely by other builders (see :func:`setup`)."""
    pass
class num_ref(reference):
    """Reference node for the ``:num:`` role; resolved to a figure number by
    :func:`doctree_resolved` (HTML/text) or :func:`latex_visit_num_ref` (LaTeX)."""
    pass
# Visit/depart functions
def skip_page_ref(self, node):
    """Visitor that drops a ``page_ref`` node: builders without physical
    pages (html, text) have nothing meaningful to render for it."""
    raise SkipNode
def latex_visit_page_ref(self, node):
    """LaTeX visitor for ``page_ref``: emit ``\\pageref{doc:target}`` and
    skip the node's children."""
    self.body.append("\\pageref{{{}:{}}}".format(node['refdoc'], node['reftarget']))
    raise SkipNode
def latex_visit_num_ref(self, node):
    """LaTeX visitor for ``num_ref``.

    A reftarget of the form ``Label#anchor`` becomes a ``\\hyperref`` whose
    text is "Label <number>"; a bare target becomes a plain ``\\ref``.
    """
    parts = node['reftarget'].split('#')
    if len(parts) > 1:
        label, anchor = parts
        ref_link = '{}:{}'.format(node['refdoc'], anchor)
        self.body.append("\\hyperref[%s]{%s \\ref*{%s}}" % (ref_link, label, ref_link))
    else:
        self.body.append('\\ref{%s:%s}' % (node['refdoc'], parts[0]))
    raise SkipNode
def doctree_read(app, doctree):
    """On 'doctree-read': remember which document every figure id lives in.

    The mapping is stored on the build environment so that
    :func:`doctree_resolved` can build cross-document HTML links.
    """
    env = app.builder.env
    mapping = getattr(env, 'figid_docname_map', {})
    for fig_node in doctree.traverse(figure):
        for fig_id in fig_node['ids']:
            mapping[fig_id] = env.docname
    env.figid_docname_map = mapping
def doctree_resolved(app, doctree, docname):
    """Number figures and replace ``num_ref`` nodes with links/text.

    Runs on 'doctree-resolved'. LaTeX numbers figures natively, so all
    rewriting is skipped for the latex builder. For other builders each
    caption is prefixed with ``figure_caption_prefix`` and a running
    number; ``num_ref`` placeholders then become an HTML anchor (html
    builder) or plain text (everything else).
    """
    i = 1           # running figure number, in document order
    figids = {}     # figure id -> number assigned above
    for figure_info in doctree.traverse(figure):
        if app.builder.name != 'latex' and app.config.number_figures:
            cap[0] = Text("%s %d: %s" % (app.config.figure_caption_prefix, i, cap[0]))
            for cap in figure_info.traverse(caption):
                cap[0] = Text("%s %d: %s" % (app.config.figure_caption_prefix, i, cap[0]))
        for id in figure_info['ids']:
            figids[id] = i
        i += 1
    # replace numfig nodes with links
    if app.builder.name != 'latex':
        for ref_info in doctree.traverse(num_ref):
            if '#' in ref_info['reftarget']:
                # "Label#target" form: render as "Label <number>"
                label, target = ref_info['reftarget'].split('#')
                labelfmt = label + " %d"
            else:
                labelfmt = '%d'
                target = ref_info['reftarget']
            if target not in figids:
                # target was not numbered here (figids is rebuilt per document);
                # leave the node for other resolvers
                continue
            if app.builder.name == 'html':
                # cross-document link via the map built in doctree_read
                target_doc = app.builder.env.figid_docname_map[target]
                link = "%s#%s" % (app.builder.get_relative_uri(docname, target_doc),
                                  target)
                html = '<a class="pageref" href="%s">%s</a>' % (link, labelfmt % (figids[target]))
                ref_info.replace_self(raw(html, html, format='html'))
            else:
                ref_info.replace_self(Text(labelfmt % (figids[target])))
def clean_env(app):
    """Reset numbering state at 'builder-inited' so rebuilds start fresh."""
    env = app.builder.env
    # NOTE(review): env.i appears vestigial -- numbering uses a local counter
    # in doctree_resolved -- but it is reset here as the original did.
    env.i = 1
    env.figid_docname_map = {}
def setup(app):
    """Sphinx extension entry point: register config values, nodes, roles
    and event handlers for figure numbering."""
    # rebuild on change (third arg True)
    app.add_config_value('number_figures', True, True)
    app.add_config_value('figure_caption_prefix', "Figure", True)
    # page_ref renders only under LaTeX; text/html builders skip it
    app.add_node(page_ref,
                 text=(skip_page_ref, None),
                 html=(skip_page_ref, None),
                 latex=(latex_visit_page_ref, None))
    app.add_role('page', XRefRole(nodeclass=page_ref))
    # num_ref has a LaTeX visitor; other builders are rewritten in doctree_resolved
    app.add_node(num_ref,
                 latex=(latex_visit_num_ref, None))
    app.add_role('num', XRefRole(nodeclass=num_ref))
    app.connect("builder-inited", clean_env)
    app.connect('doctree-read', doctree_read)
    app.connect('doctree-resolved', doctree_resolved)
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@docs@exts@numfig.py@.PATH_END.py
|
{
"filename": "test_real_coadd.py",
"repo_name": "quatrope/ProperImage",
"repo_path": "ProperImage_extracted/ProperImage-master/drafts/test_real_coadd.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# test_recoverstats.py
#
# Copyright 2016 Bruno S <bruno.sanchez.63@gmail.com>
#
import os
import shlex
import subprocess
import sys
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from properimage import propercoadd as pc
from properimage import utils
# =============================================================================
# PSF measure test by propercoadd
# =============================================================================
#datapath = os.path.abspath('/home/bos0109/DATA/Data/Tolar2015/CAMPAÑA_LIGO_OBS
#ERVACIONES_MACON/20151212/preprocessed/Landolt_C53')
# Run a proper-coadd (R) and score-image (S) computation on a real dataset
# and dump plots/FITS products into ./test/test_images/real_coadd_test/.
datapath = os.path.abspath(
    '/home/bruno/Documentos/Data/ESO085-030')
# NOTE(review): fs is overwritten on every os.walk iteration, so only the
# last visited directory's files survive -- preserved as-is (draft script).
for root, dirs, files in os.walk(datapath):
    fs = [os.path.join(root, afile) for afile in files]
print('files to process: {}'.format(fs))
with pc.ImageEnsemble(fs, pow_th=0.01) as ensemble:
    # R: proper coadd image; S also returned because return_S=True
    R, S = ensemble.calculate_R(n_procs=4, return_S=True)
    test_dir = os.path.abspath('./test/test_images/real_coadd_test/')
    # makedirs creates intermediate directories too; os.mkdir would fail
    # whenever './test/test_images' does not already exist
    os.makedirs(test_dir, exist_ok=True)
    utils.plot_S(S, path=os.path.join(test_dir, 'S.png'))
    utils.plot_R(R, path=os.path.join(test_dir, 'R.png'))
    utils.encapsule_S(S, path=os.path.join(test_dir, 'S.fits'))
    utils.encapsule_R(R, path=os.path.join(test_dir, 'R.fits'))
|
quatropeREPO_NAMEProperImagePATH_START.@ProperImage_extracted@ProperImage-master@drafts@test_real_coadd.py@.PATH_END.py
|
{
"filename": "_hoverinfo.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/treemap/_hoverinfo.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Flag-list validator for ``treemap.hoverinfo``."""

    def __init__(self, plotly_name="hoverinfo", parent_name="treemap", **kwargs):
        # Defaults below are only used when the caller did not override them.
        default_flags = [
            "label",
            "text",
            "value",
            "name",
            "current path",
            "percent root",
            "percent entry",
            "percent parent",
        ]
        super(HoverinfoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            extras=kwargs.pop("extras", ["all", "none", "skip"]),
            flags=kwargs.pop("flags", default_flags),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@treemap@_hoverinfo.py@.PATH_END.py
|
{
"filename": "hatch_build.py",
"repo_name": "dmlc/xgboost",
"repo_path": "xgboost_extracted/xgboost-master/python-package/hatch_build.py",
"type": "Python"
}
|
"""
Custom hook to customize the behavior of Hatchling.
Here, we customize the tag of the generated wheels.
"""
from typing import Any, Dict
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
from packaging.tags import platform_tags
def get_tag() -> str:
    """Return this system's wheel tag: Python 3, ABI-agnostic, native platform."""
    most_specific_platform = next(platform_tags())
    return "py3-none-" + most_specific_platform
class CustomBuildHook(BuildHookInterface):
    """A custom build hook that tags wheels for the build platform."""

    def initialize(self, version: str, build_data: Dict[str, Any]) -> None:
        """This step occurs immediately before each build."""
        # Override Hatchling's default tag with the platform-specific one.
        build_data["tag"] = get_tag()
|
dmlcREPO_NAMExgboostPATH_START.@xgboost_extracted@xgboost-master@python-package@hatch_build.py@.PATH_END.py
|
{
"filename": "euv_comparison.py",
"repo_name": "RobertJaro/InstrumentToInstrument",
"repo_path": "InstrumentToInstrument_extracted/InstrumentToInstrument-master/itipy/evaluation/euv_comparison.py",
"type": "Python"
}
|
import datetime
import glob
import os
from warnings import simplefilter
import pandas
import pandas as pd
import torch
from dateutil.parser import parse
from itipy.train.util import skip_invalid
from sunpy.map import Map
from sunpy.visualization.colormaps import cm
from itipy.data.editor import soho_norms, sdo_norms, stereo_norms
from itipy.data.dataset import SOHODataset, STEREODataset, SDODataset, get_intersecting_files
from torch.utils.data import DataLoader
from tqdm import tqdm
from itipy.translate import SOHOToSDOEUV, SOHOToSDO
from itipy.translate import STEREOToSDO
from matplotlib import pyplot as plt
import numpy as np
# init: output directory and the pretrained SOHO->SDO translation model
base_path = '/gpfs/gpfs0/robert.jarolim/iti/euv_comparison_v1'
os.makedirs(base_path, exist_ok=True)
translator_soho = SOHOToSDO(model_path='/gpfs/gpfs0/robert.jarolim/iti/soho_sdo_v4/generator_AB.pt')
# translate: keep only basenames present in every SOHO channel directory
basenames_soho = [[os.path.basename(f) for f in glob.glob('/gpfs/gpfs0/robert.jarolim/data/iti/soho_iti2021_prep/%s/*.fits' % wl)] for
                  wl in ['171', '195', '284', '304', 'mag']]
basenames_soho = set(basenames_soho[0]).intersection(*basenames_soho[1:])
# drop known-bad observations
basenames_soho = [b for b in basenames_soho if b not in ['2010-05-13T01:19:39.fits', '2010-05-12T19:19:38.fits', '2010-05-13T07:19:37.fits']]
basenames_sdo = [[os.path.basename(f) for f in glob.glob('/gpfs/gpfs0/robert.jarolim/data/iti/sdo_comparison/%s/*.fits' % wl)] for wl in
                 ['171', '193', '211', '304', '6173']]
basenames_sdo = set(basenames_sdo[0]).intersection(*basenames_sdo[1:])
# file names encode ISO timestamps; pair each SOHO date with the closest SDO date
dates_soho = sorted([parse(f.split('.')[0]) for f in basenames_soho])
dates_sdo = sorted([parse(f.split('.')[0]) for f in basenames_sdo])
closest_dates = [(date_soho, min(dates_sdo, key=lambda x: abs(x - date_soho))) for date_soho in dates_soho]
selected_dates = [(date_soho, date_sdo) for date_soho, date_sdo in closest_dates if
                  np.abs(date_soho - date_sdo) < datetime.timedelta(minutes=30) and date_sdo.year == 2010] # file name filter (below filter < 30 min)
selected_dates = selected_dates[::10]  # subsample: every 10th matched pair
basenames_soho = ['%s.fits' % date_soho.isoformat('T') for date_soho, date_sdo in selected_dates]
basenames_sdo = ['%s.fits' % date_sdo.isoformat('T') for date_soho, date_sdo in selected_dates]
# rebuild full per-channel file lists for the selected pairs
soho_files = [['/gpfs/gpfs0/robert.jarolim/data/iti/soho_iti2021_prep/%s/%s' % (dir, basename) for basename in basenames_soho]
              for dir in ['171', '195', '284', '304', 'mag']]
soho_dataset = SOHODataset(soho_files)
soho_iterator = DataLoader(soho_dataset, batch_size=1, shuffle=False, num_workers=4)
sdo_files = [['/gpfs/gpfs0/robert.jarolim/data/iti/sdo_comparison/%s/%s' % (dir, basename) for basename in basenames_sdo]
             for dir in ['171', '193', '211', '304', '6173']]
sdo_dataset = SDODataset(sdo_files)
sdo_iterator = DataLoader(sdo_dataset, batch_size=1, shuffle=False, num_workers=4)
# colour maps for the five channels (171, 193, 211, 304, magnetogram)
cmaps = [
    cm.sdoaia171,
    cm.sdoaia193,
    cm.sdoaia211,
    cm.sdoaia304,
    'gray'
]
# SOHO/EIT wavelength -> closest SDO/AIA wavelength
channel_mapping = {s: t for s, t in zip([171, 195, 284, 304], [171, 193, 211, 304])}
# per-channel [mean, std] statistics for the baseline linear cross-calibration
eit_calibration = {'171': [113.69278, 40.340622], '195': [60.60053, 31.752682], '284': [4.7249465, 3.9555929], '304': [64.73511, 26.619505]}
aia_calibration = {'171': [148.90274, 62.101795], '193': [146.01889, 71.47675], '211': [44.460854, 27.592617], '304': [46.21493, 18.522688]}
# per-wavelength list of (original, calibrated, iti, true) channel means
results = {wl: [] for wl in [171, 195, 284, 304]}
for soho_cube, sdo_cube in tqdm(skip_invalid(zip(soho_iterator, sdo_iterator)), total=len(selected_dates)):
    with torch.no_grad():
        # translate the SOHO cube to SDO-like data; drop the batch dimension
        iti_cube = translator_soho.generator(soho_cube.cuda())
        iti_cube = iti_cube[0].cpu().numpy()
        soho_cube = soho_cube[0].numpy()
        sdo_cube = sdo_cube[0].numpy()
    #
    for i, wl in enumerate([171, 195, 284, 304]):
        # mean of the de-normalised original SOHO channel
        original_mean = np.mean(soho_norms[wl].inverse((soho_cube[i] + 1) / 2))
        # mean of the ITI-translated channel in SDO units
        sdo_norm = sdo_norms[channel_mapping[wl]]
        iti_mean = np.mean(sdo_norm.inverse((iti_cube[i] + 1) / 2))
        # baseline: linear calibration from EIT to AIA statistics
        eit_mean, eit_std = eit_calibration[str(wl)]
        aia_mean, aia_std = aia_calibration[str(channel_mapping[wl])]
        calibrated_mean = (np.array(original_mean) - eit_mean) * (aia_std / eit_std) + aia_mean
        # ground truth: mean of the co-temporal SDO observation
        true_mean = np.mean(sdo_norm.inverse((sdo_cube[i] + 1) / 2))
        #
        results[wl] += [(original_mean, calibrated_mean, iti_mean, true_mean)]
    # 171A preview (SOHO | ITI | SDO); the file is overwritten every iteration
    fig, axs = plt.subplots(1, 3, figsize=(12, 4))
    axs[0].imshow(soho_cube[0], vmin=-1, vmax=1, cmap=cm.sdoaia171)
    axs[1].imshow(iti_cube[0], vmin=-1, vmax=1, cmap=cm.sdoaia171)
    axs[2].imshow(sdo_cube[0], vmin=-1, vmax=1, cmap=cm.sdoaia171)
    [ax.set_axis_off() for ax in axs]
    plt.tight_layout(pad=0)
    plt.savefig('/gpfs/gpfs0/robert.jarolim/iti/euv_comparison_v1/comparison.jpg')
    plt.close()
# write per-channel and overall metrics; columns of each result tuple are
# (original, calibrated, iti, true), so means[:, -1] is the ground truth
with open(os.path.join(base_path, 'soho_evaluation.txt'), 'w') as f:
    for k, v in results.items():
        means = np.array(v)
        print(k, 'MAE', file=f)
        print('original', np.abs(means[:, -1] - means[:, 0]).mean(), file=f)
        print('calibrated', np.abs(means[:, -1] - means[:, 1]).mean(), file=f)
        print('iti', np.abs(means[:, -1] - means[:, 2]).mean(), file=f)
        print(k, 'CC', file=f)
        print('original', np.corrcoef(means[:, -1], means[:, 0])[0, 1], file=f)
        print('calibrated', np.corrcoef(means[:, -1], means[:, 1])[0, 1], file=f)
        print('iti', np.corrcoef(means[:, -1], means[:, 2])[0, 1], file=f)
    # aggregate MAE over all wavelengths
    print('Means', file=f)
    means = np.array(list(results.values()))
    print('original', np.abs(means[:, :, -1] - means[:, :, 0]).mean(), file=f)
    print('calibrated', np.abs(means[:, :, -1] - means[:, :, 1]).mean(), file=f)
    print('iti', np.abs(means[:, :, -1] - means[:, :, 2]).mean(), file=f)
|
RobertJaroREPO_NAMEInstrumentToInstrumentPATH_START.@InstrumentToInstrument_extracted@InstrumentToInstrument-master@itipy@evaluation@euv_comparison.py@.PATH_END.py
|
{
"filename": "Rescaling_DarkPhotonLimits.ipynb",
"repo_name": "cajohare/AxionLimits",
"repo_path": "AxionLimits_extracted/AxionLimits-master/Rescaling_DarkPhotonLimits.ipynb",
"type": "Jupyter Notebook"
}
|
```python
from numpy import *

# Default polarisation factor sqrt(<cos^2 theta>) = sqrt(0.019) for the
# fixed-polarisation dark-photon scenario.
costh = sqrt(0.019)

def rescale_haloscope(src, B, label, cth=costh, dst=None):
    """Rescale an axion haloscope limit into a dark-photon kinetic-mixing limit.

    Reads limit_data/AxionPhoton/<src>.txt (m_a [eV], g [1e-9 GeV^-1]),
    converts column 1 via chi = 1e-9*g*B/(1.444e-3*cth*m), and writes
    limit_data/DarkPhoton/Rescaled/<dst>.txt (dst defaults to src) with a
    header starting with `label`. Returns the rescaled array.
    """
    if dst is None:
        dst = src
    dat = loadtxt("limit_data/AxionPhoton/%s.txt" % src)
    dat[:, 1] = 1e-9*dat[:, 1]*(B/(1.444e-3*cth*dat[:, 0]))
    savetxt('limit_data/DarkPhoton/Rescaled/%s.txt' % dst, dat,
            header=label+" rescaled to fixed polarisation scenario \n m_X [eV]\tchi")
    return dat

# (source file, B-field [T], header label, polarisation factor, output file)
for src, B, label, cth, dst in [
    ("ADMX", 7.6, "ADMX 2009", costh, None),
    ("ADMX2018", 6.8, "ADMX 2018", costh, None),
    ("ADMX2019_1", 7.6, "ADMX 2019", costh, None),
    ("ADMX2019_2", 7.6, "ADMX 2019", costh, None),
    ("ADMX2021", 7.6, "ADMX 2021", costh, None),
    ("ADMX2024", 7.6, "ADMX 2024", costh, None),  # unclear what field was used
    ("ADMX_Sidecar_AC", 3.11, "ADMX Sidecar (Runs A and C)", costh, "ADMX_Sidecar"),
    ("ADMX_SLIC", 5.0, "ADMX SLIC", costh, None),
    ("HAYSTAC_PhaseI", 9, "HAYSTAC", costh, None),
    ("HAYSTAC_PhaseII_ab", 8, "HAYSTAC", costh, None),
    ("HAYSTAC_PhaseII_cd", 8, "HAYSTAC", costh, None),
    # CAPP
    ("CAPP-1", 7.3, "CAPP-1", costh, None),
    ("CAPP-2", 7.8, "CAPP-2", costh, None),
]:
    rescale_haloscope(src, B, label, cth, dst)

# CAPP-3 additionally rescales its single deepest point, which was derived
# assuming cos(theta) = 0.2, before the file is written.
dat = rescale_haloscope("CAPP-3", 7.9, "CAPP-3")
dat[argmin(dat[:, 1]), 1] = dat[argmin(dat[:, 1]), 1]*costh/0.2
savetxt('limit_data/DarkPhoton/Rescaled/CAPP-3.txt', dat,
        header="CAPP-3 rescaled to fixed polarisation scenario \n m_X [eV]\tchi")

for src, B, label, cth, dst in [
    ("CAPP-4", 18, "CAPP-4", costh, None),
    ("CAPP-5", 12, "CAPP-5", sqrt(0.019), None),
    ("CAPP-6", 12, "CAPP-6", sqrt(0.02), None),
    ("CAPP-7", 12, "CAPP-7", sqrt(0.02), None),
    ("CAPP-8", 12, "CAPP-8", sqrt(0.022), None),
    ("CAPP-MAX", 12, "CAPP-MAX", sqrt(0.022), None),
    ("TASEH", 8, "TASEH", costh, None),  # header label fixed (was "CAPP-4", copy-paste slip)
    ("QUAX", 8.1, "QUAX", sqrt(0.023), None),
    ("QUAX2", 8.0, "QUAX2", sqrt(0.023), None),
    ("QUAX3", 8.0, "QUAX3", sqrt(0.023), None),
    ("QUAX4", 8.0, "QUAX4", sqrt(0.023), None),
    ("QUAX5", 8.0, "QUAX5", sqrt(0.023), None),
    ("ORGAN-1a", 11.5, "ORGAN", sqrt(0.03076631), None),
    ("ORGAN-1b", 11.5, "ORGAN", sqrt(0.03076631), None),
    ("ORGAN-Q", 7, "ORGAN-Q", sqrt(0.025), None),
    ("CAST-CAPP", 8.8, "CAST-CAPP", sqrt(0.023), None),
    ("RADES2", 11.7, "RADES2", sqrt(0.019), None),
]:
    rescale_haloscope(src, B, label, cth, dst)
```
```python
def rescale_darkphoton(name, factor):
    """Multiply the chi column of limit_data/DarkPhoton/<name>.txt by
    `factor` and write the result to limit_data/DarkPhoton/Rescaled/."""
    dat = loadtxt("limit_data/DarkPhoton/%s.txt" % name)
    dat[:, 1] = dat[:, 1]*factor
    savetxt('limit_data/DarkPhoton/Rescaled/%s.txt' % name, dat,
            header=name+" rescaled to fixed polarisation scenario \n m_X [eV]\tchi")

# (experiment name, rescaling factor for the fixed-polarisation scenario)
for name, factor in [
    ("DM-Pathfinder", sqrt(1/0.075)),
    ("SQuAD", sqrt(0.4/0.45)*sqrt(1/3/0.0019)),
    ("WISPDMX", sqrt(0.3/0.45)*sqrt(1/3/0.079)),
    ("DarkEfield", sqrt(0.3/0.45)*sqrt(1/3/0.29)),
    ("DarkEfield2", sqrt(1/3/0.29)),
    ("SHUKET", sqrt(0.3/0.45)*sqrt(1/3/0.0086)),
    ("Tokyo-Dish", sqrt(2/3/0.5)),
    ("Tokyo-Knirck", sqrt(1/3/0.175)),
    ("Tokyo-Tomita", sqrt(1/3/0.47)),
    ("FUNK", sqrt(0.3/0.45)*sqrt(2/3/0.27)),
    ("BRASS-p", sqrt(0.3/0.45)*sqrt(1/3/0.0245)),
    ("SRF_scanning", sqrt(1/3/0.02472551)),
    ("SUPAX", sqrt(1/3/0.003)),
    ("APEX", sqrt(1/3/0.0019)),
    ("GigaBREAD", sqrt(1/3/0.25)),
]:
    rescale_darkphoton(name, factor)
```
```python
```
|
cajohareREPO_NAMEAxionLimitsPATH_START.@AxionLimits_extracted@AxionLimits-master@Rescaling_DarkPhotonLimits.ipynb@.PATH_END.py
|
{
"filename": "T02_create_tau_dataset.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioMC/EvtGen/tests/T02_create_tau_dataset.py",
"type": "Python"
}
|
import numpy as np
import h5py
from NuRadioMC.EvtGen import generator
from NuRadioReco.utilities import units
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("testtaueventgen")
# define simulation volume (cylinder centred on the detector)
volume = {
    'fiducial_rmin': 0 * units.km,
    'fiducial_rmax': 3 * units.km,
    'fiducial_zmin': -2.7 * units.km,  # the ice sheet at South Pole is 2.7km deep
    'fiducial_zmax': 0 * units.km}
# 1e4 events between 1e18 and 1e19 eV, including secondary tau interactions
generator.generate_eventlist_cylinder('tau.hdf5', 1e4, 1e18 * units.eV, 1e19 * units.eV,
                                      volume, add_tau_second_bang=True)
print("writing many subfiles")
# same generator, wider energy range, split into files of 10 events each
generator.generate_eventlist_cylinder('tau2.hdf5', 1e4, 1e16 * units.eV, 1e19 * units.eV,
                                      volume, add_tau_second_bang=True, n_events_per_file=10)
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioMC@EvtGen@tests@T02_create_tau_dataset.py@.PATH_END.py
|
{
"filename": "_tickvals.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/histogram/marker/colorbar/_tickvals.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for ``histogram.marker.colorbar.tickvals``."""

    def __init__(
        self, plotly_name="tickvals", parent_name="histogram.marker.colorbar", **kwargs
    ):
        # Pop defaults first so explicit kwargs from the caller win.
        edit_type = kwargs.pop("edit_type", "colorbars")
        role = kwargs.pop("role", "data")
        super(TickvalsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@histogram@marker@colorbar@_tickvals.py@.PATH_END.py
|
{
"filename": "functionals.py",
"repo_name": "casacore/python-casacore",
"repo_path": "python-casacore_extracted/python-casacore-master/pyrap/functionals.py",
"type": "Python"
}
|
from casacore.functionals import *
|
casacoreREPO_NAMEpython-casacorePATH_START.@python-casacore_extracted@python-casacore-master@pyrap@functionals.py@.PATH_END.py
|
{
"filename": "angular.py",
"repo_name": "duetosymmetry/qnm",
"repo_path": "qnm_extracted/qnm-master/qnm/angular.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""Solve the angular Teukolsky equation via spectral decomposition.
For a given complex QNM frequency ω, the separation constant and
spherical-spheroidal decomposition are found as an eigenvalue and
eigenvector of an (infinite) matrix problem. The interface to solving
this problem is :meth:`C_and_sep_const_closest`, which returns a
certain eigenvalue A and eigenvector C. The eigenvector contains the
C coefficients in the equation
.. math:: {}_s Y_{\ell m}(\\theta, \phi; a\omega) = {\sum_{\ell'=\ell_{\min} (s,m)}^{\ell_\max}} C_{\ell' \ell m}(a\omega)\ {}_s Y_{\ell' m}(\\theta, \phi) \,.
Here ℓmin=max(\|m\|,\|s\|) (see :meth:`l_min`), and ℓmax can be chosen at
run time. The C coefficients are returned as a complex ndarray, with
the zeroth element corresponding to ℓmin. You can get the associated
ℓ values by calling :meth:`ells`.
"""
from __future__ import division, print_function, absolute_import
from numba import njit
import numpy as np
# TODO some documentation here, better documentation throughout
@njit(cache=True)
def _calF(s, l, m):
    """ Eq. (52b) """
    if ((0 == s) and (0 == l+1)):
        # Only reachable for the mode labeled s=0, l=0, m=0.
        return 0.
    angular_factor = np.sqrt( ((l+1)**2 - m*m) / (2*l+3) / (2*l+1) )
    spin_factor = np.sqrt( ( (l+1)**2 - s*s) / (l+1)**2 )
    return angular_factor * spin_factor
@njit(cache=True)
def _calG(s, l, m):
    """ Eq. (52c) """
    if l == 0:
        return 0.
    angular_factor = np.sqrt( ( l*l - m*m ) / (4*l*l - 1))
    spin_factor = np.sqrt(1 - s*s/l/l)
    return angular_factor * spin_factor
@njit(cache=True)
def _calH(s, l, m):
    """ Eq. (52d) """
    if l == 0 or s == 0:
        return 0.
    return - m*s/l/(l+1)
@njit(cache=True)
def _calA(s, l, m):
    """ Eq. (53a) """
    F_l = _calF(s, l, m)
    F_lp1 = _calF(s, l+1, m)
    return F_l * F_lp1
@njit(cache=True)
def _calD(s, l, m):
    """ Eq. (53b) """
    H_sum = _calH(s, l+1, m) + _calH(s, l, m)
    return _calF(s, l, m) * H_sum
@njit(cache=True)
def _calB(s, l, m):
    """ Eq. (53c) """
    term_up = _calF(s, l, m) * _calG(s, l+1, m)
    term_down = _calG(s, l, m) * _calF(s, l-1, m)
    return term_up + term_down + _calH(s, l, m)**2
@njit(cache=True)
def _calE(s, l, m):
    """ Eq. (53d) """
    H_sum = _calH(s, l-1, m) + _calH(s, l, m)
    return _calG(s, l, m) * H_sum
@njit(cache=True)
def _calC(s, l, m):
    """ Eq. (53e) """
    G_l = _calG(s, l, m)
    G_lm1 = _calG(s, l-1, m)
    return G_l * G_lm1
@njit(cache=True)
def swsphericalh_A(s, l, m):
    """ Angular separation constant at a=0.

    Eq. (50). Has no dependence on m. The formula is
    A_0 = l(l+1) - s(s+1)

    Parameters
    ----------
    s: int
      Spin-weight of interest

    l: int
      Angular quantum number of interest

    m: int
      Magnetic quantum number, ignored

    Returns
    -------
    int
      Value of A(a=0) = l(l+1) - s(s+1)
    """
    # (l - s)(l + s + 1) expands to l(l+1) - s(s+1); integer arithmetic is exact.
    return (l - s) * (l + s + 1)
@njit(cache=True)
def M_matrix_elem(s, c, m, l, lprime):
    """ The (l, lprime) matrix element from the spherical-spheroidal
    decomposition matrix from Eq. (55). Nonzero only for |l - lprime| <= 2.

    Parameters
    ----------
    s: int
      Spin-weight of interest

    c: complex
      Oblateness of the spheroidal harmonic

    m: int
      Magnetic quantum number

    l: int
      Angular quantum number of interest

    lprime: int
      Primed quantum number of interest

    Returns
    -------
    complex
      Matrix element M_{l, lprime}
    """
    if lprime == l-2:
        return -c*c*_calA(s, lprime, m)
    elif lprime == l-1:
        return -c*c*_calD(s, lprime, m) + 2*c*s*_calF(s, lprime, m)
    elif lprime == l:
        return (swsphericalh_A(s, lprime, m)
                - c*c*_calB(s, lprime, m)
                + 2*c*s*_calH(s, lprime, m))
    elif lprime == l+1:
        return -c*c*_calE(s, lprime, m) + 2*c*s*_calG(s, lprime, m)
    elif lprime == l+2:
        return -c*c*_calC(s, lprime, m)
    else:
        return 0.
def give_M_matrix_elem_ufunc(s, c, m):
    """Legacy helper: build a ufunc over (l, lprime) wrapping
    :meth:`M_matrix_elem` with s, c, m fixed. Used by :meth:`M_matrix_old`.

    Parameters
    ----------
    s: int
      Spin-weight of interest

    c: complex
      Oblateness of the spheroidal harmonic

    m: int
      Magnetic quantum number

    Returns
    -------
    ufunc
      Implements elements of M matrix
    """
    return np.frompyfunc(lambda l, lprime: M_matrix_elem(s, c, m, l, lprime), 2, 1)
@njit(cache=True)
def l_min(s, m):
    """ Minimum allowed value of l for a given s, m: l_min = max(\\|m\\|,\\|s\\|).

    Parameters
    ----------
    s: int
      Spin-weight of interest

    m: int
      Magnetic quantum number

    Returns
    -------
    int
      l_min
    """
    spin_bound = abs(s)
    magnetic_bound = abs(m)
    return spin_bound if spin_bound > magnetic_bound else magnetic_bound
@njit(cache=True)
def ells(s, m, l_max):
    """Vector of l values indexing the C vector and M matrix.

    Element 0 corresponds to l_min(s, m) (see :meth:`l_min`) and the last
    element is l_max.

    Parameters
    ----------
    s: int
      Spin-weight of interest

    m: int
      Magnetic quantum number

    l_max: int
      Maximum angular quantum number

    Returns
    -------
    int ndarray
      Vector of l values, starting from l_min
    """
    lowest = l_min(s, m)
    return np.arange(lowest, l_max + 1)
def M_matrix_old(s, c, m, l_max):
    """Legacy ufunc-based version of :meth:`M_matrix`.

    The scope-captured temp function inside
    :meth:`give_M_matrix_elem_ufunc` prevents numba acceleration; kept
    only for testing. See :meth:`M_matrix` for parameters and return value.
    """
    lvals = ells(s, m, l_max)
    elem_ufunc = give_M_matrix_elem_ufunc(s, c, m)
    outer = elem_ufunc.outer(lvals, lvals)
    return outer.astype(complex)
@njit(cache=True)
def M_matrix(s, c, m, l_max):
    """Spherical-spheroidal decomposition matrix truncated at l_max.

    Parameters
    ----------
    s: int
      Spin-weight of interest

    c: complex
      Oblateness of the spheroidal harmonic

    m: int
      Magnetic quantum number

    l_max: int
      Maximum angular quantum number

    Returns
    -------
    complex ndarray
      Decomposition matrix
    """
    lvals = ells(s, m, l_max)
    n = len(lvals)
    M = np.empty((n, n), dtype=np.complex128)
    for row in range(n):
        for col in range(n):
            M[row, col] = M_matrix_elem(s, c, m, lvals[row], lvals[col])
    return M
def sep_consts(s, c, m, l_max):
    """Separation constants As: eigenvalues of the decomposition matrix.
    Parameters
    ----------
    s: int
        Spin-weight of interest
    c: complex
        Oblateness of spheroidal harmonic
    m: int
        Magnetic quantum number
    l_max: int
        Maximum angular quantum number
    Returns
    -------
    complex ndarray
        Eigenvalues of spherical-spheroidal decomposition matrix
    """
    decomp = M_matrix(s, c, m, l_max)
    return np.linalg.eigvals(decomp)
def sep_const_closest(A0, s, c, m, l_max):
    """Separation constant closest (in modulus) to the guess A0.
    Parameters
    ----------
    A0: complex
        Value close to the desired separation constant.
    s: int
        Spin-weight of interest
    c: complex
        Oblateness of spheroidal harmonic
    m: int
        Magnetic quantum number
    l_max: int
        Maximum angular quantum number
    Returns
    -------
    complex
        Separation constant that is the closest to A0.
    """
    candidates = sep_consts(s, c, m, l_max)
    best = np.argmin(np.abs(candidates - A0))
    return candidates[best]
def C_and_sep_const_closest(A0, s, c, m, l_max):
    """Eigenpair of the decomposition matrix whose eigenvalue is nearest A0.
    Parameters
    ----------
    A0: complex
        Value close to the desired separation constant.
    s: int
        Spin-weight of interest
    c: complex
        Oblateness of spheroidal harmonic
    m: int
        Magnetic quantum number
    l_max: int
        Maximum angular quantum number
    Returns
    -------
    complex, complex ndarray
        The first element of the tuple is the eigenvalue closest in
        value to A0.  The second element is the corresponding
        eigenvector; its 0th element corresponds to :meth:`l_min`.
    """
    eigvals, eigvecs = np.linalg.eig(M_matrix(s, c, m, l_max))
    best = np.argmin(np.abs(eigvals - A0))
    return eigvals[best], eigvecs[:, best]
|
duetosymmetryREPO_NAMEqnmPATH_START.@qnm_extracted@qnm-master@qnm@angular.py@.PATH_END.py
|
{
"filename": "to_hdf5.py",
"repo_name": "jiwoncpark/baobab",
"repo_path": "baobab_extracted/baobab-master/baobab/to_hdf5.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""Converting .npy image files and metadata into HDF5
This script converts the baobab data into the HDF5 format.
Example
-------
To run this script, pass in the baobab out_dir path as the first argument and the framework format as the second, e.g.::
$ to_hdf5 out_data/tdlmc_train_EmpiricalBNNPrior_seed1113 --format 'tf'
The output file will be named `tdlmc_train_EmpiricalBNNPrior_seed1113.h5` and can be found inside the directory provided as the first argument.
See the demo notebook `demo/Read_hdf5_file.ipynb` for instructions on how to access the datasets in this file.
"""
import os, sys
import numpy as np
import pandas as pd
import argparse
import h5py
from addict import Dict
from tqdm import tqdm
def parse_args():
    """Parse command-line arguments.

    Returns
    -------
    argparse.Namespace
        Parsed arguments with attributes ``npy_dir`` (str, required
        positional) and ``format`` ('tf' or 'theano', default 'tf').
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('npy_dir',
                        help='directory containing .npy files and metadata (path of out_dir in the baobab config)')
    parser.add_argument('--format',
                        default='tf',
                        dest='format',
                        type=str,
                        choices=['tf', 'theano'],
                        help='format of image. Default: tf.')
    args = parser.parse_args()
    # NOTE: argparse.ArgumentParser.parse_args() never returns None (on bad
    # input it prints usage and exits), so the former "sys.argv rerouting"
    # fallback guarded by `if args is None:` was unreachable and has been
    # removed.
    #base, ext = os.path.splitext(save_path)
    #if ext.lower() not in ['.h5', '.hdf5']:
    #    raise argparse.ArgumentTypeError('out_filepath must have a valid HDF5 extension.')
    return args
def main():
    """Convert a directory of baobab .npy images plus metadata.csv into HDF5.

    Each image is stored as its own float32 dataset ``image_<i>``; the
    pixel-wise mean and std over the whole set are stored as
    ``pixels_mean`` and ``pixels_std`` (accumulated online with Welford's
    algorithm); the metadata table is appended under the key ``metadata``.
    The output file ``<npy_dir_basename>.h5`` is written inside npy_dir.
    """
    args = parse_args()
    baobab_out_dir = os.path.basename(os.path.normpath(args.npy_dir))
    save_path = os.path.join(args.npy_dir, '{:s}.h5'.format(baobab_out_dir))
    print("Destination path: {:s}".format(save_path))
    metadata_path = os.path.join(args.npy_dir, 'metadata.csv')
    metadata_df = pd.read_csv(metadata_path, index_col=None)
    img_path_list = metadata_df['img_filename'].values
    first_img_filepath = os.path.join(args.npy_dir, img_path_list[0])
    img_shape = np.load(first_img_filepath).shape # image dimensions
    n_x, n_y = img_shape[-2], img_shape[-1]
    n_data, n_cols = metadata_df.shape
    # Initialize hdf5 file
    hdf_file = h5py.File(save_path, mode='w', driver=None)
    # Target image layout: channels-last for tf, channels-first for theano.
    if args.format == 'tf':
        img_shape = (n_x, n_y, 1) # tf data shape
    elif args.format == 'theano':
        img_shape = (1, n_x, n_y) # theano data shape
    else:
        raise NotImplementedError
    # Initialize mean and std of images, and quantities required to compute them online
    hdf_file.create_dataset('pixels_mean', img_shape, np.float32)
    hdf_file.create_dataset('pixels_std', img_shape, np.float32)
    mean = np.zeros(img_shape, np.float32)
    std = np.zeros(img_shape, np.float32)
    sum_sq = np.zeros(img_shape, np.float32)
    ddof = 0 # degree of freedom
    print("Saving images...")
    current_idx = 0 # running idx of dataset
    pbar = tqdm(total=n_data)
    while current_idx < n_data:
        # Read in image.  reshape(img_shape) already yields the requested
        # (tf or theano) layout, so no further axis rolling is needed.
        # BUGFIX: the original code had `if format=='theano':` here, which
        # compared the *builtin* `format` (always False) and, had it ever
        # run, its np.rollaxis would have scrambled the already-reshaped
        # axes -- the branch has been removed.
        img_path = os.path.join(args.npy_dir, img_path_list[current_idx])
        img = np.load(img_path).reshape(img_shape)
        # Populate images dataset
        dataset_name = 'image_{:d}'.format(current_idx)
        hdf_file.create_dataset(dataset_name, img_shape, np.float32)
        hdf_file[dataset_name][...] = img[None]
        # Update running mean and std (Welford's algorithm);
        # after the increment, current_idx is the sample count n.
        current_idx += 1
        delta = img - mean
        mean += delta / current_idx
        sum_sq += delta * (img - mean)
        # Update progress
        pbar.update(1)
    pbar.close()
    # Populate mean, std datasets
    std = np.sqrt(sum_sq / (n_data - ddof))
    hdf_file['pixels_mean'][...] = mean
    hdf_file['pixels_std'][...] = std
    hdf_file.close()
    # Create dataset for metadata df
    metadata_df.to_hdf(save_path, key='metadata', mode='a', format='table')
    # TODO: serialize or subgroup each row so the whole dataframe isn't read into memory
if __name__ == '__main__':
    main()
|
jiwoncparkREPO_NAMEbaobabPATH_START.@baobab_extracted@baobab-master@baobab@to_hdf5.py@.PATH_END.py
|
{
"filename": "dataset-desc__native-catboost__contains__full.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/_includes/work_src/reusage-formats/dataset-desc__native-catboost__contains__full.md",
"type": "Markdown"
}
|
For each object:
- A list of features.
- The target or multiple targets for multiregression (optional).
- Other [types of data](../../../concepts/input-data_column-descfile.md).
Feature indices used in training and feature importance are numbered from 0 to `featureCount - 1`. Any non-feature column types are ignored when calculating these indices.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@_includes@work_src@reusage-formats@dataset-desc__native-catboost__contains__full.md@.PATH_END.py
|
{
"filename": "Sedov-2d-ratio.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/tests/functional/Hydro/Sedov/Sedov-2d-ratio.py",
"type": "Python"
}
|
#-------------------------------------------------------------------------------
# The cylindrical Sedov test case (2-D).
#-------------------------------------------------------------------------------
import os, sys, shutil
from Spheral2d import *
from SpheralTestUtilities import *
#from SpheralGnuPlotUtilities import *
from GenerateNodeDistribution2dRatio import *
from CubicNodeGenerator import GenerateSquareNodeDistribution
import mpi
title("2-D integrated hydro test -- planar Sedov problem")
# NOTE(review): the banner says "planar" but this driver sets up the
# cylindrical (2-D) Sedov problem (see the header comment) -- confirm wording.
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
# commandLine injects every keyword below as a global variable, each of which
# may be overridden from the command line (e.g. --nRadial 100).
commandLine(seed = "lattice",
            thetaFactor = 0.5,
            azimuthalOffsetFraction = 0.0,
            nRadial = 200,
            nTheta = 200,
            rmin = 0.0,
            rmax = 1.0,
            rho0 = 1.0,
            eps0 = 0.0,
            smallPressure = False,
            Espike = 1.0,
            smoothSpike = True,
            topHatSpike = False,
            smoothSpikeScale = 0.5,
            gamma = 5.0/3.0,
            mu = 1.0,
            rhomin = 1e-10,
            # kernel
            HUpdate = IdealH,
            nPerh = 1.51,
            order = 5,
            hmin = 1e-15,
            hmax = 1.0,
            # hydros
            crksph = False,
            psph = False,
            fsisph = False,
            gsph = False,
            # hydro options
            solid = False,
            asph = False,
            XSPH = False,
            evolveTotalEnergy = False,
            compatibleEnergy = True,
            gradhCorrection = True,
            correctVelocityGradient = True,
            densityUpdate = RigorousSumDensity, # VolumeScaledDensity,
            filter = 0.0,
            # crksph parameters
            correctionOrder = LinearOrder,
            # gsph parameters
            RiemannGradientType = RiemannGradient, # (RiemannGradient,SPHGradient,HydroAccelerationGradient,OnlyDvDxGradient,MixedMethodGradient)
            linearReconstruction = True,
            # Artficial Viscosity
            boolReduceViscosity = False,
            nh = 5.0,
            aMin = 0.1,
            aMax = 2.0,
            Qhmult = 1.0,
            boolCullenViscosity = False,
            alphMax = 2.0,
            alphMin = 0.02,
            betaC = 0.7,
            betaD = 0.05,
            betaE = 1.0,
            fKern = 1.0/3.0,
            boolHopkinsCorrection = True,
            # Integration
            IntegratorConstructor = CheapSynchronousRK2Integrator,
            cfl = 0.5,
            useVelocityMagnitudeForDt = False,
            steps = None,
            goalTime = None,
            goalRadius = 0.8,
            dt = 1e-8,
            dtMin = 1.0e-8,
            dtMax = None,
            dtGrowth = 2.0,
            maxSteps = None,
            statsStep = 1,
            smoothIters = 0,
            # IO
            vizCycle = None,
            vizTime = 0.1,
            restoreCycle = -1,
            restartStep = 1000,
            graphics = False,
            useVoronoiOutput = False,
            clearDirectories = False,
            dataDirBase = "dumps-cylindrical-Sedov",
            outputFile = "None",
            serialDump=True,
            xlmin = 0.4,
            xlmax = 0.5,
            )
# Optionally start from a small but non-zero background pressure instead of a
# cold (eps0 = 0) ambient medium.
if smallPressure:
    P0 = 1.0e-6
    eps0 = P0/((gamma - 1.0)*rho0)
    print("WARNING: smallPressure specified, so setting eps0=%g" % eps0)
# Sanity checks on mutually exclusive / required option combinations.
assert not(boolReduceViscosity and boolCullenViscosity)
assert not(gsph and (boolReduceViscosity or boolCullenViscosity))
assert not(fsisph and not solid)
# thetaFactor selects a quarter (0.5), half (1.0), or full (2.0) disk.
assert thetaFactor in (0.5, 1.0, 2.0)
theta = thetaFactor * pi
# Figure out what our goal time should be.
import SedovAnalyticSolution
h0 = 1.0/nRadial*nPerh
answer = SedovAnalyticSolution.SedovSolution(nDim = 2,
                                             gamma = gamma,
                                             rho0 = rho0,
                                             E0 = Espike,
                                             h0 = h0)
if goalTime is None:
    # Invert the Sedov similarity solution to find the time at which the
    # shock reaches goalRadius.
    assert not goalRadius is None
    nu1 = 1.0/(answer.nu + 2.0)
    nu2 = 2.0*nu1
    goalTime = (goalRadius*(answer.alpha*rho0/Espike)**nu1)**(1.0/nu2)
vs, r2, v2, rho2, P2 = answer.shockState(goalTime)
print("Predicted shock position %g at goal time %g." % (r2, goalTime))
# Scale the spike energy according to the boundary conditions we're using.
# A quarter (half) disk with reflecting boundaries models a quarter (half)
# of the full point explosion, so only that fraction of Espike is deposited.
if thetaFactor == 0.5:
    Espike *= 0.25
elif thetaFactor == 1.0:
    Espike *= 0.5
#-------------------------------------------------------------------------------
# Path names.
#-------------------------------------------------------------------------------
# Encode the selected hydro flavor in the output directory name.
if crksph:
    hydroname = "CRKSPH"
elif psph:
    hydroname = "PSPH"
elif fsisph:
    hydroname = "FSISPH"
elif gsph:
    hydroname = "GSPH"
else:
    hydroname = "SPH"
if asph:
    hydroname = "A" + hydroname
dataDir = os.path.join(dataDirBase,
                       hydroname,
                       "nr=%i_nt=%i" % (nRadial, nTheta))
restartDir = os.path.join(dataDir, "restarts")
vizDir = os.path.join(dataDir, "visit")
restartBaseName = os.path.join(restartDir, "Sedov-cylindrical-2d-%i" % nRadial)
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist.  If not, create them.
#-------------------------------------------------------------------------------
import os, sys
# Only rank 0 touches the filesystem; everyone else waits at the barrier.
if mpi.rank == 0:
    if clearDirectories and os.path.exists(dataDir):
        shutil.rmtree(dataDir)
    if not os.path.exists(restartDir):
        os.makedirs(restartDir)
    if not os.path.exists(vizDir):
        os.makedirs(vizDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
# Ideal (gamma-law) gas equation of state in MKS units.
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Create our interpolation kernels -- one for normal hydro interactions, and
# one for use with the artificial viscosity
#-------------------------------------------------------------------------------
# Tabulated B-spline kernel of the requested order, sampled at 1000 points.
WT = TableKernel(NBSplineKernel(order), 1000)
output("WT")
kernelExtent = WT.kernelExtent
#-------------------------------------------------------------------------------
# Create a NodeList and associated Neighbor object.
#-------------------------------------------------------------------------------
# FSISPH requires solid node lists (asserted above).
if solid:
    nodeListConstructor = makeSolidNodeList
else:
    nodeListConstructor = makeFluidNodeList
nodes1 = nodeListConstructor("nodes1", eos,
                             hmin = hmin,
                             hmax = hmax,
                             kernelExtent = kernelExtent,
                             nPerh = nPerh,
                             rhoMin = rhomin)
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
# Convenience references to the per-node field arrays of nodes1.
pos = nodes1.positions()
vel = nodes1.velocity()
mass = nodes1.mass()
eps = nodes1.specificThermalEnergy()
H = nodes1.Hfield()
# Bounding box of the seeded region: quarter, half, or full disk depending
# on thetaFactor.
if thetaFactor == 0.5:
    xmin = (0.0, 0.0)
    xmax = (1.0, 1.0)
elif thetaFactor == 1.0:
    xmin = (-1.0, 0.0)
    xmax = (1.0, 1.0)
else:
    xmin = (-1.0, -1.0)
    xmax = (1.0, 1.0)
# Generate the initial node distribution, with the x-spacing ratioed between
# xlmin and xlmax.
# BUGFIX: the commandLine option is lower-case "asph"; the original code
# passed the undefined name "ASPH" here.
generator = GenerateNodeDistribution2dRatio(nRadial, nTheta, rho0,
                                            rmin = rmin,
                                            rmax = rmax,
                                            xmin = xmin,
                                            xmax = xmax,
                                            theta = theta,
                                            azimuthalOffsetFraction = azimuthalOffsetFraction,
                                            nNodePerh = nPerh,
                                            SPH = (not asph),
                                            xlmin = xlmin,
                                            xlmax = xlmax)
# Choose a domain decomposition appropriate for parallel vs. serial runs.
if mpi.procs > 1:
    #from VoronoiDistributeNodes import distributeNodes2d
    from PeanoHilbertDistributeNodes import distributeNodes2d
else:
    from DistributeNodes import distributeNodes2d
distributeNodes2d((nodes1, generator))
output("mpi.reduce(nodes1.numInternalNodes, mpi.MIN)")
output("mpi.reduce(nodes1.numInternalNodes, mpi.MAX)")
output("mpi.reduce(nodes1.numInternalNodes, mpi.SUM)")
# Set the point source of energy.
Esum = 0.0
if smoothSpike or topHatSpike:
    # Distribute Espike over nodes near the origin, weighted either by the
    # interpolation kernel (smoothSpike) or by a top-hat of radius
    # smoothSpikeScale*kernelExtent in eta space (topHatSpike).
    Wsum = 0.0
    for nodeID in range(nodes1.numInternalNodes):
        Hi = H[nodeID]
        etaij = (Hi*pos[nodeID]).magnitude()
        if smoothSpike:
            Wi = WT.kernelValue(etaij/smoothSpikeScale, 1.0)
        else:
            if etaij < smoothSpikeScale*kernelExtent:
                Wi = 1.0
            else:
                Wi = 0.0
        Ei = Wi*Espike
        epsi = Ei/mass[nodeID]
        eps[nodeID] = epsi
        Wsum += Wi
    Wsum = mpi.allreduce(Wsum, mpi.SUM)
    assert Wsum > 0.0
    # Normalize so the integrated energy sums to Espike, then add the
    # ambient specific thermal energy.
    for nodeID in range(nodes1.numInternalNodes):
        eps[nodeID] /= Wsum
        Esum += eps[nodeID]*mass[nodeID]
        eps[nodeID] += eps0
else:
    # Deposit all of Espike on the single node closest to the origin
    # (only on the rank that owns that node).
    i = -1
    rmin = 1e50
    for nodeID in range(nodes1.numInternalNodes):
        rij = pos[nodeID].magnitude()
        if rij < rmin:
            i = nodeID
            rmin = rij
        eps[nodeID] = eps0
    rminglobal = mpi.allreduce(rmin, mpi.MIN)
    if fuzzyEqual(rmin, rminglobal):
        assert i >= 0 and i < nodes1.numInternalNodes
        eps[i] += Espike/mass[i]
        Esum += Espike
# Verify the total deposited energy matches the requested spike energy.
Eglobal = mpi.allreduce(Esum, mpi.SUM)
print("Initialized a total energy of", Eglobal)
assert fuzzyEqual(Eglobal, Espike)
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
output("db.appendNodeList(nodes1)")
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
# Build whichever hydro flavor was selected on the command line (the flags
# are treated as mutually exclusive; plain SPH is the fallback).
if crksph:
    hydro = CRKSPH(dataBase = db,
                   order = correctionOrder,
                   filter = filter,
                   cfl = cfl,
                   compatibleEnergyEvolution = compatibleEnergy,
                   XSPH = XSPH,
                   densityUpdate = densityUpdate,
                   HUpdate = HUpdate,
                   ASPH = asph)
elif fsisph:
    hydro = FSISPH(dataBase = db,
                   W = WT,
                   cfl = cfl,
                   sumDensityNodeLists=[nodes1],
                   densityStabilizationCoefficient = 0.1,
                   useVelocityMagnitudeForDt = useVelocityMagnitudeForDt,
                   compatibleEnergyEvolution = compatibleEnergy,
                   evolveTotalEnergy = evolveTotalEnergy,
                   linearCorrectGradients = correctVelocityGradient,
                   HUpdate = HUpdate)
elif gsph:
    # Godunov SPH needs an explicit Riemann solver: HLLC with Davis wave
    # speeds and (optionally linear) reconstruction limited by Van Leer.
    limiter = VanLeerLimiter()
    waveSpeed = DavisWaveSpeed()
    solver = HLLC(limiter,waveSpeed,linearReconstruction)
    hydro = GSPH(dataBase = db,
                riemannSolver = solver,
                W = WT,
                cfl=cfl,
                specificThermalEnergyDiffusionCoefficient = 0.00,
                compatibleEnergyEvolution = compatibleEnergy,
                correctVelocityGradient= correctVelocityGradient,
                evolveTotalEnergy = evolveTotalEnergy,
                XSPH = XSPH,
                ASPH = asph,
                densityUpdate=densityUpdate,
                HUpdate = HUpdate)
elif psph:
    hydro = PSPH(dataBase = db,
                 W = WT,
                 filter = filter,
                 cfl = cfl,
                 useVelocityMagnitudeForDt = useVelocityMagnitudeForDt,
                 compatibleEnergyEvolution = compatibleEnergy,
                 evolveTotalEnergy = evolveTotalEnergy,
                 correctVelocityGradient = correctVelocityGradient,
                 densityUpdate = densityUpdate,
                 HUpdate = HUpdate,
                 XSPH = XSPH,
                 ASPH = asph)
else:
    hydro = SPH(dataBase = db,
                W = WT,
                cfl = cfl,
                compatibleEnergyEvolution = compatibleEnergy,
                evolveTotalEnergy = evolveTotalEnergy,
                gradhCorrection = gradhCorrection,
                correctVelocityGradient = correctVelocityGradient,
                densityUpdate = densityUpdate,
                XSPH = XSPH,
                HUpdate = HUpdate,
                ASPH = asph)
packages = [hydro]
output("hydro")
output("hydro.cfl")
output("hydro.compatibleEnergyEvolution")
output("hydro.HEvolution")
# GSPH carries no artificial viscosity object; echo the Q settings otherwise.
if not gsph:
    q = hydro.Q
    output("q")
    output("q.Cl")
    output("q.Cq")
    output("q.epsilon2")
    output("q.limiter")
    output("q.balsaraShearCorrection")
    output("q.linearInExpansion")
    output("q.quadraticInExpansion")
#-------------------------------------------------------------------------------
# Construct the MMRV physics object.
#-------------------------------------------------------------------------------
# Optional time-dependent artificial viscosity multiplier packages
# (mutually exclusive, asserted near the top of the script).
if boolReduceViscosity:
    evolveReducingViscosityMultiplier = MorrisMonaghanReducingViscosity(q,nh,aMin,aMax)
    packages.append(evolveReducingViscosityMultiplier)
elif boolCullenViscosity:
    evolveCullenViscosityMultiplier = CullenDehnenViscosity(q,WT,alphMax,alphMin,betaC,betaD,betaE,fKern,boolHopkinsCorrection)
    packages.append(evolveCullenViscosityMultiplier)
#-------------------------------------------------------------------------------
# Create boundary conditions.
#-------------------------------------------------------------------------------
# Reflecting boundaries along the symmetry planes: x=0 applies only to the
# quarter disk, y=0 to the quarter and half disks.
xPlane0 = Plane(Vector(0.0, 0.0), Vector(1.0, 0.0))
yPlane0 = Plane(Vector(0.0, 0.0), Vector(0.0, 1.0))
xbc0 = ReflectingBoundary(xPlane0)
ybc0 = ReflectingBoundary(yPlane0)
for p in packages:
    if thetaFactor in (0.5, ):
        p.appendBoundary(xbc0)
    if thetaFactor in (0.5, 1.0):
        p.appendBoundary(ybc0)
#-------------------------------------------------------------------------------
# Construct a time integrator, and add the one physics package.
#-------------------------------------------------------------------------------
integrator = IntegratorConstructor(db)
for p in packages:
    integrator.appendPhysicsPackage(p)
integrator.lastDt = dt
# dtMin/dtMax default to None in commandLine; only set them when given.
if dtMin:
    integrator.dtMin = dtMin
if dtMax:
    integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.dtGrowth")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
#-------------------------------------------------------------------------------
# Build the controller.
#-------------------------------------------------------------------------------
vizMethod = None
if useVoronoiOutput:
    import SpheralVoronoiSiloDump
    vizMethod = SpheralVoronoiSiloDump.dumpPhysicsState
# BUGFIX: the commandLine option is lower-case "asph"; the original code
# passed the undefined name "ASPH" for the controller's SPH flag.
control = SpheralController(integrator, WT,
                            statsStep = statsStep,
                            restartStep = restartStep,
                            restartBaseName = restartBaseName,
                            restoreCycle = restoreCycle,
                            vizMethod = vizMethod,
                            vizBaseName = "Sedov-cylindrical-2d-%ix%i" % (nRadial, nTheta),
                            vizDir = vizDir,
                            vizStep = vizCycle,
                            vizTime = vizTime,
                            SPH = (not asph))
output("control")
#-------------------------------------------------------------------------------
# Finally run the problem and plot the results.
#-------------------------------------------------------------------------------
if steps is None:
    # Advance to the goal time, then force a final viz dump and restart file.
    control.advance(goalTime, maxSteps)
    if restoreCycle != control.totalSteps:
        control.updateViz(control.totalSteps, integrator.currentTime, 0.0)
    control.dropRestartFile()
else:
    # A fixed number of steps was requested instead of a goal time.
    control.step(steps)
# Output the energy conservation.
print("Energy conservation: ", ((control.conserve.EHistory[-1] -
                                 control.conserve.EHistory[0])/
                                control.conserve.EHistory[0]))
#-------------------------------------------------------------------------------
# Generate some error metrics comparing to the analytic solution.
#-------------------------------------------------------------------------------
# Report the error norms.
rmin, rmax = 0.0, 0.95
# Gather the full radial profiles of the state fields onto every rank.
r = mpi.allreduce([x.magnitude() for x in nodes1.positions().internalValues()], mpi.SUM)
xprof = mpi.allreduce([x.x for x in nodes1.positions().internalValues()], mpi.SUM)
yprof = mpi.allreduce([x.y for x in nodes1.positions().internalValues()], mpi.SUM)
rho = mpi.allreduce(list(nodes1.massDensity().internalValues()), mpi.SUM)
mass = mpi.allreduce(list(nodes1.mass().internalValues()), mpi.SUM)
v = mpi.allreduce([x.magnitude() for x in nodes1.velocity().internalValues()], mpi.SUM)
eps = mpi.allreduce(list(nodes1.specificThermalEnergy().internalValues()), mpi.SUM)
Pf = ScalarField("pressure", nodes1)
nodes1.pressure(Pf)
P = mpi.allreduce(list(Pf.internalValues()), mpi.SUM)
# Entropy function A = P/rho^gamma.
A = mpi.allreduce([Pi/(rhoi**gamma) for (Pi, rhoi) in zip(Pf.internalValues(), nodes1.massDensity().internalValues())], mpi.SUM)
# Radial and tangential smoothing scales extracted from the inverse H tensors.
Hinverse = db.newFluidSymTensorFieldList()
db.fluidHinverse(Hinverse)
hrfl = db.newFluidScalarFieldList()
htfl = db.newFluidScalarFieldList()
for Hfield, hrfield, htfield in zip(Hinverse,
                                    hrfl,
                                    htfl):
    n = Hfield.numElements
    assert hrfield.numElements == n
    assert htfield.numElements == n
    positions = Hfield.nodeList().positions()
    for i in range(n):
        runit = positions[i].unitVector()
        tunit = Vector(-(positions[i].y), positions[i].x).unitVector()
        hrfield[i] = (Hfield[i]*runit).magnitude()
        htfield[i] = (Hfield[i]*tunit).magnitude()
hr = mpi.allreduce(list(hrfl[0].internalValues()), mpi.SUM)
ht = mpi.allreduce(list(htfl[0].internalValues()), mpi.SUM)
Aans = None
if mpi.rank == 0:
    from SpheralTestUtilities import multiSort
    import Pnorm
    multiSort(r, rho, v, eps, P, A, hr, ht)
    # NOTE(review): 'mass', 'xprof' and 'yprof' are gathered above but not
    # included in this multiSort call, so they remain in gather order while
    # the other profiles are sorted by radius -- verify before pairing them
    # with the sorted arrays.
    rans, vans, epsans, rhoans, Pans, Aans, hans = answer.solution(control.time(), r)
    print("\tQuantity \t\tL1 \t\t\tL2 \t\t\tLinf")
    #f = open("MCTesting.txt", "a")
    #f.write(("CL=%g, Cq=%g \t") %(Cl, Cq))
    for (name, data, ans) in [("Mass Density", rho, rhoans),
                              ("Pressure", P, Pans),
                              ("Velocity", v, vans),
                              ("Thermal E", eps, epsans),
                              ("Entropy", A, Aans),
                              ("hr", hr, hans)]:
        assert len(data) == len(ans)
        error = [data[i] - ans[i] for i in range(len(data))]
        Pn = Pnorm.Pnorm(error, r)
        L1 = Pn.gridpnorm(1, rmin, rmax)
        L2 = Pn.gridpnorm(2, rmin, rmax)
        Linf = Pn.gridpnorm("inf", rmin, rmax)
        print("\t%s \t\t%g \t\t%g \t\t%g" % (name, L1, L2, Linf))
        #f.write(("\t\t%g") % (L1))
    #f.write("\n")
Aans = mpi.bcast(Aans, 0)
#-------------------------------------------------------------------------------
# If requested, write out the state in a global ordering to a file.
#-------------------------------------------------------------------------------
if outputFile != "None" and mpi.rank == 0:
    outputFile = os.path.join(dataDir, outputFile)
    f = open(outputFile, "w")
    f.write(("# " + 17*"%16s " + "\n") % ("r", "x", "y", "rho", "m", "P", "v", "eps", "A", "hr", "ht",
                                          "rhoans", "Pans", "vans", "epsans", "Aans", "hrans"))
    # NOTE(review): 'xprof', 'yprof' and 'mass' were not passed through the
    # radius sort performed with the other profiles above, so the xi/yi/mi
    # columns may not correspond row-by-row to ri here -- verify.
    for (ri, xi, yi, rhoi, mi, Pi, vi, epsi, Ai, hri, hti,
         rhoansi, Pansi, vansi, epsansi, Aansi, hansi) in zip(r, xprof, yprof, rho, mass, P, v, eps, A, hr, ht,
                                                              rhoans, Pans, vans, epsans, Aans, hans):
        f.write((17*"%17.12e " + "\n") % (ri, xi, yi, rhoi, mi, Pi, vi, epsi, Ai, hri, hti,
                                          rhoansi, Pansi, vansi, epsansi, Aansi, hansi))
    f.close()
if serialDump:
    # Gather per-node state from every rank and write a single ascii file
    # on rank 0: index, x, y, 0, h, mass, rho, eps.
    procs = mpi.procs
    rank = mpi.rank
    serialData = []
    i,j = 0,0
    nodeSet = []
    nodeSet.append(nodes1)
    for i in range(procs):
        for nodeL in nodeSet:
            if rank == i:
                for j in range(nodeL.numInternalNodes):
                    serialData.append([nodeL.positions()[j],3.0/(nodeL.Hfield()[j].Trace()),nodeL.mass()[j],nodeL.massDensity()[j],nodeL.specificThermalEnergy()[j]])
    serialData = mpi.reduce(serialData,mpi.SUM)
    if rank == 0:
        f = open(dataDir + "/serialDump.ascii",'w')
        for i in range(len(serialData)):
            f.write("{0} {1} {2} {3} {4} {5} {6} {7}\n".format(i,serialData[i][0][0],serialData[i][0][1],0.0,serialData[i][1],serialData[i][2],serialData[i][3],serialData[i][4]))
        f.close()
#-------------------------------------------------------------------------------
# Plot the final state.
#-------------------------------------------------------------------------------
if graphics:
    # NOTE(review): this branch uses Gnuplot and the plotting helpers
    # (plotNodePositions2d, plotRadialState, plotAnswer, generateNewGnuPlot,
    # plotFieldList) from SpheralGnuPlotUtilities, whose import is commented
    # out at the top of this script -- running with graphics=True will raise
    # NameError unless that import is restored.
    rPlot = plotNodePositions2d(db, colorNodeLists=0, colorDomains=1)
    rhoPlot, velPlot, epsPlot, PPlot, HPlot = plotRadialState(db)
    plotAnswer(answer, control.time(),
               rhoPlot, velPlot, epsPlot, PPlot, HPlot)
    plots = [(rPlot, "Sedov-cylindrical-positions.png"),
             (rhoPlot, "Sedov-cylindrical-rho.png"),
             (velPlot, "Sedov-cylindrical-vel.png"),
             (epsPlot, "Sedov-cylindrical-eps.png"),
             (PPlot, "Sedov-cylindrical-P.png"),
             (HPlot, "Sedov-cylindrical-h.png")]
    # Plot the specific entropy.
    AsimData = Gnuplot.Data(xprof, A,
                            with_ = "points",
                            title = "Simulation",
                            inline = True)
    AansData = Gnuplot.Data(xprof, Aans,
                            with_ = "lines",
                            title = "Solution",
                            inline = True)
    Aplot = generateNewGnuPlot()
    Aplot.plot(AsimData)
    Aplot.replot(AansData)
    Aplot.title("Specific entropy")
    Aplot.refresh()
    plots.append((Aplot, "Sedov-cylindrical-entropy.png"))
    if boolCullenViscosity:
        cullAlphaPlot = plotFieldList(q.ClMultiplier(),
                                      xFunction = "%s.magnitude()",
                                      plotStyle = "points",
                                      winTitle = "Cullen alpha")
        plots += [(cullAlphaPlot, "Sedov-planar-Cullen-alpha.png")]
    # Make hardcopies of the plots.
    for p, filename in plots:
        p.hardcopy(os.path.join(dataDir, filename), terminal="png")
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@tests@functional@Hydro@Sedov@Sedov-2d-ratio.py@.PATH_END.py
|
{
"filename": "fit.py",
"repo_name": "thomasorb/orb",
"repo_path": "orb_extracted/orb-master/orb/fit.py",
"type": "Python"
}
|
#!/usr/bin/python
# *-* coding: utf-8 *-*
# Author: Thomas Martin <thomas.martin.1@ulaval.ca>
# File: fit.py
## Copyright (c) 2010-2020 Thomas Martin <thomas.martin.1@ulaval.ca>
##
## This file is part of ORB
##
## ORB is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## ORB is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ORB. If not, see <http://www.gnu.org/licenses/>.
"""Fit module of ORB.
Defines the general Fitting classes and the fitting models.
Best accessed through fit_lines_in_*() functions (defined at the end
of the file)
"""
__author__ = "Thomas Martin"
__licence__ = "Thomas Martin (thomas.martin.1@ulaval.ca)"
import numpy as np
import warnings
import copy
import scipy.optimize
import scipy.interpolate
import time
import logging
import gvar
import orb.utils.fft
import orb.constants
import orb.utils.spectrum
import orb.utils.fit
import orb.utils.stats
import orb.utils.validate
import orb.utils.err
import orb.cutils
from orb.core import Lines, FilterFile, Axis
import orb.core
class FitVector(object):
    """
    General fitting class for a 1d array of data based on
    Levenberg-Marquardt least square fit algorithm.
    Accept any combination of models (based on :py:class:`fit.Model`)
    .. note:: This class is a wrapper around
      :py:meth:`scipy.optimize.leastsq`. Most of its purpose consists
      in passing an array of purely free parameters to
      :py:meth:`scipy.optimize.leastsq` and creating the objective
      function from free and fixed parameters by combining the
      different models.
    """
    # List of instantiated model objects (populated in __init__).
    models = None
    # Per-model combination operation ('add' or 'mult'), parallel to models.
    models_operation = None
    # The set of allowed combination operations.
    models_operations = ['add', 'mult']
    # Normalized copy of the data vector to fit (set in __init__).
    vector = None
    # Per-model dicts of priors (initial guesses of the free parameters).
    priors_list = None
    # Per-model lists of prior keys, in the order used to index parameters.
    priors_keys_list = None
    # Maximum number of function evaluations for the fit.
    max_fev = 5000
    # Fit tolerance (set in __init__).
    fit_tol = None
    def __init__(self, vector, models, params,
                 fit_tol=1e-8, signal_range=None,
                 max_iter=None, docomplex=False, nogvar=False,
                 vector_err=None):
        """Init class.

        :param vector: Vector to fit

        :param models: list of models to combine. Must be a list of
          couples (model, model_operation). A model must be a class
          which derives from :py:class:`orb.fit.Model`. Model
          operation must be 'add' if the model has to be added to the
          others or 'mult' if it has to be multiplied with the
          others. Models are combined in the order of the
          list. e.g. [(Model1, 'add'), (Model2, 'mult'), ...].

        :param params: list of parameters dictionaries for each
          model. Needed parameters are defined in each model.

        :param fit_tol: (Optional) Fit tolerance (default 1e-8)

        :param signal_range: (Optional) couple (min, max) defining the
          range of values considered in the fitting process.

        :param max_iter: (Optional) Maximum number of iterations,
          clipped to [0, 1e6] (default 10000).

        :param docomplex: (Optional) If data is complex and docomplex
          is True, tries a complex fit. If False, always fit the real
          part only.

        :param nogvar: (Optional) No gvar are returned.

        :param vector_err: (Optional) Uncertainty on each element of
          vector; must have the same size as vector.
        """
        self.nogvar = bool(nogvar)
        # Validate the models/params containers: one params dict per model.
        if not isinstance(models, tuple) and not isinstance(models, list) :
            raise ValueError('models must be a tuple of (model, model_operation).')
        if not isinstance(params, tuple) and not isinstance(params, list):
            raise ValueError('params must be a tuple of parameter dictionaries.')
        if len(models) != len(params):
            raise Exception('there must be exactly one parameter dictionary by model')
        if max_iter is not None:
            max_iter = int(np.clip(max_iter, 0, 1e6))
            logging.debug('max iteration changed to {}'.format(max_iter))
        else:
            max_iter = 10000
        self.max_iter = max_iter
        # Keep the imaginary part only when a complex fit was requested;
        # otherwise fit the real part alone.
        if np.any(np.iscomplex(vector)):
            if docomplex:
                self.vector_imag = np.copy(vector.imag)
            else:
                self.vector_imag = None
        else:
            self.vector_imag = None
            vector = vector.real
        self.vector = np.copy(vector)
        # Per-point uncertainty; defaults to uniform weights, and an
        # all-zero error vector is replaced by ones to avoid dividing by 0.
        if vector_err is not None:
            assert vector_err.size == vector.size, 'error vector must have same size as vector'
            self.sigma = np.copy(vector_err)
        else:
            self.sigma = np.ones_like(self.vector)
        if np.all(self.sigma == 0.): self.sigma.fill(1.)
        self.fit_tol = fit_tol
        # Normalize data (and errors) to order unity to condition the fit.
        self.normalization_coeff = np.nanmax(self.vector) - np.nanmedian(self.vector)
        self.vector /= self.normalization_coeff
        if self.vector_imag is not None:
            self.vector_imag /= self.normalization_coeff
        self.sigma /= self.normalization_coeff
        self.retry_count = 0
        # Instantiate each model, record its combination operation, and
        # collect its priors (the free parameters).
        self.models = list()
        self.models_operation = list()
        self.priors_list = list()
        self.priors_keys_list = list()
        params = orb.utils.fit.pick2paramslist(params)
        for i in range(len(models)):
            # init each model
            self.models.append(models[i][0](params[i]))
            if models[i][1] in self.models_operations:
                self.models_operation.append(models[i][1])
            else: raise Exception('Model operation must be in {}'.format(
                self.models_operations))
            # guess nan values for each model
            self.models[-1].make_guess(self.vector)
            self.priors_list.append(self.models[-1].get_priors())
            self.priors_keys_list.append(list(self.priors_list[-1].keys()))
        self.all_keys_index = None
        # Optionally restrict the fit to a channel range within the vector.
        if signal_range is not None:
            if (np.nanmin(signal_range) >= 0 and
                np.nanmax(signal_range) < self.vector.shape[0]):
                self.signal_range = [int(np.min(signal_range)),
                                     int(np.max(signal_range))]
            else: raise Exception('Bad signal range: {}'.format(signal_range))
        else: self.signal_range = None
def _all_p_list2dict(self, p_list):
"""Concatenate a list of free parameters into a dictionary
ready to be passed to the fit function.
:param p_list: List of free parameters. This list is a list of
tuple of parameters. Each tuple defining the free parameters
for each model, i.e. : ([free_params_model1],
[free_params_model2], ...)
.. seealso:: :py:meth:`fit.FitVector._all_p_dict2list`
"""
all_p = dict()
for p in p_list:
# check if two parameters have the same key
for _k in p:
if _k in list(all_p.keys()):
raise Exception('Two parameters are sharing the same key: {}'.format(_k))
all_p.update(p)
return all_p
def _all_p_dict2list(self, p_vect):
"""Transform a dictionnary of free parameters as returned by
the fit function into a list of free parameters (see
:py:meth:`fit.FitVector._all_p_list2dict`)
:param p_vect: dictionnary of free parameters.
.. seealso:: :py:meth:`fit.FitVector._all_p_list2dict`
"""
if isinstance(p_vect, tuple):
p_vect = p_vect[0]
p_list = list()
last_index = 0
for keys_list in self.priors_keys_list:
ip_list = dict()
for key in keys_list:
# remove log(prior), sqrt(prior) from the list
if 'log' in key:
ip_list[key[4:-1]] = p_vect[key[4:-1]]
elif 'sqrt' in key:
ip_list[key[5:-1]] = p_vect[key[5:-1]]
elif 'erfinv' in key:
ip_list[key[7:-1]] = p_vect[key[7:-1]]
else:
ip_list[key] = p_vect[key]
p_list.append(ip_list)
return p_list
def _all_p_dict2arr(self, p_dict):
"""Return a 1d array of free parameters from a dictionary of
free parameters.
:param p_dict: Free parameters dict
"""
# check keys and create all_keys_index
all_keys_index = dict()
index = 0
for keys_list in self.priors_keys_list:
for key in keys_list:
all_keys_index[key] = index
index += 1
if all_keys_index.keys() != p_dict.keys(): raise Exception(
'Badly formatted input dict')
self.all_keys_index = all_keys_index # save keys--index dict
p_arr = np.empty(len(self.all_keys_index), dtype=float)
for key in self.all_keys_index:
p_arr[self.all_keys_index[key]] = p_dict[key]
return p_arr
def _all_p_arr2dict(self, p_arr, keep_gvar=False):
"""Return a dict of free parameters from a 1d array of
free parameters.
:param p_arr: Free parameters array
:param keep_gvar: (Optional) If True, gvar values of the
parameters are kept. Else they are converted to float
(default False).
"""
if self.all_keys_index is None:
raise Exception('self._all_p_dict2arr() must be called first')
if np.size(p_arr) != len(self.all_keys_index):
raise Exception('Badly formatted input array of free parameters')
p_dict = dict()
for key in self.all_keys_index:
ival = p_arr[self.all_keys_index[key]]
if not keep_gvar: ival = float(gvar.mean(ival))
p_dict[key] = ival
return p_dict
    def get_model(self, all_p_free, return_models=False, x=None):
        """Return the combined model of the vector given a set of free
        parameters.

        This function is typically called to compute the objective
        function. It can also be called to obtain the final model
        based on fitted parameters.

        All multiplicative models are combined first into a single
        multiplicative vector which is then handed to each additive
        model (via its ``multf`` option), and the additive models are
        summed.

        :param all_p_free: dict of free parameters (a tuple whose
          first element is such a dict is also accepted).

        :param return_models: (Optional) If True return also
          individual models (default False)

        :param x: (Optional) array of data points on which model is
          computed instead of a typical np.arange(step_nb) (default
          None).

        :return: the combined model, or ``(model, models)`` where
          ``models`` is a dict keyed by model class name if
          return_models is True.
        """
        if isinstance(all_p_free, tuple):
            all_p_free = all_p_free[0]
        # check nans in the passed parameters (debug only, the fit is
        # not interrupted)
        for ikey in all_p_free:
            if np.isnan(gvar.sdev(all_p_free[ikey])):
                logging.debug('nan in passed parameters: {}'.format(all_p_free))
        step_nb = self.vector.shape[0]
        if x is None:
            x = np.arange(step_nb, dtype=float)
        if return_models:
            models = dict()
        else:
            # NOTE(review): this list is never filled in the visible
            # code -- individual models are only collected in the
            # return_models branch above.
            models = list()
        all_p_list = self._all_p_dict2list(all_p_free)
        if self.vector_imag is not None: return_complex = True
        else: return_complex = False
        # all multiplicative models must be multiplied together before
        # being applied to the additive models using the dedicated
        # class option (because multiplication by a filter function
        # cannot be applied straightforwardly to a line model)
        mult_model = np.ones_like(self.vector, dtype=float)
        for i in range(len(self.models)):
            if self.models_operation[i] == 'mult':
                model_list = self.models[i].get_model(
                    x, all_p_list[i], return_models=return_models, return_complex=return_complex)
                if return_models:
                    model_to_append, models_to_append = model_list
                    models[self.models[i].__class__.__name__] = models_to_append
                else:
                    model_to_append = model_list
                mult_model *= model_to_append
            if self.models_operation[i] not in ['mult', 'add']:
                raise Exception('Bad model operation. Model operation must be in {}'.format(self.models_operations))
        model = None
        # if no multiplicative model contributed, pass None so that
        # additive models skip the multiplication entirely
        if np.all(mult_model == 1): mult_model = None
        for i in range(len(self.models)):
            if self.models_operation[i] == 'add':
                model_list = self.models[i].get_model(
                    x, all_p_list[i], return_models=return_models, multf=mult_model, return_complex=return_complex)
                if return_models:
                    model_to_append, models_to_append = model_list
                    models[self.models[i].__class__.__name__] = models_to_append
                else:
                    model_to_append = model_list
                if model is None:
                    model = model_to_append
                else:
                    model += model_to_append
        if np.any(np.isnan(gvar.mean(model))):
            logging.debug('Nan in model')
        if return_models:
            return model, models
        else:
            return model
def _get_model_onrange(self, x, *all_p_free):
"""Return the part of the model contained in the signal
range.
.. note:: This function has been defined only to be used with
scipy.optimize.curve_fit.
:param x: x vector on which model is computed
:param *all_p_free: Vector of free parameters.
"""
all_p_free = self._all_p_arr2dict(all_p_free)
out = self.get_model(all_p_free, x=x)
if self.vector_imag is None:
return out[np.min(self.signal_range):np.max(self.signal_range)]
else:
out = orb.utils.vector.float2complex(out)
return orb.utils.vector.complex2float((
out[0][np.min(self.signal_range):np.max(self.signal_range)],
out[1][np.min(self.signal_range):np.max(self.signal_range)]))
def _get_vector_onrange(self):
"""Return the part of the vector contained in the signal
range.
.. note:: This function has been defined only to be used with
scipy.optimize.curve_fit.
"""
out = self.vector[
np.min(self.signal_range):np.max(self.signal_range)]
if self.vector_imag is None:
return out
else:
out_imag = self.vector_imag[
np.min(self.signal_range):np.max(self.signal_range)]
return orb.utils.vector.complex2float((out, out_imag))
def _get_sigma_onrange(self):
"""Return the part of the uncertainty on the vector contained
in the signal range.
.. note:: This function has been defined only to be used with
scipy.optimize.curve_fit.
"""
return self.sigma[
np.min(self.signal_range):np.max(self.signal_range)]
    def fit(self, compute_mcmc_error=False):
        """Fit data vector.

        This is the central function of the class: the priors of all
        models are flattened to a 1d array, passed to
        scipy.optimize.curve_fit (Levenberg-Marquardt), and the result
        is formatted into a dict of fitted parameters, models,
        residuals and fit statistics.

        :param compute_mcmc_error: (Optional) Compute Markov chain
          Monte-Carlo error on the fit parameters (Uncertainty
          estimates might be slighly better constrained but computing
          time can be orders of magnitude longer) (default False).

        :return: a dict of fit results, or an empty list if the fit
          did not converge.
        """
        # NOTE(review): all_args and MCMC_RANDOM_COEFF are never read
        # in the visible code -- presumably kept for a retry/MCMC
        # mechanism elsewhere; confirm before removing.
        all_args = dict(locals()) # used in case fit is retried (must stay
                                  # at the very beginning of the
                                  # function ;)
        MCMC_RANDOM_COEFF = 1e-2
        start_time = time.time()
        # flatten all model priors into one dict then into the flat
        # parameter array expected by curve_fit
        priors_dict = self._all_p_list2dict(self.priors_list)
        priors_arr = self._all_p_dict2arr(priors_dict)
        try:
            fit_results = scipy.optimize.curve_fit(
                self._get_model_onrange,
                np.arange(self.vector.shape[0]),
                self._get_vector_onrange(),
                #sigma=self._get_sigma_onrange(),
                p0=priors_arr,
                method='lm',
                full_output=True,
                maxfev=self.max_iter)
        except RuntimeError as e:
            logging.debug('RuntimeError during fit: {}'.format(e))
            # fake result with a final status of 0 so the failure
            # branch below is taken
            fit_results = list(['Runtime error during fit: {}'.format(e), 0])
        # lightweight anonymous container for the fit status/results
        fit = type('fit', (), {})
        # with full_output=True the last element is the integer status
        # flag; values 1-4 indicate that a solution was found
        if 0 < fit_results[-1] < 5:
            fit.stopping_criterion = fit_results[-1]
            fit.error = None
            # compute uncertainties from the diagonal of the
            # covariance matrix (NaN if the covariance is not finite)
            cov_x = fit_results[1]
            if np.all(np.isfinite(cov_x)):
                p_err = np.sqrt(np.diag(cov_x))
            else:
                p_err = np.empty_like(fit_results[0])
                p_err.fill(np.nan)
            fit.p = self._all_p_arr2dict(gvar.gvar(fit_results[0], p_err),
                                         keep_gvar=True)
            last_diff = fit_results[2]['fvec']
            fitted_vector = self._get_model_onrange(
                np.arange(self.vector.shape[0], dtype=float),
                *fit_results[0])
            vector = self._get_vector_onrange()
            residual = (vector - fitted_vector)
            if self.vector_imag is None:
                res_ratio = residual/self._get_sigma_onrange()
            else:
                # complex case: sigma is not applied
                res_ratio = residual
            res_ratio[np.isinf(res_ratio)] = np.nan
            fit.chi2 = np.nansum(res_ratio**2)
            fit.dof = self._get_vector_onrange().shape[0] - np.size(fit_results[0])
            fit.logGBF = np.nan
            fit.fitter_results = fit_results[2]
        else:
            logging.debug('bad classic fit ({}): {}'.format(fit_results[-1], fit_results[-2]))
            fit.stopping_criterion = 0
            fit.error = True
        ### fit results formatting ###
        if fit.error is None:
            fit_p = fit.p
            # correct amplitude-like parameters for the normalization
            # coeff applied to the data
            for key in list(fit_p.keys()):
                if 'amp' in key or 'cont_p' in key:
                    fit_p[key] = fit_p[key] * self.normalization_coeff
            if fit.stopping_criterion == 0:
                logging.debug('Dit not converge: stopping criterion == 0')
                logging.warning('Did not converge')
                return []
            returned_data = dict()
            returned_data['iter_nb'] = fit.fitter_results['nfev']
            ## get fit model
            _model, _models = self.get_model(
                fit_p,
                return_models=True)
            if self.vector_imag is not None:
                # convert flat float vectors back to complex
                _model = orb.utils.vector.float2complex(_model)
                for ikey in _models:
                    if isinstance(_models[ikey], list):
                        _new_model = list()
                        for imod in _models[ikey]:
                            _new_model.append(orb.utils.vector.float2complex(imod))
                        _models[ikey] = _new_model
                    else:
                        _models[ikey] = orb.utils.vector.float2complex(_models[ikey])
            (returned_data['fitted_vector_gvar'],
             returned_data['fitted_models_gvar']) = _model, _models
            returned_data['fitted_vector'] = gvar.mean(returned_data['fitted_vector_gvar'])
            returned_data['fitted_models'] = dict()
            for imod in returned_data['fitted_models_gvar']:
                returned_data['fitted_models'][imod] = gvar.mean(
                    returned_data['fitted_models_gvar'][imod])
            ## return fitted parameters of each models
            full_p_list = list()
            full_p_list_err = list()
            full_p_list_gvar = list()
            p_fit_list = self._all_p_dict2list(fit_p)
            for i in range(len(self.models)):
                # recompute p_val from new p_free
                self.models[i].set_p_free(p_fit_list[i])
                _ipval = self.models[i].get_p_val()
                full_p_list.append(gvar.mean(_ipval))
                full_p_list_err.append(gvar.sdev(_ipval))
                full_p_list_gvar.append(_ipval)
            returned_data['fit_params'] = full_p_list
            returned_data['fit_params_err'] = full_p_list_err
            if not self.nogvar:
                returned_data['fit_params_gvar'] = full_p_list_gvar
            ## compute error on parameters
            # compute reduced chi square
            returned_data['rchi2'] = fit.chi2 / fit.dof
            returned_data['rchi2_err'] = np.sqrt(2./self._get_vector_onrange().shape[0])
            if not self.nogvar:
                returned_data['rchi2_gvar'] = gvar.gvar(returned_data['rchi2'],
                                                        returned_data['rchi2_err'])
            returned_data['chi2'] = fit.chi2
            returned_data['residual'] = residual * self.normalization_coeff
            # kolmogorov smirnov test: if p_value < 0.05 residual is not normal
            returned_data['ks_pvalue'] = scipy.stats.kstest(
                residual / np.std(orb.utils.stats.sigmacut(residual)), 'norm')[1]
            # compute MCMC uncertainty estimates
            if compute_mcmc_error:
                sigma = np.nanstd(last_diff)
                # NOTE(review): this path looks broken as visible here:
                # cov_diag is never defined in this scope and `fit` (a
                # bare class object) is not subscriptable, so fit[0]
                # raises TypeError -- confirm the intended arguments of
                # _compute_mcmc_error before using compute_mcmc_error=True.
                returned_data['fit_params_err_mcmc'] = self._compute_mcmc_error(
                    fit[0], cov_diag, sigma)
            returned_data['logGBF'] = fit.logGBF
            returned_data['fit_time'] = time.time() - start_time
            returned_data['signal_range'] = self.signal_range
            returned_data['nparams'] = priors_arr.size # number of free parameters
            # Bayesian information criterion
            returned_data['BIC'] = orb.utils.fit.BIC(returned_data['residual'], returned_data['nparams'])
        else:
            logging.debug('bad fit')
            return []
        if self.nogvar:
            del returned_data['fitted_vector_gvar']
            del returned_data['fitted_models_gvar']
        return returned_data
class Model(object):
    """
    Template class for fit models. This class cannot be used directly.

    The main purpose of a Model class is to output a model given a set
    of parameters.

    Methods that must be implemented by real classes:

    * :py:meth:`fit.Model.parse_dict`
    * :py:meth:`fit.Model.check_input`
    * :py:meth:`fit.Model.make_guess`
    * :py:meth:`fit.Model.get_model`

    .. note:: A model is computed from a given set of parameters
      stored in :py:attr:`fit.Model.p_val`. From this set some
      parameters are **free**, some are **fixed** and some are
      **covarying**, i.e. the value of a subset of parameters can be
      computed from 1 free parameter.

      Taking the definition of the parameters (free, fixed or
      covarying, stored in :py:attr:`fit.Model.p_def`) into account,
      the reduced free parameter set is stored in
      :py:attr:`fit.Model.p_free`, the reduced set of fixed parameters
      is stored in :py:attr:`fit.Model.p_fixed`, the set of covarying
      parameters is stored in :py:attr:`fit.Model.p_cov` and when the
      model needs to be computed, the full set of model parameters
      (:py:attr:`fit.Model.p_val`) is computed again from
      :py:attr:`fit.Model.p_free`,
      :py:attr:`fit.Model.p_fixed`, :py:attr:`fit.Model.p_cov` and
      :py:attr:`fit.Model.p_def`.

      A group of covarying parameters is defined by the same label. If
      :py:attr:`fit.Model.p_def` is::

        ['free', 'fixed', '1', '2', 'free', 'fixed', '2', '1', '2', 'free']

      It means that we have 3 free parameters, 2 fixed parameters and
      2 groups of covarying parameters. The first group contains 2
      parameters and the second group contains 3 parameters. In this
      case the real number of free parameters will be 3 + 2 (one free
      parameter for each group of covarying parameters) = 5 and the
      real number of fixed parameters will be 2 + 5 (one fixed
      parameter for each covarying parameters) = 7.

      A Model class works this way :

      1. Init: the dictionary defining the parameters (free, fixed,
         covarying) and their values is parsed with
         :py:meth:`fit.Model.parse_dict`: :py:attr:`fit.Model.p_def`,
         :py:attr:`fit.Model.p_val` and :py:attr:`fit.Model.p_cov` are
         created. Then :py:meth:`fit.Model.val2free` is called to
         create :py:attr:`fit.Model.p_free` and
         :py:attr:`fit.Model.p_fixed`.

      2. the set of free parameters can then be changed with
         :py:meth:`fit.Model.set_p_free` before calling
         :py:meth:`fit.Model.get_model`. the updated values of
         :py:attr:`fit.Model.p_val` are computed before the model is created via
         :py:meth:`fit.Model.free2val`. A new set of free parameters
         can also be passed to :py:meth:`fit.Model.get_model`.
    """

    accepted_keys = ()
    """Accepted keys of the input dictionary (see
    :py:attr:`fit.Model.p_dict`)"""

    p_free = None
    """Up to date value of the free parameters. Its size is always
    less or equal to the size of the full set of parameters used
    directly to compute the model. It reflects the real number of
    fitted parameters. For each group of covarying parameters one free
    parameter is added. """

    p_fixed = None
    """Array of fixed parameters. Each covarying parameter is stored
    as fixed. And one free parameter is added for each group of
    covarying parameters."""

    p_dict = None
    """Input dictionary defining the parameters. Contains the initial
    values of the parameters"""

    p_def = None
    """Definition the full set of parameters (fixed, free or
    covarying). This array has the same shape as :py:attr:`fit.Model.p_val`"""

    p_val = None
    """Up to date values of the full set of parameters used by the model
    (initial guess before fit, fitted value after fit). This array has
    the same shape as :py:attr:`fit.Model.p_def`. It does not reflect the real number
    of fitted parameters."""

    p_cov = None
    """dict that stores the groups of covarying parameters by label
    and their associated value and covarying operation (a pointer to a
    function), i.e.::

      {['cov_label_1': (value1, cov_operation1)],
       ['cov_label_2': (value2, cov_operation2)],
       ...}
    """

    def __init__(self, p_dict):
        """ Initialize model

        :param p_dict: Input dictionary defining the parameters of the
          model.

        parameters definition can be : 'free', 'fixed' or covarying
        (in this case any string label can be used to define a group
        of covarying parameters)

        During init p_dict is parsed with
        :py:meth:`fit.Model.parse_dict`: :py:attr:`fit.Model.p_def`,
        :py:attr:`fit.Model.p_val` and :py:attr:`fit.Model.p_cov` are
        created. Then :py:meth:`fit.Model.val2free` is called to create
        :py:attr:`fit.Model.p_free` and :py:attr:`fit.Model.p_fixed`.
        """
        # parse input dict
        if isinstance(p_dict, dict):
            self.p_dict = dict(p_dict)
            # create a copy of the dict which keys will be popped
            # during init. If there are still keys at the end of the
            # init an error will be raised.
            self.unused_keys = dict(self.p_dict)
            # reject keys unknown to the concrete model class
            for key in list(self.p_dict.keys()):
                if key not in self.accepted_keys:
                    raise Exception('Input dictionary contains unknown key: {}'.format(key))
            self.parse_dict()
        else: raise ValueError('p must be a dict')
        # check input parameters
        self.check_input()
        # create free and fixed vectors
        self.val2free()
        if len(self.unused_keys) != 0:
            raise orb.utils.err.FitInitError('Some input keys where not used during fit init: {}'.format(list(self.unused_keys.keys())))

    def parse_dict(self):
        """Parse input dictionary to create :py:attr:`fit.Model.p_def`, :py:attr:`fit.Model.p_val` and
        :py:attr:`fit.Model.p_cov`"""
        raise NotImplementedError()

    def check_input(self):
        """Check input parameters"""
        raise NotImplementedError()

    def make_guess(self, v):
        """If a parameter value at init is a NaN this value is guessed.

        :param v: Data vector from which the guess is made.
        """
        raise NotImplementedError()

    def get_model(self, x, return_models=False, return_complex=False):
        """Compute a model M(x, p) for all passed x positions. p are
        the parameter values stored in :py:attr:`fit.Model.p_val`

        .. note:: NOTE(review): the subclasses visible in this file
          implement this method with an additional ``p_free`` argument
          -- confirm whether this base signature is still accurate.

        :param x: Positions where the model M(x, p) is computed.

        :param return_models: (Optional) If True return also
          individual models (default False)

        :param return_complex: (Optional) If True return a complex
          model (default False).
        """
        raise NotImplementedError()

    def get_p_free(self):
        """Return a copy of the vector of free parameters :py:attr:`fit.Model.p_free`"""
        return copy.copy(self.p_free)

    def get_priors(self):
        """Return priors (by default, a copy of the free parameters).
        """
        priors = dict(self.get_p_free())
        return priors

    def set_p_free(self, p_free):
        """Set the vector of free parameters :py:attr:`fit.Model.p_free`

        :param p_free: New vector of free parameters
        """
        # key views compare as unordered sets
        if self.p_free.keys() == p_free.keys():
            self.p_free = dict(p_free)
            self.free2val()
        else: raise Exception('bad format of passed free parameters')

    def get_p_val(self):
        """Return a copy of :py:attr:`fit.Model.p_val` """
        return copy.copy(self.p_val)

    def set_p_val(self, p_val):
        """Set :py:attr:`fit.Model.p_val`

        .. warning:: This method is used to bypass all the initialized
          parameters and reuse an already initialized model with
          another full set of parameters. Note that you might want to
          call :py:meth:`fit.Model.get_model` directly after this
          method because any call to :py:meth:`fit.Model.set_p_free`
          or :py:meth:`fit.Model.free2val` will recompute
          :py:attr:`fit.Model.p_val` from the init values and the
          actual :py:attr:`fit.Model.p_free`.

        :param p_val: New full set of parameters.
        """
        # NOTE(review): this comparison is order-sensitive (lists of
        # keys) whereas set_p_free compares unordered key views --
        # confirm whether the stricter check is intended here.
        if list(p_val.keys()) == list(self.p_val.keys()):
            self.p_val = copy.copy(p_val)
            self.val2free()
        else: raise Exception('bad format of passed val parameters')

    def val2free(self):
        """Recompute the set of free parameters
        :py:attr:`fit.Model.p_free` with the updated values of
        :py:attr:`fit.Model.p_val`"""
        if self.p_val is None or self.p_def is None or self.p_cov is None:
            raise Exception('class has not been well initialized: p_val, p_def and p_cov must be defined')
        self.p_free = dict()
        self.p_fixed = dict()
        passed_cov = list()
        for idef in self.p_def:
            if self.p_def[idef] == 'free':
                self.p_free[idef] = self.p_val[idef]
            elif self.p_def[idef] == 'fixed':
                self.p_fixed[idef] = self.p_val[idef]
            else:
                # covarying parameter: one shared free parameter keyed
                # by the group label (value taken from p_cov) is added
                # once per group; the parameter's own value is stored
                # as fixed
                if self.p_def[idef] not in passed_cov:
                    self.p_free[self.p_def[idef]]= self.p_cov[self.p_def[idef]][0]
                self.p_fixed[idef] = self.p_val[idef]
                passed_cov += list([self.p_def[idef]])
        # remove sdev from p_fixed
        for idef in self.p_fixed:
            if self.p_fixed[idef] is not None:
                self.p_fixed[idef] = gvar.mean(self.p_fixed[idef])

    def free2val(self):
        """Read the array of parameters definition
        :py:attr:`fit.Model.p_def` and update the parameter values
        based on the new set of free parameters
        :py:attr:`fit.Model.p_free`.
        """
        if self.p_free is None or self.p_fixed is None or self.p_def is None or self.p_cov is None:
            raise Exception('class has not been well initialized, p_free, p_fixed, p_def and p_cov must be defined')
        passed_cov = dict()
        self.p_val = dict()
        for idef in self.p_def:
            if self.p_def[idef] == 'free':
                self.p_val[idef] = self.p_free[idef]
            elif self.p_def[idef] == 'fixed':
                self.p_val[idef] = self.p_fixed[idef]
            else: # covarying parameter
                if self.p_def[idef] not in passed_cov:
                    # if not already taken into account
                    passed_cov[self.p_def[idef]] = self.p_free[self.p_def[idef]]
                # covarying operation: the parameter value is computed
                # by applying the group's cov_operation to the fixed
                # base value and the shared free parameter
                self.p_val[idef] = self.p_cov[self.p_def[idef]][1](
                    self.p_fixed[idef], passed_cov[self.p_def[idef]])
class FilterModel(Model):
    """
    Simple model of filter based on a real filter shape. The only
    possible free parameter is a wavelength/wavenumber shift.

    Input dictionary :py:attr:`fit.Model.p_dict`::

      {'filter_function':,
       'shift_guess':,
       'shift_def':}

    :keyword filter_function: Transmission of the filter over the
      fitted spectral range (axis must be exactly the same).

    :keyword shift_guess: Guess on the filter shift in pixels.

    :keyword shift_def: Definition of the shift parameter, can be
      'free' or 'fixed'

    .. note:: This model must be multiplied with the other and used
      last.
    """

    accepted_keys = ('filter_function',
                     'shift_guess',
                     'shift_def')
    """Accepted keys of the input dictionary (see
    :py:attr:`fit.Model.p_dict`)"""

    def parse_dict(self):
        """Parse input dictionary :py:attr:`fit.Model.p_dict`"""
        if 'filter_function' in self.p_dict:
            # BUGFIX: work on a copy -- zeroing the NaNs below used to
            # mutate the caller's array in place
            self.filter_function = np.array(
                self.p_dict['filter_function'], dtype=float)
            self.filter_function[np.isnan(self.filter_function)] = 0
            self.filter_axis = np.arange(self.filter_function.shape[0])
            # linear spline; ext=1 returns 0 outside the filter range
            self.filter_function = scipy.interpolate.UnivariateSpline(
                self.filter_axis, self.filter_function,
                k=1, s=0, ext=1)
            self.unused_keys.pop('filter_function')
        else:
            raise Exception('filter_function must be given')
        if 'shift_guess' in self.p_dict:
            shift_guess = self.p_dict['shift_guess']
            self.unused_keys.pop('shift_guess')
        else: shift_guess = 0.
        self.p_val = {'filter_shift': shift_guess}
        if 'shift_def' in self.p_dict:
            shift_def = self.p_dict['shift_def']
            self.unused_keys.pop('shift_def')
        else:
            shift_def = 'free'
        self.p_def = {'filter_shift': shift_def}
        self.p_cov = dict()

    def check_input(self):
        """No extra validation needed for this model."""
        pass

    def make_guess(self, v):
        """No guess is made from the data for this model.

        :param v: Data vector (unused).
        """
        pass

    def get_model(self, x, p_free=None, return_models=False, return_complex=False):
        """Return model M(x, p).

        :param x: Positions where the model M(x, p) is computed
          (unused here: the model is evaluated on the filter axis).

        :param p_free: (Optional) New values of the free parameters
          (default None).

        :param return_models: (Optional) If True return also
          individual models (default False)

        :param return_complex: (Optional) If True return a complex
          model (default False).
        """
        if p_free is not None:
            self.set_p_free(p_free)
        self.free2val()
        # NOTE(review): the shift is read from p_free, not p_val, so a
        # 'fixed' shift value is silently ignored -- confirm this is
        # intended.
        if len(self.p_free) == 0:
            mod = copy.copy(self.filter_function(self.filter_axis))
        else:
            mod = copy.copy(self.filter_function(
                self.filter_axis
                + gvar.mean(self.p_free['filter_shift'])))
        if return_complex:
            # the imaginary part of a filter transmission is null
            mod = orb.utils.vector.complex2float((mod, np.zeros_like(mod)))
        if return_models:
            return mod, (mod)
        else:
            return mod
class ContinuumModel(Model):
    """
    Polynomial continuum model.

    Input dictionary :py:attr:`fit.Model.p_dict`::

      {'poly_order':
       'poly_guess':}

    :keyword poly_order: Order of the polynomial to fit (be careful
      with high order polynomials).

    :keyword poly_guess: Initial guess on the coefficient values :
      must be a tuple of length poly_order + 1.

    .. note:: This model must be added to the others.
    """

    accepted_keys = ('poly_def',
                     'poly_order',
                     'poly_guess')
    """Accepted keys of the input dictionary (see
    :py:attr:`fit.Model.p_dict`)"""

    def _get_ikey(self, ip):
        """Return key corresponding to a coefficient of order ip

        :param ip: order of the coefficient
        """
        return 'cont_p{}'.format(int(ip))

    def _get_order_from_key(self, key):
        """Return the polynomial order encoded in a given key

        :param key: Key to get the order from (e.g. 'cont_p2' -> 2).
        """
        return int(key[6:])

    def get_p_val_as_array(self, p_val=None):
        """Return the parameter values as a 1d array of coefficients
        indexed by their key order.

        :param p_val: (Optional) dict of parameter values. If None,
          :py:attr:`fit.Model.p_val` is used (default None).
        """
        # BUGFIX: the original tested p_val.keys() inside the
        # p_val-is-None branch, which raised AttributeError whenever
        # the default was used. Use self.p_val when None and validate
        # the keys otherwise.
        if p_val is None:
            p_val = dict(self.p_val)
        elif p_val.keys() != self.p_val.keys():
            raise Exception('Badly formatted p_val')
        ans = np.empty(self.poly_order + 1)
        for ipar in p_val:
            i = self._get_order_from_key(ipar)
            ans[i] = p_val[ipar]
        return ans

    def parse_dict(self):
        """Parse input dictionary :py:attr:`fit.Model.p_dict`"""
        if 'poly_order' in self.p_dict:
            self.poly_order = int(self.p_dict['poly_order'])
            self.unused_keys.pop('poly_order')
        else: self.poly_order = 0
        self.p_val = dict()
        self.p_def = dict()
        self.p_cov = dict() # no covarying parameters
        # by default every coefficient is free and has no guess
        for ip in range(self.poly_order + 1):
            self.p_val[self._get_ikey(ip)] = None
            self.p_def[self._get_ikey(ip)] = 'free'
        if 'poly_def' in self.p_dict:
            if self.p_dict['poly_def'] is not None:
                if np.size(self.p_dict['poly_def']) == self.poly_order + 1:
                    for ip in range(self.poly_order + 1):
                        self.p_def[self._get_ikey(ip)] = self.p_dict['poly_def'][ip]
                else: raise Exception('poly_def must be an array of size equal to poly_order + 1')
            self.unused_keys.pop('poly_def')
        if 'poly_guess' in self.p_dict:
            if self.p_dict['poly_guess'] is not None:
                if np.size(self.p_dict['poly_guess']) == self.poly_order + 1:
                    for ip in range(self.poly_order + 1):
                        self.p_val[self._get_ikey(ip)] = self.p_dict['poly_guess'][ip]
                else: raise Exception('poly_guess must be an array of size equal to poly_order + 1')
            self.unused_keys.pop('poly_guess')

    def check_input(self):
        """No extra validation needed for this model."""
        pass

    def make_guess(self, v):
        """Guess the still-undefined (None) coefficients from the data.

        :param v: Data vector from which the guess is made.
        """
        for key in list(self.p_val.keys()):
            if self.p_val[key] is None:
                order = self._get_order_from_key(key)
                # heuristic seed based on the median level of the data
                self.p_val[key] = np.nanmedian(v)**(1./(order+1))
        self.val2free()

    def get_model(self, x, p_free=None, return_models=False, multf=None, return_complex=False):
        """Return model M(x, p).

        :param x: Positions where the model M(x, p) is computed.

        :param p_free: (Optional) New values of the free parameters
          (default None).

        :param multf: 1d vector with the same length as x vector which
          represent the function by which the model must be multiplied.

        :param return_models: (Optional) If True return also
          individual models (default False)

        :param return_complex: (Optional) If True return a complex
          model (default False).
        """
        if p_free is not None:
            self.set_p_free(p_free)
        self.free2val()
        # NOTE(review): np.polyval expects the highest-order
        # coefficient first, so cont_p0 is the coefficient of the
        # highest power here -- confirm this ordering is intended.
        coeffs = [self.p_val[self._get_ikey(ip)] for ip in range(self.poly_order + 1)]
        mod = np.polyval(coeffs, x)
        if multf is not None:
            if isinstance(multf[0], gvar.GVar):
                # interpolate mean and sdev separately so that the
                # uncertainty information is preserved
                multfsp_mean = scipy.interpolate.UnivariateSpline(
                    x, gvar.mean(multf), k=1, s=0, ext=2)
                multfsp_sdev = scipy.interpolate.UnivariateSpline(
                    x, gvar.sdev(multf), k=1, s=0, ext=2)
                mod *= gvar.gvar(multfsp_mean(x), multfsp_sdev(x))
            else:
                multfsp = scipy.interpolate.UnivariateSpline(
                    x, multf, k=1, s=0, ext=2)
                mod *= multfsp(x)
        if np.any(np.isnan(gvar.mean(mod))):
            logging.debug('Nan in model')
        if return_complex:
            mod = orb.utils.vector.complex2float((mod, np.zeros_like(mod)))
        if return_models:
            return mod, (mod)
        else:
            return mod
class LinesModel(Model):
"""
Emission/absorption lines model with a channel unity in pixels.
.. note:: This class is best seen as a basic class implemented
with more physical unities by :py:class:`fit.Cm1LinesModel` or
:py:class:`fit.NmLinesModel`.
.. note:: Each line is built on 3 (or more) parameters : amplitude,
FWHM, position and sigma/alpha (the 4th and 5th parameters are used only for some models -- see
below for details on the different models).
Some lines can have one or more covarying parameters: FWHM can
be the same for all the lines (this is True if lines are not
resolved), lines issued from the same ion can have the same
speed (e.g. [NII] doublet, [SII] doublet, [OIII] doublet), and
some fixed transition ratios between lines can also be set
(e.g. [NII]6584/[NII]6548 can be set to 2.89, when [NII]6548 is
likely to be really noisy).
Input dictionary :py:attr:`fit.Model.p_dict`::
{'line_nb':,
'fmodel':,
'amp_def':,
'pos_def':,
'fwhm_def':,
'sigma_def':, # only for sincgauss fmodel
       'alpha_def':, # only for sincphased fmodel
'amp_cov':,
'pos_cov':,
'fwhm_cov':,
'sigma_cov':, # only for sincgauss fmodel
'alpha_cov':, # only for sincphased fmodel
'amp_guess':,
'pos_guess':,
'fwhm_guess':,
'sigma_guess':, # only for sincgauss fmodel
'alpha_guess':} # only for sincphased fmodel
:keyword line_nb: Number of lines.
:keyword fmodel: Line shape, can be 'gaussian', 'sinc', 'sinc2' or
'sincgauss'.
:keyword amp_def: Definition of the amplitude parameter, can be
'free', 'fixed' or set to a label that defines its covarying
group.
:keyword pos_def: Definition of the position parameter in pixels,
can be 'free', 'fixed' or set to a label that defines its
covarying group.
:keyword fwhm_def: Definition of the FWHM parameter in pixels, can
be 'free', 'fixed' or set to a label that defines its covarying
group.
:keyword sigma_def: Definition of the sigma parameter in pixels,
can be 'free', 'fixed' or set to a label that defines its
covarying group.
:keyword amp_cov: Guess on the covariant value of the amplitude
(best set to 0 in general). There must be as many values as
covarying amplitude groups or only one value if it is the same
for all groups.
:keyword pos_cov: Guess on the covariant value of the velocity (in
pixels). There must be as many values as covarying amplitude
groups or only one value if it is the same
for all groups.
:keyword fwhm_cov: Guess on the covariant value of the FWHM
(best set to 0 in general). There must be as many values as
covarying amplitude groups or only one value if it is the same
for all groups.
:keyword sigma_cov: Guess on the covariant value of sigma (best
set to 0 in general). There must be as many values as covarying
amplitude groups or only one value if it is the same for all
groups.
:keyword amp_guess: Initial guess on the amplitude value of the
lines. Best set to a NaN in general (it can be automatically
      guessed with good robustness). But if lines have a covarying
amplitude the initial guess fixes their ratio.
:keyword pos_guess: Initial guess on the position of the lines:
the PRECISE rest frame position must be given here, especially
if lines share a covarying position, because their relative
position will be fixed.
:keyword fwhm_guess: Initial guess on the FWHM of the lines. This
guess must be the MOST PRECISE possible (to a few 10%), it is by
far the most unstable parameter especially for sinc lines.
:keyword sigma_guess: Initial guess on the value of sigma. Best
set to 0. in general
Example: A red spectrum containing [NII]6548, Halpha, [NII]6584,
[SII]6716 and [SII]6731, with a mean velocity of 1500 km/s (which
translates in a pixel shift of 5.5), with a fixed amplitude ratio
    between [NII] lines, the same speed for lines issued from the same
ions and a shared FWHM between everybody but Halpha would be
defined this way::
{'line_nb' : 5,
'amp_def' : ('1', 'free', '1', 'free', 'free'),
'pos_def' : ('1', '2', '1', '3', '3'),
'fwhm_def': ('1', '2', '1', '1', '1'),
'amp_cov': 0.,
'pos_cov': 5.5,
'fwhm_cov': 0.,
'amp_guess': (1., np.nan, 2.89, np.nan, np.nan), # here the amplitude ratio between covarying [NII] lines is fixed.
'pos_guess': (40,60,80,120,140), # positions are given in pixel and are purely arbitrary in this example
'fwhm_guess': 2.43}
.. note::
Line shapes (fmodel keyword):
* **gaussian**: A classical gaussian line shape. See :py:meth:`cutils.gaussian1d`.
* **sinc**: A pure sinc line shape, True interferometric line shape
if lines are strongly unresolved and if the interferometer has
no assymetry (generally good on SITELLE/SpIOMM low res cubes
--i.e. less than 500 steps-- if the line SNR is not too high
--i.e. < 50--). See :py:meth:`cutils.sinc1d`.
* **sinc2**: sinc2 = sqrt(sinc**2.). Can be used for spectra not
corrected in phase (where the absolute value of the complex
spectrum is taken).
* **sincgauss**: Convolution of a Gaussian (of width **sigma**) and
a sinc (FWHM). This line shape has a 4th parameter:
sigma. This is much closer to the true line shape, but it
takes much more time to compute because of the generally very
small value of sigma. This can be used to fit resolved lines,
like e.g. Halpha in absorption or active nucleus with broader
emission. See :py:meth:`cutils.sincgauss1d`.
"""
accepted_keys = ('line_nb',
'fmodel',
'ratio',
'amp_def',
'pos_def',
'fwhm_def',
'sigma_def', # only for sincgauss fmodel
'alpha_def', # only for sincphased fmode
'amp_cov',
'pos_cov',
'fwhm_cov',
'sigma_cov', # only for sincgauss fmodel
'alpha_cov', # only for sincphased fmodel
'amp_guess',
'pos_guess',
'fwhm_guess',
'sigma_guess', # only for sincgauss fmodel
'alpha_guess')
"""Accepted keys of the input dictionary (see
:py:attr:`fit.Model.p_dict`)"""
p_array = None
"""equivalent of :py:attr:`fit.Model.p_val` but presented as an
array with each row corresponding to a line which is easier to
handle."""
param_keys = ['amp', 'pos', 'fwhm', 'sigma', 'alpha']
"""Parameter keys"""
log_param_keys = ['fwhm', 'sigma']
"""Parameter keys that have a lognormal distribution"""
same_param_keys = ['fwhm', 'sigma']
"""Parameter keys which must be the same if covarying"""
def _get_ikey(self, key, iline):
"""Return key for a given line nb in
:py:attr:`fit.Model.p_val` and :py:attr:`fit.Model.p_def`
dictionnaries
:param key: may be 'amp', 'pos', 'fwhm', 'sigma', 'alpha'
:param iline: line number.
"""
if key in self.param_keys:
if 0 <= iline < self._get_line_nb():
return '{}{}'.format(key, iline)
else: raise Exception('Invalid line index, must be >=0 and < {}'.format(self._get_line_nb()))
else: raise Exception('Invalid paramter key. Must be in {}'.format(self.param_keys))
def _get_iline_from_key(self, key):
"""Return the line nb of a given key
:param key: Key to get line number from.
"""
for _k in self.param_keys:
if _k in key:
return int(key[len(_k):])
raise Exception('Invalid format for key')
def get_p_val_as_array(self, p_val=None):
    """Return parameter values as a 2d array, one row per line.

    Columns are (amp, pos, fwhm[, sigma][, alpha]) depending on the
    line model.

    :param p_val: (Optional) Parameter values dictionary. If None,
      :py:attr:`fit.Model.p_val` is used (default None). If given,
      its keys must match those of :py:attr:`fit.Model.p_val`.
    """
    # fix: original dereferenced p_val.keys() inside the `p_val is None`
    # branch (guaranteed AttributeError); the key check was clearly meant
    # for the non-None branch.
    if p_val is None:
        p_val = dict(self.p_val)
    elif p_val.keys() != self.p_val.keys():
        raise Exception('Badly formatted p_val')
    ans = np.empty((self._get_line_nb(),
                    len(p_val) // self._get_line_nb()))
    for ipar in p_val:
        iline = self._get_iline_from_key(ipar)
        if 'amp' in ipar:
            ans[iline, 0] = p_val[ipar]
        if 'pos' in ipar:
            ans[iline, 1] = p_val[ipar]
        if 'fwhm' in ipar:
            ans[iline, 2] = p_val[ipar]
        # 4th (and 5th) columns depend on the line model
        if self._get_fmodel() in ['sincgauss', 'sincgaussphased']:
            if 'sigma' in ipar:
                ans[iline, 3] = p_val[ipar]
        if self._get_fmodel() in ['sincphased']:
            if 'alpha' in ipar:
                ans[iline, 3] = p_val[ipar]
        if self._get_fmodel() in ['sincgaussphased']:
            if 'alpha' in ipar:
                ans[iline, 4] = p_val[ipar]
    return ans
def get_priors(self):
    """Return priors. Replace gaussian distribution by lognormal
    distribution for some parameters.
    """
    # strip the uncertainty from every free parameter, keeping only the
    # central (mean) value
    return {ikey: gvar.mean(ival)
            for ikey, ival in dict(self.get_p_free()).items()}
def parse_dict(self):
    """Parse input dictionary :py:attr:`fit.Model.p_dict`"""
    def parse_param(key, cov_operation):
        # Parse the '<key>_guess', '<key>_def' and '<key>_cov' entries of
        # p_dict for one parameter family ('amp', 'pos', ...) and update
        # self.p_def / self.p_val / self.p_cov accordingly.
        # NOTE: reads `line_nb` from the enclosing scope.
        key_guess = key + '_guess'
        key_def = key + '_def'
        key_cov = key + '_cov'
        ## parse guess: one initial value per line (or None if not given)
        p_guess = dict()
        if key_guess in self.p_dict:
            orb.utils.validate.has_len(self.p_dict[key_guess], line_nb,
                                       object_name=key_guess)
            self.p_dict[key_guess] = np.atleast_1d(self.p_dict[key_guess])
            for iline in range(line_nb):
                p_guess[self._get_ikey(key, iline)] = self.p_dict[key_guess][iline]
        else:
            for iline in range(line_nb):
                p_guess[self._get_ikey(key, iline)] = None
        ## parse cov: list of covarying values, consumed one per cov group
        if key_cov in self.p_dict:
            orb.utils.validate.is_iterable(self.p_dict[key_cov], object_name=key_cov)
            p_cov = np.atleast_1d(self.p_dict[key_cov])
        else:
            p_cov = None
        ## parse def: 'free', 'fixed' or a covariance-group symbol
        p_cov_dict = dict()
        p_def = dict()
        if key_def in self.p_dict: # gives the definition of the parameter
            orb.utils.validate.has_len(self.p_dict[key_def], line_nb,
                                       object_name=key_def)
            for iline in range(line_nb):
                p_def[self._get_ikey(key, iline)] = self.p_dict[key_def][iline]
            # manage cov values: every non-free/fixed definition names a
            # covariance group; each new group consumes one value of p_cov
            for iline in range(line_nb):
                if p_def[self._get_ikey(key, iline)] not in ['free', 'fixed']:
                    cov_symbol = str(key_def + str(p_def[self._get_ikey(key, iline)]))
                    # create singular symbol
                    p_def[self._get_ikey(key, iline)] = cov_symbol
                    # fill cov dict
                    if cov_symbol not in p_cov_dict:
                        if p_cov is None:
                            # todo: set it to a covarying value
                            # which depends on the parameter
                            # operation (amp=1, others=0)
                            cov_value = 0.
                        else:
                            if np.size(p_cov) > 0:
                                cov_value = p_cov[0]
                            else:
                                raise TypeError("not enough covarying parameters: {} must have the same size as the number of covarying parameters".format(key_cov))
                            p_cov = p_cov[1:] # used cov values are dumped
                        p_cov_dict[cov_symbol] = (
                            np.squeeze(cov_value), cov_operation)
                elif p_cov is not None:
                    # free/fixed lines must not come with cov values
                    if np.size(p_cov) != 0: raise ValueError("{}_cov must not be set ({}) if {}_def is set to 'free' or 'fixed'".format(key, self.p_dict[key_cov], key))
            logging.debug('Covarying symbols for {}: {}'.format(key, list(p_cov_dict.keys())))
            if p_cov is not None:
                # all supplied cov values must have been consumed
                if np.size(p_cov) > 0: raise TypeError("too much covarying parameters ({}): {} ({}) must have the same size as the number of covarying parameters ({})".format(np.size(p_cov) + len(p_cov_dict), key_cov, self.p_dict[key_cov], len(p_cov_dict)))
        else:
            # no definition given: every line parameter is free
            for iline in range(line_nb):
                p_def[self._get_ikey(key, iline)] = 'free'
        self.p_def.update(p_def)
        self.p_val.update(p_guess)
        self.p_cov.update(p_cov_dict)
        # mark the consumed p_dict keys as used
        if key_guess in self.p_dict: self.unused_keys.pop(key_guess)
        if key_def in self.p_dict: self.unused_keys.pop(key_def)
        if key_cov in self.p_dict: self.unused_keys.pop(key_cov)
    line_nb = self._get_line_nb()
    self.p_def = dict()
    self.p_val = dict()
    self.p_cov = dict()
    parse_param('amp', self._get_amp_cov_operation())
    parse_param('pos', self._get_pos_cov_operation())
    parse_param('fwhm', self._get_fwhm_cov_operation())
    # sigma/alpha only exist for the corresponding line models
    if self._get_fmodel() in ['sincgauss', 'sincgaussphased']:
        parse_param('sigma', self._get_sigma_cov_operation())
    if self._get_fmodel() in ['sincphased', 'sincgaussphased']:
        parse_param('alpha', self._get_alpha_cov_operation())
    # check cov values and def for log parameters sigma and fwhm
    for key_cov in self.p_cov:
        for key in self.same_param_keys:
            if key in key_cov:
                # all lines of one covarying group must share the same
                # initial value; it is moved into the cov value itself
                keys_def = [key_def for key_def in self.p_def if self.p_def[key_def] == key_cov]
                vals = [self.p_val[ikey_def] for ikey_def in keys_def]
                vals = np.array(vals)
                if np.any(vals - vals[0] != 0.): raise Exception('{} parameter must be the same for all the lines of the same covarying group'.format(key))
                for ikey_def in keys_def:
                    self.p_val[ikey_def] = 0.
                self.p_cov[key_cov] = (self.p_cov[key_cov][0] + vals[0], self.p_cov[key_cov][1])
def _get_amp_cov_operation(self):
"""Return covarying amplitude operation"""
return lambda x, y: x * y
def _get_fwhm_cov_operation(self):
"""Return covarying FWHM operation"""
return lambda x, y: x + y
def _get_pos_cov_operation(self):
"""Return covarying position operation"""
return lambda x, y: x + y
def _get_sigma_cov_operation(self):
"""Return covarying sigma operation"""
return lambda x, y: x + y
def _get_alpha_cov_operation(self):
"""Return covarying alpha operation"""
return lambda x, y: x + y
def check_input(self):
    """Check input parameters"""
    # warn about every parameter still undefined after parsing
    for ikey, ival in list(self.p_val.items()):
        if ival is not None:
            continue
        if 'pos' in ikey:
            logging.warning('No initial position given')
        if 'fwhm' in ikey:
            logging.warning('No initial fwhm given')
        # sigma/alpha warnings only make sense for the models that use them
        if self._get_fmodel() in ['sincgauss', 'sincgaussphased'] and 'sigma' in ikey:
            logging.warning('No initial sigma given')
        if self._get_fmodel() in ['sincphased', 'sincgaussphased'] and 'alpha' in ikey:
            logging.warning('No initial alpha given')
    ratio = self._get_ratio()
    if ratio is None:
        raise ValueError('ratio must be set to something else than None (e.g. 0.25)')
    if not 0 < ratio < 1:
        raise ValueError('ratio must be strictly between 0 and 1')
def make_guess(self, v):
    """If a parameter value at init is a NaN this value is guessed.

    Undefined positions raise (no automatic line detection); undefined
    FWHM/sigma/alpha get small default values; undefined amplitudes are
    guessed from the maximum of the data vector.

    :param v: Data vector from which the guess is made.
    """
    # default initial FWHM in channels (pure sinc line)
    FWHM_INIT = 2. * orb.constants.FWHM_SINC_COEFF
    # (removed unused local FWHM_COEFF = 6. — dead code)
    self._p_val2array()
    for key in list(self.p_array.keys()):
        # NOTE: np.isnan is only reached when the value is not None
        # (short-circuit), so None values never hit np.isnan
        if self.p_array[key] is None or np.isnan(self.p_array[key]):
            if 'pos' in key:
                raise Exception('initial guess on lines position must be given, no automatic lines detection is implemented at this level.')
            if 'fwhm' in key:
                self.p_array[key] = FWHM_INIT
            if 'sigma' in key:
                self.p_array[key] = 1e-8
            if 'alpha' in key:
                self.p_array[key] = 1e-8
    # amp must be checked after the others
    for key in list(self.p_array.keys()):
        if self.p_array[key] is None:
            if 'amp' in key:
                # guess the amplitude from the data maximum
                self.p_array[key] = np.nanmax(gvar.mean(v))
    self._p_array2val()
    self.val2free()
def _get_line_nb(self):
"""Return the number of lines"""
if 'line_nb' in self.p_dict:
if 'line_nb' in self.unused_keys: self.unused_keys.pop('line_nb')
return self.p_dict['line_nb']
else:
raise ValueError("'line_nb' must be set")
def _get_ratio(self):
"""Return the ratio value (shortest side of the interferogram over longest side)"""
if 'ratio' in self.p_dict:
if 'ratio' in self.unused_keys: self.unused_keys.pop('ratio')
return self.p_dict['ratio']
else:
raise ValueError("'ratio' must be set")
def _get_fmodel(self):
"""Return the line model"""
if 'fmodel' in self.p_dict:
if 'fmodel' in self.unused_keys: self.unused_keys.pop('fmodel')
return self.p_dict['fmodel']
else:
raise ValueError("'fmodel' must be set")
def _p_val2array(self):
self.p_array = dict(self.p_val)
def _p_array2val(self, p_array=None):
"""Transform :py:attr:`fit.LinesModel.p_array` to :py:attr:`fit.Model.p_val`."""
if p_array is None:
self.p_val = dict(self.p_array)
else:
self.p_val = dict(p_array)
def get_model(self, x, p_free=None, return_models=False, multf=None, return_complex=False):
    """Return model M(x, p).

    :param x: Positions where the model M(x, p) is computed.

    :param p_free: (Optional) New values of the free parameters
      (default None).

    :param multf: 1d vector with the same length as x vector which
      represent the function by which the model must be multiplied.

    :param return_models: (Optional) If True return also
      individual models (default False)

    :param return_complex: (Optional) If True return a complex model.
    """
    if p_free is not None:
        self.set_p_free(p_free)
    self.free2val()
    self._p_val2array()
    line_nb = self._get_line_nb()
    fmodel = self._get_fmodel()
    ratio = self._get_ratio()
    mod = None
    models = list()
    if multf is not None:
        # build a linear interpolator of multf so that it can be evaluated
        # at each line position; gvar vectors get separate mean/sdev splines
        if isinstance(multf[0], gvar.GVar):
            multfsp_mean = scipy.interpolate.UnivariateSpline(
                x, gvar.mean(multf), k=1, s=0, ext=2)
            multfsp_sdev = scipy.interpolate.UnivariateSpline(
                x, gvar.sdev(multf), k=1, s=0, ext=2)
        else:
            multfsp = scipy.interpolate.UnivariateSpline(
                x, multf, k=1, s=0, ext=2)
    for iline in range(line_nb):
        if multf is not None:
            try:
                mult_amp = gvar.gvar(
                    multfsp_mean(gvar.mean(self.p_array[self._get_ikey('pos', iline)])),
                    multfsp_sdev(gvar.mean(self.p_array[self._get_ikey('pos', iline)])))
            except UnboundLocalError:
                # multfsp_mean/sdev undefined: multf was not made of gvars
                mult_amp = multfsp(gvar.mean(self.p_array[self._get_ikey('pos', iline)]))
        else:
            mult_amp = 1.
        if np.any(np.isnan(gvar.mean(mult_amp))): raise Exception('Nan in mult_amp')
        # dispatch on the line shape model
        if fmodel == 'sinc':
            if return_complex:
                model_function = orb.utils.spectrum.sinc1d_complex
            else:
                model_function = orb.utils.spectrum.sinc1d
            line_mod = model_function(
                x, 0.,
                self.p_array[self._get_ikey('amp', iline)],
                self.p_array[self._get_ikey('pos', iline)],
                self.p_array[self._get_ikey('fwhm', iline)])
        elif fmodel == 'mertz':
            line_mod = orb.utils.spectrum.mertz1d(
                x, 0.,
                self.p_array[self._get_ikey('amp', iline)],
                self.p_array[self._get_ikey('pos', iline)],
                self.p_array[self._get_ikey('fwhm', iline)],
                ratio)
            if not return_complex:
                line_mod = line_mod[0] # return only the real part
        elif fmodel == 'sincgauss':
            if return_complex:
                model_function = orb.utils.spectrum.sincgauss1d_complex
            else:
                model_function = orb.utils.spectrum.sincgauss1d
            line_mod = model_function(
                x, 0.,
                self.p_array[self._get_ikey('amp', iline)],
                self.p_array[self._get_ikey('pos', iline)],
                self.p_array[self._get_ikey('fwhm', iline)],
                self.p_array[self._get_ikey('sigma', iline)])
        elif fmodel == 'sincphased':
            if return_complex:
                raise NotImplementedError('sincphased model not implemeted for complex vector')
            line_mod = orb.utils.spectrum.sinc1d_phased(
                x, 0.,
                self.p_array[self._get_ikey('amp', iline)],
                self.p_array[self._get_ikey('pos', iline)],
                self.p_array[self._get_ikey('fwhm', iline)],
                self.p_array[self._get_ikey('alpha', iline)])
        elif fmodel == 'sincgaussphased':
            if return_complex:
                raise NotImplementedError('sincgaussphased model not implemeted for complex vector')
            line_mod = orb.utils.spectrum.sincgauss1d_phased(
                x, 0.,
                self.p_array[self._get_ikey('amp', iline)],
                self.p_array[self._get_ikey('pos', iline)],
                self.p_array[self._get_ikey('fwhm', iline)],
                self.p_array[self._get_ikey('sigma', iline)],
                self.p_array[self._get_ikey('alpha', iline)])
        elif fmodel == 'sinc2':
            raise NotImplementedError()
            ## line_mod = np.sqrt(utils.spectrum.sinc1d(
            ##     x, 0., p_array[iline, 0],
            ##     p_array[iline, 1], p_array[iline, 2])**2.)
        elif fmodel == 'gaussian':
            line_mod = orb.utils.spectrum.gaussian1d(
                x, 0.,
                self.p_array[self._get_ikey('amp', iline)],
                self.p_array[self._get_ikey('pos', iline)],
                self.p_array[self._get_ikey('fwhm', iline)])
        else:
            raise ValueError("fmodel must be set to 'sinc', 'gaussian', 'sincgauss', 'sincphased', 'sincgaussphased' or 'sinc2'")
        if return_complex:
            # complex models are carried as float vectors (real/imag stacked)
            line_mod = orb.utils.vector.complex2float(line_mod)
        line_mod *= mult_amp
        # accumulate the sum of all lines
        if mod is None:
            mod = np.copy(line_mod)
        else:
            mod += np.copy(line_mod)
        models.append(line_mod)
    if return_models:
        return mod, models
    else:
        return mod
class Cm1LinesModel(LinesModel):
    """Emission/absorption lines model with a channel unity in cm-1.

    Reimplements :py:class:`fit.LinesModel` to use more physical units
    : channels are translated to cm-1 and velocity to km/s in input
    and output.

    .. seealso:: For more information please refer to
      :py:class:`fit.LinesModel`
    """
    accepted_keys = list(LinesModel.accepted_keys) + list((
        'step_nb',
        'step',
        'order',
        'nm_laser',
        'nm_laser_obs'))
    """Accepted keys of the input dictionary (see
    :py:attr:`fit.Model.p_dict`)"""

    def _w2pix(self, w):
        """Translate wavenumber to pixels"""
        return orb.utils.spectrum.fast_w2pix(w, self.axis_min, self.axis_step)

    def _pix2w(self, pix):
        """Translate pixel to wavenumber"""
        return orb.utils.spectrum.fast_pix2w(pix, self.axis_min, self.axis_step)

    def _get_pos_cov_operation(self):
        """Return covarying position operation for an input velocity in km/s"""
        # relativistic Doppler factor applied to a wavenumber (hence the
        # inverse of the factor used for wavelengths in NmLinesModel)
        return lambda lines, vel: lines * gvar.sqrt((1. - vel / orb.constants.LIGHT_VEL_KMS)
                                                    / (1. + vel / orb.constants.LIGHT_VEL_KMS))

    def _p_val2array(self):
        """Transform :py:attr:`fit.Model.p_val` to :py:attr:`fit.LinesModel.p_array`"""
        # get lines pos / fwhm
        # convert pos cm-1->pix
        # convert fwhm cm-1->pix
        lines_cm1 = list()
        fwhm_cm1 = list()
        sigma_kms = list()
        for iline in range(self._get_line_nb()):
            lines_cm1.append(self.p_val[self._get_ikey('pos', iline)])
            fwhm_cm1.append(self.p_val[self._get_ikey('fwhm', iline)])
            if self._get_fmodel() in ['sincgauss', 'sincgaussphased']:
                sigma_kms.append(self.p_val[self._get_ikey('sigma', iline)])
        lines_pix = self._w2pix(np.array(lines_cm1))
        fwhm_pix = np.array(fwhm_cm1) / self.axis_step
        if self._get_fmodel() in ['sincgauss', 'sincgaussphased']:
            sigma_pix = orb.utils.fit.vel2sigma(
                np.array(gvar.mean(sigma_kms), dtype=float), lines_cm1, self.axis_step)
        self.p_array = dict(self.p_val)
        for iline in range(self._get_line_nb()):
            self.p_array[self._get_ikey('pos', iline)] = lines_pix[iline]
            self.p_array[self._get_ikey('fwhm', iline)] = fwhm_pix[iline]
            if self._get_fmodel() in ['sincgauss', 'sincgaussphased']:
                # convert sigma km/s->pix
                self.p_array[self._get_ikey('sigma', iline)] = sigma_pix[iline]
        return self.p_array

    def _p_array2val(self, p_array=None):
        """Transform :py:attr:`fit.LinesModel.p_array` to :py:attr:`fit.Model.p_val`."""
        if p_array is None:
            p_array = dict(self.p_array)
        # get lines cm1 / fwhm
        # convert pos pix->cm-1
        # convert fwhm pix->cm-1
        lines_pix = list()
        fwhm_pix = list()
        sigma_pix = list()
        for iline in range(self._get_line_nb()):
            lines_pix.append(p_array[self._get_ikey('pos', iline)])
            fwhm_pix.append(p_array[self._get_ikey('fwhm', iline)])
            if self._get_fmodel() in ['sincgauss', 'sincgaussphased']:
                sigma_pix.append(p_array[self._get_ikey('sigma', iline)])
        lines_cm1 = self._pix2w(np.array(lines_pix))
        fwhm_cm1 = np.array(fwhm_pix) * self.axis_step
        if self._get_fmodel() in ['sincgauss', 'sincgaussphased']:
            # convert sigma pix->km/s
            sigma_kms = orb.utils.fit.sigma2vel(
                np.array(sigma_pix), gvar.mean(lines_cm1), self.axis_step)
        self.p_val = dict(p_array)
        for iline in range(self._get_line_nb()):
            self.p_val[self._get_ikey('pos', iline)] = lines_cm1[iline]
            self.p_val[self._get_ikey('fwhm', iline)] = fwhm_cm1[iline]
            if self._get_fmodel() in ['sincgauss', 'sincgaussphased']:
                self.p_val[self._get_ikey('sigma', iline)] = sigma_kms[iline]
        return self.p_val

    def parse_dict(self):
        """Parse input dictionary :py:attr:`fit.Model.p_dict`"""
        LinesModel.parse_dict(self)
        # the extra keys define the cm-1 axis of the spectrum
        if 'step_nb' not in self.p_dict:
            raise orb.utils.err.FitInitError('step_nb keyword must be set')
        self.step_nb = float(self.p_dict['step_nb'])
        self.unused_keys.pop('step_nb')
        if 'step' not in self.p_dict:
            raise orb.utils.err.FitInitError('step keyword must be set')
        self.step = float(self.p_dict['step'])
        self.unused_keys.pop('step')
        if 'order' not in self.p_dict:
            raise orb.utils.err.FitInitError('order keyword must be set')
        self.order = int(self.p_dict['order'])
        self.unused_keys.pop('order')
        if 'nm_laser' not in self.p_dict:
            raise orb.utils.err.FitInitError('nm_laser keyword must be set')
        self.nm_laser = float(self.p_dict['nm_laser'])
        self.unused_keys.pop('nm_laser')
        if 'nm_laser_obs' not in self.p_dict:
            raise orb.utils.err.FitInitError('nm_laser_obs keyword must be set')
        self.nm_laser_obs = float(self.p_dict['nm_laser_obs'])
        self.unused_keys.pop('nm_laser_obs')
        # off-axis correction coefficient of the calibration laser
        self.correction_coeff = self.nm_laser_obs / self.nm_laser
        self.axis_min = orb.cutils.get_cm1_axis_min(
            self.step_nb, self.step, self.order,
            corr=self.correction_coeff)
        self.axis_step = orb.cutils.get_cm1_axis_step(
            self.step_nb, self.step, corr=self.correction_coeff)
class NmLinesModel(Cm1LinesModel):
    """Emission/absorption lines model with a channel unity in nm.

    Reimplements :py:class:`fit.Cm1LinesModel` to use nm instead of
    cm-1. Channels are translated to cm-1 and velocity to km/s in
    input and output.

    .. seealso:: For more information please refer to
      :py:class:`fit.Cm1LinesModel` and :py:class:`fit.LinesModel`
    """
    def _get_pos_cov_operation(self):
        """Return covarying position operation for an input velocity in km/s"""
        # Doppler factor for wavelengths: the inverse of the wavenumber
        # factor used in Cm1LinesModel._get_pos_cov_operation
        return lambda lines, vel: lines * np.sqrt((1. + vel / orb.constants.LIGHT_VEL_KMS)
                                                  / (1. - vel / orb.constants.LIGHT_VEL_KMS))

    def parse_dict(self):
        """Parse input dictionary :py:attr:`fit.Model.p_dict`

        .. warning:: The nm model is presently disabled: this method
          always raises NotImplementedError.
        """
        # fix: the original raised a generic Exception and then carried
        # unreachable dead code (a Cm1LinesModel.parse_dict() call followed
        # by the computation of a nm axis with orb.cutils.get_nm_axis_min /
        # get_nm_axis_step). The dead code has been removed; restore it from
        # version control if the nm model is reimplemented.
        # NotImplementedError is a subclass of Exception, so existing
        # callers catching Exception still work.
        raise NotImplementedError('Not re-implemented')
################################################
#### CLASS Params ##############################
################################################
class Params(orb.core.Params):
    """Manage a set of parameters as a special dictionary which
    elements can be accessed like attributes.
    """
    # parameters cannot be modified when accessed by attribute;
    # item assignment (params['key'] = value) remains the only write path
    def __setattr__(self, key, value):
        raise Exception('Parameter is read-only')
#############################################
#### CLASS InputParams ######################
#############################################
class InputParams(object):
    """Build and hold the input parameters (models and their parameter
    dictionaries) used to configure a fit, with a pixel axis."""

    # simulate the use of this class as a dict converted class
    def __getitem__(self, key): return getattr(self, key)
    def __setitem__(self, key, value): return setattr(self, key, value)

    def __init__(self, step_nb):
        """Init the parameter holder.

        :param step_nb: Number of samples (channels) of the vector to fit.
        """
        self.params = list()
        self.models = list()
        self.base_params = Params()
        self.base_params['step_nb'] = int(step_nb)
        self.allparams = Params()
        self.allparams.update(self.base_params)
        # default axis is a plain pixel axis: [0, step_nb] with unit step
        self.axis_min = 0
        self.axis_step = 1
        self.axis_max = self.base_params.step_nb
        self.set_signal_range(self.axis_min, self.axis_max)

    def append_model(self, model, operation, params):
        """Register a model class with its combination operation ('add' or
        'mult') and its parameter dictionary. A model class can only be
        added once."""
        if self.has_model(model):
            raise orb.utils.err.FitInputError('{} already added'.format(model))
        self.models.append([model, operation])
        self.params.append(params)
        self.check_signal_range()
        self.allparams.update(params)

    def set_signal_range(self, rmin, rmax):
        """Set the range of the axis (in axis units) actually used for
        the fit. Raises if the range is not strictly inside the axis."""
        if (not (self.axis_min <= rmin < rmax)
            or not (rmin < rmax <= self.axis_max)):
            raise orb.utils.err.FitInputError('Check rmin and rmax values. Must be between {} and {}'.format(self.axis_min, self.axis_max))
        # convert range bounds from axis units to pixels
        signal_range_pix = orb.utils.spectrum.fast_w2pix(
            np.array([rmin, rmax], dtype=float),
            self.axis_min, self.axis_step)
        # clip to [1, step_nb - 1] to avoid the axis edges
        minx = max(1, int(np.min(signal_range_pix)))
        maxx = min(self.base_params.step_nb - 1,
                   int(np.ceil(np.max(signal_range_pix))))
        self.signal_range = (minx, maxx)
        self.check_signal_range()

    def has_model(self, model):
        """Return True if the given model class has already been added."""
        for imod in self.models:
            if model == imod[0]: return True
        return False

    def check_signal_range(self):
        # no check at this level; subclasses may reimplement
        pass

    def clean_kwargs(self, kwargs, params):
        """Return kwargs with every key consumed by params removed."""
        # remove used params from kwargs
        for key in list(params.keys()):
            if key in kwargs:
                kwargs.pop(key)
        return kwargs

    def convert(self):
        """Convert class to a pickable dict object
        """
        raw = dict()
        raw['models'] = list(self.models)
        _params = list()
        # here we convert all gvars convertible arrays or values to a
        # _mean / _sdev couple to avoid pickling gvars. These couples
        # must then be merged again as gvars in FitVector.__init__()
        raw['params'] = orb.utils.fit.paramslist2pick(self.params)
        raw['signal_range'] = list(self.signal_range)
        raw['base_params'] = dict(self.base_params)
        raw['allparams'] = dict()
        for _iparams in raw['params']:
            raw['allparams'].update(_iparams)
        raw['baseclass'] = self.__class__.__name__
        return raw

    def add_continuum_model(self, **kwargs):
        """Add a polynomial continuum model (default order 0) and return
        the kwargs cleaned of the keys it consumed."""
        params = Params()
        params['poly_order'] = 0
        params['poly_guess'] = None
        params.update(kwargs)
        # remove bad keys in case
        for key in list(params.keys()):
            if key not in ContinuumModel.accepted_keys:
                del params[key]
        self.append_model(ContinuumModel, 'add', params)
        return self.clean_kwargs(kwargs, params)

    def _check_lines_params(self, kwargs, fwhm_guess, lines):
        """Validate and normalize the user-supplied lines-model kwargs.

        Broadcasts single-valued *_def/*_guess entries to one value per
        line and derives sigma guesses/covariances for sincgauss-like
        models from the apodization.
        """
        # check user defined params (kwargs)
        params = Params()
        params.update(kwargs)
        if not 'fmodel' in params:
            raise orb.utils.err.FitInputError('fmodel must be set')
        # check single valued params
        for iparam in params:
            if '_def' in iparam or '_guess' in iparam or '_cov' in iparam:
                ival = np.atleast_1d(params[iparam])
                if ival.size == 1:
                    # broadcast to one value per line (cov values excepted:
                    # there is one value per covarying group)
                    if not '_cov' in iparam:
                        params[iparam] = list(ival) * np.size(lines)
                    else:
                        params[iparam] = list(ival)
                    logging.debug('changed single-valued parameter {}: {}'.format(
                        iparam, params[iparam]))
        # check sigma value
        if params.fmodel in ['sincgauss', 'sincgaussphased']:
            if 'fwhm_def' in params:
                if np.any(np.array(params.fwhm_def, dtype=str) != 'fixed'):
                    logging.warning('fmodel is a sincgauss and FWHM is not fixed')
            else:
                params['fwhm_def'] = ['fixed'] * np.size(lines)
            if 'sigma_def' in params:
                sigma_cov_vel = self._get_sigma_cov_vel(fwhm_guess, lines)
                params['sigma_def'] = np.array(params.sigma_def, dtype=str)
                orb.utils.validate.has_len(params.sigma_def, np.size(lines), object_name='sigma_def')
                orb.utils.validate.has_len(sigma_cov_vel, np.size(lines), object_name='sigma_cov_vel')
                if 'sigma_guess' in params:
                    orb.utils.validate.has_len(params.sigma_guess, np.size(lines), object_name='sigma_guess')
                    # sigma cov vel is adjusted to the initial guess + apodization
                    sqroots = list()
                    for i in range(np.size(lines)):
                        isqroot = np.sqrt(sigma_cov_vel[i]**2. + params.sigma_guess[i]**2.)
                        sqroots.append(isqroot)
                    sigma_cov_vel = np.array(sqroots)
                    _sigma_guess = sigma_cov_vel
                else: _sigma_guess = np.zeros_like(lines)
                if 'sigma_cov' not in params:
                    _sigma_cov = list()
                    _allcov = list()
                for ipar in range(len(params.sigma_def)):
                    if params.sigma_def[ipar] not in ['free', 'fixed']:
                        _sigma_guess[ipar] = 0. # must be set to 0 if covarying
                        if 'sigma_cov' not in params:
                            # one cov value per covarying group
                            if params.sigma_def[ipar] not in _allcov:
                                _allcov.append(params.sigma_def[ipar])
                                _sigma_cov.append(sigma_cov_vel[ipar])
                params['sigma_guess'] = list(_sigma_guess)
                if 'sigma_cov' not in params and len(_sigma_cov) > 0:
                    params['sigma_cov'] = list(_sigma_cov)
        if 'line_nb' in params:
            logging.warning('line_nb reset by user')
            del params.line_nb # this parameter cannot be changed
        if 'pos_guess' in params:
            raise orb.utils.err.FitInputError("Line position must be defined with the 'lines' parameter")
        return params

    def add_lines_model(self, lines, fwhm_guess, **kwargs):
        """Add a :py:class:`LinesModel` (pixel axis) plus its continuum.

        :param lines: Initial line positions (in channels).
        :param fwhm_guess: Initial FWHM (in channels), shared by all lines.
        """
        lines = np.array(lines)
        line_nb = np.size(lines)
        default_params = {
            'fmodel':'sinc',
            'line_nb':line_nb,
            'amp_def':['free'] * line_nb,
            'fwhm_def':['fixed'] * line_nb,
            'fwhm_guess':[fwhm_guess] * line_nb,
            'pos_def':['free'] * line_nb,
            'pos_guess':lines}
        # check and update default params with user kwargs
        params = self._check_lines_params(kwargs, fwhm_guess, lines)
        if 'fwhm_guess' in params:
            raise orb.utils.err.FitInputError('This parameter must be defined with the non-keyword parameter fwhm_guess')
        default_params.update(params)
        all_params = Params()
        all_params.update(self.base_params)
        all_params.update(default_params)
        # remove bad keys in case
        for key in list(all_params.keys()):
            if key not in LinesModel.accepted_keys:
                logging.debug('key removed: {}'.format(key))
                del all_params[key]
        self.append_model(LinesModel, 'add', all_params)
        # continuum model is automatically added
        kwargs = self.add_continuum_model(**kwargs)
        return self.clean_kwargs(kwargs, all_params)
################################################
#### CLASS Cm1InputParams ######################
################################################
class Cm1InputParams(InputParams):
    """Manage the input parameters for :py:class:`orb.fit.FitVector`
    and :py:meth:`orb.fit.fit_lines_in_spectrum`.
    """
    def __init__(self, step, order, step_nb, nm_laser,
                 theta_proj, theta_orig, apodization, zpd_index,
                 filter_name):
        """
        .. note:: A distinction is made between the incident angle of
          projection and the real incident angle because the incident
          angle of projection is use to define the projection axis and
          thus the channel of a given wavenumber while the original
          angle is used to define the theoretical fwhm (which is not
          modified during the projection).
        """
        self.params = list()
        self.models = list()
        self.base_params = Params()
        self.base_params['step_nb'] = int(step_nb)
        self.base_params['step'] = float(step)
        self.base_params['order'] = int(order)
        self.base_params['nm_laser'] = float(nm_laser)
        self.base_params['apodization'] = float(apodization)
        self.base_params['theta_proj'] = float(theta_proj)
        self.base_params['theta_orig'] = float(theta_orig)
        self.base_params['zpd_index'] = int(zpd_index)
        # axis correction coefficients derived from the incident angles
        self.base_params['axis_corr_proj'] = orb.utils.spectrum.theta2corr(theta_proj)
        self.base_params['axis_corr_orig'] = orb.utils.spectrum.theta2corr(theta_orig)
        self.base_params['nm_laser_obs'] = (self.base_params.nm_laser
                                            * self.base_params.axis_corr_proj)
        self.base_params['filter_name'] = str(filter_name)
        self.allparams = Params()
        self.allparams.update(self.base_params)
        # build the projected cm-1 axis of the spectrum
        self.axis_min = orb.cutils.get_cm1_axis_min(self.base_params.step_nb,
                                                    self.base_params.step,
                                                    self.base_params.order,
                                                    corr=self.base_params.axis_corr_proj)
        self.axis_step = orb.cutils.get_cm1_axis_step(self.base_params.step_nb,
                                                      self.base_params.step,
                                                      corr=self.base_params.axis_corr_proj)
        self.axis_max = self.axis_min + (self.base_params.step_nb - 1) * self.axis_step
        self.axis = np.arange(self.base_params.step_nb) * self.axis_step + self.axis_min
        self.set_signal_range(self.axis_min, self.axis_max)
        self.filterfile = FilterFile(self.base_params.filter_name)

    def _get_sigma_cov_vel(self, fwhm_guess_cm1, lines_cm1):
        """Return, for each line, the sigma (in km/s) equivalent to the
        broadening caused by the apodization (zero if no apodization)."""
        if self.base_params.apodization == 1.:
            sigma_cov_vel = [0] * np.size(lines_cm1) # km/s
        else:
            # convert the apodization broadening (cm-1 -> pix -> km/s)
            sigma_cov_vel = orb.utils.fit.sigma2vel(
                orb.utils.fft.apod2sigma(self.base_params.apodization,
                                         fwhm_guess_cm1.mean) / self.axis_step,
                lines_cm1, self.axis_step)
        return np.atleast_1d(sigma_cov_vel).astype(float)

    def add_lines_model(self, lines, **kwargs):
        """Add a :py:class:`Cm1LinesModel` plus its continuum.

        :param lines: Line positions in cm-1, or line names resolved
          through :py:class:`Lines`.
        """
        # guess lines
        lines_cm1 = list()
        for iline in lines:
            if isinstance(iline, str):
                # resolve a line name to its rest wavenumber
                iline = Lines().get_line_cm1(iline)
            lines_cm1.append(iline)
        lines_cm1 = np.array(lines_cm1)
        # guess fwhm
        fwhm_guess_cm1 = orb.utils.spectrum.compute_line_fwhm(
            self.base_params.step_nb - self.base_params.zpd_index,
            self.base_params.step,
            self.base_params.order,
            apod_coeff=self.base_params.apodization,
            corr=self.base_params.axis_corr_orig,
            wavenumber=True)
        # guess sigma from apodization
        sigma_cov_vel = self._get_sigma_cov_vel(fwhm_guess_cm1, lines_cm1)
        sigma_guess = np.copy(sigma_cov_vel)
        # guess ratio
        ratio = (float(self.base_params.zpd_index)
                 / float(self.base_params.step_nb - self.base_params.zpd_index))
        line_nb = np.size(lines)
        default_params = {
            'line_nb':line_nb,
            'amp_def':['free'] * line_nb,
            'ratio':ratio,
            #'amp_cov':1., # never put a gvar here or the amplitude sdev is forced to a given value
            'fwhm_def':['fixed'] * line_nb,
            'fwhm_guess':[fwhm_guess_cm1] * line_nb,
            'pos_def':['free'] * line_nb,
            'pos_guess':lines_cm1,
            'fmodel':'sinc'}
        params = self._check_lines_params(kwargs, fwhm_guess_cm1, lines_cm1)
        default_params.update(params)
        all_params = Params()
        all_params.update(self.base_params)
        all_params.update(default_params)
        # remove bad keys in case
        for key in list(all_params.keys()):
            if key not in Cm1LinesModel.accepted_keys:
                logging.debug('key removed: {}'.format(key))
                del all_params[key]
        self.append_model(Cm1LinesModel, 'add', all_params)
        # continuum model is automatically added
        kwargs = self.add_continuum_model(**kwargs)
        return self.clean_kwargs(kwargs, all_params)

    def add_filter_model(self, **kwargs):
        """Add a multiplicative :py:class:`FilterModel` built from the
        filter file given at init."""
        if self.base_params.filter_name is None:
            raise orb.utils.err.FitInputError('filter_name is None')
        # project the filter transmission on the spectrum axis
        filter_function = self.filterfile.project(Axis(self.axis)).data
        default_params = Params()
        default_params['filter_function'] = filter_function
        default_params['shift_def'] = 'free'
        # load user params
        params = Params()
        params.update(kwargs)
        if 'filter_function' in params:
            raise orb.utils.err.FitInputError('filter function must be defined via the filter file path at the init of the class')
        default_params.update(params)
        all_params = Params()
        all_params.update(self.base_params)
        all_params.update(default_params)
        # remove bad keys in case
        for key in list(all_params.keys()):
            if key not in FilterModel.accepted_keys:
                del all_params[key]
        self.append_model(FilterModel, 'mult', all_params)
        self.clean_kwargs(kwargs, all_params)

    def check_signal_range(self):
        """Warn if the signal range does not cover the filter bandpass."""
        if self.has_model(FilterModel):
            filter_bandpass = self.filterfile.get_filter_bandpass_cm1()
            if (min(self.signal_range_cm1) > min(filter_bandpass)
                or max(self.signal_range_cm1) < max(filter_bandpass)):
                logging.warning('Filter model might be badly constrained with such a signal range')

    def set_signal_range(self, rmin, rmax):
        """Set the signal range (in cm-1) and keep the cm-1 bounds."""
        InputParams.set_signal_range(self, rmin, rmax)
        self.signal_range_cm1 = (rmin, rmax)

    def convert(self):
        """Convert class to a pickable dict object (adds the cm-1 axis
        description to the base conversion)."""
        raw = InputParams.convert(self)
        raw['signal_range_cm1'] = self.signal_range_cm1
        raw['axis_min'] = self.axis_min
        raw['axis_max'] = self.axis_max
        raw['axis_step'] = self.axis_step
        raw['baseclass'] = self.__class__.__name__
        return raw
################################################
#### CLASS OutputParams ########################
################################################
class OutputParams(Params):
    """Container for the results of a fit.

    Filled by :py:meth:`OutputParams.translate` from the raw output of a
    :py:class:`FitVector` fit together with the input parameters used to
    prepare it.
    """

    def translate(self, inputparams, fitvector, nogvar=False):
        """Translate raw fit results into physical quantities.

        Computes and stores, for each line: formatted line parameters
        (height, amplitude, position, fwhm, sigma), and -- when the input
        spectrum is in wavenumber -- velocity, broadening and flux, plus
        the SNR.

        :param inputparams: InputParams instance (or its converted dict
          form) used to prepare the fit.
        :param fitvector: FitVector instance that performed the fit.
        :param nogvar: (Optional) If True, gvar objects are not stored
          and object arrays are reduced to their mean (default False).
        """
        if isinstance(inputparams, InputParams):
            inputparams = inputparams.convert()
        inputparams['params'] = orb.utils.fit.pick2paramslist(inputparams['params'])
        # merge all per-model params and the base params in one dict
        all_inputparams = Params()
        for iparams in inputparams['params']:
            all_inputparams.update(iparams)
        all_inputparams.update(inputparams['base_params'])
        # wavenumber is True for cm-1 input, None for unitless input
        if isinstance(inputparams, Cm1InputParams) or inputparams['baseclass'] == 'Cm1InputParams':
            wavenumber = True
        elif isinstance(inputparams, InputParams) or inputparams['baseclass'] == 'InputParams':
            wavenumber = None
        else:
            raise NotImplementedError()
        if not isinstance(fitvector, FitVector):
            raise Exception('fitvector must be an instance of FitVector')
        # prefer MCMC uncertainties when they were computed
        if 'fit_params_err_mcmc' in self:
            fit_params_err_key = 'fit_params_err_mcmc'
        else:
            fit_params_err_key = 'fit_params_err'
        ## create a formated version of the parameters:
        ## [N_LINES, (H, A, DX, FWHM, SIGMA, ALPHA)]
        line_nb = np.size(all_inputparams['pos_guess'])
        line_params = fitvector.models[0].get_p_val_as_array(self['fit_params'][0])
        if all_inputparams['fmodel'] in ['sincgauss', 'sincphased', 'sincgaussphased']:
            line_params[:, 3] = np.abs(line_params[:, 3])
        else:
            # models without a sigma column get a NaN column appended
            # (nan_col is reused below for the error array -- both
            # branches are guarded by the same fmodel membership test)
            nan_col = np.empty(line_nb, dtype=float)
            nan_col.fill(np.nan)
            line_params = np.append(line_params.T, nan_col)
            line_params = line_params.reshape(
                line_params.shape[0] // line_nb, line_nb).T
        # evaluate continuum level at each position
        cont_params = self['fit_params'][1]
        if wavenumber is None:
            pos_pix = line_params[:, 1]
        else:
            pos_pix = orb.utils.spectrum.fast_w2pix(
                line_params[:, 1],
                inputparams['axis_min'],
                inputparams['axis_step'])
        cont_model = fitvector.models[1]
        cont_model.set_p_val(self['fit_params'][1])
        cont_level = cont_model.get_model(pos_pix)
        # prepend the continuum level as the first column
        all_params = np.append(cont_level, line_params.T)
        line_params = all_params.reshape(
            (all_params.shape[0] // line_nb, line_nb)).T
        # compute vel err
        line_params_err = fitvector.models[0].get_p_val_as_array(
            self[fit_params_err_key][0])
        if all_inputparams.fmodel not in ['sincgauss', 'sincgaussphased', 'sincphased']:
            line_params_err = np.append(line_params_err.T, nan_col)
            line_params_err = line_params_err.reshape(
                line_params_err.shape[0] // line_nb, line_nb).T
        # evaluate error on continuum level at each position
        cont_params_err = self[fit_params_err_key][1]
        cont_params_err_max = dict()
        cont_params_err_min = dict()
        for key in cont_params:
            cont_params_err_max[key] = cont_params[key] + cont_params_err[key] / 2.
            cont_params_err_min[key] = cont_params[key] - cont_params_err[key] / 2.
        cont_model.set_p_val(cont_params_err_max)
        cont_level_max = cont_model.get_model(pos_pix)
        cont_model.set_p_val(cont_params_err_min)
        cont_level_min = cont_model.get_model(pos_pix)
        cont_level_err = gvar.fabs(cont_level_max - cont_level_min)
        all_params_err = np.append(cont_level_err, line_params_err.T)
        line_params_err = all_params_err.reshape(
            (all_params_err.shape[0] // line_nb, line_nb)).T
        # set 0 sigma to nan
        if all_inputparams.fmodel in ['sincgauss', 'sincgaussphased']:
            line_params[:, 4][line_params[:, 4] == 0.] = np.nan
            # NOTE(review): fit_params_err_key was already indexed above,
            # so this membership test looks always-true here -- confirm.
            if fit_params_err_key in self:
                line_params_err[:, 4][line_params_err[:, 4] == 0.] = np.nan
        ## compute errors
        line_params = gvar.gvar(gvar.mean(line_params),
                                gvar.mean(line_params_err))
        if wavenumber is not None:
            # compute velocity
            pos_wave = line_params[:, 2]
            velocity = orb.utils.spectrum.compute_radial_velocity(
                pos_wave, gvar.mean(all_inputparams.pos_guess),
                wavenumber=wavenumber)
            if not nogvar:
                self['velocity_gvar'] = velocity
            self['velocity'] = gvar.mean(velocity)
            self['velocity_err'] = gvar.sdev(velocity)
            # compute broadening
            sigma_total_kms = line_params[:, 4]
            sigma_apod_kms = orb.utils.fit.sigma2vel(
                orb.utils.fft.apod2sigma(
                    all_inputparams.apodization, line_params[:, 3]) / inputparams['axis_step'],
                pos_wave, inputparams['axis_step'])
            broadening = (gvar.fabs(sigma_total_kms**2
                                    - sigma_apod_kms**2))**0.5
            if not nogvar:
                self['broadening_gvar'] = broadening
            self['broadening'] = gvar.mean(broadening)
            self['broadening_err'] = gvar.sdev(broadening)
            # compute fwhm in Angstroms to get flux
            # If calibrated, amplitude unit must be in erg/cm2/s/A, then
            # fwhm/width units must be in Angströms
            if wavenumber:
                fwhm = orb.utils.spectrum.fwhm_cm12nm(
                    line_params[:, 3], line_params[:, 2]) * 10.
            else:
                fwhm = line_params[:, 3] * 10.
            # compute sigma in Angstroms to get flux
            sigma = orb.utils.spectrum.fwhm_cm12nm(
                orb.utils.fit.vel2sigma(
                    line_params[:, 4], line_params[:, 2],
                    inputparams['axis_step']) * inputparams['axis_step'],
                line_params[:, 2]) * 10.
        else:
            # unitless input: keep fwhm/sigma in channels
            fwhm = line_params[:, 3]
            sigma = line_params[:, 4]
        ## compute flux
        if all_inputparams.fmodel in ['sincgauss', 'sincgaussphased']:
            flux = orb.utils.spectrum.sincgauss1d_flux(
                line_params[:, 1], fwhm, sigma)
        elif all_inputparams.fmodel == 'gaussian':
            flux = orb.utils.spectrum.gaussian1d_flux(
                line_params[:, 1], fwhm)
        elif all_inputparams.fmodel == 'sinc':
            flux = orb.utils.spectrum.sinc1d_flux(
                line_params[:, 1], fwhm)
        elif all_inputparams.fmodel == 'sincphased':
            flux = orb.utils.spectrum.sinc1d_flux(
                line_params[:, 1], fwhm)
        else:
            flux = None
        if flux is not None:
            if not nogvar:
                self['flux_gvar'] = flux
            self['flux'] = gvar.mean(flux)
            self['flux_err'] = gvar.sdev(flux)
        # compute SNR from the amplitude and its uncertainty
        self['snr'] = gvar.mean(line_params[:, 1]) / gvar.sdev(line_params[:, 1])
        # store lines-params
        if not nogvar:
            self['lines_params_gvar'] = line_params
        self['lines_params'] = gvar.mean(line_params)
        self['lines_params_err'] = np.abs(gvar.sdev(line_params))
        self.update(all_inputparams)
        if nogvar:
            for ikey in self:
                if isinstance(self[ikey], np.ndarray):
                    # bugfix: np.object was removed in NumPy 1.24; the
                    # builtin `object` is the equivalent dtype test
                    if self[ikey].dtype == object:
                        self[ikey] = gvar.mean(self[ikey])
        return self

    def convert(self):
        """Convert class to a raw pickable format
        """
        raw = dict()
        for ipar in list(self.keys()):
            raw[ipar] = self[ipar]
        raw = orb.utils.fit.gvardict2pickdict(raw)
        return raw

    def get_axis(self):
        """Returns the axis of the fitted spectrum
        """
        return orb.core.Axis(orb.utils.spectrum.create_cm1_axis(
            self.step_nb, self.step, self.order, self.axis_corr_proj))

    def get_params(self):
        """Return basic parameters of the fitted spectrum
        """
        params = dict()
        keys = ('step', 'order', 'filter_name')
        for ikey in keys:
            params[ikey] = self[ikey]
        return params

    def get_spectrum(self):
        """Return fitted spectrum"""
        spectrum = orb.core.Cm1Vector1d(
            self.fitted_vector, axis=self.get_axis(), params=self.get_params())
        return spectrum

    def get_residual(self):
        """Return fit residual"""
        # the residual only covers the fitted signal range; pad the rest
        # of the spectrum with zeros
        res = np.zeros_like(self.get_spectrum().data)
        res[np.min(self.signal_range):np.max(self.signal_range)] = self.residual
        spectrum = orb.core.Cm1Vector1d(res, axis=self.get_axis(), params=self.get_params())
        return spectrum

    def plot(self, *args, **kwargs):
        """Plot fitted spectrum. Convenient wrapper around pyplot.plot() function"""
        self.get_spectrum().plot(*args, **kwargs)

    def plot_residual(self, *args, **kwargs):
        """Plot residual. Convenient wrapper around pyplot.plot() function"""
        self.get_residual().plot(*args, **kwargs)

    def __repr__(self):
        """Called by repr() and print() to display the most important results of the fit."""
        # convert rest wavenumbers (cm-1) to wavelengths (nm) to look up
        # human-readable line names
        lines_nm = 1e7 / gvar.mean(self['pos_guess'])
        lines = list()
        for iline in lines_nm:
            line_name = orb.core.Lines().get_line_name(iline)
            if line_name is None:
                line_name = str(iline)
            lines.append(line_name)
        info = '=== Fit results ===\n'
        info += 'lines: {}, fmodel: {}\n'.format(lines, self['fmodel'])
        info += 'iterations: {}, fit time: {:.2e} s\n'.format(self['iter_nb'], self['fit_time'])
        info += 'number of free parameters: {}, BIC: {:.5e}, chi2: {:.2e}\n'.format(self['nparams'], self['BIC'], self['chi2'])
        info += 'Velocity (km/s): {} \n'.format(gvar.gvar(self['velocity'], self['velocity_err']))
        info += 'Flux: {}\n'.format(gvar.gvar(self['flux'], self['flux_err']))
        info += 'Broadening (km/s): {}\n'.format(gvar.gvar(self['broadening'], self['broadening_err']))
        # NOTE(review): SNR is dimensionless; the '(km/s)' in this label
        # looks wrong but is kept to preserve the output format
        info += 'SNR (km/s): {}\n'.format(self['snr'])
        return info

    __str__ = __repr__
################################################
#### Functions #################################
################################################
def _fit_lines_in_spectrum(spectrum, ip, fit_tol=1e-10,
                           compute_mcmc_error=False, max_iter=None, nogvar=False,
                           vector_err=None,
                           **kwargs):
    """Raw spectrum-fitting function. The InputParams instance must be
    prepared beforehand (e.g. with fit._prepare_input_params()).

    :param spectrum: The spectrum to fit (1d vector).

    :param ip: InputParams instance or its converted dict form.

    :param fit_tol: (Optional) Tolerance on the fit value (default
      1e-10).

    :param compute_mcmc_error: (Optional) If True, uncertainty
      estimates are computed from a Markov chain Monte-Carlo
      algorithm. If the estimates can be better constrained, the
      fitting time is orders of magnitude longer (default False).

    :param max_iter: (Optional) Maximum number of iterations (default None)

    :param nogvar: (Optional) No gvar are returned.

    :param vector_err: (Optional) Uncertainty on the fitted vector
      (default None).

    :param kwargs: (Optional) Model parameters that must be changed in
      the InputParams instance.
    """
    rawip = ip.convert() if isinstance(ip, InputParams) else ip

    # apply last-minute parameter overrides coming from kwargs
    params_list = orb.utils.fit.pick2paramslist(rawip['params'])
    for model_params in params_list:
        for key in model_params:
            if key in kwargs and kwargs[key] is not None:
                model_params[key] = kwargs[key]
                logging.debug('last minute changed parameter {}: {}'.format(key, model_params[key]))

    logging.debug('fwhm guess: {}'.format(params_list[0]['fwhm_guess']))
    rawip['params'] = orb.utils.fit.paramslist2pick(params_list)

    fv = FitVector(spectrum,
                   rawip['models'], rawip['params'],
                   signal_range=rawip['signal_range'],
                   fit_tol=fit_tol,
                   max_iter=max_iter,
                   nogvar=nogvar,
                   vector_err=vector_err)
    fit = fv.fit(compute_mcmc_error=compute_mcmc_error)
    if fit == []:
        return []
    return OutputParams(fit).translate(ip, fv, nogvar=nogvar)
def _prepare_input_params(step_nb, lines, step, order, nm_laser,
                          theta_proj, zpd_index, wavenumber=True,
                          filter_name=None,
                          theta_orig=None,
                          apodization=1.,
                          **kwargs):
    """Prepare the input parameters of a fit.

    :param step_nb: Number of steps of the spectrum

    :param lines: Positions of the lines in nm/cm-1

    :param step: Step size in nm

    :param order: Folding order

    :param nm_laser: Calibration laser wavelength in nm.

    :param theta_proj: Projected incident angle of the spectrum in
      degrees. If the spectrum is not calibrated in wavenumber this
      angle is the incident angle of the spectrum. If the spectrum is
      wavenumber calibrated then theta_orig must be set to the real
      incident angle of the spectrum.

    :param zpd_index: Index of the ZPD in the interferogram.

    :param wavenumber: (Optional) Only wavenumber input is implemented
      (default True).

    :param apodization: (Optional) Apodization level. Permits to
      separate the broadening due to the apodization from the real line
      broadening (default 1.).

    :param filter_name: (Optional) Filter file path (default None).

    :param theta_orig: (Optional) Real incident angle (in degrees) of
      the spectrum. Must be set if the spectrum has been calibrated,
      i.e. projected on a new wavenumber axis. If None the spectrum is
      considered uncalibrated and the original theta is set equal to
      theta_proj (default None).

    :param kwargs: (Optional) Fitting parameters of
      :py:class:`orb.fit.Cm1LinesInput` or
      :py:class:`orb.fit.FitVector`.
    """
    if not wavenumber:
        raise NotImplementedError()
    if theta_orig is None:
        theta_orig = theta_proj
    logging.debug("theta_orig {}, theta_proj: {}".format(theta_orig, theta_proj))

    ip = Cm1InputParams(step, order, step_nb,
                        nm_laser, theta_proj, theta_orig, apodization,
                        zpd_index, filter_name)
    kwargs = ip.add_lines_model(lines, **kwargs)
    if filter_name is not None:
        ip.add_filter_model(**kwargs)

    # consume the signal_range kwarg if present
    if 'signal_range' in kwargs:
        srange = kwargs.pop('signal_range')
        if srange is not None:
            ip.set_signal_range(min(srange), max(srange))

    if kwargs:
        raise ValueError('some kwargs are unknown: {}. Please remove them.'.format(list(kwargs.keys())))
    return ip
def fit_lines_in_spectrum(spectrum, lines, step, order, nm_laser,
                          theta, zpd_index, wavenumber=True,
                          filter_name=None,
                          apodization=1.,
                          fit_tol=1e-10,
                          velocity_range=None,
                          compute_mcmc_error=False,
                          max_iter=None,
                          **kwargs):
    """Fit lines in spectrum

    .. warning:: If spectrum is in wavenumber (option wavenumber set
      to True) input and output unit will be in cm-1. If spectrum is
      in wavelength (option wavenumber set to False) input and output
      unit will be in nm.

    :param spectrum: Spectrum to fit

    :param lines: Positions of the lines in nm/cm-1

    :param step: Step size in nm

    :param order: Folding order

    :param nm_laser: Calibration laser wavelength in nm.

    :param theta: Projected incident angle of the spectrum in degrees.

    :param zpd_index: Index of the ZPD in the interferogram.

    :param apodization: (Optional) Apodization level. Permit to separate the
      broadening due to the apodization and the real line broadening
      (see 'broadening' output parameter, default 1.).

    :param fit_tol: (Optional) Tolerance on the fit value (default
      1e-10).

    :param filter_name: (Optional) Filter file path (default None).

    :param velocity_range: (Optional) Not implemented (default None).

    :param compute_mcmc_error: (Optional) If True, uncertainty
      estimates are computed from a Markov chain Monte-Carlo
      algorithm. If the estimates can be better constrained, the
      fitting time is orders of magnitude longer (default False).

    :param max_iter: (Optional) Maximum number of iterations (default None)

    :param kwargs: (Optional) Fitting parameters of
      :py:class:`orb.fit.Cm1LinesInput` or
      :py:class:`orb.fit.FitVector`.

    :return: an OutputParams dict containing, among others:
      'lines_params' (array of shape (lines_nb, 5): height, amplitude,
      position, fwhm, sigma), 'lines_params_err', 'velocity',
      'velocity_err', 'residual', 'chi2', 'rchi2', 'snr',
      'cont_params', 'fitted_vector', 'logGBF'. Returns [] on failure.
    """
    # snapshot of the call arguments, used to retry the fit with a
    # different fmodel (must stay at the very beginning of the function)
    all_args = dict(locals())
    # bugfix: locals() nests the extra keyword arguments under the
    # 'kwargs' key; flatten them so the snapshot can be re-passed as
    # **all_args without corrupting the retry call.
    all_args.update(all_args.pop('kwargs'))

    if velocity_range is not None:
        raise NotImplementedError()

    ip = _prepare_input_params(spectrum.shape[0], lines, step, order, nm_laser,
                               theta, zpd_index, wavenumber=wavenumber,
                               filter_name=filter_name,
                               apodization=apodization,
                               **kwargs)

    fit = _fit_lines_in_spectrum(spectrum, ip,
                                 fit_tol=fit_tol,
                                 compute_mcmc_error=compute_mcmc_error,
                                 max_iter=max_iter)

    if fit != []:
        return fit
    # NOTE(review): 'allparams' attribute name could not be verified
    # against the InputParams definition -- confirm.
    elif ip.allparams.fmodel in ['sincgauss', 'sincgaussphased']:
        # retry once with a simpler model before giving up
        logging.warning('bad fit, fmodel replaced by a normal sinc')
        all_args['fmodel'] = 'sinc'
        return fit_lines_in_spectrum(**all_args)

    return []
def fit_lines_in_vector(vector, lines, fwhm_guess, fit_tol=1e-10,
                        compute_mcmc_error=False, max_iter=None, **kwargs):
    """Fit lines in a vector

    Use this function only if little is known about the vector. A
    vector resulting from an interferogram FFT is assumed:
    i.e. regular axis, symmetrical line shape.

    .. warning:: All position units are in channels

    :param vector: vector to fit

    :param lines: Positions of the lines in channels

    :param fwhm_guess: Initial guess on the lines FWHM (in channels).

    :param fit_tol: (Optional) Tolerance on the fit value (default
      1e-10).

    :param compute_mcmc_error: (Optional) If True, uncertainty
      estimates are computed from a Markov chain Monte-Carlo
      algorithm. If the estimates can be better constrained, the
      fitting time is orders of magnitude longer (default False).

    :param max_iter: (Optional) Maximum number of iterations (default None)

    :param kwargs: (Optional) Fitting parameters of
      :py:class:`orb.fit.LinesInput` or
      :py:class:`orb.fit.FitVector`.

    :return: an OutputParams dict with, among others, 'lines_params'
      (array of shape (lines_nb, 5): height, amplitude, position, fwhm,
      sigma, all in channels), 'lines_params_err', 'residual', 'chi2',
      'rchi2', 'snr', 'cont_params', 'fitted_vector', 'logGBF'.
      Returns [] on failure.
    """
    ip = InputParams(vector.shape[0])
    ip.add_lines_model(lines, fwhm_guess, **kwargs)

    # honor an explicit signal range if one was given
    srange = kwargs.get('signal_range')
    if srange is not None:
        ip.set_signal_range(min(srange), max(srange))

    fv = FitVector(vector,
                   ip.models, ip.params,
                   signal_range=ip.signal_range,
                   fit_tol=fit_tol,
                   max_iter=max_iter)
    fit = fv.fit(compute_mcmc_error=compute_mcmc_error)
    if fit == []:
        return []
    return OutputParams(fit).translate(ip.convert(), fv)
def create_cm1_lines_model_raw(lines_cm1, amp, step, order, step_nb, corr,
                               zpd_index, vel=0, sigma=0, alpha=0, fmodel='sinc'):
    """Return a simple emission-line spectrum model in cm-1 built from
    raw parameters. For more physical parameters use
    create_cm1_lines_model().

    :param lines_cm1: lines in cm-1
    :param amp: Amplitude (must have the same size as lines)
    :param step: Step size
    :param order: Folding order
    :param step_nb: Total number of steps
    :param corr: calibration coeff.
    :param zpd_index: ZPD index.
    :param vel: (Optional) Global velocity shift applied to all the
      lines (in km/s, default 0.)
    :param sigma: (Optional) Line broadening (in km/s, default 0.)
    :param alpha: (Optional) Phase coefficient of the lines (default 0.)
    :param fmodel: (Optional) Lines model. Can be 'gaussian', 'sinc',
      'sincgauss', 'sincphased', 'sincgaussphased' (default sinc).

    :return: (cm1_axis, spectrum)
    """
    NM_LASER = 543.5  # arbitrary reference wavelength; can be anything
    n_lines = np.size(lines_cm1)
    fwhm_guess = orb.utils.spectrum.compute_line_fwhm(
        step_nb - zpd_index, step, order, corr, wavenumber=True)

    def _expand(param):
        # expand a scalar or per-line parameter into
        # (definition labels, per-line values)
        if np.size(param) == 1:
            return np.arange(n_lines).astype(str), list([param]) * n_lines
        if np.size(param) == n_lines:
            return np.arange(n_lines).astype(str), param
        raise Exception('param size must be 1 or {} but is {}'.format(
            n_lines, np.size(param)))

    pos_def, pos_cov = _expand(vel)

    model_params = {
        'step_nb': step_nb,
        'step': step,
        'order': order,
        'nm_laser': NM_LASER,
        'nm_laser_obs': NM_LASER * corr,
        'line_nb': n_lines,
        'fwhm_def': ['1'] * n_lines,
        'fwhm_guess': [fwhm_guess] * n_lines,
        'pos_guess': lines_cm1,
        'pos_cov': pos_cov,
        'pos_def': pos_def,
        'fmodel': fmodel,
        'amp_def': ['free'] * n_lines,
        'ratio': zpd_index / (step_nb - zpd_index),
    }

    if fmodel in ['sincgauss', 'sincgaussphased']:
        sigma_def, sigma_cov = _expand(sigma)
        model_params.update({
            'sigma_def': sigma_def,
            'sigma_guess': np.zeros(n_lines),
            'sigma_cov': sigma_cov})

    if fmodel in ['sincgaussphased', ]:
        model_params.update({
            'alpha_def': ['1'] * n_lines,
            'alpha_guess': [alpha] * n_lines,
            'alpha_cov': [0.]})

    lines_model = Cm1LinesModel(model_params)
    # inject the requested amplitudes into the free parameters
    p_free = dict(lines_model.p_free)
    for iline in range(n_lines):
        p_free[lines_model._get_ikey('amp', iline)] = amp[iline]
    lines_model.set_p_free(p_free)

    spectrum = lines_model.get_model(np.arange(step_nb))
    cm1_axis = orb.utils.spectrum.create_cm1_axis(
        spectrum.size, step, order, corr=corr)
    return cm1_axis, spectrum
def create_cm1_lines_model(lines_cm1, amp, step, order, resolution,
                           theta, vel=0., sigma=0., alpha=0.,
                           fmodel='sincgauss', ratio=0.25):
    """Return a simple emission-line spectrum model in cm-1.

    :param lines_cm1: lines in cm-1
    :param amp: Amplitude (must have the same size as lines)
    :param step: Step size
    :param order: Folding order
    :param resolution: Resolution of the spectrum
    :param theta: Incident angle
    :param vel: (Optional) Global velocity shift applied to all the
      lines (in km/s, default 0.)
    :param sigma: (Optional) Line broadening (in km/s, default 0.)
    :param alpha: (Optional) Phase coefficient of the lines (default 0.)
    :param fmodel: (Optional) Lines model. Can be 'gaussian', 'sinc',
      'sincgauss', 'sincphased', 'sincgaussphased' (default sincgauss).
    :param ratio: (Optional) Ratio of the short interferogram side to
      the long side (default 0.25).
    """
    if np.size(amp) != np.size(lines_cm1):
        raise Exception('The number of lines and the length of the amplitude vector must be the same')

    # derive the interferogram geometry from the requested resolution
    step_nb = orb.utils.spectrum.compute_step_nb(resolution, step, order)
    total_step_nb = step_nb * (1. + ratio)
    zpd_index = total_step_nb - step_nb
    corr = orb.utils.spectrum.theta2corr(theta)

    return create_cm1_lines_model_raw(
        lines_cm1, amp, step, order, total_step_nb, corr,
        zpd_index, vel=vel, sigma=sigma, alpha=alpha, fmodel=fmodel)
def create_lines_model(lines, amp, fwhm, step_nb, line_shift=0.,
                       sigma=0., alpha=0., fmodel='sincgauss'):
    """Return a simple emission-line spectrum model with no physical units.

    :param lines: lines channels.
    :param amp: Amplitude (must have the same size as lines).
    :param fwhm: lines FWHM (in channels).
    :param step_nb: Number of steps of the spectrum.
    :param line_shift: (Optional) Global shift applied to all the
      lines (in channels, default 0.)
    :param sigma: (Optional) Sigma of the lines (in channels, default 0.)
    :param alpha: (Optional) Phase coefficient of the lines (default 0.)
    :param fmodel: (Optional) Lines model. Can be 'gaussian', 'sinc',
      'sincgauss', 'sincphased', 'sincgaussphased' (default sincgauss).
    """
    if np.size(amp) != np.size(lines):
        raise Exception('The number of lines and the length of the amplitude vector must be the same')

    model_params = {
        'line_nb': np.size(lines),
        'amp_def': 'free',
        'fwhm_def': '1',
        'pos_guess': lines,
        'pos_cov': line_shift,
        'pos_def': '1',
        'fmodel': fmodel,
        'fwhm_guess': fwhm,
        'sigma_def': '1',
        'sigma_guess': sigma,
        'sigma_cov': 0.,  # never more than 0.
        'alpha_def': '1',
        'alpha_guess': alpha,
        'alpha_cov': 0.,  # never more than 0.
    }
    model = LinesModel(model_params)

    # inject the requested amplitudes into the free parameters
    free = dict(model.p_free)
    for idx in range(np.size(lines)):
        free[model._get_ikey('amp', idx)] = amp[idx]
    model.set_p_free(free)

    return model.get_model(np.arange(step_nb))
|
thomasorbREPO_NAMEorbPATH_START.@orb_extracted@orb-master@orb@fit.py@.PATH_END.py
|
{
"filename": "_marker.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterpolargl/_marker.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
# NOTE(review): this file appears auto-generated by plotly's codegen;
# the `data_docs` string below is runtime data consumed by the base
# validator and must not be reformatted.
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the compound `marker` property of `scatterpolargl` traces."""

    def __init__(self, plotly_name="marker", parent_name="scatterpolargl", **kwargs):
        """Delegate to CompoundValidator with the Marker data class and its docs."""
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.colorscale`. Has an
                effect only if in `marker.color`is set to a
                numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.color`) or the bounds set in
                `marker.cmin` and `marker.cmax` Has an effect
                only if in `marker.color`is set to a numerical
                array. Defaults to `false` when `marker.cmin`
                and `marker.cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `marker.cmin` and/or `marker.cmax` to
                be equidistant to this point. Has an effect
                only if in `marker.color`is set to a numerical
                array. Value should have the same units as in
                `marker.color`. Has no effect when
                `marker.cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmax` must be set as well.
            color
                Sets themarkercolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.cmin` and `marker.cmax` if set.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                :class:`plotly.graph_objects.scatterpolargl.mar
                ker.ColorBar` instance or dict with compatible
                properties
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.color`is set to a numerical array. The
                colorscale must be an array containing arrays
                mapping a normalized value to an rgb, rgba,
                hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                To control the bounds of the colorscale in
                color space, use`marker.cmin` and
                `marker.cmax`. Alternatively, `colorscale` may
                be a palette name string of the following list:
                Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
                ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
                arth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for color .
            line
                :class:`plotly.graph_objects.scatterpolargl.mar
                ker.Line` instance or dict with compatible
                properties
            opacity
                Sets the marker opacity.
            opacitysrc
                Sets the source reference on Chart Studio Cloud
                for opacity .
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.color`is set to a
                numerical array. If true, `marker.cmin` will
                correspond to the last color in the array and
                `marker.cmax` will correspond to the first
                color.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace. Has an effect only if
                in `marker.color`is set to a numerical array.
            size
                Sets the marker size (in px).
            sizemin
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the minimum size (in px)
                of the rendered marker points.
            sizemode
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the rule for which the
                data in `size` is converted to pixels.
            sizeref
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the scale factor used to
                determine the rendered size of marker points.
                Use with `sizemin` and `sizemode`.
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for size .
            symbol
                Sets the marker symbol type. Adding 100 is
                equivalent to appending "-open" to a symbol
                name. Adding 200 is equivalent to appending
                "-dot" to a symbol name. Adding 300 is
                equivalent to appending "-open-dot" or "dot-
                open" to a symbol name.
            symbolsrc
                Sets the source reference on Chart Studio Cloud
                for symbol .
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterpolargl@_marker.py@.PATH_END.py
|
{
"filename": "get_layer_policy.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/keras/mixed_precision/get_layer_policy.py",
"type": "Python"
}
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the get_layer_policy function.
This is a separate file from policy.py to avoid a circular dependency.
get_layer_policy() relies on base_layer.py, itself which relies on policy.py.
"""
from tensorflow.python.keras.engine import base_layer
def get_layer_policy(layer):
    """Returns the dtype policy of a layer.

    Warning: This function is deprecated. Use
    `tf.keras.layers.Layer.dtype_policy` instead.

    Args:
      layer: A `tf.keras.layers.Layer`.

    Returns:
      The `tf.keras.mixed_precision.Policy` of the layer.

    Raises:
      ValueError: If `layer` is not a Keras layer.
    """
    # guard clause: only Keras layers carry a dtype policy
    if isinstance(layer, base_layer.Layer):
        return layer.dtype_policy
    raise ValueError('get_policy can only be called on a layer, but got: %s'
                     % (layer,))
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@keras@mixed_precision@get_layer_policy.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/api/_tf_keras/keras/datasets/cifar100/__init__.py",
"type": "Python"
}
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar100 import load_data
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@api@_tf_keras@keras@datasets@cifar100@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "PrincetonUniversity/athena",
"repo_path": "athena_extracted/athena-master/tst/regression/scripts/tests/shearingbox/__init__.py",
"type": "Python"
}
|
PrincetonUniversityREPO_NAMEathenaPATH_START.@athena_extracted@athena-master@tst@regression@scripts@tests@shearingbox@__init__.py@.PATH_END.py
|
|
{
"filename": "_customdata.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/contourcarpet/_customdata.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the `customdata` data-array property of contourcarpet traces."""

    def __init__(self, plotly_name="customdata", parent_name="contourcarpet", **kwargs):
        """Delegate to DataArrayValidator with overridable defaults."""
        # pull the overridable defaults out of kwargs before delegating
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "data")
        super(CustomdataValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@contourcarpet@_customdata.py@.PATH_END.py
|
{
"filename": "code_stats.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/utils/code_stats.py",
"type": "Python"
}
|
import os
def count_loc(directory, exclude=("_test",), extensions=(".py",), verbose=0):
    """Count lines of code under *directory*.

    Blank lines, ``#`` comment lines and (heuristically) lines inside
    triple-quoted docstrings are not counted.

    Args:
        directory: Root directory to walk recursively.
        exclude: Suffixes of directories to skip, and of file stems to
            skip (e.g. ``foo_test.py`` is skipped for the default
            ``"_test"``).
        extensions: File extensions to include. A file is counted when it
            matches ANY of the extensions (bugfix: previously a file was
            skipped unless it matched the FIRST extension in the tuple).
        verbose: If truthy, print each file as it is processed.

    Returns:
        Total number of lines of code found.
    """
    loc = 0
    for root, _, fnames in os.walk(directory):
        # skip excluded directories
        if any(root.endswith(ex) for ex in exclude):
            continue
        for fname in fnames:
            # keep the file only if it matches one of the extensions...
            matching = [ext for ext in extensions if fname.endswith(ext)]
            if not matching:
                continue
            # ...and its stem does not end with an excluded suffix
            if any(fname.endswith(ex + ext)
                   for ext in matching for ex in exclude):
                continue
            path = os.path.join(root, fname)
            if verbose:
                print(f"Count LoCs in {path}")
            with open(path) as f:
                lines = f.read().split("\n")
            # heuristic docstring tracker: a line starting with `"""` and
            # not ending with it opens a docstring; the next line starting
            # with `"""` closes it
            string_open = False
            for line in lines:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                if not string_open:
                    if not line.startswith('"""'):
                        loc += 1
                    elif not line.endswith('"""'):
                        string_open = True
                else:
                    if line.startswith('"""'):
                        string_open = False
    return loc
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@utils@code_stats.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/treemap/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
    # Eager imports: module-level __getattr__ (PEP 562) is unavailable
    # before Python 3.7, and static type checkers need the real symbols.
    from ._domain import Domain
    from ._hoverlabel import Hoverlabel
    from ._insidetextfont import Insidetextfont
    from ._legendgrouptitle import Legendgrouptitle
    from ._marker import Marker
    from ._outsidetextfont import Outsidetextfont
    from ._pathbar import Pathbar
    from ._root import Root
    from ._stream import Stream
    from ._textfont import Textfont
    from ._tiling import Tiling
    from . import hoverlabel
    from . import legendgrouptitle
    from . import marker
    from . import pathbar
else:
    # Lazy imports: defer loading submodules/classes until first attribute
    # access (PEP 562 __getattr__/__dir__) to keep `import plotly` fast.
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".hoverlabel", ".legendgrouptitle", ".marker", ".pathbar"],
        [
            "._domain.Domain",
            "._hoverlabel.Hoverlabel",
            "._insidetextfont.Insidetextfont",
            "._legendgrouptitle.Legendgrouptitle",
            "._marker.Marker",
            "._outsidetextfont.Outsidetextfont",
            "._pathbar.Pathbar",
            "._root.Root",
            "._stream.Stream",
            "._textfont.Textfont",
            "._tiling.Tiling",
        ],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@treemap@__init__.py@.PATH_END.py
|
{
"filename": "wavelet_precompute_torch.py",
"repo_name": "astro-informatics/s2wav",
"repo_path": "s2wav_extracted/s2wav-main/s2wav/transforms/wavelet_precompute_torch.py",
"type": "Python"
}
|
import torch
from typing import Tuple, List
from s2fft.precompute_transforms import wigner, spherical
from s2wav import samples
def synthesis(
    f_wav: torch.tensor,
    f_scal: torch.tensor,
    L: int,
    N: int = 1,
    J_min: int = 0,
    lam: float = 2.0,
    spin: int = 0,
    sampling: str = "mw",
    nside: int = None,
    reality: bool = False,
    filters: Tuple[torch.tensor] = None,
    precomps: List[List[torch.tensor]] = None,
) -> torch.tensor:
    r"""Computes the synthesis directional wavelet transform [1,2].

    Specifically, this transform synthesises the signal :math:`_{s}f(\omega) \in \mathbb{S}^2`
    by summing the contributions from wavelet and scaling coefficients in harmonic space,
    see equation 27 from `[2] <https://arxiv.org/pdf/1509.06749.pdf>`_.

    Args:
        f_wav (torch.tensor): Array of wavelet pixel-space coefficients
            with shape :math:`[n_{J}, 2N-1, n_{\theta}, n_{\phi}]`.
        f_scal (torch.tensor): Array of scaling pixel-space coefficients
            with shape :math:`[n_{\theta}, n_{\phi}]`.
        L (int): Harmonic bandlimit.
        N (int, optional): Upper azimuthal band-limit. Defaults to 1.
        J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
        lam (float, optional): Wavelet parameter which determines the scale factor
            between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
            dyadic wavelets. Defaults to 2.
        spin (int, optional): Spin (integer) of input signal. Defaults to 0.
        sampling (str, optional): Spherical sampling scheme from {"mw","mwss", "dh",
            "healpix"}. Defaults to "mw".
        nside (int, optional): HEALPix Nside resolution parameter. Only required if
            sampling="healpix". Defaults to None.
        reality (bool, optional): Whether :math:`f \in \mathbb{R}`, if True exploits
            conjugate symmetry of harmonic coefficients. Defaults to False.
        filters (Tuple[torch.tensor], optional): Precomputed wavelet filters. Defaults to None.
        precomps (List[torch.tensor]): Precomputed list of recursion coefficients. At most
            of length :math:`L^2`, which is a minimal memory overhead.

    Raises:
        ValueError: If the precomputed recursion coefficients are not provided.

    Returns:
        torch.tensor: Signal :math:`f` on the sphere with shape :math:`[n_{\theta}, n_{\phi}]`.

    Notes:
        [1] B. Leidstedt et. al., "S2LET: A code to perform fast wavelet analysis on the sphere", A&A, vol. 558, p. A128, 2013.
        [2] J. McEwen et. al., "Directional spin wavelets on the sphere", arXiv preprint arXiv:1509.06749 (2015).
    """
    # Identity comparison (`is None`) is the correct None test; `== None`
    # invokes tensor/elementwise __eq__ on some argument types.
    if precomps is None:
        raise ValueError("Must provide precomputed kernels for this transform!")
    J = samples.j_max(L, lam)
    Ls = samples.scal_bandlimit(L, J_min, lam, True)
    flm = torch.zeros((L, 2 * L - 1), dtype=torch.complex128)
    f_scal_lm = spherical.forward_transform_torch(
        f_scal, precomps[1], Ls, sampling, reality, spin, nside
    )
    # Sum the all wavelet wigner coefficients for each lmn
    # Note that almost the entire compute is concentrated at the highest two scales.
    for j in range(J_min, J + 1):
        Lj, Nj, L0j = samples.LN_j(L, j, N, lam, True)
        # The highest scale reuses the previous scale's precomputed kernel.
        shift = 0 if j < J else -1
        temp = wigner.forward_transform_torch(
            f_wav[j - J_min],
            precomps[2][j - J_min + shift],
            Lj,
            Nj,
            sampling,
            reality,
            nside,
        )
        flm[L0j:Lj, L - Lj : L - 1 + Lj] += torch.einsum(
            "ln,nlm->lm",
            filters[0][j, L0j:Lj, L - Nj : L - 1 + Nj : 2],
            temp[::2, L0j:, :],
        )
    # Sum the all scaling harmonic coefficients for each lm
    phi = filters[1][:Ls] * torch.sqrt(
        4 * torch.pi / (2 * torch.arange(Ls, dtype=torch.float64) + 1)
    )
    flm[:Ls, L - Ls : L - 1 + Ls] += torch.einsum("lm,l->lm", f_scal_lm, phi)
    return spherical.inverse_transform_torch(
        flm, precomps[0], L, sampling, reality, spin, nside
    )
def analysis(
    f: torch.tensor,
    L: int,
    N: int = 1,
    J_min: int = 0,
    lam: float = 2.0,
    spin: int = 0,
    sampling: str = "mw",
    nside: int = None,
    reality: bool = False,
    filters: Tuple[torch.tensor] = None,
    precomps: List[List[torch.tensor]] = None,
) -> Tuple[torch.tensor]:
    r"""Wavelet analysis from pixel space to wavelet space for complex signals.

    Args:
        f (torch.tensor): Signal :math:`f` on the sphere with shape :math:`[n_{\theta}, n_{\phi}]`.
        L (int): Harmonic bandlimit.
        N (int, optional): Upper azimuthal band-limit. Defaults to 1.
        J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
        lam (float, optional): Wavelet parameter which determines the scale factor between consecutive wavelet scales.
            Note that :math:`\lambda = 2` indicates dyadic wavelets. Defaults to 2.
        spin (int, optional): Spin (integer) of input signal. Defaults to 0.
        sampling (str, optional): Spherical sampling scheme from {"mw","mwss", "dh", "healpix"}. Defaults to "mw".
        nside (int, optional): HEALPix Nside resolution parameter. Only required if sampling="healpix". Defaults
            to None.
        reality (bool, optional): Whether :math:`f \in \mathbb{R}`, if True exploits
            conjugate symmetry of harmonic coefficients. Defaults to False.
        filters (Tuple[torch.tensor], optional): Precomputed wavelet filters. Defaults to None.
        precomps (List[torch.tensor]): Precomputed list of recursion coefficients. At most
            of length :math:`L^2`, which is a minimal memory overhead.

    Raises:
        ValueError: If the precomputed recursion coefficients are not provided.

    Returns:
        f_wav (torch.tensor): Array of wavelet pixel-space coefficients
            with shape :math:`[n_{J}, 2N-1, n_{\theta}, n_{\phi}]`.
        f_scal (torch.tensor): Array of scaling pixel-space coefficients
            with shape :math:`[n_{\theta}, n_{\phi}]`.
    """
    # `is None` rather than `== None`: identity test avoids elementwise __eq__.
    if precomps is None:
        raise ValueError("Must provide precomputed kernels for this transform!")
    J = samples.j_max(L, lam)
    Ls = samples.scal_bandlimit(L, J_min, lam, True)
    f_wav_lmn = samples.construct_flmn_torch(L, N, J_min, J, lam, True)
    f_wav = samples.construct_f_torch(L, J_min, J, lam)
    wav_lm = torch.einsum(
        "jln, l->jln",
        torch.conj(filters[0]),
        8 * torch.pi**2 / (2 * torch.arange(L, dtype=torch.float64) + 1),
    )
    flm = spherical.forward_transform_torch(
        f, precomps[0], L, sampling, reality, spin, nside
    )
    # Project all wigner coefficients for each lmn onto wavelet coefficients
    # Note that almost the entire compute is concentrated at the highest J
    for j in range(J_min, J + 1):
        Lj, Nj, L0j = samples.LN_j(L, j, N, lam, True)
        f_wav_lmn[j - J_min][::2, L0j:] += torch.einsum(
            "lm,ln->nlm",
            flm[L0j:Lj, L - Lj : L - 1 + Lj],
            wav_lm[j, L0j:Lj, L - Nj : L - 1 + Nj : 2],
        )
        # The highest scale reuses the previous scale's precomputed kernel.
        shift = 0 if j < J else -1
        f_wav[j - J_min] = wigner.inverse_transform_torch(
            f_wav_lmn[j - J_min],
            precomps[2][j - J_min + shift],
            Lj,
            Nj,
            sampling,
            reality,
            nside,
        )
    # Project all harmonic coefficients for each lm onto scaling coefficients
    phi = filters[1][:Ls] * torch.sqrt(
        4 * torch.pi / (2 * torch.arange(Ls, dtype=torch.float64) + 1)
    )
    temp = torch.einsum("lm,l->lm", flm[:Ls, L - Ls : L - 1 + Ls], phi)
    # Handle edge case: at Ls == 1 the inverse transform degenerates to a
    # single monopole term, applied directly.
    if Ls == 1:
        f_scal = temp * torch.sqrt(
            torch.tensor(1 / (4 * torch.pi), dtype=torch.float64)
        )
    else:
        f_scal = spherical.inverse_transform_torch(
            temp, precomps[1], Ls, sampling, reality, spin, nside
        )
    return f_wav, f_scal
def flm_to_analysis(
    flm: torch.tensor,
    L: int,
    N: int = 1,
    J_min: int = 0,
    J_max: int = None,
    lam: float = 2.0,
    sampling: str = "mw",
    nside: int = None,
    reality: bool = False,
    filters: Tuple[torch.tensor] = None,
    precomps: List[List[torch.tensor]] = None,
) -> Tuple[torch.tensor]:
    r"""Wavelet analysis from harmonic space to wavelet space for complex signals.

    Args:
        flm (torch.tensor): Harmonic coefficients of the input signal.
        L (int): Harmonic bandlimit.
        N (int, optional): Upper azimuthal band-limit. Defaults to 1.
        J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
        J_max (int, optional): Highest frequency wavelet scale to be used. Defaults to
            None, in which case the maximum scale for (L, lam) is used.
        lam (float, optional): Wavelet parameter which determines the scale factor between consecutive wavelet scales.
            Note that :math:`\lambda = 2` indicates dyadic wavelets. Defaults to 2.
        sampling (str, optional): Spherical sampling scheme from {"mw","mwss", "dh", "healpix"}. Defaults to "mw".
        nside (int, optional): HEALPix Nside resolution parameter. Only required if sampling="healpix". Defaults
            to None.
        reality (bool, optional): Whether :math:`f \in \mathbb{R}`, if True exploits
            conjugate symmetry of harmonic coefficients. Defaults to False.
        filters (torch.tensor, optional): Precomputed wavelet filters. Defaults to None.
        precomps (List[torch.tensor]): Precomputed list of recursion coefficients. At most
            of length :math:`L^2`, which is a minimal memory overhead.

    Raises:
        ValueError: If the precomputed recursion coefficients are not provided.

    Returns:
        f_wav (torch.tensor): Array of wavelet pixel-space coefficients
            with shape :math:`[n_{J}, 2N-1, n_{\theta}, n_{\phi}]`.
    """
    # `is None` rather than `== None`: identity test avoids elementwise __eq__.
    if precomps is None:
        raise ValueError("Must provide precomputed kernels for this transform!")
    J = J_max if J_max is not None else samples.j_max(L, lam)
    f_wav_lmn = samples.construct_flmn_torch(L, N, J_min, J, lam, True)
    f_wav = samples.construct_f_torch(L, J_min, J, lam)
    wav_lm = torch.einsum(
        "jln, l->jln",
        torch.conj(filters),
        8 * torch.pi**2 / (2 * torch.arange(L, dtype=torch.float64) + 1),
    )
    # Project all wigner coefficients for each lmn onto wavelet coefficients
    # Note that almost the entire compute is concentrated at the highest J
    for j in range(J_min, J + 1):
        Lj, Nj, L0j = samples.LN_j(L, j, N, lam, True)
        f_wav_lmn[j - J_min][::2, L0j:] += torch.einsum(
            "lm,ln->nlm",
            flm[L0j:Lj, L - Lj : L - 1 + Lj],
            wav_lm[j, L0j:Lj, L - Nj : L - 1 + Nj : 2],
        )
        # The highest scale reuses the previous scale's precomputed kernel.
        shift = 0 if j < J else -1
        f_wav[j - J_min] = wigner.inverse_transform_torch(
            f_wav_lmn[j - J_min],
            precomps[2][j - J_min + shift],
            Lj,
            Nj,
            sampling,
            reality,
            nside,
        )
    return f_wav
|
astro-informaticsREPO_NAMEs2wavPATH_START.@s2wav_extracted@s2wav-main@s2wav@transforms@wavelet_precompute_torch.py@.PATH_END.py
|
{
"filename": "_y1shift.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/shape/_y1shift.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Y1ShiftValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for `layout.shape.y1shift` (fractional shift in [-1, 1])."""

    def __init__(self, plotly_name="y1shift", parent_name="layout.shape", **kwargs):
        # Extract schema defaults up front; explicit kwargs still win.
        edit_type = kwargs.pop("edit_type", "calc")
        upper = kwargs.pop("max", 1)
        lower = kwargs.pop("min", -1)
        super(Y1ShiftValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            max=upper,
            min=lower,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@shape@_y1shift.py@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/sankey/node/_hoverlabel.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
    """Style settings for the hover labels of `sankey.node`.

    Generated trace-hierarchy type: each scalar property has a matching
    `*src` property naming a Chart Studio grid column that supplies
    per-point values.
    """

    # class properties
    # --------------------
    _parent_path_str = "sankey.node"
    _path_str = "sankey.node.hoverlabel"
    _valid_props = {
        "align",
        "alignsrc",
        "bgcolor",
        "bgcolorsrc",
        "bordercolor",
        "bordercolorsrc",
        "font",
        "namelength",
        "namelengthsrc",
    }

    # align
    # -----
    @property
    def align(self):
        """
        Sets the horizontal alignment of the text content within hover
        label box. Has an effect only if the hover label text spans
        two or more lines.

        The 'align' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['left', 'right', 'auto']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["align"]

    @align.setter
    def align(self, val):
        self["align"] = val

    # alignsrc
    # --------
    @property
    def alignsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `align`.

        The 'alignsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["alignsrc"]

    @alignsrc.setter
    def alignsrc(self, val):
        self["alignsrc"] = val

    # bgcolor
    # -------
    @property
    def bgcolor(self):
        """
        Sets the background color of the hover labels for this trace.

        The 'bgcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (any standard CSS color keyword,
            e.g. 'aliceblue', 'steelblue', 'rebeccapurple', ...)
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bgcolor"]

    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val

    # bgcolorsrc
    # ----------
    @property
    def bgcolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `bgcolor`.

        The 'bgcolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["bgcolorsrc"]

    @bgcolorsrc.setter
    def bgcolorsrc(self, val):
        self["bgcolorsrc"] = val

    # bordercolor
    # -----------
    @property
    def bordercolor(self):
        """
        Sets the border color of the hover labels for this trace.

        The 'bordercolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (any standard CSS color keyword,
            e.g. 'aliceblue', 'steelblue', 'rebeccapurple', ...)
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bordercolor"]

    @bordercolor.setter
    def bordercolor(self, val):
        self["bordercolor"] = val

    # bordercolorsrc
    # --------------
    @property
    def bordercolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `bordercolor`.

        The 'bordercolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["bordercolorsrc"]

    @bordercolorsrc.setter
    def bordercolorsrc(self, val):
        self["bordercolorsrc"] = val

    # font
    # ----
    @property
    def font(self):
        """
        Sets the font used in hover labels.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.sankey.node.hoverlabel.Font`
          - A dict of string/value properties that will be passed to the
            Font constructor; supported keys include `color`, `family`,
            `size`, `style`, `variant`, `weight`, `shadow`,
            `lineposition`, `textcase`, each with a matching `*src`
            Chart Studio Cloud column reference.

        Returns
        -------
        plotly.graph_objs.sankey.node.hoverlabel.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # namelength
    # ----------
    @property
    def namelength(self):
        """
        Sets the default length (in number of characters) of the trace
        name in the hover labels for all traces. -1 shows the whole
        name regardless of length. 0-3 shows the first 0-3 characters,
        and an integer >3 will show the whole name if it is less than
        that many characters, but if it is longer, will truncate to
        `namelength - 3` characters and add an ellipsis.

        The 'namelength' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [-1, 9223372036854775807]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|numpy.ndarray
        """
        return self["namelength"]

    @namelength.setter
    def namelength(self, val):
        self["namelength"] = val

    # namelengthsrc
    # -------------
    @property
    def namelengthsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `namelength`.

        The 'namelengthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["namelengthsrc"]

    @namelengthsrc.setter
    def namelengthsrc(self, val):
        self["namelengthsrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            `align`.
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bordercolor`.
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            `namelength`.
        """

    def __init__(
        self,
        arg=None,
        align=None,
        alignsrc=None,
        bgcolor=None,
        bgcolorsrc=None,
        bordercolor=None,
        bordercolorsrc=None,
        font=None,
        namelength=None,
        namelengthsrc=None,
        **kwargs,
    ):
        """
        Construct a new Hoverlabel object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.sankey.node.Hoverlabel`
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            `align`.
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bordercolor`.
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            `namelength`.

        Returns
        -------
        Hoverlabel
        """
        super(Hoverlabel, self).__init__("hoverlabel")
        # Internal construction path: parent supplies state directly.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow-copy so pops below do not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.sankey.node.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.node.Hoverlabel`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("align", None)
        _v = align if align is not None else _v
        if _v is not None:
            self["align"] = _v
        _v = arg.pop("alignsrc", None)
        _v = alignsrc if alignsrc is not None else _v
        if _v is not None:
            self["alignsrc"] = _v
        _v = arg.pop("bgcolor", None)
        _v = bgcolor if bgcolor is not None else _v
        if _v is not None:
            self["bgcolor"] = _v
        _v = arg.pop("bgcolorsrc", None)
        _v = bgcolorsrc if bgcolorsrc is not None else _v
        if _v is not None:
            self["bgcolorsrc"] = _v
        _v = arg.pop("bordercolor", None)
        _v = bordercolor if bordercolor is not None else _v
        if _v is not None:
            self["bordercolor"] = _v
        _v = arg.pop("bordercolorsrc", None)
        _v = bordercolorsrc if bordercolorsrc is not None else _v
        if _v is not None:
            self["bordercolorsrc"] = _v
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("namelength", None)
        _v = namelength if namelength is not None else _v
        if _v is not None:
            self["namelength"] = _v
        _v = arg.pop("namelengthsrc", None)
        _v = namelengthsrc if namelengthsrc is not None else _v
        if _v is not None:
            self["namelengthsrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@sankey@node@_hoverlabel.py@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymapbox/legendgrouptitle/font/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for `densitymapbox.legendgrouptitle.font.size` (>= 1)."""

    def __init__(
        self,
        plotly_name="size",
        parent_name="densitymapbox.legendgrouptitle.font",
        **kwargs,
    ):
        # Resolve schema defaults first; explicit kwargs still win.
        edit_type = kwargs.pop("edit_type", "style")
        minimum = kwargs.pop("min", 1)
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymapbox@legendgrouptitle@font@_size.py@.PATH_END.py
|
{
"filename": "watershed.py",
"repo_name": "itseez/opencv",
"repo_path": "opencv_extracted/opencv-master/samples/python/watershed.py",
"type": "Python"
}
|
#!/usr/bin/env python
'''
Watershed segmentation
=========
This program demonstrates the watershed segmentation algorithm
in OpenCV: watershed().
Usage
-----
watershed.py [image filename]
Keys
----
1-7 - switch marker color
SPACE - update segmentation
r - reset
a - toggle autoupdate
ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from common import Sketcher
class App:
    """Interactive watershed segmentation demo.

    The user paints seed markers on the image with the current colour
    (keys 1-7); cv.watershed() grows regions from those seeds and the
    result is blended over the source image in a second window.
    """

    def __init__(self, fn):
        # Load the input image; fail fast with a clear message otherwise.
        self.img = cv.imread(fn)
        if self.img is None:
            raise Exception('Failed to load image file: %s' % fn)
        h, w = self.img.shape[:2]
        # Per-pixel marker ids (0 = unassigned), as cv.watershed expects.
        self.markers = np.zeros((h, w), np.int32)
        self.markers_vis = self.img.copy()
        self.cur_marker = 1
        # 8 distinct colours: the corners of the RGB cube.
        self.colors = np.int32( list(np.ndindex(2, 2, 2)) ) * 255
        self.auto_update = True
        self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)

    def get_colors(self):
        """Return the (colour, marker id) pair for the active marker."""
        return list(map(int, self.colors[self.cur_marker])), self.cur_marker

    def watershed(self):
        """Run watershed on a copy of the markers and show the blended result."""
        m = self.markers.copy()
        cv.watershed(self.img, m)
        # watershed writes -1 at region boundaries; clamp for colour lookup.
        overlay = self.colors[np.maximum(m, 0)]
        vis = cv.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv.CV_8UC3)
        cv.imshow('watershed', vis)

    def run(self):
        """Event loop: handle key presses until ESC or both windows close."""
        while cv.getWindowProperty('img', 0) != -1 or cv.getWindowProperty('watershed', 0) != -1:
            ch = cv.waitKey(50)
            if ch == 27:
                break
            if ch >= ord('1') and ch <= ord('7'):
                self.cur_marker = ch - ord('0')
                print('marker: ', self.cur_marker)
            if ch == ord(' ') or (self.sketch.dirty and self.auto_update):
                self.watershed()
                self.sketch.dirty = False
            if ch in [ord('a'), ord('A')]:
                self.auto_update = not self.auto_update
                # Message fix: was "auto_update if ..."
                print('auto_update is', ['off', 'on'][self.auto_update])
            if ch in [ord('r'), ord('R')]:
                self.markers[:] = 0
                self.markers_vis[:] = self.img
                self.sketch.show()
        cv.destroyAllWindows()
if __name__ == '__main__':
    print(__doc__)
    import sys
    # Fall back to the bundled sample image when no filename was given.
    # Narrowed from a bare `except:`, which also hid unrelated errors
    # (including KeyboardInterrupt).
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 'fruits.jpg'
    App(cv.samples.findFile(fn)).run()
|
itseezREPO_NAMEopencvPATH_START.@opencv_extracted@opencv-master@samples@python@watershed.py@.PATH_END.py
|
{
"filename": "catalogue_match.ipynb",
"repo_name": "radiocosmology/cora",
"repo_path": "cora_extracted/cora-master/cora/foreground/data/catalogue_match.ipynb",
"type": "Jupyter Notebook"
}
|
```
%pylab inline
```
Populating the interactive namespace from numpy and matplotlib
WARNING: pylab import has clobbered these variables: ['f', 'beta']
`%pylab --no-import-all` prevents importing * from pylab and numpy
```
import pandas as pd
import numpy as np
import networkx as nx
```
```
nvss_tab = pd.read_table("nvss.dat", sep="|", comment="#", header=56, skiprows=[57,58,2265,2267], skipinitialspace=True)
vlss_tab = pd.read_table("vlss.dat", sep="|", comment="#", header=48, skiprows=[49,50,1342,1343], skipinitialspace=True)
```
```
def merge_sources(cat1, cat2, sep):
    """Build a boolean match matrix between two source catalogues.

    Args:
        cat1, cat2: Array-likes of sky positions in degrees; column 0 is
            RA, column 1 is Dec.
        sep: Matching radius in degrees.

    Returns:
        Boolean array of shape (len(cat1), len(cat2)); entry [i, j] is
        True when the pair lies within `sep` degrees (small-angle
        approximation).
    """
    cat1 = np.array(cat1)
    cat2 = np.array(cat2)

    # Pairwise RA/Dec differences (degrees)
    dx = cat1[:, 0, np.newaxis] - cat2[np.newaxis, :, 0]
    dy = cat1[:, 1, np.newaxis] - cat2[np.newaxis, :, 1]

    # Wrap RA differences into (-180, 180]. Bug fix: the original
    # `np.where(dx < 180, dx, dx - 360)` only handled dx > 180, so pairs
    # straddling RA = 0 with dx < -180 (e.g. RA 0.5 vs 359.9) were never
    # matched.
    dx = (dx + 180.0) % 360.0 - 180.0

    # Convert to radians
    dxr = np.radians(dx)
    dyr = np.radians(dy)

    # Small-angle separation.
    # NOTE(review): cos() is applied to the Dec *difference*, not the Dec
    # itself; for the small matching radii used here cos(dyr) ~ 1 so the
    # original behaviour is preserved — confirm whether cos(dec) was meant.
    dd = (dxr**2 * np.cos(dyr) + dyr**2)**0.5

    # Return merge mask
    return (dd < np.radians(sep))
```
## Merge Close Sources
```
nvss_pos = nvss_tab.iloc[:, :2]
vlss_pos = vlss_tab.iloc[:, :2]
```
```
nvss_merge = merge_sources(nvss_pos, nvss_pos, 0.2)
vlss_merge = merge_sources(vlss_pos, vlss_pos, 0.2)
```
```
nvss_groups = nx.connected_components(nx.Graph(nvss_merge))
vlss_groups = nx.connected_components(nx.Graph(vlss_merge))
```
```
# Map each catalogue row to the index of its connected group (-1 = unassigned).
# `np.int` was a deprecated alias for the builtin int (removed in NumPy 1.24).
indarr_nvss = -1 * np.ones(nvss_tab.shape[0], dtype=int)
for c_ind, c_list in enumerate(nvss_groups):
    for n_ind in c_list:
        indarr_nvss[n_ind] = c_ind
indarr_vlss = -1 * np.ones(vlss_tab.shape[0], dtype=int)
for c_ind, c_list in enumerate(vlss_groups):
    for n_ind in c_list:
        indarr_vlss[n_ind] = c_ind
```
```
def select_nvss(group):
    """Collapse a group of matched NVSS rows into a single source row.

    The brightest 1.4 GHz member provides the base entry; total and
    polarised fluxes are summed over the group and their errors combined
    in quadrature.
    """
    # Base off group max. `.ix` was removed from pandas (1.0); `.loc` is
    # the label-based equivalent. `.copy()` avoids mutating the original
    # frame through a view when fluxes are overwritten below.
    groupmax = group.loc[group['S1.4'].idxmax()].copy()
    # Sum fluxes
    groupmax['S1.4'] = group['S1.4'].sum()
    groupmax['polFlux'] = group['polFlux'].sum()
    # Combine errors in quadrature
    groupmax['e_S1.4'] = (group['e_S1.4']**2).sum()**0.5
    groupmax['e_polFlux'] = (group['e_polFlux']**2).sum()**0.5
    return groupmax
def select_vlss(group):
    """Collapse a group of VLSS component rows into a single source row.

    The row of the brightest component (max 'Si') supplies the base
    values; the total flux is summed over the group and its error
    combined in quadrature.
    """
    # Base off group max.  `.ix` was removed from pandas; `.loc` is the
    # label-based equivalent.  Copy so we never write into the input frame.
    groupmax = group.loc[group['Si'].idxmax()].copy()
    # Sum fluxes
    groupmax['Si'] = group['Si'].sum()
    # Combine errors in quadrature
    groupmax['e_Si'] = (group['e_Si']**2).sum()**0.5
    return groupmax
```
```
nvss_grouped = nvss_tab.copy().groupby(indarr_nvss, axis=0)
vlss_grouped = vlss_tab.copy().groupby(indarr_vlss, axis=0)
```
```
nvss_tab2 = nvss_grouped.apply(select_nvss)
vlss_tab2 = vlss_grouped.apply(select_vlss)
```
## Merge Catalogs
```
nvss_pos = nvss_tab2.iloc[:, :2]
vlss_pos = vlss_tab2.iloc[:, :2]
```
```
merge_ind = merge_sources(nvss_pos, vlss_pos, 0.1)
```
```
nvss_map, vlss_map = np.where(merge_ind)
```
```
nvss_mtab = nvss_tab2.copy().take(nvss_map).reset_index(drop=True)
vlss_mtab = vlss_tab2.copy().take(vlss_map).reset_index(drop=True)
```
Clean-up columns before merging
```
del nvss_mtab["RAJ2000"]
del nvss_mtab["DEJ2000"]
del nvss_mtab["e_RAJ2000"]
del nvss_mtab["e_DEJ2000"]
nvss_mtab.rename(columns={'NVSS' : 'NAME', '_RAJ2000' : 'RA', '_DEJ2000' : 'DEC', 'S1.4' : 'S1400', 'e_S1.4' : 'e_S1400', 'polFlux' : 'P1400', 'e_polFlux' : 'e_P1400', 'polPA' : 'POLANG', 'e_polPA' : 'e_POLANG'}, inplace=True)
```
```
del vlss_mtab["_RAJ2000"]
del vlss_mtab["_DEJ2000"]
del vlss_mtab["VLSS"]
del vlss_mtab["RAJ2000"]
del vlss_mtab["DEJ2000"]
del vlss_mtab["e_RAJ2000"]
del vlss_mtab["e_DEJ2000"]
vlss_mtab.rename(columns={'Si' : 'S74', 'e_Si' : 'e_S74'}, inplace=True)
```
Merge tables
```
merged_table = nvss_mtab.join(vlss_mtab)
```
```
merged_pos = merged_table.iloc[:, :2]
```
```
plot(nvss_pos['_RAJ2000'], nvss_pos['_DEJ2000'], 'rD')
plot(vlss_pos['_RAJ2000'], vlss_pos['_DEJ2000'], 'b+')
plot(merged_pos['RA'], merged_pos['DEC'], 'gx')
xlim(0, 50)
ylim(0, 20)
```
(0, 20)

Convert fluxes from milli-Janskys (the catalogue units) into Janskys
```
merged_table['S1400'] = merged_table['S1400'] / 1000.0
merged_table['e_S1400'] = merged_table['e_S1400'] / 1000.0
merged_table['P1400'] = merged_table['P1400'] / 1000.0
merged_table['e_P1400'] = merged_table['e_P1400'] / 1000.0
```
## Give Useful Names
```
cat3c = pd.read_table("3c_modern.dat", sep="|", comment="#", skipinitialspace=True, header=None, names=['ID', '3CNAME', 'NAME', 'TYPE', 'RA', 'DEC'], skiprows=6)
```
```
pos3c = cat3c.iloc[:, -2:]
```
```
merge_3c = merge_sources(merged_pos, pos3c, 0.5)
```
```
p1, p2 = np.where(merge_3c)
```
```
merged_table2 = merged_table.copy()
merged_table2['3CNAME'] = 'NONE'
```
Replace NVSS names with more useful resolved names if we can.
```
# Overwrite NVSS names with resolved 3C names where a match was found.
# Use .loc for label-based assignment: chained indexing such as
# df['NAME'][mi] = ... is unreliable (SettingWithCopy) in modern pandas.
for mi, ci in zip(p1, p2):
    merged_table2.loc[mi, 'NAME'] = cat3c['NAME'][ci]
    merged_table2.loc[mi, '3CNAME'] = cat3c['3CNAME'][ci]
```
```
merged_table2.sort(["S1400"], ascending=False).to_csv("combined.dat", sep="|", index=False)
```
```
merged_sort = merged_table2.sort(["S1400"], ascending=False).copy()
```
```
merged_sort['NAME'] = merged_sort['NAME'].str.strip().str.replace(" ", "_")
merged_sort['3CNAME'] = merged_sort['3CNAME'].str.strip().str.replace(" ", "_")
```
## Add Derived Spectral Information
Calculate spectral index
```
beta = np.log(merged_sort['S74'] / merged_sort['S1400']) / np.log(74.0 / 1400.0)
s600 = merged_sort['S74'] * (600.0 / 74.0)**beta
p600 = merged_sort['P1400'] * (600.0 / 74.0)**beta
```
Add into table
```
# Collect the derived spectral quantities into the final table.
final_table = merged_sort.copy()
final_table['S600'] = s600
final_table['P600'] = p600
final_table['BETA'] = beta
final_table['GAMMA'] = 0.0  # curvature term; refined per-source later
# sort_values replaces the removed DataFrame.sort().
final_table = final_table.sort_values(["S600"], ascending=False).copy()
```
```
final_table[:10]
```
<div style="max-height:1000px;max-width:1500px;overflow:auto;">
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>RA</th>
<th>DEC</th>
<th>NAME</th>
<th>S1400</th>
<th>e_S1400</th>
<th>P1400</th>
<th>POLANG</th>
<th>e_P1400</th>
<th>e_POLANG</th>
<th>S74</th>
<th>e_S74</th>
<th>3CNAME</th>
<th>S600</th>
<th>P600</th>
<th>BETA</th>
<th>GAMMA</th>
</tr>
</thead>
<tbody>
<tr>
<th>0 </th>
<td> 350.85658</td>
<td> 58.83928</td>
<td> CASSIOPEIA_A</td>
<td> 1767.8760</td>
<td> 26.410249</td>
<td> 5.62293</td>
<td>-75.6</td>
<td> 0.098041</td>
<td> 0.0</td>
<td> 18842.05</td>
<td> 1334.050712</td>
<td> 3C_461.0</td>
<td> 3496.296206</td>
<td> 1.043381</td>
<td>-0.804824</td>
<td> 0</td>
</tr>
<tr>
<th>2 </th>
<td> 299.88362</td>
<td> 40.72997</td>
<td> CYGNUS_A</td>
<td> 1605.7735</td>
<td> 40.671599</td>
<td> 2.41593</td>
<td> 50.2</td>
<td> 0.106708</td>
<td> 0.0</td>
<td> 16611.68</td>
<td> 1667.590000</td>
<td> 3C_405</td>
<td> 3148.542843</td>
<td> 0.457910</td>
<td>-0.794684</td>
<td> 0</td>
</tr>
<tr>
<th>1 </th>
<td> 83.61679</td>
<td> 22.03403</td>
<td> M_1</td>
<td> 887.8423</td>
<td> 13.943689</td>
<td> 4.91882</td>
<td> 71.1</td>
<td> 0.111831</td>
<td> 0.0</td>
<td> 1888.55</td>
<td> 188.540000</td>
<td> 3C_144</td>
<td> 1103.568578</td>
<td> 2.874298</td>
<td>-0.256710</td>
<td> 0</td>
</tr>
<tr>
<th>4 </th>
<td> 187.70608</td>
<td> 12.38933</td>
<td> M_87</td>
<td> 147.0479</td>
<td> 4.860562</td>
<td> 1.81558</td>
<td> -0.6</td>
<td> 0.025236</td>
<td> 0.0</td>
<td> 1567.37</td>
<td> 89.051934</td>
<td> 3C_274.0</td>
<td> 290.820972</td>
<td> 0.336876</td>
<td>-0.804852</td>
<td> 0</td>
</tr>
<tr>
<th>7 </th>
<td> 252.79875</td>
<td> 4.98861</td>
<td> HER_A</td>
<td> 46.6135</td>
<td> 0.908627</td>
<td> 1.14107</td>
<td> 43.6</td>
<td> 0.002504</td>
<td> 0.0</td>
<td> 1076.32</td>
<td> 75.063179</td>
<td> 3C_348</td>
<td> 115.192908</td>
<td> 0.122123</td>
<td>-1.067769</td>
<td> 0</td>
</tr>
<tr>
<th>24 </th>
<td> 260.14362</td>
<td> -0.97900</td>
<td> 3C_353</td>
<td> 55.5160</td>
<td> 1.179413</td>
<td> 1.82420</td>
<td>-49.2</td>
<td> 0.008856</td>
<td> 0.0</td>
<td> 455.26</td>
<td> 45.600000</td>
<td> 3C_353.0</td>
<td> 101.805100</td>
<td> 0.407927</td>
<td>-0.715674</td>
<td> 0</td>
</tr>
<tr>
<th>773</th>
<td> 69.26904</td>
<td> 29.67061</td>
<td> PER_B</td>
<td> 49.7285</td>
<td> 1.684100</td>
<td> 0.07699</td>
<td>-80.5</td>
<td> 0.000660</td>
<td> 0.1</td>
<td> 454.97</td>
<td> 46.760000</td>
<td> 3C_123</td>
<td> 94.114319</td>
<td> 0.015926</td>
<td>-0.752902</td>
<td> 0</td>
</tr>
<tr>
<th>772</th>
<td> 139.52408</td>
<td>-12.09481</td>
<td> HYDRA_A</td>
<td> 40.8499</td>
<td> 1.278800</td>
<td> 0.01643</td>
<td> 8.1</td>
<td> 0.001340</td>
<td> 0.9</td>
<td> 579.59</td>
<td> 58.050000</td>
<td> 3C_218</td>
<td> 87.731363</td>
<td> 0.002487</td>
<td>-0.902133</td>
<td> 0</td>
</tr>
<tr>
<th>774</th>
<td> 187.27671</td>
<td> 2.05142</td>
<td> 3C_273C</td>
<td> 54.9912</td>
<td> 1.900300</td>
<td> 0.57594</td>
<td>-33.3</td>
<td> 0.000560</td>
<td> 0.0</td>
<td> 149.96</td>
<td> 15.000000</td>
<td> 3C_273C</td>
<td> 73.425748</td>
<td> 0.282001</td>
<td>-0.341204</td>
<td> 0</td>
</tr>
<tr>
<th>3 </th>
<td> 6.40142</td>
<td> 64.17856</td>
<td> 3C_10C</td>
<td> 25.9311</td>
<td> 0.317754</td>
<td> 0.20349</td>
<td> 68.7</td>
<td> 0.075088</td>
<td> 1.8</td>
<td> 317.66</td>
<td> 16.236792</td>
<td> 3C__10C</td>
<td> 53.382913</td>
<td> 0.034197</td>
<td>-0.852177</td>
<td> 0</td>
</tr>
</tbody>
</table>
</div>
# Better fits to the brightest sources
```
def fit_curved_spectrum(freq, flux, err):
    """Fit a curved power-law spectrum in log-log space.

    Fits log S = log S600 + beta*x + gamma*x**2 with x = log(f/600)
    by weighted linear least squares.

    Parameters
    ----------
    freq, flux, err : ndarray
        Frequencies (MHz), flux densities, and flux errors.

    Returns
    -------
    ndarray
        [S600, beta, gamma]: amplitude at 600 MHz, spectral index, and
        curvature term.
    """
    d = np.log(flux)
    x = np.log(freq / 600.0)
    # Design matrix for a quadratic in log-frequency.
    A = np.array([x**0.0, x, x**2]).T
    # NOTE(review): weights scale as err**-0.5; inverse-variance weighting
    # of log-flux would instead use the fractional error err/flux --
    # preserved as-is, confirm before reusing elsewhere.
    Nh = np.diag(err**-0.5)
    m = np.dot(np.linalg.pinv(np.dot(Nh, A)), np.dot(Nh, d))
    return np.array([np.exp(m[0]), m[1], m[2]])


def fit_curved_spectrum_data(data):
    """Same fit as fit_curved_spectrum, but taking a single (N, 3) array
    whose columns are (freq, flux, err)."""
    # Delegate instead of duplicating the fitting code verbatim.
    return fit_curved_spectrum(data[:, 0], data[:, 1], data[:, 2])
def model_curve(model, fa):
    """Evaluate the curved-spectrum model [S600, beta, gamma] at the
    frequencies ``fa`` (MHz): S(f) = S600 * exp(beta*x + gamma*x**2)
    with x = log(f/600)."""
    log_ratio = np.log(fa / 600.0)
    amplitude, beta, gamma = model[0], model[1], model[2]
    return amplitude * np.exp(beta * log_ratio + gamma * log_ratio**2)

# Dense log-spaced frequency grid (10 MHz - 10 GHz) for plotting curves.
fa = np.logspace(1, 4, 1000)
```
```
ultimate_table = final_table.copy()
```
## Cas A
```
freq_casa = np.array([74.0, 152.0, 320.0, 562.0, 708.0, 860.0, 1190.0, 1440.0])
flux_casa = np.array([17693.0, 13130.0, 7680.0, 4885.0, 4085.0, 3295.0, 2985.0, 2335.0])
err_casa = np.array([12.2, 381.0, 384.0, 146.0, 102.0, 824.0, 119.0, 49.0])
year_casa = np.array([2007.0, 1968.0, 1963.0, 1963.0, 1963.0, 1963.0, 1963.0, 1965.0])
```
```
flux_casa = (1.0 - (0.0097 * (2013 - year_casa))) * flux_casa # Decrement from wikipedia
```
```
m_casa = fit_curved_spectrum(freq_casa, flux_casa, err_casa)
```
```
errorbar(freq_casa, flux_casa, yerr=err_casa, fmt=None)
loglog(fa, model_curve(m_casa, fa))
loglog(fa, final_table['S600'][0] * (fa /600.0)**final_table['BETA'][0])
```
[<matplotlib.lines.Line2D at 0x1184bc890>]

```
t_ind = 0
ultimate_table['S600'][t_ind] = m_casa[0]
ultimate_table['BETA'][t_ind] = m_casa[1]
ultimate_table['GAMMA'][t_ind] = m_casa[2]
```
```
```
## Cyg A
```
freq_cyga = np.array([74.0, 178.0, 327.0, 750.0, 1400.0])
flux_cyga = np.array([16611.68, 8700.0, 6015.3, 2980.0, 1598.0])
err_cyga = np.array([1667.0, 1300.0, 600.0, 145.0, 41.0])
```
```
m_cyga = fit_curved_spectrum(freq_cyga, flux_cyga, err_cyga)
```
```
#loglog(freq_cyga, flux_cyga)
errorbar(freq_cyga, flux_cyga, fmt=None, yerr=err_cyga)
loglog(fa, model_curve(m_cyga, fa))
loglog(fa, final_table['S600'][2] * (fa /600.0)**final_table['BETA'][2])
```
[<matplotlib.lines.Line2D at 0x112a30950>]

```
t_ind = 2
ultimate_table['S600'][t_ind] = m_cyga[0]
ultimate_table['BETA'][t_ind] = m_cyga[1]
ultimate_table['GAMMA'][t_ind] = m_cyga[2]
```
## Taurus A (M1)
```
freq_taua = np.array([74.0, 178.0, 1400.0, 5000.0])
flux_taua = np.array([1888.6, 1530.0, 930.0, 680.0])
err_taua = np.array([188.0, 122.0, 46.5, 34.0])
```
```
m_taua = fit_curved_spectrum(freq_taua, flux_taua, err_taua)
```
```
errorbar(freq_taua, flux_taua, yerr=err_taua, fmt=None)
loglog(fa, model_curve(m_taua, fa))
loglog(fa, final_table['S600'][1] * (fa /600.0)**final_table['BETA'][1])
```
[<matplotlib.lines.Line2D at 0x113964510>]

```
t_ind = 1
ultimate_table['S600'][t_ind] = m_taua[0]
ultimate_table['BETA'][t_ind] = m_taua[1]
ultimate_table['GAMMA'][t_ind] = m_taua[2]
```
## M87
```
freq_m87 = np.array([178.0, 408.0, 635.0, 750.0, 750.0, 1400.0])
flux_m87 = np.array([1144.0, 510.0, 460.3, 337.0, 368.2, 214])
err_m87 = np.array([114.0, 77.0, 18.7, 17.0, 18.4, 10.5])
```
```
m_m87 = fit_curved_spectrum(freq_m87, flux_m87, err_m87)
```
```
errorbar(freq_m87, flux_m87, yerr=err_m87, fmt=None)
loglog(fa, model_curve(m_m87, fa))
loglog(fa, final_table['S600'][4] * (fa /600.0)**final_table['BETA'][4])
```
[<matplotlib.lines.Line2D at 0x1135f2590>]

```
t_ind = 4
ultimate_table['S600'][t_ind] = m_m87[0]
ultimate_table['BETA'][t_ind] = m_m87[1]
ultimate_table['GAMMA'][t_ind] = m_m87[2]
```
## Hercules A
```
freq_hera = np.array([38.0, 80.0, 178.0, 408.0, 580.0, 750.0, 960.0, 1400.0])
flux_hera = np.array([1840.0, 830.0, 386.75, 141.7, 124.09, 87.6, 74.77, 45.7])
err_hera = np.array([82.0, 113.0, 18.4, 22.3, 1.67, 4.40, 0.44, 1.3])
```
```
m_hera = fit_curved_spectrum(freq_hera, flux_hera, err_hera)
```
```
#loglog(freq_hera, flux_hera)
errorbar(freq_hera, flux_hera, yerr=err_hera, fmt=None)
loglog(fa, model_curve(m_hera, fa))
loglog(fa, final_table['S600'][7] * (fa /600.0)**final_table['BETA'][7])
```
[<matplotlib.lines.Line2D at 0x113a13350>]

```
t_ind = 7
ultimate_table['S600'][t_ind] = m_hera[0]
ultimate_table['BETA'][t_ind] = m_hera[1]
ultimate_table['GAMMA'][t_ind] = m_hera[2]
```
## 3C 353
```
freq_3c353 = np.array([74.0, 178.0, 408.0, 580.0, 750.0, 1400.0])
flux_3c353 = np.array([455.3, 241.6, 114.5, 114.6, 88.4, 56.5])
err_3c353 = np.array([45.6, 12.4, 18.2, 2.6, 4.4, 2.8])
```
```
np.vstack((freq_3c353, flux_3c353,err_3c353)).T
```
array([[ 74. , 455.3, 45.6],
[ 178. , 241.6, 12.4],
[ 408. , 114.5, 18.2],
[ 580. , 114.6, 2.6],
[ 750. , 88.4, 4.4],
[ 1400. , 56.5, 2.8]])
```
data_3c353 = np.array([[ 74. , 455.3, 45.6],
[ 178. , 241.6, 12.4],
[ 408. , 114.5, 18.2],
[ 580. , 114.6, 2.6],
[ 750. , 88.4, 4.4],
[ 1400. , 56.5, 2.8]])
```
```
m_3c353 = fit_curved_spectrum(freq_3c353, flux_3c353, err_3c353)
```
```
#loglog(freq_3c353, flux_3c353)
errorbar(freq_3c353, flux_3c353, yerr=err_3c353, fmt=None)
loglog(fa, model_curve(m_3c353, fa))
loglog(fa, final_table['S600'][24] * (fa /600.0)**final_table['BETA'][24])
```
[<matplotlib.lines.Line2D at 0x113a71310>]

```
t_ind = 24
ultimate_table['S600'][t_ind] = m_3c353[0]
ultimate_table['BETA'][t_ind] = m_3c353[1]
ultimate_table['GAMMA'][t_ind] = m_3c353[2]
```
## Per B
```
data_perb = np.array([[ 86.0, 391.00, 9.0 ],
[178.0, 208.25, 9.90],
[318.0, 135.45, 5.20],
[408.0, 122.53, 9.46],
[750.0, 75.60, 3.80],
[1400., 47.96, 1.50]])
```
```
m_perb = fit_curved_spectrum_data(data_perb)
```
```
t_ind = 773; d = data_perb; m = m_perb
errorbar(d[:, 0], d[:, 1], yerr=d[:, 2], fmt=None)
loglog(fa, model_curve(m, fa))
loglog(fa, final_table['S600'][t_ind] * (fa /600.0)**final_table['BETA'][t_ind])
```
[<matplotlib.lines.Line2D at 0x11790ba50>]

```
ultimate_table['S600'][t_ind] = m[0]
ultimate_table['BETA'][t_ind] = m[1]
ultimate_table['GAMMA'][t_ind] = m[2]
```
## Hydra A
```
data_hydraa = np.array([[ 74.0, 579.6, 58.1],
[ 160.0, 245.8, 31.9],
[ 468.0, 115.0, 0.6],
[ 635.0, 97.1, 0.7],
[ 750.0, 78.9, 0.5],
[ 960.0, 65.5, 0.3],
[1400.0, 45.1, 0.4]])
```
```
m_hydraa = fit_curved_spectrum_data(data_hydraa)
```
```
t_ind = 772; d = data_hydraa; m = m_hydraa
errorbar(d[:, 0], d[:, 1], yerr=d[:, 2], fmt=None)
loglog(fa, model_curve(m, fa))
loglog(fa, final_table['S600'][t_ind] * (fa /600.0)**final_table['BETA'][t_ind])
```
[<matplotlib.lines.Line2D at 0x1179ed2d0>]

```
ultimate_table['S600'][t_ind] = m[0]
ultimate_table['BETA'][t_ind] = m[1]
ultimate_table['GAMMA'][t_ind] = m[2]
```
## 3C 273
This source is (I believe) variable, so only measurements taken from a single study (Kuehr et al. 1981) are used, to avoid mixing epochs.
```
data_3c273 = np.array([[ 80.0, 176.0, 26.0],
[ 160.0, 97.3, 12.7],
[ 178.0, 84.4, 8.4],
[ 318.0, 64.0, 2.5],
[ 468.0, 59.9, 0.5],
[ 635.0, 56.5, 0.9],
[ 750.0, 47.4, 2.4],
[ 960.0, 49.6, 0.8],
[1400.0, 45.0, 2.3]])
```
```
m_3c273 = fit_curved_spectrum_data(data_3c273)
```
```
t_ind = 774; d = data_3c273; m = m_3c273
errorbar(d[:, 0], d[:, 1], yerr=d[:, 2], fmt=None)
loglog(fa, model_curve(m, fa))
loglog(fa, final_table['S600'][t_ind] * (fa /600.0)**final_table['BETA'][t_ind])
```
[<matplotlib.lines.Line2D at 0x117ad23d0>]

```
ultimate_table['S600'][t_ind] = m[0]
ultimate_table['BETA'][t_ind] = m[1]
ultimate_table['GAMMA'][t_ind] = m[2]
```
## 3C 10
```
data_3c10 = np.array([[ 74.0, 252.1, 1.9],
[ 178.0, 145.0, 14.5],
[ 750.0, 62.9, 2.0],
[1400.0, 43.5, 2.0]])
```
```
m_3c10 = fit_curved_spectrum_data(data_3c10)
```
```
t_ind = 3; d = data_3c10; m = m_3c10
errorbar(d[:, 0], d[:, 1], yerr=d[:, 2], fmt=None)
loglog(fa, model_curve(m, fa))
loglog(fa, final_table['S600'][t_ind] * (fa /600.0)**final_table['BETA'][t_ind])
```
[<matplotlib.lines.Line2D at 0x117b272d0>]

```
ultimate_table['S600'][t_ind] = m[0]
ultimate_table['BETA'][t_ind] = m[1]
ultimate_table['GAMMA'][t_ind] = m[2]
```
# Construct Final Table
```
final_table[:10]
```
<div style="max-height:1000px;max-width:1500px;overflow:auto;">
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>RA</th>
<th>DEC</th>
<th>NAME</th>
<th>S1400</th>
<th>e_S1400</th>
<th>P1400</th>
<th>POLANG</th>
<th>e_P1400</th>
<th>e_POLANG</th>
<th>S74</th>
<th>e_S74</th>
<th>3CNAME</th>
<th>S600</th>
<th>P600</th>
<th>BETA</th>
<th>GAMMA</th>
</tr>
</thead>
<tbody>
<tr>
<th>0 </th>
<td> 350.85658</td>
<td> 58.83928</td>
<td> CASSIOPEIA_A</td>
<td> 1767.8760</td>
<td> 26.410249</td>
<td> 5.62293</td>
<td>-75.6</td>
<td> 0.098041</td>
<td> 0.0</td>
<td> 18842.05</td>
<td> 1334.050712</td>
<td> 3C_461.0</td>
<td> 3496.296206</td>
<td> 1.043381</td>
<td>-0.804824</td>
<td> 0</td>
</tr>
<tr>
<th>2 </th>
<td> 299.88362</td>
<td> 40.72997</td>
<td> CYGNUS_A</td>
<td> 1605.7735</td>
<td> 40.671599</td>
<td> 2.41593</td>
<td> 50.2</td>
<td> 0.106708</td>
<td> 0.0</td>
<td> 16611.68</td>
<td> 1667.590000</td>
<td> 3C_405</td>
<td> 3148.542843</td>
<td> 0.457910</td>
<td>-0.794684</td>
<td> 0</td>
</tr>
<tr>
<th>1 </th>
<td> 83.61679</td>
<td> 22.03403</td>
<td> M_1</td>
<td> 887.8423</td>
<td> 13.943689</td>
<td> 4.91882</td>
<td> 71.1</td>
<td> 0.111831</td>
<td> 0.0</td>
<td> 1888.55</td>
<td> 188.540000</td>
<td> 3C_144</td>
<td> 1103.568578</td>
<td> 2.874298</td>
<td>-0.256710</td>
<td> 0</td>
</tr>
<tr>
<th>4 </th>
<td> 187.70608</td>
<td> 12.38933</td>
<td> M_87</td>
<td> 147.0479</td>
<td> 4.860562</td>
<td> 1.81558</td>
<td> -0.6</td>
<td> 0.025236</td>
<td> 0.0</td>
<td> 1567.37</td>
<td> 89.051934</td>
<td> 3C_274.0</td>
<td> 290.820972</td>
<td> 0.336876</td>
<td>-0.804852</td>
<td> 0</td>
</tr>
<tr>
<th>7 </th>
<td> 252.79875</td>
<td> 4.98861</td>
<td> HER_A</td>
<td> 46.6135</td>
<td> 0.908627</td>
<td> 1.14107</td>
<td> 43.6</td>
<td> 0.002504</td>
<td> 0.0</td>
<td> 1076.32</td>
<td> 75.063179</td>
<td> 3C_348</td>
<td> 115.192908</td>
<td> 0.122123</td>
<td>-1.067769</td>
<td> 0</td>
</tr>
<tr>
<th>24 </th>
<td> 260.14362</td>
<td> -0.97900</td>
<td> 3C_353</td>
<td> 55.5160</td>
<td> 1.179413</td>
<td> 1.82420</td>
<td>-49.2</td>
<td> 0.008856</td>
<td> 0.0</td>
<td> 455.26</td>
<td> 45.600000</td>
<td> 3C_353.0</td>
<td> 101.805100</td>
<td> 0.407927</td>
<td>-0.715674</td>
<td> 0</td>
</tr>
<tr>
<th>773</th>
<td> 69.26904</td>
<td> 29.67061</td>
<td> PER_B</td>
<td> 49.7285</td>
<td> 1.684100</td>
<td> 0.07699</td>
<td>-80.5</td>
<td> 0.000660</td>
<td> 0.1</td>
<td> 454.97</td>
<td> 46.760000</td>
<td> 3C_123</td>
<td> 94.114319</td>
<td> 0.015926</td>
<td>-0.752902</td>
<td> 0</td>
</tr>
<tr>
<th>772</th>
<td> 139.52408</td>
<td>-12.09481</td>
<td> HYDRA_A</td>
<td> 40.8499</td>
<td> 1.278800</td>
<td> 0.01643</td>
<td> 8.1</td>
<td> 0.001340</td>
<td> 0.9</td>
<td> 579.59</td>
<td> 58.050000</td>
<td> 3C_218</td>
<td> 87.731363</td>
<td> 0.002487</td>
<td>-0.902133</td>
<td> 0</td>
</tr>
<tr>
<th>774</th>
<td> 187.27671</td>
<td> 2.05142</td>
<td> 3C_273C</td>
<td> 54.9912</td>
<td> 1.900300</td>
<td> 0.57594</td>
<td>-33.3</td>
<td> 0.000560</td>
<td> 0.0</td>
<td> 149.96</td>
<td> 15.000000</td>
<td> 3C_273C</td>
<td> 73.425748</td>
<td> 0.282001</td>
<td>-0.341204</td>
<td> 0</td>
</tr>
<tr>
<th>3 </th>
<td> 6.40142</td>
<td> 64.17856</td>
<td> 3C_10C</td>
<td> 25.9311</td>
<td> 0.317754</td>
<td> 0.20349</td>
<td> 68.7</td>
<td> 0.075088</td>
<td> 1.8</td>
<td> 317.66</td>
<td> 16.236792</td>
<td> 3C__10C</td>
<td> 53.382913</td>
<td> 0.034197</td>
<td>-0.852177</td>
<td> 0</td>
</tr>
</tbody>
</table>
</div>
```
ultimate_table[:10]
```
<div style="max-height:1000px;max-width:1500px;overflow:auto;">
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>RA</th>
<th>DEC</th>
<th>NAME</th>
<th>S1400</th>
<th>e_S1400</th>
<th>P1400</th>
<th>POLANG</th>
<th>e_P1400</th>
<th>e_POLANG</th>
<th>S74</th>
<th>e_S74</th>
<th>3CNAME</th>
<th>S600</th>
<th>P600</th>
<th>BETA</th>
<th>GAMMA</th>
</tr>
</thead>
<tbody>
<tr>
<th>0 </th>
<td> 350.85658</td>
<td> 58.83928</td>
<td> CASSIOPEIA_A</td>
<td> 1767.8760</td>
<td> 26.410249</td>
<td> 5.62293</td>
<td>-75.6</td>
<td> 0.098041</td>
<td> 0.0</td>
<td> 18842.05</td>
<td> 1334.050712</td>
<td> 3C_461.0</td>
<td> 2375.563418</td>
<td> 1.043381</td>
<td>-0.775882</td>
<td> 0.073726</td>
</tr>
<tr>
<th>2 </th>
<td> 299.88362</td>
<td> 40.72997</td>
<td> CYGNUS_A</td>
<td> 1605.7735</td>
<td> 40.671599</td>
<td> 2.41593</td>
<td> 50.2</td>
<td> 0.106708</td>
<td> 0.0</td>
<td> 16611.68</td>
<td> 1667.590000</td>
<td> 3C_405</td>
<td> 3613.567711</td>
<td> 0.457910</td>
<td>-0.888608</td>
<td>-0.085651</td>
</tr>
<tr>
<th>1 </th>
<td> 83.61679</td>
<td> 22.03403</td>
<td> M_1</td>
<td> 887.8423</td>
<td> 13.943689</td>
<td> 4.91882</td>
<td> 71.1</td>
<td> 0.111831</td>
<td> 0.0</td>
<td> 1888.55</td>
<td> 188.540000</td>
<td> 3C_144</td>
<td> 1142.621198</td>
<td> 2.874298</td>
<td>-0.242263</td>
<td>-0.001170</td>
</tr>
<tr>
<th>4 </th>
<td> 187.70608</td>
<td> 12.38933</td>
<td> M_87</td>
<td> 147.0479</td>
<td> 4.860562</td>
<td> 1.81558</td>
<td> -0.6</td>
<td> 0.025236</td>
<td> 0.0</td>
<td> 1567.37</td>
<td> 89.051934</td>
<td> 3C_274.0</td>
<td> 436.338553</td>
<td> 0.336876</td>
<td>-0.811899</td>
<td>-0.035656</td>
</tr>
<tr>
<th>7 </th>
<td> 252.79875</td>
<td> 4.98861</td>
<td> HER_A</td>
<td> 46.6135</td>
<td> 0.908627</td>
<td> 1.14107</td>
<td> 43.6</td>
<td> 0.002504</td>
<td> 0.0</td>
<td> 1076.32</td>
<td> 75.063179</td>
<td> 3C_348</td>
<td> 120.352428</td>
<td> 0.122123</td>
<td>-1.052856</td>
<td>-0.045649</td>
</tr>
<tr>
<th>24 </th>
<td> 260.14362</td>
<td> -0.97900</td>
<td> 3C_353</td>
<td> 55.5160</td>
<td> 1.179413</td>
<td> 1.82420</td>
<td>-49.2</td>
<td> 0.008856</td>
<td> 0.0</td>
<td> 455.26</td>
<td> 45.600000</td>
<td> 3C_353.0</td>
<td> 106.512561</td>
<td> 0.407927</td>
<td>-0.720365</td>
<td>-0.025560</td>
</tr>
<tr>
<th>773</th>
<td> 69.26904</td>
<td> 29.67061</td>
<td> PER_B</td>
<td> 49.7285</td>
<td> 1.684100</td>
<td> 0.07699</td>
<td>-80.5</td>
<td> 0.000660</td>
<td> 0.1</td>
<td> 454.97</td>
<td> 46.760000</td>
<td> 3C_123</td>
<td> 87.455842</td>
<td> 0.015926</td>
<td>-0.721306</td>
<td> 0.019997</td>
</tr>
<tr>
<th>772</th>
<td> 139.52408</td>
<td>-12.09481</td>
<td> HYDRA_A</td>
<td> 40.8499</td>
<td> 1.278800</td>
<td> 0.01643</td>
<td> 8.1</td>
<td> 0.001340</td>
<td> 0.9</td>
<td> 579.59</td>
<td> 58.050000</td>
<td> 3C_218</td>
<td> 96.976365</td>
<td> 0.002487</td>
<td>-0.839342</td>
<td>-0.057817</td>
</tr>
<tr>
<th>774</th>
<td> 187.27671</td>
<td> 2.05142</td>
<td> 3C_273C</td>
<td> 54.9912</td>
<td> 1.900300</td>
<td> 0.57594</td>
<td>-33.3</td>
<td> 0.000560</td>
<td> 0.0</td>
<td> 149.96</td>
<td> 15.000000</td>
<td> 3C_273C</td>
<td> 54.976965</td>
<td> 0.282001</td>
<td>-0.288965</td>
<td> 0.099464</td>
</tr>
<tr>
<th>3 </th>
<td> 6.40142</td>
<td> 64.17856</td>
<td> 3C_10C</td>
<td> 25.9311</td>
<td> 0.317754</td>
<td> 0.20349</td>
<td> 68.7</td>
<td> 0.075088</td>
<td> 1.8</td>
<td> 317.66</td>
<td> 16.236792</td>
<td> 3C__10C</td>
<td> 71.500775</td>
<td> 0.034197</td>
<td>-0.589441</td>
<td> 0.005817</td>
</tr>
</tbody>
</table>
</div>
```
```
# Write table to disk
```
# sort_values replaces the removed DataFrame.sort(); write the final
# catalogue as a whitespace-formatted text table.
ultimate_table = ultimate_table.sort_values(["S600"], ascending=False).copy()
with open("combinedps_new.dat", "w") as f:
    f.write(ultimate_table.to_string(index=False))
```
```
```
|
radiocosmologyREPO_NAMEcoraPATH_START.@cora_extracted@cora-master@cora@foreground@data@catalogue_match.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/map/layer/symbol/textfont/__init__.py",
"type": "Python"
}
|
# Auto-generated re-exports of the plotly validator classes for the
# layout.map.layer.symbol.textfont property namespace.
import sys
from typing import TYPE_CHECKING
# On Python < 3.7 there is no module-level __getattr__ (PEP 562), so the
# validators are imported eagerly.  TYPE_CHECKING also forces the eager
# branch so static type checkers see the concrete class names.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._weight import WeightValidator
    from ._style import StyleValidator
    from ._size import SizeValidator
    from ._family import FamilyValidator
    from ._color import ColorValidator
else:
    # Lazily resolve the same names on first attribute access to keep
    # plotly's import time down.
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._weight.WeightValidator",
            "._style.StyleValidator",
            "._size.SizeValidator",
            "._family.FamilyValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@map@layer@symbol@textfont@__init__.py@.PATH_END.py
|
{
"filename": "nca.py",
"repo_name": "barentsen/dave",
"repo_path": "dave_extracted/dave-master/fileio/nca.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import numpy as np
__version__ = "$Id$"
__URL__ = "$URL$"
class Nca(np.ndarray):
    """
    A thin wrapper around numpy to allow column addressing by name
    instead of number.

    I frequently have a large 2d array where each row represents an
    object, and each column represent an attribute of that object.
    For example::

        Obj1   Period Epoch Radius
        Obj2   Period Epoch Radius

    To get all the radii, I'd like to say array[:, "radius"] instead
    of array[:, 3].  This class allows me to do that.  Here is an
    example::

        row = '0 1 2 3'.split()
        col = 'a b c d'.split()
        nameDict = dict()
        nameDict[0] = row
        nameDict[1] = col
        data = np.arange(16).reshape(4,4) + 1
        ca = Nca( data , nameDict)

    Anywhere you normally put a number, you can now use a string.
    For example::

        ca['0', 'b']
        ca['0', :'b']
        ca[idx, 'a':'c'] etc.

    The return behaviour is similar to numpy, except that an object
    of the extended class is returned where numpy would return an array.
    The lookup information is also transferred in a natural manner.

    Inputs:
    ---------
    ndArray
        A numpy array
    nameDict
        A lookup table matching strings to array positions.
        See setLookup() for more details

    Notes:
    ------
    * How to subclass numpy's ndarray is taken from
      <http://docs.scipy.org/doc/numpy/user/basics.subclassing.html>_
      The __new__ and __array_finalize__ methods are copied from there.
    * There is one known bug with this code: arr[:4] doesn't return
      the correct lookup table.  As a workaround use arr[:4, :], which
      does.

    Todo:
    -------
    * Fix bug where array[:4] fails (but array[:'4'] doesn't)
    * Add a metadata dictionary
    * Write tests to make sure lookup dictionary is being correctly sliced
    """

    def __new__(cls, input_array, nameDict=None):
        # Standard ndarray-subclass construction: view the input data as
        # an Nca and attach the name lookup table.
        obj = np.asarray(input_array).view(cls)
        obj.lookup = nameDict
        return obj

    def __array_finalize__(self, obj):
        # Called by numpy whenever a new Nca comes into being (view,
        # slice, copy); propagate the lookup table when one is present.
        if obj is None:
            return
        self.lookup = getattr(obj, 'lookup', None)

    def __getitem__(self, key):
        """Index the array, translating any string keys into integer
        positions via the lookup table."""
        key = self.parseKey(key)
        returnObj = np.ndarray.__getitem__(self, key)
        # When numpy hands back an array (not a scalar), rebuild an Nca
        # with a lookup table sliced to match the indexing operation.
        # NOTE: this is the code path that misbehaves for arr[:4]
        # (see the known-bug note in the class docstring).
        if isinstance(returnObj, Nca):
            if self.lookup is None:
                newLookup = None
            else:
                newLookup = self._setNewLookup(key)
            return Nca(returnObj, nameDict=newLookup)
        else:
            return returnObj

    def __setitem__(self, key, value):
        """Assign to the array, translating string keys as in __getitem__."""
        key = self.parseKey(key)
        np.ndarray.__setitem__(self, key, value)

    def _setNewLookup(self, key):
        """Slice the lookup table to match an indexing operation.

        NOTE(review): for a bare int key this returns
        ``self.lookup[0][key]`` -- a single *name*, not a dict -- so the
        lookup attached to single-row results has the wrong type.
        Preserved as-is; related to the known bug in the class docstring.
        """
        # If only 1 int, we're looking at one row of zeroth dimension.
        if isinstance(key, int):
            try:
                return self.lookup[0][key]
            except KeyError:
                return None
        # Similarly for a slice.
        if isinstance(key, slice):
            return self.lookup[0][key]
        # If it's a tuple, we have multiple dimensions.
        if isinstance(key, tuple):
            newLookup = dict()
            for i in range(len(key)):
                try:
                    newLookup[i] = np.array(self.lookup[i])[key[i]]
                except KeyError:
                    # Lookup not defined for this dimension.
                    continue
                newLookup[i] = list(newLookup[i])
            return newLookup

    def parseKey(self, key, dim=0):
        """Recursively convert string entries in ``key`` into integer
        indices for dimension ``dim``.

        Handles bare strings, lists, tuples (one element per array
        dimension) and slices; anything else passes through unchanged.
        Raises KeyError for a string not present in the lookup table.
        """
        if isinstance(key, str):
            try:
                key = self.lookup[dim].index(key)
            except ValueError:
                raise KeyError(
                    "key '%s' not a recognised column in dimension %i" % (key, dim))
        if isinstance(key, list):
            # Build a new list: the previous implementation rewrote the
            # caller's list in place, a surprising side effect.
            return [self.parseKey(k, dim=dim) for k in key]
        if isinstance(key, tuple):
            # Each tuple element indexes its own dimension.
            return tuple(self.parseKey(k, dim=i) for i, k in enumerate(key))
        if isinstance(key, slice):
            start = self.parseKey(key.start, dim=dim)
            stop = self.parseKey(key.stop, dim=dim)
            return slice(start, stop, key.step)
        # No more strings to strip out.  (The original had an
        # unreachable duplicate `return key` here; removed.)
        return key

    def setLookup(self, dim, colNameList):
        """Set the mapping from column name to column number.

        Inputs:
            dim         (int)  Which dimension to create a lookup table for.
            colNameList (list) What to call the column names. colNameList
                               must be the same length as self.shape[dim]

        For example::

            setLookup(1, ['time', 'flux', 'unc'])

        allows you to access an array with x[:, 'time'].
        """
        if not isinstance(dim, int):
            raise TypeError("Dim must be an integer")
        if len(colNameList) != self.shape[dim]:
            raise ValueError(" Length of colNameList (%i) not equal length of array in dimension %i (%i)" \
                %(len(colNameList), dim, self.shape[dim]))
        if self.lookup is None:
            self.lookup = dict()
        self.lookup[dim] = list(colNameList)

    def asarray(self):
        """Return the underlying data as a plain numpy ndarray."""
        return self.view(np.ndarray)
def example():
    """Build and return a small labelled 4x4 Nca for demonstration."""
    names = dict()
    names[0] = '0 1 2 3'.split()
    names[1] = 'a b c d'.split()
    values = np.arange(16).reshape(4, 4) + 1
    return Nca(values, names)
|
barentsenREPO_NAMEdavePATH_START.@dave_extracted@dave-master@fileio@nca.py@.PATH_END.py
|
{
"filename": "model.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/python_models/wrong_return_type/model.py",
"type": "Python"
}
|
# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
    """Echo model that reschedules every request.

    NOTE(review): judging by the surrounding path (qa/python_models/
    wrong_return_type), this fixture deliberately violates the backend
    contract -- the comment in execute() says rescheduled requests should
    have `None` appended, yet a real InferenceResponse is appended.
    Do not "fix" without checking the QA test that consumes it.
    """
    def initialize(self, args):
        # Parse the model configuration and cache OUTPUT0's numpy dtype
        # for use in execute().
        self.model_config = model_config = json.loads(args["model_config"])
        output0_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT0")
        self.output0_dtype = pb_utils.triton_string_to_numpy(
            output0_config["data_type"]
        )
    def execute(self, requests):
        # Copy INPUT0 to OUTPUT0 (cast to the configured dtype) and mark
        # every request for rescheduling.
        output0_dtype = self.output0_dtype
        responses = []
        for request in requests:
            in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
            out_0 = in_0.as_numpy()
            # Create output tensors. You need pb_utils.Tensor
            # objects to create pb_utils.InferenceResponse.
            out_tensor_0 = pb_utils.Tensor("OUTPUT0", out_0.astype(output0_dtype))
            inference_response = pb_utils.InferenceResponse(
                output_tensors=[out_tensor_0]
            )
            request.set_release_flags(pb_utils.TRITONSERVER_REQUEST_RELEASE_RESCHEDULE)
            # Should append `None` for rescheduled requests.
            responses.append(inference_response)
        return responses
    def finalize(self):
        # No resources to release.
        pass
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@python_models@wrong_return_type@model.py@.PATH_END.py
|
{
"filename": "_two_body.py",
"repo_name": "LoganAMorrison/Hazma",
"repo_path": "Hazma_extracted/Hazma-master/hazma/phase_space/_two_body.py",
"type": "Python"
}
|
"""
Module for integrating two-body phase space.
"""
from typing import Sequence, Tuple, Callable, Any, Optional
import numpy as np
from scipy import integrate
from hazma.utils import kallen_lambda
from ._base import AbstractPhaseSpaceIntegrator
def _msqrd_flat(z):
if np.isscalar(z):
return 1.0
return np.zeros_like(z)
class TwoBody(AbstractPhaseSpaceIntegrator):
    r"""Class for working with 2-body phase space."""

    def __init__(
        self,
        cme: float,
        masses: Sequence[float],
        msqrd: Optional[Callable[..., Any]] = None,
        msqrd_signature: Optional[str] = None,
    ):
        """
        Parameters
        ----------
        cme: float
            Center-of-mass energy.
        masses: sequence float
            The two final state particle masses.
        msqrd: callable, optional
            Function to compute squared matrix element. The signature of the
            function depends on the value of `msqrd_signature`. If no matrix
            element is passed, it is taken to be flat (i.e. |M|^2 = 1).
        msqrd_signature: str, optional
            Signature of squared matrix element. If 'momenta', then the function
            is assumed to take in a NumPy array containing the momenta of the
            final state particles. If 'z', then it is assumed to take in
            the angle between the 3-momenta of the two final state particles.
            Default is 'z'.
        """
        assert (
            len(masses) == 2
        ), f"Expected 'masses' to have length 2, found {len(masses)}."
        self.__cme = cme
        self.__masses = (masses[0], masses[1])

        if msqrd is not None:
            assert callable(msqrd), "The squared matrix element must be callable."

        # Default to the scattering-angle ('z') signature; switched below if
        # the caller explicitly requests the 'momenta' signature.
        self.__msqrd_signature_z = True
        if msqrd_signature is not None:
            assert msqrd_signature in ["momenta", "z"], (
                f"Invalid 'msqrd_signature' {msqrd_signature}." "Use 'momenta' or 'z'."
            )
            if msqrd_signature == "momenta":
                self.__msqrd_signature_z = False

        if msqrd is None:
            self.__msqrd = _msqrd_flat
        else:
            self.__msqrd = msqrd

    @property
    def cme(self) -> float:
        r"""Center-of-mass energy of the process."""
        return self.__cme

    @cme.setter
    def cme(self, val) -> None:
        self.__cme = val

    @property
    def masses(self) -> Tuple[float, float]:
        r"""Masses of the final state particles."""
        return self.__masses

    @masses.setter
    def masses(self, masses: Sequence[float]) -> None:
        # Bug fix: the assertion message previously claimed length 3 was
        # expected although the check (correctly) requires length 2.
        assert (
            len(masses) == 2
        ), f"Expected 'masses' to have length 2, found {len(masses)}."
        self.__masses = (masses[0], masses[1])

    @property
    def msqrd(self):
        r"""Squared matrix element of the process."""
        return self.__msqrd

    @msqrd.setter
    def msqrd(self, fn) -> None:
        r"""Squared matrix element of the process."""
        self.__msqrd = fn

    def __integrate_momenta(self):
        r"""Integrate over phase space assuming msqrd takes the final-state
        particle momenta as its argument.
        """
        cme = self.cme
        m1, m2 = self.masses
        # Magnitude of the final-state 3-momentum in the center-of-mass frame.
        p = np.sqrt(kallen_lambda(cme**2, m1**2, m2**2)) / (2 * cme)
        e1 = np.hypot(m1, p)
        e2 = np.hypot(m2, p)

        # Four-momenta array of shape (4, 2): one column per final-state
        # particle, components (E, px, py, pz), back-to-back in the CM frame.
        ps = np.zeros((4, 2), dtype=np.float64)

        def integrand(z):
            sin = np.sqrt(1 - z**2)
            ps[:, 0] = np.array([e1, sin * p, 0.0, z * p])
            ps[:, 1] = np.array([e2, -sin * p, 0.0, -z * p])
            return self.__msqrd(ps)

        # Two-body phase-space prefactor per unit cos(theta).
        pre = 1.0 / (8.0 * np.pi) * p / cme
        integral, error = integrate.quad(integrand, -1.0, 1.0)
        return integral * pre, error * pre

    def __integrate_angle(self):
        r"""Integrate over phase space assuming msqrd takes the angle as
        argument.
        """
        cme = self.cme
        m1, m2 = self.masses
        p = np.sqrt(kallen_lambda(cme**2, m1**2, m2**2)) / (2 * cme)
        pre = 1.0 / (8.0 * np.pi) * p / cme
        integral, error = integrate.quad(self.__msqrd, -1.0, 1.0)
        return integral * pre, error * pre

    def integrate(self):  # pylint: disable=arguments-differ
        r"""Integrate over phase space.

        Returns
        -------
        integral: float
            Value of the phase space integration.
        error_estimate: float
            Estimation of the error.
        """
        if self.__msqrd_signature_z:
            return self.__integrate_angle()
        return self.__integrate_momenta()
|
LoganAMorrisonREPO_NAMEHazmaPATH_START.@Hazma_extracted@Hazma-master@hazma@phase_space@_two_body.py@.PATH_END.py
|
{
"filename": "_west.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/map/bounds/_west.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WestValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``west`` bound of ``layout.map.bounds``."""

    def __init__(self, plotly_name="west", parent_name="layout.map.bounds", **kwargs):
        # Callers may override the edit type; default is a full re-plot.
        edit_type = kwargs.pop("edit_type", "plot")
        super(WestValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@map@bounds@_west.py@.PATH_END.py
|
{
"filename": "pull-request-template.md",
"repo_name": "ashleychontos/pySYD",
"repo_path": "pySYD_extracted/pySYD-master/.github/pull-request-template.md",
"type": "Markdown"
}
|
---
name: Pull request
about: Contribute to this project!
title: ''
labels: ''
assignees: ''
---
------
## Relevant Materials
### Description
Using the checklist as a guide, please provide more details about the nature of the pull request.
### Attachments & Supporting Files
Please provide any relevant supporting files, e.g., data files if adding a new target to a test or as an example
### Other
Any other relevant context, e.g., a reference/link if this directly addresses an existing issue
------
## Summary Checklist:
- Is this a major request? <ul><li>- [ ] Yes <br/> → Have you already submitted an issue outlining the request?<ul><li>- [ ] Yes</li><li>- [ ] No</li></ul></li><li>- [ ] No</li><li>- [ ] IDK</li></ul>
- What type of request is this? <ul><li>- [ ] New feature</li><li>- [ ] Modification</li><li>- [ ] Suggested update</li><li>- [ ] Other*</li></ul>
- Why is the change needed? <ul><li>- [ ] General improvement</li><li>- [ ] Solves a problem </li><li>- [ ] Relevant to an open issue <br/> → Did you provide the link?<ul><li>- [ ] Yes</li><li>- [ ] No</li></ul></li><li>- [ ] Increased efficiency and/or speed</li><li>- [ ] Other?*</li></ul>
- Did you test your changes? <ul><li>- [ ] Yes</li><li>- [ ] No (please do ***not*** submit a request until you have)</li><li>- [ ] N/A</li></ul>
- Does the code comply with our [style guide](https://github.com/ashleychontos/pySYD/blob/master/CONTRIBUTING.md) and is it complete with docstrings and other relevant documentation? <ul><li>- [ ] Yes</li><li>- [ ] No</li></ul>
- Will this request require any changes from our end? <ul><li>- [ ] Yes**</li><li>- [ ] No</li></ul>
<br/>
* If 'Other' was selected at any point, please expand on each of these points in more detail below. <br/>** If 'Yes' was selected for the last bullet, please elaborate below.
------
|
ashleychontosREPO_NAMEpySYDPATH_START.@pySYD_extracted@pySYD-master@.github@pull-request-template.md@.PATH_END.py
|
{
"filename": "_showlegend.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choropleth/_showlegend.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``showlegend`` attribute of ``choropleth``."""

    def __init__(self, plotly_name="showlegend", parent_name="choropleth", **kwargs):
        # Callers may override the edit type; default is a style-only edit.
        edit_type = kwargs.pop("edit_type", "style")
        super(ShowlegendValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@choropleth@_showlegend.py@.PATH_END.py
|
{
"filename": "modules.py",
"repo_name": "PolymathicAI/AstroCLIP",
"repo_path": "AstroCLIP_extracted/AstroCLIP-main/downstream_tasks/property_estimation/baselines/modules.py",
"type": "Python"
}
|
import lightning as L
import torch
from torch import nn, optim
from torchvision import models
from torchvision.transforms import (
Compose,
GaussianBlur,
RandomHorizontalFlip,
RandomVerticalFlip,
)
from astroclip.env import format_with_env
ASTROCLIP_ROOT = format_with_env("{ASTROCLIP_ROOT}")
class SupervisedModel(L.LightningModule):
    """Lightning wrapper for the supervised property-estimation baselines.

    Wraps one of three backbones (image ResNet-18, spectrum conv+attention
    encoder, or a small MLP) and trains it against the target properties
    with an MSE loss.
    """

    def __init__(
        self,
        model_name,
        modality,
        properties,
        scale,
        num_epochs,
        lr=1e-3,
        save_dir=None,
    ):
        """
        Args:
            model_name: one of ``"resnet18"``, ``"conv+att"``, ``"mlp"``.
            modality: input modality; ``"image"`` inputs get flip/blur
                augmentation during training.
            properties: names of the target properties; their count sets the
                model output dimension.
            scale: scaling metadata stored on the module.
            num_epochs: total training epochs; also the cosine-annealing period.
            lr: Adam learning rate.
            save_dir: optional output directory (stored, not used here).
        """
        super().__init__()
        self.model_name = model_name
        self.modality = modality
        self.properties = properties
        self.scale = scale
        self.lr = lr
        self.num_epochs = num_epochs
        self.criterion = nn.MSELoss()
        self.save_dir = save_dir
        self._initialize_model(model_name)

        # Train-time augmentations, applied to the image modality only.
        self.image_transforms = Compose(
            [
                RandomHorizontalFlip(),
                RandomVerticalFlip(),
                GaussianBlur(kernel_size=3),
            ]
        )

    def _initialize_model(self, model_name):
        """Instantiate the requested backbone with one output per property."""
        if model_name == "resnet18":
            self.model = ResNet18(n_out=len(self.properties))
        elif model_name == "conv+att":
            self.model = SpectrumEncoder(n_latent=len(self.properties))
        elif model_name == "mlp":
            self.model = MLP(
                n_in=3,
                n_out=len(self.properties),
                n_hidden=(64, 64),
                act=[nn.ReLU()] * 3,
            )
        else:
            raise ValueError("Invalid model name")

    def forward(self, x):
        return self.model(x).squeeze()

    def training_step(self, batch, batch_idx):
        X_batch, y_batch = batch
        if self.modality == "image":
            X_batch = self.image_transforms(X_batch)
        y_pred = self(X_batch)
        loss = self.criterion(y_pred, y_batch.squeeze())
        self.log("train_loss", loss, prog_bar=True, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        X_batch, y_batch = batch
        y_pred = self(X_batch)
        loss = self.criterion(y_pred, y_batch.squeeze())
        self.log("val_loss", loss, prog_bar=True, on_epoch=True)
        return loss

    def configure_optimizers(self):
        optimizer = optim.Adam(self.parameters(), lr=self.lr)
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, self.num_epochs)
        # Bug fix: Lightning expects the scheduler under the "lr_scheduler"
        # key. The previous "scheduler" key is not recognized, so the
        # cosine-annealing schedule was never stepped.
        return {"optimizer": optimizer, "lr_scheduler": scheduler}
class ResNet18(nn.Module):
    """Modified ResNet-18: randomly initialized, with the classification
    head resized to ``n_out`` outputs."""

    def __init__(self, n_out=1):
        super(ResNet18, self).__init__()
        backbone = models.resnet18(weights=None)
        # Re-create the stem convolution explicitly (same shape as the
        # torchvision default) and resize the final fully-connected layer.
        backbone.conv1 = nn.Conv2d(
            3, 64, kernel_size=7, stride=2, padding=3, bias=False
        )
        backbone.fc = nn.Linear(512, n_out)
        self.resnet = backbone

    def forward(self, x):
        return self.resnet(x)
class MLP(nn.Sequential):
    """Fully connected multi-layer perceptron.

    Hidden layers are Linear -> activation -> Dropout; the output layer is a
    bare Linear. ``act`` must supply ``len(n_hidden) + 1`` activations (the
    default is LeakyReLU everywhere); the final entry is reserved and not
    inserted, since the output layer carries no activation.
    """

    def __init__(self, n_in, n_out, n_hidden=(16, 16, 16), act=None, dropout=0):
        if act is None:
            act = [nn.LeakyReLU()] * (len(n_hidden) + 1)
        assert len(act) == len(n_hidden) + 1

        widths = [n_in] + list(n_hidden) + [n_out]
        modules = []
        # One (Linear, activation, Dropout) triple per hidden layer.
        for idx, (w_in, w_out) in enumerate(zip(widths[:-2], widths[1:-1])):
            modules.extend([nn.Linear(w_in, w_out), act[idx], nn.Dropout(p=dropout)])
        # Bare output layer.
        modules.append(nn.Linear(widths[-2], widths[-1]))
        super(MLP, self).__init__(*modules)
class SpectrumEncoder(nn.Module):
    """Spectrum encoder

    Modified version of the encoder by Serrà et al. (2018), which combines a 3 layer CNN
    with a dot-product attention module. This encoder adds an MLP to further compress the
    attended values into a low-dimensional latent space.

    Paper: Serrà et al., https://arxiv.org/abs/1805.03908
    """

    def __init__(self, n_latent, n_hidden=(32, 32), act=None, dropout=0):
        """
        Args:
            n_latent: dimension of the output latent space.
            n_hidden: hidden-layer widths of the compression MLP.
            act: optional list of MLP activations (``len(n_hidden) + 1`` entries).
            dropout: dropout probability used in both the CNN blocks and the MLP.
        """
        super(SpectrumEncoder, self).__init__()
        self.n_latent = n_latent

        filters = [8, 16, 16, 32]
        sizes = [5, 10, 20, 40]
        self.conv1, self.conv2, self.conv3, self.conv4 = self._conv_blocks(
            filters, sizes, dropout=dropout
        )
        # Half of the last conv's channels are attention values, half keys.
        self.n_feature = filters[-1] // 2

        # pools and softmax work for spectra and weights
        self.pool1, self.pool2, self.pool3 = tuple(
            nn.MaxPool1d(s, padding=s // 2) for s in sizes[:3]
        )
        self.softmax = nn.Softmax(dim=-1)

        # small MLP to go from CNN features to latents
        if act is None:
            act = [nn.PReLU(n) for n in n_hidden]
            # last activation identity to have latents centered around 0
            act.append(nn.Identity())

        self.mlp = MLP(
            self.n_feature, self.n_latent, n_hidden=n_hidden, act=act, dropout=dropout
        )

    def _conv_blocks(self, filters, sizes, dropout=0):
        """Build the Conv1d -> InstanceNorm -> PReLU -> Dropout blocks."""
        convs = []
        for i in range(len(filters)):
            f_in = 1 if i == 0 else filters[i - 1]
            f = filters[i]
            s = sizes[i]
            p = s // 2  # "same"-style padding for odd kernels
            conv = nn.Conv1d(
                in_channels=f_in,
                out_channels=f,
                kernel_size=s,
                padding=p,
            )
            norm = nn.InstanceNorm1d(f)
            act = nn.PReLU(f)
            drop = nn.Dropout(p=dropout)
            convs.append(nn.Sequential(conv, norm, act, drop))
        return tuple(convs)

    def _downsample(self, x):
        # compression
        x = x.unsqueeze(1)
        x = self.pool1(self.conv1(x))
        x = self.pool2(self.conv2(x))
        x = self.pool3(self.conv3(x))
        x = self.conv4(x)
        C = x.shape[1] // 2
        # split half channels into attention value and key
        h, a = torch.split(x, [C, C], dim=1)
        return h, a

    def forward(self, y):
        # run through CNNs
        h, a = self._downsample(y)
        # softmax attention
        a = self.softmax(a)

        # attach hook to extract backward gradient of a scalar prediction
        # for Grad-FAM (Feature Activation Map).
        # Bug fix: the original tested `~self.training`, a *bitwise* not on a
        # bool (-2 or -1, both truthy), so the hook was also registered in
        # training mode; `not self.training` restricts it to eval mode.
        if not self.training and a.requires_grad:
            a.register_hook(self._attention_hook)

        # apply attention
        x = torch.sum(h * a, dim=2)

        # run attended features into MLP for final latents
        x = self.mlp(x)
        return x

    @property
    def n_parameters(self):
        """Number of trainable parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)

    def _attention_hook(self, grad):
        # Stores the attention gradient captured by the registered hook.
        self._attention_grad = grad

    @property
    def attention_grad(self):
        """Attention gradient from the last backward pass, or None."""
        if hasattr(self, "_attention_grad"):
            return self._attention_grad
        else:
            return None
|
PolymathicAIREPO_NAMEAstroCLIPPATH_START.@AstroCLIP_extracted@AstroCLIP-main@downstream_tasks@property_estimation@baselines@modules.py@.PATH_END.py
|
{
"filename": "test_mpirng.py",
"repo_name": "bccp/nbodykit",
"repo_path": "nbodykit_extracted/nbodykit-master/nbodykit/tests/test_mpirng.py",
"type": "Python"
}
|
from runtests.mpi import MPITest
from nbodykit import setup_logging
from nbodykit.mpirng import MPIRandomState
from numpy.testing import assert_array_equal
import numpy
from mpi4py import MPI
import os
import pytest
setup_logging("debug")
@MPITest([4])
def test_mpirng_large_chunk(comm):
    """Chunk size exceeding the global size: the gathered draws must match a
    single-rank generator with identical seed, size and chunksize."""
    rng = MPIRandomState(comm, seed=1234, size=1, chunksize=10)
    gathered = numpy.concatenate(comm.allgather(rng.uniform()), axis=0)
    reference = MPIRandomState(MPI.COMM_SELF, seed=1234, size=rng.csize, chunksize=rng.chunksize)
    assert_array_equal(gathered, reference.uniform())
@MPITest([4])
def test_mpirng_small_chunk(comm):
    """Chunk size smaller than the global size: the gathered draws must match
    a single-rank generator with identical seed, size and chunksize."""
    rng = MPIRandomState(comm, seed=1234, size=10, chunksize=3)
    gathered = numpy.concatenate(comm.allgather(rng.uniform()), axis=0)
    reference = MPIRandomState(MPI.COMM_SELF, seed=1234, size=rng.csize, chunksize=rng.chunksize)
    assert_array_equal(gathered, reference.uniform())
@MPITest([4])
def test_mpirng_unique(comm):
    """Successive draws from the same generator must not repeat."""
    rng = MPIRandomState(comm, seed=1234, size=10, chunksize=3)
    first = rng.uniform()
    second = rng.uniform()
    # it shouldn't be the same!
    assert (first != second).any()
@MPITest([4])
def test_mpirng_args(comm):
    """An array-valued argument must reproduce the scalar-argument result."""
    rng = MPIRandomState(comm, seed=1234, size=10, chunksize=3)
    local = rng.uniform(low=numpy.ones(rng.size) * 0.5)
    gathered = numpy.concatenate(comm.allgather(local), axis=0)
    reference = MPIRandomState(MPI.COMM_SELF, seed=1234, size=rng.csize, chunksize=rng.chunksize)
    assert_array_equal(gathered, reference.uniform(low=0.5))
@MPITest([4])
def test_mpirng_itemshape(comm):
    """Per-item shapes must match the single-rank reference generator."""
    rng = MPIRandomState(comm, seed=1234, size=10, chunksize=3)
    local = rng.uniform(low=numpy.ones(rng.size)[:, None] * 0.5, itemshape=(3,))
    gathered = numpy.concatenate(comm.allgather(local), axis=0)
    reference = MPIRandomState(MPI.COMM_SELF, seed=1234, size=rng.csize, chunksize=rng.chunksize)
    assert_array_equal(gathered, reference.uniform(low=0.5, itemshape=(3,)))
@MPITest([4])
def test_mpirng_poisson(comm):
    """Poisson draws must match the single-rank reference generator."""
    rng = MPIRandomState(comm, seed=1234, size=10, chunksize=3)
    local = rng.poisson(lam=numpy.ones(rng.size)[:, None] * 0.5, itemshape=(3,))
    gathered = numpy.concatenate(comm.allgather(local), axis=0)
    reference = MPIRandomState(MPI.COMM_SELF, seed=1234, size=rng.csize, chunksize=rng.chunksize)
    assert_array_equal(gathered, reference.poisson(lam=0.5, itemshape=(3,)))
|
bccpREPO_NAMEnbodykitPATH_START.@nbodykit_extracted@nbodykit-master@nbodykit@tests@test_mpirng.py@.PATH_END.py
|
{
"filename": "filt_cinv.py",
"repo_name": "carronj/plancklens",
"repo_path": "plancklens_extracted/plancklens-master/plancklens/filt/filt_cinv.py",
"type": "Python"
}
|
"""conjugate gradient solver CMB filtering module.
"""
from __future__ import print_function
from __future__ import absolute_import
import healpy as hp
import numpy as np
import pickle as pk
import os
from plancklens.helpers import mpi
from plancklens import utils
from plancklens.filt import filt_simple
from plancklens.qcinv import opfilt_pp, opfilt_tt, opfilt_tp
from plancklens.qcinv import util, util_alm
from plancklens.qcinv import multigrid, cd_solve
class cinv(object):
    """Base class for cached conjugate-gradient inverse-variance filters.

    Subclasses populate ``lib_dir`` with the cached transfer functions,
    isotropic filter approximations and the filtering mask; this class
    provides uniform read access to those files.
    """

    def __init__(self, lib_dir, lmax):
        self.lib_dir = lib_dir
        self.lmax = lmax

    def _load_cltxt(self, basename, lmax):
        # Loads a cached 1d spectrum from lib_dir, truncated at lmax (incl.).
        if lmax is None:
            lmax = self.lmax
        cl = np.loadtxt(os.path.join(self.lib_dir, basename))
        assert len(cl) > lmax, (len(cl), lmax)
        return cl[:lmax + 1]

    def get_tal(self, a, lmax=None):
        """Returns the cached inverse transfer function for field 'a' ('t', 'e' or 'b')."""
        assert a.lower() in ['t', 'e', 'b'], a
        return self._load_cltxt("tal.dat", lmax)

    def get_fmask(self):
        """Returns the cached filtering mask."""
        return hp.read_map(os.path.join(self.lib_dir, "fmask.fits.gz"))

    def get_ftl(self, lmax=None):
        """Returns the cached isotropic temperature filtering approximation."""
        return self._load_cltxt("ftl.dat", lmax)

    def get_fel(self, lmax=None):
        """Returns the cached isotropic E-polarization filtering approximation."""
        return self._load_cltxt("fel.dat", lmax)

    def get_fbl(self, lmax=None):
        """Returns the cached isotropic B-polarization filtering approximation."""
        return self._load_cltxt("fbl.dat", lmax)
class cinv_t(cinv):
    r"""Temperature-only inverse-variance (or Wiener-)filtering instance.

        Args:
            lib_dir: mask and other things will be cached there
            lmax: filtered alm's are reconstructed up to lmax
            nside: healpy resolution of maps to filter
            cl: fiducial CMB spectra used to filter the data (dict with 'tt' key)
            transf: CMB maps transfer function (array)
            ninv: inverse pixel variance map. Must be a list of paths or of healpy maps with consistent nside.
            rescal_cl: isotropic rescaling of the map prior to the cg inversion. This just makes the convergence criterium change a bit

        Note:
            The only difference to the original plancklens filter is the rescaling of the maps. In effect, the modes of :math:`D_\ell` rather than :math:`C_\ell` are reconstructed.
            This changes nothing to the iterations, but gives the dot product testing for convergence more sensitivity to relevant scales.

    """
    def __init__(self, lib_dir, lmax, nside, cl, transf, ninv, rescal_cl='default',
                 marge_monopole=True, marge_dipole=True, marge_maps=(), pcf='default', chain_descr=None):
        assert lib_dir is not None and lmax >= 1024 and nside >= 512, (lib_dir, lmax, nside)
        assert isinstance(ninv, list)
        super(cinv_t, self).__init__(lib_dir, lmax)
        if rescal_cl in ['default', None]:
            # Default rescaling sqrt(l (l+1) / 2pi), i.e. the C_l -> D_l weights.
            default_rescal = True
            rescal_cl = np.sqrt(np.arange(lmax + 1, dtype=float) * np.arange(1, lmax + 2, dtype=float) / 2. / np.pi)
        else:
            default_rescal = False
            # otherwise will throw index error if rescal_cl is too small
            assert len(rescal_cl) >= lmax + 1, [rescal_cl.shape, lmax]
        dl = {k: rescal_cl[:lmax + 1] ** 2 * cl[k][:lmax + 1] for k in cl.keys()} # rescaled cls (Dls by default)
        transf_dl = transf[:lmax + 1] * utils.cli(rescal_cl)

        self.nside = nside
        self.cl = cl
        self.dl = dl
        self.transf = transf[:lmax + 1]
        self.rescaled_transf =transf_dl
        self.rescal_cl = rescal_cl
        self.default_rescal = default_rescal # track default behaviour for hash key

        self.ninv = ninv
        self.marge_monopole = marge_monopole
        self.marge_dipole = marge_dipole
        self.marge_maps = marge_maps

        pcf = os.path.join(lib_dir, "dense.pk") if pcf == 'default' else '' # Dense matrices will be cached there.
        if chain_descr is None : chain_descr = \
            [[3, ["split(dense(" + pcf + "), 64, diag_cl)"], 256, 128, 3, 0.0, cd_solve.tr_cg, cd_solve.cache_mem()],
             [2, ["split(stage(3), 256, diag_cl)"], 512, 256, 3, 0.0, cd_solve.tr_cg, cd_solve.cache_mem()],
             [1, ["split(stage(2), 512, diag_cl)"], 1024, 512, 3, 0.0, cd_solve.tr_cg, cd_solve.cache_mem()],
             [0, ["split(stage(1), 1024, diag_cl)"], lmax, nside, np.inf, 1.0e-5, cd_solve.tr_cg, cd_solve.cache_mem()]]

        n_inv_filt = util.jit(opfilt_tt.alm_filter_ninv, ninv, transf_dl,
                        marge_monopole=marge_monopole, marge_dipole=marge_dipole, marge_maps=marge_maps)
        self.chain_descr = chain_descr
        self.chain = util.jit(multigrid.multigrid_chain, opfilt_tt, self.chain_descr, dl, n_inv_filt)
        # Only rank 0 writes the cached quantities; all ranks wait at the barrier.
        if mpi.rank == 0:
            if not os.path.exists(lib_dir):
                os.makedirs(lib_dir)

            if not os.path.exists(os.path.join(lib_dir, "filt_hash.pk")):
                pk.dump(self.hashdict(), open(os.path.join(lib_dir, "filt_hash.pk"), 'wb'), protocol=2)

            if not os.path.exists(os.path.join(self.lib_dir, "ftl.dat")):
                np.savetxt(os.path.join(self.lib_dir, "ftl.dat"), self._calc_ftl())

            if not os.path.exists(os.path.join(self.lib_dir, "tal.dat")):
                np.savetxt(os.path.join(self.lib_dir, "tal.dat"), self._calc_tal())

            if not os.path.exists(os.path.join(self.lib_dir, "fmask.fits.gz")):
                hp.write_map(os.path.join(self.lib_dir, "fmask.fits.gz"), self._calc_mask())

        mpi.barrier()
        # Guard against reusing a cache produced with different inputs.
        utils.hash_check(pk.load(open(os.path.join(lib_dir, "filt_hash.pk"), 'rb')), self.hashdict(), fn=os.path.join(lib_dir, "filt_hash.pk"))

    def _ninv_hash(self):
        # Hash array-valued ninv components by content; keep paths / scalars as-is.
        ret = []
        for ninv_comp in self.ninv:
            if isinstance(ninv_comp, np.ndarray) and ninv_comp.size > 1:
                ret.append(utils.clhash(ninv_comp))
            else:
                ret.append(ninv_comp)
                # Get only filename (useful for runs on different scratch systems of NERSC)
                # ret.append(os.path.basename(ninv_comp))
        return ret

    def _calc_ftl(self):
        """Isotropic filtering approximation, from the mean noise level of the ninv map over unmasked pixels."""
        ninv = self.chain.n_inv_filt.n_inv
        npix = len(ninv[:])
        NlevT_uKamin = np.sqrt(4. * np.pi / npix / np.sum(ninv) * len(np.where(ninv != 0.0)[0])) * 180. * 60. / np.pi
        print("cinv_t::noiseT_uk_arcmin = %.3f"%NlevT_uKamin)

        s_cls = self.cl
        b_transf = self.transf
        # A vanishing fiducial monopole/dipole only makes sense if it is marginalized over.
        if s_cls['tt'][0] == 0.: assert self.chain.n_inv_filt.marge_monopole
        if s_cls['tt'][1] == 0.: assert self.chain.n_inv_filt.marge_dipole

        ftl = utils.cli(s_cls['tt'][0:self.lmax + 1] + (NlevT_uKamin * np.pi / 180. / 60.) ** 2 *utils.cli(b_transf[0:self.lmax + 1] ** 2))
        if self.chain.n_inv_filt.marge_monopole: ftl[0] = 0.0
        if self.chain.n_inv_filt.marge_dipole: ftl[1] = 0.0

        return ftl

    def _calc_tal(self):
        # Pseudo-inverse of the transfer function (zeros where it vanishes).
        return utils.cli(self.transf)

    def _calc_mask(self):
        # Binary mask: pixels with strictly positive inverse noise variance.
        ninv = self.chain.n_inv_filt.n_inv
        assert hp.npix2nside(len(ninv)) == self.nside
        return np.where(ninv > 0, 1., 0.)

    def hashdict(self):
        """Dictionary uniquely identifying the filter configuration (used for cache-consistency checks)."""
        hd = {'lmax': self.lmax,
                'nside': self.nside,
                'cltt': utils.clhash(self.cl['tt'][:self.lmax + 1]),
                'transf': utils.clhash(self.transf[:self.lmax + 1]),
                'ninv': self._ninv_hash(),
                'marge_monopole': self.marge_monopole,
                'marge_dipole': self.marge_dipole,
                'marge_maps': self.marge_maps}
        # Don't hash rescaling unless deviates from default behaviour (Dls) ? Otherwise will produce key error in hash check for
        # maps filtered before this change.
        # Alternatively: Could automatically update hash pk if missing 'rescal_cl' key?
        if self.default_rescal is False:
            hd['rescal_cl'] = utils.clhash(self.rescal_cl)
        return hd

    def apply_ivf(self, tmap, soltn=None):
        """Applies the inverse-variance filter to a temperature map.

            Args:
                tmap: input healpy temperature map.
                soltn: optional starting guess for the cg-solution (alm array); copied, not modified.

            Returns:
                filtered alm array up to self.lmax.

        """
        if soltn is None:
            talm = np.zeros(hp.Alm.getsize(self.lmax), dtype=complex)
        else:
            talm = soltn.copy()
        self.chain.solve(talm, tmap)
        # The chain solves the rescaled (D_l-like) system; scale the solution
        # back accordingly (see class note on the rescaling).
        hp.almxfl(talm, self.rescal_cl, inplace=True)
        return talm
class cinv_p(cinv):
    r"""Polarization-only inverse-variance (or Wiener-)filtering instance.

        Args:
            lib_dir: mask and other things will be cached there
            lmax: filtered alm's are reconstructed up to lmax
            nside: healpy resolution of maps to filter
            cl: fiducial CMB spectra used to filter the data (dict with 'ee', 'bb' keys)
            transf: CMB E-mode polarization transfer function (array)
            ninv: inverse pixel variance maps. Must be a list of either 3 (QQ, QU, UU) or 1 (QQ = UU noise) elements.
                These elements are themselves lists of paths or of healpy maps with consistent nside.
            transf_blm(optional): B-polarization transfer function (if different from E-mode one)

        Note:
            This implementation now supports template projection

    """
    def __init__(self, lib_dir, lmax, nside, cl, transf, ninv, pcf='default',
                 chain_descr=None, transf_blm=None, marge_qmaps=(), marge_umaps=()):
        assert lib_dir is not None and lmax >= 1024 and nside >= 512, (lib_dir, lmax, nside)
        super(cinv_p, self).__init__(lib_dir, lmax)

        self.nside = nside
        self.cl = cl
        self.transf_e = transf
        self.transf_b = transf if transf_blm is None else transf_blm
        # Effective transfer function (mean of E and B), used for tal.dat and hashing.
        self.transf = transf if transf_blm is None else 0.5 * self.transf_e + 0.5 * self.transf_b
        self.ninv = ninv

        pcf = os.path.join(lib_dir, "dense.pk") if pcf == 'default' else None
        if chain_descr is None: chain_descr = \
            [[2, ["split(dense(" + pcf + "), 32, diag_cl)"], 512, 256, 3, 0.0, cd_solve.tr_cg,cd_solve.cache_mem()],
             [1, ["split(stage(2), 512, diag_cl)"], 1024, 512, 3, 0.0, cd_solve.tr_cg, cd_solve.cache_mem()],
             [0, ["split(stage(1), 1024, diag_cl)"], lmax, nside, np.inf, 1.0e-5, cd_solve.tr_cg, cd_solve.cache_mem()]]

        n_inv_filt = util.jit(opfilt_pp.alm_filter_ninv, ninv, transf[0:lmax + 1],
                       b_transf_b=transf_blm, marge_umaps=marge_umaps, marge_qmaps=marge_qmaps)
        self.chain = util.jit(multigrid.multigrid_chain, opfilt_pp, chain_descr, cl, n_inv_filt)

        # Only rank 0 writes the cached quantities; all ranks wait at the barrier.
        if mpi.rank == 0:
            if not os.path.exists(lib_dir):
                os.makedirs(lib_dir)

            if not os.path.exists(os.path.join(lib_dir, "filt_hash.pk")):
                pk.dump(self.hashdict(), open(os.path.join(lib_dir, "filt_hash.pk"), 'wb'), protocol=2)

            if not os.path.exists(os.path.join(self.lib_dir, "fbl.dat")):
                fel, fbl = self._calc_febl()
                np.savetxt(os.path.join(self.lib_dir, "fel.dat"), fel)
                np.savetxt(os.path.join(self.lib_dir, "fbl.dat"), fbl)

            if not os.path.exists(os.path.join(self.lib_dir, "tal.dat")):
                np.savetxt(os.path.join(self.lib_dir, "tal.dat"), self._calc_tal())

            if not os.path.exists(os.path.join(self.lib_dir, "fmask.fits.gz")):
                hp.write_map(os.path.join(self.lib_dir, "fmask.fits.gz"), self._calc_mask())

        mpi.barrier()
        # Guard against reusing a cache produced with different inputs.
        utils.hash_check(pk.load(open(os.path.join(lib_dir, "filt_hash.pk"), 'rb')), self.hashdict(), fn=os.path.join(lib_dir, "filt_hash.pk"))

    def hashdict(self):
        """Dictionary uniquely identifying the filter configuration."""
        return {'lmax': self.lmax,
                'nside': self.nside,
                'clee': utils.clhash(self.cl.get('ee', np.array([0.]))),
                'cleb': utils.clhash(self.cl.get('eb', np.array([0.]))),
                'clbb': utils.clhash(self.cl.get('bb', np.array([0.]))),
                'transf':utils.clhash(self.transf),
                'ninv': self._ninv_hash()}

    def apply_ivf(self, tmap, soltn=None):
        """Applies the filter to input Q, U maps.

            Args:
                tmap: length-2 sequence with the Q and U healpy maps.
                soltn: optional (elm, blm) starting guess; used in place.

            Returns:
                (elm, blm) filtered alm arrays.

        """
        if soltn is not None:
            assert len(soltn) == 2
            assert hp.Alm.getlmax(soltn[0].size) == self.lmax, (hp.Alm.getlmax(soltn[0].size), self.lmax)
            assert hp.Alm.getlmax(soltn[1].size) == self.lmax, (hp.Alm.getlmax(soltn[1].size), self.lmax)
            talm = util_alm.eblm([soltn[0], soltn[1]])
        else:
            telm = np.zeros(hp.Alm.getsize(self.lmax), dtype=complex)
            tblm = np.zeros(hp.Alm.getsize(self.lmax), dtype=complex)
            talm = util_alm.eblm([telm, tblm])

        assert len(tmap) == 2
        self.chain.solve(talm, [tmap[0], tmap[1]])

        return talm.elm, talm.blm

    def _calc_febl(self):
        """Isotropic E and B filtering approximations, from the mean noise levels of the ninv maps."""
        assert not 'eb' in self.chain.s_cls.keys()

        if len(self.chain.n_inv_filt.n_inv) == 1:
            ninv = self.chain.n_inv_filt.n_inv[0]
            npix = len(ninv)
            NlevP_uKamin = np.sqrt(
                4. * np.pi / npix / np.sum(ninv) * len(np.where(ninv != 0.0)[0])) * 180. * 60. / np.pi
        else:
            assert len(self.chain.n_inv_filt.n_inv) == 3
            ninv = self.chain.n_inv_filt.n_inv
            # Average of the QQ and UU noise levels.
            NlevP_uKamin= 0.5 * np.sqrt(
                4. * np.pi / len(ninv[0]) / np.sum(ninv[0]) * len(np.where(ninv[0] != 0.0)[0])) * 180. * 60. / np.pi
            NlevP_uKamin += 0.5 * np.sqrt(
                4. * np.pi / len(ninv[2]) / np.sum(ninv[2]) * len(np.where(ninv[2] != 0.0)[0])) * 180. * 60. / np.pi

        print("cinv_p::noiseP_uk_arcmin = %.3f"%NlevP_uKamin)

        s_cls = self.chain.s_cls
        b_transf_e = self.chain.n_inv_filt.b_transf_e
        b_transf_b = self.chain.n_inv_filt.b_transf_b
        fel = utils.cli(s_cls['ee'][:self.lmax + 1] + (NlevP_uKamin * np.pi / 180. / 60.) ** 2 * utils.cli(b_transf_e[0:self.lmax + 1] ** 2))
        fbl = utils.cli(s_cls['bb'][:self.lmax + 1] + (NlevP_uKamin * np.pi / 180. / 60.) ** 2 * utils.cli(b_transf_b[0:self.lmax + 1] ** 2))

        # Zero out the first two multipoles.
        fel[0:2] *= 0.0
        fbl[0:2] *= 0.0

        return fel, fbl

    def _calc_tal(self):
        # Pseudo-inverse of the effective transfer function.
        return utils.cli(self.transf)

    def _calc_mask(self):
        # Binary mask: product over components of (inverse noise variance > 0).
        mask = np.ones(hp.nside2npix(self.nside), dtype=float)
        for ninv in self.chain.n_inv_filt.n_inv:
            assert hp.npix2nside(len(ninv)) == self.nside
            mask *= (ninv > 0.)
        return mask

    def _ninv_hash(self):
        # Hash array-valued components of the first ninv list by content.
        ret = []
        for ninv_comp in self.ninv[0]:
            if isinstance(ninv_comp, np.ndarray) and ninv_comp.size > 1:
                ret.append(utils.clhash(ninv_comp))
            else:
                ret.append(ninv_comp)
        return [ret]
class cinv_tp:
def __init__(self, lib_dir, lmax, nside, cl, transf, ninv,
marge_maps_t=(), marge_monopole=False, marge_dipole=False,
pcf='default', rescal_cl='default', chain_descr=None, transf_p=None):
"""Instance for joint temperature-polarization filtering
Args:
lib_dir: a few quantities might get cached there
lmax: CMB filtering performed up to multipole lmax
nside: healpy resolution of the input maps
cl: fiducial CMB spectra used to filter the data (dict with 'tt', 'te', 'ee', 'bb' keys)
transf: CMB transfer function in temperature
ninv: list of lists with mask paths and / or inverse pixel noise levels.
TT, (QQ + UU) / 2 if len(ninv) == 2 or TT, QQ, QU UU if == 4
e.g. [[iNevT,mask1,mask2,..],[iNevP,mask1,mask2...]]
marge_maps_t: maps to project out in the filtering (T-part)
marge_monopole: marginalizes out the T monopole if set
marge_dipole: marginalizes out the T dipole if set
chain_descr: preconditioner mulitgrid chain description (if different from default)
transf_p: polarization transfer function (if different from temperature)
"""
assert (lmax >= 1024)
assert (nside >= 512)
assert len(ninv) == 2 or len(ninv) == 4 # TT, (QQ + UU)/2 or TT,QQ,QU,UU
if rescal_cl == 'default':
rescal_cl = {a: np.sqrt(np.arange(lmax + 1, dtype=float) * np.arange(1, lmax + 2, dtype=float) / 2. / np.pi) for a in ['t', 'e', 'b']}
elif rescal_cl is None:
rescal_cl = {a: np.ones(lmax + 1, dtype=float) for a in ['t', 'e', 'b']}
elif rescal_cl == 'tonly':
rescal_cl = {a: np.ones(lmax + 1, dtype=float) for a in ['e', 'b']}
rescal_cl['t'] = np.sqrt(np.arange(lmax + 1, dtype=float) * np.arange(1, lmax + 2, dtype=float) / 2. / np.pi)
else:
assert 0
for k in rescal_cl.keys():
rescal_cl[k] /= np.mean(rescal_cl[k]) # in order not mess around with the TEB relative weights of the spectra
dl = {k: rescal_cl[k[0]] * rescal_cl[k[1]] * cl[k][:lmax + 1] for k in cl.keys()} # rescaled cls (Dls by default)
if transf_p is None:
transf_p = transf
transf_dls = {a: transf_p[:lmax + 1] * utils.cli(rescal_cl[a]) for a in ['e', 'b']}
transf_dls['t'] = transf[:lmax + 1] * utils.cli(rescal_cl['t'])
self.lmax = lmax
self.nside = nside
self.cl = cl
self.transf_t = transf
self.transf_p = transf_p
self.ninv = ninv
self.marge_maps_t = marge_maps_t
self.marge_maps_p = []
self.lib_dir = lib_dir
self.rescal_cl = rescal_cl
if chain_descr is None:
pcf = lib_dir + "/dense_tp.pk" if pcf == 'default' else None
chain_descr = [[3, ["split(dense(" + pcf + "), 64, diag_cl)"], 256, 128, 3, 0.0, cd_solve.tr_cg,
cd_solve.cache_mem()],
[2, ["split(stage(3), 256, diag_cl)"], 512, 256, 3, 0.0, cd_solve.tr_cg,
cd_solve.cache_mem()],
[1, ["split(stage(2), 512, diag_cl)"], 1024, 512, 3, 0.0, cd_solve.tr_cg,
cd_solve.cache_mem()],
[0, ["split(stage(1), 1024, diag_cl)"], lmax, nside, np.inf, 1.0e-5, cd_solve.tr_cg,
cd_solve.cache_mem()]]
n_inv_filt = util.jit(opfilt_tp.alm_filter_ninv, ninv, transf_dls['t'], b_transf_e=transf_dls['e'], b_transf_b=transf_dls['b'],
marge_maps_t=marge_maps_t, marge_monopole=marge_monopole, marge_dipole=marge_dipole)
self.chain = util.jit(multigrid.multigrid_chain, opfilt_tp, chain_descr, dl, n_inv_filt)
if mpi.rank == 0:
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
if not os.path.exists(os.path.join(lib_dir, "filt_hash.pk")):
pk.dump(self.hashdict(), open(os.path.join(lib_dir, "filt_hash.pk"), 'wb'), protocol=2)
if not os.path.exists(os.path.join(lib_dir, "fal.pk")):
pk.dump(self._calc_fal(), open(os.path.join(lib_dir, "fal.pk"), 'wb'), protocol=2)
if not os.path.exists(os.path.join(self.lib_dir, "fmask.fits.gz")):
fmask = self.calc_mask()
hp.write_map(os.path.join(self.lib_dir, "fmask.fits.gz"), fmask)
mpi.barrier()
utils.hash_check(pk.load(open(os.path.join(lib_dir, "filt_hash.pk"), 'rb')), self.hashdict(), fn=os.path.join(lib_dir, "filt_hash.pk"))
def hashdict(self):
ret = {'lmax': self.lmax,
'nside': self.nside,
'rescal_cl':{k: utils.clhash(self.rescal_cl[k]) for k in self.rescal_cl.keys()},
'cls':{k : utils.clhash(self.cl[k]) for k in self.cl.keys()},
'transf': utils.clhash(self.transf_t),
'ninv': self._ninv_hash(),
'marge_maps_t': self.marge_maps_t,
'marge_maps_p': self.marge_maps_p}
if self.transf_p is not self.transf_t:
ret['transf_p'] = utils.clhash(self.transf_p)
return ret
def get_fal(self):
return pk.load(open(os.path.join(self.lib_dir, "fal.pk"), 'rb'))
def _calc_fal(self):
"""Isotropic approximation to filtering matrix
Used e.g. for plancklens response calculations
"""
ninv = self.chain.n_inv_filt.n_inv
assert len(ninv) == 2, 'implement this, easy'
assert ninv[0].size == 12 * self.nside ** 2
assert ninv[1].size == 12 * self.nside ** 2
npix = 12 * self.nside ** 2
nlevt = np.sqrt(4. * np.pi / npix / np.sum(ninv[0]) * len(np.where(ninv[0] != 0.0)[0])) * 180. * 60. / np.pi
nlevp = np.sqrt(4. * np.pi / npix / np.sum(ninv[1]) * len(np.where(ninv[1] != 0.0)[0])) * 180. * 60. / np.pi
print("cinv_tp::noiseT_uk_arcmin = %.3f"%nlevt)
print("cinv_tp::noiseP_uk_arcmin = %.3f"%nlevp)
fals = np.zeros((self.lmax + 1, 3, 3), dtype=float)
for i, a in enumerate(['t', 'e', 'b']):
for j, b in enumerate(['t', 'e', 'b']):
fals[:, i, j] = self.cl.get(a + b, self.cl.get(b + a, np.zeros(self.lmax + 1)))[:self.lmax+1]
fals[1:, 0, 0] += ( (nlevt / 180 / 60 * np.pi) / self.transf_t[1:self.lmax + 1] ) ** 2
fals[2:, 1, 1] += ( (nlevp / 180 / 60 * np.pi) / self.transf_p[2:self.lmax + 1] ) ** 2
fals[2:, 2, 2] += ( (nlevp / 180 / 60 * np.pi) / self.transf_p[2:self.lmax + 1] ) ** 2
fals = np.linalg.pinv(fals)
fals_dict = {}
for i, a in enumerate(['t', 'e', 'b']):
for j, b in enumerate(['t', 'e', 'b'][i:]):
if np.any(fals[:, i, i + j]):
fals_dict[a + b] = fals[:, i, i + j]
return fals_dict
def calc_mask(self):
mask = np.ones(hp.nside2npix(self.nside), dtype=float)
for ninv in self.chain.n_inv_filt.n_inv:
assert hp.npix2nside(len(ninv)) == self.nside
mask *= (ninv > 0.)
return mask
def get_fmask(self):
return hp.read_map(os.path.join(self.lib_dir, "fmask.fits.gz"))
    def apply_ivf(self, tqumap, soltn=None, apply_fini=''):
        """Inverse-variance filter a T, Q, U map triplet.

        Args:
            tqumap: length-3 sequence of T, Q, U healpy maps.
            soltn (optional): (tlm, elm, blm) starting guess for the solver.
                NOTE(review): rescaled in place, i.e. the caller's arrays
                are mutated.
            apply_fini: forwarded to the conjugate-gradient chain solver.

        Returns:
            (tlm, elm, blm): filtered alms, rescaled by ``rescal_cl``.
        """
        assert (len(tqumap) == 3)
        if soltn is None:
            # No starting point provided: begin the CG search from zero alms.
            ttlm = np.zeros(hp.Alm.getsize(self.lmax), dtype=complex)
            telm = np.zeros(hp.Alm.getsize(self.lmax), dtype=complex)
            tblm = np.zeros(hp.Alm.getsize(self.lmax), dtype=complex)
        else:
            # Rescale the provided guess into the internal convention in place.
            ttlm, telm, tblm = soltn
            hp.almxfl(ttlm, self.rescal_cl['t'], inplace=True)
            hp.almxfl(telm, self.rescal_cl['e'], inplace=True)
            hp.almxfl(tblm, self.rescal_cl['b'], inplace=True)
        talm = opfilt_tp.teblm([ttlm, telm, tblm])
        # Solve the filtering linear system; the solution lands in talm.
        self.chain.solve(talm, [tqumap[0], tqumap[1], tqumap[2]], apply_fini=apply_fini)
        # Apply the rescaling once more to map the solution back.
        hp.almxfl(talm.tlm, self.rescal_cl['t'], inplace=True)
        hp.almxfl(talm.elm, self.rescal_cl['e'], inplace=True)
        hp.almxfl(talm.blm, self.rescal_cl['b'], inplace=True)
        return talm.tlm, talm.elm, talm.blm
def _ninv_hash(self):
ret = []
for ninv_comp in self.ninv:
if isinstance(ninv_comp, np.ndarray) and ninv_comp.size > 1:
ret.append(utils.clhash(ninv_comp))
else:
ret.append(ninv_comp)
return [ret]
class library_cinv_sepTP(filt_simple.library_sepTP):
    """Library to perform inverse-variance filtering of a simulation library.

    Suitable for separate temperature and polarization filtering.

    Args:
        lib_dir (str): a place to cache the maps
        sim_lib: simulation library instance (requires get_sim_tmap, get_sim_pmap methods)
        cinvt: temperature-only filtering library
        cinvp: polarization-only filtering library
        cl_weights (dict): spectra used to build the Wiener-filtered leg
        soltn_lib (optional): simulation library providing starting guesses for the filtering.
    """
    def __init__(self, lib_dir, sim_lib, cinvt:cinv_t, cinvp:cinv_p, cl_weights:dict, soltn_lib=None):
        self.cinv_t = cinvt
        self.cinv_p = cinvp
        super(library_cinv_sepTP, self).__init__(lib_dir, sim_lib, cl_weights, soltn_lib=soltn_lib)
        if mpi.rank == 0:
            fname_mask = os.path.join(self.lib_dir, "fmask.fits.gz")
            if not os.path.exists(fname_mask):
                fmask = self.cinv_t.get_fmask()
                # The T and P libraries must agree on the filtering mask.
                assert np.all(fmask == self.cinv_p.get_fmask())
                hp.write_map(fname_mask, fmask)
        mpi.barrier()
        # Close the hash file deterministically (the original leaked the handle).
        fn_hash = os.path.join(lib_dir, "filt_hash.pk")
        with open(fn_hash, 'rb') as f:
            utils.hash_check(pk.load(f), self.hashdict(), fn=fn_hash)
    def hashdict(self):
        """Hashes identifying the T/P filtering libraries and the sim library."""
        return {'cinv_t': self.cinv_t.hashdict(),
                'cinv_p': self.cinv_p.hashdict(),
                'sim_lib': self.sim_lib.hashdict()}
    def get_fmask(self):
        """Read the cached joint filtering mask."""
        return hp.read_map(os.path.join(self.lib_dir, "fmask.fits.gz"))
    def get_tal(self, a, lmax=None):
        """Dispatch the transfer-function request to the T or P library."""
        assert (a.lower() in ['t', 'e', 'b']), a
        if a.lower() == 't':
            return self.cinv_t.get_tal(a, lmax=lmax)
        else:
            return self.cinv_p.get_tal(a, lmax=lmax)
    def get_ftl(self, lmax=None):
        return self.cinv_t.get_ftl(lmax=lmax)
    def get_fel(self, lmax=None):
        return self.cinv_p.get_fel(lmax=lmax)
    def get_fbl(self, lmax=None):
        return self.cinv_p.get_fbl(lmax=lmax)
    def _apply_ivf_t(self, tmap, soltn=None):
        return self.cinv_t.apply_ivf(tmap, soltn=soltn)
    def _apply_ivf_p(self, pmap, soltn=None):
        return self.cinv_p.apply_ivf(pmap, soltn=soltn)
    def get_tmliklm(self, idx):
        """Wiener-filtered temperature alm of simulation `idx`."""
        return hp.almxfl(self.get_sim_tlm(idx), self.cinv_t.cl['tt'])
    def get_emliklm(self, idx):
        # Bug fix: the original `hasattr(self.cinv_p.cl, 'eb')` is always False
        # on a dict-like cls container (subscript access above shows it is
        # mapping-like), so the no-EB-weight guard never fired. Reject only a
        # genuinely non-zero EB entry.
        assert not np.any(self.cinv_p.cl.get('eb', 0.))
        return hp.almxfl(self.get_sim_elm(idx), self.cinv_p.cl['ee'])
    def get_bmliklm(self, idx):
        # Same EB guard as get_emliklm (see comment there).
        assert not np.any(self.cinv_p.cl.get('eb', 0.))
        return hp.almxfl(self.get_sim_blm(idx), self.cinv_p.cl['bb'])
class library_cinv_jTP(filt_simple.library_jTP):
    """Library to perform inverse-variance filtering of a simulation library.

    Suitable for joint temperature and polarization filtering.

    Args:
        lib_dir (str): a place to cache the maps
        sim_lib: simulation library instance (requires get_sim_tmap, get_sim_pmap methods)
        cinv_jtp: temperature and pol joint filtering library
        cl_weights: spectra used to build the Wiener filtered leg from the inverse-variance maps
        soltn_lib (optional): simulation library providing starting guesses for the filtering.
    """
    def __init__(self, lib_dir:str, sim_lib, cinv_jtp:cinv_tp, cl_weights:dict, soltn_lib=None):
        self.cinv_tp = cinv_jtp
        super(library_cinv_jTP, self).__init__(lib_dir, sim_lib, cl_weights, soltn_lib=soltn_lib)
        if mpi.rank == 0:
            fname_mask = os.path.join(self.lib_dir, "fmask.fits.gz")
            if not os.path.exists(fname_mask):
                # Bug fix: the original asserted fmask against a *second* call to
                # the very same getter — trivially true and a wasted disk read.
                # A single read is sufficient here.
                fmask = self.cinv_tp.get_fmask()
                hp.write_map(fname_mask, fmask)
        mpi.barrier()
        # Close the hash file deterministically (the original leaked the handle).
        fn_hash = os.path.join(lib_dir, "filt_hash.pk")
        with open(fn_hash, 'rb') as f:
            utils.hash_check(pk.load(f), self.hashdict(), fn=fn_hash)
    def hashdict(self):
        """Hashes identifying the joint filter, cl weights and sim library."""
        return {'cinv_tp': self.cinv_tp.hashdict(),
                'clw':{k:utils.clhash(self.cl[k]) for k in self.cl.keys()},
                'sim_lib': self.sim_lib.hashdict()}
    def get_fmask(self):
        """Read the cached filtering mask."""
        return hp.read_map(os.path.join(self.lib_dir, "fmask.fits.gz"))
    def get_fal(self, lmax=None):
        # NOTE(review): the cinv_tp.get_fal defined in this module takes no
        # lmax argument — verify which filtering class is used here.
        return self.cinv_tp.get_fal(lmax=lmax)
    def _apply_ivf(self, tqumap, soltn=None):
        return self.cinv_tp.apply_ivf(tqumap, soltn=soltn)
|
carronjREPO_NAMEplancklensPATH_START.@plancklens_extracted@plancklens-master@plancklens@filt@filt_cinv.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "ali-beheshti/Astro-Paint",
"repo_path": "Astro-Paint_extracted/Astro-Paint-master/docs/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('.'))
# NOTE(review): debug output left in the build configuration — consider removing.
print(sys.path)
# -- Project information -----------------------------------------------------
project = 'AstroPaint'
copyright = '2020, Siavash Yasini'
author = 'Siavash Yasini'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
master_doc = 'index'
extensions = ['recommonmark', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Parse NumPy-style (not Google-style) docstrings with napoleon.
napoleon_google_docstring = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
ali-beheshtiREPO_NAMEAstro-PaintPATH_START.@Astro-Paint_extracted@Astro-Paint-master@docs@conf.py@.PATH_END.py
|
{
"filename": "figure_pooling.ipynb",
"repo_name": "deepsphere/deepsphere-cosmo-tf1",
"repo_path": "deepsphere-cosmo-tf1_extracted/deepsphere-cosmo-tf1-master/figure_pooling.ipynb",
"type": "Jupyter Notebook"
}
|
# [DeepSphere]: a spherical convolutional neural network
[DeepSphere]: https://github.com/SwissDataScienceCenter/DeepSphere
[Nathanaël Perraudin](https://perraudin.info), [Michaël Defferrard](http://deff.ch), Tomasz Kacprzak, Raphael Sgier
# Figure: pooling
```python
import os;
import healpy as hp; import numpy as np;
from IPython.core.display import display, HTML; display(HTML("<style>.container { width:100% !important; }</style>"))
%pylab inline
%load_ext autoreload
%autoreload 2
```
```python
pl = plt
```
```python
pathfig = './figures/'
os.makedirs(pathfig, exist_ok=True)
```
```python
def make_ball(map_test1, cmap=cm.gray_r, sub=None):
    """Plot a HEALPix map as an orthographic half-sky ball with pixel dots.

    Args:
        map_test1: NESTED-ordered healpy map to display.
        cmap: matplotlib colormap; NOTE(review): mutated in place
            (under/bad colors are set on the shared object).
        sub: optional subplot spec forwarded to orthview.
    """
    cmap.set_under('w')
    cmap.set_bad('lightgray')
    dot_size=10
    rot = (0,30,345)
    vmin, vmax = -0.5, 1.5
    # map_test1_bw = map_test1.astype(np.float)/np.max(map_test1);
    hp.visufunc.orthview(map=map_test1, half_sky=True, title='', rot=rot, cmap=cmap, cbar=False, hold=True, nest=True, min=vmin, max=vmax, notext=True, sub=sub);
    # Overlay one black dot per pixel center.
    theta, phi = hp.pix2ang(hp.npix2nside(len(map_test1)), range(len(map_test1)), nest=True);
    hp.projscatter(theta, phi, c='k', s=dot_size);
    hp.graticule();
    # Two extra single-meridian graticules outline the visible hemisphere.
    hp.graticule(dmer=360,dpar=360,alpha=1, rot=(0,0,15), local=True);
    hp.graticule(dmer=360,dpar=360,alpha=1, rot=(0,0,195), local=True);
    # pl.savefig('test_fig1.pdf', bbox_inches='tight', transparent=True);
```
```python
# Build nested maps marking one base pixel at several resolutions.
npix = hp.nside2npix(16)
map_data = np.arange(npix)
# map_data = hp.read_map('/Users/tomek/notes/160223_advanced_stats_methods/ASM_2017/ASM/Lectures/CMB/COM_CMB_IQU-smica_1024_R2.02_full.fits')
# map_data = hp.ud_grade(map_data, nside_out=16).astype(np.float32)
# map_data -= np.min(map_data)
# map_data /= np.max(map_data)
npix = hp.nside2npix(1)
# Bug fix: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `float` is the documented replacement.
map_select_1 = (np.arange(npix)==0).astype(float)
map_select_1[map_select_1==0] = hp.UNSEEN
map_select_2 = hp.ud_grade(map_select_1, nside_out=2, order_in='NESTED', order_out='NESTED')
map_select_4 = hp.ud_grade(map_select_1, nside_out=4, order_in='NESTED', order_out='NESTED')
# map_select_1[map_select_1!=hp.UNSEEN] = np.arange(np.count_nonzero(map_select_1!=hp.UNSEEN))
# Ramp the visible (non-UNSEEN) pixels from 0 to 1 for coloring.
n_filled = float(np.count_nonzero(map_select_2!=hp.UNSEEN))
map_select_2[map_select_2!=hp.UNSEEN] = np.arange(n_filled, dtype=np.float64)/n_filled
n_filled = float(np.count_nonzero(map_select_4!=hp.UNSEEN))
map_select_4[map_select_4!=hp.UNSEEN] = np.arange(n_filled, dtype=np.float64)/n_filled
# map_select_2[map_select_2==hp.UNSEEN] = np.nan
# map_select_4[map_select_4==hp.UNSEEN] = np.nan
```
```python
nx, ny = 3,1
# pl.figure()
# fig, ax = plt.subplots(ncols=nx, nrows=ny, figsize=(nx*5, ny*7))
# pl.subplot(ny,nx,1)
# pl.subplot(ny,nx,2)
# pl.axes(ax[1])
# make_ball(map_select_2, cmap=pl.cm.RdYlBu)
# pl.subplot(ny,nx,3)
# pl.axes(ax[2])
pl.figure()
# newax = pl.gcf().add_axes([0.1, 0.1, 0.8, 0.8],frameon=False)
# pl.axes(newax)
make_ball(map_select_1, cmap=pl.cm.RdYlBu)
filename_plot = os.path.join(pathfig,'figure_pooling1.svg')
pl.savefig(filename_plot, bbox_inches='tight', transparent=True); print('saved {}'.format(filename_plot))
# pl.axes(ax[1])
pl.figure()
# newax = pl.gcf().add_axes([0.1, 0.4, 0.8, 0.8],frameon=False)
# pl.axes(newax)
make_ball(map_select_2, cmap=pl.cm.RdYlBu)
filename_plot = os.path.join(pathfig,'figure_pooling2.svg')
pl.savefig(filename_plot, bbox_inches='tight', transparent=True); print('saved {}'.format(filename_plot))
# pl.axes(ax[0])
pl.figure()
# newax = pl.gcf().add_axes([0.1, 0.1, 0.8, 0.8],frameon=False)
# pl.axes(newax)
make_ball(map_select_4, cmap=pl.cm.RdYlBu)
filename_plot = os.path.join(pathfig,'figure_pooling3.svg')
pl.savefig(filename_plot, bbox_inches='tight', transparent=True); print('saved {}'.format(filename_plot))
# , sub=(ny, nx, 3)
# pl.subplots_adjust(wspace=0.01, hspace=0.01)
# import matplotlib.pyplot as plt
# newax = pl.gcf().add_axes([0.1, 0.1, 0.8, 0.8],frameon=False)
# print(newax)
# newax.plot([22, 50], [70,72], 'k-')
# pl.xlim(0,100)
# pl.ylim(0,100)
# pl.grid()
# pl.xticks([])
# pl.yticks([])
# newax.axis('equal')
# plt.show()
filename_plot = os.path.join(pathfig,'figure_pooling.pdf')
pl.savefig(filename_plot, bbox_inches='tight'); print('saved {}'.format(filename_plot))
```
```python
import svgutils.compose as sc
from IPython.display import SVG # /!\ note the 'SVG' function also in svgutils.compose
```
```python
lw=3
point2=(377,87)
point3=(603,72)
```
```python
svg_fig = sc.Figure("22.5cm", "6.75cm",
# sc.Panel(sc.SVG("./figure_pooling1.svg").scale(1)).move('1cm', '1cm')
sc.SVG(os.path.join(pathfig, "figure_pooling3.svg")).scale(1).move(0,0),
sc.SVG(os.path.join(pathfig, "figure_pooling2.svg")).scale(1).move(250, 0),
sc.SVG(os.path.join(pathfig, "figure_pooling1.svg")).scale(1).move(500, 0),
sc.Line( ((122,80), point2), width=lw, color='darkred'),
sc.Line( ((148,93), point2), width=lw, color='darkred'),
sc.Line( ((153,71), point2), width=lw, color='darkred'),
sc.Line( ((124,107), point2), width=lw, color='darkred'),
sc.Line( ((384,87), point3), width=lw, color='darkblue'),
sc.Line( ((396,44), point3), width=lw, color='darkblue'),
sc.Line( ((348,55), point3), width=lw, color='darkblue'),
sc.Line( ((337,105), point3), width=lw, color='darkblue')
)
svg_fig.save(os.path.join(pathfig, "figure_pooling_svg.svg"))
SVG(os.path.join(pathfig, 'figure_pooling_svg.svg'))
```
```python
map_select_8 = hp.ud_grade(map_select_1, nside_out=8, order_in='NESTED', order_out='NESTED')
n_filled = float(np.count_nonzero(map_select_8!=hp.UNSEEN))
map_select_8[map_select_8!=hp.UNSEEN] = np.arange(n_filled, dtype=np.float64)/n_filled
make_ball(map_select_8, cmap=plt.cm.RdYlBu)
filename_plot = os.path.join(pathfig,'figure_pooling4.svg')
plt.savefig(filename_plot, bbox_inches='tight', transparent=True); print('saved {}'.format(filename_plot))
filename_plot = os.path.join(pathfig,'figure_pooling4.pdf')
plt.savefig(filename_plot, bbox_inches='tight'); print('saved {}'.format(filename_plot))
```
|
deepsphereREPO_NAMEdeepsphere-cosmo-tf1PATH_START.@deepsphere-cosmo-tf1_extracted@deepsphere-cosmo-tf1-master@figure_pooling.ipynb@.PATH_END.py
|
{
"filename": "conv1d_transpose.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/convolutional/conv1d_transpose.py",
"type": "Python"
}
|
from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_conv_transpose import BaseConvTranspose
@keras_export(
    [
        "keras.layers.Conv1DTranspose",
        "keras.layers.Convolution1DTranspose",
    ]
)
class Conv1DTranspose(BaseConvTranspose):
    """1D transposed convolution layer.
    The need for transposed convolutions generally arises from the desire to use
    a transformation going in the opposite direction of a normal convolution,
    i.e., from something that has the shape of the output of some convolution
    to something that has the shape of its input while maintaining a
    connectivity pattern that is compatible with said convolution.
    Args:
        filters: int, the dimension of the output space (the number of filters
            in the transpose convolution).
        kernel_size: int or tuple/list of 1 integer, specifying the size of the
            transposed convolution window.
        strides: int or tuple/list of 1 integer, specifying the stride length
            of the transposed convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input such that output has the same
            height/width dimension as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of 1 integers, specifying the dilation
            rate to use for dilated transposed convolution.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        kernel_initializer: Initializer for the convolution kernel. If `None`,
            the default initializer (`"glorot_uniform"`) will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        kernel_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        kernel_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
    Input shape:
    - If `data_format="channels_last"`:
        A 3D tensor with shape: `(batch_shape, steps, channels)`
    - If `data_format="channels_first"`:
        A 3D tensor with shape: `(batch_shape, channels, steps)`
    Output shape:
    - If `data_format="channels_last"`:
        A 3D tensor with shape: `(batch_shape, new_steps, filters)`
    - If `data_format="channels_first"`:
        A 3D tensor with shape: `(batch_shape, filters, new_steps)`
    Returns:
        A 3D tensor representing
        `activation(conv1d_transpose(inputs, kernel) + bias)`.
    Raises:
        ValueError: when both `strides > 1` and `dilation_rate > 1`.
    References:
    - [A guide to convolution arithmetic for deep learning](
        https://arxiv.org/abs/1603.07285v1)
    - [Deconvolutional Networks](
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
    Example:
    >>> x = np.random.rand(4, 10, 128)
    >>> y = keras.layers.Conv1DTranspose(32, 3, 2, activation='relu')(x)
    >>> print(y.shape)
    (4, 21, 32)
    """
    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        """Forward all configuration to `BaseConvTranspose` with `rank=1`."""
        super().__init__(
            rank=1,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@convolutional@conv1d_transpose.py@.PATH_END.py
|
{
"filename": "test_containers.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/integrations/prefect-docker/tests/test_containers.py",
"type": "Python"
}
|
from unittest.mock import MagicMock
from prefect_docker.containers import (
create_docker_container,
get_docker_container_logs,
remove_docker_container,
start_docker_container,
stop_docker_container,
)
from prefect.logging import disable_run_logger
class TestCreateDockerContainer:
    """Unit tests for the create_docker_container task."""

    async def test_create_kwargs(self, mock_docker_host: MagicMock):
        kwargs = {
            "image": "test_image",
            "command": "test_command",
            "name": "test_name",
            "detach": False,
            "ports": {"2222/tcp": 3333},
            "entrypoint": None,
            "environment": None,
        }
        # Call the task's underlying function directly, bypassing the runner.
        with disable_run_logger():
            container = await create_docker_container.fn(
                docker_host=mock_docker_host, **kwargs
            )
        assert container.id == "id_1"
        # All keyword arguments must be forwarded verbatim to the docker client.
        docker_client = mock_docker_host.get_client()
        docker_client.__enter__.return_value.containers.create.assert_called_once_with(
            **kwargs
        )
class TestGetDockerContainerLogs:
    """Unit tests for the get_docker_container_logs task."""

    async def test_logs_kwargs(self, mock_docker_host: MagicMock):
        with disable_run_logger():
            logs = await get_docker_container_logs.fn(
                docker_host=mock_docker_host, container_id="42"
            )
        assert logs == "here are logs"
        # The container must have been looked up exactly once, by id.
        docker_client = mock_docker_host.get_client()
        docker_client.__enter__.return_value.containers.get.assert_called_once_with("42")
class TestStartDockerContainer:
    """Unit tests for the start_docker_container task."""

    async def test_start_kwargs(self, mock_docker_host: MagicMock):
        with disable_run_logger():
            container = await start_docker_container.fn(
                docker_host=mock_docker_host, container_id="42"
            )
        assert container.id == "42"
        # The container must have been looked up exactly once, by id.
        docker_client = mock_docker_host.get_client()
        docker_client.__enter__.return_value.containers.get.assert_called_once_with("42")
class TestStopDockerContainer:
    """Unit tests for the stop_docker_container task."""

    async def test_stop_kwargs(self, mock_docker_host: MagicMock):
        with disable_run_logger():
            container = await stop_docker_container.fn(
                docker_host=mock_docker_host, container_id="42"
            )
        assert container.id == "42"
        # The container must have been looked up exactly once, by id.
        docker_client = mock_docker_host.get_client()
        docker_client.__enter__.return_value.containers.get.assert_called_once_with("42")
class TestRemoveDockerContainer:
    """Unit tests for the remove_docker_container task."""

    async def test_remove_kwargs(self, mock_docker_host: MagicMock):
        with disable_run_logger():
            container = await remove_docker_container.fn(
                docker_host=mock_docker_host, container_id="42"
            )
        assert container.id == "42"
        # The container must have been looked up exactly once, by id.
        docker_client = mock_docker_host.get_client()
        docker_client.__enter__.return_value.containers.get.assert_called_once_with("42")
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@integrations@prefect-docker@tests@test_containers.py@.PATH_END.py
|
{
"filename": "ComponentGetterImpl.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/containerTests/corbaRefPersistenceTest/test/demoImpl/ComponentGetterImpl.py",
"type": "Python"
}
|
import demo__POA
from Acspy.Servants.ContainerServices import ContainerServices
from Acspy.Servants.ComponentLifecycle import ComponentLifecycle
from Acspy.Servants.ACSComponent import ACSComponent
class ComponentGetterImpl(demo__POA.ComponentGetter,
                          ACSComponent,
                          ContainerServices,
                          ComponentLifecycle):
    """CORBA test component that retrieves another component by name."""

    def __init__(self):
        """Initialize all ACS base classes explicitly."""
        ACSComponent.__init__(self)
        ComponentLifecycle.__init__(self)
        ContainerServices.__init__(self)

    def getOtherComponent(self):
        """Obtain a reference to COMP_TO_GET and log the successful retrieval."""
        self.getComponent("COMP_TO_GET")
        self.getLogger().logInfo("Got component without problems :)")
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@containerTests@corbaRefPersistenceTest@test@demoImpl@ComponentGetterImpl.py@.PATH_END.py
|
{
"filename": "_extract.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/sparse/_extract.py",
"type": "Python"
}
|
"""Functions to extract parts of sparse matrices
"""
__docformat__ = "restructuredtext en"
__all__ = ['find', 'tril', 'triu']
from ._coo import coo_matrix, coo_array
from ._base import sparray
def find(A):
    """Return the coordinates and values of the nonzero entries of `A`.

    Parameters
    ----------
    A : dense or sparse array or matrix
        Matrix whose nonzero elements are desired.

    Returns
    -------
    (I, J, V) : tuple of arrays
        Row indices, column indices and values of the nonzero entries,
        with duplicate entries summed and explicitly stored zeros dropped.

    Examples
    --------
    >>> from scipy.sparse import csr_array, find
    >>> find(csr_array([[7.0, 8.0, 0], [0, 0, 9.0]]))
    (array([0, 0, 1], dtype=int32),
     array([0, 1, 2], dtype=int32),
     array([ 7.,  8.,  9.]))
    """
    # Work on a COO copy so coalescing duplicates does not mutate the input.
    coo = coo_array(A, copy=True)
    coo.sum_duplicates()
    # Mask out entries that are stored but numerically zero.
    nonzero = coo.data != 0
    return coo.row[nonzero], coo.col[nonzero], coo.data[nonzero]
def tril(A, k=0, format=None):
"""Return the lower triangular portion of a sparse array or matrix
Returns the elements on or below the k-th diagonal of A.
- k = 0 corresponds to the main diagonal
- k > 0 is above the main diagonal
- k < 0 is below the main diagonal
Parameters
----------
A : dense or sparse array or matrix
Matrix whose lower trianglar portion is desired.
k : integer : optional
The top-most diagonal of the lower triangle.
format : string
Sparse format of the result, e.g. format="csr", etc.
Returns
-------
L : sparse matrix
Lower triangular portion of A in sparse format.
See Also
--------
triu : upper triangle in sparse format
Examples
--------
>>> from scipy.sparse import csr_array, tril
>>> A = csr_array([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
... dtype='int32')
>>> A.toarray()
array([[1, 2, 0, 0, 3],
[4, 5, 0, 6, 7],
[0, 0, 8, 9, 0]], dtype=int32)
>>> tril(A).toarray()
array([[1, 0, 0, 0, 0],
[4, 5, 0, 0, 0],
[0, 0, 8, 0, 0]], dtype=int32)
>>> tril(A).nnz
4
>>> tril(A, k=1).toarray()
array([[1, 2, 0, 0, 0],
[4, 5, 0, 0, 0],
[0, 0, 8, 9, 0]], dtype=int32)
>>> tril(A, k=-1).toarray()
array([[0, 0, 0, 0, 0],
[4, 0, 0, 0, 0],
[0, 0, 0, 0, 0]], dtype=int32)
>>> tril(A, format='csc')
<Compressed Sparse Column sparse array of dtype 'int32'
with 4 stored elements and shape (3, 5)>
"""
coo_sparse = coo_array if isinstance(A, sparray) else coo_matrix
# convert to COOrdinate format where things are easy
A = coo_sparse(A, copy=False)
mask = A.row + k >= A.col
row = A.row[mask]
col = A.col[mask]
data = A.data[mask]
new_coo = coo_sparse((data, (row, col)), shape=A.shape, dtype=A.dtype)
return new_coo.asformat(format)
def triu(A, k=0, format=None):
"""Return the upper triangular portion of a sparse array or matrix
Returns the elements on or above the k-th diagonal of A.
- k = 0 corresponds to the main diagonal
- k > 0 is above the main diagonal
- k < 0 is below the main diagonal
Parameters
----------
A : dense or sparse array or matrix
Matrix whose upper trianglar portion is desired.
k : integer : optional
The bottom-most diagonal of the upper triangle.
format : string
Sparse format of the result, e.g. format="csr", etc.
Returns
-------
L : sparse array or matrix
Upper triangular portion of A in sparse format.
Sparse array if A is a sparse array, otherwise matrix.
See Also
--------
tril : lower triangle in sparse format
Examples
--------
>>> from scipy.sparse import csr_array, triu
>>> A = csr_array([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
... dtype='int32')
>>> A.toarray()
array([[1, 2, 0, 0, 3],
[4, 5, 0, 6, 7],
[0, 0, 8, 9, 0]], dtype=int32)
>>> triu(A).toarray()
array([[1, 2, 0, 0, 3],
[0, 5, 0, 6, 7],
[0, 0, 8, 9, 0]], dtype=int32)
>>> triu(A).nnz
8
>>> triu(A, k=1).toarray()
array([[0, 2, 0, 0, 3],
[0, 0, 0, 6, 7],
[0, 0, 0, 9, 0]], dtype=int32)
>>> triu(A, k=-1).toarray()
array([[1, 2, 0, 0, 3],
[4, 5, 0, 6, 7],
[0, 0, 8, 9, 0]], dtype=int32)
>>> triu(A, format='csc')
<Compressed Sparse Column sparse array of dtype 'int32'
with 8 stored elements and shape (3, 5)>
"""
coo_sparse = coo_array if isinstance(A, sparray) else coo_matrix
# convert to COOrdinate format where things are easy
A = coo_sparse(A, copy=False)
mask = A.row + k <= A.col
row = A.row[mask]
col = A.col[mask]
data = A.data[mask]
new_coo = coo_sparse((data, (row, col)), shape=A.shape, dtype=A.dtype)
return new_coo.asformat(format)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@sparse@_extract.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipywidgets/py3/ipywidgets/__init__.py",
"type": "Python"
}
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Interactive widgets for the Jupyter notebook.
Provide simple interactive controls in the notebook.
Each Widget corresponds to an object in Python and Javascript,
with controls on the page.
To put a Widget on the page, you can display it with Jupyter's display machinery::
from ipywidgets import IntSlider
slider = IntSlider(min=1, max=10)
display(slider)
Moving the slider will change the value. Most Widgets have a current value,
accessible as a `value` attribute.
"""
# Must import __version__ first to avoid errors importing this file during the build process. See https://github.com/pypa/setuptools/issues/1724#issuecomment-627241822
from ._version import __version__, __protocol_version__, __jupyter_widgets_controls_version__, __jupyter_widgets_base_version__
import os
import sys
from traitlets import link, dlink
from IPython import get_ipython
from .widgets import *
def load_ipython_extension(ip):
    """Set up Jupyter to work with widgets (entry point for ``%load_ext``)."""
    # Only register when running under a full kernel; a plain terminal
    # IPython shell has no `kernel` attribute and cannot host comms.
    if hasattr(ip, 'kernel'):
        register_comm_target()
def register_comm_target(kernel=None):
    """Register the jupyter.widget comm targets.

    The ``kernel`` argument is unused here but kept in the signature so
    existing callers remain valid.
    """
    from . import comm
    manager = comm.get_comm_manager()
    if manager is not None:
        manager.register_target('jupyter.widget', Widget.handle_comm_opened)
        manager.register_target('jupyter.widget.control', Widget.handle_control_comm_opened)
def _handle_ipython():
    """Register the widget comm target at import time when inside Jupyter."""
    shell = get_ipython()
    if shell is not None:
        register_comm_target()
# Attempt registration as a module import side effect.
_handle_ipython()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipywidgets@py3@ipywidgets@__init__.py@.PATH_END.py
|
{
"filename": "run_PPDC_tests.py",
"repo_name": "SPARTA-dev/SPARTA",
"repo_path": "SPARTA_extracted/SPARTA-master/examples/Partial Distance Correlation Periodogram/run_PPDC_tests.py",
"type": "Python"
}
|
# ----------------------------------------
# run_PPDC_tests.py (SPARTA file)
# ----------------------------------------
# This file contains a few functions, used in the example notebooks to demonstrate the partial PDC periodograms.
#
# This file stores the following methods:
# ---------------------------------------------
#
# 1) simulate_planet_around_active_star - simulates observations of an active star,
# either randomly or periodically active, orbited by a planets.
# 2) run_ppdc_tests - enables running the simulate_planet_around_active_star on a range of parameter values,
# for testing purposes.
#
# Dependencies: numpy, random, scipy, matplotlib and copy.
# Last update: Avraham Binnenfeld, 20210510.
from sparta.UNICOR.Spectrum import Spectrum
from sparta.UNICOR.Template import Template
from sparta.Auxil.TimeSeries import TimeSeries
from sparta.Observations import Observations
import numpy as np
import random
from scipy import interpolate
import matplotlib.pyplot as plt
from copy import deepcopy
from examples.run_USuRPER_tests import simulate_kepler_ellipse
def simulate_planet_around_active_star(v_sin_i, epsilon, integration_ratio, star_template, template_spot,
                                       p_spot, p_planet, spec_power_ratio, planet_k, star_k, planet_param,
                                       N, snr, periocic_spot_flag, seed=-1):
    """Simulate N spectra of an active star orbited by a planet.

    Args:
        v_sin_i: projected rotational velocity for the broadening kernel.
        epsilon: limb-darkening coefficient for the rotational broadening.
        integration_ratio: if truthy, forwarded to Template.integrate_spec.
        star_template, template_spot: UNICOR Template instances for the
            quiet photosphere and the spot spectrum.
        p_spot, p_planet: spot and planet periods (days).
        spec_power_ratio: relative spectral power of the spot contribution.
        planet_k, star_k: velocity amplitudes of the planet and spot signals.
        planet_param: orbital parameters forwarded to simulate_kepler_ellipse.
        N (int): number of simulated visits.
        snr: signal-to-noise ratio used when adding noise.
        periocic_spot_flag (bool): periodic (True) vs. random (False) activity.
        seed (int): random seed; -1 leaves the generators unseeded.

    Returns:
        (N, times, visit_spec_list, template_for_calc)
    """
    # Rotationally broaden the stellar template, then trim the edges that the
    # broadening kernel corrupts.
    new_temp = Template(template=Spectrum(wv=star_template.model.wv,
                                          sp=star_template.model.sp).InterpolateSpectrum(delta=0.5))
    new_temp.RotationalBroadening(epsilon=epsilon, vsini=v_sin_i)
    template_star_broadend = deepcopy(Template(template=Spectrum(wv=[new_temp.model.wv[0][60:-60]],
                                                                 sp=[new_temp.model.sp[0][60:-60]]).SpecPreProccess()))
    if seed != -1:
        random.seed(seed)
    times = [(random.random() * 100) for _ in range(N)]
    if periocic_spot_flag:
        # Periodic activity: sinusoidal spot velocity, zeroed in a window
        # around half the spot period.
        vals_spot = [star_k * np.sin(2 * t * np.pi / p_spot) for t in times]
        for i, t in enumerate(times):
            if abs(t - p_spot/2) < 10:
                vals_spot[i] = 0
    else:
        # Random activity: Gaussian spot velocities.
        # NOTE(review): np.random is not seeded even when `seed` is given.
        std_spot_i = 0.5
        mu, sigma = 0, std_spot_i  # mean and standard deviation
        vals_spot = [star_k * np.random.normal(mu, sigma) for _ in times]
    keplerian_velocities = simulate_kepler_ellipse(times, planet_param, p_planet)
    vals_planet = [planet_k * v for v in keplerian_velocities]
    visit_spec_list = []
    for i, v in enumerate(vals_spot):
        # Subtract a Doppler-shifted copy of the stellar spectrum (the spot
        # removes photospheric flux)...
        # (Dead-code fix: the original also built an unused second
        # interpolator `z2` here and below; both were removed.)
        new_wl_spot = star_template.doppler(v)
        z1 = interpolate.interp1d(new_wl_spot[0], star_template.model.sp[0], kind='quadratic')
        spotted_spec = z1(template_star_broadend.model.wv[0][60:-60]) * -spec_power_ratio + template_star_broadend.model.sp[0][60:-60]
        spotted_wl = template_star_broadend.model.wv[0][60:-60]
        spotted_t = Template(spectrum=spotted_spec, wavelengths=spotted_wl)
        # ...and add the shifted spot spectrum with the same relative power.
        new_wl_spot = template_spot.doppler(v)
        z1 = interpolate.interp1d(new_wl_spot[0], template_spot.model.sp[0], kind='quadratic')
        spotted_spec = z1(spotted_t.model.wv[0][60:-60]) * spec_power_ratio + spotted_t.model.sp[0][60:-60]
        spotted_wl = spotted_t.model.wv[0][60:-60]
        spotted_t = Template(spectrum=spotted_spec, wavelengths=spotted_wl)
        # Apply the planetary Doppler shift (skip when the velocity is zero).
        if vals_planet[i] != 0:
            spotted_t_vel = spotted_t.doppler(vals_planet[i])
        else:
            spotted_t_vel = spotted_t.model.wv
        new_temp = Spectrum(wv=[spotted_t_vel[0]], sp=[spotted_t.model.sp[0]]).InterpolateSpectrum(delta=1)
        # Instrumental broadening at R = 100,000.
        rot_flux = Template().GaussianBroadening(wv=new_temp.wv, sp=new_temp.sp, resolution=100_000)
        new_temp.sp = rot_flux
        if integration_ratio:
            wv, sp = Template().integrate_spec(integration_ratio=integration_ratio, wv=new_temp.wv, sp=new_temp.sp)
            new_temp.wv = wv
            new_temp.sp = sp
        if seed != -1:
            new_temp.sp = Template().add_noise(snr, new_temp.sp, rndseed=seed)
        else:
            new_temp.sp = Template().add_noise(snr, new_temp.sp)
        new_temp = new_temp.SpecPreProccess()
        visit_spec_list.append(new_temp)
    # Trimmed, preprocessed quiet-star template used downstream for the CCFs.
    template_for_calc = Template(template=Spectrum(wv=[star_template.model.wv[0][60:-60]],
                                                   sp=[star_template.model.sp[0][60:-60]]).SpecPreProccess())
    return N, times, visit_spec_list, template_for_calc
def run_ppdc_tests(N, v_sin_i, spec_power_ratio, planet_k, snr, template_star, template_spot, period, periocic_spot_flag):
    """Simulate a planet around a spotted star and run the PDC periodogram suite.

    Simulates N spectra of an active star hosting a planet (spot period 19 d,
    planet period 7 d are hard-coded below), measures radial velocities by CCF
    against the broadened stellar template, then runs the PDC, USURPER,
    partial-USURPER and GLS periodograms and plots the results.

    Args:
        N: number of simulated visits (spectra).
        v_sin_i: projected rotational velocity passed to the simulator.
        spec_power_ratio: spot-to-star spectral power ratio.
        planet_k: planetary RV semi-amplitude.
        snr: signal-to-noise ratio of the simulated spectra.
        template_star: stellar spectral template.
        template_spot: spot spectral template.
        period: known injected period(s) to mark on the periodogram plots
            (e.g. [7, 19] for the planet and spot periods used here).
        periocic_spot_flag: forwarded to the simulator (spelling kept for
            backward compatibility with existing callers).
    """
    print("Details:", "N:", N, "v_sin_i:", v_sin_i, "spec_power_ratio:", spec_power_ratio,
          "planet_k:", planet_k, "snr:", snr, "...")
    N, times, visit_spec_list, template_star_broadend = \
        simulate_planet_around_active_star(v_sin_i=v_sin_i,
                                           epsilon=0.5,
                                           integration_ratio=[],
                                           star_template=template_star,
                                           template_spot=template_spot,
                                           p_spot=19,
                                           p_planet=7,
                                           spec_power_ratio=spec_power_ratio,
                                           star_k=1,
                                           planet_k=planet_k,
                                           planet_param=[],
                                           N=N,
                                           snr=snr,
                                           periocic_spot_flag=periocic_spot_flag)
    ts = TimeSeries(size=N, times=times, vals=visit_spec_list,
                    calculated_vrad_list=[])
    obs = Observations(time_series=ts)
    obs.initialize_periodicity_detector(freq_range=(1 / 1000, 0.5), periodogram_grid_resolution=1000)
    # Measure RVs via CCF against the broadened template.
    # Velocity bounds cleaned up: were written as [-015.5, 015.5].
    www = obs.calc_rv_against_template(template_star_broadend, dv=0.01,
                                       VelBound=[-15.5, 15.5], fastccf=True)
    calculated_vrad_list = www.vels
    # Sanity-check plot of one CCF (assumes N > 3 visits).
    _ = obs.ccf_list[3].plotCCFs()
    plt.show()
    calculated_ccf_peaks = obs.ccf_peaks
    obs.time_series.calculated_vrad_list = calculated_vrad_list
    obs.time_series.calculated_ccf_peaks = calculated_ccf_peaks
    obs.periodicity_detector.run_PDC_process(calc_biased_flag=False, calc_unbiased_flag=True)
    obs.periodicity_detector.run_USURPER_process(calc_biased_flag=False, calc_unbiased_flag=True)
    obs.periodicity_detector.run_Partial_USURPER_process(reversed_flag=True)
    obs.periodicity_detector.run_Partial_USURPER_process(reversed_flag=False)
    obs.periodicity_detector.run_GLS_process()
    # BUG FIX: was `obs.periodicity_detector.period = period [7, 19]`, which
    # indexes `period` with the tuple (7, 19) and raises at runtime. Use the
    # caller-supplied list of injected periods (e.g. [7, 19]) instead.
    obs.periodicity_detector.period = period
    obs.periodicity_detector.periodogram_plots(velocities_flag=True)
    plt.show()
# Script entry point: intentionally a no-op. Import this module and call
# run_ppdc_tests(...) with the desired simulation parameters instead.
if __name__ == '__main__':
    pass
|
SPARTA-devREPO_NAMESPARTAPATH_START.@SPARTA_extracted@SPARTA-master@examples@Partial Distance Correlation Periodogram@run_PPDC_tests.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "njcuk9999/apero-drs",
"repo_path": "apero-drs_extracted/apero-drs-main/apero/tools/module/setup/__init__.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
apero.tools.module.setup package initializer.

Intentionally defines no public names (``__all__`` is empty); it only marks
this directory as a Python package.

Created on 2019-02-13 at 16:35
@author: cook
"""
__all__ = []
# =============================================================================
# Define functions
# =============================================================================
# Nothing to see here.
# =============================================================================
# End of code
# =============================================================================
|
njcuk9999REPO_NAMEapero-drsPATH_START.@apero-drs_extracted@apero-drs-main@apero@tools@module@setup@__init__.py@.PATH_END.py
|
{
"filename": "api.md",
"repo_name": "tigerchenlu98/rebound",
"repo_path": "rebound_extracted/rebound-main/docs/api.md",
"type": "Markdown"
}
|
# REBOUND API
These pages describe the main features of REBOUND and its API.
There are two structures (*objects* in Python) which you will encounter frequently when working with REBOUND.
The first is the [Simulation structure](simulation.md) which contains all the configuration, status and particle data of one REBOUND simulation.
The second is the [Particle structure](particles.md) which represents one particle in a simulation.
REBOUND is a modular code.
You can combine different [gravity solvers](gravity.md), [collision detection algorithms](collisions.md), [boundary conditions](boundaryconditions.md), and [integration methods](integrators.md).
Not all combinations make physical sense, and not all combinations are supported.
We describe the different modules and their configuration in this section.
Also make sure to read some of the other concepts documented in this section.
They will help you understand the [units](units.md) used in REBOUND, how REBOUND handles [orbital elements](orbitalelements.md), how to save and load simulations to [Simulationarchive](simulationarchive.md) files, how to use [chaos indicators](chaosindicators.md), how to use the [browser based 3D visualization](visualization.md), and several other topics.
!!! Info
Because the C and Python versions of REBOUND are very similar, we describe both languages in one documentation.
The syntax and examples are provided in both C and Python.
Use the tabs to switch between them.
|
tigerchenlu98REPO_NAMEreboundPATH_START.@rebound_extracted@rebound-main@docs@api.md@.PATH_END.py
|
{
"filename": "_categoryorder.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/carpet/baxis/_categoryorder.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CategoryorderValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the carpet.baxis 'categoryorder' property."""

    def __init__(
        self, plotly_name="categoryorder", parent_name="carpet.baxis", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating upward.
        edit_type = kwargs.pop("edit_type", "calc")
        allowed_values = kwargs.pop(
            "values",
            ["trace", "category ascending", "category descending", "array"],
        )
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed_values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@carpet@baxis@_categoryorder.py@.PATH_END.py
|
{
"filename": "spectral_metric.py",
"repo_name": "ucl-exoplanets/ADC2023-baseline",
"repo_path": "ADC2023-baseline_extracted/ADC2023-baseline-main/spectral_metric.py",
"type": "Python"
}
|
import numpy as np
from posterior_utils import default_prior_bounds,restrict_to_prior
from FM_utils_final import *
def L2_loss(truth, predicted):
    """Simple MSE"""
    # Mean of the element-wise squared residuals.
    residual = truth - predicted
    return np.mean(residual * residual)
def L1_loss(truth, predicted):
    """Simple MAE"""
    # Mean of the element-wise absolute residuals.
    deviation = np.abs(truth - predicted)
    return np.mean(deviation)
def huber_loss(truth, predicted, alpha):
    """huber loss with threshold (alpha) set at 1"""
    # At or above the threshold use the quadratic (L2) branch; below it,
    # the absolute-error (L1) branch.
    return L2_loss(truth, predicted) if alpha >= 1 else L1_loss(truth, predicted)
def compute_score(median, bound, GT_median, GT_bound):
    """compute the score contribution from the similaries between two spectra.

    Args:
        median (array): median spectra from participants
        bound (array): The IQR bound from participants.
        GT_median (array): median spectra generated from GT
        GT_bound (array): The IQR bound from GT.

    Returns:
        scalar: the score from spectral loss (floored at 0, maximum 1000)
    """
    # The log-ratio of mean flux levels sets the huber-loss threshold for
    # the median term.
    mean_level = np.mean(median)
    mean_level_GT = np.mean(GT_median)
    alpha = np.abs(np.log10(mean_level / mean_level_GT))
    median_loss = 100 * huber_loss(np.log10(GT_median), np.log10(median), alpha)

    # Same construction for the inter-quartile-range (uncertainty) term.
    mean_iqr = np.mean(bound)
    mean_iqr_GT = np.mean(GT_bound)
    alpha_bound = np.abs(np.log10(mean_iqr / mean_iqr_GT))
    bound_loss = 100 * huber_loss(np.log10(GT_bound), np.log10(bound), alpha_bound)

    # Average the two loss terms, subtract from the 1000-point maximum and
    # floor the result at 0.
    score = 1000 - np.mean([bound_loss, median_loss])
    return np.maximum(score, 0)
def compute_spectral_loss(tr1, weights1, tr2, weights2, bounds_matrix, fm_func, q_list):
    """Score a participant trace against the ground-truth trace.

    Both traces are clipped to the prior, pushed through the forward model to
    obtain approximate quantile spectra, then compared via compute_score.
    """
    def _median_and_iqr(trace, weights):
        # Clip samples to the prior, forward-model them and return the median
        # spectrum plus its inter-quartile range (regularised by 1e-8).
        clipped = restrict_to_prior(trace, bounds_matrix)
        q_lo, q_mid, q_hi = compute_approx_mean_and_bound(clipped, weights, fm_func, q_list)
        q_lo, q_mid, q_hi = check_output(q_lo, q_mid, q_hi)
        return q_mid, q_hi - q_lo + 1e-8

    median, bound = _median_and_iqr(tr1, weights1)
    GT_median, GT_bound = _median_and_iqr(tr2, weights2)
    return compute_score(median, bound, GT_median, GT_bound)
|
ucl-exoplanetsREPO_NAMEADC2023-baselinePATH_START.@ADC2023-baseline_extracted@ADC2023-baseline-main@spectral_metric.py@.PATH_END.py
|
{
"filename": "mel_ops.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/ops/signal/mel_ops.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""mel conversion ops."""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# mel spectrum constants.
# HTK mel-scale formula constants: mel = 1127 * ln(1 + f / 700).
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
def _mel_to_hertz(mel_values, name=None):
  """Converts frequencies in `mel_values` from the mel scale to linear scale.

  Args:
    mel_values: A `Tensor` of frequencies in the mel scale.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same shape and type as `mel_values` containing linear
    scale frequencies in Hertz.
  """
  with ops.name_scope(name, 'mel_to_hertz', [mel_values]):
    mel_tensor = ops.convert_to_tensor(mel_values)
    # Invert the HTK mel formula: f = 700 * (exp(m / 1127) - 1).
    growth = math_ops.exp(mel_tensor / _MEL_HIGH_FREQUENCY_Q) - 1.0
    return _MEL_BREAK_FREQUENCY_HERTZ * growth
def _hertz_to_mel(frequencies_hertz, name=None):
  """Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.

  Args:
    frequencies_hertz: A `Tensor` of frequencies in Hertz.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same shape and type of `frequencies_hertz` containing
    frequencies in the mel scale.
  """
  with ops.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):
    hertz = ops.convert_to_tensor(frequencies_hertz)
    # HTK mel formula: m = 1127 * ln(1 + f / 700).
    ratio = 1.0 + (hertz / _MEL_BREAK_FREQUENCY_HERTZ)
    return _MEL_HIGH_FREQUENCY_Q * math_ops.log(ratio)
def _validate_arguments(num_mel_bins, sample_rate,
                        lower_edge_hertz, upper_edge_hertz, dtype):
  """Checks the inputs to linear_to_mel_weight_matrix."""
  if num_mel_bins <= 0:
    message = 'num_mel_bins must be positive. Got: %s' % num_mel_bins
    raise ValueError(message)
  if lower_edge_hertz < 0.0:
    message = 'lower_edge_hertz must be non-negative. Got: %s' % lower_edge_hertz
    raise ValueError(message)
  if lower_edge_hertz >= upper_edge_hertz:
    message = 'lower_edge_hertz %.1f >= upper_edge_hertz %.1f' % (
        lower_edge_hertz, upper_edge_hertz)
    raise ValueError(message)
  # A symbolic (Tensor) sample_rate cannot be range-checked eagerly here.
  if not isinstance(sample_rate, tensor.Tensor):
    if sample_rate <= 0.0:
      message = 'sample_rate must be positive. Got: %s' % sample_rate
      raise ValueError(message)
    if upper_edge_hertz > sample_rate / 2:
      message = ('upper_edge_hertz must not be larger than the Nyquist '
                 'frequency (sample_rate / 2). Got %s for sample_rate: %s'
                 % (upper_edge_hertz, sample_rate))
      raise ValueError(message)
  if not dtype.is_floating:
    message = 'dtype must be a floating point type. Got: %s' % dtype
    raise ValueError(message)
@tf_export('signal.linear_to_mel_weight_matrix')
@dispatch.add_dispatch_support
def linear_to_mel_weight_matrix(num_mel_bins=20,
                                num_spectrogram_bins=129,
                                sample_rate=8000,
                                lower_edge_hertz=125.0,
                                upper_edge_hertz=3800.0,
                                dtype=dtypes.float32,
                                name=None):
  r"""Returns a matrix to warp linear scale spectrograms to the [mel scale][mel].
  Returns a weight matrix that can be used to re-weight a `Tensor` containing
  `num_spectrogram_bins` linearly sampled frequency information from
  `[0, sample_rate / 2]` into `num_mel_bins` frequency information from
  `[lower_edge_hertz, upper_edge_hertz]` on the [mel scale][mel].
  This function follows the [Hidden Markov Model Toolkit
  (HTK)](http://htk.eng.cam.ac.uk/) convention, defining the mel scale in
  terms of a frequency in hertz according to the following formula:
  $$\textrm{mel}(f) = 2595 * \textrm{log}_{10}(1 + \frac{f}{700})$$
  In the returned matrix, all the triangles (filterbanks) have a peak value
  of 1.0.
  For example, the returned matrix `A` can be used to right-multiply a
  spectrogram `S` of shape `[frames, num_spectrogram_bins]` of linear
  scale spectrum values (e.g. STFT magnitudes) to generate a "mel spectrogram"
  `M` of shape `[frames, num_mel_bins]`.
  # `S` has shape [frames, num_spectrogram_bins]
  # `M` has shape [frames, num_mel_bins]
  M = tf.matmul(S, A)
  The matrix can be used with `tf.tensordot` to convert an arbitrary rank
  `Tensor` of linear-scale spectral bins into the mel scale.
  # S has shape [..., num_spectrogram_bins].
  # M has shape [..., num_mel_bins].
  M = tf.tensordot(S, A, 1)
  Args:
    num_mel_bins: Python int. How many bands in the resulting mel spectrum.
    num_spectrogram_bins: An integer `Tensor`. How many bins there are in the
      source spectrogram data, which is understood to be `fft_size // 2 + 1`,
      i.e. the spectrogram only contains the nonredundant FFT bins.
    sample_rate: An integer or float `Tensor`. Samples per second of the input
      signal used to create the spectrogram. Used to figure out the frequencies
      corresponding to each spectrogram bin, which dictates how they are mapped
      into the mel scale.
    lower_edge_hertz: Python float. Lower bound on the frequencies to be
      included in the mel spectrum. This corresponds to the lower edge of the
      lowest triangular band.
    upper_edge_hertz: Python float. The desired top edge of the highest
      frequency band.
    dtype: The `DType` of the result matrix. Must be a floating point type.
    name: An optional name for the operation.
  Returns:
    A `Tensor` of shape `[num_spectrogram_bins, num_mel_bins]`.
  Raises:
    ValueError: If `num_mel_bins`/`num_spectrogram_bins`/`sample_rate` are not
      positive, `lower_edge_hertz` is negative, frequency edges are incorrectly
      ordered, `upper_edge_hertz` is larger than the Nyquist frequency.
  [mel]: https://en.wikipedia.org/wiki/Mel_scale
  """
  with ops.name_scope(name, 'linear_to_mel_weight_matrix') as name:
    # Convert Tensor `sample_rate` to float, if possible.
    if isinstance(sample_rate, tensor.Tensor):
      maybe_const_val = tensor_util.constant_value(sample_rate)
      if maybe_const_val is not None:
        sample_rate = maybe_const_val
    # Note: As num_spectrogram_bins is passed to `math_ops.linspace`
    # and the validation is already done in linspace (both in shape function
    # and in kernel), there is no need to validate num_spectrogram_bins here.
    _validate_arguments(num_mel_bins, sample_rate,
                        lower_edge_hertz, upper_edge_hertz, dtype)
    # This function can be constant folded by graph optimization since there are
    # no Tensor inputs.
    sample_rate = math_ops.cast(
        sample_rate, dtype, name='sample_rate')
    lower_edge_hertz = ops.convert_to_tensor(
        lower_edge_hertz, dtype, name='lower_edge_hertz')
    upper_edge_hertz = ops.convert_to_tensor(
        upper_edge_hertz, dtype, name='upper_edge_hertz')
    zero = ops.convert_to_tensor(0.0, dtype)
    # HTK excludes the spectrogram DC bin.
    bands_to_zero = 1
    nyquist_hertz = sample_rate / 2.0
    # Center frequency (Hz) of every retained spectrogram bin (DC sliced off).
    linear_frequencies = math_ops.linspace(
        zero, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:]
    # Column vector so it broadcasts against the [1, num_mel_bins] edge rows.
    spectrogram_bins_mel = array_ops.expand_dims(
        _hertz_to_mel(linear_frequencies), 1)
    # Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
    # center of each band is the lower and upper edge of the adjacent bands.
    # Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
    # num_mel_bins + 2 pieces.
    band_edges_mel = shape_ops.frame(
        math_ops.linspace(_hertz_to_mel(lower_edge_hertz),
                          _hertz_to_mel(upper_edge_hertz),
                          num_mel_bins + 2), frame_length=3, frame_step=1)
    # Split the triples up and reshape them into [1, num_mel_bins] tensors.
    lower_edge_mel, center_mel, upper_edge_mel = tuple(array_ops.reshape(
        t, [1, num_mel_bins]) for t in array_ops.split(
            band_edges_mel, 3, axis=1))
    # Calculate lower and upper slopes for every spectrogram bin.
    # Line segments are linear in the mel domain, not Hertz.
    lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
        center_mel - lower_edge_mel)
    upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
        upper_edge_mel - center_mel)
    # Intersect the line segments with each other and zero.
    mel_weights_matrix = math_ops.maximum(
        zero, math_ops.minimum(lower_slopes, upper_slopes))
    # Re-add the zeroed lower bins we sliced out above.
    return array_ops.pad(
        mel_weights_matrix, [[bands_to_zero, 0], [0, 0]], name=name)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@ops@signal@mel_ops.py@.PATH_END.py
|
{
"filename": "_version.py",
"repo_name": "pyFFTW/pyFFTW",
"repo_path": "pyFFTW_extracted/pyFFTW-master/pyfftw/_version.py",
"type": "Python"
}
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own; do not fold them into the
    # returned dict literal.
    git_refnames = " (HEAD -> master)"
    git_full = "9bbc6da44f63b74c5508915a0c84aeda01656f73"
    git_date = "2024-11-12 12:18:34 +0100"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are baked in when 'setup.py versioneer' creates _version.py.
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "v",
        "parentdir_prefix": "pyfftw-",
        "versionfile_source": "pyfftw/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
    # Internal control flow: each version-discovery strategy raises this so
    # get_versions() can fall through to the next strategy.
# Registry of VCS handler callables, keyed by VCS name then method name.
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s), returning a (stdout, returncode) pair."""
    assert isinstance(commands, list)
    proc = None
    for exe in commands:
        dispcmd = str([exe] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            proc = subprocess.Popen([exe] + args, cwd=cwd, env=env,
                                    stdout=subprocess.PIPE,
                                    stderr=(subprocess.PIPE if hide_stderr
                                            else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # This candidate executable is missing; try the next spelling.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None, None
    else:
        # Loop completed without `break`: no candidate could be launched.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = proc.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if proc.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, proc.returncode
    return stdout, proc.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # Everything after the prefix is taken to be the version string.
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    # PEP 440 local version segments use "+" once, then "." separators.
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
    else:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
    # A dirty tree always gets the ".dirty" marker appended last.
    if pieces["dirty"]:
        rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return tag + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces) + "g%s" % pieces["short"]
        return rendered
    # exception #1
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
        return rendered
    # exception #1
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + "-dirty" if pieces["dirty"] else rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    # Dispatch table replaces the if/elif chain; unknown styles still raise.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: git-archive keyword expansion.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    # Locate the source-tree root by walking up one directory per path
    # component of versionfile_source.
    try:
        root = os.path.realpath(__file__)
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    # Strategy 2: ask git directly via 'git describe'.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
|
pyFFTWREPO_NAMEpyFFTWPATH_START.@pyFFTW_extracted@pyFFTW-master@pyfftw@_version.py@.PATH_END.py
|
{
"filename": "ECHO_server.py",
"repo_name": "dannyjacobs/ECHO",
"repo_path": "ECHO_extracted/ECHO-master/scripts/old/ECHO_server.py",
"type": "Python"
}
|
#! /usr/bin/env python
'''
Author: Jacob Burba
ECHO_server.py initiates a queriable Flask server that contains GPS positions for the drone.
The GPS information is read in from the user specified output file from ECHO_GET_GPS.py
and then broadcast via 10.1.1.1:5000, where the port 5000 is the default port for Flask.
The user can specify a custom IP address and port number, if desired. A secondary machine,
used to obtain radio spectra from the Signal Hound, connected via ethernet/wifi can then
query this server server with the time at which a spectrum is read. The server then returns
an interpolated GPS position at the query time, if available, or an error message saying
that the query time lies outside the range of available GPS position data. An example call
can be seen as follows
python ECHO_server.py --gps_file <output_filename>
The user does not need to pass the flags --dt and --host, as they have default values of
0.5 and 10.1.1.1, respectively. They can be set to a custom value by issuing the flags
such as in the following example
python ECHO_server.py --gps_file <output_filename> --dt 0.3 --host 10.13.22.1
NOTE: Due to the structure of the Flask app, the user must exit the code by executing
CTRL + C twice, once to end the Flask app, and one to end the Python program.
'''
import optparse,sys,threading,atexit
import numpy as np
from time import sleep,strftime
from astropy.time import Time
from flask import Flask,jsonify
from ECHO_read_utils import get_data
from ECHO_position_utils import interp_pos
from ECHO_server_utils import create_app
# Command-line interface: which GPS log to watch, the binning resolution
# used to detect gaps in the data, and the address to bind the server to.
o = optparse.OptionParser()
o.set_description('Reads in GPS positional data in realtime from a user specified \
text file. Starts a server which is queryable by a user on the same or another \
machine. The query returns an interpolated GPS position which can be read by the \
querier and used to accumulate GPS and spectral data into one output file.\
See ECHO_accumulate.py for the output file format.')
o.add_option('--gps_file', type=str,
             help='File name for GPS positional data to be read')
o.add_option('--dt', type=float, default=0.5,
             help='User specified time interval for binning resolution')
# Since v_drone<2m/s, dt gives a maximum positional extrapolation range, i.e. dx~v*dt
o.add_option('--host', type=str, default='10.1.1.1',
             help='Host address')
opts, args = o.parse_args(sys.argv[1:])

# Verify a GPS file was passed by the user (required option).
if not opts.gps_file:
    print '\n Please enter valid file for GPS information\nExiting...\n\n'
    sys.exit()

POOL_TIME = 0.3  # Seconds between thread creation/execution
def create_app():
    """Build the Flask app and start a self-re-arming background timer
    that re-reads the GPS log every POOL_TIME seconds.

    NOTE(review): this local definition shadows the ``create_app``
    imported from ECHO_server_utils above -- confirm which implementation
    is the intended one.
    """
    app = Flask(__name__)

    def interrupt():  # Method called upon script exit
        # Cancel the pending Timer so the interpreter can exit cleanly.
        global yourThread
        yourThread.cancel()

    def collection():
        # Periodic worker: refresh the position arrays and interpolators
        # from the GPS file, then schedule the next run of itself.
        global gps_times, lats, lons, alts
        global lati, loni, alti
        global lastlen
        global tmin, tmax, dt
        global counts, tbins  # ,weights
        global yourThread
        with dataLock:  # Wait for lock on current thread
            gps_times, lats, lons, alts = get_data(opts.gps_file, filetype='gps')
            lati, loni, alti = interp_pos(gps_times.gps[:, 0], lats, lons, alts)
            currlen = gps_times.shape[0]
            if currlen == lastlen:
                # No new samples since last pass.
                # NOTE(review): this sleeps while still holding dataLock,
                # which blocks request handlers for POOL_TIME -- confirm.
                sleep(POOL_TIME)
            elif currlen > lastlen:
                # New samples arrived: refresh the time span and the
                # per-bin sample counts used by the query endpoint to
                # detect gaps in GPS coverage.
                lastlen = currlen
                tmin, tmax = gps_times.gps.min(), gps_times.gps.max()
                # Create weights array for check of GPS data when user queries server
                counts, tbins = np.histogram(gps_times.gps, bins=int((tmax - tmin) / dt))
        # Start the next thread
        yourThread = threading.Timer(POOL_TIME, collection, ())
        yourThread.start()

    def initialize():
        global yourThread
        # Create your thread
        yourThread = threading.Timer(POOL_TIME, collection, ())
        yourThread.start()

    # Initiate
    initialize()
    # When you kill Flask (SIGTERM), clear the trigger for the next thread
    atexit.register(interrupt)
    # Return app with get function
    return app
# Global variables
# Initial read of the GPS log; these globals are refreshed in place by the
# background timer thread started inside create_app().
gps_times, lats, lons, alts = get_data(opts.gps_file, filetype='gps')
print gps_times.shape, lats.shape, lons.shape, alts.shape
lati, loni, alti = interp_pos(gps_times.gps[:, 0], lats, lons, alts)
# Get current number of GPS data points for monitoring of opts.gps_file
lastlen = gps_times.shape[0]
tmin, tmax = gps_times.gps.min(), gps_times.gps.max()
# Create weights array for check of GPS data when user queries server
dt = opts.dt
counts, tbins = np.histogram(gps_times.gps, bins=int((tmax - tmin) / dt))
# Create Lock object to access variables on an individual thread
dataLock = threading.Lock()
# Thread handler: placeholder Thread object, replaced by a threading.Timer
# once create_app() runs.
yourThread = threading.Thread()
# Initiate app
app = create_app()
@app.route('/ECHO/lms/v1.0/pos/<float:query_time>', methods=['GET'])
def get_gps_pos(query_time):
    """Return the interpolated position at *query_time* as JSON.

    Responds with ``{'lat': ..., 'lon': ..., 'alt': ...}`` when
    *query_time* lies inside the span of collected GPS samples and its
    time bin contains data; otherwise returns the sentinel position
    ``{'lat': -1, 'lon': -1, 'alt': -1}``.
    """
    if np.logical_and(query_time >= gps_times.gps[0], query_time <= gps_times.gps[-1]):
        # BUGFIX: tbins holds the histogram *edges*, so it has
        # len(counts) + 1 entries; the nearest-edge index can therefore be
        # len(counts) (e.g. when query_time is closest to the last edge),
        # which used to raise IndexError. Clamp it into range.
        bin_idx = min(np.abs(tbins - query_time).argmin(), len(counts) - 1)
        if counts[bin_idx] > 0:
            # Return a dictionary of latitude, longitude, and altitude at query time
            lat, lon, alt = float(lati(query_time)), float(loni(query_time)), float(alti(query_time))
            pos = {'lat': lat, 'lon': lon, 'alt': alt}
            return jsonify(pos)
        else:
            # Bin with no GPS samples: report the sentinel position.
            pos = {'lat': -1, 'lon': -1, 'alt': -1}
            return jsonify(pos)
    else:
        # Query time outside the range of available GPS data.
        pos = {'lat': -1, 'lon': -1, 'alt': -1}
        return jsonify(pos)
# Run server app
app.run(debug=True, host=opts.host, port=5000)
|
dannyjacobsREPO_NAMEECHOPATH_START.@ECHO_extracted@ECHO-master@scripts@old@ECHO_server.py@.PATH_END.py
|
{
"filename": "required_by_vounit.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/units/required_by_vounit.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines SI prefixed units that are required by the VOUnit standard
but that are rarely used in practice and liable to lead to confusion (such as
``msolMass`` for milli-solar mass). They are in a separate module from
`astropy.units.deprecated` because they need to be enabled by default for
`astropy.units` to parse compliant VOUnit strings. As a result, e.g.,
``Unit('msolMass')`` will just work, but to access the unit directly, use
``astropy.units.required_by_vounit.msolMass`` instead of the more typical idiom
possible for the non-prefixed unit, ``astropy.units.solMass``.
"""
_ns = globals()
def _initialize_module():
    """Register SI-prefixed variants of solMass, solRad and solLum in this
    module's namespace."""
    # Local imports to avoid polluting top-level namespace
    from . import astrophys
    from .core import _add_prefixes

    for base_unit in (astrophys.solMass, astrophys.solRad, astrophys.solLum):
        _add_prefixes(base_unit, namespace=_ns, prefixes=True)
_initialize_module()
###########################################################################
# DOCSTRING
# Append an auto-generated summary of the units defined above to this
# module's docstring. Guarded because __doc__ is None under ``python -OO``.
if __doc__ is not None:
    # This generates a docstring for this module that describes all of the
    # standard units defined here.
    from .utils import (
        generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary,
    )
    from .utils import generate_unit_summary as _generate_unit_summary

    __doc__ += _generate_unit_summary(globals())
    __doc__ += _generate_prefixonly_unit_summary(globals())
def _enable():
    """
    Enable the VOUnit-required extra units so they appear in results of
    `~astropy.units.UnitBase.find_equivalent_units` and
    `~astropy.units.UnitBase.compose`, and are recognized in the ``Unit('...')``
    idiom.
    """
    # Local imports to avoid a cyclical import and namespace pollution.
    import inspect

    from .core import add_enabled_units

    # Enable every unit defined in this module (looked up via the module
    # object that owns this function).
    return add_enabled_units(inspect.getmodule(_enable))


# Because these are VOUnit mandated units, they start enabled (which is why the
# function is hidden).
_enable()
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@units@required_by_vounit.py@.PATH_END.py
|
{
"filename": "_ordering.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/transition/_ordering.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OrderingValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``layout.transition.ordering`` enumerated
    property (either ``'layout first'`` or ``'traces first'``)."""

    def __init__(
        self, plotly_name="ordering", parent_name="layout.transition", **kwargs
    ):
        # Pull the overridable defaults out of kwargs before forwarding
        # the remainder to the base validator.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", ["layout first", "traces first"])
        super(OrderingValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@transition@_ordering.py@.PATH_END.py
|
{
"filename": "graphics_functional_fboxplot.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/docs/source/plots/graphics_functional_fboxplot.py",
"type": "Python"
}
|
"""
Created on Fri May 04 11:10:51 2012
Author: Ralf Gommers
"""
#Load the El Nino dataset. Consists of 60 years worth of Pacific Ocean sea
#surface temperature data.
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
data = sm.datasets.elnino.load()
#Create a functional boxplot. We see that the years 1982-83 and 1997-98 are
#outliers; these are the years where El Nino (a climate pattern
#characterized by warming up of the sea surface and higher air pressures)
#occurred with unusual intensity.
fig = plt.figure()
ax = fig.add_subplot(111)
res = sm.graphics.fboxplot(data.raw_data.iloc[:, 1:], wfactor=2.58,
labels=data.raw_data.iloc[:, 0].astype(int),
ax=ax)
ax.set_xlabel("Month of the year")
ax.set_ylabel("Sea surface temperature (C)")
ax.set_xticks(np.arange(13, step=3) - 1)
ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
ax.set_xlim([-0.2, 11.2])
#plt.show()
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@docs@source@plots@graphics_functional_fboxplot.py@.PATH_END.py
|
{
"filename": "isomap.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scikit-learn/py2/sklearn/manifold/isomap.py",
"type": "Python"
}
|
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
    """Isomap Embedding

    Non-linear dimensionality reduction through Isometric Mapping

    Read more in the :ref:`User Guide <isomap>`.

    Parameters
    ----------
    n_neighbors : integer
        number of neighbors to consider for each point.

    n_components : integer
        number of coordinates for the manifold

    eigen_solver : ['auto'|'arpack'|'dense']
        'auto' : Attempt to choose the most efficient solver
        for the given problem.

        'arpack' : Use Arnoldi decomposition to find the eigenvalues
        and eigenvectors.

        'dense' : Use a direct solver (i.e. LAPACK)
        for the eigenvalue decomposition.

    tol : float
        Convergence tolerance passed to arpack or lobpcg.
        not used if eigen_solver == 'dense'.

    max_iter : integer
        Maximum number of iterations for the arpack solver.
        not used if eigen_solver == 'dense'.

    path_method : string ['auto'|'FW'|'D']
        Method to use in finding shortest path.

        'auto' : attempt to choose the best algorithm automatically.

        'FW' : Floyd-Warshall algorithm.

        'D' : Dijkstra's algorithm.

    neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
        Algorithm to use for nearest neighbors search,
        passed to neighbors.NearestNeighbors instance.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.

    kernel_pca_ : object
        `KernelPCA` object used to implement the embedding.

    training_data_ : array-like, shape (n_samples, n_features)
        Stores the training data.

    nbrs_ : sklearn.neighbors.NearestNeighbors instance
        Stores nearest neighbors instance, including BallTree or KDtree
        if applicable.

    dist_matrix_ : array-like, shape (n_samples, n_samples)
        Stores the geodesic distance matrix of training data.

    References
    ----------
    .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
        framework for nonlinear dimensionality reduction. Science 290 (5500)
    """

    def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
                 tol=0, max_iter=None, path_method='auto',
                 neighbors_algorithm='auto', n_jobs=1):
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.path_method = path_method
        self.neighbors_algorithm = neighbors_algorithm
        self.n_jobs = n_jobs

    def _fit_transform(self, X):
        # Fit the k-NN model, compute geodesic distances over the k-NN
        # graph, and embed via kernel PCA on the Isomap kernel.
        X = check_array(X)
        self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,
                                      algorithm=self.neighbors_algorithm,
                                      n_jobs=self.n_jobs)
        self.nbrs_.fit(X)
        # NOTE(review): relies on the private NearestNeighbors._fit_X
        # attribute to recover the training data.
        self.training_data_ = self.nbrs_._fit_X
        self.kernel_pca_ = KernelPCA(n_components=self.n_components,
                                     kernel="precomputed",
                                     eigen_solver=self.eigen_solver,
                                     tol=self.tol, max_iter=self.max_iter,
                                     n_jobs=self.n_jobs)
        # Weighted k-NN graph (edge weights = distances between neighbors).
        kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
                               mode='distance', n_jobs=self.n_jobs)
        # Geodesic distances = shortest paths through the undirected graph.
        self.dist_matrix_ = graph_shortest_path(kng,
                                                method=self.path_method,
                                                directed=False)
        # Isomap kernel (up to centering, which KernelPCA applies to the
        # precomputed kernel): G = -0.5 * D ** 2.
        G = self.dist_matrix_ ** 2
        G *= -0.5

        self.embedding_ = self.kernel_pca_.fit_transform(G)

    def reconstruction_error(self):
        """Compute the reconstruction error for the embedding.

        Returns
        -------
        reconstruction_error : float

        Notes
        -----
        The cost function of an isomap embedding is

        ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``

        Where D is the matrix of distances for the input data X,
        D_fit is the matrix of distances for the output embedding X_fit,
        and K is the isomap kernel:

        ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
        """
        G = -0.5 * self.dist_matrix_ ** 2
        G_center = KernelCenterer().fit_transform(G)
        # Eigenvalues of the fitted kernel PCA account for the captured
        # part of the (centered) kernel's Frobenius norm.
        evals = self.kernel_pca_.lambdas_
        return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]

    def fit(self, X, y=None):
        """Compute the embedding vectors for data X

        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
            Sample data, shape = (n_samples, n_features), in the form of a
            numpy array, precomputed tree, or NearestNeighbors
            object.

        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self._fit_transform(X)
        return self.embedding_

    def transform(self, X):
        """Transform X.

        This is implemented by linking the points X into the graph of geodesic
        distances of the training data. First the `n_neighbors` nearest
        neighbors of X are found in the training data, and from these the
        shortest geodesic distances from each point in X to each point in
        the training data are computed in order to construct the kernel.
        The embedding of X is the projection of this kernel onto the
        embedding vectors of the training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        X = check_array(X)
        distances, indices = self.nbrs_.kneighbors(X, return_distance=True)

        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        #
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory.  To avoid that, use a loop:
        G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
        for i in range(X.shape[0]):
            # Geodesic distance to each training point = min over the k
            # neighbors of (distance to neighbor + neighbor's geodesic
            # distance to that training point).
            G_X[i] = np.min(self.dist_matrix_[indices[i]] +
                            distances[i][:, None], 0)

        # Convert to the Isomap kernel before projecting with kernel PCA.
        G_X **= 2
        G_X *= -0.5

        return self.kernel_pca_.transform(G_X)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scikit-learn@py2@sklearn@manifold@isomap.py@.PATH_END.py
|
{
"filename": "run_tempo.py",
"repo_name": "Fermipy/fermipy",
"repo_path": "fermipy_extracted/fermipy-master/fermipy/scripts/run_tempo.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import sys
import argparse
import tempfile
import re
import shutil
import logging
from fermipy.batch import submit_jobs, add_lsf_args
from fermipy.utils import mkdir
from fermipy.logger import Logger
def getEntries(inFile):
    """Return the number of entries in the 'MeritTuple' tree of *inFile*.

    Parameters
    ----------
    inFile : str
        Path to a ROOT file containing a 'MeritTuple' TTree.

    Returns
    -------
    int
        Number of entries in the tree.
    """
    import ROOT
    FP = ROOT.TFile.Open(inFile)
    try:
        tree = FP.Get('MeritTuple')
        # GetEntries() is evaluated before the finally-block closes the
        # file, so the returned count is safe.
        return tree.GetEntries()
    finally:
        # BUGFIX: the original left the ROOT file open, leaking one file
        # handle per call (this is called once per merit file).
        FP.Close()
def skimMerit(inFile, outfilename, selection,
              nentries, firstentry, enableB=None, disableB=None):
    """Copy a slice of the 'MeritTuple' tree of *inFile* into a new ROOT
    file *outfilename* and return the number of events written.

    Parameters
    ----------
    inFile : str
        Input ROOT file containing a 'MeritTuple' tree.
    outfilename : str
        Output ROOT file to create (recreated if it exists).
    selection : str
        TTree selection expression; '' copies every event in the range.
    nentries, firstentry : int
        Entry range forwarded to TTree.CopyTree.
    enableB, disableB : list, optional
        Currently unused branch enable/disable lists (see the
        commented-out SetBranchStatus loops below).
    """
    import ROOT
    print('Preparing merit chunk from %s' % inFile)
    print('Opening input file %s' % inFile)
    oldFP = ROOT.TFile.Open(inFile)
    oldTree = oldFP.Get('MeritTuple')
    # Copy all branches except any pre-existing pulsar phase column, which
    # tempo2 will recompute.
    oldTree.SetBranchStatus('*', 1)
    oldTree.SetBranchStatus('Pulsar_Phase', 0)

    # for branch in enableB:
    #     oldTree.SetBranchStatus(branch, 1)
    # for branch in disableB:
    #     oldTree.SetBranchStatus(branch, 0)

    newFP = ROOT.TFile(outfilename, "recreate")
    newTree = oldTree.CopyTree(selection, "fast", nentries, firstentry)
    newTree.AutoSave()
    nevents = newTree.GetEntries()
    print('Skimmed events ', nevents)
    # Close the output before the input so the copied tree is flushed.
    newFP.Close()
    print('Closing output file %s' % outfilename)
    oldFP.Close()
    return nevents
def phase_ft1(ft1file, outfile, logFile, ft2file, ephemfile, dry_run=False):
    """Phase-fold an FT1 file in place with tempo2's 'fermi' plugin, then
    copy the phased file to *outfile*.

    When *dry_run* is true the commands are only printed, not executed.
    NOTE(review): *logFile* is accepted for signature symmetry with
    phase_merit but is unused here.
    """
    tempo_cmd = ('$TEMPO2ROOT/bin/tempo2  -gr fermi -ft1 %s '
                 ' -ft2 %s  -f %s -phase ' % (ft1file, ft2file, ephemfile))
    print(tempo_cmd)
    if not dry_run:
        os.system(tempo_cmd)

    copy_cmd = 'cp %s %s' % (ft1file, outfile)
    print(copy_cmd)
    if not dry_run:
        os.system(copy_cmd)
def phase_merit(meritFile, outfile, logFile, ft2file, ephemfile, dry_run=False):
    """Phase-fold a merit ROOT file with the tempo2 'root' plugin and write
    the merged, phased tree to *outfile*.

    The input tree is processed in chunks of ``nevent_chunk`` events (one
    tempo2 invocation per chunk); the phased chunk files are then merged
    through a TChain into a single output tree.

    NOTE(review): *dry_run* is accepted but never checked here, so the
    tempo2/mv commands always run -- confirm intent. The intermediate
    ``<name>_<firstEvent>.root`` chunk files are not cleaned up.
    """
    import ROOT
    nevent_chunk = 30000  # number of events to process per chunk
    mergeChain = ROOT.TChain('MeritTuple')
    skimmedEvents = getEntries(meritFile)
    for firstEvent in range(0, skimmedEvents, nevent_chunk):
        filename = os.path.splitext(os.path.basename(meritFile))[0]
        meritChunk = filename + '_%s.root' % firstEvent
        nevts = skimMerit(meritFile, meritChunk,
                          '', nevent_chunk, firstEvent)
        cmd = 'tempo2 -gr root -inFile %s -ft2 %s -f %s -graph 0 -nobs 32000 -npsr 1 -addFriend -phase' % (
            meritChunk, ft2file, ephemfile)
        print(cmd)
        # Append both stdout and stderr of tempo2 to the per-file log.
        os.system(cmd + ' >> %s 2>> %s' % (logFile, logFile))
        mergeChain.Add(meritChunk)

    # Merge the phased chunks into one tree and move it to the destination.
    mergeFile = ROOT.TFile('merged.root', 'RECREATE')
    if mergeChain.GetEntries() > 0:
        mergeChain.CopyTree('')
    mergeFile.Write()
    print('merged events %s' % mergeChain.GetEntries())
    mergeFile.Close()
    os.system('mv merged.root %s' % (outfile))
def main():
    """Command-line entry point: phase-fold one or more FT1/merit files
    with tempo2, either locally or by dispatching LSF batch jobs.

    Files ending in .root (merit) are routed to phase_merit(); files
    ending in .fits (FT1) to phase_ft1(). Work is staged in a per-user
    /scratch temp directory that is removed on completion.
    """
    usage = "usage: %(prog)s [options] "
    description = "Run tempo2 application on one or more FT1 files."
    parser = argparse.ArgumentParser(usage=usage, description=description)

    add_lsf_args(parser)
    parser.add_argument('--par_file', default=None, type=str, required=True,
                        help='Ephemeris file')
    parser.add_argument('--scfile', default=None, type=str, required=True,
                        help='FT2 file')
    parser.add_argument('--outdir', default=None, type=str, help='')
    parser.add_argument('--phase_colname', default='PULSE_PHASE',
                        type=str, help='Set the name of the phase column.')
    parser.add_argument('--dry_run', default=False, action='store_true')
    parser.add_argument('--overwrite', default=False, action='store_true')
    parser.add_argument('files', nargs='+', default=None,
                        help='List of directories in which the analysis will '
                        'be run.')

    args = parser.parse_args()

    # Each output lands next to its input unless --outdir is given.
    if args.outdir is None:
        outdirs = [os.path.dirname(os.path.abspath(x)) for x in args.files]
    else:
        outdir = os.path.abspath(args.outdir)
        mkdir(args.outdir)
        outdirs = [outdir for x in args.files]

    input_files = [[os.path.abspath(x)] for x in args.files]
    output_files = [os.path.join(y, os.path.basename(x))
                    for x, y in zip(args.files, outdirs)]

    # Batch mode: resubmit one job per input file via LSF and exit.
    if args.batch:

        opts = vars(args).copy()
        del opts['files']
        del opts['batch']

        submit_jobs('fermipy-run-tempo',  # 'python ' + os.path.abspath(__file__.rstrip('cd')),
                    input_files, opts, output_files, overwrite=args.overwrite,
                    dry_run=args.dry_run)

        # batch_opts = {'W' : args.time, 'R' : args.resources,
        #               'oo' : 'batch.log' }
        # args.batch=False
        # for infile, outfile in zip(input_files,output_files):
        #
        #     if os.path.isfile(outfile) and not args.overwrite:
        #         print('Output file exists, skipping.',outfile)
        #         continue
        #
        #     batch_opts['oo'] = os.path.join(outdir,
        #                                     os.path.splitext(outfile)[0] +
        #                                     '_tempo2.log')
        #     dispatch_jobs('python ' + os.path.abspath(__file__.rstrip('cd')),
        #                   [infile], args, batch_opts, dry_run=args.dry_run)
        sys.exit(0)

    logger = Logger.get(__file__, None, logging.INFO)

    par_file = os.path.abspath(args.par_file)
    ft2_file = os.path.abspath(args.scfile)

    cwd = os.getcwd()
    user = os.environ['USER']
    # Stage all work in a fresh per-user scratch directory.
    tmpdir = tempfile.mkdtemp(prefix=user + '.', dir='/scratch')

    logger.info('tmpdir %s', tmpdir)
    os.chdir(tmpdir)

    for infiles, outfile in zip(input_files, output_files):

        infile = infiles[0]
        staged_infile = os.path.join(tmpdir, os.path.basename(infile))
        logFile = os.path.splitext(infile)[0] + '_tempo2.log'

        print('cp %s %s' % (infile, staged_infile))
        os.system('cp %s %s' % (infile, staged_infile))

        # Dispatch on the file extension.
        # NOTE(review): the optional trailing '?' makes '\.root?' also
        # match '.roo' (and '\.fits?' match '.fit'); patterns should also
        # be raw strings to avoid invalid-escape warnings.
        if not re.search('\.root?', infile) is None:
            phase_merit(staged_infile, outfile, logFile,
                        ft2_file, par_file, args.dry_run)
        elif not re.search('\.fits?', infile) is None:
            phase_ft1(staged_infile, outfile, logFile,
                      ft2_file, par_file, args.dry_run)
        else:
            print('Unrecognized file extension: ', infile)

    os.chdir(cwd)
    shutil.rmtree(tmpdir)


if __name__ == "__main__":
    main()
|
FermipyREPO_NAMEfermipyPATH_START.@fermipy_extracted@fermipy-master@fermipy@scripts@run_tempo.py@.PATH_END.py
|
{
"filename": "_scons_subprocess.py",
"repo_name": "duvall3/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/compat/_scons_subprocess.py",
"type": "Python"
}
|
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines two shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the childs point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() will raise CalledProcessError, if the called process
returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional stdin argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen(cmd, mode='r', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen(cmd, mode='w', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
Replacing popen2.*
------------------
Note: If the cmd argument to popen2 functions is a string, the command
is executed through /bin/sh. If it is a list, the command is directly
executed.
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen3.Popen4 basically works as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import string
import types
import traceback
# Exception classes used by this module.
class CalledProcessError(Exception):
    """This exception is raised when a process run by check_call() returns
    a non-zero exit status.  The exit status will be stored in the
    returncode attribute."""

    def __init__(self, returncode, cmd):
        # Keep both pieces of context so callers can report what failed.
        self.returncode, self.cmd = returncode, cmd

    def __str__(self):
        template = "Command '%s' returned non-zero exit status %d"
        return template % (self.cmd, self.returncode)
# Platform-specific imports and fallbacks. On Windows we need msvcrt plus
# either pywin32 or the _subprocess driver; on POSIX we need select/fcntl
# and pickle (to ship exec failures from child to parent).
if mswindows:
    try:
        import threading
    except ImportError:
        # SCons: the threading module is only used by the communicate()
        # method, which we don't actually use, so don't worry if we
        # can't import it.
        pass
    import msvcrt
    if 0: # <-- change this to use pywin32 instead of the _subprocess driver
        import pywintypes
        from win32api import GetStdHandle, STD_INPUT_HANDLE, \
             STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
        from win32api import GetCurrentProcess, DuplicateHandle, \
             GetModuleFileName, GetVersion
        from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
        from win32pipe import CreatePipe
        from win32process import CreateProcess, STARTUPINFO, \
             GetExitCodeProcess, STARTF_USESTDHANDLES, \
             STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
        from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
    else:
        # SCons: don't die on Python versions that don't have _subprocess.
        try:
            from _subprocess import *
        except ImportError:
            pass
        # Minimal stand-ins so the rest of the module can reference these
        # names even when _subprocess did not provide them.
        class STARTUPINFO:
            dwFlags = 0
            hStdInput = None
            hStdOutput = None
            hStdError = None
            wShowWindow = 0
        class pywintypes:
            error = IOError
else:
    import select
    import errno
    import fcntl
    import pickle

    # Very old Pythons lack these fcntl constants; fall back to the
    # standard POSIX values.
    try:
        fcntl.F_GETFD
    except AttributeError:
        fcntl.F_GETFD = 1
    try:
        fcntl.F_SETFD
    except AttributeError:
        fcntl.F_SETFD = 2
# Public API of this back-port.
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]

# Upper bound on file descriptors to close in _close_fds(); fall back to a
# conservative 256 when sysconf is unavailable.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except KeyboardInterrupt:
    raise       # SCons: don't swallow keyboard interrupts
except:
    MAXFD = 256

# True/False does not exist on 2.2.0
# (Python-2-only: assigning to False/True is a SyntaxError on Python 3.)
try:
    False
except NameError:
    False = 0
    True = 1

# Pre-2.2 interpreters cannot isinstance() against the int type object;
# provide type()-based fallbacks. (The 1L literal is Python-2-only.)
try:
    isinstance(1, int)
except TypeError:
    def is_int(obj):
        return type(obj) == type(1)
    def is_int_or_long(obj):
        return type(obj) in (type(1), type(1L))
else:
    def is_int(obj):
        return isinstance(obj, int)
    def is_int_or_long(obj):
        return isinstance(obj, (int, long))

# String-type detection that tolerates builds without unicode and very old
# versions of the types module.
try:
    types.StringTypes
except AttributeError:
    try:
        types.StringTypes = (types.StringType, types.UnicodeType)
    except AttributeError:
        types.StringTypes = (types.StringType,)
    def is_string(obj):
        return type(obj) in types.StringTypes
else:
    def is_string(obj):
        return isinstance(obj, types.StringTypes)

# Popen instances that were garbage-collected while their child was still
# running; _cleanup() reaps them on the next Popen construction.
_active = []

def _cleanup():
    # Poll every lingering instance and drop those whose child has exited.
    for inst in _active[:]:
        if inst.poll(_deadstate=sys.maxint) >= 0:
            try:
                _active.remove(inst)
            except ValueError:
                # This can happen if two threads create a new Popen instance.
                # It's harmless that it was already removed, so ignore.
                pass

# Sentinel values for the stdin/stdout/stderr arguments of Popen.
PIPE = -1
STDOUT = -2
def call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete, then
    return the returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    retcode = call(["ls", "-l"])
    """
    # NOTE(review): apply() is a Python-2-only builtin, consistent with the
    # rest of this back-port (which targets pre-2.4 interpreters).
    return apply(Popen, popenargs, kwargs).wait()
def check_call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete.  If
    the exit code was zero then return, otherwise raise
    CalledProcessError.  The CalledProcessError object will have the
    return code in the returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    check_call(["ls", "-l"])
    """
    retcode = apply(call, popenargs, kwargs)
    # Recover the command for the error message: prefer an explicit
    # args= keyword, else the first positional argument.
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    if retcode:
        raise CalledProcessError(retcode, cmd)
    return retcode
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.

    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """
    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    result = []
    needquote = False
    for arg in seq:
        bs_buf = []

        # Add a space to separate this argument from the others
        if result:
            result.append(' ')

        # Quote arguments containing whitespace, and also EMPTY arguments:
        # an unquoted empty argument would otherwise vanish from the command
        # line entirely (fix backported from later CPython subprocess).
        needquote = (" " in arg) or ("\t" in arg) or not arg
        if needquote:
            result.append('"')

        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double the pending backslashes, then escape the quote
                # (rules 3 and 5).
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)

        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)

        if needquote:
            # Backslashes immediately before the closing quote must be
            # doubled (rule 5), hence the second extend.
            result.extend(bs_buf)
            result.append('"')

    # str.join is available on every interpreter this module supports and,
    # unlike string.join, still exists on Python 3.
    return ''.join(result)
# Pre-2.2 Pythons have no built-in object base class; define a stand-in so
# "class Popen(object)" below still parses and runs.
try:
    object
except NameError:
    class object:
        pass
class Popen(object):
    # SCons back-port of subprocess.Popen (Python 2.4 era): spawns one child
    # process per instance and wires up optional stdin/stdout/stderr pipes,
    # with a Windows-handle implementation and a POSIX fork/exec one.
    def __init__(self, args, bufsize=0, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=False, shell=False,
                 cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0):
        """Create new Popen instance."""
        _cleanup()

        self._child_created = False
        if not is_int_or_long(bufsize):
            raise TypeError("bufsize must be an integer")

        # Reject options that only make sense on the other platform.
        if mswindows:
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            if close_fds:
                raise ValueError("close_fds is not supported on Windows "
                                 "platforms")
        else:
            # POSIX
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")

        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines

        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.

        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite) = self._get_handles(stdin, stdout, stderr)

        self._execute_child(args, executable, preexec_fn, close_fds,
                            cwd, env, universal_newlines,
                            startupinfo, creationflags, shell,
                            p2cread, p2cwrite,
                            c2pread, c2pwrite,
                            errread, errwrite)

        # Wrap the parent-side pipe fds in file objects; 'rU' enables
        # universal-newline translation when requested.
        # NOTE(review): truthiness tests (if p2cwrite:) would misbehave if a
        # pipe end ever landed on fd 0 -- later CPython uses "is not None".
        if p2cwrite:
            self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
        if c2pread:
            if universal_newlines:
                self.stdout = os.fdopen(c2pread, 'rU', bufsize)
            else:
                self.stdout = os.fdopen(c2pread, 'rb', bufsize)
        if errread:
            if universal_newlines:
                self.stderr = os.fdopen(errread, 'rU', bufsize)
            else:
                self.stderr = os.fdopen(errread, 'rb', bufsize)


    def _translate_newlines(self, data):
        # Collapse \r\n and lone \r into \n (universal-newlines mode).
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        return data


    def __del__(self):
        if not self._child_created:
            # We didn't get to successfully create a child process.
            return
        # In case the child hasn't been waited on, check if it's done.
        self.poll(_deadstate=sys.maxint)
        if self.returncode is None and _active is not None:
            # Child is still running, keep us alive until we can wait on it.
            _active.append(self)


    def communicate(self, input=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.

        communicate() returns a tuple (stdout, stderr)."""

        # Optimization: If we are only using one pipe, or no pipe at
        # all, using select() or threads is unnecessary.
        if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
            stdout = None
            stderr = None
            if self.stdin:
                if input:
                    self.stdin.write(input)
                self.stdin.close()
            elif self.stdout:
                stdout = self.stdout.read()
            elif self.stderr:
                stderr = self.stderr.read()
            self.wait()
            return (stdout, stderr)

        return self._communicate(input)


    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tupel with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            if stdin is None and stdout is None and stderr is None:
                return (None, None, None, None, None, None)

            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin is None:
                p2cread = GetStdHandle(STD_INPUT_HANDLE)
            elif stdin == PIPE:
                p2cread, p2cwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                p2cwrite = p2cwrite.Detach()
                p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
            elif is_int(stdin):
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)

            if stdout is None:
                c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
            elif stdout == PIPE:
                c2pread, c2pwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                c2pread = c2pread.Detach()
                c2pread = msvcrt.open_osfhandle(c2pread, 0)
            elif is_int(stdout):
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)

            if stderr is None:
                errwrite = GetStdHandle(STD_ERROR_HANDLE)
            elif stderr == PIPE:
                errread, errwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                errread = errread.Detach()
                errread = msvcrt.open_osfhandle(errread, 0)
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif is_int(stderr):
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)


        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            return DuplicateHandle(GetCurrentProcess(), handle,
                                   GetCurrentProcess(), 0, 1,
                                   DUPLICATE_SAME_ACCESS)


        def _find_w9xpopen(self):
            """Find and return absolut path to w9xpopen.exe"""
            w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
                                    "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen


        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (MS Windows version)"""

            if not isinstance(args, types.StringTypes):
                args = list2cmdline(args)

            # Process startup details
            if startupinfo is None:
                startupinfo = STARTUPINFO()
            if None not in (p2cread, c2pwrite, errwrite):
                startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = comspec + " /c " + args
                if (GetVersion() >= 0x80000000L or
                        os.path.basename(comspec).lower() == "command.com"):
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C wont
                    # kill children.
                    creationflags = creationflags | CREATE_NEW_CONSOLE

            # Start the process
            try:
                hp, ht, pid, tid = CreateProcess(executable, args,
                                                 # no special security
                                                 None, None,
                                                 # must inherit handles to pass std
                                                 # handles
                                                 1,
                                                 creationflags,
                                                 env,
                                                 cwd,
                                                 startupinfo)
            except pywintypes.error, e:
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or simliar), but
                # how can this be done from Python?
                raise apply(WindowsError, e.args)

            # Retain the process handle, but close the thread handle
            self._child_created = True
            self._handle = hp
            self.pid = pid
            ht.Close()

            # Child is launched. Close the parent's copy of those pipe
            # handles that only the child should have open.  You need
            # to make sure that no handles to the write end of the
            # output pipe are maintained in this process or else the
            # pipe will not close when the child process exits and the
            # ReadFile will hang.
            if p2cread is not None:
                p2cread.Close()
            if c2pwrite is not None:
                c2pwrite.Close()
            if errwrite is not None:
                errwrite.Close()


        def poll(self, _deadstate=None):
            """Check if child process has terminated.  Returns returncode
            attribute."""
            if self.returncode is None:
                # Zero timeout: probe without blocking.
                if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                    self.returncode = GetExitCodeProcess(self._handle)
            return self.returncode


        def wait(self):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is None:
                obj = WaitForSingleObject(self._handle, INFINITE)
                self.returncode = GetExitCodeProcess(self._handle)
            return self.returncode


        def _readerthread(self, fh, buffer):
            # Drain a pipe to EOF from a helper thread (see _communicate).
            buffer.append(fh.read())


        def _communicate(self, input):
            stdout = None # Return
            stderr = None # Return

            # One reader thread per output pipe avoids deadlocking when the
            # child fills one pipe while we block on the other.
            if self.stdout:
                stdout = []
                stdout_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stdout, stdout))
                stdout_thread.setDaemon(True)
                stdout_thread.start()
            if self.stderr:
                stderr = []
                stderr_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stderr, stderr))
                stderr_thread.setDaemon(True)
                stderr_thread.start()

            if self.stdin:
                if input is not None:
                    self.stdin.write(input)
                self.stdin.close()

            if self.stdout:
                stdout_thread.join()
            if self.stderr:
                stderr_thread.join()

            # All data exchanged.  Translate lists into strings.
            if stdout is not None:
                stdout = stdout[0]
            if stderr is not None:
                stderr = stderr[0]

            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(file, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)

            self.wait()
            return (stdout, stderr)

    else:
        #
        # POSIX methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tupel with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin is None:
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = os.pipe()
            elif is_int(stdin):
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()

            if stdout is None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = os.pipe()
            elif is_int(stdout):
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()

            if stderr is None:
                pass
            elif stderr == PIPE:
                errread, errwrite = os.pipe()
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif is_int(stderr):
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)


        def _set_cloexec_flag(self, fd):
            # Mark fd close-on-exec so it vanishes in the child after exec;
            # fall back to the POSIX value 1 when FD_CLOEXEC is missing.
            try:
                cloexec_flag = fcntl.FD_CLOEXEC
            except AttributeError:
                cloexec_flag = 1

            old = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)


        def _close_fds(self, but):
            # Close every fd above stderr except 'but' (the error pipe).
            for i in xrange(3, MAXFD):
                if i == but:
                    continue
                try:
                    os.close(i)
                except KeyboardInterrupt:
                    raise       # SCons: don't swallow keyboard interrupts
                except:
                    pass


        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (POSIX version)"""

            if is_string(args):
                args = [args]

            if shell:
                args = ["/bin/sh", "-c"] + args

            if executable is None:
                executable = args[0]

            # For transferring possible exec failure from child to parent
            # The first char specifies the exception type: 0 means
            # OSError, 1 means some other error.
            errpipe_read, errpipe_write = os.pipe()
            self._set_cloexec_flag(errpipe_write)

            self.pid = os.fork()
            self._child_created = True
            if self.pid == 0:
                # Child
                try:
                    # Close parent's pipe ends
                    if p2cwrite:
                        os.close(p2cwrite)
                    if c2pread:
                        os.close(c2pread)
                    if errread:
                        os.close(errread)
                    os.close(errpipe_read)

                    # Dup fds for child
                    if p2cread:
                        os.dup2(p2cread, 0)
                    if c2pwrite:
                        os.dup2(c2pwrite, 1)
                    if errwrite:
                        os.dup2(errwrite, 2)

                    # Close pipe fds.  Make sure we don't close the same
                    # fd more than once, or standard fds.
                    try:
                        set
                    except NameError:
                        # Fall-back for earlier Python versions, so epydoc
                        # can use this module directly to execute things.
                        if p2cread:
                            os.close(p2cread)
                        if c2pwrite and c2pwrite not in (p2cread,):
                            os.close(c2pwrite)
                        if errwrite and errwrite not in (p2cread, c2pwrite):
                            os.close(errwrite)
                    else:
                        for fd in set((p2cread, c2pwrite, errwrite))-set((0,1,2)):
                            if fd: os.close(fd)

                    # Close all other fds, if asked for
                    if close_fds:
                        self._close_fds(but=errpipe_write)

                    if cwd is not None:
                        os.chdir(cwd)

                    if preexec_fn:
                        apply(preexec_fn)

                    if env is None:
                        os.execvp(executable, args)
                    else:
                        os.execvpe(executable, args, env)

                except KeyboardInterrupt:
                    raise       # SCons: don't swallow keyboard interrupts

                except:
                    # Ship the exception (with traceback text attached) back
                    # to the parent through the close-on-exec error pipe.
                    exc_type, exc_value, tb = sys.exc_info()
                    # Save the traceback and attach it to the exception object
                    exc_lines = traceback.format_exception(exc_type,
                                                           exc_value,
                                                           tb)
                    exc_value.child_traceback = string.join(exc_lines, '')
                    os.write(errpipe_write, pickle.dumps(exc_value))

                # This exitcode won't be reported to applications, so it
                # really doesn't matter what we return.
                os._exit(255)

            # Parent
            os.close(errpipe_write)
            if p2cread and p2cwrite:
                os.close(p2cread)
            if c2pwrite and c2pread:
                os.close(c2pwrite)
            if errwrite and errread:
                os.close(errwrite)

            # Wait for exec to fail or succeed; possibly raising exception
            data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
            os.close(errpipe_read)
            if data != "":
                # Exec failed: reap the dead child and re-raise its exception
                # in the parent.
                os.waitpid(self.pid, 0)
                child_exception = pickle.loads(data)
                raise child_exception


        def _handle_exitstatus(self, sts):
            # Decode a waitpid() status: negative signal number on signal
            # death, plain exit code otherwise.
            if os.WIFSIGNALED(sts):
                self.returncode = -os.WTERMSIG(sts)
            elif os.WIFEXITED(sts):
                self.returncode = os.WEXITSTATUS(sts)
            else:
                # Should never happen
                raise RuntimeError("Unknown child exit status!")


        def poll(self, _deadstate=None):
            """Check if child process has terminated.  Returns returncode
            attribute."""
            if self.returncode is None:
                try:
                    pid, sts = os.waitpid(self.pid, os.WNOHANG)
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
                except os.error:
                    # Child already reaped elsewhere (or interpreter is
                    # shutting down): report the caller-supplied dead state.
                    if _deadstate is not None:
                        self.returncode = _deadstate
            return self.returncode


        def wait(self):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is None:
                pid, sts = os.waitpid(self.pid, 0)
                self._handle_exitstatus(sts)
            return self.returncode


        def _communicate(self, input):
            read_set = []
            write_set = []
            stdout = None # Return
            stderr = None # Return

            if self.stdin:
                # Flush stdio buffer.  This might block, if the user has
                # been writing to .stdin in an uncontrolled fashion.
                self.stdin.flush()
                if input:
                    write_set.append(self.stdin)
                else:
                    self.stdin.close()
            if self.stdout:
                read_set.append(self.stdout)
                stdout = []
            if self.stderr:
                read_set.append(self.stderr)
                stderr = []

            input_offset = 0
            while read_set or write_set:
                rlist, wlist, xlist = select.select(read_set, write_set, [])

                if self.stdin in wlist:
                    # When select has indicated that the file is writable,
                    # we can write up to PIPE_BUF bytes without risk
                    # blocking.  POSIX defines PIPE_BUF >= 512
                    bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
                    input_offset = input_offset + bytes_written
                    if input_offset >= len(input):
                        self.stdin.close()
                        write_set.remove(self.stdin)

                if self.stdout in rlist:
                    data = os.read(self.stdout.fileno(), 1024)
                    if data == "":
                        self.stdout.close()
                        read_set.remove(self.stdout)
                    stdout.append(data)

                if self.stderr in rlist:
                    data = os.read(self.stderr.fileno(), 1024)
                    if data == "":
                        self.stderr.close()
                        read_set.remove(self.stderr)
                    stderr.append(data)

            # All data exchanged.  Translate lists into strings.
            if stdout is not None:
                stdout = string.join(stdout, '')
            if stderr is not None:
                stderr = string.join(stderr, '')

            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(file, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)

            self.wait()
            return (stdout, stderr)
def _demo_posix():
    # Smoke-test/demo of the POSIX code paths (Python 2 print statements).
    #
    # Example 1: Simple redirection: Get process list
    #
    plist = Popen(["ps"], stdout=PIPE).communicate()[0]
    print "Process list:"
    print plist

    #
    # Example 2: Change uid before executing child
    #
    if os.getuid() == 0:
        p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
        p.wait()

    #
    # Example 3: Connecting several subprocesses
    #
    print "Looking for 'hda'..."
    p1 = Popen(["dmesg"], stdout=PIPE)
    p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])

    #
    # Example 4: Catch execution error
    #
    print
    print "Trying a weird file..."
    try:
        print Popen(["/this/path/does/not/exist"]).communicate()
    except OSError, e:
        if e.errno == errno.ENOENT:
            print "The file didn't exist.  I thought so..."
            print "Child traceback:"
            print e.child_traceback
        else:
            print "Error", e.errno
    else:
        sys.stderr.write( "Gosh.  No error.\n" )
def _demo_windows():
    # Smoke-test/demo of the Windows code paths (Python 2 print statements).
    #
    # Example 1: Connecting several subprocesses
    #
    print "Looking for 'PROMPT' in set output..."
    p1 = Popen("set", stdout=PIPE, shell=True)
    p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])

    #
    # Example 2: Simple execution of program
    #
    print "Executing calc..."
    p = Popen("calc")
    p.wait()
# Run the platform-appropriate demo when executed as a script.
if __name__ == "__main__":
    if mswindows:
        _demo_windows()
    else:
        _demo_posix()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
duvall3REPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@compat@_scons_subprocess.py@.PATH_END.py
|
{
"filename": "colormatch.py",
"repo_name": "alphaparrot/ExoPlaSim",
"repo_path": "ExoPlaSim_extracted/ExoPlaSim-master/exoplasim/colormatch.py",
"type": "Python"
}
|
import numpy as np
from scipy import interpolate
# Module-level cache of the CIE colour-matching functions, read once from
# the cmf.csv table shipped alongside this module (wavelength, x, y, z).
cie_wvl,cie_xx,cie_yy,cie_zz = np.loadtxt("/".join(__file__.split("/")[:-1])+"/cmf.csv",unpack=True,delimiter=',')

def _loadcie():
    # Re-read the colour-matching table from disk; the cie_* arrays above
    # are the cached copy the rest of the module actually uses.
    wvl,xx,yy,zz = np.loadtxt("/".join(__file__.split("/")[:-1])+"/cmf.csv",unpack=True,delimiter=',')
    return wvl,xx,yy,zz
def interpolant(wvl):
    '''Evaluate interpolated colour-matching functions on a wavelength grid.

    Parameters
    ----------
    wvl : array-like
        Array of N wavelengths in nanometers

    Returns
    -------
    (array-like,array-like,array-like)
        The x, y, and z colour-matching functions sampled on the portion of
        ``wvl`` that lies strictly inside the tabulated CIE wavelength range.
    '''
    grid = cie_wvl
    # One linear interpolator per colour-matching component.
    funcs = [interpolate.interp1d(grid, comp)
             for comp in (cie_xx, cie_yy, cie_zz)]
    # Clip to the tabulated range so interp1d never extrapolates.
    lo = np.where(wvl > np.amin(grid))[0][0]
    hi = np.where(wvl < np.amax(grid))[0][-1]
    window = wvl[lo:hi+1]
    return tuple(f(window) for f in funcs)
def makexyz(wvl,spec,interpolant=None):
    '''Convert a spectrum to XYZ colour coordinates.

    The XYZ colour coordinate system is related to how the human eye's colour-receptive cells respond to
    light. The XYZ coordinates are computed by convolving the spectrum with three different empirically-derived
    response functions. These coordinates can then be transformed into RGB colour coordinates. Note that in this
    system, (x,y,z) are brightness-normalized, while (X,Y,Z) are not. Additionally, z=1-x-y. Therefore, the
    three coordinates that are needed to produce an RGB tuple are not (x,y,z), but (x,y,Y).

    Parameters
    ----------
    wvl : array-like
        Wavelengths in nanometers, of shape (N,)
    spec : array-like
        Spectrum, of shape (N,); units are arbitrary, but it should be given in flux, not flux density.
    interpolant : array-like, optional
        2D array of shape (3,N), corresponding to interpolated color-matching functions
    Returns
    -------
    numpy.ndarray, numpy.ndarray, numpy.ndarray
        x,y,Y--z can be inferred from x and y (z = 1-x-y), but Y preserves intensity information.
    '''
    wvl = np.asarray(wvl)
    spec = np.asarray(spec)
    if np.amin(wvl) < 1.0e-3:  # probably meters, not nanometers
        # Rescale a *copy*. The original did ``wvl *= 1.0e9``, which silently
        # mutated the caller's array in place.
        wvl = wvl * 1.0e9
    w0 = cie_wvl
    xx = cie_xx
    yy = cie_yy
    zz = cie_zz
    # Restrict to the tabulated CIE wavelength range so the interpolators
    # never extrapolate.
    imin = np.where(wvl > np.amin(w0))[0][0]
    imax = np.where(wvl < np.amax(w0))[0][-1]
    wn = wvl[imin:imax+1]
    specn = spec[imin:imax+1]
    if interpolant is None:
        fx = interpolate.interp1d(w0, xx)
        fy = interpolate.interp1d(w0, yy)
        fz = interpolate.interp1d(w0, zz)
        xn = fx(wn)
        yn = fy(wn)
        zn = fz(wn)
    else:
        xn = interpolant[0]
        yn = interpolant[1]
        zn = interpolant[2]
    # Hoist the NaN mask: the original recomputed ~np.isnan(specn) nine times.
    good = ~np.isnan(specn)
    XI = np.trapz(xn[good]*specn[good], x=wn[good])
    YI = np.trapz(yn[good]*specn[good], x=wn[good])
    ZI = np.trapz(zn[good]*specn[good], x=wn[good])
    # Shift all tristimulus values up if any came out negative.
    xyzmin = np.amin((XI, YI, ZI))
    if xyzmin < 0:
        XI -= xyzmin
        YI -= xyzmin
        ZI -= xyzmin
    total = XI + YI + ZI
    if total > 0:
        xnu = XI/total
        ynu = YI/total
    else:
        # Degenerate (all-zero) spectrum: return black.
        xnu = 0
        ynu = 0
    return xnu, ynu, YI
def xyz2rgb(x,y,normalization,gamut="sRGB"):
    '''Convert (x,y) coordinates to RGB tuples, normalized to a given value.

    Note that z=1-x-y. This routine uses a wide gamut colourspace found at http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html.

    Parameters
    ----------
    x : array-like
        x colour-coordinate
    y : array-like
        y colour-coordinate.
    normalization : array-like or float
        Normalization factor for scaling RGB values
    gamut : str or np.ndarray(3,3), optional
        Color gamut to be used. For available built-in color gamuts, see colormatch.colorgamuts.

    Returns
    -------
    array-like, array-like, array-like
        R,G,B colour values.
    '''
    # isinstance guard is required: testing an ndarray for dict membership
    # (``gamut in colorgamuts``) raises TypeError (arrays are unhashable),
    # so custom-matrix gamuts could never reach the branch below.
    if isinstance(gamut, str) and gamut in colorgamuts:
        colorgamut = colorgamuts[gamut]
        if "%s_norm"%gamut in colorgamuts:
            extranorm = colorgamuts["%s_norm"%gamut]
        else:
            extranorm = 1.0
    else:
        # Accept a user-supplied 3x3 conversion matrix. The original compared
        # against np.array((3,3)).shape, which is (2,), so a genuine (3,3)
        # matrix always failed this check.
        if getattr(gamut, "shape", None) == (3, 3):
            colorgamut = gamut
            extranorm = 1.0
        else:
            raise Exception("Error: must specify valid RGB color gamut")
    z = 1-(x+y)
    # NOTE(review): each channel scales the *row sum* of the matrix by a
    # single chromaticity coordinate; a standard XYZ->RGB conversion would be
    # a matrix-vector product (r = M[0,0]*x + M[0,1]*y + M[0,2]*z, etc.).
    # Preserved as-is because the module's *_norm/*_purity constants are
    # derived from this behaviour at import time -- confirm intent before
    # changing. extranorm is likewise computed but (as in the original)
    # never applied to the returned values.
    r = np.sum(colorgamut[0,:]*x)*normalization
    g = np.sum(colorgamut[1,:]*y)*normalization
    b = np.sum(colorgamut[2,:]*z)*normalization
    return r,g,b
#From http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
#Assume whitepoint is D50 unless indicated otherwise
# Each entry is a 3x3 XYZ->RGB conversion matrix; rows are consumed as
# R, G, B respectively by xyz2rgb(). Keys suffixed "_D65" use that
# illuminant; derived "<name>_norm"/"<name>_purity" scalars are appended
# to this dict at import time by the loops further below.
colorgamuts = {"wide": np.array([[ 1.4628067, -0.1840623, -0.2743606],
                                 [-0.5217933,  1.4472381,  0.0677227],
                                 [ 0.0349342, -0.0968930,  1.2884099]]),
               "sRGB_D65": np.array([[ 3.2404542, -1.5371385, -0.4985314],
                                     [-0.9692660,  1.8760108,  0.0415560],
                                     [ 0.0556434, -0.2040259,  1.0572252]]),
               "sRGB": np.array([[ 3.1338561, -1.6168667, -0.4906146],
                                 [-0.9787684,  1.9161415,  0.0334540],
                                 [ 0.0719453, -0.2289914,  1.4052427]]),
               "CIE_E": np.array([[ 2.3706743, -0.9000405, -0.4706338],
                                  [-0.5138850,  1.4253036,  0.0885814],
                                  [ 0.0052982, -0.0146949,  1.0093968]]),
               "NTSC_C": np.array([[ 1.9099961, -0.5324542, -0.2882091],
                                   [-0.9846663,  1.9991710, -0.0283082],
                                   [ 0.0583056, -0.1183781,  0.8975535]]),
               "ProPhoto": np.array([[ 1.3459433, -0.2556075, -0.0511118],
                                     [-0.5445989,  1.5081673,  0.0205351],
                                     [ 0.0000000,  0.0000000,  1.2118128]]),
               "CIE": np.array([[ 2.3638081, -0.8676030, -0.4988161],
                                [-0.5005940,  1.3962369,  0.1047562],
                                [ 0.0141712, -0.0306400,  1.2323842]]),
               "Apple": np.array([[ 2.8510695, -1.3605261, -0.4708281],
                                  [-1.0927680,  2.0348871,  0.0227598],
                                  [ 0.1027403, -0.2964984,  1.4510659]]),
               "Adobe_D65": np.array([[ 2.0413690, -0.5649464, -0.3446944],
                                      [-0.9692660,  1.8760108,  0.0415560],
                                      [ 0.0134474, -0.1183897,  1.0154096]]),
               "Apple_D65": np.array([[ 2.9515373, -1.2894116, -0.4738445],
                                      [-1.0851093,  1.9908566,  0.0372026],
                                      [ 0.0854934, -0.2694964,  1.0912975]]),
               "Best": np.array([[ 1.6832270, -0.4282363, -0.2360185],
                                 [-0.7710229,  1.7065571,  0.0446900],
                                 [ 0.0400013, -0.0885376,  1.2723640]]),
               "Bruce_D65": np.array([[ 2.7454669, -1.1358136, -0.4350269],
                                      [-0.9692660,  1.8760108,  0.0415560],
                                      [ 0.0112723, -0.1139754,  1.0132541]]),
               "ColorMatch": np.array([[ 2.6422874, -1.2234270, -0.3930143],
                                       [-1.1119763,  2.0590183,  0.0159614],
                                       [ 0.0821699, -0.2807254,  1.4559877]]),
               "Don": np.array([[ 1.7603902, -0.4881198, -0.2536126],
                                [-0.7126288,  1.6527432,  0.0416715],
                                [ 0.0078207, -0.0347411,  1.2447743]]),
               "ECI": np.array([[ 1.7827618, -0.4969847, -0.2690101],
                                [-0.9593623,  1.9477962, -0.0275807],
                                [ 0.0859317, -0.1744674,  1.3228273]]),
               "Ekta-Space-PS5": np.array([[ 2.0043819, -0.7304844, -0.2450052],
                                           [-0.7110285,  1.6202126,  0.0792227],
                                           [ 0.0381263, -0.0868780,  1.2725438]]),
               "PAL/SECAM_D65": np.array([[ 3.0628971, -1.3931791, -0.4757517],
                                          [-0.9692660,  1.8760108,  0.0415560],
                                          [ 0.0678775, -0.2288548,  1.0693490]]),
               "SMPTE-C_D65": np.array([[ 3.5053960, -1.7394894, -0.5439640],
                                        [-1.0690722,  1.9778245,  0.0351722],
                                        [ 0.0563200, -0.1970226,  1.0502026]]),
               "Adobe": np.array([[ 1.9624274, -0.6105343, -0.3413404],
                                  [-0.9787684,  1.9161415,  0.0334540],
                                  [ 0.0286869, -0.1406752,  1.3487655]]),
               "Bruce": np.array([[ 2.6502856, -1.2014485, -0.4289936],
                                  [-0.9787684,  1.9161415,  0.0334540],
                                  [ 0.0264570, -0.1361227,  1.3458542]]),
               "NTSC": np.array([[ 1.8464881, -0.5521299, -0.2766458],
                                 [-0.9826630,  2.0044755, -0.0690396],
                                 [ 0.0736477, -0.1453020,  1.3018376]]),
               "PAL/SECAM": np.array([[ 2.9603944, -1.4678519, -0.4685105],
                                      [-0.9787684,  1.9161415,  0.0334540],
                                      [ 0.0844874, -0.2545973,  1.4216174]]),
               "SMPTE-C": np.array([[ 3.3921940, -1.8264027, -0.5385522],
                                    [-1.0770996,  2.0213975,  0.0207989],
                                    [ 0.0723073, -0.2217902,  1.3960932]])
               }
#XYZ components for each whitepoint illuminant, again from Bruce Lindbloom
illuminants = {"A"   :np.array([1.09850,1.00000,0.35585]),
               "B"   :np.array([0.99072,1.00000,0.85223]),
               "C"   :np.array([0.98074,1.00000,1.18232]),
               "D50" :np.array([0.96422,1.00000,0.82521]),
               "D55" :np.array([0.95682,1.00000,0.92149]),
               "D65" :np.array([0.95047,1.00000,1.08883]),
               "D75" :np.array([0.94972,1.00000,1.22638]),
               "E"   :np.array([1.00000,1.00000,1.00000]),
               "F2"  :np.array([0.99186,1.00000,0.67393]),
               "F7"  :np.array([0.95041,1.00000,1.08747]),
               "F11" :np.array([1.00962,1.00000,0.64350])}

def _chromaticity(xyz):
    """Project an XYZ whitepoint onto its (x, y) chromaticity coordinates."""
    total = np.sum(xyz)
    return np.array([xyz[0]/total, xyz[1]/total])

# (x, y) chromaticity coordinates of each illuminant. Replaces 22 lines of
# copy-pasted per-key arithmetic with one comprehension; values identical.
illuminantsxy = {name: _chromaticity(xyz) for name, xyz in illuminants.items()}
# Compute the internal normalization factor for each colorspace. The key list
# is snapshotted first because "<gamut>_norm" entries are added to colorgamuts
# while we iterate.
_gamuts = list(colorgamuts.keys())
for gamut in _gamuts:
    # Gamut names of the form "name_ILLUMINANT" carry their reference white in
    # the suffix; plain names default to the D50 illuminant.
    if "_" in gamut:
        il = gamut.split("_")[-1]
    else:
        il = "D50"
    # RGB of the illuminant's white point in this gamut (xyz2rgb presumably
    # returns linear RGB given chromaticity (x, y) and luminance Y -- confirm).
    white = np.array(xyz2rgb(illuminantsxy[il][0],illuminantsxy[il][1],1.0,gamut=gamut))
    extranorm = 1.0/white.max() #So the equal-power colour has a max RGB value of 1.0
    colorgamuts["%s_norm"%gamut] = extranorm
# Compute the purity of each colorspace--i.e. given the white illuminant, how
# white is it? Pure white is (1,1,1); purity is 1 minus the mean absolute
# deviation of the illuminant's RGB from that point.
for gamut in _gamuts:
    if "_" in gamut:
        il = gamut.split("_")[-1]
    else:
        il = "D50"
    # Same white-point conversion as above, recomputed for this loop.
    white = np.array(xyz2rgb(illuminantsxy[il][0],illuminantsxy[il][1],1.0,gamut=gamut))
    purity = 1.0 - np.mean(abs(1.0-white))
    colorgamuts["%s_purity"%gamut] = purity
def spec2rgb(wvl,spec,normalization=None,gamma=True,gamut="sRGB"):
    '''Convert a spectrum to an (R,G,B) tuple, with optional normalization.

    Parameters
    ----------
    wvl : array-like
        Array of length N containing wavelengths in nanometers
    spec : array-like
        Array of length N containing fluxes
    normalization : float, optional
        Maximum value. If not specified, defaults to 1.0.
    gamma : bool or float, optional
        If True, use the piecewise gamma-function defined for sRGB; otherwise if a float, use rgb^(1/gamma).
        If None, gamma=1.0 is used.
    gamut : str or np.ndarray(3,3)
        Color gamut to be used. For available built-in color gamuts, see colormatch.colorgamuts.

    Returns
    -------
    (float,float,float)
        (R,G,B).
    '''
    # Integrate the spectrum against the colour-matching functions to obtain
    # chromaticity (x, y) and intensity I.
    x,y,I = makexyz(wvl,spec)
    if normalization:
        norm = normalization
    else:
        norm = 1.0
    r,g,b = xyz2rgb(x,y,norm,gamut=gamut)
    colors = np.array((r,g,b))
    if gamma is True:
        # Standard sRGB companding (IEC 61966-2-1). The linear-segment mask
        # must be computed *before* any values are modified: previously the
        # ">=" test was re-evaluated on the already-scaled array, so linear
        # values in [0.0031308/12.92, 0.0031308) were gamma-encoded twice.
        linear = colors < 0.0031308
        colors[linear] = 12.92*colors[linear]
        colors[~linear] = 1.055*colors[~linear]**(1./2.4)-0.055
    elif gamma is not None:
        colors = colors**(1./gamma)
    r,g,b = colors
    return r,g,b
def specs2rgb(wvl,specs,gamma=True,gamut='sRGB'):
    '''Convert a set of spectra into RGB colour values, so that relative intensity is preserved.

    Parameters
    ----------
    wvl : array-like
        Wavelengths in nanometers. Must have shape (N,)
    specs : array-like
        Spectra (fluxes), of shape (M,N) where M is the number of spectra.
    gamma : bool or float, optional
        If True, use the piecewise gamma-function defined for sRGB; otherwise if a float, use rgb^(1/gamma).
        If None, gamma=1.0 is used.
    gamut : str or np.ndarray(3,3)
        Color gamut to be used. For available built-in color gamuts, see colormatch.colorgamuts.

    Returns
    -------
    array-like
        (M,3)-shape numpy array of R/G/B values
    '''
    # Build the wavelength interpolant once and reuse it for every spectrum.
    interpol = interpolant(wvl)
    intensities = np.zeros((len(specs),3))
    for n in range(0,len(specs)):
        intensities[n,:] = makexyz(wvl,specs[n,:],interpolant=interpol)
    # Scale each spectrum's intensity by the brightest one so that relative
    # brightness between spectra is preserved in the output colours.
    norms = intensities[:,2]/np.amax(intensities[:,2])
    colors = np.zeros((len(specs),3))
    for n in range(0,len(specs)):
        colors[n,:] = xyz2rgb(intensities[n,0],intensities[n,1],norms[n],gamut=gamut)
    if gamma is True:
        # Standard sRGB companding (IEC 61966-2-1). The linear-segment mask
        # must be computed *before* any values are modified: previously the
        # ">=" test was re-evaluated on the already-scaled array, so linear
        # values in [0.0031308/12.92, 0.0031308) were gamma-encoded twice.
        linear = colors < 0.0031308
        colors[linear] = 12.92*colors[linear]
        colors[~linear] = 1.055*colors[~linear]**(1./2.4)-0.055
    elif gamma is not None:
        colors = colors**(1./gamma)
    return colors
|
alphaparrotREPO_NAMEExoPlaSimPATH_START.@ExoPlaSim_extracted@ExoPlaSim-master@exoplasim@colormatch.py@.PATH_END.py
|
{
"filename": "_scattersmith.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/template/data/_scattersmith.py",
"type": "Python"
}
|
from plotly.graph_objs import Scattersmith
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@template@data@_scattersmith.py@.PATH_END.py
|
{
"filename": "trajectory.py",
"repo_name": "blackjax-devs/blackjax",
"repo_path": "blackjax_extracted/blackjax-main/blackjax/mcmc/trajectory.py",
"type": "Python"
}
|
# Copyright 2020- The Blackjax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Procedures to build trajectories for algorithms in the HMC family.
To propose a new state, algorithms in the HMC family generally proceed by
:cite:p:`betancourt2017conceptual`:
1. Sampling a trajectory starting from the initial point;
2. Sampling a new state from this sampled trajectory.
Step (1) ensures that the process is reversible and thus that detailed balance
is respected. The traditional implementation of HMC does not sample a
trajectory, but instead takes a fixed number of steps in the same direction and
flips the momentum of the last state.
We distinguish here between two different methods to sample trajectories: static
and dynamic sampling. In the static setting we sample trajectories with a fixed
number of steps, while in the dynamic setting the total number of steps is
determined by a dynamic termination criterion. Traditional HMC falls in the
former category, NUTS in the latter.
There are also two methods to sample proposals from these trajectories. In the
static setting we first build the trajectory and then sample a proposal from
this trajectory. In the progressive setting we update the proposal as the
trajectory is being sampled. While the former is faster, we risk saturating the
memory by keeping states that will subsequently be discarded.
"""
from typing import Callable, NamedTuple
import jax
import jax.numpy as jnp
from blackjax.mcmc.integrators import IntegratorState
from blackjax.mcmc.proposal import (
Proposal,
progressive_biased_sampling,
progressive_uniform_sampling,
proposal_generator,
)
from blackjax.types import ArrayTree, PRNGKey
class Trajectory(NamedTuple):
    """A section of Hamiltonian trajectory, summarized by its two endpoint
    states, the running sum of momenta over all its states, and its length."""

    leftmost_state: IntegratorState
    rightmost_state: IntegratorState
    momentum_sum: ArrayTree  # pytree sum of the momenta of every state in the trajectory
    num_states: int  # number of integrator states the trajectory contains
def append_to_trajectory(trajectory: Trajectory, state: IntegratorState) -> Trajectory:
    """Extend the trajectory by one state on its right-hand side."""
    updated_sum = jax.tree_util.tree_map(
        lambda total, new: jnp.add(total, new),
        trajectory.momentum_sum,
        state.momentum,
    )
    return Trajectory(
        leftmost_state=trajectory.leftmost_state,
        rightmost_state=state,
        momentum_sum=updated_sum,
        num_states=trajectory.num_states + 1,
    )
def reorder_trajectories(
    direction: int, trajectory: Trajectory, new_trajectory: Trajectory
) -> tuple[Trajectory, Trajectory]:
    """Return the two trajectories as a (left, right) pair.

    When integrating forward (``direction > 0``) the freshly built trajectory
    lies to the right of the current one; when integrating backward it lies to
    the left.
    """
    def forward(_):
        return (trajectory, new_trajectory)

    def backward(_):
        return (new_trajectory, trajectory)

    return jax.lax.cond(direction > 0, forward, backward, operand=None)
def merge_trajectories(left_trajectory: Trajectory, right_trajectory: Trajectory):
    """Concatenate two adjacent trajectories into a single one."""
    combined_momentum = jax.tree_util.tree_map(
        lambda left, right: jnp.add(left, right),
        left_trajectory.momentum_sum,
        right_trajectory.momentum_sum,
    )
    total_states = left_trajectory.num_states + right_trajectory.num_states
    return Trajectory(
        left_trajectory.leftmost_state,
        right_trajectory.rightmost_state,
        combined_momentum,
        total_states,
    )
# -------------------------------------------------------------------
# Integration
#
# Generating samples by choosing a direction and running the integrator
# several times along this direction. Distinct from sampling.
# -------------------------------------------------------------------
def static_integration(
    integrator: Callable,
    direction: int = 1,
) -> Callable:
    """Build a function that runs ``integrator`` for a fixed number of steps
    in a single direction."""

    def integrate(
        initial_state: IntegratorState, step_size, num_integration_steps
    ) -> IntegratorState:
        # Fold the travel direction into the (possibly pytree-valued) step
        # size once, outside the loop body.
        signed_step = jax.tree_util.tree_map(lambda s: direction * s, step_size)

        def body(_, carry):
            return integrator(carry, signed_step)

        return jax.lax.fori_loop(0, num_integration_steps, body, initial_state)

    return integrate
class DynamicIntegrationState(NamedTuple):
    """Carry of the progressive-integration while loop."""

    step: int  # number of integration steps taken so far
    proposal: Proposal  # proposal sampled from the trajectory built so far
    trajectory: Trajectory  # trajectory built so far
    termination_state: NamedTuple  # state of the dynamic termination criterion
def dynamic_progressive_integration(
    integrator: Callable,
    kinetic_energy: Callable,
    update_termination_state: Callable,
    is_criterion_met: Callable,
    divergence_threshold: float,
):
    """Integrate a trajectory and update the proposal sequentially in one direction
    until the termination criterion is met.

    Parameters
    ----------
    integrator
        The symplectic integrator used to integrate the hamiltonian trajectory.
    kinetic_energy
        Function to compute the current value of the kinetic energy.
    update_termination_state
        Updates the state of the termination mechanism.
    is_criterion_met
        Determines whether the termination criterion has been met.
    divergence_threshold
        Value of the difference of energy between two consecutive states above
        which we say a transition is divergent.

    Returns
    -------
    A function ``integrate`` that expands a trajectory in one direction and
    returns the sampled proposal, the new trajectory, the updated termination
    state, and booleans flagging divergence and termination.
    """
    _, generate_proposal = proposal_generator(hmc_energy(kinetic_energy))
    sample_proposal = progressive_uniform_sampling
    def integrate(
        rng_key: PRNGKey,
        initial_state: IntegratorState,
        direction: int,
        termination_state,
        max_num_steps: int,
        step_size,
        initial_energy,
    ):
        """Integrate the trajectory starting from `initial_state` and update the
        proposal sequentially (hence progressive) until the termination
        criterion is met (hence dynamic).

        Parameters
        ----------
        rng_key
            Key used by JAX's random number generator.
        initial_state
            The initial state from which we start expanding the trajectory.
        direction int in {-1, 1}
            The direction in which to expand the trajectory.
        termination_state
            The state that keeps track of the information needed for the
            termination criterion.
        max_num_steps
            The maximum number of integration steps. The expansion will stop
            when this number is reached if the termination criterion has not
            been met.
        step_size
            The step size of the symplectic integrator.
        initial_energy
            Initial energy H0 of the HMC step (not to confused with the initial
            energy of the subtree)
        """
        def do_keep_integrating(loop_state):
            """Decide whether we should continue integrating the trajectory"""
            integration_state, (is_diverging, has_terminated) = loop_state
            # Stop on: step budget exhausted, termination criterion met,
            # or a divergent transition.
            return (
                (integration_state.step < max_num_steps)
                & ~has_terminated
                & ~is_diverging
            )
        def add_one_state(loop_state):
            # Take one integrator step, fold the new state into the trajectory,
            # and progressively update the sampled proposal.
            integration_state, _ = loop_state
            step, proposal, trajectory, termination_state = integration_state
            # Per-step key derived deterministically from the loop counter.
            proposal_key = jax.random.fold_in(rng_key, step)
            new_state = integrator(trajectory.rightmost_state, direction * step_size)
            new_proposal = generate_proposal(initial_energy, new_state)
            # A transition is divergent when the energy error exceeds the threshold.
            is_diverging = -new_proposal.weight > divergence_threshold
            # At step 0, we always accept the proposal, since we
            # take one step to get the leftmost state of the tree.
            (new_trajectory, sampled_proposal) = jax.lax.cond(
                step == 0,
                lambda _: (
                    Trajectory(new_state, new_state, new_state.momentum, 1),
                    new_proposal,
                ),
                lambda _: (
                    append_to_trajectory(trajectory, new_state),
                    sample_proposal(proposal_key, proposal, new_proposal),
                ),
                operand=None,
            )
            new_termination_state = update_termination_state(
                termination_state, new_trajectory.momentum_sum, new_state.momentum, step
            )
            has_terminated = is_criterion_met(
                new_termination_state, new_trajectory.momentum_sum, new_state.momentum
            )
            new_integration_state = DynamicIntegrationState(
                step + 1,
                sampled_proposal,
                new_trajectory,
                new_termination_state,
            )
            return (new_integration_state, (is_diverging, has_terminated))
        # Placeholders give the while_loop carry its (fixed) pytree structure;
        # they are replaced on the first iteration (step == 0 branch above).
        proposal_placeholder = generate_proposal(initial_energy, initial_state)
        trajectory_placeholder = Trajectory(
            initial_state, initial_state, initial_state.momentum, 0
        )
        integration_state_placeholder = DynamicIntegrationState(
            0,
            proposal_placeholder,
            trajectory_placeholder,
            termination_state,
        )
        new_integration_state, (is_diverging, has_terminated) = jax.lax.while_loop(
            do_keep_integrating,
            add_one_state,
            (integration_state_placeholder, (False, False)),
        )
        _, proposal, trajectory, termination_state = new_integration_state
        # In the while_loop we always extend on the right most direction.
        # When integrating backward, swap the endpoints so that leftmost /
        # rightmost keep their physical meaning for the caller.
        new_trajectory = jax.lax.cond(
            direction > 0,
            lambda _: trajectory,
            lambda _: Trajectory(
                trajectory.rightmost_state,
                trajectory.leftmost_state,
                trajectory.momentum_sum,
                trajectory.num_states,
            ),
            operand=None,
        )
        return (
            proposal,
            new_trajectory,
            termination_state,
            is_diverging,
            has_terminated,
        )
    return integrate
def dynamic_recursive_integration(
    integrator: Callable,
    kinetic_energy: Callable,
    uturn_check_fn: Callable,
    divergence_threshold: float,
    use_robust_uturn_check: bool = False,
):
    """Integrate a trajectory and update the proposal recursively in Python
    until the termination criterion is met.

    This is the implementation of Algorithm 6 from :cite:p:`hoffman2014no` with
    multinomial sampling. The implemenation here is mostly for validating the
    progressive implementation to make sure the two are equivalent. The recursive
    implementation should not be used for actually sampling as it cannot be jitted and
    thus likely slow.

    Parameters
    ----------
    integrator
        The symplectic integrator used to integrate the hamiltonian trajectory.
    kinetic_energy
        Function to compute the current value of the kinetic energy.
    uturn_check_fn
        Determines whether the termination criterion has been met.
    divergence_threshold
        Value of the difference of energy between two consecutive states above which we
        say a transition is divergent.
    use_robust_uturn_check
        Bool to indicate whether to perform additional U turn check between two
        trajectory.
    """
    _, generate_proposal = proposal_generator(hmc_energy(kinetic_energy))
    sample_proposal = progressive_uniform_sampling
    def buildtree_integrate(
        rng_key: PRNGKey,
        initial_state: IntegratorState,
        direction: int,
        tree_depth: int,
        step_size,
        initial_energy: float,
    ):
        """Integrate the trajectory starting from `initial_state` and update
        the proposal recursively with tree doubling until the termination criterion is met.

        The function `buildtree_integrate` calls itself for tree_depth > 0, thus invokes
        the recursive scheme that builds a trajectory by doubling a binary tree.

        Parameters
        ----------
        rng_key
            Key used by JAX's random number generator.
        initial_state
            The initial state from which we start expanding the trajectory.
        direction int in {-1, 1}
            The direction in which to expand the trajectory.
        tree_depth
            The depth of the binary tree doubling.
        step_size
            The step size of the symplectic integrator.
        initial_energy
            Initial energy H0 of the HMC step (not to confused with the initial energy
            of the subtree)
        """
        if tree_depth == 0:
            # Base case - take one velocity_verlet step in the direction v.
            next_state = integrator(initial_state, direction * step_size)
            new_proposal = generate_proposal(initial_energy, next_state)
            # A transition is divergent when the energy error exceeds the threshold.
            is_diverging = -new_proposal.weight > divergence_threshold
            trajectory = Trajectory(next_state, next_state, next_state.momentum, 1)
            return (
                rng_key,
                new_proposal,
                trajectory,
                is_diverging,
                False,
            )
        else:
            # Recursive case - build the first half-tree at depth - 1 ...
            (
                rng_key,
                proposal,
                trajectory,
                is_diverging,
                is_turning,
            ) = buildtree_integrate(
                rng_key,
                initial_state,
                direction,
                tree_depth - 1,
                step_size,
                initial_energy,
            )
            # Note that is_diverging and is_turning is inplace updated
            if (not is_diverging) & (not is_turning):
                # ... then, if still valid, build the second half-tree starting
                # from the outermost state of the first in the same direction.
                start_state = jax.lax.cond(
                    direction > 0,
                    lambda _: trajectory.rightmost_state,
                    lambda _: trajectory.leftmost_state,
                    operand=None,
                )
                (
                    rng_key,
                    new_proposal,
                    new_trajectory,
                    is_diverging,
                    is_turning,
                ) = buildtree_integrate(
                    rng_key,
                    start_state,
                    direction,
                    tree_depth - 1,
                    step_size,
                    initial_energy,
                )
                left_trajectory, right_trajectory = reorder_trajectories(
                    direction, trajectory, new_trajectory
                )
                trajectory = merge_trajectories(left_trajectory, right_trajectory)
                if not is_turning:
                    # U-turn check on the merged trajectory as a whole.
                    is_turning = uturn_check_fn(
                        trajectory.leftmost_state.momentum,
                        trajectory.rightmost_state.momentum,
                        trajectory.momentum_sum,
                    )
                    if use_robust_uturn_check & (tree_depth - 1 > 0):
                        # Additional checks across the seam between the two
                        # subtrees, each extended by one neighbouring momentum.
                        momentum_sum_left = jax.tree_util.tree_map(
                            jnp.add,
                            left_trajectory.momentum_sum,
                            right_trajectory.leftmost_state.momentum,
                        )
                        is_turning_left = uturn_check_fn(
                            left_trajectory.leftmost_state.momentum,
                            right_trajectory.leftmost_state.momentum,
                            momentum_sum_left,
                        )
                        momentum_sum_right = jax.tree_util.tree_map(
                            jnp.add,
                            left_trajectory.rightmost_state.momentum,
                            right_trajectory.momentum_sum,
                        )
                        is_turning_right = uturn_check_fn(
                            left_trajectory.rightmost_state.momentum,
                            right_trajectory.rightmost_state.momentum,
                            momentum_sum_right,
                        )
                        is_turning = is_turning | is_turning_left | is_turning_right
                # Progressively sample a proposal from the two half-trees.
                rng_key, proposal_key = jax.random.split(rng_key)
                proposal = sample_proposal(proposal_key, proposal, new_proposal)
            return (
                rng_key,
                proposal,
                trajectory,
                is_diverging,
                is_turning,
            )
    return buildtree_integrate
# -------------------------------------------------------------------
# Sampling
#
# Sampling a trajectory by choosing a direction at random and integrating
# the trajectory in this direction. In the simplest case we perform one
# integration step, but can also perform several as is the case in the
# NUTS algorithm.
# -------------------------------------------------------------------
class DynamicExpansionState(NamedTuple):
    """Carry of the multiplicative-expansion while loop."""

    step: int  # number of doublings performed so far
    proposal: Proposal  # proposal sampled from the trajectory built so far
    trajectory: Trajectory  # full trajectory built so far
    termination_state: NamedTuple  # state of the dynamic termination criterion
def dynamic_multiplicative_expansion(
    trajectory_integrator: Callable,
    uturn_check_fn: Callable,
    max_num_expansions: int = 10,
    rate: int = 2,
) -> Callable:
    """Sample a trajectory and update the proposal sequentially
    until the termination criterion is met.

    The trajectory is sampled with the following procedure:
    1. Pick a direction at random;
    2. Integrate `num_step` steps in this direction;
    3. If the integration has stopped prematurely, do not update the proposal;
    4. Else if the trajectory is performing a U-turn, return current proposal;
    5. Else update proposal, `num_steps = num_steps ** rate` and repeat from (1).

    Parameters
    ----------
    trajectory_integrator
        A function that runs the symplectic integrators and returns a new proposal
        and the integrated trajectory.
    uturn_check_fn
        Function used to check the U-Turn criterion.
    max_num_expansions
        The maximum number of trajectory expansions until the proposal is returned.
    rate
        The rate of the geometrical expansion. Typically 2 in NUTS, this is why
        the literature often refers to "tree doubling".

    The step size used by the symplectic integrator is passed to the returned
    ``expand`` function, not to this factory.
    """
    proposal_sampler = progressive_biased_sampling
    def expand(
        rng_key: PRNGKey,
        initial_expansion_state: DynamicExpansionState,
        initial_energy: float,
        step_size: float,
    ):
        def do_keep_expanding(loop_state) -> bool:
            """Determine whether we need to keep expanding the trajectory."""
            expansion_state, (is_diverging, is_turning) = loop_state
            return (
                (expansion_state.step < max_num_expansions)
                & ~is_diverging
                & ~is_turning
            )
        def expand_once(loop_state):
            """Expand the current trajectory.

            At each step we draw a direction at random, build a subtrajectory
            starting from the leftmost or rightmost point of the current
            trajectory that is twice as long as the current trajectory.

            Once that is done, possibly update the current proposal with that of
            the subtrajectory.
            """
            expansion_state, _ = loop_state
            step, proposal, trajectory, termination_state = expansion_state
            # Per-step keys derived deterministically from the loop counter.
            subkey = jax.random.fold_in(rng_key, step)
            direction_key, trajectory_key, proposal_key = jax.random.split(subkey, 3)
            # create new subtrajectory that is twice as long as the current
            # trajectory.
            direction = jnp.where(jax.random.bernoulli(direction_key), 1, -1)
            start_state = jax.lax.cond(
                direction > 0,
                lambda _: trajectory.rightmost_state,
                lambda _: trajectory.leftmost_state,
                operand=None,
            )
            (
                new_proposal,
                new_trajectory,
                termination_state,
                is_diverging,
                is_turning_subtree,
            ) = trajectory_integrator(
                trajectory_key,
                start_state,
                direction,
                termination_state,
                rate**step,
                step_size,
                initial_energy,
            )
            # Update the proposal
            #
            # We do not accept proposals that come from diverging or turning
            # subtrajectories. However the definition of the acceptance probability is
            # such that the acceptance probability needs to be computed across the
            # entire trajectory.
            def update_sum_log_p_accept(inputs):
                # Keep the current proposal but fold in the rejected
                # subtrajectory's acceptance statistics.
                _, proposal, new_proposal = inputs
                return Proposal(
                    proposal.state,
                    proposal.energy,
                    proposal.weight,
                    jnp.logaddexp(
                        proposal.sum_log_p_accept, new_proposal.sum_log_p_accept
                    ),
                )
            updated_proposal = jax.lax.cond(
                is_diverging | is_turning_subtree,
                update_sum_log_p_accept,
                lambda x: proposal_sampler(*x),
                operand=(proposal_key, proposal, new_proposal),
            )
            # Is the full trajectory making a U-Turn?
            #
            # We first merge the subtrajectory that was just generated with the
            # trajectory and check the U-Turn criterior on the whole trajectory.
            left_trajectory, right_trajectory = reorder_trajectories(
                direction, trajectory, new_trajectory
            )
            merged_trajectory = merge_trajectories(left_trajectory, right_trajectory)
            is_turning = uturn_check_fn(
                merged_trajectory.leftmost_state.momentum,
                merged_trajectory.rightmost_state.momentum,
                merged_trajectory.momentum_sum,
            )
            new_state = DynamicExpansionState(
                step + 1, updated_proposal, merged_trajectory, termination_state
            )
            info = (is_diverging, is_turning_subtree | is_turning)
            return (new_state, info)
        expansion_state, (is_diverging, is_turning) = jax.lax.while_loop(
            do_keep_expanding,
            expand_once,
            (initial_expansion_state, (False, False)),
        )
        return expansion_state, (is_diverging, is_turning)
    return expand
def hmc_energy(kinetic_energy):
    """Return a function that computes the total Hamiltonian energy of a
    state: negative log density (potential) plus kinetic energy."""

    def energy(state):
        potential_energy = -state.logdensity
        kinetic = kinetic_energy(state.momentum, position=state.position)
        return potential_energy + kinetic

    return energy
|
blackjax-devsREPO_NAMEblackjaxPATH_START.@blackjax_extracted@blackjax-main@blackjax@mcmc@trajectory.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "deepskies/deeplenstronomy",
"repo_path": "deeplenstronomy_extracted/deeplenstronomy-master/exploded_setup_old/test/__init__.py",
"type": "Python"
}
|
deepskiesREPO_NAMEdeeplenstronomyPATH_START.@deeplenstronomy_extracted@deeplenstronomy-master@exploded_setup_old@test@__init__.py@.PATH_END.py
|
|
{
"filename": "equalization.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/preprocessing/image_preprocessing/equalization.py",
"type": "Python"
}
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.Equalization")
class Equalization(BaseImagePreprocessingLayer):
    """Preprocessing layer for histogram equalization on image channels.

    Histogram equalization is a technique to adjust image intensities to
    enhance contrast by effectively spreading out the most frequent
    intensity values. This layer applies equalization on a channel-wise
    basis, which can improve the visibility of details in images.

    This layer works with both grayscale and color images, performing
    equalization independently on each color channel. At inference time,
    the equalization is consistently applied.

    **Note:** This layer is safe to use inside a `tf.data` pipeline
    (independently of which backend you're using).

    Args:
        value_range: Optional list/tuple of 2 floats specifying the lower
            and upper limits of the input data values. Defaults to `[0, 255]`.
            If the input image has been scaled, use the appropriate range
            (e.g., `[0.0, 1.0]`). The equalization will be scaled to this
            range, and output values will be clipped accordingly.
        bins: Integer specifying the number of histogram bins to use for
            equalization. Defaults to 256, which is suitable for 8-bit images.
            Larger values can provide more granular intensity redistribution.

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format,
        or `(..., channels, height, width)`, in `"channels_first"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., target_height, target_width, channels)`,
        or `(..., channels, target_height, target_width)`,
        in `"channels_first"` format.

    Example:

    ```python
    # Create an equalization layer for standard 8-bit images
    equalizer = keras.layers.Equalization()

    # An image with uneven intensity distribution
    image = [...] # your input image

    # Apply histogram equalization
    equalized_image = equalizer(image)

    # For images with custom value range
    custom_equalizer = keras.layers.Equalization(
        value_range=[0.0, 1.0], # for normalized images
        bins=128 # fewer bins for more subtle equalization
    )
    custom_equalized = custom_equalizer(normalized_image)
    ```
    """
    def __init__(
        self, value_range=(0, 255), bins=256, data_format=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.bins = bins
        self._set_value_range(value_range)
        self.data_format = backend.standardize_data_format(data_format)
    def _set_value_range(self, value_range):
        """Validate `value_range` and store it sorted ascending.

        _VALUE_RANGE_VALIDATION_ERROR is presumably defined on the base
        preprocessing layer -- confirm.
        """
        if not isinstance(value_range, (tuple, list)):
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        if len(value_range) != 2:
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        # NOTE(review): sorted() returns a *list*, so later comparisons like
        # `value_range != (0, 255)` in _equalize_channel are always True
        # (list vs tuple). Harmless here because rescaling (0,255)->(0,255)
        # is the identity, but worth tightening.
        self.value_range = sorted(value_range)
    def _custom_histogram_fixed_width(self, values, value_range, nbins):
        """Histogram `values` into `nbins` equal-width bins over `value_range`."""
        values = self.backend.cast(values, "float32")
        value_min, value_max = value_range
        value_min = self.backend.cast(value_min, "float32")
        value_max = self.backend.cast(value_max, "float32")
        # Map each value to a bin index in [0, nbins - 1].
        scaled = (values - value_min) * (nbins - 1) / (value_max - value_min)
        indices = self.backend.cast(scaled, "int32")
        indices = self.backend.numpy.clip(indices, 0, nbins - 1)
        flat_indices = self.backend.numpy.reshape(indices, [-1])
        if backend.backend() == "jax":
            # for JAX bincount is never jittable because of output shape
            # so count each bin via an equality mask instead (O(nbins * N)).
            histogram = self.backend.numpy.zeros(nbins, dtype="int32")
            for i in range(nbins):
                matches = self.backend.cast(
                    self.backend.numpy.equal(flat_indices, i), "int32"
                )
                bin_count = self.backend.numpy.sum(matches)
                one_hot = self.backend.cast(
                    self.backend.numpy.arange(nbins) == i, "int32"
                )
                histogram = histogram + (bin_count * one_hot)
            return histogram
        else:
            # TensorFlow/PyTorch/NumPy implementation using bincount
            return self.backend.numpy.bincount(
                flat_indices,
                minlength=nbins,
            )
    def _scale_values(self, values, source_range, target_range):
        """Linearly map `values` from `source_range` into `target_range`."""
        source_min, source_max = source_range
        target_min, target_max = target_range
        scale = (target_max - target_min) / (source_max - source_min)
        offset = target_min - source_min * scale
        return values * scale + offset
    def _equalize_channel(self, channel, value_range):
        """Equalize one channel, working internally in the (0, 255) domain."""
        if value_range != (0, 255):
            channel = self._scale_values(channel, value_range, (0, 255))
        hist = self._custom_histogram_fixed_width(
            channel, value_range=(0, 255), nbins=self.bins
        )
        # A channel whose histogram occupies <= 1 bin is constant;
        # equalization would be a no-op (or ill-defined), so pass it through.
        nonzero_bins = self.backend.numpy.count_nonzero(hist)
        equalized = self.backend.numpy.where(
            nonzero_bins <= 1, channel, self._apply_equalization(channel, hist)
        )
        if value_range != (0, 255):
            equalized = self._scale_values(equalized, (0, 255), value_range)
        return equalized
    def _apply_equalization(self, channel, hist):
        """Apply the CDF-based equalization lookup to a single channel."""
        cdf = self.backend.numpy.cumsum(hist)
        # cdf_min is the CDF value at the first occupied bin.
        if self.backend.name == "jax":
            mask = cdf > 0
            first_nonzero_idx = self.backend.numpy.argmax(mask)
            cdf_min = self.backend.numpy.take(cdf, first_nonzero_idx)
        else:
            cdf_min = self.backend.numpy.take(
                cdf, self.backend.numpy.nonzero(cdf)[0][0]
            )
        # Guard against division by zero when all mass sits in the first
        # occupied bin (cdf[-1] == cdf_min).
        denominator = cdf[-1] - cdf_min
        denominator = self.backend.numpy.where(
            denominator == 0,
            self.backend.numpy.ones_like(1, dtype=denominator.dtype),
            denominator,
        )
        # Classic histogram-equalization transfer function, quantized to 0-255.
        lookup_table = ((cdf - cdf_min) * 255) / denominator
        lookup_table = self.backend.numpy.clip(
            self.backend.numpy.round(lookup_table), 0, 255
        )
        scaled_channel = (channel / 255.0) * (self.bins - 1)
        indices = self.backend.cast(
            self.backend.numpy.clip(scaled_channel, 0, self.bins - 1), "int32"
        )
        return self.backend.numpy.take(lookup_table, indices)
    def transform_images(self, images, transformations=None, **kwargs):
        """Equalize every channel of `images`, honoring `self.data_format`."""
        images = self.backend.cast(images, self.compute_dtype)
        if self.data_format == "channels_first":
            channels = []
            for i in range(self.backend.core.shape(images)[-3]):
                channel = images[..., i, :, :]
                equalized = self._equalize_channel(channel, self.value_range)
                channels.append(equalized)
            equalized_images = self.backend.numpy.stack(channels, axis=-3)
        else:
            channels = []
            for i in range(self.backend.core.shape(images)[-1]):
                channel = images[..., i]
                equalized = self._equalize_channel(channel, self.value_range)
                channels.append(equalized)
            equalized_images = self.backend.numpy.stack(channels, axis=-1)
        return self.backend.cast(equalized_images, self.compute_dtype)
    def compute_output_shape(self, input_shape):
        # Equalization never changes the image shape.
        return input_shape
    def compute_output_spec(self, inputs, **kwargs):
        return inputs
    def transform_bounding_boxes(self, bounding_boxes, **kwargs):
        # Pixel-intensity transform: geometry-carrying targets pass through.
        return bounding_boxes
    def transform_labels(self, labels, transformations=None, **kwargs):
        return labels
    def transform_segmentation_masks(
        self, segmentation_masks, transformations, **kwargs
    ):
        return segmentation_masks
    def get_config(self):
        """Serialize constructor arguments for layer reconstruction."""
        config = super().get_config()
        config.update({"bins": self.bins, "value_range": self.value_range})
        return config
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@preprocessing@image_preprocessing@equalization.py@.PATH_END.py
|
{
"filename": "HelloWorld.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/containerTests/corbaRefPersistenceTest/test/pythonImpl/HelloWorld.py",
"type": "Python"
}
|
# @(#) $Id: HelloWorld.py,v 1.1 2008/04/02 13:22:01 acaproni Exp $
#
# Copyright (C) 2001
# Associated Universities, Inc. Washington DC, USA.
#
# Produced for the ALMA project
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Library General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more
# details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 675 Massachusetts Ave, Cambridge, MA 02139, USA. Correspondence concerning
# ALMA should be addressed as follows:
#
# Internet email: alma-sw-admin@nrao.edu
# "@(#) $Id: HelloWorld.py,v 1.1 2008/04/02 13:22:01 acaproni Exp $"
#
# who when what
# -------- ---------- -------------------------------------------------------
# acaproni 2008/04/02 Created.
#------------------------------------------------------------------------------
#--CORBA STUBS-----------------------------------------------------------------
import CorbaRefTest__POA
import CorbaRefTest
#--ACS Imports-----------------------------------------------------------------
from Acspy.Servants.ContainerServices import ContainerServices
from Acspy.Servants.ComponentLifecycle import ComponentLifecycle
from Acspy.Servants.ACSComponent import ACSComponent
#--GLOBALS---------------------------------------------------------------------
#------------------------------------------------------------------------------
class HelloWorld(CorbaRefTest__POA.HelloWorld, #CORBA stubs for IDL interface
                 ACSComponent, #Base IDL interface
                 ContainerServices, #Developer niceties
                 ComponentLifecycle): #HLA stuff
    '''
    Simple component implementation provided as a reference for developers.
    '''
    def __init__(self):
        '''
        Just call superclass constructors here.
        '''
        ACSComponent.__init__(self)
        ContainerServices.__init__(self)
        return
    #------------------------------------------------------------------------------
    #--Override ComponentLifecycle methods-----------------------------------------
    #------------------------------------------------------------------------------
    def initialize(self):
        '''Lifecycle hook invoked by the container after construction.'''
        self.getLogger().logInfo("initialize called...")
    #------------------------------------------------------------------------------
    def cleanUp(self):
        '''Lifecycle hook invoked by the container before deactivation.'''
        self.getLogger().logInfo("cleanUp called...")
    #------------------------------------------------------------------------------
    #--Implementation of IDL methods-----------------------------------------------
    #------------------------------------------------------------------------------
    def displayMessage(self):
        '''Implementation of the IDL displayMessage() operation.'''
        self.getLogger().logInfo("displayMessage called...")
        # print() works under both Python 2 and 3; the statement form
        # (print "hello") was Python-2-only and is a SyntaxError on Python 3.
        print("hello")
#------------------------------------------------------------------------------
#--Main defined only for generic testing---------------------------------------
#------------------------------------------------------------------------------
if __name__ == "__main__":
    # print() form works under both Python 2 and 3; the original
    # `print "..."` statements were Python-2-only syntax.
    print("Creating an object")
    g = HelloWorld()
    print("Done...")
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@containerTests@corbaRefPersistenceTest@test@pythonImpl@HelloWorld.py@.PATH_END.py
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/image/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
    """Auto-generated validator for the ``image.name`` string property."""

    def __init__(self, plotly_name="name", parent_name="image", **kwargs):
        # Default the edit type to "style" unless the caller overrides it.
        kwargs.setdefault("edit_type", "style")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@image@_name.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "AWehrhahn/PyReduce",
"repo_path": "PyReduce_extracted/PyReduce-master/pyreduce/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Define Version
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
# add logger to console
import logging
import tqdm
# We need to use this to have logging messages handle properly with the progressbar
class TqdmLoggingHandler(logging.Handler):
    """Logging handler that emits records via tqdm.write so log messages
    do not break an active tqdm progress bar."""

    def __init__(self, level=logging.NOTSET):
        super().__init__(level)

    def emit(self, record):
        try:
            # Route the formatted record through tqdm so the progress bar
            # is redrawn cleanly below the message.
            tqdm.tqdm.write(self.format(record))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            # never swallow interpreter-shutdown signals
            raise
        except:
            self.handleError(record)
# Configure package-level logging: everything at DEBUG on the logger,
# INFO and above shown on the console via the tqdm-aware handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logging.captureWarnings(True)
console = TqdmLoggingHandler()
console.setLevel(logging.INFO)
try:
    import colorlog

    console.setFormatter(
        colorlog.ColoredFormatter("%(log_color)s%(levelname)s - %(message)s")
    )
    del colorlog
except ImportError:
    # BUG FIX: Handler.setFormatter expects a logging.Formatter instance.
    # Passing a bare format string (as before) raises
    # AttributeError("'str' object has no attribute 'format'") on first emit.
    console.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
    print("Install colorlog for colored logging output")
logger.addHandler(console)
del logging
# do not del tqdm, it is needed in the Log Handler
# Load externally available modules
from . import configuration, datasets, reduce, util
|
AWehrhahnREPO_NAMEPyReducePATH_START.@PyReduce_extracted@PyReduce-master@pyreduce@__init__.py@.PATH_END.py
|
{
"filename": "plotqdotabund.py",
"repo_name": "artis-mcrt/artistools",
"repo_path": "artistools_extracted/artistools-main/artistools/gsinetwork/plotqdotabund.py",
"type": "Python"
}
|
# PYTHON_ARGCOMPLETE_OK
import argparse
import contextlib
import math
import string
import typing as t
from collections.abc import Sequence
from functools import partial
from pathlib import Path
import argcomplete
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
import pandas as pd
import polars as pl
import artistools as at
def strnuc_to_latex(strnuc: str) -> str:
    """Convert a nuclide string such as 'sr89' into LaTeX like '$^{89}$Sr'."""
    # element symbol is everything before the trailing mass-number digits
    element = strnuc.rstrip(string.digits)
    mass_number = strnuc[len(element):]
    return rf"$^{{{mass_number}}}${element.title()}"
def plot_qdot(
    modelpath: Path,
    dfpartcontrib: pl.DataFrame,
    lzdfmodel: pl.LazyFrame,
    modelmeta: dict[str, t.Any],
    allparticledata: dict[int, dict[str, npt.NDArray[np.float64]]],
    arr_time_artis_days: Sequence[float],  # noqa: ARG001
    arr_time_gsi_days: Sequence[float],
    pdfoutpath: Path | str,
    xmax: float | None = None,
) -> None:
    """Plot model-integrated heating rates from the GSI network against ARTIS deposition data.

    Per-particle heating-rate channels (hbeta, halpha, hbfis, hspof, plus Ye and
    Qdot) are mass-weighted over all model cells to obtain global rates, which
    are plotted alongside the analytic rates read from ARTIS' deposition.out
    and saved as a PDF at pdfoutpath. Returns early (with a message) if no
    deposition.out file exists for the model.
    """
    try:
        depdata = at.get_deposition(modelpath=modelpath)
    except FileNotFoundError:
        print("Can't do qdot plot because no deposition.out file")
        return
    heatcols = ["hbeta", "halpha", "hbfis", "hspof", "Ye", "Qdot"]
    # accumulator: global mass-weighted rate per channel, sampled at arr_time_gsi_days
    arr_heat = {col: np.zeros_like(arr_time_gsi_days) for col in heatcols}
    series_mass_g = lzdfmodel.select("mass_g").collect().get_column("mass_g")
    model_mass_grams = series_mass_g.sum()
    # 1.989e33 g = one solar mass
    print(f"model mass: {model_mass_grams / 1.989e33:.3f} Msun")
    cell_mass_fracs = series_mass_g / model_mass_grams
    print("Calculating global heating rates from the individual particle heating rates...")
    # drop contributions from particles with no network data available
    dfpartcontrib_nomissing = dfpartcontrib.filter(pl.col("particleid").is_in(allparticledata.keys()))
    for cellindex, dfpartcontribthiscell in dfpartcontrib_nomissing.group_by("cellindex"):
        assert isinstance(cellindex, int)
        # cellindex is one-based; the model grid index (mgi) is zero-based
        mgi = cellindex - 1
        if mgi >= modelmeta["npts_model"]:
            continue
        cell_mass_frac = cell_mass_fracs[mgi]
        if cell_mass_frac == 0.0:
            continue
        # renormalise so that missing particles do not reduce the cell's total weight
        frac_of_cellmass_sum = dfpartcontribthiscell["frac_of_cellmass"].sum()
        for particleid, frac_of_cellmass in dfpartcontribthiscell.select([
            "particleid",
            "frac_of_cellmass",
        ]).iter_rows():
            thisparticledata = allparticledata[particleid]
            for col in heatcols:
                arr_heat[col] += thisparticledata[col] * cell_mass_frac * frac_of_cellmass / frac_of_cellmass_sum
    print(" done.")
    nrows = 1
    fig, axes = plt.subplots(
        nrows=nrows,
        ncols=1,
        sharex=True,
        sharey=False,
        figsize=(6, 1 + 3 * nrows),
        tight_layout={"pad": 0.4, "w_pad": 0.0, "h_pad": 0.0},
    )
    if nrows == 1:
        # keep axes as a 1D array even when there is only a single panel
        axes = np.array([axes])
    assert isinstance(axes, np.ndarray)
    axis = axes[0]
    # axis.set_ylim(bottom=1e7, top=2e10)
    # axis.set_xlim(left=depdata["tmid_days"].min(), right=depdata["tmid_days"].max())
    xmin = min(arr_time_gsi_days) * 0.9
    xmax = xmax or max(arr_time_gsi_days) * 1.03
    axis.set_xlim(left=xmin, right=xmax)
    # axis.set_xscale('log')
    # axis.set_xlim(left=1., right=arr_time_artis[-1])
    axes[-1].set_xlabel("Time [days]")
    axis.set_yscale("log")
    # axis.set_ylabel(f'X({strnuc})')
    axis.set_ylabel("Qdot [erg/s/g]")
    # arr_time_days, arr_qdot = zip(
    #     *[(t, qdot) for t, qdot in zip(arr_time_days, arr_qdot)
    #       if depdata['tmid_days'].min() <= t and t <= depdata['tmid_days'].max()])
    # axis.plot(arr_time_gsi_days, arr_heat['Qdot'],
    #           # linestyle='None',
    #           linewidth=2, color='black',
    #           # marker='x', markersize=8,
    #           label='Qdot GSI Network')
    # axis.plot(depdata['tmid_days'], depdata['Qdot_ana_erg/s/g'],
    #           linewidth=2, color='red',
    #           # linestyle='None',
    #           # marker='+', markersize=15,
    #           label='Qdot ARTIS')
    # beta-decay heating: network (black dashed) vs ARTIS analytic (red dashed)
    axis.plot(
        arr_time_gsi_days,
        arr_heat["hbeta"],
        linewidth=2,
        color="black",
        linestyle="dashed",
        # marker='x', markersize=8,
        label=r"$\dot{Q}_\beta$ GSI Network",
    )
    axis.plot(
        depdata["tmid_days"],
        depdata["Qdot_betaminus_ana_erg/s/g"],
        linewidth=2,
        color="red",
        linestyle="dashed",
        # marker='+', markersize=15,
        label=r"$\dot{Q}_\beta$ ARTIS",
    )
    # alpha-decay heating: network vs ARTIS analytic (dotted)
    axis.plot(
        arr_time_gsi_days,
        arr_heat["halpha"],
        linewidth=2,
        color="black",
        linestyle="dotted",
        # marker='x', markersize=8,
        label=r"$\dot{Q}_\alpha$ GSI Network",
    )
    axis.plot(
        depdata["tmid_days"],
        depdata["Qdotalpha_ana_erg/s/g"],
        linewidth=2,
        color="red",
        linestyle="dotted",
        # marker='+', markersize=15,
        label=r"$\dot{Q}_\alpha$ ARTIS",
    )
    # fission channels are network-only (no ARTIS analytic counterpart plotted)
    axis.plot(
        arr_time_gsi_days,
        arr_heat["hbfis"],
        linewidth=2,
        linestyle="dotted",
        # marker='x', markersize=8,
        # color='black',
        label=r"$\dot{Q}_{\beta fis}$ GSI Network",
    )
    axis.plot(
        arr_time_gsi_days,
        arr_heat["hspof"],
        linewidth=2,
        linestyle="dotted",
        # marker='x', markersize=8,
        # color='black',
        label=r"$\dot{Q}_{sponfis}$ GSI Network",
    )
    axis.legend(loc="best", frameon=False, handlelength=1, ncol=3, numpoints=1)
    # fig.suptitle(f'{at.get_model_name(modelpath)}', fontsize=10)
    at.plottools.autoscale(axis, margin=0.0)
    fig.savefig(pdfoutpath, format="pdf")
    print(f"Saved {pdfoutpath}")
def plot_cell_abund_evolution(
    modelpath: Path,  # noqa: ARG001
    dfpartcontrib: pl.DataFrame,
    allparticledata: dict[int, dict[str, npt.NDArray[np.float64]]],
    arr_time_artis_days: Sequence[float],
    arr_time_gsi_days: Sequence[float],
    arr_strnuc: Sequence[str],
    arr_abund_artis: dict[str, list[float]],
    t_model_init_days: float,
    dfcell: pl.DataFrame,
    pdfoutpath: Path,
    mgi: int,
    hideinputmodelpoints: bool = True,
    xmax: float | None = None,
) -> None:
    """Plot the time evolution of selected nuclide mass fractions in one model cell.

    For model grid index mgi, the GSI network abundances are reconstructed as a
    contribution-weighted sum over the particles feeding that cell and compared
    against the ARTIS estimator abundances (arr_abund_artis). One subplot per
    nuclide in arr_strnuc is written to pdfoutpath.
    """
    # particles contributing to this cell (cellindex is one-based) that have network data
    dfpartcontrib_thiscell = dfpartcontrib.filter(
        (pl.col("cellindex") == (mgi + 1)) & (pl.col("particleid").is_in(allparticledata.keys()))
    )
    frac_of_cellmass_sum = dfpartcontrib_thiscell["frac_of_cellmass"].sum()
    print(f"frac_of_cellmass_sum: {frac_of_cellmass_sum} (can be < 1.0 because of missing particles)")
    # if arr_strnuc[0] != 'Ye':
    #     arr_strnuc.insert(0, 'Ye')
    arr_abund_gsi: dict[str, np.ndarray[t.Any, np.dtype[np.float64]]] = {
        strnuc: np.zeros_like(arr_time_gsi_days) for strnuc in arr_strnuc
    }
    # calculate the GSI values from the particles contributing to this cell
    for particleid, frac_of_cellmass in dfpartcontrib_thiscell.select(["particleid", "frac_of_cellmass"]).iter_rows():
        # NOTE(review): the loop variable frac_of_cellmass is immediately overwritten
        # by the summed contribution for this particleid — presumably to merge
        # duplicate rows per particle; confirm duplicates can occur, otherwise
        # this per-iteration filter+sum is redundant (and quadratic).
        frac_of_cellmass = dfpartcontrib_thiscell.filter(pl.col("particleid") == particleid)["frac_of_cellmass"].sum()
        for strnuc in arr_strnuc:
            arr_abund_gsi[strnuc] += allparticledata[particleid][strnuc] * frac_of_cellmass / frac_of_cellmass_sum
    fig, axes = plt.subplots(
        nrows=len(arr_strnuc),
        ncols=1,
        sharex=False,
        sharey=False,
        figsize=(6, 1 + 2.0 * len(arr_strnuc)),
        tight_layout={"pad": 0.4, "w_pad": 0.0, "h_pad": 0.0},
    )
    fig.subplots_adjust(top=0.8)
    # axis.set_xscale('log')
    assert isinstance(axes, np.ndarray)
    axes[-1].set_xlabel("Time [days]")
    axis = axes[0]
    print("nuc gsi_abund artis_abund")
    # one panel per nuclide: network curve (black, crosses) vs ARTIS curve (red)
    for axis, strnuc in zip(axes, arr_strnuc, strict=False):
        # print(arr_time_artis_days)
        xmin = min(arr_time_gsi_days) * 0.9
        xmax = xmax or max(arr_time_gsi_days) * 1.03
        axis.set_xlim(left=xmin, right=xmax)
        # axis.set_yscale('log')
        # axis.set_ylabel(f'X({strnuc})')
        if strnuc == "Ye":
            axis.set_ylabel("Electron fraction")
        else:
            axis.set_ylabel("Mass fraction")
        strnuc_latex = strnuc_to_latex(strnuc)
        axis.plot(
            arr_time_gsi_days,
            arr_abund_gsi[strnuc],
            # linestyle='None',
            linewidth=2,
            marker="x",
            markersize=8,
            label=f"{strnuc_latex} Network",
            color="black",
        )
        if strnuc in arr_abund_artis:
            axis.plot(
                arr_time_artis_days,
                arr_abund_artis[strnuc],
                linewidth=2,
                # linestyle='None',
                # marker='+', markersize=15,
                label=f"{strnuc_latex} ARTIS",
                color="red",
            )
            print(f"{strnuc} {arr_abund_gsi[strnuc][0]:.2e} {arr_abund_artis[strnuc][0]:.2e}")
        # optionally mark the input-model mass fraction at t_model_init_days
        if f"X_{strnuc}" in dfcell and not hideinputmodelpoints:
            axis.plot(
                t_model_init_days,
                dfcell[f"X_{strnuc}"],
                marker="+",
                markersize=15,
                markeredgewidth=2,
                label=f"{strnuc_latex} ARTIS inputmodel",
                color="blue",
            )
        axis.legend(loc="best", frameon=False, handlelength=1, ncol=1, numpoints=1)
        at.plottools.autoscale(ax=axis)
    # fig.suptitle(f"{at.get_model_name(modelpath)} cell {mgi}", y=0.995, fontsize=10)
    at.plottools.autoscale(axis, margin=0.05)
    fig.savefig(pdfoutpath, format="pdf")
    print(f"Saved {pdfoutpath}")
def get_particledata(
    arr_time_s: Sequence[float] | npt.NDArray[np.float64],
    arr_strnuc_z_n: list[tuple[str, int, int]],
    traj_root: Path,
    particleid: int,
    verbose: bool = False,
) -> tuple[int, dict[str, np.ndarray]]:
    """For an array of times (NSM time including time before merger), interpolate the heating rates of various decay channels and (if arr_strnuc is not empty) the nuclear mass fractions.

    Returns (particleid, data dict) on success, or (-1, {}) when the particle
    has no network calculation on disk (only permitted when no abundance
    columns were requested).
    """
    try:
        # network timesteps bracketing the requested time range
        nts_min = at.inputmodel.rprocess_from_trajectory.get_closest_network_timestep(
            traj_root, particleid, timesec=min(arr_time_s), cond="lessthan"
        )
        nts_max = at.inputmodel.rprocess_from_trajectory.get_closest_network_timestep(
            traj_root, particleid, timesec=max(arr_time_s), cond="greaterthan"
        )
    except FileNotFoundError:
        print(f"No network calculation for particle {particleid}")
        # make sure we weren't requesting abundance data for this particle that has no network data
        if arr_strnuc_z_n:
            print("ERROR:", particleid, arr_strnuc_z_n)
        assert not arr_strnuc_z_n
        return -1, {}
    if verbose:
        print(
            "Reading network calculation heating.dat,"
            f" energy_thermo.dat{', and nz-plane abundances' if arr_strnuc_z_n else ''} for particle {particleid}..."
        )
    particledata = {}
    # maps network step number ('#count') -> time in seconds
    nstep_timesec = {}
    with at.inputmodel.rprocess_from_trajectory.open_tar_file_or_extracted(
        traj_root, particleid, "./Run_rprocess/heating.dat"
    ) as f:
        dfheating = pd.read_csv(f, sep=r"\s+", usecols=["#count", "time/s", "hbeta", "halpha", "hbfis", "hspof"])
        heatcols = ["hbeta", "halpha", "hbfis", "hspof"]
        heatrates_in: dict[str, list[float]] = {col: [] for col in heatcols}
        arr_time_s_source = []
        for _, row in dfheating.iterrows():
            nstep_timesec[row["#count"]] = row["time/s"]
            arr_time_s_source.append(row["time/s"])
            for col in heatcols:
                try:
                    heatrates_in[col].append(float(row[col]))
                except ValueError:
                    # presumably recovers Fortran-style exponents missing the 'e',
                    # e.g. '1.234-5' -> '1.234e-5' — TODO confirm input format
                    heatrates_in[col].append(float(row[col].replace("-", "e-")))
        for col in heatcols:
            # resample each channel onto the requested time grid
            particledata[col] = np.array(np.interp(arr_time_s, arr_time_s_source, heatrates_in[col]))
    with at.inputmodel.rprocess_from_trajectory.open_tar_file_or_extracted(
        traj_root, particleid, "./Run_rprocess/energy_thermo.dat"
    ) as f:
        storecols = ["Qdot", "Ye"]
        dfthermo = pd.read_csv(f, sep=r"\s+", usecols=["#count", "time/s", *storecols])
        data_in: dict[str, list[float]] = {col: [] for col in storecols}
        arr_time_s_source = []
        for _, row in dfthermo.iterrows():
            nstep_timesec[row["#count"]] = row["time/s"]
            arr_time_s_source.append(row["time/s"])
            for col in storecols:
                try:
                    data_in[col].append(float(row[col]))
                except ValueError:
                    data_in[col].append(float(row[col].replace("-", "e-")))
        for col in storecols:
            particledata[col] = np.array(np.interp(arr_time_s, arr_time_s_source, data_in[col]))
    if arr_strnuc_z_n:
        # read the (Z, N) abundances at every network timestep in the bracketed
        # range, then interpolate each nuclide onto the requested time grid
        arr_traj_time_s = []
        arr_massfracs: dict[str, list[float]] = {strnuc: [] for strnuc, _, _ in arr_strnuc_z_n}
        for nts in range(nts_min, nts_max + 1):
            timesec = nstep_timesec[nts]
            arr_traj_time_s.append(timesec)
            # print(nts, timesec / 86400)
            traj_nuc_abund = at.inputmodel.rprocess_from_trajectory.get_trajectory_abund_q(
                particleid, traj_root=traj_root, nts=nts
            )
            for strnuc, Z, N in arr_strnuc_z_n:
                arr_massfracs[strnuc].append(traj_nuc_abund.get((Z, N), 0.0))
        for strnuc, _, _ in arr_strnuc_z_n:
            massfracs_interp = np.interp(arr_time_s, arr_traj_time_s, arr_massfracs[strnuc])
            particledata[strnuc] = np.array(massfracs_interp)
    return particleid, particledata
def plot_qdot_abund_modelcells(
    modelpath: Path,
    merger_root: Path,
    mgiplotlist: Sequence[int],
    arr_el_a: list[tuple[str, int]],
    xmax: float | None = None,
) -> None:
    """Compare ARTIS heating rates and abundances with the GSI network calculation.

    Locates the merger grid/trajectory folders named in model.txt, gathers
    per-particle network data (in parallel when configured), then writes a
    global Qdot comparison plot plus one abundance-evolution plot per model
    grid index in mgiplotlist.
    """
    # default values, because early model.txt didn't specify this
    griddatafolder: Path = Path("SFHo_snapshot")
    mergermodelfolder: Path = Path("SFHo_short")
    trajfolder: Path = Path("SFHo")
    # scan the comment header of model.txt for the grid/trajectory folder names
    with at.zopen(modelpath / "model.txt") as fmodel:
        while True:
            line = fmodel.readline()
            if not line.startswith("#"):
                break
            if line.startswith("# gridfolder:"):
                griddatafolder = Path(line.strip().removeprefix("# gridfolder: "))
                mergermodelfolder = Path(line.strip().removeprefix("# gridfolder: ").removesuffix("_snapshot"))
            elif line.startswith("# trajfolder:"):
                # normalise an all-caps variant of the folder name
                trajfolder = Path(line.strip().removeprefix("# trajfolder: ").replace("SFHO", "SFHo"))
    griddata_root = Path(merger_root, mergermodelfolder, griddatafolder)
    traj_root = Path(merger_root, mergermodelfolder, trajfolder)
    print(f"model.txt traj_root: {traj_root}")
    print(f"model.txt griddata_root: {griddata_root}")
    assert traj_root.is_dir()
    arr_el, arr_a = zip(*arr_el_a, strict=False)
    arr_strnuc: list[str] = [el + str(a) for el, a in arr_el_a]
    arr_z = [at.get_atomic_number(el) for el in arr_el]
    # neutron number N = A - Z
    arr_n = [a - z for z, a in zip(arr_z, arr_a, strict=False)]
    arr_strnuc_z_n = list(zip(arr_strnuc, arr_z, arr_n, strict=True))
    # arr_z = [at.get_atomic_number(el) for el in arr_el]
    lzdfmodel, modelmeta = at.inputmodel.get_modeldata_polars(
        modelpath, derived_cols=["mass_g", "rho", "logrho", "volume"]
    )
    npts_model = modelmeta["npts_model"]
    # these factors correct for missing mass due to skipped shells, and volume error due to Cartesian grid map
    correction_factors = {}
    assoc_cells: dict[int, list[int]] = {}
    mgi_of_propcells: dict[int, int] = {}
    try:
        assoc_cells, mgi_of_propcells = at.get_grid_mapping(modelpath)
        direct_model_propgrid_map = all(
            len(propcells) == 1 and mgi == propcells[0] for mgi, propcells in assoc_cells.items()
        )
        if direct_model_propgrid_map:
            print(" detected direct mapping of model cells to propagation grid")
    except FileNotFoundError:
        print("No grid mapping file found, assuming direct mapping of model cells to propagation grid")
        direct_model_propgrid_map = True
    if direct_model_propgrid_map:
        # one-to-one mapping: no correction needed
        correction_factors = dict.fromkeys(arr_strnuc, 1.0)
        lzdfmodel = lzdfmodel.with_columns(n_assoc_cells=pl.lit(1.0))
    else:
        # NOTE(review): this pair of lines looks inconsistent — np.cbrt is applied
        # and then another cube root, and the assert compares propcellcount**3 to
        # ncoordgridx (dimensionally a grid-side length). Verify against the
        # expectations of at.get_wid_init_at_tmodel before relying on this branch.
        ncoordgridx = math.ceil(np.cbrt(max(mgi_of_propcells.keys())))
        propcellcount = int(math.ceil(ncoordgridx ** (1 / 3.0)) ** 3)
        assert propcellcount**3 == ncoordgridx
        xmax_tmodel = modelmeta["vmax_cmps"] * modelmeta["t_model_init_days"] * 86400
        wid_init = at.get_wid_init_at_tmodel(modelpath, propcellcount, modelmeta["t_model_init_days"], xmax_tmodel)
        lzdfmodel = lzdfmodel.with_columns(
            n_assoc_cells=pl.Series([
                len(assoc_cells.get(inputcellid - 1, []))
                for (inputcellid,) in lzdfmodel.select("inputcellid").collect().iter_rows()
            ])
        )
        # for spherical models, ARTIS mapping to a cubic grid introduces some errors in the cell volumes
        lzdfmodel = lzdfmodel.with_columns(mass_g_mapped=10 ** pl.col("logrho") * wid_init**3 * pl.col("n_assoc_cells"))
        for strnuc in arr_strnuc:
            # ratio of mapped (propagation-grid) mass to input-model mass for this nuclide
            corr = (
                lzdfmodel.select(pl.col(f"X_{strnuc}") * pl.col("mass_g_mapped")).sum().collect().item()
                / lzdfmodel.select(pl.col(f"X_{strnuc}") * pl.col("mass_g")).sum().collect().item()
            )
            # print(strnuc, corr)
            correction_factors[strnuc] = corr
    tmids = at.get_timestep_times(modelpath, loc="mid")
    MH = 1.67352e-24  # g
    arr_time_artis_days: list[float] = []
    arr_abund_artis: dict[int, dict[str, list[float]]] = {}
    # estimator files may not exist yet (run not started) — silently skip in that case
    with contextlib.suppress(FileNotFoundError):
        get_mgi_list = tuple(mgiplotlist)  # all cells if Ye is calculated
        estimators_lazy = at.estimators.scan_estimators(modelpath=modelpath, modelgridindex=get_mgi_list)
        assert estimators_lazy is not None
        estimators_lazy = estimators_lazy.filter(pl.col("timestep") > 0)
        first_mgi = None
        estimators_lazy = estimators_lazy.filter(pl.col("modelgridindex").is_in(mgiplotlist))
        estimators_lazy = estimators_lazy.select(
            "modelgridindex",
            "timestep",
            *[f"nniso_{strnuc}" for strnuc in arr_strnuc if f"nniso_{strnuc}" in estimators_lazy.columns],
        )
        # attach initial density and input-model mass fractions for each cell
        estimators_lazy = (
            estimators_lazy.join(
                lzdfmodel.select(
                    "modelgridindex",
                    "rho",
                    *[f"X_{strnuc}" for strnuc in arr_strnuc if f"X_{strnuc}" in lzdfmodel.columns],
                ),
                on="modelgridindex",
            )
            .collect()
            .lazy()
        )
        estimators_lazy = estimators_lazy.join(
            pl.DataFrame({"timestep": range(len(tmids)), "time_mid": tmids})
            .with_columns(pl.col("timestep").cast(pl.Int32))
            .lazy(),
            on="timestep",
            how="left",
        )
        # homologous expansion: scale the density from t_model_init to each timestep
        estimators_lazy = estimators_lazy.with_columns(
            rho_init=pl.col("rho"), rho=pl.col("rho") * (modelmeta["t_model_init_days"] / pl.col("time_mid")) ** 3
        )
        # assert False
        # estimators_lazy = estimators_lazy.with_columns(
        #     rho=pl.col("rho") * (modelmeta["t_model_init_days"] / pl.col("time_mid")) ** 3
        # )
        estimators_lazy = estimators_lazy.sort(by=["timestep", "modelgridindex"])
        estimators = estimators_lazy.collect()
        for (nts, mgi), estimtsmgsi in estimators.group_by(["timestep", "modelgridindex"], maintain_order=True):
            assert isinstance(nts, int)
            assert isinstance(mgi, int)
            if first_mgi is None:
                first_mgi = mgi
            time_days = estimtsmgsi["time_mid"].item()
            # record the time axis only once (from the first cell encountered)
            if mgi == first_mgi:
                arr_time_artis_days.append(time_days)
            for strnuc, a in zip(arr_strnuc, arr_a, strict=False):
                abund = estimtsmgsi[f"nniso_{strnuc}"].item()
                # convert number density to mass fraction, then apply the grid-mapping correction
                massfrac = abund * a * MH / estimtsmgsi["rho"].item()
                massfrac += estimtsmgsi[f"X_{strnuc}"].item() * (correction_factors[strnuc] - 1.0)
                if mgi not in arr_abund_artis:
                    arr_abund_artis[mgi] = {}
                if strnuc not in arr_abund_artis[mgi]:
                    arr_abund_artis[mgi][strnuc] = []
                arr_abund_artis[mgi][strnuc].append(massfrac)
    arr_time_artis_days_alltimesteps = at.get_timestep_times(modelpath)
    arr_time_artis_s_alltimesteps = np.array([t * 8.640000e04 for t in arr_time_artis_days_alltimesteps])
    # no completed timesteps yet, so display full set of timesteps that artis will compute
    if not arr_time_artis_days:
        arr_time_artis_days = arr_time_artis_days_alltimesteps.copy()
    arr_time_gsi_s = np.array([modelmeta["t_model_init_days"] * 86400, *arr_time_artis_s_alltimesteps])
    # times in artis are relative to merger, but NSM simulation time started earlier
    mergertime_geomunits = at.inputmodel.modelfromhydro.get_merger_time_geomunits(griddata_root)
    # geometric time units -> seconds
    t_mergertime_s = mergertime_geomunits * 4.926e-6
    arr_time_gsi_s_incpremerger = np.array([
        modelmeta["t_model_init_days"] * 86400 + t_mergertime_s,
        *arr_time_artis_s_alltimesteps,
    ])
    arr_time_gsi_days = list(arr_time_gsi_s / 86400)
    dfpartcontrib = at.inputmodel.rprocess_from_trajectory.get_gridparticlecontributions(modelpath).filter(
        (pl.col("cellindex") <= npts_model) & (pl.col("frac_of_cellmass") > 0)
    )
    # cellindex column is one-based
    mgiplotlistplus1 = [mgi + 1 for mgi in mgiplotlist]
    list_particleids_getabund = dfpartcontrib.filter(pl.col("cellindex").is_in(mgiplotlistplus1))["particleid"].unique()
    fworkerwithabund = partial(get_particledata, arr_time_gsi_s_incpremerger, arr_strnuc_z_n, traj_root, verbose=True)
    print(f"Reading trajectories from {traj_root}")
    print(f"Reading Qdot/thermo and abundance data for {len(list_particleids_getabund)} particles")
    if at.get_config()["num_processes"] > 1:
        with at.get_multiprocessing_pool() as pool:
            list_particledata_withabund = pool.map(fworkerwithabund, list_particleids_getabund)
            pool.close()
            pool.join()
    else:
        list_particledata_withabund = [fworkerwithabund(particleid) for particleid in list_particleids_getabund]
    # remaining particles only need heating/thermo data, not abundances
    list_particleids_noabund = [
        pid for pid in dfpartcontrib["particleid"].unique() if pid not in list_particleids_getabund
    ]
    fworkernoabund = partial(get_particledata, arr_time_gsi_s_incpremerger, [], traj_root)
    print(f"Reading for Qdot/thermo data (no abundances needed) for {len(list_particleids_noabund)} particles")
    if at.get_config()["num_processes"] > 1:
        with at.get_multiprocessing_pool() as pool:
            list_particledata_noabund = pool.map(fworkernoabund, list_particleids_noabund)
            pool.close()
            pool.join()
    else:
        list_particledata_noabund = [fworkernoabund(particleid) for particleid in list_particleids_noabund]
    allparticledata = dict(list_particledata_withabund + list_particledata_noabund)
    plot_qdot(
        modelpath,
        dfpartcontrib,
        lzdfmodel,
        modelmeta,
        allparticledata,
        arr_time_artis_days,
        arr_time_gsi_days,
        pdfoutpath=Path(modelpath, "gsinetwork_global-qdot.pdf"),
        xmax=xmax,
    )
    for mgi in mgiplotlist:
        plot_cell_abund_evolution(
            modelpath,
            dfpartcontrib,
            allparticledata,
            arr_time_artis_days,
            arr_time_gsi_days,
            arr_strnuc,
            arr_abund_artis.get(mgi, {}),
            modelmeta["t_model_init_days"],
            # NOTE(review): .select(modelgridindex=mgi) creates a literal column
            # named 'modelgridindex' rather than filtering to the row for this
            # cell — confirm this is intended (a .filter() may have been meant).
            lzdfmodel.select(modelgridindex=mgi).collect(),
            mgi=mgi,
            pdfoutpath=Path(modelpath, f"gsinetwork_cell{mgi}-abundance.pdf"),
            xmax=xmax,
        )
def addargs(parser: argparse.ArgumentParser) -> None:
    """Register this module's command-line options on *parser*."""
    addarg = parser.add_argument
    addarg("-modelpath", default=".", help="Path for ARTIS files")
    addarg(
        "-mergerroot",
        default=Path(Path.home() / "Google Drive/Shared Drives/GSI NSM/Mergers"),
        help="Base path for merger snapshot and trajectory data specified in model.txt",
    )
    addarg("-outputpath", "-o", default=".", help="Path for output files")
    addarg("-xmax", default=None, type=int, help="Maximum time in days to plot")
    addarg(
        "-modelgridindex",
        "-cell",
        "-mgi",
        default=None,
        help="Modelgridindex (zero-indexed) to plot or list such as 4,5,6",
    )
def main(args: argparse.Namespace | None = None, argsraw: Sequence[str] | None = None, **kwargs: t.Any) -> None:
    """Compare the energy release and abundances from ARTIS to the GSI Network calculation."""
    # build a parser only if the caller didn't supply parsed args directly
    if args is None:
        parser = argparse.ArgumentParser(formatter_class=at.CustomArgHelpFormatter, description=__doc__)
        addargs(parser)
        at.set_args_from_dict(parser, kwargs)
        argcomplete.autocomplete(parser)
        args = parser.parse_args([] if kwargs else argsraw)
    # nuclides to track, as (element symbol, mass number) pairs
    arr_el_a = [
        ("He", 4),
        # ("Ga", 72),
        ("Sr", 89),
        ("Sr", 91),
        ("Sr", 92),
        ("Y", 92),
        ("Y", 93),
        ("Zr", 93),
        ("Ba", 140),
        ("Ce", 141),
        ("Nd", 147),
        # ('Rn', 222),
        # ("Ra", 223),
        # ("Ra", 224),
        # ("Ra", 225),
        # ("Ac", 225),
        # ('Th', 234),
        # ('Pa', 233),
        # ('U', 235),
    ]
    # arr_el_a = [
    #     ("He", 4),
    #     ("Ga", 72),
    #     ("Sr", 91),
    #     ("Sr", 92),
    # ]
    # arr_el_a = [
    #     ("Y", 92),
    #     ("Zr", 93),
    #     ("Ce", 141),
    #     ("Nd", 147),
    # ]
    # sort by atomic number, then mass number
    arr_el_a.sort(key=lambda x: (at.get_atomic_number(x[0]), x[1]))
    modelpath = Path(args.modelpath)
    # -modelgridindex accepts either a single index or a comma-separated list
    if args.modelgridindex is None:
        mgiplotlist = []
    elif hasattr(args.modelgridindex, "split"):
        mgiplotlist = [int(mgi) for mgi in args.modelgridindex.split(",")]
    else:
        mgiplotlist = [int(args.modelgridindex)]
    plot_qdot_abund_modelcells(
        modelpath=modelpath, merger_root=args.mergerroot, mgiplotlist=mgiplotlist, arr_el_a=arr_el_a, xmax=args.xmax
    )
# allow running this module directly as a script
if __name__ == "__main__":
    main()
|
artis-mcrtREPO_NAMEartistoolsPATH_START.@artistools_extracted@artistools-main@artistools@gsinetwork@plotqdotabund.py@.PATH_END.py
|
{
"filename": "resetdb.py",
"repo_name": "juanep97/iop4",
"repo_path": "iop4_extracted/iop4-main/iop4site/resetdb.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""Script to reset the database, keeping the users and catalog data. USE WITH CARE."""
# iop4lib config
import iop4lib
iop4conf = iop4lib.Config(config_db=False)
# other imports
import os
from datetime import datetime
from termcolor import colored, cprint
import sys

manage_fpath = f"{iop4conf.basedir}/iop4site/manage.py"
backupdir_path = f"{iop4conf.basedir}/priv.backups/"
datetime_str = datetime.now().strftime("%Y-%m-%d_%H%M")


def _confirm_or_abort(prompt: str) -> None:
    """Ask a y/n question and terminate the script unless the answer is exactly 'y'."""
    if input(prompt) != "y":
        print("Aborting.")
        # sys.exit is preferred over the site-provided exit() builtin in scripts
        sys.exit(1)


print(colored("DANGER! This script will reset the database, keeping the users and catalog data. USE WITH CARE.", "yellow"))

if not os.path.exists(backupdir_path):
    os.makedirs(backupdir_path)

_confirm_or_abort("Are you sure you want to continue? (y/n) ")

# Back up the source catalog and the auth (users/groups) tables before touching anything.
print(f"Backing up catalog and users to {backupdir_path}/priv.iop4.dump.*.{datetime_str}.yaml ...")
os.system(f"python {manage_fpath} dumpdata --natural-primary --natural-foreign --format=yaml iop4api.astrosource > {backupdir_path}/priv.iop4.dump.catalog.{datetime_str}.yaml")
os.system(f"python {manage_fpath} dumpdata --natural-primary --natural-foreign --format=yaml auth > {backupdir_path}/priv.iop4.dump.auth.{datetime_str}.yaml")

_confirm_or_abort("Reset the DB? (y/n) ")

# Delete the DB file and migrations, then rebuild the schema from scratch.
print("Resetting database ...")
os.system(rf"rm {iop4conf.db_path}")
os.system(rf"rm -rf {iop4conf.basedir}/iop4site/migrations")
os.system(rf"rm -rf {iop4conf.basedir}/iop4site/iop4api/migrations")
os.system(rf"python {manage_fpath} flush")
#os.system(r"python manage.py migrate iop4api zero")
os.system(rf"python {manage_fpath} makemigrations iop4api")
os.system(rf"python {manage_fpath} migrate")

# Restore the backed-up catalog and users into the fresh database.
print(f"Loading catalog and users from priv.iop4.dump.*.{datetime_str}.yaml ...")
_confirm_or_abort("Do you want to load the data? (y/n) ")
os.system(rf"python {manage_fpath} loaddata {backupdir_path}/priv.iop4.*.{datetime_str}.yaml")
|
juanep97REPO_NAMEiop4PATH_START.@iop4_extracted@iop4-main@iop4site@resetdb.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "oliverphilcox/PolyBin3D",
"repo_path": "PolyBin3D_extracted/PolyBin3D-main/README.md",
"type": "Markdown"
}
|
# PolyBin3D
PolyBin3D is a Python code that estimates the binned power spectrum and bispectrum for 3D fields such as the distributions of matter and galaxies, using the algorithms of [Philcox 2020](https://arxiv.org/abs/2012.09389), [Philcox 2021](https://arxiv.org/abs/2107.06287), [Ivanov et al. 2023](https://arxiv.org/abs/2302.04414) and [Philcox & Flöss 2024](https://arxiv.org/abs/2404.07249). It is a sister code to [PolyBin](https://github.com/oliverphilcox/PolyBin), which computes the polyspectra of data on the two-sphere and is a modern reimplementation of the former [Spectra-Without-Windows](https://github.com/oliverphilcox/Spectra-Without-Windows) code.
For each statistic, two estimators are available: the standard (ideal) estimators, which do not take into account the mask, and window-deconvolved estimators. In the second case, we require computation of a Fisher matrix; this depends on binning and the mask, but does not need to be recomputed for each new simulation.
The code supports GPU acceleration using JAX, which can be enabled using the `backend` argument in the `base` class, as demonstrated below.
PolyBin3D contains the following modules:
- `pspec`: Binned power spectra
- `bspec`: Binned bispectra
The basic usage of the power spectrum class is the following:
```
# Import code
import PolyBin3D as pb
import numpy as np
# Load base class
base = pb.PolyBin3D(boxsize, # dimensions of box
gridsize, # dimensions of Fourier-space grid,
boxcenter=[0,0,0], # center of simulation box
pixel_window='tsc', # pixel window function
backend='fftw', # backend for performing FFTs ('fftw' for cpu, 'jax' for gpu)
nthreads=4, # number of CPUs for performing FFTs (only applies to 'fftw' backend)
sightline='global') # redshift-space axis
# Load power spectrum class
pspec = pb.PSpec(base,
k_bins, # k-bin edges
lmax, # Legendre multipoles
mask, # real-space mask
applySinv, # filter to apply to data
)
# Compute Fisher matrix and shot-noise using Monte Carlo simulations (should usually be parallelized)
fish, shot_num = pspec.compute_fisher(10, N_cpus=1, verb=True)
# Compute windowed power spectra
Pk_ideal = pspec.Pk_ideal(data)
# Compute unwindowed power spectra, using the Fisher matrix we just computed
Pk_unwindowed = pspec.Pk_unwindowed(data, fish=fish, shot_num=shot_num, subtract_shotnoise=False)
```
Bispectra can be computed similarly:
```
# Load bispectrum class
bspec = pb.BSpec(base,
k_bins, # k-bin edges
lmax, # Legendre multipoles
mask, # real-space mask
applySinv, # filter to apply to data
)
# Compute Fisher matrix using Monte Carlo simulations (should usually be parallelized)
fish = bspec.compute_fisher(10, N_cpus=1, verb=True)
# Compute windowed bispectra
Bk_ideal = bspec.Bk_ideal(data)
# Compute unwindowed bispectra using the Fisher matrix we just computed
Bk_unwindowed = bspec.Bk_unwindowed(data, fish=fish, include_linear_term=False)
```
Further details are described in the tutorials, which describe
- [Tutorial 1](Tutorial%201%20-%20Pk%20from%20Simulations.ipynb): introduction to PolyBin3D, and computing the power spectrum from simulations
- [Tutorial 2](Tutorial%202%20-%20Validating%20the%20Unwindowed%20Pk%20Estimators.ipynb): validation of the window-deconvolved power spectrum estimators
- [Tutorial 3](Tutorial%203%20-%20BOSS%20Pk%20Multipoles.ipynb): application of the power spectrum estimators to the BOSS DR12 dataset
- [Tutorial 4](Tutorial%204%20-%20Bk%20from%20Simulations.ipynb): introduction to computing bispectra
- [Tutorial 5](Tutorial%205%20-%20Validating%20the%20Unwindowed%20Bk%20Estimators.ipynb): validation of the window-deconvolved bispectrum estimators
## Authors
- [Oliver Philcox](mailto:ohep2@cantab.ac.uk) (Columbia / Simons Foundation)
- [Thomas Flöss](mailto:tsfloss@gmail.com) (University of Groningen)
## Dependencies
- Python 2/3
- numpy, scipy
- fftw [for FFTs]
- Nbodykit [not required, but useful for testing]
- JAX (for GPU acceleration, see [here](https://jax.readthedocs.io/en/latest/installation.html) for installation instructions.)
## References
**Code references:**
1. **Philcox, O. H. E. & Flöss, T.: "PolyBin3D: A Suite of Optimal and Efficient Power Spectrum and Bispectrum Estimators for Large-Scale Structure", (2024) ([arXiv](https://arxiv.org/abs/2404.07249))**
2. Philcox, O. H. E., "Cosmology Without Window Functions: Quadratic Estimators for the Galaxy Power Spectrum", (2020) ([arXiv](https://arxiv.org/abs/2012.09389))
3. Philcox, O. H. E., "Cosmology Without Window Functions: Cubic Estimators for the Galaxy Bispectrum", (2021) ([arXiv](https://arxiv.org/abs/2107.06287))
4. Ivanov, M. M., Philcox, O. H. E., et al. "Cosmology with the Galaxy Bispectrum Multipoles: Optimal Estimation and Application to BOSS Data" (2023) ([arXiv](https://arxiv.org/abs/2302.04414))
**Some works using data from PolyBin3D (or its predecessor)**
- Philcox & Ivanov (2021, [arXiv](https://arxiv.org/abs/2112.04515)): Combined constraints on LambdaCDM from the BOSS power spectrum and bispectrum.
- Cabass et al. (2022, [arXiv](https://arxiv.org/abs/2201.07238)): Constraints on single-field inflation from the BOSS power spectrum and bispectrum.
- Cabass et al. (2022, [arXiv](https://arxiv.org/abs/2204.01781)): Constraints on multi-field inflation from the BOSS power spectrum and bispectrum.
- Nunes et al. (2022, [arXiv](https://arxiv.org/abs/2203.08093)): Constraints on dark-sector interactions from the BOSS galaxy power spectrum.
- Rogers et al. (2023, [arXiv](https://arxiv.org/abs/2301.08361)): Ultra-light axions and the S8 tension: joint constraints from the cosmic microwave background and galaxy clustering.
- Ivanov et al. (2023, [arXiv](https://arxiv.org/abs/2302.04414)): Cosmology with the Galaxy Bispectrum Multipoles: Optimal Estimation and Application to BOSS Data.
- Moretti et al. (2023, [arXiv](https://arxiv.org/abs/2306.09275)): Constraints on the growth index and neutrino mass from the BOSS power spectrum.
- He et al. (2023, [arXiv](https://arxiv.org/abs/2309.03956)): Self-Interacting Neutrinos in Light of Large-Scale Structure Data.
- Camarena et al. (2023, [arXiv](https://arxiv.org/abs/2309.03941)): The two-mode puzzle: Confronting self-interacting neutrinos with the full shape of the galaxy power spectrum
|
oliverphilcoxREPO_NAMEPolyBin3DPATH_START.@PolyBin3D_extracted@PolyBin3D-main@README.md@.PATH_END.py
|
{
"filename": "exec_calcos_for_darks.py",
"repo_name": "kimakan/FaintCOS",
"repo_path": "FaintCOS_extracted/FaintCOS-master/optional/exec_calcos_for_darks.py",
"type": "Python"
}
|
"""This module uses CALCOS to reduce all rawtag files in the folder.
The reduced files (corrtags) are stored in the folder "reduced".
All obsolete files (flt_a.fits, counts_a.fits etc.) will be
automatically removed.
"""
import subprocess, sys, os
all_files = os.listdir(".")
files = []
for f in all_files:
filename = f.split("_")
if (len(filename) > 2):
if (f.split("_")[1] == "rawtag" and f.split("_")[2] == "a.fits"):
files.append(f)
for f in files:
process = subprocess.Popen("calcos -o reduced " + f + " > log.txt", shell=True)
print("Working on " + f)
process.wait()
os.system("rm -r reduced/*counts_a.fits")
os.system("rm -r reduced/*counts_b.fits")
os.system("rm -r reduced/*flt_a.fits")
os.system("rm -r reduced/*flt_b.fits")
os.system("rm -r reduced/*.tra")
|
kimakanREPO_NAMEFaintCOSPATH_START.@FaintCOS_extracted@FaintCOS-master@optional@exec_calcos_for_darks.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/grid/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Import the submodule eagerly for static type checkers and very old
# interpreters; otherwise defer it until first attribute access via the
# plotly lazy-import machinery.
if TYPE_CHECKING or sys.version_info < (3, 7):
    from ._domain import Domain
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._domain.Domain"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@grid@__init__.py@.PATH_END.py
|
{
"filename": "test_retrieval.py",
"repo_name": "MartianColonist/POSEIDON",
"repo_path": "POSEIDON_extracted/POSEIDON-main/tests/test_retrieval.py",
"type": "Python"
}
|
import pytest
def test_continuum_retrieval():
    '''
    Test POSEIDON's retrieval functionality on a synthetic WASP-121b
    transmission spectrum only including H-, H2-H2 and H2-He CIA, and Rayleigh
    scattering (i.e. a continuum-only retrieval).

    The test forward-models a spectrum, synthesises noiseless data from it,
    retrieves the free parameters with MultiNest, and asserts that every
    retrieved median lies within 1% of its true input value.
    '''

    from POSEIDON.constants import R_Sun, R_J
    from POSEIDON.core import (create_star, create_planet, define_model,
                               make_atmosphere, read_opacities,
                               wl_grid_constant_R, compute_spectrum,
                               load_data, set_priors)
    from POSEIDON.instrument import generate_syn_data_from_user
    from POSEIDON.retrieval import run_retrieval

    import pymultinest
    import numpy as np
    import os

    #***** Define stellar properties *****#

    R_s = 1.46*R_Sun    # Stellar radius (m)
    T_s = 6776.0        # Stellar effective temperature (K)
    Met_s = 0.13        # Stellar metallicity [log10(Fe/H_star / Fe/H_solar)]
    log_g_s = 4.24      # Stellar log surface gravity (log10(cm/s^2) by convention)

    # Create the stellar object
    star = create_star(R_s, T_s, log_g_s, Met_s)

    #***** Define planet properties *****#

    planet_name = 'WASP-121b'  # Planet name used for plots, output files etc.

    R_p = 1.753*R_J     # Planetary radius (m)
    log_g_p = 2.97      # Gravitational field of planet (cgs)
    T_eq = 2450         # Equilibrium temperature (K)

    # Create the planet object
    planet = create_planet(planet_name, R_p, log_g = log_g_p, T_eq = T_eq)

    #***** Model wavelength grid *****#

    wl_min = 0.4  # Minimum wavelength (um)
    wl_max = 1.8  # Maximum wavelength (um)
    R = 1000      # We can get away with R = 1k for this test, since we only have continuum opacity

    # We need to provide a model wavelength grid to initialise instrument properties
    wl = wl_grid_constant_R(wl_min, wl_max, R)

    #***** Define model *****#

    model_name = 'H-_retrieval_test'

    bulk_species = ['H2', 'He']
    param_species = ['H-']

    # Create the model object
    model = define_model(model_name, bulk_species, param_species,
                         PT_profile = 'isotherm')

    # Specify the pressure grid of the atmosphere
    P_min = 1.0e-7   # 0.1 ubar
    P_max = 100      # 100 bar
    N_layers = 100   # 100 layers

    # We'll space the layers uniformly in log-pressure
    P = np.logspace(np.log10(P_max), np.log10(P_min), N_layers)

    # Specify the reference pressure and radius
    P_ref = 10.0     # Reference pressure (bar)
    R_p_ref = R_p    # Radius at reference pressure

    # Provide a specific set of model parameters for the atmosphere
    PT_params = np.array([T_eq])
    log_X_params = np.array([-9.0])

    # Generate the atmosphere
    atmosphere = make_atmosphere(planet, model, P, P_ref, R_p_ref,
                                 PT_params, log_X_params)

    #***** Read opacity data *****#

    opacity_treatment = 'opacity_sampling'

    # Define fine temperature grid (K)
    T_fine_min = 800
    T_fine_max = 3000
    T_fine_step = 10
    T_fine = np.arange(T_fine_min, (T_fine_max + T_fine_step), T_fine_step)

    # Define fine pressure grid (log10(P/bar))
    log_P_fine_min = -6.0
    log_P_fine_max = 2.0
    log_P_fine_step = 0.2
    log_P_fine = np.arange(log_P_fine_min, (log_P_fine_max + log_P_fine_step),
                           log_P_fine_step)

    # Read cross sections
    opac = read_opacities(model, wl, opacity_treatment, T_fine, log_P_fine,
                          testing = True)

    # Generate transmission spectrum
    spectrum = compute_spectrum(planet, star, model, atmosphere, opac, wl,
                                spectrum_type = 'transmission')

    #***** Generate synthetic data *****#

    os.mkdir('./data')
    os.mkdir('./data/WASP-121b')

    data_dir = './data/WASP-121b'

    # Gauss_scatter = False keeps the synthetic data noiseless so the
    # retrieval should recover the inputs to high precision
    generate_syn_data_from_user(planet, wl, spectrum, data_dir, instrument = 'dummy',
                                R_data = 30, err_data = 50, wl_start = 0.45,
                                wl_end = 1.7, Gauss_scatter = False)

    # Load synthetic data file
    datasets = ['WASP-121b_SYNTHETIC_dummy.dat']
    instruments = ['dummy']

    # Load dataset, pre-load instrument PSF and transmission function
    data = load_data(data_dir, datasets, instruments, wl, wl_unit = 'micron',
                     bin_width = 'half', spectrum_unit = 'transit_depth', skiprows = None)

    #***** Set priors for retrieval *****#

    # Initialise prior type dictionary
    prior_types = {}

    # Specify whether priors are linear, Gaussian, etc.
    prior_types['T'] = 'uniform'
    prior_types['R_p_ref'] = 'uniform'
    prior_types['log_H-'] = 'uniform'

    # Initialise prior range dictionary
    prior_ranges = {}

    # Specify prior ranges for each free parameter
    prior_ranges['T'] = [800, 3000]
    prior_ranges['R_p_ref'] = [0.85*R_p, 1.15*R_p]
    # BUGFIX: the key must match the 'H-' free parameter declared above
    # (it was previously 'log_H2O', so this range was silently ignored)
    prior_ranges['log_H-'] = [-14, -2]

    # Create prior object for retrieval
    priors = set_priors(planet, star, model, data, prior_types, prior_ranges)

    #***** Run atmospheric retrieval *****#

    run_retrieval(planet, star, model, opac, data, priors, wl, P, P_ref, R = R,
                  spectrum_type = 'transmission', sampling_algorithm = 'MultiNest',
                  N_live = 400, verbose = True)

    #***** Read MultiNest retrieval results *****#

    # True values in parameter order: R_p_ref (R_J), T (K), log(H-)
    true_vals = [1.753, T_eq, log_X_params[0]]

    # Change directory into MultiNest result file folder
    output_dir = './POSEIDON_output/' + planet_name + '/retrievals/'
    os.chdir(output_dir + 'MultiNest_raw/')

    n_params = len(model['param_names'])

    # Run PyMultiNest analyser to extract posterior samples and model evidence
    analyzer = pymultinest.Analyzer(n_params, verbose = False,
                                    outputfiles_basename = model_name + '-')
    stats = analyzer.get_stats()

    # Compare the retrieved median of each parameter with its true value
    for i in range(n_params):

        parameter = model['param_names'][i]
        true_median = true_vals[i]

        m = stats['marginals'][i]
        retrieved_median = m['median']

        # Check relative difference between solutions < 1%
        relative_diff = np.abs((retrieved_median - true_median)/true_median)

        print('Relative diff for ' + parameter + ' = ' +
              str(round(np.max(relative_diff*1e2),3)) + ' %')

        assert relative_diff < 0.01

    # Return to the original working directory so later tests are unaffected
    os.chdir('../../../../')

    print("Retrieval test passed!")
|
MartianColonistREPO_NAMEPOSEIDONPATH_START.@POSEIDON_extracted@POSEIDON-main@tests@test_retrieval.py@.PATH_END.py
|
{
"filename": "demo_ROS_vehicle.py",
"repo_name": "projectchrono/chrono",
"repo_path": "chrono_extracted/chrono-main/src/demos/python/ros/demo_ROS_vehicle.py",
"type": "Python"
}
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2021 projectchrono.org
# All right reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Aaron Young
# =============================================================================
#
# Demo to show the use of Chrono::Vehicle with ROS in python
#
# =============================================================================
import pychrono as ch
import pychrono.vehicle as veh
import pychrono.ros as chros
def main():
    """Run an HMMWV-on-rigid-terrain simulation wired to ROS.

    Builds the vehicle, terrain, and driver subsystems, registers ROS
    handlers (clock, driver inputs on "~/input/driver_inputs", chassis
    state on "~/output/hmmwv/state"), then advances the simulation in
    real time for 30 s or until the ROS manager update fails.
    """
    # Create systems

    # Create the HMMWV vehicle, set parameters, and initialize
    hmmwv = veh.HMMWV_Full()
    hmmwv.SetContactMethod(ch.ChContactMethod_NSC)
    hmmwv.SetChassisCollisionType(veh.CollisionType_NONE)
    hmmwv.SetChassisFixed(False)
    # Start 1.6 m above the patch so the vehicle settles onto the ground
    hmmwv.SetInitPosition(ch.ChCoordsysd(ch.ChVector3d(0, 0, 1.6), ch.ChQuaterniond(1, 0, 0, 0)))
    hmmwv.SetEngineType(veh.EngineModelType_SHAFTS)
    hmmwv.SetTransmissionType(veh.TransmissionModelType_AUTOMATIC_SHAFTS)
    hmmwv.SetDriveType(veh.DrivelineTypeWV_AWD)
    hmmwv.SetSteeringType(veh.SteeringTypeWV_PITMAN_ARM)
    hmmwv.SetTireType(veh.TireModelType_TMEASY)
    hmmwv.SetTireStepSize(1e-3)
    hmmwv.Initialize()

    # Create the terrain: a flat 100 x 100 rigid patch
    terrain = veh.RigidTerrain(hmmwv.GetSystem())
    patch_mat = ch.ChContactMaterialNSC()
    patch_mat.SetFriction(0.9)
    patch_mat.SetRestitution(0.01)
    terrain.AddPatch(patch_mat, ch.CSYSNORM, 100.0, 100.0)
    terrain.Initialize()

    # Create the driver system; its inputs arrive through the ROS
    # driver-inputs handler registered below
    driver = veh.ChDriver(hmmwv.GetVehicle())
    driver.Initialize()

    # Create ROS manager and register the clock, driver-input, and
    # chassis-state handlers (the numeric argument is the update rate)
    ros_manager = chros.ChROSPythonManager()
    ros_manager.RegisterHandler(chros.ChROSClockHandler())
    ros_manager.RegisterHandler(chros.ChROSDriverInputsHandler(25, driver, "~/input/driver_inputs"))
    ros_manager.RegisterHandler(chros.ChROSBodyHandler(25, hmmwv.GetChassisBody(), "~/output/hmmwv/state"))
    ros_manager.Initialize()

    # Simulation loop
    time = 0
    time_step = 1e-3
    time_end = 30

    hmmwv.GetVehicle().EnableRealtime(True)
    while time < time_end:
        time = hmmwv.GetSystem().GetChTime()

        # Get driver inputs
        driver_inputs = driver.GetInputs()

        # Update modules (process inputs from other modules)
        driver.Synchronize(time)
        terrain.Synchronize(time)
        hmmwv.Synchronize(time, driver_inputs, terrain)

        # Advance simulation for one timestep for all modules
        driver.Advance(time_step)
        terrain.Advance(time_step)
        hmmwv.Advance(time_step)

        # Exit the loop if the ROS manager update fails
        if not ros_manager.Update(time, time_step):
            break


if __name__ == "__main__":
    main()
|
projectchronoREPO_NAMEchronoPATH_START.@chrono_extracted@chrono-main@src@demos@python@ros@demo_ROS_vehicle.py@.PATH_END.py
|
{
"filename": "_customdata.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/ohlc/_customdata.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the `customdata` data-array property of `ohlc` traces."""

    def __init__(self, plotly_name="customdata", parent_name="ohlc", **kwargs):
        # Default edit type is "calc" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@ohlc@_customdata.py@.PATH_END.py
|
{
"filename": "models.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/webserver/lasair/apps/watchlist/models.py",
"type": "Python"
}
|
from django.db import models
# A WATCHLIST IS OWNED BY A USER AND GIVEN A NAME AND DESCRIPTION
# ONLY ACTIVE WATCHLISTS ARE RUN AGAINST THE REALTIME INGESTION
# THE PREQUEL_WHERE CAN BE USED TO SELECT WHICH CANDIDATES ARE COMPARED WITH THE WATCHLIST
from django.contrib.auth.models import User
class WatchlistCone(models.Model):
    """A single named sky position (cone) belonging to a watchlist.

    Rows point back at their parent ``Watchlist``; each cone carries its
    own optional match radius.
    """
    cone_id = models.AutoField(primary_key=True)
    # Parent watchlist; nullable, and deletions are not cascaded
    wl = models.ForeignKey('Watchlist', models.DO_NOTHING, blank=True, null=True)
    name = models.CharField(max_length=32, blank=True, null=True)
    # ra/decl: sky coordinates -- presumably degrees; confirm against ingestion code
    ra = models.FloatField(blank=True, null=True)
    decl = models.FloatField(blank=True, null=True)
    # Per-cone match radius -- units not shown here; verify
    radius = models.FloatField(blank=True, null=True)

    class Meta:
        """Model options: concrete table 'watchlist_cones'."""
        managed = True
        db_table = 'watchlist_cones'
class Watchlist(models.Model):
    """A named, user-owned list of sky positions.

    Per the module comments, only active watchlists are run against the
    realtime ingestion.  The positions themselves are stored as
    ``WatchlistCone`` rows referencing this table.
    """
    wl_id = models.AutoField(primary_key=True)
    # Owner; nullable, stored in the 'user' column, deletions not cascaded
    user = models.ForeignKey(User, models.DO_NOTHING, db_column='user', blank=True, null=True)
    name = models.CharField(max_length=256, blank=True, null=True)
    description = models.TextField(max_length=4096, blank=True, null=True)
    # Only active watchlists are matched during realtime ingestion
    active = models.BooleanField(blank=True, null=True)
    public = models.BooleanField(blank=True, null=True)
    # Default match radius for the watchlist's cones -- units not shown here; verify
    radius = models.FloatField(blank=True, null=True)
    date_created = models.DateTimeField(auto_now_add=True, editable=False, blank=True, null=True)
    date_modified = models.DateTimeField(auto_now=True, editable=False, blank=True, null=True)
    date_expire = models.DateTimeField(editable=True, blank=True, null=True)

    class Meta:
        """Model options: concrete table 'watchlists'."""
        managed = True
        db_table = 'watchlists'

    def __str__(self):
        # user is nullable; guard so admin pages and shell repr calls do
        # not crash on a missing owner (previously this raised
        # AttributeError when user was NULL)
        if self.user is not None:
            owner = f'{self.user.first_name} {self.user.last_name}'
        else:
            owner = 'unknown user'
        return f'{owner}: {self.name}'
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@webserver@lasair@apps@watchlist@models.py@.PATH_END.py
|
{
"filename": "_mesh3d.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/template/data/_mesh3d.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Mesh3DValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Validator for `mesh3d` entries under `layout.template.data`."""

    def __init__(
        self, plotly_name="mesh3d", parent_name="layout.template.data", **kwargs
    ):
        # Pull the defaults out of kwargs before forwarding the rest.
        data_class_str = kwargs.pop("data_class_str", "Mesh3d")
        data_docs = kwargs.pop(
            "data_docs",
            """
            """,
        )
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@template@data@_mesh3d.py@.PATH_END.py
|
{
"filename": "_sizesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choropleth/hoverlabel/font/_sizesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `sizesrc` property of `choropleth.hoverlabel.font`."""

    def __init__(
        self, plotly_name="sizesrc", parent_name="choropleth.hoverlabel.font", **kwargs
    ):
        # Default edit type is "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choropleth@hoverlabel@font@_sizesrc.py@.PATH_END.py
|
{
"filename": "parse_log.py",
"repo_name": "sdss/idlspec2d",
"repo_path": "idlspec2d_extracted/idlspec2d-master/python/boss_drp/utils/daily_log/parse_log.py",
"type": "Python"
}
|
from boss_drp.utils.daily_log.Flag import *
from boss_drp.utils.chpc2html import chpc2html
from boss_drp.field import field_to_string as f2s
from boss_drp.field import field_dir, field_spec_dir, field_png_dir
import pandas as pd
import glob
import os.path as ptt
import re
from collections import OrderedDict
import numpy as np
import time
class Crash_log:
    """One known log-file failure signature.

    Bundles a regular expression with the pipeline step it applies to, an
    optional replacement message, an optional scan-index restriction, and
    the severity flag reported when the signature matches.
    """

    def __init__(self, step, error, msg=None, line=None, flag=Error_warn):
        # step: pipeline step this signature applies to (None = any step)
        self.step = step
        # Wrap the pattern so match() behaves like an unanchored search
        self.error = re.compile('.*' + error + '.*')
        # msg: replacement text for a hit ({step} is substituted); None
        # means report the raw matching line itself
        self.msg = msg
        # line: maximum reverse-scan index at which a hit still counts
        # (None = anywhere in the file)
        self.line = line
        # flag: severity reported on a hit
        self.flag = flag

    def check(self, i, line, step):
        """Return a report string if *line* matches this signature, else None.

        *i* is the index of *line* in the reversed log (0 = last line) and
        *step* is the pipeline step the log belongs to.
        """
        if self.step is not None and self.step != step:
            return None
        if self.line is not None and i > self.line:
            return None
        if not self.error.match(line):
            return None
        if self.msg is None:
            return line.replace('\n', '')
        return self.msg.format(step=step)
# Error/warning signatures searched for in the IDL-step logs.  Each entry
# ties a regex to the step it applies to, an optional replacement message,
# an optional scan-index restriction, and the severity flag to report.
# NOTE(review): entries constructed with line=-1 can never match, because
# the reverse-scan index i starts at 0 and Crash_log.check returns early
# whenever i > self.line -- confirm whether line=-1 was meant to restrict
# the match to the last line instead.
errors = [Crash_log('spDiag2d','LOCATESKYLINES:.*WARNING: Maximum sky-line shift is.*(DISABLING)'),
          Crash_log('spDiag2d','ABORT: Only 0 sky fibers found',
                    msg='No Sky Fibers Found', flag=stopped),
          Crash_log('spDiag2d','ABORT: No good flats (saturated?)', flag=stopped),
          Crash_log('spDiag2d','SPCALIB: .*: .* paired with no arc', flag=stopped),
          Crash_log('spDiag2d','SUPERFLAT: .*: Creating superflat from .* fibers',
                    flag=stopped, line =1),
          Crash_log('spDiag2d','ABORT: Reject science as too bright: 25-th-percentile =',
                    msg='Reject Bright Science', flag=stopped),
          Crash_log('spDiag2d','SKYSUBTRACT:.*: Discarding .*(fractional) of the sky pixels as bad',
                    msg='Failed Sky Subtraction', line = -1, flag=stopped),
          Crash_log('spDiag2d','FITSPECTRARESOL: .*: Calculating the spectra resolution',
                    msg='Failed FITSPECTRARESOL', line = 1, flag=stopped),
          Crash_log('spDiag2d','EXTRACT_BUNDLE_IMAGE: .*: sigmasize:',
                    msg='Failure Extracting Exposure', line = 1, flag=stopped),
          Crash_log('spDiag2d','FITMEANX: .*:',msg='Failure in Sky Line Identification',
                    line = 1, flag=stopped),
          Crash_log('spDiag2d','XCEN is not sorted or not separated by greater than 3 pixels.',
                    msg='Warning: Close or Overlapping Traces', flag=Error_warn),
          Crash_log('spDiag2d','Big wavelength gap',flag=Silent_warn),
          Crash_log('spDiagcomb','RM_SPFLUX_V5:.*: USING XYFIT', flag=stopped,
                    msg='SpectroPhoto Calibration Failure', line = 1),
          Crash_log('spDiagcomb','RM_SPCOMBINE_V5: ABORT: No exposures with SCORE > 0',
                    msg='No Good Exposures', flag=NoExp),
          Crash_log('spDiagcomb','RM_SPFLUX_V5: Rejected .* of .* std stars',
                    msg='Failure Combining Exposures', line = 1, flag=stopped),
          Crash_log('spDiagcomb','RM_SPFLUX_V5: ABORT: No good fluxing stars!',
                    flag=Error_warn, msg='ABORT: No good fluxing stars!'),
          Crash_log('spDiagcomb','RM_SPFLUX_V5: WARNING: Already rejected .* of .* std stars',
                    flag=Error_warn),
          Crash_log('spDiagcomb','RM_SPFLUX_V5: Iteration #',
                    msg='Failure in Fluxing', line = 1, flag=stopped),
          Crash_log('spDiag1d','ZCOMPUTE: .*',msg='Failure in COMPUTECHI2 for ZFIND',
                    line = 1, flag=stopped),
          Crash_log('spDiag1d','ZFIND: .*',msg='Failure in COMPUTECHI2 for ZFIND',
                    line = 1, flag=stopped),
          Crash_log('run_spTrace','Execution halted', msg='Failed run_spTrace', flag=stopped),
          Crash_log('run_spTrace','Killed', msg='Failed run_spTrace', flag=stopped),
          Crash_log('spAll','fieldmerge: EXITING!!', flag=stopped),
          Crash_log('spSpec_reformat', 'read_spAll: ERROR: Missing .*',
                    msg='Failed spSpec_reformat: missing spAll field', flag=stopped)]

# Interpreter-level failure signatures for the Python-based steps
# (step=None entries apply to whichever step is being scanned).
py_err = [Crash_log(None,'exception:',
                    msg='Failed {step}', flag=stopped),
          Crash_log(None,'SyntaxError:',
                    msg='Failed {step}', flag=stopped),
          Crash_log('spAll','fieldmerge: No valid spAll entries', flag=stopped),
          Crash_log(None,'FileNotFoundError', msg='Failed {step}', flag=stopped)]

# Sentinels used by parse_log: a camera whose flat was eventually
# "paired with arc" means an earlier "paired with no arc" warning for
# that camera (b = blue, r = red) can be ignored.
noerr_cal_b = Crash_log('spDiag2d','SPCALIB: b.*: .* paired with arc',
                        msg='No error', flag=NoIssues)
noerr_cal_r = Crash_log('spDiag2d','SPCALIB: r.*: .* paired with arc',
                        msg='No error', flag=NoIssues)
def parse_log(file, custom=None):
    """Classify the state of one BOSS pipeline log file.

    The basename of *file* is matched against the known pipeline steps;
    for the matching step the log is scanned for that step's success
    message and, around it, for known error/warning signatures from the
    module-level ``errors`` and ``py_err`` tables.

    Parameters
    ----------
    file : str
        Path to the log file to examine.
    custom : str, optional
        Custom coadd name; when set, only the reduced set of custom-coadd
        steps is considered.  Defaults to None (standard reductions).

    Returns
    -------
    tuple
        ``(flag, note)`` where ``flag`` is a status flag from the Flag
        module (``NoIssues``, ``running``, ``stopped``, ...) and ``note``
        is an explanatory string or None.
    """
    # Success message expected at the end of each step's log
    if custom is None:
        complete = {'spfibermap':'Successful completion of readfibermaps',
                    'spDiag2d':'Successful completion of SPREDUCE2D',
                    'spDiagcomb':'Successful completion of SPCOMBINE',
                    'spDiag1d':'Successful completion of SPREDUCE1D',
                    'spXCSAO':'CPU time to compute RVs',
                    'fieldlist':'Successful completion of fieldlist',
                    'spAll':'Successful completion of fieldmerge',
                    'spSpec_reformat':'Successful completion of spSpec_reformat',
                    'spCalib_QA':'SpectroPhoto QA Complete',
                    'run_spTrace':'Successful completion of boss_arcs_to_trace'}
    else:
        complete = {'spDiagcomb':'SPSPEC_TARGET_MERGE: Successful completion of spspec_target_merge',
                    'spDiag1d':'Successful completion of SPREDUCE1D',
                    'spXCSAO':'CPU time to compute RVs',
                    'spAll':'Successful completion of fieldmerge',
                    'spSpec_reformat':'Successful completion of spSpec_reformat'}
    for key in complete:
        if key not in file:
            continue
        # spfibermap logs carry a trailing line, so look one line higher
        line = -2 if key == 'spfibermap' else -1
        if not ptt.exists(file):
            return(running, f'<b>{key}</b> not yet run')
        with open(file) as f:
            try:
                last_line = f.readlines()[line]
            except Exception:
                # Empty (or unreadable) log: treat as no completion message
                last_line = ''
        if complete[key] in last_line:
            # Step finished; scan the log in reverse for warnings that may
            # still have been emitted before the completion message.
            with open(file) as f:
                lines = f.readlines()
                lines.reverse()
                for i, line in enumerate(lines):
                    for err in errors:
                        msg = err.check(i, line, key)
                        if msg is not None:
                            if key == 'spDiag2d':
                                # An "unpaired" flat is only a problem when
                                # that camera never got a paired arc at all.
                                if 'SPCALIB: b' in msg:
                                    for li, ll in enumerate(lines):
                                        noerr = noerr_cal_b.check(li, ll, key)
                                        if noerr == 'No error':
                                            msg = None
                                elif 'SPCALIB: r' in msg:
                                    for li, ll in enumerate(lines):
                                        # BUGFIX: use the red-camera sentinel
                                        # here (this branch previously checked
                                        # noerr_cal_b again, so red-camera
                                        # arc pairings were never recognised)
                                        noerr = noerr_cal_r.check(li, ll, key)
                                        if noerr == 'No error':
                                            msg = None
                            if msg is not None:
                                return(err.flag, msg)
            return(NoIssues, None)
        elif key == 'run_spTrace' and '.e.log' in file:
            # The spTrace .e.log never carries the success message; any
            # matched signature there means the step died.
            with open(file) as f:
                lines = f.readlines()
                lines.reverse()
                for i, line in enumerate(lines):
                    for err in errors:
                        msg = err.check(i, line, key)
                        if msg is not None:
                            return(stopped, msg)
            return(NoIssues, None)
        else:
            if ((key in ['spfibermap','spXCSAO','fieldlist','spAll','run_spTrace'])
                    and '.log' in file):
                # Python-based steps: look for interpreter-level failures
                with open(file) as f:
                    lines = f.readlines()
                    lines.reverse()
                    for i, line in enumerate(lines):
                        for err in py_err:
                            msg = err.check(i, line, key)
                            if msg is not None:
                                return(err.flag, msg)
                return(running, None)
            else:
                if time.time() - ptt.getmtime(file) > 300:
                    # check if log has been updated in last 5 minutes
                    # if not then check for errors
                    with open(file) as f:
                        lines = f.readlines()
                        lines.reverse()
                        for i, line in enumerate(lines):
                            for err in errors:
                                msg = err.check(i, line, key)
                                if msg is not None:
                                    return(err.flag, msg)
                return(running, None)
    # Basename matched no known step, or nothing conclusive was found
    return(running, None)
class LogCheck:
    """Resolve and render HTML status links for one field-MJD's log files.

    Instances capture the reduction location (topdir/run2d/run1d) and the
    field/MJD identifiers; ``html`` then maps log/product filename
    templates to colour-coded ``<A>`` links for the daily-log page.
    """
    def __init__(self, topdir, run2d, run1d, field, mjd, dither='F',
                 epoch=False, custom = None, mjd1d=None, obs=None):
        # Reduction tree locations and identifiers used to build paths
        self.topdir = topdir
        self.run2d = run2d
        self.run1d = run1d
        self.field = f2s(field)   # normalised field string
        self.mjd = mjd
        self.mjd1d = mjd1d        # 1D coadd MJD (custom coadds only)
        self.custom = custom      # custom coadd name, or None
        self.dither = dither      # 'T'/'F' dither-field flag
        self.epoch = epoch        # True for epoch coadds
        # NOTE(review): obs=None would crash on .lower() here -- callers
        # appear to always pass an observatory string; confirm.
        self.obs = obs.lower()
    def html(self, fbase=[], exts =None):
        """Return ``(html, notes)`` for the given filename templates.

        Parameters
        ----------
        fbase : list
            Filename templates (relative to the field directory) that may
            contain {field}, {mjd}, {custom}, {mjd1d}, {obs} placeholders.
        exts : list, optional
            Display label per template; when None the label is derived
            from the file extension ('log' for .log files).

        Returns
        -------
        tuple
            (HTML string of colour-coded links, list of parse_log notes).
        """
        rs = ''
        note = []
        colors = []
        top2d = ptt.join(self.topdir, self.run2d)
        for i, fb in enumerate(fbase):
            cs = False if self.custom is None else True
            fd = field_dir(top2d, self.field, custom = cs)
            ed = 'epoch' if self.epoch else ''
            file = ptt.join(fd, ed, fb.format(field=self.field, mjd=self.mjd,
                                              custom=self.custom, mjd1d=self.mjd1d,
                                              obs = self.obs))
            file = ptt.abspath(file)
            # gf: link label when the file exists; bf: struck-out label
            # (coloured as 'running') shown when it does not
            if ptt.splitext(file)[-1] == '.log':
                gf = f'log'
                bf = f"<s style='color:{running.color};'>log</s> "
            else:
                if ptt.splitext(file)[-1] == '.gz':
                    ext = ptt.splitext(ptt.splitext(file)[0])[-1]
                else:
                    ext = ptt.splitext(file)[-1]
                if exts is not None:
                    ext = exts[i]
                ext = ext.replace('.','')
                gf = f'{ext}'
                bf = f"<s style='color:{running.color};'>{ext}</s> "
            flag = NoIssues
            if ptt.splitext(file)[-1] == '.log':
                # Colour .log links by the parsed status of the log
                flag, tnote = parse_log(file, custom=self.custom)
                if 'v6_1' in self.run2d:
                    # Special-case v6_1 reductions: show spCalib_QA as clean
                    if 'spCalib_QA' in fb:
                        flag = NoIssues
                        bf = f"<s style='color:{NoIssues.color};'>log</s> "
                        tnote = None
                if tnote is not None:
                    note.append(tnote)
            if ptt.exists(file):
                if ptt.getsize(file.replace('.pdf','.ps')) > 0:
                    rs = rs + "<A HREF="+chpc2html(file)+f" style='color:{flag.color};'>"+gf+"</A> "
                    colors.append(flag)
                    continue
                else:
                    # Zero-size sibling: only link the file if it has content
                    with open(file, 'rb') as ff:
                        if len(ff.readlines()) > 100:
                            rs = rs + "<A HREF="+chpc2html(file)+f" style='color:{flag.color};'>"+gf+"</A> "
                            colors.append(flag)
                            continue
            elif '*' in file:
                # Wildcard template: link the directory if anything matches
                # NOTE(review): relies on the glob module being importable at
                # module scope -- no explicit 'import glob' is visible; confirm.
                if len(glob.glob(file)) > 0:
                    rs = rs + "<A HREF="+chpc2html(ptt.dirname(file))+f" style='color:{flag.color};'>"+gf+"</A> "
                    colors.append(flag)
                    continue
            if ptt.exists(file.replace('.pdf','.ps')):
                # Fall back to a .ps version of a missing .pdf product
                if ptt.getsize(file.replace('.pdf','.ps')) > 0:
                    rs = rs + "<A HREF="+chpc2html(file.replace('.pdf','.ps'))+f" style='color:{flag.color};'>"+gf+"</A> "
                    colors.append(flag)
                    continue
            elif 'spDiagcomb' in fbase[0]:
                # Missing combine products inherit 'no issues' colouring
                # when the combine log itself parsed clean
                if colors[0] == NoIssues:
                    color = NoIssues.color
                    bf = bf.replace(f'color:{running.color}',f'color:{NoIssues.color}')
            # File absent: show the struck-out placeholder instead
            # NOTE(review): bf (a string) is appended to colors here, while
            # flag objects are appended elsewhere -- confirm intended.
            rs = rs + bf
            colors.append(bf)
        if self.dither == 'T':
            # Dither fields use the alternate colour codes for these states
            rs = (rs.replace(f'color:{stopped.color}',f'color:{stopped.code}')
                  .replace(f'color:{running.color}',f'color:{running.code}'))
        if f'color:{stopped.color}' in rs:
            # Any stopped step taints the remaining links in this cell
            rs = (rs.replace(f'color:{running.color}',f'color:{stopped.color}')
                  .replace(f'color:{NoIssues.color}',f'color:{stopped.color}'))
        if 'redux-' in rs:
            rs = rs.replace('<A','<A class="redux"')
        if self.custom is not None and 'redux_' in rs:
            rs = rs.replace('<A','<A class="redux"')
        return(rs, note)
def CheckRedux(topdir, run2d, run1d, field, mjd, obs, dither = 'F', epoch=False,
               plan2d=None, custom = None, mjd1d = None):
    """Assemble one pandas Series summarising a field-MJD reduction.

    Each entry holds the colour-coded HTML links (built by
    ``LogCheck.html``) for one pipeline stage, plus a consolidated 'Note'
    column with the de-duplicated warnings/errors extracted from the logs.

    Returns
    -------
    pandas.Series
        One row for the daily-log status table.
    """
    lc = LogCheck(topdir, run2d, run1d, field, mjd, dither = dither,
                  epoch=epoch, custom=custom, mjd1d=mjd1d, obs=obs.lower())
    fmjd = pd.Series({}, dtype=object)
    note = OrderedDict()
    fmjd['Field'] = field
    if custom is not None:
        fmjd['MJD'] = mjd1d
    else:
        fmjd['MJD'] = mjd
    fmjd['OBS'] = obs.upper()
    fmjd['Dither'] = dither
    # Links to the top-level redux driver scripts and their stdout/stderr
    if custom is None:
        fmjd['redux'], _ = lc.html(['redux-{field}-{mjd}','redux-{field}-{mjd}.e',
                                    'redux-{field}-{mjd}.o'], exts=['s','e','o'])
    else:
        fmjd['redux'], _ = lc.html(['redux-{field}-{mjd}', 'redux_{field}-{mjd}.e','redux_{field}-{mjd}.o',
                                    'redux_{field}-{mjd}_{mjd1d}','redux_{field}-{mjd}_{mjd1d}.e',
                                    'redux_{field}-{mjd}_{mjd1d}.o'], exts=['s','e','o','1s','1e','1o'])
    # Plan files; for epoch coadds the per-MJD 2D plans live one level up
    if epoch:
        plan2d = ['../'+x for x in plan2d]
        exts = ['2d']*len(plan2d)
        exts.append('comb')
        plan2d.append('spPlancombepoch-{field}-{mjd}.par')
        fmjd['plans'], _ = lc.html(plan2d, exts=exts)
        # 2D stages were already run on the individual MJDs
        fmjd['spfibermap'] = '-'
        note['spfibermap'] = []
        fmjd['spreduce2D'] = '-'
        note['spreduce2D'] = []
    elif custom is not None:
        fmjd['plans'], _ = lc.html(['spPlanCustom-{custom}_{obs}-{mjd}.par'])
    else:
        fmjd['plans'], _ = lc.html(['spPlan2d-{field}-{mjd}.par',
                                    'spPlancomb-{field}-{mjd}.par'],exts=['2d','comb'])
        fmjd['spfibermap'], note['spfibermap'] = lc.html(['spfibermap-{field}-{mjd}.log',
                                                          'spfibermap-{field}-{mjd}.fits'])
        fmjd['spreduce2D'], note['spreduce2D'] = lc.html(['spDiag2d-{field}-{mjd}.log',
                                                          'spDiag2d-{field}-{mjd}.pdf'])
    # Combine/1D/RV/fieldlist stages (custom coadds key on mjd1d)
    if custom is None:
        fmjd['specombine'], note['specombine'] = lc.html(['spDiagcomb-{field}-{mjd}.log',
                                                          'spDiagcomb-{field}-{mjd}.pdf',
                                                          'spFluxdistort-{field}-{mjd}.pdf',
                                                          'spSN2d-{field}-{mjd}.pdf'])
        fmjd['spreduce1d'], note['spreduce1d'] = lc.html([run1d+'/spDiag1d-{field}-{mjd}.log'])
        fmjd['spXCSAO'], note['spXCSAO'] = lc.html([run1d+'/spXCSAO-{field}-{mjd}.log'])
        fmjd['Fieldlist'], note['Fieldlist'] = lc.html(['fieldlist-{field}-{mjd}.log'])
    else:
        fmjd['specombine'], note['specombine'] = lc.html(['spDiagcomb-{field}-{mjd}.log',
                                                          'spSN2d-{field}-{mjd1d}.pdf'])
        fmjd['spreduce1d'], note['spreduce1d'] = lc.html([run1d+'/spDiag1d-{field}-{mjd1d}.log',
                                                          run1d+'/spDiag1d-{field}-{mjd1d}.pdf'])
        fmjd['spXCSAO'], note['spXCSAO'] = lc.html([run1d+'/spXCSAO-{field}-{mjd1d}.log'])
    # Locate the spec/png output directories relative to the field dir
    cs = False if custom is None else True
    # NOTE(review): both branches below yield 'field' -- confirm whether the
    # custom branch was meant to use the custom coadd name instead.
    fd = field if custom is None else field
    mj = mjd if custom is None else mjd1d
    spec_dir = field_spec_dir(topdir, run2d, fd, mj, epoch=epoch,
                              custom = cs, custom_name = custom)
    img_dir = field_png_dir(topdir,run2d,run1d,fd,mj,epoch=epoch,
                            custom = cs, custom_name = custom)
    fd = field_dir(ptt.join(topdir,run2d),fd,custom=cs)
    if epoch:
        fd = ptt.join(fd,'epoch')
    spec_dir = ptt.relpath(spec_dir, start = fd)
    img_dir = ptt.relpath(img_dir, start = fd)
    if custom is None:
        fmjd['Fieldmerge'], note['Fieldmerge'] = lc.html(['spAll-{field}-{mjd}.log',
                                                          ptt.join(spec_dir,f'spAll-{field}-{mjd}.fits.gz')])
        fmjd['Reformat'], note['Reformat'] = lc.html(['spSpec_reformat-{field}-{mjd}.log',
                                                      img_dir, spec_dir],
                                                     exts=['.log','.png','.fits'])
        fmjd['SpCalib'], note['SpCalib'] = lc.html(['spCalib_QA-'+run2d+'-{field}-{mjd}.log',
                                                    'spCalib_QA-'+run2d+'-{field}-{mjd}.pdf'])
    else:
        fmjd['Fieldmerge'], note['Fieldmerge'] = lc.html(['spAll-{field}-{mjd1d}.log',
                                                          ptt.join(spec_dir,f'spAll-{field}-{mjd1d}.fits.gz')])
        fmjd['Reformat'], note['Reformat'] = lc.html(['spSpec_reformat-{field}-{mjd1d}.log',
                                                      img_dir, spec_dir],
                                                     exts=['.log','.png','.fits'])
    # Consolidate per-stage notes into a single de-duplicated string
    fmjd['Note'] = []
    nep = False
    for key in note:
        if note[key] is not None:
            note[key] = np.unique(np.asarray(note[key])).tolist()
            if 'No Good Exposures' in ', '.join(note[key]):
                nep = True
            if nep is True:
                # Recolour stages downstream of a 'No Good Exposures' abort
                fmjd[key] = (fmjd[key].replace(f'color:{stopped.color}',f'color:{NoExp.color}')
                             .replace(f'color:{running.color}',f'color:{NoExp.color}'))
            fmjd['Note'].append(', '.join(note[key]))
    fmjd['Note'] = list(dict.fromkeys(fmjd['Note'])) #list(set(fmjd['Note']))
    try:
        fmjd['Note'].remove('')
    except:
        pass
    fmjd['Note'] = ', '.join(fmjd['Note'])
    return(fmjd)
|
sdssREPO_NAMEidlspec2dPATH_START.@idlspec2d_extracted@idlspec2d-master@python@boss_drp@utils@daily_log@parse_log.py@.PATH_END.py
|
{
"filename": "spectral_extraction.py",
"repo_name": "Keck-DataReductionPipelines/KPF-Pipeline",
"repo_path": "KPF-Pipeline_extracted/KPF-Pipeline-master/modules/spectral_extraction/src/spectral_extraction.py",
"type": "Python"
}
|
# Standard dependencies
"""
This module defines class SpectralExtraction which inherits from `KPF0_Primitive` and provides methods to perform
the event on spectral extraction in the recipe.
Description:
* Method `__init__`:
SpectralExtraction constructor, the following arguments are passed to `__init__`,
- `action (keckdrpframework.models.action.Action)`: `action.args` contains positional arguments and
keyword arguments passed by the `SpectralExtraction` event issued in the recipe:
- `action.args[0] (kpfpipe.models.level0.KPF0)`: Instance of `KPF0` containing spectrum data for
spectral extraction.
- `action.args[1] (kpfpipe.models.level0.KPF0)`: Instance of `KPF0` containing flat data and order
trace result.
- `action.args[2] (kpfpipe.models.level1.KPF1)`: Instance of `KPF1` containing spectral
extraction results. If not existing, it is None.
- `action.args['ccd_index'] (int, optional)`: index of the ccd. Defaults to None.
- `action.args['orderlet_names'] (str|list, optional)`: Name or list of names of the order to be
processed. Defaults to 'SCI1'.
- `action.args['max_result_order']: (int, optional)`: Total orders to be processed, Defaults to -1.
- `action.args['start_order'] (int, optional)`: Index of the first order to be processed.
Defaults to 0.
- `action.args['rectification_method']: (str, optional)`: Rectification method, '`norect`',
'`vertical`', or '`normal`', to rectify the curved order trace. Defaults to '`norect`',
meaning no rectification.
- `action.args['extraction_method']: (str, optional)`: Extraction method, '`sum`',
or '`optimal`', to extract and reduce the curved order trace, and 'rectonly' to rectify the curve
with no reduction. Defaults to '`optimal`', meaning optimal extraction which produces 1-D flux
for each order trace based on the spectrum
data and its variance and the weighting based on the flat data instead of doing summation on
the spectrum data directly.
- `action.args['wavecal_fits']: (str|KPF1 optional)`: Path of the fits file or `KPF1` instance
containing wavelength calibration data. Defaults to None.
- `action.args['to_set_wavelength_cal']: (boolean, optional)`: if setting the wavelength calibration
values from ``action.args['wavecal_fits']``. Defaults to False.
- `action.args['clip_file'] (str, optional)`: Prefix of clip file path. Defaults to None.
Clip file is used to store the polygon clip data for the rectification method
which is not NoRECT.
- `action.args['total_order_per_ccd']: (int, optional)`: total order per ccd. Defaults to False.
- `action.args['data_extension']: (str, optional)`: the name of the extension in spectrum containing data.
- `action.args['flat_extension']: (str, optional)`: the name of the extension in flat containing data.
- `action.args['trace_extension']: (str, optional)`: the name of the extension containing order
trace results.
- `action.args['trace_file']: (str, optional)`: the name file containing order trace results.
- `action.args['orderlets_on_image'] (str|list, optional)`: Name or list of names of the order
appearing on the image. Defaults to None.
- `action.args['poly_degree']: (str, optional)`: Polynomial degree for order trace curve fitting.
Defaults to 3.
- `action.args['origin']: (list, optional)`: Origin of the image where the order trace is related
to. Defaults to [0, 0]
- `action.args['do_outlier_rejection']: (bool, optional)`: perform outlier rejection on spectrum
data. Defaults to False.
- `action.args['outlier_file']: (str, optional)`: L0 file with outlier rejection results. Defaults
to None.
- `action.args['spec_no_bk']: (str, optional)`: L0 file before background subtraction. Defaults
to None.
- `context (keckdrpframework.models.processing_context.ProcessingContext)`: `context.config_path`
contains the path of the config file defined for the module of spectral extraction in the master
config file associated with the recipe.
and the following attributes are defined to initialize the object,
- `input_spectrum (kpfpipe.models.level0.KPF0)`: Instance of `KPF0`, assigned by `actions.args[0]`.
- `input_flat (kpfpipe.models.level0.KPF0)`: Instance of `KPF0`, assigned by `actions.args[1]`.
- `output_level1 (kpfpipe.models.level1.KPF1)`: Instance of `KPF1`, assigned by `actions.args[2]`.
- `ccd_index (int)`: ccd index.
- `orderlet_names (str)`: Name of the order to be processed.
- `start_order (int)`: Index of the first order to be processed.
- `max_result_order (int)`: Total orders to be processed.
- `rectification_method (int)`: Rectification method code as defined in `SpectralExtractionAlg`.
- `extraction_method (str)`: Extraction method code as defined in `SpectralExtractionAlg`.
- `wavecal_fits (str)`: Path of the fits file or `KPF1` instance with wavelength calibration data.
- `to_set_wavelength_cal`: Flag indicates if setting wavelength calibration data to wavelength
calibration extension from ``wavecal_fits``.
- `clip_file (str)`: Prefix of clip file path. Defaults to None.
- `total_order_per_ccd (list)`: Total order per ccd.
- `order_trace_data (Union[numpy.ndarray, pandas.DataFrame])`: Order trace data including
polynomial coefficients, top/bottom edges and horizontal coverage of the order trace.
- `spec_flux (numpy.ndarray)`: 2D spectrum data, raw data or rectified data.
- `spec_header (fits.header.Header)`: fits header of spectrum data.
- `config_path (str)`: Path of config file for spectral extraction.
- `config (configparser.ConfigParser)`: Config context per the file defined by `config_path`.
- `logger (logging.Logger)`: Instance of logging.Logger.
- `alg (modules.order_trace.src.alg.SpectralExtractionAlg)`: Instance of `SpectralExtractionAlg` which
has operation codes for the computation of spectral extraction.
* Method `__perform`:
SpectralExtraction returns the result in `Arguments` object which contains a level 1 data object (`KPF1`)
with the spectral extraction results and the wavelength data tentatively transported from
`action.args['wavecal_fits']` if there is.
Usage:
For the recipe, the spectral extraction event is issued like::
:
lev0_data = kpf0_from_fits(input_lev0_file, data_type=data_type)
op_data = SpectralExtraction(lev0_data, lev0_flat_data,
None, orderlet_names=order_name,
rectification_method=rect_method,
trace_file=trace_file,
wavecal_fits=input_lev1_file)
:
where `op_data` is KPF1 object wrapped in `Arguments` class object.
"""
import configparser
import pandas as pd
import numpy as np
import os.path
# Pipeline dependencies
# from kpfpipe.logger import start_logger
from kpfpipe.primitives.level0 import KPF0_Primitive
from kpfpipe.models.level0 import KPF0
from kpfpipe.models.level1 import KPF1
# External dependencies
from keckdrpframework.models.action import Action
from keckdrpframework.models.arguments import Arguments
from keckdrpframework.models.processing_context import ProcessingContext
# Local dependencies
from modules.spectral_extraction.src.alg import SpectralExtractionAlg
# Global read-only variables
DEFAULT_CFG_PATH = 'modules/spectral_extraction/configs/default.cfg'
class SpectralExtraction(KPF0_Primitive):
    """Pipeline primitive performing spectral extraction on level 0 data to produce level 1 data."""

    # Default values used for any argument not supplied via ``action.args``
    # (see ``get_args_value``).
    # NOTE(review): 'first_orderlet_idx' is declared here but not read anywhere
    # in this class -- confirm whether it is still needed.
    default_args_val = {
        'orderlet_names': ['SCI'],
        'max_result_order': -1,
        'start_order': 0,
        'rectification_method': 'norect', # 'norect', 'normal', 'vertical'
        'extraction_method': 'optimal',
        'wavecal_fits': None,
        'to_set_wavelength_cal': False,
        'clip_file': None,
        'data_extension': 'DATA',
        'flat_extension': 'DATA',
        'var_extension': 'VAR',
        'poly_degree': 3,
        'origin': [0, 0],
        'trace_extension': None,
        'trace_file': None,
        'ccd_index': None,
        'first_orderlet_idx': None,
        'total_order_per_ccd': None,
        'orderlets_on_image': None,
        'do_outlier_rejection': False,
        'outlier_file': '',
        'spec_no_bk': None
    }
    # Rectification method codes; ``get_args_value`` maps the string argument
    # to the equivalent ``SpectralExtractionAlg`` constants actually used.
    NORMAL = 0
    VERTICAL = 1
    NoRECT = 2
def __init__(self,
action: Action,
context: ProcessingContext) -> None:
# Initialize parent class
KPF0_Primitive.__init__(self, action, context)
args_keys = [item for item in action.args.iter_kw() if item != "name"]
# input argument
# action.args[0] is for level 0 fits
# action.args[1] is for level 0 flat with order trace result extension
self.input_spectrum = action.args[0] # kpf0 instance
self.input_flat = action.args[1] # kpf0 instance with flat data
self.output_level1 = action.args[2] # kpf1 instance already exist or None
self.ccd_index = self.get_args_value('ccd_index', action.args, args_keys)
self.orderlet_names = self.get_args_value('orderlet_names', action.args, args_keys)
self.max_result_order = self.get_args_value("max_result_order", action.args, args_keys)
self.start_order = self.get_args_value("start_order", action.args, args_keys) # for the result of order trace
self.rectification_method = self.get_args_value("rectification_method", action.args, args_keys)
self.extraction_method = self.get_args_value('extraction_method', action.args, args_keys)
self.wavecal_fits = self.get_args_value('wavecal_fits', action.args, args_keys) # providing wavelength calib.
self.to_set_wavelength_cal = self.get_args_value('to_set_wavelength_cal', action.args, args_keys) # set wave cal
self.clip_file = self.get_args_value('clip_file', action.args, args_keys)
self.total_order_per_ccd = self.get_args_value('total_order_per_ccd', action.args, args_keys)
data_ext = self.get_args_value('data_extension', action.args, args_keys)
var_ext = self.get_args_value('var_extension', action.args, args_keys)
flat_ext = self.get_args_value('flat_extension', action.args, args_keys)
self.data_ext = data_ext
order_trace_ext = self.get_args_value('trace_extension', action.args, args_keys)
order_trace_file = self.get_args_value('trace_file', action.args, args_keys)
orderlets_on_image = self.get_args_value("orderlets_on_image", action.args, args_keys)
self.outlier_rejection = self.get_args_value('do_outlier_rejection', action.args, args_keys)
self.outlier_file = self.get_args_value("outlier_file", action.args, args_keys) if self.outlier_rejection \
else ''
spec_no_bk = self.get_args_value('spec_no_bk', action.args, args_keys)
# input configuration
self.config = configparser.ConfigParser()
try:
self.config_path = context.config_path['spectral_extraction']
except:
self.config_path = DEFAULT_CFG_PATH
self.config.read(self.config_path)
# start a logger
self.logger = None
if not self.logger:
self.logger = self.context.logger
self.logger.info('Loading config from: {}'.format(self.config_path))
self.order_trace_data = None
if order_trace_file:
self.order_trace_data = pd.read_csv(order_trace_file, header=0, index_col=0)
poly_degree = self.get_args_value('poly_degree', action.args, args_keys)
origin = self.get_args_value('origin', action.args, args_keys)
order_trace_header = {'STARTCOL': origin[0], 'STARTROW': origin[1], 'POLY_DEG': poly_degree}
elif order_trace_ext:
self.order_trace_data = self.input_flat[order_trace_ext]
order_trace_header = self.input_flat.header[order_trace_ext]
# Order trace algorithm setup
self.spec_header = self.input_spectrum.header[data_ext] \
if (self.input_spectrum is not None and hasattr(self.input_spectrum, data_ext)) else None
self.spec_flux = self.input_spectrum[data_ext] \
if (self.input_spectrum is not None and hasattr(self.input_spectrum, data_ext)) else None
self.outlier_lev0 = None
if self.outlier_rejection:
if self.outlier_file:
if os.path.exists(self.outlier_file):
self.outlier_lev0 = KPF0.from_fits(self.outlier_file)
else:
self.outlier_lev0 = KPF0()
if self.outlier_lev0[self.data_ext].size == 0:
self.outlier_lev0[self.data_ext] = np.zeros_like(self.spec_flux)
# self.outlier_lev0[self.data_ext][:] = self.spec_flux
outlier_flux = self.outlier_lev0[self.data_ext] \
if self.outlier_lev0 is not None and hasattr(self.outlier_lev0, data_ext) else None
if spec_no_bk is not None and hasattr(spec_no_bk, var_ext) and spec_no_bk[var_ext].size > 0:
var_data = spec_no_bk[var_ext]
else:
var_data = None
try:
self.alg = SpectralExtractionAlg(self.input_flat[flat_ext] if hasattr(self.input_flat, flat_ext) else None,
self.input_flat.header[flat_ext] if hasattr(self.input_flat, flat_ext) else None,
self.spec_flux,
self.spec_header,
self.order_trace_data,
order_trace_header,
config=self.config, logger=self.logger,
rectification_method=self.rectification_method,
extraction_method=self.extraction_method,
ccd_index=self.ccd_index,
orderlet_names=orderlets_on_image,
total_order_per_ccd=self.total_order_per_ccd,
clip_file=self.clip_file,
do_outlier_rejection = self.outlier_rejection,
outlier_flux=outlier_flux,
var_data=var_data)
except Exception as e:
self.alg = None
def _pre_condition(self) -> bool:
"""
Check for some necessary pre conditions
"""
# flat data for extraction and order trace data
success = isinstance(self.input_flat, KPF0)
return success
def _post_condition(self) -> bool:
"""
Check for some necessary post conditions
"""
return True
    def _perform(self):
        """
        Primitive action -
        perform spectral extraction by calling method `extract_spectrum` from SpectralExtractionAlg and create an instance
        of level 1 data (KPF1) to contain the analysis result.

        Returns:
            Arguments: wrapping the level 1 data containing the spectral extraction
            result, or wrapping None when nothing could be extracted.
        """
        # rectification_method: SpectralExtractAlg.NoRECT(fastest) SpectralExtractAlg.VERTICAL, SpectralExtractAlg.NORMAL
        # extraction_method: 'optimal' (default), 'sum'
        if self.logger:
            self.logger.info("SpectralExtraction: rectifying and extracting order...")
        # self.alg is None when __init__ failed to build the algorithm instance.
        if self.alg is None:
            if self.logger:
                self.logger.info("SpectralExtraction: no extension data, order trace data or improper header.")
            return Arguments(None)
        ins = self.alg.get_instrument().upper()
        # Wavelength calibration source: a fits path, a KPF1 or a KPF0 instance.
        kpf1_sample = None
        kpf0_sample = None
        if self.wavecal_fits is not None:    # get the header and wavecal from this fits
            if isinstance(self.wavecal_fits, str):
                kpf1_sample = KPF1.from_fits(self.wavecal_fits, ins)
            elif isinstance(self.wavecal_fits, KPF1):
                kpf1_sample = self.wavecal_fits
            elif isinstance(self.wavecal_fits, KPF0):
                kpf0_sample = self.wavecal_fits
        all_order_names = self.orderlet_names if type(self.orderlet_names) is list else [self.orderlet_names]
        # Collect the trace index set and first valid trace for each orderlet.
        all_o_sets = []
        s_order = self.start_order if self.start_order is not None else 0
        first_trace_at = []
        for order_name in all_order_names:
            o_set, f_idx = self.get_order_set(order_name, s_order, self.alg.get_orderlet_index(order_name))
            all_o_sets.append(o_set)
            first_trace_at.append(f_idx)
        good_result = True
        for idx, order_name in enumerate(all_order_names):
            if not good_result:    # process stops once an empty result is made
                continue
            o_set = all_o_sets[idx]
            # orderlet_index = self.alg.get_orderlet_index(order_name)
            first_index = first_trace_at[idx]
            if o_set.size == 0 or first_index < 0:
                if self.logger:
                    self.logger.info("no data to be extracted for " + order_name)
                continue
            if self.spec_flux is None or self.spec_flux.size == 0:
                data_df = None
                if self.logger:
                    self.logger.info('**** ' + order_name + ' has no data to be extracted ****')
            else:
                # if self.logger:
                #    self.logger.info(order_name + ' has first spectra starting from index ' + str(first_index))
                if self.logger:
                    self.logger.info("SpectralExtraction: do " +
                                     SpectralExtractionAlg.rectifying_method[self.rectification_method] +
                                     " rectification and " +
                                     SpectralExtractionAlg.extracting_method[self.extraction_method] +
                                     " extraction on " + order_name + " of " + str(o_set.size) + " orders")
                opt_ext_result = self.alg.extract_spectrum(order_set=o_set, first_index=first_index, order_name = order_name)
                assert('spectral_extraction_result' in opt_ext_result and
                       isinstance(opt_ext_result['spectral_extraction_result'], pd.DataFrame))
                data_df = opt_ext_result['spectral_extraction_result']
            good_result = good_result and data_df is not None
            if good_result:
                # Append this orderlet's result to the (possibly existing) level 1 object.
                self.output_level1 = self.construct_level1_data(data_df, ins, kpf1_sample,
                                                                order_name, self.output_level1)
                self.add_wavecal_to_level1_data(self.output_level1, order_name, kpf1_sample, kpf0_sample)
                data_outlier = opt_ext_result['outlier_rejection_result']
                if data_outlier is not None and self.outlier_lev0 is not None:
                    self.outlier_lev0[self.data_ext][:] = data_outlier
        # Persist the accumulated outlier map, if one was maintained.
        if self.outlier_lev0 is not None and self.outlier_file:
            self.outlier_lev0.to_fits(self.outlier_file)
        if good_result and self.output_level1 is not None:
            self.output_level1.receipt_add_entry('SpectralExtraction', self.__module__,
                                                 f'orderlets={" ".join(all_order_names)}', 'PASS')
        if not good_result and self.logger:
            self.logger.info("SpectralExtraction: no spectrum extracted")
        elif good_result and self.logger:
            self.logger.info("SpectralExtraction: Receipt written")
            self.logger.info("SpectralExtraction: Done for orders " + " ".join(all_order_names) + "!")
        return Arguments(self.output_level1) if good_result else Arguments(None)
def get_order_set(self, order_name, s_order, orderlet_index):
o_set = self.alg.get_order_set(order_name)
if o_set.size > 0:
e_order = min(self.max_result_order, len(o_set)) \
if (self.max_result_order is not None and self.max_result_order > 0) else o_set.size
o_set_ary = o_set[0:e_order] + s_order
valid_idx = np.where(o_set_ary >= 0)[0]
first_idx = valid_idx[0] if valid_idx.size > 0 else -1
return o_set_ary[np.where((o_set_ary < self.alg.get_spectrum_order()) & (o_set_ary >= 0))], first_idx
else:
return o_set
    def construct_level1_data(self, op_result, ins, level1_sample: KPF1, order_name: str, output_level1:KPF1):
        """Build or extend a level 1 object with one orderlet's extraction result.

        Args:
            op_result (pandas.DataFrame): Extraction result; ``values`` holds the
                2D flux array and ``attrs`` the header keys/values to carry over.
            ins (str): Instrument name in upper case (e.g. 'NEID', 'KPF').
            level1_sample (KPF1): Sample level 1 object supplying header keys, or None.
            order_name (str): Orderlet/extension name, e.g. containing 'FLUX'.
            output_level1 (KPF1): Existing level 1 object to extend, or None.

        Returns:
            KPF1: the (possibly newly created) level 1 object.
        """
        # Indices into the extension-name list built by get_data_extensions_on.
        FLUX_EXT = 0
        VAR_EXT = 1
        WAVE_EXT = 2
        update_primary_header = False if level1_sample is None or ins != 'NEID' else True
        if output_level1 is not None:
            kpf1_obj = output_level1
        else:
            kpf1_obj = KPF1.from_l0(self.input_spectrum)
        if op_result is not None:
            total_order, width = np.shape(op_result.values)
        else:
            total_order = 0

        def get_data_extensions_on(order_name, ins):
            # FLUX-style names get companion VAR and WAVE extensions.
            # NOTE(review): both branches produce the same list for
            # 'FLUX' names -- the NEID/KPF special case looks redundant.
            if ins in ['NEID', 'KPF'] and 'FLUX' in order_name:
                ext_name = [order_name, order_name.replace('FLUX', 'VAR'),
                            order_name.replace('FLUX', 'WAVE')]
            else:
                ext_name = [order_name, order_name.replace('FLUX', 'VAR'),
                            order_name.replace('FLUX', 'WAVE')] if 'FLUX' in order_name else [order_name]
            return ext_name

        if total_order <= 0:
            return kpf1_obj
        # if no data in op_result, not build data extension and the associated header
        ext_names = get_data_extensions_on(order_name, ins)
        data_ext_name = ext_names[FLUX_EXT]
        # data = op_result.values
        kpf1_obj[data_ext_name] = op_result.values
        # Carry the DataFrame's attrs over as header keywords for the flux extension.
        for att in op_result.attrs:
            kpf1_obj.header[data_ext_name][att] = op_result.attrs[att]
        if len(ext_names) > VAR_EXT:   # init var and wave extension if there is
            # get data for variance extension
            var_ext_data = self.alg.compute_variance(op_result.values)
            kpf1_obj[ext_names[VAR_EXT]] = var_ext_data
            if len(ext_names) > WAVE_EXT:
                # no wave ext yet or zero size
                if not hasattr(kpf1_obj, ext_names[WAVE_EXT]) or np.size(getattr(kpf1_obj, ext_names[WAVE_EXT])) == 0:
                    kpf1_obj[ext_names[WAVE_EXT]] = np.zeros((total_order, width))
        # for neid data with level 1 sample:
        if ins == "NEID":
            if update_primary_header and level1_sample is not None and hasattr(kpf1_obj, data_ext_name):
                sample_primary_header = level1_sample.header['PRIMARY']
            else:
                sample_primary_header = self.spec_header
            if sample_primary_header is not None:
                # for h_key in sample_primary_header:
                # Only a fixed subset of keys is copied into the primary header.
                for h_key in ['SSBZ100', 'SSBJD100', 'CAL-OBJ']:
                    if h_key in sample_primary_header:
                        kpf1_obj.header['PRIMARY'][h_key] = sample_primary_header[h_key]
        return kpf1_obj
    def add_wavecal_to_level1_data(self, level1_obj: KPF1, order_name: str, level1_sample: KPF1, level0_sample: KPF0):
        """Copy wavelength-calibration header (and optionally data) into the level 1 object.

        Args:
            level1_obj (KPF1): Level 1 object being built.
            order_name (str): Orderlet/extension name (typically containing 'FLUX').
            level1_sample (KPF1): Level 1 sample providing the wave extension, or None.
            level0_sample (KPF0): Level 0 sample providing 'DATA' as wavecal, or None.

        Returns:
            bool: True on success, False when no calibration could be transferred.
        """
        if level1_sample is None and level0_sample is None:
            return False
        ins = self.alg.get_instrument().upper()

        def get_extension_on(order_name, ext_type):
            # Map a FLUX-style extension name to its companion of ext_type.
            if ext_type != 'FLUX':
                ext_name = order_name.replace('FLUX', ext_type) if 'FLUX' in order_name else None
            else: # temporary setting, need more instrument information
                ext_name = order_name
            return ext_name

        # check if wavelength calibration extension exists in level 1 or level 0 sample
        if level1_sample is not None:
            data_ext_name = get_extension_on(order_name, 'FLUX')
            if (not hasattr(level1_sample, data_ext_name)) or \
                    (not hasattr(level1_obj, data_ext_name)):
                return False
        wave_ext_name = get_extension_on(order_name, 'WAVE')
        # temporary code for transport calibration data from GREEN_CAL_WAVE
        if ins == 'KPF':
            wave_ext_alternate = 'GREEN_CAL_WAVE' if 'GREEN' in order_name else 'RED_CAL_WAVE'
        else:
            wave_ext_alternate = None
        if wave_ext_name is None:
            return False
        if level1_sample is not None:    # get header of wavelength cal from level 1 data
            wave_header = level1_sample.header[wave_ext_name]
        else:                            # get header of wavelength cal. from level 0 data
            wave_header = level0_sample.header['DATA']
            if wave_header is not None:
                wave_header['EXTNAME']= wave_ext_name
        if wave_header is None:
            return False
        level1_obj.header[wave_ext_name] = wave_header   # assign the item or set?
        if not self.to_set_wavelength_cal:   # no data setting
            return True
        if level1_sample is not None:    # assume wavelength calibration data is from level1 sample
            wave_data = getattr(level1_sample, wave_ext_name) if hasattr(level1_sample, wave_ext_name) else None
            # temporary solution
            # Fall back to the per-CCD CAL wave extension when the sample's
            # wave extension is all zeros.
            if wave_data is not None and (np.where(wave_data != 0.0)[0]).size == 0:
                if wave_ext_alternate is not None:
                    self.logger.info("get wavelength solution from " + wave_ext_alternate)  # removed
                    wave_data = getattr(level1_sample, wave_ext_alternate) \
                        if hasattr(level1_sample, wave_ext_alternate) else wave_data
        else:    # assume wavelength calibration data is in level0 sample, need update ???
            wave_data = getattr(level0_sample, 'DATA') if hasattr(level0_sample, 'DATA') else None
        if wave_data is None:    # data setting error
            return False
        # Copy only the overlapping rows into the target wave extension, in place.
        wave_start = 0
        wave_end = min(np.shape(wave_data)[0], np.shape(getattr(level1_obj, wave_ext_name))[0])
        wave_arr = getattr(level1_obj, wave_ext_name)
        if wave_arr.size != 0 and wave_end > wave_start:
            wave_arr[wave_start:wave_end, :] = wave_data[wave_start:wave_end, :]
        return True
    def get_args_value(self, key: str, args: Arguments, args_keys: list):
        """Fetch an argument value with defaulting and method-code conversion.

        Args:
            key (str): Argument name.
            args (Arguments): Arguments object passed to the primitive.
            args_keys (list): Keys actually present in ``args``.

        Returns:
            For 'rectification_method' and 'extraction_method', the
            corresponding ``SpectralExtractionAlg`` method code; for every
            other key, the raw value (falling back to ``default_args_val``).
        """
        if key in args_keys:
            v = args[key]
        else:
            v = self.default_args_val[key]
        if key == 'rectification_method':
            # Map the string to the algorithm's rectification code;
            # anything unrecognized falls back to NoRECT.
            if v is not None and isinstance(v, str):
                if v.lower() == 'normal':
                    method = SpectralExtractionAlg.NORMAL
                elif v.lower() == 'vertical':
                    method = SpectralExtractionAlg.VERTICAL
                else:
                    method = SpectralExtractionAlg.NoRECT
            else:
                method = SpectralExtractionAlg.NoRECT
        elif key == 'extraction_method':
            # Substring match: 'summ...' -> SUM, 'fox' -> FOX, else OPTIMAL.
            if v is not None and isinstance(v, str):
                if 'summ' in v.lower():
                    method = SpectralExtractionAlg.SUM
                elif 'fox' in v.lower():
                    method = SpectralExtractionAlg.FOX
                else:
                    method = SpectralExtractionAlg.OPTIMAL
            else:
                method = SpectralExtractionAlg.OPTIMAL
        else:
            # Non-method keys: these two must never be None, so re-apply the
            # default, then return the raw value directly.
            if key == 'data_extension' or key == 'trace_extension':
                if v is None:
                    v = self.default_args_val[key]
            return v
        return method
|
Keck-DataReductionPipelinesREPO_NAMEKPF-PipelinePATH_START.@KPF-Pipeline_extracted@KPF-Pipeline-master@modules@spectral_extraction@src@spectral_extraction.py@.PATH_END.py
|
{
"filename": "test_utils.py",
"repo_name": "gmzsebastian/SLSNe",
"repo_path": "SLSNe_extracted/SLSNe-main/slsne/tests/test_utils.py",
"type": "Python"
}
|
from ..utils import (define_filters, get_cenwave, quick_cenwave_zeropoint,
check_filters, plot_colors, get_lc, calc_flux_lum)
from astropy.table import Table
import os
import pytest
import numpy as np
from astropy import units as u
@pytest.fixture
def data_dir():
    """Return the path to the package's ``ref_data`` reference-data directory."""
    # Get directory with reference data
    current_file_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(current_file_dir, '..', 'ref_data')
def test_define_filters_first_row(data_dir):
    """define_filters returns a Table covering every instrument in the reference file."""
    # Call the function
    result = define_filters(data_dir)

    # Check that the result is a Table
    assert isinstance(result, Table)

    # Read the input filters
    filters_in = Table.read(f'{data_dir}/filter_reference.txt', format='ascii')

    # Check that the ZTF filter is in the output table and is exactly 3
    ZTF = filters_in[filters_in['Instrument'] == 'ZTF']
    assert len(ZTF) == 3

    # Check that all the values for 'Instrument' in filters_in are in the resulting table
    for i in filters_in['Instrument']:
        assert i in result['Instrument']

    # Make sure there are no instances for the word 'Generic' in the resulting table
    # (i.e. every placeholder Cenwave/Zeropoint has been resolved to a number).
    assert 'Generic' not in list(result['Cenwave'])
    assert 'Generic' not in list(result['Zeropoint'])
def test_get_cenwave_swift_filter():
    """Swift filters resolve to their central wavelength and AB zeropoint."""
    cenwave, zeropoint = get_cenwave('swift_UVW1', system='AB', return_zp=True, verbose=False)
    assert cenwave == 2681.67
    assert zeropoint == 3631.0


def test_get_cenwave_non_swift_filter():
    """Instrument-specific filters resolve without returning a zeropoint."""
    cenwave = get_cenwave('g', instrument='ZTF', return_zp=False, verbose=False)
    assert cenwave == 4746.48


def test_get_cenwave_generic_filter():
    """Generic filters resolve with a Vega-system zeropoint."""
    cenwave, zeropoint = get_cenwave('z', return_zp=True, system='Vega', verbose=False)
    assert cenwave == 8922.78
    assert zeropoint == 2238.99


def test_get_cenwave_unknown_filter():
    """An unknown filter name raises KeyError."""
    with pytest.raises(KeyError):
        get_cenwave('potato', verbose=False)


def test_get_cenwave_unknown_system():
    """An unknown magnitude system raises KeyError."""
    with pytest.raises(KeyError):
        get_cenwave('g', system='penguin', return_zp=True, verbose=False)
# Create a mock phot table
# NOTE: astropy's Table(data, names=...) treats each inner list as a *column*,
# so the rows read across as (Swift, UVOT, Vega, UVW1), (P48, ZTF, AB, g)
# and (Generic, Generic, AB, r).
names = ['Telescope', 'Instrument', 'System', 'Filter']
data = [['Swift', 'P48', 'Generic'],
        ['UVOT', 'ZTF', 'Generic'],
        ['Vega', 'AB', 'AB'],
        ['UVW1', 'g', 'r']]
phot = Table(data, names=names)
def test_quick_cenwave_zeropoint():
    """quick_cenwave_zeropoint returns per-row central wavelengths and zeropoints."""
    cenwaves, zeropoints = quick_cenwave_zeropoint(phot)
    assert np.all(cenwaves == np.array([2681.67, 4746.48, 6141.12]))
    assert np.all(zeropoints == np.array([921.0, 3631.0, 3631.0]))


def test_quick_cenwave_zeropoint_missing_column():
    """A photometry table missing a required column raises KeyError."""
    phot_missing_column = phot.copy()
    phot_missing_column.remove_column('Telescope')
    with pytest.raises(KeyError):
        quick_cenwave_zeropoint(phot_missing_column)
def test_check_filters(capsys):
    """check_filters on a fully known photometry table passes silently."""
    # This should pass without raising an exception
    # and without printing anything
    check_filters(phot)
    captured = capsys.readouterr()
    assert captured.out == ""


def test_check_filters_missing_column():
    """check_filters raises KeyError when a required column is absent."""
    # Remove the 'Telescope' column
    phot_missing_column = phot.copy()
    phot_missing_column.remove_column('Telescope')
    with pytest.raises(KeyError):
        check_filters(phot_missing_column)


def test_check_filters_prints_something(capsys):
    """check_filters reports (prints) a filter missing from the reference data."""
    # BUG FIX: this test previously did phot.add_row(...) on the shared
    # module-level table, leaking the bogus row into any test that runs
    # afterwards (order-dependent failures). Work on a copy instead.
    bad_phot = phot.copy()
    bad_phot.add_row(['pink', 'penguin', 'AB', 'g'])
    check_filters(bad_phot)
    captured = capsys.readouterr()
    assert captured.out != ""
def test_plot_colors_known_band():
    """Known optical bands map to fixed matplotlib color names."""
    assert plot_colors('u') == 'navy'
    assert plot_colors('r') == 'r'
    assert plot_colors('i') == 'maroon'


def test_plot_colors_swift():
    """Swift UV bands map to an RGBA array of four numbers."""
    color = plot_colors('UVW1')
    assert isinstance(color, np.ndarray)
    assert color.shape == (4,)
    assert all(isinstance(num, (int, float)) for num in color)


def test_plot_colors_unknown_band():
    """Unknown bands fall back to black ('k')."""
    assert plot_colors('potato') == 'k'
def test_get_lc():
    """get_lc returns a populated astropy Table with an MJD column."""
    # FIX: the `mocker` fixture was requested but never used; dropping it
    # removes the implicit pytest-mock dependency.
    # Call the function with a test object name
    phot = get_lc('2018lfe')

    # Check that the returned table is correct
    assert isinstance(phot, Table)
    assert len(phot) >= 1
    assert 'MJD' in phot.colnames


def test_calc_flux_lum():
    """calc_flux_lum converts magnitudes to flux and luminosity Quantities."""
    # FIX: unused `mocker` fixture removed (see test_get_lc).
    # Create a mock photometry table
    phot = Table({
        'Mag': [-20.0],
        'zeropoint': [3631.0],
        'cenwave': [5500.0]
    })

    # Call the function with the mock photometry table and a test redshift
    F_lambda, L_lambda = calc_flux_lum(phot, 0.5)

    # Check that the returned flux and luminosity are correct
    assert isinstance(F_lambda, u.Quantity)
    assert isinstance(L_lambda, u.Quantity)
    assert np.isclose(F_lambda.value[0], 0.359850, rtol=1e-5)
    assert np.isclose(L_lambda.value[0], 5.505261e+56, rtol=1e-5)
|
gmzsebastianREPO_NAMESLSNePATH_START.@SLSNe_extracted@SLSNe-main@slsne@tests@test_utils.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/flax/nnx/bridge/__init__.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .wrappers import functional as functional
from .wrappers import Functional as Functional
from .wrappers import ToNNX as ToNNX
from .wrappers import lazy_init as lazy_init
from .wrappers import ToLinen as ToLinen
from .wrappers import to_linen as to_linen
from .variables import NNXMeta as NNXMeta
from .variables import register_variable_name_type_pair as register_variable_name_type_pair
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@flax@nnx@bridge@__init__.py@.PATH_END.py
|
{
"filename": "_weight.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram/insidetextfont/_weight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for the ``weight`` property of ``histogram.insidetextfont``."""

    def __init__(
        self, plotly_name="weight", parent_name="histogram.insidetextfont", **kwargs
    ):
        # Pull the overridable defaults out of **kwargs before forwarding,
        # so explicit caller values win over the hard-coded ones.
        defaults = {
            "edit_type": kwargs.pop("edit_type", "plot"),
            "extras": kwargs.pop("extras", ["normal", "bold"]),
            "max": kwargs.pop("max", 1000),
            "min": kwargs.pop("min", 1),
        }
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram@insidetextfont@_weight.py@.PATH_END.py
|
{
"filename": "tycho_3d.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/ixpeobssim/config/tycho_3d.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# Copyright (C) 2019, the ixpe team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function, division
import os
from ixpeobssim import IXPEOBSSIM_CONFIG_ASCII, IXPEOBSSIM_CONFIG_FITS
from ixpeobssim.config import file_path_to_model_name
from ixpeobssim.srcmodel.polarization import xStokesSkyCube
from ixpeobssim.srcmodel.roi import xExtendedSource, xROIModel
from ixpeobssim.utils.matplotlib_ import plt, setup_gca
from ixpeobssim.srcmodel.spectrum import load_spectral_spline
from ixpeobssim.srcmodel.img import xFITSImage
__model__ = file_path_to_model_name(__file__)

# Morphology.
# Source position in decimal degrees (presumably J2000 center of Tycho's SNR
# -- confirm against the image WCS).
ra, dec = 6.340, 64.137
img_file_path = os.path.join(IXPEOBSSIM_CONFIG_FITS, 'tycho_4p1_6p1_keV.fits')

# Energy spectrum.
spec_file_path = os.path.join(IXPEOBSSIM_CONFIG_ASCII, 'tycho_total_spectrum.csv')
spec_spline = load_spectral_spline(spec_file_path, delimiter=',', k=1)
# Time-independent spectrum: the t argument is accepted but ignored.
spec = lambda E, t: spec_spline(E)

# Polarization
# Stokes sky-cube layers as (x map, y map, emin, emax) tuples; None means an
# open-ended bound. Energies are presumably keV -- confirm. Note the gap
# between 8 and 12 and the increasing polarization fraction with energy.
pol_cube = xStokesSkyCube()
inputs = [
    ('polx_0.4_pf_0.30_radial.fits', 'poly_0.4_pf_0.30_radial.fits', 1., None),
    ('polx_0.4_pf_0.30_radial.fits', 'poly_0.4_pf_0.30_radial.fits', 2., 2.83),
    ('polx_0.4_pf_0.60_radial.fits', 'poly_0.4_pf_0.60_radial.fits', 2.83, 4.),
    ('polx_0.4_pf_0.85_radial.fits', 'poly_0.4_pf_0.85_radial.fits', 4.0, 5.66),
    ('polx_0.4_pf_0.90_radial.fits', 'poly_0.4_pf_0.90_radial.fits', 5.66, 8.),
    ('polx_0.4_pf_0.90_radial.fits', 'poly_0.4_pf_0.90_radial.fits', 12., None)
]
for x_file_name, y_file_name, emin, emax in inputs:
    x_file_path = os.path.join(IXPEOBSSIM_CONFIG_FITS, x_file_name)
    y_file_path = os.path.join(IXPEOBSSIM_CONFIG_FITS, y_file_name)
    pol_cube.add_layer_xy(x_file_path, y_file_path, emin, emax, rotate=True)
pol_deg = pol_cube.polarization_degree_model()
pol_ang = pol_cube.polarization_angle_model()

# Create the actual ROI model.
tycho = xExtendedSource('Tycho', img_file_path, spec, pol_deg, pol_ang)
ROI_MODEL = xROIModel(ra, dec, tycho)
def display():
    """Display the source model.

    Opens one figure with the input energy spectrum (log-log, 1--12 keV axis
    range) and one with the morphology image.
    """
    # Energy spectrum
    plt.figure('%s spectrum' % __model__)
    spec_spline.plot()
    setup_gca(xmin=1., xmax=12., logx=True, logy=True, grids=True)
    # Morphology
    plt.figure('%s morphology' % __model__)
    img = xFITSImage(img_file_path)
    img.plot()


if __name__ == '__main__':
    from ixpeobssim.config import bootstrap_display
    bootstrap_display()
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@ixpeobssim@config@tycho_3d.py@.PATH_END.py
|
{
"filename": "base_test.py",
"repo_name": "deepmind/optax",
"repo_path": "optax_extracted/optax-main/optax/_src/base_test.py",
"type": "Python"
}
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for base functions in `base.py`."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
import numpy as np
from optax._src import base
# pylint:disable=no-value-for-parameter
class BaseTest(chex.TestCase):
  """Tests for the simple transformations and typing contracts in `base.py`."""

  def test_typing(self):
    """Ensure that the type annotations work for the update function."""

    def f(updates, opt_state, params=None):
      del params
      return updates, opt_state

    def g(f: base.TransformUpdateFn):
      # Exercise the three legal call shapes of TransformUpdateFn.
      updates = np.zeros([])
      params = np.zeros([])
      opt_state = np.zeros([])
      f(updates, opt_state)
      f(updates, opt_state, params)
      f(updates, opt_state, params=params)

    g(f)

  @chex.all_variants
  def test_set_to_zero_returns_tree_of_correct_zero_arrays(self):
    """Tests that zero transform returns a tree of zeros of correct shape."""
    grads = ({'a': np.ones((3, 4)), 'b': 1.0}, np.ones((1, 2, 3)))
    updates, _ = self.variant(base.set_to_zero().update)(
        grads, base.EmptyState()
    )
    correct_zeros = ({'a': np.zeros((3, 4)), 'b': 0.0}, np.zeros((1, 2, 3)))
    chex.assert_trees_all_close(updates, correct_zeros, rtol=0)

  @chex.all_variants(with_pmap=False)
  def test_set_to_zero_is_stateless(self):
    """Tests that the zero transform returns an empty state."""
    self.assertEqual(
        self.variant(base.set_to_zero().init)(params=None), base.EmptyState()
    )
class ExtraArgsTest(chex.TestCase):
  """Tests for `GradientTransformationExtraArgs` support."""

  def test_isinstance(self):
    """Locks in behavior for comparing transformations."""

    def init_fn(params):
      del params
      return {}

    def update_fn(updates, state, params=None):
      del params
      return updates, state

    t1 = base.GradientTransformation(init_fn, update_fn)
    self.assertIsInstance(t1, base.GradientTransformation)
    self.assertNotIsInstance(t1, base.GradientTransformationExtraArgs)

    t2 = base.with_extra_args_support(t1)
    self.assertIsInstance(t2, base.GradientTransformation)
    self.assertIsInstance(t2, base.GradientTransformationExtraArgs)

    with self.subTest('args_correctly_ignored'):
      state = t2.init({})
      t2.update({}, state, ignored_arg='hi')

    # Wrapping an already extra-args transformation still satisfies both types.
    t3 = base.with_extra_args_support(t2)
    self.assertIsInstance(t3, base.GradientTransformation)
    self.assertIsInstance(t3, base.GradientTransformationExtraArgs)

  def test_extra_args_with_callback(self):
    """An example of using extra args to log the learning rate."""

    def init_fn(params):
      del params
      return {}

    def update_fn(updates, state, *, metrics_logger=None, **extra_args):
      del extra_args
      if metrics_logger:
        metrics_logger('learning_rate', 0.3)
      return updates, state

    t = base.GradientTransformationExtraArgs(init_fn, update_fn)

    @jax.jit
    def f(params):
      state = t.init(params)
      metrics = {}

      def metrics_logger(name, value):
        metrics[name] = value

      t.update(params, state, metrics_logger=metrics_logger)
      return metrics

    metrics = f({'a': 1})
    self.assertEqual(metrics['learning_rate'], 0.3)
class StatelessTest(chex.TestCase):
  """Tests for the stateless transformation."""

  @chex.all_variants
  def test_stateless(self):
    """A stateless update function receives and uses the params argument."""
    params = {'a': jnp.zeros((1, 2)), 'b': jnp.ones((1,))}
    updates = {'a': jnp.ones((1, 2)), 'b': jnp.full((1,), 2.0)}

    @base.stateless
    def opt(g, p):
      return jax.tree.map(lambda g_, p_: g_ + 0.1 * p_, g, p)

    state = opt.init(params)
    update_fn = self.variant(opt.update)
    new_updates, _ = update_fn(updates, state, params)
    expected_updates = {'a': jnp.ones((1, 2)), 'b': jnp.array([2.1])}
    chex.assert_trees_all_close(new_updates, expected_updates)

  @chex.all_variants
  def test_stateless_no_params(self):
    """A stateless update function may ignore params entirely."""
    updates = {'linear': jnp.full((5, 3), 3.0)}

    @base.stateless
    def opt(g, _):
      return jax.tree.map(lambda g_: g_ * 2, g)

    state = opt.init(None)  # pytype: disable=wrong-arg-types  # numpy-scalars
    update_fn = self.variant(opt.update)
    new_updates, _ = update_fn(updates, state)
    expected_updates = {'linear': jnp.full((5, 3), 6.0)}
    chex.assert_trees_all_close(new_updates, expected_updates)

  def test_init_returns_emptystate(self):
    """stateless transformations carry no state at all."""
    def weight_decay(g, p):
      return jax.tree.map(lambda g_, p_: g_ + 0.1 * p_, g, p)

    opt = base.stateless(weight_decay)
    state = opt.init(None)  # pytype: disable=wrong-arg-types  # numpy-scalars
    self.assertIsInstance(state, base.EmptyState)
class StatelessWithTreeMapTest(chex.TestCase):
  """Tests for the stateless_with_tree_map transformation."""

  @chex.all_variants
  def test_stateless_with_tree_map(self):
    """The leaf-wise function is applied to matching (update, param) pairs."""
    params = {'a': jnp.zeros((1, 2)), 'b': jnp.ones((1,))}
    updates = {'a': jnp.ones((1, 2)), 'b': jnp.full((1,), 2.0)}

    opt = base.stateless_with_tree_map(lambda g, p: g + 0.1 * p)
    state = opt.init(params)
    update_fn = self.variant(opt.update)
    new_updates, _ = update_fn(updates, state, params)
    expected_updates = {'a': jnp.ones((1, 2)), 'b': jnp.array([2.1])}
    chex.assert_trees_all_close(new_updates, expected_updates)

  @chex.all_variants
  def test_stateless_with_tree_map_no_params(self):
    """The leaf-wise function may ignore its params argument."""
    updates = {'linear': jnp.full((5, 3), 3.0)}

    opt = base.stateless_with_tree_map(lambda g, _: g * 2.0)
    state = opt.init(None)  # pytype: disable=wrong-arg-types  # numpy-scalars
    update_fn = self.variant(opt.update)
    new_updates, _ = update_fn(updates, state)
    expected_updates = {'linear': jnp.full((5, 3), 6.0)}
    chex.assert_trees_all_close(new_updates, expected_updates)

  def test_init_returns_emptystate(self):
    """stateless_with_tree_map transformations carry no state at all."""
    opt = base.stateless_with_tree_map(lambda g, p: g + 0.1 * p)
    state = opt.init(None)  # pytype: disable=wrong-arg-types  # numpy-scalars
    self.assertIsInstance(state, base.EmptyState)
# Run the absl test runner when this module is executed as a script.
if __name__ == '__main__':
    absltest.main()
|
deepmindREPO_NAMEoptaxPATH_START.@optax_extracted@optax-main@optax@_src@base_test.py@.PATH_END.py
|
{
"filename": "test_old_outputs.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/frontends/ytdata/tests/test_old_outputs.py",
"type": "Python"
}
|
"""
ytdata frontend tests using enzo_tiny_cosmology
"""
import os
import shutil
import tempfile
import numpy as np
from yt.data_objects.api import create_profile
from yt.frontends.ytdata.api import (
YTDataContainerDataset,
YTGridDataset,
YTNonspatialDataset,
YTProfileDataset,
YTSpatialPlotDataset,
)
from yt.frontends.ytdata.tests.test_outputs import (
YTDataFieldTest,
compare_unit_attributes,
)
from yt.testing import (
assert_allclose_units,
assert_array_equal,
requires_file,
requires_module,
skip,
)
from yt.units.yt_array import YTArray
from yt.utilities.answer_testing.framework import data_dir_load, requires_ds
from yt.visualization.profile_plotter import PhasePlot, ProfilePlot
enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
ytdata_dir = "ytdata_test"
@skip(reason="See https://github.com/yt-project/yt/issues/3909")
@requires_module("h5py")
@requires_ds(enzotiny)
@requires_file(os.path.join(ytdata_dir, "DD0046_sphere.h5"))
@requires_file(os.path.join(ytdata_dir, "DD0046_cut_region.h5"))
def test_old_datacontainer_data():
    """Check that old-format sphere and cut-region ytdata files still load.

    Generator-style answer test: yields YTDataFieldTest cases for the
    answer-testing framework.
    """
    ds = data_dir_load(enzotiny)
    sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
    fn = "DD0046_sphere.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    sphere_ds = data_dir_load(full_fn)
    # Unit attributes of the reloaded dataset must match the original dataset.
    compare_unit_attributes(ds, sphere_ds)
    assert isinstance(sphere_ds, YTDataContainerDataset)
    yield YTDataFieldTest(full_fn, ("grid", "density"))
    yield YTDataFieldTest(full_fn, ("all", "particle_mass"))
    cr = ds.cut_region(sphere, ['obj[("gas", "temperature")] > 1e4'])
    fn = "DD0046_cut_region.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    cr_ds = data_dir_load(full_fn)
    assert isinstance(cr_ds, YTDataContainerDataset)
    # The saved cut region must reproduce the in-memory selection exactly.
    assert (cr[("gas", "temperature")] == cr_ds.data[("gas", "temperature")]).all()
@skip(reason="See https://github.com/yt-project/yt/issues/3909")
@requires_module("h5py")
@requires_ds(enzotiny)
@requires_file(os.path.join(ytdata_dir, "DD0046_covering_grid.h5"))
@requires_file(os.path.join(ytdata_dir, "DD0046_arbitrary_grid.h5"))
@requires_file(os.path.join(ytdata_dir, "DD0046_proj_frb.h5"))
def test_old_grid_datacontainer_data():
    """Check old-format covering-grid, arbitrary-grid and FRB ytdata files."""
    ds = data_dir_load(enzotiny)
    # Covering grid saved by an older yt version.
    fn = "DD0046_covering_grid.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    cg_ds = data_dir_load(full_fn)
    compare_unit_attributes(ds, cg_ds)
    assert isinstance(cg_ds, YTGridDataset)
    yield YTDataFieldTest(full_fn, ("grid", "density"))
    yield YTDataFieldTest(full_fn, ("all", "particle_mass"))
    # Arbitrary grid saved by an older yt version.
    fn = "DD0046_arbitrary_grid.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    ag_ds = data_dir_load(full_fn)
    compare_unit_attributes(ds, ag_ds)
    assert isinstance(ag_ds, YTGridDataset)
    yield YTDataFieldTest(full_fn, ("grid", "density"))
    yield YTDataFieldTest(full_fn, ("all", "particle_mass"))
    # Fixed-resolution buffer of a weighted projection.
    my_proj = ds.proj("density", "x", weight_field="density")
    frb = my_proj.to_frb(1.0, (800, 800))
    fn = "DD0046_proj_frb.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    frb_ds = data_dir_load(full_fn)
    # Reloaded FRB densities must agree with the freshly computed ones.
    assert_allclose_units(
        frb[("gas", "density")], frb_ds.data[("gas", "density")], 1e-7
    )
    compare_unit_attributes(ds, frb_ds)
    assert isinstance(frb_ds, YTGridDataset)
    yield YTDataFieldTest(full_fn, ("gas", "density"), geometric=False)
@skip(reason="See https://github.com/yt-project/yt/issues/3909")
@requires_module("h5py")
@requires_ds(enzotiny)
@requires_file(os.path.join(ytdata_dir, "DD0046_proj.h5"))
def test_old_spatial_data():
    """Check that an old-format projection (spatial plot) ytdata file loads."""
    ds = data_dir_load(enzotiny)
    fn = "DD0046_proj.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    proj_ds = data_dir_load(full_fn)
    # Unit attributes of the reloaded dataset must match the original dataset.
    compare_unit_attributes(ds, proj_ds)
    assert isinstance(proj_ds, YTSpatialPlotDataset)
    yield YTDataFieldTest(full_fn, ("gas", "density"), geometric=False)
@skip(reason="See https://github.com/yt-project/yt/issues/3909")
@requires_module("h5py")
@requires_ds(enzotiny)
@requires_file(os.path.join(ytdata_dir, "DD0046_Profile1D.h5"))
@requires_file(os.path.join(ytdata_dir, "DD0046_Profile2D.h5"))
def test_old_profile_data():
    """Check old-format 1D/2D profile ytdata files and plot round-trips."""
    # Work in a scratch directory so plot files do not pollute the cwd.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    ds = data_dir_load(enzotiny)
    ad = ds.all_data()
    profile_1d = create_profile(
        ad,
        ("gas", "density"),
        ("gas", "temperature"),
        weight_field=("gas", "cell_mass"),
    )
    fn = "DD0046_Profile1D.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    prof_1d_ds = data_dir_load(full_fn)
    compare_unit_attributes(ds, prof_1d_ds)
    assert isinstance(prof_1d_ds, YTProfileDataset)
    # Standard deviations must survive the save/load round trip.
    for field in profile_1d.standard_deviation:
        assert_array_equal(
            profile_1d.standard_deviation[field],
            prof_1d_ds.profile.standard_deviation["data", field[1]],
        )
    p1 = ProfilePlot(
        prof_1d_ds.data,
        ("gas", "density"),
        ("gas", "temperature"),
        weight_field=("gas", "cell_mass"),
    )
    p1.save()
    yield YTDataFieldTest(full_fn, ("gas", "temperature"), geometric=False)
    yield YTDataFieldTest(full_fn, ("index", "x"), geometric=False)
    yield YTDataFieldTest(full_fn, ("gas", "density"), geometric=False)
    fn = "DD0046_Profile2D.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    prof_2d_ds = data_dir_load(full_fn)
    compare_unit_attributes(ds, prof_2d_ds)
    assert isinstance(prof_2d_ds, YTProfileDataset)
    p2 = PhasePlot(
        prof_2d_ds.data,
        ("gas", "density"),
        ("gas", "temperature"),
        ("gas", "cell_mass"),
        weight_field=None,
    )
    p2.save()
    yield YTDataFieldTest(full_fn, ("gas", "density"), geometric=False)
    yield YTDataFieldTest(full_fn, ("index", "x"), geometric=False)
    yield YTDataFieldTest(full_fn, ("gas", "temperature"), geometric=False)
    yield YTDataFieldTest(full_fn, ("index", "y"), geometric=False)
    yield YTDataFieldTest(full_fn, ("gas", "cell_mass"), geometric=False)
    # NOTE(review): if anything above raises, tmpdir is not removed and the
    # cwd is not restored; a try/finally would be safer — confirm intent.
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
@skip(reason="See https://github.com/yt-project/yt/issues/3909")
@requires_module("h5py")
@requires_ds(enzotiny)
@requires_file(os.path.join(ytdata_dir, "test_data.h5"))
@requires_file(os.path.join(ytdata_dir, "random_data.h5"))
def test_old_nonspatial_data():
    """Check old-format non-spatial (plain array) ytdata files."""
    ds = data_dir_load(enzotiny)
    region = ds.box([0.25] * 3, [0.75] * 3)
    sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
    my_data = {}
    my_data["region_density"] = region[("gas", "density")]
    my_data["sphere_density"] = sphere[("gas", "density")]
    fn = "test_data.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    array_ds = data_dir_load(full_fn)
    compare_unit_attributes(ds, array_ds)
    assert isinstance(array_ds, YTNonspatialDataset)
    yield YTDataFieldTest(full_fn, "region_density", geometric=False)
    yield YTDataFieldTest(full_fn, "sphere_density", geometric=False)
    # NOTE(review): my_data is built but never saved in this test; it mirrors
    # the save step of the non-"old" test module — confirm it is intentional.
    my_data = {"density": YTArray(np.linspace(1.0, 20.0, 10), "g/cm**3")}
    fn = "random_data.h5"
    full_fn = os.path.join(ytdata_dir, fn)
    new_ds = data_dir_load(full_fn)
    assert isinstance(new_ds, YTNonspatialDataset)
    yield YTDataFieldTest(full_fn, ("gas", "density"), geometric=False)
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@frontends@ytdata@tests@test_old_outputs.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/optimize/_trlib/setup.py",
"type": "Python"
}
|
from __future__ import division, print_function, absolute_import
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the `_trlib` extension.

    Parameters
    ----------
    parent_package : str
        Name of the parent package (passed through by the build system).
    top_path : str or None
        Path to the top of the source tree.

    Returns
    -------
    Configuration
        Configuration describing the `_trlib` C extension, linked against
        the LAPACK implementation discovered by ``get_info('lapack_opt')``.
    """
    from numpy import get_include
    # NotFoundError was imported here but never used; dropped.
    from scipy._build_utils.system_info import get_info
    from numpy.distutils.misc_util import Configuration
    from os.path import join, dirname

    lapack_opt = get_info('lapack_opt')
    # The shared scipy/_lib headers live three directories above this file.
    lib_inc = join(dirname(dirname(dirname(__file__))), '_lib')
    config = Configuration('_trlib', parent_package, top_path)
    config.add_extension('_trlib',
                         sources=['_trlib.c', 'trlib_krylov.c',
                                  'trlib_eigen_inverse.c', 'trlib_leftmost.c',
                                  'trlib_quadratic_zero.c', 'trlib_tri_factor.c'],
                         include_dirs=[get_include(), lib_inc, 'trlib'],
                         extra_info=lapack_opt,
                         )
    return config
# Allow building the extension standalone: `python setup.py build_ext ...`.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@optimize@_trlib@setup.py@.PATH_END.py
|
{
"filename": "validation-cholesteric.ipynb",
"repo_name": "PyEllips/pyElli",
"repo_path": "pyElli_extracted/pyElli-master/examples/Liquid crystals/validation-cholesteric.ipynb",
"type": "Jupyter Notebook"
}
|
# Example of a cholesteric liquid crystal
Author: O. Castany, C. Molinaro, M. Müller
```python
import elli
import elli.plot as elliplot
import matplotlib.pyplot as plt
import numpy as np
from numpy.lib.scimath import sqrt
from scipy.constants import c, pi
```
## Set parameters
```python
# Materials
front = back = elli.IsotropicMaterial(elli.ConstantRefractiveIndex(1.6))
# Liquid crystal oriented along the x direction
(no, ne) = (1.5, 1.7)
Dn = ne - no
n_med = (ne + no) / 2
LC = elli.UniaxialMaterial(
elli.ConstantRefractiveIndex(no), elli.ConstantRefractiveIndex(ne)
) # ne along z
R = elli.rotation_v_theta(elli.E_Y, 90) # rotation round y
LC.set_rotation(R) # apply rotation from z to x
# Cholesteric pitch:
p = 650
# One half turn of a right-handed helix:
TN = elli.TwistedLayer(LC, p / 2, 25, 180)
# Inhomogeneous layer, repeated layer, and structure
N = 5 # number half pitch repetitions
h = N * p / 2
L = elli.RepeatedLayers([TN], N)
s = elli.Structure(front, [L], back)
# Normal incidence:
Kx = 0.0
# Calculation parameters
lbda_min, lbda_max = 600, 1500 # (nm)
lbda = np.linspace(lbda_min, lbda_max, 100)
k0 = 2 * pi / (lbda * 1e-9)
```
## Analytical calculation for the power reflection coefficient
```python
q = 2 * pi / p / 1e-9
alpha = q / k0
epsilon = (no**2 + ne**2) / 2
delta = (no**2 - ne**2) / 2
n2 = sqrt((alpha**2 + epsilon - sqrt(4 * epsilon * alpha**2 + delta**2)))
w = 1j * (ne**2 - n2**2 - alpha**2) / (2 * alpha * n2) # not k0/c
A = -2j * k0 * n2 * h * 1e-9
R_th = (
np.abs(
(w**2 + 1)
* (1 - np.exp(-2j * k0 * n2 * h * 1e-9))
/ (
2 * w * (1 + np.exp(-2j * k0 * n2 * h * 1e-9))
- 1j * (w**2 - 1) * (1 - np.exp(-2j * k0 * n2 * h * 1e-9))
)
)
** 2
)
```
## Calculation with pyElli
```python
data = s.evaluate(lbda, 0)
# Jones matrices for the circular wave basis
# Right-circular wave is reflected in the stop-band
# R_LR, T_LR close to zero
R_RR = data.Rc_RR
```
## Plotting
```python
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(lbda, R_RR, label="R_RR")
ax.plot(lbda, R_th, "r", label="R_th")
ax.legend(loc="center right", bbox_to_anchor=(1.00, 0.50))
ax.set_title(
"Right-handed Cholesteric Liquid Crystal, " + "{:.1f} helix pitches".format(N / 2.0)
)
ax.set_xlabel(r"Wavelength $\lambda_0$ (m)")
ax.set_ylabel(r"Power reflexion $R$")
fmt = ax.xaxis.get_major_formatter()
fmt.set_powerlimits((-3, 3))
plt.show()
```

```python
elliplot.draw_structure(s)
```
<AxesSubplot:xlabel='z (nm)', ylabel="n'">

```python
```
|
PyEllipsREPO_NAMEpyElliPATH_START.@pyElli_extracted@pyElli-master@examples@Liquid crystals@validation-cholesteric.ipynb@.PATH_END.py
|
{
"filename": "pypdfdirectory.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/document_loaders/pypdfdirectory.ipynb",
"type": "Jupyter Notebook"
}
|
# PyPDFDirectoryLoader
This loader loads all PDF files from a specific directory.
## Overview
### Integration details
| Class | Package | Local | Serializable | JS support|
| :--- | :--- | :---: | :---: | :---: |
| [PyPDFDirectoryLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFDirectoryLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ |
### Loader features
| Source | Document Lazy Loading | Native Async Support
| :---: | :---: | :---: |
| PyPDFDirectoryLoader | ✅ | ❌ |
## Setup
### Credentials
No credentials are needed for this loader.
If you want to get automated best in-class tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:
```python
# os.environ["LANGSMITH_API_KEY"] = getpass.getpass("Enter your LangSmith API key: ")
# os.environ["LANGSMITH_TRACING"] = "true"
```
### Installation
Install **langchain_community**.
```python
%pip install -qU langchain_community
```
## Initialization
Now we can instantiate our model object and load documents:
```python
from langchain_community.document_loaders import PyPDFDirectoryLoader
directory_path = (
"../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf"
)
loader = PyPDFDirectoryLoader("example_data/")
```
## Load
```python
docs = loader.load()
docs[0]
```
Document(metadata={'source': 'example_data/layout-parser-paper.pdf', 'page': 0}, page_content='LayoutParser : A Unified Toolkit for Deep\nLearning Based Document Image Analysis\nZejiang Shen1( \x00), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain\nLee4, Jacob Carlson3, and Weining Li5\n1Allen Institute for AI\nshannons@allenai.org\n2Brown University\nruochen zhang@brown.edu\n3Harvard University\n{melissadell,jacob carlson }@fas.harvard.edu\n4University of Washington\nbcgl@cs.washington.edu\n5University of Waterloo\nw422li@uwaterloo.ca\nAbstract. Recent advances in document image analysis (DIA) have been\nprimarily driven by the application of neural networks. Ideally, research\noutcomes could be easily deployed in production and extended for further\ninvestigation. However, various factors like loosely organized codebases\nand sophisticated model configurations complicate the easy reuse of im-\nportant innovations by a wide audience. Though there have been on-going\nefforts to improve reusability and simplify deep learning (DL) model\ndevelopment in disciplines like natural language processing and computer\nvision, none of them are optimized for challenges in the domain of DIA.\nThis represents a major gap in the existing toolkit, as DIA is central to\nacademic research across a wide range of disciplines in the social sciences\nand humanities. This paper introduces LayoutParser , an open-source\nlibrary for streamlining the usage of DL in DIA research and applica-\ntions. The core LayoutParser library comes with a set of simple and\nintuitive interfaces for applying and customizing DL models for layout de-\ntection, character recognition, and many other document processing tasks.\nTo promote extensibility, LayoutParser also incorporates a community\nplatform for sharing both pre-trained models and full document digiti-\nzation pipelines. 
We demonstrate that LayoutParser is helpful for both\nlightweight and large-scale digitization pipelines in real-word use cases.\nThe library is publicly available at https://layout-parser.github.io .\nKeywords: Document Image Analysis ·Deep Learning ·Layout Analysis\n·Character Recognition ·Open Source library ·Toolkit.\n1 Introduction\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of\ndocument image analysis (DIA) tasks including document image classification [ 11,arXiv:2103.15348v2 [cs.CV] 21 Jun 2021')
```python
print(docs[0].metadata)
```
{'source': 'example_data/layout-parser-paper.pdf', 'page': 0}
## Lazy Load
```python
page = []
for doc in loader.lazy_load():
page.append(doc)
if len(page) >= 10:
# do some paged operation, e.g.
# index.upsert(page)
page = []
```
## API reference
For detailed documentation of all PyPDFDirectoryLoader features and configurations head to the API reference: https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFDirectoryLoader.html
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@document_loaders@pypdfdirectory.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/tests/test_optional/test_autoshapes/__init__.py",
"type": "Python"
}
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@tests@test_optional@test_autoshapes@__init__.py@.PATH_END.py
|
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergl/selected/textfont/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the `scattergl.selected.textfont.color` property."""

    def __init__(
        self, plotly_name="color", parent_name="scattergl.selected.textfont", **kwargs
    ):
        # Default edit type is "calc" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergl@selected@textfont@_color.py@.PATH_END.py
|
{
"filename": "_stream.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/image/_stream.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated validator for the `image.stream` compound property.
    # data_docs below is a runtime string consumed by the validator machinery;
    # its text is kept byte-for-byte.
    def __init__(self, plotly_name="stream", parent_name="image", **kwargs):
        super(StreamValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Stream"),
            data_docs=kwargs.pop(
                "data_docs",
                """
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
""",
            ),
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@image@_stream.py@.PATH_END.py
|
{
"filename": "spirou_pol_pipeline.py",
"repo_name": "edermartioli/spirou-polarimetry",
"repo_path": "spirou-polarimetry_extracted/spirou-polarimetry-master/spirou_pol_pipeline.py",
"type": "Python"
}
|
# -*- coding: iso-8859-1 -*-
"""
Created on April 29 2020
Description: This routine identifies and reduces all polarimetric sequences in a given data set.
@author: Eder Martioli <martioli@iap.fr>
Institut d'Astrophysique de Paris, France.
Simple usage example:
python ~/spirou-tools/spirou-polarimetry/spirou_pol_pipeline.py --input=*e.fits -Lsb -p -v
"""
__version__ = "1.0"
__copyright__ = """
Copyright (c) ... All rights reserved.
"""
from optparse import OptionParser
import os,sys
import astropy.io.fits as fits
import glob
from copy import deepcopy
def generate_polar_sets(file_list, verbose=False) :
    """Group SPIRou e.fits exposures into complete 4-exposure polarimetric sequences.

    The header keywords SBRHB1_P / SBRHB2_P (presumably the rhomb positions —
    confirm against the instrument docs) identify which exposure (1..4) of a
    polarimetric sequence each file is.  Files are scanned in the given order
    and a sequence is kept only when exposures 1,2,3,4 appear consecutively;
    an out-of-order file resets the sequence.

    Parameters:
        file_list : ordered list of e.fits file paths
        verbose : if True, report the classification of every file

    Returns:
        dict mapping the exposure-1 filename of each complete sequence to the
        list [exp1, exp2, exp3, exp4] of its four exposure filenames.
    """
    polar_sets = {}
    current_exp_num = 0  # last accepted exposure number (0 = waiting for exposure 1)
    pol_sequence = ["","","",""]
    for i in range(len(file_list)) :
        hdr = fits.getheader(file_list[i])
        if "SBRHB1_P" in hdr.keys() and "SBRHB2_P" in hdr.keys() :
            # P16/P16 -> spectroscopic (non-polarimetric) mode: reset and skip.
            if hdr["SBRHB1_P"] == "P16" and hdr["SBRHB2_P"] == "P16" :
                current_exp_num = 0
                if verbose :
                    print("File:",file_list[i], "is in spectroscopic mode, skipping ...")
                continue
            # Keyword combinations marking exposure 1: start a new sequence.
            elif (hdr["SBRHB1_P"] == "P16" and hdr["SBRHB2_P"] == "P2") or \
                 (hdr["SBRHB1_P"] == "P2" and hdr["SBRHB2_P"] == "P14") or \
                 (hdr["SBRHB1_P"] == "P14" and hdr["SBRHB2_P"] == "P16") :
                pol_sequence[0] = file_list[i]
                current_exp_num = 1
            # Keyword combinations marking exposure 2.
            elif (hdr["SBRHB1_P"] == "P16" and hdr["SBRHB2_P"] == "P14") or \
                 (hdr["SBRHB1_P"] == "P2" and hdr["SBRHB2_P"] == "P2") or \
                 (hdr["SBRHB1_P"] == "P2" and hdr["SBRHB2_P"] == "P16") :
                if current_exp_num == 1 :
                    if verbose :
                        print("File:",file_list[i], "is exposure 2, OK ...")
                    pol_sequence[current_exp_num] = file_list[i]
                    current_exp_num = 2
                elif current_exp_num == -1 :
                    # NOTE(review): current_exp_num is never set to -1 in this
                    # function, so this branch looks unreachable — confirm.
                    if verbose:
                        print("File",file_list[i]," is part of skipped seqeuence ...")
                    continue
                else :
                    # Exposure 2 seen without a preceding exposure 1: reset.
                    current_exp_num = 0
                    if verbose :
                        print("File",file_list[i]," is exposure 2, but sequence is out-of-order, skipping ...")
                    continue
            # Keyword combinations marking exposure 3.
            elif (hdr["SBRHB1_P"] == "P4" and hdr["SBRHB2_P"] == "P2") or \
                 (hdr["SBRHB1_P"] == "P14" and hdr["SBRHB2_P"] == "P14") or \
                 (hdr["SBRHB1_P"] == "P2" and hdr["SBRHB2_P"] == "P4") :
                if current_exp_num == 2 :
                    if verbose :
                        print("File:",file_list[i], "is exposure 3, OK ...")
                    pol_sequence[current_exp_num] = file_list[i]
                    current_exp_num = 3
                elif current_exp_num == -1 :
                    # NOTE(review): apparently unreachable (see above) — confirm.
                    if verbose :
                        print("File",file_list[i]," is part of skipped seqeuence ...")
                    continue
                else :
                    current_exp_num = 0
                    if verbose :
                        print("File",file_list[i]," is exposure 3, but sequence is out-of-order, skipping ...")
                    continue
            # Keyword combinations marking exposure 4: close the sequence.
            elif (hdr["SBRHB1_P"] == "P4" and hdr["SBRHB2_P"] == "P14") or \
                 (hdr["SBRHB1_P"] == "P14" and hdr["SBRHB2_P"] == "P2") or \
                 (hdr["SBRHB1_P"] == "P14" and hdr["SBRHB2_P"] == "P4") :
                if current_exp_num == 3 :
                    if verbose :
                        print("File:",file_list[i], "is exposure 4, OK ...")
                    pol_sequence[current_exp_num] = file_list[i]
                    if verbose:
                        print("Stacking polarimetric sequence:", pol_sequence)
                    # Store a copy so later sequences do not overwrite this one.
                    polar_sets[pol_sequence[0]] = deepcopy(pol_sequence)
                    current_exp_num = 0
                elif current_exp_num == -1 :
                    # NOTE(review): apparently unreachable (see above) — confirm.
                    if verbose :
                        print("File",file_list[i]," is part of skipped seqeuence ...")
                    continue
                else :
                    current_exp_num = 0
                    if verbose :
                        print("File",file_list[i]," is exposure 4, but sequence is out-of-order, skipping ...")
                    continue
            else :
                # Unrecognized keyword combination: reset and skip.
                current_exp_num = 0
                if verbose :
                    print("File:",file_list[i], "is in UNKNOWN mode, skipping ...")
                continue
        else :
            # Required keywords missing: reset and skip.
            current_exp_num = 0
            if verbose :
                print("File:",file_list[i], "does not have keywords SBRHB1_P and SBRHB2_P, skipping ...")
            continue
    return polar_sets
def generate_polar_continuous_sets(polar_sets, verbose=False):
    """Expand 4-exposure polarimetric sets into overlapping "continuous" sets.

    For each pair of consecutive sets A and B, three extra sets are generated
    by sliding the 4-exposure window one exposure at a time, borrowing the
    leading exposures from B: [B1, A2, A3, A4], [B1, B2, A3, A4] and
    [B1, B2, B3, A4].  Each output set is keyed by the exposure filename that
    starts its window in the original sequence.

    Parameters:
        polar_sets : dict mapping the first-exposure filename of each sequence
            to its list [exp1, exp2, exp3, exp4] (as built by
            generate_polar_sets); insertion order is assumed chronological.
        verbose : if True, print every generated set.

    Returns:
        dict with the same structure as `polar_sets`, containing the original
        sets plus the sliding-window combinations.
    """
    cont_polar_sets = {}
    keys = list(polar_sets.keys())
    nkeys = len(keys)
    for j in range(nkeys):
        # Renamed from `set` to avoid shadowing the builtin.
        seq = polar_sets[keys[j]]
        cont_polar_sets[seq[0]] = [seq[0], seq[1], seq[2], seq[3]]
        if j < nkeys - 1:
            # Next chronological sequence supplies the "borrowed" exposures.
            nseq = polar_sets[keys[j + 1]]
            cont_polar_sets[seq[1]] = [nseq[0], seq[1], seq[2], seq[3]]
            cont_polar_sets[seq[2]] = [nseq[0], nseq[1], seq[2], seq[3]]
            cont_polar_sets[seq[3]] = [nseq[0], nseq[1], nseq[2], seq[3]]
    if verbose:
        for key in cont_polar_sets.keys():
            print(key, "->", cont_polar_sets[key])
    return cont_polar_sets
# --- Command-line interface ---
parser = OptionParser()
parser.add_option("-i", "--input", dest="input", help="Input spectral e.fits data pattern",type='string',default="*e.fits")
parser.add_option("-m", "--lsdmask", dest="lsdmask", help="Input LSD mask",type='string',default="")
parser.add_option("-c", action="store_true", dest="contset", help="Produce continuous set", default=False)
parser.add_option("-L", action="store_true", dest="run_lsd", help="Run LSD analysis", default=False)
parser.add_option("-s", action="store_true", dest="stack_lsd_profiles", help="Stack LSD profiles", default=False)
parser.add_option("-b", action="store_true", dest="blong_timeseries", help="Calculate longitudinal magnetic field time series", default=False)
parser.add_option("-v", action="store_true", dest="verbose", help="verbose", default=False)
parser.add_option("-p", action="store_true", dest="plot", help="plot", default=False)
try:
    options,args = parser.parse_args(sys.argv[1:])
except:
    # NOTE(review): bare except hides the real parsing error; optparse already
    # exits with a usage message on bad options — confirm this is intended.
    print("Error: check usage with -h spirou_pol_pipeline.py")
    sys.exit(1)
if options.verbose:
    print('Spectral e.fits data pattern: ', options.input)
    print('LSD mask: ', options.lsdmask)
# Directory holding the companion spirou-polarimetry scripts invoked below.
spirou_pol_dir = os.path.dirname(__file__) + '/'
# make list of efits data files
if options.verbose:
    print("Creating list of e.fits spectrum files...")
inputedata = sorted(glob.glob(options.input))
# Group exposures into complete 4-exposure polarimetric sequences.
polar_sets = generate_polar_sets(inputedata)
if options.contset :
    # Optionally expand into overlapping sliding-window sets.
    polar_sets = generate_polar_continuous_sets(polar_sets, verbose=True)
object_name = "object"  # replaced by the OBJECT header card of the first sequence
for key in polar_sets.keys() :
    # Output polarimetric product file name (e.fits -> p.fits).
    output_pol = str(key).replace("e.fits","p.fits")
    if object_name == "object" :
        object_name = fits.getheader(key,0)["OBJECT"].replace(" ","")
    seq = polar_sets[key]
    # Run spirou_pol.py on the 4 exposures, optionally with LSD analysis (-L).
    if options.run_lsd :
        output_lsd = str(key).replace("e.fits","_lsd.fits")
        command = "python {0}spirou_pol.py --exp1={1} --exp2={2} --exp3={3} --exp4={4} --lsdmask={5} --output={6} --output_lsd={7} -L".format(spirou_pol_dir,seq[0],seq[1],seq[2],seq[3],options.lsdmask, output_pol,output_lsd)
    else :
        command = "python {0}spirou_pol.py --exp1={1} --exp2={2} --exp3={3} --exp4={4} --output={5}".format(spirou_pol_dir,seq[0],seq[1],seq[2],seq[3], output_pol)
    print("Running: ",command)
    os.system(command)
# Glob pattern matching the LSD products generated above.
if "e.fits" in options.input :
    lsd_pattern = (options.input).replace("e.fits","_lsd.fits")
else :
    lsd_pattern = "*_lsd.fits"
# Forward the plot/verbose flags to the post-processing scripts.
plot_flag = ""
if options.plot :
    plot_flag = "-p"
verbose_flag = ""
if options.verbose :
    verbose_flag = "-v"
# Optional post-processing: stack the LSD profiles ...
if options.stack_lsd_profiles and options.run_lsd :
    output_stack_lsd = object_name + "_lsd_stack.fits"
    command = "python {0}stack_lsd_profiles.py --input={1} --output={2} -s {3} {4}".format(spirou_pol_dir, lsd_pattern, output_stack_lsd, plot_flag, verbose_flag)
    print("Running: ",command)
    os.system(command)
# ... and compute the longitudinal magnetic field (Blong) time series.
if options.blong_timeseries and options.run_lsd :
    output_blong_timeseries = object_name + "_blong.rdb"
    command = "python {0}spirou_blong_timeseries.py --input={1} --output={2} {3} {4}".format(spirou_pol_dir, lsd_pattern, output_blong_timeseries, plot_flag, verbose_flag)
    print("Running: ",command)
    os.system(command)
|
edermartioliREPO_NAMEspirou-polarimetryPATH_START.@spirou-polarimetry_extracted@spirou-polarimetry-master@spirou_pol_pipeline.py@.PATH_END.py
|
{
"filename": "pca.py",
"repo_name": "changhoonhahn/provabgs",
"repo_path": "provabgs_extracted/provabgs-main/bin/pca.py",
"type": "Python"
}
|
'''
compress training set into PCA components and save to pickle file
'''
import os, sys
import pickle
import numpy as np
# --- speculator ---
from speculator import SpectrumPCA
# Select the data directory for the current cluster (from $machine).
if os.environ['machine'] == 'cori':
    dat_dir='/global/cscratch1/sd/chahah/provabgs/emulator/' # hardcoded to NERSC directory
elif os.environ['machine'] == 'tiger':
    dat_dir='/tigress/chhahn/provabgs/'
version = '0.1'
# Command-line arguments: model name, first/last batch, number of PCA
# components, and index of the wavelength bin to compress.
name = sys.argv[1]
batch0 = int(sys.argv[2])
batch1 = int(sys.argv[3])
n_pca = int(sys.argv[4])
i_bin = int(sys.argv[5])
# fsps wavelength
fwave = os.path.join(dat_dir, 'wave.%s.npy' % name)
wave = np.load(fwave)
# wavelength bins
wave_bin = [
    (wave >= 1000) & (wave < 2000),
    (wave >= 2000) & (wave < 3600),
    (wave >= 3600) & (wave < 5500),
    (wave >= 5500) & (wave < 7410),
    (wave >= 7410) & (wave < 60000)
][i_bin]
# Filename suffix matching the selected wavelength bin.
str_wbin = [
    '.w1000_2000',
    '.w2000_3600',
    '.w3600_5500',
    '.w5500_7410',
    '.w7410_60000'
][i_bin]
# batches of fsps spectra
batches = range(batch0, batch1+1)
# parameters
if name == 'nmf':
    fthetas = [os.path.join(dat_dir, 'fsps.%s.v%s.theta_unt.seed%i.npy' % (name, version, ibatch)) for ibatch in batches]
else:
    fthetas = [os.path.join(dat_dir, 'fsps.%s.v%s.theta.seed%i.npy' % (name, version, ibatch)) for ibatch in batches]
# log(spectra) over wavelength bin
fspecs = [
    os.path.join(dat_dir, 'fsps.%s.v%s.lnspectrum.seed%i%s.npy' % (name, version, ibatch, str_wbin))
    for ibatch in batches]
if name == 'nmf': # theta = [b1, b2, b3, b4, g1, g2, dust1, dust2, dust_index, zred]
    n_param = 10
elif name == 'burst': # theta = [tburst, zburst, dust1, dust2, dust_index]
    # NOTE(review): the comment above lists 5 parameters but n_param is 4 — confirm.
    n_param = 4
# Number of wavelength samples inside the selected bin.
n_wave = np.sum(wave_bin)
# Output file for the trained PCA basis.
fpca = os.path.join(dat_dir, 'fsps.%s.v%s.seed%i_%i%s.pca%i.hdf5' % (name, version, batch0, batch1, str_wbin, n_pca))
print(fpca)
# train PCA basis
PCABasis = SpectrumPCA(
    n_parameters=n_param, # number of parameters
    n_wavelengths=n_wave, # number of wavelength values
    n_pcas=n_pca, # number of pca coefficients to include in the basis
    spectrum_filenames=fspecs, # list of filenames containing the (un-normalized) log spectra for training the PCA
    parameter_filenames=fthetas, # list of filenames containing the corresponding parameter values
    parameter_selection=None)
print('compute spectrum parameters shift and scale')
PCABasis.compute_spectrum_parameters_shift_and_scale() # computes shifts and scales for (log) spectra and parameters
print('train pca')
PCABasis.train_pca()
print('transform and stack')
PCABasis.transform_and_stack_training_data(fpca.replace('.hdf5', ''), retain=True)
# save to file
PCABasis._save_to_file(fpca)
|
changhoonhahnREPO_NAMEprovabgsPATH_START.@provabgs_extracted@provabgs-main@bin@pca.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "gmbrandt/HTOF",
"repo_path": "HTOF_extracted/HTOF-main/setup.py",
"type": "Python"
}
|
from setuptools import setup, find_packages
# Package configuration for the htof package.
setup(name='htof',
      author='G. Mirek Brandt, Daniel Michalik, Gavin K. Hung',
      version='1.1.5',
      python_requires='>=3.6',
      packages=find_packages(),
      package_dir={'htof': 'htof'},
      # Ship the bundled data tables with the package.
      package_data={'htof': ['data/*.csv', 'data/*.txt']},
      setup_requires=['pytest-runner'],
      install_requires=['astropy>=2.0', 'pandas>=0.24.0', 'scipy>=1.0.0', 'numpy>=1.16', 'requests'],
      tests_require=['pytest>=3.5'])
|
gmbrandtREPO_NAMEHTOFPATH_START.@HTOF_extracted@HTOF-main@setup.py@.PATH_END.py
|
{
"filename": "azuresearch.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/vectorstores/azuresearch.py",
"type": "Python"
}
|
from __future__ import annotations
import asyncio
import base64
import itertools
import json
import logging
import time
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Collection,
Dict,
Iterable,
List,
Literal,
Optional,
Tuple,
Type,
Union,
cast,
)
import numpy as np
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.exceptions import LangChainException
from langchain_core.retrievers import BaseRetriever
from langchain_core.utils import get_from_env
from langchain_core.vectorstores import VectorStore
from pydantic import ConfigDict, model_validator
from langchain_community.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger()
if TYPE_CHECKING:
from azure.search.documents import SearchClient, SearchItemPaged
from azure.search.documents.aio import (
AsyncSearchItemPaged,
)
from azure.search.documents.aio import (
SearchClient as AsyncSearchClient,
)
from azure.search.documents.indexes.models import (
CorsOptions,
ScoringProfile,
SearchField,
SemanticConfiguration,
VectorSearch,
)
# Allow overriding field names for Azure Search
# Each constant resolves its value from the environment (via get_from_env)
# and falls back to the given default index field name.
FIELDS_ID = get_from_env(
    key="AZURESEARCH_FIELDS_ID", env_key="AZURESEARCH_FIELDS_ID", default="id"
)
FIELDS_CONTENT = get_from_env(
    key="AZURESEARCH_FIELDS_CONTENT",
    env_key="AZURESEARCH_FIELDS_CONTENT",
    default="content",
)
FIELDS_CONTENT_VECTOR = get_from_env(
    key="AZURESEARCH_FIELDS_CONTENT_VECTOR",
    env_key="AZURESEARCH_FIELDS_CONTENT_VECTOR",
    default="content_vector",
)
# NOTE(review): the metadata field is configured via AZURESEARCH_FIELDS_TAG
# (not ..._METADATA) even though the constant is FIELDS_METADATA — confirm the
# env-var name is intentional (changing it would break existing deployments).
FIELDS_METADATA = get_from_env(
    key="AZURESEARCH_FIELDS_TAG", env_key="AZURESEARCH_FIELDS_TAG", default="metadata"
)
# Maximum number of documents uploaded per batch to the search index.
MAX_UPLOAD_BATCH_SIZE = 1000
def _get_search_client(
    endpoint: str,
    index_name: str,
    key: Optional[str] = None,
    azure_ad_access_token: Optional[str] = None,
    semantic_configuration_name: Optional[str] = None,
    fields: Optional[List[SearchField]] = None,
    vector_search: Optional[VectorSearch] = None,
    semantic_configurations: Optional[
        Union[SemanticConfiguration, List[SemanticConfiguration]]
    ] = None,
    scoring_profiles: Optional[List[ScoringProfile]] = None,
    default_scoring_profile: Optional[str] = None,
    default_fields: Optional[List[SearchField]] = None,
    user_agent: Optional[str] = "langchain",
    cors_options: Optional[CorsOptions] = None,
    async_: bool = False,
    additional_search_client_options: Optional[Dict[str, Any]] = None,
) -> Union[SearchClient, AsyncSearchClient]:
    """Build a (sync or async) ``SearchClient`` bound to ``index_name``.

    Resolves a credential (API key, the literal key "INTERACTIVE" for a
    browser login, a fixed Azure AD access token, or
    ``DefaultAzureCredential``), creates the index with vector/semantic
    configuration if it does not exist yet, and returns a client for it.

    Args:
        endpoint: Azure Search service endpoint URL.
        index_name: Name of the target index.
        key: API key, or "INTERACTIVE" to trigger a browser login.
        azure_ad_access_token: Pre-acquired AAD token used as-is.
        semantic_configuration_name: Default semantic configuration name.
        fields: Index fields; must include all mandatory ``default_fields``.
        vector_search: Vector search configuration; a default HNSW +
            exhaustive-KNN setup is created when omitted.
        semantic_configurations: Explicit semantic configuration(s).
        scoring_profiles: Optional scoring profiles for the index.
        default_scoring_profile: Name of the default scoring profile.
        default_fields: Mandatory fields used for validation and creation.
        user_agent: User agent string sent with every request.
        cors_options: CORS options applied when the index is created.
        async_: Return an ``AsyncSearchClient`` when True.
        additional_search_client_options: Extra kwargs for the client.

    Raises:
        ValueError: If ``fields`` is missing any mandatory default field.
    """
    from azure.core.credentials import AccessToken, AzureKeyCredential, TokenCredential
    from azure.core.exceptions import ResourceNotFoundError
    from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
    from azure.search.documents import SearchClient
    from azure.search.documents.aio import SearchClient as AsyncSearchClient
    from azure.search.documents.indexes import SearchIndexClient
    from azure.search.documents.indexes.models import (
        ExhaustiveKnnAlgorithmConfiguration,
        ExhaustiveKnnParameters,
        HnswAlgorithmConfiguration,
        HnswParameters,
        SearchIndex,
        SemanticConfiguration,
        SemanticField,
        SemanticPrioritizedFields,
        SemanticSearch,
        VectorSearch,
        VectorSearchAlgorithmKind,
        VectorSearchAlgorithmMetric,
        VectorSearchProfile,
    )
    additional_search_client_options = additional_search_client_options or {}
    default_fields = default_fields or []
    credential: Union[AzureKeyCredential, TokenCredential, InteractiveBrowserCredential]
    # Determine the appropriate credential to use
    if key is not None:
        if key.upper() == "INTERACTIVE":
            credential = InteractiveBrowserCredential()
            # Trigger the interactive login now rather than on first request.
            credential.get_token("https://search.azure.com/.default")
        else:
            credential = AzureKeyCredential(key)
    elif azure_ad_access_token is not None:
        # BUGFIX: azure.core's TokenCredential is a typing Protocol and cannot
        # be instantiated (the previous TokenCredential(lambda ...) call raised
        # TypeError). Provide a minimal structural implementation instead.
        class _StaticTokenCredential:
            def get_token(self, *scopes: str, **kwargs: Any) -> AccessToken:
                # Advertise a nominal one-hour lifetime for the static token.
                return AccessToken(azure_ad_access_token, int(time.time()) + 3600)

        credential = _StaticTokenCredential()
    else:
        credential = DefaultAzureCredential()
    index_client: SearchIndexClient = SearchIndexClient(
        endpoint=endpoint, credential=credential, user_agent=user_agent
    )
    try:
        index_client.get_index(name=index_name)
    except ResourceNotFoundError:
        # Index does not exist yet: validate/assemble its definition and create it.
        # Fields configuration
        if fields is not None:
            # Check mandatory fields
            fields_types = {f.name: f.type for f in fields}
            mandatory_fields = {df.name: df.type for df in default_fields}
            # Mandatory (name, type) pairs absent from the user-supplied fields.
            # (renamed loop variable: previously shadowed the `key` parameter)
            missing_fields = {
                name: mandatory_fields[name]
                for name, _ in set(mandatory_fields.items())
                - set(fields_types.items())
            }
            if len(missing_fields) > 0:
                # Helper for formatting field information for each missing field.
                def fmt_err(x: str) -> str:
                    return (
                        f"{x} current type: '{fields_types.get(x, 'MISSING')}'. "
                        f"It has to be '{mandatory_fields.get(x)}' or you can point "
                        f"to a different '{mandatory_fields.get(x)}' field name by "
                        f"using the env variable 'AZURESEARCH_FIELDS_{x.upper()}'"
                    )

                error = "\n".join([fmt_err(x) for x in missing_fields])
                raise ValueError(
                    f"You need to specify at least the following fields "
                    f"{missing_fields} or provide alternative field names in the env "
                    f"variables.\n\n{error}"
                )
        else:
            fields = default_fields
        # Vector search configuration: default to HNSW plus exhaustive KNN,
        # both using cosine similarity.
        if vector_search is None:
            vector_search = VectorSearch(
                algorithms=[
                    HnswAlgorithmConfiguration(
                        name="default",
                        kind=VectorSearchAlgorithmKind.HNSW,
                        parameters=HnswParameters(
                            m=4,
                            ef_construction=400,
                            ef_search=500,
                            metric=VectorSearchAlgorithmMetric.COSINE,
                        ),
                    ),
                    ExhaustiveKnnAlgorithmConfiguration(
                        name="default_exhaustive_knn",
                        kind=VectorSearchAlgorithmKind.EXHAUSTIVE_KNN,
                        parameters=ExhaustiveKnnParameters(
                            metric=VectorSearchAlgorithmMetric.COSINE
                        ),
                    ),
                ],
                profiles=[
                    VectorSearchProfile(
                        name="myHnswProfile",
                        algorithm_configuration_name="default",
                    ),
                    VectorSearchProfile(
                        name="myExhaustiveKnnProfile",
                        algorithm_configuration_name="default_exhaustive_knn",
                    ),
                ],
            )
        # Create the semantic settings with the configuration
        if semantic_configurations:
            if not isinstance(semantic_configurations, list):
                semantic_configurations = [semantic_configurations]
            semantic_search = SemanticSearch(
                configurations=semantic_configurations,
                default_configuration_name=semantic_configuration_name,
            )
        elif semantic_configuration_name:
            # use default semantic configuration
            semantic_configuration = SemanticConfiguration(
                name=semantic_configuration_name,
                prioritized_fields=SemanticPrioritizedFields(
                    content_fields=[SemanticField(field_name=FIELDS_CONTENT)],
                ),
            )
            semantic_search = SemanticSearch(configurations=[semantic_configuration])
        else:
            # don't use semantic search
            semantic_search = None
        # Create the search index with the semantic settings and vector search
        index = SearchIndex(
            name=index_name,
            fields=fields,
            vector_search=vector_search,
            semantic_search=semantic_search,
            scoring_profiles=scoring_profiles,
            default_scoring_profile=default_scoring_profile,
            cors_options=cors_options,
        )
        index_client.create_index(index)
    # Create the search client
    if not async_:
        return SearchClient(
            endpoint=endpoint,
            index_name=index_name,
            credential=credential,
            user_agent=user_agent,
            **additional_search_client_options,
        )
    else:
        return AsyncSearchClient(
            endpoint=endpoint,
            index_name=index_name,
            credential=credential,
            user_agent=user_agent,
            **additional_search_client_options,
        )
class AzureSearch(VectorStore):
"""`Azure Cognitive Search` vector store."""
def __init__(
self,
azure_search_endpoint: str,
azure_search_key: str,
index_name: str,
embedding_function: Union[Callable, Embeddings],
search_type: str = "hybrid",
semantic_configuration_name: Optional[str] = None,
fields: Optional[List[SearchField]] = None,
vector_search: Optional[VectorSearch] = None,
semantic_configurations: Optional[
Union[SemanticConfiguration, List[SemanticConfiguration]]
] = None,
scoring_profiles: Optional[List[ScoringProfile]] = None,
default_scoring_profile: Optional[str] = None,
cors_options: Optional[CorsOptions] = None,
*,
vector_search_dimensions: Optional[int] = None,
additional_search_client_options: Optional[Dict[str, Any]] = None,
azure_ad_access_token: Optional[str] = None,
**kwargs: Any,
):
try:
from azure.search.documents.indexes.models import (
SearchableField,
SearchField,
SearchFieldDataType,
SimpleField,
)
except ImportError as e:
raise ImportError(
"Unable to import azure.search.documents. Please install with "
"`pip install -U azure-search-documents`."
) from e
"""Initialize with necessary components."""
# Initialize base class
self.embedding_function = embedding_function
if isinstance(self.embedding_function, Embeddings):
self.embed_query = self.embedding_function.embed_query
else:
self.embed_query = self.embedding_function
default_fields = [
SimpleField(
name=FIELDS_ID,
type=SearchFieldDataType.String,
key=True,
filterable=True,
),
SearchableField(
name=FIELDS_CONTENT,
type=SearchFieldDataType.String,
),
SearchField(
name=FIELDS_CONTENT_VECTOR,
type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
searchable=True,
vector_search_dimensions=vector_search_dimensions
or len(self.embed_query("Text")),
vector_search_profile_name="myHnswProfile",
),
SearchableField(
name=FIELDS_METADATA,
type=SearchFieldDataType.String,
),
]
user_agent = "langchain"
if "user_agent" in kwargs and kwargs["user_agent"]:
user_agent += " " + kwargs["user_agent"]
self.client = _get_search_client(
azure_search_endpoint,
index_name,
azure_search_key,
azure_ad_access_token,
semantic_configuration_name=semantic_configuration_name,
fields=fields,
vector_search=vector_search,
semantic_configurations=semantic_configurations,
scoring_profiles=scoring_profiles,
default_scoring_profile=default_scoring_profile,
default_fields=default_fields,
user_agent=user_agent,
cors_options=cors_options,
additional_search_client_options=additional_search_client_options,
)
self.async_client = _get_search_client(
azure_search_endpoint,
index_name,
azure_search_key,
azure_ad_access_token,
semantic_configuration_name=semantic_configuration_name,
fields=fields,
vector_search=vector_search,
semantic_configurations=semantic_configurations,
scoring_profiles=scoring_profiles,
default_scoring_profile=default_scoring_profile,
default_fields=default_fields,
user_agent=user_agent,
cors_options=cors_options,
async_=True,
)
self.search_type = search_type
self.semantic_configuration_name = semantic_configuration_name
self.fields = fields if fields else default_fields
self._azure_search_endpoint = azure_search_endpoint
self._azure_search_key = azure_search_key
self._index_name = index_name
self._semantic_configuration_name = semantic_configuration_name
self._fields = fields
self._vector_search = vector_search
self._semantic_configurations = semantic_configurations
self._scoring_profiles = scoring_profiles
self._default_scoring_profile = default_scoring_profile
self._default_fields = default_fields
self._user_agent = user_agent
self._cors_options = cors_options
    def __del__(self) -> None:
        """Best-effort cleanup: close the sync and async Azure SDK clients.

        NOTE(review): __del__ may run during interpreter shutdown, where
        event-loop access and task scheduling are unreliable; failures here
        would surface only as GC-time warnings.
        """
        # Close the sync client
        if hasattr(self, "client") and self.client:
            self.client.close()
        # Close the async client
        if hasattr(self, "async_client") and self.async_client:
            # Check if we're in an existing event loop
            try:
                loop = asyncio.get_event_loop()
                if loop.is_running():
                    # Schedule the coroutine to close the async client
                    # (fire-and-forget: the task may not complete before GC).
                    loop.create_task(self.async_client.close())
                else:
                    # If no event loop is running, run the coroutine directly
                    loop.run_until_complete(self.async_client.close())
            except RuntimeError:
                # Handle the case where there's no event loop
                # (asyncio.get_event_loop raises in threads without one).
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                try:
                    loop.run_until_complete(self.async_client.close())
                finally:
                    loop.close()
@property
def embeddings(self) -> Optional[Embeddings]:
# TODO: Support embedding object directly
return (
self.embedding_function
if isinstance(self.embedding_function, Embeddings)
else None
)
async def _aembed_query(self, text: str) -> List[float]:
if self.embeddings:
return await self.embeddings.aembed_query(text)
else:
return cast(Callable, self.embedding_function)(text)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
*,
keys: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts data to an existing index."""
# batching support if embedding function is an Embeddings object
if isinstance(self.embedding_function, Embeddings):
try:
embeddings = self.embedding_function.embed_documents(list(texts))
except NotImplementedError:
embeddings = [self.embedding_function.embed_query(x) for x in texts]
else:
embeddings = [self.embedding_function(x) for x in texts]
if len(embeddings) == 0:
logger.debug("Nothing to insert, skipping.")
return []
# when `keys` are not passed in and there is `ids` in kwargs, use those instead
# base class expects `ids` passed in rather than `keys`
# https://github.com/langchain-ai/langchain/blob/4cdaca67dc51dba887289f56c6fead3c1a52f97d/libs/core/langchain_core/vectorstores/base.py#L65
if (not keys) and ("ids" in kwargs) and (len(kwargs["ids"]) == len(embeddings)):
keys = kwargs["ids"]
return self.add_embeddings(zip(texts, embeddings), metadatas, keys=keys)
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
*,
keys: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
if isinstance(self.embedding_function, Embeddings):
try:
embeddings = await self.embedding_function.aembed_documents(list(texts))
except NotImplementedError:
embeddings = [
await self.embedding_function.aembed_query(x) for x in texts
]
else:
embeddings = [self.embedding_function(x) for x in texts]
if len(embeddings) == 0:
logger.debug("Nothing to insert, skipping.")
return []
# when `keys` are not passed in and there is `ids` in kwargs, use those instead
# base class expects `ids` passed in rather than `keys`
# https://github.com/langchain-ai/langchain/blob/4cdaca67dc51dba887289f56c6fead3c1a52f97d/libs/core/langchain_core/vectorstores/base.py#L65
if (not keys) and ("ids" in kwargs) and (len(kwargs["ids"]) == len(embeddings)):
keys = kwargs["ids"]
return await self.aadd_embeddings(zip(texts, embeddings), metadatas, keys=keys)
def add_embeddings(
self,
text_embeddings: Iterable[Tuple[str, List[float]]],
metadatas: Optional[List[dict]] = None,
*,
keys: Optional[List[str]] = None,
) -> List[str]:
"""Add embeddings to an existing index."""
ids = []
# Write data to index
data = []
for i, (text, embedding) in enumerate(text_embeddings):
# Use provided key otherwise use default key
if keys:
key = keys[i]
else:
key = str(uuid.uuid4())
# Encoding key for Azure Search valid characters
key = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii")
metadata = metadatas[i] if metadatas else {}
# Add data to index
# Additional metadata to fields mapping
doc = {
"@search.action": "upload",
FIELDS_ID: key,
FIELDS_CONTENT: text,
FIELDS_CONTENT_VECTOR: np.array(embedding, dtype=np.float32).tolist(),
FIELDS_METADATA: json.dumps(metadata),
}
if metadata:
additional_fields = {
k: v
for k, v in metadata.items()
if k in [x.name for x in self.fields]
}
doc.update(additional_fields)
data.append(doc)
ids.append(key)
# Upload data in batches
if len(data) == MAX_UPLOAD_BATCH_SIZE:
response = self.client.upload_documents(documents=data)
# Check if all documents were successfully uploaded
if not all(r.succeeded for r in response):
raise LangChainException(response)
# Reset data
data = []
# Considering case where data is an exact multiple of batch-size entries
if len(data) == 0:
return ids
# Upload data to index
response = self.client.upload_documents(documents=data)
# Check if all documents were successfully uploaded
if all(r.succeeded for r in response):
return ids
else:
raise LangChainException(response)
async def aadd_embeddings(
self,
text_embeddings: Iterable[Tuple[str, List[float]]],
metadatas: Optional[List[dict]] = None,
*,
keys: Optional[List[str]] = None,
) -> List[str]:
"""Add embeddings to an existing index."""
ids = []
# Write data to index
data = []
for i, (text, embedding) in enumerate(text_embeddings):
# Use provided key otherwise use default key
key = keys[i] if keys else str(uuid.uuid4())
# Encoding key for Azure Search valid characters
key = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii")
metadata = metadatas[i] if metadatas else {}
# Add data to index
# Additional metadata to fields mapping
doc = {
"@search.action": "upload",
FIELDS_ID: key,
FIELDS_CONTENT: text,
FIELDS_CONTENT_VECTOR: np.array(embedding, dtype=np.float32).tolist(),
FIELDS_METADATA: json.dumps(metadata),
}
if metadata:
additional_fields = {
k: v
for k, v in metadata.items()
if k in [x.name for x in self.fields]
}
doc.update(additional_fields)
data.append(doc)
ids.append(key)
# Upload data in batches
if len(data) == MAX_UPLOAD_BATCH_SIZE:
response = await self.async_client.upload_documents(documents=data)
# Check if all documents were successfully uploaded
if not all(r.succeeded for r in response):
raise LangChainException(response)
# Reset data
data = []
# Considering case where data is an exact multiple of batch-size entries
if len(data) == 0:
return ids
# Upload data to index
response = await self.async_client.upload_documents(documents=data)
# Check if all documents were successfully uploaded
if all(r.succeeded for r in response):
return ids
else:
raise LangChainException(response)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool:
"""Delete by vector ID.
Args:
ids: List of ids to delete.
Returns:
bool: True if deletion is successful,
False otherwise.
"""
if ids:
res = self.client.delete_documents([{FIELDS_ID: i} for i in ids])
return len(res) > 0
else:
return False
async def adelete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool:
"""Delete by vector ID.
Args:
ids: List of ids to delete.
Returns:
bool: True if deletion is successful,
False otherwise.
"""
if ids:
res = await self.async_client.delete_documents([{"id": i} for i in ids])
return len(res) > 0
else:
return False
def similarity_search(
self,
query: str,
k: int = 4,
*,
search_type: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
search_type = search_type or self.search_type
if search_type == "similarity":
docs = self.vector_search(query, k=k, **kwargs)
elif search_type == "hybrid":
docs = self.hybrid_search(query, k=k, **kwargs)
elif search_type == "semantic_hybrid":
docs = self.semantic_hybrid_search(query, k=k, **kwargs)
else:
raise ValueError(f"search_type of {search_type} not allowed.")
return docs
def similarity_search_with_score(
self, query: str, *, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Run similarity search with distance."""
search_type = kwargs.get("search_type", self.search_type)
if search_type == "similarity":
return self.vector_search_with_score(query, k=k, **kwargs)
elif search_type == "hybrid":
return self.hybrid_search_with_score(query, k=k, **kwargs)
elif search_type == "semantic_hybrid":
return self.semantic_hybrid_search_with_score(query, k=k, **kwargs)
else:
raise ValueError(f"search_type of {search_type} not allowed.")
async def asimilarity_search(
self,
query: str,
k: int = 4,
*,
search_type: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
search_type = search_type or self.search_type
if search_type == "similarity":
docs = await self.avector_search(query, k=k, **kwargs)
elif search_type == "hybrid":
docs = await self.ahybrid_search(query, k=k, **kwargs)
elif search_type == "semantic_hybrid":
docs = await self.asemantic_hybrid_search(query, k=k, **kwargs)
else:
raise ValueError(f"search_type of {search_type} not allowed.")
return docs
async def asimilarity_search_with_score(
self, query: str, *, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Run similarity search with distance."""
search_type = kwargs.get("search_type", self.search_type)
if search_type == "similarity":
return await self.avector_search_with_score(query, k=k, **kwargs)
elif search_type == "hybrid":
return await self.ahybrid_search_with_score(query, k=k, **kwargs)
elif search_type == "semantic_hybrid":
return await self.asemantic_hybrid_search_with_score(query, k=k, **kwargs)
else:
raise ValueError(f"search_type of {search_type} not allowed.")
def similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
*,
score_threshold: Optional[float] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
result = self.vector_search_with_score(query, k=k, **kwargs)
return (
result
if score_threshold is None
else [r for r in result if r[1] >= score_threshold]
)
async def asimilarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
*,
score_threshold: Optional[float] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
result = await self.avector_search_with_score(query, k=k, **kwargs)
return (
result
if score_threshold is None
else [r for r in result if r[1] >= score_threshold]
)
def vector_search(
self, query: str, k: int = 4, *, filters: Optional[str] = None, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.vector_search_with_score(query, k=k, filters=filters)
return [doc for doc, _ in docs_and_scores]
async def avector_search(
self, query: str, k: int = 4, *, filters: Optional[str] = None, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = await self.avector_search_with_score(
query, k=k, filters=filters
)
return [doc for doc, _ in docs_and_scores]
def vector_search_with_score(
self,
query: str,
k: int = 4,
filters: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query (str): Text to look up documents similar to.
k (int, optional): Number of Documents to return. Defaults to 4.
filters (str, optional): Filtering expression. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents most similar
to the query and score for each
"""
embedding = self.embed_query(query)
results = self._simple_search(embedding, "", k, filters=filters, **kwargs)
return _results_to_documents(results)
async def avector_search_with_score(
self,
query: str,
k: int = 4,
filters: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query (str): Text to look up documents similar to.
k (int, optional): Number of Documents to return. Defaults to 4.
filters (str, optional): Filtering expression. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents most similar
to the query and score for each
"""
embedding = await self._aembed_query(query)
results = await self._asimple_search(
embedding, "", k, filters=filters, **kwargs
)
return await _aresults_to_documents(results)
def max_marginal_relevance_search_with_score(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
*,
filters: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search and return results that are reordered by MMR.
Args:
query (str): Text to look up documents similar to.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
filters (str, optional): Filtering expression. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents most similar
to the query and score for each
"""
embedding = self.embed_query(query)
results = self._simple_search(embedding, "", fetch_k, filters=filters, **kwargs)
return _reorder_results_with_maximal_marginal_relevance(
results, query_embedding=np.array(embedding), lambda_mult=lambda_mult, k=k
)
async def amax_marginal_relevance_search_with_score(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
*,
filters: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search and return results that are reordered by MMR.
Args:
query (str): Text to look up documents similar to.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
filters (str, optional): Filtering expression. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents most similar
to the query and score for each
"""
embedding = await self._aembed_query(query)
results = await self._asimple_search(
embedding, "", fetch_k, filters=filters, **kwargs
)
return await _areorder_results_with_maximal_marginal_relevance(
results,
query_embedding=np.array(embedding),
lambda_mult=lambda_mult,
k=k,
)
def hybrid_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.hybrid_search_with_score(query, k=k, **kwargs)
return [doc for doc, _ in docs_and_scores]
async def ahybrid_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = await self.ahybrid_search_with_score(query, k=k, **kwargs)
return [doc for doc, _ in docs_and_scores]
def hybrid_search_with_score(
self,
query: str,
k: int = 4,
filters: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query with a hybrid query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embed_query(query)
results = self._simple_search(embedding, query, k, filters=filters, **kwargs)
return _results_to_documents(results)
async def ahybrid_search_with_score(
self,
query: str,
k: int = 4,
filters: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query with a hybrid query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = await self._aembed_query(query)
results = await self._asimple_search(
embedding, query, k, filters=filters, **kwargs
)
return await _aresults_to_documents(results)
def hybrid_search_with_relevance_scores(
self,
query: str,
k: int = 4,
*,
score_threshold: Optional[float] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
result = self.hybrid_search_with_score(query, k=k, **kwargs)
return (
result
if score_threshold is None
else [r for r in result if r[1] >= score_threshold]
)
async def ahybrid_search_with_relevance_scores(
self,
query: str,
k: int = 4,
*,
score_threshold: Optional[float] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
result = await self.ahybrid_search_with_score(query, k=k, **kwargs)
return (
result
if score_threshold is None
else [r for r in result if r[1] >= score_threshold]
)
def hybrid_max_marginal_relevance_search_with_score(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
*,
filters: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query with a hybrid query
and reorder results by MMR.
Args:
query (str): Text to look up documents similar to.
k (int, optional): Number of Documents to return. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
filters (str, optional): Filtering expression. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embed_query(query)
results = self._simple_search(
embedding, query, fetch_k, filters=filters, **kwargs
)
return _reorder_results_with_maximal_marginal_relevance(
results, query_embedding=np.array(embedding), lambda_mult=lambda_mult, k=k
)
async def ahybrid_max_marginal_relevance_search_with_score(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
*,
filters: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query with a hybrid query
and reorder results by MMR.
Args:
query (str): Text to look up documents similar to.
k (int, optional): Number of Documents to return. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
filters (str, optional): Filtering expression. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = await self._aembed_query(query)
results = await self._asimple_search(
embedding, query, fetch_k, filters=filters, **kwargs
)
return await _areorder_results_with_maximal_marginal_relevance(
results,
query_embedding=np.array(embedding),
lambda_mult=lambda_mult,
k=k,
)
def _simple_search(
self,
embedding: List[float],
text_query: str,
k: int,
*,
filters: Optional[str] = None,
**kwargs: Any,
) -> SearchItemPaged[dict]:
"""Perform vector or hybrid search in the Azure search index.
Args:
embedding: A vector embedding to search in the vector space.
text_query: A full-text search query expression;
Use "*" or omit this parameter to perform only vector search.
k: Number of documents to return.
filters: Filtering expression.
Returns:
Search items
"""
from azure.search.documents.models import VectorizedQuery
return self.client.search(
search_text=text_query,
vector_queries=[
VectorizedQuery(
vector=np.array(embedding, dtype=np.float32).tolist(),
k_nearest_neighbors=k,
fields=FIELDS_CONTENT_VECTOR,
)
],
filter=filters,
top=k,
**kwargs,
)
async def _asimple_search(
self,
embedding: List[float],
text_query: str,
k: int,
*,
filters: Optional[str] = None,
**kwargs: Any,
) -> AsyncSearchItemPaged[dict]:
"""Perform vector or hybrid search in the Azure search index.
Args:
embedding: A vector embedding to search in the vector space.
text_query: A full-text search query expression;
Use "*" or omit this parameter to perform only vector search.
k: Number of documents to return.
filters: Filtering expression.
Returns:
Search items
"""
from azure.search.documents.models import VectorizedQuery
return await self.async_client.search(
search_text=text_query,
vector_queries=[
VectorizedQuery(
vector=np.array(embedding, dtype=np.float32).tolist(),
k_nearest_neighbors=k,
fields=FIELDS_CONTENT_VECTOR,
)
],
filter=filters,
top=k,
**kwargs,
)
def semantic_hybrid_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
filters: Filtering expression.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.semantic_hybrid_search_with_score_and_rerank(
query, k=k, **kwargs
)
return [doc for doc, _, _ in docs_and_scores]
async def asemantic_hybrid_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
filters: Filtering expression.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = await self.asemantic_hybrid_search_with_score_and_rerank(
query, k=k, **kwargs
)
return [doc for doc, _, _ in docs_and_scores]
def semantic_hybrid_search_with_score(
self,
query: str,
k: int = 4,
score_type: Literal["score", "reranker_score"] = "score",
*,
score_threshold: Optional[float] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
score_type: Must either be "score" or "reranker_score".
Defaulted to "score".
filters: Filtering expression.
Returns:
List[Tuple[Document, float]]: A list of documents and their
corresponding scores.
"""
docs_and_scores = self.semantic_hybrid_search_with_score_and_rerank(
query, k=k, **kwargs
)
if score_type == "score":
return [
(doc, score)
for doc, score, _ in docs_and_scores
if score_threshold is None or score >= score_threshold
]
elif score_type == "reranker_score":
return [
(doc, reranker_score)
for doc, _, reranker_score in docs_and_scores
if score_threshold is None or reranker_score >= score_threshold
]
async def asemantic_hybrid_search_with_score(
self,
query: str,
k: int = 4,
score_type: Literal["score", "reranker_score"] = "score",
*,
score_threshold: Optional[float] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
score_type: Must either be "score" or "reranker_score".
Defaulted to "score".
filters: Filtering expression.
Returns:
List[Tuple[Document, float]]: A list of documents and their
corresponding scores.
"""
docs_and_scores = await self.asemantic_hybrid_search_with_score_and_rerank(
query, k=k, **kwargs
)
if score_type == "score":
return [
(doc, score)
for doc, score, _ in docs_and_scores
if score_threshold is None or score >= score_threshold
]
elif score_type == "reranker_score":
return [
(doc, reranker_score)
for doc, _, reranker_score in docs_and_scores
if score_threshold is None or reranker_score >= score_threshold
]
    def semantic_hybrid_search_with_score_and_rerank(
        self, query: str, k: int = 4, *, filters: Optional[str] = None, **kwargs: Any
    ) -> List[Tuple[Document, float, float]]:
        """Return docs most similar to query with a hybrid query.

        Runs a combined vector + semantic query against Azure Search and pairs
        every hit with both its hybrid search score and its semantic reranker
        score.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filters: Filtering expression.
            **kwargs: Additional arguments forwarded to ``client.search``.

        Returns:
            List of (Document, search score, reranker score) tuples for the
            most similar documents.
        """
        # Imported lazily so the azure SDK is only needed when this path runs.
        from azure.search.documents.models import VectorizedQuery
        results = self.client.search(
            search_text=query,
            vector_queries=[
                VectorizedQuery(
                    # Embed the query and normalize to a plain float32 list.
                    vector=np.array(self.embed_query(query), dtype=np.float32).tolist(),
                    k_nearest_neighbors=k,
                    fields=FIELDS_CONTENT_VECTOR,
                )
            ],
            filter=filters,
            query_type="semantic",
            semantic_configuration_name=self.semantic_configuration_name,
            query_caption="extractive",
            query_answer="extractive",
            top=k,
            **kwargs,
        )
        # Get Semantic Answers, keyed by the id of the result they belong to.
        semantic_answers = results.get_answers() or []
        semantic_answers_dict: Dict = {}
        for semantic_answer in semantic_answers:
            semantic_answers_dict[semantic_answer.key] = {
                "text": semantic_answer.text,
                "highlights": semantic_answer.highlights,
            }
        # Convert results to Document objects
        docs = [
            (
                Document(
                    page_content=result.pop(FIELDS_CONTENT),
                    metadata={
                        # Surface the document id when present (popped here so
                        # the fallback branch below does not duplicate it).
                        **(
                            {FIELDS_ID: result.pop(FIELDS_ID)}
                            if FIELDS_ID in result
                            else {}
                        ),
                        # Prefer the JSON metadata field; otherwise treat every
                        # remaining field except the content vector as metadata.
                        # (The comprehension's k/v have their own scope, so the
                        # method parameter ``k`` is unaffected.)
                        **(
                            json.loads(result[FIELDS_METADATA])
                            if FIELDS_METADATA in result
                            else {
                                k: v
                                for k, v in result.items()
                                if k != FIELDS_CONTENT_VECTOR
                            }
                        ),
                        # Attach semantic captions/answers returned by Azure.
                        **{
                            "captions": (
                                {
                                    "text": result.get("@search.captions", [{}])[
                                        0
                                    ].text,
                                    "highlights": result.get("@search.captions", [{}])[
                                        0
                                    ].highlights,
                                }
                                if result.get("@search.captions")
                                else {}
                            ),
                            # NOTE(review): FIELDS_ID was popped above, so this
                            # lookup likely always misses and yields "" — verify.
                            "answers": semantic_answers_dict.get(
                                result.get(FIELDS_ID, ""),
                                "",
                            ),
                        },
                    },
                ),
                float(result["@search.score"]),
                float(result["@search.reranker_score"]),
            )
            for result in results
        ]
        return docs
    async def asemantic_hybrid_search_with_score_and_rerank(
        self, query: str, k: int = 4, *, filters: Optional[str] = None, **kwargs: Any
    ) -> List[Tuple[Document, float, float]]:
        """Return docs most similar to query with a hybrid query.

        Async counterpart of ``semantic_hybrid_search_with_score_and_rerank``:
        runs a combined vector + semantic query and pairs every hit with both
        its hybrid search score and its semantic reranker score.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filters: Filtering expression.
            **kwargs: Additional arguments forwarded to ``async_client.search``.

        Returns:
            List of (Document, search score, reranker score) tuples for the
            most similar documents.
        """
        # Imported lazily so the azure SDK is only needed when this path runs.
        from azure.search.documents.models import VectorizedQuery
        vector = await self._aembed_query(query)
        results = await self.async_client.search(
            search_text=query,
            vector_queries=[
                VectorizedQuery(
                    # Normalize the embedding to a plain float32 list.
                    vector=np.array(vector, dtype=np.float32).tolist(),
                    k_nearest_neighbors=k,
                    fields=FIELDS_CONTENT_VECTOR,
                )
            ],
            filter=filters,
            query_type="semantic",
            semantic_configuration_name=self.semantic_configuration_name,
            query_caption="extractive",
            query_answer="extractive",
            top=k,
            **kwargs,
        )
        # Get Semantic Answers, keyed by the id of the result they belong to.
        semantic_answers = (await results.get_answers()) or []
        semantic_answers_dict: Dict = {}
        for semantic_answer in semantic_answers:
            semantic_answers_dict[semantic_answer.key] = {
                "text": semantic_answer.text,
                "highlights": semantic_answer.highlights,
            }
        # Convert results to Document objects
        docs = [
            (
                Document(
                    page_content=result.pop(FIELDS_CONTENT),
                    metadata={
                        # Surface the document id when present (popped here so
                        # the fallback branch below does not duplicate it).
                        **(
                            {FIELDS_ID: result.pop(FIELDS_ID)}
                            if FIELDS_ID in result
                            else {}
                        ),
                        # Prefer the JSON metadata field; otherwise treat every
                        # remaining field except the content vector as metadata.
                        # (The comprehension's k/v have their own scope, so the
                        # method parameter ``k`` is unaffected.)
                        **(
                            json.loads(result[FIELDS_METADATA])
                            if FIELDS_METADATA in result
                            else {
                                k: v
                                for k, v in result.items()
                                if k != FIELDS_CONTENT_VECTOR
                            }
                        ),
                        # Attach semantic captions/answers returned by Azure.
                        **{
                            "captions": (
                                {
                                    "text": result.get("@search.captions", [{}])[
                                        0
                                    ].text,
                                    "highlights": result.get("@search.captions", [{}])[
                                        0
                                    ].highlights,
                                }
                                if result.get("@search.captions")
                                else {}
                            ),
                            # NOTE(review): FIELDS_ID was popped above, so this
                            # lookup likely always misses and yields "" — verify.
                            "answers": semantic_answers_dict.get(
                                result.get(FIELDS_ID, ""),
                                "",
                            ),
                        },
                    },
                ),
                float(result["@search.score"]),
                float(result["@search.reranker_score"]),
            )
            async for result in results
        ]
        return docs
@classmethod
def from_texts(
cls: Type[AzureSearch],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
azure_search_endpoint: str = "",
azure_search_key: str = "",
azure_ad_access_token: Optional[str] = None,
index_name: str = "langchain-index",
fields: Optional[List[SearchField]] = None,
**kwargs: Any,
) -> AzureSearch:
# Creating a new Azure Search instance
azure_search = cls(
azure_search_endpoint,
azure_search_key,
index_name,
embedding,
fields=fields,
azure_ad_access_token=azure_ad_access_token,
**kwargs,
)
azure_search.add_texts(texts, metadatas, **kwargs)
return azure_search
@classmethod
async def afrom_texts(
cls: Type[AzureSearch],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
azure_search_endpoint: str = "",
azure_search_key: str = "",
azure_ad_access_token: Optional[str] = None,
index_name: str = "langchain-index",
fields: Optional[List[SearchField]] = None,
**kwargs: Any,
) -> AzureSearch:
# Creating a new Azure Search instance
azure_search = cls(
azure_search_endpoint,
azure_search_key,
index_name,
embedding,
fields=fields,
azure_ad_access_token=azure_ad_access_token,
**kwargs,
)
await azure_search.aadd_texts(texts, metadatas, **kwargs)
return azure_search
@classmethod
async def afrom_embeddings(
cls: Type[AzureSearch],
text_embeddings: Iterable[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
*,
azure_search_endpoint: str = "",
azure_search_key: str = "",
index_name: str = "langchain-index",
fields: Optional[List[SearchField]] = None,
**kwargs: Any,
) -> AzureSearch:
text_embeddings, first_text_embedding = _peek(text_embeddings)
if first_text_embedding is None:
raise ValueError("Cannot create AzureSearch from empty embeddings.")
vector_search_dimensions = len(first_text_embedding[1])
azure_search = cls(
azure_search_endpoint=azure_search_endpoint,
azure_search_key=azure_search_key,
index_name=index_name,
embedding_function=embedding,
fields=fields,
vector_search_dimensions=vector_search_dimensions,
**kwargs,
)
await azure_search.aadd_embeddings(text_embeddings, metadatas, **kwargs)
return azure_search
@classmethod
def from_embeddings(
cls: Type[AzureSearch],
text_embeddings: Iterable[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
*,
azure_search_endpoint: str = "",
azure_search_key: str = "",
index_name: str = "langchain-index",
fields: Optional[List[SearchField]] = None,
**kwargs: Any,
) -> AzureSearch:
# Creating a new Azure Search instance
text_embeddings, first_text_embedding = _peek(text_embeddings)
if first_text_embedding is None:
raise ValueError("Cannot create AzureSearch from empty embeddings.")
vector_search_dimensions = len(first_text_embedding[1])
azure_search = cls(
azure_search_endpoint=azure_search_endpoint,
azure_search_key=azure_search_key,
index_name=index_name,
embedding_function=embedding,
fields=fields,
vector_search_dimensions=vector_search_dimensions,
**kwargs,
)
azure_search.add_embeddings(text_embeddings, metadatas, **kwargs)
return azure_search
def as_retriever(self, **kwargs: Any) -> AzureSearchVectorStoreRetriever: # type: ignore
"""Return AzureSearchVectorStoreRetriever initialized from this VectorStore.
Args:
search_type (Optional[str]): Overrides the type of search that
the Retriever should perform. Defaults to `self.search_type`.
Can be "similarity", "hybrid", or "semantic_hybrid".
search_kwargs (Optional[Dict]): Keyword arguments to pass to the
search function. Can include things like:
score_threshold: Minimum relevance threshold
for similarity_score_threshold
fetch_k: Amount of documents to pass to MMR algorithm (Default: 20)
lambda_mult: Diversity of results returned by MMR;
1 for minimum diversity and 0 for maximum. (Default: 0.5)
filter: Filter by document metadata
Returns:
AzureSearchVectorStoreRetriever: Retriever class for VectorStore.
"""
search_type = kwargs.get("search_type", self.search_type)
kwargs["search_type"] = search_type
tags = kwargs.pop("tags", None) or []
tags.extend(self._get_retriever_tags())
return AzureSearchVectorStoreRetriever(vectorstore=self, **kwargs, tags=tags)
class AzureSearchVectorStoreRetriever(BaseRetriever):
    """Retriever that uses `Azure Cognitive Search`.

    Dispatches a query to one of the vector store's search methods based on
    ``search_type`` and returns only the documents (scores are dropped).
    """
    vectorstore: AzureSearch
    """Azure Search instance used to find similar documents."""
    search_type: str = "hybrid"
    """Type of search to perform. Options are "similarity", "hybrid",
    "semantic_hybrid", "similarity_score_threshold", "hybrid_score_threshold",
    or "semantic_hybrid_score_threshold"."""
    k: int = 4
    """Number of documents to return."""
    search_kwargs: dict = {}
    """Search params.
        score_threshold: Minimum relevance threshold
            for similarity_score_threshold
        fetch_k: Amount of documents to pass to MMR algorithm (Default: 20)
        lambda_mult: Diversity of results returned by MMR;
            1 for minimum diversity and 0 for maximum. (Default: 0.5)
        filter: Filter by document metadata
    """
    # Closed set of accepted search_type values; enforced by the validator.
    allowed_search_types: ClassVar[Collection[str]] = (
        "similarity",
        "similarity_score_threshold",
        "hybrid",
        "hybrid_score_threshold",
        "semantic_hybrid",
        "semantic_hybrid_score_threshold",
    )
    # Allow non-pydantic field types such as the AzureSearch instance.
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
    @model_validator(mode="before")
    @classmethod
    def validate_search_type(cls, values: Dict) -> Any:
        """Validate search type.

        Runs before field coercion; rejects construction with an unsupported
        ``search_type`` value and returns ``values`` unchanged otherwise.
        """
        if "search_type" in values:
            search_type = values["search_type"]
            if search_type not in cls.allowed_search_types:
                raise ValueError(
                    f"search_type of {search_type} not allowed. Valid values are: "
                    f"{cls.allowed_search_types}"
                )
        return values
    def _get_relevant_documents(
        self,
        query: str,
        run_manager: CallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        """Synchronously retrieve documents for *query* per ``search_type``."""
        # Per-call kwargs take precedence over the configured defaults.
        params = {**self.search_kwargs, **kwargs}
        if self.search_type == "similarity":
            docs = self.vectorstore.vector_search(query, k=self.k, **params)
        elif self.search_type == "similarity_score_threshold":
            # *_score_threshold variants yield (doc, score) pairs; keep docs only.
            docs = [
                doc
                for doc, _ in self.vectorstore.similarity_search_with_relevance_scores(
                    query, k=self.k, **params
                )
            ]
        elif self.search_type == "hybrid":
            docs = self.vectorstore.hybrid_search(query, k=self.k, **params)
        elif self.search_type == "hybrid_score_threshold":
            docs = [
                doc
                for doc, _ in self.vectorstore.hybrid_search_with_relevance_scores(
                    query, k=self.k, **params
                )
            ]
        elif self.search_type == "semantic_hybrid":
            docs = self.vectorstore.semantic_hybrid_search(query, k=self.k, **params)
        elif self.search_type == "semantic_hybrid_score_threshold":
            docs = [
                doc
                for doc, _ in self.vectorstore.semantic_hybrid_search_with_score(
                    query, k=self.k, **params
                )
            ]
        else:
            # Normally unreachable: the model validator rejects bad values.
            raise ValueError(f"search_type of {self.search_type} not allowed.")
        return docs
    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        """Async counterpart of ``_get_relevant_documents``."""
        # Per-call kwargs take precedence over the configured defaults.
        params = {**self.search_kwargs, **kwargs}
        if self.search_type == "similarity":
            docs = await self.vectorstore.avector_search(query, k=self.k, **params)
        elif self.search_type == "similarity_score_threshold":
            docs_and_scores = (
                await self.vectorstore.asimilarity_search_with_relevance_scores(
                    query, k=self.k, **params
                )
            )
            docs = [doc for doc, _ in docs_and_scores]
        elif self.search_type == "hybrid":
            docs = await self.vectorstore.ahybrid_search(query, k=self.k, **params)
        elif self.search_type == "hybrid_score_threshold":
            docs_and_scores = (
                await self.vectorstore.ahybrid_search_with_relevance_scores(
                    query, k=self.k, **params
                )
            )
            docs = [doc for doc, _ in docs_and_scores]
        elif self.search_type == "semantic_hybrid":
            docs = await self.vectorstore.asemantic_hybrid_search(
                query, k=self.k, **params
            )
        elif self.search_type == "semantic_hybrid_score_threshold":
            docs = [
                doc
                for doc, _ in await self.vectorstore.asemantic_hybrid_search_with_score(
                    query, k=self.k, **params
                )
            ]
        else:
            # Normally unreachable: the model validator rejects bad values.
            raise ValueError(f"search_type of {self.search_type} not allowed.")
        return docs
def _results_to_documents(
    results: SearchItemPaged[Dict],
) -> List[Tuple[Document, float]]:
    """Pair each raw search hit with its relevance score as a Document."""
    return [
        (_result_to_document(hit), float(hit["@search.score"]))
        for hit in results
    ]
async def _aresults_to_documents(
    results: AsyncSearchItemPaged[Dict],
) -> List[Tuple[Document, float]]:
    """Async variant: pair each raw search hit with its relevance score."""
    return [
        (_result_to_document(hit), float(hit["@search.score"]))
        async for hit in results
    ]
async def _areorder_results_with_maximal_marginal_relevance(
    results: AsyncSearchItemPaged[Dict],
    query_embedding: np.ndarray,
    lambda_mult: float = 0.5,
    k: int = 4,
) -> List[Tuple[Document, float]]:
    """Reorder async search results by maximal marginal relevance (MMR).

    Args:
        results: Async iterable of raw search hits; each hit must carry the
            content vector field.
        query_embedding: Embedding of the query, compared against hit vectors.
        lambda_mult: Trade-off between relevance (1) and diversity (0).
        k: Maximum number of results to return.

    Returns:
        (Document, score) pairs in MMR order; empty list for no hits.
    """
    # Convert results to Document objects, keeping each hit's vector for MMR.
    docs = [
        (
            _result_to_document(result),
            float(result["@search.score"]),
            result[FIELDS_CONTENT_VECTOR],
        )
        async for result in results
    ]
    # Guard against no hits: unpacking zip(*docs) would otherwise raise.
    # (Mirrors the synchronous _reorder_results_with_maximal_marginal_relevance.)
    if not docs:
        return []
    documents, scores, vectors = map(list, zip(*docs))
    # Get the new order of results.
    new_ordering = maximal_marginal_relevance(
        query_embedding, vectors, k=k, lambda_mult=lambda_mult
    )
    # Reorder the values and return.
    ret: List[Tuple[Document, float]] = []
    for x in new_ordering:
        # Function can return a -1 index to mark "no more picks".
        if x == -1:
            break
        ret.append((documents[x], scores[x]))  # type: ignore
    return ret
def _reorder_results_with_maximal_marginal_relevance(
    results: SearchItemPaged[Dict],
    query_embedding: np.ndarray,
    lambda_mult: float = 0.5,
    k: int = 4,
) -> List[Tuple[Document, float]]:
    """Reorder search results by maximal marginal relevance (MMR).

    Returns (Document, score) pairs in MMR order; empty list for no hits.
    """
    # Materialize each hit as (document, score, content vector).
    triples = [
        (
            _result_to_document(hit),
            float(hit["@search.score"]),
            hit[FIELDS_CONTENT_VECTOR],
        )
        for hit in results
    ]
    if not triples:
        return []
    documents, scores, vectors = map(list, zip(*triples))
    # Ask MMR for the new index ordering (may contain a -1 terminator).
    ordering = maximal_marginal_relevance(
        query_embedding, vectors, k=k, lambda_mult=lambda_mult
    )
    reordered: List[Tuple[Document, float]] = []
    for idx in ordering:
        if idx == -1:
            break
        reordered.append((documents[idx], scores[idx]))  # type: ignore
    return reordered
def _result_to_document(result: Dict) -> Document:
    """Build a Document from a raw Azure Search hit dict.

    Side effect: removes the id field from *result* when present, after the
    metadata has been assembled (so the generic fallback still includes it).
    """
    # Metadata: prefer the dedicated metadata field (dict or JSON string);
    # otherwise treat every field except content and vector as metadata.
    if FIELDS_METADATA in result:
        raw_metadata = result[FIELDS_METADATA]
        if isinstance(raw_metadata, dict):
            fields_metadata = raw_metadata
        else:
            fields_metadata = json.loads(raw_metadata)
    else:
        fields_metadata = {
            key: value
            for key, value in result.items()
            if key not in [FIELDS_CONTENT_VECTOR, FIELDS_CONTENT]
        }
    # Surface the document id (popping it from the raw hit) when present.
    fields_id = {FIELDS_ID: result.pop(FIELDS_ID)} if FIELDS_ID in result else {}
    return Document(
        page_content=result[FIELDS_CONTENT],
        metadata={**fields_id, **fields_metadata},
    )
def _peek(iterable: Iterable, default: Optional[Any] = None) -> Tuple[Iterable, Any]:
try:
iterator = iter(iterable)
value = next(iterator)
iterable = itertools.chain([value], iterator)
return iterable, value
except StopIteration:
return iterable, default
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@vectorstores@azuresearch.py@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/tests/test_optional/test_px/conftest.py",
"type": "Python"
}
|
import pandas as pd
import polars as pl
import pyarrow as pa
import pytest
from narwhals.typing import IntoDataFrame
from narwhals.utils import parse_version
def pandas_constructor(obj) -> IntoDataFrame:
    """Wrap *obj* in a default (NumPy-backed) pandas DataFrame."""
    frame = pd.DataFrame(obj)
    return frame  # type: ignore[no-any-return]
def pandas_nullable_constructor(obj) -> IntoDataFrame:
    """Wrap *obj* in a pandas DataFrame using the numpy_nullable dtype backend."""
    frame = pd.DataFrame(obj)
    return frame.convert_dtypes(dtype_backend="numpy_nullable")  # type: ignore[no-any-return]
def pandas_pyarrow_constructor(obj) -> IntoDataFrame:
    """Wrap *obj* in a pandas DataFrame using the pyarrow dtype backend."""
    frame = pd.DataFrame(obj)
    return frame.convert_dtypes(dtype_backend="pyarrow")  # type: ignore[no-any-return]
def polars_eager_constructor(obj) -> IntoDataFrame:
    """Wrap *obj* in an eager polars DataFrame."""
    frame = pl.DataFrame(obj)
    return frame
def pyarrow_table_constructor(obj) -> IntoDataFrame:
    """Wrap *obj* in a pyarrow Table."""
    table = pa.table(obj)
    return table  # type: ignore[no-any-return]
# Dataframe constructors available on every supported pandas version.
constructors = [polars_eager_constructor, pyarrow_table_constructor, pandas_constructor]
# The nullable/pyarrow-backed constructors are gated on pandas >= 2.0 —
# presumably because convert_dtypes(dtype_backend=...) needs it; TODO confirm.
if parse_version(pd.__version__) >= parse_version("2.0.0"):
    constructors.extend(
        [
            pandas_nullable_constructor,
            pandas_pyarrow_constructor,
        ]
    )
@pytest.fixture(params=constructors)
def constructor(request: pytest.FixtureRequest):
    """Parametrized fixture yielding one dataframe constructor per backend."""
    make_frame = request.param
    return make_frame  # type: ignore[no-any-return]
@pytest.fixture(params=["pandas", "pyarrow", "polars"])
def backend(request: pytest.FixtureRequest) -> str:
return request.param # type: ignore[no-any-return]
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@tests@test_optional@test_px@conftest.py@.PATH_END.py
|
{
"filename": "tssproduce.py",
"repo_name": "sirocco-rt/sirocco",
"repo_path": "sirocco_extracted/sirocco-main/py_progs/py4py/scripts/tssproduce.py",
"type": "Python"
}
|
"""
Produces a set of CARAMEL and MEMECHO-format synthetic lightcurves from a set of models.
"""
# -*- coding: utf-8 -*-
import py4py.reverb.timeseries.output as tss_output
import py4py.reverb.timeseries.input as tss_import
import py4py.reverb.timeseries.process as tss_process
import astropy as ap
from astropy import units as u
# noinspection SpellCheckingInspection
from astropy.units import cds as ucds
import numpy as np
import pickle
import datetime
import sys
# ========== SETTINGS ==========
# -------- TF SETTINGS ---------
tf_lim_test = 9999999999
tf_delay_bins = 100
tf_wave = 6562.8
# ---- Seyfert Settings ----
spectrum_file_sey = "sey_100.spec"
suffix_sey = "sey"
bolometric_sey = 1.043e44
tf_line_sey = 28
databases_sey = {'min': {'path': "/home/swm1n12/bindata/sey_090", 'scale': 1.0/60, 'continuum': bolometric_sey*0.9},
'mid': {'path': "/home/swm1n12/bindata/sey_100", 'scale': 1.0/60, 'continuum': bolometric_sey},
'max': {'path': "/home/swm1n12/bindata/sey_110", 'scale': 1.0/60, 'continuum': bolometric_sey*1.1}}
# ---- QSO Settings ----
spectrum_file_qso = "qso_100.spec"
suffix_qso = "qso"
bolometric_qso = 1.043e46
tf_line_qso = 44
databases_qso = {'min': {'path': "/home/swm1n12/bindata/qso_090", 'scale': 1.0/50, 'continuum': bolometric_qso*0.9},
'mid': {'path': "/home/swm1n12/bindata/qso_100", 'scale': 1.0/100, 'continuum': bolometric_qso},
'max': {'path': "/home/swm1n12/bindata/qso_110", 'scale': 1.0/50, 'continuum': bolometric_qso*1.1}}
# ---- LIGHTCURVE SETTINGS -----
lightcurve_file = "light_1158.dat"
lightcurve_time_units = ucds.MJD
lightcurve_value_units = 1e-15 * u.erg / (u.s * u.angstrom * u.cm * u.cm)
# lightcurve_bolometric_correction = 9.0 * 5100.0 * u.angstrom
# We want a curve of the observed bolometric luminosity, so we need to rescale to 100 Pc
lightcurve_target_lum_qso = bolometric_qso * u.erg / (u.s * np.pi * np.power(u.parsec.to(u.cm)*100, 2) * u.cm * u.cm)
lightcurve_target_lum_sey = bolometric_sey * u.erg / (u.s * np.pi * np.power(u.parsec.to(u.cm)*100, 2) * u.cm * u.cm)
# ----- SPECTRUM SETTINGS ------
spectrum_bins_name = "Lambda"
spectrum_value_name = "A40P0.50"
# python spectra are per cm2 at 100 pc -> we need to divide ΔC by this value too
spectrum_value_units = u.angstrom * u.erg / (u.s * u.angstrom * (np.pi * np.power(u.parsec.to(u.cm)*100, 2) * u.cm * u.cm))
# spectrum_value_to_lightcurve_value = (np.pi * np.power(u.parsec.to(u.cm) * 100, 2)) * u.cm * u.cm / 1000
# spectrum_value_units = 1e-14 * u.erg / (u.s * u.angstrom * u.cm * u.cm)
spectrum_wave_units = u.angstrom
# ----- SPECTRUM (FULL) SETTINGS -----
# Used to create the non-continuum-subtracted spectra for MEMECHO
# As much continuum as possible! Can't go further due to the limits of Python.
spectrum_full_wave_range = [6200, 7000] * u.angstrom
spectrum_full_rebin_to = 100
# ----- SPECTRUM (SUBTRACTED) SETTINGS -----
# Used to create the continuum-subtracted spectra for CARAMEL
# As little continuum as possible! We want just the line to make it faster.
spectrum_line_wave_range = [6200, 7000] * u.angstrom
spectrum_line_subtract_range = [6300, 6850] * u.angstrom
spectrum_line_rebin_to = 70
# ------ SPECTRA TIMES ---------
spectra_times_file = "spectra_times.dat"
spectra_times_units = ucds.MJD
spectra_fudge_factor = 1
# ---- VISUALIZATION & OUTPUT SETTINGS ------
output_lightcurve_file = "out_lightcurve"
output_times_file = "out_times"
output_times_line_file = "out_times_line"
output_trailed_spec_file = "out_tspec"
output_spectra_file = "out_spectra"
output_animation_file = "out_anim"
is_reversed = False
visualise_outputs = True
visualise_animation = False
visualise_clean = False
visualise_rescaled_tfs = True
visualise_rescaled_tfs_max = 30
time_series_outputs = False
# --------- RESCALING ----------
delay_max = 90 * u.d
delta_continuum_range = 0.50
# --------- ERRORS ---------
error_ratio_to_variation = 0.020
# ------ PICKLES ------
pickle_tf_file = 'pickle_tf'
pickle_spectra_file = 'pickle_spectra'
pickle_times_file = 'pickle_times'
use_pickled_tf = True
use_pickled_times = True # If set to true, won't change continuum range!
use_pickled_spectra = True
stop_at_min_max = False
# ===============
# Program begins!
# ===============
print("=== tssproduce started! ===")
print("Importing begins at: {}".format(datetime.datetime.now()))
# Import all data files
print("Importing spectra...")
spectrum_qso_line, continuum_fit_qso = tss_import.read_spectrum(spectrum_file_qso, spectrum_bins_name, spectrum_value_name,
frequency=False,
wave_units=spectrum_wave_units,
wave_name="Å",
value_units=spectrum_value_units,
value_name="erg s$^{-1}$ cm$^{-2}$ at 100 Pc",
limits=spectrum_line_wave_range,
subtract_continuum_with_mask=spectrum_line_subtract_range,
rebin_to=spectrum_line_rebin_to)
spectrum_qso_full = tss_import.read_spectrum(spectrum_file_qso, spectrum_bins_name, spectrum_value_name,
frequency=False,
wave_units=spectrum_wave_units,
value_units=spectrum_value_units,
value_name="erg s$^{-1}$ cm$^{-2}$ at 100 Pc",
limits=spectrum_full_wave_range,
rebin_to=spectrum_full_rebin_to)
spectrum_sey_line, continuum_fit_sey = tss_import.read_spectrum(spectrum_file_sey, spectrum_bins_name, spectrum_value_name,
frequency=False,
wave_units=spectrum_wave_units,
wave_name="Å",
value_units=spectrum_value_units,
value_name="erg s$^{-1}$ cm$^{-2}$ at 100 Pc",
limits=spectrum_line_wave_range,
subtract_continuum_with_mask=spectrum_line_subtract_range,
rebin_to=spectrum_line_rebin_to)
spectrum_sey_full = tss_import.read_spectrum(spectrum_file_sey, spectrum_bins_name, spectrum_value_name,
frequency=False,
wave_units=spectrum_wave_units,
wave_name="Å",
value_units=spectrum_value_units,
value_name="erg s$^{-1}$ cm$^{-2}$ at 100 Pc",
limits=spectrum_full_wave_range,
rebin_to=spectrum_full_rebin_to)
print("Importing lightcurve file '{}'...".format(lightcurve_file))
lightcurve_qso = tss_import.read_lightcurve(lightcurve_file,
time_units=lightcurve_time_units,
value_units=lightcurve_value_units,
time_name="MJD",
value_name="erg s$^{-1}$",
target_bolometric_luminosity=lightcurve_target_lum_qso,
delta_continuum_range=delta_continuum_range)
lightcurve_sey = tss_import.read_lightcurve(lightcurve_file,
time_units=lightcurve_time_units,
value_units=lightcurve_value_units,
time_name="MJD",
value_name="erg s$^{-1}$",
target_bolometric_luminosity=lightcurve_target_lum_sey,
delta_continuum_range=delta_continuum_range)
print("Importing spectra timing file '{}'...".format(spectra_times_file))
spectra_times = tss_import.read_spectra_times(spectra_times_file,
time_units=spectra_times_units,
time_name="MJD")
# Produce a TF (transfer function) per object/spectrum, or reload cached ones.
if not use_pickled_tf:
    print("Generating Ψ begins at: {}".format(datetime.datetime.now()))
    tf_qso_full = tss_process.generate_tf(databases_qso, spectrum_qso_full, tf_delay_bins, tf_line_qso, tf_wave, 'qso_full', tf_lim_test)
    tf_qso_line = tss_process.generate_tf(databases_qso, spectrum_qso_line, tf_delay_bins, tf_line_qso, tf_wave, 'qso_line', tf_lim_test)
    tf_sey_full = tss_process.generate_tf(databases_sey, spectrum_sey_full, tf_delay_bins, tf_line_sey, tf_wave, 'sey_full', tf_lim_test)
    tf_sey_line = tss_process.generate_tf(databases_sey, spectrum_sey_line, tf_delay_bins, tf_line_sey, tf_wave, 'sey_line', tf_lim_test)
    print("Pickling Ψ for future use...")
    # Use context managers so each file is closed even if pickling fails
    # (the previous open/dump/close sequence leaked the handle on error).
    for tf_name, tf_value in (('sey_full', tf_sey_full), ('sey_line', tf_sey_line),
                              ('qso_full', tf_qso_full), ('qso_line', tf_qso_line)):
        with open(pickle_tf_file + '_' + tf_name + '_tf.pickle', 'wb') as picklefile:
            pickle.dump(tf_value, picklefile)
else:
    print("Unpickling Ψ...")
    # Reload the previously generated transfer functions from disk.
    with open(pickle_tf_file + '_sey_full_tf.pickle', 'rb') as picklefile:
        tf_sey_full = pickle.load(picklefile)
    with open(pickle_tf_file + '_sey_line_tf.pickle', 'rb') as picklefile:
        tf_sey_line = pickle.load(picklefile)
    with open(pickle_tf_file + '_qso_full_tf.pickle', 'rb') as picklefile:
        tf_qso_full = pickle.load(picklefile)
    with open(pickle_tf_file + '_qso_line_tf.pickle', 'rb') as picklefile:
        tf_qso_line = pickle.load(picklefile)
# ========
# Convolve
# ========
# Build the base (unperturbed) spectrum time series for each model variant.
spectra_qso_line = tss_process.generate_spectra_base(spectrum_qso_line, spectra_times)
spectra_qso_full = tss_process.generate_spectra_base(spectrum_qso_full, spectra_times)
spectra_sey_line = tss_process.generate_spectra_base(spectrum_sey_line, spectra_times)
spectra_sey_full = tss_process.generate_spectra_base(spectrum_sey_full, spectra_times)
if not use_pickled_times:
    print("Generating full lightcurve at: {}".format(datetime.datetime.now()))
    # Here we convert the simple lightcurve and series of sample times into a
    # high-resolution set of interpolated driving lightcurves and working times
    # that are at a resolution on or below that of the transfer function
    times_qso_full = tss_process.generate_times_and_delta_continuum(tf_qso_full, lightcurve_qso, delay_max)
    times_qso_line = tss_process.generate_times_and_delta_continuum(tf_qso_line, lightcurve_qso, delay_max)
    times_sey_full = tss_process.generate_times_and_delta_continuum(tf_sey_full, lightcurve_sey, delay_max)
    times_sey_line = tss_process.generate_times_and_delta_continuum(tf_sey_line, lightcurve_sey, delay_max)
    # Output the times to file (line variants only, for later plotting)
    times_sey_line.write(output_times_file+'_sey.dat', format='ascii', overwrite=True)
    times_qso_line.write(output_times_file+'_qso.dat', format='ascii', overwrite=True)
    # Then we pickle these times to disk for use later
    ap.io.misc.fnpickle(times_qso_full, pickle_times_file+'_qso_full_time.pickle')
    ap.io.misc.fnpickle(times_qso_line, pickle_times_file+'_qso_line_time.pickle')
    ap.io.misc.fnpickle(times_sey_full, pickle_times_file+'_sey_full_time.pickle')
    ap.io.misc.fnpickle(times_sey_line, pickle_times_file+'_sey_line_time.pickle')
else:
    print("Unpickling full lightcurve...")
    # Recover previously-generated times from disk for use again.
    # NOTE: as warned at the use_pickled_times setting, this path ignores any
    # change to delta_continuum_range made since the pickles were written.
    times_qso_full = ap.io.misc.fnunpickle(pickle_times_file+'_qso_full_time.pickle')
    times_qso_line = ap.io.misc.fnunpickle(pickle_times_file+'_qso_line_time.pickle')
    times_sey_full = ap.io.misc.fnunpickle(pickle_times_file+'_sey_full_time.pickle')
    times_sey_line = ap.io.misc.fnunpickle(pickle_times_file+'_sey_line_time.pickle')
# -------------
# Begin process
# -------------
if use_pickled_spectra:
print("Unpickling time series...")
# Recover previously-generated spectra from disk for use again
spectra_qso_full = ap.io.misc.fnunpickle(pickle_spectra_file+'_qso_full.pickle')
spectra_qso_line = ap.io.misc.fnunpickle(pickle_spectra_file+'_qso_line.pickle')
spectra_sey_full = ap.io.misc.fnunpickle(pickle_spectra_file+'_sey_full.pickle')
spectra_sey_line = ap.io.misc.fnunpickle(pickle_spectra_file+'_sey_line.pickle')
spectra_qso_full_clean = ap.io.misc.fnunpickle(pickle_spectra_file+'_qso_full_clean.pickle')
spectra_qso_line_clean = ap.io.misc.fnunpickle(pickle_spectra_file+'_qso_line_clean.pickle')
spectra_sey_full_clean = ap.io.misc.fnunpickle(pickle_spectra_file+'_sey_full_clean.pickle')
spectra_sey_line_clean = ap.io.misc.fnunpickle(pickle_spectra_file+'_sey_line_clean.pickle')
else:
print("Generating time series begins at: {}".format(datetime.datetime.now()))
# We generate the absolute minima and maxima possible for the spectrum given
# the delta continuum range and the transfer functions.
tss_process.generate_spectra_min_max(times_qso_full, tf_qso_full, spectra_qso_full, spectrum_qso_full, continuum_fit_qso)
tss_process.generate_spectra_min_max(times_qso_line, tf_qso_line, spectra_qso_line, spectrum_qso_line)
tss_process.generate_spectra_min_max(times_sey_full, tf_sey_full, spectra_sey_full, spectrum_sey_full, continuum_fit_sey)
tss_process.generate_spectra_min_max(times_sey_line, tf_sey_line, spectra_sey_line, spectrum_sey_line)
if stop_at_min_max:
# If these are all we're interested in, just write them to disk for
# looking at later
spectra_qso_full.write(pickle_spectra_file+'_qso_full_range.dat', format='ascii', overwrite=True)
spectra_qso_line.write(pickle_spectra_file+'_qso_line_range.dat', format='ascii', overwrite=True)
spectra_sey_full.write(pickle_spectra_file+'_sey_full_range.dat', format='ascii', overwrite=True)
spectra_sey_line.write(pickle_spectra_file+'_sey_line_range.dat', format='ascii', overwrite=True)
sys.exit(1)
# We now generate the spectra properly
tss_process.generate_spectra_details(times_qso_line, tf_qso_line,
spectra_qso_line, spectrum_qso_line, verbose=False)
tss_process.generate_spectra_details(times_qso_full, tf_qso_full,
spectra_qso_full, spectrum_qso_full, continuum_fit_qso, verbose=False)
tss_process.generate_spectra_details(times_sey_line, tf_sey_line,
spectra_sey_line, spectrum_sey_line, verbose=False)
tss_process.generate_spectra_details(times_sey_full, tf_sey_full,
spectra_sey_full, spectrum_sey_full, continuum_fit_sey, verbose=False)
# Now we generate the errors! We want to set the per-pixel errors such that
# the *total* error on the line is equal to error_ratio_to_variation
# We calculate this for the QSO line case, as it's not polluted by the
# continuum variation, then apply to the full QSO. Then we apply the errors
# and keep clean copies of each.
spectra_qso_line_clean = tss_process.generate_spectra_error(spectra=spectra_qso_line, error=error_ratio_to_variation)
spectra_qso_full_clean = tss_process.copy_spectra_error(origin=spectra_qso_line, target=spectra_qso_full)
spectra_sey_line_clean = tss_process.copy_spectra_error(origin=spectra_qso_line, target=spectra_sey_line, rescale=True)
# NOTE(review): this is the tail of a larger driver script; the leading
# indentation of the `if` bodies appears to have been stripped during
# extraction — confirm block structure against the original file before use.
# Copy the (noise-free) error columns from the line-only spectra onto the
# full spectra, presumably so both share the same uncertainty model — TODO confirm.
spectra_sey_full_clean = tss_process.copy_spectra_error(origin=spectra_sey_line, target=spectra_sey_full)
# Pickle the spectra to be used later
ap.io.misc.fnpickle(spectra_qso_full, pickle_spectra_file+'_qso_full.pickle')
ap.io.misc.fnpickle(spectra_qso_line, pickle_spectra_file+'_qso_line.pickle')
ap.io.misc.fnpickle(spectra_sey_full, pickle_spectra_file+'_sey_full.pickle')
ap.io.misc.fnpickle(spectra_sey_line, pickle_spectra_file+'_sey_line.pickle')
ap.io.misc.fnpickle(spectra_qso_full_clean, pickle_spectra_file+'_qso_full_clean.pickle')
ap.io.misc.fnpickle(spectra_qso_line_clean, pickle_spectra_file+'_qso_line_clean.pickle')
ap.io.misc.fnpickle(spectra_sey_full_clean, pickle_spectra_file+'_sey_full_clean.pickle')
ap.io.misc.fnpickle(spectra_sey_line_clean, pickle_spectra_file+'_sey_line_clean.pickle')
# Write the spectra out to an easily plotted file
spectra_qso_full.write(output_spectra_file+'_qso_full.dat', format='ascii', overwrite=True)
spectra_qso_line.write(output_spectra_file+'_qso_line.dat', format='ascii', overwrite=True)
spectra_sey_full.write(output_spectra_file+'_sey_full.dat', format='ascii', overwrite=True)
spectra_sey_line.write(output_spectra_file+'_sey_line.dat', format='ascii', overwrite=True)
spectra_qso_full_clean.write(output_spectra_file+'_qso_full_clean.dat', format='ascii', overwrite=True)
spectra_qso_line_clean.write(output_spectra_file+'_qso_line_clean.dat', format='ascii', overwrite=True)
spectra_sey_full_clean.write(output_spectra_file+'_sey_full_clean.dat', format='ascii', overwrite=True)
spectra_sey_line_clean.write(output_spectra_file+'_sey_line_clean.dat', format='ascii', overwrite=True)
# Build per-timestep line-emission tables for the Seyfert and QSO models
# and write them out as plain ASCII.
spectra_times_sey = tss_process.generate_times_line_emission(spectra_sey_line, spectra_times)
spectra_times_qso = tss_process.generate_times_line_emission(spectra_qso_line, spectra_times)
spectra_times_sey.write(output_times_line_file+'_sey.dat', format='ascii', overwrite=True)
spectra_times_qso.write(output_times_line_file+'_qso.dat', format='ascii', overwrite=True)
# Optionally export data in the formats expected by the CARAMEL and
# MEMEcho reverberation-mapping codes.
if time_series_outputs:
tss_output.write_caramel_data(lightcurve_qso, spectra_qso_line, spectra_times, suffix_qso)
tss_output.write_memecho_data(lightcurve_qso, spectra_qso_full, spectra_times, suffix_qso)
tss_output.write_caramel_data(lightcurve_sey, spectra_sey_line, spectra_times, suffix_sey)
tss_output.write_memecho_data(lightcurve_sey, spectra_sey_full, spectra_times, suffix_sey)
# ==============================================================================
# OUTPUT VISUALIZATIONS
# ==============================================================================
# Generate trailed spectrogram
# ------------------------------------------------------------------------------
if visualise_outputs:
print("Generating trailed spectrogram begins at: {}".format(datetime.datetime.now()))
tss_output.trailed_spectrogram(spectra_qso_line, lightcurve_qso, spectra_times, output_trailed_spec_file+'_'+suffix_qso+'_line')
tss_output.trailed_spectrogram(spectra_qso_full, lightcurve_qso, spectra_times, output_trailed_spec_file+'_'+suffix_qso+'_full')
tss_output.trailed_spectrogram(spectra_sey_line, lightcurve_sey, spectra_times, output_trailed_spec_file+'_'+suffix_sey+'_line')
tss_output.trailed_spectrogram(spectra_sey_full, lightcurve_sey, spectra_times, output_trailed_spec_file+'_'+suffix_sey+'_full')
# tss_output.plot_spectra_rms([spectra_qso_line_clean, spectra_sey_line_clean],
# ['out_specrms_qso', 'out_specrms_sey'])
# Repeat the spectrograms for the error-free ("clean") spectra if requested.
if visualise_clean:
tss_output.trailed_spectrogram(spectra_qso_line_clean, lightcurve_qso, spectra_times, output_trailed_spec_file+'_'+suffix_qso+'_line_clean')
tss_output.trailed_spectrogram(spectra_qso_full_clean, lightcurve_qso, spectra_times, output_trailed_spec_file+'_'+suffix_qso+'_full_clean')
tss_output.trailed_spectrogram(spectra_sey_line_clean, lightcurve_sey, spectra_times, output_trailed_spec_file+'_'+suffix_sey+'_line_clean')
tss_output.trailed_spectrogram(spectra_sey_full_clean, lightcurve_sey, spectra_times, output_trailed_spec_file+'_'+suffix_sey+'_full_clean')
# Plot rescaled response functions, overlaying Keplerian delay envelopes
# (angle in degrees, mass presumably in solar masses, radius range — TODO
# confirm units against tss_output.rescaled_rfs).
if visualise_rescaled_tfs:
# tf_qso_resc = tss_process.generate_tf(databases_qso, spectrum_qso_line, 25, tf_line_qso, tf_wave, 'out_qso_resc', 10000, dynamic_range=1.5)
# tf_sey_resc = tss_process.generate_tf(databases_sey, spectrum_sey_line, 25, tf_line_sey, tf_wave, 'out_sey_resc', 10000, dynamic_range=1.5)
tss_output.rescaled_rfs([tf_qso_line, tf_sey_line],
rescale_max_time=delay_max.value,
figure_max_time=delay_max.value/2.5,
keplerian={
'angle': 40, 'mass': 1.33e8, 'radius': [50, 100]
})
# ------------------------------------------------------------------------------
# Generate write_animation
# ------------------------------------------------------------------------------
if visualise_animation:
print("Generating write_animation begins at: {}".format(datetime.datetime.now()))
tss_output.write_animation(spectra_qso_full, lightcurve_qso, spectra_times, times_qso_full, output_animation_file + "_qso_full")
tss_output.write_animation(spectra_qso_line, lightcurve_qso, spectra_times, times_qso_line, output_animation_file + "_qso_line")
tss_output.write_animation(spectra_sey_full, lightcurve_sey, spectra_times, times_sey_full, output_animation_file + "_sey_full")
tss_output.write_animation(spectra_sey_line, lightcurve_sey, spectra_times, times_sey_line, output_animation_file + "_sey_line")
# Animations for the clean spectra as well, when enabled.
if visualise_clean:
tss_output.write_animation(spectra_qso_full_clean, lightcurve_qso, spectra_times, times_qso_full, output_animation_file + "_qso_full_clean")
tss_output.write_animation(spectra_qso_line_clean, lightcurve_qso, spectra_times, times_qso_line, output_animation_file + "_qso_line_clean")
tss_output.write_animation(spectra_sey_full_clean, lightcurve_sey, spectra_times, times_sey_full, output_animation_file + "_sey_full_clean")
tss_output.write_animation(spectra_sey_line_clean, lightcurve_sey, spectra_times, times_sey_line, output_animation_file + "_sey_line_clean")
|
sirocco-rtREPO_NAMEsiroccoPATH_START.@sirocco_extracted@sirocco-main@py_progs@py4py@scripts@tssproduce.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/scene/yaxis/title/__init__.py",
"type": "Python"
}
|
"""Public namespace for ``layout.scene.yaxis.title``.

Static type checkers and pre-3.7 interpreters import ``Font`` eagerly;
modern interpreters defer the import until first attribute access via the
PEP 562 module ``__getattr__``/``__dir__`` hooks supplied by
``relative_import``.
"""
import sys
from typing import TYPE_CHECKING

if TYPE_CHECKING or sys.version_info < (3, 7):
    # Eager import: makes `Font` a real module attribute.
    from ._font import Font
else:
    # Lazy import: `Font` is resolved on demand from `._font`.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@scene@yaxis@title@__init__.py@.PATH_END.py
|
{
"filename": "tools.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/cli/langchain_cli/integration_template/docs/tools.ipynb",
"type": "Jupyter Notebook"
}
|
---
sidebar_label: __ModuleName__
---
# __ModuleName__
- TODO: Make sure API reference link is correct.
This notebook provides a quick overview for getting started with __ModuleName__ [tool](/docs/integrations/tools/). For detailed documentation of all __ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html).
- TODO: Add any other relevant links, like information about underlying API, etc.
## Overview
### Integration details
- TODO: Make sure links and features are correct
| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/__module_name__) | Package latest |
| :--- | :--- | :---: | :---: | :---: |
| [__ModuleName__](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | beta/❌ | ✅/❌ |  |
### Tool features
- TODO: Add feature table if it makes sense
## Setup
- TODO: Add any additional deps
The integration lives in the `langchain-community` package.
```python
%pip install --quiet -U langchain-community
```
### Credentials
- TODO: Add any credentials that are needed
```python
import getpass
import os
# if not os.environ.get("__MODULE_NAME___API_KEY"):
# os.environ["__MODULE_NAME___API_KEY"] = getpass.getpass("__MODULE_NAME__ API key:\n")
```
It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:
```python
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass()
```
## Instantiation
- TODO: Fill in instantiation params
Here we show how to instantiate an instance of the __ModuleName__ tool:
```python
from langchain_community.tools import __ModuleName__
tool = __ModuleName__(
...
)
```
## Invocation
### [Invoke directly with args](/docs/concepts/#invoke-with-just-the-arguments)
- TODO: Describe what the tool args are, fill them in, run cell
```python
tool.invoke({...})
```
### [Invoke with ToolCall](/docs/concepts/#invoke-with-toolcall)
We can also invoke the tool with a model-generated ToolCall, in which case a ToolMessage will be returned:
- TODO: Fill in tool args and run cell
```python
# This is usually generated by a model, but we'll create a tool call directly for demo purposes.
model_generated_tool_call = {
"args": {...}, # TODO: FILL IN
"id": "1",
"name": tool.name,
"type": "tool_call",
}
tool.invoke(model_generated_tool_call)
```
## Chaining
- TODO: Add user question and run cells
We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:
import ChatModelTabs from "@theme/ChatModelTabs";
<ChatModelTabs customVarName="llm" />
```python
# | output: false
# | echo: false
# !pip install -qU langchain langchain-openai
from langchain.chat_models import init_chat_model
llm = init_chat_model(model="gpt-4o", model_provider="openai")
```
```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig, chain
prompt = ChatPromptTemplate(
[
("system", "You are a helpful assistant."),
("human", "{user_input}"),
("placeholder", "{messages}"),
]
)
# specifying tool_choice will force the model to call this tool.
llm_with_tools = llm.bind_tools([tool], tool_choice=tool.name)
llm_chain = prompt | llm_with_tools
@chain
def tool_chain(user_input: str, config: RunnableConfig):
input_ = {"user_input": user_input}
ai_msg = llm_chain.invoke(input_, config=config)
tool_msgs = tool.batch(ai_msg.tool_calls, config=config)
return llm_chain.invoke({**input_, "messages": [ai_msg, *tool_msgs]}, config=config)
tool_chain.invoke("...")
```
## API reference
For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.__module_name__.tool.__ModuleName__.html
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@cli@langchain_cli@integration_template@docs@tools.ipynb@.PATH_END.py
|
{
"filename": "platforms.md",
"repo_name": "google/googletest",
"repo_path": "googletest_extracted/googletest-main/docs/platforms.md",
"type": "Markdown"
}
|
# Supported Platforms
GoogleTest follows Google's
[Foundational C++ Support Policy](https://opensource.google/documentation/policies/cplusplus-support).
See
[this table](https://github.com/google/oss-policies-info/blob/main/foundational-cxx-support-matrix.md)
for a list of currently supported versions of compilers, platforms, and build
tools.
|
googleREPO_NAMEgoogletestPATH_START.@googletest_extracted@googletest-main@docs@platforms.md@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.