metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "README.md",
"repo_name": "GBTAmmoniaSurvey/GAS",
"repo_path": "GAS_extracted/GAS-master/releases/README.md",
"type": "Markdown"
}
|
Release Making Instructions
===========================
To create a data release, run the appropriate script, e.g.:
python DR1.py
It will install the released version of GAS (and appropriate required other
packages) then run the gridding scripts.
Building a new release
----------------------
Checklist for creating a new release:
1. Create a new file DR#.py in this directory and populate it
2. Change the version of GAS in `setup.py` to `DR#`
3. Commit the changes (make sure to add `DR#.py`)
4. Test the DR using the hashtag version of the install script (e.g.,
https://github.com/keflavich/GAS/commit/c8e3ee117e7024ba7fdb75e2cc0d91546fc64bc7#diff-1094293878d5c459ed6dc7720ed01f18R15
instead of
https://github.com/keflavich/GAS/commit/c8e3ee117e7024ba7fdb75e2cc0d91546fc64bc7#diff-1094293878d5c459ed6dc7720ed01f18R16)
5. `git tag DR#` to create a tag
6. `git push --tags` to push the tags to the github repository
|
GBTAmmoniaSurveyREPO_NAMEGASPATH_START.@GAS_extracted@GAS-master@releases@README.md@.PATH_END.py
|
{
"filename": "_tickprefix.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/scene/yaxis/_tickprefix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickprefixValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``layout.scene.yaxis.tickprefix`` property."""

    def __init__(
        self, plotly_name="tickprefix", parent_name="layout.scene.yaxis", **kwargs
    ):
        # Pop the default so an explicit caller-supplied edit_type still wins.
        edit_type = kwargs.pop("edit_type", "plot")
        super(TickprefixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@scene@yaxis@_tickprefix.py@.PATH_END.py
|
{
"filename": "checksource.py",
"repo_name": "lucatelli/morphen",
"repo_path": "morphen_extracted/morphen-main/analysis_scripts/checksource.py",
"type": "Python"
}
|
# Script to image and assess the properties of long baseline calibrators.
# Runs in CASA 4.5.0
# Expects to find the *.split.cal measurement set and the .fluxscale file.
# Unless you want to limit spws (i.e. exclude very narrow ones for
# speed) nothing should need to be changed.
# If the analysis fails (usually only on check source) it's an
# indication that the source is non-point like. The image and png
# should be created regardless.
#
# C. Brogan (Nov 2015)
# T. Hunter (Jan 2016)
###################################
# DATA PROPERTIES
###################################
from __future__ import print_function # prevents adding old-style print statements
import numpy as np
import pylab as pl
import analysisUtils as au
import glob
import os
# Check if this is CASA 6
try:
    # casalith only exists in monolithic CASA 6 builds
    import casalith
    casaVersion = casalith.version_string()
except:
    # either we are importing into python, or CASA < 6
    if (os.getenv('CASAPATH') is not None):
        import casadef
        if casadef.casa_version >= '5.0.0':
            import casa as mycasa
            if 'cutool' in dir(mycasa):
                cu = mycasa.cutool()
                # join the numeric components, then append the revision, e.g. '5.4.0-70'
                casaVersion = '.'.join([str(i) for i in cu.version()[:-1]]) + '-' + str(cu.version()[-1])
            else:
                casaVersion = mycasa.casa['build']['version'].split()[0]
        else:
            casaVersion = casadef.casa_version
    else:
        # plain python session with no CASA installation visible
        casaVersion = None
# NOTE(review): if casaVersion is None this '<' comparison raises TypeError on
# Python 3 (None < str only worked on Python 2) -- confirm intended interpreters.
if casaVersion < '5.9.9':
    # CASA 4/5 expose tasks through the *_cli wrapper modules
    from taskinit import *
    from imfit_cli import imfit_cli as imfit
    try:
        from tclean_cli import tclean_cli as tclean
    except:
        print("checksource.py: Cannot import tclean")
else:
    # CASA 6: tasks and tools are regular python packages
    from casatasks import imfit
    from casatasks import tclean
    from casatools import msmetadata as msmdtool
from matplotlib.ticker import MultipleLocator # used by plotPointingResults
def writeOut(f, line):
    """Echo *line* to stdout and append it, newline-terminated, to open file *f*."""
    print(line)
    f.write('%s\n' % line)
def version(short=False):
    """
    Return the CVS revision identifier of this script.

    short: if True, return only the revision number (e.g. '1.23')
           instead of the full $Id$ string.
    """
    full = "$Id: checksource.py,v 1.23 2020/10/19 13:50:47 thunter Exp $"
    return full.split()[2] if short else full
def checksource(overwrite=True, verbose=False, subdir='', splitcal_vis=''):
    """
    Images the phasecal and check source in a manually-calibrated dataset and
    reports statistics.  Expects to find the *.split.cal measurement set and
    the corresponding .fluxscale file for it.
    Inputs:
    overwrite: if True, overwrite any existing image from a previous execution
    verbose: if True, print extra progress messages
    subdir: directory in which to write the png and text files (created if needed)
    splitcal_vis: defaults to *.cal, but can be specified as list of strings,
        or a comma-delimited string
    Outputs:
    png image plots of each calibrator, and an ASCII file for each dataset
    The name of the ASCII file, and a list of pngs are returned.
    """
    # Read the dataset(s) and get properties
    if (splitcal_vis == ''):
        vislist = glob.glob('*.cal')
    else:
        if (type(splitcal_vis) == str):
            vislist = splitcal_vis.split(',')
        else:
            vislist = splitcal_vis
    print("Checking datasets: ", vislist)
    mymsmd = au.createCasaTool(msmdtool)
    # Normalize subdir to end in '/', creating the directory if necessary
    if (len(subdir) > 0):
        if (os.path.exists(subdir)):
            if (subdir[-1] != '/'):
                subdir += '/'
        else:
            os.mkdir(subdir)
            if (subdir[-1] != '/'):
                subdir += '/'
    pnglist = []
    textfiles = []
    for vis in vislist:
        mymsmd.open(vis)
        freq = mymsmd.meanfreq(0, unit='GHz')
        # Check Source
        check = mymsmd.fieldsforintent('OBSERVE_CHECK_SOURCE*', True)[0]
        checkid = mymsmd.fieldsforintent('OBSERVE_CHECK_SOURCE*', False)[0]
        checkpos = mymsmd.phasecenter(checkid)
        # Phase calibrator
        phase = mymsmd.fieldsforintent('CALIBRATE_PHASE*', True)[0]
        phaseid = mymsmd.fieldsforintent('CALIBRATE_PHASE*', False)[0]
        phasepos = mymsmd.phasecenter(phaseid)
        if ('OBSERVE_TARGET#ON_SOURCE' in mymsmd.intents()):
            nScienceFields = len(mymsmd.fieldsforintent('OBSERVE_TARGET*', False))
            science = mymsmd.fieldsforintent('OBSERVE_TARGET*', True)[0]
            scienceid = mymsmd.fieldsforintent('OBSERVE_TARGET*', False)[0]
        else:
            nScienceFields = 0
        mymsmd.done()
        floatcell = au.pickCellSize(vis, maxBaselinePercentile=99,
                                    verbose=verbose)
        cell = au.pickCellSize(vis, maxBaselinePercentile=99, cellstring=True,
                               verbose=verbose)
#        imsize = int(au.nextValidImsize(int(5.0/floatcell))) # valid when we only had checksources for synthBeam < 0.25
        imsize = int(au.nextValidImsize(int(np.max([5.0, 5.0*au.estimateSynthesizedBeam(vis)])/floatcell)))
        print("imsize = ", imsize)
        region = 'circle[[%dpix , %dpix], 15pix ]' % (int(imsize/2), int(imsize/2))
        if False:
            # original method (for bands 3-6 only)
            cell = str(np.round(0.015*(100/freq), 3))+'arcsec'
            if freq < 116.0:
                imsize = [320, 320]
                region = 'circle[[160pix , 160pix] ,15pix ]'
            else:
                imsize = [680, 680]
                region = 'circle[[340pix , 340pix] ,15pix ]'

        ###################################
        # IMAGE
        ###################################
        weighting = 'briggs'
        robust = 0.5
        niter = 50
        threshold = '0.0mJy'
        spw = ''
        separation = au.angularSeparationOfTwoFields(vis, checkid, phaseid)
        if (nScienceFields > 0):
            separation_pcal_science = au.angularSeparationOfTwoFields(vis, scienceid, phaseid)
            separation_check_science = au.angularSeparationOfTwoFields(vis, scienceid, checkid)

        fieldtype = ['checksource', 'phasecal']
        field = [check, phase]
        for i, cal in enumerate(field):
            if (not os.path.exists(cal+'_'+vis+'.image') or overwrite):
                os.system('rm -rf '+cal+'_'+vis+'.*')
                if verbose:
                    print("Running tclean('%s', field='%s', cell=%s, imsize=%s, ...)" % (vis, cal, str(cell), str(imsize)))
                tclean(vis=vis,
                       imagename=cal+'_'+vis,
                       field=cal, spw=spw,
                       specmode='mfs',
                       deconvolver='hogbom',
                       imsize=imsize,
                       cell=cell,
                       weighting=weighting,
                       robust=robust,
                       niter=niter,
                       threshold=threshold,
                       interactive=False,
                       mask=region,
                       gridder='standard')
            png = subdir+fieldtype[i]+'_'+cal+'_'+vis+'.image.png'
            pnglist.append(png)
            au.imviewField(cal+'_'+vis+'.image', radius=30*floatcell,
                           contourImage=cal+'_'+vis+'.mask', levels=[1],
                           plotfile=png)

        ###################################
        # ANALYZE
        ###################################
        ###########
        # PHASE
        ###########
        imagename = phase+'_'+vis
        if verbose:
            print("Running imfit('%s', region='%s')" % (imagename+'.image', region))
        # Fit the phase source to get position and flux
        imagefit = imfit(imagename=imagename+'.image',
                         region=region)
        fitresults = au.imfitparse(imagefit)

        # Compare the Positions
        phasepos_obs = au.direction2radec(phasepos)
        if fitresults is not None:
            phasepos_fit = ','.join(fitresults.split()[:2])
            phasepos_diff = au.angularSeparationOfStrings(phasepos_obs, phasepos_fit, verbose=False)*3600.

        # Compare the Flux densities
        peakIntensity = au.imagePeak(imagename+'.image')
        selffluxfile = glob.glob('*.fluxscale')[0]
        fluxscaleResult = au.fluxscaleParseLog(selffluxfile, field=phase)
        # BUGFIX: also require a successful imfit; previously this block called
        # fitresults.split() and raised AttributeError when imfit had failed.
        if fluxscaleResult is not None and fitresults is not None:
            selfflux = fluxscaleResult[0][0]
            phaseflux_fit = float(fitresults.split()[2])
            phaseCoherence = 100*peakIntensity/phaseflux_fit
            phaseflux_diff = 100*(selfflux-phaseflux_fit)/selfflux

        # Print the final results and save to file
        textfile = subdir+'calimage_results_'+vis+'.txt'
        textfiles.append(textfile)
        f = open(textfile, 'w')
        f.write('\n*************************************************************\n\n')
        line = 'CHECK_SOURCE IMAGE ANALYSIS REPORT (version %s)\n' % version(short=True)
        writeOut(f, line)
        info = au.getFitsBeam(imagename+'.image')
        synthBeam = (info[0]*info[1])**0.5
        if fitresults is None:
            line = "Phasecal %s: imfit failed" % (phase)
        elif fluxscaleResult is not None:
            line = "Phasecal %s: Position difference = %s arcsec = %s synth.beam, Flux %% difference = %s"%(phase,au.roundFiguresToString(phasepos_diff,3), au.roundFiguresToString(phasepos_diff/synthBeam,3), au.roundFiguresToString(phaseflux_diff,3))
            writeOut(f, line)
            line = " coherence = peakIntensity/fittedFluxDensity = %s%%" % (au.roundFiguresToString(phaseCoherence,3))
        else:
            line = "Phasecal %s: Position difference = %s arcsec = %s synth.beam" % (phase,au.roundFiguresToString(phasepos_diff,3), au.roundFiguresToString(phasepos_diff/synthBeam,3))
        writeOut(f, line)
        f.close()
        if fluxscaleResult is None:
            print("Full checksource analysis is not supported if there is no flux calibrator")
            return textfiles, pnglist

        ###########
        # CHECK
        ###########
        imagename = check+'_'+vis
        # Fit the check source to get position and flux
        if verbose:
            print("Running imfit('%s', region='%s')" % (imagename+'.image', region))
        imagefit = imfit(imagename=imagename+'.image',
                         region=region)
        fitresults = au.imfitparse(imagefit, deconvolved=True)
        info = au.getFitsBeam(imagename+'.image')
        synthMajor, synthMinor = info[0:2]
        synthBeam = (info[0]*info[1])**0.5

        # Compare the Positions
        checkpos_obs = au.direction2radec(checkpos)
        if fitresults is not None:
            checkpos_fit = ','.join(fitresults.split()[:2])
            checkpos_diff = au.angularSeparationOfStrings(checkpos_obs, checkpos_fit,
                                                          verbose=False)*3600.

        # Compare the Flux densities
        selffluxfile = glob.glob('*.fluxscale')[0]
        results = au.fluxscaleParseLog(selffluxfile, field=check)
        peakIntensity = au.imagePeak(imagename+'.image')
        if (results is not None and fitresults is not None):
            selfflux = results[0][0]
            checkflux_fit = float(fitresults.split()[2])
            checkflux_diff = 100*(selfflux-checkflux_fit)/selfflux
            checkCoherence = 100*peakIntensity/checkflux_fit
        if fitresults is not None:
            if verbose:
                print("Checksource fitresults: ", fitresults)
            deconvolvedMajor = float(fitresults.split()[5])
            deconvolvedMinor = float(fitresults.split()[7])

        # Print the final results and save to file
        f = open(textfile, 'a')
        if fitresults is None:
            # BUGFIX: report the check source's name here (was 'phase')
            line = "Checksource %s: imfit failed" % (check)
        else:
            if (results is not None):
                line = "\nChecksource %s: Position difference = %s arcsec = %s synth.beam, Flux %% difference = %s"%(check ,au.roundFiguresToString(checkpos_diff,3),au.roundFiguresToString(checkpos_diff/synthBeam,3),au.roundFiguresToString(checkflux_diff,3))
                writeOut(f, line)
                line = " coherence = peakIntensity/fittedFluxDensity = %s%%" % (au.roundFiguresToString(checkCoherence,3))
            else:
                line = "\nChecksource %s: Position difference = %s arcsec = %s synth.beam" % (check ,au.roundFiguresToString(checkpos_diff,3),au.roundFiguresToString(checkpos_diff/synthBeam,3))
            writeOut(f, line)
            line = " beam size = %s x %s arcsec" % (au.roundFiguresToString(synthMajor,3), au.roundFiguresToString(synthMinor,3))
            writeOut(f, line)
            line = " apparent deconvolved size = %s x %s arcsec = %s synth.beam area" % (au.roundFiguresToString(deconvolvedMajor,2), au.roundFiguresToString(deconvolvedMinor,2), au.roundFiguresToString(deconvolvedMajor*deconvolvedMinor/(synthBeam**2),2))
            writeOut(f, line)
            line = " angular separation of phasecal to checksource = %s degree" % (au.roundFiguresToString(separation,3))
            writeOut(f, line)
            if (nScienceFields > 0):
                if (nScienceFields > 1):
                    modifier = 'first'
                else:
                    modifier = 'only'
                line = " angular separation of phasecal to %s science field (%d) = %s degree" % (modifier,scienceid,au.roundFiguresToString(separation_pcal_science,3))
                writeOut(f, line)
                line = " angular separation of checksource to %s science field (%d) = %s degree" % (modifier,scienceid,au.roundFiguresToString(separation_check_science,3))
                writeOut(f, line)
        f.close()
    # end 'for' loop over vislist
    return textfiles, pnglist
def offset(workingdir, vis='', plotfile='', imfitlog=False, spw='', verbose=False):
    """
    Takes a pipeline working directory and find all images of the checksource
    and produces a plot showing the relative directions of the first two science
    targets, the phase calibrator, and the checksource, and a vector
    showing the offset of the checksource from its catalog position (computed
    using the results of the CASA task imfit), and a
    text label showing the RAO and DECO offsets.
    workingdir: path to pipeline working directory
    vis: alternate location for a measurement set to consult (ignores *_target.ms)
    Looks first for *chk*iter2.image; if not found, then *chk*iter1.image
    plotfile: default = img+'_offset.png'
    imfitlog: if True, then request imfit to generate log files (*.imfit)
    spw: int or comma-delimited string, if specified, limit to this or these spws
    verbose: print more messages explaining what images it is operating on
    """
    mymsmd = au.createCasaTool(msmdtool)
    if verbose:
        print("workingdir: ", workingdir)
    imglist = sorted(glob.glob(os.path.join(workingdir, '*_chk.spw*image')))
    if len(imglist) == 0:
        print("No check source images found in this directory.")
        return
    # If iter2.image is found, then drop the iter1 version from the list
    for i in imglist:
        if i.find('iter2') > 0:
            imglist.remove(i.replace('iter2', 'iter1'))
    if verbose:
        print("Processing %d images:" % (len(imglist)))
        for i in imglist:
            print(i)
    if vis == '':
        # Gather all measurement sets in the working directory, excluding
        # the imaging-only *_target.ms files
        searchpath = os.path.join(workingdir, '*.ms')
        if verbose:
            print("searchpath: ", searchpath)
        allvislist = sorted(glob.glob(searchpath))
        if verbose:
            print("all vis found: ", allvislist)
        vislist = []
        for vis in allvislist:
            if vis.find('_target') < 0:
                vislist.append(vis)
    else:
        vislist = [vis]
    raos = []
    decos = []
    totals = []
    sourcenames = []
    # NOTE: 'vis' here is the last ms from the loop above (or the user value)
    spws = au.parseSpw(vis, spw)
    scienceSpws = au.getScienceSpws(vis, returnString=False)
    spws = np.intersect1d(scienceSpws, spws)
    if verbose:
        print("using spws: ", spws)
    # Keep only the images whose spw number passed the spw selection
    newimglist = []
    for img in imglist:  # there will be an image for each spw
        if img.find('spw') > 0 and spw != '':
            myspw = int(img.split('spw')[1].split('.')[0])
            if myspw in spws:
                sourcenames.append(au.imageSource(img))
                newimglist.append(img)
                if verbose:
                    print("Using %s" % (img))
            elif verbose:
                print("Skipping %s" % (img))
        else:
            sourcenames.append(au.imageSource(img))
            newimglist.append(img)
    sourcenames = np.unique(sourcenames)
    pngs = []
    print("vislist = ", vislist)
    imglist = newimglist
    for sourcename in sourcenames:
        for ispw, img in enumerate(imglist):  # there will be an image for each spw
            if 'spw' not in img:
                print("No spw in the image name: ", img)
                continue
            spw = int(img.split('spw')[1].split('.')[0])
            # find the first vis that observed this target as check source
            checkid = -1
            for vis in vislist:
                # print "Checking ", vis
                mymsmd.open(vis)
                if spw >= mymsmd.nspw():
                    # image spw number exceeds the ms's spw count: assume the
                    # ms was split and spws were renumbered from zero
                    print("Guessing that spw %d is spw %d in the split ms." % (spw, ispw))
                    spw = ispw
                if 'OBSERVE_CHECK_SOURCE#ON_SOURCE' in mymsmd.intents():
                    checksources = mymsmd.fieldsforintent('OBSERVE_CHECK_SOURCE*', True)
                else:
                    # older datasets tagged the check source as a delay calibrator
                    checksources = mymsmd.fieldsforintent('CALIBRATE_DELAY*', True)
                if sourcename in checksources:
                    check = checksources[0]
                    checkid = mymsmd.fieldsforname(sourcename)[0]
                    checkpos = mymsmd.phasecenter(checkid)
                    # Phase calibrator
                    phase = mymsmd.fieldsforintent('CALIBRATE_PHASE*', True)[0]
                    phaseid = mymsmd.fieldsforintent('CALIBRATE_PHASE*', False)[0]
                    phasepos = mymsmd.phasecenter(phaseid)
                    if ('OBSERVE_TARGET#ON_SOURCE' in mymsmd.intents()):
                        nScienceFields = len(mymsmd.fieldsforintent('OBSERVE_TARGET*', False))
                        science = mymsmd.fieldsforintent('OBSERVE_TARGET*', True)[0]
                        scienceid = mymsmd.fieldsforintent('OBSERVE_TARGET*', False)[0]
                        sciencepos = mymsmd.phasecenter(scienceid)
                        if nScienceFields > 1:
                            science2 = mymsmd.fieldsforintent('OBSERVE_TARGET*', True)[1]
                            science2id = mymsmd.fieldsforintent('OBSERVE_TARGET*', False)[1]
                            science2pos = mymsmd.phasecenter(science2id)
                    else:
                        nScienceFields = 0
                    # receiver band parsed from the spw name, e.g. 'ALMA_RB_03#...' -> '3'
                    rxBand = mymsmd.namesforspws(spw)[0].split('#')[1].split('_')[-1].lstrip('0')  # string
                    break
                else:
                    mymsmd.close()
            if checkid < 0:
                print("Could not find an ms that observed this check source: %s. Try including the vis parameter." % (sourcename))
                continue
            info = au.getFitsBeam(img)
            imsize = info[5]  # size in RA direction
            region = 'circle[[%dpix , %dpix], 15pix ]' % (int(imsize/2), int(imsize/2))
            freq = mymsmd.meanfreq(spw, unit='GHz')
            if imfitlog:
                logfile = img + '.imfit'
            else:
                logfile = ''
            imagefit = imfit(imagename=img, region=region, logfile=logfile)
            fitresults = au.imfitparse(imagefit, deconvolved=True)
            synthMajor, synthMinor = info[0:2]
            synthBeam = (info[0]*info[1])**0.5
            # Compare the Positions
            checkpos_obs = au.direction2radec(checkpos)
            if fitresults is not None:
                checkpos_fit = ','.join(fitresults.split()[:2])
                print("spw %d: checksource fitted position: " % (spw), checkpos_fit)
                result = au.angularSeparationOfStrings(checkpos_fit, checkpos_obs, True, verbose=False)
                checkpos_diff, deltaLong, deltaLat, deltaLongCosDec, pa = result
                # convert offsets from degrees to arcsec
                total = checkpos_diff*3600.
                rao = deltaLongCosDec*3600.
                deco = deltaLat*3600.
                print("spw %d: %s offset=%.4f arcsec, RAO=%+.4f, DECO=%+.4f, PA=%.1fdeg" % (spw, sourcename, total, rao, deco, pa))
                totals.append(total)
                raos.append(rao)
                decos.append(deco)
            mymsmd.close()
            # Field separations (deg) relative to the first science field.
            # NOTE(review): sciencepos is only set when an OBSERVE_TARGET intent
            # was found (nScienceFields > 0) -- confirm datasets always have one.
            if nScienceFields > 1:
                scienceDeg = np.degrees(au.angularSeparationOfDirections(science2pos, sciencepos, True))
            phaseDeg = np.degrees(au.angularSeparationOfDirections(phasepos, sciencepos, True))
            checkDeg = np.degrees(au.angularSeparationOfDirections(checkpos, sciencepos, True))
            if len(raos) == 1:
                # First successful fit: set up the figure and axis scaling;
                # yrange/x0/x1/xoffset/xscale are reused on later iterations.
                pl.clf()
                desc = pl.subplot(111)
                if nScienceFields > 1:
                    pl.plot([0, scienceDeg[3], phaseDeg[3], checkDeg[3]],
                            [0, scienceDeg[2], phaseDeg[2], checkDeg[2]], 'b+', ms=10, mew=2)
                else:
                    pl.plot([0, phaseDeg[3], checkDeg[3]], [0, phaseDeg[2], checkDeg[2]], 'b+', ms=10, mew=2)
                # NOTE(review): pl.hold was removed in matplotlib >= 3.0 --
                # this call presumably targets the older matplotlib bundled with CASA
                pl.hold(True)
                pl.axis('equal')
                yrange = np.diff(pl.ylim())[0]
                # reverse RA axis
                x0, x1 = pl.xlim()
                xoffset = 0.15*(x1-x0)
                # Keep a fixed scale among the spws/images
                xscale = 0.5*xoffset/np.max(np.abs([rao, deco]))
            # draw the arrow for each spw's image
            pl.arrow(checkDeg[3], checkDeg[2], rao*xscale, deco*xscale, lw=1, shape='full',
                     head_width=0.15*xoffset, head_length=0.2*xoffset, fc='b', ec='b')
            if len(raos) == 1:
                # Annotate the field layout only once, on the first iteration
                pl.xlim([x1+xoffset, x0-xoffset])
                yoffset = yrange*0.025
                pl.text(0, 0+yoffset, 'science', ha='center', va='bottom')
                if nScienceFields > 1:
                    pl.text(scienceDeg[3], scienceDeg[2]+yoffset, 'science (%.1fdeg)'%scienceDeg[0], ha='center', va='bottom')
                    pl.text(scienceDeg[3], scienceDeg[2]-yoffset, science2, ha='center', va='top')
                pl.text(phaseDeg[3], phaseDeg[2]+yoffset, 'phase (%.1fdeg)'%phaseDeg[0], ha='center', va='bottom')
                pl.text(checkDeg[3], checkDeg[2]+yoffset, 'check (%.1fdeg)'%checkDeg[0], ha='center', va='bottom')
                pl.text(0, 0-yoffset, science, ha='center', va='top')
                pl.text(phaseDeg[3], phaseDeg[2]-yoffset, phase, ha='center', va='top')
                pl.text(checkDeg[3], checkDeg[2]-yoffset, check, ha='center', va='top')
                pl.xlabel('RA offset (deg)')
                pl.ylabel('Dec offset (deg)')
                projCode = au.projectCodeFromDataset(vis)
                if type(projCode) == str:
                    # a bare string return signals failure to find the code
                    if verbose:
                        print("Did not find project code")
                    projCode = ''
                else:
                    projCode = projCode[0] + ', Band %s, ' % (rxBand)
                pl.title(projCode + os.path.basename(img).split('.spw')[0] + ', spws=%s'%spws, size=12)
                pl.ylim([pl.ylim()[0]-yoffset*8, pl.ylim()[1]+yoffset*8])
                minorLocator = MultipleLocator(0.5)  # degrees
                desc.xaxis.set_minor_locator(minorLocator)
                desc.yaxis.set_minor_locator(minorLocator)
        # end 'for' loop over spws/images
        if len(raos) < 1:
            # no successful fits for this source: nothing to summarize
            return
        pl.ylim([pl.ylim()[0]-yoffset*7, pl.ylim()[1]+yoffset*15])
        # Median and scatter of the per-spw offsets, in arcsec and in beams
        rao = np.median(raos)
        raostd = np.std(raos)
        deco = np.median(decos)
        decostd = np.std(decos)
        total = np.median(totals)
        totalstd = np.std(totals)
        raoBeams = rao / synthBeam
        raostdBeams = raostd / synthBeam
        decoBeams = deco / synthBeam
        decostdBeams = decostd / synthBeam
        # draw the median arrow in thick black line
        pl.arrow(checkDeg[3], checkDeg[2], rao*xscale, deco*xscale, lw=2,
                 shape='full', head_width=0.12*xoffset,
                 head_length=0.18*xoffset, ec='k', fc='k')
        print("median +- std: offset=%.4f+-%.4f, RAO=%.4f+-%.4f, DECO=%.4f+-%.4f" % (total, totalstd, rao, raostd, deco, decostd))
        # pl.text(checkDeg[3], checkDeg[2]-0.6*xoffset, '$\Delta\\alpha$: %+.4f"+-%.4f"' % (rao,raostd), ha='center')
        # pl.text(checkDeg[3], checkDeg[2]-0.85*xoffset, '$\Delta\\delta$: %+.4f"+-%.4f"' % (deco,decostd), ha='center')
        pl.text(0.05, 0.95, '$\Delta\\alpha$: %+.4f"+-%.4f" = %+.2f+-%.2f beams' % (rao, raostd, raoBeams, raostdBeams), ha='left', transform=desc.transAxes)
        pl.text(0.05, 0.91, '$\Delta\\delta$: %+.4f"+-%.4f" = %+.2f+-%.2f beams' % (deco, decostd, decoBeams, decostdBeams), ha='left', transform=desc.transAxes)
        if plotfile == '':
            png = img + '_offset.png'
        else:
            png = plotfile
        pl.savefig(png, bbox_inches='tight')
        pl.draw()
        pngs.append(png)
        print("Wrote ", png)
|
lucatelliREPO_NAMEmorphenPATH_START.@morphen_extracted@morphen-main@analysis_scripts@checksource.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/mesh3d/legendgrouptitle/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``mesh3d.legendgrouptitle.font.shadow`` property."""

    def __init__(
        self, plotly_name="shadow", parent_name="mesh3d.legendgrouptitle.font", **kwargs
    ):
        # Pop the default so an explicit caller-supplied edit_type still wins.
        edit_type = kwargs.pop("edit_type", "style")
        super(ShadowValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@mesh3d@legendgrouptitle@font@_shadow.py@.PATH_END.py
|
{
"filename": "map_circular_ed.ipynb",
"repo_name": "HITS-AIN/PINK",
"repo_path": "PINK_extracted/PINK-master/jupyter/devel/map_circular_ed.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt

# Rasterize a filled circle of diameter `dim` pixels into a binary image.
dim = 16
shape = (dim, dim)
half_dim = dim / 2  # circle radius; float division is intentional
data = np.zeros(shape)
for y in range(shape[0]):
    # Half-width of the circle's row at height y+0.5 (pixel center), from
    # the circle equation: delta^2 = r^2 - (r - (y+0.5))^2 expanded.
    delta = (2 * half_dim * (y+0.5) - (y+0.5)**2)**0.5
    for x in range(round(half_dim - delta), round(half_dim + delta)):
        data[y,x] = 1
fig, ax = plt.subplots(1,1)
ax.imshow(data)
fig.show()
```

```python
```
|
HITS-AINREPO_NAMEPINKPATH_START.@PINK_extracted@PINK-master@jupyter@devel@map_circular_ed.ipynb@.PATH_END.py
|
{
"filename": "ex_gam_new.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/sandbox/nonparametric/tests/ex_gam_new.py",
"type": "Python"
}
|
"""Example for GAM with Poisson Model and PolynomialSmoother
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Created on Fri Nov 04 13:45:43 2011
Author: Josef Perktold
"""
from statsmodels.compat.python import lrange
import time
import numpy as np
from scipy import stats
from statsmodels.sandbox.gam import Model as GAM
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
# Turn all numpy floating-point warnings into exceptions so that overflow
# during estimation fails loudly instead of silently corrupting results.
np.seterr(all='raise')
np.random.seed(8765993)
#seed is chosen for nice result, not randomly
#other seeds are pretty off in the prediction or end in overflow

#DGP: simple polynomial
order = 3
sigma_noise = 0.1
nobs = 1000
#lb, ub = -0.75, 3#1.5#0.75 #2.5
lb, ub = -3.5, 3
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
# two regressors: rescaled x1 and x2, stacked as columns
x = np.column_stack((x1/x1.max()*1, 1.*x2))
# full polynomial design matrix: powers 0..order of each column of x,
# flattened to shape (nobs, 2*(order+1))
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) #/ 4.
z = y_true #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)
# Select which family to run: 2 = Binomial, 3 = Poisson
example = 3

if example == 2:
    print("binomial")
    f = family.Binomial()
    # true mean on the response scale
    mu_true = f.link.inverse(z)
    #b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
    b = np.asarray([stats.bernoulli.rvs(p) for p in f.link.inverse(z)])
    b.shape = y.shape
    m = GAM(b, d, family=f)
    # NOTE(review): toc/tic are swapped relative to the usual convention,
    # but the printed difference is still the elapsed fit time.
    toc = time.time()
    m.fit(b)
    tic = time.time()
    print(tic-toc)
    #for plotting
    yp = f.link.inverse(y)
    p = b

if example == 3:
    print("Poisson")
    f = family.Poisson()
    #y = y/y.max() * 3
    yp = f.link.inverse(z)
    # draw Poisson counts with the true mean as intensity
    p = np.asarray([stats.poisson.rvs(val) for val in f.link.inverse(z)],
                   float)
    p.shape = y.shape
    m = GAM(p, d, family=f)
    toc = time.time()
    m.fit(p)
    tic = time.time()
    print(tic-toc)
    # fitted coefficients of each backfitting smoother
    for ss in m.smoothers:
        print(ss.params)
if example > 1:
    import matplotlib.pyplot as plt
    # iteration history of the backfitting algorithm (every third step)
    plt.figure()
    for i in np.array(m.history[2:15:3]):
        plt.plot(i.T)
    plt.figure()
    plt.plot(exog)
    #plt.plot(p, '.', lw=2)
    plt.plot(y_true, lw=2)

    y_pred = m.results.mu # + m.results.alpha #m.results.predict(d)
    # 2x2 panel: data, true mean, and GAM fit vs observation index ...
    plt.figure()
    plt.subplot(2,2,1)
    plt.plot(p, '.')
    plt.plot(yp, 'b-', label='true')
    plt.plot(y_pred, 'r-', label='GAM')
    plt.legend(loc='upper left')
    plt.title('gam.GAM Poisson')

    # ... and the same quantities plotted against z, x1 and x2 (sorted)
    counter = 2
    for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):
        sortidx = np.argsort(xx)
        #plt.figure()
        plt.subplot(2, 2, counter)
        plt.plot(xx[sortidx], p[sortidx], 'k.', alpha=0.5)
        plt.plot(xx[sortidx], yp[sortidx], 'b.', label='true')
        plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='GAM')
        plt.legend(loc='upper left')
        plt.title('gam.GAM Poisson ' + ii)
        counter += 1

    # GLM on the reduced design matrix, for comparison with the GAM fit
    res = GLM(p, exog_reduced, family=f).fit()

    #plot component, compared to true component
    x1 = x[:,0]
    x2 = x[:,1]
    f1 = exog[:,:order+1].sum(1) - 1 #take out constant
    f2 = exog[:,order+1:].sum(1) - 1
    plt.figure()
    #Note: need to correct for constant which is indeterminatedly distributed
    #plt.plot(x1, m.smoothers[0](x1)-m.smoothers[0].params[0]+1, 'r')
    #better would be subtract f(0) m.smoothers[0](np.array([0]))
    plt.plot(x1, f1, linewidth=2)
    plt.plot(x1, m.smoothers[0](x1)-m.smoothers[0].params[0], 'r')

    plt.figure()
    plt.plot(x2, f2, linewidth=2)
    plt.plot(x2, m.smoothers[1](x2)-m.smoothers[1].params[0], 'r')
    plt.show()
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@sandbox@nonparametric@tests@ex_gam_new.py@.PATH_END.py
|
{
"filename": "_autocolorscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/cone/_autocolorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``cone.autocolorscale`` property."""

    def __init__(self, plotly_name="autocolorscale", parent_name="cone", **kwargs):
        # Pop the defaults so explicit caller-supplied values still win.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {})
        super(AutocolorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@cone@_autocolorscale.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/histogram/marker/colorbar/title/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram.marker.colorbar.title"
_path_str = "histogram.marker.colorbar.title.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Font object

        Sets this color bar's title font. Note that the title's font
        used to be set by the now deprecated `titlefont` attribute.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.histogram.mark
            er.colorbar.title.Font`
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Fast construction path: a parent object building this child passes
        # itself via `_parent`; no validation or property population needed.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow-copy so popping keys below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.histogram.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.marker.colorbar.title.Font`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@histogram@marker@colorbar@title@_font.py@.PATH_END.py
|
{
"filename": "tracer_spectra.py",
"repo_name": "abaleato/CARDiAC",
"repo_path": "CARDiAC_extracted/CARDiAC-master/src/cardiac/tracer_spectra.py",
"type": "Python"
}
|
import numpy as np
import camb
from camb import model
from astropy.cosmology import Planck18
from scipy.interpolate import interp1d
try:
# A lot of this is copied from Nick Kokron's anzu repository
import pyccl as ccl
from anzu.emu_funcs import LPTEmulator
from velocileptors.LPT.cleft_fftw import CLEFT
from velocileptors.EPT.cleft_kexpanded_resummed_fftw import RKECLEFT
    def compute_velocileptors_spectra(cosmovec, snapscale, use_physical_densities=True,
                                      use_sigma_8=True, kecleft=True, cleftobj=None):
        '''
        Returns a spline object which computes the cleft component spectra. Computed either in
        "full" CLEFT or in "k-expanded" CLEFT which allows for faster redshift dependence.

        Args:
            cosmovec : array-like
                Vector containing cosmology in the order (ombh2, omch2, w0, ns, sigma8, H0, Neff).
                If self.use_sigma_8 != True, then ln(A_s/10^{-10}) should be provided instead of sigma8.
            snapscale : float
                scale factor
            kecleft: bool
                If True, use "k-expanded" CLEFT (RKECLEFT): the Lagrangian table is
                built once from the z=1 linear spectrum and re-scaled to `snapscale`
                via the linear growth factor. If False, run the full CLEFT
                calculation from scratch at `snapscale`.
            cleftobj :
                Previously built RKECLEFT object to reuse (kecleft path only);
                pass None to construct it.

        Returns:
            cleft_aem : InterpolatedUnivariateSpline
                Spline that computes basis spectra as a function of k
            cleftobj :
                The CLEFT/RKECLEFT object actually used (reusable by the caller).
        '''
        # Build the pyccl cosmology from the parameter vector. Densities may be
        # physical (omega*h^2) or fractional, and the amplitude may be sigma8 or
        # ln(1e10 A_s), selected by the two flags.
        if use_physical_densities:
            if use_sigma_8:
                cosmo = ccl.Cosmology(Omega_b=cosmovec[0] / (cosmovec[5] / 100) ** 2,
                                      Omega_c=cosmovec[1] /
                                      (cosmovec[5] / 100) ** 2,
                                      h=cosmovec[5] / 100, n_s=cosmovec[3],
                                      w0=cosmovec[2], Neff=cosmovec[6],
                                      sigma8=cosmovec[4])
            else:
                cosmo = ccl.Cosmology(Omega_b=cosmovec[0] / (cosmovec[5] / 100) ** 2,
                                      Omega_c=cosmovec[1] /
                                      (cosmovec[5] / 100) ** 2,
                                      h=cosmovec[5] / 100, n_s=cosmovec[3],
                                      w0=cosmovec[2], Neff=cosmovec[6],
                                      A_s=np.exp(cosmovec[4]) * 1e-10)
        else:
            if use_sigma_8:
                cosmo = ccl.Cosmology(Omega_b=cosmovec[0],
                                      Omega_c=cosmovec[1] - cosmovec[0],
                                      h=cosmovec[5] / 100, n_s=cosmovec[3],
                                      w0=cosmovec[2], Neff=cosmovec[6],
                                      sigma8=cosmovec[4])
            else:
                cosmo = ccl.Cosmology(Omega_b=cosmovec[0],
                                      Omega_c=cosmovec[1] - cosmovec[0],
                                      h=cosmovec[5] / 100, n_s=cosmovec[3],
                                      w0=cosmovec[2], Neff=cosmovec[6],
                                      A_s=np.exp(cosmovec[4]) * 1e-10)
        # Wavenumber grid in h/Mpc; the h factors below convert ccl's 1/Mpc units.
        k = np.logspace(-3, 1, 1000)
        if kecleft:
            # If using kecleft, check that we're only varying the redshift
            if cleftobj is None:
                # Do the full calculation again, as the cosmology changed.
                pk = ccl.linear_matter_power(
                    cosmo, k * cosmo['h'], 1) * (cosmo['h']) ** 3
                # Function to obtain the no-wiggle spectrum.
                # Not implemented yet, maybe Wallisch maybe B-Splines?
                # pnw = p_nwify(pk)
                # For now just use Stephen's standard savgol implementation.
                cleftobj = RKECLEFT(k, pk)
            # Adjust growth factors
            D = ccl.background.growth_factor(cosmo, snapscale)
            cleftobj.make_ptable(D=D, kmin=k[0], kmax=k[-1], nk=1000)
            cleftpk = cleftobj.pktable.T
        else:
            # Using "full" CLEFT, have to always do calculation from scratch
            pk = ccl.linear_matter_power(
                cosmo, k * cosmo['h'], snapscale) * (cosmo['h']) ** 3
            cleftobj = CLEFT(k, pk, N=2700, jn=10, cutoff=1)
            cleftobj.make_ptable()
            cleftpk = cleftobj.pktable.T
            # Different cutoff for other spectra, because otherwise different
            # large scale asymptote
            cleftobj = CLEFT(k, pk, N=2700, jn=5, cutoff=10)
            cleftobj.make_ptable()
            cleftpk[3:, :] = cleftobj.pktable.T[3:, :]
        # NOTE(review): the divisions below appear to undo velocileptors'
        # internal coefficient conventions for the bias basis spectra -
        # confirm against the anzu emulator's expected normalization.
        cleftpk[2, :] /= 2
        cleftpk[6, :] /= 0.25
        cleftpk[7, :] /= 2
        cleftpk[8, :] /= 2
        # Row 0 of pktable is k, so the spline maps k -> all basis spectra.
        cleftspline = interp1d(cleftpk[0], cleftpk, fill_value='extrapolate')
        return cleftspline, cleftobj
    def get_galaxy_ps_anzu(bvec, k, zs_sampled, halomatter=False):
        '''
        Calculate the galaxy power spectrum in the Planck 18 cosmology
        - Inputs:
            * bvec = list containing [b1, b2, bs2, bnabla2, SN] to be fed to Anzu to obtain Pgg
            * k = np array of floats. k at which to evaluate Pkgg.
            * zs_sampled = redshifts at which to evaluate the Anzu prediction
            * halomatter (optional) = Bool. If False, get gg spectrum. If True, get galaxy-matter cross spectrum
        - Returns:
            * Pk = array of shape (len(k), len(zs_sampled)).
              NOTE(review): columns follow the cosmo_vec row order, which is the
              REVERSE of zs_sampled (np.vstack below prepends each new row) -
              confirm this ordering against callers.
        '''
        emu = LPTEmulator()
        h = Planck18.H0.value / 100.
        # One cosmology row per requested redshift, with fixed Planck 2018
        # parameters and scale factor a = 1/(1+z) in the last column.
        for i, z in enumerate(zs_sampled):
            a = 1 / (1 + z)
            if i == 0:
                cosmo_vec = np.atleast_2d([Planck18.Ob0 * h ** 2, Planck18.Odm0 * h ** 2, -1, 0.966, 0.812,
                                           Planck18.H0.value, 3.046, a])  # Values from Planck 2018
            else:
                # NOTE(review): vstack([new, old]) prepends, reversing the order.
                cosmo_vec = np.vstack([np.atleast_2d([Planck18.Ob0 * h ** 2, Planck18.Odm0 * h ** 2, -1, 0.966, 0.812,
                                                      Planck18.H0.value, 3.046, a]), cosmo_vec])
        # 10 velocileptors basis spectra on the emulator's internal 700-point k grid.
        lpt_spec = np.zeros((len(cosmo_vec), 10, 700))
        # Evaluate predictions at the relevant redshifts
        for i, cv in enumerate(cosmo_vec):
            lpt_interp, cleftobk = compute_velocileptors_spectra(cv, cv[-1],
                                                                 use_physical_densities=emu.use_physical_densities,
                                                                 use_sigma_8=emu.use_sigma_8, kecleft=False)
            lpt_spec[i, ...] = lpt_interp(emu.k)[1:11, :]
        emu_spec = emu.predict(k, cosmo_vec, spec_lpt=lpt_spec)
        Pk = np.zeros((len(k), len(cosmo_vec[:, -1])))
        if halomatter:
            # NOTE(review): basis_to_full appears to return the gg and gm blocks
            # concatenated; skipping the first len(k) entries selects the
            # galaxy-matter block - confirm against anzu's basis_to_full.
            min_idx = len(k)
        else:
            min_idx = 0
        for i, z in enumerate(cosmo_vec[:, -1]):
            Pk[:, i] = emu.basis_to_full(k, bvec, emu_spec[i, :, :], halomatter=halomatter)[min_idx:]
        return Pk
except ImportError:
print('Anzu/velocileptors/ccl not installed. Proceeding just with CAMB matter PS x linear galaxy bias')
def get_galaxy_ps(g_bias, zs_sampled, g2_bias=None, gbias_mode='linear'):
    '''
    Calculate the galaxy power spectrum
    - Inputs:
        * g_bias = galaxy bias. if gbias_mode=='anzu', a list containing Lagrangian bias [b1, b2, bs2, bnabla2, SN],
                if gbias_mode=='linear', a float with linear bias value at center of dndz
        * zs_sampled = redshifts at which to evaluate the prediction
        * g2_bias (optional) = Like g_bias, but for the second galaxy sample in spectrum. If None, get galaxy-matter cross-spectrum.
        * gbias_mode (optional) = 'linear' or 'anzu'. Galaxy bias prescription
    - Returns:
        * k = np array of wavenumbers
        * Pk = power spectrum sampled at (k, redshift)
    '''
    # BUGFIX: `halomatter` used to be assigned only in the g2_bias-is-None
    # branch, so supplying a second galaxy sample in 'anzu' mode raised a
    # NameError that a bare `except:` silently swallowed.
    halomatter = g2_bias is None
    # ToDo: Choose k's more systematically
    k = np.logspace(-3, 0, 200)
    if gbias_mode == 'anzu':
        try:
            # TODO: implement different galaxy bias for two samples in anzu galaxy cross-spectrum
            Pk = get_galaxy_ps_anzu(g_bias, k, zs_sampled, halomatter=halomatter)
            return k, Pk
        except Exception:
            # NameError when the optional emulator stack failed to import,
            # or any anzu-side failure: fall through to the linear-bias path.
            print('Anzu/velocileptors not installed. Proceeding just with CAMB matter PS x linear bias')
    # Fallback: CAMB nonlinear matter power spectrum times linear bias.
    k, pk_nonlin = get_matter_ps(zs_sampled)
    Pk = np.swapaxes(pk_nonlin, 0, 1)  # reorder to (k, z)
    try:
        if g2_bias is None:
            # Galaxy-matter cross-spectrum using linear galaxy bias
            return k, Pk * g_bias
        else:
            # Galaxy auto-spectrum using linear galaxy bias
            return k, Pk * g_bias * g2_bias
    except (TypeError, ValueError):
        # A Lagrangian bias list cannot multiply the matter spectrum.
        print('Galaxy bias must be a linear (i.e. a single number) when not using anzu')
def get_matter_ps(redshifts):
    '''
    Compute the nonlinear (Halofit) matter power spectrum with CAMB using
    Planck18 background parameters and ns=0.966.
    - Inputs:
        * redshifts = array-like of redshifts at which to evaluate P(k)
    - Returns:
        * k_nonlin = wavenumbers with factors of h removed (1/Mpc)
        * pk_nonlin = nonlinear P(k) with h^3 removed; indexed (z, k) -
          NOTE(review): callers such as get_galaxy_ps swap the axes, confirm.
    '''
    #Now get matter power spectra and sigma8 at redshifts between 0 and sufficiently behind the perturbed sources
    pars = camb.CAMBparams()
    h = Planck18.H0.value/100.
    pars.set_cosmology(H0=Planck18.H0.value, ombh2=Planck18.Ob0 * h**2, omch2=Planck18.Odm0 * h**2)
    pars.InitPower.set_params(ns=0.966)
    #Note non-linear corrections couples to smaller scales than you want
    pars.set_matter_power(redshifts=redshifts, kmax=2.0)
    #Linear spectra
    pars.NonLinear = model.NonLinear_none
    results = camb.get_results(pars)
    # NOTE(review): the linear spectrum and sigma8 below are computed but not
    # returned or used - kept for parity with the original script.
    kh, z, pk = results.get_matter_power_spectrum(minkh=1e-4, maxkh=1e2, npoints = 500)
    s8 = np.array(results.get_sigma8())
    #Non-Linear spectra (Halofit)
    pars.NonLinear = model.NonLinear_both
    results.calc_power_spectra(pars)
    kh_nonlin, z_nonlin, pk_nonlin = results.get_matter_power_spectrum(minkh=1e-4, maxkh=1e2, npoints = 500)
    # Remove factors of h
    k_nonlin = kh_nonlin * h
    pk_nonlin /= h**3
    return k_nonlin, pk_nonlin
|
abaleatoREPO_NAMECARDiACPATH_START.@CARDiAC_extracted@CARDiAC-master@src@cardiac@tracer_spectra.py@.PATH_END.py
|
{
"filename": "drive_ConeRot_AS209.py",
"repo_name": "simoncasassus/ConeRot",
"repo_path": "ConeRot_extracted/ConeRot-master/scripts/drive_ConeRot_AS209.py",
"type": "Python"
}
|
import sys
import numpy as np
import re
from copy import copy, deepcopy
import os
from optparse import OptionParser
HOME = os.environ.get('HOME')
include_path = '/home/simon/common/python/include/'
#include_path=HOME+'/common/python/conemaps-git/'
sys.path.append(include_path)
import ConeRot.MasterDConeMaps as MasterDConeMaps
distance = 121.246  # distance to AS 209 - presumably parsecs, TODO confirm
sourcedir = '/home/simon/AS209/guvmem_runs/12CO21/momentmaps/AS209_CO21_modout_lS0.00032_lL0.0_dgauss/'
workdir = 'work_modout_lS0.00032_lL0.0_dgauss_numba_c'
# Radial domain (presumably arcsec) for the global orientation fit.
a_min = 0.7
a_max = 1.0
# Wider radial domain used when fitting independent annular regions.
a_min_regions = 0.3
a_max_regions = 2.3
PA = 86.7  # continuum
# NOTE(review): inc > 90 deg (180 - 35.3) encodes the rotation sense -
# confirm against ConeRot's orientation convention.
inc = (180. - 35.3) * np.pi / 180.  # teague
tanpsi = 0.
# Reference orientation values from a previous run, kept for comparison:
#PA (85.74220998465579, 0.13529773191532968, 0.1327572377106918) 85.76418586465633
#inc (145.09924886452643, 0.07437000793240145, 0.08022007127306097) 145.10917890648307
#dra_off (0.0003049257218234983, 0.0002611812938863823, 0.00023822235134958746) 0.00028508059219256446
#ddec_off (0.0006638206724156879, 0.0002150304853814315, 0.0001963689464940382) 0.0006907247091780622
# #####################################################################
# #####################################################################
# By default plot over the full "regions" radial range.
a_min_plot = a_min_regions
a_max_plot = a_max_regions
# Command-line switches controlling the run.
# NOTE(review): optparse has been deprecated in favour of argparse since
# Python 2.7/3.2; kept as-is to preserve the existing interface.
parser = OptionParser()
parser.add_option("-r",
                  "--retrograde",
                  action="store_true",
                  dest="RetroGrade",
                  default=False,
                  help="toggle retrograde orientation (RT trials only)")
parser.add_option("-f",
                  "--forceorient",
                  action="store_true",
                  dest="ForceOrient",
                  default=False,
                  help="toggle force input orientation in FixPAinc run")
parser.add_option("-F",
                  "--farside",
                  action="store_true",
                  dest="DoFarSideOnly",
                  default=False,
                  help="toggle far side only")
parser.add_option("-M",
                  "--MCMC",
                  action="store_true",
                  dest="RunMCMCmaster",
                  default=False,
                  help="toggle MCMC optim")
parser.add_option("-d",
                  "--dry-run",
                  action="store_false",
                  dest="RunMaster",
                  default=True,
                  help="toggle dry run")
parser.add_option("-o",
                  "--NoVarOrient",
                  action="store_false",
                  dest="DoVarOrient",
                  default=True,
                  help="no variable PA, inc profile, use with --forceorient")
parser.add_option("-R",
                  "--Regions",
                  action="store_true",
                  dest="Regions",
                  default=False,
                  help="use regions")
parser.add_option("-m",
                  "--Merid",
                  action="store_true",
                  dest="DoMerid",
                  default=False,
                  help="use meridional flows")
#parser.add_option("-q", "--quiet",
#                  action="store_false", dest="verbose", default=True,
#                  help="don't print status messages to stdout")
(options, args) = parser.parse_args()
# Echo the parsed switches into the run log.
print("options.RetroGrade:", options.RetroGrade)
print("options.ForceOrient:", options.ForceOrient)
print("options.DoFarSideOnly:", options.DoFarSideOnly)
print("options.RunMCMCmaster:", options.RunMCMCmaster)
print("options.RunMaster:", options.RunMaster)
print("options.DoVarOrient:", options.DoVarOrient)
print("options.DoMerid:", options.DoMerid)
######################################################################

exec_master_script = sys.argv[0]

RunMCMCmaster = options.RunMCMCmaster
RunMaster = options.RunMaster
Regions = options.Regions

######################################################################

# Strip a single trailing slash from workdir so the option suffixes can be
# appended before the '/' is re-added below.
# BUGFIX: the original tested re.match(...) but then read the undefined
# name `m`, raising NameError whenever workdir ended in '/'; the pattern
# also used the invalid string escape '\/' (now a raw string).
m = re.match(r'^(.*)/$', workdir)
if m:
    workdir = m.group(1)
# Mirror the run flags into the bookkeeping variables.
ClearWorkDir = bool(RunMaster)
DoExec = RunMaster if options.DoVarOrient else False
PlotVarPAinc = bool(options.DoVarOrient)

# Tag the working directory with one suffix per active option, in a fixed
# order, then close it with a trailing slash.
_suffix_flags = [
    ('_nomerid', not options.DoMerid),
    ('_Regions', Regions),
    ('_ForceOrient', options.ForceOrient),
    ('_FarSide', options.DoFarSideOnly),
    ('_MCMC', options.RunMCMCmaster),
]
for _tag, _enabled in _suffix_flags:
    if _enabled:
        workdir += _tag
workdir += '/'

print("workdir>>>> ", workdir)
# Master configuration for the cone-map optimization. The keyword values
# below are data-set specific (AS 209 12CO 2-1 moment maps).
S = MasterDConeMaps.Setup(
    filename_source=sourcedir + 'im_g_v0.fits',
    filename_errormap=sourcedir + 'im_g_v0_errormap.fits',
    workdir=workdir,
    DoErrorMap=True,
    typicalerror=0.1,  # km/s
    # NOTE(review): ComputeSystVelo=True recomputes the systemic velocity even
    # though vsyst is also given - the comment below suggests it should be run
    # once and then disabled; confirm intended precedence.
    ComputeSystVelo=True,  # best run this only once, then pass value in vsyst
    vsyst=4.67,
    #fieldscale=1., #1.
    fieldscale=1.5,  #1.
    pixscale_factor=3.0,  #3.
    unitscale=1.,
    PA=PA,
    inc=inc,
    tanpsi=-0.3,
    # Half-widths of the search domains around the initial PA/inc/tanpsi.
    rangePA=20.,
    rangeinc=30. * np.pi / 180.,
    rangetanpsi=0.6,
    a_min=a_min,  #0.17
    a_max=a_max,  #0.27
    DoRegions=Regions,
    RestrictAvToRadialDomain=False,
    a_min_regions=a_min_regions,
    a_max_regions=a_max_regions,
    n_abins=7,  # 6 #minimum 3 for overlap
    DoAccr=False,
    DoAccr_fixPAinc=False,
    DoMerid_fixPAinc=options.DoMerid,
    ClearWorkDir=ClearWorkDir,
    DoExec=DoExec,  # Execute the full optimization
    DumpAllFitsFiles=False,
    #x_center=0.002, # from the continuum
    #y_center=0.012,
    x_center=0.0,
    y_center=0.0,
    bmaj=134E-3,  # arcsec
    bmin=100E-3,  # arcsec
    DoConjGrad=True,
    DoMinuit=False,  # BROKEN
    DoFarSideOnly=options.DoFarSideOnly,
    RunMCMC=RunMCMCmaster,
    RecoverMCMC=RunMCMCmaster,  # RunMCMC
    n_cores_MCMC=90,  #30
    Nit=400,
    burn_in=250,
    exec_master_script=exec_master_script)
# Optimization domain: (name, (lo, hi)) bounds centred on the initial values.
S.domain = (('PA', (S.PA - S.rangePA / 2., S.PA + S.rangePA / 2.)),
            ('inc', (S.inc - S.rangeinc / 2., S.inc + S.rangeinc / 2.)),
            ('tanpsi', (S.tanpsi - S.rangetanpsi / 2.,
                        S.tanpsi + S.rangetanpsi / 2.)))
if S.DoExec:
    S.Run()
# Second pass with the orientation held fixed. NOTE(review): copy() is a
# shallow copy, so SFixOrient shares mutable attributes with S - confirm
# RunFixOrient does not mutate shared state needed later.
SFixOrient = copy(S)
if Regions:
    # NOTE(review): DoFixOrient is not passed to Setup above, so it must be
    # an attribute defaulted inside MasterDConeMaps - confirm.
    if S.DoFixOrient:
        SFixOrient.RunFixOrient(ForceGlobalOrient=options.ForceOrient,
                                Force_allradsPA=S.PA,
                                Force_allradsinc=S.inc)
else:
    # No regions: just point at the fixed-PA/inc output directory.
    SFixOrient.workdir = re.sub('/$', '_fixPAinc/', SFixOrient.workdir)
# Radial-profile figures (rotation curve and PA/inc/tanpsi profiles).
import ConeRot.RotOrient.PlotRotorient

# Gap locations (presumably arcsec) to annotate on the radial profiles.
rgaps = [[0.18, 0.4], [1.6, 1.7]]
#pdl> p ( (190.-75/2.)/$d)
#1.48058252427184
#pdl> p ( (190.+75/2.)/$d)
#2.20873786407767
# Walsk+ 2014

if Regions:
    # Two renderings of the same figure: with and without the au scale bar.
    vsys = ConeRot.RotOrient.PlotRotorient.execfig(
        S.workdir,
        SFixOrient.filename_source,
        distance=distance,
        ForceGlobalOrient=options.ForceOrient,
        Force_allradsPA=S.PA,
        Force_allradsinc=S.inc,
        WithComparData=False,
        WithComparRadTWind=False,
        PlotVarPAinc=PlotVarPAinc,
        rgaps=rgaps,
        title='AS209',
        DoAUBar=True,
        alabel='',
        PlotVarOrient=True)

    vsys = ConeRot.RotOrient.PlotRotorient.execfig(
        S.workdir,
        SFixOrient.filename_source,
        distance=distance,
        ForceGlobalOrient=options.ForceOrient,
        Force_allradsPA=S.PA,
        Force_allradsinc=S.inc,
        WithComparData=False,
        WithComparRadTWind=False,
        PlotVarPAinc=PlotVarPAinc,
        rgaps=rgaps,
        title='AS209',
        DoAUBar=False,
        alabel='',
        PlotVarOrient=True)
    print("returned from execfig vsys", vsys)

    #vsys=ConeRot.RotOrient.PlotRotorient.execfig(S.workdir,SFixOrient.filename_source, distance=distance, ForceGlobalOrient=options.ForceOrient, Force_allradsPA=S.PA, Force_allradsinc=S.inc, WithComparData= False, WithComparRadTWind=False, PlotVarPAinc=PlotVarPAinc, rgaps=rgaps,title='HD100546',DoAUBar=False,alabel='',PlotVarOrient=False)
    print("returned from execfig vsys", vsys)
else:
    # No regions: single figure over the fixed radial range, global vrot only.
    vsys = ConeRot.RotOrient.PlotRotorient.execfig(
        S.workdir,
        SFixOrient.filename_source,
        distance=distance,
        ForceGlobalOrient=options.ForceOrient,
        Force_allradsPA=S.PA,
        Force_allradsinc=S.inc,
        WithComparData=False,
        WithComparRadTWind=False,
        PlotVarPAinc=PlotVarPAinc,
        VarOrient=False,
        a_min=a_min_plot,
        a_max=a_max_plot,
        Plot_vRot_Global=True,
        Plot_vRot_VarOrient=False,
        Plot_vRot_VarOrient_FixIncPA=False,
        rgaps=rgaps)

# Propagate the systemic velocity returned by the plotting stage.
SFixOrient.vsyst = vsys
S.vsyst = vsys
# Summary figures: sky-plane ("allrads") and face-on deprojections of the
# velocity centroid maps, with and without restriction to the fitted range.
import ConeRot.KineSummaryCompact

# Optional continuum overlay; False disables it (path kept for reference).
file_continuum = False  # './continuum/median_restored_finetav_fullim.fits'

ConeRot.KineSummaryCompact.exec_summary_allrads(SFixOrient.workdir,
                                                SFixOrient.filename_source,
                                                file_continuum=file_continuum,
                                                vsyst=S.vsyst,
                                                AllRads=False,
                                                a_min=a_min_plot,
                                                a_max=a_max_plot)

ConeRot.KineSummaryCompact.exec_summary_allrads(SFixOrient.workdir,
                                                SFixOrient.filename_source,
                                                file_continuum=file_continuum,
                                                vsyst=S.vsyst,
                                                AllRads=True,
                                                a_min=a_min_plot,
                                                a_max=a_max_plot)

file_continuum = False  # './continuum/median_restored_finetav_z_stretched.fits'

ConeRot.KineSummaryCompact.exec_summary_faceon(SFixOrient.workdir,
                                               SFixOrient.filename_source,
                                               file_continuum=file_continuum,
                                               vsyst=S.vsyst,
                                               AllRads=False,
                                               a_min=a_min_plot,
                                               a_max=a_max_plot,
                                               Zoom=True,
                                               side=1.5)

ConeRot.KineSummaryCompact.exec_summary_faceon(SFixOrient.workdir,
                                               SFixOrient.filename_source,
                                               file_continuum=file_continuum,
                                               vsyst=S.vsyst,
                                               AllRads=True,
                                               a_min=a_min_plot,
                                               a_max=a_max_plot,
                                               Zoom=False,
                                               side=1.5)

ConeRot.KineSummaryCompact.exec_summary_faceon(SFixOrient.workdir,
                                               SFixOrient.filename_source,
                                               file_continuum=file_continuum,
                                               vsyst=S.vsyst,
                                               AllRads=True,
                                               a_min=a_min_plot,
                                               a_max=a_max_plot,
                                               Zoom=True,
                                               side=3.,
                                               UseScatter=False)
|
simoncasassusREPO_NAMEConeRotPATH_START.@ConeRot_extracted@ConeRot-master@scripts@drive_ConeRot_AS209.py@.PATH_END.py
|
{
"filename": "compute_3pcf_correction_function.py",
"repo_name": "oliverphilcox/RascalC",
"repo_path": "RascalC_extracted/RascalC-master/python/compute_3pcf_correction_function.py",
"type": "Python"
}
|
### Function to fit a model to the 3PCF survey correction function, defined as the ratio between model and true RR pair counts for a single survey. This fits a piecewise polynomial model to the data.
## NB: Input RRR counts should be normalized by summed cubed random weights here.
## NB: Assume mu is in [-1,1] limit here
import sys
import os
import numpy as np
import scipy.spatial as ss
# PARAMETERS
_USAGE = ("Usage python compute_3pcf_correction_function.py {GALAXY_FILE} "
          "{BIN_FILE} {OUTPUT_DIR} {PERIODIC} [{RRR_COUNTS}]")


def _usage_exit():
    """Print the command-line usage string and abort with status 1."""
    print(_USAGE)
    sys.exit(1)


if len(sys.argv) not in (5, 6):
    _usage_exit()

gal_file = str(sys.argv[1])
binfile = str(sys.argv[2])
outdir = str(sys.argv[3])
periodic = int(sys.argv[4])

if periodic:
    # Periodic boxes need no RRR counts: the correction function is unity.
    if len(sys.argv) != 5:
        _usage_exit()
    print("\nAssuming periodic boundary conditions - so Phi(r,mu) = 1 everywhere")
else:
    if len(sys.argv) != 6:
        _usage_exit()
    RRR_file = str(sys.argv[5])
## Load galaxies
print("\nLoading galaxies")
all_gal = np.loadtxt(gal_file)
# Columns: x, y, z positions and a per-galaxy weight.
gal_x = all_gal[:,0]
gal_y = all_gal[:,1]
gal_z = all_gal[:,2]
gal_w = all_gal[:,3]
# NOTE(review): inverts FKP-style weights w = 1/(1 + n*P0) with a hard-coded
# P0 = 20000 to recover the number density n - confirm this matches the
# weighting used to build the catalogue.
gal_n = (1./gal_w-1.)/20000.
N_gal = len(all_gal)
w_bar = np.mean(gal_w)
## Find survey volume via ConvexHull in Scipy
hull = ss.ConvexHull(np.vstack([gal_x,gal_y,gal_z]).T)
print('\nSurvey volume is approximately: %.2f (Gpc/h)^3'%(hull.volume/1e9))
V=hull.volume # in (Mpc/h)^3
## Galaxy number density
n_bar = N_gal/V
# Mean of n^3 w^3: uniform for periodic boxes, per-galaxy otherwise.
if periodic:
    nw3_bar = n_bar**3*w_bar**3
else:
    nw3_bar = np.mean(gal_n**3*gal_w**3)
# Load in binning files
r_bins = np.loadtxt(binfile)
n=len(r_bins)
## Define normalization constant
norm = 6.*V*nw3_bar
print("Normalizing output survey correction by %.2e"%norm);
if periodic:
    ## Output periodic survey correction function
    # Only the ell=0 multipole is non-zero (Phi = 1 everywhere).
    phi_inv_mult = np.zeros([n,n,7]);
    ## Set to correct periodic survey values
    phi_inv_mult[:,:,0]=1.
else:
    from scipy.special import legendre
    ## Load triple counts and renormalize
    # Undo the normalization by the summed cubed random weights.
    tmp_triple_counts = np.loadtxt(RRR_file)*np.sum(gal_w)**3
    # Compute number of angular bins in data-set
    # The file is a flat array with collapsed index (b1*n + b2)*m + mu_bin.
    m = (len(tmp_triple_counts)//n)//n
    assert len(tmp_triple_counts)%m==0, "Incorrect RRR format"
    # Angular bins span mu in [-1, 1]; mu_cen are the bin centres.
    mu_all = np.linspace(-1,1,m+1)
    mu_cen = 0.5*(mu_all[1:]+mu_all[:-1])
    RRR_true = np.zeros([n,n,m])
    ## load in RRR counts (and add symmetries)
    # Symmetrize over the two radial bins, splitting each count in half.
    for i in range(len(tmp_triple_counts)):
        RRR_true[(i//m)//n,(i//m)%n,i%m] += tmp_triple_counts[i]*0.5
        RRR_true[(i//m)%n,(i//m)//n,i%m] += tmp_triple_counts[i]*0.5
    ## Now construct Legendre moments
    # Legendre moments ell = 0..6 of the mu dependence in each radial-bin pair.
    leg_triple = np.zeros([n,n,7])
    for a in range(n):
        for b in range(n):
            for ell in range(7):
                # (NB: we've absorbed a factor of delta_mu into RRR_true here)
                leg_triple[a,b,ell]+=np.sum(legendre(ell)(mu_cen)*RRR_true[a,b,:])*(2.*ell+1.)
    # Volume of the spherical shell for radial bin b.
    vol_r = lambda b: 4.*np.pi/3.*(r_bins[b,1]**3.-r_bins[b,0]**3.)
    ## Construct inverse multipoles of Phi
    phi_inv_mult = np.zeros([n,n,7])
    for b1 in range(n):
        for b2 in range(n):
            phi_inv_mult[b1,b2,:] = leg_triple[b1,b2,:]/(3.*nw3_bar*V*vol_r(b1)*vol_r(b2))
    ## Check all seems reasonable
    # Sanity bounds on the monopole catch mis-normalized RRR inputs.
    if np.mean(phi_inv_mult[:,:,0])<1e-3:
        print(phi_inv_mult[:,:,0])
        print("Survey correction function seems too small - are the RRR counts normalized correctly?")
        sys.exit(1)
    if np.mean(phi_inv_mult[:,:,0])>1e3:
        print("Survey correction function seems too large - are the RRR counts normalized correctly?")
        sys.exit(1)
# One output row per (b1, b2) radial-bin pair, written in row-major order:
# seven tab-separated, normalized Legendre multipoles per row.
if periodic:
    fname = 'BinCorrectionFactor3PCF_n%d_periodic.txt' % (n)
else:
    fname = 'BinCorrectionFactor3PCF_n%d_m%d.txt' % (n, m)
outfile = os.path.join(outdir, fname)

with open(outfile, "w+") as out:
    for b1 in range(n):
        for b2 in range(n):
            row = ["%.8e" % (phi_inv_mult[b1, b2, ell] * norm) for ell in range(7)]
            out.write("\t".join(row) + "\n")

print("\nSaved (normalized) output to %s\n" % outfile)
|
oliverphilcoxREPO_NAMERascalCPATH_START.@RascalC_extracted@RascalC-master@python@compute_3pcf_correction_function.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/layout/slider/currentvalue/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.slider.currentvalue"
_path_str = "layout.slider.currentvalue.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
"Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
"Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# lineposition
# ------------
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
# shadow
# ------
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# style
# -----
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
# textcase
# --------
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
# variant
# -------
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
# weight
# ------
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text property descriptions consumed by the base class when
        # composing constructor docstrings and validation error messages.
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font of the current value label text.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.slider.
currentvalue.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.slider.currentvalue.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.slider.currentvalue.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("lineposition", None)
_v = lineposition if lineposition is not None else _v
if _v is not None:
self["lineposition"] = _v
_v = arg.pop("shadow", None)
_v = shadow if shadow is not None else _v
if _v is not None:
self["shadow"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("style", None)
_v = style if style is not None else _v
if _v is not None:
self["style"] = _v
_v = arg.pop("textcase", None)
_v = textcase if textcase is not None else _v
if _v is not None:
self["textcase"] = _v
_v = arg.pop("variant", None)
_v = variant if variant is not None else _v
if _v is not None:
self["variant"] = _v
_v = arg.pop("weight", None)
_v = weight if weight is not None else _v
if _v is not None:
self["weight"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@layout@slider@currentvalue@_font.py@.PATH_END.py
|
{
"filename": "numba_interface.py",
"repo_name": "tardis-sn/tardis",
"repo_path": "tardis_extracted/tardis-main/tardis/montecarlo/montecarlo_numba/numba_interface.py",
"type": "Python"
}
|
from enum import IntEnum
from numba import float64, int64, boolean
from numba.experimental import jitclass
import numpy as np
from astropy import units as u
from tardis import constants as const
from tardis.montecarlo import (
montecarlo_configuration as montecarlo_configuration,
)
C_SPEED_OF_LIGHT = const.c.to("cm/s").value  # speed of light as a plain float in cm/s
# numba jitclass field specification for NumbaModel:
# (attribute name, numba dtype) pairs.
numba_model_spec = [
    ("r_inner", float64[:]),
    ("r_outer", float64[:]),
    ("time_explosion", float64),
]


# Plain data container compiled with numba's @jitclass so instances can be
# used inside jit-compiled (nopython) code.
@jitclass(numba_model_spec)
class NumbaModel(object):
    def __init__(self, r_inner, r_outer, time_explosion):
        """
        Model for the Numba mode

        Parameters
        ----------
        r_inner : numpy.ndarray
            1-D float64 array (see ``numba_model_spec``).
        r_outer : numpy.ndarray
            1-D float64 array (see ``numba_model_spec``).
        time_explosion : float
        """
        self.r_inner = r_inner
        self.r_outer = r_outer
        self.time_explosion = time_explosion
# numba jitclass field specification for NumbaPlasma:
# (attribute name, numba dtype) pairs.
numba_plasma_spec = [
    ("electron_density", float64[:]),
    ("line_list_nu", float64[:]),
    ("tau_sobolev", float64[:, :]),
    ("transition_probabilities", float64[:, :]),
    ("line2macro_level_upper", int64[:]),
    ("macro_block_references", int64[:]),
    ("transition_type", int64[:]),
    ("destination_level_id", int64[:]),
    ("transition_line_id", int64[:]),
]


# Plasma state compiled with numba's @jitclass so it can be passed into
# jit-compiled (nopython) code; built by numba_plasma_initialize() below.
@jitclass(numba_plasma_spec)
class NumbaPlasma(object):
    def __init__(
        self,
        electron_density,
        line_list_nu,
        tau_sobolev,
        transition_probabilities,
        line2macro_level_upper,
        macro_block_references,
        transition_type,
        destination_level_id,
        transition_line_id,
    ):
        """
        Plasma for the Numba code

        Parameters
        ----------
        electron_density : numpy.ndarray
        line_list_nu : numpy.ndarray
        tau_sobolev : numpy.ndarray
            2-D float64 array (see ``numba_plasma_spec``).
        transition_probabilities : numpy.ndarray
            2-D float64 array (see ``numba_plasma_spec``).
        line2macro_level_upper : numpy.ndarray
        macro_block_references : numpy.ndarray
        transition_type : numpy.ndarray
        destination_level_id : numpy.ndarray
        transition_line_id : numpy.ndarray
        """
        self.electron_density = electron_density
        self.line_list_nu = line_list_nu
        self.tau_sobolev = tau_sobolev
        #### Macro Atom transition probabilities
        self.transition_probabilities = transition_probabilities
        self.line2macro_level_upper = line2macro_level_upper
        self.macro_block_references = macro_block_references
        self.transition_type = transition_type
        # Destination level is not needed and/or generated for downbranch
        self.destination_level_id = destination_level_id
        self.transition_line_id = transition_line_id
def numba_plasma_initialize(plasma, line_interaction_type):
    """
    Copy the plasma state needed by the numba kernels out of a TARDIS
    plasma object and wrap it in a :class:`NumbaPlasma`.

    Parameters
    ----------
    plasma : tardis.plasma.BasePlasma
    line_interaction_type : str
        For "scatter", the macro-atom arrays are replaced by size-1
        placeholders; any other value pulls the full macro-atom data.

    Returns
    -------
    NumbaPlasma
    """
    n_e = plasma.electron_densities.values
    nu_lines = plasma.atomic_data.lines.nu.values
    sobolev_depths = np.ascontiguousarray(
        plasma.tau_sobolevs.values.copy(), dtype=np.float64
    )
    # Disabling line scattering is implemented by zeroing every Sobolev
    # optical depth in place.
    if montecarlo_configuration.disable_line_scattering:
        sobolev_depths *= 0

    if line_interaction_type == "scatter":
        # Macro-atom machinery is unused here; minimal (size-1) arrays keep
        # the jitclass field dtypes satisfied.  The generator expression
        # yields five *distinct* arrays (no aliasing).
        trans_probs = np.zeros((1, 1), dtype=np.float64)
        line2macro, block_refs, trans_types, dest_levels, line_ids = (
            np.zeros(1, dtype=np.int64) for _ in range(5)
        )
    else:
        atomic_data = plasma.atomic_data
        trans_probs = np.ascontiguousarray(
            plasma.transition_probabilities.values.copy(), dtype=np.float64
        )
        line2macro = atomic_data.lines_upper2macro_reference_idx
        block_refs = atomic_data.macro_atom_references["block_references"].values
        trans_types = atomic_data.macro_atom_data["transition_type"].values
        # Destination level is not needed and/or generated for downbranch
        dest_levels = atomic_data.macro_atom_data["destination_level_idx"].values
        line_ids = atomic_data.macro_atom_data["lines_idx"].values

    return NumbaPlasma(
        n_e,
        nu_lines,
        sobolev_depths,
        trans_probs,
        line2macro,
        block_refs,
        trans_types,
        dest_levels,
        line_ids,
    )
# numba jitclass field specification for PacketCollection
# (all 1-D float64 arrays).
packet_collection_spec = [
    ("packets_input_nu", float64[:]),
    ("packets_input_mu", float64[:]),
    ("packets_input_energy", float64[:]),
    ("packets_output_nu", float64[:]),
    ("packets_output_energy", float64[:]),
]


# Per-packet input and output buffers, compiled with numba's @jitclass so
# the collection can be accessed from jit-compiled (nopython) code.
@jitclass(packet_collection_spec)
class PacketCollection(object):
    def __init__(
        self,
        packets_input_nu,
        packets_input_mu,
        packets_input_energy,
        packets_output_nu,
        packets_output_energy,
    ):
        """
        Parameters
        ----------
        packets_input_nu : numpy.ndarray
        packets_input_mu : numpy.ndarray
        packets_input_energy : numpy.ndarray
        packets_output_nu : numpy.ndarray
        packets_output_energy : numpy.ndarray
        """
        # Input packet properties.
        self.packets_input_nu = packets_input_nu
        self.packets_input_mu = packets_input_mu
        self.packets_input_energy = packets_input_energy
        # Output packet properties.
        self.packets_output_nu = packets_output_nu
        self.packets_output_energy = packets_output_energy
# numba jitclass field specification for VPacketCollection.
vpacket_collection_spec = [
    ("rpacket_index", int64),
    ("spectrum_frequency", float64[:]),
    ("v_packet_spawn_start_frequency", float64),
    ("v_packet_spawn_end_frequency", float64),
    ("nus", float64[:]),
    ("energies", float64[:]),
    ("idx", int64),
    ("number_of_vpackets", int64),
    ("length", int64),
    ("last_interaction_in_nu", float64[:]),
    ("last_interaction_type", int64[:]),
    ("last_interaction_in_id", int64[:]),
    ("last_interaction_out_id", int64[:]),
]


# Growable store of virtual-packet results for one r-packet, compiled with
# numba's @jitclass.  Buffers start at ``temporary_v_packet_bins`` entries
# and are enlarged on demand by set_properties().
@jitclass(vpacket_collection_spec)
class VPacketCollection(object):
    def __init__(
        self,
        rpacket_index,
        spectrum_frequency,
        v_packet_spawn_start_frequency,
        v_packet_spawn_end_frequency,
        number_of_vpackets,
        temporary_v_packet_bins,
    ):
        self.spectrum_frequency = spectrum_frequency
        self.v_packet_spawn_start_frequency = v_packet_spawn_start_frequency
        self.v_packet_spawn_end_frequency = v_packet_spawn_end_frequency
        # Value buffers; ``idx`` below marks the first unused slot.
        self.nus = np.empty(temporary_v_packet_bins, dtype=np.float64)
        self.energies = np.empty(temporary_v_packet_bins, dtype=np.float64)
        self.number_of_vpackets = number_of_vpackets
        # The int buffers are filled with the sentinel -1 ("not set yet").
        self.last_interaction_in_nu = np.zeros(temporary_v_packet_bins, dtype=np.float64)
        self.last_interaction_type = -1 * np.ones(temporary_v_packet_bins, dtype=np.int64)
        self.last_interaction_in_id = -1 * np.ones(temporary_v_packet_bins, dtype=np.int64)
        self.last_interaction_out_id = -1 * np.ones(temporary_v_packet_bins, dtype=np.int64)
        self.idx = 0
        self.rpacket_index = rpacket_index
        self.length = temporary_v_packet_bins

    def set_properties(
        self,
        nu,
        energy,
        last_interaction_in_nu,
        last_interaction_type,
        last_interaction_in_id,
        last_interaction_out_id,
    ):
        # Append one virtual packet's values, growing every buffer first if
        # the collection is full.
        if self.idx >= self.length:
            # Geometric growth (double plus headroom) keeps appends cheap.
            temp_length = self.length * 2 + self.number_of_vpackets
            temp_nus = np.empty(temp_length, dtype=np.float64)
            temp_energies = np.empty(temp_length, dtype=np.float64)
            temp_last_interaction_in_nu = np.empty(temp_length, dtype=np.float64)
            temp_last_interaction_type = np.empty(temp_length, dtype=np.int64)
            temp_last_interaction_in_id = np.empty(temp_length, dtype=np.int64)
            temp_last_interaction_out_id = np.empty(temp_length, dtype=np.int64)
            # Copy the old contents into the enlarged buffers; slots past
            # ``self.length`` remain uninitialised until written.
            temp_nus[: self.length] = self.nus
            temp_energies[: self.length] = self.energies
            temp_last_interaction_in_nu[: self.length] = self.last_interaction_in_nu
            temp_last_interaction_type[: self.length] = self.last_interaction_type
            temp_last_interaction_in_id[: self.length] = self.last_interaction_in_id
            temp_last_interaction_out_id[: self.length] = self.last_interaction_out_id
            self.nus = temp_nus
            self.energies = temp_energies
            self.last_interaction_in_nu = temp_last_interaction_in_nu
            self.last_interaction_type = temp_last_interaction_type
            self.last_interaction_in_id = temp_last_interaction_in_id
            self.last_interaction_out_id = temp_last_interaction_out_id
            self.length = temp_length
        self.nus[self.idx] = nu
        self.energies[self.idx] = energy
        self.last_interaction_in_nu[self.idx] = last_interaction_in_nu
        self.last_interaction_type[self.idx] = last_interaction_type
        self.last_interaction_in_id[self.idx] = last_interaction_in_id
        self.last_interaction_out_id[self.idx] = last_interaction_out_id
        self.idx += 1
# numba jitclass field specification for Estimators.
estimators_spec = [
    ("j_estimator", float64[:]),
    ("nu_bar_estimator", float64[:]),
    ("j_blue_estimator", float64[:, :]),
    ("Edotlu_estimator", float64[:, :]),
]


# Container for monte-carlo estimator arrays, compiled with numba's
# @jitclass (presumably accumulated during transport — confirm with the
# kernels that receive it).
@jitclass(estimators_spec)
class Estimators(object):
    def __init__(
        self, j_estimator, nu_bar_estimator, j_blue_estimator, Edotlu_estimator
    ):
        """
        Parameters
        ----------
        j_estimator : numpy.ndarray
        nu_bar_estimator : numpy.ndarray
        j_blue_estimator : numpy.ndarray
            2-D float64 array (see ``estimators_spec``).
        Edotlu_estimator : numpy.ndarray
            2-D float64 array (see ``estimators_spec``).
        """
        self.j_estimator = j_estimator
        self.nu_bar_estimator = nu_bar_estimator
        self.j_blue_estimator = j_blue_estimator
        self.Edotlu_estimator = Edotlu_estimator
def configuration_initialize(runner, number_of_vpackets):
    """
    Copy per-run settings from a montecarlo runner into the module-level
    ``montecarlo_configuration`` used by the numba transport code.

    Parameters
    ----------
    runner : tardis.montecarlo.MontecarloRunner
        Source of the settings (line interaction type, seeds, relativity
        flag, virtual spectrum spawn range, ...).
    number_of_vpackets : int
        Number of virtual packets; also stored as
        ``temporary_v_packet_bins`` (initial VPacketCollection buffer size).

    Raises
    ------
    ValueError
        If ``runner.line_interaction_type`` is not "macroatom",
        "downbranch" or "scatter".
    """
    # Mapping replaces the previous if/elif chain; same three values.
    line_interaction_types = {
        "macroatom": LineInteractionType.MACROATOM,
        "downbranch": LineInteractionType.DOWNBRANCH,
        "scatter": LineInteractionType.SCATTER,
    }
    try:
        montecarlo_configuration.line_interaction_type = line_interaction_types[
            runner.line_interaction_type
        ]
    except KeyError:
        # ``from None`` keeps the original, unchained traceback behaviour.
        raise ValueError(
            'Line interaction type must be one of "macroatom",'
            '"downbranch", or "scatter" but is '
            f"{runner.line_interaction_type}"
        ) from None
    montecarlo_configuration.number_of_vpackets = number_of_vpackets
    montecarlo_configuration.temporary_v_packet_bins = number_of_vpackets
    montecarlo_configuration.full_relativity = runner.enable_full_relativity
    montecarlo_configuration.montecarlo_seed = runner.seed
    montecarlo_configuration.single_packet_seed = runner.single_packet_seed
    # NOTE(review): start/end are deliberately swapped here — converting the
    # spawn range with u.spectral() (e.g. wavelength -> frequency) reverses
    # the ordering; confirm against the units of virtual_spectrum_spawn_range.
    montecarlo_configuration.v_packet_spawn_start_frequency = (
        runner.virtual_spectrum_spawn_range.end.to(
            u.Hz, equivalencies=u.spectral()
        ).value
    )
    montecarlo_configuration.v_packet_spawn_end_frequency = (
        runner.virtual_spectrum_spawn_range.start.to(
            u.Hz, equivalencies=u.spectral()
        ).value
    )
    montecarlo_configuration.VPACKET_LOGGING = runner.virt_logging
# class TrackRPacket(object):
class LineInteractionType(IntEnum):
    """
    Supported line interaction treatments; the selected member is stored on
    ``montecarlo_configuration`` by :func:`configuration_initialize`.
    """

    SCATTER = 0
    DOWNBRANCH = 1
    MACROATOM = 2
|
tardis-snREPO_NAMEtardisPATH_START.@tardis_extracted@tardis-main@tardis@montecarlo@montecarlo_numba@numba_interface.py@.PATH_END.py
|
{
"filename": "_tickformatstopdefaults.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcoords/line/colorbar/_tickformatstopdefaults.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatstopdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """
    Compound validator for the ``tickformatstopdefaults`` property of
    ``parcoords.line.colorbar``; wraps the ``Tickformatstop`` data class.
    """

    def __init__(
        self,
        plotly_name="tickformatstopdefaults",
        parent_name="parcoords.line.colorbar",
        **kwargs,
    ):
        super(TickformatstopdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Both the wrapped data class and its doc block may be overridden
            # by the caller through **kwargs.
            data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            """,
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@parcoords@line@colorbar@_tickformatstopdefaults.py@.PATH_END.py
|
{
"filename": "_scattercarpet.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/_scattercarpet.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Scattercarpet(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "scattercarpet"
_valid_props = {
"a",
"asrc",
"b",
"bsrc",
"carpet",
"connectgaps",
"customdata",
"customdatasrc",
"fill",
"fillcolor",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hoveron",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legendgroup",
"line",
"marker",
"meta",
"metasrc",
"mode",
"name",
"opacity",
"selected",
"selectedpoints",
"showlegend",
"stream",
"text",
"textfont",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatesrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
"xaxis",
"yaxis",
}
# a
# -
@property
def a(self):
"""
Sets the a-axis coordinates.
The 'a' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["a"]
@a.setter
def a(self, val):
self["a"] = val
# asrc
# ----
@property
def asrc(self):
"""
Sets the source reference on Chart Studio Cloud for a .
The 'asrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["asrc"]
@asrc.setter
def asrc(self, val):
self["asrc"] = val
# b
# -
@property
def b(self):
"""
Sets the b-axis coordinates.
The 'b' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["b"]
@b.setter
def b(self, val):
self["b"] = val
# bsrc
# ----
@property
def bsrc(self):
"""
Sets the source reference on Chart Studio Cloud for b .
The 'bsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bsrc"]
@bsrc.setter
def bsrc(self, val):
self["bsrc"] = val
# carpet
# ------
@property
def carpet(self):
"""
An identifier for this carpet, so that `scattercarpet` and
`contourcarpet` traces can specify a carpet plot on which they
lie
The 'carpet' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["carpet"]
@carpet.setter
def carpet(self, val):
self["carpet"] = val
# connectgaps
# -----------
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the provided data arrays are connected.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["connectgaps"]
@connectgaps.setter
def connectgaps(self, val):
self["connectgaps"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# fill
# ----
@property
def fill(self):
"""
Sets the area to fill with a solid color. Use with `fillcolor`
if not "none". scatterternary has a subset of the options
available to scatter. "toself" connects the endpoints of the
trace (or each segment of the trace if it has gaps) into a
closed shape. "tonext" fills the space between two traces if
one completely encloses the other (eg consecutive contour
lines), and behaves like "toself" if there is no trace before
it. "tonext" should not be used if one trace does not enclose
the other.
The 'fill' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'toself', 'tonext']
Returns
-------
Any
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
# fillcolor
# ---------
@property
def fillcolor(self):
"""
Sets the fill color. Defaults to a half-transparent variant of
the line color, marker color, or marker line color, whichever
is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['a', 'b', 'text', 'name'] joined with '+' characters
(e.g. 'a+b')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.scattercarpet.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hoveron
# -------
@property
def hoveron(self):
"""
Do the hover effects highlight individual points (markers or
line points) or do they highlight filled regions? If the fill
is "toself" or "tonext" and there are no markers or text, then
the default is "fills", otherwise it is "points".
The 'hoveron' property is a flaglist and may be specified
as a string containing:
- Any combination of ['points', 'fills'] joined with '+' characters
(e.g. 'points+fills')
Returns
-------
Any
"""
return self["hoveron"]
@hoveron.setter
def hoveron(self, val):
self["hoveron"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
The variables available in `hovertemplate` are the ones emitted
as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attributes that can be specified per-point
(the ones that are `arrayOk: true`) are available. Anything
contained in tag `<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each (a,b) point. If a
single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order
to the the data points in (a,b). To be seen, trace `hoverinfo`
must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the
lines are drawn using spline interpolation. The
other available values correspond to step-wise
line shapes.
smoothing
Has an effect only if `shape` is set to
"spline" Sets the amount of smoothing. 0
corresponds to no smoothing (equivalent to a
"linear" shape).
width
Sets the line width (in px).
Returns
-------
plotly.graph_objs.scattercarpet.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
    # marker
    # ------
    @property
    def marker(self):
        """
        The 'marker' property is an instance of Marker
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scattercarpet.Marker`
          - A dict of string/value properties that will be passed
            to the Marker constructor

            Supported dict properties:

                autocolorscale
                    Determines whether the colorscale is a default
                    palette (`autocolorscale: true`) or the palette
                    determined by `marker.colorscale`. Has an
                    effect only if in `marker.color`is set to a
                    numerical array. In case `colorscale` is
                    unspecified or `autocolorscale` is true, the
                    default palette will be chosen according to
                    whether numbers in the `color` array are all
                    positive, all negative or mixed.
                cauto
                    Determines whether or not the color domain is
                    computed with respect to the input data (here
                    in `marker.color`) or the bounds set in
                    `marker.cmin` and `marker.cmax` Has an effect
                    only if in `marker.color`is set to a numerical
                    array. Defaults to `false` when `marker.cmin`
                    and `marker.cmax` are set by the user.
                cmax
                    Sets the upper bound of the color domain. Has
                    an effect only if in `marker.color`is set to a
                    numerical array. Value should have the same
                    units as in `marker.color` and if set,
                    `marker.cmin` must be set as well.
                cmid
                    Sets the mid-point of the color domain by
                    scaling `marker.cmin` and/or `marker.cmax` to
                    be equidistant to this point. Has an effect
                    only if in `marker.color`is set to a numerical
                    array. Value should have the same units as in
                    `marker.color`. Has no effect when
                    `marker.cauto` is `false`.
                cmin
                    Sets the lower bound of the color domain. Has
                    an effect only if in `marker.color`is set to a
                    numerical array. Value should have the same
                    units as in `marker.color` and if set,
                    `marker.cmax` must be set as well.
                color
                    Sets themarkercolor. It accepts either a
                    specific color or an array of numbers that are
                    mapped to the colorscale relative to the max
                    and min values of the array or relative to
                    `marker.cmin` and `marker.cmax` if set.
                coloraxis
                    Sets a reference to a shared color axis.
                    References to these shared color axes are
                    "coloraxis", "coloraxis2", "coloraxis3", etc.
                    Settings for these shared color axes are set in
                    the layout, under `layout.coloraxis`,
                    `layout.coloraxis2`, etc. Note that multiple
                    color scales can be linked to the same color
                    axis.
                colorbar
                    :class:`plotly.graph_objects.scattercarpet.mark
                    er.ColorBar` instance or dict with compatible
                    properties
                colorscale
                    Sets the colorscale. Has an effect only if in
                    `marker.color`is set to a numerical array. The
                    colorscale must be an array containing arrays
                    mapping a normalized value to an rgb, rgba,
                    hex, hsl, hsv, or named color string. At
                    minimum, a mapping for the lowest (0) and
                    highest (1) values are required. For example,
                    `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                    To control the bounds of the colorscale in
                    color space, use`marker.cmin` and
                    `marker.cmax`. Alternatively, `colorscale` may
                    be a palette name string of the following list:
                    Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
                    ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
                    arth,Electric,Viridis,Cividis.
                colorsrc
                    Sets the source reference on Chart Studio Cloud
                    for color .
                gradient
                    :class:`plotly.graph_objects.scattercarpet.mark
                    er.Gradient` instance or dict with compatible
                    properties
                line
                    :class:`plotly.graph_objects.scattercarpet.mark
                    er.Line` instance or dict with compatible
                    properties
                maxdisplayed
                    Sets a maximum number of points to be drawn on
                    the graph. 0 corresponds to no limit.
                opacity
                    Sets the marker opacity.
                opacitysrc
                    Sets the source reference on Chart Studio Cloud
                    for opacity .
                reversescale
                    Reverses the color mapping if true. Has an
                    effect only if in `marker.color`is set to a
                    numerical array. If true, `marker.cmin` will
                    correspond to the last color in the array and
                    `marker.cmax` will correspond to the first
                    color.
                showscale
                    Determines whether or not a colorbar is
                    displayed for this trace. Has an effect only if
                    in `marker.color`is set to a numerical array.
                size
                    Sets the marker size (in px).
                sizemin
                    Has an effect only if `marker.size` is set to a
                    numerical array. Sets the minimum size (in px)
                    of the rendered marker points.
                sizemode
                    Has an effect only if `marker.size` is set to a
                    numerical array. Sets the rule for which the
                    data in `size` is converted to pixels.
                sizeref
                    Has an effect only if `marker.size` is set to a
                    numerical array. Sets the scale factor used to
                    determine the rendered size of marker points.
                    Use with `sizemin` and `sizemode`.
                sizesrc
                    Sets the source reference on Chart Studio Cloud
                    for size .
                symbol
                    Sets the marker symbol type. Adding 100 is
                    equivalent to appending "-open" to a symbol
                    name. Adding 200 is equivalent to appending
                    "-dot" to a symbol name. Adding 300 is
                    equivalent to appending "-open-dot" or "dot-
                    open" to a symbol name.
                symbolsrc
                    Sets the source reference on Chart Studio Cloud
                    for symbol .

        Returns
        -------
        plotly.graph_objs.scattercarpet.Marker
        """
        return self["marker"]

    @marker.setter
    def marker(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["marker"] = val
    # meta
    # ----
    @property
    def meta(self):
        """
        Assigns extra meta information associated with this trace that
        can be used in various text attributes. Attributes such as
        trace `name`, graph, axis and colorbar `title.text`, annotation
        `text` `rangeselector`, `updatemenus` and `sliders` `label`
        text all support `meta`. To access the trace `meta` values in
        an attribute in the same trace, simply use `%{meta[i]}` where
        `i` is the index or key of the `meta` item in question. To
        access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
        `meta` and `n` is the trace index.

        The 'meta' property accepts values of any type

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["meta"]

    @meta.setter
    def meta(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["meta"] = val
    # metasrc
    # -------
    @property
    def metasrc(self):
        """
        Sets the source reference on Chart Studio Cloud for meta.

        The 'metasrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["metasrc"]

    @metasrc.setter
    def metasrc(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["metasrc"] = val
    # mode
    # ----
    @property
    def mode(self):
        """
        Determines the drawing mode for this scatter trace. If the
        provided `mode` includes "text" then the `text` elements appear
        at the coordinates. Otherwise, the `text` elements appear on
        hover. If there are less than 20 points and the trace is not
        stacked then the default is "lines+markers". Otherwise,
        "lines".

        The 'mode' property is a flaglist and may be specified
        as a string containing:
          - Any combination of ['lines', 'markers', 'text'] joined with '+' characters
            (e.g. 'lines+markers')
            OR exactly one of ['none'] (e.g. 'none')

        Returns
        -------
        Any
        """
        return self["mode"]

    @mode.setter
    def mode(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["mode"] = val
    # name
    # ----
    @property
    def name(self):
        """
        Sets the trace name. The trace name appear as the legend item
        and on hover.

        The 'name' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["name"]

    @name.setter
    def name(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["name"] = val
    # opacity
    # -------
    @property
    def opacity(self):
        """
        Sets the opacity of the trace.

        The 'opacity' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self["opacity"]

    @opacity.setter
    def opacity(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["opacity"] = val
    # selected
    # --------
    @property
    def selected(self):
        """
        The 'selected' property is an instance of Selected
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scattercarpet.Selected`
          - A dict of string/value properties that will be passed
            to the Selected constructor

            Supported dict properties:

                marker
                    :class:`plotly.graph_objects.scattercarpet.sele
                    cted.Marker` instance or dict with compatible
                    properties
                textfont
                    :class:`plotly.graph_objects.scattercarpet.sele
                    cted.Textfont` instance or dict with compatible
                    properties

        Returns
        -------
        plotly.graph_objs.scattercarpet.Selected
        """
        return self["selected"]

    @selected.setter
    def selected(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["selected"] = val
    # selectedpoints
    # --------------
    @property
    def selectedpoints(self):
        """
        Array containing integer indices of selected points. Has an
        effect only for traces that support selections. Note that an
        empty array means an empty selection where the `unselected` are
        turned on for all points, whereas, any other non-array values
        means no selection all where the `selected` and `unselected`
        styles have no effect.

        The 'selectedpoints' property accepts values of any type

        Returns
        -------
        Any
        """
        return self["selectedpoints"]

    @selectedpoints.setter
    def selectedpoints(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["selectedpoints"] = val
    # showlegend
    # ----------
    @property
    def showlegend(self):
        """
        Determines whether or not an item corresponding to this trace
        is shown in the legend.

        The 'showlegend' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["showlegend"]

    @showlegend.setter
    def showlegend(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["showlegend"] = val
    # stream
    # ------
    @property
    def stream(self):
        """
        The 'stream' property is an instance of Stream
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scattercarpet.Stream`
          - A dict of string/value properties that will be passed
            to the Stream constructor

            Supported dict properties:

                maxpoints
                    Sets the maximum number of points to keep on
                    the plots from an incoming stream. If
                    `maxpoints` is set to 50, only the newest 50
                    points will be displayed on the plot.
                token
                    The stream id number links a data trace on a
                    plot with a stream. See https://chart-
                    studio.plotly.com/settings for more details.

        Returns
        -------
        plotly.graph_objs.scattercarpet.Stream
        """
        return self["stream"]

    @stream.setter
    def stream(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["stream"] = val
    # text
    # ----
    @property
    def text(self):
        """
        Sets text elements associated with each (a,b) point. If a
        single string, the same string appears over all the data
        points. If an array of strings, the items are mapped in order
        to the the data points in (a,b). If trace `hoverinfo` contains
        a "text" flag and "hovertext" is not set, these elements will
        be seen in the hover labels.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["text"]

    @text.setter
    def text(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["text"] = val
    # textfont
    # --------
    @property
    def textfont(self):
        """
        Sets the text font.

        The 'textfont' property is an instance of Textfont
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scattercarpet.Textfont`
          - A dict of string/value properties that will be passed
            to the Textfont constructor

            Supported dict properties:

                color

                colorsrc
                    Sets the source reference on Chart Studio Cloud
                    for color .
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans",, "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                familysrc
                    Sets the source reference on Chart Studio Cloud
                    for family .
                size

                sizesrc
                    Sets the source reference on Chart Studio Cloud
                    for size .

        Returns
        -------
        plotly.graph_objs.scattercarpet.Textfont
        """
        return self["textfont"]

    @textfont.setter
    def textfont(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["textfont"] = val
    # textposition
    # ------------
    @property
    def textposition(self):
        """
        Sets the positions of the `text` elements with respects to the
        (x,y) coordinates.

        The 'textposition' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['top left', 'top center', 'top right', 'middle left',
                'middle center', 'middle right', 'bottom left', 'bottom
                center', 'bottom right']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["textposition"]

    @textposition.setter
    def textposition(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["textposition"] = val
    # textpositionsrc
    # ---------------
    @property
    def textpositionsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        textposition.

        The 'textpositionsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["textpositionsrc"]

    @textpositionsrc.setter
    def textpositionsrc(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["textpositionsrc"] = val
    # textsrc
    # -------
    @property
    def textsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for text.

        The 'textsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["textsrc"]

    @textsrc.setter
    def textsrc(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["textsrc"] = val
    # texttemplate
    # ------------
    @property
    def texttemplate(self):
        """
        Template string used for rendering the information text that
        appear on points. Note that this will override `textinfo`.
        Variables are inserted using %{variable}, for example "y:
        %{y}". Numbers are formatted using d3-format's syntax
        %{variable:d3-format}, for example "Price: %{y:$.2f}".
        https://github.com/d3/d3-3.x-api-
        reference/blob/master/Formatting.md#d3_format for details on
        the formatting syntax. Dates are formatted using d3-time-
        format's syntax %{variable|d3-time-format}, for example "Day:
        %{2019-01-01|%A}". https://github.com/d3/d3-time-
        format#locale_format for details on the date formatting syntax.
        Every attributes that can be specified per-point (the ones that
        are `arrayOk: true`) are available. variables `a`, `b` and
        `text`.

        The 'texttemplate' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["texttemplate"]

    @texttemplate.setter
    def texttemplate(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["texttemplate"] = val
    # texttemplatesrc
    # ---------------
    @property
    def texttemplatesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        texttemplate.

        The 'texttemplatesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["texttemplatesrc"]

    @texttemplatesrc.setter
    def texttemplatesrc(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["texttemplatesrc"] = val
    # uid
    # ---
    @property
    def uid(self):
        """
        Assign an id to this trace, Use this to provide object
        constancy between traces during animations and transitions.

        The 'uid' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["uid"]

    @uid.setter
    def uid(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["uid"] = val
    # uirevision
    # ----------
    @property
    def uirevision(self):
        """
        Controls persistence of some user-driven changes to the trace:
        `constraintrange` in `parcoords` traces, as well as some
        `editable: true` modifications such as `name` and
        `colorbar.title`. Defaults to `layout.uirevision`. Note that
        other user-driven trace attribute changes are controlled by
        `layout` attributes: `trace.visible` is controlled by
        `layout.legend.uirevision`, `selectedpoints` is controlled by
        `layout.selectionrevision`, and `colorbar.(x|y)` (accessible
        with `config: {editable: true}`) is controlled by
        `layout.editrevision`. Trace changes are tracked by `uid`,
        which only falls back on trace index if no `uid` is provided.
        So if your app can add/remove traces before the end of the
        `data` array, such that the same trace has a different index,
        you can still preserve user-driven changes if you give each
        trace a `uid` that stays with it as it moves.

        The 'uirevision' property accepts values of any type

        Returns
        -------
        Any
        """
        return self["uirevision"]

    @uirevision.setter
    def uirevision(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["uirevision"] = val
    # unselected
    # ----------
    @property
    def unselected(self):
        """
        The 'unselected' property is an instance of Unselected
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scattercarpet.Unselected`
          - A dict of string/value properties that will be passed
            to the Unselected constructor

            Supported dict properties:

                marker
                    :class:`plotly.graph_objects.scattercarpet.unse
                    lected.Marker` instance or dict with compatible
                    properties
                textfont
                    :class:`plotly.graph_objects.scattercarpet.unse
                    lected.Textfont` instance or dict with
                    compatible properties

        Returns
        -------
        plotly.graph_objs.scattercarpet.Unselected
        """
        return self["unselected"]

    @unselected.setter
    def unselected(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["unselected"] = val
    # visible
    # -------
    @property
    def visible(self):
        """
        Determines whether or not this trace is visible. If
        "legendonly", the trace is not drawn, but can appear as a
        legend item (provided that the legend itself is visible).

        The 'visible' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                [True, False, 'legendonly']

        Returns
        -------
        Any
        """
        return self["visible"]

    @visible.setter
    def visible(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["visible"] = val
    # xaxis
    # -----
    @property
    def xaxis(self):
        """
        Sets a reference between this trace's x coordinates and a 2D
        cartesian x axis. If "x" (the default value), the x coordinates
        refer to `layout.xaxis`. If "x2", the x coordinates refer to
        `layout.xaxis2`, and so on.

        The 'xaxis' property is an identifier of a particular
        subplot, of type 'x', that may be specified as the string 'x'
        optionally followed by an integer >= 1
        (e.g. 'x', 'x1', 'x2', 'x3', etc.)

        Returns
        -------
        str
        """
        return self["xaxis"]

    @xaxis.setter
    def xaxis(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["xaxis"] = val
    # yaxis
    # -----
    @property
    def yaxis(self):
        """
        Sets a reference between this trace's y coordinates and a 2D
        cartesian y axis. If "y" (the default value), the y coordinates
        refer to `layout.yaxis`. If "y2", the y coordinates refer to
        `layout.yaxis2`, and so on.

        The 'yaxis' property is an identifier of a particular
        subplot, of type 'y', that may be specified as the string 'y'
        optionally followed by an integer >= 1
        (e.g. 'y', 'y1', 'y2', 'y3', etc.)

        Returns
        -------
        str
        """
        return self["yaxis"]

    @yaxis.setter
    def yaxis(self, val):
        # Delegates storage/validation to the trace's item-assignment
        # interface (self.__setitem__).
        self["yaxis"] = val
    # type
    # ----
    @property
    def type(self):
        """
        The trace type string ("scattercarpet" for this class).

        Read directly from the internal `_props` store; unlike the
        other properties above, no setter is defined in this section.

        Returns
        -------
        str
        """
        return self._props["type"]
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        """
        Generated plain-text catalogue of every property this trace
        supports, one indented paragraph per property.

        NOTE: the returned value is a runtime string (it is consumed
        elsewhere, e.g. when building constructor help text), so its
        contents are reproduced verbatim from the code generator and
        must not be edited by hand — including the known upstream
        typos (`%{data[n[.meta[i]}`, "updatemenues"); fix those in
        the plotly.js schema / generator instead.

        Returns
        -------
        str
        """
        return """\
        a
            Sets the a-axis coordinates.
        asrc
            Sets the source reference on Chart Studio Cloud for  a
            .
        b
            Sets the b-axis coordinates.
        bsrc
            Sets the source reference on Chart Studio Cloud for  b
            .
        carpet
            An identifier for this carpet, so that `scattercarpet`
            and `contourcarpet` traces can specify a carpet plot on
            which they lie
        connectgaps
            Determines whether or not gaps (i.e. {nan} or missing
            values) in the provided data arrays are connected.
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            customdata .
        fill
            Sets the area to fill with a solid color. Use with
            `fillcolor` if not "none". scatterternary has a subset
            of the options available to scatter. "toself" connects
            the endpoints of the trace (or each segment of the
            trace if it has gaps) into a closed shape. "tonext"
            fills the space between two traces if one completely
            encloses the other (eg consecutive contour lines), and
            behaves like "toself" if there is no trace before it.
            "tonext" should not be used if one trace does not
            enclose the other.
        fillcolor
            Sets the fill color. Defaults to a half-transparent
            variant of the line color, marker color, or marker line
            color, whichever is available.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            hoverinfo .
        hoverlabel
            :class:`plotly.graph_objects.scattercarpet.Hoverlabel`
            instance or dict with compatible properties
        hoveron
            Do the hover effects highlight individual points
            (markers or line points) or do they highlight filled
            regions? If the fill is "toself" or "tonext" and there
            are no markers or text, then the default is "fills",
            otherwise it is "points".
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format for
            details on the formatting syntax. Dates are formatted
            using d3-time-format's syntax %{variable|d3-time-
            format}, for example "Day: %{2019-01-01|%A}".
            https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. The variables
            available in `hovertemplate` are the ones emitted as
            event data described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            hovertemplate .
        hovertext
            Sets hover text elements associated with each (a,b)
            point. If a single string, the same string appears over
            all the data points. If an array of strings, the items
            are mapped in order to the the data points in (a,b). To
            be seen, trace `hoverinfo` must contain a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            hovertext .
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            ids .
        legendgroup
            Sets the legend group for this trace. Traces part of
            the same legend group hide/show at the same time when
            toggling legend items.
        line
            :class:`plotly.graph_objects.scattercarpet.Line`
            instance or dict with compatible properties
        marker
            :class:`plotly.graph_objects.scattercarpet.Marker`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            meta .
        mode
            Determines the drawing mode for this scatter trace. If
            the provided `mode` includes "text" then the `text`
            elements appear at the coordinates. Otherwise, the
            `text` elements appear on hover. If there are less than
            20 points and the trace is not stacked then the default
            is "lines+markers". Otherwise, "lines".
        name
            Sets the trace name. The trace name appear as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        selected
            :class:`plotly.graph_objects.scattercarpet.Selected`
            instance or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array values means no selection all where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        stream
            :class:`plotly.graph_objects.scattercarpet.Stream`
            instance or dict with compatible properties
        text
            Sets text elements associated with each (a,b) point. If
            a single string, the same string appears over all the
            data points. If an array of strings, the items are
            mapped in order to the the data points in (a,b). If
            trace `hoverinfo` contains a "text" flag and
            "hovertext" is not set, these elements will be seen in
            the hover labels.
        textfont
            Sets the text font.
        textposition
            Sets the positions of the `text` elements with respects
            to the (x,y) coordinates.
        textpositionsrc
            Sets the source reference on Chart Studio Cloud for
            textposition .
        textsrc
            Sets the source reference on Chart Studio Cloud for
            text .
        texttemplate
            Template string used for rendering the information text
            that appear on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format for
            details on the formatting syntax. Dates are formatted
            using d3-time-format's syntax %{variable|d3-time-
            format}, for example "Day: %{2019-01-01|%A}".
            https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. Every attributes
            that can be specified per-point (the ones that are
            `arrayOk: true`) are available. variables `a`, `b` and
            `text`.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            texttemplate .
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.scattercarpet.Unselected`
            instance or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        xaxis
            Sets a reference between this trace's x coordinates and
            a 2D cartesian x axis. If "x" (the default value), the
            x coordinates refer to `layout.xaxis`. If "x2", the x
            coordinates refer to `layout.xaxis2`, and so on.
        yaxis
            Sets a reference between this trace's y coordinates and
            a 2D cartesian y axis. If "y" (the default value), the
            y coordinates refer to `layout.yaxis`. If "y2", the y
            coordinates refer to `layout.yaxis2`, and so on.
        """
def __init__(
self,
arg=None,
a=None,
asrc=None,
b=None,
bsrc=None,
carpet=None,
connectgaps=None,
customdata=None,
customdatasrc=None,
fill=None,
fillcolor=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hoveron=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
line=None,
marker=None,
meta=None,
metasrc=None,
mode=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textfont=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
xaxis=None,
yaxis=None,
**kwargs
):
"""
Construct a new Scattercarpet object
Plots a scatter trace on either the first carpet axis or the
carpet axis with a matching `carpet` attribute.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Scattercarpet`
a
Sets the a-axis coordinates.
asrc
Sets the source reference on Chart Studio Cloud for a
.
b
Sets the b-axis coordinates.
bsrc
Sets the source reference on Chart Studio Cloud for b
.
carpet
An identifier for this carpet, so that `scattercarpet`
and `contourcarpet` traces can specify a carpet plot on
which they lie
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". scatterternary has a subset
of the options available to scatter. "toself" connects
the endpoints of the trace (or each segment of the
trace if it has gaps) into a closed shape. "tonext"
fills the space between two traces if one completely
encloses the other (eg consecutive contour lines), and
behaves like "toself" if there is no trace before it.
"tonext" should not be used if one trace does not
enclose the other.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.scattercarpet.Hoverlabel`
instance or dict with compatible properties
hoveron
Do the hover effects highlight individual points
(markers or line points) or do they highlight filled
regions? If the fill is "toself" or "tonext" and there
are no markers or text, then the default is "fills",
otherwise it is "points".
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each (a,b)
point. If a single string, the same string appears over
all the data points. If an array of strings, the items
are mapped in order to the the data points in (a,b). To
be seen, trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
:class:`plotly.graph_objects.scattercarpet.Line`
instance or dict with compatible properties
marker
:class:`plotly.graph_objects.scattercarpet.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover. If there are less than
20 points and the trace is not stacked then the default
is "lines+markers". Otherwise, "lines".
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.scattercarpet.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.scattercarpet.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each (a,b) point. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
mapped in order to the the data points in (a,b). If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textfont
Sets the text font.
textposition
Sets the positions of the `text` elements with respects
to the (x,y) coordinates.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
textposition .
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. Every attributes
that can be specified per-point (the ones that are
`arrayOk: true`) are available. variables `a`, `b` and
`text`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.scattercarpet.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
Returns
-------
Scattercarpet
"""
super(Scattercarpet, self).__init__("scattercarpet")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Scattercarpet
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Scattercarpet`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("a", None)
_v = a if a is not None else _v
if _v is not None:
self["a"] = _v
_v = arg.pop("asrc", None)
_v = asrc if asrc is not None else _v
if _v is not None:
self["asrc"] = _v
_v = arg.pop("b", None)
_v = b if b is not None else _v
if _v is not None:
self["b"] = _v
_v = arg.pop("bsrc", None)
_v = bsrc if bsrc is not None else _v
if _v is not None:
self["bsrc"] = _v
_v = arg.pop("carpet", None)
_v = carpet if carpet is not None else _v
if _v is not None:
self["carpet"] = _v
_v = arg.pop("connectgaps", None)
_v = connectgaps if connectgaps is not None else _v
if _v is not None:
self["connectgaps"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("fill", None)
_v = fill if fill is not None else _v
if _v is not None:
self["fill"] = _v
_v = arg.pop("fillcolor", None)
_v = fillcolor if fillcolor is not None else _v
if _v is not None:
self["fillcolor"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hoveron", None)
_v = hoveron if hoveron is not None else _v
if _v is not None:
self["hoveron"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("mode", None)
_v = mode if mode is not None else _v
if _v is not None:
self["mode"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("selected", None)
_v = selected if selected is not None else _v
if _v is not None:
self["selected"] = _v
_v = arg.pop("selectedpoints", None)
_v = selectedpoints if selectedpoints is not None else _v
if _v is not None:
self["selectedpoints"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textpositionsrc", None)
_v = textpositionsrc if textpositionsrc is not None else _v
if _v is not None:
self["textpositionsrc"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("unselected", None)
_v = unselected if unselected is not None else _v
if _v is not None:
self["unselected"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
# Read-only literals
# ------------------
self._props["type"] = "scattercarpet"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@_scattercarpet.py@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/outsidetextfont/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``style`` property of ``funnel.outsidetextfont``.

    Accepts "normal" or "italic" (array values allowed); edits trigger a
    "calc" replot unless the caller overrides ``edit_type``.
    """

    def __init__(
        self, plotly_name="style", parent_name="funnel.outsidetextfont", **kwargs
    ):
        # Install the plotly schema defaults without clobbering caller overrides.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("values", ["normal", "italic"])
        super(StyleValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnel@outsidetextfont@_style.py@.PATH_END.py
|
{
"filename": "_columns.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/grid/_columns.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColumnsValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the ``columns`` property of ``layout.grid``.

    An integer >= 1; edits trigger a "plot" redraw unless overridden.
    """

    def __init__(self, plotly_name="columns", parent_name="layout.grid", **kwargs):
        # Install the plotly schema defaults without clobbering caller overrides.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("min", 1)
        super(ColumnsValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@grid@_columns.py@.PATH_END.py
|
{
"filename": "_xpad.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/marker/colorbar/_xpad.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``xpad`` property of ``funnel.marker.colorbar``.

    A non-negative number; edits trigger the "colorbars" edit path unless
    overridden.
    """

    def __init__(
        self, plotly_name="xpad", parent_name="funnel.marker.colorbar", **kwargs
    ):
        # Install the plotly schema defaults without clobbering caller overrides.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        super(XpadValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnel@marker@colorbar@_xpad.py@.PATH_END.py
|
{
"filename": "conv1d_transpose_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/kernel_tests/nn_ops/conv1d_transpose_test.py",
"type": "Python"
}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv1DTransposeTest(test.TestCase):
  """Tests for `nn_ops.conv1d_transpose`.

  Every test feeds an all-ones input of depth 3 through an all-ones
  [kernel_width=3, output_depth, input_depth=3] filter, so each filter tap
  that overlaps a real input sample contributes exactly 3.0 to an output
  position.  Expected values are derived by counting overlapping taps.
  """

  def testConv1DTransposeSingleStride(self):
    """NWC layout, stride 1, SAME padding."""
    with self.cached_session():
      strides = [1, 1, 1]
      # Input, output: [batch, width, depth]
      x_shape = [2, 6, 3]
      y_shape = [2, 6, 2]
      # Filter: [kernel_width, output_depth, input_depth]
      f_shape = [3, 2, 3]
      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv1d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      value = self.evaluate(output)
      for n in range(y_shape[0]):
        for w in range(y_shape[1]):
          for c in range(y_shape[2]):
            # Interior positions see all 3 filter taps; the two border
            # positions lose one tap to the implicit SAME padding.
            target = 2 * 3.0
            w_in = w > 0 and w < y_shape[1] - 1
            if w_in:
              target += 3.0
            self.assertAllClose(target, value[n, w, c])

  def testConv1DTransposeSame(self):
    """NWC layout, stride 2, SAME padding."""
    with self.cached_session():
      strides = [1, 2, 1]
      # Input, output: [batch, width, depth]
      x_shape = [2, 4, 3]
      y_shape = [2, 8, 2]
      # Filter: [kernel_width, output_depth, input_depth]
      f_shape = [3, 2, 3]
      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv1d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      value = self.evaluate(output)
      for n in range(x_shape[0]):
        for k in range(f_shape[1]):
          for w in range(y_shape[1]):
            target = 3.0
            # Positions aligned with the stride overlap one extra tap.
            w_in = w % strides[1] == 0 and w > 0 and w < y_shape[1] - 1
            if w_in:
              target += 3.0
            self.assertAllClose(target, value[n, w, k])

  def testConv1DTransposeValid(self):
    """NWC layout, stride 2, VALID padding."""
    with self.cached_session():
      strides = [1, 2, 1]
      # Input, output: [batch, width, depth]
      x_shape = [2, 4, 3]
      y_shape = [2, 9, 2]
      # Filter: [kernel_width, output_depth, input_depth]
      f_shape = [3, 2, 3]
      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv1d_transpose(
          x, f, y_shape, strides=strides, padding="VALID")
      value = self.evaluate(output)
      cache_values = np.zeros(y_shape, dtype=np.float32)
      # The amount of padding added
      pad = 1
      for n in range(x_shape[0]):
        for k in range(f_shape[1]):
          for w in range(pad, y_shape[1] - pad):
            target = 3.0
            # Positions aligned with the stride overlap one extra tap.
            w_in = w % strides[1] == 0 and w > pad and w < y_shape[1] - 1 - pad
            if w_in:
              target += 3.0
            cache_values[n, w, k] = target
          # Border positions mirror their nearest interior neighbour.
          cache_values[n, 0, k] = cache_values[n, 1, k]
          cache_values[n, -1, k] = cache_values[n, -2, k]
      self.assertAllClose(cache_values, value)

  @test_util.run_deprecated_v1
  def testGradient(self):
    """Numerical gradient check of conv1d_transpose (currently skipped)."""
    self.skipTest("b/262851489: Fix nightly build for GPU.")
    x_shape = [2, 4, 3]
    f_shape = [3, 2, 3]
    y_shape = [2, 8, 2]
    strides = [1, 2, 1]
    np.random.seed(1)  # Make it reproducible.
    x_val = np.random.random_sample(x_shape).astype(np.float64)
    f_val = np.random.random_sample(f_shape).astype(np.float64)
    with self.cached_session():
      x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
      f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
      output = nn_ops.conv1d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
                                                    output, y_shape)
      print("conv1d_transpose gradient err = %g " % err)
      err_tolerance = 0.0005
      self.assertLess(err, err_tolerance)

  def testConv1DTransposeSingleStrideNCW(self):
    """NCW layout, stride 1, SAME padding (CUDA only)."""
    # `NCW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.session():
        strides = [1, 1, 1]
        # Input, output: [batch, depth, width]
        x_shape = [2, 3, 4]
        y_shape = [2, 2, 4]
        # Filter: [kernel_width, output_depth, input_depth]
        f_shape = [3, 2, 3]
        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
        output = nn_ops.conv1d_transpose(
            x, f, y_shape, strides=strides, padding="SAME", data_format="NCW")
        value = self.evaluate(output)
        for n in range(x_shape[0]):
          for k in range(f_shape[1]):
            for w in range(y_shape[2]):
              # Interior positions see all 3 taps; borders lose one.
              target = 2 * 3.0
              w_in = w > 0 and w < y_shape[2] - 1
              if w_in:
                target += 3.0
              self.assertAllClose(target, value[n, k, w])

  def testConv1DTransposeSameNCW(self):
    """NCW layout, stride 2, SAME padding (CUDA only)."""
    # `NCW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.session():
        strides = [1, 1, 2]
        # Input, output: [batch, depth, width]
        x_shape = [2, 3, 4]
        y_shape = [2, 2, 8]
        # Filter: [kernel_width, output_depth, input_depth]
        f_shape = [3, 2, 3]
        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
        output = nn_ops.conv1d_transpose(
            x, f, y_shape, strides=strides, padding="SAME", data_format="NCW")
        value = self.evaluate(output)
        for n in range(x_shape[0]):
          for k in range(f_shape[1]):
            for w in range(y_shape[2]):
              target = 3.0
              # Positions aligned with the stride overlap one extra tap.
              w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
              if w_in:
                target += 3.0
              self.assertAllClose(target, value[n, k, w])

  def testConv1DTransposeValidNCW(self):
    """NCW layout, stride 2, VALID padding (CUDA only)."""
    # `NCW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.session():
        strides = [1, 1, 2]
        # Input, output: [batch, depth, width]
        x_shape = [2, 3, 4]
        y_shape = [2, 2, 9]
        # Filter: [kernel_width, output_depth, input_depth]
        f_shape = [3, 2, 3]
        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
        output = nn_ops.conv1d_transpose(
            x, f, y_shape, strides=strides, padding="VALID", data_format="NCW")
        value = self.evaluate(output)
        cache_values = np.zeros(y_shape, dtype=np.float32)
        # The amount of padding added
        pad = 1
        for n in range(x_shape[0]):
          for k in range(f_shape[1]):
            for w in range(pad, y_shape[2] - pad):
              target = 3.0
              # Positions aligned with the stride overlap one extra tap.
              w_in = w % strides[2] == 0 and w > pad and w < y_shape[2] - 1 - pad
              if w_in:
                target += 3.0
              cache_values[n, k, w] = target
            # Border positions mirror their nearest interior neighbour.
            cache_values[n, k, 0] = cache_values[n, k, 1]
            cache_values[n, k, -1] = cache_values[n, k, -2]
        self.assertAllClose(cache_values, value)
# Standard TensorFlow test entry point; allows running this file directly.
if __name__ == "__main__":
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@kernel_tests@nn_ops@conv1d_transpose_test.py@.PATH_END.py
|
{
"filename": "expint_f64_test.py",
"repo_name": "HajimeKawahara/exojax",
"repo_path": "exojax_extracted/exojax-master/tests/unittests/spec/xs/f64/expint_f64_test.py",
"type": "Python"
}
|
import numpy as np
from scipy.special import expn
from exojax.spec import rtransfer as rt
import jax.numpy as jnp
import pytest
from jax import config #
config.update("jax_enable_x64", True)
def test_comparison_expint():
    """Check rt.trans2E3 against scipy's exponential integral.

    trans2E3(x) should approximate 2*E3(x); the signed residual over a
    wide logarithmic grid must stay below 4e-8.

    NOTE(review): the signed max ignores negative deviations — consider
    np.abs(residual); confirm the tolerance before changing.
    """
    grid = np.logspace(-4, 1.9, 1000)
    residual = 2.0 * expn(3, grid) - rt.trans2E3(grid)
    assert np.max(residual) < 4.0e-8
# Allow running this check as a plain script (outside pytest).
if __name__ == "__main__":
    test_comparison_expint()
|
HajimeKawaharaREPO_NAMEexojaxPATH_START.@exojax_extracted@exojax-master@tests@unittests@spec@xs@f64@expint_f64_test.py@.PATH_END.py
|
{
"filename": "_textcase.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/colorbar/tickfont/_textcase.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcaseValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``textcase`` property of
    ``isosurface.colorbar.tickfont``.

    Accepts "normal", "word caps", "upper", or "lower"; edits trigger a
    "calc" replot unless the caller overrides ``edit_type``.
    """

    def __init__(
        self,
        plotly_name="textcase",
        parent_name="isosurface.colorbar.tickfont",
        **kwargs,
    ):
        # Install the plotly schema defaults without clobbering caller overrides.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("values", ["normal", "word caps", "upper", "lower"])
        super(TextcaseValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@colorbar@tickfont@_textcase.py@.PATH_END.py
|
{
"filename": "_customdata.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmap/_customdata.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``customdata`` property of ``choroplethmap``.

    A data array; edits trigger a "calc" replot unless overridden.
    """

    def __init__(self, plotly_name="customdata", parent_name="choroplethmap", **kwargs):
        # Install the plotly schema default without clobbering caller overrides.
        kwargs.setdefault("edit_type", "calc")
        super(CustomdataValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmap@_customdata.py@.PATH_END.py
|
{
"filename": "_dtickrange.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterpolar/marker/colorbar/tickformatstop/_dtickrange.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``dtickrange`` property of
    ``scatterpolar.marker.colorbar.tickformatstop``.

    A two-element range; both endpoints are free-form ("any") and are
    redrawn through the "colorbars" edit path.
    """

    def __init__(
        self,
        plotly_name="dtickrange",
        parent_name="scatterpolar.marker.colorbar.tickformatstop",
        **kwargs
    ):
        # Each endpoint gets its own (distinct) item-spec dict, matching the
        # original pair of literals.
        endpoint_spec = {"valType": "any", "editType": "colorbars"}
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("items", [dict(endpoint_spec), dict(endpoint_spec)])
        kwargs.setdefault("role", "info")
        super(DtickrangeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterpolar@marker@colorbar@tickformatstop@_dtickrange.py@.PATH_END.py
|
{
"filename": "Frame_Run_Plot.py",
"repo_name": "francescoa97outlook/pyExoRaMa",
"repo_path": "pyExoRaMa_extracted/pyExoRaMa-main/GUI_Plot/Frame_Run_Plot.py",
"type": "Python"
}
|
import math
import time
import tkinter as tk
import pandas as pd
from tkinter import messagebox as msgbox
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
from matplotlib import pyplot as plt
from GUI_Plot import MassRadiusDB
# Fitted degree-10 polynomial coefficients (ascending powers) for
# log10(radius) as a function of x = log10(mass), one tuple per
# pure-composition curve.  Values are copied verbatim from the original
# hand-expanded expressions.
_PURE_H2O_ICE_COEFFS = (
    0.13666292574887867,
    0.27183702181443314,
    -0.007134024332627119,
    -0.0021407416433092126,
    -0.0022608931475693915,
    -0.0002516518649610248,
    0.00011968169122553435,
    0.000011663496987412905,
    -3.536434693875541e-6,
    -1.6848230313524644e-7,
    4.4044933682275176e-8,
)
_PURE_SILICATE_COEFFS = (
    0.020013868549526272,
    0.29811170324848235,
    -0.02012734730157388,
    -0.0052918215948260265,
    -0.0003311775031243655,
    0.00004856681718363753,
    -0.00001245509278944841,
    -1.3074832660503483e-6,
    8.211419885278952e-7,
    3.47368749025812e-8,
    -1.1251826465596989e-8,
)
_PURE_FE_COEFFS = (
    -0.11408792224566819,
    0.27851883673695,
    -0.01997874049680844,
    -0.002490304269884624,
    0.00007525048500183394,
    -0.00007162041164677924,
    -0.00003393158521958243,
    8.589995554646332e-7,
    1.132375249329131e-6,
    2.2299345660512832e-8,
    -1.0475165171649914e-8,
)


def _pure_log10_radius(coeffs, x):
    """Evaluate sum_k coeffs[k] * x**k term by term.

    Terms are accumulated left to right with np.power, which reproduces the
    original hand-expanded expression bit-for-bit (np.power(x, 0) == 1.0,
    np.power(x, 1) == x, and a - b*t == a + (-b)*t in IEEE arithmetic).
    """
    return sum(c * np.power(x, k) for k, c in enumerate(coeffs))


def pureFunction(type_name, x):
    """Radius of a pure-composition planet at log-mass ``x``.

    Parameters
    ----------
    type_name : str
        "pure-high-pressure-ices", "pure-Silicates", or any other value for
        the remaining (Fe) curve — the original fall-through behaviour.
    x : float or ndarray
        log10 of the planet mass.

    Returns
    -------
    float or ndarray
        10**poly(x) for the selected composition's fitted polynomial.
    """
    if type_name == "pure-high-pressure-ices":
        coeffs = _PURE_H2O_ICE_COEFFS
    elif type_name == "pure-Silicates":
        coeffs = _PURE_SILICATE_COEFFS
    else:
        coeffs = _PURE_FE_COEFFS
    return np.power(10, _pure_log10_radius(coeffs, x))
# Fitted degree-10 polynomial coefficients (ascending powers) for the
# log10(radius) boundary curves used by rangeFunction, copied verbatim from
# the original hand-expanded expressions.  (Kept separate from the
# pureFunction tables so this definition is self-contained.)
_BAND_H2O_ICE_COEFFS = (
    0.13666292574887867,
    0.27183702181443314,
    -0.007134024332627119,
    -0.0021407416433092126,
    -0.0022608931475693915,
    -0.0002516518649610248,
    0.00011968169122553435,
    0.000011663496987412905,
    -3.536434693875541e-6,
    -1.6848230313524644e-7,
    4.4044933682275176e-8,
)
_BAND_SILICATE_COEFFS = (
    0.020013868549526272,
    0.29811170324848235,
    -0.02012734730157388,
    -0.0052918215948260265,
    -0.0003311775031243655,
    0.00004856681718363753,
    -0.00001245509278944841,
    -1.3074832660503483e-6,
    8.211419885278952e-7,
    3.47368749025812e-8,
    -1.1251826465596989e-8,
)
_BAND_FE_COEFFS = (
    -0.11408792224566819,
    0.27851883673695,
    -0.01997874049680844,
    -0.002490304269884624,
    0.00007525048500183394,
    -0.00007162041164677924,
    -0.00003393158521958243,
    8.589995554646332e-7,
    1.132375249329131e-6,
    2.2299345660512832e-8,
    -1.0475165171649914e-8,
)


def _band_boundary_radius(coeffs, x):
    """Return 10**poly(x) with the polynomial summed term by term.

    Left-to-right accumulation over np.power terms matches the original
    expanded expressions bit-for-bit.
    """
    return np.power(10, sum(c * np.power(x, k) for k, c in enumerate(coeffs)))


def rangeFunction(type_name, x, r):
    """Elementwise test of whether radius ``r`` falls in a composition band.

    Parameters
    ----------
    type_name : str
        "Fe-Silicates", "Silicates-H2O", "Envelope-H2O",
        "Envelope-Silicates", or any other value for the original
        fall-through (r above the Fe curve).
    x : float or ndarray
        log10 of the planet mass.
    r : float or ndarray
        Planet radius to classify.

    Returns
    -------
    bool or ndarray of bool
        True where r lies inside the band delimited by the fitted curves.
    """
    if type_name == "Fe-Silicates":
        return (_band_boundary_radius(_BAND_FE_COEFFS, x) < r) & (
            r < _band_boundary_radius(_BAND_SILICATE_COEFFS, x)
        )
    elif type_name == "Silicates-H2O":
        return (_band_boundary_radius(_BAND_SILICATE_COEFFS, x) < r) & (
            r < _band_boundary_radius(_BAND_H2O_ICE_COEFFS, x)
        )
    elif type_name == "Envelope-H2O":
        return _band_boundary_radius(_BAND_H2O_ICE_COEFFS, x) < r
    elif type_name == "Envelope-Silicates":
        return _band_boundary_radius(_BAND_SILICATE_COEFFS, x) < r
    else:
        return _band_boundary_radius(_BAND_FE_COEFFS, x) < r
# Coefficients (ascending powers of x = log10(Mp/M_Earth)) of the polynomial
# fits to log10(Rp/R_Earth) for the three reference composition curves
# (pure iron, pure silicates, pure H2O).  The same numbers appear in
# rangeFunction above; keep the two in sync.
_FE_COEFFS = (
    -0.11408792224566819, 0.27851883673695, -0.01997874049680844,
    -0.002490304269884624, 0.00007525048500183394, -0.00007162041164677924,
    -0.00003393158521958243, 8.589995554646332e-7, 1.132375249329131e-6,
    2.2299345660512832e-8, -1.0475165171649914e-8)
_SILICATES_COEFFS = (
    0.020013868549526272, 0.29811170324848235, -0.02012734730157388,
    -0.0052918215948260265, -0.0003311775031243655, 0.00004856681718363753,
    -0.00001245509278944841, -1.3074832660503483e-6, 8.211419885278952e-7,
    3.47368749025812e-8, -1.1251826465596989e-8)
_H2O_COEFFS = (
    0.13666292574887867, 0.27183702181443314, -0.007134024332627119,
    -0.0021407416433092126, -0.0022608931475693915, -0.0002516518649610248,
    0.00011968169122553435, 0.000011663496987412905, -3.536434693875541e-6,
    -1.6848230313524644e-7, 4.4044933682275176e-8)


def _curve_radius(coeffs, x):
    """Return the radius 10**p(x) of a composition curve.

    The polynomial p is accumulated term by term, left to right, exactly as
    the original hard-coded expressions did, so the results are numerically
    identical to the previous inline formulas.
    """
    log_r = coeffs[0]
    for k in range(1, len(coeffs)):
        log_r = log_r + coeffs[k] * np.power(x, k)
    return np.power(10, log_r)


def applyFunction(type_name, x, r):
    """Map a point (x, r) of the mass-radius plane to the value contoured
    by densityPlot for the composition region ``type_name``.

    Parameters
    ----------
    type_name : str
        One of "Fe-Silicates", "Silicates-H2O", "Envelope-H2O",
        "Envelope-Silicates"; any other value selects the Envelope-Fe case.
    x : float or ndarray
        log10 of the planet mass in Earth masses.
    r : float or ndarray
        Planet radius in Earth radii.

    Returns
    -------
    float or ndarray
        For the two core-mixture regions, the fractional position of ``r``
        between the two bracketing composition curves (0 on the lower curve,
        1 on the upper); for the "Envelope-*" regions, the envelope proxy
        (1/R_curve - 1/r) * Mp.
    """
    if type_name == "Fe-Silicates":
        r_fe = _curve_radius(_FE_COEFFS, x)
        return (r - r_fe) / (_curve_radius(_SILICATES_COEFFS, x) - r_fe)
    elif type_name == "Silicates-H2O":
        r_sil = _curve_radius(_SILICATES_COEFFS, x)
        return (r - r_sil) / (_curve_radius(_H2O_COEFFS, x) - r_sil)
    elif type_name == "Envelope-H2O":
        return (1 / _curve_radius(_H2O_COEFFS, x) - 1 / r) * np.power(10, x)
    elif type_name == "Envelope-Silicates":
        return (1 / _curve_radius(_SILICATES_COEFFS, x) - 1 / r) * np.power(10, x)
    else:  # Envelope-Fe
        return (1 / _curve_radius(_FE_COEFFS, x) - 1 / r) * np.power(10, x)
def helpButtonFunc():
    """Display the pop-up window that documents the plot-control options."""
    info_text = (
        "Plot options. \n\nThe user can plot planets with mass and radius measured with a relative uncertainty better than a specific threshold (in %). \nThe threshold can be increased/decreased by steps of 1% by using the +/- button. \nBy pushing the button '\u25B6', the threshold (for mass or radius) is increased automatically by steps of 1%, and the mass-radius diagram is updated in a sequence, adding progressively to the plot the planets with less precise measurements. \n\nFrom the drop down menu, the user can select the parameter to be used in the 3D colormap plot. \n\nThe green button 'Plot' will refresh the mass-radius diagram after any change to the input values and selected options made by the user.")
    msgbox.showinfo(title="INFO", message=info_text)
class Frame_Run_Plot:
    """Frame holding the run/plot controls of the mass-radius diagram GUI.

    It owns the uncertainty-threshold entries and stepper/auto-run buttons,
    reads all filter settings from the other input frames, filters the
    planet catalogue accordingly and draws the mass-radius diagram plus the
    side histograms.
    """
    # --- Widgets -----------------------------------------------------------
    cbl_third_coord = None
    cbl_cmap = None
    gui = None  # back-reference to the main GUI object
    frame_run_plot = None
    label = None
    mass_step = None  # Entry: sigma(Mp)/Mp threshold in percent
    mass_back_step_btn = None
    mass_next_step_btn = None
    mass_start_step_btn = None
    mass_verse_btn = None
    mass_label_verse = None
    radius_step = None  # Entry: sigma(Rp)/Rp threshold in percent
    radius_back_step_btn = None
    radius_next_step_btn = None
    radius_start_step_btn = None
    radius_verse_btn = None
    radius_label_verse = None
    plot_current_situation_btn = None
    data0 = None  # full planet catalogue (pandas DataFrame)
    # Internal Variables
    # Plot window limits and axis scales ("Log"/"Linear"), cached from the GUI.
    mmin = None
    mmax = None
    rmin = None
    rmax = None
    xscale = None
    yscale = None
    # Filter ranges read from the input frames.
    age_host_min = None
    age_host_max = None
    Teff_min = None
    Teff_max = None
    FeHdex_min = None
    FeHdex_max = None
    mstar_min = None
    mstar_max = None
    rstar_min = None
    rstar_max = None
    Porb_min = None
    Porb_max = None
    aorb_min = None
    aorb_max = None
    eccentricity_min = None
    eccentricity_max = None
    Teq_min = None
    Teq_max = None
    # Current relative-uncertainty thresholds (percent).
    sigmaMpercent = None
    sigmaRpercent = None
    # Histogram options (bin counts and linear/log count axes).
    histmassbin = None
    histradiusbin = None
    histzetabin = None
    logcountinmass = None
    logcountinradius = None
    # Envelope / composition-curve options (env2 holds the selected envelope
    # name; env1 is True when an envelope is selected at all).
    env2 = None
    env1 = None
    env3 = None
    env4 = None
    # User-added planet options (add/filter/label check-buttons).
    add1 = None
    filter1 = None
    add2 = None
    np2 = None
    # Working data built by dataAcquisition.
    subsetdata = None
    newPlanets = None
    global_stop_mass = None  # stop flag for the mass auto-run loop
    global_stop_radius = None  # stop flag for the radius auto-run loop
    mass_radius_plot = None
    number_element_plot_density = None
    fullDBMatrix = None
    ticks_x = None
    ticks_y = None
    names = None
    # Scatter artists and the hover annotation.
    sc = None
    sc2 = None
    sc1 = None
    sc3 = None
    annot = None
    num_new_planets = None
    newcmp = None
    show_error_plot = None
    # Unit-conversion factors and catalogue column indices (set in __init__).
    mass_coeff = None
    radius_coeff = None
    index_ecc = None
    index_FeH = None
    index_tstar = None
    index_mass_max = None
    index_p_orb = None
    index_a_orb = None
    index_teq = None
    index_mass_min = None
    index_min_rad = None
    index_mass_star = None
    index_radius_star = None
    index_rad_max = None
    index_rad_p = None
    index_mass_p = None
    index_age_host = None
    # Availability flags of the optional catalogue columns.
    check_ecc = None
    check_FeH = None
    check_tstar = None
    check_p_orb = None
    check_a_orb = None
    check_teq = None
    check_mass_star = None
    check_radius_star = None
    check_age_host = None
    # Third-coordinate (colour map) selection widgets.
    choose_filter_map_var = None
    choose_filter_map = None
    max_val = None
    min_val = None
    chosen_index = None
    check = None
    coeff = None
    help_button = None
    # Font sizes and tick counts read from the export-options frame.
    font_labels = None
    font_ticks = None
    ticks_y_lim = None
    ticks_x_lim = None
    show_all_planets_labels = None
    core_contours = None
    # Optional "only multi-planet systems" filter.
    get_only_planetary_system = None
    number_planets_system = None
    def __init__(self, window, gui, data0, mass_coeff, radius_coeff, index_ecc, index_FeH, index_tstar, index_mass_max,
                 index_p_orb, index_a_orb, index_teq, index_mass_min, index_min_rad,
                 index_mass_star, index_radius_star, index_rad_max, index_rad_p, index_mass_p, index_age_host,
                 check_age_host, check_ecc, check_FeH, check_tstar, check_p_orb, check_a_orb, check_teq,
                 check_mass_star, check_radius_star):
        """Build the run/plot control frame inside *window*.

        Stores the planet catalogue (*data0*), the mass/radius unit
        conversion factors, the catalogue column indices (index_*) and the
        availability flags (check_*) of the optional columns; then creates
        the two threshold rows (mass / radius), the auto-run controls, the
        third-coordinate drop-down menu and the main PLOT button.
        """
        # --- catalogue, conversion factors and column bookkeeping ---------
        self.data0 = data0
        self.mass_coeff = mass_coeff
        self.radius_coeff = radius_coeff
        self.index_ecc = index_ecc
        self.index_FeH = index_FeH
        self.index_tstar = index_tstar
        self.index_mass_max = index_mass_max
        self.index_p_orb = index_p_orb
        self.index_a_orb = index_a_orb
        self.index_teq = index_teq
        self.index_mass_min = index_mass_min
        self.index_min_rad = index_min_rad
        self.index_mass_star = index_mass_star
        self.index_radius_star = index_radius_star
        self.index_rad_max = index_rad_max
        self.index_rad_p = index_rad_p
        self.index_mass_p = index_mass_p
        self.index_age_host = index_age_host
        self.check_age_host = check_age_host
        self.check_ecc = check_ecc
        self.check_FeH = check_FeH
        self.check_tstar = check_tstar
        self.check_p_orb = check_p_orb
        self.check_a_orb = check_a_orb
        self.check_teq = check_teq
        self.check_mass_star = check_mass_star
        self.check_radius_star = check_radius_star
        self.gui = gui
        self.number_element_plot_density = 5
        # --- row 0: mass-uncertainty threshold controls -------------------
        self.frame_run_plot = tk.Frame(window, highlightbackground="black", highlightthickness=1, padx=5, pady=2)
        self.label = tk.Label(master=self.frame_run_plot, text='\u03C3Mp/Mp(%)', fg="blue", font=('Sans', '9', 'bold'))
        self.label.grid(column=0, row=0)
        self.mass_step = tk.Entry(master=self.frame_run_plot, width=4)
        self.mass_step.grid(column=1, row=0)
        self.mass_step.insert(-1, "50")
        self.mass_back_step_btn = tk.Button(master=self.frame_run_plot, text="-", command=self.massStepBackBtn,
                                            bg="#cc0099", font=('Sans', '9', 'bold'))
        self.mass_back_step_btn.grid(column=2, row=0)
        # "\u25B6" (play) starts the automatic threshold sweep.
        self.mass_start_step_btn = tk.Button(master=self.frame_run_plot, text="\u25B6", command=self.massRunBtn,
                                             bg="#ffff00", font=('Sans', '9', 'bold'))
        self.mass_start_step_btn.grid(column=3, row=0)
        self.mass_next_step_btn = tk.Button(master=self.frame_run_plot, text="+", command=self.massStepForwardBtn,
                                            bg="#c65353", font=('Sans', '9', 'bold'))
        self.mass_next_step_btn.grid(column=4, row=0)
        self.mass_verse_btn = tk.Button(master=self.frame_run_plot, text="Verse", bg="#669999",
                                        font=('Sans', '9', 'bold'), command=self.massChangeVerse)
        self.mass_verse_btn.grid(column=5, row=0)
        self.mass_label_verse = tk.Label(master=self.frame_run_plot, text='Forward', fg="#ff6600",
                                         font=('Sans', '9', 'bold'), borderwidth=2, relief="ridge")
        self.mass_label_verse.grid(column=6, row=0)
        self.help_button = tk.Button(master=self.frame_run_plot, text="?", command=helpButtonFunc, bg="black",
                                     fg="yellow", font=('Sans', '10', 'bold'))
        self.help_button.grid(column=9, row=0)
        # --- row 1: radius-uncertainty threshold controls -----------------
        # NOTE: self.label is deliberately reused for the second caption;
        # only the widget reference to the first label is dropped.
        self.label = tk.Label(master=self.frame_run_plot, text='\u03C3Rp/Rp(%)', fg="blue", font=('Sans', '9', 'bold'))
        self.label.grid(column=0, row=1)
        self.radius_step = tk.Entry(master=self.frame_run_plot, width=4)
        self.radius_step.grid(column=1, row=1)
        self.radius_step.insert(-1, "20")
        self.radius_back_step_btn = tk.Button(master=self.frame_run_plot, text="-", command=self.radiusStepBackBtn,
                                              bg="#cc0099", font=('Sans', '9', 'bold'))
        self.radius_back_step_btn.grid(column=2, row=1)
        self.radius_start_step_btn = tk.Button(master=self.frame_run_plot, text="\u25B6", command=self.radiusRunBtn,
                                               bg="#ffff00", font=('Sans', '9', 'bold'))
        self.radius_start_step_btn.grid(column=3, row=1)
        self.radius_next_step_btn = tk.Button(master=self.frame_run_plot, text="+", command=self.radiusStepForwardBtn,
                                              bg="#c65353", font=('Sans', '9', 'bold'))
        self.radius_next_step_btn.grid(column=4, row=1)
        self.radius_verse_btn = tk.Button(master=self.frame_run_plot, text="Verse", bg="#669999",
                                          font=('Sans', '9', 'bold'),
                                          command=self.radiusChangeVerse)
        self.radius_verse_btn.grid(column=5, row=1)
        self.radius_label_verse = tk.Label(master=self.frame_run_plot, text='Forward', fg="#ff6600",
                                           font=('Sans', '9', 'bold'), borderwidth=2, relief="ridge")
        self.radius_label_verse.grid(column=6, row=1)
        # --- third-coordinate menu: only columns actually available -------
        options_list = ["None"]
        if self.check_teq:
            options_list.append("Planet Temp")
        if self.check_mass_star:
            options_list.append("Star Mass")
        if self.check_radius_star:
            options_list.append("Star Radius")
        if self.check_tstar:
            options_list.append("Star Temp")
        if self.check_FeH:
            options_list.append("[Fe/H]")
        if self.check_ecc:
            options_list.append("Eccentricity")
        if self.check_age_host:
            options_list.append("Age")
        if self.check_p_orb:
            options_list.append("Orbital Period")
        if self.check_a_orb:
            options_list.append("Semi-major axis")
        self.choose_filter_map_var = tk.StringVar()
        self.choose_filter_map = tk.OptionMenu(self.frame_run_plot, self.choose_filter_map_var, *options_list)
        self.choose_filter_map.grid(column=7, row=0, rowspan=2)
        self.choose_filter_map_var.set("None")
        # --- main PLOT button ---------------------------------------------
        self.plot_current_situation_btn = tk.Button(master=self.frame_run_plot, text="PLOT", width=15,
                                                    bg="#00ff00", font=('Sans', '13', 'bold'),
                                                    command=self.plotCurrentSituation)
        self.plot_current_situation_btn.grid(column=8, row=0, rowspan=2)
        self.frame_run_plot.pack(padx=3, pady=3)
def massStepBackBtn(self):
self.stepBackForwMass(int(self.mass_step.get()), -1)
def massStepForwardBtn(self):
self.stepBackForwMass(int(self.mass_step.get()), 1)
def stepBackForwMass(self, val, step):
self.mass_step.delete(0, tk.END)
self.mass_step.insert(-1, val + step)
self.executeRoutine(int(self.mass_step.get()), int(self.radius_step.get()))
    def massRunBtn(self):
        """Start/pause the automatic sweep of the mass-uncertainty threshold.

        While playing, the threshold is stepped by 1% every second (up to
        100% forward, down to 0% backward, depending on the 'Verse' label)
        and the plot is refreshed at each step.  Pressing the button again
        raises the stop flag that breaks the loop.
        """
        if self.mass_start_step_btn["text"] == "\u25B6":
            # Play pressed: clear the stop flag and switch the icon to pause.
            self.global_stop_mass = False
            self.mass_start_step_btn["text"] = "\u23F8"
            var_start = int(self.mass_step.get())
            if self.mass_label_verse["text"] == "Forward":
                var_stop = 100
                var_step = 1
            else:
                var_stop = 0
                var_step = -1
            for i in range(var_start, var_stop, var_step):
                if self.global_stop_mass:
                    # Pause was pressed during a window.update() callback.
                    break
                time.sleep(1)
                self.mass_step.delete(0, tk.END)
                self.mass_step.insert(-1, str(i))
                self.executeRoutine(i, int(self.radius_step.get()))
                # Process pending Tk events so the redrawn plot (and a
                # possible pause click) are handled between steps.
                self.gui.window.update()
        else:
            # Pause pressed: restore the play icon and signal the loop to stop.
            self.mass_start_step_btn["text"] = "\u25B6"
            self.global_stop_mass = True
            self.gui.window.update()
def massChangeVerse(self):
if self.mass_label_verse["text"] == "Forward":
self.mass_label_verse["text"] = "Backward"
else:
self.mass_label_verse["text"] = "Forward"
def plotCurrentSituation(self):
self.executeRoutine(int(self.mass_step.get()), int(self.radius_step.get()))
def radiusStepBackBtn(self):
self.stepBackForwRadius(int(self.radius_step.get()), -1)
def radiusStepForwardBtn(self):
self.stepBackForwRadius(int(self.radius_step.get()), 1)
def stepBackForwRadius(self, val, step):
self.radius_step.delete(0, tk.END)
self.radius_step.insert(-1, val + step)
self.executeRoutine(int(self.mass_step.get()), int(self.radius_step.get()))
    def radiusRunBtn(self):
        """Start/pause the automatic sweep of the radius-uncertainty threshold.

        Mirror of massRunBtn: while playing, the threshold is stepped by 1%
        every second (towards 100% forward, towards 0% backward) and the
        plot is refreshed at each step; pressing again raises the stop flag.
        """
        if self.radius_start_step_btn["text"] == "\u25B6":
            # Play pressed: clear the stop flag and switch the icon to pause.
            self.global_stop_radius = False
            self.radius_start_step_btn["text"] = "\u23F8"
            var_start = int(self.radius_step.get())
            if self.radius_label_verse["text"] == "Forward":
                var_stop = 100
                var_step = 1
            else:
                var_stop = 0
                var_step = -1
            for i in range(var_start, var_stop, var_step):
                if self.global_stop_radius:
                    # Pause was pressed during a window.update() callback.
                    break
                time.sleep(1)
                self.radius_step.delete(0, tk.END)
                self.radius_step.insert(-1, str(i))
                self.executeRoutine(int(self.mass_step.get()), i)
                # Process pending Tk events between steps.
                self.gui.window.update()
        else:
            # Pause pressed: restore the play icon and signal the loop to stop.
            self.radius_start_step_btn["text"] = "\u25B6"
            self.global_stop_radius = True
            self.gui.window.update()
def radiusChangeVerse(self):
if self.radius_label_verse["text"] == "Forward":
self.radius_label_verse["text"] = "Backward"
else:
self.radius_label_verse["text"] = "Forward"
def dataAcquisition(self, sigmaMpercent, sigmaRpercent):
self.ticks_x_lim = int(self.gui.frame_input_master.frame_export_file.x_ticks_entry.get())
self.ticks_y_lim = int(self.gui.frame_input_master.frame_export_file.y_ticks_entry.get())
self.font_labels = float(self.gui.frame_input_master.frame_export_file.font_labels_entry.get())
self.font_ticks = float(self.gui.frame_input_master.frame_export_file.font_ticks_entry.get())
self.mmin = float(self.gui.frame_input_master.frame_scale_plot.mass_min.get())
self.mmax = float(self.gui.frame_input_master.frame_scale_plot.mass_max.get())
self.rmin = float(self.gui.frame_input_master.frame_scale_plot.radius_min.get())
self.rmax = float(self.gui.frame_input_master.frame_scale_plot.radius_max.get())
self.xscale = self.gui.frame_input_master.frame_scale_plot.mass_label_scale['text']
self.yscale = self.gui.frame_input_master.frame_scale_plot.radius_label_scale['text']
self.age_host_min = float(self.gui.frame_input_master.frame_input_planet.age_host_min.get())
self.age_host_max = float(self.gui.frame_input_master.frame_input_planet.age_host_max.get())
self.Teff_min = float(self.gui.frame_input_master.frame_input_planet.t_eff_star_min.get())
self.Teff_max = float(self.gui.frame_input_master.frame_input_planet.t_eff_star_max.get())
self.FeHdex_min = float(self.gui.frame_input_master.frame_input_planet.Fe_H_min.get())
self.FeHdex_max = float(self.gui.frame_input_master.frame_input_planet.Fe_H_max.get())
self.mstar_min = float(self.gui.frame_input_master.frame_input_planet.M_star_min.get())
self.mstar_max = float(self.gui.frame_input_master.frame_input_planet.M_star_max.get())
self.rstar_min = float(self.gui.frame_input_master.frame_input_planet.R_star_min.get())
self.rstar_max = float(self.gui.frame_input_master.frame_input_planet.R_star_max.get())
self.Porb_min = float(self.gui.frame_input_master.frame_input_planet.P_orb_planet_min.get())
self.Porb_max = float(self.gui.frame_input_master.frame_input_planet.P_orb_planet_max.get())
self.aorb_min = float(self.gui.frame_input_master.frame_input_planet.semi_major_axes_planet_min.get())
self.aorb_max = float(self.gui.frame_input_master.frame_input_planet.semi_major_axes_planet_max.get())
self.eccentricity_min = float(self.gui.frame_input_master.frame_input_planet.eccentricity_planet_min.get())
self.eccentricity_max = float(self.gui.frame_input_master.frame_input_planet.eccentricity_planet_max.get())
self.show_error_plot = self.gui.frame_input_master.frame_input_planet.show_error_plot_var.get()
self.show_all_planets_labels = self.gui.frame_input_master.frame_input_planet.show_planets_labels_var.get()
self.get_only_planetary_system = self.gui.frame_input_master.frame_input_planet.get_only_planetary_system_var.get()
if self.get_only_planetary_system:
self.number_planets_system = int(self.gui.frame_input_master.frame_input_planet.number_planets_system.get())
if self.number_planets_system <= 1:
msgbox.showerror(title="ERROR", message="The number of planets must be greater than 1")
return
self.Teq_min = float(self.gui.frame_input_master.frame_input_planet.T_eq_planet_min.get())
self.Teq_max = float(self.gui.frame_input_master.frame_input_planet.T_eq_planet_max.get())
self.histmassbin = int(self.gui.frame_input_master.frame_histogram_info.mass_bin_var.get())
self.histradiusbin = int(self.gui.frame_input_master.frame_histogram_info.radius_bin_var.get())
self.histzetabin = int(self.gui.frame_input_master.frame_histogram_info.zeta_bin_var.get())
self.logcountinmass = self.gui.frame_input_master.frame_histogram_info.mass_label_plot["text"]
self.logcountinradius = self.gui.frame_input_master.frame_histogram_info.radius_label_plot["text"]
self.env2 = self.gui.frame_input_master.frame_envelope_plot.label_envelope["text"]
self.core_contours = self.gui.frame_input_master.frame_envelope_plot.core_contours_var.get()
self.env1 = (self.env2 != "None")
self.env3 = self.gui.frame_input_master.frame_pure_hydrogen.mass_radius_check_var.get()
self.env4 = self.gui.frame_input_master.frame_pure_hydrogen.central_density_check_var.get()
self.add1 = self.gui.frame_input_master.frame_new_planet.add_new_planet_check_var.get()
self.filter1 = self.gui.frame_input_master.frame_new_planet.filter_new_planet_check_var.get()
self.add2 = self.gui.frame_input_master.frame_new_planet.label_new_planet_check_var.get()
data2 = self.data0
# PLANETARY SYSTEM
arr_letter = ["b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
"u", "v", "w", "x", "y", "z"]
if self.get_only_planetary_system:
names = data2.iloc[:, 0]
index = [idx for idx, s in enumerate(names) if s[-1] in arr_letter]
names2 = np.array(names[index].array)
names3 = list()
for name in names2:
names3.append(name[:-1])
names4 = np.unique(names3)
dizionario_planet = dict()
for elem in names4:
occur = names3.count(elem)
if occur >= self.number_planets_system:
dizionario_planet[elem] = names3.count(elem)
index2 = [idx for idx, s in enumerate(names) if s[:-1] in dizionario_planet.keys()]
data2 = data2.iloc[index2, :]
self.subsetdata = data2[
(self.mmin <= data2[self.index_mass_p] * self.mass_coeff) & (
data2[self.index_mass_p] * self.mass_coeff <= self.mmax) &
(self.rmin <= data2[self.index_rad_p] * self.radius_coeff) & (
data2[self.index_rad_p] * self.radius_coeff <= self.rmax) &
(data2[self.index_rad_max] / data2[self.index_rad_p] <= sigmaRpercent / 100) &
(data2[self.index_min_rad] / data2[self.index_rad_p] <= sigmaRpercent / 100) &
(data2[self.index_mass_min] / data2[self.index_mass_p] <= sigmaMpercent / 100) &
(data2[self.index_mass_max] / data2[self.index_mass_p] <= sigmaMpercent / 100) &
((data2[self.index_rad_p] * self.radius_coeff) ** 4 / (data2[self.index_mass_p] * self.mass_coeff) > 0.01)]
if self.check_p_orb:
self.subsetdata = self.subsetdata[(self.Porb_min <= self.subsetdata[self.index_p_orb]) & (
self.subsetdata[self.index_p_orb] <= self.Porb_max)]
if self.check_teq:
self.subsetdata = self.subsetdata[
(self.Teq_min <= self.subsetdata[self.index_teq]) & (self.subsetdata[self.index_teq] <= self.Teq_max)]
if self.check_tstar:
self.subsetdata = self.subsetdata[(self.Teff_min <= self.subsetdata[self.index_tstar]) & (
self.subsetdata[self.index_tstar] <= self.Teff_max)]
if self.check_ecc:
self.subsetdata = self.subsetdata[(self.eccentricity_min <= self.subsetdata[self.index_ecc]) & (
self.subsetdata[self.index_ecc] <= self.eccentricity_max)]
if self.check_mass_star:
self.subsetdata = self.subsetdata[(self.mstar_min <= self.subsetdata[self.index_mass_star]) & (
self.subsetdata[self.index_mass_star] <= self.mstar_max)]
if self.check_radius_star:
self.subsetdata = self.subsetdata[(self.rstar_min <= self.subsetdata[self.index_radius_star]) & (
self.subsetdata[self.index_radius_star] <= self.rstar_max)]
if self.check_a_orb:
self.subsetdata = self.subsetdata[(self.aorb_min <= self.subsetdata[self.index_a_orb]) & (
self.subsetdata[self.index_a_orb] <= self.aorb_max)]
if self.check_FeH:
self.subsetdata = self.subsetdata[(self.FeHdex_min <= self.subsetdata[self.index_FeH]) & (
self.subsetdata[self.index_FeH] <= self.FeHdex_max)]
if self.check_age_host:
self.subsetdata = self.subsetdata[(self.age_host_min <= self.subsetdata[self.index_age_host]) & (
self.subsetdata[self.index_age_host] <= self.age_host_max)]
# NEW PLANET INPUT
self.num_new_planets = 0
temp_df = self.gui.frame_input_master.frame_new_planet.input_list
if self.add1 and len(temp_df) > 0:
self.newPlanets = pd.DataFrame(
{0: temp_df["Name"], self.index_mass_p: temp_df["Mass_p"], self.index_mass_min: temp_df["Mass_sn_p"],
self.index_mass_max: temp_df["Mass_sp_p"], self.index_rad_p: temp_df["Radius_p"],
self.index_min_rad: temp_df["Radius_sn_p"],
self.index_rad_max: temp_df["Radius_sp_p"]})
if self.check_age_host:
self.newPlanets[self.index_age_host] = temp_df["Age"]
if self.check_tstar:
self.newPlanets[self.index_tstar] = temp_df["Tstar"]
if self.check_mass_star:
self.newPlanets[self.index_mass_star] = temp_df["Mstar"]
if self.check_radius_star:
self.newPlanets[self.index_radius_star] = temp_df["Rstar"]
if self.check_p_orb:
self.newPlanets[self.index_p_orb] = temp_df["p_orb"]
if self.check_a_orb:
self.newPlanets[self.index_a_orb] = temp_df["a_orb"]
if self.check_ecc:
self.newPlanets[self.index_ecc] = temp_df["Ecc"]
if self.check_teq:
self.newPlanets[self.index_teq] = temp_df["tPlanet"]
if self.check_FeH:
self.newPlanets[self.index_FeH] = temp_df["[Fe/H]"]
self.newPlanets = self.newPlanets[
(self.mmin <= self.newPlanets[self.index_mass_p] * self.mass_coeff) & (
self.newPlanets[self.index_mass_p] * self.mass_coeff <= self.mmax) &
(self.rmin <= self.newPlanets[self.index_rad_p] * self.radius_coeff) & (
self.newPlanets[self.index_rad_p] * self.radius_coeff <= self.rmax)]
if self.filter1:
self.newPlanets = self.newPlanets[
(self.newPlanets[self.index_rad_max] / self.newPlanets[self.index_rad_p] <= sigmaRpercent / 100) &
(self.newPlanets[self.index_min_rad] / self.newPlanets[self.index_rad_p] <= sigmaRpercent / 100) &
(self.newPlanets[self.index_mass_min] / self.newPlanets[self.index_mass_p] <= sigmaMpercent / 100) &
(self.newPlanets[self.index_mass_max] / self.newPlanets[self.index_mass_p] <= sigmaMpercent / 100) &
((self.newPlanets[self.index_rad_p] * self.radius_coeff) ** 4 / (
self.newPlanets[self.index_mass_p] * self.mass_coeff) > 0.01)]
if self.check_p_orb:
self.newPlanets = self.newPlanets[(self.Porb_min <= self.newPlanets[self.index_p_orb]) & (
self.newPlanets[self.index_p_orb] <= self.Porb_max)]
if self.check_teq:
self.newPlanets = self.newPlanets[(self.Teq_min <= self.newPlanets[self.index_teq]) & (
self.newPlanets[self.index_teq] <= self.Teq_max)]
if self.check_tstar:
self.newPlanets = self.newPlanets[(self.Teff_min <= self.newPlanets[self.index_tstar]) & (
self.newPlanets[self.index_tstar] <= self.Teff_max)]
if self.check_ecc:
self.newPlanets = self.newPlanets[(self.eccentricity_min <= self.newPlanets[self.index_ecc]) & (
self.newPlanets[self.index_ecc] <= self.eccentricity_max)]
if self.check_mass_star:
self.newPlanets = self.newPlanets[(self.mstar_min <= self.newPlanets[self.index_mass_star]) & (
self.newPlanets[self.index_mass_star] <= self.mstar_max)]
if self.check_radius_star:
self.newPlanets = self.newPlanets[(self.rstar_min <= self.newPlanets[self.index_radius_star]) & (
self.newPlanets[self.index_radius_star] <= self.rstar_max)]
if self.check_a_orb:
self.newPlanets = self.newPlanets[(self.aorb_min <= self.newPlanets[self.index_a_orb]) & (
self.newPlanets[self.index_a_orb] <= self.aorb_max)]
if self.check_FeH:
self.newPlanets = self.newPlanets[(self.FeHdex_min <= self.newPlanets[self.index_FeH]) & (
self.newPlanets[self.index_FeH] <= self.FeHdex_max)]
if self.check_age_host:
self.newPlanets = self.newPlanets[(self.age_host_min <= self.newPlanets[self.index_age_host]) & (
self.newPlanets[self.index_age_host] <= self.age_host_max)]
self.subsetdata = self.subsetdata.append(self.newPlanets, ignore_index=True)
self.num_new_planets = len(self.newPlanets)
self.names = np.array(self.subsetdata[0])
def plotHistogramMass(self):
array = self.subsetdata[self.index_mass_p] * self.mass_coeff
self.gui.frame_output_plot.histogram_mass.clear()
self.ticks_x = np.linspace(self.mmin, self.mmax, self.ticks_x_lim)
self.gui.frame_output_plot.histogram_mass.set_title('Histogram of Mp/M⊕', fontsize=self.font_labels)
if self.xscale == "Log":
hist, bins, _ = plt.hist(array, self.histmassbin)
self.gui.frame_output_plot.histogram_mass.axes.set_xscale("log")
self.ticks_x = np.logspace(math.log10(min(array)), math.log10(max(array)), self.ticks_x_lim)
histmassbin = np.logspace(np.log10(bins[0]), np.log10(bins[-1]), len(bins))
arr = self.gui.frame_output_plot.histogram_mass.hist(array, histmassbin, color='#f9d616', edgecolor='black')
else:
arr = self.gui.frame_output_plot.histogram_mass.hist(array, self.histmassbin, color='#f9d616',
edgecolor='black')
if len(self.ticks_x) >= 14:
index = [i for i in range(2, len(self.ticks_x), 2)]
self.ticks_x = np.delete(self.ticks_x, index)
self.gui.frame_output_plot.histogram_mass.axes.set_xlim(xmin=self.mmin, xmax=self.mmax)
try:
self.gui.frame_output_plot.histogram_mass.axes.set_xticks(self.ticks_x)
except:
print("")
self.gui.frame_output_plot.histogram_mass.axes.set_xticklabels(np.round(self.ticks_x, 1),
fontsize=self.font_ticks)
if self.logcountinmass == "Count":
self.gui.frame_output_plot.histogram_mass.set_ylabel('Count', fontsize=self.font_labels)
else:
self.gui.frame_output_plot.histogram_mass.axes.set_yscale("log")
self.gui.frame_output_plot.histogram_mass.set_ylabel('Log Count', fontsize=self.font_labels)
self.gui.frame_output_plot.histogram_mass.axes.minorticks_off()
for i in range(self.histmassbin):
self.gui.frame_output_plot.histogram_mass.text(arr[1][i], arr[0][i], str(arr[0][i]), fontsize=7)
def plotHistogramRadius(self):
array = self.subsetdata[self.index_rad_p] * self.radius_coeff
self.gui.frame_output_plot.histogram_radius.clear()
self.ticks_y = np.linspace(self.rmin, self.rmax, self.ticks_y_lim)
self.gui.frame_output_plot.histogram_radius.set_title('Histogram of Rp/R⊕', fontsize=self.font_labels)
if self.yscale == "Log":
hist, bins, _ = plt.hist(array, self.histradiusbin)
self.gui.frame_output_plot.histogram_radius.axes.set_yscale("log")
self.ticks_y = np.logspace(math.log10(min(array)), math.log10(max(array)), self.ticks_y_lim)
histradiusbin = np.logspace(np.log10(bins[0]), np.log10(bins[-1]), len(bins))
arr = self.gui.frame_output_plot.histogram_radius.hist(array, histradiusbin, orientation="horizontal",
color='#f9d616', edgecolor='black')
else:
arr = self.gui.frame_output_plot.histogram_radius.hist(array, self.histradiusbin, orientation="horizontal",
color='#f9d616', edgecolor='black')
if len(self.ticks_y) >= 20:
index = [i for i in range(2, len(self.ticks_y), 2)]
self.ticks_y = np.delete(self.ticks_y, index)
self.gui.frame_output_plot.histogram_radius.axes.set_ylim(ymin=self.rmin, ymax=self.rmax)
try:
self.gui.frame_output_plot.histogram_radius.axes.set_yticks(self.ticks_y)
except:
print("")
self.gui.frame_output_plot.histogram_radius.axes.set_yticklabels(np.round(self.ticks_y, 1))
plt.setp(self.gui.frame_output_plot.histogram_radius.axes.get_yticklabels(), rotation=-90, fontsize=7,
horizontalalignment='right')
if self.logcountinradius == "Count":
self.gui.frame_output_plot.histogram_radius.set_xlabel('Count', fontsize=self.font_labels)
else:
self.gui.frame_output_plot.histogram_radius.axes.set_xscale("log")
self.gui.frame_output_plot.histogram_radius.set_xlabel('Log Count', fontsize=self.font_labels)
self.gui.frame_output_plot.histogram_radius.axes.minorticks_off()
for i in range(self.histradiusbin):
self.gui.frame_output_plot.histogram_radius.text(arr[0][i], arr[1][i], str(arr[0][i]), fontsize=7)
self.ticks_y = np.append(self.ticks_y, self.rmax)
def plotHistogramZeta(self):
array = (self.subsetdata[self.index_rad_p] * self.radius_coeff) / (
(self.subsetdata[self.index_mass_p] * self.mass_coeff) ** (1 / 4))
self.gui.frame_output_plot.histogram_zeta.clear()
self.gui.frame_output_plot.histogram_zeta.set_title('Histogram of \nζ = (Rp/R⊕)/(Mp/M⊕)^1/4',
fontsize=self.font_labels)
hist, bins, _ = plt.hist(array, self.histzetabin)
self.gui.frame_output_plot.histogram_zeta.axes.set_xscale("log")
ticks = np.logspace(math.log10(min(array)), math.log10(max(array)), self.histzetabin)
histzetabin = np.logspace(np.log10(bins[0]), np.log10(bins[-1]), len(bins))
arr = self.gui.frame_output_plot.histogram_zeta.hist(array, histzetabin, color='#f9d616', edgecolor='black')
if len(ticks) >= 6:
index = [i for i in range(2, len(ticks), 2)]
ticks = np.delete(ticks, index)
try:
self.gui.frame_output_plot.histogram_zeta.axes.set_xticks(ticks)
except:
print("")
self.gui.frame_output_plot.histogram_zeta.axes.set_xticklabels(np.round(ticks, 1), fontsize=self.font_ticks)
self.gui.frame_output_plot.histogram_zeta.axes.minorticks_off()
# self.gui.frame_output_plot.histogram_zeta.set_ylabel('Count', fontsize=self.font_labels)
for i in range(self.histzetabin):
self.gui.frame_output_plot.histogram_zeta.text(arr[1][i], arr[0][i], str(arr[0][i]), fontsize=7)
def plotMassRadius(self):
self.gui.frame_output_plot.mass_radius_plot.clear()
if self.xscale == "Log":
self.gui.frame_output_plot.mass_radius_plot.axes.set_xscale("log")
if self.yscale == "Log":
self.gui.frame_output_plot.mass_radius_plot.axes.set_yscale("log")
if self.env1:
cmp = plt.cm.get_cmap(self.gui.frame_input_master.frame_envelope_plot.choose_cmap_var.get())
cmp = cmp((np.linspace(0, 1, 500)))
cmp[:, 3] = 0.6
transp = [0, 0, 0, 0]
cmp[0, :] = transp
self.newcmp = colors.ListedColormap(cmp)
if self.env2 == "H20":
self.H2OPlot()
elif self.env2 == "Silicates":
self.SilicatesPlot()
else:
self.FePlot()
self.plotPureLine()
# just plot mass - radius curves for pure - Hydrogen composition at different specific entropy values
# according to Becker et al . 2014 ApJS
if self.env3:
self.plotMassRadiusHydrogen()
if self.env4:
self.plotMassRadiusHydrogenCentralDensity()
self.plotPlanetTepCat()
self.plotPlanetSolarSystem()
if self.add1 and self.num_new_planets > 0:
self.plotPlanetInput()
self.gui.frame_output_plot.mass_radius_plot.axes.minorticks_off()
self.gui.frame_output_plot.mass_radius_plot.axes.set_xlim(xmin=self.mmin, xmax=self.mmax)
self.gui.frame_output_plot.mass_radius_plot.axes.set_ylim(ymin=self.rmin, ymax=self.rmax)
try:
self.gui.frame_output_plot.mass_radius_plot.axes.set_xticks(self.ticks_x)
except:
print("")
self.gui.frame_output_plot.mass_radius_plot.axes.set_xticklabels(np.round(self.ticks_x, 1),
fontsize=self.font_ticks)
try:
self.gui.frame_output_plot.mass_radius_plot.axes.set_yticks(self.ticks_y)
except:
print("")
self.gui.frame_output_plot.mass_radius_plot.axes.set_yticklabels(np.round(self.ticks_y, 1),
fontsize=self.font_ticks)
self.gui.frame_output_plot.mass_radius_plot.set_ylabel("Planet Radius (Rp/R⊕)", fontsize=self.font_labels)
self.gui.frame_output_plot.mass_radius_plot.set_xlabel("Planet Mass (Mp/M⊕)", fontsize=self.font_labels)
self.gui.frame_output_plot.mass_radius_plot.set_title(
"Planet Mass-Radius: \n\u03C3Mp/Mp(%)<=" + str(self.mass_step.get()) + "% \u03C3Rp/Rp(%)<=" + str(
self.radius_step.get()) + "%", fontsize=self.font_labels)
self.gui.frame_output_plot.mass_radius_plot.legend(loc="upper left")
self.gui.frame_output_plot.plot_combined_canvas.draw()
self.gui.frame_output_plot.plot_combined_canvas.mpl_connect("motion_notify_event", self.hover)
def H2OPlot(self):
    """Shade the composition contours for a water (H2O) envelope model."""
    # Log-spaced grid covering the current mass/radius axis limits.
    mass_grid, radius_grid = np.meshgrid(
        np.logspace(np.log10(self.mmin), np.log10(self.mmax), 500),
        np.logspace(np.log10(self.rmin), np.log10(self.rmax), 500))
    log_mass = np.log10(mass_grid)
    # Interior (core) boundary contours are optional; the envelope ones are not.
    if self.core_contours:
        self.densityPlot("Fe-Silicates", mass_grid, log_mass, radius_grid, [0.2, 0.4, 0.6, 0.8])
    self.densityPlot("Silicates-H2O", mass_grid, log_mass, radius_grid, [0.2, 0.4, 0.6, 0.8])
    self.densityPlot("Envelope-H2O", mass_grid, log_mass, radius_grid,
                     [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.5, 2, 2.5, 3, 4, 5])
def SilicatesPlot(self):
    """Shade the composition contours for a silicate envelope model."""
    mass_grid, radius_grid = np.meshgrid(
        np.logspace(np.log10(self.mmin), np.log10(self.mmax), 500),
        np.logspace(np.log10(self.rmin), np.log10(self.rmax), 500))
    log_mass = np.log10(mass_grid)
    # Optional Fe-silicate core boundary, then the mandatory envelope contours.
    if self.core_contours:
        self.densityPlot("Fe-Silicates", mass_grid, log_mass, radius_grid, [0.2, 0.4, 0.6, 0.8])
    self.densityPlot("Envelope-Silicates", mass_grid, log_mass, radius_grid,
                     [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.5, 2, 2.5, 3, 4, 5])
def FePlot(self):
    """Shade the composition contours for an iron (Fe) envelope model."""
    mass_grid, radius_grid = np.meshgrid(
        np.logspace(np.log10(self.mmin), np.log10(self.mmax), 500),
        np.logspace(np.log10(self.rmin), np.log10(self.rmax), 500))
    log_mass = np.log10(mass_grid)
    self.densityPlot("Envelope-Fe", mass_grid, log_mass, radius_grid,
                     [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.5, 2, 2.5, 3, 4, 5])
def densityPlot(self, envelope, xx, x_values, yy, levels):
    """Draw a pcolormesh of the composition quantity Z for `envelope` over the
    (mass, radius) grid (xx, yy), plus contour lines at `levels`, and refresh
    the small inset colorbar.

    `x_values` is log10 of the mass grid; `rangeFunction` marks where the
    composition model is defined and `applyFunction` evaluates it there.
    """
    # Boolean validity mask for the power-series fit, converted to 0/1 ints.
    index = rangeFunction(envelope, x_values, yy)
    Z = applyFunction(envelope, x_values, yy)
    index = 1 * index
    # NOTE(review): `valid` is an ALIAS of Z, not a copy. The two masked
    # assignments below therefore mutate the same array, and `minv` ends up
    # being np.min(Z) AFTER invalid cells were set to min-1 (one unit below
    # the smallest valid value). The effect is that invalid cells fall on the
    # transparent first colormap entry, but the colorbar's lower tick label
    # shows min-1 rather than the true minimum — confirm whether
    # `valid = Z.copy()` was intended.
    valid = Z
    valid[index == 0] = np.max(Z)
    Z[index == 0] = np.min(Z) - 1
    maxv = np.max(Z)
    str_max = str(round(maxv, 2))
    if maxv >= 5:
        # Clamp the colour scale at 5; the label records that the true max exceeds it.
        maxv = 5
        str_max = ">=5"
    minv = np.min(valid)
    ax = self.gui.frame_output_plot.mass_radius_plot.pcolormesh(xx, yy, Z, cmap=self.newcmp, shading="nearest",
                                                                edgecolors=None, vmin=minv, vmax=maxv)
    # Replace any previous inset colorbar so repeated redraws don't stack them.
    cbaxes = inset_axes(self.gui.frame_output_plot.mass_radius_plot, width="3%", height="15%", loc=7)
    if self.cbl_cmap is not None:
        self.cbl_cmap.remove()
    self.cbl_cmap = plt.colorbar(ax, cax=cbaxes, ticks=[minv, maxv])
    self.cbl_cmap.ax.set_yticklabels([str(round(minv, 2)), str_max])
    self.cbl_cmap.ax.set_title("Z contours ", fontsize=8)
    self.cbl_cmap.ax.yaxis.set_ticks_position('left')
    self.gui.frame_output_plot.mass_radius_plot.contour(xx, yy, Z, colors="#5d5857", levels=levels)
def plotPureLine(self):
    """Overlay mass-radius curves for pure-composition planets."""
    masses = np.logspace(np.log10(self.mmin), np.log10(self.mmax), 500)
    log_masses = np.log10(masses)
    # (model key, line colour, legend label) for each pure composition.
    for model_key, colour, label in (("pure-Fe-metals", "red", "Fe-metals"),
                                     ("pure-Silicates", "green", "Silicates"),
                                     ("pure-high-pressure-ices", "blue", "Ices")):
        radii = pureFunction(model_key, log_masses)
        self.gui.frame_output_plot.mass_radius_plot.plot(masses, radii, colour, label=label)
def plotMassRadiusHydrogen(self):
    """Plot pure-hydrogen mass-radius curves at different specific entropies.

    Curves come from the Becker et al. 2014 (ApJS) tables
    ``MassRadiusDB.massradiusS03Becker`` .. ``massradiusS10Becker``
    (column 0 = mass, column 1 = radius).  The previous revision repeated
    the same ``plot`` call eight times; a loop over the table names keeps
    the drawing order and styling identical.
    """
    axes = self.gui.frame_output_plot.mass_radius_plot
    for entropy_index in range(3, 11):
        curve = getattr(MassRadiusDB, 'massradiusS%02dBecker' % entropy_index)
        axes.plot(curve[:, 0], curve[:, 1])
def plotMassRadiusHydrogenCentralDensity(self):
    """Connect points of equal row index across the Becker et al. (2014)
    pure-hydrogen tables (rows 0, 5, 10, ... 35), tracing lines of
    (presumably) constant central density — TODO confirm against the tables.

    Replaces the previous hand-written 8-element ``np.array`` literal with a
    loop over the same tables in the same order (S03..S10).
    """
    tables = [getattr(MassRadiusDB, 'massradiusS%02dBecker' % s) for s in range(3, 11)]
    for row in range(0, 40, 5):
        chosen_element = np.array([table[row, :] for table in tables])
        self.gui.frame_output_plot.mass_radius_plot.plot(chosen_element[:, 0], chosen_element[:, 1])
def plotPlanetSolarSystem(self):
    """Mark the Solar System planets with single-letter math-text markers.

    Each ``MassRadiusDB`` entry is a (mass, radius) pair.  The previous
    revision repeated the same ``scatter`` call eight times; the loop below
    preserves the original drawing order and styling exactly.
    """
    axes = self.gui.frame_output_plot.mass_radius_plot
    for body, marker in ((MassRadiusDB.Mercury, "$m$"),
                         (MassRadiusDB.Venus, "$V$"),
                         (MassRadiusDB.Earth, "$E$"),
                         (MassRadiusDB.Mars, "$M$"),
                         (MassRadiusDB.Jupiter, "$J$"),
                         (MassRadiusDB.Saturn, "$S$"),
                         (MassRadiusDB.Uranus, "$U$"),
                         (MassRadiusDB.Neptune, "$N$")):
        axes.scatter(body[0], body[1], c="black", s=20, marker=marker)
def plotPlanetTepCat(self):
    """Scatter the catalogue planet sample on the mass-radius plot.

    Planets are split into four uncertainty classes by which mass error bars
    are present (both / only +sigma / only -sigma / none) and drawn with
    distinct markers.  Optionally colour-codes by a third quantity chosen in
    the GUI and draws its inset colorbar.

    Side effects: sets ``self.check``, ``self.coeff``, ``self.chosen_index``,
    ``self.min_val``, ``self.max_val``, the scatter handles ``self.sc`` ..
    ``self.sc3`` and the hover annotation ``self.annot`` — all read later by
    :meth:`hover` / :meth:`update_annot` and :meth:`plotPlanetInput`.
    """
    # Convert catalogue units to Earth masses/radii.
    X = np.array(self.subsetdata[self.index_mass_p] * self.mass_coeff)
    Y = np.array(self.subsetdata[self.index_rad_p] * self.radius_coeff)
    deltaYm = np.array(self.subsetdata[self.index_min_rad]) * self.radius_coeff
    deltaYp = np.array(self.subsetdata[self.index_rad_max]) * self.radius_coeff
    deltaXm = np.array(self.subsetdata[self.index_mass_min]) * self.mass_coeff
    deltaXp = np.array(self.subsetdata[self.index_mass_max]) * self.mass_coeff
    # Subset with BOTH mass error bars available.
    d1 = deltaYm[np.logical_and(deltaXm != 0, deltaXp != 0)]
    d2 = deltaYp[np.logical_and(deltaXm != 0, deltaXp != 0)]
    d3 = deltaXm[np.logical_and(deltaXm != 0, deltaXp != 0)]
    d4 = deltaXp[np.logical_and(deltaXm != 0, deltaXp != 0)]
    x1 = X[np.logical_and(deltaXm != 0, deltaXp != 0)]
    y1 = Y[np.logical_and(deltaXm != 0, deltaXp != 0)]
    self.check = 0
    filter_arr = None
    space = " "
    self.coeff = 1
    # Resolve the colour-map quantity selected in the GUI.  ``self.check``
    # stays 0 when the chosen column is unavailable in the loaded table.
    # ``self.coeff`` converts Jupiter to Earth units where applicable.
    if self.choose_filter_map_var.get() == "Planet Temp":
        if self.check_teq:
            self.chosen_index = self.index_teq
            self.check = 1
    elif self.choose_filter_map_var.get() == "Planet Mass":
        space = " "
        self.coeff = 317.8
        self.chosen_index = self.index_mass_p
        self.check = 1
    elif self.choose_filter_map_var.get() == "Planet Radius":
        self.chosen_index = self.index_rad_p
        self.coeff = 11.2
        self.check = 1
    elif self.choose_filter_map_var.get() == "Star Temp":
        space = " "
        if self.check_tstar:
            self.chosen_index = self.index_tstar
            self.check = 1
    elif self.choose_filter_map_var.get() == "Star Mass":
        space = " "
        if self.check_mass_star:
            self.chosen_index = self.index_mass_star
            self.check = 1
    elif self.choose_filter_map_var.get() == "Star Radius":
        if self.check_radius_star:
            self.chosen_index = self.index_radius_star
            self.check = 1
    elif self.choose_filter_map_var.get() == "Eccentricity":
        space = " "
        if self.check_ecc:
            self.chosen_index = self.index_ecc
            self.check = 1
    elif self.choose_filter_map_var.get() == "Semi-major axis":
        space = " "
        if self.check_a_orb:
            self.chosen_index = self.index_a_orb
            self.check = 1
    elif self.choose_filter_map_var.get() == "Orbital Period":
        space = " "
        if self.check_p_orb:
            self.chosen_index = self.index_p_orb
            self.check = 1
    elif self.choose_filter_map_var.get() == "Age":
        space = " "
        if self.check_age_host:
            self.chosen_index = self.index_age_host
            self.check = 1
    elif self.choose_filter_map_var.get() == "[Fe/H]":
        space = " "
        if self.check_FeH:
            self.chosen_index = self.index_FeH
            self.check = 1
    # Fallback colour-scale limits when no third quantity is mapped.
    self.min_val = 1
    self.max_val = 3000
    if self.check:
        filter_arr = np.array(self.subsetdata[self.chosen_index])
        self.max_val = np.max(filter_arr) * self.coeff
        self.min_val = np.min(filter_arr) * self.coeff
        filter_cmap = filter_arr[np.logical_and(deltaXm != 0, deltaXp != 0)]
        filter_cmap = filter_cmap * self.coeff
    else:
        filter_cmap = None
    # Main scatter: planets with both mass error bars.
    self.sc = self.gui.frame_output_plot.mass_radius_plot.scatter(x1, y1, s=20, c=filter_cmap,
                                                                  cmap=plt.cm.get_cmap("jet"), vmin=self.min_val,
                                                                  vmax=self.max_val, edgecolors="black", zorder=100,
                                                                  label="Planets")
    if self.show_error_plot:
        self.gui.frame_output_plot.mass_radius_plot.errorbar(x1, y1, yerr=[d1, d2], xerr=[d3, d4], linestyle="None",
                                                             zorder=101, alpha=0.5)
    if self.check:
        # Replace any previous third-coordinate colorbar to avoid stacking.
        cbaxes = inset_axes(self.gui.frame_output_plot.mass_radius_plot, width="3%", height="15%", loc=6)
        if self.cbl_third_coord is not None:
            self.cbl_third_coord.remove()
        self.cbl_third_coord = plt.colorbar(self.sc, cax=cbaxes, ticks=[self.min_val, self.max_val])
        self.cbl_third_coord.ax.set_title(space + self.choose_filter_map_var.get(), fontsize=8)
    # Additional markers only make sense when min/max mass errors are distinct columns.
    if self.index_mass_min != self.index_mass_max:
        # Planets with only the +sigma mass error.
        x1 = X[np.logical_and(deltaXm == 0, deltaXp != 0)]
        y1 = Y[np.logical_and(deltaXm == 0, deltaXp != 0)]
        if self.check:
            filter_cmap = filter_arr[np.logical_and(deltaXm == 0, deltaXp != 0)]
            filter_cmap = filter_cmap * self.coeff
        else:
            filter_cmap = None
        d1 = deltaYm[np.logical_and(deltaXm == 0, deltaXp != 0)]
        d2 = deltaYp[np.logical_and(deltaXm == 0, deltaXp != 0)]
        d3 = deltaXm[np.logical_and(deltaXm == 0, deltaXp != 0)]
        d4 = deltaXp[np.logical_and(deltaXm == 0, deltaXp != 0)]
        if x1.size != 0:
            self.sc1 = self.gui.frame_output_plot.mass_radius_plot.scatter(x1, y1, s=20, c=filter_cmap,
                                                                           cmap=plt.cm.get_cmap("jet"),
                                                                           vmin=self.min_val, vmax=self.max_val,
                                                                           edgecolors="black", zorder=100,
                                                                           marker='v',
                                                                           label="Planets (only \u03C3+ mass)")
            if self.show_error_plot:
                self.gui.frame_output_plot.mass_radius_plot.errorbar(x1, y1, yerr=[d1, d2], xerr=[d3, d4],
                                                                     linestyle="None", zorder=101, alpha=0.5)
        # Planets with only the -sigma mass error.
        x1 = X[np.logical_and(deltaXm != 0, deltaXp == 0)]
        y1 = Y[np.logical_and(deltaXm != 0, deltaXp == 0)]
        if self.check:
            filter_cmap = filter_arr[np.logical_and(deltaXm != 0, deltaXp == 0)]
            filter_cmap = filter_cmap * self.coeff
        else:
            filter_cmap = None
        d1 = deltaYm[np.logical_and(deltaXm != 0, deltaXp == 0)]
        d2 = deltaYp[np.logical_and(deltaXm != 0, deltaXp == 0)]
        d3 = deltaXm[np.logical_and(deltaXm != 0, deltaXp == 0)]
        d4 = deltaXp[np.logical_and(deltaXm != 0, deltaXp == 0)]
        if x1.size != 0:
            self.sc2 = self.gui.frame_output_plot.mass_radius_plot.scatter(x1, y1, s=20, c=filter_cmap,
                                                                           cmap=plt.cm.get_cmap("jet"),
                                                                           vmin=self.min_val, vmax=self.max_val,
                                                                           edgecolors="black", zorder=100,
                                                                           marker='^',
                                                                           label="Planets (only \u03C3- mass)")
            if self.show_error_plot:
                self.gui.frame_output_plot.mass_radius_plot.errorbar(x1, y1, yerr=[d1, d2], xerr=[d3, d4],
                                                                     linestyle="None", zorder=101, alpha=0.5)
        # Planets with no mass error bars at all.
        x1 = X[np.logical_and(deltaXm == 0, deltaXp == 0)]
        y1 = Y[np.logical_and(deltaXm == 0, deltaXp == 0)]
        if self.check:
            filter_cmap = filter_arr[np.logical_and(deltaXm == 0, deltaXp == 0)]
            filter_cmap = filter_cmap * self.coeff
        else:
            filter_cmap = None
        d1 = deltaYm[np.logical_and(deltaXm == 0, deltaXp == 0)]
        d2 = deltaYp[np.logical_and(deltaXm == 0, deltaXp == 0)]
        d3 = deltaXm[np.logical_and(deltaXm == 0, deltaXp == 0)]
        d4 = deltaXp[np.logical_and(deltaXm == 0, deltaXp == 0)]
        if x1.size != 0:
            self.sc3 = self.gui.frame_output_plot.mass_radius_plot.scatter(x1, y1, s=20, c=filter_cmap,
                                                                           cmap=plt.cm.get_cmap("jet"),
                                                                           vmin=self.min_val, vmax=self.max_val,
                                                                           edgecolors="black", zorder=100, marker='D',
                                                                           label="Planets (no \u03C3 mass)")
            if self.show_error_plot:
                self.gui.frame_output_plot.mass_radius_plot.errorbar(x1, y1, yerr=[d1, d2], xerr=[d3, d4],
                                                                     linestyle="None", zorder=101, alpha=0.5)
    # Hidden tooltip annotation; shown/moved by the hover callback.
    self.annot = self.gui.frame_output_plot.mass_radius_plot.axes.annotate("", xy=(0, 0), xytext=(20, 20),
                                                                           textcoords="offset points",
                                                                           bbox=dict(boxstyle="round", fc="w"),
                                                                           arrowprops=dict(arrowstyle="->"),
                                                                           zorder=102)
    self.annot.set_visible(False)
    if self.show_all_planets_labels:
        names = np.array(self.subsetdata[0])
        for i in range(len(X)):
            # Flip the label offset near the right edge of the plot.
            if X[i] >= self.mmax / 3:
                x = -60
            else:
                x = 20
            # NOTE(review): this compares the mass X[i] against the radius
            # limit self.rmax — looks like it should be Y[i]; confirm.
            if X[i] >= self.rmax / 2:
                y = -20
            else:
                y = 20
            self.gui.frame_output_plot.mass_radius_plot.axes.annotate(str(names[i]), xy=(X[i], Y[i]), xytext=(x, y),
                                                                      textcoords="offset points",
                                                                      bbox=dict(boxstyle="round", fc="w"),
                                                                      arrowprops=dict(arrowstyle="->"))
def update_annot(self, ind, sc):
    """Move the tooltip to the hovered point of scatter `sc` and show its name."""
    hit_indices = ind["ind"]
    # Anchor the annotation at the first hit; label it with the last hit's name.
    self.annot.xy = sc.get_offsets()[hit_indices[0]]
    self.annot.set_text(self.names[hit_indices[-1]])
    self.annot.get_bbox_patch().set_alpha(0.75)
def hover(self, event):
    """Mouse-motion callback: show the planet-name tooltip when the cursor is
    over any of the planet scatters, hide it otherwise.

    The previous revision copy-pasted the same block for each of the four
    scatter handles and tested a visibility flag captured BEFORE the event,
    so a later scatter's miss could immediately re-hide an annotation that an
    earlier scatter had just shown.  Iterating the scatters and returning on
    the first hit fixes that and removes the duplication.
    """
    if event.inaxes != self.gui.frame_output_plot.mass_radius_plot.axes:
        return
    canvas = self.gui.frame_output_plot.plot_combined_canvas
    # First scatter containing the cursor wins.
    for sc in (self.sc, self.sc1, self.sc2, self.sc3):
        if sc is None:
            continue
        contains, ind = sc.contains(event)
        if contains:
            self.update_annot(ind, sc)
            self.annot.set_visible(True)
            canvas.draw_idle()
            return
    # Cursor over no scatter: hide the tooltip if it is currently shown.
    if self.annot.get_visible():
        self.annot.set_visible(False)
        canvas.draw_idle()
def plotPlanetInput(self):
    """Overlay the user-added planets (last ``self.num_new_planets`` rows of
    ``self.subsetdata``) on the mass-radius plot, with the same uncertainty
    classes and colour mapping as :meth:`plotPlanetTepCat`.

    Bug fixes vs. the previous revision:
      * mass uncertainties (deltaXm/deltaXp) were scaled by ``radius_coeff``
        instead of ``mass_coeff``;
      * one scatter call passed the builtin ``max`` as ``vmax`` instead of
        ``self.max_val``.
    """
    tempSubData = self.subsetdata.tail(self.num_new_planets)
    X = np.array(tempSubData[self.index_mass_p] * self.mass_coeff)
    Y = np.array(tempSubData[self.index_rad_p] * self.radius_coeff)
    deltaYm = np.array(tempSubData[self.index_min_rad]) * self.radius_coeff
    deltaYp = np.array(tempSubData[self.index_rad_max]) * self.radius_coeff
    # FIX: mass uncertainties must use the mass unit conversion.
    deltaXm = np.array(tempSubData[self.index_mass_min]) * self.mass_coeff
    deltaXp = np.array(tempSubData[self.index_mass_max]) * self.mass_coeff
    if self.check:
        filter_arr = np.array(tempSubData[self.chosen_index])
    else:
        filter_arr = None
    names = np.array(tempSubData[0])
    axes = self.gui.frame_output_plot.mass_radius_plot

    def overlay(mask, marker, size, scatter_z, err_z, require_points):
        # Draw one uncertainty class selected by `mask`.
        x1, y1 = X[mask], Y[mask]
        if require_points and x1.size == 0:
            return
        if self.check:
            cvals = filter_arr[mask] * self.coeff
        else:
            cvals = None
        kwargs = dict(s=size, c=cvals, cmap=plt.cm.get_cmap("jet"),
                      vmin=self.min_val, vmax=self.max_val,
                      edgecolors="black", zorder=scatter_z)
        if marker is not None:
            kwargs['marker'] = marker
        axes.scatter(x1, y1, **kwargs)
        if self.show_error_plot:
            axes.errorbar(x1, y1, yerr=[deltaYm[mask], deltaYp[mask]],
                          xerr=[deltaXm[mask], deltaXp[mask]],
                          linestyle="None", zorder=err_z, alpha=0.5)

    # Planets with both mass error bars (larger, default marker).
    overlay(np.logical_and(deltaXm != 0, deltaXp != 0), None, 100, 103, 104, False)
    # The partial/no-error classes only apply when min/max errors are distinct columns.
    if self.index_mass_min != self.index_mass_max:
        overlay(np.logical_and(deltaXm == 0, deltaXp != 0), 'v', 20, 100, 101, True)
        overlay(np.logical_and(deltaXm != 0, deltaXp == 0), '^', 20, 100, 101, True)
        overlay(np.logical_and(deltaXm == 0, deltaXp == 0), 'D', 20, 100, 101, True)
    if self.add2:
        for i in range(self.num_new_planets):
            # Flip the label offset near the right edge of the plot.
            if X[i] >= self.mmax / 3:
                x = -60
            else:
                x = 20
            # NOTE(review): compares mass X[i] against the radius limit
            # self.rmax (same as plotPlanetTepCat) — likely should be Y[i];
            # behaviour kept unchanged pending confirmation.
            if X[i] >= self.rmax / 2:
                y = -20
            else:
                y = 20
            self.gui.frame_output_plot.mass_radius_plot.axes.annotate(str(names[i]), xy=(X[i], Y[i]), xytext=(x, y),
                                                                      textcoords="offset points",
                                                                      bbox=dict(boxstyle="round", fc="w"),
                                                                      arrowprops=dict(arrowstyle="->"))
def executeRoutine(self, mass, radius):
    """Fetch the planet subset for the given limits and redraw every plot.

    Shows an error dialog and aborts when no planet matches the boundaries.
    """
    self.dataAcquisition(mass, radius)
    if self.subsetdata.empty:
        msgbox.showerror(title="ERROR", message="No planet found with these boundaries")
        return
    # Redraw in the original order: histograms first, mass-radius last.
    for redraw in (self.plotHistogramMass, self.plotHistogramRadius,
                   self.plotHistogramZeta, self.plotMassRadius):
        redraw()
|
francescoa97outlookREPO_NAMEpyExoRaMaPATH_START.@pyExoRaMa_extracted@pyExoRaMa-main@GUI_Plot@Frame_Run_Plot.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Jammy2211/PyAutoLens",
"repo_path": "PyAutoLens_extracted/PyAutoLens-main/test_autolens/point/fit/positions/source/__init__.py",
"type": "Python"
}
|
Jammy2211REPO_NAMEPyAutoLensPATH_START.@PyAutoLens_extracted@PyAutoLens-main@test_autolens@point@fit@positions@source@__init__.py@.PATH_END.py
|
|
{
"filename": "table2.py",
"repo_name": "JiaxiWu1018/Unsupervised-TRGB",
"repo_path": "Unsupervised-TRGB_extracted/Unsupervised-TRGB-main/plots/table2.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 25 18:31:41 2022
@author: michaelwu
"""
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
def count_field(det):
    """Return (fields, galaxies): the unique values of the 'field' and
    'galaxy' columns of `det`, each in order of first appearance."""
    fields, galaxies = [], []
    for field_name, galaxy_name in zip(det['field'], det['galaxy']):
        if field_name not in fields:
            fields.append(field_name)
        if galaxy_name not in galaxies:
            galaxies.append(galaxy_name)
    return fields, galaxies
def c4(n):
    """Bias-correction factor c4(n) for the sample standard deviation,
    using exact factorial formulas for even and odd n."""
    k = n // 2
    if n % 2:
        # Odd sample size.
        return np.sqrt(np.pi / k) * math.factorial(2 * k - 1) / (2 ** (2 * k - 1)) / (math.factorial(k - 1) ** 2)
    # Even sample size.
    return np.sqrt(2 / (np.pi * (2 * k - 1))) * (2 ** (2 * k - 2)) * (math.factorial(k - 1) ** 2) / math.factorial(2 * k - 2)
def cal_dispersion(gal, det):
    """Weighted mean of per-galaxy TRGB dispersions.

    For each galaxy in `gal` with at least two rows in `det`, compute the
    standard deviation of its c4-corrected TRGB values; average those
    dispersions weighted by the number of rows per galaxy.
    """
    weighted_sum = 0
    total_rows = 0
    for galaxy_name in gal:
        mask = [name == galaxy_name for name in det['galaxy']]
        sub = det[mask].reset_index(drop=True)
        n = len(sub)
        if n >= 2:
            dispersion = np.std(np.array(sub['TRGB']) / c4(n))
            weighted_sum += n * dispersion
            total_rows += n
    return weighted_sum / total_rows
def _report(path, row_filter=None):
    """Print the 'all' and '3031' summary lines for one detection catalogue.

    Loads `path`, optionally keeps only rows where `row_filter(det)` is True,
    and prints: label, weighted dispersion, detection completeness (percent of
    the 50 / 11 reference fields), and mean detections per field.
    """
    det = pd.read_csv(path)
    if row_filter is not None:
        det = det[row_filter(det)].reset_index(drop=True)
    field, gal = count_field(det)
    std = cal_dispersion(gal, det)
    print('all', std, len(field) / 50 * 100, len(det) / len(field))
    # Restrict to NGC 3031 fields and repeat (11 reference fields).
    judge = ['3031' in det['field'][i] for i in range(len(det))]
    det = det[judge].reset_index(drop=True)
    field, gal = count_field(det)
    std = cal_dispersion(gal, det)
    print('3031', std, len(field) / 11 * 100, len(det) / len(field))


# One entry per table row: (catalogue file, optional quality cut).  This
# replaces six near-identical copy-pasted blocks; output order is unchanged.
_CASES = [
    ('../detection/ghosts_detection_v1.2.csv', None),
    ('../detection/ghosts_detection_v4.3.csv', None),
    ('../detection/ghosts_detection_v4.4.csv', None),
    ('../detection/ghosts_detection_v4.4.csv',
     lambda d: d['RGB AGB Ratio'] >= 4),
    ('../detection/ghosts_detection_v4.4.csv',
     lambda d: (d['RGB AGB Ratio'] >= 4) & (d['# star below tip'] >= 200)),
    ('../detection/ghosts_detection_v4.1.csv',
     lambda d: (d['RGB AGB Ratio'] >= 4) & (d['# star below tip'] >= 200)),
]
for _path, _cut in _CASES:
    _report(_path, _cut)
|
JiaxiWu1018REPO_NAMEUnsupervised-TRGBPATH_START.@Unsupervised-TRGB_extracted@Unsupervised-TRGB-main@plots@table2.py@.PATH_END.py
|
{
"filename": "test_file_image.py",
"repo_name": "h5py/h5py",
"repo_path": "h5py_extracted/h5py-master/h5py/tests/test_file_image.py",
"type": "Python"
}
|
import h5py
from h5py import h5f, h5p
from .common import ut, TestCase
class TestFileImage(TestCase):
    """Tests for opening HDF5 files from an in-memory file image.

    The previous revision duplicated the ~300-character compressed image
    literal in both test methods; it now lives in one class attribute with
    a shared decode helper.
    """

    # zlib-compressed, base64-encoded HDF5 file image containing a member
    # named 'test'.
    COMPRESSED_IMAGE = 'eJzr9HBx4+WS4mIAAQ4OBhYGAQZk8B8KKjhQ+TD5BCjNCKU7oPQKJpg4I1hOAiouCDUfXV1IkKsrSPV/NACzx4AFQnMwjIKRCDxcHQNAdASUD0ulJ5hQ1ZWkFpeAaFh69KDQXkYGNohZjDA+JCUzMkIEmKHqELQAWKkAByytOoBJViAPJM7ExATWyAE0B8RgZkyAJmlYDoEAIahukJoNU6+HMTA0UOgT6oBgP38XUI6G5UMFZrzKR8EoGAUjGMDKYVgxDSsuAHcfMK8='

    @classmethod
    def _image(cls):
        """Return the raw HDF5 file image bytes."""
        from binascii import a2b_base64
        from zlib import decompress
        return decompress(a2b_base64(cls.COMPRESSED_IMAGE))

    def test_load_from_image(self):
        """Load the image through a core-driver FAPL via set_file_image."""
        fapl = h5p.create(h5py.h5p.FILE_ACCESS)
        fapl.set_fapl_core()
        fapl.set_file_image(self._image())
        fid = h5f.open(self.mktemp().encode(), h5py.h5f.ACC_RDONLY, fapl=fapl)
        f = h5py.File(fid)
        self.assertTrue('test' in f)

    def test_open_from_image(self):
        """Open the image directly with h5f.open_file_image."""
        fid = h5f.open_file_image(self._image())
        f = h5py.File(fid)
        self.assertTrue('test' in f)
|
h5pyREPO_NAMEh5pyPATH_START.@h5py_extracted@h5py-master@h5py@tests@test_file_image.py@.PATH_END.py
|
{
"filename": "test_keras_savedmodel_exporter.py",
"repo_name": "ML4GW/hermes",
"repo_path": "hermes_extracted/hermes-main/tests/quiver/exporters/test_keras_savedmodel_exporter.py",
"type": "Python"
}
|
import pytest
from hermes.quiver import Model, Platform
from hermes.quiver.exporters import KerasSavedModel
@pytest.mark.tensorflow
def test_keras_savedmodel_exporter(temp_local_repo, keras_model):
    """Exercise KerasSavedModel end to end: tensor-name conventions, config
    registration of inputs/outputs, shape-mismatch rejection, export to a
    version directory, and the full __call__ path.

    `temp_local_repo` and `keras_model` are pytest fixtures; the model is a
    single-Dense Keras model whose tensor names follow the
    '<scope>_dense_input' / '<scope>_dense/MatMul' pattern asserted below.
    """
    # Derive the expected tensor names from the model's name scope.
    scope = keras_model.name.split("_")[0]
    input_name = f"{scope}_dense_input"
    output_name = f"{scope}_dense/MatMul"
    assert keras_model.inputs[0].name.split(":")[0] == input_name
    assert keras_model.outputs[0].name.split(":")[0] == output_name
    model = Model("identity", temp_local_repo, Platform.ONNX)
    exporter = KerasSavedModel(model.config, model.fs)
    # Registering the input records it in the config with -1 for the batch dim.
    input_shapes = {input_name: (None, 10)}
    exporter._check_exposed_tensors("input", input_shapes)
    assert len(model.config.input) == 1
    assert model.config.input[0].name == input_name
    assert model.config.input[0].dims[0] == -1
    # A conflicting shape for an already-registered tensor must be rejected.
    bad_input_shapes = {input_name: (None, 12)}
    with pytest.raises(ValueError):
        exporter._check_exposed_tensors("input", bad_input_shapes)
    # Output shapes are inferred and keyed by the final layer's name.
    output_shapes = exporter._get_output_shapes(keras_model, output_name)
    assert tuple(output_shapes[keras_model.layers[-1].name]) == (None, 10)
    exporter._check_exposed_tensors("output", output_shapes)
    assert len(model.config.output) == 1
    assert model.config.output[0].name == keras_model.layers[-1].name
    assert model.config.output[0].dims[0] == -1
    # Export version 1 into the repository's directory layout.
    version_path = temp_local_repo.fs.join("identity", "1")
    output_path = temp_local_repo.fs.join(version_path, "model.savedmodel")
    temp_local_repo.fs.soft_makedirs(output_path)
    exporter.export(keras_model, output_path)
    # now test using full call
    exporter(keras_model, 2)
    # Re-supplying shapes (or bogus output names) after registration must fail.
    with pytest.raises(ValueError):
        exporter(keras_model, 3, input_shapes)
    with pytest.raises(ValueError):
        exporter(keras_model, 3, None, ["y"])
|
ML4GWREPO_NAMEhermesPATH_START.@hermes_extracted@hermes-main@tests@quiver@exporters@test_keras_savedmodel_exporter.py@.PATH_END.py
|
{
"filename": "chromatic_aberrations.py",
"repo_name": "mtalapinto/moes",
"repo_path": "carmenes/chromatic_aberrations.py",
"type": "Python"
}
|
import numpy as np
import utils
import json
import pandas as pd
import os
from optics import parameters
from optics import vis_spectrometer
from optics import env_data
import matplotlib.pyplot as plt
import matplotlib
import dynesty
import dyplot
import corner
import pickle
import math
import warnings
# matplotlib.use('Qt4agg')
SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))
def load_coeffs(date, fib):
    """Load the four chromatic-aberration coefficients (a0..a3) for the given
    observation date and fiber from the coefficient time-series directory."""
    coeffs_dir = 'data/aberrations_coefficients/chromatic_coefficients_timeseries/' + str(date) + '/'
    table = pd.read_csv(coeffs_dir + 'chrome_coeffs_' + str(fib) + '.dat', sep=',')
    # First (only) row of each coefficient column, returned as a 4-tuple.
    return tuple(table[name].values[0] for name in ('a0', 'a1', 'a2', 'a3'))
def resample_equal(samples, weights, rstate=None):
    """Resample `samples` into an equally weighted set of the same size.

    Uses the systematic resampling scheme of Hol, Schon & Gustafsson (2006,
    doi:10.1109/NSSPW.2006.4378824): each input appears floor or ceil of
    ``weights[i] * nsamples`` times, chosen by a single shared random offset.

    Parameters
    ----------
    samples : `~numpy.ndarray` with shape (nsamples,)
        Set of unequally weighted samples.
    weights : `~numpy.ndarray` with shape (nsamples,)
        Corresponding weight of each sample; renormalized (with a warning)
        if they do not sum to 1.
    rstate : `~numpy.random.RandomState`, optional
        Random-number source; defaults to the global `numpy.random`.

    Returns
    -------
    equal_weight_samples : `~numpy.ndarray` with shape (nsamples,)
        New set of samples with equal weights.
    """
    if rstate is None:
        rstate = np.random
    # Same tolerance as np.random.choice uses for its probability check.
    if abs(np.sum(weights) - 1.) > SQRTEPS:
        warnings.warn("Weights do not sum to 1 and have been renormalized.")
        weights = np.array(weights) / np.sum(weights)
    nsamples = len(weights)
    # One uniform offset shared by all evenly spaced positions in [0, 1).
    positions = (rstate.random() + np.arange(nsamples)) / nsamples
    cumulative_sum = np.cumsum(weights)
    idx = np.zeros(nsamples, dtype=int)
    sample_pos = 0
    weight_pos = 0
    while sample_pos < nsamples:
        if positions[sample_pos] < cumulative_sum[weight_pos]:
            idx[sample_pos] = weight_pos
            sample_pos += 1
        else:
            weight_pos += 1
    return samples[idx]
def get_sum(vec):
    """Return (median, lower offset, upper offset) of `vec`, where the offsets
    are distances from the median to the values at the ~15.865% rank from
    each end (approximate 1-sigma quantiles).

    NOTE: when len(vec) is small enough that the rank rounds to 0, the upper
    offset indexes fvec[-0] == fvec[0] (original behaviour, preserved).
    """
    ordered = np.sort(vec)
    center = np.median(ordered)
    rank = int(np.around(len(ordered) * 0.15865))
    lower_offset = center - ordered[rank]
    upper_offset = ordered[-rank] - center
    return center, lower_offset, upper_offset
def function(x, coefs):
    """Evaluate the chromatic model a0*x^2 + a1 + a2/x^2 + a3/x^4 at x."""
    a0, a1, a2, a3 = coefs[0], coefs[1], coefs[2], coefs[3]
    return a0 * x ** 2 + a1 + a2 / x ** 2 + a3 / x ** 4
def correct_dyn(ws_data, ws_model, coord, fiber, date):
    """Fit the chromatic-aberration model a0*w^2 + a1 + a2/w^2 + a3/w^4 to the
    data-minus-model position residuals with dynesty's dynamic nested sampler.

    Side effects: creates the per-date output directory if needed and pickles
    the best-fit summary to ``best_fit_pars_<fiber>.pkl``.  Returns the four
    posterior medians (a0, a1, a2, a3).
    """
    x = ws_model['wave'].values  # wavelength
    # Residual in the requested detector coordinate.
    if coord == 'x':
        y = ws_data['posm'].values - ws_model['x'].values
    else:
        y = ws_data['posmy'].values - ws_model['y'].values  # y coordinate

    def prior(cube):
        # Uniform priors on all four coefficients in [-10, 10].
        cube[0] = utils.transform_uniform(cube[0], -10., 10.)
        cube[1] = utils.transform_uniform(cube[1], -10., 10.)
        cube[2] = utils.transform_uniform(cube[2], -10., 10.)
        cube[3] = utils.transform_uniform(cube[3], -10., 10.)
        return cube

    def loglike(cube):
        # Extract parameters:
        a0, a1, a2, a3 = cube[0], cube[1], cube[2], cube[3]
        # Generate model:
        model = a0 * x ** 2 + a1 + a2 * x ** -2 + a3 * x ** -4  # + a4*x**-6 + a5*x**-8
        # Evaluate the Gaussian log-likelihood with a fixed 1-mpix noise floor.
        ndata = len(y)
        sigma_fit = 0.001
        loglikelihood = -0.5 * ndata * np.log(2. * np.pi * sigma_fit ** 2) + \
                        (-0.5 * ((y - model) / sigma_fit) ** 2).sum()
        return loglikelihood

    n_params = 4
    outdir = 'data/aberrations_coefficients/chromatic_coefficients_timeseries/'+date+'/'
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    # Run the dynamic nested sampler.
    dsampler = dynesty.DynamicNestedSampler(
        loglike,
        prior,
        ndim=n_params
    )
    dsampler.run_nested(nlive_init=500, nlive_batch=500)
    results = dsampler.results
    samples = results['samples']
    # Importance weights of the nested-sampling draws.
    weights = np.exp(results['logwt'] - results['logz'][-1])
    #posterior_samples = resample_equal(results.samples, weights)
    # Evidence and its uncertainty.
    lnZ = results.logz[-1]
    lnZerr = results.logzerr[-1]
    # NOTE(review): get_sum returns (median, lower offset, upper offset), so
    # the *up/*lo names below appear swapped relative to what it returns —
    # confirm before relying on the stored asymmetric uncertainties.
    a0, a0up, a0lo = get_sum(samples[:, 0])
    a1, a1up, a1lo = get_sum(samples[:, 1])
    a2, a2up, a2lo = get_sum(samples[:, 2])
    a3, a3up, a3lo = get_sum(samples[:, 3])
    a0_end = a0
    a1_end = a1
    a2_end = a2
    a3_end = a3
    # Persist the posterior summary for this fiber.
    outdata = {}
    outdata['c0'] = a0
    outdata['c0_up'] = a0up
    outdata['c0_lo'] = a0lo
    outdata['c1'] = a1
    outdata['c1_up'] = a1up
    outdata['c1_lo'] = a1lo
    outdata['c2'] = a2
    outdata['c2_up'] = a2up
    outdata['c2_lo'] = a2lo
    outdata['c3'] = a3
    outdata['c3_up'] = a3up
    outdata['c3_lo'] = a3lo
    outdata['lnZ'] = lnZ
    outdata['lnZ_err'] = lnZerr
    pickle.dump(outdata, open(outdir+'best_fit_pars_'+str(fiber)+'.pkl', 'wb'))
    print('Chromatic correction file written...')
    return a0_end, a1_end, a2_end, a3_end
|
mtalapintoREPO_NAMEmoesPATH_START.@carmenes@chromatic_aberrations.py@.PATH_END.py
|
{
"filename": "obs_file.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/document_loaders/obs_file.py",
"type": "Python"
}
|
# coding:utf-8
import os
import tempfile
from typing import Any, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
class OBSFileLoader(BaseLoader):
    """Load a single object from Huawei OBS (Object Storage Service)."""

    def __init__(
        self,
        bucket: str,
        key: str,
        client: Any = None,
        endpoint: str = "",
        config: Optional[dict] = None,
    ) -> None:
        """Initialize the OBSFileLoader.

        Args:
            bucket (str): Name of the OBS bucket.
            key (str): Key of the object inside the bucket.
            client (ObsClient, optional): An existing ObsClient connection.
                If given, ``endpoint`` and ``config`` are ignored.
            endpoint (str, optional): Endpoint URL of the OBS bucket.
                Mandatory when ``client`` is not supplied.
            config (dict, optional): Connection parameters used only when a
                new client is created. Recognized keys:

                - "ak" (str): access key (needed unless the bucket is
                  public-read or ``get_token_from_ecs`` is True).
                - "sk" (str): secret key (same condition as "ak").
                - "token" (str): security token for temporary credentials.
                - "get_token_from_ecs" (bool): fetch credentials from the ECS
                  metadata service; when True, "ak"/"sk"/"token" are ignored.

        Raises:
            ImportError: If the ``esdk-obs-python`` package is missing.
            ValueError: If neither ``client`` nor ``endpoint`` is provided.
            TypeError: If ``client`` is not an ObsClient instance.
        """
        try:
            from obs import ObsClient
        except ImportError:
            raise ImportError(
                "Could not import esdk-obs-python python package. "
                "Please install it with `pip install esdk-obs-python`."
            )
        if not client:
            if not endpoint:
                raise ValueError("Either OBSClient or endpoint must be provided.")
            config = config or dict()
            if config.get("get_token_from_ecs"):
                # Credentials come from the ECS metadata service.
                client = ObsClient(server=endpoint, security_provider_policy="ECS")
            else:
                client = ObsClient(
                    access_key_id=config.get("ak"),
                    secret_access_key=config.get("sk"),
                    security_token=config.get("token"),
                    server=endpoint,
                )
        if not isinstance(client, ObsClient):
            raise TypeError("Client must be ObsClient type")
        self.client = client
        self.bucket = bucket
        self.key = key

    def load(self) -> List[Document]:
        """Download the object to a temporary path and parse it into documents."""
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.bucket}/{self.key}"
            # The key may contain '/' separators, so create the full tree.
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            self.client.downloadFile(
                bucketName=self.bucket, objectKey=self.key, downloadFile=file_path
            )
            return UnstructuredFileLoader(file_path).load()
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@document_loaders@obs_file.py@.PATH_END.py
|
{
"filename": "_colorscale.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/marker/_colorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the ``bar.marker.colorscale`` property."""

    def __init__(self, plotly_name="colorscale", parent_name="bar.marker", **kwargs):
        # Resolve overridable defaults before delegating to the base class.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {"autocolorscale": False})
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@marker@_colorscale.py@.PATH_END.py
|
{
"filename": "_tickwidth.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/yaxis/_tickwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``layout.yaxis.tickwidth`` property."""

    def __init__(self, plotly_name="tickwidth", parent_name="layout.yaxis", **kwargs):
        # Resolve overridable defaults before delegating to the base class.
        edit_type = kwargs.pop("edit_type", "ticks")
        minimum = kwargs.pop("min", 0)
        super(TickwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@yaxis@_tickwidth.py@.PATH_END.py
|
{
"filename": "_showexponent.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contour/colorbar/_showexponent.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``contour.colorbar.showexponent`` property."""

    def __init__(
        self, plotly_name="showexponent", parent_name="contour.colorbar", **kwargs
    ):
        # Resolve overridable defaults before delegating to the base class.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["all", "first", "last", "none"])
        super(ShowexponentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contour@colorbar@_showexponent.py@.PATH_END.py
|
{
"filename": "Sig.py",
"repo_name": "duvall3/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/Sig.py",
"type": "Python"
}
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Sig.py 4043 2009/02/23 09:06:45 scons"
__doc__ = """Place-holder for the old SCons.Sig module hierarchy
This is no longer used, but code out there (such as the NSIS module on
the SCons wiki) may try to import SCons.Sig. If so, we generate a warning
that points them to the line that caused the import, and don't die.
If someone actually tried to use the sub-modules or functions within
the package (for example, SCons.Sig.MD5.signature()), then they'll still
get an AttributeError, but at least they'll know where to start looking.
"""
import SCons.Util
import SCons.Warnings
# Warn (once, at import time) whichever module still does "import SCons.Sig";
# the warning framework points the user at the offending import line.
msg = 'The SCons.Sig module no longer exists.\n' \
'    Remove the following "import SCons.Sig" line to eliminate this warning:'
SCons.Warnings.warn(SCons.Warnings.DeprecatedWarning, msg)

# Old public attributes of SCons.Sig, kept so attribute access doesn't
# immediately explode; they carry no behavior anymore.
default_calc = None
default_module = None

class MD5Null(SCons.Util.Null):
    """Inert stand-in for the removed SCons.Sig.MD5 sub-module."""

    def __repr__(self):
        return "MD5Null()"

class TimeStampNull(SCons.Util.Null):
    """Inert stand-in for the removed SCons.Sig.TimeStamp sub-module."""

    def __repr__(self):
        return "TimeStampNull()"

# Null singletons: any attribute/method access on them silently no-ops,
# so legacy code using SCons.Sig.MD5.signature() fails late with a clear repr.
MD5 = MD5Null()
TimeStamp = TimeStampNull()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
duvall3REPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@Sig.py@.PATH_END.py
|
{
"filename": "dataclass_field.py",
"repo_name": "light-curve/light-curve-python",
"repo_path": "light-curve-python_extracted/light-curve-python-master/light-curve/light_curve/light_curve_py/dataclass_field.py",
"type": "Python"
}
|
import sys

if sys.version_info >= (3, 10):
    # dataclasses.field understands kw_only natively on 3.10+.
    from dataclasses import field as dataclass_field
else:
    from dataclasses import field as _field

    def dataclass_field(*, kw_only, **kwargs):
        """Compatibility shim: accept ``kw_only`` for API parity, drop it,
        and forward everything else to :func:`dataclasses.field`."""
        del kw_only  # not supported before Python 3.10; intentionally ignored
        return _field(**kwargs)

__all__ = ["dataclass_field"]
|
light-curveREPO_NAMElight-curve-pythonPATH_START.@light-curve-python_extracted@light-curve-python-master@light-curve@light_curve@light_curve_py@dataclass_field.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "takafumi291/ESSENCE",
"repo_path": "ESSENCE_extracted/ESSENCE-main/README.md",
"type": "Markdown"
}
|
## ESSENCE: functions for evaluating spatially correlated noise in the interferometric images.
**ESSENCE** is a Python package for evaluating the statistical significance of image analysis and signal detection under correlated noise in interferometric images (e.g., ALMA, NOEMA), namely, Evaluating Statistical Significance undEr Noise CorrElation.
This code does the following things for you:
1. measuring noise autocorrelation function (ACF) which fully characterizes the statistical properties of spatially correlated noise in the interferometric image.
2. computing the noise in the spatially integrated quantities (e.g., flux, spectrum) with a given aperture.
3. simulating noise maps with the same correlation property.
4. constructing a covariance matrix from noise ACF, which can be used for a 2D image or 3D cube model fitting.
Detailed formulation of ESSENCE and its application are presented in [Tsukui et al. 2023](https://www.spiedigitallibrary.org/journals/Journal-of-Astronomical-Telescopes-Instruments-and-Systems/volume-9/issue-01/018001/Estimating-the-statistical-uncertainty-due-to-spatially-correlated-noise-in/10.1117/1.JATIS.9.1.018001.full?SSO=1).
### Requirements:
| Packages | Tested version |
| --------------:|---------------:|
| python | 3.7.7 |
| astropy | 4.3.1 |
| spectral_cube | 0.6.0 |
| numpy | 1.21.5 |
| scipy | 1.7.3 |
| multiprocess | 0.70.13 |
| functools | |
### Installation:
No installation step is required; simply `git clone` the repository into a directory of your choice:
> $ git clone https://github.com/takafumi291/ESSENCE.git
> $ cd ESSENCE
### Example data:
For running tutorial.ipynb, please download [example data](https://drive.google.com/file/d/1h0wEPHVebVSjl803r9LnQyBTxfoU2kBY/view?usp=sharing), unzip, and place it in the same directory of the ipynb file.
The data is from Tsukui and Iguchi 2021, Sci (ADS/JAO.ALMA2017.1.00394.S PI=Gonzalez Lopez, Jorg)
### Usage:
See [tutorial](https://github.com/takafumi291/ESSENCE/blob/main/Tutorial.ipynb) for a quick example.
### Contacts:
I am open to collaborations, e.g., any suggestion, feedback, or directly improving my codes. I am also happy to help with any difficulties you encounter using my codes. Feel free to contact me!
Takafumi Tsukui: tsukuitk23_at_gmail.com
|
takafumi291REPO_NAMEESSENCEPATH_START.@ESSENCE_extracted@ESSENCE-main@README.md@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/waterfall/textfont/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``waterfall.textfont.size`` property."""

    def __init__(self, plotly_name="size", parent_name="waterfall.textfont", **kwargs):
        # Resolve overridable defaults before delegating to the base class.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 1)
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@waterfall@textfont@_size.py@.PATH_END.py
|
{
"filename": "test_stacksubsample.py",
"repo_name": "PynPoint/PynPoint",
"repo_path": "PynPoint_extracted/PynPoint-main/tests/test_processing/test_stacksubsample.py",
"type": "Python"
}
|
import os
import pytest
import numpy as np
from pynpoint.core.pypeline import Pypeline
from pynpoint.readwrite.fitsreading import FitsReadingModule
from pynpoint.processing.stacksubset import StackAndSubsetModule, StackCubesModule, \
DerotateAndStackModule, CombineTagsModule
from pynpoint.util.tests import create_config, create_star_data, create_ifs_data, remove_test_data
class TestStackSubset:
    """Integration tests for PynPoint's stacking and subset modules.

    NOTE: all test methods share one Pypeline instance and one HDF5 database,
    and later tests read tags written by earlier ones, so the methods are
    order-dependent and must run in definition order.
    """

    def setup_class(self) -> None:
        """Create synthetic data sets, a config file, and the pipeline."""
        # Relative tolerance used by the pytest.approx comparisons below.
        self.limit = 1e-10

        self.test_dir = os.path.dirname(__file__) + '/'

        create_ifs_data(self.test_dir+'data_ifs')
        create_star_data(self.test_dir+'data')
        create_star_data(self.test_dir+'extra')
        create_config(self.test_dir+'PynPoint_config.ini')

        self.pipeline = Pypeline(self.test_dir, self.test_dir, self.test_dir)

    def teardown_class(self) -> None:
        """Remove the temporary data folders and the database."""
        remove_test_data(self.test_dir, folders=['data_ifs', 'extra', 'data'])

    def test_read_data(self) -> None:
        """Read the regular, 'extra', and IFS FITS data into the database."""
        module = FitsReadingModule(name_in='read1',
                                   image_tag='images',
                                   input_dir=self.test_dir+'data',
                                   overwrite=True,
                                   check=True)

        self.pipeline.add_module(module)
        self.pipeline.run_module('read1')

        data = self.pipeline.get_data('images')
        assert np.mean(data) == pytest.approx(0.08722544528764692, rel=self.limit, abs=0.)
        assert data.shape == (10, 11, 11)

        module = FitsReadingModule(name_in='read2',
                                   image_tag='extra',
                                   input_dir=self.test_dir+'extra',
                                   overwrite=True,
                                   check=True)

        self.pipeline.add_module(module)
        self.pipeline.run_module('read2')

        # 'extra' is generated identically to 'images', so contents match.
        extra = self.pipeline.get_data('extra')
        assert data == pytest.approx(extra, rel=self.limit, abs=0.)

        module = FitsReadingModule(name_in='read_ifs',
                                   image_tag='images_ifs',
                                   input_dir=self.test_dir+'data_ifs',
                                   overwrite=True,
                                   check=True,
                                   ifs_data=True)

        self.pipeline.add_module(module)
        self.pipeline.run_module('read_ifs')

        # Parallactic angles needed by the derotation tests further down.
        self.pipeline.set_attribute('images_ifs', 'PARANG', np.linspace(0., 180., 10), static=False)

        data = self.pipeline.get_data('images_ifs')
        assert np.sum(data) == pytest.approx(749.8396528807369, rel=self.limit, abs=0.)
        assert data.shape == (3, 10, 21, 21)

    def test_stack_and_subset(self) -> None:
        """Mean-stack pairs of frames, then draw a random subset of 4 stacks."""
        self.pipeline.set_attribute('images', 'PARANG', np.arange(10.), static=False)

        module = StackAndSubsetModule(name_in='stack1',
                                      image_in_tag='images',
                                      image_out_tag='stack1',
                                      random=4,
                                      stacking=2,
                                      combine='mean',
                                      max_rotation=None)

        self.pipeline.add_module(module)
        self.pipeline.run_module('stack1')

        data = self.pipeline.get_data('stack1')
        assert np.mean(data) == pytest.approx(0.08758276283743936, rel=self.limit, abs=0.)
        assert data.shape == (4, 11, 11)

        data = self.pipeline.get_data('header_stack1/INDEX')
        assert data == pytest.approx(np.arange(4), rel=self.limit, abs=0.)
        assert data.shape == (4, )

        # Expected pairwise PARANG means of the randomly selected stacks.
        data = self.pipeline.get_data('header_stack1/PARANG')
        assert data == pytest.approx([0.5, 2.5, 6.5, 8.5], rel=self.limit, abs=0.)
        assert data.shape == (4, )

    def test_stack_max_rotation(self) -> None:
        """Stack by angle groups with a 1-degree maximum field rotation."""
        # Craft angles so that several frames fall inside one rotation bin.
        angles = np.arange(10.)
        angles[1:6] = 3.
        angles[9] = 50.

        self.pipeline.set_attribute('images', 'PARANG', angles, static=False)

        module = StackAndSubsetModule(name_in='stack2',
                                      image_in_tag='images',
                                      image_out_tag='stack2',
                                      random=None,
                                      stacking=2,
                                      combine='median',
                                      max_rotation=1.)

        self.pipeline.add_module(module)

        # The module itself warns that angle-based stacking is experimental.
        with pytest.warns(UserWarning) as warning:
            self.pipeline.run_module('stack2')

        assert len(warning) == 1

        assert warning[0].message.args[0] == 'Testing of util.module.stack_angles has been ' \
                                             'limited, please use carefully.'

        data = self.pipeline.get_data('stack2')
        assert np.mean(data) == pytest.approx(0.08580759396987508, rel=self.limit, abs=0.)
        assert data.shape == (7, 11, 11)

        data = self.pipeline.get_data('header_stack2/INDEX')
        assert data == pytest.approx(np.arange(7), rel=self.limit, abs=0.)
        assert data.shape == (7, )

        data = self.pipeline.get_data('header_stack2/PARANG')
        assert data.shape == (7, )

        # Restore the plain angles so the following tests see clean input.
        self.pipeline.set_attribute('images', 'PARANG', np.arange(10.), static=False)

    def test_stack_cube(self) -> None:
        """Collapse each cube of the input stack with a mean."""
        module = StackCubesModule(name_in='stackcube',
                                  image_in_tag='images',
                                  image_out_tag='mean',
                                  combine='mean')

        self.pipeline.add_module(module)
        self.pipeline.run_module('stackcube')

        data = self.pipeline.get_data('mean')
        assert np.mean(data) == pytest.approx(0.08722544528764689, rel=self.limit, abs=0.)
        assert data.shape == (2, 11, 11)

        attribute = self.pipeline.get_attribute('mean', 'INDEX', static=False)
        assert np.mean(attribute) == pytest.approx(0.5, rel=self.limit, abs=0.)
        assert attribute.shape == (2, )

        attribute = self.pipeline.get_attribute('mean', 'NFRAMES', static=False)
        assert np.mean(attribute) == pytest.approx(1, rel=self.limit, abs=0.)
        assert attribute.shape == (2, )

    def test_derotate_and_stack(self) -> None:
        """Derotate and/or stack regular and IFS data along both dimensions."""
        module = DerotateAndStackModule(name_in='derotate1',
                                        image_in_tag='images',
                                        image_out_tag='derotate1',
                                        derotate=True,
                                        stack='mean',
                                        extra_rot=10.)

        self.pipeline.add_module(module)
        self.pipeline.run_module('derotate1')

        data = self.pipeline.get_data('derotate1')
        assert np.mean(data) == pytest.approx(0.08709860116308817, rel=self.limit, abs=0.)
        assert data.shape == (1, 11, 11)

        module = DerotateAndStackModule(name_in='derotate2',
                                        image_in_tag='images',
                                        image_out_tag='derotate2',
                                        derotate=False,
                                        stack='median',
                                        extra_rot=0.)

        self.pipeline.add_module(module)
        self.pipeline.run_module('derotate2')

        data = self.pipeline.get_data('derotate2')
        assert np.mean(data) == pytest.approx(0.0861160094566323, rel=self.limit, abs=0.)
        assert data.shape == (1, 11, 11)

        # NOTE(review): the next three lines repeat the previous check
        # verbatim — this looks like a copy-paste duplicate; confirm whether
        # a different tag/assertion was intended here.
        data = self.pipeline.get_data('derotate2')
        assert np.mean(data) == pytest.approx(0.0861160094566323, rel=self.limit, abs=0.)
        assert data.shape == (1, 11, 11)

        module = DerotateAndStackModule(name_in='derotate_ifs1',
                                        image_in_tag='images_ifs',
                                        image_out_tag='derotate_ifs1',
                                        derotate=True,
                                        stack='mean',
                                        extra_rot=0.,
                                        dimension='time')

        self.pipeline.add_module(module)
        self.pipeline.run_module('derotate_ifs1')

        data = self.pipeline.get_data('derotate_ifs1')
        assert np.mean(data) == pytest.approx(0.1884438996655355, rel=self.limit, abs=0.)
        assert data.shape == (3, 1, 21, 21)

        module = DerotateAndStackModule(name_in='derotate_ifs2',
                                        image_in_tag='images_ifs',
                                        image_out_tag='derotate_ifs2',
                                        derotate=False,
                                        stack='median',
                                        extra_rot=0.,
                                        dimension='wavelength')

        self.pipeline.add_module(module)
        self.pipeline.run_module('derotate_ifs2')

        data = self.pipeline.get_data('derotate_ifs2')
        assert np.mean(data) == pytest.approx(0.055939644983170146, rel=self.limit, abs=0.)
        assert data.shape == (1, 10, 21, 21)

        module = DerotateAndStackModule(name_in='derotate_ifs3',
                                        image_in_tag='images_ifs',
                                        image_out_tag='derotate_ifs3',
                                        derotate=True,
                                        stack=None,
                                        extra_rot=0.,
                                        dimension='wavelength')

        self.pipeline.add_module(module)
        self.pipeline.run_module('derotate_ifs3')

        data = self.pipeline.get_data('derotate_ifs3')
        assert np.mean(data) == pytest.approx(0.05653316989966066, rel=self.limit, abs=0.)
        assert data.shape == (3, 10, 21, 21)

    def test_combine_tags(self) -> None:
        """Concatenate two database tags, with and without attribute checks."""
        module = CombineTagsModule(image_in_tags=['images', 'extra'],
                                   check_attr=True,
                                   index_init=False,
                                   name_in='combine1',
                                   image_out_tag='combine1')

        self.pipeline.add_module(module)

        # check_attr=True flags the differing FILES attribute of the two tags.
        with pytest.warns(UserWarning) as warning:
            self.pipeline.run_module('combine1')

        assert len(warning) == 1

        assert warning[0].message.args[0] == 'The non-static keyword FILES is already used but ' \
                                             'with different values. It is advisable to only ' \
                                             'combine tags that descend from the same data set.'

        data = self.pipeline.get_data('combine1')
        assert np.mean(data) == pytest.approx(0.0872254452876469, rel=self.limit, abs=0.)
        assert data.shape == (20, 11, 11)

        # index_init=False keeps the per-tag INDEX values (0..9 twice).
        data = self.pipeline.get_data('header_combine1/INDEX')
        assert data[19] == 9
        assert data.shape == (20, )

        module = CombineTagsModule(image_in_tags=['images', 'extra'],
                                   check_attr=False,
                                   index_init=True,
                                   name_in='combine2',
                                   image_out_tag='combine2')

        self.pipeline.add_module(module)
        self.pipeline.run_module('combine2')

        data = self.pipeline.get_data('combine1')
        extra = self.pipeline.get_data('combine2')
        assert data == pytest.approx(extra, rel=self.limit, abs=0.)

        # index_init=True renumbers INDEX continuously across both tags.
        data = self.pipeline.get_data('header_combine2/INDEX')
        assert data[19] == 19
        assert data.shape == (20, )
|
PynPointREPO_NAMEPynPointPATH_START.@PynPoint_extracted@PynPoint-main@tests@test_processing@test_stacksubsample.py@.PATH_END.py
|
{
"filename": "thermo.ipynb",
"repo_name": "miguelzuma/hi_class_public",
"repo_path": "hi_class_public_extracted/hi_class_public-master/notebooks/thermo.ipynb",
"type": "Jupyter Notebook"
}
|
```python
# import necessary modules
# uncomment to get plots displayed in notebook
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
import math
```
```python
# esthetic definitions for the plots
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
```
```python
# Baseline LambdaCDM settings; only unlensed temperature Cls are requested
# since this notebook only needs the thermodynamics output.
common_settings = {'output' : 'tCl',
                   # LambdaCDM parameters
                   'h':0.67556,
                   'omega_b':0.022032,
                   'omega_cdm':0.12038,
                   'A_s':2.215e-9,
                   'n_s':0.9619,
                   'tau_reio':0.0925,
                   # Take fixed value for primordial Helium (instead of automatic BBN adjustment)
                   'YHe':0.246,
                   'thermodynamics_verbose':1
                   }
##############
#
# call CLASS
#
###############
M = Class()
M.set(common_settings)
M.compute()
# Derived quantities used for the plot limits/markers below.
derived = M.get_current_derived_parameters(['tau_rec','conformal_age'])
thermo = M.get_thermodynamics()
# NOTE(review): dict.viewkeys() and the statement form of print are
# Python-2-only; under Python 3 this would be print(thermo.keys()) —
# confirm which interpreter this notebook targets.
print thermo.viewkeys()
```
```python
tau = thermo['conf. time [Mpc]']
g = thermo['g [Mpc^-1]']

# to make the reionisation peak visible, rescale g by 100 for late times
# (the thermodynamics table is ordered from late to early times, so the
# first 500 entries are the late-time ones — TODO confirm against output).
g[:500] *= 100

#################
#
# start plotting
#
#################
#
plt.xlim([1.e2,derived['conformal_age']])
plt.xlabel(r'$\tau \,\,\, \mathrm{[Mpc]}$')
plt.ylabel(r'$\mathrm{visibility} \,\,\, g \,\,\, [\mathrm{Mpc}^{-1}]$')
plt.axvline(x=derived['tau_rec'],color='k')
# The conformal time at reionisation could be extracted from the code.
# But we know it because it is part of the standard output
# when thermodynamics_verbose=1
plt.axvline(x=4255.316282,color='k')
#
# Print functions one by one, saving between each (for slides)
#
# NOTE(review): the legend label '$\psi$' does not match the plotted
# quantity (the visibility function g) — looks copied from another
# notebook; confirm the intended label.
plt.semilogx(tau,g,'r',label=r'$\psi$')
```
```python
plt.savefig('thermo.pdf',bbox_inches='tight')
```
|
miguelzumaREPO_NAMEhi_class_publicPATH_START.@hi_class_public_extracted@hi_class_public-master@notebooks@thermo.ipynb@.PATH_END.py
|
{
"filename": "fli.py",
"repo_name": "panoptes/POCS",
"repo_path": "POCS_extracted/POCS-main/src/panoptes/pocs/camera/fli.py",
"type": "Python"
}
|
from contextlib import suppress
import numpy as np
from astropy import units as u
from panoptes.pocs.camera.sdk import AbstractSDKCamera
from panoptes.pocs.camera.libfli import FLIDriver
from panoptes.pocs.camera import libfliconstants as c
from panoptes.utils.images import fits as fits_utils
from panoptes.utils import error
class Camera(AbstractSDKCamera):
    """Camera class for Finger Lakes Instrumentation (FLI) cameras.

    Thin wrapper around the FLI SDK (via `FLIDriver`) handling connection,
    cooling control, exposures, and row-by-row readout to FITS files.
    """

    _driver = None  # shared FLIDriver instance, created by AbstractSDKCamera
    _cameras = {}  # cameras detected by the driver, keyed by address
    _assigned_cameras = set()  # addresses already claimed by an instance

    def __init__(self,
                 name='FLI Camera',
                 target_temperature=25 * u.Celsius,
                 *args, **kwargs):
        """Create an FLI camera instance.

        Args:
            name (str): Name of the camera.
            target_temperature (astropy.units.Quantity): Initial set point
                for the image sensor cooling.
        """
        kwargs['target_temperature'] = target_temperature
        super().__init__(name, FLIDriver, *args, **kwargs)
        self.logger.info('{} initialised'.format(self))

    def __del__(self):
        # Release the driver handle, if connect() ever assigned one.
        with suppress(AttributeError):
            handle = self._handle
            self._driver.FLIClose(handle)
            self.logger.debug('Closed FLI camera handle {}'.format(handle.value))
        super().__del__()

    # Properties

    @property
    def temperature(self):
        """Current temperature of the camera's image sensor."""
        return self._driver.FLIGetTemperature(self._handle)

    @AbstractSDKCamera.target_temperature.getter
    def target_temperature(self):
        """Current target temperature for the image sensor cooling control.

        Can be set by assigning an astropy.units.Quantity.
        """
        return self._target_temperature

    @property
    def cooling_enabled(self):
        """Current status of the image sensor cooling system (enabled/disabled).

        Note: For FLI cameras this is always True, and cannot be set.
        """
        return True

    @cooling_enabled.setter
    def cooling_enabled(self, enable):
        # Cooling is always enabled on FLI cameras; only disabling is an error.
        if not enable:
            raise error.NotSupported("Cannot disable cooling on {}".format(self.name))

    @property
    def cooling_power(self):
        """Current power level of the cooling system (percentage of maximum)."""
        return self._driver.FLIGetCoolerPower(self._handle)

    @property
    def is_exposing(self):
        """True if an exposure is currently under way, otherwise False."""
        return bool(self._driver.FLIGetExposureStatus(self._handle).value)

    # Methods

    def connect(self):
        """Connect to the FLI camera.

        Gets a 'handle', serial number and specs/capabilities from the driver.

        Raises:
            error.PanError: If the driver returns an invalid device handle.
        """
        self.logger.debug('Connecting to {}'.format(self))
        self._handle = self._driver.FLIOpen(port=self._address)
        if self._handle == c.FLI_INVALID_DEVICE:
            # NOTE(review): FLIOpen uses self._address but this message uses
            # self._camera_address — confirm both attributes exist upstream.
            message = 'Could not connect to {} on {}!'.format(self.name, self._camera_address)
            raise error.PanError(message)
        self._get_camera_info()
        self.model = self.properties['camera model']
        # All FLI camera models are cooled
        self._is_cooled_camera = True
        self._connected = True

    # Private Methods

    def _set_target_temperature(self, target):
        self._driver.FLISetTemperature(self._handle, target)
        # NOTE(review): the driver call reports no success/failure here;
        # consider reading the set point back to confirm.
        self._target_temperature = target

    def _set_cooling_enabled(self, enable):
        # Bug fix: the original definition had no parameters at all
        # (``def _set_cooling_enabled():``), so invoking it as a method
        # raised TypeError instead of the intended NotImplementedError.
        # Signature now matches the _set_target_temperature convention.
        # Cooling cannot be toggled on FLI cameras.
        raise NotImplementedError

    def _start_exposure(self, seconds, filename, dark, header, *args, **kwargs):
        """Configure the sensor, start the exposure, and return readout args."""
        self._driver.FLISetExposureTime(self._handle, exposure_time=seconds)

        if dark:
            frame_type = c.FLI_FRAME_TYPE_DARK
        else:
            frame_type = c.FLI_FRAME_TYPE_NORMAL
        self._driver.FLISetFrameType(self._handle, frame_type)

        # For now set to 'visible' (i.e. light sensitive) area of image sensor.
        # Can later use this for windowed exposures.
        self._driver.FLISetImageArea(self._handle,
                                     self.properties['visible corners'][0],
                                     self.properties['visible corners'][1])

        # No on chip binning for now.
        self._driver.FLISetHBin(self._handle, bin_factor=1)
        self._driver.FLISetVBin(self._handle, bin_factor=1)

        # No pre-exposure image sensor flushing, either.
        self._driver.FLISetNFlushes(self._handle, n_flushes=0)

        # In principle can set bit depth here (16 or 8 bit) but most FLI cameras don't support it.

        # Start exposure
        self._driver.FLIExposeFrame(self._handle)

        readout_args = (filename,
                        self.properties['visible width'],
                        self.properties['visible height'],
                        header)
        return readout_args

    def _readout(self, filename, width, height, header):
        """Read the image row by row and write it to a FITS file.

        Raises:
            error.PanError: If fewer rows than expected could be read.
        """
        # Use FLIGrabRow for now at least because I can't get FLIGrabFrame to work.
        # image_data = self._FLIDriver.FLIGrabFrame(self._handle, width, height)
        image_data = np.zeros((height, width), dtype=np.uint16)
        rows_got = 0
        try:
            for i in range(image_data.shape[0]):
                image_data[i] = self._driver.FLIGrabRow(self._handle, image_data.shape[1])
                rows_got += 1
        except RuntimeError as err:
            message = 'Readout error on {}, expected {} rows, got {}: {}'.format(
                self, image_data.shape[0], rows_got, err)
            raise error.PanError(message)
        else:
            fits_utils.write_fits(data=image_data,
                                  header=header,
                                  filename=filename)

    def _create_fits_header(self, seconds, dark):
        """Extend the base FITS header with FLI-specific keywords."""
        header = super()._create_fits_header(seconds, dark)
        header.set('CAM-HW', self.properties['hardware version'], 'Camera hardware version')
        header.set('CAM-FW', self.properties['firmware version'], 'Camera firmware version')
        header.set('XPIXSZ', self.properties['pixel width'].value, 'Microns')
        header.set('YPIXSZ', self.properties['pixel height'].value, 'Microns')
        return header

    def _get_camera_info(self):
        """Query the driver and cache the camera's static properties."""
        serial_number = self._driver.FLIGetSerialString(self._handle)
        camera_model = self._driver.FLIGetModel(self._handle)
        hardware_version = self._driver.FLIGetHWRevision(self._handle)
        firmware_version = self._driver.FLIGetFWRevision(self._handle)

        pixel_width, pixel_height = self._driver.FLIGetPixelSize(self._handle)
        ccd_corners = self._driver.FLIGetArrayArea(self._handle)
        visible_corners = self._driver.FLIGetVisibleArea(self._handle)

        self._info = {
            'serial number': serial_number,
            'camera model': camera_model,
            'hardware version': hardware_version,
            'firmware version': firmware_version,
            'pixel width': pixel_width,
            'pixel height': pixel_height,
            'array corners': ccd_corners,
            'array height': ccd_corners[1][1] - ccd_corners[0][1],
            'array width': ccd_corners[1][0] - ccd_corners[0][0],
            'visible corners': visible_corners,
            'visible height': visible_corners[1][1] - visible_corners[0][1],
            'visible width': visible_corners[1][0] - visible_corners[0][0]
        }
|
panoptesREPO_NAMEPOCSPATH_START.@POCS_extracted@POCS-main@src@panoptes@pocs@camera@fli.py@.PATH_END.py
|
{
"filename": "_hovertemplate.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/waterfall/_hovertemplate.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``waterfall.hovertemplate`` property."""

    def __init__(self, plotly_name="hovertemplate", parent_name="waterfall", **kwargs):
        # Resolve overridable defaults before delegating to the base class.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        super(HovertemplateValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@waterfall@_hovertemplate.py@.PATH_END.py
|
{
"filename": "eep.py",
"repo_name": "POSYDON-code/POSYDON",
"repo_path": "POSYDON_extracted/POSYDON-main/posydon/interpolation/eep.py",
"type": "Python"
}
|
"""Module for converting a MESA history file to an EEP track.
Reference: Dotter, Aaron (2016), AJSS, 222, 1.
"""
__authors__ = [
"Aaron Dotter <aaron.dotter@gmail.com>",
"Konstantinos Kovlakas <Konstantinos.Kovlakas@unige.ch>",
]
import numpy as np
from scipy.interpolate import pchip
# suggested lists of EEPs
ZAMSlo = ['ZAMS', 'IAMS', 'TAMS', 'TRGB', 'ZACHEB', 'TACHEB']
ZAMShi = ['ZAMS', 'IAMS', 'TAMS', 'ZACHEB', 'TACHEB', 'CBURN']
HeZAMS = ['ZACHEB', 'TACHEB', 'CBURN']
class EEP:
"""Convert a MESA history file to Equivalent Evolutionary Phase track."""
def __init__(self, filename, EEP_NAMES=ZAMShi, EEP_INTERVAL=100):
"""Load an MESA history file and construct the EEP instance."""
self.filename = filename.strip()
try:
with open(self.filename, 'r') as f:
self.header1 = f.readline()
self.header2 = f.readline()
self.header3 = f.readline()
tr = np.genfromtxt(self.filename, names=True, skip_header=5)
names = tr.dtype.names
except IOError:
print("Failed to open: ")
print(self.filename)
# this section attempts to find each of the EEPs for the track
prems = self._PreMS(tr)
zams = self._ZAMS(tr)
iams = self._IAMS(tr, Xc=0.2, guess=zams+1)
tams = self._TAMS(tr, guess=iams+1)
trgb = self._TRGB(tr, guess=tams+1)
zacheb = self._ZACHEB(tr, guess=trgb+1)
tacheb = self._TACHEB(tr, guess=zacheb+1)
tpagb = self._TPAGB(tr, guess=tacheb+1)
pagb = self._PAGB(tr, guess=tpagb+1)
wdcs = self._WDCS(tr, guess=pagb+1)
cburn = self._CBurn(tr, guess=zacheb+1)
# compute the distance metric along the track that is used to assign
# secondary EEPs
metric = self._metric_function(tr)
eep_index = []
# if the EEP is in the input list, and it exists (>0)
# then add it to the official list of EEPs
for eep in EEP_NAMES:
if eep == 'PreMS' and prems >= 0:
eep_index.append(prems)
if eep == 'ZAMS' and zams >= 0:
eep_index.append(zams)
if eep == 'IAMS' and iams >= 0:
eep_index.append(iams)
if eep == 'TAMS' and tams >= 0:
eep_index.append(tams)
if eep == 'TRGB' and trgb >= 0:
eep_index.append(trgb)
if eep == 'ZACHEB' and zacheb >= 0:
eep_index.append(zacheb)
if eep == 'TACHEB' and tacheb >= 0:
eep_index.append(tacheb)
if eep == 'TPAGB' and tpagb >= 0:
eep_index.append(tpagb)
if eep == 'PAGB' and pagb >= 0:
eep_index.append(pagb)
if eep == 'WDCS' and wdcs >= 0:
eep_index.append(wdcs)
if eep == 'CBURN' and cburn >= 0:
eep_index.append(cburn)
# some bookkeeping
eep_counter = 0
self.num_primary = len(eep_index)
self.num_secondary = EEP_INTERVAL*(len(eep_index)-1)
self.num_eeps = self.num_primary + self.num_secondary
self.eeps = np.zeros((self.num_eeps), tr.dtype.descr)
# assign the primary EEPs in the correct location for the EEP track
for eep in range(self.num_primary):
self.eeps[eep_counter] = tr[eep_index[eep]]
eep_counter += EEP_INTERVAL + 1
# assign the secondary EEPs in the correct location for the EEP track
for eep in range(1, self.num_primary):
eep_counter = (eep-1) * (EEP_INTERVAL + 1)
# fill in secondary
lo = eep_index[eep-1]
hi = eep_index[eep]
dm = (metric[hi] - metric[lo])/(EEP_INTERVAL+1)
m = metric[lo].item()
for j in range(1, EEP_INTERVAL+1):
m += dm
y = []
for i, name in enumerate(names):
y.append(pchip(x=metric[lo:hi], y=tr[name][lo:hi])(m))
self.eeps[eep_counter + j] = tuple(y)
# the following function definitions are for primary EEP determinations
def _PreMS(self, tr, Dfrac=0.01, guess=0):
PreMS = -1
for i in range(len(tr)):
if tr['center_h2'][i] < Dfrac*tr['center_h2'][0]:
PreMS = i
break
return PreMS
def _ZAMS(self, tr, dXc=0.001, guess=0):
ZAMS = -1
for i in range(len(tr)):
if abs(tr['center_h1'][i]-tr['center_h1'][0]) > dXc:
ZAMS = i
break
return ZAMS
def _IAMS(self, tr, Xc=0.1, guess=0):
IAMS = -1
for i in range(len(tr)):
if tr['center_h1'][i] < Xc:
IAMS = i
break
return IAMS
def _TAMS(self, tr, Xc=0.00001, guess=0):
TAMS = -1
for i in range(len(tr)):
if tr['center_h1'][i] < Xc:
TAMS = i
break
return TAMS
def _TRGB(self, tr, guess=0):
Yc_min = tr['center_he4'][guess] - 0.01
L_He_max = -99.
Tc_min = 99.
TRGB = -1
TRGB1 = 0
TRGB2 = 0
for i in range(guess, len(tr)):
if tr['center_he4'][i] > Yc_min:
if tr['log_LHe'][i] > L_He_max:
L_He_max = tr['log_LHe'][i]
TRGB1 = i
if tr['log_center_T'][i] < Tc_min:
Tc_min = tr['log_center_T'][i]
TRGB2 = i
return max(TRGB, min(TRGB1, TRGB2))
def _ZACHEB(self, tr, guess=0):
ZACHEB = -1
Yc_min = max(0.9, tr['center_he4'][guess] - 0.03)
L_He_max = -99.
Tc_min = 99.
ZACHEB1 = 0
ZACHEB = 0
for i in range(guess, len(tr)):
if tr['center_he4'][i] > Yc_min and tr['log_LHe'][i] > L_He_max:
L_He_max = tr['log_LHe'][i]
ZACHEB1 = i
for i in range(ZACHEB1, len(tr)):
if tr['center_he4'][i] > Yc_min and tr['log_center_T'][i] < Tc_min:
Tc_min = tr['log_center_T'][i]
ZACHEB = i
return ZACHEB
def _TACHEB(self, tr, Yc_min=0.001, guess=0):
TACHEB = -1
for i in range(guess, len(tr)):
if tr['center_he4'][i] < Yc_min:
TACHEB = i
break
return TACHEB
def _TPAGB(self, tr, guess=0):
TPAGB = -1
He_shell_min = 0.1
Yc_min = 1.0e-6
for i in range(guess, len(tr)):
He_shell_mass = tr['he_core_mass'][i] - tr['c_core_mass'][i]
if tr['center_he4'][i] < Yc_min and He_shell_mass < He_shell_min:
TPAGB = i
break
return TPAGB
def _PAGB(self, tr, guess=0):
PAGB = -1
core_mass_frac_min = 0.8
Tc_now = tr['log_center_T'][guess]
Tc_end = tr['log_center_T'][-1]
# check for low-inter / high mass split
if Tc_now > Tc_end:
for i in range(guess, len(tr)):
core_mass_frac = tr['c_core_mass'][i] / tr['star_mass'][i]
if core_mass_frac > core_mass_frac_min:
PAGB = i
break
return PAGB
def _WDCS(self, tr, gamma=10., guess=0):
WDCS = -1
for i in range(guess, len(tr)):
if tr['center_gamma'][i] > gamma:
WDCS = i
break
return WDCS
def _CBurn(self, tr, XC12=0.1, guess=0):
CBURN = -1
XY_min = 1.0E-6
for i in range(guess, len(tr)):
Xc = tr['center_h1'][i]
Yc = tr['center_he4'][i]
C12 = tr['center_c12'][i]
if Xc < XY_min and Yc < XY_min and C12 < XC12:
CBURN = i
break
return CBURN
# this function computes the distance metric along the evolutionary track
# it is made up of several terms whose weights can be adjusted. Currently
# use the H-R and age information only.
# other terms can be added, must be "monotonic increasing"
def _metric_function(self, tr):
term1 = tr['log_Teff']
term2 = tr['log_L']
term3 = np.log10(tr['star_age'])
term4 = tr['log_center_Rho']
weight1 = 2.0
weight2 = 0.125
weight3 = 1.0
weight4 = 0.0
# etc.
metric = np.zeros(len(tr))
for i in range(1, len(tr)):
metric[i] = metric[i-1] + \
weight1*pow(term1[i]-term1[i-1], 2) + \
weight2*pow(term2[i]-term2[i-1], 2) + \
weight3*pow(term3[i]-term3[i-1], 2) + \
weight4*pow(term4[i]-term4[i-1], 2)
return metric
|
POSYDON-codeREPO_NAMEPOSYDONPATH_START.@POSYDON_extracted@POSYDON-main@posydon@interpolation@eep.py@.PATH_END.py
|
{
"filename": "test_xml.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/document_loaders/test_xml.py",
"type": "Python"
}
|
import os
from pathlib import Path
from langchain_community.document_loaders import UnstructuredXMLLoader
# Directory containing the shared example fixtures for these tests.
# (Dropped the accidental chained `file_path =` alias left over from editing.)
EXAMPLE_DIRECTORY = Path(__file__).parent.parent / "examples"
def test_unstructured_xml_loader() -> None:
    """Load the factbook.xml fixture and expect exactly one document."""
    xml_path = EXAMPLE_DIRECTORY / "factbook.xml"
    loader = UnstructuredXMLLoader(str(xml_path))
    docs = loader.load()
    assert len(docs) == 1
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@document_loaders@test_xml.py@.PATH_END.py
|
{
"filename": "test_multiples.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/test/suite/codes_tests/test_multiples.py",
"type": "Python"
}
|
from amuse.test.amusetest import TestWithMPI
import tempfile
import numpy
from amuse.community.hermite.interface import Hermite
from amuse.community.kepler.interface import Kepler
from amuse.community.smalln.interface import SmallN
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse import datamodel
from amuse.ic import plummer
from amuse.couple import multiples
from amuse.couple import encounters
from amuse import io
class TestSimpleMultiples(TestWithMPI):
previous = None
def new_smalln(self):
    """Return a fresh SmallN worker, stopping the previously created one.

    Only one SmallN instance is kept alive at a time; the last one created
    is remembered in ``self.previous`` so the next call can stop it.
    """
    # PEP 8 idiom: `is not None` instead of `not ... is None`
    if self.previous is not None:
        self.previous.stop()
    result = SmallN()
    result.parameters.timestep_parameter = 0.1
    result.parameters.cm_index = 2001
    self.previous = result
    return result
def new_kepler_si(self):
    """Create and initialize a Kepler worker with an SI (MSun, AU) converter."""
    converter = nbody_system.nbody_to_si(1.0 | units.MSun, 1.0 | units.AU)
    worker = Kepler(converter)
    worker.initialize_code()
    return worker
def new_kepler(self):
    """Create and initialize a Kepler worker in dimensionless N-body units."""
    worker = Kepler()
    worker.initialize_code()
    return worker
def new_smalln_si(self):
    """Return a fresh SmallN worker with an SI (MSun, parsec) converter.

    Mirrors :meth:`new_smalln`: the previous worker is stopped and the new
    one is remembered for the next call.
    """
    if self.previous is not None:
        self.previous.stop()
    converter = nbody_system.nbody_to_si(units.MSun, units.parsec)
    result = SmallN(converter)
    result.parameters.timestep_parameter = 0.1
    result.parameters.cm_index = 2001
    # Remember this instance so the next call stops it; previously this
    # assignment was missing (unlike new_smalln), leaking the worker.
    self.previous = result
    return result
def new_binary(self, mass1, mass2, semi_major_axis,
               eccentricity=0, keyoffset=-1):
    """Build a two-particle binary at perihelion on the x axis.

    Positions and velocities are chosen about the centre of mass, so the
    pair's centre of mass is at rest at the origin. When ``keyoffset`` is
    non-negative the two particles get consecutive keys starting there.
    """
    total_mass = mass1 + mass2
    fraction1 = mass1 / total_mass
    if keyoffset >= 0:
        pair = datamodel.Particles(keys=range(keyoffset, keyoffset + 2))
    else:
        pair = datamodel.Particles(2)
    pair[0].mass = mass1
    pair[1].mass = mass2
    mu = nbody_system.G * total_mass
    # speed at perihelion from the vis-viva relation
    v_peri = numpy.sqrt(
        mu / semi_major_axis * ((1.0 + eccentricity) / (1.0 - eccentricity))
    )
    r_peri = semi_major_axis * (1.0 - eccentricity)
    pair[0].position = (1.0 - fraction1) * r_peri * [1.0, 0.0, 0.0]
    pair[1].position = -(fraction1 * r_peri * [1.0, 0.0, 0.0])
    pair[0].velocity = (1.0 - fraction1) * v_peri * [0.0, 1.0, 0.0]
    pair[1].velocity = -(fraction1 * v_peri * [0.0, 1.0, 0.0])
    return pair
def create_binaries(self, center_of_mass_particles, mass1, mass2, semi_major_axis,
                    eccentricity=0):
    """Attach an identical binary to every given centre-of-mass particle.

    Each binary's components are shifted to the centre-of-mass position and
    velocity; the centre-of-mass particle gets ``child1``/``child2`` links
    and the combined mass. Returns the (modified) centre-of-mass set and
    the flat set of all binary components.
    """
    singles_in_binaries = datamodel.Particles()
    for binary in center_of_mass_particles:
        # Pass eccentricity through; previously the parameter was accepted
        # but silently ignored (new_binary always got the default 0).
        particles_in_binary = self.new_binary(
            mass1,
            mass2,
            semi_major_axis,
            eccentricity=eccentricity
        )
        particles_in_binary.radius = semi_major_axis
        binary.child1 = particles_in_binary[0]
        binary.child2 = particles_in_binary[1]
        binary.mass = mass1 + mass2
        particles_in_binary.position += binary.position
        particles_in_binary.velocity += binary.velocity
        singles_in_binaries.add_particles(particles_in_binary)
    return center_of_mass_particles, singles_in_binaries
def test0(self):
    """Evolve two close equal-mass stars; one multiple and one binary form."""
    code = Hermite()
    stars = datamodel.Particles(2)
    stars.mass = 1 | nbody_system.mass
    stars.position = [
        [0.0, 0, 0],
        [1.2, 0, 0]
    ] | nbody_system.length
    stars.velocity = [
        [0.0, 0, 0],
        [0, 0.1, 0]
    ] | nbody_system.speed
    stars.radius = 0.5 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler(),
        resolve_collision_code=self.new_smalln(),
        interaction_over_code=None
    )
    encounter_code.parameters.hard_binary_factor = 1
    encounter_code.small_scale_factor = 1
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code
    )
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    multiples_code.evolve_model(0.6 | nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.binaries), 1)
def test1(self):
    """Four stars: a binary forms, persists, and counts stay consistent
    over successive evolve_model calls."""
    code = Hermite()
    stars = datamodel.Particles(keys=(1, 2, 3, 4))
    stars.mass = 1 | nbody_system.mass
    stars.position = [
        [0.0, 0, 0],
        [0.5, 0, 0],
        [2.0, 0, 0],
        [-10.0, 0, 0],
    ] | nbody_system.length
    stars.velocity = [
        [0.0, 0, 0],
        [0, 0.1, 0],
        [0, -0.1, 0],
        [0, 0.2, 0],
    ] | nbody_system.speed
    stars.radius = 0.5 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler(),
        resolve_collision_code=self.new_smalln(),
        interaction_over_code=None
    )
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code
    )
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    multiples_code.evolve_model(0.6 | nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.binaries), 1)
    # the centre-of-mass particle (last) gets a radius derived from the
    # binary scale, not the original 0.5
    self.assertAlmostRelativeEquals(multiples_code.particles[:-1].radius, 0.5 | nbody_system.length)
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].radius, 0.4446 | nbody_system.length, 3)
    multiples_code.evolve_model(2 | nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.binaries), 1)
    multiples_code.evolve_model(3 | nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.particles), 2)
    self.assertEqual(len(multiples_code.binaries), 1)
def test2(self):
    """Four stars with a fast passer-by: one two-component multiple and two
    free singles remain after the encounter."""
    code = Hermite()
    stars = datamodel.Particles(keys=(1, 2, 3, 4))
    stars.mass = 1 | nbody_system.mass
    stars.position = [
        [0.0, 0, 0],
        [0.5, 0, 0],
        [3, 0, 0],
        [-10, 0, 0],
    ] | nbody_system.length
    stars.velocity = [
        [0.0, 0, 0],
        [0, 0.1, 0],
        [0.0, -0.5, 0],
        [0, 0.2, 0],
    ] | nbody_system.speed
    stars.radius = 0.5 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler(),
        resolve_collision_code=self.new_smalln(),
        interaction_over_code=None
    )
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code
    )
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    multiples_code.evolve_model(3 | nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    print(multiples_code.multiples[0].components)
    self.assertEqual(len(multiples_code.multiples[0].components), 2)
    self.assertEqual(len(multiples_code.particles), 3)
    self.assertEqual(len(multiples_code.binaries), 1)
    self.assertEqual(len(multiples_code.singles), 2)
def test3(self):
    """A pre-defined binary added via singles_in_binaries/binaries is turned
    into one multiple with two components on commit_particles."""
    code = Hermite()
    particles_in_binary = self.new_binary(
        0.1 | nbody_system.mass,
        0.1 | nbody_system.mass,
        0.01 | nbody_system.length,
        keyoffset=1
    )
    particles_in_binary.radius = 0.001 | nbody_system.length
    binary = datamodel.Particle(key=3)
    binary.child1 = particles_in_binary[0]
    binary.child2 = particles_in_binary[1]
    binary.radius = 0.5 | nbody_system.length
    binary.mass = 0.2 | nbody_system.mass
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler(),
        resolve_collision_code=self.new_smalln(),
        interaction_over_code=None
    )
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code
    )
    multiples_code.singles_in_binaries.add_particles(particles_in_binary)
    multiples_code.binaries.add_particle(binary)
    self.assertEqual(len(multiples_code.singles_in_binaries), 2)
    # the binary's children must live in the code's own particle set
    self.assertEqual(id(multiples_code.binaries[0].child1.particles_set), id(multiples_code.singles_in_binaries))
    multiples_code.commit_particles()
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.components_of_multiples), 2)
def test4(self):
    """multiples_change_detection stopping condition fires on multiple
    creation and again when the multiple changes/dissolves."""
    code = Hermite()
    stars = datamodel.Particles(keys=(1, 2, 3, 4))
    stars.mass = 1 | nbody_system.mass
    stars.position = [
        [0.0, 0, 0],
        [0.5, 0, 0],
        [2, 0, 0],
        [-10, 0, 0],
    ] | nbody_system.length
    stars.velocity = [
        [0, 0, 0],
        [0, 0.2, 0],
        [0, -0.2, 0],
        [0, 0.3, 0],
    ] | nbody_system.speed
    stars.radius = 0.5 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler(),
        resolve_collision_code=self.new_smalln(),
        interaction_over_code=None
    )
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code
    )
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    stopping_condition = multiples_code.stopping_conditions.multiples_change_detection
    stopping_condition.enable()
    multiples_code.evolve_model(3 | nbody_system.time)
    self.assertTrue(stopping_condition.is_set())
    self.assertAlmostRelativeEquals(multiples_code.model_time, 0.0075 | nbody_system.time, 4)
    self.assertEqual(len(stopping_condition.particles(0)), 1)
    self.assertEqual(len(stopping_condition.particles(1)), 0)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.multiples[0].components), 2)
    self.assertEqual(len(multiples_code.particles), 3)  # 1 multiples with 2 singles, plus 2 singles free
    self.assertEqual(len(multiples_code.binaries), 1)
    self.assertEqual(len(multiples_code.singles), 2)
    # continue from the interrupt; the multiple is re-formed with 3 members
    multiples_code.evolve_model(3 | nbody_system.time)
    self.assertTrue(stopping_condition.is_set())
    self.assertAlmostRelativeEquals(multiples_code.model_time, 1.2195 | nbody_system.time, 4)
    self.assertEqual(len(stopping_condition.particles(0)), 1)  # 1 new multiple
    self.assertEqual(len(stopping_condition.particles(1)), 1)  # 1 dissolved multiple
    self.assertEqual(len(multiples_code.multiples[0].components), 3)
    self.assertEqual(len(multiples_code.particles), 2)  # 1 multiple, plus 1 single free
    self.assertEqual(len(multiples_code.binaries), 1)
    self.assertEqual(len(multiples_code.singles), 1)
def test5(self):
    """Same scenario as test0 but in SI units via a converter."""
    converter = nbody_system.nbody_to_si(units.MSun, units.parsec)
    code = Hermite(converter)
    stars = datamodel.Particles(keys=(1, 2))
    stars.mass = converter.to_si(1 | nbody_system.mass)
    stars.position = converter.to_si([
        [0, 0, 0],
        [1.2, 0, 0]
    ] | nbody_system.length)
    stars.velocity = converter.to_si([
        [0, 0, 0],
        [0, 0.1, 0]
    ] | nbody_system.speed)
    stars.radius = converter.to_si(0.5 | nbody_system.length)
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler_si(),
        resolve_collision_code=self.new_smalln_si(),
        interaction_over_code=None,
        G=constants.G
    )
    encounter_code.parameters.hard_binary_factor = 1
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code,
        G=constants.G
    )
    end_time = converter.to_si(1.0 | nbody_system.time)
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    multiples_code.evolve_model(end_time)
    self.assertEqual(len(multiples_code.particles), 1)  # 1 multiples with 2 singles
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.multiples[0].components), 2)
    self.assertEqual(len(multiples_code.binaries), 1)
    self.assertEqual(len(multiples_code.singles), 0)
def test6(self):
    """Two well-separated pairs in SI units both form multiples in the same
    evolve step; multiples_change_detection reports two new multiples."""
    converter = nbody_system.nbody_to_si(units.MSun, units.parsec)
    code = Hermite(converter)
    stars = datamodel.Particles(keys=(1, 2, 3, 4))
    stars.mass = converter.to_si(1 | nbody_system.mass)
    stars.position = converter.to_si([
        [0, 0, 0],
        [1.2, 0, 0],
        [100, 0, 0],
        [100, 1.2, 0]
    ] | nbody_system.length)
    stars.velocity = converter.to_si([
        [0, 0, 0],
        [0, 0.1, 0],
        [0, 0, 0],
        [0, 0, 0.1],
    ] | nbody_system.speed)
    stars.radius = converter.to_si(0.5 | nbody_system.length)
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler_si(),
        resolve_collision_code=self.new_smalln_si(),
        interaction_over_code=None,
        G=constants.G
    )
    encounter_code.small_scale_factor = 1.0
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code,
        G=constants.G
    )
    # allow both encounters to be handled in one stopping-condition hit
    multiples_code.must_handle_one_encounter_per_stopping_condition = False
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    stopping_condition = multiples_code.stopping_conditions.multiples_change_detection
    stopping_condition.enable()
    end_time = converter.to_si(3.0 | nbody_system.time)
    print(end_time.as_quantity_in(units.Myr))
    multiples_code.evolve_model(end_time)
    self.assertTrue(stopping_condition.is_set())
    print(multiples_code.model_time.as_quantity_in(units.Myr))
    self.assertAlmostRelativeEquals(multiples_code.model_time, 7.99844 | units.Myr, 4)
    self.assertEqual(len(stopping_condition.particles(0)), 2)
    self.assertEqual(len(stopping_condition.particles(1)), 0)
    self.assertEqual(len(multiples_code.particles), 2)  # 1 multiples with 2 singles
    self.assertEqual(len(multiples_code.multiples), 2)
    self.assertEqual(len(multiples_code.binaries), 2)
    self.assertEqual(len(multiples_code.multiples[0].components), 2)
    self.assertEqual(len(multiples_code.multiples[1].components), 2)
    self.assertEqual(len(multiples_code.singles), 0)
    self.assertEqual(len(multiples_code.all_singles), 4)
def test7(self):
    """encounter_detection stopping condition: the encounter conserves the
    centre of mass, its velocity and the total energy of the pair."""
    converter = nbody_system.nbody_to_si(units.MSun, units.parsec)
    code = Hermite(converter)
    stars = datamodel.Particles(keys=(1, 2))
    stars.mass = converter.to_si(1 | nbody_system.mass)
    stars.position = converter.to_si([
        [0, 0, 0],
        [1.1, 0, 0],
    ] | nbody_system.length)
    stars.velocity = converter.to_si([
        [0, 0, 0],
        [-0.5, 1.5, 0],
    ] | nbody_system.speed)
    stars.radius = converter.to_si(0.55 | nbody_system.length)
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler_si(),
        resolve_collision_code=self.new_smalln_si(),
        interaction_over_code=None,
        G=constants.G
    )
    encounter_code.small_scale_factor = 1.0
    encounter_code.parameters.hard_binary_factor = 1
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code,
        G=constants.G
    )
    multiples_code.must_handle_one_encounter_per_stopping_condition = False
    multiples_code.singles.add_particles(stars)
    multiples_code.commit_particles()
    stopping_condition = multiples_code.stopping_conditions.encounter_detection
    stopping_condition.enable()
    end_time = converter.to_si(3.0 | nbody_system.time)
    print(end_time.as_quantity_in(units.Myr))
    multiples_code.evolve_model(end_time)
    self.assertTrue(stopping_condition.is_set())
    print(multiples_code.model_time.as_quantity_in(units.Myr))
    # self.assertAlmostRelativeEquals(multiples_code.model_time , 5.96955 | units.Myr, 4)
    self.assertEqual(len(stopping_condition.particles(0)), 1)
    model = stopping_condition.particles(0)[0]
    self.assertEqual(len(model.particles_before_encounter), 2)
    self.assertEqual(len(model.particles_after_encounter), 2)
    before = model.particles_before_encounter
    after = model.particles_after_encounter
    self.assertAlmostRelativeEquals(before.center_of_mass(), after.center_of_mass(), 7)
    self.assertAlmostRelativeEquals(before.center_of_mass_velocity(), after.center_of_mass_velocity(), 7)
    total_energy_before = before.kinetic_energy() + before.potential_energy(G=constants.G)
    total_energy_after = after.kinetic_energy() + after.potential_energy(G=constants.G)
    self.assertAlmostRelativeEquals(total_energy_before, total_energy_after, 7)
def test8(self):
    """A pre-built binary plus a field star: total energy is conserved
    through the encounter to 7 significant digits."""
    code = Hermite()
    particles_in_binary = self.new_binary(
        0.1 | nbody_system.mass,
        0.1 | nbody_system.mass,
        0.01 | nbody_system.length,
        keyoffset=1
    )
    particles_in_binary.radius = 0.001 | nbody_system.length
    binary = datamodel.Particle(key=3)
    binary.child1 = particles_in_binary[0]
    binary.child2 = particles_in_binary[1]
    binary.radius = 0.5 | nbody_system.length
    binary.mass = 0.2 | nbody_system.mass
    binary.position = [0.0, 0.0, 0.0] | nbody_system.length
    binary.velocity = [0.0, 0.0, 0.0] | nbody_system.speed
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler(),
        resolve_collision_code=self.new_smalln(),
        interaction_over_code=None
    )
    encounter_code.parameters.hard_binary_factor = 1
    encounter_code.small_scale_factor = 1
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code
    )
    multiples_code.singles_in_binaries.add_particles(particles_in_binary)
    multiples_code.binaries.add_particle(binary)
    multiples_code.must_handle_one_encounter_per_stopping_condition = False
    field_particle = datamodel.Particle(key=4)
    field_particle.mass = 0.5 | nbody_system.mass
    field_particle.radius = 0.1 | nbody_system.length
    field_particle.position = [0.0, 0.2, 0.0] | nbody_system.length
    field_particle.velocity = [0.0, 0.0, 0.0] | nbody_system.speed
    multiples_code.singles.add_particle(field_particle)
    self.assertEqual(len(multiples_code.singles_in_binaries), 2)
    self.assertEqual(id(multiples_code.binaries[0].child1.particles_set), id(multiples_code.singles_in_binaries))
    multiples_code.commit_particles()
    multiples_code.multiples.radius = 0.5 | nbody_system.length
    initial_energy = multiples_code.get_total_energy()
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.components_of_multiples), 2)
    self.assertEqual(len(multiples_code.particles), 2)
    stopping_condition = multiples_code.stopping_conditions.encounter_detection
    stopping_condition.enable()
    # the multiples bookkeeping energy must match the plain N-body energy
    # of all constituent singles
    singles = datamodel.Particles()
    singles.add_particles(particles_in_binary)
    singles.add_particle(field_particle)
    singles_energy = singles.kinetic_energy() + singles.potential_energy(G=nbody_system.G)
    self.assertAlmostRelativeEquals(initial_energy, singles_energy, 3)
    multiples_code.evolve_model(2 | nbody_system.time)
    final_energy = multiples_code.get_total_energy()
    self.assertTrue(stopping_condition.is_set())
    self.assertAlmostRelativeEquals(initial_energy, final_energy, 7)
def test9(self):
    """update_model propagates a mass change of a binary member to the
    centre-of-mass particle in the gravity code."""
    code = Hermite()
    particles_in_binary = self.new_binary(
        0.1 | nbody_system.mass,
        0.1 | nbody_system.mass,
        0.01 | nbody_system.length,
        keyoffset=1
    )
    particles_in_binary.radius = 0.001 | nbody_system.length
    binary = datamodel.Particle(key=3)
    binary.child1 = particles_in_binary[0]
    binary.child2 = particles_in_binary[1]
    binary.radius = 0.5 | nbody_system.length
    binary.mass = 0.2 | nbody_system.mass
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler(),
        resolve_collision_code=self.new_smalln(),
    )
    # NOTE(review): sibling tests construct field stars with
    # Particles(keys=[...]); the singular 'key=' here may be unintended —
    # confirm against the datamodel.Particles signature.
    others = datamodel.Particles(key=[4, 5, 6])
    for i in range(3):
        others[i].position = [i, 0, 0] | nbody_system.length
        others[i].velocity = [0, 0, i] | nbody_system.speed
        others[i].mass = 1 | nbody_system.mass
        others[i].radius = 0 | nbody_system.length
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code
    )
    multiples_code.singles_in_binaries.add_particles(particles_in_binary)
    multiples_code.binaries.add_particle(binary)
    multiples_code.singles.add_particles(others)
    multiples_code.commit_particles()
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.components_of_multiples), 2)
    self.assertEqual(len(multiples_code.singles), 3)
    self.assertEqual(len(multiples_code.particles), 4)
    self.assertEqual(len(code.particles), 4)
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 0.2 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[-1].mass, 0.2 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[-1].position, [0, 0, 0] | nbody_system.length, 6)
    self.assertAlmostRelativeEquals(code.particles[-1].velocity, [0, 0, 0] | nbody_system.speed, 6)
    # a no-op update must leave the gravity code untouched
    multiples_code.update_model()
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 0.2 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[-1].mass, 0.2 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[-1].position, [0, 0, 0] | nbody_system.length, 6)
    self.assertAlmostRelativeEquals(code.particles[-1].velocity, [0, 0, 0] | nbody_system.speed, 6)
    # change one component's mass; the COM mass, position and velocity in
    # the gravity code must follow after update_model
    multiples_code.singles_in_binaries[0].mass = 0.2 | nbody_system.mass
    multiples_code.update_model()
    print(code.particles.mass)
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 0.3 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[-1].mass, 0.3 | nbody_system.mass)
    print(code.particles[-1].position)
    print(code.particles[-1].velocity)
    self.assertAlmostRelativeEquals(code.particles[-1].position, [0.00166666666667, 0, 0] | nbody_system.length, 6)
    self.assertAlmostRelativeEquals(code.particles[-1].velocity, [0, 0.7453559925, 0] | nbody_system.speed, 6)
self.assertAlmostRelativeEquals(code.particles[-1].velocity, [0, 0.7453559925, 0] | nbody_system.speed, 6)
def test10(self):
    """A binary formed dynamically during evolve; mass updates on its
    components propagate to the centre-of-mass particle."""
    code = Hermite()
    particles_in_binary = self.new_binary(
        0.1 | nbody_system.mass,
        0.1 | nbody_system.mass,
        0.01 | nbody_system.length,
        keyoffset=1
    )
    particles_in_binary.radius = 0.001 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler(),
        resolve_collision_code=self.new_smalln(),
    )
    encounter_code.parameters.hard_binary_factor = 1
    encounter_code.small_scale_factor = 1
    # NOTE(review): sibling tests use Particles(keys=[...]); the singular
    # 'key=' here may be unintended — confirm.
    others = datamodel.Particles(key=[4, 5, 6])
    for i in range(3):
        others[i].position = [i, 0, 0] | nbody_system.length
        others[i].velocity = [0, 0, i] | nbody_system.speed
        others[i].mass = 1 | nbody_system.mass
        others[i].radius = 0.05 | nbody_system.length
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code
    )
    multiples_code.must_handle_one_encounter_per_stopping_condition = False
    multiples_code.singles.add_particles(particles_in_binary)
    multiples_code.singles.add_particles(others)
    multiples_code.commit_particles()
    multiples_code.evolve_model(1 | nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.components_of_multiples), 2)
    self.assertEqual(len(multiples_code.singles), 3)
    self.assertEqual(len(multiples_code.particles), 4)
    self.assertEqual(len(code.particles), 4)
    self.assertEqual(id(multiples_code.singles_in_binaries), id(multiples_code.binaries[0].child1.particles_set))
    self.assertEqual(id(multiples_code.components_of_multiples), id(multiples_code.multiples[0].components[0].particles_set))
    # multiples_code.singles_in_binaries[0].mass = 0.2 | nbody_system.mass
    print(multiples_code.particles.mass)
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 1.1 | nbody_system.mass)
    self.assertAlmostRelativeEquals(multiples_code.particles.mass.sum(), 0.1 + 0.1 + 3.0 | nbody_system.mass)
    multiples_code.update_model()
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 1.1 | nbody_system.mass)
    # the COM particle may not be last in the gravity code's own ordering
    index = -1
    if not code.particles[index].mass > 1.0 | nbody_system.mass:
        index = -2
    self.assertAlmostRelativeEquals(code.particles[index].mass, 1.1 | nbody_system.mass)
    multiples_code.singles_in_binaries[0].mass += 0.2 | nbody_system.mass
    multiples_code.update_model()
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 1.3 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[index].mass, 1.3 | nbody_system.mass)
def test11(self):
    """binaries_change_detection: reports a new binary on formation and an
    updated binary on a later change, conserving total mass."""
    code = Hermite()
    particles_in_binary = self.new_binary(
        1.0 | nbody_system.mass,
        1.0 | nbody_system.mass,
        0.001 | nbody_system.length,
        keyoffset=1
    )
    particles_in_binary.radius = 0.01 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler(),
        resolve_collision_code=self.new_smalln(),
    )
    others = datamodel.Particles(keys=[4, 5, 6])
    for i in range(3):
        others[i].position = [i, 0, 0] | nbody_system.length
        others[i].velocity = [0, 0, 0] | nbody_system.speed
        others[i].mass = 0.2 | nbody_system.mass
        others[i].radius = 0.05 | nbody_system.length
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code
    )
    multiples_code.singles.add_particles(particles_in_binary)
    multiples_code.singles.add_particles(others)
    stopping_condition = multiples_code.stopping_conditions.binaries_change_detection
    stopping_condition.enable()
    multiples_code.commit_particles()
    multiples_code.evolve_model(1 | nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.binaries), 1)
    self.assertEqual(len(multiples_code.components_of_multiples), 2)
    self.assertEqual(len(multiples_code.singles), 3)
    self.assertEqual(len(multiples_code.particles), 4)
    self.assertEqual(len(code.particles), 4)
    self.assertTrue(stopping_condition.is_set())
    multiples_code.particles[-1].velocity = [0, 0, 0] | nbody_system.speed
    multiples_code.update_model()
    print(multiples_code.particles.key)
    self.assertEqual(len(stopping_condition.particles(0)), 1)
    self.assertEqual(len(stopping_condition.particles(1)), 0)
    self.assertEqual(len(stopping_condition.particles(2)), 0)
    self.assertAlmostRelativeEquals(multiples_code.multiples[0].mass, 2.0 | nbody_system.mass)
    self.assertAlmostRelativeEquals(multiples_code.particles.mass.sum(), 2.6 | nbody_system.mass)
    print(multiples_code.particles.velocity)
    multiples_code.evolve_model(2 | nbody_system.time)
    self.assertTrue(stopping_condition.is_set())
    self.assertEqual(len(stopping_condition.particles(0)), 0)
    self.assertEqual(len(stopping_condition.particles(1)), 0)
    self.assertEqual(len(stopping_condition.particles(2)), 1)
    self.assertAlmostRelativeEquals(multiples_code.multiples[0].mass, 2.0 | nbody_system.mass)
    self.assertAlmostRelativeEquals(multiples_code.particles.mass.sum(), 2.6 | nbody_system.mass)
def test12(self):
    """A binary with an explicit COM position shows up at that position in
    the combined particles set after commit_particles."""
    code = Hermite()
    particles_in_binary = self.new_binary(
        1.0 | nbody_system.mass,
        1.0 | nbody_system.mass,
        0.001 | nbody_system.length,
        keyoffset=10
    )
    particles_in_binary.radius = 0.01 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code=self.new_kepler(),
        resolve_collision_code=self.new_smalln(),
    )
    binary = datamodel.Particle(key=20)
    binary.child1 = particles_in_binary[0]
    binary.child2 = particles_in_binary[1]
    binary.position = [1, 0, 1] | nbody_system.length
    # shift the components to match the binary's centre of mass
    particles_in_binary.position += [1, 0, 1] | nbody_system.length
    others = datamodel.Particles(keys=[4, 5, 6])
    for i in range(3):
        others[i].position = [i*10, 0, 0] | nbody_system.length
        others[i].velocity = [0, 0, 0] | nbody_system.speed
        others[i].mass = 0.2 | nbody_system.mass
        others[i].radius = 0.05 | nbody_system.length
    multiples_code = encounters.Multiples(
        gravity_code=code,
        handle_encounter_code=encounter_code
    )
    multiples_code.particles.add_particles(others)
    multiples_code.singles_in_binaries.add_particles(particles_in_binary)
    multiples_code.binaries.add_particle(binary)
    multiples_code.commit_particles()
    print(multiples_code.particles)
    self.assertEqual(len(multiples_code.particles), 4)
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].position, [1, 0, 1] | nbody_system.length)
def test13(self):
code = Hermite()
encounter_code = encounters.HandleEncounter(
kepler_code=self.new_kepler(),
resolve_collision_code=self.new_smalln(),
)
center_of_mass_particles = datamodel.Particles(5)
center_of_mass_particles.position = (numpy.asarray(range(5))).reshape(5, 1) * ([1.0, 0.0, 0.0] | nbody_system.length)
center_of_mass_particles.velocity = [0.0, 0.0, 0.0] | nbody_system.speed
center_of_mass_particles.radius = 0.05 | nbody_system.length
binaries, singles_in_binaries = self.create_binaries(
center_of_mass_particles,
1 | nbody_system.mass,
0.01 | nbody_system.mass,
0.0001 | nbody_system.length
)
multiples_code = encounters.Multiples(
gravity_code=code,
handle_encounter_code=encounter_code
)
multiples_code.singles_in_binaries.add_particles(singles_in_binaries)
multiples_code.binaries.add_particles(binaries)
multiples_code.commit_particles()
# stopping_condition = multiples_code.stopping_conditions.encounter_detection
# stopping_condition.enable()
stopping_condition = multiples_code.stopping_conditions.binaries_change_detection
stopping_condition.enable()
for x in multiples_code.binaries:
print(x.key, x.child1.key, x.child2.key)
multiples_code.evolve_model(1 | nbody_system.time)
self.assertTrue(stopping_condition.is_set())
for x in multiples_code.binaries:
print(x.key, x.child1.key, x.child2.key)
for x in stopping_condition.particles(0):
print("NEW:", x.key, x.child1.key, x.child2.key)
for x in stopping_condition.particles(1):
print("REMOVED:", x.key, x.child1.key, x.child2.key)
for x in stopping_condition.particles(2):
print("UPDATED:", x.key, x.child1.key, x.child2.key)
for x in multiples_code.singles:
print(x.key, x.mass)
self.assertEqual(len(multiples_code.singles_in_binaries) + len(multiples_code.singles), 2*len(center_of_mass_particles))
self.assertEqual(len(multiples_code.binaries) - len(stopping_condition.particles(0)) + len(stopping_condition.particles(1)), len(center_of_mass_particles))
    def test14(self):
        """Evolve five tighter, heavier-secondary binaries on a line.

        Same layout as test13 but with a 0.1 mass secondary and a much
        smaller separation, evolved twice as long; the same bookkeeping
        invariants must hold after the binaries_change_detection fires.
        """
        code = Hermite()
        encounter_code = encounters.HandleEncounter(
            kepler_code=self.new_kepler(),
            resolve_collision_code=self.new_smalln(),
        )
        # Five centers of mass at x = 0..4 along the x-axis, initially at rest.
        center_of_mass_particles = datamodel.Particles(5)
        center_of_mass_particles.position = (numpy.asarray(range(5))).reshape(5, 1) * ([1.0, 0.0, 0.0] | nbody_system.length)
        center_of_mass_particles.velocity = [0.0, 0.0, 0.0] | nbody_system.speed
        center_of_mass_particles.radius = 0.05 | nbody_system.length
        binaries, singles_in_binaries = self.create_binaries(
            center_of_mass_particles,
            1 | nbody_system.mass,
            0.1 | nbody_system.mass,
            0.00000001 | nbody_system.length
        )
        multiples_code = encounters.Multiples(
            gravity_code=code,
            handle_encounter_code=encounter_code
        )
        multiples_code.singles_in_binaries.add_particles(singles_in_binaries)
        multiples_code.binaries.add_particles(binaries)
        multiples_code.commit_particles()
        # stopping_condition = multiples_code.stopping_conditions.encounter_detection
        # stopping_condition.enable()
        stopping_condition = multiples_code.stopping_conditions.binaries_change_detection
        stopping_condition.enable()
        for x in multiples_code.binaries:
            print(x.key, x.child1.key, x.child2.key)
        multiples_code.evolve_model(2 | nbody_system.time)
        self.assertTrue(stopping_condition.is_set())
        for x in multiples_code.binaries:
            print(x.key, x.child1.key, x.child2.key)
        # particles(0) / particles(1) / particles(2) hold new / removed / updated binaries.
        for x in stopping_condition.particles(0):
            print("NEW:", x.key, x.child1.key, x.child2.key)
        for x in stopping_condition.particles(1):
            print("REMOVED:", x.key, x.child1.key, x.child2.key)
        for x in stopping_condition.particles(2):
            print("UPDATED:", x.key, x.child1.key, x.child2.key)
        for x in multiples_code.singles:
            print(x.key, x.mass)
        # Every original binary component is still accounted for, free or bound.
        self.assertEqual(len(multiples_code.singles_in_binaries) + len(multiples_code.singles), 2*len(center_of_mass_particles))
        # Binary count is conserved once new and removed binaries are corrected for.
        self.assertEqual(len(multiples_code.binaries) - len(stopping_condition.particles(0)) + len(stopping_condition.particles(1)), len(center_of_mass_particles))
    def test15(self):
        """Evolve ten binaries seeded from a (deterministic) Plummer model.

        The Plummer model uses a fixed RandomState(1) so the run is
        reproducible.  After evolution the binaries_change_detection must
        have fired and the binary count must balance against the new and
        removed binaries it reports.
        """
        code = Hermite()
        encounter_code = encounters.HandleEncounter(
            kepler_code=self.new_kepler(),
            resolve_collision_code=self.new_smalln(),
        )
        n = 10
        center_of_mass_particles = plummer.new_plummer_model(n, random=numpy.random.mtrand.RandomState(1))
        center_of_mass_particles.radius = 0.5 | nbody_system.length
        # Zero out the Plummer velocities so encounters come from free fall only.
        center_of_mass_particles.velocity *= 0
        binaries, singles_in_binaries = self.create_binaries(
            center_of_mass_particles,
            0.999 * ((1.0 | nbody_system.mass) / n),
            0.001 * ((1.0 | nbody_system.mass) / n),
            0.00001 | nbody_system.length
        )
        multiples_code = encounters.Multiples(
            gravity_code=code,
            handle_encounter_code=encounter_code
        )
        multiples_code.singles_in_binaries.add_particles(singles_in_binaries)
        multiples_code.binaries.add_particles(binaries)
        multiples_code.commit_particles()
        # stopping_condition = multiples_code.stopping_conditions.encounter_detection
        # stopping_condition.enable()
        stopping_condition = multiples_code.stopping_conditions.binaries_change_detection
        stopping_condition.enable()
        for x in multiples_code.binaries:
            print(x.key, x.child1.key, x.child2.key)
        multiples_code.evolve_model(2 | nbody_system.time)
        self.assertTrue(stopping_condition.is_set())
        for x in multiples_code.binaries:
            print(x.key, x.child1.key, x.child2.key)
        # particles(0) / particles(1) / particles(2) hold new / removed / updated binaries.
        for x in stopping_condition.particles(0):
            print("NEW:", x.key, x.child1.key, x.child2.key)
        for x in stopping_condition.particles(1):
            print("REMOVED:", x.key, x.child1.key, x.child2.key)
        for x in stopping_condition.particles(2):
            print("UPDATED:", x.key, x.child1.key, x.child2.key)
        for x in multiples_code.singles:
            print(x.key, x.mass)
        # Binary count is conserved once new and removed binaries are corrected for.
        self.assertEqual(len(multiples_code.binaries) - len(stopping_condition.particles(0)) + len(stopping_condition.particles(1)), len(center_of_mass_particles))
    def test16(self):
        """Round-trip a multiples simulation through HDF5 and restart it.

        A sticky encounter first merges two singles into one multiple.  All
        five particle sets are then written to a temporary HDF5 file
        (storage version 2.0), read back into a fresh Multiples instance,
        and both the original and the restarted code are evolved further.
        The restarted run must reproduce the original positions exactly.
        """
        code = Hermite()
        n = 10
        # Ten equal-mass singles at rest, spaced quadratically along x so the
        # innermost pair is close enough to collide within one time unit.
        singles = datamodel.Particles(keys=range(1, n+1))
        singles.mass = 1 | nbody_system.mass
        for x in range(n):
            singles[x].position = [x*x, 0, 0] | nbody_system.length
        singles.velocity = [0, 0, 0] | nbody_system.speed
        singles.radius = 0.5 | nbody_system.length
        multiples_code = encounters.Multiples(
            gravity_code=code,
            handle_encounter_code=encounters.StickyHandleEncounter()
        )
        multiples_code.singles.add_particles(singles)
        multiples_code.commit_particles()
        multiples_code.evolve_model(1 | nbody_system.time)
        print(len(multiples_code.multiples))
        # One sticky encounter: two singles became a binary wrapped in a multiple.
        self.assertEqual(len(multiples_code.multiples), 1)
        self.assertEqual(len(multiples_code.particles), 9)
        self.assertEqual(len(multiples_code.singles), 8)
        self.assertEqual(len(multiples_code.binaries), 1)
        self.assertEqual(len(multiples_code.singles_in_binaries), 2)
        self.assertEqual(id(multiples_code.components_of_multiples), id(multiples_code.multiples[0].components[0].particles_set))
        print(multiples_code.multiples[0].components)
        with tempfile.NamedTemporaryFile() as temp:
            # Persist all five particle sets in one file, keyed by name.
            io.write_set_to_file(
                (
                    multiples_code.singles,
                    multiples_code.singles_in_binaries,
                    multiples_code.binaries,
                    multiples_code.components_of_multiples,
                    multiples_code.multiples
                ),
                temp.name,
                # "multiples.hdf5",
                "hdf5",
                overwrite_file=True,
                version="2.0",
                names=(
                    "singles",
                    "singles_in_binaries",
                    "binaries",
                    "components_of_multiples",
                    "multiples"
                )
            )
            multiples_code_loaded = encounters.Multiples(
                gravity_code=Hermite(),
                handle_encounter_code=encounters.StickyHandleEncounter()
            )
            # Read the sets back while the temporary file still exists.
            (
                singles,
                singles_in_binaries,
                binaries,
                components_of_multiples,
                multiples
            ) = io.read_set_from_file(
                temp.name,
                # "multiples.hdf5",
                "hdf5",
                version="2.0",
                names=(
                    "singles",
                    "singles_in_binaries",
                    "binaries",
                    "components_of_multiples",
                    "multiples"
                )
            )
        self.assertEqual(len(multiples), 1)
        self.assertEqual(len(singles), 8)
        self.assertEqual(len(binaries), 1)
        self.assertEqual(len(singles_in_binaries), 2)
        # self.assertEquals(id(components_of_multiples), id(multiples[0].components[0].particles_set))
        multiples_code_loaded.singles.add_particles(singles)
        multiples_code_loaded.singles_in_binaries.add_particles(singles_in_binaries)
        multiples_code_loaded.binaries.add_particles(binaries)
        multiples_code_loaded.components_of_multiples.add_particles(components_of_multiples)
        multiples_code_loaded.multiples.add_particles(multiples)
        multiples_code_loaded.commit_particles()
        # The restarted code must reconstruct exactly the same bookkeeping.
        self.assertEqual(len(multiples_code_loaded.multiples), 1)
        self.assertEqual(len(multiples_code_loaded.particles), 9)
        self.assertEqual(len(multiples_code_loaded.singles), 8)
        self.assertEqual(len(multiples_code_loaded.binaries), 1)
        self.assertEqual(len(multiples_code_loaded.singles_in_binaries), 2)
        # self.assertEquals(id(multiples_code_loaded.components_of_multiples), id(multiples_code_loaded.multiples[0].components[0].particles_set))
        multiples_code.evolve_model(4 | nbody_system.time)
        # need to use 3 here as the model_time is reset when doing a restart and we did not set it after creating Hermite
        multiples_code_loaded.evolve_model(3.0 | nbody_system.time)
        print(len(multiples_code.multiples), multiples_code.particles)
        print(multiples_code.particles.position - multiples_code_loaded.particles.position)
        # Bitwise-reproducible restart: positions must agree exactly.
        self.assertAlmostRelativeEquals(multiples_code.particles.position - multiples_code_loaded.particles.position, [0, 0, 0] | nbody_system.length)
        for code in [multiples_code, multiples_code_loaded]:
            self.assertEqual(len(code.multiples), 1)
            self.assertEqual(len(code.particles), 8)
            self.assertEqual(len(code.singles), 7)
            self.assertEqual(len(code.binaries), 1)
            self.assertEqual(len(code.singles_in_binaries), 2)
            self.assertEqual(len(code.components_of_multiples), 3)
            self.assertEqual(id(code.components_of_multiples), id(code.multiples[0].components[0].particles_set))
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@test@suite@codes_tests@test_multiples.py@.PATH_END.py
|
{
"filename": "makePlanetInput_ntl-checkpoint.ipynb",
"repo_name": "stevepur/DR25-occurrence-public",
"repo_path": "DR25-occurrence-public_extracted/DR25-occurrence-public-main/GKbaseline/.ipynb_checkpoints/makePlanetInput_ntl-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
This notebook prepares a planet candidate catalog for the stellar population in the specified input stellar catalog. It computes the reliability, corrected planet radius and includes useful planet properties such as robovetter score. It outputs two catalogs, one that contains only PCs and one that contains all KOIs.
Reliability is given by
$$ R = \frac{N_{\mathrm{truePC}}}{N_{\mathrm{obsPC}}} = 1 - \frac{N_{\mathrm{obsFP}}}{N_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{F_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) $$
where $E = N_{\mathrm{obsFP}}/N_{\mathrm{trueFP}}$ is the false positive effectiveness, $F_{\mathrm{obsFP}} = N_{\mathrm{obsFP}}/N_{\mathrm{obsTCEs}}$ is the fraction of observed TCEs that are dispositioned as FP and $F_{\mathrm{obsPC}} = N_{\mathrm{obsPC}}/N_{\mathrm{obsTCEs}}$ is the fraction of TCEs dispositioned as PC.
We will separately measure $E$ and $F_{\mathrm{obsFP}}$ as binomial point processes with probabilities that depend on period and MES. Once we have $F_{\mathrm{obsFP}}$ then $F_{\mathrm{obsPC}} = 1 - F_{\mathrm{obsFP}}$, assuming that $N_{\mathrm{obsTCEs}} = N_{\mathrm{obsPC}} + N_{\mathrm{obsFP}}$.
We think of TCEs as consisting of two sets: those that are dispositioned as FP and those that are dispositioned as PC. We do this for both the observed TCEs, and for inverted/scrambled TCEs, where all TCEs are true false positives. Then we can think of the vetting process as drawing from the set of TCEs, with a probability $r$ of selecting either PCs or FPs. Then the probability distribution of selecting $c$ FPs from $n$ TCEs is given by the binomial distribution
$$P\{c\} = \left( \begin{array}{c} n \\ c \end{array} \right) r^c (1-r)^{n-c}.$$
To measure $E$ we use the inverted and scrambled data sets, where all detected TCEs are by definition FPs. We define $E$ as the probability of drawing FPs from inverted/scrambled TCEs, found via the Bayesian inference $p(E|n, c) \propto p(c|E, n) p(E)$, where
$$p(c|E, n) = \left( \begin{array}{c} n \\ c \end{array} \right) E^c (1-E)^{n-c}$$ and
$p(E)$ is a prior distribution of the probability $E$. By putting the data on a grid indexed by $i,j$, we can fit effectiveness as a function parameterized by a vector $\theta$, $E(\theta,\mathrm{period},\mathrm{MES})$, as $p(\theta|n_{i,j}, c_{i,j}, \mathrm{period}_{i,j},\mathrm{MES}_{i,j}) \propto p(c_{i,j}|\theta, n_{i,j}, \mathrm{period}_{i,j},\mathrm{MES}_{i,j}) p(\theta)$, where $p(\theta)$ is some prior distribution of the parameters.
To measure $F_{\mathrm{obsFP}}$ we perform a similar inference using the set of observed TCEs, inferring the probability of drawing $c$ FPs from $n$ observed TCEs. The inference in this case becomes $p(F_{\mathrm{obsFP}}|n, c) \propto p(c|F_{\mathrm{obsFP}}, n) p(F_{\mathrm{obsFP}})$, which we can parameterize in terms of a function similar to effectiveness.
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spec
import pandas as pd
from astropy.io import ascii
from astropy.table import Table, vstack
import pickle
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import sys
sys.path.insert(0, '..')
import dr25Models as funcModels
```
Reliability is given by
$$ R = \frac{N_{\mathrm{truePC}}}{N_{\mathrm{obsPC}}} = 1 - \frac{N_{\mathrm{obsFP}}}{N_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{F_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{1 - F_{\mathrm{obsFP}}} \left( \frac{1 - E}{E} \right) $$
where $E = N_{\mathrm{obsFP}}/N_{\mathrm{trueFP}}$, $F_{\mathrm{obsFP}} = N_{\mathrm{obsFP}}/N_{\mathrm{obsTCEs}}$ is the fraction of observed TCEs that are dispositioned as FP and $F_{\mathrm{obsPC}} = N_{\mathrm{obsPC}}/N_{\mathrm{obsTCEs}}$ is the fraction of TCEs dispositioned as PC.
We get $E$ and $F_{\mathrm{obsFP}}$ from the outputs of the notebooks binomialFPEffectiveness.ipynb and binomialObsFPRate.ipynb.
```python
# set the effectiveness model
fpEffModel = "rotatedLogisticX0"
# set the obs FP rate model
obsModel = "rotatedLogisticX0"
# read in the model parameters
tt = pd.read_pickle("fpEffectivenessTable.pkl")
tm = tt[tt.Model == fpEffModel]
fpEffXRange = tm.periodRange.values[0]
fpEffYRange = tm.mesRange.values[0]
fpEffTheta = tm.medianMCMCTheta.values[0]
tt = pd.read_pickle("obsFpTable_ntl.pkl")
tm = tt[tt.Model == obsModel]
obsXRange = tm.periodRange.values[0]
obsYRange = tm.mesRange.values[0]
obsTheta = tm.medianMCMCTheta.values[0]
```
```python
cellPeriod, cellMes = np.meshgrid(np.array(np.linspace(fpEffXRange[0], fpEffXRange[1], 200)),
np.array(np.linspace(fpEffYRange[0], fpEffYRange[1], 200)))
effFit = funcModels.evaluateModel(cellPeriod, cellMes, fpEffTheta, fpEffXRange, fpEffYRange, fpEffModel)
obsFit = funcModels.evaluateModel(cellPeriod, cellMes, obsTheta, obsXRange, obsYRange, obsModel)
```
```python
fig = plt.figure(figsize=plt.figaspect(0.3));
R = 1 - (obsFit/(1-obsFit))*((1-effFit)/effFit)
pR = R;
pR[pR<0] = 0;
ax = fig.add_subplot(1, 3, 1, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,0)
ax = fig.add_subplot(1, 3, 2, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,-90)
plt.title("Reliability");
ax = fig.add_subplot(1, 3, 3, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
fig, ax = plt.subplots(figsize=(5,5));
CS = ax.contour(cellPeriod, cellMes, pR, levels = [.45, .5, .55, .6, .7, .75, .8, .85, .9, .95, .99]);
ax.clabel(CS, inline=1, fontsize=10);
plt.xlabel("period");
plt.ylabel("MES");
```


```python
fig = plt.figure(figsize=plt.figaspect(0.3));
R = (1-effFit)/effFit
pR = R;
pR[pR<0] = 0;
ax = fig.add_subplot(1, 3, 1, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,0)
ax = fig.add_subplot(1, 3, 2, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,-90)
plt.title("1-E/E");
ax = fig.add_subplot(1, 3, 3, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
fig, ax = plt.subplots(figsize=(5,5));
CS = ax.contour(cellPeriod, cellMes, pR);
ax.clabel(CS, inline=1, fontsize=10);
plt.xlabel("period");
plt.ylabel("MES");
```


```python
fig = plt.figure(figsize=plt.figaspect(0.3));
R = obsFit/(1-obsFit)
pR = R;
pR[pR<0] = 0;
ax = fig.add_subplot(1, 3, 1, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,0)
ax = fig.add_subplot(1, 3, 2, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,-90)
plt.title("obs/(1-obs)");
ax = fig.add_subplot(1, 3, 3, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
fig, ax = plt.subplots(figsize=(5,5));
CS = ax.contour(cellPeriod, cellMes, pR);
ax.clabel(CS, inline=1, fontsize=10);
plt.xlabel("period");
plt.ylabel("MES");
```


```python
R = 1 - (obsFit/(1-obsFit))*((1-effFit)/effFit)
pR = R;
pR[pR<0] = 0;
sp = np.zeros([3,3])
sPeriod = np.array([[0, 10, 200], [0, 10, 200], [0, 10, 200]])
sMes = np.array([[0, 0, 0], [10, 10, 10], [30, 30, 30]])
sp[0,0] = np.mean(np.mean(pR[np.where((cellPeriod > 0) & (cellPeriod <= 20) & (cellMes > 20) & (cellMes <= 200))]))
sp[0,1] = np.mean(np.mean(pR[np.where((cellPeriod > 20) & (cellPeriod <= 200) & (cellMes > 20) & (cellMes <= 200))]))
sp[0,2] = np.mean(np.mean(pR[np.where((cellPeriod > 200) & (cellPeriod <= 500) & (cellMes > 20) & (cellMes <= 200))]))
sp[1,0] = np.mean(np.mean(pR[np.where((cellPeriod > 0) & (cellPeriod <= 20) & (cellMes > 10) & (cellMes <= 20))]))
sp[1,1] = np.mean(np.mean(pR[np.where((cellPeriod > 20) & (cellPeriod <= 200) & (cellMes > 10) & (cellMes <= 20))]))
sp[1,2] = np.mean(np.mean(pR[np.where((cellPeriod > 200) & (cellPeriod <= 500) & (cellMes > 10) & (cellMes <= 20))]))
sp[2,0] = np.mean(np.mean(pR[np.where((cellPeriod > 0) & (cellPeriod <= 20) & (cellMes > 0) & (cellMes <= 10))]))
sp[2,1] = np.mean(np.mean(pR[np.where((cellPeriod > 20) & (cellPeriod <= 200) & (cellMes > 0) & (cellMes <= 10))]))
sp[2,2] = np.mean(np.mean(pR[np.where((cellPeriod > 200) & (cellPeriod <= 500) & (cellMes > 0) & (cellMes <= 10))]))
x = np.array([[0, 1, 2], [0, 1, 2], [0, 1, 2]])
y = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
dx = 1
dy = 1
imageSize = (3,3)
plt.figure(figsize=imageSize);
fig, ax = plt.subplots(figsize=imageSize);
da = np.transpose(sp);
ax.imshow(da);
# ax.imshow(da, origin='lower');
arrayShape = da.shape;
for i in range(arrayShape[0]):
for j in range(arrayShape[1]):
if da[i, j] < 0.7:
c = "w"
else:
c = "k"
text = ax.text(x[(j,i)]+dx/2, y[(j,i)]+dy/2, round(da[i, j],3),
ha="center", va="center", color=c);
sp
```
array([[0.99952036, 0.99800576, 0.95404595],
[0.9983263 , 0.99447956, 0.91202263],
[0.98286909, 0.95805152, 0.69536692]])
<Figure size 216x216 with 0 Axes>

```python
def computeReliabiltyPosterior(xp, yp, eSamples, oSamples):
r = np.zeros(np.shape(eSamples)[0])
for i in range(np.shape(eSamples)[0]):
e = funcModels.evaluateModel(xp, yp, eSamples[i,:], fpEffXRange, fpEffYRange, fpEffModel)
o = funcModels.evaluateModel(xp, yp, oSamples[i,:], obsXRange, obsYRange, obsModel)
r[i] = 1 - (o/(1-o))*((1-e)/e)
e = funcModels.evaluateModel(xp, yp, fpEffTheta, fpEffXRange, fpEffYRange, fpEffModel)
o = funcModels.evaluateModel(xp, yp, obsTheta, obsXRange, obsYRange, obsModel)
f = 1 - (o/(1-o))*((1-e)/e)
return r, f
```
```python
eSamples = np.load("binEffPosteriors_" + str(fpEffModel) + ".npy");
oSamples = np.load("binObsPosteriors_" + str(obsModel) + ".npy");
r1, f1 = computeReliabiltyPosterior(200., 25., eSamples, oSamples)
r2, f2 = computeReliabiltyPosterior(365., 10., eSamples, oSamples)
r3, f3 = computeReliabiltyPosterior(365., 8., eSamples, oSamples)
rr = np.percentile(r1, [5, 95]);
plt.hist(r1[(r1 > 0.95*rr[0]) & (r1 < 1.05*rr[1])], 100);
plt.plot([f1, f1], [0, 3000], color='k', linestyle='--', linewidth=1)
rr = np.percentile(r2, [5, 95]);
plt.hist(r2[(r2 > 0.95*rr[0]) & (r2 < 1.05*rr[1])], 100, alpha = 0.5);
plt.plot([f2, f2], [0, 3000], color='k', linestyle='--', linewidth=1)
rr = np.percentile(r3, [5, 95]);
plt.hist(r3[(r3 > 0.95*rr[0]) & (r3 < 1.05*rr[1])], 100, alpha = 0.5);
plt.plot([f3, f3], [0, 3000], color='k', linestyle='--', linewidth=1)
```
[<matplotlib.lines.Line2D at 0x105afe410>]

```python
import requests
from cStringIO import StringIO
selectStr = "kepid,kepoi_name,koi_tce_plnt_num,koi_pdisposition,koi_score,koi_period,koi_max_mult_ev,koi_prad,koi_prad_err1,koi_prad_err2,koi_ror,koi_ror_err1,koi_ror_err2"
urlDr25Koi = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=q1_q17_dr25_koi&select=" + selectStr
r = requests.get(urlDr25Koi)
if r.status_code != requests.codes.ok:
r.raise_for_status()
fh = StringIO(r.content)
dr25Koi = pd.read_csv(fh, dtype={"kepoi_name":str})
print("Loaded " + str(len(dr25Koi)) + " KOIs")
```
Loaded 8054 KOIs
```python
# restrict the population to stars in the Travis' catalog
dr25CleanStellarIso = pd.read_csv("../stellarCatalogs/dr25_stellar_supp_gaia_clean_GK.txt")
dr25Koi = dr25Koi[dr25Koi.kepid.isin(dr25CleanStellarIso.kepid)]
dr25Koi = dr25Koi.reset_index(drop=True)
print("After removing planets not in Travis' list, we have " + str(len(dr25Koi)) + " KOIs")
```
After removing planets not in Travis' list, we have 2464 KOIs
```python
# merge in only iso_rad and uncertainties from the stellar table
dr25Koi = pd.merge(dr25Koi, dr25CleanStellarIso[["kepid","iso_rad","iso_rad_err1","iso_rad_err2"]], on="kepid", how="inner")
```
```python
# correct the planet radii with the new catalog
rEarth = 6356.8 # km
rSun = 695700 # km
dr25Koi['corrected_prad'] = dr25Koi['koi_ror']*dr25Koi['iso_rad']*rSun/rEarth;
dr25Koi['corrected_prad_err1'] = np.sqrt(dr25Koi['koi_ror_err1']**2*dr25Koi['iso_rad']**2
+dr25Koi['koi_ror']**2*dr25Koi['iso_rad_err1']**2)*rSun/rEarth;
dr25Koi['corrected_prad_err2'] = -np.sqrt(dr25Koi['koi_ror_err2']**2*dr25Koi['iso_rad']**2
+dr25Koi['koi_ror']**2*dr25Koi['iso_rad_err2']**2)*rSun/rEarth;
dr25Koi = dr25Koi[~np.isnan(dr25Koi.koi_prad)]
```
```python
v = dr25Koi.corrected_prad_err1/dr25Koi.koi_prad_err1
plt.hist(v[v<5], 100);
```

```python
```
```python
fig, ax = plt.subplots(figsize=(15,10));
ax.errorbar(dr25Koi.koi_period, dr25Koi.koi_prad,
yerr = [-dr25Koi.koi_prad_err2, dr25Koi.koi_prad_err1],
fmt="k.", alpha = 0.5);
ax.errorbar(dr25Koi.koi_period, dr25Koi.corrected_prad,
yerr = [-dr25Koi.corrected_prad_err2, dr25Koi.corrected_prad_err1],
fmt="r.", alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Radius Change");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
```
(50, 400)

```python
```
```python
dr25Fpp = ascii.read("../data/q1_q17_dr25_koifpp.txt")
dr25FppPd = dr25Fpp.to_pandas()
```
```python
```
```python
mergedDr25Koi = pd.merge(dr25Koi, dr25FppPd, on="kepoi_name", how="inner")
```
```python
mergedDr25Koi.loc[:,"fpEffectiveness"] = pd.Series(
funcModels.evaluateModel(mergedDr25Koi.koi_period,
mergedDr25Koi.koi_max_mult_ev, fpEffTheta,
fpEffXRange, fpEffYRange, fpEffModel), index = mergedDr25Koi.index)
mergedDr25Koi.loc[:,"obsFpRate"] = pd.Series(
funcModels.evaluateModel(mergedDr25Koi.koi_period,
mergedDr25Koi.koi_max_mult_ev, obsTheta,
obsXRange, obsYRange, obsModel), index = mergedDr25Koi.index)
mergedDr25Koi.loc[:,"reliability"] = pd.Series(
1-(mergedDr25Koi.obsFpRate/(1-mergedDr25Koi.obsFpRate))
*(1-mergedDr25Koi.fpEffectiveness)/mergedDr25Koi.fpEffectiveness, index = mergedDr25Koi.index)
mergedDr25Koi.reliability[mergedDr25Koi.reliability < 0.] = 0.
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:14: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
```python
plt.hist(mergedDr25Koi.koi_score, 40);
plt.yscale('log', nonposy='clip')
```

```python
np.sum(np.isnan(mergedDr25Koi.fpp_prob) & mergedDr25Koi.koi_period > 50)
```
0
```python
mergedDr25Koi[np.abs(mergedDr25Koi.koi_period - mergedDr25Koi.fpp_koi_period)>1e-2]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>kepid_x</th>
<th>kepoi_name</th>
<th>koi_tce_plnt_num</th>
<th>koi_pdisposition</th>
<th>koi_score</th>
<th>koi_period</th>
<th>koi_max_mult_ev</th>
<th>koi_prad</th>
<th>koi_prad_err1</th>
<th>koi_prad_err2</th>
<th>...</th>
<th>corrected_prad</th>
<th>corrected_prad_err1</th>
<th>corrected_prad_err2</th>
<th>rowid</th>
<th>kepid_y</th>
<th>fpp_koi_period</th>
<th>fpp_prob</th>
<th>fpEffectiveness</th>
<th>obsFpRate</th>
<th>reliability</th>
</tr>
</thead>
<tbody>
<tr>
<th>1897</th>
<td>9394762</td>
<td>K05664.01</td>
<td>1</td>
<td>FALSE POSITIVE</td>
<td>0.0</td>
<td>77.138911</td>
<td>11.215458</td>
<td>3.39</td>
<td>1.02</td>
<td>-0.27</td>
<td>...</td>
<td>3.415315</td>
<td>22.841603</td>
<td>-1.055625</td>
<td>6112</td>
<td>9394762</td>
<td>308.57</td>
<td>0.68</td>
<td>0.993294</td>
<td>0.480444</td>
<td>0.993757</td>
</tr>
</tbody>
</table>
<p>1 rows × 26 columns</p>
</div>
```python
mergedDr25Koi["fpp_prob_use"] = mergedDr25Koi["fpp_prob"]
mergedDr25Koi.fpp_prob_use[np.isnan(mergedDr25Koi.fpp_prob)] = 1
mergedDr25Koi.fpp_prob_use[np.abs(mergedDr25Koi.koi_period - mergedDr25Koi.fpp_koi_period)>1e-2] = 1
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
This is separate from the ipykernel package so we can avoid doing imports until
```python
mergedDr25Koi[np.abs(mergedDr25Koi.koi_period - mergedDr25Koi.fpp_koi_period)>1e-2]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>kepid_x</th>
<th>kepoi_name</th>
<th>koi_tce_plnt_num</th>
<th>koi_pdisposition</th>
<th>koi_score</th>
<th>koi_period</th>
<th>koi_max_mult_ev</th>
<th>koi_prad</th>
<th>koi_prad_err1</th>
<th>koi_prad_err2</th>
<th>...</th>
<th>corrected_prad_err1</th>
<th>corrected_prad_err2</th>
<th>rowid</th>
<th>kepid_y</th>
<th>fpp_koi_period</th>
<th>fpp_prob</th>
<th>fpEffectiveness</th>
<th>obsFpRate</th>
<th>reliability</th>
<th>fpp_prob_use</th>
</tr>
</thead>
<tbody>
<tr>
<th>1897</th>
<td>9394762</td>
<td>K05664.01</td>
<td>1</td>
<td>FALSE POSITIVE</td>
<td>0.0</td>
<td>77.138911</td>
<td>11.215458</td>
<td>3.39</td>
<td>1.02</td>
<td>-0.27</td>
<td>...</td>
<td>22.841603</td>
<td>-1.055625</td>
<td>6112</td>
<td>9394762</td>
<td>308.57</td>
<td>0.68</td>
<td>0.993294</td>
<td>0.480444</td>
<td>0.993757</td>
<td>1.0</td>
</tr>
</tbody>
</table>
<p>1 rows × 27 columns</p>
</div>
```python
mergedDr25Koi["totalReliability"] = (1-mergedDr25Koi.fpp_prob_use)*mergedDr25Koi.reliability
```
```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.koi_max_mult_ev, cmap="viridis",
c=mergedDr25Koi.reliability, edgecolors='k', s=100*mergedDr25Koi.totalReliability, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
plt.title("KOI Reliability, size = total reliability");
plt.ylim([7, 50])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=mergedDr25Koi.reliability, edgecolors='k', s=100*mergedDr25Koi.totalReliability, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Reliability, size = total reliability");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
dr25PC = mergedDr25Koi[mergedDr25Koi.koi_pdisposition == "CANDIDATE"]
dr25FP = mergedDr25Koi[mergedDr25Koi.koi_pdisposition == "FALSE POSITIVE"]
# remove those with corrected_prad = NAN
dr25PC = dr25PC[~np.isnan(dr25PC.corrected_prad)]
dr25FP = dr25FP[~np.isnan(dr25FP.corrected_prad)]
mergedDr25Koi = mergedDr25Koi[~np.isnan(mergedDr25Koi.corrected_prad)]
print("There are " + str(len(dr25PC)) + " PCs in " + str(len(dr25CleanStellarIso)) + " observed targets")
print("There are " + str(len(dr25FP)) + " FPs in " + str(len(dr25CleanStellarIso)) + " observed targets")
```
There are 1821 PCs in 60220 observed targets
There are 641 FPs in 60220 observed targets
```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.koi_max_mult_ev, cmap="viridis",
c=dr25PC.reliability, edgecolors='k', s=100*dr25PC.totalReliability, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
plt.title("PC Reliability, size = total reliability");
plt.ylim([7, 30])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=dr25PC.reliability, edgecolors='k', s=100*dr25PC.totalReliability, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("PC Reliability, size = total reliability");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
rs = mergedDr25Koi.totalReliability*mergedDr25Koi.koi_score
ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, marker="+", alpha=0.2);
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=rs, edgecolors='k', s=100*rs, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Total Reliability x Score");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("KOI Total Reliability x Score");
```

```python
plt.hist(dr25PC.corrected_prad/dr25PC.koi_prad, 100);
#plt.yscale('log', nonposy='clip')
```

```python
plt.hist(dr25CleanStellarIso.radius[dr25CleanStellarIso.radius<2]/dr25CleanStellarIso.radius_DR25[dr25CleanStellarIso.radius<2], 100);
#plt.yscale('log', nonposy='clip')
```

```python
dr25PC.to_csv("koiCatalogs/dr25_GK_PCs_ntl.csv", index=False)
mergedDr25Koi.to_csv("koiCatalogs/dr25_GK_KOIs_ntl.csv", index=False)
```
```python
fig, ax = plt.subplots(figsize=(15,10));
ax.errorbar(dr25PC.koi_period, dr25PC.koi_prad,
yerr = [-dr25PC.koi_prad_err2, dr25PC.koi_prad_err1],
fmt="k.", alpha = 0.5);
ax.errorbar(dr25PC.koi_period, dr25PC.corrected_prad,
yerr = [-dr25PC.corrected_prad_err2, dr25PC.corrected_prad_err1],
fmt="r.", alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Radius Change");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
```
(50, 400)

```python
plt.hist(dr25PC.koi_score, 40);
plt.yscale('log', nonposy='clip')
plt.title("PC score distribution")
plt.hist(dr25FP.koi_score, 40, alpha=0.5);
plt.yscale('log', nonposy='clip')
plt.title("FP score distribution")
```
Text(0.5,1,'FP score distribution')

```python
```
|
stevepurREPO_NAMEDR25-occurrence-publicPATH_START.@DR25-occurrence-public_extracted@DR25-occurrence-public-main@GKbaseline@.ipynb_checkpoints@makePlanetInput_ntl-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "core.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/matplotlib/py3/matplotlib/style/core.py",
"type": "Python"
}
|
"""
Core functions and attributes for the matplotlib style library:
``use``
Select style sheet to override the current matplotlib settings.
``context``
Context manager to use a style sheet temporarily.
``available``
List available style sheets.
``library``
A dictionary of style names and matplotlib settings.
"""
import contextlib
import logging
import os
from pathlib import Path
import sys
import warnings
if sys.version_info >= (3, 10):
import importlib.resources as importlib_resources
else:
# Even though Py3.9 has importlib.resources, it doesn't properly handle
# modules added in sys.path.
import importlib_resources
import matplotlib as mpl
from matplotlib import _api, _docstring, _rc_params_in_file, rcParamsDefault
_log = logging.getLogger(__name__)
__all__ = ['use', 'context', 'available', 'library', 'reload_library']
# Directory containing the style sheets that ship with Matplotlib itself.
BASE_LIBRARY_PATH = os.path.join(mpl.get_data_path(), 'stylelib')
# Users may want multiple library paths, so store a list of paths.
USER_LIBRARY_PATHS = [os.path.join(mpl.get_configdir(), 'stylelib')]
# File extension identifying a style sheet file.
STYLE_EXTENSION = 'mplstyle'
# A list of rcParams that should not be applied from styles
# (they configure the runtime environment, not plot appearance).
STYLE_BLACKLIST = {
    'interactive', 'backend', 'webagg.port', 'webagg.address',
    'webagg.port_retries', 'webagg.open_in_browser', 'backend_fallback',
    'toolbar', 'timezone', 'figure.max_open_warning',
    'figure.raise_window', 'savefig.directory', 'tk.window_focus',
    'docstring.hardcopy', 'date.epoch'}
# Substitute the sorted blacklist into the docstring's %s placeholder.
@_docstring.Substitution(
    "\n".join(map("- {}".format, sorted(STYLE_BLACKLIST, key=str.lower)))
)
def use(style):
    """
    Use Matplotlib style settings from a style specification.
    The style name of 'default' is reserved for reverting back to
    the default style settings.
    .. note::
        This updates the `.rcParams` with the settings from the style.
        `.rcParams` not defined in the style are kept.
    Parameters
    ----------
    style : str, dict, Path or list
        A style specification. Valid options are:
        str
            - One of the style names in `.style.available` (a builtin style or
              a style installed in the user library path).
            - A dotted name of the form "package.style_name"; in that case,
              "package" should be an importable Python package name, e.g. at
              ``/path/to/package/__init__.py``; the loaded style file is
              ``/path/to/package/style_name.mplstyle``. (Style files in
              subpackages are likewise supported.)
            - The path or URL to a style file, which gets loaded by
              `.rc_params_from_file`.
        dict
            A mapping of key/value pairs for `matplotlib.rcParams`.
        Path
            The path to a style file, which gets loaded by
            `.rc_params_from_file`.
        list
            A list of style specifiers (str, Path or dict), which are applied
            from first to last in the list.
    Notes
    -----
    The following `.rcParams` are not related to style and will be ignored if
    found in a style specification:
    %s
    """
    if isinstance(style, (str, Path)) or hasattr(style, 'keys'):
        # If name is a single str, Path or dict, make it a single element list.
        styles = [style]
    else:
        styles = style
    # Legacy aliases kept for backward compatibility.
    style_alias = {'mpl20': 'default', 'mpl15': 'classic'}
    # Each specifier is resolved in turn; a str goes through the lookups
    # below (alias -> 'default' -> library -> dotted package name) and, if
    # none of those resolved it, is finally treated as a path/URL.
    for style in styles:
        if isinstance(style, str):
            style = style_alias.get(style, style)
            if style == "default":
                # Deprecation warnings were already handled when creating
                # rcParamsDefault, no need to reemit them here.
                with _api.suppress_matplotlib_deprecation_warning():
                    # don't trigger RcParams.__getitem__('backend')
                    style = {k: rcParamsDefault[k] for k in rcParamsDefault
                             if k not in STYLE_BLACKLIST}
            elif style in library:
                style = library[style]
            elif "." in style:
                pkg, _, name = style.rpartition(".")
                try:
                    path = (importlib_resources.files(pkg)
                            / f"{name}.{STYLE_EXTENSION}")
                    style = _rc_params_in_file(path)
                except (ModuleNotFoundError, OSError, TypeError) as exc:
                    # There is an ambiguity whether a dotted name refers to a
                    # package.style_name or to a dotted file path.  Currently,
                    # we silently try the first form and then the second one;
                    # in the future, we may consider forcing file paths to
                    # either use Path objects or be prepended with "./" and use
                    # the slash as marker for file paths.
                    pass
        # A str/Path that survived the lookups above is treated as a file
        # path or URL to a style file.
        if isinstance(style, (str, Path)):
            try:
                style = _rc_params_in_file(style)
            except OSError as err:
                raise OSError(
                    f"{style!r} is not a valid package style, path of style "
                    f"file, URL of style file, or library style name (library "
                    f"styles are listed in `style.available`)") from err
        # Drop blacklisted (non-style) parameters before applying.
        filtered = {}
        for k in style:  # don't trigger RcParams.__getitem__('backend')
            if k in STYLE_BLACKLIST:
                _api.warn_external(
                    f"Style includes a parameter, {k!r}, that is not "
                    f"related to style. Ignoring this parameter.")
            else:
                filtered[k] = style[k]
        mpl.rcParams.update(filtered)
@contextlib.contextmanager
def context(style, after_reset=False):
    """
    Context manager for using style settings temporarily.

    Parameters
    ----------
    style : str, dict, Path or list
        A style specification accepted by `.style.use`: a style name from
        `.style.available`, a dotted "package.style_name", the path or URL
        of a style file, a dict of rc key/value pairs, or a list of any of
        these applied from first to last.
    after_reset : bool
        If True, apply the style after resetting settings to their
        defaults; otherwise, apply it on top of the current settings.
    """
    # mpl.rc_context() snapshots rcParams and restores them on exit, so any
    # changes made by use() below are scoped to the with-block.
    with mpl.rc_context():
        if after_reset:
            mpl.rcdefaults()
        use(style)
        yield
def update_user_library(library):
    """Merge user-defined style sheets into *library* and return it."""
    for raw_path in USER_LIBRARY_PATHS:
        stylelib_path = os.path.expanduser(raw_path)
        user_styles = read_style_directory(stylelib_path)
        update_nested_dict(library, user_styles)
    return library
def read_style_directory(style_dir):
    """Return a dict mapping style name -> rc params for *style_dir*."""
    styles = {}
    for path in Path(style_dir).glob(f"*.{STYLE_EXTENSION}"):
        # Record any warnings raised while parsing and report them with
        # the offending file's path for easier debugging.
        with warnings.catch_warnings(record=True) as caught:
            styles[path.stem] = _rc_params_in_file(path)
        for warning in caught:
            _log.warning('In %s: %s', path, warning.message)
    return styles
def update_nested_dict(main_dict, new_dict):
    """
    Update a nested dict (one level of nesting) with new values.

    Unlike `dict.update`, the values of *main_dict* are assumed to be
    dicts (or dict-like), so an existing sub-dict is updated in place
    instead of being replaced wholesale.  Returns *main_dict*.
    """
    for name, rc_dict in new_dict.items():
        # Same semantics as dict.setdefault(name, {}).update(rc_dict).
        if name not in main_dict:
            main_dict[name] = {}
        main_dict[name].update(rc_dict)
    return main_dict
# Load style library
# ==================
# Builtin styles shipped with Matplotlib, read once at import time.
_base_library = read_style_directory(BASE_LIBRARY_PATH)
# Mapping of style name -> rc parameter dict (builtin plus user styles).
library = {}
# Sorted list of all known style names; kept in sync by reload_library().
available = []
def reload_library():
    """Reload the style library."""
    # Mutate in place (clear/update and slice-assign) so existing references
    # to `library` and `available` observe the refreshed contents.
    library.clear()
    library.update(update_user_library(_base_library))
    available[:] = sorted(library.keys())
reload_library()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@matplotlib@py3@matplotlib@style@core.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/pip/vendor/__init__.py",
"type": "Python"
}
|
"""
pip.vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.
Files inside of pip.vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@pip@vendor@__init__.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "marblestation/iSpec",
"repo_path": "iSpec_extracted/iSpec-master/synthesizer/setup.py",
"type": "Python"
}
|
# python setup.py build_ext --inplace
import os
import sys
import numpy
from distutils.core import setup
from distutils.extension import Extension
from distutils.sysconfig import get_config_vars
from Cython.Distutils import build_ext
from Cython.Build import cythonize
#os.environ['CC'] = 'gcc'
#get_config_vars()['OPT'] = ''
#'OPT': '-DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes'
#get_config_vars()['CFLAGS'] = '-fno-strict-aliasing -fno-common -dynamic -pipe -fwrapv -DNDEBUG -g -fwrapv -O3 '
#'CFLAGS': '-fno-strict-aliasing -fno-common -dynamic -pipe -O2 -fwrapv -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes'
ext_modules=[
    ## Requires external C routine:
    # NOTE: package metadata (version/description/author/url) used to be
    # passed to Extension(), which does not accept it — distutils emitted an
    # "Unknown Extension options" warning and ignored it.  It now lives in
    # the setup() call below, where it belongs.
    Extension(
        name="synthesizer",
        sources=["synthesizer.pyx", "synthesizer_func.c", "spectrum/abund2.c", "spectrum/al1op3.c", "spectrum/autoion3.c", "spectrum/balmer8.c", "spectrum/brackett.c", "spectrum/broad12.c", "spectrum/c1op_av.c", "spectrum/ca1op_av.c", "spectrum/capnu.c", "spectrum/chop.c", "spectrum/coolop5.c", "spectrum/density9.c", "spectrum/depth.c", "spectrum/eqtaukap.c", "spectrum/fe1op2.c", "spectrum/flux.c", "spectrum/fluxflx2.c", "spectrum/getisotope.c", "spectrum/he12.c", "spectrum/he13.c", "spectrum/he14a.c", "spectrum/he15a.c", "spectrum/he16a.c", "spectrum/he17a.c", "spectrum/he1op_av.c", "spectrum/he313.c", "spectrum/he314a.c", "spectrum/he315a.c", "spectrum/he316a.c", "spectrum/he317a.c", "spectrum/he617a.c", "spectrum/helines.c", "spectrum/helium6.c", "spectrum/heprof4.c", "spectrum/hotdensity.c", "spectrum/hprofl5.c", "spectrum/humphreys.c", "spectrum/inatom2.c", "spectrum/infix.c", "spectrum/inisotope.c", "spectrum/inline8.c", "spectrum/inmodel6.c", "spectrum/integ4.c", "spectrum/intensit.c", "spectrum/interva4.c", "spectrum/invelgrad.c", "spectrum/isorelabun.c", "spectrum/linelst12b.c", "spectrum/lline6.c", "spectrum/lukeop2.c", "spectrum/lyman3.c", "spectrum/maxcharge.c", "spectrum/mg1op_av.c", "spectrum/mghop.c", "spectrum/ohop.c", "spectrum/opacity6.c", "spectrum/optdepth.c", "spectrum/opttrap.c", "spectrum/partfn5.c", "spectrum/paschen3.c", "spectrum/pfinit5.c", "spectrum/pfunctio.c", "spectrum/pfund.c", "spectrum/planck.c", "spectrum/pop13.c", "spectrum/qround.c", "spectrum/setreset.c", "spectrum/si1op3.c", "spectrum/spaux.c", "spectrum/strong8.c", "spectrum/tauflx2.c", "spectrum/taukap7.c", "spectrum/tauref.c", "spectrum/tauwave.c", "spectrum/trapez.c", "spectrum/unified.c", "spectrum/veryhotdensity.c", "spectrum/voigt.c", "spectrum/xi7.c"],
        include_dirs = [numpy.get_include()], # .../site-packages/numpy/core/include
        extra_compile_args = [],
        extra_link_args = [],
        language="c",
    )
]
setup(
    name='synthesizer',
    version='1.0',
    description='Python integration of SPECTRUM a Stellar Spectral Synthesis Program (C) Richard O. Gray 1992 - 2010 Version 2.77',
    author='Sergi Blanco Cuaresma',
    url='http://www.marblestation.com',
    cmdclass = {'build_ext': build_ext},
    ext_modules = ext_modules,
)
|
marblestationREPO_NAMEiSpecPATH_START.@iSpec_extracted@iSpec-master@synthesizer@setup.py@.PATH_END.py
|
{
"filename": "plot_tf_log.py",
"repo_name": "tijmen/cosmosage",
"repo_path": "cosmosage_extracted/cosmosage-main/plot_tf_log.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Helper functions for monitoring the progress of a training loop.
"""
import argparse
import glob
import os

import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.ndimage
from scipy.stats import linregress
from tensorboard.backend.event_processing import event_accumulator
from tensorflow.core.util.event_pb2 import Event
from tensorflow.python.summary.summary_iterator import summary_iterator
def most_recent_log(dir):
    """Return the most recently modified TensorBoard event file for a model.

    Parameters
    ----------
    dir : str
        Model directory name under ``/home/tijmen/cosmosage/models``.
        (The parameter name shadows the builtin ``dir``; it is kept for
        backward compatibility with existing callers.)

    Returns
    -------
    str
        Path of the newest ``*.0`` event file found under the model's
        ``runs`` subdirectories.

    Raises
    ------
    IndexError
        If no matching log files are found.
    """
    # NOTE(review): the base path is hard-coded to one user's home
    # directory; consider making it configurable.
    logs = glob.glob("/home/tijmen/cosmosage/models/" + dir + "/*/runs/*/*.0")
    if not logs:
        raise IndexError("No directories found")
    # max() by mtime avoids sorting the whole list just to take the newest.
    return max(logs, key=os.path.getmtime)
def plot_loss(file_paths, plot_type="default", detailed_pts_per_eval=10):
    """Plot training/evaluation loss curves from TensorBoard event files.

    One figure is created per event file.

    Parameters
    ----------
    file_paths : list of str
        Paths to TensorBoard event files.
    plot_type : str
        "default"   - linear axes, raw curves.
        "logsmooth" - log y-axis, Gaussian-smoothed training curve.
        "detailed"  - binned training loss with error bars, epoch labels
                      and a secondary learning-rate axis.
        "slopes"    - per-segment linear-fit slopes of the training loss.
    detailed_pts_per_eval : int
        For "detailed": target number of binned training-loss points
        between consecutive evaluation points.
    """
    # NOTE: a stray plt.figure(figsize=(12, 8)) used to be created here;
    # it only produced an empty extra window because each file gets its own
    # figure inside the loop, so it has been removed.
    for idx, file_path in enumerate(file_paths):
        # Load the event accumulator
        ea = event_accumulator.EventAccumulator(file_path)
        ea.Reload()
        # Available scalar keys include:
        # 'train/loss', 'train/learning_rate', 'train/epoch', 'eval/loss',
        # 'eval/runtime', 'eval/samples_per_second', 'eval/steps_per_second'
        tloss = ea.scalars.Items("train/loss")
        eloss = ea.scalars.Items("eval/loss")
        lr = ea.scalars.Items("train/learning_rate")
        epoch = ea.scalars.Items("train/epoch")
        # Extract steps and loss values for training loss
        t_steps = np.array([s.step for s in tloss])
        t_losses = np.array([s.value for s in tloss])
        # Extract steps and loss values for evaluation loss
        e_steps = np.array([s.step for s in eloss])
        e_losses = np.array([s.value for s in eloss])
        # Extract steps and learning rate values
        lr_steps = np.array([s.step for s in lr])
        lr_values = np.array([s.value for s in lr])
        # Extract steps and epoch values
        epoch_steps = np.array([s.step for s in epoch])
        epoch_values = np.array([s.value for s in epoch])
        # One figure per event file.
        plt.figure(figsize=(12, 6))
        # Smooth the loss curve if plot_type is "logsmooth"
        if plot_type == "logsmooth":
            # Gaussian smoothing; mode="nearest" handles the edges without
            # changing the array length.  (The scipy.ndimage.filters
            # namespace is deprecated and removed in modern SciPy; call
            # scipy.ndimage directly.)
            t_losses = scipy.ndimage.gaussian_filter1d(
                t_losses, sigma=10, mode="nearest"
            )
        # Plotting
        if plot_type == "default":
            plt.plot(
                t_steps, t_losses, label=f"Training Loss (Run {idx+1})", color=f"C{idx}"
            )
            plt.plot(
                e_steps,
                e_losses,
                label=f"Evaluation Loss (Run {idx+1})",
                color=f"C{idx}",
                linestyle="dashed",
            )
        elif plot_type == "logsmooth":
            plt.semilogy(
                t_steps, t_losses, label=f"Training Loss (Run {idx+1})", color=f"C{idx}"
            )
            plt.semilogy(
                e_steps,
                e_losses,
                label=f"Evaluation Loss (Run {idx+1})",
                color=f"C{idx}",
                linestyle="dashed",
            )
        elif plot_type == "detailed":
            # Bin the loss values so roughly detailed_pts_per_eval training
            # points fall between consecutive evaluation points.
            bin_size = int(len(t_losses) / (detailed_pts_per_eval * len(e_losses)))
            # Guard against a zero bin size on short runs, which would
            # otherwise raise ZeroDivisionError below.
            bin_size = max(bin_size, 1)
            num_bins = int(len(t_losses) / bin_size)
            t_losses_binned = np.mean(
                t_losses[: num_bins * bin_size].reshape(-1, bin_size), axis=1
            )
            t_steps_binned = np.mean(
                t_steps[: num_bins * bin_size].reshape(-1, bin_size), axis=1
            )
            # Standard error of the mean within each bin.
            t_losses_std = np.std(
                t_losses[: num_bins * bin_size].reshape(-1, bin_size), axis=1
            ) / np.sqrt(bin_size)
            # Plotting
            plt.errorbar(
                t_steps_binned,
                t_losses_binned,
                yerr=t_losses_std,
                label=f"Training Loss (Run {idx+1})",
                color=f"C{idx}",
                capsize=3,
            )
            plt.plot(
                e_steps,
                e_losses,
                label=f"Evaluation Loss (Run {idx+1})",
                color=f"C{idx}",
                linestyle="dashed",
            )
            plt.ylabel("Loss")
            # label each evaluation point with the epoch number
            for i, e_loss in enumerate(e_losses):
                epoch_number = epoch_values[np.where(epoch_steps == e_steps[i])[0][0]]
                plt.text(
                    e_steps[i],
                    e_loss,
                    f"Epoch: {epoch_number:.2f}",
                    color=f"C{idx}",
                    fontsize=9,
                )
            plt.grid()
            # Plotting learning rate on the other axis
            ax2 = plt.gca().twinx()
            ax2.plot(lr_steps, lr_values, label="Learning Rate", color="red", alpha=0.15)
            ax2.set_ylabel("Learning Rate")
        elif plot_type == "slopes":
            num_segments = 10
            segment_length = len(t_steps) // num_segments
            # Initialize arrays to store slopes and midpoints of each segment
            slopes = np.zeros(num_segments)
            midpoints = np.zeros(num_segments)
            indices = np.zeros(num_segments)
            avg_losses = np.zeros(num_segments)
            # Calculate slopes for each segment and determine the midpoints
            # based on the fit (the last segment absorbs any remainder).
            for i in range(num_segments):
                start_idx = i * segment_length
                end_idx = (i + 1) * segment_length if i != num_segments - 1 else len(t_steps)
                indices[i] = (start_idx+end_idx)/2
                segment_steps = t_steps[start_idx:end_idx]
                segment_losses = t_losses[start_idx:end_idx]
                avg_losses[i] = np.mean(segment_losses)
                slope, intercept, _, _, _ = linregress(segment_steps, segment_losses)
                slopes[i] = slope
                # Calculate midpoint based on the fit
                avg_loss = (np.max(segment_losses) + np.min(segment_losses)) / 2
                x_mid = (avg_loss - intercept) / slope
                midpoints[i] = x_mid
            # Create subplots
            fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(9, 4))
            # Plot slopes in the top subplot
            ax1.plot(indices, slopes, label="Slopes", color="blue")
            ax1.set_xlabel("Steps")
            ax1.set_ylabel("Slope")
            ax1.grid()
            # Plot training loss in the bottom subplot
            ax2.plot(indices, avg_losses, label=f"Training Loss", color="green")
            ax2.set_xlabel("Steps")
            ax2.set_ylabel("Training Loss")
            ax2.grid()
    # These apply to the most recently created figure only.
    plt.xlabel("Steps")
    plt.legend()
    plt.show()
def main():
    """Command-line entry point: plot losses from the given event files."""
    parser = argparse.ArgumentParser(
        description="Plot training and evaluation loss from TensorFlow event files."
    )
    parser.add_argument(
        "file_paths",
        nargs="+",
        type=str,
        help="Path(s) to the TensorFlow event file(s)",
    )
    parsed = parser.parse_args()
    plot_loss(parsed.file_paths)


if __name__ == "__main__":
    main()
|
tijmenREPO_NAMEcosmosagePATH_START.@cosmosage_extracted@cosmosage-main@plot_tf_log.py@.PATH_END.py
|
{
"filename": "_ipynb_static.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/app/backends/_ipynb_static.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
vispy backend for the IPython notebook (static approach).
We aim to have:
* ipynb_static - export visualization to a static notebook
* ipynb_vnc - vnc-approach: render in Python, send result to JS as png
* ipynb_webgl - send gl commands to JS and execute in webgl context
"""
from __future__ import division
from ..base import BaseApplicationBackend, BaseCanvasBackend
from .. import Application, Canvas
from ...util import logger
# Imports for screenshot
from ...gloo.util import _screenshot
from ...io import _make_png
from base64 import b64encode
# -------------------------------------------------------------------- init ---
# Capability flags advertised by this backend: which Canvas constructor
# options it supports (things that can be set by the backend).
capability = dict(
    title=True,  # But it only applies to the dummy window :P
    size=True,  # We cannot possibly say we don't, because Canvas always sets it
    position=True,  # Ditto
    show=True,  # Note: we don't allow this, but all scripts call show ...
    vsync=False,
    resizable=True,  # Yes, you can set to not be resizable (it always is)
    decorate=False,
    fullscreen=False,
    context=True,
    multi_window=True,
    scroll=True,
    parent=False,
    always_on_top=False,
)
def _set_config(c):
    """Forward config *c* to the underlying core backend's ``_set_config``."""
    _app.backend_module._set_config(c)
# Create our "backend" backend; The toolkit that is going to provide a
# canvas (e.g. OpenGL context) so we can render images.
# Note that if IPython has already loaded a GUI backend, vispy is
# probably going to use that as well, because it prefers loaded backends.
try:
    # Explicitly use default (avoid using test-app)
    _app = Application('default')
except Exception:
    # No core backend could be created; report this backend as unavailable.
    _msg = 'ipynb_static backend relies on a core backend'
    available, testable, why_not, which = False, False, _msg, None
else:
    # Try importing IPython
    try:
        from IPython.display import display_png
    except Exception as exp:
        # IPython missing/broken; expose the import error as the reason.
        available, testable, why_not, which = False, False, str(exp), None
    else:
        # Backend-availability flags consumed by vispy's backend selection.
        available, testable, why_not = True, False, None
        which = _app.backend_module.which
    # Use that backend's shared context
    KEYMAP = _app.backend_module.KEYMAP
# ------------------------------------------------------------- application ---
# todo: maybe trigger something in JS on any of these methods?
class ApplicationBackend(BaseApplicationBackend):
    """Application backend that forwards every call to the real backend."""

    def __init__(self):
        BaseApplicationBackend.__init__(self)
        # The underlying (core) application backend doing the actual work.
        self._backend2 = _app._backend

    def _vispy_get_backend_name(self):
        real = self._backend2._vispy_get_backend_name()
        return 'ipynb_static (via %s)' % real

    def _vispy_process_events(self):
        return self._backend2._vispy_process_events()

    def _vispy_run(self):
        # We run in IPython, so we don't run!
        pass

    def _vispy_quit(self):
        return self._backend2._vispy_quit()

    def _vispy_get_native_app(self):
        return self._backend2._vispy_get_native_app()
# ------------------------------------------------------------------ canvas ---
class CanvasBackend(BaseCanvasBackend):
    """Canvas backend that renders via a real backend and displays a PNG.

    A hidden 'real' canvas does the OpenGL rendering; on show(), a
    screenshot is taken, PNG-encoded, base64-encoded and handed to
    IPython's display machinery as a static image.
    """
    # args are for BaseCanvasBackend, kwargs are for us.
    def __init__(self, *args, **kwargs):
        BaseCanvasBackend.__init__(self, *args)
        # Set to True by the first _on_draw call (fires events.initialize).
        self._initialized = False
        # Test kwargs
        # if kwargs['position']:
        #     raise RuntimeError('ipynb_static Canvas is not positionable')
        if not kwargs['decorate']:
            raise RuntimeError('ipynb_static Canvas is not decoratable')
        if kwargs['vsync']:
            raise RuntimeError('ipynb_static Canvas does not support vsync')
        if kwargs['fullscreen']:
            raise RuntimeError('ipynb_static Canvas does not support '
                               'fullscreen')
        # Create real canvas. It is a backend to this backend
        kwargs.pop('vispy_canvas', None)
        kwargs['autoswap'] = False
        canvas = Canvas(app=_app, **kwargs)  # Pass kwargs to underlying canvas
        self._backend2 = canvas.native
        # Connect to events of canvas to keep up to date with size and draw
        canvas.events.draw.connect(self._on_draw)
        canvas.events.resize.connect(self._on_resize)
        # Show the widget
        canvas.show()
        # todo: hide that canvas
        # Raw PNG that will be displayed on canvas.show()
        self._im = ""
    def _vispy_warmup(self):
        return self._backend2._vispy_warmup()
    def _vispy_set_current(self):
        return self._backend2._vispy_set_current()
    def _vispy_swap_buffers(self):
        return self._backend2._vispy_swap_buffers()
    def _vispy_set_title(self, title):
        return self._backend2._vispy_set_title(title)
        #logger.warn('IPython notebook canvas has not title.')
    def _vispy_set_size(self, w, h):
        return self._backend2._vispy_set_size(w, h)
    def _vispy_set_position(self, x, y):
        logger.warn('IPython notebook canvas cannot be repositioned.')
    def _vispy_set_visible(self, visible):
        # Showing means: draw once, close the real canvas, and push the
        # captured PNG (self._im, filled in by _on_draw) to IPython.
        #self._backend2._vispy_set_visible(visible)
        if not visible:
            logger.warn('IPython notebook canvas cannot be hidden.')
        else:
            self._vispy_update()
            self._vispy_canvas.app.process_events()
            self._vispy_close()
            display_png(self._im, raw=True)
    def _vispy_update(self):
        return self._backend2._vispy_update()
    def _vispy_close(self):
        return self._backend2._vispy_close()
        # todo: can we close on IPython side?
    def _vispy_get_position(self):
        # No meaningful window position in a notebook.
        return 0, 0
    def _vispy_get_size(self):
        return self._backend2._vispy_get_size()
    def _on_resize(self, event=None):
        # Event handler that is called by the underlying canvas
        if self._vispy_canvas is None:
            return
        size = self._backend2._vispy_get_size()
        self._vispy_canvas.events.resize(size=size)
    def _on_draw(self, event=None):
        # Event handler that is called by the underlying canvas
        if self._vispy_canvas is None:
            return
        # Handle initialization
        if not self._initialized:
            self._initialized = True
            self._vispy_canvas.events.initialize()
            self._on_resize()
        # Normal behavior
        self._vispy_canvas.set_current()
        self._vispy_canvas.events.draw(region=None)
        # Generate base64 encoded PNG string
        self._gen_png()
    def _gen_png(self):
        # Take the screenshot
        screenshot = _screenshot()
        # Convert to PNG
        png = _make_png(screenshot)
        # Encode base64
        self._im = b64encode(png)
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@app@backends@_ipynb_static.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/libs/sqlite3/README.md",
"type": "Markdown"
}
|
<h1 align="center">SQLite Source Repository</h1>
This repository contains the complete source code for the
[SQLite database engine](https://sqlite.org/). Some test scripts
are also included. However, many other test scripts
and most of the documentation are managed separately.
## Version Control
SQLite sources are managed using the
[Fossil](https://www.fossil-scm.org/), a distributed version control system
that was specifically designed and written to support SQLite development.
The [Fossil repository](https://sqlite.org/src/timeline) contains the urtext.
If you are reading this on GitHub or some other Git repository or service,
then you are looking at a mirror. The names of check-ins and
other artifacts in a Git mirror are different from the official
names for those objects. The official names for check-ins are
found in a footer on the check-in comment for authorized mirrors.
The official check-in name can also be seen in the `manifest.uuid` file
in the root of the tree. Always use the official name, not the
Git-name, when communicating about an SQLite check-in.
If you pulled your SQLite source code from a secondary source and want to
verify its integrity, there are hints on how to do that in the
[Verifying Code Authenticity](#vauth) section below.
## Obtaining The Code
If you do not want to use Fossil, you can download tarballs or ZIP
archives or [SQLite archives](https://sqlite.org/cli.html#sqlar) as follows:
* Latest trunk check-in as
[Tarball](https://www.sqlite.org/src/tarball/sqlite.tar.gz),
[ZIP-archive](https://www.sqlite.org/src/zip/sqlite.zip), or
[SQLite-archive](https://www.sqlite.org/src/sqlar/sqlite.sqlar).
* Latest release as
[Tarball](https://www.sqlite.org/src/tarball/sqlite.tar.gz?r=release),
[ZIP-archive](https://www.sqlite.org/src/zip/sqlite.zip?r=release), or
[SQLite-archive](https://www.sqlite.org/src/sqlar/sqlite.sqlar?r=release).
* For other check-ins, substitute an appropriate branch name or
tag or hash prefix in place of "release" in the URLs of the previous
bullet. Or browse the [timeline](https://www.sqlite.org/src/timeline)
to locate the check-in desired, click on its information page link,
then click on the "Tarball" or "ZIP Archive" links on the information
page.
If you do want to use Fossil to check out the source tree,
first install Fossil version 2.0 or later.
(Source tarballs and precompiled binaries available
[here](https://www.fossil-scm.org/fossil/uv/download.html). Fossil is
a stand-alone program. To install, simply download or build the single
executable file and put that file someplace on your $PATH.)
Then run commands like this:
mkdir -p ~/sqlite ~/Fossils
cd ~/sqlite
fossil clone https://www.sqlite.org/src ~/Fossils/sqlite.fossil
fossil open ~/Fossils/sqlite.fossil
After setting up a repository using the steps above, you can always
update to the latest version using:
fossil update trunk ;# latest trunk check-in
fossil update release ;# latest official release
Or type "fossil ui" to get a web-based user interface.
## Compiling for Unix-like systems
First create a directory in which to place
the build products. It is recommended, but not required, that the
build directory be separate from the source directory. Cd into the
build directory and then from the build directory run the configure
script found at the root of the source tree. Then run "make".
For example:
tar xzf sqlite.tar.gz ;# Unpack the source tree into "sqlite"
mkdir bld ;# Build will occur in a sibling directory
cd bld ;# Change to the build directory
../sqlite/configure ;# Run the configure script
make ;# Run the makefile.
make sqlite3.c ;# Build the "amalgamation" source file
make test ;# Run some tests (requires Tcl)
See the makefile for additional targets.
The configure script uses autoconf 2.61 and libtool. If the configure
script does not work out for you, there is a generic makefile named
"Makefile.linux-gcc" in the top directory of the source tree that you
can copy and edit to suit your needs. Comments on the generic makefile
show what changes are needed.
## Using MSVC for Windows systems
On Windows, all applicable build products can be compiled with MSVC.
First open the command prompt window associated with the desired compiler
version (e.g. "Developer Command Prompt for VS2013"). Next, use NMAKE
with the provided "Makefile.msc" to build one of the supported targets.
For example, from the parent directory of the source subtree named "sqlite":
mkdir bld
cd bld
nmake /f ..\sqlite\Makefile.msc TOP=..\sqlite
nmake /f ..\sqlite\Makefile.msc sqlite3.c TOP=..\sqlite
nmake /f ..\sqlite\Makefile.msc sqlite3.dll TOP=..\sqlite
nmake /f ..\sqlite\Makefile.msc sqlite3.exe TOP=..\sqlite
nmake /f ..\sqlite\Makefile.msc test TOP=..\sqlite
There are several build options that can be set via the NMAKE command
line. For example, to build for WinRT, simply add "FOR_WINRT=1" argument
to the "sqlite3.dll" command line above. When debugging into the SQLite
code, adding the "DEBUG=1" argument to one of the above command lines is
recommended.
SQLite does not require [Tcl](http://www.tcl.tk/) to run, but a Tcl installation
is required by the makefiles (including those for MSVC). SQLite contains
a lot of generated code and Tcl is used to do much of that code generation.
## Source Code Tour
Most of the core source files are in the **src/** subdirectory. The
**src/** folder also contains files used to build the "testfixture" test
harness. The names of the source files used by "testfixture" all begin
with "test".
The **src/** also contains the "shell.c" file
which is the main program for the "sqlite3.exe"
[command-line shell](https://sqlite.org/cli.html) and
the "tclsqlite.c" file which implements the
[Tcl bindings](https://sqlite.org/tclsqlite.html) for SQLite.
(Historical note: SQLite began as a Tcl
extension and only later escaped to the wild as an independent library.)
Test scripts and programs are found in the **test/** subdirectory.
Additional test code is found in other source repositories.
See [How SQLite Is Tested](http://www.sqlite.org/testing.html) for
additional information.
The **ext/** subdirectory contains code for extensions. The
Full-text search engine is in **ext/fts3**. The R-Tree engine is in
**ext/rtree**. The **ext/misc** subdirectory contains a number of
smaller, single-file extensions, such as a REGEXP operator.
The **tool/** subdirectory contains various scripts and programs used
for building generated source code files or for testing or for generating
accessory programs such as "sqlite3_analyzer(.exe)".
### Generated Source Code Files
Several of the C-language source files used by SQLite are generated from
other sources rather than being typed in manually by a programmer. This
section will summarize those automatically-generated files. To create all
of the automatically-generated files, simply run "make target_source".
The "target_source" make target will create a subdirectory "tsrc/" and
fill it with all the source files needed to build SQLite, both
manually-edited files and automatically-generated files.
The SQLite interface is defined by the **sqlite3.h** header file, which is
generated from src/sqlite.h.in, ./manifest.uuid, and ./VERSION. The
[Tcl script](http://www.tcl.tk) at tool/mksqlite3h.tcl does the conversion.
The manifest.uuid file contains the SHA3 hash of the particular check-in
and is used to generate the SQLITE\_SOURCE\_ID macro. The VERSION file
contains the current SQLite version number. The sqlite3.h header is really
just a copy of src/sqlite.h.in with the source-id and version number inserted
at just the right spots. Note that comment text in the sqlite3.h file is
used to generate much of the SQLite API documentation. The Tcl scripts
used to generate that documentation are in a separate source repository.
The SQL language parser is **parse.c** which is generated from a grammar in
the src/parse.y file. The conversion of "parse.y" into "parse.c" is done
by the [lemon](./doc/lemon.html) LALR(1) parser generator. The source code
for lemon is at tool/lemon.c. Lemon uses the tool/lempar.c file as a
template for generating its parser.
Lemon also generates the **parse.h** header file, at the same time it
generates parse.c.
The **opcodes.h** header file contains macros that define the numbers
corresponding to opcodes in the "VDBE" virtual machine. The opcodes.h
file is generated by scanning the src/vdbe.c source file. The
Tcl script at ./mkopcodeh.tcl does this scan and generates opcodes.h.
A second Tcl script, ./mkopcodec.tcl, then scans opcodes.h to generate
the **opcodes.c** source file, which contains a reverse mapping from
opcode-number to opcode-name that is used for EXPLAIN output.
The **keywordhash.h** header file contains the definition of a hash table
that maps SQL language keywords (ex: "CREATE", "SELECT", "INDEX", etc.) into
the numeric codes used by the parse.c parser. The keywordhash.h file is
generated by a C-language program at tool/mkkeywordhash.c.
The **pragma.h** header file contains various definitions used to parse
and implement the PRAGMA statements. The header is generated by a
script **tool/mkpragmatab.tcl**. If you want to add a new PRAGMA, edit
the **tool/mkpragmatab.tcl** file to insert the information needed by the
parser for your new PRAGMA, then run the script to regenerate the
**pragma.h** header file.
### The Amalgamation
All of the individual C source code and header files (both manually-edited
and automatically-generated) can be combined into a single big source file
**sqlite3.c** called "the amalgamation". The amalgamation is the recommended
way of using SQLite in a larger application. Combining all individual
source code files into a single big source code file allows the C compiler
to perform more cross-procedure analysis and generate better code. SQLite
runs about 5% faster when compiled from the amalgamation versus when compiled
from individual source files.
The amalgamation is generated from the tool/mksqlite3c.tcl Tcl script.
First, all of the individual source files must be gathered into the tsrc/
subdirectory (using the equivalent of "make target_source") then the
tool/mksqlite3c.tcl script is run to copy them all together in just the
right order while resolving internal "#include" references.
The amalgamation source file is more than 200K lines long. Some symbolic
debuggers (most notably MSVC) are unable to deal with files longer than 64K
lines. To work around this, a separate Tcl script, tool/split-sqlite3c.tcl,
can be run on the amalgamation to break it up into a single small C file
called **sqlite3-all.c** that does #include on about seven other files
named **sqlite3-1.c**, **sqlite3-2.c**, ..., **sqlite3-7.c**. In this way,
all of the source code is contained within a single translation unit so
that the compiler can do extra cross-procedure optimization, but no
individual source file exceeds 32K lines in length.
## How It All Fits Together
SQLite is modular in design.
See the [architectural description](http://www.sqlite.org/arch.html)
for details. Other documents that are useful in
helping to understand how SQLite works include the
[file format](http://www.sqlite.org/fileformat2.html) description,
the [virtual machine](http://www.sqlite.org/opcode.html) that runs
prepared statements, the description of
[how transactions work](http://www.sqlite.org/atomiccommit.html), and
the [overview of the query planner](http://www.sqlite.org/optoverview.html).
Years of effort have gone into optimizing SQLite, both
for small size and high performance. And optimizations tend to result in
complex code. So there is a lot of complexity in the current SQLite
implementation. It will not be the easiest library in the world to hack.
Key files:
* **sqlite.h.in** - This file defines the public interface to the SQLite
library. Readers will need to be familiar with this interface before
trying to understand how the library works internally.
* **sqliteInt.h** - this header file defines many of the data objects
used internally by SQLite. In addition to "sqliteInt.h", some
subsystems have their own header files.
* **parse.y** - This file describes the LALR(1) grammar that SQLite uses
to parse SQL statements, and the actions that are taken at each step
in the parsing process.
* **vdbe.c** - This file implements the virtual machine that runs
prepared statements. There are various helper files whose names
begin with "vdbe". The VDBE has access to the vdbeInt.h header file
which defines internal data objects. The rest of SQLite interacts
with the VDBE through an interface defined by vdbe.h.
* **where.c** - This file (together with its helper files named
by "where*.c") analyzes the WHERE clause and generates
virtual machine code to run queries efficiently. This file is
sometimes called the "query optimizer". It has its own private
header file, whereInt.h, that defines data objects used internally.
* **btree.c** - This file contains the implementation of the B-Tree
storage engine used by SQLite. The interface to the rest of the system
is defined by "btree.h". The "btreeInt.h" header defines objects
used internally by btree.c and not published to the rest of the system.
* **pager.c** - This file contains the "pager" implementation, the
module that implements transactions. The "pager.h" header file
defines the interface between pager.c and the rest of the system.
* **os_unix.c** and **os_win.c** - These two files implement the interface
between SQLite and the underlying operating system using the run-time
pluggable VFS interface.
* **shell.c.in** - This file is not part of the core SQLite library. This
is the file that, when linked against sqlite3.a, generates the
"sqlite3.exe" command-line shell. The "shell.c.in" file is transformed
into "shell.c" as part of the build process.
* **tclsqlite.c** - This file implements the Tcl bindings for SQLite. It
is not part of the core SQLite library. But as most of the tests in this
repository are written in Tcl, the Tcl language bindings are important.
* **test*.c** - Files in the src/ folder that begin with "test" go into
building the "testfixture.exe" program. The testfixture.exe program is
an enhanced Tcl shell. The testfixture.exe program runs scripts in the
test/ folder to validate the core SQLite code. The testfixture program
(and some other test programs too) is built and run when you type
"make test".
* **ext/misc/json1.c** - This file implements the various JSON functions
that are built into SQLite.
There are many other source files. Each has a succinct header comment that
describes its purpose and role within the larger system.
<a name="vauth"></a>
## Verifying Code Authenticity
The `manifest` file at the root directory of the source tree
contains either a SHA3-256 hash (for newer files) or a SHA1 hash (for
older files) for every source file in the repository.
The name of the version of the entire source tree is just the
SHA3-256 hash of the `manifest` file itself, possibly with the
last line of that file omitted if the last line begins with
"`# Remove this line`".
The `manifest.uuid` file should contain the SHA3-256 hash of the
`manifest` file. If all of the above hash comparisons are correct, then
you can be confident that your source tree is authentic and unadulterated.
The format of the `manifest` file should be mostly self-explanatory, but
if you want details, they are available
[here](https://fossil-scm.org/fossil/doc/trunk/www/fileformat.wiki#manifest).
## Contacts
The main SQLite website is [http://www.sqlite.org/](http://www.sqlite.org/)
with geographically distributed backups at
[http://www2.sqlite.org/](http://www2.sqlite.org) and
[http://www3.sqlite.org/](http://www3.sqlite.org).
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@libs@sqlite3@README.md@.PATH_END.py
|
{
"filename": "fake_nudot.py",
"repo_name": "mattpitkin/tempo2",
"repo_path": "tempo2_extracted/tempo2-master/python/toasim/bin/fake_nudot.py",
"type": "Python"
}
|
#!/usr/bin/env python
import toasim
import numpy as np
from scipy.interpolate import interp1d
import sys
import argparse
# Command-line interface.  The simulation switches between a "std" state
# (nominal spin-down) and an "alt" state (spin-down scaled by
# --alt-nudot-factor); the dwell time in each state is drawn from a normal
# distribution whose mu/sigma (days) are given by --std-time-N / --alt-time-N.
parser = argparse.ArgumentParser("Fake nudot variations")
parser.add_argument("--alt-nudot-factor", default=0.1, type=float)
parser.add_argument("--alt-time-N", nargs=2, type=float, help="Timescale of 'alt' mode given by a normal distribition given by mu/sigma in days")
parser.add_argument("--std-time-N", nargs=2, type=float, help="Timescale of 'std' mode given by a normal distribition given by mu/sigma in days")
parser.add_argument("--plot", action='store_true')
parser.add_argument("--subtract-quad", "-s", action='store_true')
parser.add_argument("--nreal", default=1, type=int)
parser.add_argument("parfile")
parser.add_argument("timfile")
def integrate_phase(nudot, t0, t1, nu0):
    """Return the pulse phase accumulated between times t0 and t1 (in days).

    The frequency evolves linearly, nu(t) = nu0 + nudot*t (t in seconds),
    so the accumulated phase is the analytic integral of nu over [t0, t1].
    """
    sec0 = t0 * 86400.0  # days -> seconds
    sec1 = t1 * 86400.0
    spindown_term = 0.5 * nudot * (sec1 ** 2 - sec0 ** 2)
    constant_term = nu0 * (sec1 - sec0)
    return spindown_term + constant_term
def integrate_nu(nudot, t0, t1):
    """Return the change in spin frequency, nudot * (t1 - t0).

    t0 and t1 are in days; nudot is per second squared, so both endpoints
    are converted to seconds before taking the difference.
    """
    start_s = t0 * 86400.0
    end_s = t1 * 86400.0
    return nudot * (end_s - start_s)
# ---------------------------------------------------------------------------
# Main script: read the par/tim files, then for each realisation simulate a
# two-state switching nudot and write the induced ToA offsets as a toasim
# correction file named "<timfile>.addNudot".
# ---------------------------------------------------------------------------
args=parser.parse_args()
print(args)

if args.plot:
    # Imported lazily so matplotlib is only needed when plotting is requested.
    from matplotlib import pyplot as plt

nreal = args.nreal

# toasim.header() is a project type (toasim package); it holds the metadata
# block written at the top of the output file.
header = toasim.header()
header.parfile_name=args.parfile
header.timfile_name=args.timfile

with open(args.parfile) as par, open(args.timfile) as tim:
    header.orig_parfile=par.read()
    header.idealised_toas=tim.read()

with open(header.timfile_name+".addNudot","wb") as outfile:
    # Extract the spin frequency (F0) and spin-down rate (F1) from the par
    # file; both are required to scale the simulated nudot and convert phase
    # to a time offset.
    f1=None
    f0=None
    with open(args.parfile) as ff:
        for line in ff:
            e=line.split()
            if len(e) > 1:
                if e[0]=="F1":
                    f1=float(e[1])
                if e[0]=="F0":
                    f0=float(e[1])
    if f1 is None:
        print("No F1 found in par file")
        print(header.orig_parfile)
        sys.exit(1)
    if f0 is None:
        print("No F0 found in par file")
        print(header.orig_parfile)
        sys.exit(1)

    # Parse the arrival times (MJD) from the tim file; ToA lines are taken to
    # be those starting with a space, with the MJD in the third column.
    toas=[]
    for line in header.idealised_toas.split("\n"):
        if line.startswith(" "):
            elems=line.strip().split()
            toa=float(elems[2])
            toas.append(toa)

    ntoa=len(toas)
    toas=np.array(toas)

    header.ntoa=ntoa
    header.nrealisations=nreal
    header.invocation=" ".join(sys.argv)

    print("\nWriting....")
    header.write(outfile)
    itoas = np.argsort(toas)  # visit the ToAs in time order
    for ireal in range(nreal):
        print("ireal={}/{}".format(ireal,nreal))
        t = toas[itoas[0]] ## The time we have accumulated phase until
        t0=t
        accumulated_phase=0 ## the accumulated phase at t
        accumulated_nu=0 ## the accumulated change in nu at t.
        # Each state is (extra nudot, dwell-time mu, dwell-time sigma):
        # 'std' adds no extra spin-down, 'alt' adds alt_nudot_factor * F1.
        STD=(0,args.std_time_N[0],args.std_time_N[1])
        ALT=(args.alt_nudot_factor*f1, args.alt_time_N[0],args.alt_time_N[1])
        state=STD if np.random.uniform() < 0.5 else ALT
        next_state= ALT if state==STD else STD
        cur_nudot,mu,sigma = state
        next_switch = toas[itoas[0]] + np.random.normal(mu,sigma) * np.random.uniform() ## start a random way through the first interval
        phases=np.zeros_like(toas)
        # NOTE(review): state_lag/other_state_lag are updated below but never
        # read by live code -- bookkeeping left over from the commented-out
        # integration variant; confirm before removing.
        state_lag = t
        other_state_lag = t
        for i in itoas:
            while toas[i] > next_switch:
                ## The next ToA occurs after a switch, so integrate phase up to the end of this switch.
                #accumulated_phase += integrate_phase(nudot=cur_nudot, t0=t-state_lag, t1=next_switch-state_lag,nu0=accumulated_nu)
                #accumulated_nu += integrate_nu(nudot=cur_nudot, t0=t-state_lag, t1=next_switch-state_lag)
                accumulated_phase += integrate_phase(nudot=cur_nudot, t0=0, t1=next_switch-t,nu0=accumulated_nu)
                accumulated_nu += integrate_nu(nudot=cur_nudot, t0=0, t1=next_switch-t)
                other_state_lag += next_switch - t
                t = next_switch
                state,next_state = next_state,state # swap state and next state
                cur_nudot,mu,sigma = state
                other_state_lag,state_lag = state_lag,other_state_lag
                # Draw how long the newly entered state lasts.
                next_switch = np.random.normal(mu,sigma) + t
            # Now integrate to the ToA
            #accumulated_phase += integrate_phase(nudot=cur_nudot, t0=t-state_lag, t1=toas[i]-state_lag,nu0=accumulated_nu)
            #accumulated_nu += integrate_nu(nudot=cur_nudot, t0=t-state_lag, t1=toas[i]-state_lag)
            accumulated_phase += integrate_phase(nudot=cur_nudot, t0=0, t1=toas[i]-t,nu0=accumulated_nu)
            accumulated_nu += integrate_nu(nudot=cur_nudot, t0=0, t1=toas[i]-t)
            other_state_lag += toas[i]-t
            t = toas[i]
            phases[i] = accumulated_phase
        if args.subtract_quad:
            ## fit and remove quadratic
            pp = np.poly1d(np.polyfit(toas,phases,2))
            phases -= pp(toas)
        if args.plot:
            # Diagnostic plot: accumulated phase plus its first and second
            # finite differences (proxies for nu and nudot).
            plt.subplot(311)
            plt.plot(toas[itoas],phases[itoas],ls=':',marker='x')
            plt.subplot(312)
            d1 = np.diff(phases[itoas]) / np.diff(toas[itoas])
            plt.plot(toas[itoas][:-1],d1,ls=':',marker='x')
            plt.subplot(313)
            d2 = np.diff(d1) / np.diff(toas[itoas])[:-1]
            plt.plot(toas[itoas][:-2],d2,ls=':',marker='x')
            plt.show()
        # Convert phase to a time offset (seconds) and write one realisation
        # as a toasim correction record (project type).
        offsets=phases/f0
        real = toasim.correction(header,offsets,0,0,0,"")
        real.write(outfile)
|
mattpitkinREPO_NAMEtempo2PATH_START.@tempo2_extracted@tempo2-master@python@toasim@bin@fake_nudot.py@.PATH_END.py
|
{
"filename": "dotnet.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py2/pygments/lexers/dotnet.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this, default, words
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt, iteritems
from pygments import unistring as uni
from pygments.lexers.html import XmlLexer
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
class CSharpLexer(RegexLexer):
    """
    For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
    source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed.  Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

    .. versionadded:: 0.8
    """

    name = 'C#'
    aliases = ['csharp', 'c#']
    filenames = ['*.cs']
    mimetypes = ['text/x-csharp']  # inferred

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    levels = {
        'none': r'@?[_a-zA-Z]\w*',
        'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
                  '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                    'Cf', 'Mn', 'Mc') + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
                 + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                        'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }

    # One complete token table is built per unicode level; token_variants
    # signals this layout and __init__ below selects the right table.
    tokens = {}
    token_variants = True

    for levelname, cs_ident in iteritems(levels):
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                            # method name
                 r'(\s*)(\()',                                    # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Text),
                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
                 r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                 Keyword)),
                (r'(abstract|as|async|await|base|break|by|case|catch|'
                 r'checked|const|continue|default|delegate|'
                 r'do|else|enum|event|explicit|extern|false|finally|'
                 r'fixed|for|foreach|goto|if|implicit|in|interface|'
                 r'internal|is|let|lock|new|null|on|operator|'
                 r'out|override|params|private|protected|public|readonly|'
                 r'ref|return|sealed|sizeof|stackalloc|static|'
                 r'switch|this|throw|true|try|typeof|'
                 r'unchecked|unsafe|virtual|void|while|'
                 r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
                 r'descending|from|group|into|orderby|select|thenby|where|'
                 r'join|equals)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
                 r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
                (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop'),
                default('#pop'),
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'),
            ]
        }

    def __init__(self, **options):
        # Select the token table for the requested unicode level, compiling
        # its regexes on first use and caching the result in _all_tokens.
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)
class NemerleLexer(RegexLexer):
    """
    For `Nemerle <http://nemerle.org>`_ source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed.  Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

    .. versionadded:: 1.5
    """

    name = 'Nemerle'
    aliases = ['nemerle']
    filenames = ['*.n']
    mimetypes = ['text/x-nemerle']  # inferred

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    levels = {
        'none': r'@?[_a-zA-Z]\w*',
        'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
                  '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                    'Cf', 'Mn', 'Mc') + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
                 + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                        'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }

    # As in CSharpLexer: one token table per unicode level, chosen in
    # __init__.  The extra states handle Nemerle's splice strings ($"...",
    # $<#...#>) and recursive <#...#> strings.
    tokens = {}
    token_variants = True

    for levelname, cs_ident in iteritems(levels):
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                            # method name
                 r'(\s*)(\()',                                    # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Text),
                (r'\$\s*"', String, 'splice-string'),
                (r'\$\s*<#', String, 'splice-string2'),
                (r'<#', String, 'recursive-string'),
                (r'(<\[)\s*(' + cs_ident + ':)?', Keyword),
                (r'\]\>', Keyword),

                # quasiquotation only
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),

                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                 Keyword)),
                (r'(abstract|and|as|base|catch|def|delegate|'
                 r'enum|event|extern|false|finally|'
                 r'fun|implements|interface|internal|'
                 r'is|macro|match|matches|module|mutable|new|'
                 r'null|out|override|params|partial|private|'
                 r'protected|public|ref|sealed|static|'
                 r'syntax|this|throw|true|try|type|typeof|'
                 r'virtual|volatile|when|where|with|'
                 r'assert|assert2|async|break|checked|continue|do|else|'
                 r'ensures|for|foreach|if|late|lock|new|nolate|'
                 r'otherwise|regexp|repeat|requires|return|surroundwith|'
                 r'unchecked|unless|using|while|yield)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
                 r'short|string|uint|ulong|ushort|void|array|list)\b\??',
                 Keyword.Type),
                (r'(:>?)\s*(' + cs_ident + r'\??)',
                 bygroups(Punctuation, Keyword.Type)),
                (r'(class|struct|variant|module)(\s+)',
                 bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text),
                 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ],
            'splice-string': [
                (r'[^"$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'\\"', String),
                (r'"', String, '#pop')
            ],
            'splice-string2': [
                (r'[^#<>$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'recursive-string': [
                (r'[^#<>]', String),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'splice-string-content': [
                (r'if|match', Keyword),
                (r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
                (cs_ident, Name),
                (r'\d+', Number),
                (r'\(', Punctuation, '#push'),
                (r'\)', Punctuation, '#pop')
            ]
        }

    def __init__(self, **options):
        # Same per-level table selection and caching as CSharpLexer.__init__.
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
                               'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)
class BooLexer(RegexLexer):
    """
    For `Boo <http://boo.codehaus.org/>`_ source code.
    """

    name = 'Boo'
    aliases = ['boo']
    filenames = ['*.boo']
    mimetypes = ['text/x-boo']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#|//).*$', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment'),
            (r'[]{}:(),.;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            (r'/(\\\\|\\/|[^/\s])/', String.Regex),
            (r'@/(\\\\|\\/|[^/])*/', String.Regex),
            (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
            (r'(as|abstract|callable|constructor|destructor|do|import|'
             r'enum|event|final|get|interface|internal|of|override|'
             r'partial|private|protected|public|return|set|static|'
             r'struct|transient|virtual|yield|super|and|break|cast|'
             r'continue|elif|else|ensure|except|for|given|goto|if|in|'
             r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
             r'while|from|as)\b', Keyword),
            (r'def(?=\s+\(.*?\))', Keyword),
            (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'),
            (r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
             r'assert|checked|enumerate|filter|getter|len|lock|map|'
             r'matrix|max|min|normalArrayIndexing|print|property|range|'
             r'rawArrayIndexing|required|typeof|unchecked|using|'
             r'yieldAll|zip)\b', Name.Builtin),
            (r'"""(\\\\|\\"|.*?)"""', String.Double),
            (r'"(\\\\|\\"|[^"]*?)"', String.Double),
            (r"'(\\\\|\\'|[^']*?)'", String.Single),
            (r'[a-zA-Z_]\w*', Name),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            (r'[0-9][0-9.]*(ms?|d|h|s)', Number),
            (r'0\d+', Number.Oct),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer),
        ],
        # /* ... */ comments nest: '#push' re-enters this state on an inner
        # opener so only the matching closer pops back out.
        'comment': [
            ('/[*]', Comment.Multiline, '#push'),
            ('[*]/', Comment.Multiline, '#pop'),
            ('[^/*]', Comment.Multiline),
            ('[*/]', Comment.Multiline)
        ],
        'funcname': [
            (r'[a-zA-Z_]\w*', Name.Function, '#pop')
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'namespace': [
            (r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
        ]
    }
class VbNetLexer(RegexLexer):
    """
    For
    `Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
    source code.
    """

    name = 'VB.net'
    aliases = ['vb.net', 'vbnet']
    filenames = ['*.vb', '*.bas']
    mimetypes = ['text/x-vbnet', 'text/x-vba']  # (?)

    # VB identifiers, including a broad range of unicode letters.
    uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \
               '[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                 'Cf', 'Mn', 'Mc') + ']*'

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'^\s*<.*?>', Name.Attribute),
            (r'\s+', Text),
            (r'\n', Text),
            (r'rem\b.*?\n', Comment),
            (r"'.*?\n", Comment),
            (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|'
             r'#ExternalSource.*?\n|#End\s+ExternalSource|'
             r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
             Comment.Preproc),
            (r'[(){}!#,.:]', Punctuation),
            (r'Option\s+(Strict|Explicit|Compare)\s+'
             r'(On|Off|Binary|Text)', Keyword.Declaration),
            (words((
                'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case',
                'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl',
                'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng',
                'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare',
                'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else',
                'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False',
                'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo',
                'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let',
                'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase',
                'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing',
                'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator',
                'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides',
                'ParamArray', 'Partial', 'Private', 'Protected', 'Public',
                'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume',
                'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single',
                'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
                'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
                'Widening', 'With', 'WithEvents', 'WriteOnly'),
                   prefix=r'(?<!\.)', suffix=r'\b'), Keyword),
            (r'(?<!\.)End\b', Keyword, 'end'),
            (r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
            (r'(?<!\.)(Function|Sub|Property)(\s+)',
             bygroups(Keyword, Text), 'funcname'),
            (r'(?<!\.)(Class|Structure|Enum)(\s+)',
             bygroups(Keyword, Text), 'classname'),
            (r'(?<!\.)(Module|Namespace|Imports)(\s+)',
             bygroups(Keyword, Text), 'namespace'),
            (r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
             r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
             r'UShort)\b', Keyword.Type),
            (r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
             r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
            (r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
             r'<=|>=|<>|[-&*/\\^+=<>\[\]]',
             Operator),
            ('"', String, 'string'),
            (r'_\n', Text),  # Line continuation  (must be before Name)
            (uni_name + '[%&@!#$]?', Name),
            ('#.*?#', Literal.Date),
            (r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float),
            (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
        ],
        'string': [
            (r'""', String),
            (r'"C?', String, '#pop'),
            (r'[^"]+', String),
        ],
        'dim': [
            (uni_name, Name.Variable, '#pop'),
            default('#pop'),  # any other syntax
        ],
        'funcname': [
            (uni_name, Name.Function, '#pop'),
        ],
        'classname': [
            (uni_name, Name.Class, '#pop'),
        ],
        'namespace': [
            (uni_name, Name.Namespace),
            (r'\.', Name.Namespace),
            default('#pop'),
        ],
        'end': [
            (r'\s+', Text),
            (r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
             Keyword, '#pop'),
            default('#pop'),
        ]
    }

    # NOTE: Pygments calls analyse_text as a static content heuristic; it
    # deliberately takes no ``self``.
    def analyse_text(text):
        if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE):
            return 0.5
class GenericAspxLexer(RegexLexer):
    """
    Lexer for ASP.NET pages.

    Splits a page into <% ... %> / <script> islands (emitted as ``Other`` so
    a DelegatingLexer can hand them to a language lexer) and XML markup.
    """

    name = 'aspx-gen'
    filenames = []
    mimetypes = []

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
                                                        Other,
                                                        using(XmlLexer))),
            (r'(.+?)(?=<)', using(XmlLexer)),
            (r'.+', using(XmlLexer)),
        ],
    }
# TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting C# within ASP.NET pages.
    """

    name = 'aspx-cs'
    aliases = ['aspx-cs']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []

    def __init__(self, **options):
        # Delegate: GenericAspxLexer finds the code islands, CSharpLexer
        # highlights them.
        super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer,
                                              **options)

    # NOTE: Pygments calls analyse_text as a static content heuristic; it
    # deliberately takes no ``self``.
    def analyse_text(text):
        if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
            return 0.2
        elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
            return 0.15
class VbNetAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting Visual Basic.net within ASP.NET pages.
    """

    name = 'aspx-vb'
    aliases = ['aspx-vb']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []

    def __init__(self, **options):
        # Delegate: GenericAspxLexer finds the code islands, VbNetLexer
        # highlights them.
        super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer,
                                             **options)

    # NOTE: Pygments calls analyse_text as a static content heuristic; it
    # deliberately takes no ``self``.
    def analyse_text(text):
        if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
            return 0.2
        elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
            return 0.15
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
    """
    For the `F# language <https://fsharp.org/>`_ (version 3.0).

    .. versionadded:: 1.5
    """

    name = 'F#'
    aliases = ['fsharp', 'f#']
    filenames = ['*.fs', '*.fsi']
    mimetypes = ['text/x-fsharp']

    keywords = [
        'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
        'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
        'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
        'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
        'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
        'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
        'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
        'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
        'while', 'with', 'yield!', 'yield',
    ]
    # Reserved words; cannot hurt to color them as keywords too.
    keywords += [
        'atomic', 'break', 'checked', 'component', 'const', 'constraint',
        'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
        'functor', 'include', 'method', 'mixin', 'object', 'parallel',
        'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
        'virtual', 'volatile',
    ]
    keyopts = [
        '!=', '#', '&&', '&', r'\(', r'\)', r'\*', r'\+', ',', r'-\.',
        '->', '-', r'\.\.', r'\.', '::', ':=', ':>', ':', ';;', ';', '<-',
        r'<\]', '<', r'>\]', '>', r'\?\?', r'\?', r'\[<', r'\[\|', r'\[', r'\]',
        '_', '`', r'\{', r'\|\]', r'\|', r'\}', '~', '<@@', '<@', '=', '@>', '@@>',
    ]

    operators = r'[!$%&*+\./:<=>?@^|~-]'
    word_operators = ['and', 'or', 'not']
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'
    primitives = [
        'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
        'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
        'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
        'list', 'exn', 'obj', 'enum',
    ]

    # See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
    # http://fsharp.org/about/files/spec.pdf for reference.  Good luck.
    tokens = {
        'escape-sequence': [
            (r'\\[\\"\'ntbrafv]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\U[0-9a-fA-F]{8}', String.Escape),
        ],
        'root': [
            (r'\s+', Text),
            (r'\(\)|\[\]', Name.Builtin.Pseudo),
            (r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)',
             Name.Namespace, 'dotted'),
            (r'\b([A-Z][\w\']*)', Name),
            (r'///.*?\n', String.Doc),
            (r'//.*?\n', Comment.Single),
            (r'\(\*(?!\))', Comment, 'comment'),

            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),

            (r'\b(open|module)(\s+)([\w.]+)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'\b(let!?)(\s+)(\w+)',
             bygroups(Keyword, Text, Name.Variable)),
            (r'\b(type)(\s+)(\w+)',
             bygroups(Keyword, Text, Name.Class)),
            (r'\b(member|override)(\s+)(\w+)(\.)(\w+)',
             bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
            (r'``([^`\n\r\t]|`[^`\n\r\t])+``', Name),
            (r'(%s)' % '|'.join(keyopts), Operator),
            (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
            (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
            (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
            (r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
             Comment.Preproc),

            (r"[^\W\d][\w']*", Name),

            (r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
            (r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
            (r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
            (r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin),
            # NOTE(review): the '.' in (.[\d_]*)? is unescaped, so it matches
            # any character rather than a literal decimal point -- likely
            # meant r'\.'; kept as-is to preserve upstream behaviour.
            (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
             Number.Float),

            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
             String.Char),
            (r"'.'", String.Char),
            (r"'", Keyword),  # a stray quote is another syntax element

            (r'@?"', String.Double, 'string'),

            (r'[~?][a-z][\w\']*:', Name.Variable),
        ],
        'dotted': [
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][\w\']*', Name, '#pop'),
            (r'[a-z_][\w\']*', Name, '#pop'),
            # e.g. dictionary index access
            default('#pop'),
        ],
        'comment': [
            (r'[^(*)@"]+', Comment),
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            # comments cannot be closed within strings in comments
            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),
            (r'[(*)@]', Comment),
        ],
        'string': [
            (r'[^\\"]+', String),
            include('escape-sequence'),
            (r'\\\n', String),
            (r'\n', String),  # newlines are allowed in any string
            (r'"B?', String, '#pop'),
        ],
        'lstring': [
            (r'[^"]+', String),
            (r'\n', String),
            (r'""', String),
            (r'"B?', String, '#pop'),
        ],
        'tqs': [
            (r'[^"]+', String),
            (r'\n', String),
            (r'"""B?', String, '#pop'),
            (r'"', String),
        ],
    }
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py2@pygments@lexers@dotnet.py@.PATH_END.py
|
{
"filename": "weighted_means.py",
"repo_name": "mbejger/polgraw-allsky",
"repo_path": "polgraw-allsky_extracted/polgraw-allsky-master/followup/src/Auxiliary_scripts/weighted_means.py",
"type": "Python"
}
|
# Compute weighted mean candidate parameters (f, fdot, delta, alpha) across
# time frames, after shifting each frame's frequency to a common reference
# frame.  Python 2 script (uses the `print` statement).
#
# Usage: python weighted_means.py <candidates_file> <N> <lf> <ref> <start_frame>
import numpy as np
import sys
# Input: text file, first 6 columns used per row (one row per frame).
# NOTE(review): column meanings inferred from usage below — col0: frequency,
# col1: spindown, col2/col3: sky position(?), col5: weight. Confirm with writer.
data = np.genfromtxt(sys.argv[1],usecols=np.arange(0,6))
N = int(sys.argv[2])            # number of data points per frame (enters the frequency shift)
lf = int(sys.argv[3])           # number of rows (frames) in the file
ref = int(sys.argv[4])          # reference frame to shift frequencies to
shiftedf=np.zeros(lf)           # frequencies shifted to the reference frame
start_frame = int(sys.argv[5])  # index of the first frame
lf2=lf                          # count of valid (non-sentinel) rows
m=0                             # write index into the compacted copies
k=start_frame                   # frame index attributed to the current row
data2={}                        # compacted copy of valid rows, keyed by (row, col)
for i in range(0, lf):
    # A row of -1000 in the first four columns is a sentinel for a missing
    # frame; drop it from the averages.
    if data[i,0] == -1000 and data[i,1] == -1000 and data[i,2] == -1000 and data[i,3] == -1000:
        lf2=lf2-1
    else:
        # Shift to the reference frame: f_shifted = f + 2*fdot*N*(ref - k)
        shiftedf[m] = data[i,0] + 2.0*data[i,1]*N*(ref-k)
        data2[m,0] = data[i,0]
        data2[m,1] = data[i,1]
        data2[m,2] = data[i,2]
        data2[m,3] = data[i,3]
        data2[m,4] = data[i,4]
        data2[m,5] = data[i,5]
        m=m+1
    # NOTE(review): this assigns start_frame+1 on *every* iteration, so all
    # rows after the first use the same k.  `k = k + 1` looks intended
    # (one frame per row) — confirm before changing.
    k=start_frame+1
# Weighted averages over the valid rows; weights come from column 5.
meanf=means=meand=meana=0
x=0
for j in range(0, lf2):
    meanf += shiftedf[j]*data2[j,5]
    means += data2[j,1]*data2[j,5]
    meand += data2[j,2]*data2[j,5]
    meana += data2[j,3]*data2[j,5]
    x=x+data2[j,5]
meanf = meanf/x
means = means/x
meand = meand/x
meana = meana/x
# Python 2 print statement: four weighted means on one line.
print meanf, means, meand, meana
#python weighted_means.py /work/ns/msieniawska/CGW_dt16_stattest/output_densegrid/8/followup/test.txt 32312 8 004
|
mbejgerREPO_NAMEpolgraw-allskyPATH_START.@polgraw-allsky_extracted@polgraw-allsky-master@followup@src@Auxiliary_scripts@weighted_means.py@.PATH_END.py
|
{
"filename": "table_colvald.md",
"repo_name": "jbroll/starbase",
"repo_path": "starbase_extracted/starbase-master/docs/table_colvald.md",
"type": "Markdown"
}
|
### table_colvald - get the value from the column.
SYNOPSIS
--------
```
#include <../tablelib/table.h>
double table_colvald(TableRow r, int c);
```
PARAMETERS
----------
* `TableRow r` - pointer to the table row.
* `int c` - the column number to get the value for.
DESCRIPTION
-----------
Return the value of table column `c` in row `r` as a double.
SEE ALSO
--------
[table_colval](table_colval.html)
,
[table_colvals](table_colvals.html)
,
[table_colvali](table_colvali.html)
,
[table_rowloc](table_rowloc.html)
,
[table_parsline](table_parsline.html)
,
[table_colpad](table_colpad.html)
,
[table_coladd](table_coladd.html)
,
[table_colarg](table_colarg.html)
,
[table_colnum](table_colnum.html)
,
[table_colnam](table_colnam.html)
,
[table_hdrfree](table_hdrfree.html)
,
[table_hdrnth](table_hdrnth.html)
,
[table_rowfree](table_rowfree.html)
,
[table_header](table_header.html)
,
[table_rowput](table_rowput.html)
,
[table_hdrput](table_hdrput.html)
,
[table_rowget](table_rowget.html)
,
[table_rowtrim](table_rowtrim.html)
,
[table_hdrget](table_hdrget.html)
,
[table_hdrgetn](table_hdrgetn.html)
,
[table_hdrgeti](table_hdrgeti.html)
,
[table_hdrgetd](table_hdrgetd.html)
,
[table_hdrgets](table_hdrgets.html)
,
[table_hdrfind](table_hdrfind.html)
,
[table_extract](table_extract.html)
,
[table_load](table_load.html)
,
[table_loadva](table_loadva.html)
,
[table_mode](table_mode.html)
,
[table_ncol](table_ncol.html)
,
[table_ofs](table_ofs.html)
,
[table_ors](table_ors.html)
|
jbrollREPO_NAMEstarbasePATH_START.@starbase_extracted@starbase-master@docs@table_colvald.md@.PATH_END.py
|
{
"filename": "_side.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmapbox/colorbar/title/_side.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `side` property of `choroplethmapbox.colorbar.title`."""

    def __init__(
        self,
        plotly_name="side",
        parent_name="choroplethmapbox.colorbar.title",
        **kwargs,
    ):
        # Resolve defaults up front so any caller-supplied values win.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["right", "top", "bottom"])
        super(SideValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmapbox@colorbar@title@_side.py@.PATH_END.py
|
{
"filename": "schema.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/utils/schema.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__version__ = '0.2.0'
class SchemaError(Exception):
    """Error during Schema validation."""

    def __init__(self, autos, errors):
        # Normalize both channels to lists so `code` can merge them uniformly.
        self.autos = [autos] if type(autos) is not list else autos
        self.errors = [errors] if type(errors) is not list else errors
        Exception.__init__(self, self.code)

    @property
    def code(self):
        """Readable message: explicit errors take precedence over auto ones."""
        def dedupe(items):
            # Order-preserving de-duplication, dropping None placeholders.
            seen = set()
            out = []
            for item in items:
                if item is None or item in seen:
                    continue
                seen.add(item)
                out.append(item)
            return out

        explicit = dedupe(self.errors)
        if explicit:
            return '\n'.join(explicit)
        return '\n'.join(dedupe(self.autos))
class And:
    """Validate data against *all* given sub-schemas, in order."""

    def __init__(self, *args, **kw):
        self._args = args
        # Only the optional `error` keyword is accepted.
        assert list(kw) in (['error'], [])
        self._error = kw.get('error')

    def __repr__(self):
        inner = ', '.join(repr(arg) for arg in self._args)
        return f"{self.__class__.__name__}({inner})"

    def validate(self, data):
        """Pipe *data* through every sub-schema, chaining the results."""
        for sub in self._args:
            data = Schema(sub, error=self._error).validate(data)
        return data
class Or(And):
    """Validate data against *any one* of the given sub-schemas."""

    def validate(self, data):
        # Keep the last failure so its details can be chained into ours.
        failure = SchemaError([], [])
        for sub in self._args:
            try:
                return Schema(sub, error=self._error).validate(data)
            except SchemaError as exc:
                failure = exc
        raise SchemaError([f'{self!r} did not validate {data!r}'] + failure.autos,
                          [self._error] + failure.errors)
class Use:
    """Schema node that *converts* data by applying a callable."""

    def __init__(self, callable_, *, error=None):
        assert callable(callable_)
        self._callable = callable_
        self._error = error

    def __repr__(self):
        return f'{self.__class__.__name__}({self._callable!r})'

    def validate(self, data):
        """Apply the wrapped callable; wrap any failure in SchemaError."""
        try:
            return self._callable(data)
        except SchemaError as exc:
            # Propagate nested schema failures with our error prepended.
            raise SchemaError([None] + exc.autos, [self._error] + exc.errors)
        except BaseException as exc:
            name = self._callable.__name__
            raise SchemaError(f'{name}({data!r}) raised {exc!r}', self._error)
def priority(s):
    """Return the matching priority for a given schema object.

    Higher priorities are tried first when several schema keys could
    match the same data key: 6 = container literal, 5 = dict literal,
    4 = object with a ``validate`` method, 3 = type, 2 = callable,
    1 = plain value compared for equality.

    :rtype: int
    """
    if type(s) in (list, tuple, set, frozenset):
        return 6
    if type(s) is dict:
        return 5
    # We exclude Optional from the test, otherwise it would make a
    # catch-all rule like "str" take precedence over any optional field,
    # which would be unintuitive.
    if hasattr(s, 'validate') and type(s) is not Optional:
        return 4
    if type(s) is type:
        return 3
    if callable(s):
        return 2
    return 1
class Schema:
    """Wraps a schema specification and validates data against it.

    The specification may be a container literal, a dict, an object with
    a ``validate`` method, a type, a callable predicate, or a plain value
    compared for equality (dispatched in that order below).
    """

    def __init__(self, schema, *, error=None):
        self._schema = schema
        self._error = error  # custom message prepended to validation errors

    def __repr__(self):
        return f'{self.__class__.__name__}({self._schema!r})'

    def validate(self, data):
        """Return (possibly converted) *data* on success; raise SchemaError on failure."""
        s = self._schema
        e = self._error
        # Container literal: data must be the same container type, and each
        # element must match at least one of the element schemas.
        if type(s) in (list, tuple, set, frozenset):
            data = Schema(type(s), error=e).validate(data)
            return type(s)(Or(*s, error=e).validate(d) for d in data)
        # Dict literal: match every data key/value against the schema items,
        # trying higher-priority schema keys first.
        if type(s) is dict:
            data = Schema(dict, error=e).validate(data)
            new = type(data)()
            x = None  # last value-validation failure, for error chaining
            coverage = set()  # non-optional schema keys that were matched
            sorted_skeys = list(sorted(s, key=priority))
            for key, value in data.items():
                valid = False
                skey = None
                for skey in sorted_skeys:
                    svalue = s[skey]
                    try:
                        nkey = Schema(skey, error=e).validate(key)
                    except SchemaError:
                        # This schema key does not match the data key; try next.
                        pass
                    else:
                        try:
                            nvalue = Schema(svalue, error=e).validate(value)
                        except SchemaError as _x:
                            # Key matched but the value failed: record and re-raise.
                            x = _x
                            raise
                        else:
                            coverage.add(skey)
                            valid = True
                            break
                if valid:
                    new[nkey] = nvalue
                elif skey is not None:
                    # No schema key accepted this data key.
                    if x is not None:
                        raise SchemaError([f'key {key!r} is required'] + x.autos, [e] + x.errors)
                    else:
                        raise SchemaError(f'key {skey!r} is required', e)
            # Optional keys never count toward required coverage.
            coverage = set(k for k in coverage if type(k) is not Optional)
            required = set(k for k in s if type(k) is not Optional)
            if coverage != required:
                raise SchemaError(f'missed keys {(required - coverage)!r}', e)
            if len(new) != len(data):
                raise SchemaError(f'wrong keys {new!r} in {data!r}', e)
            return new
        # Validatable object: delegate, wrapping any failure.
        if hasattr(s, 'validate'):
            try:
                return s.validate(data)
            except SchemaError as x:
                raise SchemaError([None] + x.autos, [e] + x.errors)
            except BaseException as x:
                raise SchemaError(f'{s!r}.validate({data!r}) raised {x!r}', self._error)
        # Type: isinstance check.
        if type(s) is type:
            if isinstance(data, s):
                return data
            else:
                raise SchemaError(f'{data!r} should be instance of {s!r}', e)
        # Callable predicate: must evaluate truthy.
        if callable(s):
            f = s.__name__
            try:
                if s(data):
                    return data
            except SchemaError as x:
                raise SchemaError([None] + x.autos, [e] + x.errors)
            except BaseException as x:
                raise SchemaError(f'{f}({data!r}) raised {x!r}', self._error)
            raise SchemaError(f'{f}({data!r}) should evaluate to True', e)
        # Anything else: plain equality comparison.
        if s == data:
            return data
        else:
            raise SchemaError(f'{s!r} does not match {data!r}', e)
class Optional(Schema):
    """Marker wrapping a dict-schema key that is allowed to be absent."""
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@utils@schema.py@.PATH_END.py
|
{
"filename": "_bgcolorsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/splom/hoverlabel/_bgcolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `bgcolorsrc` property of `splom.hoverlabel`."""

    def __init__(
        self, plotly_name="bgcolorsrc", parent_name="splom.hoverlabel", **kwargs
    ):
        # Default edit_type is "none"; a caller-supplied value overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super(BgcolorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@splom@hoverlabel@_bgcolorsrc.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterternary/unselected/__init__.py",
"type": "Python"
}
|
import sys

if sys.version_info < (3, 7):
    # Python < 3.7: eager imports (no module-level __getattr__ support).
    from ._textfont import TextfontValidator
    from ._marker import MarkerValidator
else:
    # Python >= 3.7: lazy submodule loading via PEP 562 module __getattr__.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._textfont.TextfontValidator", "._marker.MarkerValidator"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterternary@unselected@__init__.py@.PATH_END.py
|
{
"filename": "pp.py",
"repo_name": "QEF/q-e",
"repo_path": "q-e_extracted/q-e-master/EPW/bin/pp.py",
"type": "Python"
}
|
#!/usr/bin/env python3
#
# Post-processing script from of PH data in format used by EPW
# 14/07/2015 - Creation of the script - Samuel Ponce
# 14/03/2018 - Automatically reads the number of q-points - Michael Waters
# 14/03/2018 - Detect if SOC is included in the calculation - Samuel Ponce
# 05/06/2019 - Removed SOC for xml detection instead - Felix Goudreault
#
from __future__ import print_function
try:
from builtins import input
except ImportError:
print('Install future. e.g. "pip install --user future"')
# import numpy as np
import os
import re
from xml.dom import minidom
def get_nqpt(prefix):
    """Return the number of irreducible q-points recorded by ph.x.

    Reads ``_ph0/<prefix>.phsave/control_ph.xml`` and returns the integer
    found on the line following the NUMBER_OF_Q_POINTS tag.

    Raises ValueError if the tag is missing (the original looped off the
    end of the file and died with an IndexError instead).
    """
    fname = '_ph0/' + prefix + '.phsave/control_ph.xml'
    # The control file is small, so reading it whole is fine; `with`
    # guarantees the handle is closed even if parsing fails.
    with open(fname, 'r') as fid:
        lines = fid.readlines()
    for lineno, line in enumerate(lines):
        if 'NUMBER_OF_Q_POINTS' in line:
            # The value sits on the line right after the tag.
            return int(lines[lineno + 1])
    raise ValueError('NUMBER_OF_Q_POINTS not found in ' + fname)
def hasSOC(prefix):
    """Return the text of the <spinorbit> tag from the SCF XML output.

    Parses ``<prefix>.save/data-file-schema.xml`` and returns the tag's
    character data (a string such as 'true'/'false').
    """
    fname = prefix + '.save/data-file-schema.xml'
    doc = minidom.parse(fname)
    node = doc.getElementsByTagName('spinorbit')[0]
    return node.childNodes[0].data
def hasPAW(prefix):
    """Return True when the SCF XML output flags a PAW calculation.

    Parses ``<prefix>.save/data-file-schema.xml`` and compares the <paw>
    tag's text against the literal 'true'.
    """
    fname = prefix + '.save/data-file-schema.xml'
    doc = minidom.parse(fname)
    node = doc.getElementsByTagName('paw')[0]
    return node.childNodes[0].data == 'true'
def hasfc(prefix):
    """Return True if an interatomic force-constant file exists.

    Accepts either the XML (``<prefix>.fc.xml``) or the plain-text
    (``<prefix>.fc``) format.
    """
    fname = str(prefix) + '.fc.xml'
    if os.path.isfile(fname):
        return True
    # Raw string: '\.' is a regex escape, not a Python string escape
    # (the original non-raw literal triggers an invalid-escape warning).
    fname_no_xml = re.sub(r'\.xml$', '', fname)
    return os.path.isfile(fname_no_xml)
def hasXML(prefix):
    """Return True when the phonon output uses the XML dynamical-matrix format.

    Checks for ``<prefix>.dyn1.xml`` first, then falls back to
    ``<prefix>.dyn1``.

    Raises FileNotFoundError when neither file exists.  (The original
    shadowed the builtin with a local class of the same name; the builtin
    is used now, which is backward-compatible for `except FileNotFoundError`
    by name and additionally catchable as OSError.)
    """
    # os.path.join with a single argument was a no-op; plain concatenation.
    fname = prefix + ".dyn1.xml"
    if os.path.isfile(fname):
        return True
    # Raw string so '\.' escapes the dot for the regex, not for Python.
    fname_no_xml = re.sub(r'\.xml$', '', fname)
    if not os.path.isfile(fname_no_xml):
        raise FileNotFoundError(
            "No dyn0 file found cannot tell if xml format was used.")
    return False
def isSEQ(prefix):
    """Return True when ``_ph0/<prefix>.dvscf`` exists (serial ph.x run).

    Parallel runs write per-q-point files instead of a single dvscf.
    """
    return os.path.isfile('_ph0/' + str(prefix) + '.dvscf')
# --- Interactive driver: collect the ph.x outputs into ./save for EPW ---
# NOTE(review): `prefix` from input() is interpolated unquoted into shell
# commands via os.system below; a prefix containing spaces or shell
# metacharacters would break (or be interpreted by) the shell.
user_input = input(
    'Enter the prefix used for PH calculations (e.g. diam)\n')
prefix = str(user_input)
# # Test if SOC
# SOC = hasSOC(prefix)
# Test if '.xml' files are used
XML = hasXML(prefix)
# Test if PAW
PAW = hasPAW(prefix)
# Test if fc
fc = hasfc(prefix)
# Test if seq. or parallel run
SEQ = isSEQ(prefix)
# The `else` branch below is dead code (kept as a manual fallback for the
# automatic q-point detection).
if True:  # this gets the nqpt from the outputfiles
    nqpt = get_nqpt(prefix)
else:
    # Enter the number of irr. q-points
    user_input = input(
        'Enter the number of irreducible q-points\n')
    nqpt = user_input
    try:
        nqpt = int(user_input)
    except ValueError:
        raise Exception('The value you enter is not an integer!')
os.system('mkdir save 2>/dev/null')
# Copy, per q-point: the dynamical matrix, the dvscf potential, and (once,
# at iqpt == 1) the phsave directory plus optional force constants.  The
# four branches below differ only in file naming: serial vs parallel runs
# (dvscf vs dvscf1 / per-q subdirectories) and XML vs plain-text format.
for iqpt in range(1, nqpt+1):
    label = str(iqpt)
    # Case calculation in seq.
    if SEQ:
        # Case with XML files
        if XML:
            os.system('cp '+prefix+'.dyn0 '+prefix+'.dyn0.xml')
            os.system('cp '+prefix+'.dyn'+str(iqpt)+'.xml save/'+prefix
                      + '.dyn_q'+label+'.xml')
            if (iqpt == 1):
                os.system('cp _ph0/'+prefix+'.dvscf* save/'+prefix+'.dvscf_q'
                          + label)
                os.system('cp -r _ph0/'+prefix+'.phsave save/')
                if fc:
                    os.system('cp '+prefix+'.fc.xml save/ifc.q2r.xml')
                if PAW:
                    os.system('cp _ph0/'+prefix+'.dvscf_paw* save/'+prefix +
                              '.dvscf_paw_q'+label)
            else:
                os.system('cp _ph0/'+prefix+'.q_'+str(iqpt)+'/'+prefix +
                          '.dvscf* save/'+prefix+'.dvscf_q'+label)
                # Wavefunction files are large and not needed by EPW.
                os.system('rm _ph0/'+prefix+'.q_'+str(iqpt)+'/*wfc*')
                if PAW:
                    os.system('cp _ph0/'+prefix+'.q_'+str(iqpt)+'/'+prefix +
                              '.dvscf_paw* save/'+prefix+'.dvscf_paw_q'+label)
        # Case without XML files
        else:
            os.system('cp '+prefix+'.dyn'+str(iqpt)+' save/'+prefix+'.dyn_q' +
                      label)
            if (iqpt == 1):
                os.system('cp _ph0/'+prefix+'.dvscf save/'+prefix+'.dvscf_q' +
                          label)
                os.system('cp -r _ph0/'+prefix+'.phsave save/')
                if fc:
                    os.system('cp '+prefix+'.fc save/ifc.q2r')
                if PAW:
                    os.system('cp _ph0/'+prefix+'.dvscf_paw save/'+prefix +
                              '.dvscf_paw_q'+label)
            else:
                os.system('cp _ph0/'+prefix+'.q_'+str(iqpt)+'/'+prefix +
                          '.dvscf save/'+prefix+'.dvscf_q'+label)
                os.system('rm _ph0/'+prefix+'.q_'+str(iqpt)+'/*wfc*')
                if PAW:
                    os.system('cp _ph0/'+prefix+'.q_'+str(iqpt)+'/'+prefix +
                              '.dvscf_paw save/'+prefix+'.dvscf_paw_q'+label)
    else:
        # Case with XML format
        if XML:
            os.system('cp '+prefix+'.dyn0 '+prefix+'.dyn0.xml')
            os.system('cp '+prefix+'.dyn'+str(iqpt)+'.xml save/'+prefix +
                      '.dyn_q'+label+'.xml')
            if (iqpt == 1):
                os.system('cp _ph0/'+prefix+'.dvscf1 save/'+prefix+'.dvscf_q' +
                          label)
                os.system('cp -r _ph0/'+prefix+'.phsave save/')
                if fc:
                    os.system('cp '+prefix+'.fc.xml save/ifc.q2r.xml')
                if PAW:
                    os.system('cp _ph0/'+prefix+'.dvscf_paw1 save/'+prefix +
                              '.dvscf_paw_q'+label)
            else:
                os.system('cp _ph0/'+prefix+'.q_'+str(iqpt)+'/'+prefix +
                          '.dvscf1 save/'+prefix+'.dvscf_q'+label)
                os.system('rm _ph0/'+prefix+'.q_'+str(iqpt)+'/*wfc*')
                if PAW:
                    os.system('cp _ph0/'+prefix+'.q_'+str(iqpt)+'/'+prefix +
                              '.dvscf_paw1 save/'+prefix+'.dvscf_paw_q'+label)
        # Case without XML format
        else:
            os.system('cp '+prefix+'.dyn'+str(iqpt)+' save/'+prefix+'.dyn_q' +
                      label)
            if (iqpt == 1):
                os.system('cp _ph0/'+prefix+'.dvscf1 save/'+prefix+'.dvscf_q' +
                          label)
                os.system('cp -r _ph0/'+prefix+'.phsave save/')
                if fc:
                    os.system('cp '+prefix+'.fc save/ifc.q2r')
                if PAW:
                    os.system('cp _ph0/'+prefix+'.dvscf_paw1 save/'+prefix +
                              '.dvscf_paw_q'+label)
            else:
                os.system('cp _ph0/'+prefix+'.q_'+str(iqpt)+'/'+prefix +
                          '.dvscf1 save/'+prefix+'.dvscf_q'+label)
                os.system('rm _ph0/'+prefix+'.q_'+str(iqpt)+'/*wfc*')
                if PAW:
                    os.system('cp _ph0/'+prefix+'.q_'+str(iqpt)+'/'+prefix +
                              '.dvscf_paw1 save/'+prefix+'.dvscf_paw_q'+label)
|
QEFREPO_NAMEq-ePATH_START.@q-e_extracted@q-e-master@EPW@bin@pp.py@.PATH_END.py
|
{
"filename": "_visible.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/area/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `visible` property of `area` traces."""

    def __init__(self, plotly_name="visible", parent_name="area", **kwargs):
        # Resolve defaults first so caller-supplied values override them.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", [True, False, "legendonly"])
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@area@_visible.py@.PATH_END.py
|
{
"filename": "_xanchor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/heatmapgl/colorbar/_xanchor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `xanchor` property of `heatmapgl.colorbar`."""

    def __init__(
        self, plotly_name="xanchor", parent_name="heatmapgl.colorbar", **kwargs
    ):
        # Resolve defaults first so caller-supplied values override them.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        values = kwargs.pop("values", ["left", "center", "right"])
        super(XanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@heatmapgl@colorbar@_xanchor.py@.PATH_END.py
|
{
"filename": "weight_init.py",
"repo_name": "MIC-DKFZ/dynamic-network-architectures",
"repo_path": "dynamic-network-architectures_extracted/dynamic-network-architectures-main/dynamic_network_architectures/initialization/weight_init.py",
"type": "Python"
}
|
from torch import nn
from dynamic_network_architectures.building_blocks.residual import BasicBlockD, BottleneckD
class InitWeights_He(object):
    """Kaiming (He) normal initializer for 2-D/3-D conv layers.

    Intended for use with ``network.apply(InitWeights_He(slope))``;
    modules other than the four conv types below are left untouched.
    """

    def __init__(self, neg_slope: float = 1e-2):
        # `a` parameter passed to kaiming_normal_ (leaky-ReLU negative slope).
        self.neg_slope = neg_slope

    def __call__(self, module):
        # Tuple isinstance replaces the original chain of four `or` checks.
        if isinstance(module, (nn.Conv2d, nn.Conv3d,
                               nn.ConvTranspose2d, nn.ConvTranspose3d)):
            # kaiming_normal_ works in place; the return value is the same
            # parameter, so the reassignment is a no-op kept for clarity.
            module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)
class InitWeights_XavierUniform(object):
    """Xavier/Glorot uniform initializer for 2-D/3-D conv layers.

    Intended for use with ``network.apply(InitWeights_XavierUniform(gain))``;
    modules other than the four conv types below are left untouched.
    """

    def __init__(self, gain: float = 1):
        # Scaling factor for xavier_uniform_; annotation widened from the
        # original (incorrect) `int` — non-integer gains are valid.
        self.gain = gain

    def __call__(self, module):
        # Tuple isinstance replaces the original chain of four `or` checks.
        if isinstance(module, (nn.Conv2d, nn.Conv3d,
                               nn.ConvTranspose2d, nn.ConvTranspose3d)):
            module.weight = nn.init.xavier_uniform_(module.weight, self.gain)
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)
def init_last_bn_before_add_to_0(module):
    """Zero the weight and bias of a residual block's final norm layer.

    Applies to BasicBlockD (conv2.norm) and BottleneckD (conv3.norm);
    other modules are left unchanged.
    """
    if isinstance(module, BasicBlockD):
        norm = module.conv2.norm
        norm.weight = nn.init.constant_(norm.weight, 0)
        norm.bias = nn.init.constant_(norm.bias, 0)
    if isinstance(module, BottleneckD):
        norm = module.conv3.norm
        norm.weight = nn.init.constant_(norm.weight, 0)
        norm.bias = nn.init.constant_(norm.bias, 0)
|
MIC-DKFZREPO_NAMEdynamic-network-architecturesPATH_START.@dynamic-network-architectures_extracted@dynamic-network-architectures-main@dynamic_network_architectures@initialization@weight_init.py@.PATH_END.py
|
{
"filename": "chat_templates.py",
"repo_name": "OpenAccess-AI-Collective/axolotl",
"repo_path": "axolotl_extracted/axolotl-main/src/axolotl/utils/chat_templates.py",
"type": "Python"
}
|
"""
This module provides functionality for selecting chat templates based on user choices.
These templates are used for formatting messages in a conversation.
"""
import logging
from typing import TYPE_CHECKING, Any, Dict, Optional
if TYPE_CHECKING:
from transformers import PreTrainedTokenizerBase
LOG = logging.getLogger("axolotl.utils.chat_templates")
# Sentinel choice strings used when selecting a chat template.
# NOTE(review): "TEMPALTE" typo is kept — renaming the module-level name
# could break other code in this file/importers; fix in a coordinated change.
_JINJA_TEMPALTE_CHOICE = "jinja"
_DEFAULT_TEMPLATE_CHOICE = "tokenizer_default"
# Prefix for choices of the form "tokenizer_default_fallback_<template>".
_DEFAULT_FALLBACK_CHATML_TEMPLATE_CHOICE_PREFIX = "tokenizer_default_fallback_"
_CHAT_TEMPLATES = {
"alpaca": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '### Instruction: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ '### Response: ' + message['content'] + eos_token}}{% endif %}{% endfor %}",
"mistral_v1": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ ' [INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}", # Mistral 7B V1, Mistral 7B V2, Mixtral 8x7B V1...
"mistral_v2v3": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + '[/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}", # V3: Mistral 7B V3, Small, Large...
"mistral_v3_tekken": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST]' + message['content'] + '[/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}", # V3-Tekken: Nemo, Pixtral...
"chatml": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"gemma": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
"cohere": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif false == true %}{% set loop_messages = messages %}{% set system_message = 'You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% elif message['role'] == 'assistant' %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}{% endif %}",
"llama3": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
"llama3_2_vision": '{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now("%d %b %Y") %}\n {%- else %}\n {%- set date_string = "26 Jul 2024" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0][\'role\'] == \'system\' %}\n {%- set system_message = messages[0][\'content\']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = "" %}\n{%- endif %}\n\n{#- Find out if there are any images #}\n{% set image_ns = namespace(has_images=false) %} \n{%- for message in messages %}\n {%- for content in message[\'content\'] %}\n {%- if content[\'type\'] == \'image\' %}\n {%- set image_ns.has_images = true %}\n {%- endif %}\n {%- endfor %}\n{%- endfor %}\n\n{#- Error out if there are images and system message #}\n{%- if image_ns.has_images and not system_message == "" %}\n {{- raise_exception("Prompting with images is incompatible with system messages.") }}\n{%- endif %}\n\n{#- System message if there are no images #}\n{%- if not image_ns.has_images %}\n {{- "<|start_header_id|>system<|end_header_id|>\\n\\n" }}\n {%- if tools is not none %}\n {{- "Environment: ipython\\n" }}\n {%- endif %}\n {{- "Cutting Knowledge Date: December 2023\\n" }}\n {{- "Today Date: " + date_string + "\\n\\n" }}\n {%- if tools is not none and not tools_in_user_message %}\n {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." 
}}\n {{- \'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.\' }}\n {{- "Do not use variables.\\n\\n" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- "\\n\\n" }}\n {%- endfor %}\n {%- endif %}\n {{- system_message }}\n {{- "<|eot_id|>" }}\n{%- endif %}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0][\'content\']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception("Cannot put tools in the first user message when there\'s no first user message!") }}\n{%- endif %}\n {{- \'<|start_header_id|>user<|end_header_id|>\\n\\n\' -}}\n {{- "Given the following functions, please respond with a JSON for a function call " }}\n {{- "with its proper arguments that best answers the given prompt.\\n\\n" }}\n {{- \'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.\' }}\n {{- "Do not use variables.\\n\\n" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- "\\n\\n" }}\n {%- endfor %}\n {{- first_user_message + "<|eot_id|>"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == \'ipython\' or message.role == \'tool\' or \'tool_calls\' in message) %}\n {{- \'<|start_header_id|>\' + message[\'role\'] + \'<|end_header_id|>\\n\\n\' }}\n {%- if message[\'content\'] is string %}\n {{- message[\'content\'] }}\n {%- else %}\n {%- for content in message[\'content\'] %}\n {%- if content[\'type\'] == \'image\' %}\n {{- \'<|image|>\' }}\n {%- elif content[\'type\'] == \'text\' %}\n {{- content[\'text\'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- \'<|eot_id|>\' }}\n {%- elif \'tool_calls\' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception("This 
model only supports single tool-calls at once!") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {{- \'<|start_header_id|>assistant<|end_header_id|>\\n\\n\' -}}\n {{- \'{"name": "\' + tool_call.name + \'", \' }}\n {{- \'"parameters": \' }}\n {{- tool_call.arguments | tojson }}\n {{- "}" }}\n {{- "<|eot_id|>" }}\n {%- elif message.role == "tool" or message.role == "ipython" %}\n {{- "<|start_header_id|>ipython<|end_header_id|>\\n\\n" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- "<|eot_id|>" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- \'<|start_header_id|>assistant<|end_header_id|>\\n\\n\' }}\n{%- endif %}\n',
"phi_3": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'system') %}{{'<|system|>' + '\n' + message['content'] + '<|end|>' + '\n'}}{% elif (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif message['role'] == 'assistant' %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
"phi_35": "{% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
"deepseek_v2": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ '<|User|>' + message['content'] }}{% elif message['role'] == 'assistant' %}{{ '<|Assistant|>' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|Assistant|>' }}{% endif %}",
"jamba": '{# Variables #}\n{% set ns = namespace(message_count=0, is_last_checked_defined=False) %}\n{##}\n{% set bom_str = bom_str or "<|bom|>" %}\n{% set eom_str = eom_str or "<|eom|>" %}\n{% set default_system_message = "" %}\n{##}\n{% set documents_prefix = "<documents>" %}\n{% set documents_suffix = "</documents>" %}\n{% set tool_definitions_prefix = "<tool_definitions>" %}\n{% set tool_definitions_suffix = "</tool_definitions>" %}\n{% set active_modes_prefix = "<active_output_modes>" %}\n{% set active_modes_suffix = "</active_output_modes>" %}\n{##}\n{% set tool_calls_prefix = "<tool_calls>" %}\n{% set tool_calls_suffix = "</tool_calls>" %}\n{% set citations_prefix = "<citations>" %}\n{% set citations_suffix = "</citations>" %}\n{##}\n{% if add_generation_prompt is not defined %}\n {% set add_generation_prompt = True %}\n{% endif %}\n{% set role_to_predict = role_to_predict or "assistant" %}\n{% if messages|length > 0 and messages[0].role == "system" %}\n {% set system_message = messages[0].content %}\n {% set loop_messages = messages[1:] %}\n{% else %}\n {% set system_message = default_system_message %}\n {% set loop_messages = messages %}\n{% endif %}\n{##}\n{##}\n{# Macros #}\n{% macro handle_tool_definitions(tools) %}\n {{- tool_definitions_prefix -}}\n {{- "\\n# Tools" -}}\n {{- "\\n\\n## Functions" -}}\n {% for tool in tools %}\n {% set _ = is_param_set(tool, field="type") %}\n {% set is_tool_type_set = ns.is_last_checked_defined %}\n {% if is_tool_type_set %}\n {% if tool.type == "function" %}\n {% set tool = tool.function %}\n {% else %}\n {{ raise_exception("Currently, the only supported tool type is `function`") }}\n {% endif %}\n {% endif %}\n {{- "\\n\\n" + (tool|tojson(indent=2)) -}}\n {% endfor %}\n {{- "\\n" + tool_definitions_suffix -}}\n{% endmacro %}\n{##}\n{% macro handle_first_system_message(system_message, tools) %}\n {{- bom_str + handle_role("system") -}}\n {% set _ = is_param_set(system_message) %}\n {% set is_system_message_set = 
ns.is_last_checked_defined %}\n {% if is_system_message_set %}\n {{- system_message -}}\n {% endif %}\n {% set _ = is_param_set(tools, is_list=True) %}\n {% set is_tools_set = ns.is_last_checked_defined %}\n {% if is_tools_set %}\n {% if system_message %}\n {{- "\\n\\n" -}}\n {% endif %}\n {{- handle_tool_definitions(tools) -}}\n {% endif %}\n {% set ns.message_count = ns.message_count + 1 %}\n{% endmacro %}\n{##}\n{% macro handle_tool_calls(tool_calls) %}\n {{- tool_calls_prefix + "[\\n" -}}\n {% for tool_call in tool_calls %}\n {% set _ = is_param_set(tool_call, field="function") %}\n {% set is_tool_call_function_set = ns.is_last_checked_defined %}\n {% if is_tool_call_function_set %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {% set arguments = tool_call.arguments %}\n {% if arguments is not string %}\n {%- set arguments = arguments|tojson -%}\n {%- endif %}\n {{ "{\\"name\\": \\"" + tool_call.name + "\\", \\"arguments\\": " + arguments + "}" -}}\n {% if not loop.last %}\n {{- "," }}\n {% endif %}\n {% endfor %}\n {{- "\\n]" + tool_calls_suffix -}}\n{% endmacro %}\n{##}\n{% macro handle_documents(documents) %}\n {{- documents_prefix -}}\n {{- "\\n# Documents" -}}\n {{- "\\n\\nYou can use the following documents for reference:" -}}\n {% for doc in documents %}\n {{- "\\n\\n## Document ID: " + loop.index0|string -}}\n {% set _ = is_param_set(doc, field="title") %}\n {% set is_doc_title_set = ns.is_last_checked_defined %}\n {% if is_doc_title_set %}\n {{- "\\nTitle: " + doc.title -}}\n {% endif %}\n {% for key, value in doc.items() %}\n {% if key not in ["title", "text"] %}\n {{- "\\n" + key|title + ": " + value|string -}}\n {% endif %}\n {% endfor %}\n {{- "\\nText: " + doc.text -}}\n {% endfor %}\n {{- "\\n" + documents_suffix -}}\n{% endmacro %}\n{##}\n{% macro handle_knobs(knobs) %}\n {{- active_modes_prefix -}}\n {{- "\\n# Active Modes" -}}\n {{ "\\n\\nThe following modes configure the format or style of your responses. 
You should adhere to all currently" -}}\n {{ " active modes simultaneously." -}}\n {% if knobs.citation_mode == "fast" %}\n {{- "\\n\\n## Citation Mode" -}}\n {{- "\\n\\nProvide a list of references only for the documents you base your response on. Format your response" -}}\n {{ " with the original answer followed by a citation section. Use this template:" -}}\n {{ " `{answer}" + citations_prefix + "DOCUMENT_IDS" + citations_suffix + "`, where DOCUMENT_IDS are the relevant document numbers" -}}\n {{ " (e.g. [2, 5, 9]), or [] if the answer cannot be supported by the provided documents." -}}\n {% endif %}\n {% if knobs.response_format == "json_object" %}\n {{- "\\n\\n## JSON Mode" -}}\n {{ "\\n\\nProvide your response in JSON format. Adhere strictly to any schema given by the user." -}}\n {{ " If an appropriate JSON format exists, use it without modification." -}}\n {% endif %}\n {{- "\\n" + active_modes_suffix -}}\n{% endmacro %}\n{##}\n{% macro get_last_user_index(messages) %}\n {% set ns.last_user_index = 0 %}\n {% for message in messages %}\n {% if message.role == \'user\' %}\n {% set ns.last_user_index = loop.index0 %}\n {% endif %}\n {% endfor %}\n {{- ns.last_user_index -}}\n{% endmacro %}\n{##}\n{% macro handle_last_system_message(documents, knobs, use_documents, use_knobs) %}\n {{- bom_str + handle_role("system") -}}\n {% set macros_to_call = [] %}\n {% set params_for_macros = [] %}\n {% if use_documents %}\n {% set macros_to_call = macros_to_call + [handle_documents] %}\n {% set params_for_macros = params_for_macros + [[documents]] %}\n {% endif %}\n {% if use_knobs %}\n {% set macros_to_call = macros_to_call + [handle_knobs] %}\n {% set params_for_macros = params_for_macros + [[knobs]] %}\n {% endif %}\n {% for i in range(macros_to_call|length) %}\n {% if i > 0 %}\n {{- "\\n\\n" -}}\n {% endif %}\n {{- macros_to_call[i](*params_for_macros[i]) -}}\n {% endfor %}\n {% set ns.message_count = ns.message_count + 1 %}\n{% endmacro %}\n{##}\n{% macro 
handle_role(role, add_space=True) %}\n {{- "<|" + role + "|>" -}}\n {% if add_space %}\n {{- " " -}}\n {% endif %}\n{% endmacro %}\n{##}\n{% macro is_param_set(param, field=none, is_list=False) %}\n {% if field is not none %}\n {% if field in param %}\n {% set param = param[field] %}\n {% else %}\n {% set param = none %}\n {% endif %}\n {% endif %}\n {% set is_defined = param is defined and param is not none %}\n {% if is_list %}\n {% set ns.is_last_checked_defined = is_defined and param|length > 0 %}\n {% else %}\n {% set ns.is_last_checked_defined = is_defined %}\n {% endif %}\n{% endmacro %}\n{##}\n{##}\n{# Template #}\n{{- "<|startoftext|>" -}}\n{% set _ = is_param_set(system_message) %}\n{% set is_system_message_set = ns.is_last_checked_defined %}\n{% set _ = is_param_set(tools, is_list=True) %}\n{% set is_tools_set = ns.is_last_checked_defined %}\n{% set has_system_message = (is_system_message_set or is_tools_set) %}\n{% if has_system_message %}\n {{- handle_first_system_message(system_message, tools) -}}\n{% endif %}\n{% set last_user_index = get_last_user_index(loop_messages)|int %}\n{% for message in loop_messages %}\n {% if loop.index0 == last_user_index %}\n {% set _ = is_param_set(documents, is_list=True) %}\n {% set use_documents = ns.is_last_checked_defined %}\n {% set _ = is_param_set(knobs) %}\n {% set use_knobs = ns.is_last_checked_defined and knobs.is_set %}\n {% set add_last_system_message = use_documents or use_knobs %}\n {% if add_last_system_message %}\n {% if ns.message_count > 0 %}\n {{- eom_str -}}\n {% endif %}\n {{- handle_last_system_message(documents, knobs, use_documents, use_knobs) -}}\n {% endif %}\n {% endif %}\n {% set role = message.role %}\n {% set _ = is_param_set(message, field="name") %}\n {% set is_message_name_set = ns.is_last_checked_defined %}\n {% if is_message_name_set %}\n {% set message_prefix = handle_role(role) + "(" + message.name + ")" %}\n {% else %}\n {% set message_prefix = handle_role(role) %}\n {% endif %}\n 
{% set content = (message.content or "") %}\n {% if content is not string %}\n {% set content = content|tojson %}\n {% endif %}\n {% if ns.message_count > 0 %}\n {{- eom_str -}}\n {% endif %}\n {{- bom_str + message_prefix + content -}}\n {% set _ = is_param_set(message, field="tool_calls", is_list=True) %}\n {% set is_tool_calls_set = ns.is_last_checked_defined %}\n {% if role == "assistant" and is_tool_calls_set %}\n {{- handle_tool_calls(message.tool_calls) -}}\n {% endif %}\n {% set _ = is_param_set(message, field="citations", is_list=True) %}\n {% set is_citations_set = ns.is_last_checked_defined %}\n {% if role == "assistant" and is_citations_set %}\n {{- citations_prefix + message.citations|map(attribute="document_id")|list|string + citations_suffix -}}\n {% endif %}\n {% set ns.message_count = ns.message_count + 1 %}\n{% endfor %}\n{% if add_generation_prompt %}\n {% if ns.message_count > 0 %}\n {{- eom_str -}}\n {% endif %}\n {{- bom_str + handle_role(role_to_predict, add_space=False) -}}\n {% set _ = is_param_set(generation_preamble) %}\n {% set is_generation_preamble_set = ns.is_last_checked_defined %}\n {% if is_generation_preamble_set and generation_preamble.strip() != "" %}\n {{- " " + generation_preamble -}}\n {% endif %}\n {% set ns.message_count = ns.message_count + 1 %}\n{% else %}\n {% if ns.message_count > 0 %}\n {{- eom_str -}}\n {% endif %}\n{% endif %}\n',
"qwen_25": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n 
{%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
"exaone": "{% for message in messages %}{% if loop.first and message['role'] != 'system' %}{{ '[|system|][|endofturn|]\n' }}{% endif %}{{ '[|' + message['role'] + '|]' + message['content'] }}{% if message['role'] == 'user' %}{{ '\n' }}{% else %}{{ '[|endofturn|]\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '[|assistant|]' }}{% endif %}",
"metharme": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'Enter RP mode. You shall reply to the user while staying in character. Your responses must be detailed, creative, immersive, and drive the scenario forward.' %}{% endif %}{{ '<|system|>' + system_message }}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|user|>' + content.strip() }}{% elif message['role'] == 'assistant' %}{{ '<|model|>' + content.strip() }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|model|>' }}{% else %}{{ eos_token }}{% endif %}",
}
def get_chat_template(
    user_choice: str,
    jinja_template: Optional[str] = None,
    tokenizer: Optional["PreTrainedTokenizerBase"] = None,
):
    """
    Resolve the chat template string for the given user choice.

    Args:
        user_choice (str): The user's choice of template.
        jinja_template (Optional[str], optional): The jinja template string. Defaults to None.
        tokenizer (Optional[PreTrainedTokenizerBase], optional): The tokenizer. Defaults to None.

    Returns:
        str: The chosen template string.

    Raises:
        ValueError: If the user_choice is not found in the templates.
    """
    # Case 1: an explicit jinja template was supplied via config.
    if user_choice == _JINJA_TEMPALTE_CHOICE:
        if jinja_template:
            return jinja_template
        raise ValueError(
            f"`jinja_template` cannot be None when `chat_template` choice is {_JINJA_TEMPALTE_CHOICE}"
        )
    # Case 2: use whatever template ships with the tokenizer.
    if user_choice == _DEFAULT_TEMPLATE_CHOICE:
        if not tokenizer:
            raise ValueError(
                f"`tokenizer` cannot be None when chat_template choice is {_DEFAULT_TEMPLATE_CHOICE}"
            )
        if tokenizer.chat_template:
            return tokenizer.chat_template
        raise ValueError(
            f"`chat_template choice is {_DEFAULT_TEMPLATE_CHOICE} but tokenizer's chat_template is null. "
            f"Please add a chat_template in tokenizer config"
        )
    # Case 3: prefer the tokenizer's template, otherwise fall back to the
    # named built-in encoded after the prefix.
    if user_choice.startswith(_DEFAULT_FALLBACK_CHATML_TEMPLATE_CHOICE_PREFIX):
        if not tokenizer:
            raise ValueError(
                f"`tokenizer` cannot be None when chat_template choice starts with {_DEFAULT_FALLBACK_CHATML_TEMPLATE_CHOICE_PREFIX}"
            )
        if tokenizer.chat_template:
            return tokenizer.chat_template
        user_choice = user_choice[len(_DEFAULT_FALLBACK_CHATML_TEMPLATE_CHOICE_PREFIX):]
        LOG.warning(
            f"No chat template found on tokenizer, falling back to {user_choice}. It is recommended to set --train_on_inputs to True for the model to learn this chat template."
        )
    # Case 4: a plain built-in template name.
    if user_choice in _CHAT_TEMPLATES:
        return _CHAT_TEMPLATES[user_choice]
    raise ValueError(f"Template '{user_choice}' not found.")
def extract_chat_template_args(cfg, ds_cfg: Optional[Dict[str, Any]] = None):
    """
    Pick chat-template settings from the dataset config when it declares a
    `chat_template`, otherwise from the global config.

    Args:
        cfg: Global configuration mapping.
        ds_cfg (Optional[Dict[str, Any]], optional): Per-dataset configuration. Defaults to None.

    Returns:
        tuple: (chat_template choice, chat_template_jinja string or None)
    """
    # The dataset-level config wins only when it explicitly sets a template.
    source = ds_cfg if ds_cfg and ds_cfg.get("chat_template") else cfg
    choice = source.get("chat_template") or _DEFAULT_TEMPLATE_CHOICE
    jinja = source.get("chat_template_jinja")
    return choice, jinja
def get_chat_template_from_config(
    cfg,
    ds_cfg: Optional[Dict[str, Any]] = None,
    tokenizer: Optional["PreTrainedTokenizerBase"] = None,
) -> str:
    """Resolve the chat template string for the given configs and tokenizer."""
    # Extract the (choice, jinja) pair, then delegate the actual lookup.
    choice, jinja = extract_chat_template_args(cfg=cfg, ds_cfg=ds_cfg)
    return get_chat_template(
        user_choice=choice,
        jinja_template=jinja,
        tokenizer=tokenizer,
    )
def register_chat_template(template_name: str, chat_template: str):
    """
    Register a new named chat template in the module registry.

    Args:
        template_name (str): The name of the template.
        chat_template (str): The template string.

    Raises:
        ValueError: If a template with the same name is already registered.
    """
    already_registered = template_name in _CHAT_TEMPLATES
    if already_registered:
        raise ValueError(f"Template '{template_name}' already exists.")
    _CHAT_TEMPLATES[template_name] = chat_template
|
OpenAccess-AI-CollectiveREPO_NAMEaxolotlPATH_START.@axolotl_extracted@axolotl-main@src@axolotl@utils@chat_templates.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/sparse/linalg/dsolve/tests/__init__.py",
"type": "Python"
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@sparse@linalg@dsolve@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "create_lightcone_21cmfast_rerun.py",
"repo_name": "micbia/serenet",
"repo_path": "serenet_extracted/serenet-main/utils_data/create_lightcone_21cmfast_rerun.py",
"type": "Python"
}
|
import numpy as np, os, sys, tarfile
import tools21cm as t2c, py21cmfast as p2c
from datetime import datetime
from glob import glob
from sklearn.decomposition import PCA as sciPCA
import sys
sys.path.insert(0,'../')
from utils.other_utils import get_dir_size
# --- I/O paths (from the command line) --------------------------------------
# argv[1]: directory of an existing 21cmFAST run to post-process.
# argv[2]: output directory, or the literal 'same' to write into the input dir.
path_input = sys.argv[1]
path_input += '/' if path_input[-1] != '/' else ''
path_out = path_input if sys.argv[2] == 'same' else sys.argv[2]
path_out += '/' if path_out[-1] != '/' else ''
# Create the output tree; silently continue if it already exists.
try:
    os.makedirs(path_out)
    os.makedirs(path_out+'data')
    os.makedirs(path_out+'images')
    os.makedirs(path_out+'parameters')
except:
    pass
# Run name = last path component of the output directory.
name_of_the_run = path_out[path_out[:-1].rfind('/')+1:-1]
# MPI setup
# NOTE(review): "rank"/"nprocs" come from a SLURM job array, not MPI proper;
# each array task processes an independent slice of the lightcone indices.
rank = int(os.environ['SLURM_ARRAY_TASK_ID'])
nprocs = int(os.environ['SLURM_ARRAY_TASK_COUNT'])
print(' Starting rank %d at: %s' %(rank, datetime.now().strftime('%H:%M:%S')))
# 21cmFAST parameters
COMPRESS = False  # if True, periodically tar the output and clear data/
#RERUN = ['dT3', 'dT4', 'dT4pca', 'dT5', 'dT5pca']
RERUN = ['dT4pca']  # which derived data products to (re)create per index
nr = 4 # componens to remove in PCA
uvfile = '/store/ska/sk09/segunet/uvmap_128_z7-20.pkl'  # cached uv-coverage map
z_min, z_max = 7, 11  # lightcone redshift range
tobs = 1000.  # observation time for the noise model (hours, presumably — TODO confirm)
MAKE_PLOT = False
# Loop parameters
loop_start, loop_end = 0, 10000  # global range of lightcone indices
perrank = (loop_end-loop_start)//nprocs  # indices handled by each array task
"""
path_cache = '/scratch/snx3000/mibianco/_cache%d/' %rank
if not (os.path.exists(path_cache)):
    os.makedirs(path_cache)
else:
    os.system('rm %s*h5' %path_cache)
p2c.config['direc'] = path_cache
"""
# read parameters files
# NOTE(review): eval() executes arbitrary code read from disk; acceptable
# only because these files were written by a trusted earlier run.
try:
    params = eval(open(path_input+'parameters/user_params.txt', 'r').read())
    c_params = eval(open(path_input+'parameters/cosm_params.txt', 'r').read())
    astro_params = np.loadtxt(path_input+'parameters/astro_params.txt')
    redshifts = np.loadtxt(path_input+'lc_redshifts.txt')
except FileNotFoundError as error:
    print(error)
# Set loop ending index per processor
# The last rank absorbs the remainder of the integer division.
i_start = int(loop_start+rank*perrank)
if(rank != nprocs-1):
    i_end = int(loop_start+(rank+1)*perrank)
else:
    i_end = loop_end
# Start loop
print(' Processors repartition:\n rank %d\t%d\t%d' %(rank, i_start, i_end))
for i in range(i_start, i_end):
    #if not (os.path.exists(path_out+'data/dT3_21cm_i%d.bin' %i)):
    # Recompute the raw lightcone only when neither the brightness-temperature
    # nor the neutral-fraction cube exists yet (note: `and` binds tighter than
    # `or` in this condition).
    if ('dT' in RERUN and not (os.path.exists(path_input+'data/dT_21cm_i%d.bin' %i) or os.path.exists(path_out+'data/dT_21cm_i%d.bin' %i)) or 'xHI' in RERUN and not (os.path.exists(path_input+'data/xHI_21cm_i%d.bin' %i) or os.path.exists(path_out+'data/xHI_21cm_i%d.bin' %i))):
        # Define astronomical parameters
        eff_fact, Rmfp, Tvir, rseed = astro_params[i, 1:]
        a_params = {'HII_EFF_FACTOR':eff_fact, 'R_BUBBLE_MAX':Rmfp, 'ION_Tvir_MIN':Tvir}
        print(' Re-run random seed:\t %d' %rseed)
        # Use the working directory as 21cmFAST cache; clear stale .h5 boxes.
        path_cache = './'
        try:
            os.system('rm %s*h5' %path_cache)
        except:
            pass
        print(' re-calculate lightcone...')
        # Re-run the simulation with the original random seed so the
        # lightcone is reproduced deterministically.
        lightcone = p2c.run_lightcone(redshift=z_min, max_redshift=z_max,
                            user_params=params, astro_params=a_params, cosmo_params=c_params,
                            lightcone_quantities=("brightness_temp", 'xH_box'),
                            #flag_options={"USE_TS_FLUCT": True},
                            global_quantities=("brightness_temp", 'xH_box'),
                            direc=path_cache, random_seed=rseed)
        dT = lightcone.brightness_temp
        t2c.save_cbin(path_out+'data/dT_21cm_i%d.bin' %i, dT)
        t2c.save_cbin(path_out+'data/xHI_21cm_i%d.bin' %i, lightcone.xH_box)
    # dT2: mean-removed, smoothed signal (no instrumental effects).
    if('dT2' in RERUN and not (os.path.exists(path_input+'data/dT2_21cm_i%d.bin' %i) or os.path.exists(path_out+'data/dT2_21cm_i%d.bin' %i))):
        dT = t2c.read_cbin(path_input+'data/dT_21cm_i%d.bin' %i)
        dT2, _ = t2c.smooth_lightcone(t2c.subtract_mean_signal(dT, los_axis=2), z_array=redshifts, box_size_mpc=params['BOX_LEN'])
        t2c.save_cbin(path_out+'data/dT2_21cm_i%d.bin' %i, dT2) # smooth(dT - avrg_dT)
    # Shared inputs (raw signal + instrumental noise) consumed by the
    # dT3/dT4/dT5 branches below.
    if('dT3' in RERUN or 'dT4' in RERUN or 'dT5' in RERUN):
        dT = t2c.read_cbin(path_input+'data/dT_21cm_i%d.bin' %i)
        lc_noise = t2c.noise_lightcone(ncells=params['HII_DIM'],
                                    zs=redshifts,
                                    obs_time=tobs, save_uvmap=uvfile,
                                    boxsize=params['BOX_LEN'], n_jobs=1)
    # dT3: signal + noise.
    if('dT3' in RERUN and not (os.path.exists(path_input+'data/dT3_21cm_i%d.bin' %i) or os.path.exists(path_out+'data/dT3_21cm_i%d.bin' %i))):
        dT3, _ = t2c.smooth_lightcone(t2c.subtract_mean_signal(dT + lc_noise, los_axis=2), z_array=redshifts, box_size_mpc=params['BOX_LEN'])
        t2c.save_cbin(path_out+'data/dT3_21cm_i%d.bin' %i, dT3) # smooth(dT + noise - avrg_dT)
    # dT4: signal + noise + galactic synchrotron foreground.
    if('dT4' in RERUN and not (os.path.exists(path_input+'data/dT4_21cm_i%d.bin' %i) or os.path.exists(path_out+'data/dT4_21cm_i%d.bin' %i))):
        gal_fg = t2c.galactic_synch_fg(z=redshifts, ncells=params['HII_DIM'], boxsize=params['BOX_LEN'], rseed=rseed)
        dT4, _ = t2c.smooth_lightcone(t2c.subtract_mean_signal(dT + lc_noise + gal_fg, los_axis=2), z_array=redshifts, box_size_mpc=params['BOX_LEN'])
        t2c.save_cbin(path_out+'data/dT4_21cm_i%d.bin' %i, dT4) # smooth(dT + noise + gf - avrg_dT)
    # dT4pca: dT4 with the first `nr` PCA modes (foreground proxy) removed.
    if('dT4pca' in RERUN and not (os.path.exists(path_input+'data/dT4pca%s_21cm_i%d.bin' %(str(nr), i)) or os.path.exists(path_out+'data/dT4pca%s_21cm_i%d.bin' %(str(nr), i)))):
        dT4 = t2c.read_cbin(path_input+'data/dT4_21cm_i%d.bin' %i)
        # Flatten the two sky axes into samples; PCA acts along the LoS axis.
        data_flat = np.reshape(dT4, (-1, dT4.shape[2]))
        pca = sciPCA(n_components=nr)
        datapca = pca.fit_transform(data_flat)
        pca_FG = pca.inverse_transform(datapca)
        # Residual = data minus its projection onto the first `nr` components.
        dT4pca = np.reshape(data_flat - pca_FG, dT4.shape)
        t2c.save_cbin(path_out+'data/dT4pca%s_21cm_i%d.bin' %(str(nr), i), dT4pca)
    # dT5: signal + noise + galactic and extragalactic foregrounds.
    if('dT5' in RERUN and not (os.path.exists(path_input+'data/dT5_21cm_i%d.bin' %i) or os.path.exists(path_out+'data/dT5_21cm_i%d.bin' %i))):
        gal_fg = t2c.galactic_synch_fg(z=redshifts, ncells=params['HII_DIM'], boxsize=params['BOX_LEN'], rseed=rseed)
        exgal_fg = t2c.extragalactic_pointsource_fg(z=redshifts, ncells=params['HII_DIM'], boxsize=params['BOX_LEN'], rseed=rseed)
        dT5, _ = t2c.smooth_lightcone(t2c.subtract_mean_signal(dT + lc_noise + exgal_fg + gal_fg, los_axis=2), z_array=redshifts, box_size_mpc=params['BOX_LEN'])
        t2c.save_cbin(path_out+'data/dT5_21cm_i%d.bin' %i, dT5) # smooth(dT + noise + gf + exgf - avrg_dT)
        np.save(path_out+'data/dTexgf_21cm_i%d.npy' %i, exgal_fg[..., 0]) # save just the extragalactic points first slice
    # dT5pca: dT5 with the first 7 PCA modes removed
    # (note: hard-coded 7 here, not `nr` — presumably intentional; TODO confirm).
    if('dT5pca' in RERUN and not (os.path.exists(path_input+'data/dT5pca_21cm_i%d.bin' %i) or os.path.exists(path_out+'data/dT5pca_21cm_i%d.bin' %i))):
        dT5 = t2c.read_cbin(path_input+'data/dT5_21cm_i%d.bin' %i)
        data_flat = np.reshape(dT5, (-1, dT5.shape[2]))
        pca = sciPCA(n_components=7)
        datapca = pca.fit_transform(data_flat)
        pca_FG = pca.inverse_transform(datapca)
        dT5pca = np.reshape(data_flat - pca_FG, dT5.shape)
        t2c.save_cbin(path_out+'data/dT5pca_21cm_i%d.bin' %i, dT5pca)
    # xH: binary neutral-fraction mask from the smoothed xHI cube.
    if('xH' in RERUN and not (os.path.exists(path_input+'data/xH_21cm_i%d.bin' %i) or os.path.exists(path_out+'data/xH_21cm_i%d.bin' %i))):
        xHI = t2c.read_cbin(path_input+'data/xHI_21cm_i%d.bin' %i)
        smt_xn, _ = t2c.smooth_lightcone(xHI, z_array=redshifts, box_size_mpc=params['BOX_LEN'])
        mask_xH = smt_xn>0.5
        t2c.save_cbin(path_out+'data/xH_21cm_i%d.bin' %i, mask_xH)
    # if output dir is more than 15 GB of size, compress and remove files in data/
    # NOTE(review): the threshold actually tested below is 8 (units set by
    # get_dir_size, presumably GB) — the "15 GB" above looks stale; confirm.
    if(get_dir_size(path_out) >= 8 and COMPRESS):
        # start with compression on rank=0
        # NOTE(review): despite that comment, the check is for the LAST rank.
        if(rank == nprocs-1):
            # Resume bookkeeping if a previous tar part was already written.
            if(os.path.isfile(path_out+'written.txt')):
                strd = np.loadtxt('%swritten.txt' %(path_out), dtype=str, delimiter='\n')
                content = np.loadtxt('%scontent.txt' %(path_out+'data/'))
                i_part = strd.size+1
            else:
                strd = np.array([])
                content = np.zeros(loop_end)
                i_part = 1
            # file with content
            # Map each finished lightcone index to the tar part that holds it.
            idx_content = [int(cont[cont.rfind('_i')+2:cont.rfind('.')]) for i_c, cont in enumerate(glob(path_out+'data/xH_21cm_i*'))]
            content[idx_content] = i_part
            np.savetxt('%sdata/content.txt' %path_out, content, fmt='%d')
            # compress data
            os.system('tar -czvf %s_part%d.tar.gz %s/' %(name_of_the_run, i_part, name_of_the_run))
            # get list of content and prepare it for the data_generator
            mytar = tarfile.open('%s_part%d.tar.gz' %(name_of_the_run, i_part), 'r')
            tar_content = mytar.getmembers()
            tar_names = mytar.getnames()
            np.save('%sdata/tar_content_part%d' %(path_out, i_part), tar_content)
            np.save('%sdata/tar_names_part%d' %(path_out, i_part), tar_names)
            mytar.close()
            # note down the compressed file
            np.savetxt('%swritten.txt' %(path_out), np.append(strd, ['%s written %s_part%d.tar.gz' %(datetime.now().strftime('%d/%m/%Y %H:%M:%S'), path_out[path_out[:-1].rfind('/')+1:-1], i_part)]), delimiter='\n', fmt='%s')
            # free the space in the data/ directory
            os.system('rm %sdata/*.bin' %path_out)
            print(' \n Data created exeed 15GB. Compression completed...')
# wait that all processors are done before concluding the job
# NOTE(review): there is no actual barrier here — only the last array task
# runs the final gather/compression, and nothing guarantees its siblings
# have finished; confirm this is acceptable for the workflow.
if(rank == nprocs-1 and COMPRESS):
    print(' Gather done:\t%s\n' %datetime.now().strftime('%H:%M:%S'))
    # merge the different astro_params_rank*.txt files into one
    for i_p in range(nprocs):
        data = np.loadtxt('%sastro_params_rank%d.txt' %(path_out+'parameters/', i_p))
        if(i_p == 0):
            stack_data = data
        else:
            stack_data = np.vstack((stack_data, data))
    np.savetxt('%sastro_params.txt' %(path_out+'parameters/'), stack_data, header='HII_EFF_FACTOR: The ionizing efficiency of high-z galaxies\nR_BUBBLE_MAX: Mean free path in Mpc of ionizing photons within ionizing regions\nION_Tvir_MIN: Minimum virial Temperature of star-forming haloes in log10 units\ni\teff_f\tRmfp\tTvir\tseed', fmt='%d\t%.3f\t%.3f\t%.3f\t%d')
    # Final compression pass — same bookkeeping as the in-loop block above.
    if(os.path.isfile(path_out+'written.txt')):
        strd = np.loadtxt('%swritten.txt' %(path_out), dtype=str, delimiter='\n')
        content = np.loadtxt('%sdata/content.txt' %(path_out))
        i_part = strd.size+1
    else:
        strd = np.array([])
        content = np.zeros(loop_end)
        i_part = 1
    # file with content
    idx_content = [int(cont[cont.rfind('_i')+2:cont.rfind('.')]) for i_c, cont in enumerate(glob(path_out+'data/xH_21cm_i*'))]
    content[idx_content] = i_part
    np.savetxt('%sdata/content.txt' %path_out, content, fmt='%d')
    # compress data
    os.system('tar -czvf %s_part%d.tar.gz %s/' %(name_of_the_run, i_part, name_of_the_run))
    # get list of content and prepare it for the data_generator
    mytar = tarfile.open('%s_part%d.tar.gz' %(name_of_the_run, i_part), 'r')
    tar_content = mytar.getmembers()
    tar_names = mytar.getnames()
    np.save('%sdata/tar_content_part%d' %(path_out, i_part), tar_content)
    np.save('%sdata/tar_names_part%d' %(path_out, i_part), tar_names)
    mytar.close()
    # note down the compressed file
    np.savetxt('%swritten.txt' %(path_out), np.append(strd, ['%s written %s_part%d.tar.gz' %(datetime.now().strftime('%d/%m/%Y %H:%M:%S'), path_out[path_out[:-1].rfind('/')+1:-1], i_part)]), delimiter='\n', fmt='%s')
    # free the space in the data/ directory
    os.system('rm %sdata/*.bin' %path_out)
    os.system('mv %s../*tar.gz %sdata/' %(path_out, path_out))
# remove ranks cache directories
#os.system('rm -r %s' %path_cache)
print('... rank %d done at %s.' %(rank, datetime.now().strftime('%H:%M:%S')))
|
micbiaREPO_NAMEserenetPATH_START.@serenet_extracted@serenet-main@utils_data@create_lightcone_21cmfast_rerun.py@.PATH_END.py
|
{
"filename": "shapes.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/cairosvg_ES/shapes.py",
"type": "Python"
}
|
"""
Shapes drawers.
"""
from math import pi
from .helpers import normalize, point, point_angle, size
def circle(surface, node):
    """Draw a circle ``node`` on ``surface``."""
    radius = size(surface, node.get('r'))
    # A missing or zero radius means nothing to draw.
    if not radius:
        return
    center_x = size(surface, node.get('cx'), 'x')
    center_y = size(surface, node.get('cy'), 'y')
    surface.context.new_sub_path()
    surface.context.arc(center_x, center_y, radius, 0, 2 * pi)
def ellipse(surface, node):
    """Draw an ellipse ``node`` on ``surface``."""
    radius_x = size(surface, node.get('rx'), 'x')
    radius_y = size(surface, node.get('ry'), 'y')
    # Either radius missing or zero: nothing to draw.
    if not radius_x or not radius_y:
        return
    center_x = size(surface, node.get('cx'), 'x')
    center_y = size(surface, node.get('cy'), 'y')
    # Draw the ellipse as a circle under a temporary y-axis scaling,
    # then restore the transform.
    squash = radius_y / radius_x
    surface.context.new_sub_path()
    surface.context.save()
    surface.context.scale(1, squash)
    surface.context.arc(center_x, center_y / squash, radius_x, 0, 2 * pi)
    surface.context.restore()
def line(surface, node):
    """Draw a line ``node``."""
    coordinates = [
        size(surface, node.get(name), name[0])
        for name in ('x1', 'y1', 'x2', 'y2')]
    x1, y1, x2, y2 = coordinates
    surface.context.move_to(x1, y1)
    surface.context.line_to(x2, y2)
    # Record endpoints and the (incoming, outgoing) angles for marker drawing.
    angle = point_angle(x1, y1, x2, y2)
    node.vertices = [(x1, y1), (pi - angle, angle), (x2, y2)]
def polygon(surface, node):
    """Draw a polygon ``node`` on ``surface``."""
    # A polygon is a closed polyline: draw the open outline first,
    # then close the path back to its starting point.
    polyline(surface, node)
    surface.context.close_path()
def polyline(surface, node):
    """Draw a polyline ``node``."""
    remaining = normalize(node.get('points', ''))
    if not remaining:
        return
    # First point starts the path.
    x, y, remaining = point(surface, remaining)
    surface.context.move_to(x, y)
    node.vertices = [(x, y)]
    # Each following point adds a segment; between every pair of points we
    # also record the (incoming, outgoing) angle tuple for marker drawing.
    while remaining:
        previous_x, previous_y = x, y
        x, y, remaining = point(surface, remaining)
        angle = point_angle(previous_x, previous_y, x, y)
        node.vertices.append((pi - angle, angle))
        surface.context.line_to(x, y)
        node.vertices.append((x, y))
def rect(surface, node):
    """Draw a rect ``node`` on ``surface``."""
    x, y = size(surface, node.get('x'), 'x'), size(surface, node.get('y'), 'y')
    width = size(surface, node.get('width'), 'x')
    height = size(surface, node.get('height'), 'y')
    # Corner radii: when only one of rx/ry is given, mirror it to the other.
    rx = node.get('rx')
    ry = node.get('ry')
    if rx and ry is None:
        ry = rx
    elif ry and rx is None:
        rx = ry
    rx = size(surface, rx, 'x')
    ry = size(surface, ry, 'y')
    if rx == 0 or ry == 0:
        # No rounding: a plain rectangle.
        surface.context.rectangle(x, y, width, height)
    else:
        # Clamp the radii so opposite corners cannot overlap.
        if rx > width / 2:
            rx = width / 2
        if ry > height / 2:
            ry = height / 2
        # Inspired by Cairo Cookbook
        # http://cairographics.org/cookbook/roundedrectangles/
        # Each quarter-ellipse corner is approximated by one cubic Bezier;
        # ARC_TO_BEZIER is the standard control-point factor for that.
        ARC_TO_BEZIER = 4 * (2 ** .5 - 1) / 3
        c1 = ARC_TO_BEZIER * rx
        c2 = ARC_TO_BEZIER * ry
        # Trace the outline clockwise starting right of the top-left corner:
        # alternate straight edges (rel_line_to) and corner curves
        # (rel_curve_to), all in coordinates relative to the current point.
        surface.context.new_path()
        surface.context.move_to(x + rx, y)
        surface.context.rel_line_to(width - 2 * rx, 0)
        surface.context.rel_curve_to(c1, 0, rx, c2, rx, ry)
        surface.context.rel_line_to(0, height - 2 * ry)
        surface.context.rel_curve_to(0, c2, c1 - rx, ry, -rx, ry)
        surface.context.rel_line_to(-width + 2 * rx, 0)
        surface.context.rel_curve_to(-c1, 0, -rx, -c2, -rx, -ry)
        surface.context.rel_line_to(0, -height + 2 * ry)
        surface.context.rel_curve_to(0, -c2, rx - c1, -ry, rx, -ry)
        surface.context.close_path()
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@cairosvg_ES@shapes.py@.PATH_END.py
|
{
"filename": "test_downloader.py",
"repo_name": "PlasmaPy/PlasmaPy",
"repo_path": "PlasmaPy_extracted/PlasmaPy-main/tests/utils/data/test_downloader.py",
"type": "Python"
}
|
import os
import warnings
from pathlib import Path
import numpy as np
import pytest
from plasmapy.utils.data.downloader import _API_CONNECTION_ESTABLISHED, Downloader
# Module-wide skip marker for tests/fixtures that need the remote data
# repository; they are skipped when the connection probe at import time failed.
check_database_connection = pytest.mark.skipif(
    not _API_CONNECTION_ESTABLISHED, reason="failed to connect to data repository"
)
def in_ci() -> bool:
    """
    Return ``True`` when running on CI, detected via the ``GITHUB_ACTIONS``
    environment variable that GitHub always sets on its runners.
    """
    return os.environ.get("GITHUB_ACTIONS") is not None
@pytest.fixture(scope="module")
@check_database_connection
def downloader_validated(tmpdir_factory) -> Downloader:
api_token = os.environ["GH_TOKEN"] if in_ci() else None
# tmpdir_factory creates a session-scoped temporary directory
# while the tmp_path variable is function scoped
#
# Making this a session-scope directory means that other tests
# initialized with it should be able to access files if they are
# already downloaded by another test
path = tmpdir_factory.mktemp("data")
return Downloader(directory=path, api_token=api_token)
@check_database_connection
@pytest.mark.skipif(
    not in_ci(), reason="Tests only use authenticated API calls when run in CI."
)
def test_api_token(downloader_validated: Downloader) -> None:
    """
    Test whether the API connection is valid
    """
    rate_limit, _used = downloader_validated._api_usage
    # API limit is 5000/hr for auth user accounts, 60/hr without auth
    assert rate_limit >= 5000
@pytest.fixture(scope="module")
@check_database_connection
def downloader_unvalidated(tmpdir_factory) -> Downloader:
path = tmpdir_factory.mktemp("unvalidated")
return Downloader(directory=path, validate=False)
# (url, expected exception) pairs for test_http_request; None means the
# request is expected to succeed.
test_urls = [
    # Test with a page we know is up if the tests are running
    ("https://github.com/PlasmaPy/PlasmaPy", None),
    # Test with a known 404
    ("https://www.google.com/404", ValueError),
]
@check_database_connection
@pytest.mark.parametrize(("url", "expected"), test_urls)
def test_http_request(
    downloader_validated: Downloader, url: str, expected: None | Exception
) -> None:
    """
    Test exceptions from http downloader
    """
    # A None expectation means the request must simply not raise.
    if expected is None:
        downloader_validated._http_request(url)
        return
    with pytest.raises(expected):
        downloader_validated._http_request(url)
@check_database_connection
def test_blob_file(downloader_validated: Downloader) -> None:
    """
    Test the read and write blob file routines
    """
    dl = downloader_validated
    sentinel = "abc123"
    # Store a known value and persist it to disk.
    dl._blob_dict["test_key"] = sentinel
    dl._write_blobfile()
    # Clobber the in-memory value without writing.
    dl._blob_dict["test_key"] = "not the same string"
    # Reloading from disk must restore the persisted value.
    dl._read_blobfile()
    assert dl._blob_dict["test_key"] == sentinel
@check_database_connection
def test_update_blob_entry(downloader_validated) -> None:
    """
    Test the logic in the _update_blob_entry function
    """
    dl = downloader_validated
    # With no keyword arguments every field is initialized to None.
    dl._update_blob_entry("f1")
    assert "f1" in dl._blob_dict
    entry = dl._blob_dict["f1"]
    for field in ("local_sha", "repo_sha", "download_url"):
        assert entry[field] is None
    # Explicit keyword values overwrite the placeholders.
    dl._update_blob_entry("f1", local_sha="1", repo_sha="2", download_url="3")
    assert "f1" in dl._blob_dict
    entry = dl._blob_dict["f1"]
    assert entry["local_sha"] == "1"
    assert entry["repo_sha"] == "2"
    assert entry["download_url"] == "3"
# (filename, expected exception) pairs for test_get_file; None means the
# download is expected to succeed.
test_files = [
    # Test downloading a file
    ("NIST_PSTAR_aluminum.txt", None),
    # Test with a different file type
    ("plasmapy_logo.png", None),
    # Test an h5 file
    ("test.h5", None),
    # Test that trying to download a file that doesn't exist raises an
    # exception.
    ("not-a-real-file.txt", ValueError),
]
@pytest.mark.slow
@pytest.mark.parametrize(
    "downloader", ["downloader_validated", "downloader_unvalidated"]
)
@pytest.mark.parametrize(("filename", "expected"), test_files)
@check_database_connection
def test_get_file(
    filename: str, expected: Exception | None, downloader: str, request
) -> None:
    """Test the get_file function.

    Parameters
    ----------
    filename : str
        Name of the file to request from the downloader.
    expected : Exception | None
        Exception type expected to be raised, or `None` for success.
    downloader : str
        Name of the downloader fixture to resolve (the annotation was
        previously ``Downloader``, but the parametrized values are strings).
    request : pytest.FixtureRequest
        Used to resolve the fixture by name.
    """
    # Get the downloader fixture based on the string name provided
    dl = request.getfixturevalue(downloader)

    # Scope the warning filter to this test: the previous module-level
    # ``warnings.filterwarnings`` call leaked into every subsequent test.
    with warnings.catch_warnings():
        # Silence warnings from files not found on the repository
        warnings.filterwarnings("ignore", category=UserWarning)

        filepath = dl._filepath(filename)

        if expected is not None:
            with pytest.raises(expected):
                dl.get_file(filename)
        else:
            # Download data (or check that it already exists)
            assert dl.get_file(filename) == filepath

            # Get the file again; it already exists, so no second download
            assert dl.get_file(filename) == filepath
@pytest.mark.parametrize(
    "downloader", ["downloader_validated", "downloader_unvalidated"]
)
@check_database_connection
def test_get_local_only_file(downloader: str, request) -> None:
    """
    Test retrieval of a file that exists locally but not on the remote.

    The ``downloader`` parameter is the *name* of a downloader fixture
    (the annotation was previously ``Downloader``, but strings are passed).
    """
    # Get the downloader fixture based on the string name provided
    dl = request.getfixturevalue(downloader)
    # Find the folder used to save files for this downloader
    tmp_path = dl._download_directory

    # Create a local file that is not present on the remote repository.
    filename = "not_on_the_repo.txt"
    filepath = Path(tmp_path, filename)
    with filepath.open("w") as f:
        f.write("Not data")

    # Scope the warning filter to this test instead of leaking it into the
    # rest of the session via a bare ``warnings.filterwarnings`` call.
    with warnings.catch_warnings():
        # Silence warnings from files not found on the repository
        warnings.filterwarnings("ignore", category=UserWarning)

        # The file can be retrieved even before it appears in the blob file.
        assert dl.get_file(filename) == filepath

        # Add it to the blob file, then retrieve it again.
        dl._update_blob_entry(filename, local_sha="123")
        dl._write_blobfile()
        assert dl.get_file(filename) == filepath

        # A file that is neither local nor on the remote raises ValueError.
        with pytest.raises(ValueError):
            dl.get_file("not_anywhere.txt")
@check_database_connection
def test_get_file_NIST_PSTAR_datafile(downloader_validated) -> None:
    """Download the NIST PSTAR aluminum file and spot-check its first row."""
    # Scope the warning filter to this test; the previous module-level
    # ``warnings.filterwarnings`` call leaked into later tests.
    with warnings.catch_warnings():
        # Silence warnings from files not found on the repository
        warnings.filterwarnings("ignore", category=UserWarning)

        # Download data (or check that it already exists)
        path = downloader_validated.get_file("NIST_PSTAR_aluminum.txt")

    arr = np.loadtxt(path, skiprows=7)
    # First data row of the table: energy 1e-3, stopping power 1.043e2.
    assert np.allclose(arr[0, :], np.array([1e-3, 1.043e2]))
@pytest.mark.flaky(reruns=2)
@check_database_connection
def test_at_most_one_api_call(downloader_validated) -> None:
    """
    Test that at most one API call is made over multiple file queries.
    """
    files = ["NIST_PSTAR_aluminum.txt", "plasmapy_logo.png", "test.h5"]

    # ``_api_usage`` yields (limit, used); the limit itself is unused here,
    # so bind it to ``_`` instead of a dead ``limit`` variable.
    _, used0 = downloader_validated._api_usage

    # Scope the warning filter to this test rather than leaking it.
    with warnings.catch_warnings():
        # Silence warnings from files not found on the repository
        warnings.filterwarnings("ignore", category=UserWarning)
        for file in files:
            downloader_validated.get_file(file)

    _, used1 = downloader_validated._api_usage
    # Fetching several files must cost no more than one additional API call.
    assert used1 <= used0 + 1
@check_database_connection
def test_creating_another_downloader(downloader_validated) -> None:
    """
    Create a second Downloader over the same directory, which exercises
    reading back the blob file written by the first instance.
    """
    second = Downloader(directory=downloader_validated._download_directory)
    name = "NIST_PSTAR_aluminum.txt"
    expected_path = second._filepath(name)
    assert second.get_file(name) == expected_path
@check_database_connection
def test_ensure_update_blob_dict_runs(downloader_validated: Downloader) -> None:
    """
    Ensure the ``_update_repo_blob_dict`` method gets exercised if the
    fixture has not already refreshed from the repository.
    """
    # If the fixture already updated from the repo, do nothing: this keeps
    # the test suite limited to a single API call.
    if downloader_validated._updated_blob_file_from_repo:
        return

    # Reset the timestamp so it does not suppress the update, then refresh.
    downloader_validated._blob_dict["_timestamp"] = 0
    downloader_validated._update_repo_blob_dict()
|
PlasmaPyREPO_NAMEPlasmaPyPATH_START.@PlasmaPy_extracted@PlasmaPy-main@tests@utils@data@test_downloader.py@.PATH_END.py
|
{
"filename": "ViewBoxFeatures.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/examples/ViewBoxFeatures.py",
"type": "Python"
}
|
"""
ViewBox is the general-purpose graphical container that allows the user to
zoom / pan to inspect any area of a 2D coordinate system.
This example demonstrates many of the features ViewBox provides.
"""
import numpy as np
import pyqtgraph as pg
x = np.arange(1000, dtype=float)
y = np.random.normal(size=1000)
y += 5 * np.sin(x/100)
win = pg.GraphicsLayoutWidget(show=True)
win.setWindowTitle('pyqtgraph example: ____')
win.resize(1000, 800)
win.ci.setBorder((50, 50, 100))
sub1 = win.addLayout()
sub1.addLabel("<b>Standard mouse interaction:</b><br>left-drag to pan, right-drag to zoom.")
sub1.nextRow()
v1 = sub1.addViewBox()
l1 = pg.PlotDataItem(y)
v1.addItem(l1)
sub2 = win.addLayout()
sub2.addLabel("<b>One-button mouse interaction:</b><br>left-drag zoom to box, wheel to zoom out.")
sub2.nextRow()
v2 = sub2.addViewBox()
v2.setMouseMode(v2.RectMode)
l2 = pg.PlotDataItem(y)
v2.addItem(l2)
win.nextRow()
sub3 = win.addLayout()
sub3.addLabel("<b>Locked aspect ratio when zooming.</b>")
sub3.nextRow()
v3 = sub3.addViewBox()
v3.setAspectLocked(1.0)
l3 = pg.PlotDataItem(y)
v3.addItem(l3)
sub4 = win.addLayout()
sub4.addLabel("<b>View limits:</b><br>prevent panning or zooming past limits.")
sub4.nextRow()
v4 = sub4.addViewBox()
v4.setLimits(xMin=-100, xMax=1100,
minXRange=20, maxXRange=500,
yMin=-10, yMax=10,
minYRange=1, maxYRange=10)
l4 = pg.PlotDataItem(y)
v4.addItem(l4)
win.nextRow()
sub5 = win.addLayout()
sub5.addLabel("<b>Linked axes:</b> Data in this plot is always X-aligned to<br>the plot above.")
sub5.nextRow()
v5 = sub5.addViewBox()
v5.setXLink(v3)
l5 = pg.PlotDataItem(y)
v5.addItem(l5)
sub6 = win.addLayout()
sub6.addLabel("<b>Disable mouse:</b> Per-axis control over mouse input.<br>"
"<b>Auto-scale-visible:</b> Automatically fit *visible* data within view<br>"
"(try panning left-right).")
sub6.nextRow()
v6 = sub6.addViewBox()
v6.setMouseEnabled(x=True, y=False)
v6.enableAutoRange(x=False, y=True)
v6.setXRange(300, 450)
v6.setAutoVisible(x=False, y=True)
l6 = pg.PlotDataItem(y)
v6.addItem(l6)
if __name__ == '__main__':
pg.exec()
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@examples@ViewBoxFeatures.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/examples/mixed_hmm/README.md",
"type": "Markdown"
}
|
# Hierarchical mixed-effect hidden Markov models
Note: This is a cleaned-up version of the seal experiments in [Obermeyer et al 2019] that is a simplified variant of some of the analysis in the [momentuHMM harbour seal example](https://github.com/bmcclintock/momentuHMM/blob/master/vignettes/harbourSealExample.R) [McClintock et al 2018].
Recent advances in sensor technology have made it possible to capture the movements of multiple wild animals within a single population at high spatiotemporal resolution over long periods of time [McClintock et al 2013, Towner et al 2016]. Discrete state-space models, where the latent state is thought of as corresponding to a behavior state such as "foraging" or "resting", have become popular computational tools for analyzing these new datasets thanks to their interpretability and tractability.
This example applies several different hierarchical discrete state-space models to location data recorded from a colony of harbour seals on foraging excursions in the North Sea [McClintock et al 2013].
The raw data are irregularly sampled time series (roughly 5-15 minutes between samples) of GPS coordinates and diving activity for each individual in the colony (10 male and 7 female) over the course of a single day recorded by lightweight tracking devices physically attached to each animal by researchers. They have been preprocessed using the momentuHMM example code into smoothed, temporally regular series of step sizes, turn angles, and diving activity for each individual.
The models are special cases of a time-inhomogeneous discrete state space model
whose state transition distribution is specified by a hierarchical generalized linear mixed model (GLMM).
At each timestep `t`, for each individual trajectory `b` in each group `a`, we have
```
logit(p(x[t,a,b] = state i | x[t-1,a,b] = state j)) =
(epsilon_G[a] + epsilon_I[a,b] + Z_I[a,b].T @ beta1 + Z_G[a].T @ beta2 + Z_T[t,a,b].T @ beta3)[i,j]
```
where `a,b` correspond to plate indices, `epsilon_G` and `epsilon_I` are independent random variables for each group and individual within each group respectively, `Z`s are covariates, and `beta`s are parameter vectors.
The random variables `epsilon` may be either discrete or continuous.
If continuous, they are normally distributed.
If discrete, they are sampled from a set of three possible values shared across the innermost plate of a particular variable.
That is, for each individual trajectory `b` in each group `a`, we sample single random effect values for an entire trajectory:
```
iota_G[a] ~ Categorical(pi_G)
epsilon_G[a] = Theta_G[iota_G[a]]
iota_I[a,b] ~ Categorical(pi_I[a])
epsilon_I[a,b] = Theta_I[a][iota_I[a,b]]
```
Here `pi_G`, `Theta_G`, `pi_I`, and `Theta_I` are all learnable real-valued parameter vectors and `epsilon` values are batches of vectors the size of state transition matrices.
Observations `y[t,a,b]` are represented as sequences of real-valued step lengths and turn angles, modelled by zero-inflated Gamma and von Mises likelihoods respectively.
The seal models also include a third observed variable indicating the amount of diving activity between successive locations, which we model with a zero-inflated Beta distribution following [McClintock et al 2018].
We grouped animals by sex and implemented versions of this model with (i) no random effects (as a baseline), and with random effects present at the (ii) group, (iii) individual, or (iv) group+individual levels. Unlike the models in [Towner et al 2016], we do not consider fixed effects on any of the parameters.
# References
* [Obermeyer et al 2019] Obermeyer, F.\*, Bingham, E.\*, Jankowiak, M.\*, Chiu, J., Pradhan, N., Rush, A., and Goodman, N. Tensor Variable Elimination for Plated Factor Graphs, 2019
* [McClintock et al 2013] McClintock, B. T., Russell, D. J., Matthiopoulos, J., and King, R. Combining individual animal movement and ancillary biotelemetry data to investigate population-level activity budgets. Ecology, 94(4):838–849, 2013
* [McClintock et al 2018] McClintock, B. T. and Michelot,T. momentuhmm: R package for generalized hidden markov models of animal movement. Methods in Ecology and Evolution, 9(6): 1518–1530, 2018. doi: 10.1111/2041-210X.12995
* [Towner et al 2016] Towner, A. V., Leos-Barajas, V., Langrock, R., Schick, R. S., Smale, M. J., Kaschke, T., Jewell, O. J., and Papastamatiou, Y. P. Sex-specific and individual preferences for hunting strategies in white sharks. Functional Ecology, 30(8):1397–1407, 2016.
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@examples@mixed_hmm@README.md@.PATH_END.py
|
{
"filename": "test_construct.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/sparse/tests/test_construct.py",
"type": "Python"
}
|
"""test sparse matrix construction functions"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, matrix
from numpy.testing import (assert_equal, assert_,
assert_array_equal, assert_array_almost_equal_nulp)
import pytest
from pytest import raises as assert_raises
from scipy._lib._testutils import check_free_memory
from scipy.sparse import csr_matrix, coo_matrix
from scipy.sparse import construct
from scipy.sparse.construct import rand as sprand
# All sparse matrix formats exercised by the format-specific tests below.
sparse_formats = ['csr','csc','coo','bsr','dia','lil','dok']

#TODO check whether format=XXX is respected
def _sprandn(m, n, density=0.01, format="coo", dtype=None, random_state=None):
# Helper function for testing.
if random_state is None:
random_state = np.random
elif isinstance(random_state, (int, np.integer)):
random_state = np.random.RandomState(random_state)
data_rvs = random_state.randn
return construct.random(m, n, density, format, dtype,
random_state, data_rvs)
class TestConstructUtils(object):
    def test_spdiags(self):
        """Check ``spdiags`` against hand-computed dense matrices for a
        range of diagonal-data / offset / shape combinations."""
        diags1 = array([[1, 2, 3, 4, 5]])
        diags2 = array([[1, 2, 3, 4, 5],
                        [6, 7, 8, 9,10]])
        diags3 = array([[1, 2, 3, 4, 5],
                        [6, 7, 8, 9,10],
                        [11,12,13,14,15]])

        # Each case is (data, offsets, m, n, expected dense result).
        cases = []
        cases.append((diags1, 0, 1, 1, [[1]]))
        cases.append((diags1, [0], 1, 1, [[1]]))
        cases.append((diags1, [0], 2, 1, [[1],[0]]))
        cases.append((diags1, [0], 1, 2, [[1,0]]))
        cases.append((diags1, [1], 1, 2, [[0,2]]))
        cases.append((diags1,[-1], 1, 2, [[0,0]]))
        cases.append((diags1, [0], 2, 2, [[1,0],[0,2]]))
        cases.append((diags1,[-1], 2, 2, [[0,0],[1,0]]))
        cases.append((diags1, [3], 2, 2, [[0,0],[0,0]]))
        cases.append((diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))
        cases.append((diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]]))
        cases.append((diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]]))

        cases.append((diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]]))
        cases.append((diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))
        cases.append((diags2, [2,-3], 6, 6, [[0,0,3,0,0,0],
                                             [0,0,0,4,0,0],
                                             [0,0,0,0,5,0],
                                             [6,0,0,0,0,0],
                                             [0,7,0,0,0,0],
                                             [0,0,8,0,0,0]]))

        cases.append((diags3, [-1,0,1], 6, 6, [[6,12, 0, 0, 0, 0],
                                               [1, 7,13, 0, 0, 0],
                                               [0, 2, 8,14, 0, 0],
                                               [0, 0, 3, 9,15, 0],
                                               [0, 0, 0, 4,10, 0],
                                               [0, 0, 0, 0, 5, 0]]))
        cases.append((diags3, [-4,2,-1], 6, 5, [[0, 0, 8, 0, 0],
                                                [11, 0, 0, 9, 0],
                                                [0,12, 0, 0,10],
                                                [0, 0,13, 0, 0],
                                                [1, 0, 0,14, 0],
                                                [0, 2, 0, 0,15]]))

        # Every case must densify to the expected matrix.
        for d,o,m,n,result in cases:
            assert_equal(construct.spdiags(d,o,m,n).todense(), result)
def test_diags(self):
a = array([1, 2, 3, 4, 5])
b = array([6, 7, 8, 9, 10])
c = array([11, 12, 13, 14, 15])
cases = []
cases.append((a[:1], 0, (1, 1), [[1]]))
cases.append(([a[:1]], [0], (1, 1), [[1]]))
cases.append(([a[:1]], [0], (2, 1), [[1],[0]]))
cases.append(([a[:1]], [0], (1, 2), [[1,0]]))
cases.append(([a[:1]], [1], (1, 2), [[0,1]]))
cases.append(([a[:2]], [0], (2, 2), [[1,0],[0,2]]))
cases.append(([a[:1]],[-1], (2, 2), [[0,0],[1,0]]))
cases.append(([a[:3]], [0], (3, 4), [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))
cases.append(([a[:3]], [1], (3, 4), [[0,1,0,0],[0,0,2,0],[0,0,0,3]]))
cases.append(([a[:1]], [-2], (3, 5), [[0,0,0,0,0],[0,0,0,0,0],[1,0,0,0,0]]))
cases.append(([a[:2]], [-1], (3, 5), [[0,0,0,0,0],[1,0,0,0,0],[0,2,0,0,0]]))
cases.append(([a[:3]], [0], (3, 5), [[1,0,0,0,0],[0,2,0,0,0],[0,0,3,0,0]]))
cases.append(([a[:3]], [1], (3, 5), [[0,1,0,0,0],[0,0,2,0,0],[0,0,0,3,0]]))
cases.append(([a[:3]], [2], (3, 5), [[0,0,1,0,0],[0,0,0,2,0],[0,0,0,0,3]]))
cases.append(([a[:2]], [3], (3, 5), [[0,0,0,1,0],[0,0,0,0,2],[0,0,0,0,0]]))
cases.append(([a[:1]], [4], (3, 5), [[0,0,0,0,1],[0,0,0,0,0],[0,0,0,0,0]]))
cases.append(([a[:1]], [-4], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[1,0,0]]))
cases.append(([a[:2]], [-3], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[1,0,0],[0,2,0]]))
cases.append(([a[:3]], [-2], (5, 3), [[0,0,0],[0,0,0],[1,0,0],[0,2,0],[0,0,3]]))
cases.append(([a[:3]], [-1], (5, 3), [[0,0,0],[1,0,0],[0,2,0],[0,0,3],[0,0,0]]))
cases.append(([a[:3]], [0], (5, 3), [[1,0,0],[0,2,0],[0,0,3],[0,0,0],[0,0,0]]))
cases.append(([a[:2]], [1], (5, 3), [[0,1,0],[0,0,2],[0,0,0],[0,0,0],[0,0,0]]))
cases.append(([a[:1]], [2], (5, 3), [[0,0,1],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]))
cases.append(([a[:3],b[:1]], [0,2], (3, 3), [[1,0,6],[0,2,0],[0,0,3]]))
cases.append(([a[:2],b[:3]], [-1,0], (3, 4), [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))
cases.append(([a[:4],b[:3]], [2,-3], (6, 6), [[0,0,1,0,0,0],
[0,0,0,2,0,0],
[0,0,0,0,3,0],
[6,0,0,0,0,4],
[0,7,0,0,0,0],
[0,0,8,0,0,0]]))
cases.append(([a[:4],b,c[:4]], [-1,0,1], (5, 5), [[6,11, 0, 0, 0],
[1, 7,12, 0, 0],
[0, 2, 8,13, 0],
[0, 0, 3, 9,14],
[0, 0, 0, 4,10]]))
cases.append(([a[:2],b[:3],c], [-4,2,-1], (6, 5), [[0, 0, 6, 0, 0],
[11, 0, 0, 7, 0],
[0,12, 0, 0, 8],
[0, 0,13, 0, 0],
[1, 0, 0,14, 0],
[0, 2, 0, 0,15]]))
# too long arrays are OK
cases.append(([a], [0], (1, 1), [[1]]))
cases.append(([a[:3],b], [0,2], (3, 3), [[1, 0, 6], [0, 2, 0], [0, 0, 3]]))
cases.append((np.array([[1, 2, 3], [4, 5, 6]]), [0,-1], (3, 3), [[1, 0, 0], [4, 2, 0], [0, 5, 3]]))
# scalar case: broadcasting
cases.append(([1,-2,1], [1,0,-1], (3, 3), [[-2, 1, 0],
[1, -2, 1],
[0, 1, -2]]))
for d, o, shape, result in cases:
try:
assert_equal(construct.diags(d, o, shape=shape).todense(),
result)
if shape[0] == shape[1] and hasattr(d[0], '__len__') and len(d[0]) <= max(shape):
# should be able to find the shape automatically
assert_equal(construct.diags(d, o).todense(), result)
except:
print("%r %r %r %r" % (d, o, shape, result))
raise
def test_diags_default(self):
a = array([1, 2, 3, 4, 5])
assert_equal(construct.diags(a).todense(), np.diag(a))
def test_diags_default_bad(self):
a = array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]])
assert_raises(ValueError, construct.diags, a)
def test_diags_bad(self):
a = array([1, 2, 3, 4, 5])
b = array([6, 7, 8, 9, 10])
c = array([11, 12, 13, 14, 15])
cases = []
cases.append(([a[:0]], 0, (1, 1)))
cases.append(([a[:4],b,c[:3]], [-1,0,1], (5, 5)))
cases.append(([a[:2],c,b[:3]], [-4,2,-1], (6, 5)))
cases.append(([a[:2],c,b[:3]], [-4,2,-1], None))
cases.append(([], [-4,2,-1], None))
cases.append(([1], [-5], (4, 4)))
cases.append(([a], 0, None))
for d, o, shape in cases:
try:
assert_raises(ValueError, construct.diags, d, o, shape)
except:
print("%r %r %r" % (d, o, shape))
raise
assert_raises(TypeError, construct.diags, [[None]], [0])
    def test_diags_vs_diag(self):
        # Check that
        #
        #    diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ...
        #
        # for random diagonals and offsets (seeded for reproducibility).
        np.random.seed(1234)

        for n_diags in [1, 2, 3, 4, 5, 10]:
            n = 1 + n_diags//2 + np.random.randint(0, 10)

            # Choose n_diags distinct offsets inside the matrix.
            offsets = np.arange(-n+1, n-1)
            np.random.shuffle(offsets)
            offsets = offsets[:n_diags]

            # Diagonal lengths shrink with |offset|.
            diagonals = [np.random.rand(n - abs(q)) for q in offsets]

            mat = construct.diags(diagonals, offsets)
            dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)])

            assert_array_almost_equal_nulp(mat.todense(), dense_mat)

            if len(offsets) == 1:
                # Single diagonal: also exercise the scalar-offset call form.
                mat = construct.diags(diagonals[0], offsets[0])
                dense_mat = np.diag(diagonals[0], offsets[0])
                assert_array_almost_equal_nulp(mat.todense(), dense_mat)
def test_diags_dtype(self):
x = construct.diags([2.2], [0], shape=(2, 2), dtype=int)
assert_equal(x.dtype, int)
assert_equal(x.todense(), [[2, 0], [0, 2]])
def test_diags_one_diagonal(self):
d = list(range(5))
for k in range(-5, 6):
assert_equal(construct.diags(d, k).toarray(),
construct.diags([d], [k]).toarray())
def test_diags_empty(self):
x = construct.diags([])
assert_equal(x.shape, (0, 0))
def test_identity(self):
assert_equal(construct.identity(1).toarray(), [[1]])
assert_equal(construct.identity(2).toarray(), [[1,0],[0,1]])
I = construct.identity(3, dtype='int8', format='dia')
assert_equal(I.dtype, np.dtype('int8'))
assert_equal(I.format, 'dia')
for fmt in sparse_formats:
I = construct.identity(3, format=fmt)
assert_equal(I.format, fmt)
assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
def test_eye(self):
assert_equal(construct.eye(1,1).toarray(), [[1]])
assert_equal(construct.eye(2,3).toarray(), [[1,0,0],[0,1,0]])
assert_equal(construct.eye(3,2).toarray(), [[1,0],[0,1],[0,0]])
assert_equal(construct.eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]])
assert_equal(construct.eye(3,3,dtype='int16').dtype, np.dtype('int16'))
for m in [3, 5]:
for n in [3, 5]:
for k in range(-5,6):
assert_equal(construct.eye(m, n, k=k).toarray(), np.eye(m, n, k=k))
if m == n:
assert_equal(construct.eye(m, k=k).toarray(), np.eye(m, n, k=k))
def test_eye_one(self):
assert_equal(construct.eye(1).toarray(), [[1]])
assert_equal(construct.eye(2).toarray(), [[1,0],[0,1]])
I = construct.eye(3, dtype='int8', format='dia')
assert_equal(I.dtype, np.dtype('int8'))
assert_equal(I.format, 'dia')
for fmt in sparse_formats:
I = construct.eye(3, format=fmt)
assert_equal(I.format, fmt)
assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
    def test_kron(self):
        """Sparse ``kron`` must agree with ``np.kron`` on dense inputs,
        including zero, negative, rectangular, and float cases."""
        cases = []

        cases.append(array([[0]]))
        cases.append(array([[-1]]))
        cases.append(array([[4]]))
        cases.append(array([[10]]))
        cases.append(array([[0],[0]]))
        cases.append(array([[0,0]]))
        cases.append(array([[1,2],[3,4]]))
        cases.append(array([[0,2],[5,0]]))
        cases.append(array([[0,2,-6],[8,0,14]]))
        cases.append(array([[5,4],[0,0],[6,0]]))
        cases.append(array([[5,4,4],[1,0,0],[6,0,8]]))
        cases.append(array([[0,1,0,2,0,5,8]]))
        cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]]))

        # Cross every case with every other case.
        for a in cases:
            for b in cases:
                result = construct.kron(csr_matrix(a),csr_matrix(b)).todense()
                expected = np.kron(a,b)
                assert_array_equal(result,expected)
    def test_kronsum(self):
        """Check the Kronecker sum identity
        kronsum(A, B) == kron(I, A) + kron(B, I) on square inputs."""
        cases = []

        cases.append(array([[0]]))
        cases.append(array([[-1]]))
        cases.append(array([[4]]))
        cases.append(array([[10]]))
        cases.append(array([[1,2],[3,4]]))
        cases.append(array([[0,2],[5,0]]))
        cases.append(array([[0,2,-6],[8,0,14],[0,3,0]]))
        cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]]))

        for a in cases:
            for b in cases:
                result = construct.kronsum(csr_matrix(a),csr_matrix(b)).todense()
                expected = np.kron(np.eye(len(b)), a) + \
                    np.kron(b, np.eye(len(a)))
                assert_array_equal(result,expected)
    def test_vstack(self):
        """Vertically stack sparse matrices; check values, explicit dtype,
        and the index dtypes of the CSR result."""
        A = coo_matrix([[1,2],[3,4]])
        B = coo_matrix([[5,6]])

        expected = matrix([[1, 2],
                           [3, 4],
                           [5, 6]])
        assert_equal(construct.vstack([A,B]).todense(), expected)
        assert_equal(construct.vstack([A,B], dtype=np.float32).dtype, np.float32)

        # CSR inputs take the fast compressed-stack path.
        assert_equal(construct.vstack([A.tocsr(),B.tocsr()]).todense(),
                     expected)
        assert_equal(construct.vstack([A.tocsr(),B.tocsr()], dtype=np.float32).dtype,
                     np.float32)
        # Small matrices should keep 32-bit index arrays.
        assert_equal(construct.vstack([A.tocsr(),B.tocsr()],
                                      dtype=np.float32).indices.dtype, np.int32)
        assert_equal(construct.vstack([A.tocsr(),B.tocsr()],
                                      dtype=np.float32).indptr.dtype, np.int32)
    def test_hstack(self):
        """Horizontally stack sparse matrices; check values and dtype for
        both COO and CSC inputs."""
        A = coo_matrix([[1,2],[3,4]])
        B = coo_matrix([[5],[6]])

        expected = matrix([[1, 2, 5],
                           [3, 4, 6]])
        assert_equal(construct.hstack([A,B]).todense(), expected)
        assert_equal(construct.hstack([A,B], dtype=np.float32).dtype, np.float32)

        # CSC inputs take the fast compressed-stack path.
        assert_equal(construct.hstack([A.tocsc(),B.tocsc()]).todense(),
                     expected)
        assert_equal(construct.hstack([A.tocsc(),B.tocsc()], dtype=np.float32).dtype,
                     np.float32)
    def test_bmat(self):
        """Assemble block matrices with ``bmat``, including None blocks,
        empty blocks, and shape-mismatch error messages."""
        A = coo_matrix([[1,2],[3,4]])
        B = coo_matrix([[5],[6]])
        C = coo_matrix([[7]])
        D = coo_matrix((0,0))

        expected = matrix([[1, 2, 5],
                           [3, 4, 6],
                           [0, 0, 7]])
        assert_equal(construct.bmat([[A,B],[None,C]]).todense(), expected)

        expected = matrix([[1, 2, 0],
                           [3, 4, 0],
                           [0, 0, 7]])
        assert_equal(construct.bmat([[A,None],[None,C]]).todense(), expected)

        expected = matrix([[0, 5],
                           [0, 6],
                           [7, 0]])
        assert_equal(construct.bmat([[None,B],[C,None]]).todense(), expected)

        # All-None or all-empty rows collapse to an empty matrix.
        expected = matrix(np.empty((0,0)))
        assert_equal(construct.bmat([[None,None]]).todense(), expected)
        assert_equal(construct.bmat([[None,D],[D,None]]).todense(), expected)

        # test bug reported in gh-5976
        expected = matrix([[7]])
        assert_equal(construct.bmat([[None,D],[C,None]]).todense(), expected)

        # test failure cases
        with assert_raises(ValueError) as excinfo:
            construct.bmat([[A], [B]])
        excinfo.match(r'Got blocks\[1,0\]\.shape\[1\] == 1, expected 2')

        with assert_raises(ValueError) as excinfo:
            construct.bmat([[A, C]])
        excinfo.match(r'Got blocks\[0,1\]\.shape\[0\] == 1, expected 2')
    @pytest.mark.slow
    def test_concatenate_int32_overflow(self):
        """ test for indptr overflow when concatenating matrices """
        # Requires ~30 GB of free memory; skipped otherwise.
        check_free_memory(30000)

        # n*n > 2**31, so the stacked indptr cannot fit in int32.
        n = 33000
        A = csr_matrix(np.ones((n, n), dtype=bool))
        B = A.copy()
        # NOTE(review): uses the private ``_compressed_sparse_stack`` helper
        # directly rather than the public vstack wrapper.
        C = construct._compressed_sparse_stack((A,B), 0)

        assert_(np.all(np.equal(np.diff(C.indptr), n)))
        # Index arrays must have been promoted to 64-bit.
        assert_equal(C.indices.dtype, np.int64)
        assert_equal(C.indptr.dtype, np.int64)
    def test_block_diag_basic(self):
        """ basic test for block_diag """
        A = coo_matrix([[1,2],[3,4]])
        B = coo_matrix([[5],[6]])
        C = coo_matrix([[7]])

        # Blocks are placed along the diagonal, padding with zeros.
        expected = matrix([[1, 2, 0, 0],
                           [3, 4, 0, 0],
                           [0, 0, 5, 0],
                           [0, 0, 6, 0],
                           [0, 0, 0, 7]])

        assert_equal(construct.block_diag((A, B, C)).todense(), expected)
def test_block_diag_scalar_1d_args(self):
""" block_diag with scalar and 1d arguments """
# one 1d matrix and a scalar
assert_array_equal(construct.block_diag([[2,3], 4]).toarray(),
[[2, 3, 0], [0, 0, 4]])
def test_block_diag_1(self):
""" block_diag with one matrix """
assert_equal(construct.block_diag([[1, 0]]).todense(),
matrix([[1, 0]]))
assert_equal(construct.block_diag([[[1, 0]]]).todense(),
matrix([[1, 0]]))
assert_equal(construct.block_diag([[[1], [0]]]).todense(),
matrix([[1], [0]]))
# just on scalar
assert_equal(construct.block_diag([1]).todense(),
matrix([[1]]))
    def test_random_sampling(self):
        # Simple sanity checks for sparse random sampling: dtype, shape,
        # nnz, reproducibility with a seed, output format, and density
        # argument validation.
        for f in sprand, _sprandn:
            for t in [np.float32, np.float64, np.longdouble]:
                x = f(5, 10, density=0.1, dtype=t)
                assert_equal(x.dtype, t)
                assert_equal(x.shape, (5, 10))
                assert_equal(x.nonzero()[0].size, 5)

            # The same integer seed and an equivalent RandomState must
            # produce identical samples.
            x1 = f(5, 10, density=0.1, random_state=4321)
            assert_equal(x1.dtype, np.double)

            x2 = f(5, 10, density=0.1, random_state=np.random.RandomState(4321))

            assert_array_equal(x1.data, x2.data)
            assert_array_equal(x1.row, x2.row)
            assert_array_equal(x1.col, x2.col)

            for density in [0.0, 0.1, 0.5, 1.0]:
                x = f(5, 10, density=density)
                assert_equal(x.nnz, int(density * np.prod(x.shape)))

            for fmt in ['coo', 'csc', 'csr', 'lil']:
                x = f(5, 10, format=fmt)
                assert_equal(x.format, fmt)

            # Densities outside [0, 1] are rejected.
            assert_raises(ValueError, lambda: f(5, 10, 1.1))
            assert_raises(ValueError, lambda: f(5, 10, -0.1))
    def test_rand(self):
        # Simple distributional checks for sparse.rand: uniform samples
        # must lie in [0, 1] for every kind of random_state argument.
        for random_state in None, 4321, np.random.RandomState():
            x = sprand(10, 20, density=0.5, dtype=np.float64,
                       random_state=random_state)
            assert_(np.all(np.less_equal(0, x.data)))
            assert_(np.all(np.less_equal(x.data, 1)))
    def test_randn(self):
        # Simple distributional checks for sparse.randn.
        # Statistically, some of these should be negative
        # and some should be greater than 1.
        for random_state in None, 4321, np.random.RandomState():
            x = _sprandn(10, 20, density=0.5, dtype=np.float64,
                         random_state=random_state)
            assert_(np.any(np.less(x.data, 0)))
            assert_(np.any(np.less(1, x.data)))
def test_random_accept_str_dtype(self):
# anything that np.dtype can convert to a dtype should be accepted
# for the dtype
a = construct.random(10, 10, dtype='d')
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@sparse@tests@test_construct.py@.PATH_END.py
|
{
"filename": "test_fringe.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/fringe/tests/test_fringe.py",
"type": "Python"
}
|
"""
Unit tests for fringe correction
"""
import pytest
import numpy as np
import numpy.random as rn
from stdatamodels.jwst.datamodels import IFUImageModel, FringeModel
from jwst.fringe import fringe
FRINGE_CONSTANT = 2. # correction will be input data divided by this factor
def test_data_correction(setup_inputs):
    """Test fringe correction on both good and NaN pixels.

    Good pixels should come back multiplied by ``FRINGE_CONSTANT`` (the
    fringe reference is 1/FRINGE_CONSTANT everywhere); NaN input pixels
    must remain NaN in both the SCI and ERR arrays.
    """
    shape = (4, 5)
    input_model, fringe_model = setup_inputs(shape)

    # Make 1 bad pixel
    input_model.data[0, 0] = np.nan
    input_model.err[0, 0] = np.nan

    # Do the correction()
    output_model = fringe.do_correction(input_model, fringe_model)

    # Check that correction was done on pixels with valid values for both
    # SCI and ERR arrays
    # NOTE(review): this comparison assumes do_correction does not modify
    # input_model in place — confirm against the fringe step implementation.
    good_pix = np.where(np.isfinite(input_model.data))
    assert (output_model.data[good_pix] ==
            (input_model.data * FRINGE_CONSTANT)[good_pix]).all()
    assert (output_model.err[good_pix] ==
            (input_model.err * FRINGE_CONSTANT)[good_pix]).all()

    # Check that correction was not done on pixel with NaN values for both SCI
    # and ERR arrays (i.e. these pixels have not been corrected)
    assert np.isnan(output_model.data[0, 0])
    assert np.isnan(output_model.err[0, 0])
@pytest.fixture
def setup_inputs():
    """Factory fixture building matching science and fringe models."""

    def _setup(shape=(2, 2)):
        # Science model: constant 6.0 data with random errors.
        sci_model = IFUImageModel(
            data=np.full(shape, 6.),
            err=rn.random_sample(shape),
        )
        # Fringe reference of 1/FRINGE_CONSTANT everywhere, so dividing by
        # it multiplies the science data by FRINGE_CONSTANT.
        fringe_ref = FringeModel(data=np.full(shape, 1. / FRINGE_CONSTANT))
        return sci_model, fringe_ref

    return _setup
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@fringe@tests@test_fringe.py@.PATH_END.py
|
{
"filename": "_templateitemname.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/polar/radialaxis/tickformatstop/_templateitemname.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``templateitemname`` property of
    ``layout.polar.radialaxis.tickformatstop``.

    NOTE(review): this module appears to be produced by the plotly code
    generator — prefer regenerating over hand-editing.
    """

    def __init__(
        self,
        plotly_name="templateitemname",
        parent_name="layout.polar.radialaxis.tickformatstop",
        **kwargs,
    ):
        super(TemplateitemnameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "plot" edit type: changing this property triggers a replot.
            edit_type=kwargs.pop("edit_type", "plot"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@polar@radialaxis@tickformatstop@_templateitemname.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/carpet/baxis/title/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``font`` property of
    ``carpet.baxis.title``.

    NOTE(review): this module appears to be produced by the plotly code
    generator — prefer regenerating over hand-editing.
    """

    def __init__(self, plotly_name="font", parent_name="carpet.baxis.title", **kwargs):
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color

            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            size

            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            variant
                Sets the variant of the font.
            weight
                Sets the weight (or boldness) of the font.
""",
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@carpet@baxis@title@_font.py@.PATH_END.py
|
{
"filename": "_selectedpoints.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/choropleth/_selectedpoints.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SelectedpointsValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``selectedpoints`` property of ``choropleth``.

    NOTE(review): this module appears to be produced by the plotly code
    generator — prefer regenerating over hand-editing.
    """

    def __init__(
        self, plotly_name="selectedpoints", parent_name="choropleth", **kwargs
    ):
        super(SelectedpointsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "calc" edit type: changing this property forces a recalculation.
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@choropleth@_selectedpoints.py@.PATH_END.py
|
{
"filename": "inplace_ops_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/kernel_tests/array_ops/inplace_ops_test.py",
"type": "Python"
}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for inplace_ops."""
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import inplace_ops
from tensorflow.python.platform import test as test_lib
# Dtypes covered by the generic update/add/sub tests below.  bool and string
# are exercised in their own dedicated tests rather than through this list.
BASIC_TYPES = [
    dtypes.float32,
    dtypes.int8,
    dtypes.uint8,
    dtypes.int32,
    dtypes.int64,
    dtypes.uint64,
    dtypes.bfloat16,
]
class InplaceOpsTest(test_util.TensorFlowTestCase):
  """Tests for inplace_update/inplace_add/inplace_sub and inplace_ops.empty.

  Each test mirrors the op's effect with plain numpy slice assignment on a
  shadow array `y` and compares the two after every step.
  """

  def testBasicUpdate(self):
    """Row update via list index, negative index and scalar index."""
    for dtype in BASIC_TYPES:
      with test_util.use_gpu():
        x = array_ops.ones([7, 3], dtype)
        y = np.ones([7, 3], dtype.as_numpy_dtype)
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3], dtype))
        y[3, :] = 1
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_update(x, [-1],
                                       array_ops.ones([1, 3], dtype) * 2)
        y[-1, :] = 2
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_update(x, 5, array_ops.ones([3], dtype) * 7)
        y[5, :] = 7
        self.assertAllClose(x, y)

  def testBasicUpdateBool(self):
    """Boolean tensors get their own test: updates write True/False rows."""
    with test_util.use_gpu():
      x = array_ops.ones([7, 3], dtypes.bool)
      y = np.ones([7, 3], dtypes.bool.as_numpy_dtype)
      self.assertAllClose(x, y)
      x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3],
                                                            dtypes.bool))
      y[3, :] = True
      self.assertAllClose(x, y)
      x = inplace_ops.inplace_update(x, [-1],
                                     array_ops.zeros([1, 3], dtypes.bool))
      y[-1, :] = False
      self.assertAllClose(x, y)
      x = inplace_ops.inplace_update(x, 5, array_ops.zeros([3], dtypes.bool))
      y[5, :] = False
      self.assertAllClose(x, y)

  def testBasicAdd(self):
    """Row add via list, negative and scalar indices, plus None (all rows)."""
    for dtype in BASIC_TYPES:
      with test_util.use_gpu():
        x = array_ops.ones([7, 3], dtype)
        y = np.ones([7, 3], dtype.as_numpy_dtype)
        self.assertAllClose(x, y)
        # Consistency fix: this call previously went through
        # array_ops.inplace_add; every other call in this file uses
        # inplace_ops, so route it through the same module.
        x = inplace_ops.inplace_add(x, [3], array_ops.ones([1, 3], dtype))
        y[3, :] += 1
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_add(x, [-1], array_ops.ones([1, 3], dtype) * 2)
        y[-1, :] += 2
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_add(x, 5, array_ops.ones([3], dtype) * 7)
        y[5, :] += 7
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_add(x, None, array_ops.ones([7, 3], dtype) * 99)
        y[:, :] += 99
        self.assertAllClose(x, y)

  def testBasicSub(self):
    """Row subtract via list, negative and scalar indices, plus None."""
    for dtype in BASIC_TYPES:
      with test_util.use_gpu():
        x = array_ops.ones([7, 3], dtype)
        y = np.ones([7, 3], dtype.as_numpy_dtype)
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_sub(x, [3], array_ops.ones([1, 3], dtype))
        y[3, :] -= 1
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_sub(x, [-1], array_ops.ones([1, 3], dtype) * 2)
        y[-1, :] -= 2
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_sub(x, 5, array_ops.ones([3], dtype) * 7)
        y[5, :] -= 7
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_sub(x, None, array_ops.ones([7, 3], dtype) * 99)
        y[:, :] -= 99
        self.assertAllClose(x, y)

  def testRandom(self):
    """Random mixed update/add/sub ops on a rank-3 tensor vs numpy mirror."""
    with test_util.use_gpu():
      d0, d1, d2 = 100, 3, 5
      x = array_ops.zeros([d0, d1, d2])
      y = np.zeros([d0, d1, d2])
      for _ in range(20):
        # Distinct row indices each round; op chosen uniformly at random.
        idx = np.random.choice(d0, d0 // 10, replace=False)
        val = np.random.randint(10, size=(d0 // 10, d1, d2))
        op = np.random.randint(3)
        if op == 0:
          x = inplace_ops.inplace_update(x, idx, val)
          y[idx, :] = val
        elif op == 1:
          x = inplace_ops.inplace_add(x, idx, val)
          y[idx, :] += val
        elif op == 2:
          x = inplace_ops.inplace_sub(x, idx, val)
          y[idx, :] -= val
        self.assertAllClose(x, y)

  def testRandom1D(self):
    """Same random op mix as testRandom, but on a rank-1 tensor."""
    with test_util.use_gpu():
      d0 = 100
      x = array_ops.zeros([d0])
      y = np.zeros([d0])
      for _ in range(20):
        idx = np.random.choice(d0, d0 // 10, replace=False)
        val = np.random.randint(10, size=(d0 // 10))
        op = np.random.randint(3)
        if op == 0:
          x = inplace_ops.inplace_update(x, idx, val)
          y[idx] = val
        elif op == 1:
          x = inplace_ops.inplace_add(x, idx, val)
          y[idx] += val
        elif op == 2:
          x = inplace_ops.inplace_sub(x, idx, val)
          y[idx] -= val
        self.assertAllClose(x, y)

  def testAlias(self):
    """alias_inplace_add mutates its input buffer: x (read after y) == y."""
    with test_util.use_gpu():
      x = array_ops.ones([2, 3])
      y = inplace_ops.alias_inplace_add(x, [0], [[1, 2, 3]])
      with ops.control_dependencies([y]):
        z = array_ops.identity(x)
      _, vy, vz = self.evaluate([x, y, z])
      self.assertAllClose(vy, vz)

  def testError(self):
    """Malformed index/value shapes raise InvalidArgumentError."""
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "must be a vector"):
      _ = self.evaluate(inplace_ops.inplace_update([[1.]], [[0]], [[10]]))
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "x and v shape doesn't match"):
      _ = self.evaluate(inplace_ops.inplace_update([[1.]], [0], [10]))
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "i and x shape doesn't match"):
      _ = self.evaluate(inplace_ops.inplace_update([[1.]], [0, 1], [[10]]))

  def testEmpty(self):
    """empty/empty_like honor shape & dtype; init=True zero-fills."""
    for dtype in [
        dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64, dtypes.bool,
        dtypes.uint8, dtypes.bfloat16
    ]:
      with test_util.use_gpu():
        # Includes scalar, empty-dimension and rank-3 shapes.
        test_shapes = [(), (1,), (2, 3), (0, 2), (2, 3, 5), (2, 0, 5)]
        for shape in test_shapes:
          val = self.evaluate(inplace_ops.empty(shape, dtype))
          self.assertEqual(val.shape, shape)
          self.assertEqual(val.dtype, dtype.as_numpy_dtype)
          val = self.evaluate(inplace_ops.empty(shape, dtype, init=True))
          self.assertEqual(val.shape, shape)
          self.assertEqual(val.dtype, dtype.as_numpy_dtype)
          self.assertAllEqual(val, np.zeros(shape, dtype.as_numpy_dtype))
          val = self.evaluate(
              inplace_ops.empty_like(array_ops.zeros(shape, dtype)))
          self.assertEqual(val.shape, shape)
          self.assertEqual(val.dtype, dtype.as_numpy_dtype)
          val = self.evaluate(inplace_ops.empty_like(
              array_ops.zeros(shape, dtype), init=True))
          self.assertEqual(val.shape, shape)
          self.assertEqual(val.dtype, dtype.as_numpy_dtype)
          self.assertAllEqual(val, np.zeros(shape, dtype.as_numpy_dtype))

    with test_util.use_gpu():
      # String elements come back empty with or without init — presumably
      # uninitialized string elements are default-constructed.
      val = self.evaluate(inplace_ops.empty((1, 2), dtypes.string, init=True))
      self.assertEqual(val.tolist(), [[b"", b""]])

      val = self.evaluate(inplace_ops.empty((1, 2), dtypes.string, init=False))
      self.assertEqual(val.tolist(), [[b"", b""]])

  def testInplaceOpOnEmptyTensors(self):
    """All three ops are no-ops on tensors with a zero-length dimension."""
    op_fns = [
        inplace_ops.inplace_add,
        inplace_ops.inplace_sub,
        inplace_ops.inplace_update,
    ]
    for dtype in BASIC_TYPES:
      for op_fn in op_fns:
        with test_util.use_gpu():
          x = array_ops.zeros([7, 0], dtype)
          y = np.zeros([7, 0], dtype.as_numpy_dtype)
          self.assertAllClose(x, y)
          x = op_fn(x, [3], array_ops.ones([1, 0], dtype))
          self.assertAllClose(x, y)
          x = op_fn(x, None, array_ops.ones([1, 0], dtype))
          self.assertAllClose(x, y)
if __name__ == "__main__":
  test_lib.main()  # run all tests in this module via the TF test runner
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@kernel_tests@array_ops@inplace_ops_test.py@.PATH_END.py
|
{
"filename": "powerspectrum.py",
"repo_name": "radiocosmology/draco",
"repo_path": "draco_extracted/draco-master/draco/analysis/powerspectrum.py",
"type": "Python"
}
|
"""Power spectrum estimation code."""
import numpy as np
from caput import config
from ..core import containers, task
class QuadraticPSEstimation(task.SingleTask):
    """Estimate a power spectrum from a set of KLModes.

    Applies a quadratic estimator (precomputed in the driftscan products)
    m-by-m to the distributed KL modes, then unmixes the band powers with a
    matrix chosen by `pstype`.

    Attributes
    ----------
    psname : str
        Name of power spectrum to use. Must be precalculated in the driftscan
        products.
    pstype : str
        Type of power spectrum estimate to calculate. One of 'unwindowed',
        'minimum_variance' or 'uncorrelated'.
    """

    psname = config.Property(proptype=str)

    pstype = config.enum(
        ["unwindowed", "minimum_variance", "uncorrelated"], default="unwindowed"
    )

    def setup(self, manager):
        """Set the ProductManager instance to use.

        Parameters
        ----------
        manager : ProductManager
            Manager object to use
        """
        self.manager = manager

    def process(self, klmodes):
        """Estimate the power spectrum from the given data.

        Parameters
        ----------
        klmodes : containers.KLModes
            KLModes for which to estimate the power spectrum

        Returns
        -------
        ps : containers.PowerSpectrum
        """

        import scipy.linalg as la

        if not isinstance(klmodes, containers.KLModes):
            raise ValueError(
                "Input container must be instance of "
                f"KLModes (received {klmodes.__class__!s})"
            )

        # Distribute over m so each rank works on its own set of m modes.
        klmodes.redistribute("m")

        # Look up the precomputed quadratic estimator for `psname`.
        pse = self.manager.psestimators[self.psname]
        pse.genbands()

        q_list = []

        # Accumulate the per-m q estimates over the locally held modes.
        # Only the first nmode[m] entries of each row are valid modes.
        for mi, m in klmodes.vis[:].enumerate(axis=0):
            ps_single = pse.q_estimator(m, klmodes.vis[m, : klmodes.nmode[m]])
            q_list.append(ps_single)

        # Gather each rank's partial sum and reduce to the global q vector.
        q = klmodes.comm.allgather(np.array(q_list).sum(axis=0))
        q = np.array(q).sum(axis=0)

        # reading from directory
        fisher, bias = pse.fisher_bias()

        ps = containers.Powerspectrum2D(
            kperp_edges=pse.kperp_bands, kpar_edges=pse.kpar_bands
        )

        npar = len(ps.index_map["kpar"])
        nperp = len(ps.index_map["kperp"])

        # Calculate the right unmixing matrix for each ps type
        if self.pstype == "unwindowed":
            # Pseudo-inverse of the Fisher matrix fully deconvolves the window.
            M = la.pinv(fisher, rcond=1e-8)
        elif self.pstype == "uncorrelated":
            # Cholesky-based decorrelation; rows normalised by their sums.
            Fh = la.cholesky(fisher)
            M = la.inv(Fh) / Fh.sum(axis=1)[:, np.newaxis]
        elif self.pstype == "minimum_variance":
            # Simple per-band normalisation, no deconvolution.
            M = np.diag(fisher.sum(axis=1) ** -1)
        # NOTE: `pstype` is constrained by config.enum above, so exactly one
        # of the branches is always taken and M is always defined.

        ps.powerspectrum[:] = np.dot(M, q - bias).reshape(nperp, npar)
        ps.C_inv[:] = fisher.reshape(nperp, npar, nperp, npar)

        return ps
|
radiocosmologyREPO_NAMEdracoPATH_START.@draco_extracted@draco-master@draco@analysis@powerspectrum.py@.PATH_END.py
|
{
"filename": "customblock.ipynb",
"repo_name": "lgrcia/prose",
"repo_path": "prose_extracted/prose-main/docs/ipynb/customblock.ipynb",
"type": "Jupyter Notebook"
}
|
# Custom block
Here is a more detailed example on how to create a custom block by subclassing the [Block](prose.Block) class (and make it user and community-friendly). The purpose of the [Block](prose.Block) we will create is to correct for [image vignetting](https://en.wikipedia.org/wiki/Vignetting)
## Dataset
We first consider an example image
```python
from prose import example_image
image = example_image(seed=4)
```
/Users/lgrcia/code/dev/prose/prose/console_utils.py:15: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from tqdm.autonotebook import tqdm
in which we include some vignetting
```python
import numpy as np
# vignette function
def gaussian2D(XY, xy, sigma, amplitude=1):
    """Isotropic Gaussian bump raised to the third power (vignette model).

    `XY` is a pair of coordinate grids, `xy` the centre, `sigma` the width.
    """
    grid_x, grid_y = XY
    cx, cy = xy
    r2 = (grid_x - cx) ** 2 + (grid_y - cy) ** 2
    # exp(-dx^2/s^2 - dy^2/s^2) == exp(-r^2/s^2); the cube sharpens the falloff.
    return amplitude * np.exp(-r2 / sigma**2) ** 3
# vignetting the image data
XY = np.indices(image.shape)
vignette = gaussian2D(XY, (np.array(image.shape) / 2), 1000)
image.data *= vignette
image.show()
```

## Method
The method to correct for the vignetting is simple:
1. We build a sigma-clipped version of the image to exclude bright (stars) pixels (iteratively)
2. We fit the vignette model to the sigma clipped data
3. We correct the image from the fitted vignette
### 1. Sigma clipping
```python
import matplotlib.pyplot as plt
sg_image = image.data.copy()
sg_image = image.data.copy()
mask = np.ones_like(sg_image).astype(bool)
for _ in range(5):
mask = np.abs((sg_image - np.median(sg_image[mask]))) < 5 * np.std(sg_image[mask])
sg_image[~mask] = np.nan
plt.imshow(sg_image)
```
<matplotlib.image.AxesImage at 0x291017310>

### 2. Fitting the model
```python
from scipy.optimize import minimize
center = np.array(image.shape) / 2
def model(p):
a, s = p
return a * gaussian2D(XY, center, s)
def nll(p, sg_image):
_model = model(p)
return np.log(np.nansum((_model - sg_image) ** 2))
x0 = [5000, image.shape[0]]
sol = minimize(nll, x0, bounds=((0, np.nanmax(sg_image)), (0, 2000)), args=(sg_image,))
```
### 3. Correction
```python
corrected_image = image.copy()
corrected_image.data -= model(sol.x)
# plotting
# --------
plt.figure(None, (12, 6))
ax1 = plt.subplot(131, title="raw image")
image.show(ax=ax1)
ax2 = plt.subplot(132, title="fitted vignette model")
plt.imshow(
model(sol.x),
origin="lower",
cmap="Greys_r",
vmin=np.nanmin(sg_image),
vmax=np.nanmax(sg_image),
)
ax3 = plt.subplot(133, title="corrected image")
_ = corrected_image.show(ax=ax3)
```

## Block creation
We will now create a block to be able to apply this correction in a `Sequence` (and easily combine it with other processing blocks)
### The simple way
The simplest way is to subclass the [Block](prose.Block) class, and copy-paste the code above into its `run(self, image)` method, which will be called on each [Image](prose.Image)
```python
from prose import Block
class SimpleVignettingCorr(Block):
    """Fit and subtract a cubed-Gaussian vignette model from each image.

    Bright pixels (stars) are removed by iterative sigma clipping before the
    amplitude/width fit, so they do not bias the vignette estimate.
    """

    def __init__(self, **kwargs):
        # Fix: the original called super().__init__(self, **kwargs), which
        # passes the instance a second time (the zero-argument super() form
        # already binds `self`); only **kwargs should be forwarded.
        super().__init__(**kwargs)

    def run(self, image):
        # 1. Iterative sigma clipping: re-estimate median/std 5 times, masking
        # pixels further than 5 sigma from the median each round.
        sg_image = image.data.copy()
        mask = np.ones_like(sg_image).astype(bool)
        for _ in range(5):
            mask = np.abs(sg_image - np.median(sg_image[mask])) < 5 * np.std(
                sg_image[mask]
            )
        sg_image[~mask] = np.nan

        XY = np.indices(image.shape)
        center = np.array(image.shape) / 2

        # 2. Fit the vignette model (amplitude, width) to the clipped image.
        def model(p):
            a, s = p
            return a * gaussian2D(XY, center, s)

        def nll(p, data):
            # Log of the sum of squared residuals (NaNs from clipping ignored).
            return np.log(np.nansum((model(p) - data) ** 2))

        x0 = [5000, image.shape[0]]
        sol = minimize(
            nll, x0, bounds=((0, np.nanmax(sg_image)), (0, 2000)), args=(sg_image,)
        )

        # 3. Subtract the fitted vignette from the image in place.
        image.data -= model(sol.x)
```
and applying it to data
```python
corrected_image = SimpleVignettingCorr()(image)
_ = corrected_image.show()
```

### User-friendly block
The block `SimpleVignettingCorr` does the work, but is not optimized. Indeed:
1. `XY` and `center` are computed for each image, whereas images with similar characteristics (like shape and center) are more likely to be fed into a sequence
2. The model parameters optimisation always start from an uninformed guess `x0`, whereas the solution from a previous image is likely to be a good guess
3. the code within `run` is lengthy and could be organized using class methods
A good way to solve 1. is to provide the block with a reference image, from which `XY` and `center` can be pre-computed. To solve 2., the last optimized parameters can be recorded and used as a first guess for the next optimization. Let's implement these two ideas in the block together with a bit of cleaning (solving 3.)
```python
class BetterVignettingCorr(Block):
    """Vignette correction with cached geometry and warm-started fits.

    If a reference image is provided, the pixel grid, centre and initial
    parameter guess are computed once up front; otherwise they are derived
    from the first image processed.  Each fit's optimised parameters seed
    the next one.
    """

    # allowing for a reference image to be provided
    def __init__(self, reference=None, **kwargs):
        # Fix: forward only **kwargs — the original passed `self` a second
        # time through super().__init__(self, **kwargs).
        super().__init__(**kwargs)
        # pixel grid and image centre, cached to avoid per-image recomputation
        self.XY = None
        self.center = None
        # last optimised parameters, reused as the next initial guess
        self.x0 = None
        # pre-computing parameters if reference provided
        if reference is not None:
            self.XY = np.indices(reference.shape)
            self.center = np.array(reference.shape) / 2
            self.x0 = [5000, reference.shape[0]]

    @staticmethod
    def sigma_clip(data, n=5, sigma=5):
        """Return a copy of `data` with >`sigma`-sigma outliers set to NaN,
        re-estimating the median/std `n` times.

        Fix: the original ignored both parameters and always used 5 and 5.
        """
        sg_image = data.copy()
        mask = np.ones_like(sg_image).astype(bool)
        for _ in range(n):
            mask = np.abs(sg_image - np.median(sg_image[mask])) < sigma * np.std(
                sg_image[mask]
            )
        sg_image[~mask] = np.nan
        return sg_image

    def model(self, p):
        """Vignette model: amplitude `a` and width `s`, centred on the image."""
        a, s = p
        return a * gaussian2D(self.XY, self.center, s)

    def chi(self, p, sg_image):
        """Sum of squared residuals between model and clipped image."""
        return np.nansum((self.model(p) - sg_image) ** 2)

    def run(self, image):
        # sigma clipping
        sg_image = self.sigma_clip(image.data)

        # if no reference was given, initialise cached state from this image
        if self.x0 is None:
            self.x0 = [5000, image.shape[0]]
        if self.XY is None:
            self.XY = np.indices(image.shape)
            self.center = np.array(image.shape) / 2

        sol = minimize(
            self.chi,
            self.x0,
            bounds=((0, np.nanmax(sg_image)), (0, 2000)),
            args=(sg_image,),
        )
        self.x0 = sol.x  # keep optimised parameters as guess for next image

        # Fix: the original subtracted the bare name `model(sol.x)`, which
        # resolved to a stale global from an earlier notebook cell (or raised
        # NameError); the instance method is the intended model.
        image.data -= self.model(sol.x)
```
and applying it to data
```python
corrected_image = BetterVignettingCorr()(image)
_ = corrected_image.show()
```

```{note}
Here, the performance of ``BetterVignettingCorr`` against ``SimpleVignettingCorr`` would be very similar, but providing a reference mechanism to a block (so it can precompute some redundant parameters) often greatly improves its performance.
```
## Documentation
Once created, a Block needs to be properly documented in order to be shared and properly maintained.
### Acknowledgment
Using your `Block` in a `Sequence` might lead to published results. In this context, one would need to properly acknowledge the packages and methods used by your `Block`, including your own work. To do that, the `Block` class provides the `citations` method, which can be overridden in the following way:
```python
class CitableVignettingCorr(Block):
    """Variant of the vignetting block that declares its citations."""

    def __init__(self, **kwargs):
        # Fix: forward only **kwargs; the original passed `self` a second
        # time through super().__init__(self, **kwargs).
        super().__init__(**kwargs)

    @property
    def citations(self):
        return (
            # we used scipy (known to prose)
            "scipy",
            # your custom reference, as a raw BibTeX record
            """@misc{my-work, author = "me", title = "My work", year = "2022"}""",
        )
```
To use it, let's define a sequence with your block in it
```python
from prose import Sequence, blocks
sequence = Sequence(
[
CitableVignettingCorr(),
]
)
```
and extract the acknowledgment for it (TODO)
tex, bib = sequence.citations()
print(tex)
print(bib[0:1500], "...")
<div class="alert alert-info">
Note
For more details, see the [acknowledgment reference](./acknowledgement.ipynb)
</div>
TODO !
### Doctring
TODO
|
lgrciaREPO_NAMEprosePATH_START.@prose_extracted@prose-main@docs@ipynb@customblock.ipynb@.PATH_END.py
|
{
"filename": "pol_triangle.py",
"repo_name": "cmbant/CosmoMC",
"repo_path": "CosmoMC_extracted/CosmoMC-master/batch3/outputs/pol_triangle.py",
"type": "Python"
}
|
import planckStyle as s

# Planck-styled subplot plotter used for all triangle plots below.
g = s.getSubplotPlotter()

# Base cosmological parameters shown in each triangle plot.
params = ['theta', 'omegabh2', 'omegach2', 'logA', 'ns', 'tau']

# One figure per high-ell likelihood code (CamSpec and plik).
for camspec in [True, False]:
    # `par` is a placeholder for an optional extra parameter to prepend;
    # currently only the empty string is used, so nothing is prepended.
    for par in ['']:
        g.newPlot()
        dataroots = [s.defdata_root + '_EE_lowE', s.defdata_root + '_TE_lowE', s.defdata_root + '_TT_lowl_lowE', s.defdata_root + '_TTTEEE_lowl_lowE']

        labs = [s.datalabel[t] for t in dataroots]
        if camspec:
            # Swap the plik high-ell likelihood tag for CamSpec in the chain names.
            dataroots = [x.replace('plikHM','CamSpecHM') for x in dataroots]
        print(dataroots)

        roots = [ g.getRoot(par, root) for root in dataroots]
        if par: params = [par] + params
        g.triangle_plot(roots, params, filled_compare=True, legend_labels=labs)
        g.export(tag=par + ('_CamSpec' if camspec else ''))
|
cmbantREPO_NAMECosmoMCPATH_START.@CosmoMC_extracted@CosmoMC-master@batch3@outputs@pol_triangle.py@.PATH_END.py
|
{
"filename": "usd.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/lexers/usd.py",
"type": "Python"
}
|
"""
pygments.lexers.usd
~~~~~~~~~~~~~~~~~~~
The module that parses Pixar's Universal Scene Description file format.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.lexer import words as words_
from pygments.lexers._usd_builtins import COMMON_ATTRIBUTES, KEYWORDS, \
OPERATORS, SPECIAL_NAMES, TYPES
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Text, Whitespace
__all__ = ["UsdLexer"]
def _keywords(words, type_):
    """Build a one-entry token-rule list matching any of *words* whole-word."""
    pattern = words_(words, prefix=r"\b", suffix=r"\b")
    return [(pattern, type_)]
# Regex fragments shared by the attribute-declaration rules in UsdLexer.
_TYPE = r"(\w+(?:\[\])?)"  # a type name, optionally array-valued ("float[]")
_BASE_ATTRIBUTE = r"(\w+(?:\:\w+)*)(?:(\.)(timeSamples))?"  # namespaced attr, optional ".timeSamples"
_WHITESPACE = r"([ \t]+)"  # horizontal whitespace only (no newlines)
class UsdLexer(RegexLexer):
    """
    A lexer that parses Pixar's Universal Scene Description file format.
    """

    name = "USD"
    url = 'https://graphics.pixar.com/usd/release/index.html'
    aliases = ["usd", "usda"]
    filenames = ["*.usd", "*.usda"]
    version_added = '2.6'

    tokens = {
        "root": [
            # Attribute declarations, from most to least specific:
            # "custom uniform TYPE attr =", "custom TYPE attr =",
            # "uniform TYPE attr =", and plain "TYPE attr =".
            (rf"(custom){_WHITESPACE}(uniform)(\s+){_TYPE}(\s+){_BASE_ATTRIBUTE}(\s*)(=)",
             bygroups(Keyword.Token, Whitespace, Keyword.Token, Whitespace,
                      Keyword.Type, Whitespace, Name.Attribute, Text,
                      Name.Keyword.Tokens, Whitespace, Operator)),
            (rf"(custom){_WHITESPACE}{_TYPE}(\s+){_BASE_ATTRIBUTE}(\s*)(=)",
             bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace,
                      Name.Attribute, Text, Name.Keyword.Tokens, Whitespace,
                      Operator)),
            (rf"(uniform){_WHITESPACE}{_TYPE}(\s+){_BASE_ATTRIBUTE}(\s*)(=)",
             bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace,
                      Name.Attribute, Text, Name.Keyword.Tokens, Whitespace,
                      Operator)),
            (rf"{_TYPE}{_WHITESPACE}{_BASE_ATTRIBUTE}(\s*)(=)",
             bygroups(Keyword.Type, Whitespace, Name.Attribute, Text,
                      Name.Keyword.Tokens, Whitespace, Operator)),
        ] +
        # Builtin word lists from pygments.lexers._usd_builtins.
        _keywords(KEYWORDS, Keyword.Tokens) +
        _keywords(SPECIAL_NAMES, Name.Builtins) +
        _keywords(COMMON_ATTRIBUTES, Name.Attribute) +
        # Namespaced attribute names such as "primvars:foo".
        [(r"\b\w+:[\w:]+\b", Name.Attribute)] +
        _keywords(OPERATORS, Operator) +  # more attributes
        # Array-valued type names ("float[]") before the bare type names.
        [(type_ + r"\[\]", Keyword.Type) for type_ in TYPES] +
        _keywords(TYPES, Keyword.Type) +
        [
            # Punctuation, comments, numbers and string/asset literals.
            (r"[(){}\[\]]", Punctuation),
            ("#.*?$", Comment.Single),
            (",", Punctuation),
            (";", Punctuation),  # ";"s are allowed to combine separate metadata lines
            ("=", Operator),
            (r"[-]*([0-9]*[.])?[0-9]+(?:e[+-]*\d+)?", Number),
            (r"'''(?:.|\n)*?'''", String),
            (r'"""(?:.|\n)*?"""', String),
            (r"'.*?'", String),
            (r'".*?"', String),
            # Prim path references like </World/Sphere> or <../sibling>.
            (r"<(\.\./)*([\w/]+|[\w/]+\.\w+[\w:]*)>", Name.Namespace),
            # Asset (file) references delimited by @...@.
            (r"@.*?@", String.Interpol),
            (r'\(.*"[.\\n]*".*\)', String.Doc),
            # File magic, e.g. "#usda 1.0" at the start of the file.
            (r"\A#usda .+$", Comment.Hashbang),
            (r"\s+", Whitespace),
            (r"\w+", Text),
            (r"[_:.]+", Punctuation),
        ],
    }
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@lexers@usd.py@.PATH_END.py
|
{
"filename": "simple.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/importlib/simple.py",
"type": "Python"
}
|
"""
Compatibility shim for .resources.simple as found on Python 3.10.
Consumers that can rely on Python 3.11 should use the other
module directly.
"""
from .resources.simple import (
SimpleReader, ResourceHandle, ResourceContainer, TraversableReader,
)
__all__ = [
'SimpleReader', 'ResourceHandle', 'ResourceContainer', 'TraversableReader',
]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@importlib@simple.py@.PATH_END.py
|
{
"filename": "gaussian.py",
"repo_name": "herjy/SLIT",
"repo_path": "SLIT_extracted/SLIT-master/Tests/gaussian.py",
"type": "Python"
}
|
import numpy as np
import scipy.misc as spm
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def gaussian(n1, n2, x0, y0, A, e1, e2, alpha):
    """Render an elliptical 2-D Gaussian profile on an (n1, n2) pixel grid.

    Parameters
    ----------
    n1, n2 : int
        Size of the output image.
    x0, y0 : float
        Centroid of the Gaussian profile (pixel coordinates).
    A : float
        Amplitude scale; the peak prefactor is A / (2*pi*sqrt(e1*e2)).
    e1, e2 : float
        Widths along the two principal axes (ellipticity of the profile).
    alpha : float
        Inclination (rotation angle) of the profile, in radians.

    Returns
    -------
    numpy.ndarray
        (n1, n2) image containing the Gaussian profile.

    Fixes over the original implementation:
    - the per-pixel loop indexed pixels via np.linspace(0, n1*n2-1., n1*n2-1.),
      which passes a float `num` (a TypeError on modern numpy) and skipped a
      pixel after integer truncation; the computation is now vectorised over
      the full grid;
    - the deprecated np.mat matrix class is no longer used;
    - the matrix inverse is computed once instead of once per pixel.
    """
    # Amplitude prefactor.
    ampli = A / (2 * np.pi * np.sqrt(e1 * e2))

    # Quadrupole matrix R^T diag(e1^2, e2^2) R and its inverse (computed once).
    mat_rot = np.array([[np.cos(alpha), np.sin(alpha)],
                        [-np.sin(alpha), np.cos(alpha)]])
    matA = mat_rot.T.dot(np.diag([e1 * e1, e2 * e2])).dot(mat_rot)
    invA = np.linalg.inv(matA)

    # Quadratic form val . invA . val^T evaluated at every pixel offset.
    X, Y = np.indices((n1, n2))
    dx = X - x0
    dy = Y - y0
    AA = (invA[0, 0] * dx * dx
          + (invA[0, 1] + invA[1, 0]) * dx * dy
          + invA[1, 1] * dy * dy)

    return ampli * np.exp(-0.5 * AA)
def moffat(n1, n2, x0, y0, A, e1, e2, alpha, beta):
    """Render an elliptical Moffat-like profile on an (n1, n2) pixel grid.

    Parameters
    ----------
    n1, n2 : int
        Size of the output image.
    x0, y0 : float
        Centroid of the profile (pixel coordinates).
    A : float
        Amplitude scale; the peak prefactor is A / (2*pi*sqrt(e1*e2)).
    e1, e2 : float
        Ellipticity parameters along the two principal axes.
    alpha : float
        Inclination (rotation angle) of the profile, in radians.
    beta : float
        Moffat slope exponent.

    Returns
    -------
    numpy.ndarray
        (n1, n2) image containing the profile.

    Fixes over the original implementation: the per-pixel loop built its index
    list with np.linspace(0, n1*n2-1, n1*n2-1), which skipped a pixel after
    integer truncation; the computation is now vectorised over the full grid,
    np.mat is no longer used and the matrix inverse is computed once.
    """
    ampli = A / (2 * np.pi * np.sqrt(e1 * e2))

    # Quadrupole matrix R^T diag(1/e1^2, 1/e2^2) R and its inverse.
    mat_rot = np.array([[np.cos(alpha), np.sin(alpha)],
                        [-np.sin(alpha), np.cos(alpha)]])
    matA = mat_rot.T.dot(np.diag([1.0 / (e1 * e1), 1.0 / (e2 * e2)])).dot(mat_rot)
    invA = np.linalg.inv(matA)

    # Quadratic form val . invA . val^T at every pixel offset from the centroid.
    X, Y = np.indices((n1, n2))
    dx = X - x0
    dy = Y - y0
    AA = (invA[0, 0] * dx * dx
          + (invA[0, 1] + invA[1, 0]) * dx * dy
          + invA[1, 1] * dy * dy)

    # NOTE(review): a textbook Moffat profile uses (1 + AA) ** (-beta); the
    # squared quadratic form is kept here to preserve the original behaviour.
    return ampli * (1 + AA ** 2) ** (-beta)
def sersic(n1, n2, x0, y0, A, e1, e2, alpha, n):
    """Render an elliptical Sersic-like profile on an (n1, n2) pixel grid.

    Parameters
    ----------
    n1, n2 : int
        Size of the output image.
    x0, y0 : float
        Centroid of the profile (pixel coordinates).
    A : float
        Amplitude scale; the peak prefactor is A / (2*pi*sqrt(e1*e2)).
    e1, e2 : float
        Ellipticity parameters along the two principal axes.
    alpha : float
        Inclination (rotation angle) of the profile, in radians.
    n : float
        Sersic index (exponent 1/n applied to the quadratic form).

    Returns
    -------
    numpy.ndarray
        (n1, n2) image containing the profile.

    Fixes over the original implementation: the per-pixel loop built its index
    list with np.linspace(0, n1*n2-1, n1*n2-1), which skipped a pixel after
    integer truncation; the computation is now vectorised over the full grid,
    np.mat is no longer used and the matrix inverse is computed once.
    """
    ampli = A / (2 * np.pi * np.sqrt(e1 * e2))

    # Quadrupole matrix R^T diag(1/e1^2, 1/e2^2) R and its inverse.
    mat_rot = np.array([[np.cos(alpha), np.sin(alpha)],
                        [-np.sin(alpha), np.cos(alpha)]])
    matA = mat_rot.T.dot(np.diag([1.0 / (e1 * e1), 1.0 / (e2 * e2)])).dot(mat_rot)
    invA = np.linalg.inv(matA)

    # Quadratic form val . invA . val^T at every pixel offset from the centroid.
    X, Y = np.indices((n1, n2))
    dx = X - x0
    dy = Y - y0
    AA = (invA[0, 0] * dx * dx
          + (invA[0, 1] + invA[1, 0]) * dx * dy
          + invA[1, 1] * dy * dy)

    return ampli * np.exp(-AA ** (1 / n))
def add_noise(img, mean, sigma):
    """Return a copy of `img` with i.i.d. Gaussian noise N(mean, sigma^2) added.

    Parameters
    ----------
    img : numpy.ndarray
        Input image (any 2-D shape).
    mean : float
        Mean of the additive noise.
    sigma : float
        Standard deviation of the additive noise.

    Fixes over the original implementation:
    - `numpy.identity` referenced the unimported name `numpy` (NameError);
    - `multovariate_normal` was a typo for `multivariate_normal`;
    - the noise field was hard-coded to 128x128 instead of the image shape;
    - the `sigma` argument was silently ignored (covariance was identity).
    """
    shp = np.shape(img)
    # Scale the 2-D identity covariance by sigma^2 so `sigma` takes effect;
    # only the first component of each draw is used, as in the original.
    cov = np.identity(2) * sigma ** 2
    noise = np.random.multivariate_normal([mean, mean], cov, shp)
    return img + noise[..., 0]
|
herjyREPO_NAMESLITPATH_START.@SLIT_extracted@SLIT-master@Tests@gaussian.py@.PATH_END.py
|
{
"filename": "test_savitzky_golay.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/signal/tests/test_savitzky_golay.py",
"type": "Python"
}
|
import pytest
import numpy as np
from numpy.testing import (assert_equal,
assert_array_equal,
)
from scipy._lib._array_api import (
assert_almost_equal, assert_array_almost_equal, xp_assert_close
)
from scipy.ndimage import convolve1d # type: ignore[attr-defined]
from scipy.signal import savgol_coeffs, savgol_filter
from scipy.signal._savitzky_golay import _polyder
def check_polyder(p, m, expected):
    """Assert that the m-th derivative of polynomial(s) `p` equals `expected`."""
    result = _polyder(p, m)
    assert_array_equal(result, expected)
def test_polyder():
    """_polyder against hand-computed derivatives (1-D and stacked 2-D)."""
    cases = [
        ([5], 0, [5]),
        ([5], 1, [0]),
        ([3, 2, 1], 0, [3, 2, 1]),
        ([3, 2, 1], 1, [6, 2]),
        ([3, 2, 1], 2, [6]),
        ([3, 2, 1], 3, [0]),
        ([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
        ([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
        ([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
        ([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
    ]
    # Coefficients are stored column-wise, hence the transposes.
    for coeffs, order, deriv in cases:
        check_polyder(np.array(coeffs).T, order, np.array(deriv).T)
#--------------------------------------------------------------------
# savgol_coeffs tests
#--------------------------------------------------------------------
def alt_sg_coeffs(window_length, polyorder, pos):
    """Reference implementation of the Savitzky-Golay coefficients.

    Fits a polynomial to a unit impulse at `pos` with numpy.polyfit and
    evaluates it over the window.  Equivalent to savgol_coeffs() but slower.
    window_length should be odd.
    """
    center = window_length // 2 if pos is None else pos
    t = np.arange(window_length)
    impulse = (t == center).astype(int)
    return np.polyval(np.polyfit(t, impulse, polyorder), t)
def test_sg_coeffs_trivial():
    # When polyorder == window_length - 1 the fit reproduces the sample at
    # `pos` exactly, so the coefficient vector is a unit impulse.
    xp_assert_close(savgol_coeffs(1, 0), [1.0])
    xp_assert_close(savgol_coeffs(3, 2), [0.0, 1, 0], atol=1e-10)
    xp_assert_close(savgol_coeffs(5, 4), [0.0, 0, 1, 0, 0], atol=1e-10)
    xp_assert_close(savgol_coeffs(5, 4, pos=1), [0.0, 0, 0, 1, 0], atol=1e-10)
    # use='dot' reverses the coefficient order relative to 'conv'.
    xp_assert_close(savgol_coeffs(5, 4, pos=1, use='dot'),
                    [0.0, 1, 0, 0, 0], atol=1e-10)
def compare_coeffs_to_alt(window_length, order):
    # Compare savgol_coeffs against alt_sg_coeffs for every valid `pos`,
    # including the default pos=None.
    positions = [None, *range(window_length)]
    for pos in positions:
        fast = savgol_coeffs(window_length, order, pos=pos, use='dot')
        slow = alt_sg_coeffs(window_length, order, pos=pos)
        xp_assert_close(fast, slow, atol=1e-10,
                        err_msg=("window_length = %d, order = %d, pos = %s" %
                                 (window_length, order, pos)))
def test_sg_coeffs_compare():
    """savgol_coeffs() agrees with alt_sg_coeffs() for odd windows 1..7."""
    for win in range(1, 8, 2):
        for polyorder in range(win):
            compare_coeffs_to_alt(win, polyorder)
def test_sg_coeffs_exact():
    # An order-4 SG filter applied to a cubic reproduces it exactly away
    # from the edges; its deriv=1 and deriv=2 variants recover the exact
    # first and second derivatives.
    polyorder = 4
    window_length = 9
    margin = window_length // 2

    x = np.linspace(0, 21, 43)
    delta = x[1] - x[0]
    y = 0.5 * x ** 3 - x

    smoothed = convolve1d(y, savgol_coeffs(window_length, polyorder))
    xp_assert_close(smoothed[margin:-margin], y[margin:-margin])

    dy = 1.5 * x ** 2 - 1
    d1 = convolve1d(y, savgol_coeffs(window_length, polyorder, deriv=1,
                                     delta=delta))
    xp_assert_close(d1[margin:-margin], dy[margin:-margin])

    d2y = 3.0 * x
    d2 = convolve1d(y, savgol_coeffs(window_length, polyorder, deriv=2,
                                     delta=delta))
    xp_assert_close(d2[margin:-margin], d2y[margin:-margin])
def test_sg_coeffs_deriv():
    # `x` samples a parabola, so polyorder >= 2 gives exact values and exact
    # first/second derivatives at every window position.
    i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
    x = i ** 2 / 4
    dx = i / 2
    d2x = np.full_like(i, 0.5)
    for pos in range(x.size):
        for deriv, expected in ((0, x), (1, dx), (2, d2x)):
            c = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=deriv)
            xp_assert_close(c.dot(x), expected[pos], atol=1e-10)
def test_sg_coeffs_deriv_gt_polyorder():
    """
    If deriv > polyorder, the coefficients should be all 0.

    This is a regression test for a bug where, e.g.,
        savgol_coeffs(5, polyorder=1, deriv=2)
    raised an error.
    """
    for window_length, order, deriv in ((5, 1, 2), (7, 4, 6)):
        coeffs = savgol_coeffs(window_length, polyorder=order, deriv=deriv)
        assert_array_equal(coeffs, np.zeros(window_length))
def test_sg_coeffs_large():
    """For large window_length and polyorder the coefficient array must
    stay (anti)symmetric, i.e. no numeric overflow occurred."""
    smooth = savgol_coeffs(31, 9)
    # deriv=0 kernel is symmetric ...
    assert_array_almost_equal(smooth, smooth[::-1])
    deriv1 = savgol_coeffs(31, 9, deriv=1)
    # ... while the deriv=1 kernel is antisymmetric.
    assert_array_almost_equal(deriv1, -deriv1[::-1])
# --------------------------------------------------------------------
# savgol_coeffs tests for even window length
# --------------------------------------------------------------------
def test_sg_coeffs_even_window_length():
    """Check savgol_coeffs with even window lengths against closed forms."""
    window_lengths = [4, 6, 8, 10, 12, 14, 16]

    # deriv=0, polyorder=0: a plain moving average.
    for length in window_lengths:
        kernel = savgol_coeffs(length, 0, 0)
        xp_assert_close(kernel, np.ones_like(kernel) / length)

    def closed_form_d1(k, m):
        # First derivative, valid for polyorder 1 and 2.
        return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1))

    def closed_form_d2(k, m):
        # Second derivative, valid for polyorder 2 and 3.
        numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2)
        denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1)
        return numer/denom

    for length in window_lengths:
        m = length//2
        ks = range(-m + 1, m + 1)
        expected = [closed_form_d1(k, m) for k in ks][::-1]
        xp_assert_close(expected, savgol_coeffs(length, 1, 1))
        xp_assert_close(expected, savgol_coeffs(length, 2, 1))
        expected = [closed_form_d2(k, m) for k in ks][::-1]
        xp_assert_close(expected, savgol_coeffs(length, 2, 2))
        xp_assert_close(expected, savgol_coeffs(length, 3, 2))
#--------------------------------------------------------------------
# savgol_filter tests
#--------------------------------------------------------------------
def test_sg_filter_trivial():
    """ Test some trivial edge cases for savgol_filter()."""
    # A single sample with window_length 1 is returned unchanged.
    assert_equal(savgol_filter(np.array([1.0]), 1, 0), [1.0])

    # Input is a single value. With a window length of 3 and polyorder 1,
    # the value in y is from the straight-line fit of (-1,0), (0,3) and
    # (1, 0) at 0. This is just the average of the three values, hence 1.0.
    assert_almost_equal(savgol_filter(np.array([3.0]), 3, 1, mode='constant'),
                        [1.0], decimal=15)

    assert_almost_equal(savgol_filter(np.array([3.0]), 3, 1, mode='nearest'),
                        [3.0], decimal=15)

    assert_almost_equal(savgol_filter(np.array([1.0] * 3), 3, 1, mode='wrap'),
                        [1.0, 1.0, 1.0], decimal=15)
def test_sg_filter_basic():
    # Some basic test cases for savgol_filter(): the same small signal
    # filtered under each boundary mode.
    data = np.array([1.0, 2.0, 1.0])
    expected_by_mode = {
        'constant': [1.0, 4.0 / 3, 1.0],
        'mirror': [5.0 / 3, 4.0 / 3, 5.0 / 3],
        'wrap': [4.0 / 3, 4.0 / 3, 4.0 / 3],
    }
    for mode, expected in expected_by_mode.items():
        xp_assert_close(savgol_filter(data, 3, 1, mode=mode), expected)
def test_sg_filter_2d():
    """Filtering a 2-D array along the last axis, and the transposed
    problem along axis 0, must give matching results."""
    data = np.array([[1.0, 2.0, 1.0],
                     [2.0, 4.0, 2.0]])
    want = np.array([[1.0, 4.0 / 3, 1.0],
                     [2.0, 8.0 / 3, 2.0]])
    xp_assert_close(savgol_filter(data, 3, 1, mode='constant'), want)
    xp_assert_close(savgol_filter(data.T, 3, 1, mode='constant', axis=0),
                    want.T)
def test_sg_filter_interp_edges():
    # Another test with low degree polynomial data, for which we can easily
    # give the exact results. In this test, we use mode='interp', so
    # savgol_filter should match the exact solution for the entire data set,
    # including the edges.
    t = np.linspace(-5, 5, 21)
    delta = t[1] - t[0]
    window_length = 7

    # Polynomial test data with its exact first and second derivatives.
    x = np.array([t,
                  3 * t ** 2,
                  t ** 3 - t])
    dx = np.array([np.ones_like(t),
                   6 * t,
                   3 * t ** 2 - 1.0])
    d2x = np.array([np.zeros_like(t),
                    np.full_like(t, 6),
                    6 * t])

    for axis in (-1, 0):
        if axis == 0:
            # Transpose everything and repeat the checks along axis 0.
            x, dx, d2x = x.T, dx.T, d2x.T
        y = savgol_filter(x, window_length, 3, axis=axis, mode='interp')
        xp_assert_close(y, x, atol=1e-12)
        y1 = savgol_filter(x, window_length, 3, axis=axis, mode='interp',
                           deriv=1, delta=delta)
        xp_assert_close(y1, dx, atol=1e-12)
        y2 = savgol_filter(x, window_length, 3, axis=axis, mode='interp',
                           deriv=2, delta=delta)
        xp_assert_close(y2, d2x, atol=1e-12)
def test_sg_filter_interp_edges_3d():
    # Test mode='interp' with a 3-D array.
    t = np.linspace(-5, 5, 21)
    delta = t[1] - t[0]
    x1 = np.array([t, -t])
    x2 = np.array([t ** 2, 3 * t ** 2 + 5])
    x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])
    dx1 = np.array([np.ones_like(t), -np.ones_like(t)])
    dx2 = np.array([2 * t, 6 * t])
    dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])

    def check(z, dz, axis):
        # The filter must reproduce the polynomial data and its first
        # derivative exactly along `axis`, edges included.
        y = savgol_filter(z, 7, 3, axis=axis, mode='interp', delta=delta)
        xp_assert_close(y, z, atol=1e-10)
        dy = savgol_filter(z, 7, 3, axis=axis, mode='interp', deriv=1,
                           delta=delta)
        xp_assert_close(dy, dz, atol=1e-10)

    # z has shape (3, 2, 21)
    z = np.array([x1, x2, x3])
    dz = np.array([dx1, dx2, dx3])
    check(z, dz, axis=-1)

    # z has shape (3, 21, 2)
    z = np.array([x1.T, x2.T, x3.T])
    dz = np.array([dx1.T, dx2.T, dx3.T])
    check(z, dz, axis=1)

    # z has shape (21, 3, 2)
    z = z.swapaxes(0, 1).copy()
    dz = dz.swapaxes(0, 1).copy()
    check(z, dz, axis=0)
def test_sg_filter_valid_window_length_3d():
    """Tests that the window_length check is using the correct axis."""
    data = np.ones((10, 20, 30))

    # Along the default (last) axis only lengths up to 30 are valid.
    savgol_filter(data, window_length=29, polyorder=3, mode='interp')
    with pytest.raises(ValueError, match='window_length must be less than'):
        # window_length is more than data.shape[-1].
        savgol_filter(data, window_length=31, polyorder=3, mode='interp')

    # Along axis 0 only lengths up to 10 are valid.
    savgol_filter(data, window_length=9, polyorder=3, axis=0, mode='interp')
    with pytest.raises(ValueError, match='window_length must be less than'):
        # window_length is more than data.shape[0].
        savgol_filter(data, window_length=11, polyorder=3, axis=0,
                      mode='interp')
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@signal@tests@test_savitzky_golay.py@.PATH_END.py
|
{
"filename": "_bordercolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/ohlc/hoverlabel/_bordercolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``ohlc.hoverlabel.bordercolor`` property."""

    def __init__(
        self, plotly_name="bordercolor", parent_name="ohlc.hoverlabel", **kwargs
    ):
        # Defaults mirror the generated plotly schema; explicit values in
        # ``kwargs`` take precedence, exactly as with the pop() idiom.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        super(BordercolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@ohlc@hoverlabel@_bordercolor.py@.PATH_END.py
|
{
"filename": "myscale_vector_sql.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/cookbook/myscale_vector_sql.ipynb",
"type": "Jupyter Notebook"
}
|
# Vector SQL Retriever with MyScale
>[MyScale](https://docs.myscale.com/en/) is an integrated vector database. You can access your database in SQL and also from here, LangChain. MyScale can make use of [various data types and functions for filters](https://blog.myscale.com/2023/06/06/why-integrated-database-solution-can-boost-your-llm-apps/#filter-on-anything-without-constraints). It will boost your LLM app no matter whether you are scaling up your data or expanding your system to broader applications.
```python
!pip3 install clickhouse-sqlalchemy InstructorEmbedding sentence_transformers openai langchain-experimental
```
```python
import getpass
from os import environ
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain
from langchain_openai import OpenAI
from sqlalchemy import MetaData, create_engine
MYSCALE_HOST = "msc-4a9e710a.us-east-1.aws.staging.myscale.cloud"
MYSCALE_PORT = 443
MYSCALE_USER = "chatdata"
MYSCALE_PASSWORD = "myscale_rocks"
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
engine = create_engine(
f"clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/default?protocol=https"
)
metadata = MetaData(bind=engine)
environ["OPENAI_API_KEY"] = OPENAI_API_KEY
```
```python
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain_experimental.sql.vector_sql import VectorSQLOutputParser
output_parser = VectorSQLOutputParser.from_embeddings(
model=HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
)
```
```python
from langchain.callbacks import StdOutCallbackHandler
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_experimental.sql.prompt import MYSCALE_PROMPT
from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain
from langchain_openai import OpenAI
chain = VectorSQLDatabaseChain(
llm_chain=LLMChain(
llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),
prompt=MYSCALE_PROMPT,
),
top_k=10,
return_direct=True,
sql_cmd_parser=output_parser,
database=SQLDatabase(engine, None, metadata),
)
import pandas as pd
pd.DataFrame(
chain.run(
"Please give me 10 papers to ask what is PageRank?",
callbacks=[StdOutCallbackHandler()],
)
)
```
## SQL Database as Retriever
```python
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain_experimental.retrievers.vector_sql_database import (
VectorSQLDatabaseChainRetriever,
)
from langchain_experimental.sql.prompt import MYSCALE_PROMPT
from langchain_experimental.sql.vector_sql import (
VectorSQLDatabaseChain,
VectorSQLRetrieveAllOutputParser,
)
from langchain_openai import ChatOpenAI
output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(
output_parser.model
)
chain = VectorSQLDatabaseChain.from_llm(
llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),
prompt=MYSCALE_PROMPT,
top_k=10,
return_direct=True,
db=SQLDatabase(engine, None, metadata),
sql_cmd_parser=output_parser_retrieve_all,
native_format=True,
)
# You need all those keys to get docs
retriever = VectorSQLDatabaseChainRetriever(
sql_db_chain=chain, page_content_key="abstract"
)
document_with_metadata_prompt = PromptTemplate(
input_variables=["page_content", "id", "title", "authors", "pubdate", "categories"],
template="Content:\n\tTitle: {title}\n\tAbstract: {page_content}\n\tAuthors: {authors}\n\tDate of Publication: {pubdate}\n\tCategories: {categories}\nSOURCE: {id}",
)
chain = RetrievalQAWithSourcesChain.from_chain_type(
ChatOpenAI(
model_name="gpt-3.5-turbo-16k", openai_api_key=OPENAI_API_KEY, temperature=0.6
),
retriever=retriever,
chain_type="stuff",
chain_type_kwargs={
"document_prompt": document_with_metadata_prompt,
},
return_source_documents=True,
)
ans = chain(
"Please give me 10 papers to ask what is PageRank?",
callbacks=[StdOutCallbackHandler()],
)
print(ans["answer"])
```
```python
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@cookbook@myscale_vector_sql.ipynb@.PATH_END.py
|
{
"filename": "node_link.py",
"repo_name": "ytree-project/ytree",
"repo_path": "ytree_extracted/ytree-main/ytree/data_structures/node_link.py",
"type": "Python"
}
|
"""
NodeLink class
"""
#-----------------------------------------------------------------------------
# Copyright (c) ytree development team. All rights reserved.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
class NodeLink:
    """A lightweight tree node holding one descendent link and a list of
    ancestor nodes."""

    __slots__ = ('tree_id', 'descendent', 'ancestors')

    def __init__(self, tree_id):
        # Identifier of this node within its tree.
        self.tree_id = tree_id
        # Set when this node is registered as another node's ancestor.
        self.descendent = None
        self.ancestors = []

    def add_ancestor(self, node):
        """Attach *node* as an ancestor and point it back at this node."""
        self.ancestors.append(node)
        node.descendent = self
|
ytree-projectREPO_NAMEytreePATH_START.@ytree_extracted@ytree-main@ytree@data_structures@node_link.py@.PATH_END.py
|
{
"filename": "SNOwGLoBES_usage.ipynb",
"repo_name": "SNEWS2/snewpy",
"repo_path": "snewpy_extracted/snewpy-main/doc/nb/SNOwGLoBES_usage.ipynb",
"type": "Jupyter Notebook"
}
|
# `snewpy.snowglobes` Usage Example
This notebook demonstrates how to use SNEWPY with SNOwGLoBES.
To start, make sure you have SNOwGLoBES installed and have downloaded one of the models that are part of SNEWPY. Adjust the directory paths in the following cell.
```python
from astropy import units as u
import matplotlib.pyplot as plt
import numpy as np
from snewpy import snowglobes, model_path
SNOwGLoBES_path = None # to use custom SNOwGLoBES detector/channel/smearing files, set SNOwGLoBES directory
SNEWPY_models_base = model_path # directory containing SNEWPY models
```
Next, we will set up some basic parameters for the supernova we want to simulate.
```python
# set distance in kpc
distance = 10
# set SNOwGLoBES detector to use
detector = "icecube"
# set SNEWPY model type and filename
modeltype = 'Zha_2021'
model = 's17'
# set desired flavor transformation
transformation = 'AdiabaticMSW_NMO'
# Construct file system path of model file and name of output file
# The output file will be stored in the same directory as the model file.
modelfile = SNEWPY_models_base + "/" + modeltype + "/" + model + '.dat'
outfile = modeltype+"_"+model+"_"+transformation
# There are three ways to select a time range.
# Option 1 - don't specify tstart and tend, then the whole model is integrated
#tstart = None
#tend = None
# Option 2 - specify single tstart and tend, this makes 1 fluence file integrated over the window
#tstart = 0.7 * u.s
#tend = 0.8 * u.s
# Option 3 = specify sequence of time intervals, one fluence file is made for each interval
window_tstart = 0.742
window_tend = 0.762
window_bins = 60
tstart = np.linspace(window_tstart, window_tend, window_bins, endpoint=False) * u.s
tend = tstart + (window_tend - window_tstart) / window_bins * u.s
tmid = (tstart + tend) * 0.5
```
Now that everything’s set up, let’s start using SNOwGLoBES! Be patient—these three steps together may take a few minutes.
```python
# snowglobes.generate_fluence integrates the model over the specified time window(s)
# and generates input files for SNOwGLoBES. It returns the full file path of the output file.
print("Preparing fluences ...")
tarredfile = snowglobes.generate_fluence(modelfile, modeltype, transformation, distance, outfile, tstart, tend)
# Next, we run SNOwGLoBES. This will loop over all the fluence files in `tarredfile`.
print("Running SNOwGLoBES ...")
snowglobes.simulate(SNOwGLoBES_path, tarredfile, detector_input=detector)
# Finally, we collate SNOwGLoBES’ results into a dictionary
print("Collating results ...")
tables = snowglobes.collate(SNOwGLoBES_path, tarredfile, skip_plots=True)
```
Finally, since we chose option 3 above, and calculated the fluence in 60 time bins, we can now plot the event counts over time.
```python
%matplotlib inline
nevents = np.zeros(len(tmid))
for i in range(len(tmid)):
key = f"Collated_{outfile}_{i}_{detector}_events_smeared_weighted.dat"
for j in range(1,len(tables[key]['header'].split())):
nevents[i] += sum(tables[key]['data'][j])
# nevents is per bin, convert to per ms
factor = window_bins / (window_tend - window_tstart) / 1000
plt.plot(tmid - 0.742 * u.s, nevents * factor)
plt.xlabel("$t-t_{2c}$ [s]")
plt.ylabel("Counts [ms$^{-1}$]")
plt.show()
# compare to Figure 5 of Zha et al. (2021)
print("Total Events:", sum(nevents))
```
|
SNEWS2REPO_NAMEsnewpyPATH_START.@snewpy_extracted@snewpy-main@doc@nb@SNOwGLoBES_usage.ipynb@.PATH_END.py
|
{
"filename": "populations.py",
"repo_name": "ArisTr/PyRaTE",
"repo_path": "PyRaTE_extracted/PyRaTE-master/PyRaTE/populations.py",
"type": "Python"
}
|
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
# NAME: #
# #
# populations.py #
# #
# #
# DESCRIPTION: #
# #
# Python script for calculating the population densities of E-levels #
# through a Lambda iteration #
# #
# A. If nonLTE = False: Solve detailed balance assuming escape probability #
# is 1 everywhere #
# B. If nonLTE = True: Start with the Boltzman distribution and do small #
# corrections. #
# #
# If GK = True initial betas are also set to 1. #
# gammas = angle between magnetic field and vectors #
# used to calculate the integrals #
# gammasPA = angle between the magnetic field and #
# principle axes #
# Phis = angle between the vectors used to calculate #
# the integrals and the y-axis (normally theta)#
# #
# 1. Assume a Boltzman distribution. #
# 2. Compute dtau_l #
# 3. Compute tau_l as the sum of dtau_l for dv_ij < Dtherm #
# #
# With "dv_ij < Dtherm" we basically assume a #
# step function for the profile. ^ #
# | f(v) #
# 1/2v_th Dv<v_th ____|____ #
# f(v) = { | | | #
# 0 otherwise _______| | |________ #
# #
# 4. Compute pd from tau_l #
#  5. Circle back and check if (PopRat_b-PopRat_a) < tolerance               #
# #
# PARAMETERS: #
# #
# Input : All arrays from "export_sim" #
# Output : LevPops, tline #
# #
# COMMENT: #
# #
# fsolve can also be replaced with "scipy.optimize.root". Methods "lm" #
# and "hybr" seem to be working fine and lead to less descrepancies #
# between GK effect and simple case #
# #
# AUTHOR: #
# #
# Aris E. Tritsis #
# (aris.tritsis@epfl.ch) #
# #
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
from numba import jit #
import numpy as np #
from scipy.constants import m_p, c, h, k #
from scipy.optimize import fsolve #
from scipy.integrate import nquad #
import sys #
#
#- - - - - - - - - - - - - -Convert to cgs- - - - - - - - - - - - - - - - - -#
m_p, c, h, Kb = m_p*1e+3, c*1e+2, h*1.e+7, k*1.e+7 #
amu, fwhm = 2.4237981621576, 2.35482 #
Tcmb = 2.7255 #
# Weights for LAMBDA iteration #
weight1, weight2 = 0.3, 0.7 #
# Take a "mean" optical depth for fiducial case or min (if mean= False) #
mean = True #
#
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
# #
# $$$ On axis vectors in the cell for GK $$$ #
# $$$ These will be used for "Integration" and $$$ #
# $$$ for interpolating betas.... $$$ #
# #
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
rayvecYp, rayvecYm = np.array([1., 0., 0.]), np.array([-1., 0., 0.]) #
rayvecXp, rayvecXm = np.array([0., 1., 0.]), np.array([0., -1., 0.]) #
rayvecZp, rayvecZm = np.array([0., 0., 1.]), np.array([0., 0., -1.]) #
rayvecsA = np.array([rayvecYp, rayvecYm, rayvecXp, rayvecXm, rayvecZp, rayvecZm])
Intlim, IntNorm = 2.*np.pi, 1./(4.* np.pi) #
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
# #
# $$$ Define Auxiliary Arrays $$$ #
# #
# DESCRIPTION: #
# #
# Define some auxiliary arrays to declutter "populations" #
# #
# PARAMETERS: #
# Input : freqs, EinA, T, Ener, Cul #
# Output : NomFact, SCMB/CexpF, Dtherm #
# #
def auxiliaryAr(freqs, EinA, nlevels, BgCMB = None, T = None, Ener = None, Cul = None):
    """Precompute auxiliary arrays for the population solver.

    Without ``T``: returns ``(NomFact, DkFact, SCMB)`` — the
    temperature-independent, frequency-only prefactors.
    With ``T``: returns ``(CexpF, Dtherm)`` — the collisional Boltzmann
    factors and the (FWHM-normalised) thermal linewidth.
    Module-level physical constants (h, c, Kb, Tcmb, amu, m_p, fwhm)
    are used throughout.
    """
    if not T:
        # Source-function prefactor 2 h nu^3 / c^2 per transition.
        NomFact = np.array([2. * h*freqs[p]**3/c**2
                            for p in range(nlevels - 1)])
        # Opacity prefactor c^2 A_ul / (8 pi nu^2) per transition.
        DkFact = np.array([c**2/(8.*np.pi*freqs[p]**2)*EinA[p]
                           for p in range(nlevels - 1)])
        # CMB contribution: Planck occupation number at Tcmb, or zero
        # when the background is switched off.  The explicit ``== True``
        # comparison is kept deliberately (matches the original contract).
        if BgCMB == True:
            SCMB = np.array([1. /(np.exp(h*freqs[p]/(Kb*Tcmb)) - 1)
                             for p in range(nlevels - 1)])
        else:
            SCMB = np.array([0. for _ in range(nlevels - 1)])
        return NomFact, DkFact, SCMB
    else:
        # Boltzmann factor exp(-(E_u - E_l)/kT) for every collisional
        # transition listed in Cul (1-based level indices in columns 1, 2).
        CexpF = np.array([
            np.exp(-(Ener[int(round(Cul[p, 1]) - 1)]
                     - Ener[int(round(Cul[p, 2]) - 1)]) / (Kb*T))
            for p in range(len(Cul))
        ])
        # Thermal linewidth in velocity units, normalised by the FWHM.
        Dtherm = np.sqrt(8.*Kb*T*np.log(2.)/(amu*m_p*c**2))*c/fwhm
        return CexpF, Dtherm
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
# \\ _____ _ _ _ _ // #
# \\ | ___(_) __| |_ _ ___(_) __ _| | // #
# \\ | |_ | |/ _` | | | |/ __| |/ _` | | // #
# // | _| | | (_| | |_| | (__| | (_| | | \\ #
# // |_| |_|\__,_|\__,_|\___|_|\__,_|_| \\ #
# // \\ #
# #
# $$$ Detailed Balance Equations $$$ #
# #
def eqsF(pd, densgp, molgp, gul, Cul, Clu, EinA, EinBul, EinBlu, beta, tmin, CexpF, NomFact, DkFact, SCMB):
    """Detailed-balance residuals for a two-level system.

    Returns the pair (particle conservation, statistical equilibrium of
    the lower level) that the root finder drives to zero.
    """
    n0, n1 = pd

    # Particle conservation: the two level populations must add up to the
    # total molecular density of the cell.
    conservation = n0 + n1 - molgp

    # Collisional (de)excitation balance.
    collis = densgp * (Cul[0, tmin] * n1 - n0 * (Clu[0, tmin] * CexpF[0]))
    # Escape-probability-weighted source term: CMB where photons escape,
    # the local source function (from the population ratio) where trapped.
    source = SCMB[0] * beta[0] + (1. - beta[0]) / (n0 * gul[1] / (n1 * gul[0]) - 1)
    # Spontaneous plus net stimulated radiative rates.
    radiat = EinA[0] * n1 + (EinBul[0] * n1 - EinBlu[0] * n0) * NomFact[0] * source

    return conservation, collis + radiat
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
# #
# $$$ nonLTE case from here on $$$ #
# #
def tauF(pds_a, Dtherm, densgp, mol, gul, vx, vy, vz, dx, dy, dz, index, i, k, ndims, Cul, Clu, EinA, EinBul, EinBlu, tmin, CexpF, NomFact, DkFact, SCMB):
    """Lambda-iterate the level populations of cell (index, i, k).

    Alternates between (1) computing line optical depths and escape
    probabilities from the current populations and (2) re-solving the
    detailed-balance equations (eqsF) with those escape probabilities,
    until the populations change by less than ``rtols`` (relative).
    Returns the converged populations and the final line optical depths.
    """
    dummy, counter = False, 0
    # Per-level relative tolerances for convergence of the iteration.
    rtols = np.geomspace(1e-7, 5e-6, len(pds_a))
    # Direction-averaging weight: 4 rays in 2-D (1/4), 6 rays in 3-D (~1/6).
    meanf = 0.25 if ndims == 2 else 0.16667
    while (dummy==False):
        # Fractional populations (relative to the local molecular density).
        pds_ap = pds_a/mol[index, i, k]
        t_line = []
        # Population differences n_l * g_u/g_l - n_u for every transition.
        popdiffs = pds_ap[0:-1] * gul[1:]/gul[:-1] - pds_ap[1:]
        dtl = []
        # Differential optical depth per cell (Gaussian-profile peak gives
        # the 1/(sqrt(pi) * Dtherm) factor).
        for n in range (0, len(DkFact)):
            dtl.append( DkFact[n] * mol / Dtherm / np.sqrt(np.pi) * popdiffs[n])
        dtl = np.array(dtl, dtype=np.float64)
        # Sum dtl along each direction, but only over cells whose velocity
        # is within one thermal linewidth of this cell (step-function
        # line-profile approximation); unavailable directions become NaN.
        for n in range (0, len(dtl)):
            # $$$ SPHERICAL CASE $$$ — one outward ray only.
            if ndims == 1:
                tlineXp = dtl[n, index+np.where(abs(vx[index, i, k]-vx[index+1:, i, k]) < Dtherm )[0], i, k].sum() * dx
                # Add the half-cell contribution of the cell itself.
                tlineXp = tlineXp + dtl[n, index, i, k].sum() * dx/2.
                tlineXm, tlineYp, tlineYm, tlineZp, tlineZm = np.nan, np.nan, np.nan, np.nan, np.nan
            # $$$ CYLINDRICAL CASE $$$ — +x and +y rays.
            if ndims > 1:
                tlineXp = dtl[n, index, i + np.where(abs(vx[index, i, k]-vx[index, i+1:, k]) < Dtherm )[0], k].sum() * dx
                tlineXp = tlineXp + dtl[n, index, i, k].sum() * dx /2.
                tlineYp = dtl[n, index + np.where(abs(vy[index, i, k]-vy[index+1:, i, k]) < Dtherm )[0], i, k].sum() * dy
                tlineYp = tlineYp + dtl[n, index, i, k].sum() * dy /2.
                tlineXm, tlineYm, tlineZp, tlineZm = np.nan, np.nan, np.nan, np.nan
            # $$$ CARTESIAN CASE $$$ — the remaining -x, -y, +z, -z rays.
            if ndims > 2:
                tlineXm = dtl[n, index, np.where(abs(vx[index, i, k]-vx[index, :i, k]) < Dtherm )[0], k].sum() * dx
                tlineXm = tlineXm + dtl[n, index, i, k].sum() * dx/2.
                tlineYm = dtl[n, np.where(abs(vy[index, i, k]-vy[:index, i, k]) < Dtherm )[0], i, k].sum() * dy
                tlineYm = tlineYm + dtl[n, index, i, k].sum() * dy/2.
                tlineZp = dtl[n, index, i, k+np.where(abs(vz[index, i, k]-vz[index, i, k+1:]) < Dtherm )[0]].sum() * dz
                tlineZm = dtl[n, index, i, np.where(abs(vz[index, i, k]-vz[index, i, :k]) < Dtherm )[0]].sum() * dz
                tlineZp = tlineZp + dtl[n, index, i, k].sum() * dz/2.
                tlineZm = tlineZm + dtl[n, index, i, k].sum() * dz/2.
            if mean:
                # "Mean" optical depth: weighted harmonic mean over the
                # available (non-NaN) directions (module flag ``mean``).
                t_line.append(1./ ( meanf * np.nansum ([1./tlineXp, 1./tlineXm, 1./tlineYp, 1./tlineYm, 1./tlineZp, 1./tlineZm])))
            else:
                # Otherwise take the most transparent direction.
                t_line.append(np.nanmin((tlineXp, tlineXm, tlineYp, tlineYm, tlineZp, tlineZm)))
        t_line = np.array(t_line)
        t_line = t_line.flatten()
        # Escape probability beta = (1 - e^-tau)/tau; non-finite values
        # (tau -> 0 or NaN) and values above 1 are clipped to 1.
        bet0 = (1.- np.exp(-t_line))/t_line
        bet0[[not elem for elem in np.isfinite(bet0)]] = 1.
        bet0[bet0 > 1.] = 1.
        # Re-solve detailed balance with the updated escape probabilities.
        pds_b = fsolve(eqsF, pds_a, args=(densgp, mol[index, i, k], gul, Cul, Clu, EinA, EinBul, EinBlu, bet0, tmin, CexpF, NomFact, DkFact, SCMB))
        # Relative change of the populations between successive iterates.
        check = np.absolute(np.absolute(pds_b-pds_a)/pds_a)
        if np.all(check<=rtols):
            dummy=True
        if counter>=250:
            raise SystemExit("No convergence! Last two populations computed were {} and {}. Change the tollerance or initial guess".format(pds_a, pds_b))
        counter=counter+1
        # Damped update (module weights weight1/weight2) to stabilise the
        # Lambda iteration.
        pds_a = pds_b * weight1 + pds_a * weight2
    return pds_a, t_line
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
# ____ _ __ #
# / ___| |/ / #
# | | _| ' / #
# | |_| | . \ #
# \____|_|\_\ #
# #
def GKangles(Bvec):
    """Return the angles (radians) between the magnetic-field unit vector
    and the six on-axis ray directions used for the GK integrals."""
    # rayvecsA stacks the +/- unit vectors along y, x and z, so one matrix
    # product yields all six direction cosines at once.
    return np.arccos(rayvecsA @ Bvec)
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
@jit(nopython=True)
def InterpTau(Tline, omega):
    """Escape probability along direction ``omega`` interpolated from the
    six on-axis optical depths in ``Tline``.

    Only the axis rays pointing into the same hemisphere as ``omega``
    contribute, weighted by their projection cos(theta); the weighted
    harmonic mean of tau then gives beta = (1 - exp(-tau))/tau, clipped
    to <= 1 (optically thin limit).
    """
    cos_theta = np.dot(rayvecsA, omega)
    # Rays with a positive projection on omega.
    inds = np.where(cos_theta > 0)[0]
    # Projection-weighted average of 1/tau over those rays.
    invtau = np.sum(cos_theta[inds] * 1./Tline[inds])/np.sum(cos_theta[inds])
    beta = (1. - np.exp(-1./invtau)) * invtau
    if beta > 1.: beta = 1.
    return beta
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
def SK(pd, jmjpmp, DkFact, NomFact, both):
    """Source functions and opacities per (J, m) -> (J', m') transition.

    Parameters
    ----------
    pd : sequence of 10 floats
        Magnetic-sublevel populations (n00, n10, n11, ..., n33).
    jmjpmp : array-like, shape (ntrans, 4)
        Integer quantum numbers (J_u, m_u, J_l, m_l) per transition.
    DkFact, NomFact : sequences
        Opacity and source-function prefactors per transition.
    both : bool
        When True compute the source functions S as well; otherwise S
        stays None (callers that only need the opacities k).

    Returns
    -------
    tuple of ndarray
        ``(np.array(S), np.array(k))`` — S is a 0-d object array of None
        when ``both`` is False (original contract preserved).
    """
    # Map sublevel names to populations with an explicit dict instead of
    # the previous vars()-based lookup, which silently depended on local
    # variable names and on CPython's locals() semantics.
    names = ('n00', 'n10', 'n11', 'n20', 'n21',
             'n22', 'n30', 'n31', 'n32', 'n33')
    pops = dict(zip(names, pd))

    S, k = [], []

    for p in range(0, len(jmjpmp)):
        # Degeneracy factors: 2 for m != 0 sublevels, 1 for m == 0.
        gu = 1. + (jmjpmp[p, 1] / jmjpmp[p, 1] if jmjpmp[p, 1] else 0.)
        gl = 1. + (jmjpmp[p, 3] / jmjpmp[p, 3] if jmjpmp[p, 3] else 0.)

        nu = pops["n{}{}".format(jmjpmp[p][0], jmjpmp[p][1])]
        nl = pops["n{}{}".format(jmjpmp[p][2], jmjpmp[p][3])]

        # Line opacity proportional to the population difference, weighted
        # by the larger degeneracy factor.
        k.append(DkFact[p] * np.max((gu, gl)) * (nl - nu))

        if both:
            # Source function from the population ratio.
            S.append(NomFact[p] * 1. / ((nl / nu) - 1.))
        else:
            S = None

    return np.array(S), np.array(k)
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
@jit(nopython=True)
def SparalsInt(theta, phi, S, k, TlinePa, SCMB, b, jmjpmp, Bvec, cos2):
    """Angular integrand for the parallel source-function integrals.

    For a ray direction given by (theta, phi), mixes the opacity-weighted
    source function of the J -> b line components with the CMB according
    to the interpolated escape probability, optionally weighted by an
    extra cos(gamma)**2 (``cos2``); includes the sin(phi) Jacobian.
    """
    # Cartesian components of the propagation direction.
    ux = np.cos(theta) * np.sin(phi)
    uy = np.sin(theta) * np.sin(phi)
    uz = np.cos(phi)
    # (y, x, z) ordering to match rayvecsA.
    omega = np.array([uy, ux, uz])
    # Angle between the ray and the magnetic field.
    gamma = np.arccos(np.dot(omega, Bvec))
    # Escape probability along omega from the six on-axis depths.
    beta = InterpTau(TlinePa, omega)
    # Opacity-weighted source function: Dm = 0 components carry a
    # sin^2(gamma) weight, Dm != 0 components carry cos^2(gamma)/2.
    tempN, tempD = 0., 0.
    for p in range (0, len(jmjpmp)):
        if (jmjpmp[p, 1] - jmjpmp[p, 3]) == 0. and jmjpmp[p, 2] == b:
            tempN, tempD = tempN + np.sin(gamma)**2 * S[p] * k[p], tempD + np.sin(gamma)**2 * k[p]
        elif (jmjpmp[p, 1] - jmjpmp[p, 3]) != 0. and jmjpmp[p, 2] == b:
            tempN, tempD = tempN + 0.5 * np.cos(gamma)**2 * S[p] * k[p], tempD + 0.5 * np.cos(gamma)**2 * k[p]
    if cos2:
        # Variant with the additional cos^2(gamma) weighting.
        return np.cos(gamma)**2 * (tempN/tempD * (1. - beta) + SCMB * beta) * np.sin(phi)
    else:
        return (tempN/tempD * (1. - beta) + SCMB * beta) * np.sin(phi)
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
@jit(nopython=True)
def SperpsInt(theta, phi, Sperp, SCMB, TlinePe):
    """Angular integrand for the perpendicular source-function integral:
    escape-probability-weighted mix of the local source function and the
    CMB, times the sin(phi) solid-angle Jacobian."""
    sinphi = np.sin(phi)
    # Propagation direction in the (y, x, z) ordering used throughout.
    direction = np.array([np.sin(theta) * sinphi,
                          np.cos(theta) * sinphi,
                          np.cos(phi)])
    esc = InterpTau(TlinePe, direction)
    return (Sperp * (1. - esc) + SCMB * esc) * sinphi
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
def GKEm(pd, NomFact, DkFact, SCMB, TlinePe, TlinePa, jmjpmp, Bvec):
    """Compute the GK radiative-rate arrays R (Dm = 0) and U (Dm = 1)
    by angular integration of the source-function integrands over the
    full sphere, one entry per lower level b.
    """
    # Source functions and opacities for every magnetic-sublevel line.
    S, k = SK(pd, jmjpmp, DkFact, NomFact, True)
    U, R = [], []
    for b in range (0, len(TlinePe)):
        # Index of any transition with lower level J = b.  NomFact and
        # SCMB depend only on the line frequency, so the exact component
        # does not matter ([1,0,0,0] and [1,1,0,0] share the same f0).
        ind = np.where(jmjpmp[:, 2] == b)[0][0]
        # Opacity-weighted perpendicular source function over the
        # Dm != 0 components of this line.
        tempN, tempD = 0., 0.
        for p in range (0, len(jmjpmp)):
            if (jmjpmp[p, 1] - jmjpmp[p, 3]) != 0. and jmjpmp[p, 2] == b:
                tempN, tempD = tempN + S[p]*k[p], tempD + k[p]
        Sperp = tempN/tempD
        # Angular integrals over theta in [0, Intlim] and phi in [0, pi];
        # the quadrature error estimate is discarded.
        Sp, error = nquad(SparalsInt, [(0, Intlim), (0, np.pi)], args=(S, k, TlinePa[b], SCMB[ind], b, jmjpmp, Bvec, False))
        SpInt, error = nquad(SparalsInt, [(0, Intlim), (0, np.pi)], args=(S, k, TlinePa[b], SCMB[ind], b, jmjpmp, Bvec, True))
        Uperp, error = nquad(SperpsInt, [(0, Intlim), (0, np.pi)], args=(Sperp, SCMB[ind], TlinePe[b]))
        # Combine the pieces, normalised by 1/(4 pi) (IntNorm).
        U.append((Uperp + SpInt) * IntNorm) ; R.append((Sp - SpInt) * IntNorm)
    # Final multiplicities: 3/2 for Dm = 1, 3 for Dm = 0.
    U, R = np.array(U) * 1.5, np.array(R) * 3.
    return R, U
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
def eqsGK(pd, densgp, molgp, Cul, Clu, EinA, EinBul, EinBlu, TlinePe, TlinePa, tmin, CexpF, NomFact, DkFact, SCMB, CulGK, CluGK, jmjpmp, Bvec):
    """Statistical-equilibrium residuals for the GK magnetic-sublevel system.

    ``pd`` holds the ten sublevel populations n_{J,|m|} for J = 0..3.
    Returns one residual per population: eq1 enforces number conservation
    (sublevels with m != 0 are doubly degenerate, hence the factors 2),
    eq2-eq10 balance spontaneous emission (EinA), collisions (Cul/Clu at
    temperature index ``tmin``, with detailed-balance factors CexpF),
    sublevel-mixing collisions (CulGK/CluGK) and stimulated terms driven by
    the angle-averaged fields R (Delta m = 0) and U (Delta m = 1) computed
    by GKEm.  Intended to be driven to zero by scipy.optimize.fsolve.
    """
    # U corresponds to => Dm = 1 and R to => Dm = 0.
    # Bvec is the (normalized) field direction at the current grid point.
    R, U = GKEm(pd, NomFact, DkFact, SCMB, TlinePe, TlinePa, jmjpmp, Bvec)
    n00, n10, n11, n20, n21, n22, n30, n31, n32, n33 = pd
    # Total-population constraint.
    eq1 = n00 + n10 + 2.0 * n11 + n20 + 2.0 * n21 + 2.0 * n22 + n30 + 2.0 * n31 + 2.0 * n32 + 2.0 * n33 -molgp
    # Rate balance for each (J, |m|) sublevel; trailing tag gives [J, m].
    eq2 = n10 * EinA[0] + 2. * n11 * EinA[1] + densgp * ( n10 * Cul[0, tmin]+ 2. * n11 * Cul[1, tmin]+ n20 * Cul[2, tmin]+ 2. * n21 * Cul[3, tmin]+ 2. * n22 * Cul[4, tmin]+ n30 * Cul[11, tmin]+ 2. * n31 * Cul[12, tmin]+ 2. * n32 * Cul[13, tmin]+ 2. * n33 * Cul[14, tmin]- n00 * ( Clu[0, tmin] * CexpF[0] + 2. * Clu[1, tmin] * CexpF[1] + Clu[2, tmin] * CexpF[2] + 2. * Clu[3, tmin] * CexpF[3] + 2. * Clu[4, tmin] * CexpF[4] + Clu[11, tmin] * CexpF[11] + 2. * Clu[12, tmin] * CexpF[12] + 2. * Clu[13, tmin] * CexpF[13] + 2. * Clu[14, tmin] * CexpF[14]))+ R[0] * EinBul[0] * (n10 - n00)+ U[0] * 2. * EinBul[1] * (n11 - n00) # [0, 0]
    eq3 = n20 * EinA[2] + 2. * n21 * EinA[3] - n10 * (EinA[0] ) +densgp * ( n20 * Cul[5, tmin]+ 2. * n21 * Cul[7, tmin]+ 2. * n22 * Cul[9, tmin]+ n30 * Cul[15, tmin]+ 2. * n31 * Cul[17, tmin]+ 2. * n32 * Cul[19, tmin]+ 2. * n33 * Cul[21, tmin]- n10 * ( Clu[5, tmin] * CexpF[5] + 2. * Clu[7, tmin] * CexpF[7] + 2. * Clu[9, tmin] * CexpF[9] + Clu[15, tmin] * CexpF[15] + 2. * Clu[17, tmin] * CexpF[17] + 2. * Clu[19, tmin] * CexpF[19] + 2. * Clu[21, tmin] * CexpF[21] + Cul[0, tmin])+ n00 * Clu[0, tmin] * CexpF[0]+ 2.* CulGK[0, tmin] * n11 - 2. * CluGK[0, tmin] * n10)+ R[1] * EinBul[2] * (n20 - n10)+ U[1] * 2. * EinBul[3] * (n21 - n10)- R[0] * EinBul[0] * (n10 - n00) # [1, 0]
    eq4 = n20 * EinA[4] + n21 * EinA[5] + n22 * EinA[6] - n11 * (EinA[1] ) +densgp * ( n20 * Cul[6, tmin]+ 2. * n21 * Cul[8, tmin]+ 2. * n22 * Cul[10, tmin]+ n30 * Cul[16, tmin]+ 2. * n31 * Cul[18, tmin]+ 2. * n32 * Cul[20, tmin]+ 2. * n33 * Cul[22, tmin]- n11 * ( Clu[6, tmin] * CexpF[6] + 2. * Clu[8, tmin] * CexpF[8] + 2. * Clu[10, tmin] * CexpF[10] + Clu[16, tmin] * CexpF[16] + 2. * Clu[18, tmin] * CexpF[18] + 2. * Clu[20, tmin] * CexpF[20] + 2. * Clu[22, tmin] * CexpF[22] + Cul[1, tmin])+ n00 * Clu[1, tmin] * CexpF[1]+ CulGK[0, tmin] * n10 - CluGK[0, tmin] * n11)+ U[1] * EinBul[4] * (n20 - n11)+ R[1] * EinBul[5] * (n21 - n11)+ U[1] * EinBul[6] * (n22 - n11)- U[0] * EinBul[1] * (n11 - n00) # [1, 1]
    eq5 = n30 * EinA[7] + 2. * n31 * EinA[8] - n20 * (EinA[2] + 2. * EinA[4] ) +densgp * ( n30 * Cul[23, tmin]+ 2. * n31 * Cul[26, tmin]+ 2. * n32 * Cul[29, tmin]+ 2. * n33 * Cul[32, tmin]- n20 * ( Clu[23, tmin] * CexpF[23] + 2. * Clu[26, tmin] * CexpF[26] + 2. * Clu[29, tmin] * CexpF[29] + 2. * Clu[32, tmin] * CexpF[32] + Cul[2, tmin] + Cul[5, tmin] + 2. * Cul[6, tmin])+ n00 * Clu[2, tmin] * CexpF[2]+ n10 * Clu[5, tmin] * CexpF[5]+ 2. * n11 * Clu[6, tmin] * CexpF[6]+ 2.* CulGK[5, tmin] * n21 - 2. * CluGK[5, tmin] * n20+ 2.* CulGK[5, tmin] * n22 - 2. * CluGK[5, tmin] * n20)+ R[2] * EinBul[7] * (n30 - n20)+ U[2] * 2. * EinBul[8] * (n31 - n20)- R[1] * EinBul[2] * (n20 - n10)- U[1] * 2. * EinBul[4] * (n20 - n11)
    eq6 = n30 * EinA[9] + n31 * EinA[10] + n32 * EinA[11] - n21 * (EinA[3] + EinA[5] ) +densgp * ( n30 * Cul[24, tmin]+ 2. * n31 * Cul[27, tmin]+ 2. * n32 * Cul[30, tmin]+ 2. * n33 * Cul[33, tmin]- n21 * ( Clu[24, tmin] * CexpF[24] + 2. * Clu[27, tmin] * CexpF[27] + 2. * Clu[30, tmin] * CexpF[30] + 2. * Clu[33, tmin] * CexpF[33] + Cul[3, tmin] + Cul[7, tmin] + 2. * Cul[8, tmin])+ n00 * Clu[3, tmin] * CexpF[3]+ n10 * Clu[7, tmin] * CexpF[7]+ 2. * n11 * Clu[8, tmin] * CexpF[8]+ CulGK[5, tmin] * n20 - CluGK[5, tmin] * n21+ 2. * CulGK[5, tmin] * n22 - 2. * CluGK[5, tmin] * n21)+ U[2] * EinBul[9] * (n30 - n21)+ R[2] * EinBul[10] * (n31 - n21)+ U[2] * EinBul[11] * (n32 - n21)- U[1] * EinBul[3] * (n21 - n10)- R[1] * EinBul[5] * (n21 - n11) # [2, 1]
    eq7 = n31 * EinA[12] + n32 * EinA[13] + n33 * EinA[14] - n22 * (EinA[6] ) +densgp * ( n30 * Cul[25, tmin]+ 2. * n31 * Cul[28, tmin]+ 2. * n32 * Cul[31, tmin]+ 2. * n33 * Cul[34, tmin]- n22 * ( Clu[25, tmin] * CexpF[25] + 2. * Clu[28, tmin] * CexpF[28] + 2. * Clu[31, tmin] * CexpF[31] + 2. * Clu[34, tmin] * CexpF[34] + Cul[4, tmin] + Cul[9, tmin] + 2. * Cul[10, tmin])+ n00 * Clu[4, tmin] * CexpF[4]+ n10 * Clu[9, tmin] * CexpF[9]+ 2. * n11 * Clu[10, tmin] * CexpF[10]+ CulGK[5, tmin] * n20 - CluGK[5, tmin] * n22+ 2. * CulGK[5, tmin] * n21 - 2. * CluGK[5, tmin] * n22)+ U[2] * EinBul[12] * (n31 - n22)+ R[2] * EinBul[13] * (n32 - n22)+ U[2] * EinBul[14] * (n33 - n22)- U[1] * EinBul[6] * (n22 - n11) # [2, 2]
    eq8 = - n30 * (EinA[7] + 2. * EinA[9] ) +densgp * ( - n30 * ( Cul[11, tmin] + Cul[15, tmin] + 2. * Cul[16, tmin] + Cul[23, tmin] + 2. * Cul[24, tmin] + 2. * Cul[25, tmin])+ n00 * Clu[11, tmin] * CexpF[11]+ n10 * Clu[15, tmin] * CexpF[15]+ 2. * n11 * Clu[16, tmin] * CexpF[16]+ n20 * Clu[23, tmin] * CexpF[23]+ 2. * n21 * Clu[24, tmin] * CexpF[24]+ 2. * n22 * Clu[25, tmin] * CexpF[25]+ 2.* CulGK[23, tmin] * n31 - 2. * CluGK[23, tmin] * n30+ 2.* CulGK[23, tmin] * n32 - 2. * CluGK[23, tmin] * n30+ 2.* CulGK[23, tmin] * n33 - 2. * CluGK[23, tmin] * n30)- R[2] * EinBul[7] * (n30 - n20)- U[2] * 2. * EinBul[9] * (n30 - n21) # [3, 0]
    eq9 = - n31 * (EinA[8] + EinA[10] + EinA[12] ) +densgp * ( - n31 * ( Cul[12, tmin] + Cul[17, tmin] + 2. * Cul[18, tmin] + Cul[26, tmin] + 2. * Cul[27, tmin] + 2. * Cul[28, tmin])+ n00 * Clu[12, tmin] * CexpF[12]+ n10 * Clu[17, tmin] * CexpF[17]+ 2. * n11 * Clu[18, tmin] * CexpF[18]+ n20 * Clu[26, tmin] * CexpF[26]+ 2. * n21 * Clu[27, tmin] * CexpF[27]+ 2. * n22 * Clu[28, tmin] * CexpF[28]+ CulGK[23, tmin] * n30 - CluGK[23, tmin] * n31+ 2. * CulGK[23, tmin] * n32 - 2. * CluGK[23, tmin] * n31+ 2. * CulGK[23, tmin] * n33 - 2. * CluGK[23, tmin] * n31)- U[2] * EinBul[8] * (n31 - n20)- R[2] * EinBul[10] * (n31 - n21)- U[2] * EinBul[12] * (n31 - n22) # [3, 1]
    eq10 = - n32 * (EinA[11] + EinA[13] ) +densgp * ( - n32 * ( Cul[13, tmin] + Cul[19, tmin] + 2. * Cul[20, tmin] + Cul[29, tmin] + 2. * Cul[30, tmin] + 2. * Cul[31, tmin])+ n00 * Clu[13, tmin] * CexpF[13]+ n10 * Clu[19, tmin] * CexpF[19]+ 2. * n11 * Clu[20, tmin] * CexpF[20]+ n20 * Clu[29, tmin] * CexpF[29]+ 2. * n21 * Clu[30, tmin] * CexpF[30]+ 2. * n22 * Clu[31, tmin] * CexpF[31]+ CulGK[23, tmin] * n30 - CluGK[23, tmin] * n32+ 2. * CulGK[23, tmin] * n31 - 2. * CluGK[23, tmin] * n32+ 2. * CulGK[23, tmin] * n33 - 2. * CluGK[23, tmin] * n32)- U[2] * EinBul[11] * (n32 - n21)- R[2] * EinBul[13] * (n32 - n22) # [3, 2]
    return eq1, eq2, eq3, eq4, eq5, eq6, eq7, eq8, eq9, eq10
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
# #
#       $$$  Calculate optical depths corresponding to // and _|_  $$$      #
# $$$ Verified multiple times that GK/DW formalisms $$$ #
# $$$ are equivalent for a 2-level molecule $$$ #
# #
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
def GKOd(pd, DkFact, gammas, jmjpmp, jlevels):
    """Per-cell optical-depth increments perpendicular and parallel to the
    magnetic field for each rotational line.

    GK/DW formalisms were verified multiple times to be equivalent for a
    2-level molecule (original author's note).

    Returns
    -------
    dtauPerp : ndarray, shape (J-LEVELS,)
        Perpendicular (Delta m != 0) depth increment per line.
    dtauParal : ndarray, shape (J-LEVELS, 6)
        Parallel increment along the 6 ray directions Y+, Y-, X+, X-, Z+, Z-,
        projected with the field angles ``gammas``.
    """
    S, dtau = SK(pd, jmjpmp, DkFact, 0, False)
    dtauPerp, dtauParal = [], []
    for t in range (len(jlevels)-1):
        tempPerp, tempParal = 0., np.zeros(6)
        for p in range (0, len(jmjpmp)):
            # Delta m != 0 components contribute to the perpendicular depth.
            if (jmjpmp[p, 1] - jmjpmp[p, 3]) !=0 and jmjpmp[p, 2] == jlevels[t]:
                tempPerp = tempPerp + dtau[p]
            # Delta m == 0 (pi) components are weighted by sin^2(gamma)
            # for each of the 6 ray directions.
            elif (jmjpmp[p, 1] - jmjpmp[p, 3]) ==0 and jmjpmp[p, 2] == jlevels[t]:
                for g in range (len(gammas)):
                    tempParal[g] = tempParal[g] + dtau[p] * np.sin(gammas[g]) **2
        dtauPerp.append(tempPerp)
        # Half of the perpendicular depth, projected with cos^2(gamma),
        # also contributes to the parallel component.
        for g in range (len(gammas)):
            tempParal[g] = tempParal[g] + tempPerp * 0.5 * np.cos(gammas[g]) ** 2
        dtauParal.append(tempParal)
    # The factor 0.5 splits the perpendicular depth between the two
    # sigma components.
    dtauPerp, dtauParal = 0.5 * np.array(dtauPerp), np.array(dtauParal)
    return dtauPerp, dtauParal
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *#
def tauGK(pd_a, Dtherm, densgp, mol, vx, vy, vz, dx, dy, dz, index, i, k, ndims, Cul, Clu, EinA, EinBul, EinBlu, tmin, CexpF, NomFact, DkFact, SCMB, CulGK, CluGK, jmjpmp, gammas, normTau, Bvec):
    """Iteratively solve the GK level populations at grid point
    (index, i, k), coupling them to the line optical depths.

    On each pass the current populations give per-cell depth increments
    (GKOd), which are summed along the 6 Cartesian rays over the cells
    that are Doppler-coupled (velocity difference < Dtherm); the resulting
    depths feed eqsGK, which is re-solved with fsolve.  Iteration stops
    when the relative change of every population is below ``rtols``.

    Returns the converged populations and the perpendicular line depths.
    """
    dummy, counter = False, 0
    jlevels = np.unique(np.concatenate((jmjpmp[:, 0], jmjpmp[:, 2])))
    # Per-population relative-convergence tolerances.
    rtols = np.geomspace(1e-8, 5e-7, len(pd_a))
    # Indices of cells along each ray whose line-of-sight velocity is
    # within one thermal width of the local cell (Doppler-coupled).
    # NOTE(review): the '+ i' / '+ index' / '+ k' offsets index into
    # slices that start at i+1 / index+1 / k+1, so the absolute indices
    # look off by one — confirm against the intended stencil.
    indsXp = i + np.where(abs(vx[index, i, k]-vx[index, i+1:, k]) < Dtherm )[0]
    indsXm = np.where(abs(vx[index, i, k]-vx[index, :i, k]) < Dtherm )[0]
    indsYp = index + np.where(abs(vy[index, i, k]-vy[index+1:, i, k]) < Dtherm )[0]
    indsYm = np.where(abs(vy[index, i, k]-vy[:index, i, k]) < Dtherm )[0]
    if ndims > 2:
        indsZp = k+np.where(abs(vz[index, i, k]-vz[index, i, k+1:]) < Dtherm )[0]
        indsZm = np.where(abs(vz[index, i, k]-vz[index, i, :k]) < Dtherm )[0]
    # $$$ Cylindrical case now $$$
    else:
        # Mirror cells across the symmetry axes for the 2-D (cylindrical) grid.
        indsXm2 = np.where(abs(vx[index, i, k] + np.fliplr(vx)[index, :, k] ) < Dtherm )[0]
        indsYm2 = np.where(abs(vy[index, i, k] + np.flipud(vx)[:, i, k] ) < Dtherm )[0]
        # z (i.e. y) velocity component now; setting i in the "k-location"
        # is not a mistake (original author's note).
        indsZp = np.where(abs(vz[index, :, i]) < Dtherm )[0]
        indsZm = indsZp
    while not dummy:
        # Fractional populations feed the depth calculation.
        pds_ap = pd_a/mol[index, i, k]
        TlinePe, TlinePa = [], []
        dtlPerp, dtlParal = GKOd(pds_ap, DkFact, gammas, jmjpmp, jlevels)
        for n in range (len(dtlPerp)):
            # ---- Finalize Tau _|_ first ----
            JTauPe = dtlPerp[n] * normTau
            tl_Xp, tl_Xm = JTauPe[index, indsXp, k].sum() * dx, JTauPe[index, indsXm, k].sum() * dx
            # Local cell contributes half a cell width in each direction.
            tl_Xp, tl_Xm = tl_Xp + JTauPe[index, i, k] * dx/2., tl_Xm + JTauPe[index, i, k] * dx/2.
            tl_Yp, tl_Ym = JTauPe[indsYp, i, k].sum() * dy, JTauPe[indsYm, i, k].sum() * dy
            tl_Yp, tl_Ym = tl_Yp + JTauPe[index, i, k].sum() * dy/2., tl_Ym + JTauPe[index, i, k].sum() * dy/2.
            tl_Zp, tl_Zm = JTauPe[index, i, indsZp].sum() * dz, JTauPe[index, i, indsZm].sum() * dz
            tl_Zp, tl_Zm = tl_Zp + JTauPe[index, i, k] * dz/2., tl_Zm + JTauPe[index, i, k] * dz/2.
            if ndims == 2:
                # NOTE(review): tl_Xm is overwritten (not incremented) here,
                # unlike tl_Ym — confirm this is the intended mirror handling.
                tl_Ym, tl_Xm = tl_Ym + JTauPe[indsYm2, i, k].sum() * dy, JTauPe[index, indsXm2, k].sum() * dx
            TlinePe.append([tl_Yp, tl_Ym, tl_Xp, tl_Xm, tl_Zp, tl_Zm])
            # ---- Finalize Tau // now ----
            # Y+, Y- (rayvecsA[0:2])
            JTauPap, JTauPam = dtlParal[n, 0] * normTau, dtlParal[n, 1] * normTau
            tl_Yp, tl_Ym = JTauPap[indsYp, i, k].sum() * dy, JTauPam[indsYm, i, k].sum() * dy
            tl_Yp, tl_Ym = tl_Yp + JTauPap[index, i, k].sum() * dy/2., tl_Ym + JTauPam[index, i, k].sum() * dy/2.
            # X+, X- (rayvecsA[2:4])
            # NOTE(review): these X-direction sums use dy, while the
            # perpendicular X sums above use dx — confirm (harmless only
            # when dx == dy).
            JTauPap, JTauPam = dtlParal[n, 2] * normTau, dtlParal[n, 3] * normTau
            tl_Xp, tl_Xm = JTauPap[index, indsXp, k].sum() * dy, JTauPam[index, indsXm, k].sum() * dy
            tl_Xp, tl_Xm = tl_Xp + JTauPap[index, i, k].sum() * dy/2., tl_Xm + JTauPam[index, i, k].sum() * dy/2.
            # Z+, Z- (rayvecsA[4:6])
            JTauPap, JTauPam = dtlParal[n, 4] * normTau, dtlParal[n, 5] * normTau
            tl_Zp, tl_Zm = JTauPap[index, i, indsZp].sum() * dz, JTauPam[index, i, indsZm].sum() * dz
            tl_Zp, tl_Zm = tl_Zp + JTauPap[index, i, k].sum() * dz/2., tl_Zm + JTauPam[index, i, k].sum() * dz/2.
            if ndims == 2:
                # NOTE(review): JTauPap (not JTauPam) is used for both mirror
                # terms, and tl_Xm is overwritten — confirm intended.
                tl_Ym, tl_Xm = tl_Ym + JTauPap[indsYm2, i, k].sum() * dy, JTauPap[index, indsXm2, k].sum() * dx
            TlinePa.append([tl_Yp, tl_Ym, tl_Xp, tl_Xm, tl_Zp, tl_Zm])
        TlinePe, TlinePa = np.array(TlinePe), np.array(TlinePa)
        # Re-solve the statistical equilibrium with the updated depths.
        pd_b = fsolve(eqsGK, pd_a, args=(densgp, mol[index, i, k], Cul, Clu, EinA, EinBul, EinBlu, TlinePe, TlinePa, tmin, CexpF, NomFact, DkFact, SCMB, CulGK, CluGK, jmjpmp, Bvec))
        check = np.absolute(np.absolute(pd_b-pd_a)/pd_a)
        if np.all(check<=rtols):
            dummy=True
        if counter>=250:
            # BUG FIX: the original formatted undefined names (pds_a/pds_b),
            # so hitting this branch raised NameError instead of the message.
            raise SystemExit("No convergence! Last two populations computed were {} and {}. Change tolerance and/or initial guess".format(pd_a, pd_b))
        counter=counter+1
        # Under-relaxed update (weight1/weight2 are module-level constants).
        pd_a = pd_b * weight1 + pd_a * weight2
    return pd_a, TlinePe
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
def populations(nonLTE, BgCMB, ndims, dens, mol, T, vx, dx, vy, dy, vz, dz, y, chopped_ys, Cul, Clu, EinA, EinBul, EinBlu, Ener, freqs, gul, tempers, GK, Bx, By, Bz, numlevels, CulGK=None, CluGK=None, jmjpmp = None):
    """Compute level populations (and line optical depths) over the grid.

    Loops over the y-slab ``chopped_ys`` and every (i, k) grid point,
    solving either the standard statistical-equilibrium system (eqsF/tauF)
    or, when ``GK`` is True, the magnetic-sublevel GK system (eqsGK/tauGK).
    When ``nonLTE`` is True the solution is iterated against the line
    optical depths; otherwise escape probabilities of unity are assumed.

    Returns
    -------
    LevPops : ndarray
        Populations per (slab, i, k) grid point.
    TLINE : ndarray
        Line optical depths (averaged over ray directions in the GK case).
    """
    # Initialize betas, initial guesses and method of solution.
    nlevels = len(EinA) + 1
    NomFact, DkFact, SCMB = auxiliaryAr(freqs, EinA, nlevels, BgCMB)
    # njm: number of unknown (sub)populations; nbetas: number of lines.
    njm, nbetas = nlevels, nlevels - 1
    if GK == True:
        DkFact, NomFact = DkFact * 3., NomFact/2.
        # One unknown per (J, |m|) sublevel: sum_{J=0}^{numlevels-1} (J+1).
        njm, nbetas = np.sum(range(numlevels+1)), numlevels - 1
        SCMB = SCMB * NomFact
    LevPops, TLINE = [], []
    size = np.array(dens.shape)
    if ndims==2: size[2] = 1
    for j in range (0, len(chopped_ys)):
        index=np.where(y==chopped_ys[j])[0][0]
        temp, tempL = [], []
        for i in range (size[1]):
            temp2, tempL2 = [], []
            # Reuse the converged populations of the previous k as the
            # initial guess along each column; only the first point of the
            # column starts from the log-spaced guess below.
            FirstCall = True
            for k in range (size[2]):
                bet0 = np.ones(nbetas)
                if FirstCall: pd_a, FirstCall = np.log10(np.logspace(1.5/njm, 0.5/njm, njm)) * mol[index, i, k], False
                # Nearest tabulated temperature (offset 3 skips header columns).
                tmin = np.argmin(np.absolute(tempers - T[index, i, k])) + 3
                CexpF, Dtherm = auxiliaryAr(freqs, EinA, nlevels, BgCMB, T[index, i, k], Ener, Cul)
                t_line = np.zeros(nbetas)
                # Some code duplication considering the two cases separately,
                # but better keep it clean (original author's note).
                densgp, molgp = dens[index, i, k], mol[index, i, k]
                if not GK:
                    pd_a=fsolve(eqsF, pd_a, args=(densgp, molgp, gul, Cul, Clu, EinA, EinBul, EinBlu, bet0, tmin, CexpF, NomFact, DkFact, SCMB))
                    if nonLTE == True:
                        pd_a, t_line = tauF(pd_a, Dtherm, densgp, mol, gul, vx, vy, vz, dx, dy, dz, index, i, k, ndims, Cul, Clu, EinA, EinBul, EinBlu, tmin, CexpF, NomFact, DkFact, SCMB)
                else:
                    # Normalized magnetic-field direction at this point
                    # (component order (By, Bx, Bz) matches SperpsInt).
                    Bvec = np.array([By[index, i, k], Bx[index, i, k], Bz[index, i, k]])
                    Bvec = Bvec/np.linalg.norm(Bvec)
                    gammas = GKangles(Bvec)
                    # 6.705e-9 below is such that beta = (1 - e^tau)/tau ~ 1,
                    # i.e. we start from LTE.
                    TlinePe, TlinePa = np.ones((nbetas, 6)) * 6.705e-9, np.ones((nbetas, 6)) * 6.705e-9
                    pd_a = fsolve(eqsGK, pd_a, args=(densgp, molgp, Cul, Clu, EinA, EinBul, EinBlu, TlinePe, TlinePa, tmin, CexpF, NomFact, DkFact, SCMB, CulGK, CluGK, jmjpmp, Bvec))
                    if nonLTE == True:
                        # Gaussian line-profile normalization for the depth sums.
                        normTau = mol / Dtherm / np.sqrt(np.pi)
                        pd_a, t_line = tauGK(pd_a, Dtherm, densgp, mol, vx, vy, vz, dx, dy, dz, index, i, k, ndims, Cul, Clu, EinA, EinBul, EinBlu, tmin, CexpF, NomFact, DkFact, SCMB, CulGK, CluGK, jmjpmp, gammas, normTau, Bvec)
                temp2.append(pd_a) ; tempL2.append(t_line)
            temp.append(temp2) ; tempL.append(tempL2)
        LevPops.append(temp) ; TLINE.append(tempL)
    LevPops, TLINE = np.array(LevPops), np.array(TLINE)
    # GK depths carry a direction axis; report the directional mean.
    if GK: TLINE = np.mean(TLINE, axis = 4)
    return LevPops, TLINE
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
|
ArisTrREPO_NAMEPyRaTEPATH_START.@PyRaTE_extracted@PyRaTE-master@PyRaTE@populations.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/BSSN/__init__.py",
"type": "Python"
}
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@BSSN@__init__.py@.PATH_END.py
|
|
{
"filename": "README.md",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/tests/unit/pipeline/sherlock/README.md",
"type": "Markdown"
}
|
### Unit tests for Sherlock wrapper
`test_sherlock_wrapper.py`
Multiple tests grouped into categories: consumer, producer, classifier.
It uses the following auxiliary files and directories:
* example_ingested.json
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@tests@unit@pipeline@sherlock@README.md@.PATH_END.py
|
{
"filename": "_ticklen.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/marker/colorbar/_ticklen.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``ticklen`` property of ``scatter3d.marker.colorbar``."""

    def __init__(
        self, plotly_name="ticklen", parent_name="scatter3d.marker.colorbar", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        super(TicklenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@marker@colorbar@_ticklen.py@.PATH_END.py
|
{
"filename": "radial_profile_styles.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/doc/source/cookbook/radial_profile_styles.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt

import yt

# Load the dataset and select a 500 kpc sphere around the domain center.
ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")

sp = ds.sphere(ds.domain_center, (500.0, "kpc"))

# Bin up the data from the sphere into a radial profile
# (linear radius axis, expressed in kpc).
rp = yt.create_profile(
    sp,
    "radius",
    [("gas", "density"), ("gas", "temperature")],
    units={"radius": "kpc"},
    logs={"radius": False},
)

# Make plots using matplotlib
fig = plt.figure()
ax = fig.add_subplot(111)

# Plot the density as a log-log plot using the default settings
dens_plot = ax.loglog(rp.x.value, rp["gas", "density"].value)

# Here we set the labels of the plot axes
ax.set_xlabel(r"$\mathrm{r\ (kpc)}$")
ax.set_ylabel(r"$\mathrm{\rho\ (g\ cm^{-3})}$")

# Save the default plot.
# BUG FIX: the original used '"..." % ds' on a format string with no
# conversion specifier, which raises TypeError at runtime.
fig.savefig("density_profile_default.png")

# The "dens_plot" object is a list of Line2D objects. In our case we only
# have one, so we index the list by '0' to get it.

# Plot using dashed red lines
dens_plot[0].set_linestyle("--")
dens_plot[0].set_color("red")

fig.savefig("density_profile_dashed_red.png")

# Increase the line width and add points in the shape of x's
dens_plot[0].set_linewidth(5)
dens_plot[0].set_marker("x")
dens_plot[0].set_markersize(10)

fig.savefig("density_profile_thick_with_xs.png")
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@doc@source@cookbook@radial_profile_styles.py@.PATH_END.py
|
{
"filename": "polygon.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/geometry/polygon.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from .triangulation import Triangulation
class PolygonData(object):
    """Polygon class for data handling

    Parameters
    ----------
    vertices : (Nv, 3) array
        Vertex coordinates. If faces is not specified, then this will instead
        be interpreted as (Nf, 3, 3) array of coordinates.
    edges : (Nv, 2) array
        Constraining edges specified by vertex indices.
    faces : (Nf, 3) array
        Indexes into the vertex array.

    Notes
    -----
    All arguments are optional.
    """

    def __init__(self, vertices=None, edges=None, faces=None):
        self._vertices = vertices
        self._edges = edges
        self._faces = faces
        self._convex_hull = None

    @property
    def faces(self):
        """Return an array (Nf, 3) of vertex indexes, three per triangular
        face in the mesh.

        If faces have not been computed for this mesh, the function
        computes them.
        If no vertices or faces are specified, the function returns None.
        """
        if self._faces is None:
            if self._vertices is None:
                return None
            self.triangulate()
        return self._faces

    @faces.setter
    def faces(self, f):
        """Set the faces array.

        NOTE(review): the original docstring claimed vertices would be
        regenerated from incompatible faces; that is not implemented here.
        """
        self._faces = f

    @property
    def vertices(self):
        """Return an array (Nf, 3) of vertices.

        If only faces exist, the function computes the vertices and
        returns them.
        If no vertices or faces are specified, the function returns None.
        """
        if self._faces is None:
            if self._vertices is None:
                return None
            self.triangulate()
        return self._vertices

    @vertices.setter
    def vertices(self, v):
        """Set the vertices array.

        NOTE(review): the original docstring claimed faces would be
        regenerated from incompatible vertices; that is not implemented here.
        """
        self._vertices = v

    @property
    def edges(self):
        """Return an array (Nv, 2) of vertex indices.

        If no vertices or faces are specified, the function returns None.
        """
        return self._edges

    @edges.setter
    def edges(self, e):
        """Set the constraining edges.

        NOTE(review): no validation is performed despite the original
        docstring's claim ("Ensures that all edges are valid").
        """
        self._edges = e

    @property
    def convex_hull(self):
        """Return an array of vertex indexes representing the convex hull.

        If faces have not been computed for this mesh, the function
        computes them.
        If no vertices or faces are specified, the function returns None.

        NOTE(review): triangulate() does not currently populate
        ``self._convex_hull``, so this may still return None — confirm
        whether Triangulation exposes the hull so it can be stored.
        """
        if self._faces is None:
            if self._vertices is None:
                return None
            self.triangulate()
        return self._convex_hull

    def triangulate(self):
        """
        Triangulates the set of vertices.

        BUG FIX: the original computed the triangulation but never stored
        it, so the lazy ``faces``/``vertices`` properties above returned
        None despite their documented contract.  The results are now stored
        on the instance and also returned as ``(pts, tris)``.
        """
        npts = self._vertices.shape[0]
        # A closed polygon repeats its first vertex at the end; compare first
        # and LAST vertex.  (The original compared index 0 with index 1,
        # which almost never detects closure and yields a degenerate edge.)
        if np.any(self._vertices[0] != self._vertices[-1]):
            # start != end, so edges must wrap around to beginning.
            edges = np.empty((npts, 2), dtype=np.uint32)
            edges[:, 0] = np.arange(npts)
            edges[:, 1] = edges[:, 0] + 1
            edges[-1, 1] = 0
        else:
            # start == end; no wrapping required.
            edges = np.empty((npts-1, 2), dtype=np.uint32)
            edges[:, 0] = np.arange(npts)
            edges[:, 1] = edges[:, 0] + 1
        tri = Triangulation(self._vertices, edges)
        tri.triangulate()
        # Store so the lazy properties work; keep the return for callers
        # that use the (pts, tris) tuple directly.
        self._vertices, self._faces = tri.pts, tri.tris
        return tri.pts, tri.tris

    def add_vertex(self, vertex):
        """
        Adds given vertex and retriangulates to generate new faces.

        Parameters
        ----------
        vertex : array-like
            The vertex to add.
        """
        raise NotImplementedError
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@geometry@polygon.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "solo-spice/sospice",
"repo_path": "sospice_extracted/sospice-main/sospice/catalog/tests/__init__.py",
"type": "Python"
}
|
solo-spiceREPO_NAMEsospicePATH_START.@sospice_extracted@sospice-main@sospice@catalog@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "NOE.py",
"repo_name": "msm550/DMATIS",
"repo_path": "DMATIS_extracted/DMATIS-master/NOE.py",
"type": "Python"
}
|
import pandas as pd
import numpy as np
import multiprocessing as mp
def mT(A):
    # Target-nucleus mass in GeV: mass number A times the proton mass m_p
    # (module-level constant; nuclear binding energy is neglected).
    return (A * m_p)
def reduced(m1, m2):
    """Reduced mass of a two-body system with masses m1 and m2."""
    total = m1 + m2
    return m1 * m2 / total
def r(mH, A):
    # Kinematic factor 4*mA*mH/(mA+mH)^2: the maximum fractional energy a
    # DM particle of mass mH can transfer to a nucleus of mass number A.
    return (4 * A * m_p * mH / (A * m_p + mH) ** 2)
def F(eR, A):
    # Helm nuclear form factor evaluated at recoil energy eR (GeV) for mass
    # number A; uses module-level skin parameters aF, sF.  0.197 converts
    # GeV to fm^-1 (hbar*c).
    if eR == 0:
        return (1)
    else:
        qF = np.sqrt(2 * mT(A) * eR)
        cF = 1.23 * A ** (1 / 3) - 0.6
        rF = np.sqrt(cF ** 2 + 7 * ((np.pi * aF) ** 2) / 3 - 5 * sF ** 2)
        qrF = qF * rF / 0.197
        return (3 * np.exp(-(qF * sF / 0.197) ** 2 / 2) * (np.sin(qrF) - np.cos(qrF) * qrF) / qrF ** 3)
"""
def F(eR, A):
    return(1)
"""
def si(mH, si0, A, eR):
    # DM-nucleus cross section from the DM-nucleon cross section si0:
    # coherent A^2 enhancement, reduced-mass scaling and form-factor F.
    return (si0 * (A * reduced(A * m_p, mH) * F(eR, A) / reduced(m_p, mH)) ** 2)
def f(A):
    # Mass fraction of element A (O, Si, Al, else Fe) in the crust model
    # used for the overburden.
    if A == Ox:
        return (0.465)
    elif A == Si:
        return (0.289)
    elif A == Al:
        return (0.089)
    else:
        return (0.048)
def lambdainv(mH, si0, A, eR):
    # Inverse mean free path (1/cm) against scattering off element A in
    # crust material of density rhoE; 5.62e+23 converts g to GeV.
    return (5.62e+23 * rhoE * si(mH, si0, A, eR) * f(A) / 0.891 / A / m_p)
def lambdaeff(lambdainvSi, lambdainvOx, lambdainvAl, lambdainvFe):
    """Effective mean free path combining the four per-element inverse paths."""
    inv_total = lambdainvSi + lambdainvOx + lambdainvAl + lambdainvFe
    return inv_total ** -1
def pA(lambdainv, leff):
    """Probability of scattering off a given element: leff over that
    element's mean free path."""
    return leff * lambdainv
def ler(mH, A, randomCos):
    # Fraction of energy RETAINED after one scatter off element A, given the
    # center-of-mass scattering cosine randomCos.
    return (1 - r(mH, A) * (1 - randomCos) / 2)
def lambdadis(leff, delta):
    # Random path length drawn from an exponential with mean leff*(1+delta);
    # the stretch is compensated later by weight().
    return (-leff * (1 + delta) * np.log(np.random.random_sample()))
def rCos():
    # Isotropic scattering cosine, uniform in [-1, 1).
    return (2 * np.random.random_sample() - 1)
def weight(x, delta, leff):
    """Importance weight compensating the (1 + delta)-stretched path-length
    sampling used by lambdadis()."""
    stretch = 1 + delta
    return stretch * np.exp(-delta * x / stretch / leff)
def phi():
    # Random azimuthal scattering angle, uniform in [0, 2*pi).
    return (2 * np.pi * np.random.random_sample())
def r01():
    # Uniform random number in [0, 1).
    return (np.random.random_sample())
def a_sel(ra, pASi, pAOxSi, pAFe):
    # Select the target element by comparing the uniform draw `ra` against
    # the cumulative per-element interaction probabilities
    # (pAOxSi = pASi + pAOx; the remainder up to 1 - pAFe is Al).
    if ra >= 0 and ra < pASi:
        return (Si)
    elif ra >= pASi and ra < pAOxSi:
        return (Ox)
    elif ra >= pAOxSi and ra < 1 - pAFe:
        return (Al)
    else:
        return (Fe)
def diffusion(i):
    """Monte Carlo propagation of the i-th surface DM particle through the
    crust overburden of thickness d.

    Returns [0, w] if the particle is backscattered to the atmosphere,
    [1, w] if it drops below Emin, or [E_final, cos(theta), w] if it
    reaches depth d with enough energy to trigger the detector.
    Reads module-level globals (v, mH, sigmap, delta, d, Emin, Si/Ox/Al/Fe).
    """
    v_ini = v[i]
    CosTheta = r1 = r01()
    eRec = 0
    # Mean free path at the current recoil energy (form-factor dependent).
    l_inv_Si = lambdainv(mH, sigmap, Si, eRec)
    l_inv_Ox = lambdainv(mH, sigmap, Ox, eRec)
    l_inv_Al = lambdainv(mH, sigmap, Al, eRec)
    l_inv_Fe = lambdainv(mH, sigmap, Fe, eRec)
    leff = lambdaeff(l_inv_Si, l_inv_Ox, l_inv_Al, l_inv_Fe)
    l = lambdadis(leff, delta)
    w = wi = weight(l, delta, leff)
    # Depth below the surface reached so far.
    ztot = l * CosTheta
    E0 = 0.5 * mH * v_ini ** 2
    p = 0
    totalleft = 1
    Ef_a = E0 * totalleft
    if ztot >= d and Ef_a >= Emin:
        return [Ef_a, CosTheta, w]
    if Ef_a < Emin:
        return [1, w]
    while Ef_a >= Emin and ztot < d and ztot > 0:
        p += 1
        ra = r01()
        l_inv_Si = lambdainv(mH, sigmap, Si, eRec)
        l_inv_Ox = lambdainv(mH, sigmap, Ox, eRec)
        l_inv_Al = lambdainv(mH, sigmap, Al, eRec)
        l_inv_Fe = lambdainv(mH, sigmap, Fe, eRec)
        leff = lambdaeff(l_inv_Si, l_inv_Ox, l_inv_Al, l_inv_Fe)
        pAOx = pA(l_inv_Ox, leff)
        pASi = pA(l_inv_Si, leff)
        pAFe = pA(l_inv_Fe, leff)
        pAOxSi = pAOx + pASi
        # Pick the scattering element and convert the CM angle to the lab.
        A = a_sel(ra, pASi, pAOxSi, pAFe)
        mHmA = mH / mT(A)
        l = lambdadis(leff, delta)
        wi = weight(l, delta, leff)
        CosXiCM = rCos()
        CosXiLab = (mHmA + CosXiCM) / np.sqrt(1 + mHmA * (2 * CosXiCM + mHmA))
        # Rotate the new direction about the previous one by a random azimuth.
        CosTheta = r1 * CosXiLab - np.sqrt(1 - r1 ** 2) * np.sqrt(1 - CosXiLab ** 2) * np.cos(phi())
        Ef_b = E0 * totalleft
        left = ler(mH, A, CosXiCM)
        totalleft *= left
        Ef_a = E0 * totalleft
        eRec = Ef_b - Ef_a
        w *= wi
        z = l * CosTheta
        ztot += z
        if ztot < 0:
            return [0, w]
        if ztot >= d and Ef_a >= Emin:
            return [Ef_a, CosTheta, w]
        if Ef_a < Emin:
            return [1, w]
        r1 = CosTheta
def diffusion_Pb(i):
    """Monte Carlo propagation through the lead shield of thickness d_Pb.

    Each crust survivor in ``s`` is replayed ``rep`` times (i // rep picks
    the parent).  Returns [0, w] if backscattered, [1, w] if below Emin,
    or save_ini + [E_final, eRecDAMIC, w] on success, where eRecDAMIC is a
    sampled Si recoil energy in the detector.
    """
    save_ini = s[i // rep]
    r1_Pb = save_ini[1]
    eRec = 0
    # Mean free path in lead; 5.62e+23 converts g to GeV.
    l_Pb = (5.62e+23 * rhoPb * si(mH, sigmap, Pb, eRec) / Pb / m_p) ** -1
    l2 = lambdadis(l_Pb, delta)
    wiPb = wiPbSum = weight(l2, delta, l_Pb)
    w_Pb = save_ini[2] * wiPb
    ztot_Pb = l2 * r1_Pb
    E0_Pb = Efa_Pb = save_ini[0]
    p_Pb = 0
    totalleft = 1
    # Sampled recoil energy deposited in the silicon detector.
    eRecDAMIC = E0_Pb * r(mH, Si) * (1 - rCos()) / 2
    if ztot_Pb >= d_Pb and Efa_Pb >= Emin:
        return save_ini + [Efa_Pb, eRecDAMIC, w_Pb]
    if Efa_Pb < Emin:
        return [1, w_Pb]
    while Efa_Pb >= Emin and ztot_Pb < d_Pb and ztot_Pb > 0:
        p_Pb += 1
        l_Pb = (5.62e+23 * rhoPb * si(mH, sigmap, Pb, eRec) / Pb / m_p) ** -1
        l2 = lambdadis(l_Pb, delta)
        mHmPb = mH / mT(Pb)
        CosXiCM_Pb = rCos()
        wiPb = weight(l2, delta, l_Pb)
        CosXiLab_Pb = (mHmPb + CosXiCM_Pb) / np.sqrt(1 + mHmPb * (2 * CosXiCM_Pb + mHmPb))
        CosTheta_Pb = r1_Pb * CosXiLab_Pb - np.sqrt(1 - r1_Pb ** 2) * np.sqrt(1 - CosXiLab_Pb ** 2) * np.cos(phi())
        wiPbSum += wiPb
        Efb_Pb = E0_Pb * totalleft
        left = ler(mH, Pb, CosXiCM_Pb)
        totalleft *= left
        Efa_Pb = E0_Pb * totalleft
        eRec = Efb_Pb - Efa_Pb
        w_Pb *= wiPb
        z = l2 * CosTheta_Pb
        ztot_Pb += z
        if ztot_Pb < 0:
            return [0, w_Pb]
        eRecDAMIC = Efa_Pb * r(mH, Si) * (1 - rCos()) / 2
        if ztot_Pb >= d_Pb and Efa_Pb >= Emin:
            return save_ini + [Efa_Pb, eRecDAMIC, w_Pb]
        if Efa_Pb < Emin:
            return [1, w_Pb]
        r1_Pb = CosTheta_Pb
if __name__ == '__main__':
    # Driver: read run parameters, simulate crust + lead-shield propagation
    # with multiprocessing, and print the expected DAMIC event count.
    print('Loading the velocity distribution on the Earth surface and setting the parameters ...')
    v_df = pd.read_csv('vi.csv')
    v = [v_row[0] for v_row in v_df.values]
    v_len = len(v)
    delta = float(input("Set path length modification parameter, delta = "))
    n_cores = int(input("# of cores for multiprocessing = "))
    nj = float(input("# of particles at the Earth's surface to be simulated = "))
    mH = float(input("DM mass in GeV = "))
    sigmap = float(input("DM-nucleon cross_section in ubarn = ")) * 1e-30
    rep = int(input("Repetition factor from top to the bottom of the lead shield = "))
    # atomic mass numbers
    Si = 28
    Ox = 16
    Al = 27
    Fe = 56
    Pb = 207
    Cu = 63
    # mass densities in gr/cm^3
    rhoPb = 11.34
    rhoCu = 8.96
    rhoE = 2.7
    # Nuclear form factor parameters
    aF = 0.52
    sF = 0.9
    # proton mass in GeV
    m_p = 0.938
    # DM local mass density in GeV/cm^3
    rhoDM = 0.3
    # DAMIC exposure in Kg*days
    e = 0.107
    # DAMIC detector depth in cm
    d = 350 * 30.48
    # lead shield thickness
    d_Pb = 6 * 2.54
    # Nuclear recoil energy threshold of the detector
    E_th = 5.5e-7
    # output resetting
    nElost = nUp = nElost_Pb = nUp_Pb = ws = ws_Pb = 0
    # Min energy that a DM particle needs to have to potentially trigger the DAMIC detector in GeV
    Emin = E_th / (1 - ler(mH, Si, -1))
    # --- Stage 1: propagate through the crust in batches of v_len. ---
    pool = mp.Pool(n_cores)
    s = []
    for i in range(int(nj / v_len)):
        save = pool.map(diffusion, range(v_len))
        for j in range(v_len):
            if save[j][0] == 0:
                nUp += save[j][1]
            elif save[j][0] == 1:
                nElost += save[j][1]
            else:
                ws += save[j][2]
                s.append(save[j])
        if i % 10 == 0 and i != 0:
            # NOTE(review): this progress count assumes v_len == 1e6 per
            # batch — verify against the actual vi.csv length.
            print(str(int(i*1000000))+' particles diffusion has been simulated')
    attenuation_factor_Earth = ws / nj
    print('Number of particles that are deflected back to atmosphere = ', nUp)
    print('Number of particles that lost a large fraction of their energy and cannot trigger the detector = ', nElost)
    print('Number of particles that reached the lead shield', len(s))
    print("Earth attenuation factor = ", attenuation_factor_Earth)
    # --- Stage 2: replay each survivor `rep` times through the lead shield. ---
    n_Pb = rep * len(s)
    pool_Pb = mp.Pool(n_cores)
    s_Pb = []
    save_Pb = pool_Pb.map(diffusion_Pb, range(n_Pb))
    for j in range(n_Pb):
        if save_Pb[j][0] == 0:
            nUp_Pb += save_Pb[j][1]
        elif save_Pb[j][0] == 1:
            nElost_Pb += save_Pb[j][1]
        else:
            ws_Pb += save_Pb[j][5]
            s_Pb.append(save_Pb[j])
    # Keep only particles whose sampled detector recoil exceeds threshold.
    sRec = []
    for k in range(len(s_Pb)):
        if s_Pb[k][4] >= E_th:
            sRec.append(s_Pb[k])
    # Rescale stage-1 tallies for the replay factor.
    nUp = rep * nUp
    nElost = rep * nElost
    ws = rep * ws
    nj *= rep
    attenuation_factor = ws_Pb / nj
    print('Number of capable DM particles = ', len(s_Pb))
    print("Total attenuation factor = ", attenuation_factor)
    print('Number of successful DM particles = ', len(sRec))
    # factor 2 in calculation of total number of events is due the Earth-shielding of DM particles entering the Earth from below the horizon
    print("Expected total number of events by DAMIC = " + format(1.46e+42 * e * 0.3 * attenuation_factor * sum(
        si(mH, sigmap, Si, sRec[i][4]) * np.sqrt(2 * sRec[i][3] / mH) * sRec[i][5] for i in
        range(len(sRec))) / ws_Pb / Si / mH / 2, '.5f'))
msm550REPO_NAMEDMATISPATH_START.@DMATIS_extracted@DMATIS-master@NOE.py@.PATH_END.py
|
{
"filename": "getpota_input_Y3.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/scripts/mock_tools/getpota_input_Y3.py",
"type": "Python"
}
|
'''
Find all potential assignment and counts tiles for some input (must have data model needed for fiberassign) and some set of tiles
Use the following environment
source /global/common/software/desi/desi_environment.sh main
'''
import numpy as np
import os
from astropy.table import Table, join, vstack
import argparse
from fiberassign.hardware import load_hardware, get_default_exclusion_margins
from fiberassign._internal import Hardware
from fiberassign.tiles import load_tiles
from fiberassign.targets import Targets, TargetsAvailable, LocationsAvailable, create_tagalong, load_target_file, targets_in_tiles
from fiberassign.assign import Assignment
from fiberassign.utils import Logger
from desitarget.io import read_targets_in_tiles
import desimodel.focalplane
import desimodel.footprint
trad = desimodel.focalplane.get_tile_radius_deg()*1.1 #make 10% greater just in case
import fitsio
import LSS.common_tools as common
#from LSS.imaging import get_nobsandmask
from LSS.main.cattools import count_tiles_better
from LSS.globals import main
import bisect
import time
from datetime import datetime
import multiprocessing
t_start = time.time()
log = Logger.get()

parser = argparse.ArgumentParser()
parser.add_argument("--prog", choices=['DARK','BRIGHT'],default='DARK')
# NOTE(review): stale Spanish review notes here claimed a missing comma and
# missing '--' prefixes on --input/--output; the current lines look
# syntactically fine -- verify against the upstream script before removing.
parser.add_argument("--survey", help="e.g.,", default='DA2')
parser.add_argument("--getcoll",default='y')
parser.add_argument("--input",help='full path to input file, assumed to be fits')
parser.add_argument("--output",help='full path to output file, will be saved as fits')
# The next two options are additions relative to the upstream (Ashley's) script.
parser.add_argument("--base_output", help="base directory for output",default='/global/cfs/cdirs/desi/survey/catalogs/DA2/mocks/')
parser.add_argument("--realization")
parser.add_argument("--tile-temp-dir", help="Directory for temp tile files, default %(default)s",
                    default=os.path.join(os.environ['SCRATCH'], 'rantiles'))
parser.add_argument("--counttiles", default = 'n')
parser.add_argument("--nprocs", help="Number of multiprocessing processes to use, default %(default)i",
                    default=multiprocessing.cpu_count()//2, type=int)

# On Perlmutter, this read-only access point can be *much* faster thanks to aggressive caching.
# If you didn't want this for some reason, you could revert '/dvs_ro/cfs/cdirs/desi' to '/global/cfs/cdirs/desi' in the following.
desi_input_dir = os.getenv('DESI_ROOT_READONLY', default='/dvs_ro/cfs/cdirs/desi')

args = parser.parse_args()
print(args)

# Input mock-target catalog and its column names (used as module globals below).
infn = args.input
tars = fitsio.read(infn)
tarcols = list(tars.dtype.names)

# Per-realization scratch directory that holds one 'tilenofa-*.fits' per tile.
tileoutdir = os.path.join(args.base_output.replace('global', os.getenv('SCRATCH')), 'SecondGenMocks', 'Generic', 'tartiles'+args.realization)

# Ensure that the targets file is sorted by Dec.
# get_tile_targ() relies on this ordering for its bisect-based Dec pre-cut.
t0 = time.time()
is_sorted = np.all(tars['DEC'][:-1] <= tars['DEC'][1:])
if not is_sorted:
    I = np.argsort(tars['DEC'])
    tars = tars[I]
t1 = time.time()
log.info('Sorting/verifying mocks: %.1f' % (t1-t0))

if not os.path.exists(tileoutdir):
    os.makedirs(tileoutdir)
    #print('made '+tileoutdir)

# Table of tiles for the requested survey/program; drives all per-tile work.
tiletab = Table.read(os.path.join(desi_input_dir, 'survey', 'catalogs', args.survey, 'LSS', 'tiles-'+args.prog+'.fits'))
log.info('Reading startup globals: %.3f' % (time.time() - t_start))
def get_tile_targ(tile):
    '''
    Creates an astropy Table of (mock) targets within the given `tile`.

    Parameters
    ----------
    tile : row of the module-level `tiletab` table; must provide 'RA' and 'DEC'.

    Returns
    -------
    astropy.table.Table with the rows of the module-level `tars` catalog that
    fall within the tile radius `trad`, with any fiberassign-required columns
    (DESI_TARGET, NUMOBS_INIT, NUMOBS_MORE, PRIORITY, OBSCONDITIONS,
    SUBPRIORITY) filled with defaults if absent from the input.
    '''
    tdec = tile['DEC']
    decmin = tdec - trad
    decmax = tdec + trad
    dec = tars['DEC']
    # The `tars` global table of targets is sorted by Dec. We therefore only need to look at
    # indices that can possibly be within range given just the Dec distance (decmin to decmax).
    # "bisect_left" is way faster than "np.searchsorted"!
    #i0,i1 = np.searchsorted(dec, [np.float32(decmin), np.float32(decmax)])
    i0 = bisect.bisect_left(dec, decmin)
    i1 = bisect.bisect_left(dec, decmax, lo=i0)
    # i1+1 deliberately includes one extra row; the exact radial cut below
    # (find_points_radec) removes anything outside the tile anyway.
    Idec = slice(i0, i1+1)
    inds = desimodel.footprint.find_points_radec(tile['RA'], tdec,
                                                 tars['RA'][Idec], tars['DEC'][Idec])
    # `inds` are relative to the Dec slice, so shift by i0 to index `tars`.
    rtw = tars[i0 + np.array(inds)]
    rmtl = Table(rtw)
    del rtw
    # Fill in fiberassign-required columns with defaults when missing.
    if 'DESI_TARGET' not in tarcols:
        rmtl['DESI_TARGET'] = np.ones(len(rmtl),dtype=int)*2
    if 'NUMOBS_INIT' not in tarcols:
        rmtl['NUMOBS_INIT'] = np.zeros(len(rmtl),dtype=int)
    if 'NUMOBS_MORE' not in tarcols:
        rmtl['NUMOBS_MORE'] = np.ones(len(rmtl),dtype=int)
    if 'PRIORITY' not in tarcols:
        rmtl['PRIORITY'] = np.ones(len(rmtl),dtype=int)*3400
    # OBSCONDITIONS is unconditionally overwritten (guard removed on purpose)
    # so it matches the value assumed when writing the tile table in getpa().
    rmtl['OBSCONDITIONS'] = np.ones(len(rmtl),dtype=int)*516#forcing it to match value assumed below
    if 'SUBPRIORITY' not in tarcols:
        rmtl['SUBPRIORITY'] = np.random.random(len(rmtl))
    return rmtl
def write_tile_targ(ind):
    '''
    Build and write the per-tile mock-target table for tile table index `ind`.

    Writes 'tilenofa-<TILEID>.fits' into the module-level `tileoutdir`.
    '''
    tile_row = tiletab[ind]
    out_path = os.path.join(tileoutdir, 'tilenofa-' + str(tile_row['TILEID']) + '.fits')
    log.info('creating %s' % out_path)
    tile_targets = get_tile_targ(tile_row)
    tile_targets.write(out_path, format='fits', overwrite=True)
# Positioner exclusion margins used when loading the focal-plane hardware model.
margins = get_default_exclusion_margins()
# Index used to make the temp tile filenames unique; fixed at 0 in this script.
rann = 0
# NOTE(review): `n` is never read in this script -- candidate for removal.
n = 0
def getpa(ind):
    '''
    Run fiberassign "potential assignment" for the tile at `tiletab` index `ind`.

    Reproduces the original fiberassign run for the tile (same RUNDATE hardware,
    hour angle and field rotation read from the archived fiberassign header),
    loads the per-tile mock target file written by write_tile_targ(), and
    returns an astropy Table with one row per (LOCATION, TARGETID) availability
    pair, joined to the input target columns, plus TILEID and (optionally) a
    boolean COLLISION column.
    '''
    #tile = 1230
    tile = tiletab[ind]['TILEID']
    ts = '%06i' % tile
    # Header of the archived fiberassign file; provides the run metadata needed
    # to reproduce the original assignment state.
    fbah = fitsio.read_header(os.path.join(desi_input_dir, 'target', 'fiberassign', 'tiles', 'trunk', ts[:3], 'fiberassign-'+ts+'.fits.gz'))
    dt = fbah['RUNDATE']#[:19]
    pr = args.prog
    t = Table(tiletab[ind])
    # OBSCONDITIONS must match the value forced onto targets in get_tile_targ().
    t['OBSCONDITIONS'] = 516
    t['IN_DESI'] = 1
    t['MTLTIME'] = fbah['MTLTIME']
    t['FA_RUN'] = fbah['FA_RUN']
    t['PROGRAM'] = pr
    obsha = fbah['FA_HA']
    obstheta = fbah['FIELDROT']
    # Look up the pre-loaded hardware model valid at the tile's RUNDATE.
    # main() must have populated the hardware_times cache first.
    tt = parse_datetime(dt)
    hw = get_hardware_for_time(tt)
    assert(hw is not None)
    # Write a one-row tile file to scratch, as required by load_tiles().
    tilefn = os.path.join(args.tile_temp_dir, str(tile)+'-'+str(rann)+'-tiles.fits')
    t.write(tilefn, overwrite=True)
    tiles = load_tiles(
        tiles_file=tilefn, obsha=obsha, obstheta=obstheta,
        select=[tile])
    tids = tiles.id
    #print('Tile ids:', tids)
    I = np.flatnonzero(np.array(tids) == tile)
    assert(len(I) == 1)
    i = I[0]
    # NOTE(review): tile_ra/tile_dec are computed but unused below.
    tile_ra = tiles.ra[i]
    tile_dec = tiles.dec[i]
    # Create empty target list
    tgs = Targets()
    # Create structure for carrying along auxiliary target data not needed by C++.
    plate_radec=True
    tagalong = create_tagalong(plate_radec=plate_radec)
    #print(tile)
    # Load target files...
    tilenofafn = os.path.join(tileoutdir, 'tilenofa-%i.fits' % tile)
    load_target_file(tgs, tagalong, tilenofafn)
    #loading it again straight to table format because I can't quickly figure out exactly where targetid,ra,dec gets stored
    tar_tab = fitsio.read(tilenofafn, columns=tarcols)
    # Find targets within tiles, and project their RA,Dec positions
    # into focal-plane coordinates.
    tile_targetids, tile_x, tile_y, tile_xy_cs5 = targets_in_tiles(hw, tgs, tiles, tagalong)
    # Compute the targets available to each fiber for each tile.
    tgsavail = TargetsAvailable(hw, tiles, tile_targetids, tile_x, tile_y)
    # Compute the fibers on all tiles available for each target and sky
    favail = LocationsAvailable(tgsavail)
    # FAKE stucksky (positioners that happen to be stuck on good sky positions)
    stucksky = {}
    # Create assignment object
    asgn = Assignment(tgs, tgsavail, favail, stucksky)
    tgsavail = asgn.targets_avail()
    avail = tgsavail.tile_data(tile)
    # Total number of (location, target) availability pairs on this tile.
    navail = np.sum([len(avail[x]) for x in avail.keys()])
    fibers = dict(hw.loc_fiber)
    fdata = Table()
    fdata['LOCATION'] = np.zeros(navail, dtype=int)
    fdata['FIBER'] = np.zeros(navail, dtype=int)
    fdata['TARGETID'] = np.zeros(navail, dtype=int)
    off = 0
    # The "FAVAIL" (available targets) HDU is sorted first by LOCATION,
    # then by TARGETID.
    for lid in sorted(avail.keys()):
        # lid (location id) is a scalar, tg (target ids) is an array
        tg = avail[lid]
        fdata['LOCATION'][off:off+len(tg)] = lid
        fdata['FIBER']   [off:off+len(tg)] = fibers[lid]
        fdata['TARGETID'][off:off+len(tg)] = sorted(tg)
        off += len(tg)
    # Attach the input target columns to each availability row.
    fdata = join(fdata,tar_tab,keys=['TARGETID'],join_type='left')
    if args.getcoll == 'y':
        coll = asgn.check_avail_collisions(tile)
        kl = np.array(list(coll.keys())).transpose()
        locs = kl[0]
        ids = kl[1]
        # Encode (targetid, location) pairs as single ints for fast membership
        # tests; relies on location ids being < 10000.
        locids = ids*10000+locs
        log.info('N collisions: %i' % len(coll))
        locidsin = np.isin(fdata['LOCATION']+10000*fdata['TARGETID'],locids)
        log.info('N collisions original: %i %i' % (np.sum(locidsin),len(fdata)))
        fdata['COLLISION'] = locidsin
    #colltab = Table(forig[locidsin])
    fdata['TILEID'] = tile
    return fdata
def run_one_tile(ind):
    '''
    Write the per-tile target file, run potential assignment, and time both.

    Returns the result of getpa() converted to a plain structured ndarray
    (suitable for streaming into the fitsio output in main()).
    '''
    start = time.time()
    write_tile_targ(ind)
    result = np.array(getpa(ind))
    elapsed = time.time() - start
    log.info('Tile %i took %.3f sec' % (tiletab[ind]['TILEID'], elapsed))
    return result
def read_fba_header(ind):
    '''
    Read selected fiberassign header keywords for one tile index.

    Returns a dict with the keys needed to reproduce the original
    fiberassign run: RUNDATE, MTLTIME, FA_RUN, FA_HA, FIELDROT.
    '''
    ts = '%06i' % tiletab['TILEID'][ind]
    path = os.path.join(desi_input_dir, 'target', 'fiberassign', 'tiles',
                        'trunk', ts[:3], 'fiberassign-' + ts + '.fits.gz')
    header = fitsio.read_header(path)
    wanted = ('RUNDATE', 'MTLTIME', 'FA_RUN', 'FA_HA', 'FIELDROT')
    return {key: header[key] for key in wanted}
def parse_datetime(s):
    '''
    Parse an ISO-like datetime string into a timezone-aware datetime.

    Accepts both timezone-aware strings (e.g. '2021-01-01T00:00:00+00:00')
    and naive ones ('2021-01-01T00:00:00'); naive timestamps are assumed
    to be UTC (matching the original's commented warning).

    Parameters
    ----------
    s : str
        Datetime in '%Y-%m-%dT%H:%M:%S' form, optionally with a UTC offset.

    Returns
    -------
    datetime.datetime
        Timezone-aware datetime.
    '''
    # Local import: the module header only imports `datetime` from the
    # datetime module, so `timezone` would otherwise be a NameError.
    from datetime import timezone
    try:
        return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S%z")
    except ValueError:
        # msg = "Requested run date '{}' is not timezone-aware. Assuming UTC.".format(runtime)
        d = datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
        d = d.replace(tzinfo=timezone.utc)
        # BUGFIX: the original fell off the end here and returned None.
        return d
# Cache of (valid_from, valid_to, hardware) tuples; valid_to may be None for
# an open-ended interval. Populated by main() before tiles are processed.
hardware_times = []

def get_hardware_for_time(t):
    '''
    Return the cached hardware model whose validity interval contains `t`.

    Intervals are half-open [valid_from, valid_to); a `valid_to` of None means
    "valid indefinitely". Returns None when no cached interval matches.
    '''
    global hardware_times
    for valid_from, valid_to, hardware in hardware_times:
        if t < valid_from:
            continue
        if valid_to is None or t < valid_to:
            return hardware
    return None
def main():
    '''
    Driver: read fiberassign headers in parallel, pre-load every hardware
    model needed, then run potential assignment for all tiles in parallel
    and stream the merged results into `args.output` as a FITS table.
    '''
    from multiprocessing import Pool
    tls = list(tiletab['TILEID'])
    #inds = np.flatnonzero(np.array(tls) == 1230)
    #inds = np.arange(256)
    inds = np.arange(len(tls))
    t0 = time.time()
    # Read all fiberassign headers to get the RUNDATES.
    with Pool(processes=args.nprocs) as pool:
        headers = pool.map(read_fba_header, inds)
    rundates = set([h['RUNDATE'] for h in headers])
    rundates = sorted(list(rundates))
    log.info('Unique rundates: %i of %i' % (len(rundates), len(headers)))
    t1 = time.time()
    log.info('Reading fiberassign headers in parallel: %.3f sec' % (t1-t0))
    # Read all hardware configurations for our RUNDATES.
    # Must be done serially, before the worker pool below, so the
    # hardware_times cache is populated in (and inherited by) the workers.
    global hardware_times
    for t in rundates:
        dt = parse_datetime(t)
        cached = get_hardware_for_time(dt)
        if cached is not None:
            continue
        hw,time_lo,time_hi = load_hardware(rundate=t, add_margins=margins, get_time_range=True)
        hardware_times.append((time_lo, time_hi, hw))
    t2 = time.time()
    log.info('Loading hardware in series: %.3f sec' % (t2-t1))

    # Keeping this old code because it's a little easier to understand than what we're doing
    # below (streaming results to disk).
    #
    # # Run fiber assignment on tiles in parallel
    # with Pool(processes=128) as pool:
    #     res = pool.map(run_one_tile, inds)
    # t3 = time.time()
    # log.info('Running tiles in parallel: %.3f sec' % (t3-t2))
    #
    # # Merge and write results
    # colltot = np.concatenate(res)
    # if args.getcoll == 'y':
    #     print(len(colltot),np.sum(colltot['COLLISION']))
    # t3b = time.time()
    #
    # common.write_LSS(colltot,paoutdir+'/pota-'+args.prog+'.fits')
    # t4 = time.time()
    # log.info('Merging results and writing: %.3f sec (%.3f + %.3f)' % (t4-t3, t3b-t3, t4-t3b))

    # Write output *while* retrieving results in parallel
    outfn = args.output#os.path.join(paoutdir, 'pota-'+args.prog+'.fits')
    # Write to a temp file and atomically rename at the end, so a crash
    # mid-run cannot leave a truncated file at the final path.
    tempout = outfn + '.tmp'
    fits = fitsio.FITS(tempout, 'rw', clobber=True)
    first = True
    ntot = 0
    ncoll = 0
    with Pool(processes=args.nprocs) as pool:
        it = pool.imap_unordered(run_one_tile, inds)
        # fetch results as they complete
        for res in it:
            ntot += len(res)
            # NOTE(review): the 'COLLISION' column only exists when
            # args.getcoll == 'y' (see getpa); this line would raise
            # otherwise -- confirm getcoll is always 'y' in practice.
            ncoll += np.sum(res['COLLISION'])
            # First result: write to output file.
            if first:
                fits.write(res, extname='LSS')
                first = False
            # Subsequent results: append to output file.
            else:
                fits[-1].append(res)
            del res
    fits.close()
    os.rename(tempout, outfn)
    log.info('Wrote %s' % outfn)
    t3 = time.time()
    if args.getcoll == 'y':
        log.info('%i %i' % (ntot, ncoll))
    log.info('Running tiles and writing results: %.3f sec' % (t3-t2))
# Script entry point.
if __name__ == '__main__':
    main()
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@scripts@mock_tools@getpota_input_Y3.py@.PATH_END.py
|
{
"filename": "_tickvals.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/xaxis/_tickvals.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``layout.xaxis.tickvals`` data-array property."""

    def __init__(self, plotly_name="tickvals", parent_name="layout.xaxis", **kwargs):
        # Allow callers to override edit_type; default matches the schema.
        edit_type = kwargs.pop("edit_type", "ticks")
        super(TickvalsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@xaxis@_tickvals.py@.PATH_END.py
|
{
"filename": "image_classifier.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_model_maker/image_classifier.md",
"type": "Markdown"
}
|
page_type: reference
description: APIs to train an image classification model.
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_model_maker.image_classifier" />
<meta itemprop="path" content="Stable" />
</div>
# Module: tflite_model_maker.image_classifier
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/tflmm/v0.4.2/tensorflow_examples/lite/model_maker/public/image_classifier/__init__.py">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
APIs to train an image classification model.
#### Task guide:
<a href="https://www.tensorflow.org/lite/tutorials/model_maker_image_classification">https://www.tensorflow.org/lite/tutorials/model_maker_image_classification</a>
## Classes
[`class DataLoader`](../tflite_model_maker/image_classifier/DataLoader): DataLoader for image classifier.
[`class ImageClassifier`](../tflite_model_maker/image_classifier/ImageClassifier): ImageClassifier class for inference and exporting to tflite.
[`class ModelSpec`](../tflite_model_maker/image_classifier/ModelSpec): A specification of image model.
## Functions
[`EfficientNetLite0Spec(...)`](../tflite_model_maker/image_classifier/EfficientNetLite0Spec): Creates EfficientNet-Lite0 model spec. See also: <a href="../tflite_model_maker/image_classifier/ModelSpec"><code>tflite_model_maker.image_classifier.ModelSpec</code></a>.
[`EfficientNetLite1Spec(...)`](../tflite_model_maker/image_classifier/EfficientNetLite1Spec): Creates EfficientNet-Lite1 model spec. See also: <a href="../tflite_model_maker/image_classifier/ModelSpec"><code>tflite_model_maker.image_classifier.ModelSpec</code></a>.
[`EfficientNetLite2Spec(...)`](../tflite_model_maker/image_classifier/EfficientNetLite2Spec): Creates EfficientNet-Lite2 model spec. See also: <a href="../tflite_model_maker/image_classifier/ModelSpec"><code>tflite_model_maker.image_classifier.ModelSpec</code></a>.
[`EfficientNetLite3Spec(...)`](../tflite_model_maker/image_classifier/EfficientNetLite3Spec): Creates EfficientNet-Lite3 model spec. See also: <a href="../tflite_model_maker/image_classifier/ModelSpec"><code>tflite_model_maker.image_classifier.ModelSpec</code></a>.
[`EfficientNetLite4Spec(...)`](../tflite_model_maker/image_classifier/EfficientNetLite4Spec): Creates EfficientNet-Lite4 model spec. See also: <a href="../tflite_model_maker/image_classifier/ModelSpec"><code>tflite_model_maker.image_classifier.ModelSpec</code></a>.
[`MobileNetV2Spec(...)`](../tflite_model_maker/image_classifier/MobileNetV2Spec): Creates MobileNet v2 model spec. See also: <a href="../tflite_model_maker/image_classifier/ModelSpec"><code>tflite_model_maker.image_classifier.ModelSpec</code></a>.
[`Resnet50Spec(...)`](../tflite_model_maker/image_classifier/Resnet50Spec): Creates ResNet 50 model spec. See also: <a href="../tflite_model_maker/image_classifier/ModelSpec"><code>tflite_model_maker.image_classifier.ModelSpec</code></a>.
[`create(...)`](../tflite_model_maker/image_classifier/create): Loads data and retrains the model based on data for image classification.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_model_maker@image_classifier.md@.PATH_END.py
|
{
"filename": "scs_.py",
"repo_name": "stephane-caron/qpsolvers",
"repo_path": "qpsolvers_extracted/qpsolvers-main/qpsolvers/solvers/scs_.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: LGPL-3.0-or-later
# Copyright 2016-2022 Stéphane Caron and the qpsolvers contributors
"""Solver interface for `SCS <https://www.cvxgrp.org/scs/>`__.
SCS (Splitting Conic Solver) is a numerical optimization package for solving
large-scale convex quadratic cone problems, which is a general class of
problems that includes quadratic programming. If you use SCS in some academic
work, consider citing the corresponding paper [ODonoghue2021]_.
"""
import warnings
from typing import Any, Dict, Optional, Union
import numpy as np
import scipy.sparse as spa
from numpy import ndarray
from scipy.sparse import csc_matrix
from scs import solve
from ..conversions import ensure_sparse_matrices
from ..problem import Problem
from ..solution import Solution
from ..solve_unconstrained import solve_unconstrained
# See https://www.cvxgrp.org/scs/api/exit_flags.html#exit-flags
# Maps SCS ``status_val`` exit codes to short human-readable descriptions;
# used below to build the warning emitted when the solver reports anything
# other than an exact solve (status_val == 1).
__status_val_meaning__ = {
    -7: "INFEASIBLE_INACCURATE",
    -6: "UNBOUNDED_INACCURATE",
    -5: "SIGINT",
    -4: "FAILED",
    -3: "INDETERMINATE",
    -2: "INFEASIBLE (primal infeasible, dual unbounded)",
    -1: "UNBOUNDED (primal unbounded, dual infeasible)",
    0: "UNFINISHED (never returned, used as placeholder)",
    1: "SOLVED",
    2: "SOLVED_INACCURATE",
}
def __add_box_cone(
    n: int,
    lb: Optional[ndarray],
    ub: Optional[ndarray],
    cone: Dict[str, Any],
    data: Dict[str, Any],
) -> None:
    """Append an SCS box cone encoding ``lb <= x <= ub`` to the problem data.

    Parameters
    ----------
    n :
        Number of optimization variables.
    lb :
        Lower bound constraint vector, or ``None`` for no lower bound.
    ub :
        Upper bound constraint vector, or ``None`` for no upper bound.
    cone :
        SCS cone dictionary, updated in place with ``bl``/``bu``.
    data :
        SCS data dictionary, whose ``A`` matrix and ``b`` vector gain
        ``1 + n`` extra rows (the box cone's ``t`` row followed by ``-I``).

    Notes
    -----
    See the `SCS Cones <https://www.cvxgrp.org/scs/api/cones.html>`__
    documentation for details.
    """
    if lb is None:
        lb = np.full((n,), -np.inf)
    if ub is None:
        ub = np.full((n,), +np.inf)
    cone["bl"] = lb
    cone["bu"] = ub
    # Box cone rows: one all-zero "t" row (with b = 1), then -I (with b = 0).
    t_row = csc_matrix((1, n))
    matrix_parts = [data["A"]] if "A" in data else []
    matrix_parts.extend([t_row, -spa.eye(n)])
    data["A"] = spa.vstack(matrix_parts, format="csc")
    vector_parts = [data["b"]] if "b" in data else []
    vector_parts.extend([1.0, np.zeros(n)])
    data["b"] = np.hstack(vector_parts)
def scs_solve_problem(
    problem: Problem,
    initvals: Optional[ndarray] = None,
    verbose: bool = False,
    **kwargs,
) -> Solution:
    """Solve a quadratic program using SCS.

    Parameters
    ----------
    problem :
        Quadratic program to solve.
    initvals :
        Warm-start guess vector (not used).
    verbose :
        Set to `True` to print out extra information.

    Returns
    -------
    :
        Solution returned by the solver.

    Raises
    ------
    ValueError
        If the quadratic program is unbounded below.

    Notes
    -----
    Keyword arguments are forwarded as is to SCS. For instance, we can call
    ``scs_solve_qp(P, q, G, h, eps_abs=1e-6, eps_rel=1e-4)``. SCS settings
    include the following:

    .. list-table::
       :widths: 30 70
       :header-rows: 1

       * - Name
         - Description
       * - ``max_iters``
         - Maximum number of iterations to run.
       * - ``time_limit_secs``
         - Time limit for solve run in seconds (can be fractional). 0 is
           interpreted as no limit.
       * - ``eps_abs``
         - Absolute feasibility tolerance. See `Termination criteria
           <https://www.cvxgrp.org/scs/algorithm/index.html#termination>`__.
       * - ``eps_rel``
         - Relative feasibility tolerance. See `Termination criteria
           <https://www.cvxgrp.org/scs/algorithm/index.html#termination>`__.
       * - ``eps_infeas``
         - Infeasibility tolerance (primal and dual), see `Certificate of
           infeasibility
           <https://www.cvxgrp.org/scs/algorithm/index.html#certificate-of-infeasibility>`_.
       * - ``normalize``
         - Whether to perform heuristic data rescaling. See `Data equilibration
           <https://www.cvxgrp.org/scs/algorithm/equilibration.html#equilibration>`__.

    Check out the `SCS settings
    <https://www.cvxgrp.org/scs/api/settings.html#settings>`_ documentation for
    all available settings.
    """
    P, q, G, h, A, b, lb, ub = problem.unpack()
    P, G, A = ensure_sparse_matrices(P, G, A)
    n = P.shape[0]
    data: Dict[str, Any] = {"P": P, "c": q}
    cone: Dict[str, Any] = {}
    if initvals is not None:
        data["x"] = initvals
    # Stack constraints in SCS cone order: zero cone (equalities) first,
    # then positive cone (inequalities); the box cone is appended last.
    if A is not None and b is not None:
        if G is not None and h is not None:
            data["A"] = spa.vstack([A, G], format="csc")
            data["b"] = np.hstack([b, h])
            cone["z"] = b.shape[0]  # zero cone
            cone["l"] = h.shape[0]  # positive cone
        else:  # G is None and h is None
            data["A"] = A
            data["b"] = b
            cone["z"] = b.shape[0]  # zero cone
    elif G is not None and h is not None:
        data["A"] = G
        data["b"] = h
        cone["l"] = h.shape[0]  # positive cone
    elif lb is None and ub is None:  # no constraint
        return solve_unconstrained(problem)
    if lb is not None or ub is not None:
        # Appends 1 + n rows to data["A"]/data["b"]; their duals are the
        # trailing entries of result["y"] read below for z_box.
        __add_box_cone(n, lb, ub, cone, data)
    kwargs["verbose"] = verbose
    result = solve(data, cone, **kwargs)
    solution = Solution(problem)
    solution.extras = result["info"]
    status_val = result["info"]["status_val"]
    # status_val == 1 is "SOLVED"; anything else triggers a warning below.
    solution.found = status_val == 1
    if not solution.found:
        warnings.warn(
            f"SCS returned {status_val}: {__status_val_meaning__[status_val]}"
        )
    solution.x = result["x"]
    # Split the stacked dual vector y back into equality (y), inequality (z)
    # and box (z_box) multipliers, matching the stacking order above.
    meq = A.shape[0] if A is not None else 0
    solution.y = result["y"][:meq] if A is not None else np.empty((0,))
    solution.z = (
        result["y"][meq : meq + G.shape[0]]
        if G is not None
        else np.empty((0,))
    )
    solution.z_box = (
        -result["y"][-n:]
        if lb is not None or ub is not None
        else np.empty((0,))
    )
    return solution
def scs_solve_qp(
    P: Union[ndarray, csc_matrix],
    q: ndarray,
    G: Optional[Union[ndarray, csc_matrix]] = None,
    h: Optional[ndarray] = None,
    A: Optional[Union[ndarray, csc_matrix]] = None,
    b: Optional[ndarray] = None,
    lb: Optional[ndarray] = None,
    ub: Optional[ndarray] = None,
    initvals: Optional[ndarray] = None,
    verbose: bool = False,
    **kwargs,
) -> Optional[ndarray]:
    r"""Solve a quadratic program using SCS.

    The quadratic program is defined as:

    .. math::

        \begin{split}\begin{array}{ll}
        \underset{x}{\mbox{minimize}} &
            \frac{1}{2} x^T P x + q^T x \\
        \mbox{subject to}
            & G x \leq h \\
            & A x = b \\
            & lb \leq x \leq ub
        \end{array}\end{split}

    It is solved using `SCS <https://github.com/cvxgrp/scs>`__.

    Parameters
    ----------
    P :
        Primal quadratic cost matrix.
    q :
        Primal quadratic cost vector.
    G :
        Linear inequality constraint matrix.
    h :
        Linear inequality constraint vector.
    A :
        Linear equality constraint matrix.
    b :
        Linear equality constraint vector.
    lb :
        Lower bound constraint vector.
    ub :
        Upper bound constraint vector.
    initvals :
        Warm-start guess vector (not used).
    verbose :
        Set to `True` to print out extra information.

    Returns
    -------
    :
        Solution to the QP, if found, otherwise ``None``.

    Raises
    ------
    ValueError
        If the quadratic program is unbounded below.

    Notes
    -----
    Keyword arguments are forwarded as is to SCS. For instance, we can call
    ``scs_solve_qp(P, q, G, h, eps_abs=1e-6, eps_rel=1e-4)``. SCS settings
    include the following:

    .. list-table::
       :widths: 30 70
       :header-rows: 1

       * - Name
         - Description
       * - ``max_iters``
         - Maximum number of iterations to run.
       * - ``time_limit_secs``
         - Time limit for solve run in seconds (can be fractional). 0 is
           interpreted as no limit.
       * - ``eps_abs``
         - Absolute feasibility tolerance. See `Termination criteria
           <https://www.cvxgrp.org/scs/algorithm/index.html#termination>`__.
       * - ``eps_rel``
         - Relative feasibility tolerance. See `Termination criteria
           <https://www.cvxgrp.org/scs/algorithm/index.html#termination>`__.
       * - ``eps_infeas``
         - Infeasibility tolerance (primal and dual), see `Certificate of
           infeasibility
           <https://www.cvxgrp.org/scs/algorithm/index.html#certificate-of-infeasibility>`_.
       * - ``normalize``
         - Whether to perform heuristic data rescaling. See `Data equilibration
           <https://www.cvxgrp.org/scs/algorithm/equilibration.html#equilibration>`__.

    Check out the `SCS settings
    <https://www.cvxgrp.org/scs/api/settings.html#settings>`_ documentation for
    all available settings.
    """
    # Thin wrapper: build the Problem, delegate to scs_solve_problem, and
    # return only the primal solution (or None on failure).
    problem = Problem(P, q, G, h, A, b, lb, ub)
    solution = scs_solve_problem(problem, initvals, verbose, **kwargs)
    return solution.x if solution.found else None
|
stephane-caronREPO_NAMEqpsolversPATH_START.@qpsolvers_extracted@qpsolvers-main@qpsolvers@solvers@scs_.py@.PATH_END.py
|
{
"filename": "wcs.py",
"repo_name": "SAMI-Galaxy-Survey/sami",
"repo_path": "sami_extracted/sami-master/general/wcs.py",
"type": "Python"
}
|
"""
Functions for measuring and recording WCS information.
In particular, wcs_position_coords is supposed to determine the WCS for
a file based on cross-correlating a collapsed image from the datacube with
an external photometric image. However, this was never shown to work
properly (the results were clustering around particular values, for
unknown reasons), so it was put to one side and never finished. Instead,
the SAMI Galaxy Survey has been using the 'nominal' WCS, which assumes
that the catalogued object is in the centre of the data.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import scipy as sp
import astropy.io.ascii as ascii
from scipy.interpolate import griddata
import astropy.io.fits as pf
import os
import urllib
from .. import samifitting as fitting
from ..sdss import sdss
#########################
def wcs_solve(myIFU, object_flux_cube, object_name, band, size_of_grid, output_pix_size_arcsec, plot=False, write=False, nominal=False, remove_thput_file=True):
    """Wrapper for wcs_position_coords, extracting coords from IFU.

    This function cross-correlates a g or r-band convolved SAMI cube with its
    respective SDSS g-band image and pins down the positional WCS for the
    central spaxel of the cube.

    Parameters are passed straight through to wcs_position_coords; see that
    function for their meaning. Returns its (WCS_pos, WCS_flag) tuple.
    """
    # Get Object RA + DEC from fibre table (this is the input catalogues RA+DEC in deg)
    object_RA = np.around(myIFU.obj_ra[myIFU.n == 1][0], decimals=6)
    object_DEC = np.around(myIFU.obj_dec[myIFU.n == 1][0], decimals=6)

    # Build wavelength axis.
    CRVAL3 = myIFU.crval1
    CDELT3 = myIFU.cdelt1
    Nwave = np.shape(object_flux_cube)[0]

    # -- crval3 is middle of range and indexing starts at 0.
    # -- this wave-axis agrees with QFitsView interpretation.
    CRVAL3a = CRVAL3 - ((Nwave-1)/2)*CDELT3
    wave = CRVAL3a + CDELT3*np.arange(Nwave)

    # NOTE(review): Nwave is taken from axis 0 but the transpose below moves
    # axis 2 to the front -- confirm the expected input cube orientation.
    object_flux_cube = np.transpose(object_flux_cube, (2,0,1))

    # BUGFIX: `remove_thput_file` was previously accepted but never forwarded,
    # so callers could not keep the downloaded throughput file.
    return wcs_position_coords(object_RA, object_DEC, wave, object_flux_cube, object_name, band, size_of_grid, output_pix_size_arcsec, plot=plot, write=write, nominal=nominal, remove_thput_file=remove_thput_file)
def wcs_position_coords(object_RA, object_DEC, wave, object_flux_cube, object_name, band, size_of_grid, output_pix_size_arcsec, plot=False, write=False, nominal=False, remove_thput_file=True):
    """Equate the WCS position information from a cross-correlation between a
    g-band SAMI cube and a g-band SDSS image.

    With nominal=True the catalogued (object_RA, object_DEC) is simply placed
    at the cube centre. Otherwise the cube is collapsed through the SDSS
    throughput curve and cross-correlated against a downloaded SDSS image to
    measure a positional offset.

    Returns (WCS_pos, WCS_flag) where WCS_pos is a dict of FITS WCS keywords
    and WCS_flag is 'SDSS' or 'Nominal'.

    NOTE(review): the non-nominal branch uses Python-2-era APIs
    (`urllib.urlretrieve`) and float slice indices (true division via the
    module's `__future__` import); it likely fails on Python 3 -- verify
    before relying on anything but the nominal path.
    """
    if nominal:
        # Nominal WCS: put the catalogue position at the cube centre with the
        # requested pixel scale (RA increases to the left, hence the sign).
        img_crval1 = object_RA
        img_crval2 = object_DEC
        xcube = size_of_grid
        ycube = size_of_grid
        img_cdelt1 = -1.0 * output_pix_size_arcsec / 3600.0
        img_cdelt2 = output_pix_size_arcsec / 3600.0
    else:
        # Get SDSS g-band throughput curve
        if not os.path.isfile("sdss_"+str(band)+".dat"):
            urllib.urlretrieve("http://www.sdss.org/dr3/instruments/imager/filters/"+str(band)+".dat", "sdss_"+str(band)+".dat")

        # and convolve with the SDSS throughput
        sdss_filter = ascii.read("sdss_"+str(band)+".dat", comment="#", names=["wave", "pt_secz=1.3", "ext_secz=1.3", "ext_secz=0.0", "extinction"])

        # re-grid g["wave"] -> wave
        thru_regrid = griddata(sdss_filter["wave"], sdss_filter["ext_secz=1.3"], wave, method="cubic", fill_value=0.0)

        # initialise a 2D simulated g' band flux array.
        len_axis = np.shape(object_flux_cube)[1]
        Nwave = len(wave)
        reconstruct = np.zeros((len_axis,len_axis))
        tester = np.zeros((len_axis,len_axis))
        data_bit = np.zeros((Nwave,len_axis,len_axis))

        # Sum convolved flux:
        for i in range(Nwave):
            data_bit[i] = object_flux_cube[i]*thru_regrid[i]
        reconstruct = np.nansum(data_bit,axis=0) # not absolute right now
        reconstruct[np.isnan(reconstruct)] = 0. # replacing nan with 0.0
        reconstruct[reconstruct < 0] = 0. # replacing negative fluxes with 0.0
        cube_image = reconstruct

        xcube = len(cube_image[0])
        ycube = len(cube_image[1])

        # Central 20x20-pixel crop, upsampled 5x for sub-pixel registration.
        # NOTE(review): len(...)/2 is true division here (module-level
        # __future__ import), giving float slice indices -- fails on Python 3.
        cube_image_crop = cube_image[(len(cube_image[0])/2)-10:(len(cube_image[0])/2)+10,(len(cube_image[1])/2)-10:(len(cube_image[1])/2)+10]
        cube_image_crop = sp.ndimage.zoom(cube_image_crop, 5, order=3)
        cube_image_crop_norm = (cube_image_crop - np.min(cube_image_crop))/np.max(cube_image_crop - np.min(cube_image_crop))

        # Check if the user supplied a red RSS file, throw exception.
        if np.array_equal(cube_image, tester):
            raise SystemExit("All values are zero: please provide the cube corresponding to the requested spectral band of the image!")

        ##########

        cube_size = np.around((size_of_grid*output_pix_size_arcsec)/3600, decimals=6)

        # Get SDSS Image
        if not os.path.isfile(str(object_name)+"_SDSS_"+str(band)+".fits"):
            sdss.getSDSSimage(object_name=object_name, RA=object_RA, DEC=object_DEC,
                              band=str(band), size=cube_size, number_of_pixels=size_of_grid)

        # Open SDSS image and extract data & header information
        image_file = pf.open(str(object_name)+"_SDSS_"+str(band)+".fits")
        image_data = image_file['Primary'].data
        image_header = image_file['Primary'].header

        img_crval1 = float(image_header['CRVAL1']) #RA
        img_crval2 = float(image_header['CRVAL2']) #DEC
        img_crpix1 = float(image_header['CRPIX1']) #Reference x-pixel
        img_crpix2 = float(image_header['CRPIX2']) #Reference y-pixel
        img_cdelt1 = float(image_header['CDELT1']) #Delta RA
        img_cdelt2 = float(image_header['CDELT2']) #Delta DEC

        SDSS_image = image_data
        # Matching central crop of the SDSS image (same Python-3 caveat).
        SDSS_image_crop = SDSS_image[(len(SDSS_image[0])/2)-10:(len(SDSS_image[0])/2)+10,(len(SDSS_image[1])/2)-10:(len(SDSS_image[1])/2)+10]
        SDSS_image_crop_norm = (SDSS_image_crop - np.min(SDSS_image_crop))/np.max(SDSS_image_crop - np.min(SDSS_image_crop))

    ##########

    # `image_data` only exists in the non-nominal branch; short-circuiting on
    # `not nominal` keeps the nominal path from evaluating it.
    if (not nominal) and np.size(np.where(image_data == 0.0)) != 2*np.size(image_data):
        # Cross-correlate normalised SAMI-cube g-band image and SDSS g-band image
        WCS_flag = 'SDSS'
        crosscorr_image = sp.signal.correlate2d(SDSS_image_crop_norm, cube_image_crop_norm)

        # 2D Gauss Fit the cross-correlated cropped image
        crosscorr_image_1d = np.ravel(crosscorr_image)

        #use for loops to recover indicies in x and y positions of flux values
        x_pos = []
        y_pos = []
        for i in range(np.shape(crosscorr_image)[0]):
            for j in range(np.shape(crosscorr_image)[1]):
                x_pos.append(i)
                y_pos.append(j)
        x_pos=np.array(x_pos)
        y_pos=np.array(y_pos)

        #define guess parameters for TwoDGaussFitter:
        amplitude = max(crosscorr_image_1d)
        mean_x = (np.shape(crosscorr_image)[0])/2
        mean_y = (np.shape(crosscorr_image)[1])/2
        sigma_x = 5.0
        sigma_y = 6.0
        rotation = 60.0
        offset = 4.0
        p0 = [amplitude, mean_x, mean_y, sigma_x, sigma_y, rotation, offset]

        # call SAMI TwoDGaussFitter
        GF2d = fitting.TwoDGaussFitter(p0, x_pos, y_pos, crosscorr_image_1d)
        # execute gauss fit using
        GF2d.fit()
        GF2d_xpos = GF2d.p[2]
        GF2d_ypos = GF2d.p[1]

        # reconstruct the fit
        GF2d_reconstruct=GF2d(x_pos, y_pos)

        # Offset of the correlation peak from the image centre, converted from
        # 5x-upsampled pixels back to arcsec, then degrees.
        x_shape = len(crosscorr_image[0])
        y_shape = len(crosscorr_image[1])

        x_offset_pix = GF2d_xpos - x_shape/2
        y_offset_pix = GF2d_ypos - y_shape/2

        x_offset_arcsec = -x_offset_pix * output_pix_size_arcsec/5
        y_offset_arcsec = y_offset_pix * output_pix_size_arcsec/5

        # NOTE(review): /24*360 is an hours-to-degrees factor (x15) applied to
        # the RA offset -- confirm this is intended rather than a cos(Dec) term.
        x_offset_degree = ((x_offset_arcsec/3600)/24)*360
        y_offset_degree = (y_offset_arcsec/3600)

    else:
        WCS_flag = 'Nominal'
        y_offset_degree = 0.0
        x_offset_degree = 0.0

    # Create dictionary of positional WCS
    # NOTE(review): for integer xcube, xcube//2 is always an int, so this test
    # is always True -- it may have been meant as an even/odd check; verify.
    if isinstance(xcube//2, int):
        WCS_pos={"CRVAL1":(img_crval1 + x_offset_degree), "CRVAL2":(img_crval2 + y_offset_degree), "CRPIX1":(xcube/2 + 0.5),
                 "CRPIX2":(ycube/2 + 0.5), "CDELT1":(img_cdelt1), "CDELT2":(img_cdelt2), "CTYPE1":"RA---TAN", "CTYPE2":"DEC--TAN",
                 "CUNIT1": 'deg', "CUNIT2": 'deg'}
    else:
        WCS_pos={"CRVAL1":(img_crval1 + x_offset_degree), "CRVAL2":(img_crval2 + y_offset_degree), "CRPIX1":(xcube/2),
                 "CRPIX2":(ycube/2), "CDELT1":(img_cdelt1), "CDELT2":(img_cdelt2), "CTYPE1":"RA---TAN", "CTYPE2":"DEC--TAN",
                 "CUNIT1": 'deg', "CUNIT2": 'deg'}

    ##########

    # Remove temporary files
    if remove_thput_file and os.path.exists("sdss_"+str(band)+".dat"):
        os.remove("sdss_"+str(band)+".dat")
    if os.path.exists(str(object_name)+"_SDSS_"+str(band)+".fits"):
        os.remove(str(object_name)+"_SDSS_"+str(band)+".fits")

    return WCS_pos,WCS_flag
def update_wcs_coords(filename, nominal=False, remove_thput_file=True):
    """Recalculate the WCS data in a SAMI datacube and write it back in place."""
    header = pf.getheader(filename)

    def _axis(naxis_key, crval_key, crpix_key, cdelt_key):
        # World coordinates of every pixel along one axis (1-indexed pixels).
        pixels = 1 + np.arange(header[naxis_key])
        return header[crval_key] + (pixels - header[crpix_key]) * header[cdelt_key]

    ra = _axis('NAXIS1', 'CRVAL1', 'CRPIX1', 'CDELT1')
    dec = _axis('NAXIS2', 'CRVAL2', 'CRPIX2', 'CDELT2')
    wave = _axis('NAXIS3', 'CRVAL3', 'CRPIX3', 'CDELT3')
    object_RA = np.mean(ra)
    object_DEC = np.mean(dec)
    object_flux_cube = pf.getdata(filename)
    object_name = header['NAME']

    # Map grating to photometric band; anything else is an error.
    band_for_grating = {'580V': 'g', '1000R': 'r'}
    try:
        band = band_for_grating[header['GRATID']]
    except KeyError:
        raise ValueError('Could not identify band. Exiting')

    size_of_grid = np.shape(object_flux_cube)[0] #should be = 50
    output_pix_size_arcsec = header['CDELT1'] #should be = 0.5

    # Calculate the WCS
    WCS_pos, WCS_flag = wcs_position_coords(
        object_RA, object_DEC, wave, object_flux_cube, object_name, band,
        size_of_grid, output_pix_size_arcsec, nominal=nominal,
        remove_thput_file=remove_thput_file)

    # Write the recalculated keywords back into the file's primary header.
    hdulist = pf.open(filename, 'update', do_not_scale_image_data=True)
    primary_header = hdulist[0].header
    for key, value in WCS_pos.items():
        primary_header[key] = value
    primary_header['WCS_SRC'] = WCS_flag
    hdulist.close()
    return
|
SAMI-Galaxy-SurveyREPO_NAMEsamiPATH_START.@sami_extracted@sami-master@general@wcs.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.