metadata
dict
text
stringlengths
0
40.6M
id
stringlengths
14
255
{ "filename": "__init__.py", "repo_name": "simonsobs/socs", "repo_path": "socs_extracted/socs-main/socs/agents/wiregrid_kikusui/drivers/__init__.py", "type": "Python" }
simonsobsREPO_NAMEsocsPATH_START.@socs_extracted@socs-main@socs@agents@wiregrid_kikusui@drivers@__init__.py@.PATH_END.py
{ "filename": "trivia_my.py", "repo_name": "AashishGpta/TIPSY", "repo_path": "TIPSY_extracted/TIPSY-master/trivia_my.py", "type": "Python" }
import numpy as np import pandas as pd import cmasher as cmr import plotly.express as px import plotly.graph_objects as go from scipy.interpolate import RegularGridInterpolator from scipy.interpolate import CubicSpline from gofish import imagecube from matplotlib import cm from matplotlib.colors import ListedColormap, LinearSegmentedColormap import PIL # added by Aashish for rotating GIFs import io # added by Aashish for rotating GIFs def read_cube(path, clip=None, rmin=None, rmax=None, N=None, vmin=None, vmax=None, dv=None, vunit_per_s=None): # Read in the FITS data. cube = imagecube(path) cube.data = cube.data.astype(float) if vunit_per_s == 'km': # added by me cube.velax = cube.velax*1e3 # Crop the data along the velocity axis, implemented from gofish vmin = cube.velax[0] if vmin is None else vmin*1.0e3 vmax = cube.velax[-1] if vmax is None else vmax*1.0e3 # vmin = 0.5*(cube.velax.min() + cube.velax.max()) - 5.0e3 if vmin is None else vmin*1.0e3 # vmax = 0.5*(cube.velax.min() + cube.velax.max()) + 5.0e3 if vmax is None else vmax*1.0e3 i = np.abs(cube.velax - vmin).argmin() i += 1 if cube.velax[i] < vmin else 0 j = np.abs(cube.velax - vmax).argmin() j -= 1 if cube.velax[j] > vmax else 0 cube.velax = cube.velax[i:j+1] cube.data = cube.data[i:j+1] if dv is not None: newvelax = np.arange(cube.velax[0], cube.velax[-1], dv*1.0e3) cs = CubicSpline(cube.velax, cube.data, axis=0) cube.data = cs(newvelax) cube.velax = newvelax # Generate a SNR mask mask_SNR = cube.data > clip * cube.rms # Generate a radial mask r, t, z = cube.disk_coords() rmin = 0 if rmin is None else rmin # rmax = cube.FOV/3. if rmax is None else rmax rmax = cube.FOV*3 if rmax is None else rmax # Added by Aashish to be conservative mask_r = np.logical_and(r >= rmin, r <= rmax) mask_r = np.tile(mask_r, (cube.data.shape[0], 1, 1)) # Generate a combined mask mask = np.logical_and(mask_SNR, mask_r) # Masked LOS velocity, RA, Dec, intensity arrays. 
v = np.around((cube.velax[:, None, None] * np.ones(cube.data.shape))[mask]/1e3,decimals=3) x = np.around((cube.xaxis[None, None, :] * np.ones(cube.data.shape))[mask],decimals=3) y = np.around((cube.yaxis[None, :, None] * np.ones(cube.data.shape))[mask],decimals=3) i = np.around(cube.data[mask],decimals=3) # Take N random voxel. N = int(np.max([v.size/1.0e5,1])) if N is None else N if N > 1: idx = np.arange(v.size) np.random.shuffle(idx) v = v[idx][::N] x = x[idx][::N] y = y[idx][::N] i = i[idx][::N] if (v.shape[0] > 1.0e6): print("Warning: There are total", v.shape[0], "points to present. The output file can be very large! Consider using a smaller N.") # Normalize the intensity. i = (i - i.min())/(i.max() - i.min()) # print(vmin,vmax,min(v),max(v)) # return(cube, x, y, v, vmin/1e3, vmax/1e3, i) return(cube, x, y, v, vmin, vmax, i) def make_ppv(path, clip=3., rmin=None, rmax=None, N=None, cmin=None, cmax=None, constant_opacity=None, ntrace=20, marker_size=2, cmap=None, hoverinfo='x+y+z', xaxis_title=None, yaxis_title=None, zaxis_title=None, xaxis_backgroundcolor=None, xaxis_gridcolor=None, yaxis_backgroundcolor=None, yaxis_gridcolor=None, zaxis_backgroundcolor=None, zaxis_gridcolor=None, xmin=None, xmax=None, ymin=None, ymax=None, vmin=None, vmax=None, dv=None, projection_x=False, projection_y=False, projection_z=True, show_colorbar=True, camera_eye_x=-1., camera_eye_y=-2., camera_eye_z=1., show_figure=False, write_pdf=False, write_png=False, write_html=True, write_csv=False, vunit_per_s='m', source_ra_off=None, source_dec_off=None, source_vel=None, bool_traj = False, traj = None, path2=None, clip2=3., marker_color=None,marker_color2=None,out_filename=None, write_gif=False,gif_start_ang=90,gif_duration=3,gif_N_angs = 15,gif_loops=0): """ Make a three-dimensional position-position-velocity diagram. Args: path (str): Relative path to the FITS cube. 
clip (Optional[float]): Clip the cube having cube.data > clip * cube.rms rmin (Optional[float]): Inner radius of the radial mask rmax (Optional[float]): Outer radius of the radial mask N (Optional[integer]): Downsample the data by a factor of N. cmin (Optional[float]): The lower bound of the velocity for the colorscale in km/s. cmax (Optional[float]): The upper bound of the velocity for the colorscale in km/s. constant_opacity (Optional[float]): If not None, use a constant opacity of the given value. ntrace (Optional[integer]): Number of opacity layers. markersize (Optional[integer]): Size of the marker in the PPV diagram. cmap (Optional[str]): Name of the colormap to use for the PPV diagram. hoverinfo (Optional[str]): Determines which trace information appear on hover. Any combination of "x", "y", "z", "text", "name" joined with a "+" or "all" or "none" or "skip". If `none` or `skip` are set, no information is displayed upon hovering. But, if `none` is set, click and hover events are still fired. xaxis_title (Optional[str]): X-axis title. yaxis_title (Optional[str]): Y-axis title. zaxis_title (Optional[str]): Z-axis title. xaxis_backgroundcolor (Optional[str]): X-axis background color. xaxis_gridcolor (Optional[str]): X-axis grid color. yaxis_backgroundcolor (Optional[str]): Y-axis background color. yaxis_gridcolor (Optional[str]): Y-axis grid color. zaxis_backgroundcolor (Optional[str]): Z-axis background color. zaxis_gridcolor (Optional[str]): Z-axis grid color. xmin (Optional[float]): The lower bound of PPV diagram X range. xmax (Optional[float]): The upper bound of PPV diagram X range. ymin (Optional[float]): The lower bound of PPV diagram Y range. ymax (Optional[float]): The upper bound of PPV diagram Y range. vmin (Optional[float]): The lower bound of PPV diagram Z range in km/s. vmax (Optional[float]): The upper bound of PPV diagram Z range in km/s. dv (Optional[float]): Desired velocity resolution in km/s. 
projection_x (Optional[bool]): Whether or not to add projection on the Y-Z plane. projection_y (Optional[bool]): Whether or not to add projection on the X-Z plane. projection_z (Optional[bool]): Whether or not to add projection on the X-Y plane. show_colorbar (Optional[bool]): Whether or not to plot a colorbar. camera_eye_x (Optional[float]): The x component of the 'eye' camera vector. camera_eye_y (Optional[float]): The y component of the 'eye' camera vector. camera_eye_z (Optional[float]): The z component of the 'eye' camera vector. show_figure (Optional[bool]): If True, show PPV diagram. write_pdf (Optional[bool]): If True, write PPV diagram in a pdf file. write_png (Optional[bool]): If True, write PPV diagram in a png file. write_html (Optional[bool]): If True, write PPV diagram in a html file. write_csv (Optional[bool]): If True, write the data to create the PPV diagram in a csv file. vunit_per_s (Optional[str]): Unit of spectral axis is assumed m(per second), could be changed to km. Added by Aashish. source_ra_off (Optional[float]): R.A. offset (arcsec) of a YSO wrt cube centre for a special marker. Added by Aashish. source_dec_off (Optional[float]): Decl. offset (arcsec) of a YSO wrt cube centre for a special marker. Added by Aashish. source_vel (Optional[float]): Radial velocity of a YSO for a special marker. Added by Aashish. bool_traj (Optional[bool]): If True, display trajectory profile(s). traj (Optional[array]): Array (list of lists, 2D numpy array, etc.) of shape 3xn which gives trajectory of particle in R.A., Decl., and R.V. domain. 
path2 (Optional[str]): Path to secondary fits file clip2 (Optional[float]): Clip the second cube having cube2.data > clip * cube2.rms marker_color (Optional[str]): Color for all the cube data markers, overrides cmap marker_color2 (Optional[str]): Color for all the second cube data markers, overrides cmap out_filename (Optional[str]): Name for the output files (.html, .pdf and .gif) write_gif (Optional[bool]): If True, save a rotating plot as a gif gif_start_ang (Optional[float]): Angle (projection) (in degrees) for the starting and ending frame of the gif, can be experimented with gif_duration (Optional[float]): Total duration of one gif loop, in seconds gif_N_angs (Optional[float]): Total no. of frames in one gif loop gif_loops (Optional[int]): Total no. of time gif loops, by default it is 0 and it means gif doesn't stop looping Returns: PPV diagram. Can also save in a pdf, html or gif format. """ vmin0 = vmin vmax0 = vmax cube, x, y, v, vmin, vmax, i = read_cube(path, clip=clip, rmin=rmin, rmax=rmax, N=N, vmin=vmin0, vmax=vmax0, dv=dv, vunit_per_s=vunit_per_s) # Determine the opacity of the data points. cuts = np.linspace(0, 1, ntrace+1) opacity = np.logspace(-1., 0.5, cuts.size - 1) if constant_opacity is not None: opacity[:] = constant_opacity data = [] xaxis_title = 'R.A. offset [arcsec]' if xaxis_title is None else xaxis_title yaxis_title = 'Decl. 
offset [arcsec]' if yaxis_title is None else yaxis_title zaxis_title = 'Radial velocity [km/s]' if zaxis_title is None else zaxis_title # xaxis_backgroundcolor = 'white' if xaxis_backgroundcolor is None else xaxis_backgroundcolor # xaxis_gridcolor = 'gray' if xaxis_gridcolor is None else xaxis_gridcolor # yaxis_backgroundcolor = 'white' if yaxis_backgroundcolor is None else yaxis_backgroundcolor # yaxis_gridcolor = 'gray' if yaxis_gridcolor is None else yaxis_gridcolor # zaxis_backgroundcolor = 'white' if zaxis_backgroundcolor is None else zaxis_backgroundcolor # zaxis_gridcolor = 'gray' if zaxis_gridcolor is None else zaxis_gridcolor xmin, xmax, ymin, ymax = min(x),max(x),min(y),max(y) # added by Aashish # xmin = cube.FOV/2.0 if xmin is None else xmin # xmax = -cube.FOV/2.0 if xmax is None else xmax # ymin = -cube.FOV/2.0 if ymin is None else ymin # ymax = cube.FOV/2.0 if ymax is None else ymax # if rmax is not None: # xmin, xmax, ymin, ymax = rmax, -rmax, -rmax, rmax colorscale = make_colorscale('cmr.pride') if cmap is None else cmap # cmin = min(v)/1.0e3 if cmin is None else cmin # added by Aashish # cmax = max(v)/1.0e3 if cmax is None else cmax # added by Aashish cmin = vmin/1.0e3 if cmin is None else cmin cmax = vmax/1.0e3 if cmax is None else cmax # 3d scatter plot for a, alpha in enumerate(opacity): mask = np.logical_and(i >= cuts[a], i < cuts[a+1]) if marker_color is None: data += [go.Scatter3d(x=x[mask], y=y[mask], z=v[mask], mode='markers', marker=dict(size=marker_size, color=v[mask], colorscale=colorscale, cauto=False, cmin=cmin, cmax=cmax, opacity=min(1.0, alpha)), hoverinfo=hoverinfo,name='Data', ) ] else: data += [go.Scatter3d(x=x[mask], y=y[mask], z=v[mask], mode='markers', marker=dict(size=marker_size, color=marker_color, opacity=min(1.0, alpha)), hoverinfo=hoverinfo,name='Data', ) ] ## Plotting data from another cube data2 = [] if path2 is not None: cube2, x2, y2, v2, vmin2, vmax2, i2 = read_cube(path2, clip=clip2, rmin=rmin, rmax=rmax, N=N, 
vmin=vmin0, vmax=vmax0, dv=dv, vunit_per_s=vunit_per_s) for a, alpha in enumerate(opacity): mask = np.logical_and(i2 >= cuts[a], i2 < cuts[a+1]) if marker_color2 is None: data2 += [go.Scatter3d(x=x2[mask], y=y2[mask], z=v2[mask], mode='markers', marker=dict(size=marker_size, color=v2[mask], colorscale=colorscale, cauto=False, cmin=cmin, cmax=cmax, opacity=min(1.0, alpha)), hoverinfo=hoverinfo,name='Data 2', ) ] else: data2 += [go.Scatter3d(x=x2[mask], y=y2[mask], z=v2[mask], mode='markers', marker=dict(size=marker_size, color=marker_color2, opacity=min(1.0, alpha)), hoverinfo=hoverinfo,name='Data 2', ) ] ### Add a special marker for source source = [] if source_ra_off is not None: # print(source_ra_off,source_dec_off,source_vel) source_vel = np.median(v) if source_vel is None else source_vel source += [go.Scatter3d(x=[source_ra_off], y=[source_dec_off], z=[source_vel],# mode='markers', marker=dict(size=10, color='black', opacity=0.9), hoverinfo=hoverinfo,name='Source',)] ## overplotting trajectories traj_line = [] if bool_traj: if traj != None: print("Showing Trajectory...") traj_line += [go.Scatter3d(x=traj[0], y=traj[1], z=traj[2], mode='lines', line=dict(color='black', width=8))] # traj_line += [go.Scatter3d(x=traj[0], y=traj[1], z=traj[2], mode='markers+lines', line=dict(color='black', width=8), marker=dict(color='black', size=3))] # just "mode='lines'" would work as well, added markers to show projections else: # Some defualt profiles to plot... 
not sure what is the best default, can be deleted r = np.linspace(xmin, xmax, 51) v_sys = 7.5 v_rot = 10 v = v_sys + v_rot / np.sqrt(np.abs(r)) * np.sign(r) l_rot = [] for theta in np.arange(-25,11,5): traj_line += [go.Scatter3d(x=traj[0], y=traj[1], z=traj[2], mode='lines', line=dict(color='black', width=8))] # traj_line += [go.Scatter3d(x=traj[0], y=traj[1], z=traj[2], mode='markers+lines', line=dict(color='black', width=8), marker=dict(color='black', size=3))] # just "mode='lines'" would work as well, added markers to show projections ## ----------------------- copied from jonathan's code # ## velocity profiles # ##first try plotting lines (but see below - surface is better) # r = np.linspace(xmin, xmax, 51) # v_sys = 7.5 # v_rot = 10 # v = v_sys + v_rot / np.sqrt(np.abs(r)) * np.sign(r) # l_rot = [] # for theta in np.arange(-25,11,5): # datas += [go.Scatter3d(x=r*np.cos(np.radians(theta)), y=r*np.sin(np.radians(theta)), z=v, mode='lines', line=dict(color='black', width=5))] # # fig = go.Figure(data = l_rot) ## plot surface, different angular extents on each side # v_sys = 3.9 # v_rot = 6.2 # pure rotation # #v_rot = 3.6 # rotation for a 2 Msun central mass # v_fall = 20 # pure infall # v_fall = 12 # infall added on to 2 Msun rotation # r1 = np.linspace(xmin/20,xmin,21) # t1 = np.radians(np.linspace(-30,30,21)) # RR, TT = np.meshgrid(r1, t1) # X1 = RR * np.cos(TT) # Y1 = RR * np.sin(TT) # Z1_rot = v_rot / np.sqrt(np.abs(RR)) * np.sign(RR) # Z1_fall = v_fall / RR # r2 = np.linspace(xmax/20,xmax,21) # t2 = np.radians(np.linspace(-30,10,21)) # RR, TT = np.meshgrid(r2, t2) # X2 = RR * np.cos(TT) # Y2 = RR * np.sin(TT) # Z2_rot = v_rot / np.sqrt(np.abs(RR)) * np.sign(RR) # Z2_fall = v_fall / RR # #fig = go.Figure(data = s_red + s_green + s_blue + l_rot) # # fig = go.Figure(data = s_red + s_green + s_blue) # plot_rotation = True # plot_infall = True # if plot_rotation: # print('Plotting rotation profile') # fig.add_surface(x=X1, y=Y1, z=v_sys+Z1_rot, opacity=0.5, 
showscale=False, surfacecolor=0*Z1_rot, colorscale='gray') # fig.add_surface(x=X2, y=Y2, z=v_sys+Z2_rot, opacity=0.5, showscale=False, surfacecolor=0*Z2_rot, colorscale='gray') # if plot_infall: # print('Plotting infall profile') # fig.add_surface(x=X1, y=Y1, z=v_sys+Z1_fall, opacity=0.5, showscale=False, surfacecolor=0*Z1_fall, colorscale='gray') # fig.add_surface(x=X2, y=Y2, z=v_sys+Z2_fall, opacity=0.5, showscale=False, surfacecolor=0*Z2_fall, colorscale='gray') # if plot_infall and plot_rotation: # print('Plotting infall and rotation') # fig.add_surface(x=X1, y=Y1, z=v_sys+Z1_rot+Z1_fall, opacity=0.5, showscale=False, surfacecolor=0*Z1_fall, colorscale='plasma') # fig.add_surface(x=X2, y=Y2, z=v_sys+Z2_rot+Z2_fall, opacity=0.5, showscale=False, surfacecolor=0*Z2_fall, colorscale='plasma') ## ----------------------- # layout layout = go.Layout(scene=dict(xaxis_title=xaxis_title, yaxis_title=yaxis_title, zaxis_title=zaxis_title, xaxis_backgroundcolor=xaxis_backgroundcolor, xaxis_gridcolor=xaxis_gridcolor, yaxis_backgroundcolor=yaxis_backgroundcolor, yaxis_gridcolor=yaxis_gridcolor, zaxis_backgroundcolor=zaxis_backgroundcolor, zaxis_gridcolor=zaxis_gridcolor, xaxis_range=[xmin, xmax], yaxis_range=[ymin, ymax], zaxis_range=[vmin/1.0e3, vmax/1.0e3], aspectmode='cube'), # margin=dict(l=0, r=0, b=0, t=0), margin=dict(l=0, r=0, b=0, t=0), showlegend=False, ) fig = go.Figure(data=data+data2+source+traj_line, layout=layout) proj_opacity = 0.9 fig.update_traces(projection_x=dict(show=projection_x, opacity=proj_opacity), projection_y=dict(show=projection_y, opacity=proj_opacity), projection_z=dict(show=projection_z, opacity=proj_opacity), ) if show_colorbar: fig.update_traces(marker_colorbar=dict(thickness=20, # tickvals=np.arange(cmin,cmax+1), tickformat='.1f', title='v [km/s]', title_side='right', len=0.5 ) ) # fig.update_layout(coloraxis_colorbar_x=-1.) 
camera = dict(up=dict(x=0, y=0, z=1), center=dict(x=0, y=0, z=0), eye=dict(x=camera_eye_x, y=camera_eye_y, z=camera_eye_z) ) fig.update_layout(scene_camera=camera) fig.update_layout(legend=dict( yanchor="top", y=0.99, xanchor="left", x=0.01 )) out_filename = path.replace('.fits', '_ppv') if out_filename is None else out_filename if show_figure: fig.show() if write_pdf: fig.write_image(out_filename +'.pdf',scale=3) if write_png: fig.write_image(out_filename +'.png',scale=3) if write_html: fig.write_html(out_filename +'.html', include_plotlyjs=True) if write_gif: print("Saving GIF...") # Rotate the plot ang_step = 360/gif_N_angs # Step between angles angs1 = np.arange(gif_start_ang,gif_start_ang-180,-ang_step) angs2 = np.arange(gif_start_ang,gif_start_ang+180,ang_step) angs = np.concatenate([angs2,(angs1[::-1]+360)%360]) # Array of all angles zoom_fac = 2.3 # Adjusts size of the image frames = [] for i in range(gif_N_angs): frame = go.Frame(layout=dict(scene_camera=dict(eye=dict(x=zoom_fac*np.cos(np.radians(angs[i])), y=zoom_fac*np.sin(np.radians(angs[i])), z=zoom_fac*0.5)))) frames.append(frame) fig.frames = frames # ### Following is to show the rotating plot in jupyter, not required for GIF # # Set animation settings # animation_settings = dict(frame=dict(duration=50, redraw=True),romcurrent=True, # transition=dict(duration=100, easing='quadratic-in-out'),) # # Add buttons to control the animation # fig.update_layout(updatemenus=[dict(type="buttons",buttons=[ # dict(label="Play",method="animate",args=[None, animation_settings]), # dict(label="Pause",method="animate",args=['null',dict(mode= "immediate")]) # ],),]) # generate images for each step in animation gif_frames = [] for s, fr in enumerate(fig.frames): # set main traces to appropriate traces within plotly frame fig.update(data=fr.data,layout=fr.layout) # generate image of current state gif_frames.append(PIL.Image.open(io.BytesIO(fig.to_image(format="png")))) # create animated GIF gif_frames[0].save( 
out_filename +'.gif', save_all=True, append_images=gif_frames[1:], optimize=True, duration=(gif_duration*1000)/gif_N_angs, # Total will take gif_duration*1000 milli-secs, /gif_N_angs for per frame loop=gif_loops, ) if write_csv: df = pd.DataFrame({"RA offset" : x, "Dec offset" : y, "velocity" : v}) df.to_csv(out_filename +'.csv', float_format='%.3e', index=False) return def concatenate_cmaps(cmap1, cmap2, ratio=None, ntot=None): """ Concatenate two colormaps. https://matplotlib.org/stable/tutorials/colors/colormap-manipulation.html Args: cmap1 (str): Name of the first colormap (bottom) to concatenate. cmap2 (str): Name of the second colormap (top) to concatenate. ratio (Optional[float]): The ratio between the first and second colormap. ntot (Optional[int]): The number of levels in the concatenated colormap. """ ratio = 0.5 if ratio is None else ratio ntot = 256 if ntot is None else ntot bottom = cm.get_cmap(cmap1, ntot) top = cm.get_cmap(cmap2, ntot) nbottom = int(ratio*ntot) ntop = ntot-nbottom newcolors = np.vstack((bottom(np.linspace(0, 1, nbottom)), top(np.linspace(0, 1, ntop)))) newcmp = ListedColormap(newcolors, name='newcolormap') newcmp = np.around(newcmp(range(ntot)),decimals=4) colorscale = [[f, 'rgb({}, {}, {})'.format(*newcmp[ff])] for ff, f in enumerate(np.around(np.linspace(0, 1, newcmp.shape[0]),decimals=4))] return colorscale def make_colorscale(cmap): """ Convert a color table into a CSS-compatible color table. Args: cmap (str): Color table name. e.g., 'cmr.pride' Returns: A list containing CSS-compatible color table. """ cmarr = np.array(cmr.take_cmap_colors('cmr.pride', 128)) colorscale = [[f, 'rgb({}, {}, {})'.format(*cmarr[ff])] for ff, f in enumerate(np.linspace(0, 1, cmarr.shape[0]))] return colorscale def make_cm(path, clip=3., fmin=None, fmed=None, fmax=None, vmin=None, vmax=None, xmin=None, xmax=None, ymin=None, ymax=None, nx=None, ny=None, cmap=None, nointerp=False, show_figure=False, write_html=True): """ Make interactive channel map. 
Args: path (str): Relative path to the FITS cube. clip (Optional[float]): Plot cube.data < clip * cube.rms in black and white. fmin (Optional[float]): The lower bound of the flux. fmed (Optional[float]): The boundary between bw/color cmaps. fmax (Optional[float]): The upper bound of the flux. vmin (Optional[float]): The lower bound of the velocity in km/s. vmax (Optional[float]): The upper bound of the velocity in km/s. xmin (Optional[float]): The lower bound of X range. xmax (Optional[float]): The upper bound of X range. ymin (Optional[float]): The lower bound of Y range. ymax (Optional[float]): The upper bound of Y range. nx (Optional[float]): Number of x pixels. ny (Optional[float]): Number of y pixels. cmap (Optional[str]): Color map to use. nointerp (Optional[bool]): If True, no interpolation applied to the data. show_figure (Optional[bool]): If True, show channel map. write_html (Optional[bool]): If True, write channel map in a html file. Returns: Interactive channel map in a html format. """ # Read in the FITS data. cube = imagecube(path) cube.data = cube.data.astype(float) fmin = 0. 
if fmin is None else fmin fmed = clip*cube.rms if fmed is None else fmed fmax = cube.data.max()*0.7 if fmax is None else fmax funit = 'Jy/beam' if fmax < 0.5 : cube.data *= 1.0e3 fmin *= 1.0e3 fmed *= 1.0e3 fmax *= 1.0e3 funit = 'mJy/beam' if xmin is None: xmin = cube.FOV/2.0 i = -1 else: xmin = xmin i = np.abs(cube.xaxis - xmin).argmin() i += 1 if cube.xaxis[i] < xmin else 0 if xmax is None: xmax = -cube.FOV/2.0 j = -1 else: xmax = xmax j = np.abs(cube.xaxis - xmax).argmin() j -= 1 if cube.xaxis[j] > xmax else 0 cube.xaxis = cube.xaxis[j+1:i] cube.data = cube.data[:,:,j+1:i] if ymin is None: ymin = -cube.FOV/2.0 i = 0 else: ymin = ymin i = np.abs(cube.yaxis - ymin).argmin() i += 1 if cube.yaxis[i] < ymin else 0 if ymax is None: ymax = cube.FOV/2.0 j = -1 else: ymax = ymax j = np.abs(cube.yaxis - ymax).argmin() j -= 1 if cube.yaxis[j] > ymax else 0 cube.yaxis = cube.yaxis[i:j] cube.data = cube.data[:,i:j,:] # Crop the data along the velocity axis, implemented from gofish vmin = cube.velax[0] if vmin is None else vmin*1.0e3 vmax = cube.velax[-1] if vmax is None else vmax*1.0e3 i = np.abs(cube.velax - vmin).argmin() i += 1 if cube.velax[i] < vmin else 0 j = np.abs(cube.velax - vmax).argmin() j -= 1 if cube.velax[j] > vmax else 0 cube.velax = cube.velax[i:j+1] cube.data = cube.data[i:j+1] if (cube.velax.shape[0] > 200.): print("Warning: There are total", cube.velax.shape[0], "channels. The output file can be very large! Consider using a smaller velocity range by changing vmin and vmax.") # Interpolate the cube on the RA-Dec plane # Caution: This is only for visualization purposes. # Avoid using this interpolation routine for scientific purposes. 
if not nointerp: nx = 400 if nx is None else nx ny = 400 if ny is None else ny oldx = cube.xaxis oldy = cube.yaxis cube.xaxis = np.linspace(cube.xaxis[0],cube.xaxis[-1],nx) cube.yaxis = np.linspace(cube.yaxis[0],cube.yaxis[-1],ny) cube.nxpix, cube.nypix = nx, ny newx, newy = np.meshgrid(cube.xaxis, cube.yaxis) newdata = np.zeros((cube.data.shape[0],ny,nx)) for i in np.arange(cube.data.shape[0]): interp_func = RegularGridInterpolator((oldy, oldx[::-1]), cube.data[i]) newdata[i] = interp_func(np.array([newy.flatten(), newx.flatten()]).T).reshape((ny,nx))[:,::-1] cube.data = newdata else: print("Warning: No interpolation will perform. The output file can be very large!") cube.xaxis = np.around(cube.xaxis,decimals=3) cube.yaxis = np.around(cube.yaxis,decimals=3) toplot = np.around(cube.data,decimals=3) cmap = concatenate_cmaps('binary','inferno',ratio=fmed/fmax) if cmap is None else concatenate_cmaps('binary',cmap,ratio=fmed/fmax) fig = px.imshow(toplot, color_continuous_scale=cmap, origin='lower', x=cube.xaxis, y=cube.yaxis, zmin=fmin, zmax=fmax, labels=dict(x="RA offset [arcsec]", y="Dec offset [arcsec]", color="Intensity ["+funit+"]", animation_frame="channel"), animation_frame=0, ) # fig.update_xaxes(range=[xmin, xmax],autorange=False) # fig.update_yaxes(range=[ymax, ymin],autorange=False) fig.update_xaxes(autorange="reversed") fig.update_xaxes(ticks="outside") fig.update_yaxes(ticks="outside") for i, frame in enumerate(fig.frames): frame['layout'].update(title_text="v = {:.2f} km/s".format(cube.velax[i]/1.0e3), title_x=0.5, ) if show_figure: fig.show() if write_html: fig.write_html(path.replace('.fits', '_channel.html'), include_plotlyjs='cdn') return
AashishGptaREPO_NAMETIPSYPATH_START.@TIPSY_extracted@TIPSY-master@trivia_my.py@.PATH_END.py
{ "filename": "transforms.py", "repo_name": "statsmodels/statsmodels", "repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/distributions/copula/transforms.py", "type": "Python" }
""" Transformation Classes as generators for Archimedean copulas Created on Wed Jan 27 14:33:40 2021 Author: Josef Perktold License: BSD-3 """ import warnings import numpy as np from scipy.special import expm1, gamma class Transforms: def __init__(self): pass def deriv2_inverse(self, phi, args): t = self.inverse(phi, args) phi_d1 = self.deriv(t, args) phi_d2 = self.deriv2(t, args) return np.abs(phi_d2 / phi_d1**3) def derivk_inverse(self, k, phi, theta): raise NotImplementedError("not yet implemented") class TransfFrank(Transforms): def evaluate(self, t, theta): t = np.asarray(t) with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) val = -(np.log(-expm1(-theta*t)) - np.log(-expm1(-theta))) return val # return - np.log(expm1(-theta*t) / expm1(-theta)) def inverse(self, phi, theta): phi = np.asarray(phi) return -np.log1p(np.exp(-phi) * expm1(-theta)) / theta def deriv(self, t, theta): t = np.asarray(t) tmp = np.exp(-t*theta) return -theta * tmp/(tmp - 1) def deriv2(self, t, theta): t = np.asarray(t) tmp = np.exp(theta * t) d2 = - theta**2 * tmp / (tmp - 1)**2 return d2 def deriv2_inverse(self, phi, theta): et = np.exp(theta) ept = np.exp(phi + theta) d2 = (et - 1) * ept / (theta * (ept - et + 1)**2) return d2 def deriv3_inverse(self, phi, theta): et = np.exp(theta) ept = np.exp(phi + theta) d3 = -(((et - 1) * ept * (ept + et - 1)) / (theta * (ept - et + 1)**3)) return d3 def deriv4_inverse(self, phi, theta): et = np.exp(theta) ept = np.exp(phi + theta) p = phi b = theta d4 = ((et - 1) * ept * (-4 * ept + np.exp(2 * (p + b)) + 4 * np.exp(p + 2 * b) - 2 * et + np.exp(2 * b) + 1) ) / (b * (ept - et + 1)**4) return d4 def is_completly_monotonic(self, theta): # range of theta for which it is copula for d>2 (more than 2 rvs) return theta > 0 & theta < 1 class TransfClayton(Transforms): def _checkargs(self, theta): return theta > 0 def evaluate(self, t, theta): return np.power(t, -theta) - 1. 
def inverse(self, phi, theta): return np.power(1 + phi, -1/theta) def deriv(self, t, theta): return -theta * np.power(t, -theta-1) def deriv2(self, t, theta): return theta * (theta + 1) * np.power(t, -theta-2) def deriv_inverse(self, phi, theta): return -(1 + phi)**(-(theta + 1) / theta) / theta def deriv2_inverse(self, phi, theta): return ((theta + 1) * (1 + phi)**(-1 / theta - 2)) / theta**2 def deriv3_inverse(self, phi, theta): th = theta # shorthand d3 = -((1 + th) * (1 + 2 * th) / th**3 * (1 + phi)**(-1 / th - 3)) return d3 def deriv4_inverse(self, phi, theta): th = theta # shorthand d4 = ((1 + th) * (1 + 2 * th) * (1 + 3 * th) / th**4 ) * (1 + phi)**(-1 / th - 4) return d4 def derivk_inverse(self, k, phi, theta): thi = 1 / theta # shorthand d4 = (-1)**k * gamma(k + thi) / gamma(thi) * (1 + phi)**(-(k + thi)) return d4 def is_completly_monotonic(self, theta): return theta > 0 class TransfGumbel(Transforms): ''' requires theta >=1 ''' def _checkargs(self, theta): return theta >= 1 def evaluate(self, t, theta): return np.power(-np.log(t), theta) def inverse(self, phi, theta): return np.exp(-np.power(phi, 1. / theta)) def deriv(self, t, theta): return - theta * (-np.log(t))**(theta - 1) / t def deriv2(self, t, theta): tmp1 = np.log(t) d2 = (theta*(-1)**(1 + theta) * tmp1**(theta-1) * (1 - theta) + theta*(-1)**(1 + theta)*tmp1**theta)/(t**2*tmp1) # d2 = (theta * tmp1**(-1 + theta) * (1 - theta) + theta * tmp1**theta # ) / (t**2 * tmp1) return d2 def deriv2_inverse(self, phi, theta): th = theta # shorthand d2 = (phi**(2 / th) + (th - 1) * phi**(1 / th)) / (phi**2 * th**2) d2 *= np.exp(-phi**(1 / th)) return d2 def deriv3_inverse(self, phi, theta): p = phi # shorthand b = theta d3 = (-p**(3 / b) + (3 - 3 * b) * p**(2 / b) + ((3 - 2 * b) * b - 1) * p**(1 / b) ) / (p * b)**3 d3 *= np.exp(-p**(1 / b)) return d3 def deriv4_inverse(self, phi, theta): p = phi # shorthand b = theta d4 = ((6 * b**3 - 11 * b**2 + 6. 
* b - 1) * p**(1 / b) + (11 * b**2 - 18 * b + 7) * p**(2 / b) + (6 * (b - 1)) * p**(3 / b) + p**(4 / b) ) / (p * b)**4 d4 *= np.exp(-p**(1 / b)) return d4 def is_completly_monotonic(self, theta): return theta > 1 class TransfIndep(Transforms): def evaluate(self, t, *args): t = np.asarray(t) return -np.log(t) def inverse(self, phi, *args): phi = np.asarray(phi) return np.exp(-phi) def deriv(self, t, *args): t = np.asarray(t) return - 1./t def deriv2(self, t, *args): t = np.asarray(t) return 1. / t**2 def deriv2_inverse(self, phi, *args): return np.exp(-phi) def deriv3_inverse(self, phi, *args): return -np.exp(-phi) def deriv4_inverse(self, phi, *args): return np.exp(-phi) class _TransfPower(Transforms): """generic multivariate Archimedean copula with additional power transforms Nelson p.144, equ. 4.5.2 experimental, not yet tested and used """ def __init__(self, transform): self.transform = transform def evaluate(self, t, alpha, beta, *tr_args): t = np.asarray(t) phi = np.power(self.transform.evaluate(np.power(t, alpha), *tr_args), beta) return phi def inverse(self, phi, alpha, beta, *tr_args): phi = np.asarray(phi) transf = self.transform phi_inv = np.power(transf.evaluate(np.power(phi, 1. / beta), *tr_args), 1. / alpha) return phi_inv
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@distributions@copula@transforms.py@.PATH_END.py
{ "filename": "_ticktextsrc.py", "repo_name": "plotly/plotly.py", "repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/polar/radialaxis/_ticktextsrc.py", "type": "Python" }
import _plotly_utils.basevalidators class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__( self, plotly_name="ticktextsrc", parent_name="layout.polar.radialaxis", **kwargs ): super(TicktextsrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "none"), **kwargs, )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@polar@radialaxis@_ticktextsrc.py@.PATH_END.py
{ "filename": "_domain.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/indicator/_domain.py", "type": "Python" }
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType import copy as _copy class Domain(_BaseTraceHierarchyType): # class properties # -------------------- _parent_path_str = "indicator" _path_str = "indicator.domain" _valid_props = {"column", "row", "x", "y"} # column # ------ @property def column(self): """ If there is a layout grid, use the domain for this column in the grid for this indicator trace . The 'column' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [0, 9223372036854775807] Returns ------- int """ return self["column"] @column.setter def column(self, val): self["column"] = val # row # --- @property def row(self): """ If there is a layout grid, use the domain for this row in the grid for this indicator trace . The 'row' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [0, 9223372036854775807] Returns ------- int """ return self["row"] @row.setter def row(self, val): self["row"] = val # x # - @property def x(self): """ Sets the horizontal domain of this indicator trace (in plot fraction). The 'x' property is an info array that may be specified as: * a list or tuple of 2 elements where: (0) The 'x[0]' property is a number and may be specified as: - An int or float in the interval [0, 1] (1) The 'x[1]' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- list """ return self["x"] @x.setter def x(self, val): self["x"] = val # y # - @property def y(self): """ Sets the vertical domain of this indicator trace (in plot fraction). 
The 'y' property is an info array that may be specified as: * a list or tuple of 2 elements where: (0) The 'y[0]' property is a number and may be specified as: - An int or float in the interval [0, 1] (1) The 'y[1]' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- list """ return self["y"] @y.setter def y(self, val): self["y"] = val # Self properties description # --------------------------- @property def _prop_descriptions(self): return """\ column If there is a layout grid, use the domain for this column in the grid for this indicator trace . row If there is a layout grid, use the domain for this row in the grid for this indicator trace . x Sets the horizontal domain of this indicator trace (in plot fraction). y Sets the vertical domain of this indicator trace (in plot fraction). """ def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs): """ Construct a new Domain object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.indicator.Domain` column If there is a layout grid, use the domain for this column in the grid for this indicator trace . row If there is a layout grid, use the domain for this row in the grid for this indicator trace . x Sets the horizontal domain of this indicator trace (in plot fraction). y Sets the vertical domain of this indicator trace (in plot fraction). 
Returns ------- Domain """ super(Domain, self).__init__("domain") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.indicator.Domain constructor must be a dict or an instance of :class:`plotly.graph_objs.indicator.Domain`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("column", None) _v = column if column is not None else _v if _v is not None: self["column"] = _v _v = arg.pop("row", None) _v = row if row is not None else _v if _v is not None: self["row"] = _v _v = arg.pop("x", None) _v = x if x is not None else _v if _v is not None: self["x"] = _v _v = arg.pop("y", None) _v = y if y is not None else _v if _v is not None: self["y"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@indicator@_domain.py@.PATH_END.py
{ "filename": "_colorsrc.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/waterfall/outsidetextfont/_colorsrc.py", "type": "Python" }
import _plotly_utils.basevalidators class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__( self, plotly_name="colorsrc", parent_name="waterfall.outsidetextfont", **kwargs ): super(ColorsrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "none"), **kwargs, )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@waterfall@outsidetextfont@_colorsrc.py@.PATH_END.py
{ "filename": "heuristics.py", "repo_name": "realfastvla/realfast", "repo_path": "realfast_extracted/realfast-main/realfast/heuristics.py", "type": "Python" }
from __future__ import print_function, division, absolute_import#, unicode_literals # not casa compatible from builtins import bytes, dict, object, range, map, input#, str # not casa compatible from future.utils import itervalues, viewitems, iteritems, listvalues, listitems from io import open import os.path import sys import logging logger = logging.getLogger(__name__) logger.setLevel(20) def reader_memory_available(cl): """ Calc memory in use by READERs """ memories = [] for vals in itervalues(cl.scheduler_info()['workers']): if 'READER' in vals['resources']: if vals['resources']['MEMORY'] > 0: memories.append(vals['resources']['MEMORY']-vals['metrics']['memory']) else: memories.append(0) return memories def reader_memory_used(cl): """ Calc memory in use by READERs """ return [vals['metrics']['memory'] for vals in itervalues(cl.scheduler_info()['workers']) if 'READER' in vals['resources']] def spilled_memory(daskdir='.'): """ How much memory has been spilled by dask/distributed? """ spilled = 0 for dirpath, dirnames, filenames in os.walk(daskdir): for filename in filenames: try: spilled += os.path.getsize(os.path.join(dirpath, filename))/1024.**3 except OSError: try: spilled += os.path.getsize(os.path.join(dirpath, filename))/1024.**3 except OSError: logger.warn("Could not get size of spilled file. Skipping.") return spilled def reader_memory_ok(cl, memory_required): """ Does any READER worker have enough memory? memory_required is the size of the read in bytes """ for worker_memory in reader_memory_available(cl): if worker_memory and (worker_memory > memory_required): return True logger.debug("No worker found with required memory of {0} GB" .format(memory_required/1e9)) return False def readertotal_memory_ok(cl, memory_limit): """ Is total READER memory usage too high? memory_limit is total memory used in bytes """ if memory_limit is not None: total = sum(reader_memory_used(cl)) if total > memory_limit: logger.debug("Total of {0} GB in use. 
Exceeds limit of {1} GB." .format(total/1e9, memory_limit/1e9)) return total < memory_limit else: return True def spilled_memory_ok(limit=1.0, daskdir='.'): """ Calculate total memory spilled (in GB) by dask distributed. """ spilled = spilled_memory(daskdir) if spilled < limit: return True else: logger.debug("Spilled memory {0:.1f} GB exceeds limit of {1:.1f}" .format(spilled, limit)) return False def state_validates(config=None, inmeta=None, sdmfile=None, sdmscan=None, bdfdir=None, preffile=None, prefsname=None, inprefs={}): """ Try to compile state """ from rfpipe import state try: st = state.State(inmeta=inmeta, config=config, preffile=preffile, inprefs=inprefs, name=prefsname, sdmfile=sdmfile, sdmscan=sdmscan, bdfdir=bdfdir, showsummary=False, validate=True) return True except: import traceback traceback.print_tb(sys.exc_info()[2]) logger.warn("State did not validate") return False def reffreq_to_band(reffreqs, edge=5e8): """ Given list of reffreqs, return name of band that contains all of them. edge defines frequency edge around each nominal band to include. """ nspw = len(reffreqs) for band, low, high in [('L', 1e9, 2e9), ('S', 2e9, 4e9), ('C', 4e9, 8e9), ('X', 8e9, 12e9), ('Ku', 12e9, 18e9), ('K', 18e9, 26.5e9), ('Ka', 26.5e9, 30e9), ('Q', 40e9, 50e9)]: reffreq_inband = [reffreq for reffreq in reffreqs if ((reffreq >= low-edge) and (reffreq < high+edge))] if len(reffreq_inband) == nspw: return band return None def is_nrao_default(inmeta): """ Parses metadata to determine if it is consistent with NRAO default correlator mode. 
""" nspw = len(inmeta['spw_orig']) if nspw != 16: logger.info("NRAO default fail: {0} spw".format(nspw)) return False else: logger.info("NRAO default pass: 16 spw") band = reffreq_to_band(inmeta['spw_reffreq']) if band is None: logger.info("NRAO default fail: reffreqs not in single band {0} " .format(inmeta['spw_reffreq'])) return False else: logger.info("NRAO default pass: All {0} spw are in {1} band" .format(nspw, band)) if not all([nchan == 64 for nchan in inmeta['spw_nchan']]): logger.info("NRAO default fail: not all spw have 64 chans {0} " .format(inmeta['spw_nchan'])) return False else: nchan = 64 logger.info("NRAO default pass: all spw have {0} channels" .format(nchan)) if not all([inmeta['spw_chansize'][0] for chansize in inmeta['spw_chansize']]): logger.info("NRAO default fail: not all spw have same chansize") return False else: chansize = inmeta['spw_chansize'][0] logger.info("NRAO default pass: all spw have chansize of {0}" .format(chansize)) if len(inmeta['pols_orig']) != 4: logger.info("NRAO default fail: {0} pols".format(inmeta['pols_orig'])) return False else: logger.info("NRAO default pass: Full pol") bandwidth = max(inmeta['spw_reffreq']) - min(inmeta['spw_reffreq']) + nchan*chansize # bandwidth = sum([nchan * chansize for chansize in inmeta['spw_chansize']]) if band == 'L' and bandwidth != 1024000000.0: logger.info("NRAO default fail: band {0} has bandwidth {1}" .format(band, bandwidth)) return False elif band == 'S' and bandwidth != 2048000000.0: logger.info("NRAO default fail: band {0} has bandwidth {1}" .format(band, bandwidth)) return False elif band == 'C' and bandwidth != 2048000000.0: logger.info("NRAO default fail: band {0} has bandwidth {1}" .format(band, bandwidth)) return False elif band == 'X' and bandwidth != 2048000000.0: logger.info("NRAO default fail: band {0} has bandwidth {1}" .format(band, bandwidth)) return False else: logger.info("NRAO default pass: bandwidth {0} for band {1}" .format(bandwidth, band)) if inmeta['inttime'] > 
1.: logger.info("NRAO default fail: inttime {0} >= 1 s".format(inmeta['inttime'])) return False else: logger.info("NRAO default pass: inttime {0} < 1 s".format(inmeta['inttime'])) return True def total_images_searched(st): """ Number of images formed (trials) in all segments, dms, dts. """ si = 0 for segment in range(st.nsegment): for dmind in range(len(st.dmarr)): for dtind in range(len(st.dtarr)): si += len(st.get_search_ints(segment, dmind, dtind)) return si def total_compute_time(st): """ Uses a simple model for total GPU compute time (in sec) based on profiling. Models the GPU time per trial (incl data in, amortized over many dm/dt). No distributed data movement time included. 2.3e-4 s (512x512) 6.1e-4 s (1024x1024) 1.2e-3 s (2048x2048) 3.8e-3 s (4096x4096) """ from math import log time_ref = 2.3e-4 npix_ref = 512 si = total_images_searched(st) npix = (st.npixx+st.npixy)/2 return si * time_ref * npix*log(npix)/(npix_ref*log(npix_ref)) def total_memory_read(st): """ Memory read (in GB) including overlapping read at segment boundaries. """ return st.nsegment*st.vismem
realfastvlaREPO_NAMErealfastPATH_START.@realfast_extracted@realfast-main@realfast@heuristics.py@.PATH_END.py
{ "filename": "_side.py", "repo_name": "plotly/plotly.py", "repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/line/colorbar/title/_side.py", "type": "Python" }
import _plotly_utils.basevalidators class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator): def __init__( self, plotly_name="side", parent_name="scatter3d.line.colorbar.title", **kwargs ): super(SideValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), values=kwargs.pop("values", ["right", "top", "bottom"]), **kwargs, )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@line@colorbar@title@_side.py@.PATH_END.py
{ "filename": "_color.py", "repo_name": "plotly/plotly.py", "repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolargl/legendgrouptitle/font/_color.py", "type": "Python" }
import _plotly_utils.basevalidators class ColorValidator(_plotly_utils.basevalidators.ColorValidator): def __init__( self, plotly_name="color", parent_name="scatterpolargl.legendgrouptitle.font", **kwargs, ): super(ColorValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "style"), **kwargs, )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterpolargl@legendgrouptitle@font@_color.py@.PATH_END.py
{ "filename": "plot_viol.py", "repo_name": "MNGuenther/allesfitter", "repo_path": "allesfitter_extracted/allesfitter-master/allesfitter/postprocessing/plot_viol.py", "type": "Python" }
""" Pipeline to analyze allesfitter output for planet transit timings argument 1: allesfitter path argument 2: p-value threshold argument 3: Boolean to select to plot wout/with TESS or wout/with/only TESS Tansu Daylan MIT Kavli Institute, Cambridge, MA, 02109, US tansu.daylan@gmail.com www.tansudaylan.com """ import numpy as np import scipy import os, datetime, sys import matplotlib.pyplot as plt from tdpy.util import summgene import allesfitter import allesfitter.postprocessing.plot_viol from allesfitter import config import astropy class gdatstrt(object): def __init__(self): self.boollockmodi = False pass def __setattr__(self, attr, valu): super(gdatstrt, self).__setattr__(attr, valu) def plot(gdat, indxstar, indxpara=None, strgtype='evol'): if indxstar.size == 1: strg = gdat.liststrgstar[indxstar[0]] + '_' else: strg = '' print('strgtype') print(strgtype) listticklabl = [] if strgtype == 'epocevol': chanlist = [[[] for m in gdat.indxstar] for i in gdat.indxruns] xpos = np.array(gdat.listyear) for i in gdat.indxruns: for m in indxstar: chanlist[i][m] = [gdat.timejwst[k][i][m] for k in gdat.indxyear] for k in gdat.indxyear: listticklabl.append('%s' % str(gdat.listyear[k])) else: chanlist = [] numbxdat = gdat.numbruns * indxstar.size xpos = 0.6 * (np.arange(numbxdat) + 1.) for i in gdat.indxruns: for m in indxstar: if strgtype == 'jwstcomp': chanlist.append(gdat.timejwst[1][i][m]) if strgtype == 'paracomp': for k in indxpara: chanlist.append((gdat.listobjtalle[i][m].posterior_params[gdat.liststrgparaconc[k]] - \ np.mean(gdat.listobjtalle[i][m].posterior_params[gdat.liststrgparaconc[k]])) * 24. * 60.) 
if strgtype == 'paracomp' or strgtype == 'jwstcomp': ticklabl = '%s, %s' % (gdat.liststrgstar[m], gdat.liststrgruns[i]) listticklabl.append(ticklabl) else: ticklabl = '%s, %s' % (gdat.liststrgstar[m], gdat.liststrgruns[i]) listticklabl.append(ticklabl) if xpos.size != len(listticklabl): raise Exception('') print('xpos') summgene(xpos) print('chanlist') print(chanlist) figr, axis = plt.subplots(figsize=(5, 4)) if strgtype != 'epocevol': axis.violinplot(chanlist, xpos, showmedians=True, showextrema=False) else: for i in gdat.indxruns: for m in indxstar: axis.violinplot(chanlist[i][m], xpos, showmedians=True, showextrema=False) axis.set_xticks(xpos) if strgtype == 'jwstcomp': axis.set_ylabel('Transit time residual in 2023 [min]') strgbase = strgtype if strgtype == 'paracomp': if gdat.liststrgparaconc[indxpara] == 'b_period': axis.set_ylabel('P [min]') else: labl = gdat.listlablparaconc[indxpara[0]] axis.set_ylabel(labl) strgbase = '%04d' % indxpara if strgtype == 'epocevol': axis.set_xlabel('Year') axis.set_ylabel('Transit time residual [min]') strgbase = strgtype path = gdat.pathimag + 'viol_%s.%s' % (strgbase, gdat.strgplotextn) axis.set_xticklabels(listticklabl) plt.tight_layout() print('Writing to %s...' % path) print() figr.savefig(path) plt.close() def plot_viol(pathbase, liststrgstar, liststrgruns, lablstrgruns, pathimag, pvalthrs=1e-3): strgtimestmp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S') print('allesfitter postprocessing violin plot started at %s...' 
% strgtimestmp) # construct global object gdat = gdatstrt() # copy unnamed inputs to the global object #for attr, valu in locals().iter(): for attr, valu in locals().items(): if '__' not in attr and attr != 'gdat': setattr(gdat, attr, valu) # runs to be compared for each star gdat.numbruns = len(liststrgruns) gdat.indxruns = np.arange(gdat.numbruns) gdat.pathimag = pathimag gdat.liststrgstar = liststrgstar # stars numbstar = len(liststrgstar) gdat.indxstar = np.arange(numbstar) # plotting gdat.strgplotextn = 'png' # read parameter keys, labels and posterior from allesfitter output liststrgpara = [[] for i in gdat.indxruns] listlablpara = [[] for i in gdat.indxruns] gdat.listobjtalle = [[[] for m in gdat.indxstar] for i in gdat.indxruns] for i in gdat.indxruns: for m in gdat.indxstar: pathalle = pathbase + '%s/allesfits/allesfit_%s/' % (gdat.liststrgstar[m], gdat.liststrgruns[i]) print('Reading from %s...' % pathalle) config.init(pathalle) liststrgpara[i] = np.array(config.BASEMENT.fitkeys) listlablpara[i] = np.array(config.BASEMENT.fitlabels) # read the chain print('pathalle') print(pathalle) gdat.listobjtalle[i][m] = allesfitter.allesclass(pathalle) # concatenate the keys, labels from different runs gdat.liststrgparaconc = np.concatenate(liststrgpara) gdat.liststrgparaconc = np.unique(gdat.liststrgparaconc) gdat.listlablparaconc = np.copy(gdat.liststrgparaconc) for k, strgparaconc in enumerate(gdat.liststrgparaconc): for i, strgruns in enumerate(liststrgruns): if strgparaconc in liststrgpara[i]: gdat.listlablparaconc[k] = listlablpara[i][np.where(liststrgpara[i] == strgparaconc)[0][0]] gdat.numbparaconc = len(gdat.liststrgparaconc) gdat.indxparaconc = np.arange(gdat.numbparaconc) for k, strgpara in enumerate(gdat.liststrgparaconc): booltemp = True for i in gdat.indxruns: if not strgpara in liststrgpara[i]: booltemp = False if not booltemp: continue ## violin plot ## mid-transit time prediction plot(gdat, gdat.indxstar, indxpara=np.array([k]), strgtype='paracomp') 
## per-star #for m in gdat.indxstar: # plot(gdat, indxstar=np.array([m]), indxpara=k, strgtype='paracomp') # calculate the future evolution of epoch gdat.listyear = [2021, 2023, 2025] numbyear = len(gdat.listyear) gdat.indxyear = np.arange(numbyear) gdat.timejwst = [[[[] for m in gdat.indxstar] for i in gdat.indxruns] for k in gdat.indxyear] for k, year in enumerate(gdat.listyear): epocjwst = astropy.time.Time('%d-01-01 00:00:00' % year, format='iso').jd for i in gdat.indxruns: for m in gdat.indxstar: epoc = gdat.listobjtalle[i][m].posterior_params['b_epoch'] peri = gdat.listobjtalle[i][m].posterior_params['b_period'] indxtran = (epocjwst - epoc) / peri indxtran = np.mean(np.rint(indxtran)) if indxtran.size != np.unique(indxtran).size: raise Exception('') gdat.timejwst[k][i][m] = epoc + peri * indxtran gdat.timejwst[k][i][m] -= np.mean(gdat.timejwst[k][i][m]) gdat.timejwst[k][i][m] *= 24. * 60. listfigr = [] listaxis = [] # temporal evolution of mid-transit time prediction plot(gdat, gdat.indxstar, strgtype='epocevol') ## per-star #for m in gdat.indxstar: # plot(gdat, indxstar=np.array([m]), strgtype='epocevol') ## mid-transit time prediction plot(gdat, gdat.indxstar, strgtype='jwstcomp') ## per-star #for m in gdat.indxstar: # plot(gdat, indxstar=np.array([m]), strgtype='jwstcomp') return listfigr, listaxis
MNGuentherREPO_NAMEallesfitterPATH_START.@allesfitter_extracted@allesfitter-master@allesfitter@postprocessing@plot_viol.py@.PATH_END.py
{ "filename": "_cauto.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergeo/marker/_cauto.py", "type": "Python" }
import _plotly_utils.basevalidators class CautoValidator(_plotly_utils.basevalidators.BooleanValidator): def __init__(self, plotly_name="cauto", parent_name="scattergeo.marker", **kwargs): super(CautoValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), implied_edits=kwargs.pop("implied_edits", {}), role=kwargs.pop("role", "info"), **kwargs )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergeo@marker@_cauto.py@.PATH_END.py
{ "filename": "anyscale.ipynb", "repo_name": "langchain-ai/langchain", "repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/chat/anyscale.ipynb", "type": "Jupyter Notebook" }
--- sidebar_label: Anyscale --- # ChatAnyscale This notebook demonstrates the use of `langchain.chat_models.ChatAnyscale` for [Anyscale Endpoints](https://endpoints.anyscale.com/). * Set `ANYSCALE_API_KEY` environment variable * or use the `anyscale_api_key` keyword argument ```python %pip install --upgrade --quiet langchain-openai ``` ```python import os from getpass import getpass if "ANYSCALE_API_KEY" not in os.environ: os.environ["ANYSCALE_API_KEY"] = getpass() ``` ········ # Let's try out each model offered on Anyscale Endpoints ```python from langchain_community.chat_models import ChatAnyscale chats = { model: ChatAnyscale(model_name=model, temperature=1.0) for model in ChatAnyscale.get_available_models() } print(chats.keys()) ``` dict_keys(['meta-llama/Llama-2-70b-chat-hf', 'meta-llama/Llama-2-7b-chat-hf', 'meta-llama/Llama-2-13b-chat-hf']) # We can use async methods and other stuff supported by ChatOpenAI This way, the three requests will only take as long as the longest individual request. ```python import asyncio from langchain_core.messages import HumanMessage, SystemMessage messages = [ SystemMessage(content="You are a helpful AI that shares everything you know."), HumanMessage( content="Tell me technical facts about yourself. Are you a transformer model? How many billions of parameters do you have?" ), ] async def get_msgs(): tasks = [chat.apredict_messages(messages) for chat in chats.values()] responses = await asyncio.gather(*tasks) return dict(zip(chats.keys(), responses)) ``` ```python import nest_asyncio nest_asyncio.apply() ``` ```python %%time response_dict = asyncio.run(get_msgs()) for model_name, response in response_dict.items(): print(f"\t{model_name}") print() print(response.content) print("\n---\n") ``` meta-llama/Llama-2-70b-chat-hf Greetings! I'm just an AI, I don't have a personal identity like humans do, but I'm here to help you with any questions you have. 
I'm a large language model, which means I'm trained on a large corpus of text data to generate language outputs that are coherent and natural-sounding. My architecture is based on a transformer model, which is a type of neural network that's particularly well-suited for natural language processing tasks. As for my parameters, I have a few billion parameters, but I don't have access to the exact number as it's not relevant to my functioning. My training data includes a vast amount of text from various sources, including books, articles, and websites, which I use to learn patterns and relationships in language. I'm designed to be a helpful tool for a variety of tasks, such as answering questions, providing information, and generating text. I'm constantly learning and improving my abilities through machine learning algorithms and feedback from users like you. I hope this helps! Is there anything else you'd like to know about me or my capabilities? --- meta-llama/Llama-2-7b-chat-hf Ah, a fellow tech enthusiast! *adjusts glasses* I'm glad to share some technical details about myself. 🤓 Indeed, I'm a transformer model, specifically a BERT-like language model trained on a large corpus of text data. My architecture is based on the transformer framework, which is a type of neural network designed for natural language processing tasks. 🏠 As for the number of parameters, I have approximately 340 million. *winks* That's a pretty hefty number, if I do say so myself! These parameters allow me to learn and represent complex patterns in language, such as syntax, semantics, and more. 🤔 But don't ask me to do math in my head – I'm a language model, not a calculating machine! 😅 My strengths lie in understanding and generating human-like text, so feel free to chat with me anytime you'd like. 💬 Now, do you have any more technical questions for me? Or would you like to engage in a nice chat? 😊 --- meta-llama/Llama-2-13b-chat-hf Hello! 
As a friendly and helpful AI, I'd be happy to share some technical facts about myself. I am a transformer-based language model, specifically a variant of the BERT (Bidirectional Encoder Representations from Transformers) architecture. BERT was developed by Google in 2018 and has since become one of the most popular and widely-used AI language models. Here are some technical details about my capabilities: 1. Parameters: I have approximately 340 million parameters, which are the numbers that I use to learn and represent language. This is a relatively large number of parameters compared to some other languages models, but it allows me to learn and understand complex language patterns and relationships. 2. Training: I was trained on a large corpus of text data, including books, articles, and other sources of written content. This training allows me to learn about the structure and conventions of language, as well as the relationships between words and phrases. 3. Architectures: My architecture is based on the transformer model, which is a type of neural network that is particularly well-suited for natural language processing tasks. The transformer model uses self-attention mechanisms to allow the model to "attend" to different parts of the input text, allowing it to capture long-range dependencies and contextual relationships. 4. Precision: I am capable of generating text with high precision and accuracy, meaning that I can produce text that is close to human-level quality in terms of grammar, syntax, and coherence. 5. Generative capabilities: In addition to being able to generate text based on prompts and questions, I am also capable of generating text based on a given topic or theme. This allows me to create longer, more coherent pieces of text that are organized around a specific idea or concept. Overall, I am a powerful and versatile language model that is capable of a wide range of natural language processing tasks. 
I am constantly learning and improving, and I am here to help answer any questions you may have! --- CPU times: user 371 ms, sys: 15.5 ms, total: 387 ms Wall time: 12 s
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@chat@anyscale.ipynb@.PATH_END.py
{ "filename": "test_yandex.py", "repo_name": "langchain-ai/langchain", "repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/embeddings/test_yandex.py", "type": "Python" }
import pytest from langchain_community.embeddings.yandex import YandexGPTEmbeddings @pytest.mark.parametrize( "constructor_args", [ dict(), dict(disable_request_logging=True), ], ) # @pytest.mark.scheduled - idk what it means # requires YC_* env and active service def test_yandex_embedding(constructor_args: dict) -> None: documents = ["exactly same", "exactly same", "different"] embedding = YandexGPTEmbeddings(**constructor_args) doc_outputs = embedding.embed_documents(documents) assert len(doc_outputs) == 3 for i in range(3): assert len(doc_outputs[i]) >= 256 # there are many dims assert len(doc_outputs[0]) == len(doc_outputs[i]) # dims are te same assert doc_outputs[0] == doc_outputs[1] # same input, same embeddings assert doc_outputs[2] != doc_outputs[1] # different input, different embeddings qry_output = embedding.embed_query(documents[0]) assert len(qry_output) >= 256 assert len(doc_outputs[0]) == len( qry_output ) # query and doc models have same dimensions assert doc_outputs[0] != qry_output # query and doc models are different
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@embeddings@test_yandex.py@.PATH_END.py
{ "filename": "_name.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/scene/yaxis/tickformatstop/_name.py", "type": "Python" }
import _plotly_utils.basevalidators class NameValidator(_plotly_utils.basevalidators.StringValidator): def __init__( self, plotly_name="name", parent_name="layout.scene.yaxis.tickformatstop", **kwargs, ): super(NameValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "plot"), **kwargs, )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@scene@yaxis@tickformatstop@_name.py@.PATH_END.py
{ "filename": "load_elasticc_truth.py", "repo_name": "LSSTDESC/elasticc", "repo_path": "elasticc_extracted/elasticc-main/tom_management/load_elasticc_truth.py", "type": "Python" }
raise RuntimeError( "Deprecated. See elasticc2/management/commands/load_snana_fits.py in desc-tom" ) import sys import argparse import logging from truthloader import TruthLoader class SourceTruthLoader(TruthLoader): def __init__( self, *args, **kwargs ): urlend = 'elasticc/addtruth' converters = { 'SourceID': int, 'SNID': int, 'MJD': float, 'DETECT': int, 'TRUE_GENTYPE': int, 'TRUE_GENMAG': float } renames = {} super().__init__( *args, urlend=urlend, converters=converters, renames=renames, **kwargs ) def main(): logger = logging.getLogger( "main" ) logout = logging.StreamHandler( sys.stderr ) logger.addHandler( logout ) logout.setFormatter( logging.Formatter( f'[%(asctime)s - %(levelname)s] - %(message)s' ) ) logger.setLevel( logging.DEBUG ) parser = argparse.ArgumentParser( "Load truth for already-loaded Elasticc sources" ) parser.add_argument( "filenames", nargs='+', help="Filenames of truth" ) parser.add_argument( "-u", "--urlbase", default="https://desc-tom.lbl.gov", help="URL of TOM (no trailing / ; default https://desc-tom.lbl.gov)" ) parser.add_argument( "-U", "--username", default="root", help="TOM username" ) parser.add_argument( "-p", "--password", default="password", help="TOM password" ) args = parser.parse_args() loader = SourceTruthLoader( args.urlbase, args.username, args.password, logger=logger ) for filename in args.filenames: loader.load_csv( filename ) logger.info( "All done" ) # ====================================================================== if __name__ == "__main__": main()
LSSTDESCREPO_NAMEelasticcPATH_START.@elasticc_extracted@elasticc-main@tom_management@load_elasticc_truth.py@.PATH_END.py
{ "filename": "patch_util.py", "repo_name": "henrysky/astroNN", "repo_path": "astroNN_extracted/astroNN-master/src/astroNN/shared/patch_util.py", "type": "Python" }
""" Pure python patching with brute-force line-by-line non-recursive parsing Original code adapted from Copyright (c) 2008-2016 Anatoly Techtonik <techtonik@gmail.com> with MIT license """ import copy import logging import re from os.path import exists, isfile import os import shutil import itertools # Logging is controlled by logger named after the module name logger = logging.getLogger(__name__) debug = logger.debug info = logger.info warning = logger.warning # initialize logger itself and add a NulHandler # https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library logger.addHandler(logging.NullHandler()) import sys logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) class Hunk(object): """ Parsed hunk data container (hunk starts with @@ -R +R @@) """ def __init__(self): self.startsrc = None #: line count starts with 1 self.linessrc = None self.starttgt = None self.linestgt = None self.invalid = False self.desc = "" self.text = [] self.offset = 0 self.contextstart = None self.contextend = None class _Patch(object): """Patch for a single file. If used as an iterable, returns hunks. """ def __init__(self): self.source = None self.target = None self.hunks = [] self.hunkends = [] self.header = [] self.type = None def __iter__(self): for h in self.hunks: yield h class Patch(object): """ Patch is a patch parser and container. When used as an iterable, returns patches. """ def __init__(self, patchpath=None): # name of the PatchSet (filepath or ...) 
self.name = None # patch set type - one of constants self.type = None # list of Patch objects self.items = [] self.errors = 0 # fatal parsing errors self.warnings = 0 # non-critical warnings with open(patchpath, "rb") as fp: # parse .patch or .diff file self.parse(fp) def __len__(self): return len(self.items) def __iter__(self): for i in self.items: yield i def parse(self, stream): """ parse unified diff return True on success """ lineends = dict(lf=0, crlf=0, cr=0) nexthunkno = 0 #: even if index starts with 0 user messages number hunks from 1 p = None hunk = None # hunkactual variable is used to calculate hunk lines for comparison hunkactual = dict(linessrc=None, linestgt=None) class wrapumerate(enumerate): """Enumerate wrapper that uses boolean end of stream status instead of StopIteration exception, and properties to access line information. """ def __init__(self, *args, **kwargs): # we don't call parent, it is magically created by __new__ method self._exhausted = False self._lineno = False # after end of stream equal to the num of lines self._line = False # will be reset to False after end of stream def next(self): """Try to read the next line and return True if it is available, False if end of stream is reached.""" if self._exhausted: return False try: self._lineno, self._line = super(wrapumerate, self).__next__() except StopIteration: self._exhausted = True self._line = False return False return True @property def is_empty(self): return self._exhausted @property def line(self): return self._line @property def lineno(self): return self._lineno # define states (possible file regions) that direct parse flow headscan = True # start with scanning header filepaths = False # lines starting with --- and +++ hunkhead = False # @@ -R +R @@ sequence hunkbody = False # hunkskip = False # skipping invalid hunk mode hunkparsed = False # state after successfully parsed hunk # regexp to match start of hunk, used groups - 1,3,4,6 re_hunk_start = re.compile(b"^@@ 
-(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@") self.errors = 0 # temp buffers for header and filepaths info header = [] srcname = None tgtname = None # start of main cycle # each parsing block already has line available in fe.line fe = wrapumerate(stream) while fe.next(): # -- deciders: these only switch state to decide who should process # -- line fetched at the start of this cycle if hunkparsed: hunkparsed = False if re_hunk_start.match(fe.line): hunkhead = True elif fe.line.startswith(b"--- "): filepaths = True else: headscan = True # -- ------------------------------------ # read out header if headscan: while not fe.is_empty and not fe.line.startswith(b"--- "): header.append(fe.line) fe.next() if fe.is_empty: if p is None: debug("no patch data found") # error is shown later self.errors += 1 else: info( f"{len(b''.join(header))} unparsed bytes left at the end of stream" ) self.warnings += 1 # otherwise error += 1 # this is actually a loop exit continue headscan = False # switch to filepaths state filepaths = True line = fe.line lineno = fe.lineno # hunkskip and hunkbody code skipped until definition of hunkhead is parsed if hunkbody: # [x] treat empty lines inside hunks as containing single space # (this happens when diff is saved by copy/pasting to editor # that strips trailing whitespace) if line.strip(b"\r\n") == b"": debug("expanding empty line in a middle of hunk body") self.warnings += 1 line = b" " + line # process line first if re.match(b"^[- \\+\\\\]", line): # gather stats about line endings if line.endswith(b"\r\n"): p.hunkends["crlf"] += 1 elif line.endswith(b"\n"): p.hunkends["lf"] += 1 elif line.endswith(b"\r"): p.hunkends["cr"] += 1 if line.startswith(b"-"): hunkactual["linessrc"] += 1 elif line.startswith(b"+"): hunkactual["linestgt"] += 1 elif not line.startswith(b"\\"): hunkactual["linessrc"] += 1 hunkactual["linestgt"] += 1 hunk.text.append(line) else: warning( f"invalid hunk no.{nexthunkno} at {lineno + 1} for target file {p.target}" ) # add hunk 
status node hunk.invalid = True p.hunks.append(hunk) self.errors += 1 # switch to hunkskip state hunkbody = False hunkskip = True # check exit conditions if ( hunkactual["linessrc"] > hunk.linessrc or hunkactual["linestgt"] > hunk.linestgt ): warning( f"extra lines for hunk no.{nexthunkno} at {lineno + 1} for target {p.target}" ) # add hunk status node hunk.invalid = True p.hunks.append(hunk) self.errors += 1 # switch to hunkskip state hunkbody = False hunkskip = True elif ( hunk.linessrc == hunkactual["linessrc"] and hunk.linestgt == hunkactual["linestgt"] ): # hunk parsed successfully p.hunks.append(hunk) # switch to hunkparsed state hunkbody = False hunkparsed = True # detect mixed window/unix line ends ends = p.hunkends if ( (ends["cr"] != 0) + (ends["crlf"] != 0) + (ends["lf"] != 0) ) > 1: warning(f"inconsistent line ends in patch hunks for {p.source}") self.warnings += 1 # fetch next line continue if hunkskip: if re_hunk_start.match(line): # switch to hunkhead state hunkskip = False hunkhead = True elif line.startswith(b"--- "): # switch to filepaths state hunkskip = False filepaths = True if filepaths: if line.startswith(b"--- "): if srcname is not None: # XXX testcase warning(f"skipping false patch for {srcname}") srcname = None # XXX header += srcname # double source filepath line is encountered # attempt to restart from this second line re_filepath = b"^--- ([^\t]+)" match = re.match(re_filepath, line) if match: srcname = match.group(1).strip() else: warning(f"skipping invalid filepath at line {lineno + 1}") self.errors += 1 # XXX p.header += line # switch back to headscan state filepaths = False headscan = True elif not line.startswith(b"+++ "): if srcname is not None: warning(f"skipping invalid patch with no target for {srcname}") self.errors += 1 srcname = None # XXX header += srcname # XXX header += line else: # this should be unreachable warning("skipping invalid target patch") filepaths = False headscan = True else: if tgtname is not None: # XXX 
seems to be a dead branch warning( f"skipping invalid patch - double target at line {lineno + 1}" ) self.errors += 1 srcname = None tgtname = None # XXX header += srcname # XXX header += tgtname # XXX header += line # double target filepath line is encountered # switch back to headscan state filepaths = False headscan = True else: re_filepath = b"^\+\+\+ ([^\t]+)" match = re.match(re_filepath, line) if not match: warning( f"skipping invalid patch - no target filepath at line {lineno + 1}" ) self.errors += 1 srcname = None # switch back to headscan state filepaths = False headscan = True else: if p: # for the first run p is None self.items.append(p) p = _Patch() p.source = srcname srcname = None p.target = match.group(1).strip() p.header = header header = [] # switch to hunkhead state filepaths = False hunkhead = True nexthunkno = 0 p.hunkends = lineends.copy() continue if hunkhead: match = re.match(b"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@(.*)", line) if not match: if not p.hunks: warning( f"skipping invalid patch with no hunks for file {p.source}" ) self.errors += 1 # XXX review switch # switch to headscan state hunkhead = False headscan = True continue else: # switch to headscan state hunkhead = False headscan = True else: hunk = Hunk() hunk.startsrc = int(match.group(1)) hunk.linessrc = 1 if match.group(3): hunk.linessrc = int(match.group(3)) hunk.starttgt = int(match.group(4)) hunk.linestgt = 1 if match.group(6): hunk.linestgt = int(match.group(6)) hunk.invalid = False hunk.desc = match.group(7)[1:].rstrip() hunk.text = [] hunkactual["linessrc"] = hunkactual["linestgt"] = 0 # switch to hunkbody state hunkhead = False hunkbody = True nexthunkno += 1 continue # /while fe.next() if p: self.items.append(p) if not hunkparsed: if hunkskip: warning("warning: finished with errors, some hunks may be invalid") elif headscan: if len(self.items) == 0: warning("error: no patch data found!") return False else: # extra data at the end of file pass else: warning("error: patch 
stream is incomplete!") self.errors += 1 if len(self.items) == 0: return False # Count context lines at the beginning and end of each hunk for p in self.items: for hunk in p.hunks: hunk.contextstart = [ x[0:1] if x[0] in b" -" else b"-" for x in hunk.text ].index(b"-") hunk.contextend = [ x[0:1] if x[0] in b" -" else b"-" for x in reversed(hunk.text) ].index(b"-") # XXX fix total hunks calculation debug( f"total files: {len(self.items)} total hunks: {sum(len(p.hunks))}" for p in self.items ) # ---- detect patch and patchset types ---- for idx, p in enumerate(self.items): self.items[idx].type = "git" types = set([p.type for p in self.items]) if len(types) > 1: self.type = "mixed" else: self.type = types.pop() # -------- return self.errors == 0 def apply(self, filepath=None): """Apply parsed patch, optionally stripping leading components from file paths. `root` parameter specifies working dir. return True on success """ total = len(self.items) errors = 0 # for fileno, filepath in enumerate(self.source): for i, p in enumerate(self.items): if not isfile(filepath): warning(f"not a file: {filepath}") errors += 1 continue # [ ] check absolute paths security here debug(f"processing {i + 1}/{total}:\t {filepath}") # validate before patching hunkno = 0 canpatch = False hunks = self._match_file_hunks(filepath, p.hunks) if hunks is not False: p.hunks = hunks canpatch = True else: errors += 1 if canpatch: backupname = filepath + ".orig" if exists(backupname): warning(f"can't backup original file to {backupname} - aborting") else: shutil.move(filepath, backupname) if self.write_hunks(backupname, filepath, p.hunks): info(f"successfully patched {i + 1}/{total}:\t {filepath}") os.unlink(backupname) else: errors += 1 warning(f"error patching file {filepath}") shutil.copy(filepath, filepath + ".invalid") warning(f"invalid version is saved to {filepath}.invalid") shutil.move(backupname, filepath) return errors def _reverse(self): """reverse patch direction (this doesn't touch 
filepaths)""" for p in self.items: for h in p.hunks: h.startsrc, h.starttgt = h.starttgt, h.startsrc h.linessrc, h.linestgt = h.linestgt, h.linessrc for i, line in enumerate(h.text): # need to use line[0:1] here, because line[0] # returns int instead of bytes on Python 3 if line[0:1] == b"+": h.text[i] = b"-" + line[1:] elif line[0:1] == b"-": h.text[i] = b"+" + line[1:] def revert(self, filepath=None): """apply patch in reverse order""" reverted = copy.deepcopy(self) reverted._reverse() return reverted.apply(filepath) def _match_file_hunks(self, filepath, hunks): f2fp = open(filepath, "rb") hunktext = [] hunkindex = [] matches = [] # Prepare hunk data for concurrent validation for hunkno, hunk in enumerate(hunks): hunktext += [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" -"] hunkindex += [(hunkno, hunkline) for hunkline in range(hunk.linessrc)] for lineno, line in enumerate(f2fp): # Check all hunks concurrently, irrespective of line number and order line = line.rstrip(b"\r\n") if line in hunktext: # Add all matching hunk start lines to matches list matches += [ { "hunk": hunkindex[i][0], "length": 0, "start": lineno, "offset": lineno - hunks[hunkindex[i][0]].startsrc + 1, "valid": None, } for i, x in enumerate(hunktext) if line == x and hunkindex[i][1] == 0 ] # Check each hunk match which hasn't already been validated for match in (m for m in matches if m["valid"] is None): hunkno = match["hunk"] hunkline = match["length"] if line == hunktext[hunkindex.index((hunkno, hunkline))]: match["length"] += 1 if match["length"] == hunks[hunkno].linessrc: match["valid"] = True debug( "hunk {} matched at line {} with offset {}".format( hunkno + 1, match["start"] + 1, match["offset"] ) ) else: match["valid"] = False f2fp.close() # Discard invalid hunk matches matches = [m for m in matches if m["valid"] is True] # Group matches by hunk number hunkmatches = [list() for x in range(len(hunks))] for match in matches: hunkmatches[match["hunk"]].append(match) validhunks = 
sum([1 for x in hunkmatches if len(x) > 0]) if validhunks < len(hunks): failedhunks = [ str(hunkno + 1) for hunkno, x in enumerate(hunkmatches) if len(x) == 0 ] debug( "check failed - hunk{} {} not matched".format( "s" if len(failedhunks) > 1 else "", ", ".join(failedhunks) ) ) return False # Check for conflicting hunk offsets which will modify the same line hunkoffsets = [sorted([x["offset"] for x in y], key=abs) for y in hunkmatches] for offsets in itertools.product(*hunkoffsets): patchlines = [] for hunkno, hunk in enumerate(hunks): hunklines = list( range( hunk.startsrc + hunk.contextstart + offsets[hunkno], hunk.startsrc + hunk.linessrc - hunk.contextend + offsets[hunkno], ) ) if len(set(patchlines).intersection(hunklines)) == 0: patchlines += hunklines # Stop searching if the last hunk is reached without conflicts if hunkno + 1 == len(hunks): for hunkno, offset in enumerate(offsets): hunks[hunkno].offset = offset if offset != 0: info( "hunk {} offset by {:+} lines".format( hunkno + 1, offset ) ) return hunks # Return hunk objects, including new offset values else: break debug("file cannot be patched - hunks conflict") return False def patch_stream(self, instream, hunks): """Generator that yields stream patched with hunks iterable Converts lineends in hunk lines to the best suitable format autodetected from input """ hunks = iter( sorted(hunks, key=lambda x: x.startsrc + x.offset + x.contextstart) ) srclineno = 1 lineends = {b"\n": 0, b"\r\n": 0, b"\r": 0} def get_line(): """ local utility function - return line from source stream collecting line end statistics on the way """ line = instream.readline() # 'U' mode works only with text files if line.endswith(b"\r\n"): lineends[b"\r\n"] += 1 elif line.endswith(b"\n"): lineends[b"\n"] += 1 elif line.endswith(b"\r"): lineends[b"\r"] += 1 return line for hno, h in enumerate(hunks): debug(f"hunk {hno + 1}") # skip to line just before hunk starts while srclineno < h.startsrc + h.offset + h.contextstart: yield 
get_line() srclineno += 1 for hline in h.text[h.contextstart : -h.contextend]: if hline.startswith(b"-") or hline.startswith(b"\\"): get_line() srclineno += 1 continue else: if not hline.startswith(b"+"): get_line() srclineno += 1 line2write = hline[1:] # detect if line ends are consistent in source file if sum([bool(lineends[x]) for x in lineends]) == 1: newline = [x for x in lineends if lineends[x] != 0][0] yield line2write.rstrip(b"\r\n") + newline else: # newlines are mixed yield line2write for line in instream: yield line def write_hunks(self, srcname, tgtname, hunks): src = open(srcname, "rb") tgt = open(tgtname, "wb") debug(f"processing target file {tgtname}") tgt.writelines(self.patch_stream(src, hunks)) tgt.close() src.close() shutil.copymode(srcname, tgtname) return True
henryskyREPO_NAMEastroNNPATH_START.@astroNN_extracted@astroNN-master@src@astroNN@shared@patch_util.py@.PATH_END.py
{ "filename": "_autotypenumbers.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/xaxis/_autotypenumbers.py", "type": "Python" }
import _plotly_utils.basevalidators class AutotypenumbersValidator(_plotly_utils.basevalidators.EnumeratedValidator): def __init__( self, plotly_name="autotypenumbers", parent_name="layout.xaxis", **kwargs ): super(AutotypenumbersValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), role=kwargs.pop("role", "info"), values=kwargs.pop("values", ["convert types", "strict"]), **kwargs )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@xaxis@_autotypenumbers.py@.PATH_END.py
{ "filename": "__init__.py", "repo_name": "plotly/plotly.py", "repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/image/legendgrouptitle/font/__init__.py", "type": "Python" }
import sys from typing import TYPE_CHECKING if sys.version_info < (3, 7) or TYPE_CHECKING: from ._weight import WeightValidator from ._variant import VariantValidator from ._textcase import TextcaseValidator from ._style import StyleValidator from ._size import SizeValidator from ._shadow import ShadowValidator from ._lineposition import LinepositionValidator from ._family import FamilyValidator from ._color import ColorValidator else: from _plotly_utils.importers import relative_import __all__, __getattr__, __dir__ = relative_import( __name__, [], [ "._weight.WeightValidator", "._variant.VariantValidator", "._textcase.TextcaseValidator", "._style.StyleValidator", "._size.SizeValidator", "._shadow.ShadowValidator", "._lineposition.LinepositionValidator", "._family.FamilyValidator", "._color.ColorValidator", ], )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@image@legendgrouptitle@font@__init__.py@.PATH_END.py
{ "filename": "05a. RFC Comparison (one-vs-rest).ipynb", "repo_name": "sidchaini/LightCurveDistanceClassification", "repo_path": "LightCurveDistanceClassification_extracted/LightCurveDistanceClassification-main/notebooks/05. RFC Comparison/05a. RFC Comparison (one-vs-rest).ipynb", "type": "Jupyter Notebook" }
```python import numpy as np import pandas as pd from tqdm.auto import tqdm import matplotlib.pyplot as plt import seaborn as sns from mlxtend.feature_selection import ( SequentialFeatureSelector, ) from mlxtend.evaluate import feature_importance_permutation from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs from sklearn.utils.estimator_checks import check_estimator from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_predict, train_test_split from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef import matplotlib.ticker as ticker import os os.chdir("../../") from pathlib import Path import json import sys sys.path.append("scripts") import utils import distclassipy as dcpy cd = dcpy.Distance() ``` ```python with open("settings.txt") as f: settings_dict = json.load(f) np.random.seed(settings_dict["seed_choice"]) classification_letter = "a" classification_problem = settings_dict["classification_problem"][classification_letter] classes_to_keep = settings_dict["classes_to_keep"][classification_letter] results_subfolder = f"{classification_letter}. 
{classification_problem}" sns_dict = settings_dict["sns_dict"] sns.set_theme(**sns_dict) ``` ```python # Load Data X_df_FULL = pd.read_csv("data/X_df.csv", index_col=0) y_df_FULL = pd.read_csv("data/y_df.csv", index_col=0) ``` ```python # Remove features to be dropped from previous notebook with open(os.path.join("results", results_subfolder, "drop_features.txt")) as f: bad_features = json.load(f) # manually selected X_df_FULL = X_df_FULL.drop(bad_features, axis=1) print(X_df_FULL.shape[1]) ``` 30 ```python # one-vs-rest special posclass_df = y_df_FULL[y_df_FULL["class"].isin(classes_to_keep)] negclass_df = y_df_FULL[~y_df_FULL["class"].isin(classes_to_keep)].sample( n=len(posclass_df), random_state=settings_dict["seed_choice"] ) # negclass_df["class"].value_counts() negclass_df["class"] = f"Not{classes_to_keep[0]}" y_df = pd.concat([posclass_df, negclass_df]).sample( frac=1, random_state=settings_dict["seed_choice"] ) # Shuffle X_df = X_df_FULL.loc[y_df.index] X = X_df.to_numpy() y = y_df.to_numpy().ravel() ``` ```python locpath = os.path.join("results", results_subfolder, "random forest") Path(locpath).mkdir(parents=True, exist_ok=True) ``` ```python # Make sure we're not over fitting from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, stratify=y, random_state=settings_dict["seed_choice"] ) rfc = RandomForestClassifier( random_state=settings_dict["seed_choice"], max_depth=3, n_jobs=-1 ) # Important to make sure it's not overfitting. 
rfc.fit(X_train, y_train) print(rfc.score(X_train, y_train)) print(rfc.score(X_test, y_test)) ``` 0.9745649263721553 0.926829268292683 ```python y_pred = cross_val_predict(rfc, X, y, cv=5, n_jobs=-1) ``` ```python acc = accuracy_score(y, y_pred) f1score = f1_score(y, y_pred, average="macro") matthew_coef = matthews_corrcoef(y, y_pred) print( f"F1 = {f1score*100:.2f} %\nAccuracy = {acc*100:.2f} %\nMatthew's Coefficient = {matthew_coef*100:.2f} %" ) ax = utils.plot_cm(y_true=y, y_pred=y_pred) plt.title("Random Forest") plt.savefig(os.path.join(locpath, "confusion_matrix.pdf"), bbox_inches="tight") plt.show() ``` F1 = 94.71 % Accuracy = 94.71 % Matthew's Coefficient = 89.43 % ![png](output_8_1.png) ```python from IPython.display import display, HTML filepath1 = os.path.join( "results", results_subfolder, "distclassipy", "Clark", "sfs_best_confusion_matrix.pdf", ) filepath2 = os.path.join( "results", results_subfolder, "distclassipy", "Canberra", "sfs_best_confusion_matrix.pdf", ) html_str = f""" <div style='width: 100%; display: flex;'> <iframe src='../../{filepath1}' width='50%' height='500'></iframe> <iframe src='../../{filepath2}' width='50%' height='500'></iframe> </div> """ # Display the PDFs side by side display(HTML(html_str)) ``` <div style='width: 100%; display: flex;'> <iframe src='../../results/a. one-vs-rest/distclassipy/Clark/sfs_best_confusion_matrix.pdf' width='50%' height='500'></iframe> <iframe src='../../results/a. 
one-vs-rest/distclassipy/Canberra/sfs_best_confusion_matrix.pdf' width='50%' height='500'></iframe> </div> ```python dat = np.hstack([y_pred.reshape(-1, 1), y.reshape(-1, 1)]) objs = X_df.index preds_best_df = pd.DataFrame(data=dat, columns=["y_pred", "y_true"], index=objs) preds_best_df.to_csv(os.path.join(locpath, "preds_best.csv")) ``` ```python rfc.fit(X, y) importances = rfc.feature_importances_ std = np.std([tree.feature_importances_ for tree in rfc.estimators_], axis=0) forest_importances = pd.DataFrame( data=np.concatenate([importances.reshape(-1, 1), std.reshape(-1, 1)], axis=1), index=X_df_FULL.columns, columns=["importance", "std"], ) ``` ```python forest_importances["sum"] = forest_importances["importance"] + forest_importances["std"] forest_importances = forest_importances.sort_values(by="sum", ascending=False) ``` ```python feats_top_plot = 15 tempdf = forest_importances.iloc[:feats_top_plot] tempdf.index.name = "feature" y = tempdf["importance"] yerr = tempdf["std"] x = tempdf.index ``` ```python fig, ax = plt.subplots() ax.bar(x, y, yerr=yerr, color="SteelBlue") plt.xticks(rotation=90) plt.title(f"Top {feats_top_plot} features for Random Forest") plt.xlabel("Feature name") plt.ylabel("Relative Importance") plt.tight_layout() plt.savefig(os.path.join(locpath, "feature_importance.pdf"), bbox_inches="tight") plt.show() ``` ![png](output_14_0.png) See final result comparison here: - https://docs.google.com/spreadsheets/d/1cNaXAjW_RMu3y6MUPkNmjOs03kbRjExU9YLIAjg8A-M/edit?usp=sharing
sidchainiREPO_NAMELightCurveDistanceClassificationPATH_START.@LightCurveDistanceClassification_extracted@LightCurveDistanceClassification-main@notebooks@05. RFC Comparison@05a. RFC Comparison (one-vs-rest).ipynb@.PATH_END.py
{ "filename": "argparse_ext.py", "repo_name": "toros-astro/corral", "repo_path": "corral_extracted/corral-master/corral/libs/argparse_ext.py", "type": "Python" }
#!/usr/bin/env python # -*- coding: utf-8 -*- import codecs as _codecs import argparse as _argparse import sys as _sys from gettext import gettext as _ import six as _six if _six.PY2: class FileType(object): """Factory for creating file object types with unicode patch Instances of FileType are typically passed as type= arguments to the ArgumentParser add_argument() method. Keyword Arguments: - mode -- A string indicating how the file is to be opened. Accepts the same values as the builtin open() function. - bufsize -- The file's desired buffer size. Accepts the same values as the builtin codecs.open() function. - encoding -- default utf8 """ def __init__(self, mode='r', bufsize=-1, encoding="utf8"): self._mode = mode self._bufsize = bufsize self._encoding = "utf8" def __call__(self, string): # the special argument "-" means sys.std{in,out} if string == '-': if 'r' in self._mode: return _sys.stdin elif 'w' in self._mode: return _sys.stdout else: msg = _('argument "-" with mode %r') % self._mode raise ValueError(msg) # all other arguments are used as file names try: return _codecs.open(string, mode=self._mode, encoding=self._encoding, buffering=self._bufsize) except IOError as e: message = _("can't open '%s': %s") raise _argparse.ArgumentTypeError(message % (string, e)) def __repr__(self): args = self._mode, self._bufsize, self._encoding args_str = ', '.join(repr(arg) for arg in args if arg != -1) return '%s(%s)' % (type(self).__name__, args_str) else: FileType = _argparse.FileType
toros-astroREPO_NAMEcorralPATH_START.@corral_extracted@corral-master@corral@libs@argparse_ext.py@.PATH_END.py
{ "filename": "utils.py", "repo_name": "jzuhone/pyxsim", "repo_path": "pyxsim_extracted/pyxsim-main/pyxsim/utils.py", "type": "Python" }
import logging import numpy as np from astropy.units import Quantity from more_itertools import always_iterable from soxs.constants import abund_tables, atomic_weights, elem_names from unyt import unyt_array, unyt_quantity pyxsimLogger = logging.getLogger("pyxsim") ufstring = "%(name)-3s : [%(levelname)-9s] %(asctime)s %(message)s" cfstring = "%(name)-3s : [%(levelname)-18s] %(asctime)s %(message)s" pyxsim_sh = logging.StreamHandler() # create formatter and add it to the handlers formatter = logging.Formatter(ufstring) pyxsim_sh.setFormatter(formatter) # add the handler to the logger pyxsimLogger.addHandler(pyxsim_sh) pyxsimLogger.setLevel("INFO") pyxsimLogger.propagate = False mylog = pyxsimLogger def parse_value(value, default_units, ds=None): if isinstance(value, Quantity): value = unyt_quantity.from_astropy(value) if ds is None: quan = unyt_quantity else: quan = ds.quan if isinstance(value, unyt_quantity): return quan(value.v, value.units).in_units(default_units) elif isinstance(value, tuple): return quan(value[0], value[1]).in_units(default_units) else: return quan(value, default_units) def isunitful(a): from unyt.exceptions import UnitParseError if isinstance(a, (Quantity, unyt_array)): return True elif isinstance(a, tuple): try: unyt_array(a[0], a[1]) return True except UnitParseError: pass return False def ensure_list(obj): return list(always_iterable(obj)) def validate_parameters(first, second, skip=None): if skip is None: skip = [] keys1 = list(first.keys()) keys2 = list(second.keys()) keys1.sort() keys2.sort() if keys1 != keys2: raise RuntimeError("The two inputs do not have the same parameters!") for k1, k2 in zip(keys1, keys2): if k1 not in skip: v1 = first[k1][()] v2 = first[k2][()] if isinstance(v1, (str, bytes)) or isinstance(v2, (str, bytes)): check_equal = v1 == v2 elif ( getattr(getattr(v1, "dtype", None), "char", None) == "S" or getattr(getattr(v2, "dtype", None), "char", None) == "S" ): check_equal = np.char.equal(v1, v2).all() else: 
check_equal = np.allclose( np.array(v1), np.array(v2), rtol=0.0, atol=1.0e-10 ) if not check_equal: raise RuntimeError( f"The values for the parameter '{k1}' in the two inputs" f" are not identical ({v1} vs. {v2})!" ) def merge_files(input_files, output_file, overwrite=False, add_exposure_times=False): """ Helper function for merging PhotonList or EventList HDF5 files. Parameters ---------- input_files : list of strings List of filenames that will be merged together. output_file : string Name of the merged file to be outputted. overwrite : boolean, default False If the output file already exists, set this to True to overwrite it. add_exposure_times : boolean, default False If set to True, exposure times will be added together. Otherwise, the exposure times of all of the files must be the same. Examples -------- >>> from pyxsim import merge_files >>> merge_files(["events_0.h5","events_1.h5","events_3.h5"], "events.h5", ... overwrite=True, add_exposure_times=True) Notes ----- Currently, to merge files it is mandated that all of the parameters have the same values, with the exception of the exposure time parameter "exp_time". If add_exposure_times=False, the maximum exposure time will be used. """ from collections import defaultdict import h5py from pathlib import Path if Path(output_file).exists() and not overwrite: raise IOError( f"Cannot overwrite existing file {output_file}. " "If you want to do this, set overwrite=True." 
) f_in = h5py.File(input_files[0], "r") f_out = h5py.File(output_file, "w") exp_time_key = "" p_out = f_out.create_group("parameters") for key, param in f_in["parameters"].items(): if key.endswith("exp_time"): exp_time_key = key else: p_out[key] = param[()] skip = [exp_time_key] if add_exposure_times else [] for fn in input_files[1:]: with h5py.File(fn, "r") as f: validate_parameters(f_in["parameters"], f["parameters"], skip=skip) f_in.close() data = defaultdict(list) tot_exp_time = 0.0 info = f_out.create_group("info") for i, fn in enumerate(input_files): with h5py.File(fn, "r") as f: if add_exposure_times: tot_exp_time += f["/parameters"][exp_time_key][()] else: tot_exp_time = max(tot_exp_time, f["/parameters"][exp_time_key][()]) for key in f["/data"]: data[key].append(f["/data"][key][:]) for key, value in f["info"].attrs.items(): info.attrs[f"{key}_{i}"] = value info.attrs["original_files"] = input_files p_out[exp_time_key] = tot_exp_time d = f_out.create_group("data") for k in data: d.create_dataset(k, data=np.concatenate(data[k])) f_out.close() def _parse_abund_table(abund_table): if not isinstance(abund_table, str): if len(abund_table) != 30: raise RuntimeError( "User-supplied abundance tables must be 30 elements long!" ) atable = np.concatenate([[0.0], np.array(abund_table)]) else: if abund_table not in abund_tables: raise KeyError( f"Abundance table {abund_table} not found! Options are: {list(abund_tables.keys())}" ) atable = abund_tables[abund_table].copy() return atable def compute_elem_mass_fraction(elem, abund_table="angr"): if isinstance(elem, str): elem = elem_names.index(elem) atable = _parse_abund_table(abund_table) mZ = (atomic_weights[3:] * atable[3:]).sum() mE = atomic_weights[elem] * atable[elem] return mE / mZ def create_metal_fields(ds, metallicity_field, elements, abund_table): """ Create a set of metal abundance fields based on an abundance table for a dataset that does not have them. 
An overall metallicity field is required to scale the individual abundances by. Parameters ---------- ds : :class:`~yt.data_objects.static_output.Dataset` The dataset object for which this field will be created. metallicity_field : 2-tuple of strings The metallicity field of the dataset. elements : string or list of strings The element or elements to make fields for. abund_table : string The abundance table to use when computing the fields for the individual elements. """ elements = ensure_list(elements) def make_metal_field(elem): fac = compute_elem_mass_fraction(elem, abund_table=abund_table) def _metal_field(field, data): return fac * data[metallicity_field].to("dimensionless") return _metal_field mfields = [] for elem in elements: func = make_metal_field(elem) mfield = (metallicity_field[0], f"{elem}_fraction") ds.add_field(mfield, func, sampling_type="local", units="") mfields.append(mfield) return mfields def compute_H_abund(abund_table): atable = _parse_abund_table(abund_table) return atomic_weights[1] / (atomic_weights * atable).sum() def compute_zsolar(abund_table): atable = _parse_abund_table(abund_table) if abund_table not in abund_tables: raise KeyError( f"Abundance table {abund_table} not found! Options are: {list(abund_tables.keys())}" ) elems = atomic_weights * atable return elems[3:].sum() / elems.sum() class ParallelProgressBar: def __init__(self, title): self.title = title mylog.info("Starting %s", title) def update(self, *args, **kwargs): return def close(self): mylog.info("Finishing %s", self.title)
jzuhoneREPO_NAMEpyxsimPATH_START.@pyxsim_extracted@pyxsim-main@pyxsim@utils.py@.PATH_END.py
{ "filename": "_colorscale.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/icicle/marker/_colorscale.py", "type": "Python" }
import _plotly_utils.basevalidators


class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the ``icicle.marker.colorscale`` property.

    Thin auto-generated wrapper: it simply fixes the property/parent names
    and supplies the default ``edit_type`` and ``implied_edits`` unless the
    caller overrides them.
    """

    def __init__(self, plotly_name="colorscale", parent_name="icicle.marker", **kwargs):
        # Fill in the generated defaults only when the caller did not
        # supply its own values; everything then flows through **kwargs.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("implied_edits", {"autocolorscale": False})
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@icicle@marker@_colorscale.py@.PATH_END.py
{ "filename": "cross_section.py", "repo_name": "franciscovillaescusa/Pylians", "repo_path": "Pylians_extracted/Pylians-master/HI/cross_section.py", "type": "Python" }
import numpy as np import readsnap import readsubf import HI_library as HIL import sys ################################# UNITS ####################################### rho_crit=2.77536627e11 #h^2 Msun/Mpc^3 Mpc=3.0856e24 #cm Msun=1.989e33 #g Ymass=0.24 #helium mass fraction mH=1.6726e-24 #proton mass in grams pi=np.pi ############################################################################### ################################ INPUT ######################################## if len(sys.argv)>1: sa=sys.argv snapshot_fname=sa[1]; groups_fname=sa[2]; groups_number=int(sa[3]) method=sa[4] fac=float(sa[5]); HI_frac=float(sa[6]); Omega_HI_ref=float(sa[7]) method_Bagla=int(sa[8]); long_ids_flag=bool(int(sa[9])) SFR_flag=bool(int(sa[10])); f_MF=sa[11] threads=int(sa[12]); num_los=int(sa[13]) f_out=sa[14] print '################# INFO ##############' for element in sa: element else: #snapshot and halo catalogue snapshot_fname='../Efective_model_15Mpc/snapdir_013/snap_013' groups_fname='../Efective_model_15Mpc/FoF_0.2' groups_number=13 #'Dave','method_1','Bagla','Barnes' method='Dave' #1.362889 (60 Mpc/h z=3) 1.436037 (30 Mpc/h z=3) 1.440990 (15 Mpc/h z=3) fac=1.436037 #factor to obtain <F> = <F>_obs from the Lya : only for Dave HI_frac=0.95 #HI/H for self-shielded regions : for method_1 Omega_HI_ref=1e-3 #for method_1 and Bagla method_Bagla=3 #only for Bagla long_ids_flag=False; SFR_flag=True #flags for reading the FoF file f_MF='../mass_function/ST_MF_z=2.4.dat' #file containing the mass function threads=15 num_los=5000 f_out='cross_section_Dave_15Mpc_z=2.4.dat' ############################################################################### #read snapshot head and obtain BoxSize, Omega_m and Omega_L print '\nREADING SNAPSHOTS PROPERTIES' head=readsnap.snapshot_header(snapshot_fname) BoxSize=head.boxsize/1e3 #Mpc/h Nall=head.nall Masses=head.massarr*1e10 #Msun/h Omega_m=head.omega_m Omega_l=head.omega_l redshift=head.redshift 
Hubble=100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l) #h*km/s/Mpc h=head.hubble #find the total number of particles in the simulation Ntotal=np.sum(Nall,dtype=np.uint64) print 'Total number of particles in the simulation:',Ntotal #sort the pos array ID_unsort=readsnap.read_block(snapshot_fname,"ID ",parttype=-1)-1 pos_unsort=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)/1e3 #Mpc/h pos=np.empty((Ntotal,3),dtype=np.float32); pos[ID_unsort]=pos_unsort del pos_unsort, ID_unsort #sort the R array ID_unsort=readsnap.read_block(snapshot_fname,"ID ",parttype=0)-1 R_unsort=readsnap.read_block(snapshot_fname,"HSML",parttype=0)/1e3 #Mpc/h R=np.zeros(Ntotal,dtype=np.float32); R[ID_unsort]=R_unsort del R_unsort, ID_unsort #find the IDs and HI masses of the particles to which HI has been assigned if method=='Dave': [IDs,M_HI]=HIL.Dave_HI_assignment(snapshot_fname,HI_frac,fac) elif method=='method_1': [IDs,M_HI]=HIL.method_1_HI_assignment(snapshot_fname,HI_frac,Omega_HI_ref) elif method=='Barnes': [IDs,M_HI]=HIL.Barnes_Haehnelt(snapshot_fname,groups_fname, groups_number,long_ids_flag,SFR_flag) elif method=='Paco': [IDs,M_HI]=HIL.Paco_HI_assignment(snapshot_fname,groups_fname, groups_number,long_ids_flag,SFR_flag) elif method=='Bagla': [IDs,M_HI]=HIL.Bagla_HI_assignment(snapshot_fname,groups_fname, groups_number,Omega_HI_ref,method_Bagla, f_MF,long_ids_flag,SFR_flag) else: print 'Incorrect method selected!!!'; sys.exit() #just keep with the particles having HI masses M_HI=M_HI[IDs]; pos=pos[IDs]; R=R[IDs]; del IDs #compute the value of Omega_HI print 'Omega_HI = %e'%(np.sum(M_HI,dtype=np.float64)/BoxSize**3/rho_crit) #read FoF/Subfind halos information halos=readsubf.subfind_catalog(groups_fname,groups_number,group_veldisp=True, masstab=True,long_ids=long_ids_flag,swap=False) pos_SO =halos.group_pos/1e3 #Mpc/h M_SO =halos.group_m_mean200*1e10 #Msun/h. SO mass R_SO =halos.group_r_mean200/1e3 #Mpc/h M_FoF =halos.group_mass*1e10 #Msun/h. 
FoF mass del halos """#write X-Y positions and R of the halos f=open('borrar.dat','w') for i in range(len(R_SO)): print i if M_FoF[i]>8.75e8: f.write(str(pos_SO[i,0])+' '+str(pos_SO[i,1])+' '+str(pos_SO[i,2])+\ ' '+str(R_SO[i])+' '+str(M_FoF[i])+'\n') f.close()""" #some verbose print 'Number of FoF halos:',len(pos_SO),len(M_SO) print '%f < X [Mpc/h] < %f'%(np.min(pos_SO[:,0]),np.max(pos_SO[:,0])) print '%f < Y [Mpc/h] < %f'%(np.min(pos_SO[:,1]),np.max(pos_SO[:,1])) print '%f < Z [Mpc/h] < %f'%(np.min(pos_SO[:,2]),np.max(pos_SO[:,2])) print '%e < M [Msun/h] < %e'%(np.min(M_SO),np.max(M_SO)) #find the number of cells per dimension: the grid will have cells x cells points cells=int(BoxSize/np.max(R)) print '%d x %d grid created\n'%(cells,cells) #sort particles: to each particle we associate a (index_x,index_y) coordinate index_x=(pos[:,0]/BoxSize*cells).astype(np.int32) index_y=(pos[:,1]/BoxSize*cells).astype(np.int32) index_x[np.where(index_x==cells)[0]]=0 index_y[np.where(index_y==cells)[0]]=0 print '%d < index_x < %d'%(np.min(index_x),np.max(index_x)) print '%d < index_y < %d'%(np.min(index_y),np.max(index_y)) print 'Sorting particles...' indexes=[] for i in range(cells): for j in range(cells): indexes.append([]) number=cells*index_y+index_x; del index_x,index_y for i in range(len(number)): indexes[number[i]].append(i) indexes=np.array(indexes); print 'Done!' 
#do a loop over all the halos for l in xrange(0,50000): #(len(pos_SO)): """x_halo=10.2 #Mpc/h y_halo=8.3 #Mpc/h r_halo=1.0 #Mpc/h""" x_halo=pos_SO[l,0]; y_halo=pos_SO[l,1]; z_halo=pos_SO[l,2]; r_halo=R_SO[l] print '\nl=',l print 'halo pos =',pos_SO[l] print 'halo mass = %e'%M_FoF[l] print 'halo radius = %f'%r_halo index_x_min=int((x_halo-r_halo)/BoxSize*cells) index_x_max=int((x_halo+r_halo)/BoxSize*cells) if (x_halo+r_halo)>((index_x_max+1)*BoxSize/cells): index_x_max+=1 index_y_min=int((y_halo-r_halo)/BoxSize*cells) index_y_max=int((y_halo+r_halo)/BoxSize*cells) if (y_halo+r_halo)>((index_y_max+1)*BoxSize/cells): index_y_max+=1 print index_x_min,index_x_max print index_y_min,index_y_max #identify the IDs of the particles that can contribute to that region length=0 for i in xrange(index_x_min,index_x_max+1): number_x=(i+cells)%cells for j in xrange(index_y_min,index_y_max+1): number_y=(j+cells)%cells num=cells*number_y+number_x length+=len(indexes[num]) IDs=np.empty(length,dtype=np.int32); offset=0 for i in xrange(index_x_min,index_x_max+1): number_x=(i+cells)%cells for j in xrange(index_y_min,index_y_max+1): number_y=(j+cells)%cells num=cells*number_y+number_x length=len(indexes[num]) IDs[offset:offset+length]=indexes[num] offset+=length pos_gas=pos[IDs]; R_gas=R[IDs]; M_HI_gas=M_HI[IDs] #compute the cross section only if there are particles!!! 
if len(pos_gas)>0: #keep only with particles with z-coordinates within the virial radius z=pos_gas[:,2] indexes_z=np.where((z>(z_halo-r_halo)) & (z<(z_halo+r_halo)))[0] pos_gas=pos_gas[indexes_z]; R_gas=R_gas[indexes_z] M_HI_gas=M_HI_gas[indexes_z] #f=open('borrar2.dat','w') #for i in xrange(len(pos_gas)): #f.write(str(pos_gas[i,0])+' '+str(pos_gas[i,1])+' '+str(R_gas[i])+'\n') #f.close() cross_section=HIL.cross_section_halo(x_halo,y_halo,r_halo,num_los, redshift,h,pos_gas[:,0], pos_gas[:,1],R_gas,M_HI_gas, threads) #note that the cross_section returned it is comoving (Mpc/h)^2 units print 'cross section=',cross_section f=open(f_out,'a') f.write(str(M_FoF[l])+' '+str(cross_section*1e6)+'\n') f.close()
franciscovillaescusaREPO_NAMEPyliansPATH_START.@Pylians_extracted@Pylians-master@HI@cross_section.py@.PATH_END.py
{ "filename": "h2opr.py", "repo_name": "mpound/pdrtpy", "repo_path": "pdrtpy_extracted/pdrtpy-master/examples/h2opr.py", "type": "Python" }
############################################################################ ### Listing A.5: Creating and fitting H2 excitation diagrams, ### ### including ortho-to-para ratio (OPR) ### ############################################################################ from pdrtpy.measurement import Measurement from pdrtpy.tool.h2excitation import H2ExcitationFit from pdrtpy.plot.excitationplot import ExcitationPlot from astropy.nddata import StdDevUncertainty intensity = dict() intensity['H200S0'] = 3.003e-05 intensity['H200S1'] = 3.143e-04 intensity['H200S2'] = 3.706e-04 intensity['H200S3'] = 1.060e-03 intensity['H200S4'] = 5.282e-04 intensity['H200S5'] = 5.795e-04 observations = [] for i in intensity: m = Measurement(data=intensity[i], uncertainty=StdDevUncertainty(0.75*intensity[i]), identifier=i,unit="erg cm-2 s-1 sr-1") observations.append(m) # Create the tool to run the fit hopr = H2ExcitationFit(observations) # Instantiate the plotter hplot = ExcitationPlot(hopr,"H_2") # Set some plot parameters appropriate for manuscript figure; # these pass through to matplotlib hplot._plt.rcParams["xtick.major.size"] = 7 hplot._plt.rcParams["xtick.minor.size"] = 4 hplot._plt.rcParams["ytick.major.size"] = 7 hplot._plt.rcParams["ytick.minor.size"] = 4 hplot._plt.rcParams['font.size'] = 14 hplot._plt.rcParams['axes.linewidth'] =1.5 hplot.ex_diagram(ymax=21) hplot.savefig('example9_figure.png',dpi=300) # Fit a two temperature model allowing OPR to vary hopr.run(fit_opr=True) hplot.ex_diagram(show_fit=True,ymax=21) hplot.savefig('example10_figure.png',dpi=300)
mpoundREPO_NAMEpdrtpyPATH_START.@pdrtpy_extracted@pdrtpy-master@examples@h2opr.py@.PATH_END.py
{ "filename": "recipe_wvlsol_tell.py", "repo_name": "igrins/plp", "repo_path": "plp_extracted/plp-master/igrins/igrins_recipes/recipe_wvlsol_tell.py", "type": "Python" }
from __future__ import print_function from ..pipeline.steps import Step from .. import DESCS import os from ..procedures import wvlsol_tell as wvlsol_tell def do_wvlsol_tell(obsset): from igrins import get_obsset # obsset = get_obsset("20190116", "H", "DARK", obsids=range(1, 11)) obsset = get_obsset("20220303", "H", "A0V", obsids=range(93, 105)) # spec = obsset.load(DESCS["SPEC_FITS_FLATTENED"]) src_filename = obsset.locate(DESCS["SPEC_FITS_FLATTENED"]) # f = obsset.rs.load_ref_data(kind="TELL_WVLSOL_MODEL") item_desc = DESCS["SPEC_FITS_WAVELENGTH"] basename = obsset.groupname postfix = "" # , DESCS[] section, fnout = obsset.rs.get_section_n_fn(basename, item_desc, postfix) tell_file = obsset.rs.query_ref_data_path(kind="TELL_WVLSOL_MODEL") figout_dir = None blaze_corrected = True out_filename = os.path.join(os.path.dirname(src_filename), fnout) wvlsol_tell.run(src_filename, out_filename, plot_dir=figout_dir, tell_file=tell_file, blaze_corrected=blaze_corrected) # def process_band(utdate, recipe_name, band, # groupname, obsids, config, # interactive=True): # # utdate, recipe_name, band, obsids, config = "20150525", "A0V", "H", [63, 64], "recipe.config" # from igrins.libs.recipe_helper import RecipeHelper # helper = RecipeHelper(config, utdate, recipe_name) # caldb = helper.get_caldb() # master_obsid = obsids[0] # desc = "SPEC_FITS_FLATTENED" # blaze_corrected=True # src_filename = caldb.query_item_path((band, groupname), # desc) # if not os.path.exists(src_filename): # desc = "SPEC_FITS" # blaze_corrected=False # src_filename = caldb.query_item_path((band, groupname), # desc) # out_filename = caldb.query_item_path((band, groupname), # "SPEC_FITS_WAVELENGTH") # from igrins.libs.master_calib import get_ref_data_path # tell_file = get_ref_data_path(helper.config, band, # kind="TELL_WVLSOL_MODEL") # if not interactive: # tgt_basename = helper.get_basename(band, groupname) # figout_dir = helper._igr_path.get_section_filename_base("QA_PATH", # "", # 
"tell_wvsol_"+tgt_basename) # from igrins.libs.path_info import ensure_dir # ensure_dir(figout_dir) # else: # figout_dir = None # #print src_filename, out_filename, figout_dir, tell_file # run(src_filename, out_filename, # plot_dir=figout_dir, tell_file=tell_file, # blaze_corrected=blaze_corrected) # # process_band(utdate, recipe_name, band, # # groupname, obsids, self.config, # # interactive) # def wvlsol_tell(utdate, refdate=None, bands="HK", # starting_obsids=None, # groups=None, # interactive=False, # recipe_name = "A0V*", # config_file="recipe.config", # ): # recipe = RecipeTellWvlsol(interactive=interactive) # recipe.set_recipe_name(recipe_name) # recipe.process(utdate, bands, # starting_obsids, groups, # config_file=config_file) steps = [Step("Refine wvlsol w/ tellurics", do_wvlsol_tell), ] if __name__ == "__main__": pass
igrinsREPO_NAMEplpPATH_START.@plp_extracted@plp-master@igrins@igrins_recipes@recipe_wvlsol_tell.py@.PATH_END.py
{ "filename": "print_test_kern_calc_visi_common_common.py", "repo_name": "JLBLine/WODEN", "repo_path": "WODEN_extracted/WODEN-master/cmake_testing/GPU_code/source_components/print_test_kern_calc_visi_common_common.py", "type": "Python" }
import numpy as np np.random.seed(983745) def make_list_arrays(num_lists): num_list_values = np.random.uniform(5, 32, num_lists).astype(int) total_vals = int(np.sum(num_list_values)) list_start_indexes = np.empty(num_lists, dtype=int) list_freqs = np.empty(total_vals) list_stokes = np.empty(total_vals) start_index = 0 for ind, num_list_vals in enumerate(num_list_values): list_freqs[start_index:start_index+num_list_vals] = np.linspace(50e+6, 300e+6, num_list_vals) list_stokes[start_index:start_index+num_list_vals] = np.random.uniform(-1,10,num_list_vals) list_start_indexes[ind] = start_index start_index += num_list_vals return num_list_values, list_start_indexes, list_freqs, list_stokes num_curves = 10 num_lists = 5 num_comps = 25 ##all the test_ker_calc_visi_common.c tests use 10 power-law Stokes I ##power law stuff all happens inside test_ker_calc_visi_common ##stuff below is used in test_ker_calc_visi_common::test_kern_calc_visi_VarylmnVaryFlux ##=======CURVED_POWER_LAW======================================================= ref_stokesI = np.random.uniform(1e-3, 100.0, num_curves) ref_stokesQ = np.zeros(num_curves) ref_stokesU = np.zeros(num_curves) ref_stokesV = np.zeros(num_curves) ref_freqs = np.linspace(50e+6, 300e+6, num_curves) ##want to have peaks between 100 and 200MHz so we can visually see things ##in this test, so define curvature and peak freq, and calculate SI from that # peak_freqs = np.random.uniform(100, 200, num_curves)*1e+6 ref_qs = np.random.uniform(-2, 0.5, num_curves) ref_curve_SIs = np.random.uniform(-1, 1, num_curves) # ref_curve_SIs = -2*ref_qs*np.log(peak_freqs) curve_inds = range(num_curves, 2*num_curves) ##=======LIST_STUFF============================================================= num_list_values, list_start_indexes, list_freqs, list_stokesI = make_list_arrays(num_lists) list_inds = range(2*num_curves, 2*num_curves + num_lists) ##polarisation stuff============================================================ n_stokesV_pol_frac = 3 
n_stokesV_power = 4 n_stokesV_curve = 4 n_linpol_pol_frac = 5 n_linpol_power = 2 n_linpol_curve = 2 n_stokesV_list = 3 n_linpol_list = 2 n_linpol_p_list = 3 n_linpol_angles = n_linpol_pol_frac + n_linpol_power + n_linpol_curve + n_linpol_p_list stokesV_pol_fracs = np.random.uniform(-1.0, 1.0, n_stokesV_pol_frac) ref_stokesV = np.random.uniform(1e-3, 10.0, n_stokesV_power) stokesV_power_SIs = np.random.uniform(-1.5, 0.5, n_stokesV_power) stokesV_qs = np.random.uniform(-2, 0.5, n_stokesV_curve) stokesV_curve_SIs = np.random.uniform(-1, 1, n_stokesV_curve) # stokesV_curve_SIs = -2*stokesV_qs*np.log(peak_freqs[:n_stokesV_curve]) stokesV_num_list_values, stokesV_list_start_indexes, stokesV_list_ref_freqs, stokesV_list_ref_flux = make_list_arrays(n_stokesV_list) stokesV_inds = np.random.choice(num_comps, n_stokesV_pol_frac + n_stokesV_power + n_stokesV_curve + n_stokesV_list, replace=False) stokesV_pol_frac_comp_inds = stokesV_inds[:n_stokesV_pol_frac] stokesV_power_comp_inds = stokesV_inds[n_stokesV_pol_frac:n_stokesV_pol_frac+n_stokesV_power] stokesV_curve_comp_inds = stokesV_inds[n_stokesV_pol_frac+n_stokesV_power:n_stokesV_pol_frac+n_stokesV_power+n_stokesV_curve] stokesV_list_comp_inds = stokesV_inds[-n_stokesV_list:] linpol_pol_fracs = np.random.uniform(-1.0, 1.0, n_linpol_pol_frac) ref_linpol = np.random.uniform(1e-3, 10.0, n_linpol_power) linpol_power_SIs = np.random.uniform(-1.5, 0.5, n_linpol_power) linpol_qs = np.random.uniform(-2, 0.5, n_linpol_curve) linpol_curve_SIs = np.random.uniform(-1, 1, n_linpol_curve) # linpol_curve_SIs = -2*linpol_qs*np.log(peak_freqs[:n_linpol_curve]) stokesQ_num_list_values, stokesQ_list_start_indexes, stokesQ_list_ref_freqs, stokesQ_list_ref_flux = make_list_arrays(n_linpol_list) stokesU_num_list_values, stokesU_list_start_indexes, stokesU_list_ref_freqs, stokesU_list_ref_flux = make_list_arrays(n_linpol_list) linpol_p_num_list_values, linpol_p_list_start_indexes, linpol_p_list_ref_freqs, linpol_p_list_ref_flux = 
make_list_arrays(n_linpol_p_list) linpol_inds = np.random.choice(num_comps, n_linpol_pol_frac + n_linpol_power + n_linpol_curve + n_linpol_list + n_linpol_p_list, replace=False) linpol_pol_frac_comp_inds = linpol_inds[:n_linpol_pol_frac] linpol_power_comp_inds = linpol_inds[n_linpol_pol_frac:n_linpol_pol_frac+n_linpol_power] linpol_curve_comp_inds = linpol_inds[n_linpol_pol_frac+n_linpol_power:n_linpol_pol_frac+n_linpol_power+n_linpol_curve] print("HEY HEY HEY", n_linpol_pol_frac+n_linpol_power+n_linpol_curve,n_linpol_pol_frac+n_linpol_power+n_linpol_curve+n_linpol_p_list) # print(linpol_p_list_comp_inds) linpol_p_list_comp_inds = linpol_inds[n_linpol_pol_frac+n_linpol_power+n_linpol_curve:n_linpol_pol_frac+n_linpol_power+n_linpol_curve+n_linpol_p_list] linpol_list_comp_inds = linpol_inds[-n_linpol_list:] stokesU_list_comp_inds = linpol_list_comp_inds stokesQ_list_comp_inds = linpol_list_comp_inds intr_pol_angle = np.random.uniform(0, 2*np.pi, n_linpol_angles) rms = np.random.uniform(0, 80, n_linpol_angles) linpol_angle_inds = np.concatenate([linpol_pol_frac_comp_inds, linpol_power_comp_inds, linpol_curve_comp_inds, linpol_p_list_comp_inds]) def print_header_file(arrays, array_names, precisions): print(f"int k_n_stokesV_pol_frac = {n_stokesV_pol_frac};") print(f"int k_n_stokesV_power = {n_stokesV_power};") print(f"int k_n_stokesV_curve = {n_stokesV_curve};") print(f"int k_n_stokesV_list = {n_stokesV_list};") print(f"int k_n_linpol_pol_frac = {n_linpol_pol_frac};") print(f"int k_n_linpol_power = {n_linpol_power};") print(f"int k_n_linpol_curve = {n_linpol_curve};") print(f"int k_n_linpol_list = {n_linpol_list};") print(f"int k_n_linpol_p_list = {n_linpol_p_list};") print(f"int k_n_stokesV_list_flux_entries = {int(stokesV_num_list_values.sum())};") print(f"int k_n_stokesQ_list_flux_entries = {int(stokesQ_num_list_values.sum())};") print(f"int k_n_stokesU_list_flux_entries = {int(stokesU_num_list_values.sum())};") print(f"int k_n_linpol_p_list_flux_entries = 
{int(linpol_p_num_list_values.sum())};") for array, array_name, precision in zip(arrays, array_names, precisions): arr_string = f"{precision} {array_name}[] = \u007b" for ind in range(len(array)-1): if precision == "user_precision_t": arr_string += f'{array[ind]:.16f},' elif precision == "double": arr_string += f'{array[ind]:.16e},' elif precision == "int": arr_string += f'{array[ind]:d},' if precision == "user_precision_t": arr_string += f'{array[-1]:.16f}}};\n' elif precision == "double": arr_string += f'{array[-1]:.16e}}};\n' elif precision == "int": arr_string += f'{array[-1]:d}}};\n' print(arr_string) print(f"int total_num_flux_entires = {int(np.sum(num_list_values)):d};") return if __name__ == '__main__': arrays = [ref_stokesI, ref_freqs, ref_curve_SIs, ref_qs, num_list_values, list_start_indexes, list_freqs, list_stokesI, curve_inds, list_inds, stokesV_pol_fracs, ref_stokesV, stokesV_power_SIs, stokesV_qs, stokesV_curve_SIs, linpol_pol_fracs, ref_linpol, linpol_power_SIs, linpol_qs, linpol_curve_SIs, intr_pol_angle, rms, stokesV_pol_frac_comp_inds, stokesV_power_comp_inds, stokesV_curve_comp_inds, linpol_pol_frac_comp_inds, linpol_power_comp_inds, linpol_curve_comp_inds, linpol_angle_inds, stokesV_list_ref_flux, stokesV_list_ref_freqs, stokesV_num_list_values, stokesV_list_start_indexes, stokesV_list_comp_inds, stokesQ_list_ref_flux, stokesQ_list_ref_freqs, stokesQ_num_list_values, stokesQ_list_start_indexes, stokesQ_list_comp_inds, stokesU_list_ref_flux, stokesU_list_ref_freqs, stokesU_num_list_values, stokesU_list_start_indexes, stokesU_list_comp_inds, linpol_p_list_ref_flux, linpol_p_list_ref_freqs, linpol_p_num_list_values, linpol_p_list_start_indexes, linpol_p_list_comp_inds] array_names = ["k_ref_stokesI", "k_ref_freqs", "k_ref_curve_SIs", "k_ref_qs", "k_num_list_values", "k_list_start_indexes", "k_list_freqs", "k_list_stokesI", "k_curve_comp_inds", "k_list_comp_inds", "k_stokesV_pol_fracs", "k_ref_stokesV", "k_stokesV_power_SIs", "k_stokesV_qs", 
"k_stokesV_curve_SIs", "k_linpol_pol_fracs", "k_ref_linpol", "k_linpol_power_SIs", "k_linpol_qs", "k_linpol_curve_SIs", "k_intr_pol_angle", "k_rms", "k_stokesV_pol_frac_comp_inds", "k_stokesV_power_comp_inds", "k_stokesV_curve_comp_inds", "k_linpol_pol_frac_comp_inds", "k_linpol_power_comp_inds", "k_linpol_curve_comp_inds", "k_linpol_angle_inds", "k_stokesV_list_ref_flux", "k_stokesV_list_ref_freqs", "k_stokesV_num_list_values", "k_stokesV_list_start_indexes", "k_stokesV_list_comp_inds", "k_stokesQ_list_ref_flux", "k_stokesQ_list_ref_freqs", "k_stokesQ_num_list_values", "k_stokesQ_list_start_indexes", "k_stokesQ_list_comp_inds", "k_stokesU_list_ref_flux", "k_stokesU_list_ref_freqs", "k_stokesU_num_list_values", "k_stokesU_list_start_indexes", "k_stokesU_list_comp_inds", "k_linpol_p_list_ref_flux", "k_linpol_p_list_ref_freqs", "k_linpol_p_num_list_values", "k_linpol_p_list_start_indexes", "k_linpol_p_list_comp_inds"] precisions = ["user_precision_t", "double", "user_precision_t", "user_precision_t", "int", "int", "double", "user_precision_t", "int", "int", "user_precision_t", "user_precision_t", "user_precision_t", "user_precision_t", "user_precision_t", "user_precision_t", "user_precision_t", "user_precision_t", "user_precision_t", "user_precision_t", "user_precision_t", "user_precision_t", "int", "int","int", "int","int", "int","int", "user_precision_t", "double", "int", "int","int", "user_precision_t", "double", "int", "int","int", "user_precision_t", "double", "int", "int","int", "user_precision_t", "double", "int", "int","int"] print_header_file(arrays, array_names, precisions)
JLBLineREPO_NAMEWODENPATH_START.@WODEN_extracted@WODEN-master@cmake_testing@GPU_code@source_components@print_test_kern_calc_visi_common_common.py@.PATH_END.py
{ "filename": "__init__.py", "repo_name": "legacysurvey/imagine", "repo_path": "imagine_extracted/imagine-main/cat/migrations/__init__.py", "type": "Python" }
legacysurveyREPO_NAMEimaginePATH_START.@imagine_extracted@imagine-main@cat@migrations@__init__.py@.PATH_END.py
{ "filename": "_variant.py", "repo_name": "plotly/plotly.py", "repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/heatmap/colorbar/tickfont/_variant.py", "type": "Python" }
import _plotly_utils.basevalidators class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator): def __init__( self, plotly_name="variant", parent_name="heatmap.colorbar.tickfont", **kwargs ): super(VariantValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "colorbars"), values=kwargs.pop( "values", [ "normal", "small-caps", "all-small-caps", "all-petite-caps", "petite-caps", "unicase", ], ), **kwargs, )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@heatmap@colorbar@tickfont@_variant.py@.PATH_END.py
{ "filename": "chains.py", "repo_name": "joezuntz/cosmosis", "repo_path": "cosmosis_extracted/cosmosis-main/cosmosis/postprocessing/v2/chains.py", "type": "Python" }
""" This is the core of what will at some point become the new postprocessing system in CosmoSIS. The plan is: - do plotting with GetDist instead of manually - make the plotting much simpler than the complex hierarchies we have now - connect this to the campaign system to make it easy to specify what to do after completing a campaign These APIs will change and should not yet be relied on. """ from cosmosis.runtime.utils import import_by_path from cosmosis import Inifile, output as output_module import numpy as np from io import StringIO import os class Chain: chain_subclasses = {} def __init__( self, name: str, sampler: str, columns: list[str], data: list[np.ndarray], metadata: list[dict[str, str]], comments: list[list[str]], final_metadata: list[list[str]], **options ): self.name = name self.sampler = sampler self.colnames = columns self.data = data self.metadata = metadata self.comments = comments self.final_metadata = final_metadata self.options = options self._mcsamples = None @classmethod def load(cls, inputs, name=None, **options): if isinstance(inputs, Inifile): name_, sampler, colnames, data, metadata, comments, final_metadata = ( cls.load_ini(inputs) ) elif isinstance(inputs, str): name_, sampler, colnames, data, metadata, comments, final_metadata = ( cls.load_text_file(inputs) ) elif isinstance(inputs, output_module.InMemoryOutput): name_, sampler, colnames, data, metadata, comments, final_metadata = ( cls.load_in_memory_storage(inputs) ) elif isinstance(inputs, output_module.AstropyOutput): name_, sampler, colnames, data, metadata, comments, final_metadata = ( cls.load_astropy(inputs) ) elif isinstance(inputs, output_module.OutputBase): name_, sampler, colnames, data, metadata, comments, final_metadata = ( cls.load_output_object(inputs) ) else: raise ValueError("Unknown input chain type " + str(type(inputs))) subclass = cls.chain_subclasses.get(sampler, cls) if name is None: name = name_ # Assume metropolis if no more info is given if subclass is None: 
subclass = MetropolisChain return subclass( name, sampler, colnames, data, metadata, comments, final_metadata, **options ) @classmethod def load_ini(cls, inputs): output_options = dict(inputs.items("output")) filename = output_options["filename"] name = filename sampler = inputs.get("runtime", "sampler") colnames, data, metadata, comments, final_metadata = ( output_module.input_from_options(output_options) ) return name, sampler, colnames, data, metadata, comments, final_metadata @classmethod def load_in_memory_storage(cls, inputs): if name is None: name = "chain" colnames = [c[0] for c in inputs.columns] data = [np.array(inputs.rows)] metadata = [{k: v[0] for k, v in inputs.meta.items()}] name = metadata[0].get("chain_name", "chain") sampler = metadata[0].get("sampler") final_metadata = [{k: v[0] for k, v in inputs.final_meta.items()}] comments = [inputs.comments[:]] return name, sampler, colnames, data, metadata, comments, final_metadata @classmethod def load_astropy(cls, inputs): name = inputs.meta.get("chain_name", "chain") colnames = inputs.colnames # convert astropy table to numpy array data = [np.array([inputs[c] for c in colnames]).T] metadata = [inputs.meta] sampler = metadata.get("sampler") final_metadata = [ {k[6:]: v for k, v in meta.items() if k.startswith("final:")} for meta in metadata ] comments = [meta["comments"] for meta in metadata] return name, sampler, colnames, data, metadata, comments, final_metadata @classmethod def load_text_file(cls, inputs): colnames, data, metadata, comments, final_metadata = ( output_module.TextColumnOutput.load_from_options({"filename": inputs}) ) name = metadata[0].get("chain_name", "chain") sampler = metadata[0].get("sampler") return name, sampler, colnames, data, metadata, comments, final_metadata @classmethod def load_output_object(self, inputs): colnames, data, metadata, comments, final_metadata = inputs.load_from_options( {"filename": inputs} ) name = metadata[0].get("chain_name", "chain") sampler = 
metadata[0].get("sampler") return name, sampler, colnames, data, metadata, comments, final_metadata def __init_subclass__(cls): name = cls.__name__ sampler = name.lower().replace("chain", "") cls.chain_subclasses[sampler] = cls def derive_extra_column(self, function): new_data = [] for d in self.data: chain = SingleChainData(d, self.colnames) col, code = function(chain) if col is None: break # insert a new column into the chain, second from the end d = np.insert(d, -2, col, axis=1) # save the new chain new_data.append(d) if code is None: return self.colnames.insert(-2, code) self.data = new_data def derive_extra_columns(self): if not self.derive_file: return name = os.path.splitext(os.path.split(self.derive_file)[1])[0] module = import_by_path(name, self.derive_file) functions = [getattr(module, f) for f in dir(module) if f.startswith("derive_")] print( "Deriving new columns from these functions in {}:".format(self.derive_file) ) for f in functions: self.derive_extra_column(f) # derive any additional parameters self.derive_extra_columns() # set the column names self.colnames = [c.lower() for c in self.colnames] self.data_stacked = np.concatenate(self.data).T def extract_ini(self, tag): in_ = False lines = [] for line in self.comments[0]: line = line.strip() if line == "START_OF_{}_INI".format(tag): in_ = True elif line == "END_OF_{}_INI".format(tag): break elif in_: lines.append(line) s = StringIO("\n".join(lines)) ini = Inifile(None) ini.read_file(s) return ini def __len__(self): return self.data_stacked.shape[1] def get_row(self, index): return self.data_stacked[:, index] def has_col(self, name): return name in self.colnames def get_col(self, index_or_name, stacked=True): """Get the named or numbered column.""" if isinstance(index_or_name, int): index = index_or_name else: name = index_or_name index = self.colnames.index(name) cols = [d[:, index] for d in self.data] if stacked: return np.concatenate(cols) else: return cols @property def mcsamples(self): import 
getdist if self._mcsamples is None: samples = [] names = [] for col in self.colnames: if col == "weight": continue c = self.reduced_col(col) samples.append(c) names.append(col) if "weight" in self.colnames: weights = self.reduced_col("weight") else: weights = None samples = np.array(samples).T self._mcsamples = getdist.MCSamples( samples=samples, weights=weights, names=names, name_tag=self.name ) return self._mcsamples # Subclasses implement these def reduced_col(self, column): pass class EmceeChain(Chain): def reduced_col(self, name, stacked=True): cols = self.get_col(name, stacked=False) burn = self.options.get("burn", 0) thin = self.options.get("thin", 1) if 0.0 < burn < 1.0: burn = int(len(cols[0]) * burn) else: burn = int(burn) cols = [col[burn::] for col in cols] if thin != 1: walkers = self.sampler_option("walkers") index = np.arange(len(cols[0]), dtype=np.int64) index = index // int(walkers) w = (index % thin) == 0 cols = [col[w] for col in cols] if stacked: return np.concatenate(cols) else: return cols class MetropolisChain(Chain): def reduced_col(self, name, stacked=True): cols = self.get_col(name, stacked=False) burn = self.options.get("burn", 0) thin = self.options.get("thin", 1) if 0.0 < burn < 1.0: burn = int(len(cols[0]) * burn) else: burn = int(burn) cols = [col[burn::thin] for col in cols] if stacked: return np.concatenate(cols) else: return np.array(cols).squeeze() class ImportanceChain(MetropolisChain): # Importance sample chains may or may not # be based on a Metropolis-Hastings chain, # but they are treated the same way here. pass class NestedChain(Chain): def reduced_col(self, name, stacked=True): """ Nested sampling does not required cutting the chain from the main output file. These are also single chains, unlike the MH ones, so there is no stacking needed. 
""" # stacking does not col = self.get_col(name) return np.array(col) class PolychordChain(NestedChain): pass class MultinestChain(NestedChain): pass class ZeusChain(EmceeChain): pass class PocoChain(MetropolisChain): pass class DynestyChain(NestedChain): pass class NautilusChain(NestedChain): pass class SimpleListChain(Chain): """ Base class for chains that are just lists of samples in some way. We might still want to burn or thin them though """ def reduced_col(self, name, stacked=True): cols = self.get_col(name, stacked=False) thin = self.options.get("thin", 1) burn = self.options.get("burn", 0) if 0.0 < burn < 1.0: burn = int(len(cols[0]) * burn) else: burn = int(burn) cols = [col[burn::thin] for col in cols] return np.array(cols).squeeze() class ListChain(SimpleListChain): pass class AprioriChain(SimpleListChain): pass class SingleChainData(object): """ This helper object is to make it easier for users to write functions that derive new parameters. """ def __init__(self, data, colnames): self.data = data self.colnames = colnames def __getitem__(self, index_or_name): if isinstance(index_or_name, int): index = index_or_name else: name = index_or_name index = self.colnames.index(name) return self.data[:, index]
joezuntzREPO_NAMEcosmosisPATH_START.@cosmosis_extracted@cosmosis-main@cosmosis@postprocessing@v2@chains.py@.PATH_END.py
{ "filename": "apero_pphotpix_spirou.py", "repo_name": "njcuk9999/apero-drs", "repo_path": "apero-drs_extracted/apero-drs-main/apero/tools/recipes/spirou/apero_pphotpix_spirou.py", "type": "Python" }
#!/usr/bin/env python # -*- coding: utf-8 -*- """ # CODE NAME HERE # CODE DESCRIPTION HERE Created on 2020-04-05 11:44:00 @author: cook """ import os import numpy as np from astropy.io import fits from astropy.table import Table from scipy.signal import medfilt, convolve2d from apero import lang from apero.base import base from apero.core import constants from apero.core import math as mp from apero.core.core import drs_log from apero.core.utils import drs_startup from apero.tools.module.testing import drs_dev # ============================================================================= # Define variables # ============================================================================= __NAME__ = 'apero_pphotpix_spirou.py' __INSTRUMENT__ = 'SPIROU' __PACKAGE__ = base.__PACKAGE__ __version__ = base.__version__ __author__ = base.__author__ __date__ = base.__date__ __release__ = base.__release__ # get param dict ParamDict = constants.ParamDict # Get Logging function WLOG = drs_log.wlog # Get the text types textentry = lang.textentry # whether this is a debug run (produces mask image) DEBUG = False # define relative output path DEBUGFILE = 'mask_hotpix_pp.fits' # ----------------------------------------------------------------------------- # get file definitions for this instrument FMOD = drs_dev.FileDefinition(instrument=__INSTRUMENT__) # set up recipe definitions (overwrites default one) RMOD = drs_dev.RecipeDefinition(instrument=__INSTRUMENT__) # define a recipe for this tool c_hotpix = drs_dev.TmpRecipe() c_hotpix.name = __NAME__ c_hotpix.shortname = 'CRT_HTPX' c_hotpix.instrument = __INSTRUMENT__ c_hotpix.in_block_str = 'raw' c_hotpix.out_block_str = 'red' c_hotpix.extension = 'fits' c_hotpix.description = ('Create the hotpix table for an instrument (required ' 'for preprocessing)') c_hotpix.kind = 'misc' c_hotpix.set_arg(pos=0, **RMOD.mod.obs_dir) c_hotpix.set_arg(pos=1, name='darkfile', dtype='file', helpstr='[STRING] The dark file name [DARK_DARK_INT] or ' 
'[DARK_DARK_TEL]', files=[FMOD.files.raw_dark_dark_int, FMOD.files.raw_dark_dark_tel]) c_hotpix.set_kwarg(name='--debugfile', dtype='switch', default=False, helpstr='If set activates debug mode (saves mask)') # add recipe to recipe definition RMOD.add(c_hotpix) # ============================================================================= # Define functions # ============================================================================= # All recipe code goes in _main # Only change the following from here: # 1) function calls (i.e. main(arg1, arg2, **kwargs) # 2) fkwargs (i.e. fkwargs=dict(arg1=arg1, arg2=arg2, **kwargs) # 3) config_main outputs value (i.e. None, pp, reduced) # Everything else is controlled from recipe_definition def main(obs_dir=None, darkfile=None, **kwargs): """ Main function for exposuremeter_spirou.py :param obs_dir: str, observation directory :param darkfile: str, dark file name :param kwargs: additional keyword arguments :keyword debug: int, debug level (0 for None) :returns: dictionary of the local space :rtype: dict """ # assign function calls (must add positional) fkwargs = dict(obs_dir=obs_dir, darkfile=darkfile, **kwargs) # ---------------------------------------------------------------------- # deal with command line inputs / function call inputs recipe, params = drs_startup.setup(__NAME__, __INSTRUMENT__, fkwargs, rmod=RMOD) # solid debug mode option if kwargs.get('DEBUG0000', False): return recipe, params # ---------------------------------------------------------------------- # run main bulk of code (catching all errors) llmain, success = drs_startup.run(__main__, recipe, params) # ---------------------------------------------------------------------- # End Message # ---------------------------------------------------------------------- return drs_startup.end_main(params, llmain, recipe, success) def __main__(recipe, params): # get input dark file drs fits file instance darkfile = params['INPUTS']['darkfile'][1][0] debug = 
params['INPUTS']['debugfile'] # ---------------------------------------------------------------------- # Prepare dark file # ---------------------------------------------------------------------- WLOG(params, '', 'Loading dark and preparing image') # load file image = darkfile.get_data(copy=True) # set NaNS and infs to zero. NaN pixels will not be flagged as hot pixels image[~np.isfinite(image)] = 0 # subtract a DC offset of the image level image = image - mp.nanmedian(image) # express image normalized in terms of sigma image = image / mp.nanpercentile(np.abs(image), 100 * mp.normal_fraction()) # ---------------------------------------------------------------------- # Find hot pixels # ---------------------------------------------------------------------- WLOG(params, '', 'Finding hot pixels') # get box size from parameters boxsize = params['PP_HOTPIX_BOXSIZE'] threshold = params['PP_CORRUPT_HOT_THRES'] # a hot pixel is a point that is > 10 sigma (positive) and that has a # 5x5 median around it that is within +/- 1 sigma; it is well-behaved and # not surrounded by bad pixels WLOG(params, '', '\t- median filter') medimage = medfilt(image, [boxsize, boxsize]) # find the hot pixels mask = (np.abs(medimage) < 1.0) & (image > threshold) hotpix = np.array(mask).astype(float) # find if hot pixels are alone in a 5x5 box WLOG(params, '', '\t- convolve') box = np.ones([boxsize, boxsize]).astype(float) neighbours = convolve2d(hotpix, box, mode='same') # after the convolution, isolated (within 5x5) # hotpixels have neighbours = 1 WLOG(params, '', '\t- find neighbours') has_neighbours = neighbours == 1 # set non-isolated hot pixels to zero hotpix[~has_neighbours] = 0.0 # find positions in x and y of good hot pixels WLOG(params, '', '\t- locate') y, x = np.where(hotpix) # ---------------------------------------------------------------------- # write table to file # ---------------------------------------------------------------------- # print progress WLOG(params, '', 'Writing 
to file') # create table table = Table() table['nsig'] = image[y, x] table['xpix'] = x table['ypix'] = y # get outpath assetdir = params['DRS_DATA_ASSETS'] filename = params['PP_HOTPIX_FILE'] relpath = params['DATA_ENGINEERING'] absoutpath = os.path.join(assetdir, relpath, filename) # write output as a csv file WLOG(params, '', '\t Saved to: {0}'.format(absoutpath)) table.write(absoutpath, format='csv', overwrite=True) # if debug is True save the mask (to compare to image) if debug: # get debug file debugabspath = os.path.join(assetdir, relpath, DEBUGFILE) # print progress WLOG(params, '', '\t Saved debug to: {0}'.format(debugabspath)) # write to file fits.writeto(debugabspath, hotpix, overwrite=True) # ---------------------------------------------------------------------- # End of main code # ---------------------------------------------------------------------- return locals() # ============================================================================= # Start of code # ============================================================================= if __name__ == "__main__": # run main with no arguments (get from command line - sys.argv) ll = main() # ============================================================================= # End of code # =============================================================================
njcuk9999REPO_NAMEapero-drsPATH_START.@apero-drs_extracted@apero-drs-main@apero@tools@recipes@spirou@apero_pphotpix_spirou.py@.PATH_END.py
{ "filename": "_resampling.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/stats/_resampling.py", "type": "Python" }
from __future__ import annotations import warnings import numpy as np from itertools import combinations, permutations, product from collections.abc import Sequence import inspect from scipy._lib._util import check_random_state, _rename_parameter from scipy.special import ndtr, ndtri, comb, factorial from scipy._lib._util import rng_integers from dataclasses import dataclass from ._common import ConfidenceInterval from ._axis_nan_policy import _broadcast_concatenate, _broadcast_arrays from ._warnings_errors import DegenerateDataWarning __all__ = ['bootstrap', 'monte_carlo_test', 'permutation_test'] def _vectorize_statistic(statistic): """Vectorize an n-sample statistic""" # This is a little cleaner than np.nditer at the expense of some data # copying: concatenate samples together, then use np.apply_along_axis def stat_nd(*data, axis=0): lengths = [sample.shape[axis] for sample in data] split_indices = np.cumsum(lengths)[:-1] z = _broadcast_concatenate(data, axis) # move working axis to position 0 so that new dimensions in the output # of `statistic` are _prepended_. ("This axis is removed, and replaced # with new dimensions...") z = np.moveaxis(z, axis, 0) def stat_1d(z): data = np.split(z, split_indices) return statistic(*data) return np.apply_along_axis(stat_1d, 0, z)[()] return stat_nd def _jackknife_resample(sample, batch=None): """Jackknife resample the sample. 
Only one-sample stats for now.""" n = sample.shape[-1] batch_nominal = batch or n for k in range(0, n, batch_nominal): # col_start:col_end are the observations to remove batch_actual = min(batch_nominal, n-k) # jackknife - each row leaves out one observation j = np.ones((batch_actual, n), dtype=bool) np.fill_diagonal(j[:, k:k+batch_actual], False) i = np.arange(n) i = np.broadcast_to(i, (batch_actual, n)) i = i[j].reshape((batch_actual, n-1)) resamples = sample[..., i] yield resamples def _bootstrap_resample(sample, n_resamples=None, random_state=None): """Bootstrap resample the sample.""" n = sample.shape[-1] # bootstrap - each row is a random resample of original observations i = rng_integers(random_state, 0, n, (n_resamples, n)) resamples = sample[..., i] return resamples def _percentile_of_score(a, score, axis): """Vectorized, simplified `scipy.stats.percentileofscore`. Uses logic of the 'mean' value of percentileofscore's kind parameter. Unlike `stats.percentileofscore`, the percentile returned is a fraction in [0, 1]. """ B = a.shape[axis] return ((a < score).sum(axis=axis) + (a <= score).sum(axis=axis)) / (2 * B) def _percentile_along_axis(theta_hat_b, alpha): """`np.percentile` with different percentile for each slice.""" # the difference between _percentile_along_axis and np.percentile is that # np.percentile gets _all_ the qs for each axis slice, whereas # _percentile_along_axis gets the q corresponding with each axis slice shape = theta_hat_b.shape[:-1] alpha = np.broadcast_to(alpha, shape) percentiles = np.zeros_like(alpha, dtype=np.float64) for indices, alpha_i in np.ndenumerate(alpha): if np.isnan(alpha_i): # e.g. when bootstrap distribution has only one unique element msg = ( "The BCa confidence interval cannot be calculated." " This problem is known to occur when the distribution" " is degenerate or the statistic is np.min." 
) warnings.warn(DegenerateDataWarning(msg)) percentiles[indices] = np.nan else: theta_hat_b_i = theta_hat_b[indices] percentiles[indices] = np.percentile(theta_hat_b_i, alpha_i) return percentiles[()] # return scalar instead of 0d array def _bca_interval(data, statistic, axis, alpha, theta_hat_b, batch): """Bias-corrected and accelerated interval.""" # closely follows [1] 14.3 and 15.4 (Eq. 15.36) # calculate z0_hat theta_hat = np.asarray(statistic(*data, axis=axis))[..., None] percentile = _percentile_of_score(theta_hat_b, theta_hat, axis=-1) z0_hat = ndtri(percentile) # calculate a_hat theta_hat_ji = [] # j is for sample of data, i is for jackknife resample for j, sample in enumerate(data): # _jackknife_resample will add an axis prior to the last axis that # corresponds with the different jackknife resamples. Do the same for # each sample of the data to ensure broadcastability. We need to # create a copy of the list containing the samples anyway, so do this # in the loop to simplify the code. This is not the bottleneck... 
samples = [np.expand_dims(sample, -2) for sample in data] theta_hat_i = [] for jackknife_sample in _jackknife_resample(sample, batch): samples[j] = jackknife_sample broadcasted = _broadcast_arrays(samples, axis=-1) theta_hat_i.append(statistic(*broadcasted, axis=-1)) theta_hat_ji.append(theta_hat_i) theta_hat_ji = [np.concatenate(theta_hat_i, axis=-1) for theta_hat_i in theta_hat_ji] n_j = [theta_hat_i.shape[-1] for theta_hat_i in theta_hat_ji] theta_hat_j_dot = [theta_hat_i.mean(axis=-1, keepdims=True) for theta_hat_i in theta_hat_ji] U_ji = [(n - 1) * (theta_hat_dot - theta_hat_i) for theta_hat_dot, theta_hat_i, n in zip(theta_hat_j_dot, theta_hat_ji, n_j)] nums = [(U_i**3).sum(axis=-1)/n**3 for U_i, n in zip(U_ji, n_j)] dens = [(U_i**2).sum(axis=-1)/n**2 for U_i, n in zip(U_ji, n_j)] a_hat = 1/6 * sum(nums) / sum(dens)**(3/2) # calculate alpha_1, alpha_2 z_alpha = ndtri(alpha) z_1alpha = -z_alpha num1 = z0_hat + z_alpha alpha_1 = ndtr(z0_hat + num1/(1 - a_hat*num1)) num2 = z0_hat + z_1alpha alpha_2 = ndtr(z0_hat + num2/(1 - a_hat*num2)) return alpha_1, alpha_2, a_hat # return a_hat for testing def _bootstrap_iv(data, statistic, vectorized, paired, axis, confidence_level, alternative, n_resamples, batch, method, bootstrap_result, random_state): """Input validation and standardization for `bootstrap`.""" if vectorized not in {True, False, None}: raise ValueError("`vectorized` must be `True`, `False`, or `None`.") if vectorized is None: vectorized = 'axis' in inspect.signature(statistic).parameters if not vectorized: statistic = _vectorize_statistic(statistic) axis_int = int(axis) if axis != axis_int: raise ValueError("`axis` must be an integer.") n_samples = 0 try: n_samples = len(data) except TypeError: raise ValueError("`data` must be a sequence of samples.") if n_samples == 0: raise ValueError("`data` must contain at least one sample.") data_iv = [] for sample in data: sample = np.atleast_1d(sample) if sample.shape[axis_int] <= 1: raise ValueError("each sample 
in `data` must contain two or more " "observations along `axis`.") sample = np.moveaxis(sample, axis_int, -1) data_iv.append(sample) if paired not in {True, False}: raise ValueError("`paired` must be `True` or `False`.") if paired: n = data_iv[0].shape[-1] for sample in data_iv[1:]: if sample.shape[-1] != n: message = ("When `paired is True`, all samples must have the " "same length along `axis`") raise ValueError(message) # to generate the bootstrap distribution for paired-sample statistics, # resample the indices of the observations def statistic(i, axis=-1, data=data_iv, unpaired_statistic=statistic): data = [sample[..., i] for sample in data] return unpaired_statistic(*data, axis=axis) data_iv = [np.arange(n)] confidence_level_float = float(confidence_level) alternative = alternative.lower() alternatives = {'two-sided', 'less', 'greater'} if alternative not in alternatives: raise ValueError(f"`alternative` must be one of {alternatives}") n_resamples_int = int(n_resamples) if n_resamples != n_resamples_int or n_resamples_int < 0: raise ValueError("`n_resamples` must be a non-negative integer.") if batch is None: batch_iv = batch else: batch_iv = int(batch) if batch != batch_iv or batch_iv <= 0: raise ValueError("`batch` must be a positive integer or None.") methods = {'percentile', 'basic', 'bca'} method = method.lower() if method not in methods: raise ValueError(f"`method` must be in {methods}") message = "`bootstrap_result` must have attribute `bootstrap_distribution'" if (bootstrap_result is not None and not hasattr(bootstrap_result, "bootstrap_distribution")): raise ValueError(message) message = ("Either `bootstrap_result.bootstrap_distribution.size` or " "`n_resamples` must be positive.") if ((not bootstrap_result or not bootstrap_result.bootstrap_distribution.size) and n_resamples_int == 0): raise ValueError(message) random_state = check_random_state(random_state) return (data_iv, statistic, vectorized, paired, axis_int, confidence_level_float, 
alternative, n_resamples_int, batch_iv, method, bootstrap_result, random_state) @dataclass class BootstrapResult: """Result object returned by `scipy.stats.bootstrap`. Attributes ---------- confidence_interval : ConfidenceInterval The bootstrap confidence interval as an instance of `collections.namedtuple` with attributes `low` and `high`. bootstrap_distribution : ndarray The bootstrap distribution, that is, the value of `statistic` for each resample. The last dimension corresponds with the resamples (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``). standard_error : float or ndarray The bootstrap standard error, that is, the sample standard deviation of the bootstrap distribution. """ confidence_interval: ConfidenceInterval bootstrap_distribution: np.ndarray standard_error: float | np.ndarray def bootstrap(data, statistic, *, n_resamples=9999, batch=None, vectorized=None, paired=False, axis=0, confidence_level=0.95, alternative='two-sided', method='BCa', bootstrap_result=None, random_state=None): r""" Compute a two-sided bootstrap confidence interval of a statistic. When `method` is ``'percentile'`` and `alternative` is ``'two-sided'``, a bootstrap confidence interval is computed according to the following procedure. 1. Resample the data: for each sample in `data` and for each of `n_resamples`, take a random sample of the original sample (with replacement) of the same size as the original sample. 2. Compute the bootstrap distribution of the statistic: for each set of resamples, compute the test statistic. 3. Determine the confidence interval: find the interval of the bootstrap distribution that is - symmetric about the median and - contains `confidence_level` of the resampled statistic values. While the ``'percentile'`` method is the most intuitive, it is rarely used in practice. Two more common methods are available, ``'basic'`` ('reverse percentile') and ``'BCa'`` ('bias-corrected and accelerated'); they differ in how step 3 is performed. 
If the samples in `data` are taken at random from their respective distributions :math:`n` times, the confidence interval returned by `bootstrap` will contain the true value of the statistic for those distributions approximately `confidence_level`:math:`\, \times \, n` times. Parameters ---------- data : sequence of array-like Each element of data is a sample from an underlying distribution. statistic : callable Statistic for which the confidence interval is to be calculated. `statistic` must be a callable that accepts ``len(data)`` samples as separate arguments and returns the resulting statistic. If `vectorized` is set ``True``, `statistic` must also accept a keyword argument `axis` and be vectorized to compute the statistic along the provided `axis`. n_resamples : int, default: ``9999`` The number of resamples performed to form the bootstrap distribution of the statistic. batch : int, optional The number of resamples to process in each vectorized call to `statistic`. Memory usage is O(`batch`*``n``), where ``n`` is the sample size. Default is ``None``, in which case ``batch = n_resamples`` (or ``batch = max(n_resamples, n)`` for ``method='BCa'``). vectorized : bool, optional If `vectorized` is set ``False``, `statistic` will not be passed keyword argument `axis` and is expected to calculate the statistic only for 1D samples. If ``True``, `statistic` will be passed keyword argument `axis` and is expected to calculate the statistic along `axis` when passed an ND sample array. If ``None`` (default), `vectorized` will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of a vectorized statistic typically reduces computation time. paired : bool, default: ``False`` Whether the statistic treats corresponding elements of the samples in `data` as paired. axis : int, default: ``0`` The axis of the samples in `data` along which the `statistic` is calculated. confidence_level : float, default: ``0.95`` The confidence level of the confidence interval. 
alternative : {'two-sided', 'less', 'greater'}, default: ``'two-sided'`` Choose ``'two-sided'`` (default) for a two-sided confidence interval, ``'less'`` for a one-sided confidence interval with the lower bound at ``-np.inf``, and ``'greater'`` for a one-sided confidence interval with the upper bound at ``np.inf``. The other bound of the one-sided confidence intervals is the same as that of a two-sided confidence interval with `confidence_level` twice as far from 1.0; e.g. the upper bound of a 95% ``'less'`` confidence interval is the same as the upper bound of a 90% ``'two-sided'`` confidence interval. method : {'percentile', 'basic', 'bca'}, default: ``'BCa'`` Whether to return the 'percentile' bootstrap confidence interval (``'percentile'``), the 'basic' (AKA 'reverse') bootstrap confidence interval (``'basic'``), or the bias-corrected and accelerated bootstrap confidence interval (``'BCa'``). bootstrap_result : BootstrapResult, optional Provide the result object returned by a previous call to `bootstrap` to include the previous bootstrap distribution in the new bootstrap distribution. This can be used, for example, to change `confidence_level`, change `method`, or see the effect of performing additional resampling without repeating computations. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Pseudorandom number generator state used to generate resamples. If `random_state` is ``None`` (or `np.random`), the `numpy.random.RandomState` singleton is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with `random_state`. If `random_state` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Returns ------- res : BootstrapResult An object with attributes: confidence_interval : ConfidenceInterval The bootstrap confidence interval as an instance of `collections.namedtuple` with attributes `low` and `high`. 
bootstrap_distribution : ndarray The bootstrap distribution, that is, the value of `statistic` for each resample. The last dimension corresponds with the resamples (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``). standard_error : float or ndarray The bootstrap standard error, that is, the sample standard deviation of the bootstrap distribution. Warns ----- `~scipy.stats.DegenerateDataWarning` Generated when ``method='BCa'`` and the bootstrap distribution is degenerate (e.g. all elements are identical). Notes ----- Elements of the confidence interval may be NaN for ``method='BCa'`` if the bootstrap distribution is degenerate (e.g. all elements are identical). In this case, consider using another `method` or inspecting `data` for indications that other analysis may be more appropriate (e.g. all observations are identical). References ---------- .. [1] B. Efron and R. J. Tibshirani, An Introduction to the Bootstrap, Chapman & Hall/CRC, Boca Raton, FL, USA (1993) .. [2] Nathaniel E. Helwig, "Bootstrap Confidence Intervals", http://users.stat.umn.edu/~helwig/notes/bootci-Notes.pdf .. [3] Bootstrapping (statistics), Wikipedia, https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 Examples -------- Suppose we have sampled data from an unknown distribution. >>> import numpy as np >>> rng = np.random.default_rng() >>> from scipy.stats import norm >>> dist = norm(loc=2, scale=4) # our "unknown" distribution >>> data = dist.rvs(size=100, random_state=rng) We are interested in the standard deviation of the distribution. >>> std_true = dist.std() # the true value of the statistic >>> print(std_true) 4.0 >>> std_sample = np.std(data) # the sample statistic >>> print(std_sample) 3.9460644295563863 The bootstrap is used to approximate the variability we would expect if we were to repeatedly sample from the unknown distribution and calculate the statistic of the sample each time. 
It does this by repeatedly resampling values *from the original sample* with replacement and calculating the statistic of each resample. This results in a "bootstrap distribution" of the statistic. >>> import matplotlib.pyplot as plt >>> from scipy.stats import bootstrap >>> data = (data,) # samples must be in a sequence >>> res = bootstrap(data, np.std, confidence_level=0.9, ... random_state=rng) >>> fig, ax = plt.subplots() >>> ax.hist(res.bootstrap_distribution, bins=25) >>> ax.set_title('Bootstrap Distribution') >>> ax.set_xlabel('statistic value') >>> ax.set_ylabel('frequency') >>> plt.show() The standard error quantifies this variability. It is calculated as the standard deviation of the bootstrap distribution. >>> res.standard_error 0.24427002125829136 >>> res.standard_error == np.std(res.bootstrap_distribution, ddof=1) True The bootstrap distribution of the statistic is often approximately normal with scale equal to the standard error. >>> x = np.linspace(3, 5) >>> pdf = norm.pdf(x, loc=std_sample, scale=res.standard_error) >>> fig, ax = plt.subplots() >>> ax.hist(res.bootstrap_distribution, bins=25, density=True) >>> ax.plot(x, pdf) >>> ax.set_title('Normal Approximation of the Bootstrap Distribution') >>> ax.set_xlabel('statistic value') >>> ax.set_ylabel('pdf') >>> plt.show() This suggests that we could construct a 90% confidence interval on the statistic based on quantiles of this normal distribution. >>> norm.interval(0.9, loc=std_sample, scale=res.standard_error) (3.5442759991341726, 4.3478528599786) Due to central limit theorem, this normal approximation is accurate for a variety of statistics and distributions underlying the samples; however, the approximation is not reliable in all cases. Because `bootstrap` is designed to work with arbitrary underlying distributions and statistics, it uses more advanced techniques to generate an accurate confidence interval. 
>>> print(res.confidence_interval) ConfidenceInterval(low=3.57655333533867, high=4.382043696342881) If we sample from the original distribution 1000 times and form a bootstrap confidence interval for each sample, the confidence interval contains the true value of the statistic approximately 90% of the time. >>> n_trials = 1000 >>> ci_contains_true_std = 0 >>> for i in range(n_trials): ... data = (dist.rvs(size=100, random_state=rng),) ... ci = bootstrap(data, np.std, confidence_level=0.9, n_resamples=1000, ... random_state=rng).confidence_interval ... if ci[0] < std_true < ci[1]: ... ci_contains_true_std += 1 >>> print(ci_contains_true_std) 875 Rather than writing a loop, we can also determine the confidence intervals for all 1000 samples at once. >>> data = (dist.rvs(size=(n_trials, 100), random_state=rng),) >>> res = bootstrap(data, np.std, axis=-1, confidence_level=0.9, ... n_resamples=1000, random_state=rng) >>> ci_l, ci_u = res.confidence_interval Here, `ci_l` and `ci_u` contain the confidence interval for each of the ``n_trials = 1000`` samples. >>> print(ci_l[995:]) [3.77729695 3.75090233 3.45829131 3.34078217 3.48072829] >>> print(ci_u[995:]) [4.88316666 4.86924034 4.32032996 4.2822427 4.59360598] And again, approximately 90% contain the true value, ``std_true = 4``. >>> print(np.sum((ci_l < std_true) & (std_true < ci_u))) 900 `bootstrap` can also be used to estimate confidence intervals of multi-sample statistics, including those calculated by hypothesis tests. `scipy.stats.mood` perform's Mood's test for equal scale parameters, and it returns two outputs: a statistic, and a p-value. To get a confidence interval for the test statistic, we first wrap `scipy.stats.mood` in a function that accepts two sample arguments, accepts an `axis` keyword argument, and returns only the statistic. >>> from scipy.stats import mood >>> def my_statistic(sample1, sample2, axis): ... statistic, _ = mood(sample1, sample2, axis=-1) ... 
return statistic Here, we use the 'percentile' method with the default 95% confidence level. >>> sample1 = norm.rvs(scale=1, size=100, random_state=rng) >>> sample2 = norm.rvs(scale=2, size=100, random_state=rng) >>> data = (sample1, sample2) >>> res = bootstrap(data, my_statistic, method='basic', random_state=rng) >>> print(mood(sample1, sample2)[0]) # element 0 is the statistic -5.521109549096542 >>> print(res.confidence_interval) ConfidenceInterval(low=-7.255994487314675, high=-4.016202624747605) The bootstrap estimate of the standard error is also available. >>> print(res.standard_error) 0.8344963846318795 Paired-sample statistics work, too. For example, consider the Pearson correlation coefficient. >>> from scipy.stats import pearsonr >>> n = 100 >>> x = np.linspace(0, 10, n) >>> y = x + rng.uniform(size=n) >>> print(pearsonr(x, y)[0]) # element 0 is the statistic 0.9962357936065914 We wrap `pearsonr` so that it returns only the statistic. >>> def my_statistic(x, y): ... return pearsonr(x, y)[0] We call `bootstrap` using ``paired=True``. Also, since ``my_statistic`` isn't vectorized to calculate the statistic along a given axis, we pass in ``vectorized=False``. >>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True, ... random_state=rng) >>> print(res.confidence_interval) ConfidenceInterval(low=0.9950085825848624, high=0.9971212407917498) The result object can be passed back into `bootstrap` to perform additional resampling: >>> len(res.bootstrap_distribution) 9999 >>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True, ... n_resamples=1001, random_state=rng, ... bootstrap_result=res) >>> len(res.bootstrap_distribution) 11000 or to change the confidence interval options: >>> res2 = bootstrap((x, y), my_statistic, vectorized=False, paired=True, ... n_resamples=0, random_state=rng, bootstrap_result=res, ... method='percentile', confidence_level=0.9) >>> np.testing.assert_equal(res2.bootstrap_distribution, ... 
res.bootstrap_distribution) >>> res.confidence_interval ConfidenceInterval(low=0.9950035351407804, high=0.9971170323404578) without repeating computation of the original bootstrap distribution. """ # Input validation args = _bootstrap_iv(data, statistic, vectorized, paired, axis, confidence_level, alternative, n_resamples, batch, method, bootstrap_result, random_state) (data, statistic, vectorized, paired, axis, confidence_level, alternative, n_resamples, batch, method, bootstrap_result, random_state) = args theta_hat_b = ([] if bootstrap_result is None else [bootstrap_result.bootstrap_distribution]) batch_nominal = batch or n_resamples or 1 for k in range(0, n_resamples, batch_nominal): batch_actual = min(batch_nominal, n_resamples-k) # Generate resamples resampled_data = [] for sample in data: resample = _bootstrap_resample(sample, n_resamples=batch_actual, random_state=random_state) resampled_data.append(resample) # Compute bootstrap distribution of statistic theta_hat_b.append(statistic(*resampled_data, axis=-1)) theta_hat_b = np.concatenate(theta_hat_b, axis=-1) # Calculate percentile interval alpha = ((1 - confidence_level)/2 if alternative == 'two-sided' else (1 - confidence_level)) if method == 'bca': interval = _bca_interval(data, statistic, axis=-1, alpha=alpha, theta_hat_b=theta_hat_b, batch=batch)[:2] percentile_fun = _percentile_along_axis else: interval = alpha, 1-alpha def percentile_fun(a, q): return np.percentile(a=a, q=q, axis=-1) # Calculate confidence interval of statistic ci_l = percentile_fun(theta_hat_b, interval[0]*100) ci_u = percentile_fun(theta_hat_b, interval[1]*100) if method == 'basic': # see [3] theta_hat = statistic(*data, axis=-1) ci_l, ci_u = 2*theta_hat - ci_u, 2*theta_hat - ci_l if alternative == 'less': ci_l = np.full_like(ci_l, -np.inf) elif alternative == 'greater': ci_u = np.full_like(ci_u, np.inf) return BootstrapResult(confidence_interval=ConfidenceInterval(ci_l, ci_u), bootstrap_distribution=theta_hat_b, 
standard_error=np.std(theta_hat_b, ddof=1, axis=-1))


def _monte_carlo_test_iv(data, rvs, statistic, vectorized, n_resamples,
                         batch, alternative, axis):
    """Input validation and standardization for `monte_carlo_test`.

    Returns the arguments in canonical form: `data` becomes a sequence of
    at-least-1D arrays with the statistic axis moved to position -1, and
    `statistic` is wrapped by `_vectorize_statistic` if the user's callable
    was not vectorized.
    """
    axis_int = int(axis)
    if axis != axis_int:
        raise ValueError("`axis` must be an integer.")

    if vectorized not in {True, False, None}:
        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")

    # A single callable in `rvs` means `data` is a single sample; wrap both
    # so the rest of the code can treat them uniformly as sequences.
    if not isinstance(rvs, Sequence):
        rvs = (rvs,)
        data = (data,)
    for rvs_i in rvs:
        if not callable(rvs_i):
            raise TypeError("`rvs` must be callable or sequence of callables.")
    if not len(rvs) == len(data):
        message = "If `rvs` is a sequence, `len(rvs)` must equal `len(data)`."
        raise ValueError(message)

    if not callable(statistic):
        raise TypeError("`statistic` must be callable.")

    # If the user did not specify whether the statistic is vectorized,
    # infer it from whether the callable accepts an `axis` keyword.
    if vectorized is None:
        vectorized = 'axis' in inspect.signature(statistic).parameters

    if not vectorized:
        statistic_vectorized = _vectorize_statistic(statistic)
    else:
        statistic_vectorized = statistic

    # Broadcast the samples against one another, then move the axis of
    # interest to the end so downstream code always operates on ``axis=-1``.
    data = _broadcast_arrays(data, axis)
    data_iv = []
    for sample in data:
        sample = np.atleast_1d(sample)
        sample = np.moveaxis(sample, axis_int, -1)
        data_iv.append(sample)

    n_resamples_int = int(n_resamples)
    if n_resamples != n_resamples_int or n_resamples_int <= 0:
        raise ValueError("`n_resamples` must be a positive integer.")

    if batch is None:
        batch_iv = batch
    else:
        batch_iv = int(batch)
        if batch != batch_iv or batch_iv <= 0:
            raise ValueError("`batch` must be a positive integer or None.")

    alternatives = {'two-sided', 'greater', 'less'}
    alternative = alternative.lower()
    if alternative not in alternatives:
        raise ValueError(f"`alternative` must be in {alternatives}")

    return (data_iv, rvs, statistic_vectorized, vectorized, n_resamples_int,
            batch_iv, alternative, axis_int)


@dataclass
class MonteCarloTestResult:
    """Result object returned by `scipy.stats.monte_carlo_test`.

    Attributes
    ----------
    statistic : float or ndarray
        The observed test statistic of the sample.
    pvalue : float or ndarray
        The p-value for the given alternative.
    null_distribution : ndarray
        The values of the test statistic generated under the null
        hypothesis.
    """
    statistic: float | np.ndarray
    pvalue: float | np.ndarray
    null_distribution: np.ndarray


@_rename_parameter('sample', 'data')
def monte_carlo_test(data, rvs, statistic, *, vectorized=None,
                     n_resamples=9999, batch=None, alternative="two-sided",
                     axis=0):
    r"""Perform a Monte Carlo hypothesis test.

    `data` contains a sample or a sequence of one or more samples. `rvs`
    specifies the distribution(s) of the sample(s) in `data` under the null
    hypothesis. The value of `statistic` for the given `data` is compared
    against a Monte Carlo null distribution: the value of the statistic for
    each of `n_resamples` sets of samples generated using `rvs`. This gives
    the p-value, the probability of observing such an extreme value of the
    test statistic under the null hypothesis.

    Parameters
    ----------
    data : array-like or sequence of array-like
        An array or sequence of arrays of observations.
    rvs : callable or tuple of callables
        A callable or sequence of callables that generates random variates
        under the null hypothesis. Each element of `rvs` must be a callable
        that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and
        returns an N-d array sample of that shape. If `rvs` is a sequence, the
        number of callables in `rvs` must match the number of samples in
        `data`, i.e. ``len(rvs) == len(data)``. If `rvs` is a single callable,
        `data` is treated as a single sample.
    statistic : callable
        Statistic for which the p-value of the hypothesis test is to be
        calculated. `statistic` must be a callable that accepts a sample
        (e.g. ``statistic(sample)``) or ``len(rvs)`` separate samples (e.g.
        ``statistic(samples1, sample2)`` if `rvs` contains two callables and
        `data` contains two samples) and returns the resulting statistic.
        If `vectorized` is set ``True``, `statistic` must also accept a keyword
        argument `axis` and be vectorized to compute the statistic along the
        provided `axis` of the samples in `data`.
    vectorized : bool, optional
        If `vectorized` is set ``False``, `statistic` will not be passed
        keyword argument `axis` and is expected to calculate the statistic
        only for 1D samples. If ``True``, `statistic` will be passed keyword
        argument `axis` and is expected to calculate the statistic along `axis`
        when passed ND sample arrays. If ``None`` (default), `vectorized`
        will be set ``True`` if ``axis`` is a parameter of `statistic`. Use
        of a vectorized statistic typically reduces computation time.
    n_resamples : int, default: 9999
        Number of samples drawn from each of the callables of `rvs`.
        Equivalently, the number of statistic values under the null
        hypothesis used as the Monte Carlo null distribution.
    batch : int, optional
        The number of Monte Carlo samples to process in each call to
        `statistic`. Memory usage is O(`batch`*``sample.size[axis]``).
        Default is ``None``, in which case `batch` equals `n_resamples`.
    alternative : {'two-sided', 'less', 'greater'}
        The alternative hypothesis for which the p-value is calculated.
        For each alternative, the p-value is defined as follows.

        - ``'greater'`` : the percentage of the null distribution that is
          greater than or equal to the observed value of the test statistic.
        - ``'less'`` : the percentage of the null distribution that is
          less than or equal to the observed value of the test statistic.
        - ``'two-sided'`` : twice the smaller of the p-values above.

    axis : int, default: 0
        The axis of `data` (or each sample within `data`) over which to
        calculate the statistic.

    Returns
    -------
    res : MonteCarloTestResult
        An object with attributes:

        statistic : float or ndarray
            The test statistic of the observed `data`.
        pvalue : float or ndarray
            The p-value for the given alternative.
        null_distribution : ndarray
            The values of the test statistic generated under the null
            hypothesis.

    References
    ----------
    .. [1] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
       Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."
       Statistical Applications in Genetics and Molecular Biology 9.1 (2010).

    Examples
    --------

    Suppose we wish to test whether a small sample has been drawn from a
    normal distribution. We decide that we will use the skew of the sample as
    a test statistic, and we will consider a p-value of 0.05 to be
    statistically significant.

    >>> import numpy as np
    >>> from scipy import stats
    >>> def statistic(x, axis):
    ...     return stats.skew(x, axis)

    After collecting our data, we calculate the observed value of the test
    statistic.

    >>> rng = np.random.default_rng()
    >>> x = stats.skewnorm.rvs(a=1, size=50, random_state=rng)
    >>> statistic(x, axis=0)
    0.12457412450240658

    To determine the probability of observing such an extreme value of the
    skewness by chance if the sample were drawn from the normal distribution,
    we can perform a Monte Carlo hypothesis test. The test will draw many
    samples at random from their normal distribution, calculate the skewness
    of each sample, and compare our original skewness against this
    distribution to determine an approximate p-value.

    >>> from scipy.stats import monte_carlo_test
    >>> # because our statistic is vectorized, we pass `vectorized=True`
    >>> rvs = lambda size: stats.norm.rvs(size=size, random_state=rng)
    >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True)
    >>> print(res.statistic)
    0.12457412450240658
    >>> print(res.pvalue)
    0.7012

    The probability of obtaining a test statistic less than or equal to the
    observed value under the null hypothesis is ~70%. This is greater than
    our chosen threshold of 5%, so we cannot consider this to be significant
    evidence against the null hypothesis.

    Note that this p-value essentially matches that of
    `scipy.stats.skewtest`, which relies on an asymptotic distribution of a
    test statistic based on the sample skewness.

    >>> stats.skewtest(x).pvalue
    0.6892046027110614

    This asymptotic approximation is not valid for small sample sizes, but
    `monte_carlo_test` can be used with samples of any size.

    >>> x = stats.skewnorm.rvs(a=1, size=7, random_state=rng)
    >>> # stats.skewtest(x) would produce an error due to small sample
    >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True)

    The Monte Carlo distribution of the test statistic is provided for
    further investigation.

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.hist(res.null_distribution, bins=50)
    >>> ax.set_title("Monte Carlo distribution of test statistic")
    >>> ax.set_xlabel("Value of Statistic")
    >>> ax.set_ylabel("Frequency")
    >>> plt.show()

    """
    args = _monte_carlo_test_iv(data, rvs, statistic, vectorized,
                                n_resamples, batch, alternative, axis)
    (data, rvs, statistic, vectorized,
     n_resamples, batch, alternative, axis) = args

    # Some statistics return plain floats; ensure they're at least np.float64
    observed = np.asarray(statistic(*data, axis=-1))[()]

    n_observations = [sample.shape[-1] for sample in data]
    batch_nominal = batch or n_resamples
    null_distribution = []
    # Build the null distribution in batches to bound peak memory usage.
    for k in range(0, n_resamples, batch_nominal):
        batch_actual = min(batch_nominal, n_resamples - k)
        resamples = [rvs_i(size=(batch_actual, n_observations_i))
                     for rvs_i, n_observations_i
                     in zip(rvs, n_observations)]
        null_distribution.append(statistic(*resamples, axis=-1))
    null_distribution = np.concatenate(null_distribution)
    # Add trailing singleton axes so the null distribution broadcasts
    # against `observed` in the comparisons below.
    null_distribution = null_distribution.reshape([-1] + [1]*observed.ndim)

    def less(null_distribution, observed):
        cmps = null_distribution <= observed
        pvalues = (cmps.sum(axis=0) + 1) / (n_resamples + 1)  # see [1]
        return pvalues

    def greater(null_distribution, observed):
        cmps = null_distribution >= observed
        pvalues = (cmps.sum(axis=0) + 1) / (n_resamples + 1)  # see [1]
        return pvalues

    def two_sided(null_distribution, observed):
        pvalues_less = less(null_distribution, observed)
        pvalues_greater = greater(null_distribution, observed)
        pvalues = np.minimum(pvalues_less, pvalues_greater) * 2
        return pvalues

    compare = {"less": less,
               "greater": greater,
               "two-sided": two_sided}

    pvalues = compare[alternative](null_distribution, observed)
    # The doubling in `two_sided` can push a p-value above 1; clip it.
    pvalues = np.clip(pvalues, 0, 1)

    return MonteCarloTestResult(observed, pvalues, null_distribution)


@dataclass
class PermutationTestResult:
    """Result object returned by `scipy.stats.permutation_test`.

    Attributes
    ----------
    statistic : float or ndarray
        The observed test statistic of the data.
    pvalue : float or ndarray
        The p-value for the given alternative.
    null_distribution : ndarray
        The values of the test statistic generated under the null
        hypothesis.
    """
    statistic: float | np.ndarray
    pvalue: float | np.ndarray
    null_distribution: np.ndarray


def _all_partitions_concatenated(ns):
    """
    Generate all partitions of indices of groups of given sizes, concatenated

    `ns` is an iterable of ints.
""" def all_partitions(z, n): for c in combinations(z, n): x0 = set(c) x1 = z - x0 yield [x0, x1] def all_partitions_n(z, ns): if len(ns) == 0: yield [z] return for c in all_partitions(z, ns[0]): for d in all_partitions_n(c[1], ns[1:]): yield c[0:1] + d z = set(range(np.sum(ns))) for partitioning in all_partitions_n(z, ns[:]): x = np.concatenate([list(partition) for partition in partitioning]).astype(int) yield x def _batch_generator(iterable, batch): """A generator that yields batches of elements from an iterable""" iterator = iter(iterable) if batch <= 0: raise ValueError("`batch` must be positive.") z = [item for i, item in zip(range(batch), iterator)] while z: # we don't want StopIteration without yielding an empty list yield z z = [item for i, item in zip(range(batch), iterator)] def _pairings_permutations_gen(n_permutations, n_samples, n_obs_sample, batch, random_state): # Returns a generator that yields arrays of size # `(batch, n_samples, n_obs_sample)`. # Each row is an independent permutation of indices 0 to `n_obs_sample`. batch = min(batch, n_permutations) if hasattr(random_state, 'permuted'): def batched_perm_generator(): indices = np.arange(n_obs_sample) indices = np.tile(indices, (batch, n_samples, 1)) for k in range(0, n_permutations, batch): batch_actual = min(batch, n_permutations-k) # Don't permute in place, otherwise results depend on `batch` permuted_indices = random_state.permuted(indices, axis=-1) yield permuted_indices[:batch_actual] else: # RandomState and early Generators don't have `permuted` def batched_perm_generator(): for k in range(0, n_permutations, batch): batch_actual = min(batch, n_permutations-k) size = (batch_actual, n_samples, n_obs_sample) x = random_state.random(size=size) yield np.argsort(x, axis=-1)[:batch_actual] return batched_perm_generator() def _calculate_null_both(data, statistic, n_permutations, batch, random_state=None): """ Calculate null distribution for independent sample tests. 
""" n_samples = len(data) # compute number of permutations # (distinct partitions of data into samples of these sizes) n_obs_i = [sample.shape[-1] for sample in data] # observations per sample n_obs_ic = np.cumsum(n_obs_i) n_obs = n_obs_ic[-1] # total number of observations n_max = np.prod([comb(n_obs_ic[i], n_obs_ic[i-1]) for i in range(n_samples-1, 0, -1)]) # perm_generator is an iterator that produces permutations of indices # from 0 to n_obs. We'll concatenate the samples, use these indices to # permute the data, then split the samples apart again. if n_permutations >= n_max: exact_test = True n_permutations = n_max perm_generator = _all_partitions_concatenated(n_obs_i) else: exact_test = False # Neither RandomState.permutation nor Generator.permutation # can permute axis-slices independently. If this feature is # added in the future, batches of the desired size should be # generated in a single call. perm_generator = (random_state.permutation(n_obs) for i in range(n_permutations)) batch = batch or int(n_permutations) null_distribution = [] # First, concatenate all the samples. In batches, permute samples with # indices produced by the `perm_generator`, split them into new samples of # the original sizes, compute the statistic for each batch, and add these # statistic values to the null distribution. data = np.concatenate(data, axis=-1) for indices in _batch_generator(perm_generator, batch=batch): indices = np.array(indices) # `indices` is 2D: each row is a permutation of the indices. # We use it to index `data` along its last axis, which corresponds # with observations. # After indexing, the second to last axis of `data_batch` corresponds # with permutations, and the last axis corresponds with observations. data_batch = data[..., indices] # Move the permutation axis to the front: we'll concatenate a list # of batched statistic values along this zeroth axis to form the # null distribution. 
data_batch = np.moveaxis(data_batch, -2, 0) data_batch = np.split(data_batch, n_obs_ic[:-1], axis=-1) null_distribution.append(statistic(*data_batch, axis=-1)) null_distribution = np.concatenate(null_distribution, axis=0) return null_distribution, n_permutations, exact_test def _calculate_null_pairings(data, statistic, n_permutations, batch, random_state=None): """ Calculate null distribution for association tests. """ n_samples = len(data) # compute number of permutations (factorial(n) permutations of each sample) n_obs_sample = data[0].shape[-1] # observations per sample; same for each n_max = factorial(n_obs_sample)**n_samples # `perm_generator` is an iterator that produces a list of permutations of # indices from 0 to n_obs_sample, one for each sample. if n_permutations >= n_max: exact_test = True n_permutations = n_max batch = batch or int(n_permutations) # cartesian product of the sets of all permutations of indices perm_generator = product(*(permutations(range(n_obs_sample)) for i in range(n_samples))) batched_perm_generator = _batch_generator(perm_generator, batch=batch) else: exact_test = False batch = batch or int(n_permutations) # Separate random permutations of indices for each sample. # Again, it would be nice if RandomState/Generator.permutation # could permute each axis-slice separately. args = n_permutations, n_samples, n_obs_sample, batch, random_state batched_perm_generator = _pairings_permutations_gen(*args) null_distribution = [] for indices in batched_perm_generator: indices = np.array(indices) # `indices` is 3D: the zeroth axis is for permutations, the next is # for samples, and the last is for observations. Swap the first two # to make the zeroth axis correspond with samples, as it does for # `data`. indices = np.swapaxes(indices, 0, 1) # When we're done, `data_batch` will be a list of length `n_samples`. # Each element will be a batch of random permutations of one sample. 
        # The zeroth axis of each batch will correspond with permutations,
        # and the last will correspond with observations. (This makes it
        # easy to pass into `statistic`.)
        data_batch = [None]*n_samples
        for i in range(n_samples):
            data_batch[i] = data[i][..., indices[i]]
            data_batch[i] = np.moveaxis(data_batch[i], -2, 0)

        null_distribution.append(statistic(*data_batch, axis=-1))

    null_distribution = np.concatenate(null_distribution, axis=0)

    return null_distribution, n_permutations, exact_test


def _calculate_null_samples(data, statistic, n_permutations, batch,
                            random_state=None):
    """
    Calculate null distribution for paired-sample tests.
    """
    n_samples = len(data)

    # By convention, the meaning of the "samples" permutations type for
    # data with only one sample is to flip the sign of the observations.
    # Achieve this by adding a second sample - the negative of the original.
    if n_samples == 1:
        data = [data[0], -data[0]]

    # The "samples" permutation strategy is the same as the "pairings"
    # strategy except the roles of samples and observations are flipped.
    # So swap these axes, then we'll use the function for the "pairings"
    # strategy to do all the work!
    # NOTE(review): `data` is a list here; np.swapaxes stacks the samples
    # into one array before swapping — assumes equal sample lengths (already
    # validated for paired statistics).
    data = np.swapaxes(data, 0, -1)

    # (Of course, the user's statistic doesn't know what we've done here,
    # so we need to pass it what it's expecting.)
    def statistic_wrapped(*data, axis):
        # Undo the axis swap before handing samples to the user's statistic.
        data = np.swapaxes(data, 0, -1)
        if n_samples == 1:
            data = data[0:1]
        return statistic(*data, axis=axis)

    return _calculate_null_pairings(data, statistic_wrapped, n_permutations,
                                    batch, random_state)


def _permutation_test_iv(data, statistic, permutation_type, vectorized,
                         n_resamples, batch, alternative, axis,
                         random_state):
    """Input validation and standardization for `permutation_test`."""

    axis_int = int(axis)
    if axis != axis_int:
        raise ValueError("`axis` must be an integer.")

    permutation_types = {'samples', 'pairings', 'independent'}
    permutation_type = permutation_type.lower()
    if permutation_type not in permutation_types:
        raise ValueError(f"`permutation_type` must be in {permutation_types}.")

    if vectorized not in {True, False, None}:
        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")

    # Infer vectorization from the statistic's signature when unspecified.
    if vectorized is None:
        vectorized = 'axis' in inspect.signature(statistic).parameters

    if not vectorized:
        statistic = _vectorize_statistic(statistic)

    message = "`data` must be a tuple containing at least two samples"
    try:
        if len(data) < 2 and permutation_type == 'independent':
            raise ValueError(message)
    except TypeError:
        # `data` has no len(); it is not a sequence of samples at all.
        raise TypeError(message)

    data = _broadcast_arrays(data, axis)
    data_iv = []
    for sample in data:
        sample = np.atleast_1d(sample)
        if sample.shape[axis] <= 1:
            raise ValueError("each sample in `data` must contain two or more "
                             "observations along `axis`.")
        sample = np.moveaxis(sample, axis_int, -1)
        data_iv.append(sample)

    # np.inf is allowed: it requests an exact test.
    n_resamples_int = (int(n_resamples) if not np.isinf(n_resamples)
                       else np.inf)
    if n_resamples != n_resamples_int or n_resamples_int <= 0:
        raise ValueError("`n_resamples` must be a positive integer.")

    if batch is None:
        batch_iv = batch
    else:
        batch_iv = int(batch)
        if batch != batch_iv or batch_iv <= 0:
            raise ValueError("`batch` must be a positive integer or None.")

    alternatives = {'two-sided', 'greater', 'less'}
    alternative = alternative.lower()
    if alternative not in alternatives:
        raise ValueError(f"`alternative` must be in {alternatives}")

    random_state = check_random_state(random_state)

    return (data_iv, statistic, permutation_type, vectorized, n_resamples_int,
            batch_iv, alternative, axis_int, random_state)


def permutation_test(data, statistic, *, permutation_type='independent',
                     vectorized=None, n_resamples=9999, batch=None,
                     alternative="two-sided", axis=0, random_state=None):
    r"""
    Performs a permutation test of a given statistic on provided data.

    For independent sample statistics, the null hypothesis is that the data
    are randomly sampled from the same distribution.
    For paired sample statistics, two null hypotheses can be tested:
    that the data are paired at random or that the data are assigned to
    samples at random.

    Parameters
    ----------
    data : iterable of array-like
        Contains the samples, each of which is an array of observations.
        Dimensions of sample arrays must be compatible for broadcasting except
        along `axis`.
    statistic : callable
        Statistic for which the p-value of the hypothesis test is to be
        calculated. `statistic` must be a callable that accepts samples
        as separate arguments (e.g. ``statistic(*data)``) and returns the
        resulting statistic.
        If `vectorized` is set ``True``, `statistic` must also accept a keyword
        argument `axis` and be vectorized to compute the statistic along the
        provided `axis` of the sample arrays.
    permutation_type : {'independent', 'samples', 'pairings'}, optional
        The type of permutations to be performed, in accordance with the
        null hypothesis. The first two permutation types are for paired sample
        statistics, in which all samples contain the same number of
        observations and observations with corresponding indices along `axis`
        are considered to be paired; the third is for independent sample
        statistics.

        - ``'samples'`` : observations are assigned to different samples
          but remain paired with the same observations from other samples.
          This permutation type is appropriate for paired sample hypothesis
          tests such as the Wilcoxon signed-rank test and the paired t-test.
- ``'pairings'`` : observations are paired with different observations, but they remain within the same sample. This permutation type is appropriate for association/correlation tests with statistics such as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's :math:`r`. - ``'independent'`` (default) : observations are assigned to different samples. Samples may contain different numbers of observations. This permutation type is appropriate for independent sample hypothesis tests such as the Mann-Whitney :math:`U` test and the independent sample t-test. Please see the Notes section below for more detailed descriptions of the permutation types. vectorized : bool, optional If `vectorized` is set ``False``, `statistic` will not be passed keyword argument `axis` and is expected to calculate the statistic only for 1D samples. If ``True``, `statistic` will be passed keyword argument `axis` and is expected to calculate the statistic along `axis` when passed an ND sample array. If ``None`` (default), `vectorized` will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of a vectorized statistic typically reduces computation time. n_resamples : int or np.inf, default: 9999 Number of random permutations (resamples) used to approximate the null distribution. If greater than or equal to the number of distinct permutations, the exact null distribution will be computed. Note that the number of distinct permutations grows very rapidly with the sizes of samples, so exact tests are feasible only for very small data sets. batch : int, optional The number of permutations to process in each call to `statistic`. Memory usage is O(`batch`*``n``), where ``n`` is the total size of all samples, regardless of the value of `vectorized`. Default is ``None``, in which case ``batch`` is the number of permutations. alternative : {'two-sided', 'less', 'greater'}, optional The alternative hypothesis for which the p-value is calculated. 
For each alternative, the p-value is defined for exact tests as follows. - ``'greater'`` : the percentage of the null distribution that is greater than or equal to the observed value of the test statistic. - ``'less'`` : the percentage of the null distribution that is less than or equal to the observed value of the test statistic. - ``'two-sided'`` (default) : twice the smaller of the p-values above. Note that p-values for randomized tests are calculated according to the conservative (over-estimated) approximation suggested in [2]_ and [3]_ rather than the unbiased estimator suggested in [4]_. That is, when calculating the proportion of the randomized null distribution that is as extreme as the observed value of the test statistic, the values in the numerator and denominator are both increased by one. An interpretation of this adjustment is that the observed value of the test statistic is always included as an element of the randomized null distribution. The convention used for two-sided p-values is not universal; the observed test statistic and null distribution are returned in case a different definition is preferred. axis : int, default: 0 The axis of the (broadcasted) samples over which to calculate the statistic. If samples have a different number of dimensions, singleton dimensions are prepended to samples with fewer dimensions before `axis` is considered. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Pseudorandom number generator state used to generate permutations. If `random_state` is ``None`` (default), the `numpy.random.RandomState` singleton is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with `random_state`. If `random_state` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Returns ------- res : PermutationTestResult An object with attributes: statistic : float or ndarray The observed test statistic of the data. 
pvalue : float or ndarray The p-value for the given alternative. null_distribution : ndarray The values of the test statistic generated under the null hypothesis. Notes ----- The three types of permutation tests supported by this function are described below. **Unpaired statistics** (``permutation_type='independent'``): The null hypothesis associated with this permutation type is that all observations are sampled from the same underlying distribution and that they have been assigned to one of the samples at random. Suppose ``data`` contains two samples; e.g. ``a, b = data``. When ``1 < n_resamples < binom(n, k)``, where * ``k`` is the number of observations in ``a``, * ``n`` is the total number of observations in ``a`` and ``b``, and * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``), the data are pooled (concatenated), randomly assigned to either the first or second sample, and the statistic is calculated. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= binom(n, k)``, an exact test is performed: the data are *partitioned* between the samples in each distinct way exactly once, and the exact null distribution is formed. Note that for a given partitioning of the data between the samples, only one ordering/permutation of the data *within* each sample is considered. For statistics that do not depend on the order of the data within samples, this dramatically reduces computational cost without affecting the shape of the null distribution (because the frequency/count of each value is affected by the same factor). For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``. 
Because only one ordering/permutation of the data *within* each sample is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]`` and ``y = [a4, a3, b1]`` would *not* be considered distinct from the example above. ``permutation_type='independent'`` does not support one-sample statistics, but it can be applied to statistics with more than two samples. In this case, if ``n`` is an array of the number of observations within each sample, the number of distinct partitions is:: np.prod([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)]) **Paired statistics, permute pairings** (``permutation_type='pairings'``): The null hypothesis associated with this permutation type is that observations within each sample are drawn from the same underlying distribution and that pairings with elements of other samples are assigned at random. Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we wish to consider all possible pairings of elements of ``a`` with elements of a second sample, ``b``. Let ``n`` be the number of observations in ``a``, which must also equal the number of observations in ``b``. When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are randomly permuted. The user-supplied statistic accepts one data argument, say ``a_perm``, and calculates the statistic considering ``a_perm`` and ``b``. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= factorial(n)``, an exact test is performed: ``a`` is permuted in each distinct way exactly once. Therefore, the `statistic` is computed for each unique pairing of samples between ``a`` and ``b`` exactly once. For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left in its original order. 
``permutation_type='pairings'`` supports ``data`` containing any number of samples, each of which must contain the same number of observations. All samples provided in ``data`` are permuted *independently*. Therefore, if ``m`` is the number of samples and ``n`` is the number of observations within each sample, then the number of permutations in an exact test is:: factorial(n)**m Note that if a two-sample statistic, for example, does not inherently depend on the order in which observations are provided - only on the *pairings* of observations - then only one of the two samples should be provided in ``data``. This dramatically reduces computational cost without affecting the shape of the null distribution (because the frequency/count of each value is affected by the same factor). **Paired statistics, permute samples** (``permutation_type='samples'``): The null hypothesis associated with this permutation type is that observations within each pair are drawn from the same underlying distribution and that the sample to which they are assigned is random. Suppose ``data`` contains two samples; e.g. ``a, b = data``. Let ``n`` be the number of observations in ``a``, which must also equal the number of observations in ``b``. When ``1 < n_resamples < 2**n``, the elements of ``a`` are ``b`` are randomly swapped between samples (maintaining their pairings) and the statistic is calculated. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= 2**n``, an exact test is performed: the observations are assigned to the two samples in each distinct way (while maintaining pairings) exactly once. For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``. 
``permutation_type='samples'`` supports ``data`` containing any number of samples, each of which must contain the same number of observations. If ``data`` contains more than one sample, paired observations within ``data`` are exchanged between samples *independently*. Therefore, if ``m`` is the number of samples and ``n`` is the number of observations within each sample, then the number of permutations in an exact test is:: factorial(m)**n Several paired-sample statistical tests, such as the Wilcoxon signed rank test and paired-sample t-test, can be performed considering only the *difference* between two paired elements. Accordingly, if ``data`` contains only one sample, then the null distribution is formed by independently changing the *sign* of each observation. .. warning:: The p-value is calculated by counting the elements of the null distribution that are as extreme or more extreme than the observed value of the statistic. Due to the use of finite precision arithmetic, some statistic functions return numerically distinct values when the theoretical values would be exactly equal. In some cases, this could lead to a large error in the calculated p-value. `permutation_test` guards against this by considering elements in the null distribution that are "close" (within a factor of ``1+1e-14``) to the observed value of the test statistic as equal to the observed value of the test statistic. However, the user is advised to inspect the null distribution to assess whether this method of comparison is appropriate, and if not, calculate the p-value manually. See example below. References ---------- .. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951). .. [2] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be Zero: Calculating Exact P-values When Permutations Are Randomly Drawn." Statistical Applications in Genetics and Molecular Biology 9.1 (2010). .. [3] M. D. Ernst. "Permutation Methods: A Basis for Exact Inference". Statistical Science (2004). 
.. [4] B. Efron and R. J. Tibshirani. An Introduction to the Bootstrap (1993). Examples -------- Suppose we wish to test whether two samples are drawn from the same distribution. Assume that the underlying distributions are unknown to us, and that before observing the data, we hypothesized that the mean of the first sample would be less than that of the second sample. We decide that we will use the difference between the sample means as a test statistic, and we will consider a p-value of 0.05 to be statistically significant. For efficiency, we write the function defining the test statistic in a vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the statistic will be calculated for each axis-slice along `axis`. >>> import numpy as np >>> def statistic(x, y, axis): ... return np.mean(x, axis=axis) - np.mean(y, axis=axis) After collecting our data, we calculate the observed value of the test statistic. >>> from scipy.stats import norm >>> rng = np.random.default_rng() >>> x = norm.rvs(size=5, random_state=rng) >>> y = norm.rvs(size=6, loc = 3, random_state=rng) >>> statistic(x, y, 0) -3.5411688580987266 Indeed, the test statistic is negative, suggesting that the true mean of the distribution underlying ``x`` is less than that of the distribution underlying ``y``. To determine the probability of this occuring by chance if the two samples were drawn from the same distribution, we perform a permutation test. >>> from scipy.stats import permutation_test >>> # because our statistic is vectorized, we pass `vectorized=True` >>> # `n_resamples=np.inf` indicates that an exact test is to be performed >>> res = permutation_test((x, y), statistic, vectorized=True, ... n_resamples=np.inf, alternative='less') >>> print(res.statistic) -3.5411688580987266 >>> print(res.pvalue) 0.004329004329004329 The probability of obtaining a test statistic less than or equal to the observed value under the null hypothesis is 0.4329%. 
This is less than our chosen threshold of 5%, so we consider this to be significant evidence against the null hypothesis in favor of the alternative. Because the size of the samples above was small, `permutation_test` could perform an exact test. For larger samples, we resort to a randomized permutation test. >>> x = norm.rvs(size=100, random_state=rng) >>> y = norm.rvs(size=120, loc=0.3, random_state=rng) >>> res = permutation_test((x, y), statistic, n_resamples=100000, ... vectorized=True, alternative='less', ... random_state=rng) >>> print(res.statistic) -0.5230459671240913 >>> print(res.pvalue) 0.00016999830001699983 The approximate probability of obtaining a test statistic less than or equal to the observed value under the null hypothesis is 0.0225%. This is again less than our chosen threshold of 5%, so again we have significant evidence to reject the null hypothesis in favor of the alternative. For large samples and number of permutations, the result is comparable to that of the corresponding asymptotic test, the independent sample t-test. >>> from scipy.stats import ttest_ind >>> res_asymptotic = ttest_ind(x, y, alternative='less') >>> print(res_asymptotic.pvalue) 0.00012688101537979522 The permutation distribution of the test statistic is provided for further investigation. >>> import matplotlib.pyplot as plt >>> plt.hist(res.null_distribution, bins=50) >>> plt.title("Permutation distribution of test statistic") >>> plt.xlabel("Value of Statistic") >>> plt.ylabel("Frequency") >>> plt.show() Inspection of the null distribution is essential if the statistic suffers from inaccuracy due to limited machine precision. Consider the following case: >>> from scipy.stats import pearsonr >>> x = [1, 2, 4, 3] >>> y = [2, 4, 6, 8] >>> def statistic(x, y): ... return pearsonr(x, y).statistic >>> res = permutation_test((x, y), statistic, vectorized=False, ... permutation_type='pairings', ... 
alternative='greater') >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution In this case, some elements of the null distribution differ from the observed value of the correlation coefficient ``r`` due to numerical noise. We manually inspect the elements of the null distribution that are nearly the same as the observed value of the test statistic. >>> r 0.8 >>> unique = np.unique(null) >>> unique array([-1. , -0.8, -0.8, -0.6, -0.4, -0.2, -0.2, 0. , 0.2, 0.2, 0.4, 0.6, 0.8, 0.8, 1. ]) # may vary >>> unique[np.isclose(r, unique)].tolist() [0.7999999999999999, 0.8] If `permutation_test` were to perform the comparison naively, the elements of the null distribution with value ``0.7999999999999999`` would not be considered as extreme or more extreme as the observed value of the statistic, so the calculated p-value would be too small. >>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null) >>> incorrect_pvalue 0.1111111111111111 # may vary Instead, `permutation_test` treats elements of the null distribution that are within ``max(1e-14, abs(r)*1e-14)`` of the observed value of the statistic ``r`` to be equal to ``r``. >>> correct_pvalue = np.count_nonzero(null >= r - 1e-14) / len(null) >>> correct_pvalue 0.16666666666666666 >>> res.pvalue == correct_pvalue True This method of comparison is expected to be accurate in most practical situations, but the user is advised to assess this by inspecting the elements of the null distribution that are close to the observed value of the statistic. Also, consider the use of statistics that can be calculated using exact arithmetic (e.g. integer statistics). 
""" args = _permutation_test_iv(data, statistic, permutation_type, vectorized, n_resamples, batch, alternative, axis, random_state) (data, statistic, permutation_type, vectorized, n_resamples, batch, alternative, axis, random_state) = args observed = statistic(*data, axis=-1) null_calculators = {"pairings": _calculate_null_pairings, "samples": _calculate_null_samples, "independent": _calculate_null_both} null_calculator_args = (data, statistic, n_resamples, batch, random_state) calculate_null = null_calculators[permutation_type] null_distribution, n_resamples, exact_test = ( calculate_null(*null_calculator_args)) # See References [2] and [3] adjustment = 0 if exact_test else 1 # relative tolerance for detecting numerically distinct but # theoretically equal values in the null distribution eps = 1e-14 gamma = np.maximum(eps, np.abs(eps * observed)) def less(null_distribution, observed): cmps = null_distribution <= observed + gamma pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) return pvalues def greater(null_distribution, observed): cmps = null_distribution >= observed - gamma pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) return pvalues def two_sided(null_distribution, observed): pvalues_less = less(null_distribution, observed) pvalues_greater = greater(null_distribution, observed) pvalues = np.minimum(pvalues_less, pvalues_greater) * 2 return pvalues compare = {"less": less, "greater": greater, "two-sided": two_sided} pvalues = compare[alternative](null_distribution, observed) pvalues = np.clip(pvalues, 0, 1) return PermutationTestResult(observed, pvalues, null_distribution) @dataclass class ResamplingMethod: """Configuration information for a statistical resampling method. Instances of this class can be passed into the `method` parameter of some hypothesis test functions to perform a resampling or Monte Carlo version of the hypothesis test. 
Attributes ---------- n_resamples : int The number of resamples to perform or Monte Carlo samples to draw. batch : int, optional The number of resamples to process in each vectorized call to the statistic. Batch sizes >>1 tend to be faster when the statistic is vectorized, but memory usage scales linearly with the batch size. Default is ``None``, which processes all resamples in a single batch. """ n_resamples: int = 9999 batch: int = None # type: ignore[assignment] @dataclass class MonteCarloMethod(ResamplingMethod): """Configuration information for a Monte Carlo hypothesis test. Instances of this class can be passed into the `method` parameter of some hypothesis test functions to perform a Monte Carlo version of the hypothesis tests. Attributes ---------- n_resamples : int, optional The number of Monte Carlo samples to draw. Default is 9999. batch : int, optional The number of Monte Carlo samples to process in each vectorized call to the statistic. Batch sizes >>1 tend to be faster when the statistic is vectorized, but memory usage scales linearly with the batch size. Default is ``None``, which processes all samples in a single batch. rvs : callable or tuple of callables, optional A callable or sequence of callables that generates random variates under the null hypothesis. Each element of `rvs` must be a callable that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and returns an N-d array sample of that shape. If `rvs` is a sequence, the number of callables in `rvs` must match the number of samples passed to the hypothesis test in which the `MonteCarloMethod` is used. Default is ``None``, in which case the hypothesis test function chooses values to match the standard version of the hypothesis test. For example, the null hypothesis of `scipy.stats.pearsonr` is typically that the samples are drawn from the standard normal distribution, so ``rvs = (rng.normal, rng.normal)`` where ``rng = np.random.default_rng()``. 
""" rvs: object = None def _asdict(self): # `dataclasses.asdict` deepcopies; we don't want that. return dict(n_resamples=self.n_resamples, batch=self.batch, rvs=self.rvs) @dataclass class PermutationMethod(ResamplingMethod): """Configuration information for a permutation hypothesis test. Instances of this class can be passed into the `method` parameter of some hypothesis test functions to perform a permutation version of the hypothesis tests. Attributes ---------- n_resamples : int, optional The number of resamples to perform. Default is 9999. batch : int, optional The number of resamples to process in each vectorized call to the statistic. Batch sizes >>1 tend to be faster when the statistic is vectorized, but memory usage scales linearly with the batch size. Default is ``None``, which processes all resamples in a single batch. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Pseudorandom number generator state used to generate resamples. If `random_state` is already a ``Generator`` or ``RandomState`` instance, then that instance is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with `random_state`. If `random_state` is ``None`` (default), the `numpy.random.RandomState` singleton is used. """ random_state: object = None def _asdict(self): # `dataclasses.asdict` deepcopies; we don't want that. return dict(n_resamples=self.n_resamples, batch=self.batch, random_state=self.random_state) @dataclass class BootstrapMethod(ResamplingMethod): """Configuration information for a bootstrap confidence interval. Instances of this class can be passed into the `method` parameter of some confidence interval methods to generate a bootstrap confidence interval. Attributes ---------- n_resamples : int, optional The number of resamples to perform. Default is 9999. batch : int, optional The number of resamples to process in each vectorized call to the statistic. 
Batch sizes >>1 tend to be faster when the statistic is vectorized, but memory usage scales linearly with the batch size. Default is ``None``, which processes all resamples in a single batch. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Pseudorandom number generator state used to generate resamples. If `random_state` is already a ``Generator`` or ``RandomState`` instance, then that instance is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with `random_state`. If `random_state` is ``None`` (default), the `numpy.random.RandomState` singleton is used. method : {'bca', 'percentile', 'basic'} Whether to use the 'percentile' bootstrap ('percentile'), the 'basic' (AKA 'reverse') bootstrap ('basic'), or the bias-corrected and accelerated bootstrap ('BCa', default). """ random_state: object = None method: str = 'BCa' def _asdict(self): # `dataclasses.asdict` deepcopies; we don't want that. return dict(n_resamples=self.n_resamples, batch=self.batch, random_state=self.random_state, method=self.method)
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@stats@_resampling.py@.PATH_END.py
{ "filename": "test_Acspy_Util_NodeList.py", "repo_name": "ACS-Community/ACS", "repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/acspycommon/test/test_Acspy_Util_NodeList.py", "type": "Python" }
#! /usr/bin/env python #******************************************************************************* # ALMA - Atacama Large Millimiter Array # (c) Associated Universities Inc., 2010 # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # "@(#) $Id: test_Acspy_Util_NodeList.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $" # # who when what # -------- -------- ---------------------------------------------- # agrimstrup 2010-02-12 created # #------------------------------------------------------------------------------ __revision__ = "$Id: test_Acspy_Util_NodeList.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $" #--REGULAR IMPORTS------------------------------------------------------------- import sys import unittest import mock import new import omniORB #--ACS IMPORTS----------------------------------------------------------------- import Acspy.Util.NodeList as NL #------------------------------------------------------------------------------ class TestListDir(unittest.TestCase): pass if __name__ == "__main__": unittest.main() # # ___oOo___
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@acspycommon@test@test_Acspy_Util_NodeList.py@.PATH_END.py
{ "filename": "TEA-tutorial.ipynb", "repo_name": "dzesmin/TEA", "repo_path": "TEA_extracted/TEA-master/doc/examples/jupyter_tutorial/TEA-tutorial.ipynb", "type": "Jupyter Notebook" }
# `TEA` Tutorial **TEA** is a numerical **Thermochemical Equilibrium Abundances** code, \ that calculates the abundances of gaseous molecular species in thermochemical equilibrium. The TEA code is available on Github [https://github.com/dzesmin/TEA](https://github.com/dzesmin/TEA). The code applies Gibbs free-energy minimization using an iterative, Lagrangian optimization scheme.\ It uses JANAF thermochemical data and can calculate abundances for 84 elemental species,\ and more than 600 hundred molecular species. There is a start guide, user manual, code document in addition to the [TEA published paper](https://iopscience.iop.org/article/10.3847/0067-0049/225/1/4/pdf).\ (See References section at the bottom of the notebook for the links for all the documents.) TEA is an open-source code, available under a [Reproducible Research, RR license](https://github.com/dzesmin/TEA). **<u>In this tutorial you will learn</u>:** ``` 1. How to download, configure and run TEA 2. How C/O ratio and metallicity influence species abundances 3. How major, most abundant molecular species transition with temperature ``` $\color{red}{\text{===============================================}}$\ **PRIOR TO THE TUTORIAL SESSION, EXECUTE ITEMS 1 TO 5 IN TERMINAL,**\ **TO TEST IF TEA RUNS PROPERLY AND ALL PACKAGES ARE CORRECTLY INSTALLED!** $\color{red}{\text{===============================================}}$ ``` 1. Download all the files from TEA DropBox 2. Generate new environment 3. Ensure listed packages are installed 4. Download TEA 5. Test if TEA runs in your terminal ``` **Questions?** [Submit an Issue to TEA Github Discussion Page](https://github.com/dzesmin/TEA/discussions) or [TEA Github Issues Page](https://github.com/dzesmin/TEA/issues) $\color{red}{\text{=====================================================================}}$ ## 1.Download all files 1. 
Go to Dropbox: - Location: [ERS_THEORY_WEBINAR->Day5_AtmosphericChemistry_Aug5->Tutorial_Blecic_TEA](https://www.dropbox.com/sh/n9qo8jfdyj7crl1/AAC-pWEQ2hVZm8h1yQE3OaCha/Day5_AtmosphericChemistry_Aug5/Tutorial_Blecic_TEA?dl=0&subfolder_nav_tracking=1) 2. Download [Tutorial_Blecic_TEA](https://www.dropbox.com/sh/n9qo8jfdyj7crl1/AAC-pWEQ2hVZm8h1yQE3OaCha/Day5_AtmosphericChemistry_Aug5/Tutorial_Blecic_TEA?dl=0&subfolder_nav_tracking=1) folder to your computer: - This will make Tutorial_Blecic_TEA/ directory on your computer with all necessary files inside. 3. Go inside the directory: - cd Tutorial_Blecic_TEA 4. Start jupyter notebook from this directory: - jupyter notebook ## 2. Generate new environment: **Generate python3 environment and call it 'tea'** ``` python3 -m venv ~/envs/tea ``` **Bash shortcut:** ``` alias tea='source ~/envs/tea/bin/activate' ``` **Start virtual environment:** ``` source ~/envs/tea/bin/activate ``` or use a shortcut: ``` tea ``` **When needed exit virtual environment:** ``` deactivate ``` ## 3. Ensure following python packages are installed: ``` Python: 2.7.3+ NumPy: 1.6.1+ SymPy: 0.7.1+ ``` ### Install them using pip: ``` pip install --upgrade pip pip install --upgrade ipython pip install --upgrade numpy pip install --upgrade scipy pip install --upgrade matplotlib pip install --upgrade jupyter pip install --upgrade sympy ``` ## 4. Download TEA **Clone TEA in the Tutorial_Blecic_TEA/ directory** ``` git clone https://github.com/dzesmin/TEA ``` ## 5. 
Test if TEA runs in terminal **<u>To run TEA, you must have TEA.cfg in the working/running directory.</u>:**\ (If you downloaded the DropBox, Tutorial_Blecic_TEA/, the file is already placed there for you) **Run TEA in terminal:** ``` # To make the pre-atm file run makeatm.py and define the output directory # makeatm.py <output_dir> ./TEA/tea/makeatm.py run_example ``` _In the screen you should see this:\ ...\ ...\ Created pre-atmospheric file:\ ./run_example/atm_inputs/quick_example.atm_ ``` # To make the tea result file run runatm.py, give the path to the pre-atm file and define the output directory # runatm.py (path_to_pre-atm> <output_dir> ./TEA/tea/runatm.py ./TEA/doc/examples/quick_example/inputs/quick_example.atm run_example ``` _It will run for ~10 sec and in the screen you should see this:\ ...\ ...\ Layer 100:\ 5\ The solution has converged to the given tolerance error._ _Species abundances calculated.\ Created TEA atmospheric file._ **Plot test results** ``` # To plot the results run plotTEA.py, give path to the results tea file and define molecular species # plotTEA.py <path_to_tea> <species_names_separated_with_comma> ./TEA/tea/plotTEA.py run_example.tea H2,H2O,CO ``` _A figure named run_example.png should be plotted on the screen\ and also placed in ./plots/run_example.png_ $\color{red}{\text{=====================================================================}}$ # Setup TEA and run it in jupyter notebook ## How to configure TEA? Below is the **TEA configuration file, TEA.cfg** containing two important sections: 1. One section that generates the pre-atmospheric file with requested species and T-P profile \ This section requires: - T-P profile file in the TEA format - names of the input elemental species as they appear in the periodic table - elemental abundances file (written in dex units) - output species in the format that TEA can recognize (see ./TEA/doc/conversion_record_sorted.txt) 2. 
The other section sets parameters to control how TEA runs ``` ############################# BEGIN FRONTMATTER ################################ # # # TEA - calculates Thermochemical Equilibrium Abundances of chemical species # # # # TEA is part of the PhD dissertation work of Dr. Jasmina # # Blecic, who developed it with coding assistance from # # undergraduate M. Oliver Bowman and under the advice of # # Prof. Joseph Harrington at the University of Central Florida, # # Orlando, Florida, USA. # # # # Copyright © 2014-2016 University of Central Florida # # # # This program is reproducible-research software: you can # # redistribute it and/or modify it under the terms of the # # Reproducible Research Software License as published by # # Prof. Joseph Harrington at the University of Central Florida, # # either version 0.3 of the License, or (at your option) any later # # version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # Reproducible Research Software License for more details. # # # # You should have received a copy of the Reproducible Research # # Software License along with this program. If not, see # # <http://planets.ucf.edu/resources/reproducible/>. The license's # # preamble explains the situation, concepts, and reasons surrounding # # reproducible research, and answers some common questions. # # # # This project was started with the support of the NASA Earth and # # Space Science Fellowship Program, grant NNX12AL83H, held by # # Jasmina Blecic, Principal Investigator Joseph Harrington, and the # # NASA Science Mission Directorate’s Planetary Atmospheres Program, # # grant NNX12AI69G. # # # # See the file ACKNOWLEDGING in the top-level TEA directory for # # instructions on how to acknowledge TEA in publications. 
# # # # Visit our Github site: # # https://github.com/dzesmin/TEA/ # # # # Reach us directly at: # # Jasmina Blecic <jasmina@nyu.edu> # # # ############################## END FRONTMATTER ################################# # ============================================================================= # Configuration file containing two sections: # 1. TEA section with parameters and booleans to run and debug TEA. # 2. PRE-ATM section with parameters to make pre-atmospheric file. # ============================================================================= # =============================== TEA SECTION =============================== # # Change the parameters below to control how TEA runs. # The code works without adjustments for the temperatures above ~500 K. # For temperatures below 500 K, it is not recommended to use TEA, as it # produces results with low precision. Setting xtol to 1e-8 and maxinter # to 200 is most optimizing. If higher tolerance level (xtol>1e-8) is # desired, maxium number of iterations must be increased # (see start_quide.txt for more info on potential user errors). # Run runatm.py as: runatm.py <PATH_TO_PRE_ATM_FILE> <RESULTS_DIR_NAME> [TEA] # Maximum number of iterations to run for each T-P point (default: 200): maxiter = 200 # Preserve headers and intermediate outputs (default: False): savefiles = False # Verbosity level (0: mute most, 1: some prints, 2: debug prints. Default: 1) # If both verb = 0 and ncpu >1 mute all verb = 1 # Enable time-stamp printing (default: False): times = False # Location of abundances file: abun_file = ./TEA/lib/abundances.txt # Location of working directory: location_out = . # Convergence tolerance level (default: 1e-8): xtol = 1e-8 # Number of parallel CPUs (default: 1): ncpu = 1 # ============================= PRE-ATM SECTION ============================= # # Execution of this section is optional. 
The user can produce a TEA # pre-atmospheric file by running makeatm.py, or make a custom-made file in # the format that TEA can read it and place it in the inputs/ folder. # See the correct format in the examples/multiTP/ folder. # # Change the parameters below to control how pre-atmospheric file is made. # Before executing the makeatm.py module make a pressure-temperature file. # Run makeatm.py as: makeatm.py <RESULTS_DIR_NAME> [PRE-ATM] # === Pressure and temperature file === # Path to pressure and temperature file. Recommended extension .dat. PT_file = ./TEA/doc/examples/PT/PT.dat # === Pre-atmospheric filename === # Recomended extension .atm. File will be placed in atm_inputs/. pre_atm_name = quick_example.atm # === Input elements names === # MUST have names as in the periodic table. input_elem = H He C N O # === Output species names === # MUST have names as they appear in gdata/ folder. # MUST include all elemental species at the begining. output_species = H_g He_ref C_g N_g O_g H2_ref CO_g CO2_g CH4_g H2O_g N2_ref NH3_g ``` ## What is the proper TP profile file format? **The correct format of the TP file:**\ Example is given in ./TEA/doc/examples/multiTP/atm_inputs/PT.dat ``` P (bar) T (K) 1.0000e-05 500.00 1.1768e-05 535.35 1.3849e-05 570.71 ... ... 7.2208e+01 3929.29 8.4975e+01 3964.65 1.0000e+02 4000.00 ``` ## How to choose proper molecular species names? The correct names of the molecular species requested to be calculated with TEA are given in this file.\ **./TEA/lib/conversion_record_sorted.txt** Original JANAF species names are not very informative, so we convert them into more informative file names, \ and use only JANAF columns needed to calculate species chemical potentials. 
---------------------**converted**-----------------------------**JANAF names**------------------------------- ``` ./lib/gdata/Al2_g.txt made from ./janaf/Al-080.txt ./lib/gdata/Al2O2_ion_p_g.txt made from ./janaf/Al-095.txt ./lib/gdata/Al2O3_cr_Alpha.txt made from ./janaf/Al-096.txt ./lib/gdata/Al2O3_cr_Delta.txt made from ./janaf/Al-097.txt ./lib/gdata/Al2O3_cr_Gamma.txt made from ./janaf/Al-098.txt ./lib/gdata/Al2O3_cr_Kappa.txt made from ./janaf/Al-099.txt ./lib/gdata/Al2O3_cr-l.txt made from ./janaf/Al-101.txt ./lib/gdata/Al2O3_l.txt made from ./janaf/Al-100.txt ./lib/gdata/Al2O_g.txt made from ./janaf/Al-092.txt ./lib/gdata/Al2O_ion_p_g.txt made from ./janaf/Al-093.txt ... ... ``` **In the TEA.cfg the output species must be written in this way:** ``` output_species = H_g He_ref C_g N_g O_g H2_ref CO_g CO2_g CH4_g H2O_g N2_ref NH3_g ``` ## Elemental abundances file (abundances.txt) **This file is placed in ./TEA/lib/abundances.txt** ``` # This file contains elemental solar abundances as found by Asplund et al. 2009. # http://adsabs.harvard.edu/abs/2009ARA%26A..47..481A # The file is in the public domain. Contact: jasmina@nyu.edu. # Columns: atomic number, atomic symbol, abundance (in dex-decimal exponent), # atomic name, atomic mass (in g/mol). 
0 D 7.30 Deuterium 2.014101 1 H 12.00 Hydrogen 1.008 2 He 10.93 Helium 4.002602 3 Li 1.05 Lithium 6.94 4 Be 1.38 Beryllium 9.012182 5 B 2.70 Boron 10.81 6 C 8.43 Carbon 12.011 7 N 7.83 Nitrogen 14.007 8 O 8.69 Oxygen 15.999 9 F 4.56 Fluorine 18.9984032 10 Ne 7.93 Neon 20.1797 11 Na 6.24 Sodium 22.98976928 12 Mg 7.60 Magnesium 24.3050 13 Al 6.45 Aluminium 26.9815386 14 Si 7.51 Silicon 28.085 15 P 5.41 Phosphorus 30.973762 16 S 7.12 Sulfur 32.06 17 Cl 5.50 Chlorine 35.45 18 Ar 6.40 Argon 39.948 19 K 5.03 Potassium 39.0983 20 Ca 6.34 Calcium 40.078 21 Sc 3.15 Scandium 44.955912 22 Ti 4.95 Titanium 47.867 23 V 3.93 Vanadium 50.9415 24 Cr 5.64 Chromium 51.9961 25 Mn 5.43 Manganese 54.938045 26 Fe 7.50 Iron 55.845 27 Co 4.99 Cobalt 58.933195 28 Ni 6.22 Nickel 58.6934 29 Cu 4.19 Copper 63.546 30 Zn 4.56 Zinc 65.38 31 Ga 3.04 Gallium 69.723 32 Ge 3.65 Germanium 72.63 33 As 0.0 Arsenic 74.92160 ... ``` # Run TEA example in jupyter notebook ```python ''' We will run now first the makeatm.py and then the runatm.py code ''' import numpy as np import os, subprocess, shutil import sys import matplotlib as mpl import matplotlib.pyplot as plt # TEA directory TEAsource = './TEA/' # Making pre-atm file # Call to TEA (we now run makeatm.py code that asks for 2 arguments: makeatm.py <output_dir>) TEAcall = TEAsource + "tea/makeatm.py" # Run TEA with makeatm.py output = 'run_example_ipy' proc = subprocess.Popen([TEAcall, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() # Making final .tea file # Call to TEA (we now run runatm.py code that asks for 3 arguments: runatm.py <path_to_pre-atm> <output_dir>) TEAcall = TEAsource + "tea/runatm.py" preAtm_file = './TEA/doc/examples/multiTP/atm_inputs/multiTP.atm' # Run TEA with runatm.py output = 'run_example_ipy' proc = subprocess.Popen([TEAcall, preAtm_file, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= 
proc.communicate() ``` ## Plot the results ```python import numpy as np import matplotlib.pyplot as plt import os, shutil # Locate the tea results file filename = 'run_example_ipy.tea' # Species names species = 'H2,CO,CO2,CH4,H2O,N2,NH3' # Open the atmospheric file and read f = open(filename, 'r') lines = np.asarray(f.readlines()) f.close() # Get molecules names imol = np.where(lines == "#SPECIES\n")[0][0] + 1 molecules = lines[imol].split() nmol = len(molecules) for m in np.arange(nmol): molecules[m] = molecules[m].partition('_')[0] # Take user input for species and split species strings into separate strings # convert the list to tuple species = tuple(species.split(',')) nspec = len(species) # Populate column numbers for requested species and # update list of species if order is not appropriate columns = [] spec = [] for i in np.arange(nmol): for j in np.arange(nspec): if molecules[i] == species[j]: columns.append(i+2) spec.append(species[j]) # Convert spec to tuple spec = tuple(spec) # Concatenate spec with pressure for data and columns data = tuple(np.concatenate((['p'], spec))) usecols = tuple(np.concatenate(([0], columns))) # Load all data for all interested species data = np.loadtxt(filename, dtype=float, comments='#', delimiter=None, \ converters=None, skiprows=8, usecols=usecols, unpack=True) # Open a figure plt.figure(1, figsize=(10,5)) plt.clf() # Set different colours of lines colors = 'bgrcmykbgrcmyk' color_index = 0 # Plot all specs with different colours and labels for i in np.arange(nspec): plt.loglog(data[i+1], data[0], '-', color=colors[color_index], \ linewidth=2, label=str(spec[i])) color_index += 1 # Label the plot plt.xlabel('Mixing Fraction', fontsize=14) plt.ylabel('Pressure [bar]' , fontsize=14) plt.legend(loc='best', prop={'size':10}) # Temperature range (plt.xlim) and pressure range (plt.ylim) plt.ylim(max(data[0]), min(data[0])) plt.gca().invert_yaxis() # Place plot into plots directory with appropriate name plot_out = 'run_example.png' 
plt.savefig(plot_out) ``` ## Setting different C/O ratio and metallicity 1. Edit abundances file ./TEA/lib/abundances.txt - to get, for example, C/O=1 keep oxygen abundance the same, set C to 8.69 - rename abundances file to, for example, abundances_CO1.txt and place it in the working directory - to get, for example, higher metallicity for oxygen, let's say 10x solar, you will need to change the value from 8.69 to 9.69 2. Edit TEA.cfg - write the path to the new abundances file on line 81\ #Location of abundances file:\ abun_file = ./abundances_CO1.txt 3. Set new name for the TEA results, for example, run_CO1 4. Run and plot TEA as before from the terminal When C/O ratio is solar, water is the most abundant molecule.\ When C/O ratio is close or larger than 1, hydrocarbons (CH4, C2H2, C2H4) become more abundant than water. **Important note:**\ If you do not include hydrocarbons species in your atmospheric model, you will see no change in species abundances with the change of C/O ratio. # 1. Example: How does C/O ratio affect molecular species? In this example I use the TP profile and elemantal abundandances for C-rich and O-rich case from [Kevin Stevenson paper on WASP-12b](https://science.sciencemag.org/content/346/6211/838). 
Input files are given in ./CO-ratio/ directory:\ WASP12b-O-rich/atm_inputs/\ WASP12b-C-rich/atm_inputs/ ## ----- O-rich case ----- ```python # Rename current TEA.cfg file to TEA_example.cfg to save it os.rename('TEA.cfg','TEA_example.cfg') # Take ./CO-ratio/WASP12b-O-rich/atm_inputs/TEA-O-rich.cfg # Rename it to TEA.cfg and place it in the running directory shutil.copyfile('./CO-ratio/WASP12b-O-rich/atm_inputs/TEA_O-rich.cfg', 'TEA.cfg') ``` ### Run TEA (makeatm.py) to produce new pre-atm file for O-rich case ```python # Call TEA to make new atm file TEAcall = TEAsource + "tea/makeatm.py" # Run TEA to make pre-atm file (note that when calling makeatm you have only 2 arguments: makeatm.py output_dir) output = 'run_O-rich' proc = subprocess.Popen([TEAcall, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ``` ### Run TEA (runatm) to produce tea file for O-rich case ```python # Path to the pre-atm file preAtm_file = './run_O-rich/atm_inputs/O-rich.atm' # Call TEA to make the final tea file TEAcall = TEAsource + "tea/runatm.py" # Run TEA (note that when calling runatm you have 3 arguments: runatm.py path_to_pre-atm output_dir) output= 'run_O-rich' proc = subprocess.Popen([TEAcall, preAtm_file, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ``` ### Plot O-rich example ```python # Atmospheric file name filename = './run_O-rich.tea' # Requested species names species = 'H,CO,CO2,CH4,H2O,HCN,C2H2,C2H4,N2,NH3,HS,H2S' # Open the atmospheric file and read f = open(filename, 'r') lines = np.asarray(f.readlines()) f.close() # Get molecules names imol = np.where(lines == "#SPECIES\n")[0][0] + 1 molecules = lines[imol].split() nmol = len(molecules) for m in np.arange(nmol): molecules[m] = molecules[m].partition('_')[0] # Take user input for species and split species strings into separate strings # convert the list to tuple 
species = tuple(species.split(',')) nspec = len(species) # Populate column numbers for requested species and # update list of species if order is not appropriate columns = [] spec = [] for i in np.arange(nmol): for j in np.arange(nspec): if molecules[i] == species[j]: columns.append(i+2) spec.append(species[j]) # Convert spec to tuple spec = tuple(spec) # Concatenate spec with pressure for data and columns data = tuple(np.concatenate((['p'], spec))) usecols = tuple(np.concatenate(([0], columns))) # Load all data for all interested species data = np.loadtxt(filename, dtype=float, comments='#', delimiter=None, \ converters=None, skiprows=8, usecols=usecols, unpack=True) # Open a figure fig_O = plt.figure(10, figsize=(10,5)) plt.clf() # WASP-12b all plots colors = 'b', '#FF1CAE','#FF0000' , '#FFAA00', '#00FFFF', '#00FF00', '#91219E', '#BCEE68' , 'g', '#ffc3a0', 'c','m' color_index = 0 # Plot all specs with different colours and labels for i in np.arange(nspec): # addition for WASP-43b species that does not change with metalicity if i==0 or i==1 or i==8 or i==9 or i==10 or i==11: plt.loglog(data[i+1], data[0], '--', color=colors[color_index], \ linewidth=1, label=str(spec[i])) else: plt.loglog(data[i+1], data[0], '-', color=colors[color_index], \ linewidth=2, label=str(spec[i])) color_index += 1 # Label the plot plt.xlabel('Mixing Fraction' , fontsize=14) plt.ylabel('Pressure [bar]' , fontsize=14) # Font-size of x and y ticks plt.xticks(fontsize=14) plt.yticks(fontsize=14) # WASP-12b solar plt.text(8e-4, 1e-3, 'CO', color='#FF1CAE', fontsize=14) plt.text(6e-10,3e-2, 'CO$_{2}$', color='#FF0000', fontsize=14) plt.text(1e-16,1e-3, 'CH$_{4}$', color='#FFAA00', fontsize=14) plt.text(8e-4,0.2, 'H$_{2}$O', color='#00FFFF', fontsize=14) plt.text(2e-13,5e-3, 'HCN', color='#00FF00', fontsize=14) plt.text(8e-23,1.5e-4, 'C$_{2}$H$_{2}$', color='#91219E', fontsize=14) plt.text(2e-23,7e-3, 'C$_{2}$H$_{4}$', color='#BCEE68', fontsize=14) plt.text(1e-1, 1e-2, 'H', color='b', 
fontsize=14) plt.text(4e-6,1e-4, 'N$_{2}$', color='g', fontsize=14) plt.text(7e-13,3e-5, 'NH$_{3}$', color='#ffc3a0', fontsize=14) plt.text(1e-6, 1e-3, 'HS', color='c', fontsize=14) plt.text(4e-8,7e-1, 'H$_{2}$S', color='m', fontsize=14) # Annotate text plt.text(1e-16, 1e1, 'O-rich case', color='k', fontsize=22) # ======================= Kevin's PT profiles ========================== # # Kevin's PT profile from tp_crp.dat and tp_orp.dat # WASP12b, Stevenson et al 2014 pres = np.array([ 1.00000000e+02, 8.49753000e+01, 7.22081000e+01, 6.13591000e+01, 5.21401000e+01, 4.43062000e+01, 3.76494000e+01, 3.19927000e+01, 2.71859000e+01, 2.31013000e+01, 1.96304000e+01, 1.66810000e+01, 1.41747000e+01, 1.20450000e+01, 1.02353000e+01, 8.69749000e+00, 7.39072000e+00, 6.28029000e+00, 5.33670000e+00, 4.53488000e+00, 3.85353000e+00, 3.27455000e+00, 2.78256000e+00, 2.36449000e+00, 2.00923000e+00, 1.70735000e+00, 1.45083000e+00, 1.23285000e+00, 1.04762000e+00, 8.90215000e-01, 7.56463000e-01, 6.42807000e-01, 5.46228000e-01, 4.64159000e-01, 3.94421000e-01, 3.35160000e-01, 2.84804000e-01, 2.42013000e-01, 2.05651000e-01, 1.74753000e-01, 1.48497000e-01, 1.26186000e-01, 1.07227000e-01, 9.11163000e-02, 7.74264000e-02, 6.57933000e-02, 5.59081000e-02, 4.75081000e-02, 4.03702000e-02, 3.43047000e-02, 2.91505000e-02, 2.47708000e-02, 2.10490000e-02, 1.78865000e-02, 1.51991000e-02, 1.29155000e-02, 1.09750000e-02, 9.32603000e-03, 7.92483000e-03, 6.73415000e-03, 5.72237000e-03, 4.86260000e-03, 4.13201000e-03, 3.51119000e-03, 2.98365000e-03, 2.53536000e-03, 2.15443000e-03, 1.83074000e-03, 1.55568000e-03, 1.32194000e-03, 1.12332000e-03, 9.54548000e-04, 8.11131000e-04, 6.89261000e-04, 5.85702000e-04, 4.97702000e-04, 4.22924000e-04, 3.59381000e-04, 3.05386000e-04, 2.59502000e-04, 2.20513000e-04, 1.87382000e-04, 1.59228000e-04, 1.35305000e-04, 1.14976000e-04, 9.77010000e-05, 8.30218000e-05, 7.05480000e-05, 5.99484000e-05, 5.09414000e-05, 4.32876000e-05, 3.67838000e-05, 3.12572000e-05, 2.65609000e-05, 
2.25702000e-05, 1.91791000e-05, 1.62975000e-05, 1.38489000e-05, 1.17681000e-05, 1.00000000e-05]) # Kevin tp_orp.dat solar # WASP12b, Stevenson et al 2014 temp = np.array([ 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.3 , 2948.26, 2947.68, 2946.6 , 2945.04, 2943.03, 2940.59, 2937.75, 2934.53, 2930.96, 2927.08, 2922.89, 2918.48, 2914.35, 2910.51, 2906.96, 2903.69, 2900.71, 2898.01, 2895.59, 2891.16, 2883.82, 2873.64, 2860.69, 2845.05, 2826.77, 2805.92, 2782.58, 2756.81, 2728.67, 2698.24, 2667.89, 2638.57, 2610.28, 2583.01, 2556.78, 2531.57, 2507.39, 2484.25, 2462.13, 2441.04, 2420.97, 2401.94, 2383.94, 2366.96, 2351.02, 2336.1 , 2322.21, 2309.35, 2297.52, 2286.71, 2276.94, 2268.2 , 2260.48, 2253.79, 2248.13, 2238.36, 2234.76, 2232.19, 2230.64, 2230.13]) # ======================= Kevin's PT profiles ========================== # # WASP-12b plt.xlim(1e-24, 1) # Pressure limits plt.ylim(max(pres), min(pres)) plt.gca().invert_yaxis() # ================ inset plot with PT profile ========================== # # WASP12b all plots b = plt.axes([.21, .22, .10, .19]) # WASP12b solar plt.semilogy(temp, pres, color='k', linestyle='-', linewidth=2) plt.xlim(2000, 3100) # WASP-12b plt.xticks(np.arange(2000, 3100, 500)) plt.xlabel('T [K]' , fontsize=8) plt.ylabel('P [bar]', fontsize=8) plt.yticks(fontsize=8) plt.xticks(fontsize=8) plt.ylim(max(pres), min(pres)) # ================ inset plot with PT profile ========================== # # Save plots plt.savefig('O-rich.png') ``` ## ----- C-rich case ----- ```python # Rename current TEA.cfg file to TEA_O-rich.cfg to save it os.rename('TEA.cfg','TEA_O-rich.cfg') # Take ./CO-ratio/WASP12b-C-rich/atm_inputs/TEA_C-rich.cfg # Rename it 
to TEA.cfg and place it in the running directory shutil.copyfile('./CO-ratio/WASP12b-C-rich/atm_inputs/TEA_C-rich.cfg', 'TEA.cfg') ``` ### Run TEA (makeatm.py) to produce new pre-atm file for C-rich case ```python # Call TEA to make new atm file TEAcall = TEAsource + "tea/makeatm.py" # Run TEA to make pre-atm file (note that when calling makeatm you have only 2 arguments: makeatm.py output_dir) output = 'run_C-rich' proc = subprocess.Popen([TEAcall, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ``` ### Run TEA (runatm.py) to produce tea file for C-rich case ```python # Path to the pre-atm file preAtm_file = './run_C-rich/atm_inputs/C-rich.atm' # Call TEA to make the final tea file TEAcall = TEAsource + "tea/runatm.py" # Run TEA (note that when calling runatm you have 3 arguments: runatm.py path_to_pre-atm output_dir) output= 'run_C-rich' proc = subprocess.Popen([TEAcall, preAtm_file, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ``` ### Plot C-rich example ```python # Atmospheric file name filename = './run_C-rich.tea' # Open the atmospheric file and read f = open(filename, 'r') lines = np.asarray(f.readlines()) f.close() # Load all data for all interested species data = np.loadtxt(filename, dtype=float, comments='#', delimiter=None, \ converters=None, skiprows=8, usecols=usecols, unpack=True) # Open a figure fig_C = plt.figure(11, figsize=(10,5)) plt.clf() color_index = 0 # Plot all specs with different colours and labels for i in np.arange(nspec): # addition for WASP-43b species that does not change with metalicity if i==0 or i==1 or i==8 or i==9 or i==10 or i==11: plt.loglog(data[i+1], data[0], '--', color=colors[color_index], \ linewidth=1, label=str(spec[i])) else: plt.loglog(data[i+1], data[0], '-', color=colors[color_index], \ linewidth=2, label=str(spec[i])) color_index += 1 # Label the plot 
plt.xlabel('Mixing Fraction' , fontsize=14) plt.ylabel('Pressure [bar]' , fontsize=14) # Font-size of x and y ticks plt.xticks(fontsize=14) plt.yticks(fontsize=14) # WASP-12b C/O=1.2 plt.text(8e-4, 10, 'CO', color='#FF1CAE', fontsize=14) plt.text(6e-17,1e-4, 'CO$_{2}$', color='#FF0000', fontsize=14) plt.text(2e-9,6e-5, 'CH$_{4}$', color='#FFAA00', fontsize=14) plt.text(1e-9,6e-3, 'H$_{2}$O', color='#00FFFF', fontsize=14) plt.text(5e-7,3e-2, 'HCN', color='#00FF00', fontsize=13) plt.text(5e-5,7e-2, 'C$_{2}$H$_{2}$', color='#91219E', fontsize=14) plt.text(3e-11, 1, 'C$_{2}$H$_{4}$', color='#BCEE68', fontsize=14) plt.text(2e-2, 1e-4, 'H', color='b', fontsize=14) plt.text(5e-5,8e-3, 'N$_{2}$', color='g', fontsize=14) plt.text(5e-12,2e-3, 'NH$_{3}$', color='#ffc3a0', fontsize=14) plt.text(4.5e-7, 9e-2, 'HS', color='c', fontsize=13) plt.text(2e-7,8e-5, 'H$_{2}$S', color='m', fontsize=13) # Annotate text plt.text(1e-16, 1e1, 'C-rich case', color='k', fontsize=22) # ======================= Kevin's PT profiles ========================== # # Kevin's PT profile from tp_crp.dat and tp_orp.dat # WASP12b, Stevenson et al 2014 pres = np.array([ 1.00000000e+02, 8.49753000e+01, 7.22081000e+01, 6.13591000e+01, 5.21401000e+01, 4.43062000e+01, 3.76494000e+01, 3.19927000e+01, 2.71859000e+01, 2.31013000e+01, 1.96304000e+01, 1.66810000e+01, 1.41747000e+01, 1.20450000e+01, 1.02353000e+01, 8.69749000e+00, 7.39072000e+00, 6.28029000e+00, 5.33670000e+00, 4.53488000e+00, 3.85353000e+00, 3.27455000e+00, 2.78256000e+00, 2.36449000e+00, 2.00923000e+00, 1.70735000e+00, 1.45083000e+00, 1.23285000e+00, 1.04762000e+00, 8.90215000e-01, 7.56463000e-01, 6.42807000e-01, 5.46228000e-01, 4.64159000e-01, 3.94421000e-01, 3.35160000e-01, 2.84804000e-01, 2.42013000e-01, 2.05651000e-01, 1.74753000e-01, 1.48497000e-01, 1.26186000e-01, 1.07227000e-01, 9.11163000e-02, 7.74264000e-02, 6.57933000e-02, 5.59081000e-02, 4.75081000e-02, 4.03702000e-02, 3.43047000e-02, 2.91505000e-02, 2.47708000e-02, 2.10490000e-02, 
1.78865000e-02, 1.51991000e-02, 1.29155000e-02, 1.09750000e-02, 9.32603000e-03, 7.92483000e-03, 6.73415000e-03, 5.72237000e-03, 4.86260000e-03, 4.13201000e-03, 3.51119000e-03, 2.98365000e-03, 2.53536000e-03, 2.15443000e-03, 1.83074000e-03, 1.55568000e-03, 1.32194000e-03, 1.12332000e-03, 9.54548000e-04, 8.11131000e-04, 6.89261000e-04, 5.85702000e-04, 4.97702000e-04, 4.22924000e-04, 3.59381000e-04, 3.05386000e-04, 2.59502000e-04, 2.20513000e-04, 1.87382000e-04, 1.59228000e-04, 1.35305000e-04, 1.14976000e-04, 9.77010000e-05, 8.30218000e-05, 7.05480000e-05, 5.99484000e-05, 5.09414000e-05, 4.32876000e-05, 3.67838000e-05, 3.12572000e-05, 2.65609000e-05, 2.25702000e-05, 1.91791000e-05, 1.62975000e-05, 1.38489000e-05, 1.17681000e-05, 1.00000000e-05]) # Kevin tp_crp.dat C/O=1.2 # WASP12b, Stevenson et al 2014 temp = np.array([ 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.76, 2964.51, 2963.85, 2962.85, 2961.53, 2959.96, 2957.23, 2949.72, 2937.53, 2920.75, 2899.48, 2873.8 , 2844.07, 2810.51, 2773.19, 2732.13, 2687.4 , 2639.98, 2593.58, 2548.2 , 2503.84, 2460.5 , 2418.18, 2376.87, 2336.59, 2297.33, 2259.09, 2221.86, 2185.66, 2150.48, 2116.31, 2083.17, 2051.05, 2019.94, 1989.86, 1960.79, 1932.75, 1905.72, 1879.72, 1854.73, 1830.77, 1807.82, 1785.9 , 1764.99, 1745.1 , 1726.24, 1708.39, 1691.56, 1675.76, 1660.97, 1647.2 , 1634.46, 1622.73, 1612.02, 1602.33, 1593.66, 1586.01, 1579.39, 1573.78, 1564.09, 1560.52, 1557.97, 1556.44, 1555.93]) # ======================= Kevin's PT profiles ========================== # # WASP-12b plt.xlim(1e-24, 1) # Pressure limits plt.ylim(max(pres), min(pres)) plt.gca().invert_yaxis() # ================ inset plot with PT profile ========================== # # WASP12b all 
plots b = plt.axes([.21, .22, .10, .19]) # WASP12b C/O=1.2 plt.semilogy(temp, pres, color='orange', linestyle='--', linewidth=3) plt.xlim(1400, 3100) # WASP-12b plt.xticks(np.arange(1500, 3100, 1500)) # WASP-12b plt.xlabel('T [K]' , fontsize=8) plt.ylabel('P [bar]', fontsize=8) plt.yticks(fontsize=8) plt.xticks(fontsize=8) plt.ylim(max(pres), min(pres)) # ================ inset plot with PT profile ========================== # # Save plots plt.savefig('C-rich.png') ``` ## Compare C-rich vs O-rich case ```python # Revoke O-rich case dummy = plt.figure() new_manager = dummy.canvas.manager new_manager.canvas.figure = fig_O fig_O.set_canvas(new_manager.canvas) # Revoke C-rich case dummy = plt.figure() new_manager = dummy.canvas.manager new_manager.canvas.figure = fig_C fig_C.set_canvas(new_manager.canvas) ``` # 2. Example: How does metallicity affect molecular species? In this example, I use the T-P profile from [Kevin Stevenson paper on WASP-43b](https://science.sciencemag.org/content/346/6211/838). 
Input files are given in ./MH/ directory:\ WASP43b-1xsolar/atm_inputs/\ WASP43b-10xsolar/atm_inputs/\ WASP43b-50xsolar/atm_inputs/ ## ----- 1x solar ----- ```python ''' Rename and rewrite current TEA.cfg ''' # Rename current TEA.cfg file to TEA_C-rich.cfg os.rename('TEA.cfg','TEA_C-rich.cfg') # Take ./MH/WASP43b-1xsolar/atm_inputs/TEA_1xsolar.cfg # Rename it to TEA.cfg and place it in the running directory shutil.copyfile('./MH/WASP43b-1xsolar/atm_inputs/TEA_1xsolar.cfg', 'TEA.cfg') ''' Run TEA to produce new pre-atm file for 1xsolar case ''' # Call TEA to make new atm file TEAcall = TEAsource + "tea/makeatm.py" # Run TEA to make pre-atm file (note that when calling makeatm you have only 2 arguments: makeatm.py output_dir) output = 'run_1xsolar' proc = subprocess.Popen([TEAcall, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ''' Run TEA to produce new tea file for 1xsolar case ''' # Path to the pre-atm file preAtm_file = './run_1xsolar/atm_inputs/1xsolar.atm' # Call TEA to make the final tea file TEAcall = TEAsource + "tea/runatm.py" # Run TEA (note that when calling runatm you have 3 arguments: runatm.py path_to_pre-atm output_dir) output= 'run_1xsolar' proc = subprocess.Popen([TEAcall, preAtm_file, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ``` ## ----- 10x solar ----- ```python ''' Rename and rewrite current TEA.cfg ''' # Rename current TEA.cfg file to TEA_1xsolar.cfg os.rename('TEA.cfg','TEA_1xsolar.cfg') # Take ./MH/WASP43b-10xsolar/atm_inputs/TEA_10xsolar.cfg # Rename it to TEA.cfg and place it in the running directory shutil.copyfile('./MH/WASP43b-10xsolar/atm_inputs/TEA_10xsolar.cfg', 'TEA.cfg') ''' Run TEA to produce new pre-atm file for 10xsolar case ''' # Call TEA to make new atm file TEAcall = TEAsource + "tea/makeatm.py" # Run TEA to make pre-atm file (note that when calling makeatm you 
have only 2 arguments: makeatm.py output_dir) output = 'run_10xsolar' proc = subprocess.Popen([TEAcall, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ''' Run TEA to produce new tea file for 10xsolar case ''' # Path to the pre-atm file preAtm_file = './run_10xsolar/atm_inputs/10xsolar.atm' # Call TEA to make the final tea file TEAcall = TEAsource + "tea/runatm.py" # Run TEA (note that when calling runatm you have 3 arguments: runatm.py path_to_pre-atm output_dir) output= 'run_10xsolar' proc = subprocess.Popen([TEAcall, preAtm_file, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ``` ## ----- 50x solar ----- ```python ''' Rename and rewrite current TEA.cfg ''' # Rename current TEA.cfg file to TEA_10xsolar.cfg os.rename('TEA.cfg','TEA_10xsolar.cfg') # Take ./MH/WASP43b-50xsolar/atm_inputs/TEA_50xsolar.cfg # Rename it to TEA.cfg and place it in the running directory shutil.copyfile('./MH/WASP43b-50xsolar/atm_inputs/TEA_50xsolar.cfg', 'TEA.cfg') ''' Run TEA to produce new pre-atm file for 50xsolar case ''' # Call TEA to make new atm file TEAcall = TEAsource + "tea/makeatm.py" # Run TEA to make pre-atm file (note that when calling makeatm you have only 2 arguments: makeatm.py output_dir) output = 'run_50xsolar' proc = subprocess.Popen([TEAcall, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ''' Run TEA to produce new tea file for 50xsolar case ''' # Path to the pre-atm file preAtm_file = './run_50xsolar/atm_inputs/50xsolar.atm' # Call TEA to make the final tea file TEAcall = TEAsource + "tea/runatm.py" # Run TEA (note that when calling runatm you have 3 arguments: runatm.py path_to_pre-atm output_dir) output= 'run_50xsolar' proc = subprocess.Popen([TEAcall, preAtm_file, output],stdout=subprocess.PIPE,universal_newlines=True) for 
line in proc.stdout: print(line, end="") stdout= proc.communicate() ``` ### Plot different metallicity cases ```python # Atmospheric file name filename = './run_1xsolar.tea' # Open the atmospheric file and read f = open(filename, 'r') lines = np.asarray(f.readlines()) f.close() # Load all data for all interested species data = np.loadtxt(filename, dtype=float, comments='#', delimiter=None, \ converters=None, skiprows=8, usecols=usecols, unpack=True) # Open a figure plt.figure(1, figsize=(10,5)) plt.clf() color_index = 0 # Plot all specs with different colours and labels for i in np.arange(nspec): # addition for WASP-43b species that does not change with metallicity if i==0 or i==3 or i==5 or i==6 or i==7 or i==9: plt.loglog(data[i+1], data[0], '--', color=colors[color_index], \ linewidth=2, label=str(spec[i])) else: plt.loglog(data[i+1], data[0], '-', color=colors[color_index], \ linewidth=3, label=str(spec[i])) color_index += 1 # Label the plot plt.xlabel('Mixing Fraction' , fontsize=14) plt.ylabel('Pressure [bar]' , fontsize=14) # Font-size of x and y ticks plt.xticks(fontsize=14) plt.yticks(fontsize=14) # WASP43b 1xsolar plt.text(2e-8, 1e-4, 'H', color='b', fontsize=14) plt.text(8e-4, 1, 'CO', color='#FF1CAE', fontsize=14) plt.text(5e-7,1e-4, 'CO$_{2}$', color='#FF0000', fontsize=14) plt.text(2e-12,3e-5, 'CH$_{4}$', color='#FFAA00', fontsize=14) plt.text(8e-4,1e-3, 'H$_{2}$O', color='#00FFFF', fontsize=14) plt.text(7e-14,2e-4, 'HCN', color='#00FF00', fontsize=14) plt.text(2e-19,2e-5, 'C$_{2}$H$_{2}$', color='#91219E', fontsize=14) plt.text(4e-19,6e-4, 'C$_{2}$H$_{4}$', color='#BCEE68', fontsize=14) plt.text(7e-5,1e-4, 'N$_{2}$', color='g', fontsize=14) plt.text(1.5e-10,2e-5, 'NH$_{3}$', color='#ffc3a0', fontsize=14) plt.text(2.5e-9, 6e-6, 'HS', color='c', fontsize=14) plt.text(1e-6,1e-5, 'H$_{2}$S', color='m', fontsize=14) # ======================= Kevin's PT profile ========================== # # Kevin's PT profile from WASP43b_PHASE7_NH3_H2S_TP.txt # WASP43b, 
Stevenson et al 2014 pres = np.array([ 3.16227770e+01, 2.63026800e+01, 2.18776160e+01, 1.81970090e+01, 1.51356120e+01, 1.25892540e+01, 1.04712850e+01, 8.70963590e+00, 7.24435960e+00, 6.02559590e+00, 5.01187230e+00, 4.16869380e+00, 3.46736850e+00, 2.88403150e+00, 2.39883290e+00, 1.99526230e+00, 1.65958690e+00, 1.38038430e+00, 1.14815360e+00, 9.54992590e-01, 7.94328230e-01, 6.60693450e-01, 5.49540870e-01, 4.57088190e-01, 3.80189400e-01, 3.16227770e-01, 2.63026800e-01, 2.18776160e-01, 1.81970090e-01, 1.51356120e-01, 1.25892540e-01, 1.04712850e-01, 8.70963590e-02, 7.24435960e-02, 6.02559590e-02, 5.01187230e-02, 4.16869380e-02, 3.46736850e-02, 2.88403150e-02, 2.39883290e-02, 1.99526230e-02, 1.65958690e-02, 1.38038430e-02, 1.14815360e-02, 9.54992590e-03, 7.94328230e-03, 6.60693450e-03, 5.49540870e-03, 4.57088190e-03, 3.80189400e-03, 3.16227770e-03, 2.63026800e-03, 2.18776160e-03, 1.81970090e-03, 1.51356120e-03, 1.25892540e-03, 1.04712850e-03, 8.70963590e-04, 7.24435960e-04, 6.02559590e-04, 5.01187230e-04, 4.16869380e-04, 3.46736850e-04, 2.88403150e-04, 2.39883290e-04, 1.99526230e-04, 1.65958690e-04, 1.38038430e-04, 1.14815360e-04, 9.54992590e-05, 7.94328230e-05, 6.60693450e-05, 5.49540870e-05, 4.57088190e-05, 3.80189400e-05, 3.16227770e-05, 2.63026800e-05, 2.18776160e-05, 1.81970090e-05, 1.51356120e-05, 1.25892540e-05, 1.04712850e-05, 8.70963590e-06, 7.24435960e-06, 6.02559590e-06, 5.01187230e-06, 4.16869380e-06, 3.46736850e-06, 2.88403150e-06, 2.39883290e-06]) # Kevin's PT profile from WASP43b_PHASE7_NH3_H2S_TP.txt # WASP43b, Stevenson et al 2014 temp = np.array([ 1811.8938 , 1810.9444 , 1810.1535 , 1809.4948 , 1808.9463 , 1808.4898 , 1808.1098 , 1807.7936 , 1807.5304 , 1807.3114 , 1807.1291 , 1806.9766 , 1806.8464 , 1806.7212 , 1806.53 , 1806.1269 , 1805.2849 , 1803.7403 , 1800.5841 , 1794.9518 , 1786.6255 , 1775.3705 , 1761.0973 , 1742.3631 , 1719.6396 , 1694.0976 , 1666.1517 , 1636.2055 , 1603.0265 , 1567.6227 , 1531.3326 , 1494.5529 , 1457.6432 , 1419.5923 , 
1381.4921 , 1344.4864 , 1308.8483 , 1274.7949 , 1241.6997 , 1210.3302 , 1181.2851 , 1154.5844 , 1130.2066 , 1107.7735 , 1087.5396 , 1069.5885 , 1053.7529 , 1039.8606 , 1027.6493 , 1017.057 , 1007.9573 , 1000.1681 , 993.52384, 987.85759, 983.05871, 979.01311, 975.60886, 972.74937, 970.3489 , 968.33983, 966.66146, 965.26054, 964.09216, 963.11826, 962.30738, 961.63264, 961.0714 , 960.60473, 960.2169 , 959.89468, 959.627 , 959.40467, 959.22003, 959.06677, 958.93955, 958.83394, 958.74627, 958.6735 , 958.61313, 958.56303, 958.52146, 958.48696, 958.45833, 958.43458, 958.41487, 958.39852, 958.38496, 958.3737 , 958.36437, 958.35662]) # ======================= Kevin's PT profile ========================== # # WASP-43b Kevin plt.xlim(1e-21, 1) # Pressure limits plt.ylim(max(pres), min(pres)) plt.gca().invert_yaxis() # ================ inset plot with PT profile ========================== # # WASP-43 Kevin all plots b = plt.axes([.21, .25, .10, .19]) # WASP-43b all metallicities plt.semilogy(temp, pres, color='r', linestyle='-', linewidth=1) plt.xlim(700, 2000) plt.xticks(np.arange(1000, 2001, 500)) plt.xlabel('T [K]' , fontsize=8) plt.ylabel('P [bar]', fontsize=8) plt.yticks(fontsize=8) plt.xticks(fontsize=8) plt.ylim(max(pres), min(pres)) # ================ inset plot with PT profile ========================== # # Save plots plt.savefig('1xsolar.png') ''' 10xsolar ''' # Atmospheric file name filename = './run_10xsolar.tea' # Open the atmospheric file and read f = open(filename, 'r') lines = np.asarray(f.readlines()) f.close() # Load all data for all interested species data = np.loadtxt(filename, dtype=float, comments='#', delimiter=None, \ converters=None, skiprows=8, usecols=usecols, unpack=True) # Open a figure plt.figure(2, figsize=(10,5)) plt.clf() color_index = 0 # Plot all specs with different colours and labels for i in np.arange(nspec): # addition for WASP-43b species that does not change with metallicity if i==0 or i==3 or i==5 or i==6 or i==7 or i==9: 
plt.loglog(data[i+1], data[0], '--', color=colors[color_index], \ linewidth=2, label=str(spec[i])) else: plt.loglog(data[i+1], data[0], '-', color=colors[color_index], \ linewidth=3, label=str(spec[i])) color_index += 1 # Label the plot plt.xlabel('Mixing Fraction', fontsize=14) plt.ylabel('Pressure [bar]' , fontsize=14) # Font-size of x and y ticks plt.xticks(fontsize=14) plt.yticks(fontsize=14) # WASP43b 10xsolar plt.text(2e-6, 2e-2, 'H', color='b', fontsize=14) plt.text(8e-3, 1, 'CO', color='#FF1CAE', fontsize=14) plt.text(1.5e-6,1e-4, 'CO$_{2}$', color='#FF0000', fontsize=14) plt.text(9e-12,8e-5, 'CH$_{4}$', color='#FFAA00', fontsize=14) plt.text(8e-3,1e-3, 'H$_{2}$O', color='#00FFFF', fontsize=14) plt.text(3.5e-13,4e-4, 'HCN', color='#00FF00', fontsize=14) plt.text(2e-19,2e-5, 'C$_{2}$H$_{2}$', color='#91219E', fontsize=14) plt.text(4e-21,1.5e-4, 'C$_{2}$H$_{4}$', color='#BCEE68', fontsize=14) plt.text(8e-4,1e-4, 'N$_{2}$', color='g', fontsize=14) plt.text(4e-10,7e-4, 'NH$_{3}$', color='#ffc3a0', fontsize=14) plt.text(3e-7, 3e-5, 'HS', color='c', fontsize=14) plt.text(1e-5,3, 'H$_{2}$S', color='m', fontsize=14) # WASP-43b Kevin plt.xlim(1e-21, 1) # Pressure limits plt.ylim(max(pres), min(pres)) plt.gca().invert_yaxis() # Save plots plt.savefig('10xsolar.png') ''' 50xsolar ''' # Set the atmospheric file filename = './run_50xsolar.tea' # Open the atmospheric file and read f = open(filename, 'r') lines = np.asarray(f.readlines()) f.close() # Load all data for all interested species data = np.loadtxt(filename, dtype=float, comments='#', delimiter=None, \ converters=None, skiprows=8, usecols=usecols, unpack=True) # Open a figure plt.figure(3, figsize=(10,5)) plt.clf() color_index = 0 # Plot all specs with different colours and labels for i in np.arange(nspec): # addition for WASP-43b species that does not change with metallicity if i==0 or i==3 or i==5 or i==6 or i==7 or i==9: plt.loglog(data[i+1], data[0], '--', color=colors[color_index], \ linewidth=2, 
label=str(spec[i])) else: plt.loglog(data[i+1], data[0], '-', color=colors[color_index], \ linewidth=3, label=str(spec[i])) color_index += 1 # Label the plot plt.xlabel('Mixing Fraction', fontsize=14) plt.ylabel('Pressure [bar]' , fontsize=14) # Font-size of x and y ticks plt.xticks(fontsize=14) plt.yticks(fontsize=14) # WASP43b 50xsolar plt.text(4e-8, 2e-5, 'H', color='b', fontsize=14) plt.text(4e-2, 1, 'CO', color='#FF1CAE', fontsize=14) plt.text(3e-5,1e-4, 'CO$_{2}$', color='#FF0000', fontsize=14) plt.text(4e-12,6e-6, 'CH$_{4}$', color='#FFAA00', fontsize=14) plt.text(4e-2,1e-3, 'H$_{2}$O', color='#00FFFF', fontsize=14) plt.text(5e-13,3e-4, 'HCN', color='#00FF00', fontsize=14) plt.text(2e-19,2e-5, 'C$_{2}$H$_{2}$', color='#91219E', fontsize=14) plt.text(4e-21,1.5e-4, 'C$_{2}$H$_{4}$', color='#BCEE68', fontsize=14) plt.text(3.5e-3,1e-4, 'N$_{2}$', color='g', fontsize=14) plt.text(8e-7,3e-1, 'NH$_{3}$', color='#ffc3a0', fontsize=14) plt.text(8.5e-7, 5e-5, 'HS', color='c', fontsize=14) plt.text(3e-5,1e-5, 'H$_{2}$S', color='m', fontsize=14) # WASP-43b Kevin plt.xlim(1e-21, 1) # Pressure limits plt.ylim(max(pres), min(pres)) plt.gca().invert_yaxis() # Save plots plt.savefig('50xsolar.png') ``` # 3. Example: How do species abundances transition with temperature? 
```python ''' Rename and rewrite current TEA.cfg ''' # Rename current TEA.cfg file to TEA_50xsolar.cfg os.rename('TEA.cfg','TEA_50xsolar.cfg') # Take ./Transitions/atm_inputs/TEA_transitions.cfg # Rename it to TEA.cfg and place it in the running directory shutil.copyfile('./Transitions/atm_inputs/TEA_transitions.cfg', 'TEA.cfg') ''' Run TEA to produce new pre-atm file for 1xsolar case ''' # Call TEA to make new atm file TEAcall = TEAsource + "tea/makeatm.py" # Run TEA to make pre-atm file (note that when calling makeatm you have only 2 arguments: makeatm.py output_dir) output = 'run_transitions' proc = subprocess.Popen([TEAcall, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ''' Run TEA to produce new tea file for 1xsolar case ''' # Path to the pre-atm file preAtm_file = './run_transitions/atm_inputs/transitions.atm' # Call TEA to make the final tea file TEAcall = TEAsource + "tea/runatm.py" # Run TEA (note that when calling runatm you have 3 arguments: runatm.py path_to_pre-atm output_dir) output= 'run_transitions' proc = subprocess.Popen([TEAcall, preAtm_file, output],stdout=subprocess.PIPE,universal_newlines=True) for line in proc.stdout: print(line, end="") stdout= proc.communicate() ``` ## Plot transitions ```python # Atmospheric file name filename = 'run_transitions.tea' # Species of interest species = 'CO,CO2,CH4,H2O,N2,NH3' # Open the atmospheric file and read f = open(filename, 'r') lines = np.asarray(f.readlines()) f.close() # Get molecules names imol = np.where(lines == "#SPECIES\n")[0][0] + 1 molecules = lines[imol].split() nmol = len(molecules) for m in np.arange(nmol): molecules[m] = molecules[m].partition('_')[0] # Take user input for species and split species strings into separate strings # convert the list to tuple species = tuple(species.split(',')) nspec = len(species) # Populate column numbers for requested species and # update list of species if order is not 
appropriate columns = [] spec = [] for i in np.arange(nmol): for j in np.arange(nspec): if molecules[i] == species[j]: columns.append(i+2) spec.append(species[j]) # Convert spec to tuple spec = tuple(spec) # Concatenate spec with temperature for data and columns data = tuple(np.concatenate((['T'], spec))) usecols = tuple(np.concatenate(([1], columns))) # Load all data for all interested specs data = np.loadtxt(filename, dtype=float, comments='#', delimiter=None, \ converters=None, skiprows=8, usecols=usecols, unpack=True) # Open a figure plt.figure(5, figsize=(12,8)) plt.clf() # Set different colours of lines colors = 'rmkcgb' color_index = 0 # Plot all specs with different colours and labels for i in np.arange(nspec): if i==1: plt.semilogy(data[0], data[i+1], '-', color=colors[color_index], \ linewidth=2) plt.semilogy(data[0], data[i+1], '-', color=colors[color_index], linewidth=2) color_index += 1 # Attotation plt.annotate('CO', (2500, 8e-4), fontsize=18, color='r') plt.annotate('CO2', (2500, 2e-8), fontsize=18, color='m') plt.annotate('CH$_{4}$', (1450, 1e-5), fontsize=18, color='k') plt.annotate('H$_{2}$O' , (850, 1.3e-3), fontsize=18, color='cyan') plt.annotate('N$_{2}$', (2100, 1e-4), fontsize=18, color='g') plt.annotate('NH$_{3}$', (1140, 2.5e-6), fontsize=18, color='b') plt.xlabel('Temperature [K]' , fontsize=22) plt.ylabel('Mixing Fraction', fontsize=22) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.xlim(100, 3000) plt.ylim(3e-10, 3e-3) plt.savefig("Transitions.png") ``` ### Revert TEA.cfg to the starting position ```python ''' Rename and rewrite current TEA.cfg ''' # Rename current TEA.cfg file to TEA_50xsolar.cfg os.rename('TEA.cfg','TEA_transitions.cfg') # Revert to the starting conditions before running notebook shutil.copyfile('TEA_example.cfg', 'TEA.cfg') # Now all the varibales are reseted # You can Restart the Kernel and Clear all Output # Run All again ``` # Homework ``` 1. Generate 10x sub-solar metalicity case. 
Answer the question of how this changes the major species abundances? Hint: - you need to edit abundances.txt file for all input elemental species and save it as a new file - you need to edit the abundances line in TEA.cfg and provide a path to the new abundances file - provide new name for the pre-atm file - when running makeatm.py and runatm.py give new output directory name 2. Generate C/O=0.7 case. Answer the question of how and whether this ratio affects H2O and hydrocarbons? Hint: - you need to edit abundances.txt file for C and O only and save it as a new file - you need to edit the abundances line in TEA.cfg and provide a path to the new abundances file - provide new name for the pre-atm file - when running makeatm.py and runatm.py give new output directory name ``` # References 1. TEA Code paper - [Blecic et al. 2016, TEA: A CODE CALCULATING THERMOCHEMICAL EQUILIBRIUM ABUNDANCE, ApJSS 225:4,2016](https://iopscience.iop.org/article/10.3847/0067-0049/225/1/4/pdf) 2. Compendium for TEA Paper containing all the code to produce the plots - [TEA Compendium](http://dx.doi.org/10.5281/zenodo.50378). Also available [on GitHub](https://github.com/dzesmin/RRC-BlecicEtal-2015a-ApJS-TEA) 3. TEA basic instructions on how to install and run TEA - [TEA Start Guide](https://github.com/dzesmin/TEA/blob/master/doc/start_guide.txt) 4. TEA user instructions - [TEA User Manual](https://github.com/dzesmin/TEA/blob/master/doc/TEA-UserManual.pdf) 5. TEA developer instructions - [TEA Code Description](https://github.com/dzesmin/TEA/blob/master/doc/TEA-CodeDescription.pdf) 6. Completely executed TEA examples - [TEA Examples](https://github.com/dzesmin/TEA-Examples) 7. Rules for how one can continue developing TEA - [TEA RR License](https://github.com/dzesmin/TEA/blob/master/RR-LICENSE-0.3.txt) 8. Rules for making research compendium following RR license - [How to make TEA compendium](https://github.com/dzesmin/TEA/blob/master/HOWTO-COMPENDIUM.txt) 9. 
Kevin Stevenson paper on WASP-12b - [WASP-12b](https://science.sciencemag.org/content/346/6211/838) 10. Kevin Stevenson paper on WASP-43b - [WASP-43b](https://science.sciencemag.org/content/346/6211/838) ```python ```
dzesminREPO_NAMETEAPATH_START.@TEA_extracted@TEA-master@doc@examples@jupyter_tutorial@TEA-tutorial.ipynb@.PATH_END.py
{ "filename": "Elsewhere.py", "repo_name": "xpsi-group/xpsi", "repo_path": "xpsi_extracted/xpsi-main/xpsi/Elsewhere.py", "type": "Python" }
from xpsi.global_imports import *

# Compiled cell-mesh / ray / integrator extensions; argument order into these
# calls is part of their C-level signatures and must not be changed.
from xpsi.cellmesh.global_mesh import construct_closed_cellMesh as _construct_closed_cellMesh
from xpsi.cellmesh.rays import compute_rays as _compute_rays
from xpsi.cellmesh.integrator_for_time_invariance import integrate as _integrator

from xpsi.Parameter import Parameter
from xpsi.ParameterSubspace import ParameterSubspace


class AtmosError(xpsiError):
    """ Raised if the numerical atmosphere data were not preloaded. """

class RayError(xpsiError):
    """ Raised if a problem was encountered during ray integration. """

class IntegrationError(xpsiError):
    """ Raised if a problem was encountered during signal integration. """


class Elsewhere(ParameterSubspace):
    r""" The photospheric radiation field *elsewhere*.

    This means the radiation field exterior to the hot regions. The local
    comoving radiation field properties are *assumed* (for now) to be
    azimuthally invariant but can in principle vary colatitudinally.

    :param int sqrt_num_cells:
        Number of cells in both colatitude and azimuth which form a regular
        mesh on the surface. Must be an even number such that half of the
        cells are exactly in one hemisphere. The total number of cells is
        the square argument value. The mesh suffers from squeezing in the
        polar regions, leading to a high degree of non-congruence in cell
        shape over the surface.

    :param int num_rays:
        Number of rays to trace (integrate) at each colatitude, distributed
        in angle subtended between ray tangent 4-vector and radial outward
        unit vector w.r.t a local orthonormal tetrad.

    :param dict bounds:
        If ``custom is None``, these bounds are supplied for instantiation
        of a temperature parameter. The parameter name
        ``'elsewhere_temperature'`` must be a key in the dictionary unless
        the parameter is *fixed* or *derived*. If a bound is ``None`` that
        bound is set equal to a strict hard-coded bound. We note that the
        bounds for parameters used in the atmosphere model should be
        restricted (by the user) to be within the tabulated values, in case
        a numerical atmosphere extension is used.

    :param dict values:
        Either the fixed value of the temperature elsewhere, a callable if
        the temperature is *derived*, or a value upon initialisation if the
        temperature is free. The dictionary must have a key with name
        ``'elsewhere_temperature'`` if it is *fixed* or *derived*.

    :param str atm_ext:
        Used to determine which atmospheric extension to use.
        Options at the moment:
        "BB": Analytical blackbody (default),
        "Num4D": Numerical atmosphere using 4D-interpolation from the
        provided atmosphere data,
        "user": A user-provided extension which can be set up by replacing
        the contents of the file hot_user.pyx (and elsewhere_user.pyx if
        needed) and re-installing X-PSI (if not changed, "user" is the same
        as "BB").

    :param iterable custom:
        Iterable over :class:`~.Parameter.Parameter` instances. If you
        supply custom parameter definitions, you need to overwrite the
        :func:`~.Elsewhere.Elsewhere._compute_cellParamVecs` method to
        handle your custom behaviour.

    :param int image_order_limit:
        The highest-order image to sum over. A value of *one* means primary
        images only (deflections :math:`<\pi`) whilst a value of *two*
        means primary and secondary images (deflections :math:`<2pi`)
        where visible, and so on. If ``None`` (the default), there is no
        hard limit. In this case the limit is determined quasi-naturally
        for each mesh element, meaning that images will be summed over
        until higher order images are not visible or the visibility limit
        is truncated due to lack of numerical precision (e.g. for rays that
        orbit very close to the Schwarzschild photon sphere three times or
        more). Higher-order images generally contribute less and less due
        to geometric projection effects (higher-order images become more
        tangential), and the images of elements get squeezed in solid angle
        at the stellar limb. In principle, effects such as relativistic
        beaming can counter this effect to a degree for certain
        source-receiver configurations, by increasing brightness whilst
        solid angle decreases, and thus the flux contribution relative to
        that from a primary image can be greater than suggested simply by
        geometric project effects. Nevertheless, inclusion of these images
        is more computationally expensive. If, when iterating through image
        orders, an image is not visible because the deflection required is
        greater than the highest deflection permitted at a given colatitude
        on a surface (accounting for the surface tilt due to rotation),
        then the iteration over image orders terminates.

    """
    required_names = ['elsewhere_temperature (if no custom specification)']

    def __init__(self,
                 sqrt_num_cells = 64,
                 num_rays = 1000,
                 bounds = None,
                 values = None,
                 atm_ext="BB",
                 custom = None,
                 image_order_limit = None):
        # Property setters below validate these numerical settings.
        self.sqrt_num_cells = sqrt_num_cells
        self.num_rays = num_rays
        self.image_order_limit = image_order_limit

        self.atm_ext = atm_ext

        if bounds is None: bounds = {}
        if values is None: values = {}

        if not custom: # setup default temperature parameter
            T = Parameter('elsewhere_temperature',
                          strict_bounds = (3.0, 7.6), # very cold --> very hot
                          bounds = bounds.get('elsewhere_temperature', None),
                          doc = 'log10 of the effective temperature elsewhere',
                          symbol = r'$\log_{10}(T_{\rm EW}\;[\rm{K}])$',
                          value = values.get('elsewhere_temperature', None))
        else: # let the custom subclass handle definitions; ignore bounds
            T = None

        super(Elsewhere, self).__init__(T, custom)

    @property
    def num_rays(self):
        """ Get the number of rays integrated per colatitude. """
        return self._num_rays

    @num_rays.setter
    def num_rays(self, n):
        """ Set the number of rays integrated per colatitude. """
        try:
            self._num_rays = int(n)
        except TypeError:
            raise TypeError('Number of rays must be an integer.')

    @property
    def sqrt_num_cells(self):
        """ Get the number of cell colatitudes. """
        return self._sqrt_num_cells

    @sqrt_num_cells.setter
    def sqrt_num_cells(self, n):
        """ Set the number of cell colatitudes.

        Must be an even integer >= 10; also caches the total cell count
        (the square of this value).
        """
        try:
            _n = int(n)
        except TypeError:
            raise TypeError('Number of cells must be an integer.')
        else:
            if not _n >= 10 or _n%2 != 0:
                raise ValueError('Number of cells must be a positive even '
                                 'integer greater than or equal to ten.')
        self._sqrt_num_cells = _n
        self._num_cells = _n**2

    @property
    def num_cells(self):
        """ Get the total number of cells in the mesh. """
        return self._num_cells

    @property
    def atm_ext(self):
        """ Get the atmosphere extension as an integer switch.

        1 = "BB" (blackbody), 2 = "Num4D" (numerical, 4D interpolation),
        3 = "user" (user-provided extension); see the setter below.
        """
        return self._atm_ext

    @atm_ext.setter
    def atm_ext(self,extension):
        # Map the string option to the integer code consumed by the
        # compiled integrator.
        if extension=="BB":
            self._atm_ext = 1
        elif extension=="Num4D":
            self._atm_ext = 2
        elif extension=="user":
            self._atm_ext = 3
        else:
            raise TypeError('Got an unrecognised atm_ext argument. Note that the only allowed '
                            'atmosphere options for Elsewhere are at the moment "BB", "Num4D", and "user".')

    @property
    def image_order_limit(self):
        """ Get the image order limit. """
        return self._image_order_limit

    @image_order_limit.setter
    def image_order_limit(self, limit):
        """ Set the image order limit.

        ``None`` means no hard limit (see the class docstring).
        """
        # NOTE(review): the message promises a *positive* integer but
        # positivity is not actually enforced here — confirm intent.
        if limit is not None:
            if not isinstance(limit, int):
                raise TypeError('Image order limit must be an positive integer '
                                'if not None.')
        self._image_order_limit = limit

    def print_settings(self):
        """ Print numerical settings. """
        print('Number of cell colatitudes: ', self.sqrt_num_cells)
        print('Number of rays per colatitude: ', self.num_rays)

    def _construct_cellMesh(self, st, threads):
        """ Call a low-level routine to construct a mesh representation.

        Caches the mesh coordinates, cell areas, maximum ray angles,
        surface-tilt cosines, and effective gravity on ``self``.

        :param st: Instance of :class:`~.Spacetime.Spacetime`.

        :param int threads: Number of ``OpenMP`` threads for mesh
                            construction.

        """
        (self._theta,
         self._phi,
         self._r,
         self._cellArea,
         self._maxAlpha,
         self._cos_gamma,
         self._effGrav) = _construct_closed_cellMesh(threads,
                                                     self._sqrt_num_cells,
                                                     self._num_cells,
                                                     st.M,
                                                     st.r_s,
                                                     st.R,
                                                     st.zeta,
                                                     st.epsilon,
                                                     st.star_shape_ind)

    def _compute_rays(self, st, threads):
        """ Trace (integrate) a set of rays.

        These rays represent a null mapping from photosphere to a point at
        some effective infinity.

        :param st: Instance of :class:`~.Spacetime.Spacetime`.

        :param int threads: Number of ``OpenMP`` threads for ray
                            integration.

        """
        # _contig / _np come from xpsi.global_imports (star import above);
        # _contig presumably returns a C-contiguous array — confirm there.
        self._r_s_over_r = _contig(st.r_s / self._r, dtype = _np.double)

        (terminate_calculation,
         self._deflection,
         self._cos_alpha,
         self._lag,
         self._maxDeflection) = _compute_rays(threads,
                                              self._sqrt_num_cells,
                                              st.r_s,
                                              self._r_s_over_r,
                                              self._maxAlpha,
                                              self._num_rays)

        if terminate_calculation == 1:
            raise RayError('Fatal numerical problem during ray integration.')

    def _compute_cellParamVecs(self, *args):
        """ Precompute photospheric source radiation field parameter
        vectors cell-by-cell.

        Free model parameters and derived (fixed) variables can be
        transformed into local comoving radiation field variables.

        Subclass and overwrite with custom functionality if you desire.

        :param tuple args: An *ndarray[n,n]* of mesh-point colatitudes.
            When supplied (hot-region mesh shape), the parameter-vector
            array is *returned*; otherwise it is cached on ``self`` and the
            last slot of each vector is scaled by the effective gravity.

        """
        if args: # hot region mesh shape information
            cellParamVecs = _np.ones((args[0].shape[0],
                                      args[0].shape[1],
                                      len(self.vector)+1),
                                     dtype=_np.double)
            # get self.vector because there may be fixed variables
            # that also need to be directed to the integrators
            # for intensity evaluation
            cellParamVecs[...,:-1] *= _np.array(self.vector)
            return cellParamVecs
        else:
            self._cellParamVecs = _np.ones((self._theta.shape[0],
                                            self._theta.shape[1],
                                            len(self.vector)+1),
                                           dtype=_np.double)
            self._cellParamVecs[...,:-1] *= _np.array(self.vector)
            # Final slot carries the effective gravity per colatitude row.
            for i in range(self._cellParamVecs.shape[1]):
                self._cellParamVecs[:,i,-1] *= self._effGrav

    def embed(self, spacetime, threads):
        """ Embed the photosphere elsewhere.

        Builds the closed mesh, traces the rays, and caches the
        cell-by-cell parameter vectors, in that order.
        """
        self._construct_cellMesh(spacetime, threads)
        self._compute_rays(spacetime, threads)
        self._compute_cellParamVecs()

    def integrate(self, st, energies, threads, *atmosphere):
        """ Integrate over the photospheric radiation field.

        :param st: Instance of :class:`~.Spacetime.Spacetime`.

        :param energies: A one-dimensional :class:`numpy.ndarray` of
                         energies in keV (possibly wrapped in nested
                         tuples; unwrapped below).

        :param int threads: Number of ``OpenMP`` threads the integrator is
                            permitted to spawn.

        :param atmosphere: Optional preloaded numerical-atmosphere data;
                           required when ``atm_ext == "Num4D"``.

        :raises AtmosError: If the numerical extension is selected but no
                            atmosphere data were supplied.
        :raises IntegrationError: If the compiled integrator signals a
                                  fatal numerical error.

        """
        if isinstance(energies, tuple): # resolve energy container type
            if not isinstance(energies[0], tuple):
                _energies = energies[0]
            else:
                _energies = energies[0][0]
        else:
            _energies = energies

        if self._atm_ext==2:
            if atmosphere == ():
                raise AtmosError('The numerical atmosphere data were not preloaded, '
                                 'even though that is required by the current atmosphere extension.')

        out = _integrator(threads,
                          st.R,
                          st.Omega,
                          st.r_s,
                          st.i,
                          self._sqrt_num_cells,
                          self._cellArea,
                          self._r,
                          self._r_s_over_r,
                          self._theta,
                          self._phi,
                          self._cellParamVecs,
                          self._num_rays,
                          self._deflection,
                          self._cos_alpha,
                          self._maxDeflection,
                          self._cos_gamma,
                          _energies,
                          atmosphere,
                          self._atm_ext,
                          self._image_order_limit)
        if out[0] == 1:
            raise IntegrationError('Fatal numerical error during elsewhere integration.')

        return out[1]

Elsewhere._update_doc()
xpsi-groupREPO_NAMExpsiPATH_START.@xpsi_extracted@xpsi-main@xpsi@Elsewhere.py@.PATH_END.py
{ "filename": "test_tz_localize.py", "repo_name": "pandas-dev/pandas", "repo_path": "pandas_extracted/pandas-main/pandas/tests/indexes/datetimes/methods/test_tz_localize.py", "type": "Python" }
"""Tests for :meth:`DatetimeIndex.tz_localize`."""
from datetime import (
    datetime,
    timedelta,
    timezone,
)
from zoneinfo import ZoneInfo

import dateutil.tz
from dateutil.tz import gettz
import numpy as np
import pytest

from pandas import (
    DatetimeIndex,
    Timestamp,
    bdate_range,
    date_range,
    offsets,
    to_datetime,
)
import pandas._testing as tm


# US/Eastern from three tz providers: pytz (optional), dateutil, zoneinfo.
@pytest.fixture(params=["pytz/US/Eastern", gettz("US/Eastern"), ZoneInfo("US/Eastern")])
def tz(request):
    if isinstance(request.param, str) and request.param.startswith("pytz/"):
        pytz = pytest.importorskip("pytz")
        return pytz.timezone(request.param.removeprefix("pytz/"))
    return request.param


class TestTZLocalize:
    """Behaviour of tz_localize: freq preservation, DST ambiguity and
    nonexistent-time handling, and round-tripping."""

    def test_tz_localize_invalidates_freq(self):
        # we only preserve freq in unambiguous cases

        # if localized to US/Eastern, this crosses a DST transition
        dti = date_range("2014-03-08 23:00", "2014-03-09 09:00", freq="h")
        assert dti.freq == "h"

        result = dti.tz_localize(None)  # no-op
        assert result.freq == "h"

        result = dti.tz_localize("UTC")  # unambiguous freq preservation
        assert result.freq == "h"

        result = dti.tz_localize("US/Eastern", nonexistent="shift_forward")
        assert result.freq is None
        assert result.inferred_freq is None  # i.e. we are not _too_ strict here

        # Case where we _can_ keep freq because we're length==1
        dti2 = dti[:1]
        result = dti2.tz_localize("US/Eastern")
        assert result.freq == "h"

    def test_tz_localize_utc_copies(self, utc_fixture):
        # GH#46460: localizing must not share memory with the input
        times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"]
        index = DatetimeIndex(times)

        res = index.tz_localize(utc_fixture)
        assert not tm.shares_memory(res, index)

        res2 = index._data.tz_localize(utc_fixture)
        assert not tm.shares_memory(index._data, res2)

    def test_dti_tz_localize_nonexistent_raise_coerce(self):
        # GH#13057
        times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"]
        index = DatetimeIndex(times)
        tz = "US/Eastern"

        with pytest.raises(ValueError, match="|".join(times)):
            index.tz_localize(tz=tz)

        with pytest.raises(ValueError, match="|".join(times)):
            index.tz_localize(tz=tz, nonexistent="raise")

        result = index.tz_localize(tz=tz, nonexistent="NaT")
        test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"]
        dti = to_datetime(test_times, utc=True)
        expected = dti.tz_convert("US/Eastern")
        tm.assert_index_equal(result, expected)

    def test_dti_tz_localize_ambiguous_infer(self, tz):
        # November 6, 2011, fall back, repeat 2 AM hour
        # With no repeated hours, we cannot infer the transition
        dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=offsets.Hour())
        with pytest.raises(ValueError, match="Cannot infer dst time"):
            dr.tz_localize(tz)

    def test_dti_tz_localize_ambiguous_infer2(self, tz, unit):
        # With repeated hours, we can infer the transition
        dr = date_range(
            datetime(2011, 11, 6, 0), periods=5, freq=offsets.Hour(), tz=tz, unit=unit
        )
        times = [
            "11/06/2011 00:00",
            "11/06/2011 01:00",
            "11/06/2011 01:00",
            "11/06/2011 02:00",
            "11/06/2011 03:00",
        ]
        di = DatetimeIndex(times).as_unit(unit)
        result = di.tz_localize(tz, ambiguous="infer")
        expected = dr._with_freq(None)
        tm.assert_index_equal(result, expected)
        result2 = DatetimeIndex(times, tz=tz, ambiguous="infer").as_unit(unit)
        tm.assert_index_equal(result2, expected)

    def test_dti_tz_localize_ambiguous_infer3(self, tz):
        # When there is no dst transition, nothing special happens
        dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=offsets.Hour())
        localized = dr.tz_localize(tz)
        localized_infer = dr.tz_localize(tz, ambiguous="infer")
        tm.assert_index_equal(localized, localized_infer)

    def test_dti_tz_localize_ambiguous_times(self, tz):
        # March 13, 2011, spring forward, skip from 2 AM to 3 AM
        dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=offsets.Hour())
        with pytest.raises(ValueError, match="2011-03-13 02:30:00"):
            dr.tz_localize(tz)

        # after dst transition, it works
        dr = date_range(
            datetime(2011, 3, 13, 3, 30), periods=3, freq=offsets.Hour(), tz=tz
        )

        # November 6, 2011, fall back, repeat 2 AM hour
        dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=offsets.Hour())
        with pytest.raises(ValueError, match="Cannot infer dst time"):
            dr.tz_localize(tz)

        # UTC is OK
        dr = date_range(
            datetime(2011, 3, 13), periods=48, freq=offsets.Minute(30), tz=timezone.utc
        )

    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
        strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]

        idx = DatetimeIndex(strdates)
        conv = idx.tz_localize(tzstr)

        fromdates = DatetimeIndex(strdates, tz=tzstr)

        assert conv.tz == fromdates.tz
        tm.assert_numpy_array_equal(conv.values, fromdates.values)

    @pytest.mark.parametrize("prefix", ["", "dateutil/"])
    def test_dti_tz_localize(self, prefix):
        tzstr = prefix + "US/Eastern"
        dti = date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="ms")
        dti2 = dti.tz_localize(tzstr)

        dti_utc = date_range(
            start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="ms", tz="utc"
        )

        tm.assert_numpy_array_equal(dti2.values, dti_utc.values)

        dti3 = dti2.tz_convert(prefix + "US/Pacific")
        tm.assert_numpy_array_equal(dti3.values, dti_utc.values)

        dti = date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="ms")
        with pytest.raises(ValueError, match="Cannot infer dst time"):
            dti.tz_localize(tzstr)

        dti = date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="ms")
        with pytest.raises(ValueError, match="2011-03-13 02:00:00"):
            dti.tz_localize(tzstr)

    def test_dti_tz_localize_utc_conversion(self, tz):
        # Localizing to time zone should:
        #  1) check for DST ambiguities
        #  2) convert to UTC

        rng = date_range("3/10/2012", "3/11/2012", freq="30min")

        converted = rng.tz_localize(tz)
        expected_naive = rng + offsets.Hour(5)
        tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)

        # DST ambiguity, this should fail
        rng = date_range("3/11/2012", "3/12/2012", freq="30min")
        # Is this really how it should fail??
        with pytest.raises(ValueError, match="2012-03-11 02:00:00"):
            rng.tz_localize(tz)

    def test_dti_tz_localize_roundtrip(self, tz_aware_fixture):
        # note: this tz tests that a tz-naive index can be localized
        # and de-localized successfully, when there are no DST transitions
        # in the range.
        idx = date_range(start="2014-06-01", end="2014-08-30", freq="15min")
        tz = tz_aware_fixture
        localized = idx.tz_localize(tz)
        # can't localize a tz-aware object
        with pytest.raises(
            TypeError, match="Already tz-aware, use tz_convert to convert"
        ):
            localized.tz_localize(tz)
        reset = localized.tz_localize(None)
        assert reset.tzinfo is None
        expected = idx._with_freq(None)
        tm.assert_index_equal(reset, expected)

    def test_dti_tz_localize_naive(self):
        rng = date_range("1/1/2011", periods=100, freq="h")

        conv = rng.tz_localize("US/Pacific")
        exp = date_range("1/1/2011", periods=100, freq="h", tz="US/Pacific")

        tm.assert_index_equal(conv, exp._with_freq(None))

    def test_dti_tz_localize_tzlocal(self):
        # GH#13583
        offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
        offset = int(offset.total_seconds() * 1000000000)

        dti = date_range(start="2001-01-01", end="2001-03-01")
        dti2 = dti.tz_localize(dateutil.tz.tzlocal())
        tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)

        dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
        dti2 = dti.tz_localize(None)
        tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)

    def test_dti_tz_localize_ambiguous_nat(self, tz):
        times = [
            "11/06/2011 00:00",
            "11/06/2011 01:00",
            "11/06/2011 01:00",
            "11/06/2011 02:00",
            "11/06/2011 03:00",
        ]
        di = DatetimeIndex(times)
        localized = di.tz_localize(tz, ambiguous="NaT")

        times = [
            "11/06/2011 00:00",
            np.nan,
            np.nan,
            "11/06/2011 02:00",
            "11/06/2011 03:00",
        ]
        di_test = DatetimeIndex(times, tz="US/Eastern")

        # left dtype is datetime64[ns, US/Eastern]
        # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
        tm.assert_numpy_array_equal(di_test.values, localized.values)

    def test_dti_tz_localize_ambiguous_flags(self, tz, unit):
        # November 6, 2011, fall back, repeat 2 AM hour

        # Pass in flags to determine right dst transition
        dr = date_range(
            datetime(2011, 11, 6, 0), periods=5, freq=offsets.Hour(), tz=tz, unit=unit
        )
        times = [
            "11/06/2011 00:00",
            "11/06/2011 01:00",
            "11/06/2011 01:00",
            "11/06/2011 02:00",
            "11/06/2011 03:00",
        ]

        # Test tz_localize
        di = DatetimeIndex(times).as_unit(unit)
        is_dst = [1, 1, 0, 0, 0]
        localized = di.tz_localize(tz, ambiguous=is_dst)
        expected = dr._with_freq(None)
        tm.assert_index_equal(expected, localized)

        result = DatetimeIndex(times, tz=tz, ambiguous=is_dst).as_unit(unit)
        tm.assert_index_equal(result, expected)

        localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
        tm.assert_index_equal(dr, localized)

        localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool"))
        tm.assert_index_equal(dr, localized)

        # Test constructor
        localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst).as_unit(unit)
        tm.assert_index_equal(dr, localized)

        # Test duplicate times where inferring the dst fails
        times += times
        di = DatetimeIndex(times).as_unit(unit)

        # When the sizes are incompatible, make sure error is raised
        msg = "Length of ambiguous bool-array must be the same size as vals"
        with pytest.raises(Exception, match=msg):
            di.tz_localize(tz, ambiguous=is_dst)

        # When sizes are compatible and there are repeats ('infer' won't work)
        is_dst = np.hstack((is_dst, is_dst))
        localized = di.tz_localize(tz, ambiguous=is_dst)
        dr = dr.append(dr)
        tm.assert_index_equal(dr, localized)

    def test_dti_tz_localize_ambiguous_flags2(self, tz):
        # When there is no dst transition, nothing special happens
        dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=offsets.Hour())
        is_dst = np.array([1] * 10)
        localized = dr.tz_localize(tz)
        localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
        tm.assert_index_equal(localized, localized_is_dst)

    def test_dti_tz_localize_bdate_range(self):
        dr = bdate_range("1/1/2009", "1/1/2010")
        dr_utc = bdate_range("1/1/2009", "1/1/2010", tz=timezone.utc)
        localized = dr.tz_localize(timezone.utc)
        tm.assert_index_equal(dr_utc, localized)

    @pytest.mark.parametrize(
        "start_ts, tz, end_ts, shift",
        [
            ["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"],
            [
                "2015-03-29 02:20:00",
                "Europe/Warsaw",
                "2015-03-29 01:59:59.999999999",
                "backward",
            ],
            [
                "2015-03-29 02:20:00",
                "Europe/Warsaw",
                "2015-03-29 03:20:00",
                timedelta(hours=1),
            ],
            [
                "2015-03-29 02:20:00",
                "Europe/Warsaw",
                "2015-03-29 01:20:00",
                timedelta(hours=-1),
            ],
            ["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"],
            [
                "2018-03-11 02:33:00",
                "US/Pacific",
                "2018-03-11 01:59:59.999999999",
                "backward",
            ],
            [
                "2018-03-11 02:33:00",
                "US/Pacific",
                "2018-03-11 03:33:00",
                timedelta(hours=1),
            ],
            [
                "2018-03-11 02:33:00",
                "US/Pacific",
                "2018-03-11 01:33:00",
                timedelta(hours=-1),
            ],
        ],
    )
    @pytest.mark.parametrize("tz_type", ["", "dateutil/"])
    def test_dti_tz_localize_nonexistent_shift(
        self, start_ts, tz, end_ts, shift, tz_type, unit
    ):
        # GH#8917
        tz = tz_type + tz
        if isinstance(shift, str):
            shift = "shift_" + shift
        dti = DatetimeIndex([Timestamp(start_ts)]).as_unit(unit)
        result = dti.tz_localize(tz, nonexistent=shift)
        expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz).as_unit(unit)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("offset", [-1, 1])
    def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, warsaw):
        # GH#8917: shifting by a timedelta that lands on another
        # nonexistent time is rejected
        tz = warsaw
        dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")])
        msg = "The provided timedelta will relocalize on a nonexistent time"
        with pytest.raises(ValueError, match=msg):
            dti.tz_localize(tz, nonexistent=timedelta(seconds=offset))
{ "filename": "shard_op.py", "repo_name": "tensorflow/tensorflow", "repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/data/ops/shard_op.py", "type": "Python" }
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The implementation of `tf.data.Dataset.shard`.""" from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import gen_dataset_ops def _shard(input_dataset, num_shards, index, name): # pylint: disable=unused-private-name """See `Dataset.shard()` for details.""" return _ShardDataset(input_dataset, num_shards, index, name) class _ShardDataset(dataset_ops.UnaryUnchangedStructureDataset): """A `Dataset` for sharding its input.""" def __init__(self, input_dataset, num_shards, index, name): """See `Dataset.shard()` for details.""" self._input_dataset = input_dataset self._num_shards = ops.convert_to_tensor( num_shards, dtype=dtypes.int64, name="num_shards") self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name="index") self._name = name variant_tensor = gen_dataset_ops.shard_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access num_shards=self._num_shards, index=self._index, **self._common_args) super().__init__(input_dataset, variant_tensor)
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@data@ops@shard_op.py@.PATH_END.py
{ "filename": "sinc_interp.py", "repo_name": "CHIME-Pulsar-Timing/CHIME-Pulsar_automated_filterbank", "repo_path": "CHIME-Pulsar_automated_filterbank_extracted/CHIME-Pulsar_automated_filterbank-main/presto_without_presto/sinc_interp.py", "type": "Python" }
import numpy as Num import numpy.fft as FFT def kaiser_window(xs, halfwidth, alpha): """ kaiser_window(xs, halfwidth, alpha): Return the kaiser window function for the values 'xs' when the the half-width of the window should be 'haldwidth' with the folloff parameter 'alpha'. The following values are particularly interesting: alpha ----- 0 Rectangular Window 5 Similar to Hamming window 6 Similar to Hanning window 8.6 Almost identical to the Blackman window """ # TODO: (gijs) bug, i0 not defined win = i0(alpha*Num.sqrt(1.0-(xs/halfwidth)**2.0))/i0(alpha) return Num.where(Num.fabs(xs)<=halfwidth, win, 0.0) def hanning_window(xs, halfwidth): """ hanning_window(xs, halfwidth): Return the Hanning window of halfwidth 'halfwidth' evaluated at the values 'xs'. """ win = 0.5 + 0.5*Num.cos(Num.pi*xs/halfwidth) return Num.where(Num.fabs(xs)<=halfwidth, win, 0.0) def hamming_window(xs, halfwidth): """ hamming_window(xs, halfwidth): Return the Hamming window of halfwidth 'halfwidth' evaluated at the values 'xs'. """ win = 0.54 + 0.46*Num.cos(Num.pi*xs/halfwidth) return Num.where(Num.fabs(xs)<=halfwidth, win, 0.0) def blackman_window(xs, halfwidth): """ blackman_window(xs, halfwidth): Return the Blackman window of halfwidth 'halfwidth' evaluated at the values 'xs'. """ rat = Num.pi*xs/halfwidth win = 0.42 + 0.5*Num.cos(rat) + 0.08*Num.cos(2.0*rat) return Num.where(Num.fabs(xs)<=halfwidth, win, 0.0) def rectangular_window(xs, halfwidth): """ rectangular_window(xs, halfwidth): Return a rectangular window of halfwidth 'halfwidth' evaluated at the values 'xs'. 
""" return Num.where(Num.fabs(xs)<=halfwidth, 1.0, 0.0) _window_function = {"rectangular": rectangular_window, "none": rectangular_window, "hanning": hanning_window, "hamming": hamming_window, "blackman": blackman_window, "kaiser": kaiser_window} def windowed_sinc_interp(data, newx, halfwidth=None, window='hanning', alpha=6.0): """ windowed_sinc_interp(data, newx, halfwidth=None, window='hanning', alpha=6.0): Return a single windowed-sinc-interpolated point from the data. """ if Num.fabs(round(newx)-newx) < 1e-5: return data[int(round(newx))] num_pts = (int(Num.floor(newx)), len(data)-int(Num.ceil(newx))-1) if halfwidth is None: halfwidth = min(num_pts) lo_pt = int(Num.floor(newx)) - halfwidth if lo_pt < 0: lo_pt < 0 print("Warning: trying to access below the lowest index!") hi_pt = lo_pt + 2*halfwidth if hi_pt >= len(data): hi_pt = len(data)-1 print("Warning: trying to access above the highest index!") halfwidth = (hi_pt-lo_pt)//2 pts = Num.arange(2*halfwidth)+lo_pt xs = newx - pts if window.lower() is "kaiser": win = _window_function[window](xs, len(data)//2, alpha) else: win = _window_function[window](xs, len(data)//2) return Num.add.reduce(Num.take(data, pts) * win * Num.sinc(xs)) def periodic_interp(data, zoomfact, window='hanning', alpha=6.0): """ periodic_interp(data, zoomfact, window='hanning', alpha=6.0): Return a periodic, windowed, sinc-interpolation of the data which is oversampled by a factor of 'zoomfact'. 
""" zoomfact = int(zoomfact) if (zoomfact < 1): print("zoomfact must be >= 1.") return 0.0 elif zoomfact==1: return data newN = len(data)*zoomfact # Space out the data comb = Num.zeros((zoomfact, len(data)), dtype='d') comb[0] += data comb = Num.reshape(Num.transpose(comb), (newN,)) # Compute the offsets xs = Num.zeros(newN, dtype='d') xs[:newN//2+1] = Num.arange(newN//2+1, dtype='d')/zoomfact xs[-newN//2:] = xs[::-1][newN//2-1:-1] # Calculate the sinc times window for the kernel if window.lower()=="kaiser": win = _window_function[window](xs, len(data)//2, alpha) else: win = _window_function[window](xs, len(data)//2) kernel = win * Num.sinc(xs) if (0): plotxy(Num.sinc(xs), color='yellow') plotxy(win) plotxy(kernel, color='red') closeplot() return FFT.irfft(FFT.rfft(kernel) * FFT.rfft(comb)) if __name__=='__main__': from presto.psr_utils import * from presto.Pgplot import * from numpy.random import normal # from spline import * fwhm = 0.01 ctr_phase = 0.505 noise_sigma = 0.2 # The theoretical profile with noise Ntheo = 1000 theo = gaussian_profile(Ntheo, ctr_phase, fwhm) + normal(0.0, noise_sigma, Ntheo) theo_phases = Num.arange(Ntheo, dtype='d')/Ntheo # The "sampled" data Ndata = 100 data = theo[::Ntheo//Ndata] data_phases = theo_phases[::Ntheo//Ndata] # The values to interpolate Ncalc = 30 lo_calc = ctr_phase-0.05 hi_calc = ctr_phase+0.05 calc_phases = span(lo_calc, hi_calc, Ncalc) plotxy(theo, theo_phases, rangex=[lo_calc-0.2, hi_calc+0.2]) plotxy(data, data_phases, line=None, symbol=3, color='green') # Do the interpolation one point at a time halfwidth = Ndata//2-5 calc_vals = [] for phs in calc_phases: calc_vals.append(windowed_sinc_interp(data, phs*len(data), halfwidth)) plotxy(calc_vals, calc_phases, line=None, symbol=3, color='red') # Interpolate the full profile using convolution zoomfact = 10 newvals = periodic_interp(data, 10) new_phases = Num.arange(Ndata*zoomfact, dtype='d')/(Ndata*zoomfact) plotxy(newvals, new_phases, line=1, symbol=None, 
color='yellow') # Interpolate using cubic splines if (0): sdata = interpolate.splrep(data, data_phases, s=0) svals = interpolate.splrep(new_phases, sdata, der=0) plotxy(svals, new_phases, line=1, symbol=None, color='cyan') elif (0): sdata = Spline(data_phases, data) svals = sdata(new_phases) plotxy(svals, new_phases, line=1, symbol=None, color='cyan') closeplot()
CHIME-Pulsar-TimingREPO_NAMECHIME-Pulsar_automated_filterbankPATH_START.@CHIME-Pulsar_automated_filterbank_extracted@CHIME-Pulsar_automated_filterbank-main@presto_without_presto@sinc_interp.py@.PATH_END.py
{ "filename": "test_interpolative.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/linalg/tests/test_interpolative.py", "type": "Python" }
#****************************************************************************** # Copyright (C) 2013 Kenneth L. Ho # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and # the following disclaimer in the documentation and/or other materials # provided with the distribution. # # None of the names of the copyright holders may be used to endorse or # promote products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
#****************************************************************************** import scipy.linalg.interpolative as pymatrixid import numpy as np from scipy.linalg import hilbert, svdvals, norm from scipy.sparse.linalg import aslinearoperator import time from numpy.testing import assert_, assert_allclose from pytest import raises as assert_raises def _debug_print(s): if 0: print(s) class TestInterpolativeDecomposition(object): def test_id(self): for dtype in [np.float64, np.complex128]: self.check_id(dtype) def check_id(self, dtype): # Test ID routines on a Hilbert matrix. # set parameters n = 300 eps = 1e-12 # construct Hilbert matrix A = hilbert(n).astype(dtype) if np.issubdtype(dtype, np.complexfloating): A = A * (1 + 1j) L = aslinearoperator(A) # find rank S = np.linalg.svd(A, compute_uv=False) try: rank = np.nonzero(S < eps)[0][0] except IndexError: rank = n # print input summary _debug_print("Hilbert matrix dimension: %8i" % n) _debug_print("Working precision: %8.2e" % eps) _debug_print("Rank to working precision: %8i" % rank) # set print format fmt = "%8.2e (s) / %5s" # test real ID routines _debug_print("-----------------------------------------") _debug_print("Real ID routines") _debug_print("-----------------------------------------") # fixed precision _debug_print("Calling iddp_id / idzp_id ...",) t0 = time.time() k, idx, proj = pymatrixid.interp_decomp(A, eps, rand=False) t = time.time() - t0 B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddp_aid / idzp_aid ...",) t0 = time.time() k, idx, proj = pymatrixid.interp_decomp(A, eps) t = time.time() - t0 B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddp_rid / idzp_rid ...",) t0 = time.time() k, idx, proj = pymatrixid.interp_decomp(L, eps) t = time.time() - t0 B = 
pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) # fixed rank k = rank _debug_print("Calling iddr_id / idzr_id ...",) t0 = time.time() idx, proj = pymatrixid.interp_decomp(A, k, rand=False) t = time.time() - t0 B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddr_aid / idzr_aid ...",) t0 = time.time() idx, proj = pymatrixid.interp_decomp(A, k) t = time.time() - t0 B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddr_rid / idzr_rid ...",) t0 = time.time() idx, proj = pymatrixid.interp_decomp(L, k) t = time.time() - t0 B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) # check skeleton and interpolation matrices idx, proj = pymatrixid.interp_decomp(A, k, rand=False) P = pymatrixid.reconstruct_interp_matrix(idx, proj) B = pymatrixid.reconstruct_skel_matrix(A, k, idx) assert_(np.allclose(B, A[:,idx[:k]], eps)) assert_(np.allclose(B.dot(P), A, eps)) # test SVD routines _debug_print("-----------------------------------------") _debug_print("SVD routines") _debug_print("-----------------------------------------") # fixed precision _debug_print("Calling iddp_svd / idzp_svd ...",) t0 = time.time() U, S, V = pymatrixid.svd(A, eps, rand=False) t = time.time() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddp_asvd / idzp_asvd...",) t0 = time.time() U, S, V = pymatrixid.svd(A, eps) t = time.time() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) 
_debug_print("Calling iddp_rsvd / idzp_rsvd...",) t0 = time.time() U, S, V = pymatrixid.svd(L, eps) t = time.time() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) # fixed rank k = rank _debug_print("Calling iddr_svd / idzr_svd ...",) t0 = time.time() U, S, V = pymatrixid.svd(A, k, rand=False) t = time.time() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddr_asvd / idzr_asvd ...",) t0 = time.time() U, S, V = pymatrixid.svd(A, k) t = time.time() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) _debug_print("Calling iddr_rsvd / idzr_rsvd ...",) t0 = time.time() U, S, V = pymatrixid.svd(L, k) t = time.time() - t0 B = np.dot(U, np.dot(np.diag(S), V.T.conj())) _debug_print(fmt % (t, np.allclose(A, B, eps))) assert_(np.allclose(A, B, eps)) # ID to SVD idx, proj = pymatrixid.interp_decomp(A, k, rand=False) Up, Sp, Vp = pymatrixid.id_to_svd(A[:, idx[:k]], idx, proj) B = U.dot(np.diag(S).dot(V.T.conj())) assert_(np.allclose(A, B, eps)) # Norm estimates s = svdvals(A) norm_2_est = pymatrixid.estimate_spectral_norm(A) assert_(np.allclose(norm_2_est, s[0], 1e-6)) B = A.copy() B[:,0] *= 1.2 s = svdvals(A - B) norm_2_est = pymatrixid.estimate_spectral_norm_diff(A, B) assert_(np.allclose(norm_2_est, s[0], 1e-6)) # Rank estimates B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=dtype) for M in [A, B]: ML = aslinearoperator(M) rank_tol = 1e-9 rank_np = np.linalg.matrix_rank(M, norm(M, 2)*rank_tol) rank_est = pymatrixid.estimate_rank(M, rank_tol) rank_est_2 = pymatrixid.estimate_rank(ML, rank_tol) assert_(rank_est >= rank_np) assert_(rank_est <= rank_np + 10) assert_(rank_est_2 >= rank_np - 4) assert_(rank_est_2 <= rank_np + 4) def test_rand(self): pymatrixid.seed('default') 
assert_(np.allclose(pymatrixid.rand(2), [0.8932059, 0.64500803], 1e-4)) pymatrixid.seed(1234) x1 = pymatrixid.rand(2) assert_(np.allclose(x1, [0.7513823, 0.06861718], 1e-4)) np.random.seed(1234) pymatrixid.seed() x2 = pymatrixid.rand(2) np.random.seed(1234) pymatrixid.seed(np.random.rand(55)) x3 = pymatrixid.rand(2) assert_allclose(x1, x2) assert_allclose(x1, x3) def test_badcall(self): A = hilbert(5).astype(np.float32) assert_raises(ValueError, pymatrixid.interp_decomp, A, 1e-6, rand=False) def test_rank_too_large(self): # svd(array, k) should not segfault a = np.ones((4, 3)) with assert_raises(ValueError): pymatrixid.svd(a, 4) def test_full_rank(self): eps = 1.0e-12 # fixed precision A = np.random.rand(16, 8) k, idx, proj = pymatrixid.interp_decomp(A, eps) assert_(k == A.shape[1]) P = pymatrixid.reconstruct_interp_matrix(idx, proj) B = pymatrixid.reconstruct_skel_matrix(A, k, idx) assert_allclose(A, B.dot(P)) # fixed rank idx, proj = pymatrixid.interp_decomp(A, k) P = pymatrixid.reconstruct_interp_matrix(idx, proj) B = pymatrixid.reconstruct_skel_matrix(A, k, idx) assert_allclose(A, B.dot(P))
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@linalg@tests@test_interpolative.py@.PATH_END.py
{ "filename": "_dash.py", "repo_name": "plotly/plotly.py", "repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/mapbox/layer/line/_dash.py", "type": "Python" }
import _plotly_utils.basevalidators


class DashValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for the ``dash`` property of
    ``layout.mapbox.layer.line``."""

    def __init__(self, plotly_name="dash",
                 parent_name="layout.mapbox.layer.line", **kwargs):
        # Resolve the overridable edit type before delegating to the base
        # class; callers may supply their own ``edit_type`` via kwargs.
        edit_type = kwargs.pop("edit_type", "plot")
        super(DashValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@mapbox@layer@line@_dash.py@.PATH_END.py
{ "filename": "feature_request.md", "repo_name": "threeML/threeML", "repo_path": "threeML_extracted/threeML-master/.github/ISSUE_TEMPLATE/feature_request.md", "type": "Markdown" }
---
name: "\U0001F680 Feature Request"
about: "I have a suggestion (and may want to implement it \U0001F642)!"
title: ''
labels: ''
assignees: ''

---

## Feature Request

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I have an issue when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen. Add any considered drawbacks.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Teachability, Documentation, Adoption, Migration Strategy**
If you can, explain how users will be able to use this feature and, if possible, write out a version of the docs for it. A screenshot or design mock-up may also help.
threeMLREPO_NAMEthreeMLPATH_START.@threeML_extracted@threeML-master@.github@ISSUE_TEMPLATE@feature_request.md@.PATH_END.py
{ "filename": "HGP2018_melt_benchmarks.py", "repo_name": "geodynamics/burnman", "repo_path": "burnman_extracted/burnman-main/misc/benchmarks/HGP2018_melt_benchmarks.py", "type": "Python" }
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit # for the Earth and Planetary Sciences # Copyright (C) 2012 - 2021 by the BurnMan team, released under the GNU # GPL v2 or later. """ HGP2018_melt_benchmarks ----------------------- This script tests the Holland et al. (2018) melt model in the CMS and MS systems. """ from __future__ import absolute_import import numpy as np import matplotlib.pyplot as plt from burnman import Composite from burnman.minerals import HGP_2018_ds633 from burnman import equilibrate if __name__ == "__main__": di = HGP_2018_ds633.di() liq = HGP_2018_ds633.CMS_melt() liq.set_composition([1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]) composition = {"Mg": 1.0, "Ca": 1.0, "Si": 2.0, "O": 6.0} assemblage = Composite([di, liq]) equality_constraints = [ ["P", np.linspace(1.0e5, 5.0e9, 101)], ["phase_fraction", (di, 0.0)], ] sols, prm = equilibrate(composition, assemblage, equality_constraints) plt.plot( [sol.assemblage.pressure / 1.0e9 for sol in sols], [sol.assemblage.temperature for sol in sols], label="Solution model diopside melting", ) liq = HGP_2018_ds633.diL() assemblage = Composite([di, liq]) equality_constraints = [ ["P", np.linspace(1.0e5, 5.0e9, 101)], ["phase_fraction", (di, 0.0)], ] sols, prm = equilibrate(composition, assemblage, equality_constraints) plt.plot( [sol.assemblage.pressure / 1.0e9 for sol in sols], [sol.assemblage.temperature for sol in sols], linestyle=":", label="Raw dataset diopside melting", ) plt.legend() plt.xlabel("Pressure (GPa)") plt.ylabel("Temperature (K))") plt.show() liq = HGP_2018_ds633.MS_melt() per = HGP_2018_ds633.per() fo = HGP_2018_ds633.fo() pren = HGP_2018_ds633.pren() crst = HGP_2018_ds633.crst() # peritectics / eutectics liq.set_composition([0.1, 0.9]) composition = {"Mg": 2.0, "Si": 1.5, "O": 5.0} assemblage = Composite([fo, pren, liq]) equality_constraints = [["P", 1.0e5], ["phase_fraction", (liq, 0.0)]] sol, prm = equilibrate(composition, assemblage, equality_constraints) T_fo_pren 
= assemblage.temperature xSi_fo_pren = liq.formula["Si"] / (liq.formula["Si"] + liq.formula["Mg"]) composition = {"Mg": 1.0, "Si": 2.0, "O": 5.0} assemblage = Composite([pren, crst, liq]) equality_constraints = [["P", 1.0e5], ["phase_fraction", (liq, 0.0)]] sol, prm = equilibrate(composition, assemblage, equality_constraints) T_pren_crst = assemblage.temperature xSi_pren_crst = liq.formula["Si"] / (liq.formula["Si"] + liq.formula["Mg"]) print("fo-pren peritectic") print(f"x(SiO2): {xSi_fo_pren:.3f}, T: {T_fo_pren:.2f} K") print("pren-crst eutectic") print(f"x(SiO2): {xSi_pren_crst:.3f}, T: {T_pren_crst:.2f} K") plt.plot([1.0 / 3.0, 1.0 / 3.0], [1750.0, 2250.0], color="black") plt.plot([1.0 / 3.0, xSi_fo_pren], [T_fo_pren, T_fo_pren], color="black") plt.plot([0.5, 0.5], [1750.0, T_fo_pren], color="black") plt.plot([0.5, 1.0], [T_pren_crst, T_pren_crst], color="black") for xSi0, xSi1, phase in [ [1.0 / 3.0, xSi_fo_pren, fo], [xSi_fo_pren, xSi_pren_crst, pren], [xSi_pren_crst, 0.999, crst], ]: xSis = np.linspace(xSi0, xSi1, 31) Ts = np.empty_like(xSis) for i, xSi in enumerate(xSis): composition = {"Mg": 1.0 - xSi, "Si": xSi, "O": (1.0 - xSi) + xSi * 2.0} assemblage = Composite([phase, liq]) assemblage.set_state(1.0e5, 2000.0) equality_constraints = [["P", 1.0e5], ["phase_fraction", (phase, 0.0)]] sol, prm = equilibrate(composition, assemblage, equality_constraints) Ts[i] = sol.assemblage.temperature plt.plot(xSis, Ts, color="black") plt.xlim(0.0, 1.0) plt.ylim(1750.0, 2250.0) plt.xlabel("xSiO$_2$/(xMgO + xSiO$_2$) (mole fraction)") plt.ylabel("Temperature (K)") plt.show()
geodynamicsREPO_NAMEburnmanPATH_START.@burnman_extracted@burnman-main@misc@benchmarks@HGP2018_melt_benchmarks.py@.PATH_END.py
{ "filename": "_exponentformat.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/ternary/caxis/_exponentformat.py", "type": "Python" }
import _plotly_utils.basevalidators


class ExponentformatValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``exponentformat`` property of
    ``layout.ternary.caxis``."""

    def __init__(self, plotly_name="exponentformat",
                 parent_name="layout.ternary.caxis", **kwargs):
        # Pull the overridable defaults out of kwargs before delegating
        # so explicit caller values win over the hard-coded defaults.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "style")
        values = kwargs.pop("values", ["none", "e", "E", "power", "SI", "B"])
        super(ExponentformatValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@ternary@caxis@_exponentformat.py@.PATH_END.py
{ "filename": "test_index_tricks.py", "repo_name": "numpy/numpy", "repo_path": "numpy_extracted/numpy-main/numpy/lib/tests/test_index_tricks.py", "type": "Python" }
import pytest import numpy as np from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, assert_raises_regex, ) from numpy.lib._index_tricks_impl import ( mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, index_exp, ndindex, c_, r_, s_, ix_ ) class TestRavelUnravelIndex: def test_basic(self): assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) # test that new shape argument works properly assert_equal(np.unravel_index(indices=2, shape=(2, 2)), (1, 0)) # test that an invalid second keyword argument # is properly handled, including the old name `dims`. with assert_raises(TypeError): np.unravel_index(indices=2, hape=(2, 2)) with assert_raises(TypeError): np.unravel_index(2, hape=(2, 2)) with assert_raises(TypeError): np.unravel_index(254, ims=(17, 94)) with assert_raises(TypeError): np.unravel_index(254, dims=(17, 94)) assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) assert_raises(ValueError, np.unravel_index, -1, (2, 2)) assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) assert_raises(ValueError, np.unravel_index, 4, (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) assert_equal( np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) arr = np.array([[3, 6, 6], [4, 5, 1]]) assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) assert_equal( np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) assert_equal( np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) 
assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), [12, 13, 13]) assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), [[3, 6, 6], [4, 5, 1]]) assert_equal( np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), [[3, 6, 6], [4, 5, 1]]) assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) def test_empty_indices(self): msg1 = 'indices must be integral: the provided empty sequence was' msg2 = 'only int indices permitted' assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5)) assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5)) assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]), (10, 3, 5)) assert_equal(np.unravel_index(np.array([],dtype=int), (10, 3, 5)), [[], [], []]) assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []), (10, 3)) assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']), (10, 3)) assert_raises_regex(TypeError, msg2, np.ravel_multi_index, (np.array([]), np.array([])), (5, 3)) assert_equal(np.ravel_multi_index( (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), []) assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int), (5, 3)), []) def test_big_indices(self): # ravel_multi_index for big indices (issue #7546) if np.intp == np.int64: arr = ([1, 29], [3, 5], [3, 117], [19, 2], [2379, 1284], [2, 2], [0, 1]) assert_equal( np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)), [5627771580, 117259570957]) # test unravel_index for big indices (issue #9538) assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1)) # test overflow checking for too big array (issue #7546) dummy_arr = ([0],[0]) half_max = np.iinfo(np.intp).max // 2 assert_equal( np.ravel_multi_index(dummy_arr, (half_max, 2)), [0]) assert_raises(ValueError, np.ravel_multi_index, dummy_arr, (half_max+1, 2)) assert_equal( np.ravel_multi_index(dummy_arr, (half_max, 2), 
order='F'), [0]) assert_raises(ValueError, np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F') def test_dtypes(self): # Test with different data types for dtype in [np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64]: coords = np.array( [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) shape = (5, 8) uncoords = 8*coords[0]+coords[1] assert_equal(np.ravel_multi_index(coords, shape), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape)) uncoords = coords[0]+5*coords[1] assert_equal( np.ravel_multi_index(coords, shape, order='F'), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) coords = np.array( [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], dtype=dtype) shape = (5, 8, 10) uncoords = 10*(8*coords[0]+coords[1])+coords[2] assert_equal(np.ravel_multi_index(coords, shape), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape)) uncoords = coords[0]+5*(coords[1]+8*coords[2]) assert_equal( np.ravel_multi_index(coords, shape, order='F'), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) def test_clipmodes(self): # Test clipmodes assert_equal( np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode=( 'wrap', 'raise', 'clip', 'raise')), np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) assert_raises( ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) def test_writeability(self): # See gh-7269 x, y = np.unravel_index([1, 2, 3], (4, 5)) assert_(x.flags.writeable) assert_(y.flags.writeable) def test_0d(self): # gh-580 x = np.unravel_index(0, ()) assert_equal(x, ()) assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) assert_raises_regex( ValueError, "out of bounds", np.unravel_index, [1], ()) @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"]) def test_empty_array_ravel(self, mode): res = 
np.ravel_multi_index( np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode) assert(res.shape == (0,)) with assert_raises(ValueError): np.ravel_multi_index( np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode) def test_empty_array_unravel(self): res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0)) # res is a tuple of three empty arrays assert(len(res) == 3) assert(all(a.shape == (0,) for a in res)) with assert_raises(ValueError): np.unravel_index([1], (2, 1, 0)) class TestGrid: def test_basic(self): a = mgrid[-1:1:10j] b = mgrid[-1:1:0.1] assert_(a.shape == (10,)) assert_(b.shape == (20,)) assert_(a[0] == -1) assert_almost_equal(a[-1], 1) assert_(b[0] == -1) assert_almost_equal(b[1]-b[0], 0.1, 11) assert_almost_equal(b[-1], b[0]+19*0.1, 11) assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) def test_linspace_equivalence(self): y, st = np.linspace(2, 10, retstep=True) assert_almost_equal(st, 8/49.0) assert_array_almost_equal(y, mgrid[2:10:50j], 13) def test_nd(self): c = mgrid[-1:1:10j, -2:2:10j] d = mgrid[-1:1:0.1, -2:2:0.2] assert_(c.shape == (2, 10, 10)) assert_(d.shape == (2, 20, 20)) assert_array_equal(c[0][0, :], -np.ones(10, 'd')) assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd')) assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11) assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], 0.1*np.ones(20, 'd'), 11) assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], 0.2*np.ones(20, 'd'), 11) def test_sparse(self): grid_full = mgrid[-1:1:10j, -2:2:10j] grid_sparse = ogrid[-1:1:10j, -2:2:10j] # sparse grids can be made dense by broadcasting grid_broadcast = np.broadcast_arrays(*grid_sparse) for f, b in zip(grid_full, grid_broadcast): assert_equal(f, b) @pytest.mark.parametrize("start, stop, step, expected", [ (None, 10, 10j, (200, 10)), (-10, 20, None, (1800, 30)), ]) def test_mgrid_size_none_handling(self, start, stop, step, expected): # regression test None value handling for # start and step 
values used by mgrid; # internally, this aims to cover previously # unexplored code paths in nd_grid() grid = mgrid[start:stop:step, start:stop:step] # need a smaller grid to explore one of the # untested code paths grid_small = mgrid[start:stop:step] assert_equal(grid.size, expected[0]) assert_equal(grid_small.size, expected[1]) def test_accepts_npfloating(self): # regression test for #16466 grid64 = mgrid[0.1:0.33:0.1, ] grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ] assert_array_almost_equal(grid64, grid32) # At some point this was float64, but NEP 50 changed it: assert grid32.dtype == np.float32 assert grid64.dtype == np.float64 # different code path for single slice grid64 = mgrid[0.1:0.33:0.1] grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)] assert_(grid32.dtype == np.float64) assert_array_almost_equal(grid64, grid32) def test_accepts_longdouble(self): # regression tests for #16945 grid64 = mgrid[0.1:0.33:0.1, ] grid128 = mgrid[ np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1), ] assert_(grid128.dtype == np.longdouble) assert_array_almost_equal(grid64, grid128) grid128c_a = mgrid[0:np.longdouble(1):3.4j] grid128c_b = mgrid[0:np.longdouble(1):3.4j, ] assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble) assert_array_equal(grid128c_a, grid128c_b[0]) # different code path for single slice grid64 = mgrid[0.1:0.33:0.1] grid128 = mgrid[ np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1) ] assert_(grid128.dtype == np.longdouble) assert_array_almost_equal(grid64, grid128) def test_accepts_npcomplexfloating(self): # Related to #16466 assert_array_almost_equal( mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ] ) # different code path for single slice assert_array_almost_equal( mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)] ) # Related to #16945 grid64_a = mgrid[0.1:0.3:3.3j] grid64_b = mgrid[0.1:0.3:3.3j, ][0] assert_(grid64_a.dtype == grid64_b.dtype == np.float64) assert_array_equal(grid64_a, 
grid64_b) grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)] grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0] assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble) assert_array_equal(grid64_a, grid64_b) class TestConcatenator: def test_1d(self): assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) b = np.ones(5) c = r_[b, 0, 0, b] assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) def test_mixed_type(self): g = r_[10.1, 1:10] assert_(g.dtype == 'f8') def test_more_mixed_type(self): g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] assert_(g.dtype == 'f8') def test_complex_step(self): # Regression test for #12262 g = r_[0:36:100j] assert_(g.shape == (100,)) # Related to #16466 g = r_[0:36:np.complex64(100j)] assert_(g.shape == (100,)) def test_2d(self): b = np.random.rand(5, 5) c = np.random.rand(5, 5) d = r_['1', b, c] # append columns assert_(d.shape == (5, 10)) assert_array_equal(d[:, :5], b) assert_array_equal(d[:, 5:], c) d = r_[b, c] assert_(d.shape == (10, 5)) assert_array_equal(d[:5, :], b) assert_array_equal(d[5:, :], c) def test_0d(self): assert_equal(r_[0, np.array(1), 2], [0, 1, 2]) assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3]) assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3]) class TestNdenumerate: def test_basic(self): a = np.array([[1, 2], [3, 4]]) assert_equal(list(ndenumerate(a)), [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) class TestIndexExpression: def test_regression_1(self): # ticket #1196 a = np.arange(2) assert_equal(a[:-1], a[s_[:-1]]) assert_equal(a[:-1], a[index_exp[:-1]]) def test_simple_1(self): a = np.random.rand(4, 5, 6) assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) class TestIx_: def test_regression_1(self): # Test empty untyped inputs create outputs of indexing type, gh-5804 a, = np.ix_(range(0)) assert_equal(a.dtype, np.intp) a, = np.ix_([]) assert_equal(a.dtype, np.intp) # but if the type is specified, 
don't change it a, = np.ix_(np.array([], dtype=np.float32)) assert_equal(a.dtype, np.float32) def test_shape_and_dtype(self): sizes = (4, 5, 3, 2) # Test both lists and arrays for func in (range, np.arange): arrays = np.ix_(*[func(sz) for sz in sizes]) for k, (a, sz) in enumerate(zip(arrays, sizes)): assert_equal(a.shape[k], sz) assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) assert_(np.issubdtype(a.dtype, np.integer)) def test_bool(self): bool_a = [True, False, True, True] int_a, = np.nonzero(bool_a) assert_equal(np.ix_(bool_a)[0], int_a) def test_1d_only(self): idx2d = [[1, 2, 3], [4, 5, 6]] assert_raises(ValueError, np.ix_, idx2d) def test_repeated_input(self): length_of_vector = 5 x = np.arange(length_of_vector) out = ix_(x, x) assert_equal(out[0].shape, (length_of_vector, 1)) assert_equal(out[1].shape, (1, length_of_vector)) # check that input shape is not modified assert_equal(x.shape, (length_of_vector,)) def test_c_(): a = c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) class TestFillDiagonal: def test_basic(self): a = np.zeros((3, 3), int) fill_diagonal(a, 5) assert_array_equal( a, np.array([[5, 0, 0], [0, 5, 0], [0, 0, 5]]) ) def test_tall_matrix(self): a = np.zeros((10, 3), int) fill_diagonal(a, 5) assert_array_equal( a, np.array([[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]) ) def test_tall_matrix_wrap(self): a = np.zeros((10, 3), int) fill_diagonal(a, 5, True) assert_array_equal( a, np.array([[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0], [5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0], [5, 0, 0], [0, 5, 0]]) ) def test_wide_matrix(self): a = np.zeros((3, 10), int) fill_diagonal(a, 5) assert_array_equal( a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]) ) def test_operate_4d_array(self): a = np.zeros((3, 3, 3, 3), int) fill_diagonal(a, 4) i = np.array([0, 1, 2]) 
assert_equal(np.where(a != 0), (i, i, i, i)) def test_low_dim_handling(self): # raise error with low dimensionality a = np.zeros(3, int) with assert_raises_regex(ValueError, "at least 2-d"): fill_diagonal(a, 5) def test_hetero_shape_handling(self): # raise error with high dimensionality and # shape mismatch a = np.zeros((3,3,7,3), int) with assert_raises_regex(ValueError, "equal length"): fill_diagonal(a, 2) def test_diag_indices(): di = diag_indices(4) a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) a[di] = 100 assert_array_equal( a, np.array([[100, 2, 3, 4], [5, 100, 7, 8], [9, 10, 100, 12], [13, 14, 15, 100]]) ) # Now, we create indices to manipulate a 3-d array: d3 = diag_indices(2, 3) # And use it to set the diagonal of a zeros array to 1: a = np.zeros((2, 2, 2), int) a[d3] = 1 assert_array_equal( a, np.array([[[1, 0], [0, 0]], [[0, 0], [0, 1]]]) ) class TestDiagIndicesFrom: def test_diag_indices_from(self): x = np.random.random((4, 4)) r, c = diag_indices_from(x) assert_array_equal(r, np.arange(4)) assert_array_equal(c, np.arange(4)) def test_error_small_input(self): x = np.ones(7) with assert_raises_regex(ValueError, "at least 2-d"): diag_indices_from(x) def test_error_shape_mismatch(self): x = np.zeros((3, 3, 2, 3), int) with assert_raises_regex(ValueError, "equal length"): diag_indices_from(x) def test_ndindex(): x = list(ndindex(1, 2, 3)) expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] assert_array_equal(x, expected) x = list(ndindex((1, 2, 3))) assert_array_equal(x, expected) # Test use of scalars and tuples x = list(ndindex((3,))) assert_array_equal(x, list(ndindex(3))) # Make sure size argument is optional x = list(ndindex()) assert_equal(x, [()]) x = list(ndindex(())) assert_equal(x, [()]) # Make sure 0-sized ndindex works correctly x = list(ndindex(*[0])) assert_equal(x, [])
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@lib@tests@test_index_tricks.py@.PATH_END.py
{ "filename": "_bordercolor.py", "repo_name": "plotly/plotly.py", "repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/indicator/gauge/_bordercolor.py", "type": "Python" }
import _plotly_utils.basevalidators


class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``bordercolor`` property of
    ``indicator.gauge``."""

    def __init__(self, plotly_name="bordercolor",
                 parent_name="indicator.gauge", **kwargs):
        # Callers may override the edit type; default to "plot".
        edit_type = kwargs.pop("edit_type", "plot")
        super(BordercolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@indicator@gauge@_bordercolor.py@.PATH_END.py
{ "filename": "test_recfunctions.py", "repo_name": "waynebhayes/SpArcFiRe", "repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/lib/tests/test_recfunctions.py", "type": "Python" }
from __future__ import division, absolute_import, print_function import numpy as np import numpy.ma as ma from numpy.ma.mrecords import MaskedRecords from numpy.ma.testutils import assert_equal from numpy.testing import ( run_module_suite, assert_, assert_raises, dec ) from numpy.lib.recfunctions import ( drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, find_duplicates, merge_arrays, append_fields, stack_arrays, join_by ) get_names = np.lib.recfunctions.get_names get_names_flat = np.lib.recfunctions.get_names_flat zip_descr = np.lib.recfunctions.zip_descr class TestRecFunctions(object): # Misc tests def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_zip_descr(self): # Test zip_descr (w, x, y, z) = self.data # Std array test = zip_descr((x, x), flatten=True) assert_equal(test, np.dtype([('', int), ('', int)])) test = zip_descr((x, x), flatten=False) assert_equal(test, np.dtype([('', int), ('', int)])) # Std & flexible-dtype test = zip_descr((x, z), flatten=True) assert_equal(test, np.dtype([('', int), ('A', '|S3'), ('B', float)])) test = zip_descr((x, z), flatten=False) assert_equal(test, np.dtype([('', int), ('', [('A', '|S3'), ('B', float)])])) # Standard & nested dtype test = zip_descr((x, w), flatten=True) assert_equal(test, np.dtype([('', int), ('a', int), ('ba', float), ('bb', int)])) test = zip_descr((x, w), flatten=False) assert_equal(test, np.dtype([('', int), ('', [('a', int), ('b', [('ba', float), ('bb', int)])])])) def test_drop_fields(self): # Test drop_fields a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) # A basic field test = drop_fields(a, 'a') control = np.array([((2, 3.0),), ((5, 6.0),)], dtype=[('b', [('ba', float), ('bb', int)])]) assert_equal(test, 
control) # Another basic field (but nesting two fields) test = drop_fields(a, 'b') control = np.array([(1,), (4,)], dtype=[('a', int)]) assert_equal(test, control) # A nested sub-field test = drop_fields(a, ['ba', ]) control = np.array([(1, (3.0,)), (4, (6.0,))], dtype=[('a', int), ('b', [('bb', int)])]) assert_equal(test, control) # All the nested sub-field from a field: zap that field test = drop_fields(a, ['ba', 'bb']) control = np.array([(1,), (4,)], dtype=[('a', int)]) assert_equal(test, control) test = drop_fields(a, ['a', 'b']) assert_(test is None) def test_rename_fields(self): # Test rename fields a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], dtype=[('a', int), ('b', [('ba', float), ('bb', (float, 2))])]) test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] control = a.view(newdtype) assert_equal(test.dtype, newdtype) assert_equal(test, control) def test_get_names(self): # Test get_names ndtype = np.dtype([('A', '|S3'), ('B', float)]) test = get_names(ndtype) assert_equal(test, ('A', 'B')) ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) test = get_names(ndtype) assert_equal(test, ('a', ('b', ('ba', 'bb')))) def test_get_names_flat(self): # Test get_names_flat ndtype = np.dtype([('A', '|S3'), ('B', float)]) test = get_names_flat(ndtype) assert_equal(test, ('A', 'B')) ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) test = get_names_flat(ndtype) assert_equal(test, ('a', 'b', 'ba', 'bb')) def test_get_fieldstructure(self): # Test get_fieldstructure # No nested fields ndtype = np.dtype([('A', '|S3'), ('B', float)]) test = get_fieldstructure(ndtype) assert_equal(test, {'A': [], 'B': []}) # One 1-nested field ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) test = get_fieldstructure(ndtype) assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) # One 2-nested fields ndtype = np.dtype([('A', int), ('B', [('BA', 
int), ('BB', [('BBA', int), ('BBB', int)])])]) test = get_fieldstructure(ndtype) control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} assert_equal(test, control) def test_find_duplicates(self): # Test find_duplicates a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) test = find_duplicates(a, ignoremask=False, return_index=True) control = [0, 2] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='A', return_index=True) control = [0, 1, 2, 3, 5] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='B', return_index=True) control = [0, 1, 2, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='BA', return_index=True) control = [0, 1, 2, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='BB', return_index=True) control = [0, 1, 2, 3, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) def test_find_duplicates_ignoremask(self): # Test the ignoremask option of find_duplicates ndtype = [('a', int)] a = ma.array([1, 1, 1, 2, 2, 3, 3], mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) test = find_duplicates(a, ignoremask=True, return_index=True) control = [0, 1, 3, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, ignoremask=False, return_index=True) control = [0, 1, 2, 3, 4, 6] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) class TestRecursiveFillFields(object): # Test recursive_fill_fields. 
def test_simple_flexible(self): # Test recursive_fill_fields on flexible-array a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) b = np.zeros((3,), dtype=a.dtype) test = recursive_fill_fields(a, b) control = np.array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', int), ('B', float)]) assert_equal(test, control) def test_masked_flexible(self): # Test recursive_fill_fields on masked flexible-array a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], dtype=[('A', int), ('B', float)]) b = ma.zeros((3,), dtype=a.dtype) test = recursive_fill_fields(a, b) control = ma.array([(1, 10.), (2, 20.), (0, 0.)], mask=[(0, 1), (1, 0), (0, 0)], dtype=[('A', int), ('B', float)]) assert_equal(test, control) class TestMergeArrays(object): # Test merge_arrays def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array( [(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_solo(self): # Test merge_arrays on a single array. 
(_, x, _, z) = self.data test = merge_arrays(x) control = np.array([(1,), (2,)], dtype=[('f0', int)]) assert_equal(test, control) test = merge_arrays((x,)) assert_equal(test, control) test = merge_arrays(z, flatten=False) assert_equal(test, z) test = merge_arrays(z, flatten=True) assert_equal(test, z) def test_solo_w_flatten(self): # Test merge_arrays on a single array w & w/o flattening w = self.data[0] test = merge_arrays(w, flatten=False) assert_equal(test, w) test = merge_arrays(w, flatten=True) control = np.array([(1, 2, 3.0), (4, 5, 6.0)], dtype=[('a', int), ('ba', float), ('bb', int)]) assert_equal(test, control) def test_standard(self): # Test standard & standard # Test merge arrays (_, x, y, _) = self.data test = merge_arrays((x, y), usemask=False) control = np.array([(1, 10), (2, 20), (-1, 30)], dtype=[('f0', int), ('f1', int)]) assert_equal(test, control) test = merge_arrays((x, y), usemask=True) control = ma.array([(1, 10), (2, 20), (-1, 30)], mask=[(0, 0), (0, 0), (1, 0)], dtype=[('f0', int), ('f1', int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_flatten(self): # Test standard & flexible (_, x, _, z) = self.data test = merge_arrays((x, z), flatten=True) control = np.array([(1, 'A', 1.), (2, 'B', 2.)], dtype=[('f0', int), ('A', '|S3'), ('B', float)]) assert_equal(test, control) test = merge_arrays((x, z), flatten=False) control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], dtype=[('f0', int), ('f1', [('A', '|S3'), ('B', float)])]) assert_equal(test, control) def test_flatten_wflexible(self): # Test flatten standard & nested (w, x, _, _) = self.data test = merge_arrays((x, w), flatten=True) control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], dtype=[('f0', int), ('a', int), ('ba', float), ('bb', int)]) assert_equal(test, control) test = merge_arrays((x, w), flatten=False) controldtype = [('f0', int), ('f1', [('a', int), ('b', [('ba', float), ('bb', int)])])] control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))], 
dtype=controldtype) assert_equal(test, control) def test_wmasked_arrays(self): # Test merge_arrays masked arrays (_, x, _, _) = self.data mx = ma.array([1, 2, 3], mask=[1, 0, 0]) test = merge_arrays((x, mx), usemask=True) control = ma.array([(1, 1), (2, 2), (-1, 3)], mask=[(0, 1), (0, 0), (1, 0)], dtype=[('f0', int), ('f1', int)]) assert_equal(test, control) test = merge_arrays((x, mx), usemask=True, asrecarray=True) assert_equal(test, control) assert_(isinstance(test, MaskedRecords)) def test_w_singlefield(self): # Test single field test = merge_arrays((np.array([1, 2]).view([('a', int)]), np.array([10., 20., 30.])),) control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], mask=[(0, 0), (0, 0), (1, 0)], dtype=[('a', int), ('f1', float)]) assert_equal(test, control) def test_w_shorter_flex(self): # Test merge_arrays w/ a shorter flexndarray. z = self.data[-1] # Fixme, this test looks incomplete and broken #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], # dtype=[('A', '|S3'), ('B', float), ('C', int)]) #assert_equal(test, control) # Hack to avoid pyflakes warnings about unused variables merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], dtype=[('A', '|S3'), ('B', float), ('C', int)]) def test_singlerecord(self): (_, x, y, z) = self.data test = merge_arrays((x[0], y[0], z[0]), usemask=False) control = np.array([(1, 10, ('A', 1))], dtype=[('f0', int), ('f1', int), ('f2', [('A', '|S3'), ('B', float)])]) assert_equal(test, control) class TestAppendFields(object): # Test append_fields def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_append_single(self): # Test simple case (_, x, _, _) = self.data 
test = append_fields(x, 'A', data=[10, 20, 30]) control = ma.array([(1, 10), (2, 20), (-1, 30)], mask=[(0, 0), (0, 0), (1, 0)], dtype=[('f0', int), ('A', int)],) assert_equal(test, control) def test_append_double(self): # Test simple case (_, x, _, _) = self.data test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], dtype=[('f0', int), ('A', int), ('B', int)],) assert_equal(test, control) def test_append_on_flex(self): # Test append_fields on flexible type arrays z = self.data[-1] test = append_fields(z, 'C', data=[10, 20, 30]) control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], dtype=[('A', '|S3'), ('B', float), ('C', int)],) assert_equal(test, control) def test_append_on_nested(self): # Test append_fields on nested fields w = self.data[0] test = append_fields(w, 'C', data=[10, 20, 30]) control = ma.array([(1, (2, 3.0), 10), (4, (5, 6.0), 20), (-1, (-1, -1.), 30)], mask=[( 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], dtype=[('a', int), ('b', [('ba', float), ('bb', int)]), ('C', int)],) assert_equal(test, control) class TestStackArrays(object): # Test stack_arrays def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_solo(self): # Test stack_arrays on single arrays (_, x, _, _) = self.data test = stack_arrays((x,)) assert_equal(test, x) assert_(test is x) test = stack_arrays(x) assert_equal(test, x) assert_(test is x) def test_unnamed_fields(self): # Tests combinations of arrays w/o named fields (_, x, y, _) = self.data test = stack_arrays((x, x), usemask=False) control = np.array([1, 2, 1, 2]) assert_equal(test, control) test = stack_arrays((x, y), usemask=False) control = 
np.array([1, 2, 10, 20, 30]) assert_equal(test, control) test = stack_arrays((y, x), usemask=False) control = np.array([10, 20, 30, 1, 2]) assert_equal(test, control) def test_unnamed_and_named_fields(self): # Test combination of arrays w/ & w/o named fields (_, x, _, z) = self.data test = stack_arrays((x, z)) control = ma.array([(1, -1, -1), (2, -1, -1), (-1, 'A', 1), (-1, 'B', 2)], mask=[(0, 1, 1), (0, 1, 1), (1, 0, 0), (1, 0, 0)], dtype=[('f0', int), ('A', '|S3'), ('B', float)]) assert_equal(test, control) assert_equal(test.mask, control.mask) test = stack_arrays((z, x)) control = ma.array([('A', 1, -1), ('B', 2, -1), (-1, -1, 1), (-1, -1, 2), ], mask=[(0, 0, 1), (0, 0, 1), (1, 1, 0), (1, 1, 0)], dtype=[('A', '|S3'), ('B', float), ('f2', int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) test = stack_arrays((z, z, x)) control = ma.array([('A', 1, -1), ('B', 2, -1), ('A', 1, -1), ('B', 2, -1), (-1, -1, 1), (-1, -1, 2), ], mask=[(0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1), (1, 1, 0), (1, 1, 0)], dtype=[('A', '|S3'), ('B', float), ('f2', int)]) assert_equal(test, control) def test_matching_named_fields(self): # Test combination of arrays w/ matching field names (_, x, _, z) = self.data zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) test = stack_arrays((z, zz)) control = ma.array([('A', 1, -1), ('B', 2, -1), ( 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)], mask=[(0, 0, 1), (0, 0, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0)]) assert_equal(test, control) assert_equal(test.mask, control.mask) test = stack_arrays((z, zz, x)) ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), ('a', 10., 100., -1), ('b', 20., 200., -1), ('c', 30., 300., -1), (-1, -1, -1, 1), (-1, -1, -1, 2)], dtype=ndtype, mask=[(0, 0, 1, 1), (0, 0, 1, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 
1), (1, 1, 1, 0), (1, 1, 1, 0)]) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_defaults(self): # Test defaults: no exception raised if keys of defaults are not fields. (_, _, _, z) = self.data zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} test = stack_arrays((z, zz), defaults=defaults) control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), ( 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)], mask=[(0, 0, 1), (0, 0, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0)]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) def test_autoconversion(self): # Tests autoconversion adtype = [('A', int), ('B', bool), ('C', float)] a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) bdtype = [('A', int), ('B', float), ('C', float)] b = ma.array([(4, 5, 6)], dtype=bdtype) control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], dtype=bdtype) test = stack_arrays((a, b), autoconvert=True) assert_equal(test, control) assert_equal(test.mask, control.mask) try: test = stack_arrays((a, b), autoconvert=False) except TypeError: pass else: raise AssertionError def test_checktitles(self): # Test using titles in the field names adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] b = ma.array([(4, 5, 6)], dtype=bdtype) test = stack_arrays((a, b)) control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], dtype=bdtype) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_subdtype(self): z = np.array([ ('A', 1), ('B', 2) ], dtype=[('A', '|S3'), ('B', float, (1,))]) zz = np.array([ ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) 
], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) res = stack_arrays((z, zz)) expected = ma.array( data=[ (b'A', [1.0], 0), (b'B', [2.0], 0), (b'a', [10.0], 100.0), (b'b', [20.0], 200.0), (b'c', [30.0], 300.0)], mask=[ (False, [False], True), (False, [False], True), (False, [False], False), (False, [False], False), (False, [False], False) ], dtype=zz.dtype ) assert_equal(res.dtype, expected.dtype) assert_equal(res, expected) assert_equal(res.mask, expected.mask) class TestJoinBy(object): def setup(self): self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('d', int)]) def test_inner_join(self): # Basic test of join_by a, b = self.a, self.b test = join_by('a', a, b, jointype='inner') control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), (9, 59, 69, 109, 104)], dtype=[('a', int), ('b1', int), ('b2', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_join(self): a, b = self.a, self.b # Fixme, this test is broken #test = join_by(('a', 'b'), a, b) #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), # (7, 57, 107, 102), (8, 58, 108, 103), # (9, 59, 109, 104)], # dtype=[('a', int), ('b', int), # ('c', int), ('d', int)]) #assert_equal(test, control) # Hack to avoid pyflakes unused variable warnings join_by(('a', 'b'), a, b) np.array([(5, 55, 105, 100), (6, 56, 106, 101), (7, 57, 107, 102), (8, 58, 108, 103), (9, 59, 109, 104)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) def test_join_subdtype(self): # tests the bug in https://stackoverflow.com/q/44769632/102441 from numpy.lib import recfunctions as rfn foo = np.array([(1,)], dtype=[('key', int)]) bar = np.array([(1, np.array([1,2,3]))], dtype=[('key', int), ('value', 'uint16', 3)]) res = join_by('key', foo, bar) assert_equal(res, 
bar.view(ma.MaskedArray)) def test_outer_join(self): a, b = self.a, self.b test = join_by(('a', 'b'), a, b, 'outer') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), (4, 54, 104, -1), (5, 55, 105, -1), (5, 65, -1, 100), (6, 56, 106, -1), (6, 66, -1, 101), (7, 57, 107, -1), (7, 67, -1, 102), (8, 58, 108, -1), (8, 68, -1, 103), (9, 59, 109, -1), (9, 69, -1, 104), (10, 70, -1, 105), (11, 71, -1, 106), (12, 72, -1, 107), (13, 73, -1, 108), (14, 74, -1, 109)], mask=[(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_leftouter_join(self): a, b = self.a, self.b test = join_by(('a', 'b'), a, b, 'leftouter') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), (4, 54, 104, -1), (5, 55, 105, -1), (6, 56, 106, -1), (7, 57, 107, -1), (8, 58, 108, -1), (9, 59, 109, -1)], mask=[(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_different_field_order(self): # gh-8940 a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) # this should not give a FutureWarning: j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False) assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2']) def test_duplicate_keys(self): a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b) @dec.knownfailureif(True) def 
test_same_name_different_dtypes_key(self): a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')]) b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')]) expected_dtype = np.dtype([ ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')]) a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype) b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype) res = join_by('key', a, b) assert_equal(res.dtype, expected_dtype) def test_same_name_different_dtypes(self): # gh-9338 a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')]) b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')]) expected_dtype = np.dtype([ ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')]) a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype) b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype) res = join_by('key', a, b) assert_equal(res.dtype, expected_dtype) def test_subarray_key(self): a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')]) a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype) b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')]) b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype) expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')]) expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype) res = join_by('pos', a, b) assert_equal(res.dtype, expected_dtype) assert_equal(res, expected) def test_padded_dtype(self): dt = np.dtype('i1,f4', align=True) dt.names = ('k', 'v') assert_(len(dt.descr), 3) # padding field is inserted a = np.array([(1, 3), (3, 2)], dt) b = np.array([(1, 1), (2, 2)], dt) res = join_by('k', a, b) # no padding fields remain expected_dtype = np.dtype([ ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4') ]) assert_equal(res.dtype, expected_dtype) class TestJoinBy2(object): @classmethod def setup(cls): cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75), np.arange(100, 
110))), dtype=[('a', int), ('b', int), ('d', int)]) def test_no_r1postfix(self): # Basic test of join_by no_r1postfix a, b = self.a, self.b test = join_by( 'a', a, b, r1postfix='', r2postfix='2', jointype='inner') control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101), (2, 52, 67, 102, 102), (3, 53, 68, 103, 103), (4, 54, 69, 104, 104), (5, 55, 70, 105, 105), (6, 56, 71, 106, 106), (7, 57, 72, 107, 107), (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)], dtype=[('a', int), ('b', int), ('b2', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_no_postfix(self): assert_raises(ValueError, join_by, 'a', self.a, self.b, r1postfix='', r2postfix='') def test_no_r2postfix(self): # Basic test of join_by no_r2postfix a, b = self.a, self.b test = join_by( 'a', a, b, r1postfix='1', r2postfix='', jointype='inner') control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101), (2, 52, 67, 102, 102), (3, 53, 68, 103, 103), (4, 54, 69, 104, 104), (5, 55, 70, 105, 105), (6, 56, 71, 106, 106), (7, 57, 72, 107, 107), (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)], dtype=[('a', int), ('b1', int), ('b', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_two_keys_two_vars(self): a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2), np.arange(50, 60), np.arange(10, 20))), dtype=[('k', int), ('a', int), ('b', int), ('c', int)]) b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2), np.arange(65, 75), np.arange(0, 10))), dtype=[('k', int), ('a', int), ('b', int), ('c', int)]) control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1), (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3), (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5), (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7), (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)], dtype=[('k', int), ('a', int), ('b1', int), ('b2', int), ('c1', int), ('c2', int)]) test = join_by( ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner') 
assert_equal(test.dtype, control.dtype) assert_equal(test, control) class TestAppendFieldsObj(object): """ Test append_fields with arrays containing objects """ # https://github.com/numpy/numpy/issues/2346 def setup(self): from datetime import date self.data = dict(obj=date(2000, 1, 1)) def test_append_to_objects(self): "Test append_fields when the base array contains objects" obj = self.data['obj'] x = np.array([(obj, 1.), (obj, 2.)], dtype=[('A', object), ('B', float)]) y = np.array([10, 20], dtype=int) test = append_fields(x, 'C', data=y, usemask=False) control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)], dtype=[('A', object), ('B', float), ('C', int)]) assert_equal(test, control) if __name__ == '__main__': run_module_suite()
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@numpy@lib@tests@test_recfunctions.py@.PATH_END.py
{ "filename": "__init__.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/barpolar/selected/__init__.py", "type": "Python" }
import sys from typing import TYPE_CHECKING if sys.version_info < (3, 7) or TYPE_CHECKING: from ._marker import Marker from ._textfont import Textfont else: from _plotly_utils.importers import relative_import __all__, __getattr__, __dir__ = relative_import( __name__, [], ["._marker.Marker", "._textfont.Textfont"] )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@barpolar@selected@__init__.py@.PATH_END.py
{ "filename": "test_celestial_transformations.py", "repo_name": "waynebhayes/SpArcFiRe", "repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/coordinates/tests/test_celestial_transformations.py", "type": "Python" }
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest import numpy as np from ... import units as u from ..distances import Distance from ..builtin_frames import (ICRS, FK5, FK4, FK4NoETerms, Galactic, Supergalactic, Galactocentric, HCRS, GCRS, LSR) from .. import SkyCoord from ...tests.helper import (quantity_allclose as allclose, assert_quantity_allclose as assert_allclose) from .. import EarthLocation, CartesianRepresentation from ...time import Time from ...extern.six.moves import range # used below in the next parametrized test m31_sys = [ICRS, FK5, FK4, Galactic] m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650), (10.0004738, 40.9952444), (121.1744050, -21.5729360)] m31_dist = Distance(770, u.kpc) convert_precision = 1 * u.arcsec roundtrip_precision = 1e-4 * u.degree dist_precision = 1e-9 * u.kpc m31_params = [] for i in range(len(m31_sys)): for j in range(len(m31_sys)): if i < j: m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j])) @pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params) def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo): """ This tests a variety of coordinate conversions for the Chandra point-source catalog location of M31 from NED. 
""" coo1 = fromsys(ra=fromcoo[0]*u.deg, dec=fromcoo[1]*u.deg, distance=m31_dist) coo2 = coo1.transform_to(tosys) if tosys is FK4: coo2_prec = coo2.transform_to(FK4(equinox=Time('B1950', scale='utc'))) assert (coo2_prec.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec assert (coo2_prec.spherical.lat - tocoo[1]*u.deg) < convert_precision else: assert (coo2.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec assert (coo2.spherical.lat - tocoo[1]*u.deg) < convert_precision assert coo1.distance.unit == u.kpc assert coo2.distance.unit == u.kpc assert m31_dist.unit == u.kpc assert (coo2.distance - m31_dist) < dist_precision # check round-tripping coo1_2 = coo2.transform_to(fromsys) assert (coo1_2.spherical.lon - fromcoo[0]*u.deg) < roundtrip_precision assert (coo1_2.spherical.lat - fromcoo[1]*u.deg) < roundtrip_precision assert (coo1_2.distance - m31_dist) < dist_precision def test_precession(): """ Ensures that FK4 and FK5 coordinates precess their equinoxes """ j2000 = Time('J2000', scale='utc') b1950 = Time('B1950', scale='utc') j1975 = Time('J1975', scale='utc') b1975 = Time('B1975', scale='utc') fk4 = FK4(ra=1*u.radian, dec=0.5*u.radian) assert fk4.equinox.byear == b1950.byear fk4_2 = fk4.transform_to(FK4(equinox=b1975)) assert fk4_2.equinox.byear == b1975.byear fk5 = FK5(ra=1*u.radian, dec=0.5*u.radian) assert fk5.equinox.jyear == j2000.jyear fk5_2 = fk5.transform_to(FK4(equinox=j1975)) assert fk5_2.equinox.jyear == j1975.jyear def test_fk5_galactic(): """ Check that FK5 -> Galactic gives the same as FK5 -> FK4 -> Galactic. 
""" fk5 = FK5(ra=1*u.deg, dec=2*u.deg) direct = fk5.transform_to(Galactic) indirect = fk5.transform_to(FK4).transform_to(Galactic) assert direct.separation(indirect).degree < 1.e-10 direct = fk5.transform_to(Galactic) indirect = fk5.transform_to(FK4NoETerms).transform_to(Galactic) assert direct.separation(indirect).degree < 1.e-10 def test_galactocentric(): # when z_sun=0, transformation should be very similar to Galactic icrs_coord = ICRS(ra=np.linspace(0, 360, 10)*u.deg, dec=np.linspace(-90, 90, 10)*u.deg, distance=1.*u.kpc) g_xyz = icrs_coord.transform_to(Galactic).cartesian.xyz gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0*u.kpc)).cartesian.xyz diff = np.abs(g_xyz - gc_xyz) assert allclose(diff[0], 8.3*u.kpc, atol=1E-5*u.kpc) assert allclose(diff[1:], 0*u.kpc, atol=1E-5*u.kpc) # generate some test coordinates g = Galactic(l=[0, 0, 45, 315]*u.deg, b=[-45, 45, 0, 0]*u.deg, distance=[np.sqrt(2)]*4*u.kpc) xyz = g.transform_to(Galactocentric(galcen_distance=1.*u.kpc, z_sun=0.*u.pc)).cartesian.xyz true_xyz = np.array([[0, 0, -1.], [0, 0, 1], [0, 1, 0], [0, -1, 0]]).T*u.kpc assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1E-5*u.kpc) # check that ND arrays work # from Galactocentric to Galactic x = np.linspace(-10., 10., 100) * u.kpc y = np.linspace(-10., 10., 100) * u.kpc z = np.zeros_like(x) g1 = Galactocentric(x=x, y=y, z=z) g2 = Galactocentric(x=x.reshape(100, 1, 1), y=y.reshape(100, 1, 1), z=z.reshape(100, 1, 1)) g1t = g1.transform_to(Galactic) g2t = g2.transform_to(Galactic) assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:, :, 0, 0]) # from Galactic to Galactocentric l = np.linspace(15, 30., 100) * u.deg b = np.linspace(-10., 10., 100) * u.deg d = np.ones_like(l.value) * u.kpc g1 = Galactic(l=l, b=b, distance=d) g2 = Galactic(l=l.reshape(100, 1, 1), b=b.reshape(100, 1, 1), distance=d.reshape(100, 1, 1)) g1t = g1.transform_to(Galactocentric) g2t = g2.transform_to(Galactocentric) np.testing.assert_almost_equal(g1t.cartesian.xyz.value, 
g2t.cartesian.xyz.value[:, :, 0, 0]) def test_supergalactic(): """ Check Galactic<->Supergalactic and Galactic<->ICRS conversion. """ # Check supergalactic North pole. npole = Galactic(l=47.37*u.degree, b=+6.32*u.degree) assert allclose(npole.transform_to(Supergalactic).sgb.deg, +90, atol=1e-9) # Check the origin of supergalactic longitude. lon0 = Supergalactic(sgl=0*u.degree, sgb=0*u.degree) lon0_gal = lon0.transform_to(Galactic) assert allclose(lon0_gal.l.deg, 137.37, atol=1e-9) assert allclose(lon0_gal.b.deg, 0, atol=1e-9) # Test Galactic<->ICRS with some positions that appear in Foley et al. 2008 # (http://adsabs.harvard.edu/abs/2008A%26A...484..143F) # GRB 021219 supergalactic = Supergalactic(sgl=29.91*u.degree, sgb=+73.72*u.degree) icrs = SkyCoord('18h50m27s +31d57m17s') assert supergalactic.separation(icrs) < 0.005 * u.degree # GRB 030320 supergalactic = Supergalactic(sgl=-174.44*u.degree, sgb=+46.17*u.degree) icrs = SkyCoord('17h51m36s -25d18m52s') assert supergalactic.separation(icrs) < 0.005 * u.degree class TestHCRS(): """ Check HCRS<->ICRS coordinate conversions. Uses ICRS Solar positions predicted by get_body_barycentric; with `t1` and `tarr` as defined below, the ICRS Solar positions were predicted using, e.g. coord.ICRS(coord.get_body_barycentric(tarr, 'sun')). 
""" def setup(self): self.t1 = Time("2013-02-02T23:00") self.t2 = Time("2013-08-02T23:00") self.tarr = Time(["2013-02-02T23:00", "2013-08-02T23:00"]) self.sun_icrs_scalar = ICRS(ra=244.52984668*u.deg, dec=-22.36943723*u.deg, distance=406615.66347377*u.km) # array of positions corresponds to times in `tarr` self.sun_icrs_arr = ICRS(ra=[244.52989062, 271.40976248]*u.deg, dec=[-22.36943605, -25.07431079]*u.deg, distance=[406615.66347377, 375484.13558956]*u.km) # corresponding HCRS positions self.sun_hcrs_t1 = HCRS(CartesianRepresentation([0.0, 0.0, 0.0] * u.km), obstime=self.t1) twod_rep = CartesianRepresentation([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] * u.km) self.sun_hcrs_tarr = HCRS(twod_rep, obstime=self.tarr) self.tolerance = 5*u.km def test_from_hcrs(self): # test scalar transform transformed = self.sun_hcrs_t1.transform_to(ICRS()) separation = transformed.separation_3d(self.sun_icrs_scalar) assert_allclose(separation, 0*u.km, atol=self.tolerance) # test non-scalar positions and times transformed = self.sun_hcrs_tarr.transform_to(ICRS()) separation = transformed.separation_3d(self.sun_icrs_arr) assert_allclose(separation, 0*u.km, atol=self.tolerance) def test_from_icrs(self): # scalar positions transformed = self.sun_icrs_scalar.transform_to(HCRS(obstime=self.t1)) separation = transformed.separation_3d(self.sun_hcrs_t1) assert_allclose(separation, 0*u.km, atol=self.tolerance) # nonscalar positions transformed = self.sun_icrs_arr.transform_to(HCRS(obstime=self.tarr)) separation = transformed.separation_3d(self.sun_hcrs_tarr) assert_allclose(separation, 0*u.km, atol=self.tolerance) class TestHelioBaryCentric(): """ Check GCRS<->Heliocentric and Barycentric coordinate conversions. Uses the WHT observing site (information grabbed from data/sites.json). 
""" def setup(self): wht = EarthLocation(342.12*u.deg, 28.758333333333333*u.deg, 2327*u.m) self.obstime = Time("2013-02-02T23:00") self.wht_itrs = wht.get_itrs(obstime=self.obstime) def test_heliocentric(self): gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime)) helio = gcrs.transform_to(HCRS(obstime=self.obstime)) # Check it doesn't change from previous times. previous = [-1.02597256e+11, 9.71725820e+10, 4.21268419e+10] * u.m assert_allclose(helio.cartesian.xyz, previous) # And that it agrees with SLALIB to within 14km helio_slalib = [-0.685820296, 0.6495585893, 0.2816005464] * u.au assert np.sqrt(((helio.cartesian.xyz - helio_slalib)**2).sum()) < 14. * u.km def test_barycentric(self): gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime)) bary = gcrs.transform_to(ICRS()) previous = [-1.02758958e+11, 9.68331109e+10, 4.19720938e+10] * u.m assert_allclose(bary.cartesian.xyz, previous) # And that it agrees with SLALIB answer to within 14km bary_slalib = [-0.6869012079, 0.6472893646, 0.2805661191] * u.au assert np.sqrt(((bary.cartesian.xyz - bary_slalib)**2).sum()) < 14. 
* u.km def test_lsr_sanity(): # random numbers, but zero velocity in ICRS frame icrs = ICRS(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc, pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, radial_velocity=0*u.km/u.s) lsr = icrs.transform_to(LSR) lsr_diff = lsr.data.differentials['s'] cart_lsr_vel = lsr_diff.represent_as(CartesianRepresentation, base=lsr.data) lsr_vel = ICRS(cart_lsr_vel) gal_lsr = lsr_vel.transform_to(Galactic).cartesian.xyz assert allclose(gal_lsr.to(u.km/u.s, u.dimensionless_angles()), lsr.v_bary.d_xyz) # moving with LSR velocity lsr = LSR(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc, pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, radial_velocity=0*u.km/u.s) icrs = lsr.transform_to(ICRS) icrs_diff = icrs.data.differentials['s'] cart_vel = icrs_diff.represent_as(CartesianRepresentation, base=icrs.data) vel = ICRS(cart_vel) gal_icrs = vel.transform_to(Galactic).cartesian.xyz assert allclose(gal_icrs.to(u.km/u.s, u.dimensionless_angles()), -lsr.v_bary.d_xyz)
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@coordinates@tests@test_celestial_transformations.py@.PATH_END.py
{ "filename": "analyze.py", "repo_name": "jmd-dk/concept", "repo_path": "concept_extracted/concept-master/test/fluid_drift_rigid/analyze.py", "type": "Python" }
# This file has to be run in pure Python mode! # Imports from the CO𝘕CEPT code from commons import * from snapshot import load import species plt = get_matplotlib().pyplot # Absolute path and name of this test this_dir = os.path.dirname(os.path.realpath(__file__)) this_test = os.path.basename(os.path.dirname(this_dir)) # Read in data from the CO𝘕CEPT snapshots species.allow_similarly_named_components = True fluid_components = [] particle_components = [] a = [] for fname in sorted( glob(f'{this_dir}/output/snapshot_a=*'), key=(lambda s: s[(s.index('=') + 1):]), ): snapshot = load(fname, compare_params=False) for component in snapshot.components: if component.representation == 'fluid': fluid_components.append(component) elif component.representation == 'particles': particle_components.append(component) a.append(snapshot.params['a']) gridsize = fluid_components[0].gridsize N = particle_components[0].N N_snapshots = len(a) # Sort data chronologically order = np.argsort(a) a = [a[o] for o in order] fluid_components = [fluid_components[o] for o in order] particle_components = [particle_components[o] for o in order] # Begin analysis masterprint(f'Analysing {this_test} data ...') # Extract ϱ(x) of fluids and y(x) of particles. # To compare ϱ to y, a scaling is needed. # Since the x's in ϱ(x) are discretised, but the x's in y(x) are not, # we interpolate y to the discretised x-values. x_fluid = asarray([boxsize*i/gridsize for i in range(gridsize)]) ϱ = [] y = [] y_interp = [] for fluid, particles in zip(fluid_components, particle_components): ϱ.append(fluid.ϱ.grid_noghosts[:gridsize, 0, 0]) y_i = particles.posy.copy() A_fluid = 0.5*(max(ϱ[0]) - min(ϱ[0])) offset_fluid = 0.5*(max(ϱ[0]) + min(ϱ[0])) A_particles = 0.5*(max(y_i) - min(y_i)) offset_particles = 0.5*(max(y_i) + min(y_i)) y_i -= offset_particles y_i *= A_fluid/A_particles y_i += offset_fluid y.append(y_i) # Interpolation is made by a simple polynomial fit, # but with a large order. 
order = 15 y_interp.append(np.polyval(np.polyfit(particles.posx, y_i, order), x_fluid)) # Plot fig_file = f'{this_dir}/result.png' fig, axes = plt.subplots(N_snapshots, sharex=True, figsize=(8, 3*N_snapshots)) for ax, particles, ϱ_i, y_i, y_interp_i, a_i in zip( axes, particle_components, ϱ, y, y_interp, a, ): indices_sorted = np.argsort(particles.posx) index_min = np.argmin(particles.posx) index_max = np.argmax(particles.posx) ax.plot( np.concatenate(( [max(particles.posx) - boxsize], particles.posx[indices_sorted], [min(particles.posx) + boxsize], )), np.concatenate(( [y_i[index_max]], y_i[indices_sorted], [y_i[index_min]], )), '-', label='Particle simulation', ) ax.plot(x_fluid, ϱ_i, '.', markersize=10, alpha=0.7, label='Fluid simulation') ax.set_ylabel( 'scaled and shifted $y$,\n' r'$\varrho$ $\mathrm{{[{}\,m_{{\odot}}\,{}^{{-3}}]}}$' .format( significant_figures( 1/units.m_sun, 3, fmt='TeX', incl_zeros=False, ), unit_length, ) ) ax.set_title(rf'$a={a_i:.3g}$') axes[ 0].set_xlim(0, boxsize) axes[-1].set_xlabel(rf'$x\,\mathrm{{[{unit_length}]}}$') axes[ 0].legend() fig.tight_layout() fig.savefig(fig_file, dpi=150) # Fluid elements in yz-slices should all have the same ϱ # and all fluid elements should have the same u = J/ϱ. tol_fac_ϱ = 1e-6 tol_fac_u = 1e-3 for fluid, a_i in zip(fluid_components, a): for fluidscalar in fluid.iterate_fluidscalars(): varnum = fluidscalar.varnum grid = fluidscalar.grid_noghosts[:gridsize, :gridsize, :gridsize] if varnum == 0: # ϱ ϱ_grid = grid for i in range(gridsize): yz_slice = grid[i, :, :] if not isclose( np.std(yz_slice), 0, rel_tol=0, abs_tol=max((tol_fac_ϱ*np.std(grid), 1e+1*gridsize**2*machine_ϵ)), ): abort( f'Non-uniformities have emerged at a = {a_i} ' f'in yz-slices of fluid scalar variable {fluidscalar}.\n' f'See "{fig_file}" for a visualization.' 
) elif varnum == 1: # J u_grid = grid/ϱ_grid if not isclose( np.std(u_grid), 0, rel_tol=0, abs_tol=(tol_fac_u*abs(np.mean(u_grid)) + machine_ϵ), ): abort( f'Non-uniformities have emerged at a = {a_i} ' f'in fluid scalar variable {fluidscalar}' ) # Compare ϱ to the fluid from the snapshots tol_fac = 0.02 for ϱ_i, y_interp_i, a_i in zip(ϱ, y_interp, a): if not isclose( mean(abs(ϱ_i - y_interp_i)), 0, rel_tol=0, abs_tol=(tol_fac*np.std(ϱ_i) + machine_ϵ), ): abort( f'Fluid drift differs from particle drift at a = {a_i:.3g}.\n' f'See "{fig_file}" for a visualization.' ) # Done analysing masterprint('done')
jmd-dkREPO_NAMEconceptPATH_START.@concept_extracted@concept-master@test@fluid_drift_rigid@analyze.py@.PATH_END.py
{ "filename": "plot_gmm.py", "repo_name": "scikit-learn/scikit-learn", "repo_path": "scikit-learn_extracted/scikit-learn-main/examples/mixture/plot_gmm.py", "type": "Python" }
""" ================================= Gaussian Mixture Model Ellipsoids ================================= Plot the confidence ellipsoids of a mixture of two Gaussians obtained with Expectation Maximisation (``GaussianMixture`` class) and Variational Inference (``BayesianGaussianMixture`` class models with a Dirichlet process prior). Both models have access to five components with which to fit the data. Note that the Expectation Maximisation model will necessarily use all five components while the Variational Inference model will effectively only use as many as are needed for a good fit. Here we can see that the Expectation Maximisation model splits some components arbitrarily, because it is trying to fit too many components, while the Dirichlet Process model adapts it number of state automatically. This example doesn't show it, as we're in a low-dimensional space, but another advantage of the Dirichlet process model is that it can fit full covariance matrices effectively even when there are less examples per cluster than there are dimensions in the data, due to regularization properties of the inference algorithm. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import itertools import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from scipy import linalg from sklearn import mixture color_iter = itertools.cycle(["navy", "c", "cornflowerblue", "gold", "darkorange"]) def plot_results(X, Y_, means, covariances, index, title): splot = plt.subplot(2, 1, 1 + index) for i, (mean, covar, color) in enumerate(zip(means, covariances, color_iter)): v, w = linalg.eigh(covar) v = 2.0 * np.sqrt(2.0) * np.sqrt(v) u = w[0] / linalg.norm(w[0]) # as the DP will not use every component it has access to # unless it needs it, we shouldn't plot the redundant # components. 
if not np.any(Y_ == i): continue plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 0.8, color=color) # Plot an ellipse to show the Gaussian component angle = np.arctan(u[1] / u[0]) angle = 180.0 * angle / np.pi # convert to degrees ell = mpl.patches.Ellipse(mean, v[0], v[1], angle=180.0 + angle, color=color) ell.set_clip_box(splot.bbox) ell.set_alpha(0.5) splot.add_artist(ell) plt.xlim(-9.0, 5.0) plt.ylim(-3.0, 6.0) plt.xticks(()) plt.yticks(()) plt.title(title) # Number of samples per component n_samples = 500 # Generate random sample, two components np.random.seed(0) C = np.array([[0.0, -0.1], [1.7, 0.4]]) X = np.r_[ np.dot(np.random.randn(n_samples, 2), C), 0.7 * np.random.randn(n_samples, 2) + np.array([-6, 3]), ] # Fit a Gaussian mixture with EM using five components gmm = mixture.GaussianMixture(n_components=5, covariance_type="full").fit(X) plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0, "Gaussian Mixture") # Fit a Dirichlet process Gaussian mixture using five components dpgmm = mixture.BayesianGaussianMixture(n_components=5, covariance_type="full").fit(X) plot_results( X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1, "Bayesian Gaussian Mixture with a Dirichlet process prior", ) plt.show()
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@examples@mixture@plot_gmm.py@.PATH_END.py
{ "filename": "_hoverinfo.py", "repo_name": "plotly/plotly.py", "repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolargl/_hoverinfo.py", "type": "Python" }
import _plotly_utils.basevalidators class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator): def __init__(self, plotly_name="hoverinfo", parent_name="scatterpolargl", **kwargs): super(HoverinfoValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, array_ok=kwargs.pop("array_ok", True), edit_type=kwargs.pop("edit_type", "none"), extras=kwargs.pop("extras", ["all", "none", "skip"]), flags=kwargs.pop("flags", ["r", "theta", "text", "name"]), **kwargs, )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterpolargl@_hoverinfo.py@.PATH_END.py
{ "filename": "sim4D.ipynb", "repo_name": "CRPropa/CRPropa3", "repo_path": "CRPropa3_extracted/CRPropa3-master/doc/pages/example_notebooks/sim4D/sim4D.ipynb", "type": "Jupyter Notebook" }
# 4D Simulation The following is a simple 4D simulation where cosmic rays are emitted by a source at a specified spatial position at a specified time-point. A cosmic ray is detected if it arrives at the observer position within a specified time window. **Note:** In CRPropa, time is always expressed in terms of redshift $z$, whereas positions are always expressed in terms of comoving coordinates as Cartesian 3-vectors. ### Simulation setup The simulation setup is that of a 3D simulation with a few additions: 1. We add a source property for the redshift at emission. This can be either ```SourceRedshift```, ```SourceUniformRedshift``` or ```SourceRedshiftEvolution```. 2. The simulation module ```FutureRedshift``` implements adiabatic energy loss and updates the redshift. In contrast to ```Redshift``` it allows particles to be propagated into the future $z < 0$ which enables faster convergence for finite observation windows. 3. The observer feature ```ObserverRedshiftWindow``` specifies a time window $z_\mathrm{min} < z < z_\mathrm{max}$ in which particles are detected if they hit the observer. Note that this can also be done after the simulation by cutting on the redshifts at observation. For this we also output the current redshift at observation. 4. A minimum redshift is defined via MinimumRedshift which we set to the lower bound of the observer time window. ### Periodic boundaries Due to the additional time dimension, particles are detected much less often. In order to increase the otherwhise horrible simulation efficiency, a ```PeriodicBox``` is defined: Particles that leave this simulation volume, enter again from the opposite side and their source position is moved accordingly. As a result the periodic boundaries keep the particles close to the observer and therefore increase the chance of detection. A careful setup is required however: 1. Sources should only be defined inside the volume as sources outside are filled up by the periodic conditions. 2. 
The magnetic field at the boundaries should be periodic as well. This is the case for ```initTurbulence``` as long as the simulation volume coincides with (multiples of) the magnetic field grid. ### Source positions In the example below, a single source is defined. For specifying multiple identical discrete sources ```SourceMultiplePositions``` can be used. Multiple non-identical sources can be added to a ```SourceList```. For continous source distributions ```SourceUniformSphere```, ```SourceUniformBox``` and ```SourceUniformCylinder``` can be used. ```SourceDensityGrid``` allows to specify a source distribution via a 3D grid. ### Note: This simulation may take **several** minutes. ```python from crpropa import * # set up random turbulent field Brms = 1 * nG lMin = 60 * kpc lMax = 800 * kpc sIndex = 5./3. turbSpectrum = SimpleTurbulenceSpectrum(Brms, lMin, lMax, sIndex) gridprops = GridProperties(Vector3d(0), 256, 30 * kpc) Bfield = SimpleGridTurbulence(turbSpectrum, gridprops, 42) # simulation setup sim = ModuleList() sim.add(PropagationCK(Bfield)) sim.add(FutureRedshift()) #sim.add(FutureRedshift()) # Switch back to FutureRedshift when PR #416 is merged sim.add(Redshift()) sim.add(PhotoPionProduction(CMB())) sim.add(PhotoPionProduction(IRB_Kneiske04())) sim.add(PhotoDisintegration(CMB())) sim.add(PhotoDisintegration(IRB_Kneiske04())) sim.add(ElectronPairProduction(CMB())) sim.add(ElectronPairProduction(IRB_Kneiske04())) sim.add(NuclearDecay()) sim.add(MinimumEnergy(1 * EeV)) #sim.add(MinimumRedshift(-0.1)) # Switch back to z_min=-0.1 when PR #416 is merged sim.add(MinimumRedshift(0.)) # periodic boundaries extent = 256 * 30 * kpc # size of the magnetic field grid sim.add(PeriodicBox(Vector3d(-extent), Vector3d(2 * extent))) # define the observer obs = Observer() obs.add(ObserverSurface( Sphere(Vector3d(0.), 0.5 * Mpc))) obs.add(ObserverRedshiftWindow(-0.1, 0.1)) output = TextOutput('output.txt', Output.Event3D) output.enable(output.RedshiftColumn) 
obs.onDetection(output) sim.add(obs) # define the source(s) source = Source() source.add(SourcePosition(Vector3d(10, 0, 0) * Mpc)) source.add(SourceIsotropicEmission()) source.add(SourceParticleType(nucleusId(1, 1))) source.add(SourcePowerLawSpectrum(1 * EeV, 200 * EeV, -1)) source.add(SourceRedshiftEvolution(1.5, 0.001, 3)) # run simulation sim.setShowProgress(True) sim.run(source, 10000) output.close() ``` crpropa::ModuleList: Number of Threads: 16 Run ModuleList Started Tue Jan 9 10:49:16 2024 : [ Finished ] 100% Needed: 00:00:34 - Finished at Tue Jan 9 10:49:50 2024 ```python columnnames=['D', 'z', 'ID', 'E', 'X', 'Y', 'Z', 'Px', 'Py', 'Pz','ID0', 'E0', 'X0', 'Y0', 'Z0', 'P0x', 'P0y', 'P0z'] types = [float] * len(columnnames) import numpy as np data = np.loadtxt('./output.txt', dtype={'names': columnnames, 'formats': types}) ``` ```python import matplotlib.pyplot as plt bins = np.linspace(-0.1,0.1, 10) plt.hist(data['z'], bins=bins, histtype='step') plt.xlabel(r'observed redshift $z$') plt.show() ``` ![png](output_4_0.png) ```python ```
CRPropaREPO_NAMECRPropa3PATH_START.@CRPropa3_extracted@CRPropa3-master@doc@pages@example_notebooks@sim4D@sim4D.ipynb@.PATH_END.py
{ "filename": "Sherlock_observation_planning_documentation.ipynb", "repo_name": "franpoz/SHERLOCK", "repo_path": "SHERLOCK_extracted/SHERLOCK-master/examples/Observation_planning/Sherlock_observation_planning_documentation.ipynb", "type": "Jupyter Notebook" }
# Documentation for the Sherlockpipe's observation planning tool In this document we will give an example of how to use the observation planning tool of Sherlock by applying it to TIC 2527981. This star was observed by TESS in its sector 27. ## Setup To generate an observation plan for your target, you must already have done a fit of your signal using SHERLOCK and go in the resulting folder (usually named "fit_0"). In this part we will briefly do a quick recap on how we get there. First we need a .yaml file with the properties of our target. Here is the one used in our case, named *input.yaml*: ``` TARGETS: 'TIC 2527981': SECTORS: [27] AUTO_DETREND_ENABLED: True INITIAL_SMOOTH_ENABLED: True INITIAL_HIGH_RMS_MASK: True INITIAL_HIGH_RMS_THRESHOLD: 1.5 DETREND_METHOD: 'biweight' DETRENDS_NUMBER: 12 DETREND_CORES: 80 MAX_RUNS: 4 SNR_MIN: 6 SDE_MIN: 7 CPU_CORES: 80 OVERSAMPLING: 3 ``` Then we initiate the run with the line: `nice -15 python3 -m sherlockpipe --properties input.yaml` _Yes, we even "nice" it to be cool with our colleagues sharing the same clusters !_ ---------------------------- After the run, we get an output folder, called mmmmmmmmmmm, where all the results appear. We will fit the first candidate mmmmmmm add image You must also have a csv file containing basic informations about the (ground) observatories you want to consider, such as this : ``` name,tz,lat,lon,alt Trappist-North,,31.2061,-7.8664,2751 Speculoos-South,,-24.6272,-70.4042,2518 Extremely-Great-Gigantic-Prodigiously-Large-and-Abusively-Notorious-Telescope,,51.385362, -68.711408,42 ``` The parameters are defined as: 1. name : name of the observatory (call it whatever makes it for you, it's not regulated). 2. tz : the time zone of the observatory, you can leave it empty, SHERLOCK gets it by itself. 3. lat : Observatory's latitude 4. lon : Observatory's longitude 5. 
alt : Observatory's altitude Once you have these files, you can execute the planning module of SHERLOCK with this line :\ `python3 -m sherlockpipe.plan --observatories Observatories.csv` If you encounter any issue, please refer to the "Troubleshooting" file. It is still at the draft state, as we need your bugs to expand it :)\ If your error is not solved in the "Troubleshooting" file, please let us know about it, so we can work on a patch ! ## Output During the execution, SHERLOCK will create a "plan" folder in which you will find two files, one csv and one pdf. The csv file contains the following informations: - observatory : observatory's name as you defined it. - timezone : time zone of the observatory. - start_obs : date and time where the observation would start. Format is yyyy-mm-dd for the date, then "T" for the time, formated hh:mm:ss.sss in 24h format. - end_obs : date and time where the observation would end, same format as for "start_obs". - ingress : time where the transit should begin (best estimation), same format as for "start_obs". - egress : time where the transit should end (best estimation), same format as for "start_obs". - midtime : middle time of the transit (best estimation), same format as for "start_obs". - midtime_up_err_h : maximum time deviation from the midtime, in hours (?) mmmmmmmmmmmm. - midtime_low_err_h : mmmmmmmmm deviation from the midtime, in hours. - twilight_evening : earliest time at which the observation can start, same format as for "start_obs". - twilight_morning : Latest time at which the observation can end, same format as for "start_obs". - observable : Minimum fraction of the transit that must be observable to consider an observation. - moon_phase : Phase of the Moon, from 0 (new Moon) to 1 (full Moon). - moon_dist : Angular distance between the Moon and the target, in degrees. In the pdf file, you will find a quick recap of the targeted star, signal, few key parameters for the observation and the observatories. 
After that, begin a large table containing all the elements required to schedule an observation, along with small visual interpretation of the conditions of the observations. The first column "Observatory" is the name of the observatory as you defined it with the second column "TZ" its time zone. The third one, "Event times", gives the key times for the observation such as : - TWE : "Twilight Evening", time in the evening from when an observation is possible. - SO : Start of the observation. - I : Expected time of ingress (begining of the transit). - M : Expected time of the middle time of the transit. - E : Expected time of the egress (end of the transit). - EO : End of the observation. - TWM : "Twilight Morning", time in the morning until whent an observation is possible. The next column, "TT Error" gives the error margins for the time where the transit should happen, in hours. "Moon" gives a recap of the state of the moon durring the observation night, with first its phase (in %) and then its angular distance to the target (in °). Then comes the "Image" column, where there is a lot to say. The abscice is the time which is not visually quantified as the values are in the column "Event times"). The background shows when it is the night (grey) or day (white). The blue line is a visualisation of the elevation of the target, with the values on the right axis in degrees and the air mass on the left. The bottom green patch is the part of the sky where the target would be too low to observe. Vertical lines are : - Black : Expected time of the middle time of the transit. - Orange : Expected times of the ingress and egress. - Pink/violet : Start and end of the observation. - Red : Temporal incertainity for the ingres (left line) and egress (right line).
franpozREPO_NAMESHERLOCKPATH_START.@SHERLOCK_extracted@SHERLOCK-master@examples@Observation_planning@Sherlock_observation_planning_documentation.ipynb@.PATH_END.py
{ "filename": "librarian.py", "repo_name": "jvines/astroARIADNE", "repo_path": "astroARIADNE_extracted/astroARIADNE-master/astroARIADNE/librarian.py", "type": "Python" }
# @auto-fold regex /^\s*if/ /^\s*else/ /^\s*elif/ /^\s*def/ """Helper class to look up broadband photometry and stellar parameters.""" __all__ = ['Librarian'] import os import sys import warnings import astropy.units as u import numpy as np from astropy.coordinates import SkyCoord from astropy.utils.exceptions import AstropyWarning from astropy.table import Table from astroquery.gaia import Gaia from astroquery.mast import Catalogs from astroquery.vizier import Vizier from astroquery.xmatch import XMatch from regions import CircleSkyRegion from .error import CatalogWarning from .config import filter_names warnings.filterwarnings('ignore', category=UserWarning, append=True) warnings.filterwarnings('ignore', category=AstropyWarning, append=True) Vizier.ROW_LIMIT = -1 Vizier.columns = ['all'] Catalogs.ROW_LIMIT = -1 Catalogs.columns = ['all'] class Librarian: """Class that handles querying for photometry and astrometry data.""" # pyphot filter names filter_names = filter_names # Catalogs magnitude names __apass_mags = ['Vmag', 'Bmag', 'g_mag', 'r_mag', 'i_mag'] __apass_errs = ['e_Vmag', 'e_Bmag', 'e_g_mag', 'e_r_mag', 'e_i_mag'] __apass_filters = ['GROUND_JOHNSON_V', 'GROUND_JOHNSON_B', 'SDSS_g', 'SDSS_r', 'SDSS_i'] __ascc_mags = ['Vmag', 'Bmag'] # , 'Jmag', 'Hmag', 'Kmag'] __ascc_errs = ['e_Vmag', 'e_Bmag'] # , 'e_Jmag', 'e_Hmag', 'e_Kmag'] __ascc_filters = ['GROUND_JOHNSON_V', 'GROUND_JOHNSON_B'] # '2MASS_J', '2MASS_H', '2MASS_Ks'] __wise_mags = ['W1mag', 'W2mag'] __wise_errs = ['e_W1mag', 'e_W2mag'] __wise_filters = ['WISE_RSR_W1', 'WISE_RSR_W2'] __ps1_mags = ['gmag', 'rmag', 'imag', 'zmag', 'ymag'] __ps1_errs = ['e_gmag', 'e_rmag', 'e_imag', 'e_zmag', 'e_ymag'] __ps1_filters = ['PS1_g', 'PS1_r', 'PS1_i', 'PS1_z', 'PS1_y'] __tmass_mags = ['Jmag', 'Hmag', 'Kmag'] __tmass_errs = ['e_Jmag', 'e_Hmag', 'e_Kmag'] __tmass_filters = ['2MASS_J', '2MASS_H', '2MASS_Ks'] __gaia_mags = ['Gmag', 'BPmag', 'RPmag'] __gaia_errs = ['e_Gmag', 'e_BPmag', 'e_RPmag'] __gaia_filters = 
['GaiaDR2v2_G', 'GaiaDR2v2_BP', 'GaiaDR2v2_RP'] # __sdss_mags = ['gmag', 'rmag', 'imag'] __sdss_mags = ['umag', 'gmag', 'rmag', 'imag', 'zmag'] # __sdss_errs = ['e_gmag', 'e_rmag', 'e_imag'] __sdss_errs = ['e_umag', 'e_gmag', 'e_rmag', 'e_imag', 'e_zmag'] # __sdss_filters = ['SDSS_g', 'SDSS_r', 'SDSS_i'] __sdss_filters = ['SDSS_u', 'SDSS_g', 'SDSS_r', 'SDSS_i', 'SDSS_z'] __galex_mags = ['FUV', 'NUV'] __galex_errs = ['e_FUV', 'e_NUV'] __galex_filters = ['GALEX_FUV', 'GALEX_NUV'] __irac_mags = ['_3.6mag', '_4.5mag'] __irac_errs = ['e_3.6mag', 'e_4.5mag'] __irac_filters = ['SPITZER_IRAC_36', 'SPITZER_IRAC_45'] __tycho_mags = ['BTmag', 'VTmag'] __tycho_errs = ['e_BTmag', 'e_VTmag'] __tycho_filters = ['TYCHO_B_MvB', 'TYCHO_V_MvB'] __tess_mags = ['Tmag'] __tess_errs = ['e_Tmag'] __tess_filters = ['TESS'] __skymapper_mags = ['uPSF', 'vPSF', 'gPSF', 'rPSF', 'iPSF', 'zPSF'] __skymapper_errs = ['e_uPSF', 'e_vPSF', 'e_gPSF', 'e_rPSF', 'e_iPSF', 'e_zPSF'] __skymapper_filters = ['SkyMapper_u', 'SkyMapper_v', 'SkyMapper_g', 'SkyMapper_r', 'SkyMapper_i', 'SkyMapper_z'] # APASS DR9, WISE, PAN-STARRS DR1, GAIA DR2, 2MASS, SDSS DR9 catalogs = { 'APASS': [ 'II/336/apass9', list(zip(__apass_mags, __apass_errs, __apass_filters)) ], 'Wise': [ 'II/328/allwise', list(zip(__wise_mags, __wise_errs, __wise_filters)) ], 'Pan-STARRS': [ 'II/349/ps1', list(zip(__ps1_mags, __ps1_errs, __ps1_filters)) ], 'Gaia': [ 'I/355/gaiadr3', list(zip(__gaia_mags, __gaia_errs, __gaia_filters)) ], '2MASS': [ 'II/246/out', list(zip(__tmass_mags, __tmass_errs, __tmass_filters)) ], 'SDSS': [ 'V/147/sdss12', list(zip(__sdss_mags, __sdss_errs, __sdss_filters)) ], 'GALEX': [ 'II/312/ais', list(zip(__galex_mags, __galex_errs, __galex_filters)) ], 'ASCC': [ 'I/280B/ascc', list(zip(__ascc_mags, __ascc_errs, __ascc_filters)) ], 'TYCHO2': [ 'I/259/tyc2', list(zip(__tycho_mags, __tycho_errs, __tycho_filters)) ], 'GLIMPSE': [ 'II/293/glimpse', list(zip(__irac_mags, __irac_errs, __irac_filters)) ], 'TESS': [ 'TIC', 
list(zip(__tess_mags, __tess_errs, __tess_filters)) ], 'SkyMapper': [ 'II/358/smss', list(zip(__skymapper_mags, __skymapper_errs, __skymapper_filters)) ], 'STROMGREN_PAUNZ': [ 'J/A+A/580/A23/catalog', -1 ], 'STROMGREN_HAUCK': [ 'II/215/catalog', -1 ], 'MERMILLIOD': [ 'II/168/ubvmeans', -1 ], } def __init__(self, starname, ra, dec, radius=None, g_id=None, mags=True, ignore=None): self.starname = starname self.ra = ra self.dec = dec self.ignore = ignore if ignore is not None else [] self.tic = None self.kic = None self.ids = [] self.dr2_id = None self.used_filters = np.zeros(self.filter_names.shape[0]) self.mags = np.zeros(self.filter_names.shape[0]) self.mag_errs = np.zeros(self.filter_names.shape[0]) # self.create_logfile() if radius is None: self.radius = 3 * u.arcmin else: self.radius = radius if g_id is None: print('No Gaia ID provided. Searching for nearest source.') self.g_id = self._get_gaia_id(self.ra, self.dec, self.radius) print('Gaia ID found: {0}'.format(self.g_id)) else: self.g_id = g_id self.gaia_params() if mags: self.gaia_query() self.get_magnitudes() idx = self.used_filters >= 1 self.used_filters[idx] = 1 # self.close_logfile() pass def gaia_params(self): """Retrieve parallax, radius, teff and lum from Gaia.""" # If gaia DR3 id is provided, query by id query = f""" SELECT dr3.parallax, dr3.parallax_error, dr3.pmra, dr3.pmra_error, dr3.pmdec, dr3.pmdec_error, dr3.radial_velocity, dr3.radial_velocity_error, dr2.teff_val, dr2.teff_percentile_lower, dr2.teff_percentile_upper, dr2.radius_val, dr2.radius_percentile_lower, dr2.radius_percentile_upper, dr2.lum_val, dr2.lum_percentile_lower, dr2.lum_percentile_upper, dr2.source_id2 AS source_id FROM gaiadr3.gaia_source AS dr3 JOIN (SELECT n.dr3_source_id AS source_id, n.dr2_source_id AS source_id2, dr2.teff_val, dr2.teff_percentile_lower, dr2.teff_percentile_upper, dr2.radius_val, dr2.radius_percentile_lower, dr2.radius_percentile_upper, dr2.lum_val, dr2.lum_percentile_lower, dr2.lum_percentile_upper FROM 
gaiadr3.dr2_neighbourhood AS n JOIN gaiadr2.gaia_source AS dr2 ON n.dr2_source_id = dr2.source_id WHERE n.dr3_source_id = {self.g_id} ) AS dr2 ON dr3.source_id = dr2.source_id WHERE dr3.source_id = {self.g_id} """ j = Gaia.launch_job_async(query) res = j.get_results() if len(res) > 1: res = Table.from_pandas(res.to_pandas().dropna()) self.dr2_id = res['source_id'][0] self.plx, self.plx_e = self._get_parallax(res) self.temp, self.temp_e = self._get_teff(res) self.rad, self.rad_e = self._get_radius(res) self.lum, self.lum_e = self._get_lum(res) self.dist, self.dist_e = self._get_distance(self.ra, self.dec, self.radius, self.g_id) pass def gaia_query(self): """Query Gaia to get different catalog IDs.""" cats = ['tycho2', 'panstarrs1', 'sdssdr9', 'allwise', 'tmass', 'apassdr9'] names = ['tycho', 'ps', 'sdss', 'allwise', 'tmass', 'apass'] IDS = { 'TYCHO2': '', 'APASS': '', '2MASS': '', 'Pan-STARRS': '', 'SDSS': '', 'Wise': '', 'Gaia': self.g_id, 'SkyMapper': self.g_id, } for c, n in zip(cats, names): if c == 'apassdr9': cat = 'APASS' elif c == 'tmass': cat = '2MASS' c = 'tmass' elif c == 'panstarrs1': cat = 'Pan-STARRS' elif c == 'sdssdr9': cat = 'SDSS' elif c == 'allwise': cat = 'Wise' elif c == 'tycho2': cat = 'TYCHO2' if cat in self.ignore: IDS[cat] = 'skipped' CatalogWarning(cat, 7).warn() continue query = f""" SELECT {n}.original_ext_source_id FROM gaiadr2.gaia_source AS gaia JOIN gaiadr2.{c}_best_neighbour AS {n} ON gaia.source_id={n}.source_id WHERE gaia.source_id={self.dr2_id} """ j = Gaia.launch_job_async(query) r = j.get_results() if len(r): IDS[cat] = r[0][0] else: IDS[cat] = 'skipped' print('Star not found in catalog ' + cat, end='.\n') IDS['GALEX'] = '' IDS['TESS'] = '' IDS['MERMILLIOD'] = '' IDS['STROMGREN_PAUNZ'] = '' IDS['STROMGREN_HAUCK'] = '' self.ids = IDS def get_magnitudes(self): """Retrieve the magnitudes of the star. 
Looks into APASS, WISE, Pan-STARRS, Gaia, 2MASS and SDSS surveys looking for different magnitudes for the star, along with the associated uncertainties. """ print('Looking online for archival magnitudes for star', end=' ') print(self.starname) catalogs = [c[1][0] for c in self.catalogs.items()] cats = self.get_catalogs(self.ra, self.dec, self.radius, catalogs) skips = ['ASCC', 'GLIMPSE'] for c in self.catalogs.keys(): if c in skips: continue if c in self.ignore: CatalogWarning(c, 7).warn() continue if self.ids[c] == 'skipped': continue if c != 'TESS': try: current_cat = cats[self.catalogs[c][0]] current_cat.sort('_r') except TypeError: CatalogWarning(c, 5).warn() continue else: self._retrieve_from_tess() continue if c == 'APASS': self._get_apass(current_cat) continue elif c == 'Wise': self._get_wise(current_cat) continue elif c == 'TYCHO2': self._get_ascc_tycho2_stromgren(cats, False, 'TYCHO2') self._get_ascc_tycho2_stromgren(cats, False, 'ASCC') continue elif c == 'SDSS': self._get_sdss(current_cat) continue elif c == 'Pan-STARRS': self._get_ps1(current_cat) continue elif c == 'Gaia': self._get_gaia(current_cat) continue elif c == '2MASS': self._get_2mass_glimpse(cats, '2MASS') self._get_2mass_glimpse(cats, 'GLIMPSE') continue elif c == 'GALEX': current_cat = self._gaia_galex_xmatch(cats, self.ra, self.dec, self.radius) if len(current_cat) == 0: CatalogWarning(c, 5).warn() continue self._retrieve_from_galex(current_cat, c) continue elif c == 'SkyMapper': self._get_skymapper(current_cat) elif c == 'MERMILLIOD': current_cat = self._gaia_mermilliod_xmatch(self.ra, self.dec, self.radius) if len(current_cat) == 0: CatalogWarning(c, 5).warn() continue self._retrieve_from_mermilliod(current_cat) continue elif c == 'STROMGREN_PAUNZ': current_cat = self._gaia_paunzen_xmatch(self.ra, self.dec, self.radius) if len(current_cat) == 0: CatalogWarning(c, 5).warn() continue self._retrieve_from_stromgren(current_cat, 'STROMGREN_PAUNZEN') continue elif c == 'STROMGREN_HAUCK': 
current_cat = self._gaia_hauck_xmatch(self.ra, self.dec, self.radius) if len(current_cat) == 0: CatalogWarning(c, 5).warn() continue self._retrieve_from_stromgren(current_cat, 'STROMGREN_HAUCK') continue pass def _retrieve_from_tess(self): print('Checking catalog TICv8') tic = self.get_TIC(self.ra, self.dec, self.radius) tic.sort('dstArcSec') mask = tic['GAIA'] == str(self.g_id) cat = tic[mask] if len(cat) > 0: is_star = cat['objType'][0] == 'STAR' if not is_star: CatalogWarning('TESS', 8).warn() return self.tic = int(cat['ID'][0]) kic = cat['KIC'][0] self.kic = int(kic) if not np.ma.is_masked(kic) else None m, e, f = self.catalogs['TESS'][1][0] filt_idx = np.where(f == self.filter_names)[0] if self.used_filters[filt_idx] == 1: CatalogWarning(f, 6).warn() return mag = cat[m][0] err = cat[e][0] if not self._qc_mags(mag, err, m): return self._add_mags(mag, err, f) else: CatalogWarning('TIC', 5).warn() def _retrieve_from_cat(self, cat, name): if len(cat): for m, e, f in self.catalogs[name][1]: filt_idx = np.where(f == self.filter_names)[0] if self.used_filters[filt_idx] == 1: CatalogWarning(f, 6).warn() continue mag = cat[m][0] err = cat[e][0] if not self._qc_mags(mag, err, m): continue self._add_mags(mag, err, f) else: CatalogWarning(name, 5).warn() def _retrieve_from_mermilliod(self, cat): print('Checking catalog Mermilliod') mask = cat['source_id'] == self.dr2_id cat = cat[mask][0] v = cat['Vmag'] v_e = cat['e_Vmag'] bv = cat['B-V'] bv_e = cat['e_B-V'] ub = cat['U-B'] ub_e = cat['e_U-B'] if not self._qc_mags(v, v_e, 'vmag'): return filts = ['GROUND_JOHNSON_V'] mags = [v] err = [v_e] if self._qc_mags(bv, bv_e, 'B-V'): b = bv + v b_e = np.sqrt(v_e ** 2 + bv_e ** 2) filts.append('GROUND_JOHNSON_B') mags.append(b) err.append(b_e) if self._qc_mags(ub, ub_e, 'U-B'): u = ub + b u_e = np.sqrt(b_e ** 2 + ub_e ** 2) filts.append('GROUND_JOHNSON_U') mags.append(u) err.append(u_e) for m, e, f in zip(mags, err, filts): filt_idx = np.where(f == self.filter_names)[0] if 
self.used_filters[filt_idx] == 1: CatalogWarning(f, 6).warn() continue self._add_mags(m, e, f) def _retrieve_from_stromgren(self, cat, n): print('Checking catalog ' + n) mask = cat['source_id'] == self.dr2_id cat = cat[mask][0] y = cat['Vmag'] y_e = cat['e_Vmag'] if not self._qc_mags(y, y_e, 'ymag'): return if np.isnan(y_e): y_e = 0 by = cat['b-y'] by_e = cat['e_b-y'] m1 = cat['m1'] m1_e = cat['e_m1'] c1 = cat['c1'] c1_e = cat['e_c1'] b = by + y v = m1 + 2 * by + y u = c1 + 2 * m1 + 3 * by + y b_e = np.sqrt(by_e ** 2 + y_e ** 2) v_e = np.sqrt(m1_e ** 2 + 4 * by_e ** 2 + y_e ** 2) u_e = np.sqrt(c1_e ** 2 + 4 * m1_e ** 2 + 9 * by_e ** 2 + y_e ** 2) mags = [u, v, b, y] err = [u_e, v_e, b_e, y_e] filts = ['STROMGREN_u', 'STROMGREN_v', 'STROMGREN_b', 'STROMGREN_y'] for m, e, f in zip(mags, err, filts): filt_idx = np.where(f == self.filter_names)[0] if self.used_filters[filt_idx] == 1: CatalogWarning(f, 6).warn() continue self._add_mags(m, e, f) pass def _retrieve_from_galex(self, cat, name): print('Checking catalog GALEX') mask = cat['source_id'] == self.dr2_id cat = cat[mask][0] Fexf = cat['Fexf'] Nexf = cat['Nexf'] Fafl = cat['Fafl'] Nafl = cat['Nafl'] for m, e, f in self.catalogs[name][1]: if f == 'GALEX_FUV' and (Fexf > 0 or Fafl > 0): CatalogWarning(f, 8).warn() continue if f == 'GALEX_NUV' and (Nexf > 0 or Nafl > 0): CatalogWarning(f, 8).warn() continue filt_idx = np.where(f == self.filter_names)[0] if self.used_filters[filt_idx] == 1: CatalogWarning(f, 6).warn() continue mag = cat[m] err = cat[e] if not self._qc_mags(mag, err, m): continue self._add_mags(mag, err, f) def _retrieve_from_2mass(self, cat, name): qflg = cat['Qflg'] cflg = cat['Cflg'] for m, e, f in self.catalogs[name][1]: filt_idx = np.where(f == self.filter_names)[0] if f == '2MASS_J': if qflg[0][0] not in 'ABCD' or cflg[0][0] != '0': CatalogWarning(f, 8).warn() continue if f == '2MASS_H': if qflg[0][1] not in 'ABCD' or cflg[0][1] != '0': CatalogWarning(f, 8).warn() continue if f == '2MASS_Ks': if 
qflg[0][2] not in 'ABCD' or cflg[0][2] != '0': CatalogWarning(f, 8).warn() continue if self.used_filters[filt_idx] == 1: CatalogWarning(f, 6).warn() continue mag = cat[m][0] err = cat[e][0] if not self._qc_mags(mag, err, m): continue self._add_mags(mag, err, f) def _retrieve_from_wise(self, cat, name): qph = cat['qph'] for m, e, f in self.catalogs[name][1]: filt_idx = np.where(f == self.filter_names)[0] if f == 'WISE_RSR_W1': if qph[0][0] not in 'ABC': CatalogWarning(f, 8).warn() continue if f == 'WISE_RSR_W2': if qph[0][1] not in 'ABC': CatalogWarning(f, 8).warn() continue if self.used_filters[filt_idx] == 1: CatalogWarning(f, 6).warn() continue mag = cat[m][0] err = cat[e][0] if not self._qc_mags(mag, err, m): continue self._add_mags(mag, err, f) def _add_mags(self, mag, er, filt): filt_idx = np.where(filt == self.filter_names)[0] if er == 0 or np.ma.is_masked(er): self.used_filters[filt_idx] = 2 else: self.used_filters[filt_idx] = 1 self.mags[filt_idx] = mag self.mag_errs[filt_idx] = er def _get_ascc_tycho2_stromgren(self, cats, near, name): print('Checking catalog ' + name) try: cat = cats[self.catalogs[name][0]] cat.sort('_r') except TypeError: CatalogWarning(name, 5).warn() return if not near: try: tyc1, tyc2, tyc3 = self.ids['TYCHO2'].split('-') except TypeError: tyc1, tyc2, tyc3 = self.ids['TYCHO2'].split('b-') mask = cat['TYC1'] == int(tyc1) mask *= cat['TYC2'] == int(tyc2) mask *= cat['TYC3'] == int(tyc3) else: mask = [0] if 'STROMGREN' not in name: self._retrieve_from_cat(cat[mask], name) else: self._retrieve_from_stromgren(cat[mask]) def _get_apass(self, cat): print('Checking catalog APASS') CatalogWarning('APASS', 5).warn() # mask = cat['recno'] == int(self.ids['APASS']) # self._retrieve_from_cat(cat[mask], 'APASS') def _get_wise(self, cat): print('Checking catalog All-WISE') mask = cat['AllWISE'] == self.ids['Wise'] is_star = cat[mask]['ex'] == 0 if is_star: self._retrieve_from_wise(cat[mask], 'Wise') else: CatalogWarning('WISE', 8).warn() def 
_get_2mass_glimpse(self, cats, name): print('Checking catalog ' + name) try: cat = cats[self.catalogs[name][0]] cat.sort('_r') except TypeError: CatalogWarning(name, 5).warn() return if name == '2MASS': mask = cat['_2MASS'] == self.ids['2MASS'] self._retrieve_from_2mass(cat[mask], '2MASS') else: mask = cat['_2MASS'] == self.ids['2MASS'] self._retrieve_from_cat(cat[mask], 'GLIMPSE') def _get_sdss(self, cat): print('Checking catalog SDSS DR12') mask = cat['objID'] == int(self.ids['SDSS']) is_star = cat[mask]['class'] == 6 is_good_quality = cat[mask]['Q'] == 3 or cat[mask]['Q'] == 2 if is_star and is_good_quality: self._retrieve_from_cat(cat[mask], 'SDSS') else: CatalogWarning('SDSS', 8).warn() def _get_ps1(self, cat): print('Checking catalog Pan-STARRS1') mask = cat['objID'] == self.ids['Pan-STARRS'] is_star = not (cat[mask]['Qual'] & 1 and cat[mask]['Qual'] & 2) is_good_quality = (cat[mask]['Qual'] & 4 or cat[mask]['Qual'] & 16) is_good_quality = is_good_quality and not cat[mask]['Qual'] & 128 if is_star and is_good_quality: self._retrieve_from_cat(cat[mask], 'Pan-STARRS') else: CatalogWarning('Pan-STARRS', 8).warn() def _get_gaia(self, cat): print('Checking catalog Gaia DR3') mask = cat['DR3Name'] == f'Gaia DR3 {self.ids["Gaia"]}' self._retrieve_from_cat(cat[mask], 'Gaia') def _get_skymapper(self, cat): print('Checking catalog SkyMapper DR1.1') mask = cat['Gaiadr2Id1'] == self.ids['Gaia'] is_good_quality = cat[mask]['flags'] == 0 if is_good_quality: self._retrieve_from_cat(cat[mask], 'SkyMapper') else: CatalogWarning('SkyMapper', 8).warn() @staticmethod def _get_distance(ra, dec, radius, g_id): """Retrieve Bailer-Jones EDR3 distance.""" tries = [0.5, 0.25, 0.1, 1, 2, 3, 4][::-1] for t in tries: try: failed = False cat = Vizier.query_region( SkyCoord( ra=ra, dec=dec, unit=(u.deg, u.deg), frame='icrs'), radius=radius / t, catalog='I/352/gedr3dis')['I/352/gedr3dis'] break except TypeError: failed = True continue if failed: CatalogWarning(-1, 9).warn() return -999, 
-999 cat.sort('_r') idx = np.where(cat['Source'] == g_id)[0] if len(idx) == 0: # Raise exception, for now do nothing return -1, -1 dist = cat[idx]['rgeo'][0] lo = dist - cat[idx]['b_rgeo'][0] hi = cat[idx]['B_rgeo'][0] - dist return dist, max(lo, hi) @staticmethod def _get_parallax(res): plx = res['parallax'][0] if plx <= 0: CatalogWarning(0, 0).warn() return -1, -1 plx_e = res['parallax_error'][0] # Parallax correction −52.8 ± 2.4 µas from Zinn+19 return plx + 0.0528, np.sqrt(plx_e ** 2 + 0.0024 ** 2) @staticmethod def _get_radius(res): rad = res['radius_val'][0] if np.ma.is_masked(rad): CatalogWarning('radius', 1).warn() return 0, 0 lo = res['radius_percentile_lower'][0] up = res['radius_percentile_upper'][0] rad_e = max([rad - lo, up - rad]) return rad, 5 * rad_e @staticmethod def _get_teff(res): teff = res['teff_val'][0] if np.ma.is_masked(teff): CatalogWarning('teff', 1).warn() return 0, 0 lo = res['teff_percentile_lower'][0] up = res['teff_percentile_upper'][0] teff_e = max([teff - lo, up - teff]) return teff, teff_e @staticmethod def _get_lum(res): lum = res['lum_val'][0] if np.ma.is_masked(lum): CatalogWarning('lum', 1).warn() return 0, 0 lo = res['lum_percentile_lower'][0] up = res['lum_percentile_upper'][0] lum_e = max([lum - lo, up - lum]) return lum, lum_e @staticmethod def _get_gaia_id(ra, dec, radius): c = SkyCoord(ra, dec, unit=(u.deg, u.deg), frame='icrs') j = Gaia.cone_search_async(c, radius, table_name='gaiadr3.gaia_source') res = j.get_results() return res['source_id'][0] @staticmethod def get_catalogs(ra, dec, radius, catalogs): """Retrieve available catalogs for a star from Vizier.""" tries = [0.5, 0.25, 0.1, 1, 2, 3, 4][::-1] for t in tries: cats = Vizier.query_region( SkyCoord( ra=ra, dec=dec, unit=(u.deg, u.deg), frame='icrs' ), radius=radius / t, catalog=catalogs ) if len(cats): break return cats @staticmethod def get_TIC(ra, dec, radius): """Retrieve TIC from MAST.""" cat = Catalogs.query_region( SkyCoord( ra=ra, dec=dec, unit=(u.deg, 
u.deg), frame='icrs' ), radius=radius, catalog='TIC' ) return cat @staticmethod def _qc_mags(mag, err, m): if np.ma.is_masked(mag): CatalogWarning(m, 2).warn() return False if np.ma.is_masked(err): CatalogWarning(m, 3).warn() return True if err == 0: CatalogWarning(m, 4).warn() return True if err > 1: return False return True @staticmethod def _gaia_galex_xmatch(cats, ra, dec, radius): galex = cats['II/312/ais'] coord = SkyCoord(ra=ra * u.deg, dec=dec * u.deg, frame='icrs') region = CircleSkyRegion(coord, radius=radius) xm = XMatch.query(cat1='vizier:I/345/gaia2', cat2=galex, colRA2='RAJ2000', colDec2='DEJ2000', area=region, max_distance=radius) xm.sort('angDist') return xm @staticmethod def _gaia_mermilliod_xmatch(ra, dec, radius): coord = SkyCoord(ra=ra * u.deg, dec=dec * u.deg, frame='icrs') region = CircleSkyRegion(coord, radius=radius) xm = XMatch.query(cat1='vizier:I/345/gaia2', cat2='vizier:II/168/ubvmeans', colRA2='_RA', colDec2='_DE', area=region, max_distance=radius) xm.sort('angDist') return xm @staticmethod def _gaia_paunzen_xmatch(ra, dec, radius): coord = SkyCoord(ra=ra * u.deg, dec=dec * u.deg, frame='icrs') region = CircleSkyRegion(coord, radius=radius) xm = XMatch.query(cat1='vizier:I/345/gaia2', cat2='vizier:J/A+A/580/A23/catalog', colRA2='RAICRS', colDec2='DEICRS', area=region, max_distance=radius) xm.sort('angDist') return xm @staticmethod def _gaia_hauck_xmatch(ra, dec, radius): coord = SkyCoord(ra=ra * u.deg, dec=dec * u.deg, frame='icrs') region = CircleSkyRegion(coord, radius=radius) xm = XMatch.query(cat1='vizier:I/345/gaia2', cat2='vizier:II/215/catalog', colRA2='_RA.icrs', colDec2='_DE.icrs', area=region, max_distance=radius) xm.sort('angDist') return xm def create_logfile(self): """Activate log file.""" self.old_stdout = sys.stdout self.log_file = open(os.getcwd() + '/' + self.starname + 'output.log', 'w+') sys.stdout = self.log_file def close_logfile(self): """Deactivate log file.""" sys.stdout = self.old_stdout self.log_file.close()
jvinesREPO_NAMEastroARIADNEPATH_START.@astroARIADNE_extracted@astroARIADNE-master@astroARIADNE@librarian.py@.PATH_END.py
{ "filename": "__init__.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2d/marker/__init__.py", "type": "Python" }
# Auto-generated Plotly validator package initializer
# (plotly/validators/histogram2d/marker).
#
# Exposes ColorsrcValidator and ColorValidator either eagerly or lazily,
# depending on the Python version.
import sys
from typing import TYPE_CHECKING

if sys.version_info < (3, 7) or TYPE_CHECKING:
    # Python < 3.7 has no module-level __getattr__ (PEP 562), and static
    # type checkers need real imports to resolve the names, so bind the
    # validator classes eagerly here.
    from ._colorsrc import ColorsrcValidator
    from ._color import ColorValidator
else:
    # On modern interpreters, defer the submodule imports until an
    # attribute is first accessed; relative_import returns the __all__,
    # __getattr__ and __dir__ hooks that implement the lazy loading.
    # This keeps plotly's overall import time low.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._colorsrc.ColorsrcValidator", "._color.ColorValidator"]
    )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2d@marker@__init__.py@.PATH_END.py
{ "filename": "mass_function.py", "repo_name": "axion-alp-dm/EBL_calculation", "repo_path": "EBL_calculation_extracted/EBL_calculation-main/models_tests/mass_function.py", "type": "Python" }
#matplotlib inline #config InlineBackend.figure_format = 'retina' import sys, platform, os import matplotlib from matplotlib import pyplot as plt import numpy as np #Assume installed from github using "git clone --recursive https://github.com/cmbant/CAMB.git" #This file is then in the docs folders camb_path = os.path.realpath(os.path.join(os.getcwd(), '..')) sys.path.insert(0, camb_path) sys.path.insert(0, '/home/porrassa/miniconda3/envs/EBL_calculation/bin') sys.path.insert(0, '/home/porrassa/miniconda3/pkgs') sys.path.insert(0, '/home/porrassa/miniconda3/lib/python3.9/site-packages') #sys.path.insert(0, '') #sys.path.insert(0, '') #sys.path.insert(0, '') #sys.path.insert(0, '/home/porrassa/miniconda3/envs/EBL_calculation/share') #sys.path.insert(0, '/home/porrassa/miniconda3/envs/EBL_calculation/conda-meta') print(sys.path) import camb from camb import model, initialpower print('Using CAMB %s installed at %s'%(camb.__version__,os.path.dirname(camb.__file__))) print('aaa')
axion-alp-dmREPO_NAMEEBL_calculationPATH_START.@EBL_calculation_extracted@EBL_calculation-main@models_tests@mass_function.py@.PATH_END.py
{ "filename": "uratmatch.py", "repo_name": "desihub/desitarget", "repo_path": "desitarget_extracted/desitarget-main/py/desitarget/uratmatch.py", "type": "Python" }
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- """ desitarget.uratmatch ==================== Useful `URAT`_ matching and manipulation routines. .. _`URAT`: http://cdsarc.u-strasbg.fr/viz-bin/cat/I/329 """ import os import numpy as np import fitsio import requests import pickle from importlib import resources from time import time from astropy.io import ascii from glob import glob import healpy as hp from desitarget.internal import sharedmem from desimodel.footprint import radec2pix from desitarget.geomask import add_hp_neighbors, radec_match_to from desitarget import io # ADM set up the DESI default logger from desiutil.log import get_logger log = get_logger() # ADM start the clock start = time() # ADM columns contained in our version of the URAT fits files. uratdatamodel = np.array([], dtype=[ ('URAT_ID', '>i8'), ('RA', '>f8'), ('DEC', '>f8'), ('APASS_G_MAG', '>f4'), ('APASS_G_MAG_ERROR', '>f4'), ('APASS_R_MAG', '>f4'), ('APASS_R_MAG_ERROR', '>f4'), ('APASS_I_MAG', '>f4'), ('APASS_I_MAG_ERROR', '>f4'), ('PMRA', '>f4'), ('PMDEC', '>f4'), ('PM_ERROR', '>f4') ]) def get_urat_dir(): """Convenience function to grab the URAT environment variable. Returns ------- :class:`str` The directory stored in the $URAT_DIR environment variable. """ # ADM check that the $URAT_DIR environment variable is set. uratdir = os.environ.get('URAT_DIR') if uratdir is None: msg = "Set $URAT_DIR environment variable!" log.critical(msg) raise ValueError(msg) return uratdir def _get_urat_nside(): """Grab the HEALPixel nside to be used throughout this module. Returns ------- :class:`int` The HEALPixel nside number for URAT file creation and retrieval. """ nside = 32 return nside def scrape_urat(url="http://cdsarc.u-strasbg.fr/ftp/I/329/URAT1/v12/", nfiletest=None): """Retrieve the binary versions of the URAT files. Parameters ---------- url : :class:`str` The web directory that hosts the archived binary URAT files. 
nfiletest : :class:`int`, optional, defaults to ``None`` If an integer is sent, only retrieve this number of files, for testing. Returns ------- Nothing But the archived URAT files are written to $URAT_DIR/binary. Notes ----- - The environment variable $URAT_DIR must be set. - Runs in about 50 minutes for 575 URAT files. """ # ADM check that the URAT_DIR is set and retrieve it. uratdir = get_urat_dir() # ADM construct the directory to which to write files. bindir = os.path.join(uratdir, 'binary') # ADM the directory better be empty for the wget! if os.path.exists(bindir): if len(os.listdir(bindir)) > 0: msg = "{} should be empty to wget URAT binary files!".format(bindir) log.critical(msg) raise ValueError(msg) # ADM make the directory, if needed. else: log.info('Making URAT directory for storing binary files') os.makedirs(bindir) index = requests.get(url) # ADM retrieve any file name that starts with z. # ADM the [1::2] pulls back just the odd lines from the split list. garbled = index.text.split("z")[1::2] filelist = ["z{}".format(g[:3]) for g in garbled] # ADM if nfiletest was passed, just work with that number of files. test = nfiletest is not None if test: filelist = filelist[:nfiletest] nfiles = len(filelist) # ADM loop through the filelist. start = time() for nfile, fileinfo in enumerate(filelist): # ADM make the wget command to retrieve the file and issue it. cmd = 'wget -q {} -P {}'.format(os.path.join(url, fileinfo), bindir) os.system(cmd) if nfile % 25 == 0 or test: elapsed = time() - start rate = nfile / elapsed log.info( '{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed' .format(nfile+1, nfiles, rate, elapsed/60.) ) log.info('Done...t={:.1f}s'.format(time()-start)) return def urat_binary_to_csv(): """Convert files in $URAT_DIR/binary to files in $URAT_DIR/csv. Returns ------- Nothing But the archived URAT binary files in $URAT_DIR/binary are converted to CSV files in the $URAT_DIR/csv. 
Notes ----- - The environment variable $URAT_DIR must be set. - Relies on the executable urat/fortran/v1dump, which is only tested at NERSC and might need compiled by the user. - Runs in about 40 minutes for 575 files. """ # ADM check that the URAT_DIR is set. uratdir = get_urat_dir() # ADM a quick check that the csv directory is empty before writing. csvdir = os.path.join(uratdir, 'csv') if os.path.exists(csvdir): if len(os.listdir(csvdir)) > 0: msg = "{} should be empty to make URAT files!".format(csvdir) log.critical(msg) raise ValueError(msg) # ADM make the directory, if needed. else: log.info('Making URAT directory for storing CSV files') os.makedirs(csvdir) log.info('Begin converting URAT files to CSV...t={:.1f}s' .format(time()-start)) # ADM check the v1dump executable has been compiled. readme = resources.files('desitarget').joinpath('urat/fortran/README') cmd = resources.files('desitarget').joinpath('urat/fortran/v1dump') if not (os.path.exists(cmd) and os.access(cmd, os.X_OK)): msg = "{} must have been compiled (see {})".format(cmd, readme) log.critical(msg) raise ValueError(msg) # ADM execute v1dump. os.system(cmd) log.info('Done...t={:.1f}s'.format(time()-start)) return def urat_csv_to_fits(numproc=5): """Convert files in $URAT_DIR/csv to files in $URAT_DIR/fits. Parameters ---------- numproc : :class:`int`, optional, defaults to 5 The number of parallel processes to use. Returns ------- Nothing But the archived URAT CSV files in $URAT_DIR/csv are converted to FITS files in the directory $URAT_DIR/fits. Also, a look-up table is written to $URAT_DIR/fits/hpx-to-files.pickle for which each index is an nside=_get_urat_nside(), nested scheme HEALPixel and each entry is a list of the FITS files that touch that HEAPixel. Notes ----- - The environment variable $URAT_DIR must be set. - if numproc==1, use the serial code instead of the parallel code. - Runs in about 10 minutes with numproc=25 for 575 files. 
""" # ADM the resolution at which the URAT HEALPix files should be stored. nside = _get_urat_nside() # ADM check that the URAT_DIR is set. uratdir = get_urat_dir() log.info("running on {} processors".format(numproc)) # ADM construct the directories for reading/writing files. csvdir = os.path.join(uratdir, 'csv') fitsdir = os.path.join(uratdir, 'fits') # ADM make sure the output directory is empty. if os.path.exists(fitsdir): if len(os.listdir(fitsdir)) > 0: msg = "{} should be empty to make URAT FITS files!".format(fitsdir) log.critical(msg) raise ValueError(msg) # ADM make the output directory, if needed. else: log.info('Making URAT directory for storing FITS files') os.makedirs(fitsdir) # ADM construct the list of input files. infiles = sorted(glob("{}/*csv*".format(csvdir))) nfiles = len(infiles) # ADM the critical function to run on every file. def _write_urat_fits(infile): """read an input name for a csv file and write it to FITS""" outbase = os.path.basename(infile) outfilename = "{}.fits".format(outbase.split(".")[0]) outfile = os.path.join(fitsdir, outfilename) # ADM astropy understands without specifying format='csv'. fitstable = ascii.read(infile) # ADM map the ascii-read csv to typical DESI quantities. nobjs = len(fitstable) done = np.zeros(nobjs, dtype=uratdatamodel.dtype) # ADM have to do this one-by-one, given the format. done["RA"] = fitstable['col1']/1000./3600. done["DEC"] = fitstable['col2']/1000./3600. - 90. done["PMRA"] = fitstable['col16']/10. done["PMDEC"] = fitstable['col17']/10. done["PM_ERROR"] = fitstable['col18']/10. done["APASS_G_MAG"] = fitstable['col36']/1000. done["APASS_R_MAG"] = fitstable['col37']/1000. done["APASS_I_MAG"] = fitstable['col38']/1000. done["APASS_G_MAG_ERROR"] = fitstable['col41']/1000. done["APASS_R_MAG_ERROR"] = fitstable['col42']/1000. done["APASS_I_MAG_ERROR"] = fitstable['col43']/1000. 
done["URAT_ID"] = fitstable['col46'] fitsio.write(outfile, done, extname='URATFITS') # ADM return the HEALPixels that this file touches. pix = set(radec2pix(nside, done["RA"], done["DEC"])) return [pix, os.path.basename(outfile)] # ADM this is just to count processed files in _update_status. nfile = np.zeros((), dtype='i8') t0 = time() def _update_status(result): """wrapper function for the critical reduction operation, that occurs on the main parallel process""" if nfile % 25 == 0 and nfile > 0: rate = nfile / (time() - t0) elapsed = time() - t0 log.info( '{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed' .format(nfile, nfiles, rate, elapsed/60.) ) nfile[...] += 1 # this is an in-place modification return result # - Parallel process input files... if numproc > 1: pool = sharedmem.MapReduce(np=numproc) with pool: pixinfile = pool.map(_write_urat_fits, infiles, reduce=_update_status) # ADM ...or run in serial. else: pixinfile = list() for file in infiles: pixinfile.append(_update_status(_write_urat_fits(file))) # ADM create a list for which each index is a HEALPixel and each # ADM entry is a list of files that touch that HEALPixel. npix = hp.nside2npix(nside) pixlist = [[] for i in range(npix)] for pixels, file in pixinfile: for pix in pixels: pixlist[pix].append(file) # ADM write out the HEALPixel->files look-up table. outfilename = os.path.join(fitsdir, "hpx-to-files.pickle") outfile = open(outfilename, "wb") pickle.dump(pixlist, outfile) outfile.close() log.info('Done...t={:.1f}s'.format(time()-t0)) return def urat_fits_to_healpix(numproc=5): """Convert files in $URAT_DIR/fits to files in $URAT_DIR/healpix. Parameters ---------- numproc : :class:`int`, optional, defaults to 5 The number of parallel processes to use. Returns ------- Nothing But the archived URAT FITS files in $URAT_DIR/fits are rearranged by HEALPixel in the directory $URAT_DIR/healpix. 
The HEALPixel sense is nested with nside=_get_urat_nside(), and each file in $URAT_DIR/healpix is called healpix-xxxxx.fits, where xxxxx corresponds to the HEALPixel number. Notes ----- - The environment variable $URAT_DIR must be set. - if numproc==1, use the serial code instead of the parallel code. - Runs in about 10 minutes with numproc=25. """ # ADM the resolution at which the URAT HEALPix files should be stored. nside = _get_urat_nside() # ADM check that the URAT_DIR is set. uratdir = get_urat_dir() # ADM construct the directories for reading/writing files. fitsdir = os.path.join(uratdir, 'fits') hpxdir = os.path.join(uratdir, 'healpix') # ADM make sure the output directory is empty. if os.path.exists(hpxdir): if len(os.listdir(hpxdir)) > 0: msg = "{} should be empty to make URAT HEALPix files!".format(hpxdir) log.critical(msg) raise ValueError(msg) # ADM make the output directory, if needed. else: log.info('Making URAT directory for storing HEALPix files') os.makedirs(hpxdir) # ADM read the pixel -> file look-up table. infilename = os.path.join(fitsdir, "hpx-to-files.pickle") infile = open(infilename, "rb") pixlist = pickle.load(infile) npixels = len(pixlist) # ADM include the pixel number explicitly in the look-up table. pixlist = list(zip(np.arange(npixels), pixlist)) # ADM the critical function to run on every file. def _write_hpx_fits(pixlist): """from files that touch a pixel, write out objects in each pixel""" pixnum, files = pixlist # ADM only proceed if some files touch a pixel. if len(files) > 0: # ADM track if it's our first time through the files loop. first = True # ADM Read in files that touch a pixel. for file in files: filename = os.path.join(fitsdir, file) objs = fitsio.read(filename) # ADM only retain objects in the correct pixel. pix = radec2pix(nside, objs["RA"], objs["DEC"]) if first: done = objs[pix == pixnum] first = False else: done = np.hstack([done, objs[pix == pixnum]]) # ADM construct the name of the output file. 
outfilename = io.hpx_filename(pixnum) outfile = os.path.join(hpxdir, outfilename) # ADM write out the file. hdr = fitsio.FITSHDR() hdr['HPXNSIDE'] = nside hdr['HPXNEST'] = True fitsio.write(outfile, done, extname='URATHPX', header=hdr) return # ADM this is just to count processed files in _update_status. npix = np.zeros((), dtype='i8') t0 = time() def _update_status(result): """wrapper function for the critical reduction operation, that occurs on the main parallel process""" if npix % 500 == 0 and npix > 0: rate = npix / (time() - t0) elapsed = time() - t0 log.info( '{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed' .format(npix, npixels, rate, elapsed/60.) ) npix[...] += 1 # this is an in-place modification return result # - Parallel process input files... if numproc > 1: pool = sharedmem.MapReduce(np=numproc) with pool: _ = pool.map(_write_hpx_fits, pixlist, reduce=_update_status) # ADM ...or run in serial. else: for pix in pixlist: _update_status(_write_hpx_fits(pix)) log.info('Done...t={:.1f}s'.format(time()-t0)) return def make_urat_files(numproc=5, download=False): """Make the HEALPix-split URAT files in one fell swoop. Parameters ---------- numproc : :class:`int`, optional, defaults to 5 The number of parallel processes to use. download : :class:`bool`, optional, defaults to ``False`` If ``True`` then wget the URAT binary files from Vizier. Returns ------- Nothing But produces: - URAT DR1 binary files in $URAT_DIR/binary (if download=True). - URAT CSV files with all URAT columns in $URAT_DIR/csv. - FITS files with columns from `uratdatamodel` in $URAT_DIR/fits. - FITS files reorganized by HEALPixel in $URAT_DIR/healpix. The HEALPixel sense is nested with nside=_get_urat_nside(), and each file in $URAT_DIR/healpix is called healpix-xxxxx.fits, where xxxxx corresponds to the HEALPixel number. Notes ----- - The environment variable $URAT_DIR must be set. - if numproc==1, use the serial, instead of the parallel, code. 
- Runs in about 2 hours with numproc=25 if download is ``True``. - Runs in about 1 hour with numproc=25 if download is ``False``. """ t0 = time() log.info('Begin making URAT files...t={:.1f}s'.format(time()-t0)) # ADM check that the URAT_DIR is set. uratdir = get_urat_dir() # ADM a quick check that the fits and healpix directories are empty # ADM before embarking on the slower parts of the code. csvdir = os.path.join(uratdir, 'csv') fitsdir = os.path.join(uratdir, 'fits') hpxdir = os.path.join(uratdir, 'healpix') for direc in [csvdir, fitsdir, hpxdir]: if os.path.exists(direc): if len(os.listdir(direc)) > 0: msg = "{} should be empty to make URAT files!".format(direc) log.critical(msg) raise ValueError(msg) if download: scrape_urat() log.info('Retrieved URAT files from Vizier...t={:.1f}s' .format(time()-t0)) urat_binary_to_csv() log.info('Converted binary files to CSV...t={:.1f}s'.format(time()-t0)) urat_csv_to_fits(numproc=numproc) log.info('Converted CSV files to FITS...t={:.1f}s'.format(time()-t0)) urat_fits_to_healpix(numproc=numproc) log.info('Rearranged FITS files by HEALPixel...t={:.1f}s'.format(time()-t0)) return def find_urat_files(objs, neighbors=True, radec=False): """Find full paths to URAT healpix files for objects by RA/Dec. Parameters ---------- objs : :class:`~numpy.ndarray` Array of objects. Must contain the columns "RA" and "DEC". neighbors : :class:`bool`, optional, defaults to ``True`` Also return all pixels that touch the files of interest to prevent edge effects (e.g. if a URAT source is 1 arcsec away from a primary source and so in an adjacent pixel). radec : :class:`bool`, optional, defaults to ``False`` If ``True`` then the passed `objs` is an [RA, Dec] list instead of a rec array that contains "RA" and "DEC". Returns ------- :class:`list` A list of all URAT files to read to account for objects at the passed locations. Notes ----- - The environment variable $URAT_DIR must be set. 
""" # ADM the resolution at which the URAT HEALPix files are stored. nside = _get_urat_nside() # ADM check that the URAT_DIR is set and retrieve it. uratdir = get_urat_dir() hpxdir = os.path.join(uratdir, 'healpix') # ADM remember to pass "strict", as URAT doesn't cover the whole sky. return io.find_star_files(objs, hpxdir, nside, strict=True, neighbors=neighbors, radec=radec) def match_to_urat(objs, matchrad=1., radec=False): """Match objects to URAT healpix files and return URAT information. Parameters ---------- objs : :class:`~numpy.ndarray` Must contain at least "RA" and "DEC". matchrad : :class:`float`, optional, defaults to 1 arcsec The radius at which to match in arcseconds. radec : :class:`bool`, optional, defaults to ``False`` If ``True`` then the passed `objs` is an [RA, Dec] list instead of a rec array. Returns ------- :class:`~numpy.ndarray` The matching URAT information for each object. The returned format is as for desitarget.uratmatch.uratdatamodel with an extra column "URAT_SEP" which is the matching distance in ARCSECONDS. Notes ----- - For objects that do NOT have a match in URAT, the "URAT_ID" and "URAT_SEP" columns are -1, and other columns are zero. - Retrieves the CLOSEST match to URAT for each passed object. - Because this reads in HEALPixel split files, it's (far) faster for objects that are clumped rather than widely distributed. """ # ADM parse whether a structure or coordinate list was passed. if radec: ra, dec = objs else: ra, dec = objs["RA"], objs["DEC"] # ADM set up an array of URAT information for the output. nobjs = len(ra) done = np.zeros(nobjs, dtype=uratdatamodel.dtype) # ADM objects without matches should have URAT_ID, URAT_SEP of -1. done["URAT_ID"] = -1 urat_sep = np.zeros(nobjs) - 1 # ADM determine which URAT files need to be scraped. uratfiles = find_urat_files([ra, dec], radec=True) nfiles = len(uratfiles) # ADM catch the case of no matches to URAT. if nfiles > 0: # ADM loop through the URAT files and find matches. 
for ifn, fn in enumerate(uratfiles): if ifn % 500 == 0 and ifn > 0: log.info('{}/{} files; {:.1f} total mins elapsed' .format(ifn, nfiles, (time()-start)/60.)) urat = fitsio.read(fn) idurat, idobjs, dist = radec_match_to( [urat["RA"], urat["DEC"]], [ra, dec], sep=matchrad, radec=True, return_sep=True) # ADM update matches whenever we have a CLOSER match. ii = (urat_sep[idobjs] == -1) | (urat_sep[idobjs] > dist) done[idobjs[ii]] = urat[idurat[ii]] urat_sep[idobjs[ii]] = dist[ii] # ADM add the separation distances to the output array. dt = uratdatamodel.dtype.descr + [("URAT_SEP", ">f4")] output = np.zeros(nobjs, dtype=dt) for col in uratdatamodel.dtype.names: output[col] = done[col] output["URAT_SEP"] = urat_sep return output
desihubREPO_NAMEdesitargetPATH_START.@desitarget_extracted@desitarget-main@py@desitarget@uratmatch.py@.PATH_END.py
{ "filename": "pdf_sampler.py", "repo_name": "bsafdi/NPTFit", "repo_path": "NPTFit_extracted/NPTFit-master/NPTFit/pdf_sampler.py", "type": "Python" }
###############################################################################
# pdf_sampler.py
###############################################################################
#
# Function to make random draws from an arbitrary PDF using inversion sampling.
#
###############################################################################

import numpy as np


class PDFSampler:
    def __init__(self, xvals, pofx):
        """Precompute the sorted CDF once so repeated draws are cheap.

        :param xvals: array of x values
        :param pofx: array of associated p(x) values (does not need to be
            normalised)
        """
        # A negative density has no probabilistic meaning, so refuse it.
        assert np.all(pofx >= 0), "pdf cannot be negative"

        # Order the support by increasing p(x); drawing against a sorted
        # density makes the inversion numerically more accurate.
        self.sortxvals = np.argsort(pofx)
        self.xvals = xvals
        self.pofx = pofx[self.sortxvals]

        # Unnormalised CDF: its final entry is the total probability weight.
        self.cdf = np.cumsum(self.pofx)

    def __call__(self, samples):
        """Return ``samples`` random draws from the stored PDF.

        :param samples: number of draws you want from the pdf
        :returns: array of random draws from the provided PDF
        """
        # Inversion sampling: uniform draws up to the total (unnormalised)
        # weight, then locate each draw's bin in the CDF and map the bin
        # back through the sort permutation to the original x values.
        uniform = np.random.uniform(high=self.cdf[-1], size=samples)
        positions = np.searchsorted(self.cdf, uniform)
        return self.xvals[self.sortxvals[positions]]
bsafdiREPO_NAMENPTFitPATH_START.@NPTFit_extracted@NPTFit-master@NPTFit@pdf_sampler.py@.PATH_END.py
{ "filename": "make_fiducial.py", "repo_name": "misharash/cobaya_mock_cmb", "repo_path": "cobaya_mock_cmb_extracted/cobaya_mock_cmb-main/make_fiducial.py", "type": "Python" }
#!/usr/bin/env python3 # Example script to create fiducial values for mock CMB likelihoods from cobaya.model import get_model # from best fit with fixed massless neutrinos and nuisance-marginalized high-l fiducial_params = { # LambdaCDM parameters 'H0': 68.1461, # '100*theta_s': 1.041920539e+00, 'omega_b': 2.241606328e-02, 'N_ur': 3.046, # three massless neutrinos 'omega_cdm': 1.194462584e-01, 'A_s': 2.109371924e-09, # 'sigma8': 8.245006041e-01, 'n_s': 9.660360599e-01, 'tau_reio': 5.142494234e-02 } fiducial_params_extra = { 'recombination': 'recfast', 'non linear': 'halofit' } fiducial_params_full = fiducial_params.copy() fiducial_params_full.update(fiducial_params_extra) info_fiducial = { 'params': fiducial_params, 'likelihood': {'cobaya_mock_cmb.MockSO': {'python_path': '.'}, 'cobaya_mock_cmb.MockSOBaseline': {'python_path': '.'}, 'cobaya_mock_cmb.MockSOGoal': {'python_path': '.'}, 'cobaya_mock_cmb.MockCMBS4': {'python_path': '.'}, 'cobaya_mock_cmb.MockCMBS4sens0': {'python_path': '.'}, 'cobaya_mock_cmb.MockPlanck': {'python_path': '.'}}, 'theory': {'classy': {"extra_args": fiducial_params_extra}}} model_fiducial = get_model(info_fiducial) model_fiducial.logposterior({}) Cls = model_fiducial.provider.get_Cl(units="muK2") for likelihood in model_fiducial.likelihood.values(): likelihood.create_fid_values(Cls, fiducial_params_full, override=True)
misharashREPO_NAMEcobaya_mock_cmbPATH_START.@cobaya_mock_cmb_extracted@cobaya_mock_cmb-main@make_fiducial.py@.PATH_END.py
{ "filename": "test_jira_api.py", "repo_name": "langchain-ai/langchain", "repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/utilities/test_jira_api.py", "type": "Python" }
"""Integration test for JIRA API Wrapper.""" from langchain_community.utilities.jira import JiraAPIWrapper def test_search() -> None: """Test for Searching issues on JIRA""" jql = "project = TP" jira = JiraAPIWrapper() # type: ignore[call-arg] output = jira.run("jql", jql) assert "issues" in output def test_getprojects() -> None: """Test for getting projects on JIRA""" jira = JiraAPIWrapper() # type: ignore[call-arg] output = jira.run("get_projects", "") assert "projects" in output def test_create_ticket() -> None: """Test the Create Ticket Call that Creates a Issue/Ticket on JIRA.""" issue_string = ( '{"summary": "Test Summary", "description": "Test Description",' ' "issuetype": {"name": "Bug"}, "project": {"key": "TP"}}' ) jira = JiraAPIWrapper() # type: ignore[call-arg] output = jira.run("create_issue", issue_string) assert "id" in output assert "key" in output def test_create_confluence_page() -> None: """Test for getting projects on JIRA""" jira = JiraAPIWrapper() # type: ignore[call-arg] create_page_dict = ( '{"space": "ROC", "title":"This is the title",' '"body":"This is the body. You can use ' '<strong>HTML tags</strong>!"}' ) output = jira.run("create_page", create_page_dict) assert "type" in output assert "page" in output def test_other() -> None: """Non-exhaustive test for accessing other JIRA API methods""" jira = JiraAPIWrapper() # type: ignore[call-arg] issue_create_dict = """ { "function":"issue_create", "kwargs": { "fields": { "summary": "Test Summary", "description": "Test Description", "issuetype": {"name": "Bug"}, "project": {"key": "TP"} } } } """ output = jira.run("other", issue_create_dict) assert "id" in output assert "key" in output
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@utilities@test_jira_api.py@.PATH_END.py
{ "filename": "set_telescope_pointing_separated.py", "repo_name": "spacetelescope/mirage", "repo_path": "mirage_extracted/mirage-master/mirage/utils/set_telescope_pointing_separated.py", "type": "Python" }
#!/usr/bin/env python # Copyright (C) 2010-2011 Association of Universities for Research in Astronomy (AURA) # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # 3. The name of AURA and its representatives may not be used to # endorse or promote products derived from this software without # specific prior written permission. # THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. # #This local copy of set_telescope_pointing has been updated such #that a PAV3 roll angle can be accepted as input in the find_wcs #function. In the case where no quaternion is found, the input roll #angle is used when calculating WCS information. #-25 July 2017, Bryan Hilbert ''' This script adds absolute pointing information to the FITS files provided to it on the command line (one or more). 
Currently it only uses a constant value for the engineering keywords since the Engineering Database does not yet contain them. It assumes the following keywords are present in the file header: V2_REF (arcseconds) V3_REF (arcseconds) VPARITY (+1 or -1) V3I_YANG (decimal degrees) The keywords added are: RA_V1 DEC_V1 PA_V3 CRVAL1 CRVAL2 PC1_1 PC1_2 PC2_1 PC2_2 It does not currently place the new keywords in any particular location in the header other than what is required by the standard. ''' from __future__ import print_function, division from collections import namedtuple import logging import sys import astropy.io.fits as fits import numpy as np import pysiaf #from jwst.lib.engdb_tools import ( # ENGDB_BASE_URL, # ENGDB_Service, #) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) handler = logging.StreamHandler() handler.setLevel(logging.DEBUG) logger.addHandler(handler) # Define the return from get_pointing Pointing_Quaternions = namedtuple( 'Pointing_Quaternions', ['q', 'j2fgs_matrix', 'fsmcorr', 'obstime'] ) def add_wcs(filename, roll=0.): """ Given the name of a valid partially populated level 1b JWST file, determine the simple WCS parameters from the SIAF keywords in that file and the engineering parameters that contain information about the telescope pointing. It presumes all the accessed keywords are present (see first block). 
Parameters ---------- filename : str file name roll : float PA_V3 in degrees """ hdulist = fits.open(filename, 'update') pheader = hdulist[0].header fheader = hdulist[1].header obsstart = float(pheader['EXPSTART']) obsend = float(pheader['EXPEND']) try: v2ref = float(pheader['V2_REF']) v3ref = float(pheader['V3_REF']) v3idlyang = float(pheader['V3I_YANG']) vparity = int(pheader['VPARITY']) except: v2ref = float(fheader['V2_REF']) v3ref = float(fheader['V3_REF']) v3idlyang = float(fheader['V3I_YANG']) vparity = int(fheader['VPARITY']) # ########################################## # WARNINGWARNINGWARNINGWARNINGWARNINGWARNING # # Get engineering parameters about scope pointing. # In normal operations, if the paramters cannot be found # this should fail. # However, for prelaunch, we'll dummy out. #try: # q, j2fgs_matrix, fsmcorr, obstime = get_pointing(obsstart, obsend) #except ValueError as exception: #ra = pheader['TARG_RA'] #dec = pheader['TARG_DEC'] ra = fheader['CRVAL1'] dec = fheader['CRVAL2'] #roll = 0 #logger.warning( # 'Cannot retrieve telescope pointing.' # '\n{}' # '\nUsing TARG_RA={}, TARG_DEC={} and PA_V3={} ' # 'to set pointing.'.format(exception, ra, dec, roll) #) local_roll = compute_local_roll(roll, ra, dec, v2ref, v3ref) wcsinfo = (ra, dec, local_roll) crval1, crval2, pa_aper_deg = wcsinfo # compute pointing of V1 axis attitude_matrix = pysiaf.rotations.attitude(v2ref, v3ref, ra, dec, local_roll) v1_ra_deg, v1_dec_deg = pysiaf.rotations.pointing(attitude_matrix, 0., 0.) 
v3_pa_deg = roll pa_aper_deg = local_roll - vparity * v3idlyang #else: # # compute relevant WCS information # logger.info('Successful read of engineering quaternions.') # logger.debug('q={}'.format(q)) # logger.debug('j2fgs_matrix={}'.format(j2fgs_matrix)) # logger.debug('fsmcorr={}'.format(fsmcorr)) # wcsinfo, vinfo = calc_wcs(v2ref, v3ref, v3idlyang, vparity, # q, j2fgs_matrix, fsmcorr) # crval1, crval2, pa_aper_deg = wcsinfo # v1_ra_deg, v1_dec_deg, v3_pa_deg = vinfo # local_roll = compute_local_roll(v3_pa_deg, crval1, crval2, v2ref, v3ref) # # logger.info( # 'Computed coordinates from quaternions:' # '\n\tRA = {} DEC={} PA_V3={}'.format(crval1, crval2, v3_pa_deg) # ) #fheader['RA_V1'] = v1_ra_deg #fheader['DEC_V1'] = v1_dec_deg #fheader['PA_V3'] = v3_pa_deg #fheader['CRVAL1'] = crval1 #fheader['CRVAL2'] = crval2 fheader['PC1_1'] = -np.cos(pa_aper_deg * D2R) fheader['PC1_2'] = np.sin(pa_aper_deg * D2R) fheader['PC2_1'] = np.sin(pa_aper_deg * D2R) fheader['PC2_2'] = np.cos(pa_aper_deg * D2R) fheader['RA_REF'] = crval1 fheader['DEC_REF'] = crval2 fheader['ROLL_REF'] = local_roll fheader['WCSAXES'] = len(fheader['CTYPE*']) hdulist.flush() hdulist.close() logger.info('WCS info for {} complete.'.format(filename)) def m_v_to_siaf(ya, v3, v2, vidlparity): # This is a 321 rotation mat = np.array([[np.cos(v3)*np.cos(v2), np.cos(v3)*np.sin(v2), np.sin(v3)], [-np.cos(ya)*np.sin(v2)+np.sin(ya)*np.sin(v3)*np.cos(v2), np.cos(ya)*np.cos(v2)+np.sin(ya)*np.sin(v3)*np.sin(v2), -np.sin(ya)*np.cos(v3)], [-np.sin(ya)*np.sin(v2)-np.cos(ya)*np.sin(v3)*np.cos(v2), np.sin(ya)*np.cos(v2)-np.cos(ya)*np.sin(v3)*np.sin(v2), np.cos(ya)*np.cos(v3)]]) pmat = np.array([[0., vidlparity, 0.], [0., 0., 1.], [1., 0., 0.]]) return np.dot(pmat, mat) def vector_to_ra_dec(v): """Returns tuple of spherical angles from unit direction Vector """ ra = np.arctan2(v[1], v[0]) dec = np.arcsin(v[2]) if ra < 0.: ra += 2. * np.pi return(ra, dec) R2D = 180./np.pi D2R = np.pi/180. A2R = D2R/3600. 
R2A = 3600.*R2D # Define the FGS1 to SI-FOV DCM, Transpose of DCM in SE-20 section 5.8.4.2. m1 = np.array( [[0.9999994955442, 0.0000000000000, 0.0010044457459], [0.0000011174826, 0.9999993811310, -0.0011125359826], [-0.0010044451243, 0.0011125365439, 0.9999988766756]]) m2 = np.array( [[0, 0, 1], [1, 0, 0], [0, 1, 0]]) m_fgs1_to_sifov = np.dot(m2, m1) m_fgs1_to_sifovT = m_fgs1_to_sifov.transpose() # Define the SI-FOV to V-frame DCM, From Luis' IOC m_sifov_to_v = np.array( [[0.99999742598, 0., 0.00226892608], [0., 1., 0.], [-0.00226892608, 0., 0.99999742598]]) def calc_wcs(v2ref, v3ref, v3idlyang, vidlparity, q, j2fgs_matrix, fsmcorr): ''' v2ref (arcsec), v3ref (arcsec), v3idlyang (deg), vidlparity (+1 or -1), are the relevant siaf parameters. The assumed units are shown in parentheses. It is assumed that the siaf ref position is the corresponding WCS reference position. Parameter q is the SA_ZATTEST<n> engineering parameters where n ranges from 1 to 4. Parameter j2fgs_matrix is the transformation matrix specified by engineering parameters SA_ZRFGS2J<n><m> where both n and m range from 1 to 3. This is to be provided as a 1d list using this order: 11, 21, 31, 12, 22, 32, 13, 23, 33 Parameter fsmcorr are two values provided as a list consisting of: [SA_ZADUCMDX, SA_ZADUCMDY] This routine returns two tuples: The first is of (CRVAL1, CRVAL2, PA_y_axis) The second is of (V1ra, V1dec, V3pa) All angles are in decimal degrees. ''' q1, q2, q3, q4 = q m_eci2j = np.array( [[1. - 2.*q2*q2 - 2.*q3*q3, 2.*(q1*q2 + q3*q4), 2.*(q3*q1 - q2*q4)], [2.*(q1*q2 - q3*q4), 1. - 2.*q3*q3 - 2.*q1*q1, 2.*(q2*q3 + q1*q4)], [2.*(q3*q1 + q2*q4), 2.*(q2*q3 - q1*q4), 1. 
- 2.*q1*q1 - 2.*q2*q2]]) mj2fgs1 = np.array(j2fgs_matrix).reshape((3, 3)).transpose() m_sifov_fsm_delta = np.array( [[1., fsmcorr[0]/22.01, fsmcorr[1]/21.68], [-fsmcorr[0]/22.01, 1., 0.], [-fsmcorr[1]/21.68, 0., 1.]]) mpartial = np.dot(m_sifov_to_v, np.dot(m_sifov_fsm_delta, np.dot(m_fgs1_to_sifov, mj2fgs1))) m_eci2v = np.dot(mpartial, m_eci2j) v1pt = m_eci2v[0] xeci_ra, xeci_dec = vector_to_ra_dec(v1pt) # V1 is given by the first row. v1_ra, v1_dec = vector_to_ra_dec(m_eci2v[0]) # V3 is given by the third row v3_ra, v3_dec = vector_to_ra_dec(m_eci2v[2]) # The V3PA @ V1 is given by y = np.cos(v3_dec) * np.sin(v3_ra-v1_ra) x = np.sin(v3_dec) * np.cos(v1_dec) - \ np.cos(v3_dec) * np.sin(v1_dec) * np.cos((v3_ra - v1_ra)) V3PA = np.arctan2(y, x) m_eci2siaf = np.dot(m_v_to_siaf(v3idlyang * D2R, v3ref * A2R, v2ref * A2R, vidlparity), m_eci2v) siaf_x = 0. * A2R siaf_y = 0. * A2R refpos = np.array( [siaf_x, siaf_y, np.sqrt(1.-siaf_x * siaf_x - siaf_y * siaf_y)]) msky = np.dot(m_eci2siaf.transpose(), refpos) vaper_ra, vaper_dec = vector_to_ra_dec(msky) vysiaf = np.array([0., 1., 0.]) myeci = np.dot(m_eci2siaf.transpose(), vysiaf) # The Y axis of the aperture is given by vy_ra, vy_dec = vector_to_ra_dec(myeci) # The VyPA @ xref,yref is given by y = np.cos(vy_dec) * np.sin(vy_ra-vaper_ra) x = np.sin(vy_dec) * np.cos(vaper_dec) - \ np.cos(vy_dec) * np.sin(vaper_dec) * np.cos((vy_ra - vaper_ra)) vypa = np.arctan2(y, x) wcsinfo = (vaper_ra*R2D, vaper_dec*R2D, vypa*R2D) vinfo = (v1_ra*R2D, v1_dec*R2D, V3PA*R2D) return wcsinfo, vinfo def get_pointing(obstart, obsend, result_type='first'): """ Get telescope pointing engineering data. Parameters ---------- obstart, obsend: float MJD observation start/end times result_type: str What to return. Possible options are: `first`: Return the first non-zero matricies `all`: Return all non-zero matricies within the given range. Returns ------- q, j2fgs_matrix, fsmcorr, obstime The engineering pointing parameters. 
If the `result_type` returns multiple values, what is returned will be a list of 4-tuples. Raises ------ ValueError Cannot retrieve engineering information Notes ----- For the moment, the first found values will be used. This will need be re-examined when more information is available. """ logger.info( 'Determining pointing between observations times (mjd):' '\n\tobstart = {}' '\n\tobsend = {}'.format(obstart, obsend) ) logger.info( 'Querying engineering DB: {}'.format(ENGDB_BASE_URL) ) try: engdb = ENGDB_Service() except Exception as exception: raise ValueError( 'Cannot open engineering DB connection' '\nException: {}'.format( exception ) ) params = { 'SA_ZATTEST1': None, 'SA_ZATTEST2': None, 'SA_ZATTEST3': None, 'SA_ZATTEST4': None, 'SA_ZRFGS2J11': None, 'SA_ZRFGS2J21': None, 'SA_ZRFGS2J31': None, 'SA_ZRFGS2J12': None, 'SA_ZRFGS2J22': None, 'SA_ZRFGS2J32': None, 'SA_ZRFGS2J13': None, 'SA_ZRFGS2J23': None, 'SA_ZRFGS2J33': None, 'SA_ZADUCMDX': None, 'SA_ZADUCMDY': None, } for param in params: try: params[param] = engdb.get_values( param, obstart, obsend, time_format='mjd', include_obstime=True ) except Exception as exception: raise ValueError( 'Cannot retrive {} from engineering.' 
'\nFailure was {}'.format( param, exception ) ) # Find the first set of non-zero values results = [] for idx in range(len(params['SA_ZATTEST1'])): values = [ params[param][idx].value for param in params ] if any(values): # The tagged obstime will come from the SA_ZATTEST1 mneunonic obstime = params['SA_ZATTEST1'][idx].obstime # Fill out the matricies q = np.array([ params['SA_ZATTEST1'][idx].value, params['SA_ZATTEST2'][idx].value, params['SA_ZATTEST3'][idx].value, params['SA_ZATTEST4'][idx].value, ]) j2fgs_matrix = np.array([ params['SA_ZRFGS2J11'][idx].value, params['SA_ZRFGS2J21'][idx].value, params['SA_ZRFGS2J31'][idx].value, params['SA_ZRFGS2J12'][idx].value, params['SA_ZRFGS2J22'][idx].value, params['SA_ZRFGS2J32'][idx].value, params['SA_ZRFGS2J13'][idx].value, params['SA_ZRFGS2J23'][idx].value, params['SA_ZRFGS2J33'][idx].value, ]) fsmcorr = np.array([ params['SA_ZADUCMDX'][idx].value, params['SA_ZADUCMDY'][idx].value, ]) results.append(Pointing_Quaternions( q=q, j2fgs_matrix=j2fgs_matrix, fsmcorr=fsmcorr, obstime=obstime )) # Short circuit if all we're looking for is the first. 
if result_type == 'first': break if not len(results): raise ValueError( 'No non-zero quanternion found ' 'in the DB between MJD {} and {}'.format(obstart, obsend) ) if result_type == 'first': return results[0] else: return results def get_pointing_stub(obstart, obsend): ''' For the time being this simply returns the same damn values regardless of the input time (awaiting the time that these parameters are actually in the engineering database) ''' # The following values of q correspond to the engineering keyword values: # SA_ZATEST1, SA_ZATEST2, SA_ZATEST3, SA_ZATEST4 q = np.array([-0.36915286, 0.33763282, 0.05758533, 0.86395264]) # The following values of j2fgs_matrix correspond to the engineering # keyword values of: # SA_ZRFGS2K11 SA_ZRFGS2K21 SA_ZRFGS2K31 # SA_ZRFGS2K21 SA_ZRFGS2K22 SA_ZRFGS2K32 # SA_ZRFGS2K31 SA_ZRFGS2K32 SA_ZRFGS2K33 j2fgs_matrix = np.array( [-1.00444000e-03, 3.38145836e-03, 9.99993778e-01, 9.99999496e-01, -3.90000000e-14, 1.00444575e-03, 3.39649146e-06, 9.99994283e-01, -3.38145665e-03]) # The following values of fsmcorr correspond to the engineering keywords: # SA_ZADUCMDX, SA_ZADUCMDY fsmcorr = np.array([0., 0.]) return q, j2fgs_matrix, fsmcorr def compute_local_roll(pa_v3, ra_ref, dec_ref, v2_ref, v3_ref): """ Computes the position angle of V3 (measured N to E) at the reference point of an aperture. 
Parameters ---------- pa_v3 : float Position angle of V3 at (V2, V3) = (0, 0) [in deg] v2_ref, v3_ref : float Reference point in the V2, V3 frame [in arcsec] ra_ref, dec_ref : float RA and DEC corresponding to V2_REF and V3_REF, [in deg] Returns ------- new_roll : float The value of ROLL_REF (in deg) """ v2 = np.deg2rad(v2_ref / 3600) v3 = np.deg2rad(v3_ref / 3600) ra_ref = np.deg2rad(ra_ref) dec_ref = np.deg2rad(dec_ref) pa_v3 = np.deg2rad(pa_v3) M = np.array([[np.cos(ra_ref) * np.cos(dec_ref), -np.sin(ra_ref) * np.cos(pa_v3) + np.cos(ra_ref) * np.sin(dec_ref) * np.sin(pa_v3), -np.sin(ra_ref) * np.sin(pa_v3) - np.cos(ra_ref) * np.sin(dec_ref) * np.cos(pa_v3)], [np.sin(ra_ref) * np.cos(dec_ref), np.cos(ra_ref) * np.cos(pa_v3) + np.sin(ra_ref) * np.sin(dec_ref) * np.sin(pa_v3), np.cos(ra_ref) * np.sin(pa_v3) - np.sin(ra_ref) * np.sin(dec_ref) * np.cos(pa_v3)], [np.sin(dec_ref), -np.cos(dec_ref) * np.sin(pa_v3), np.cos(dec_ref) * np.cos(pa_v3)] ]) return _roll_angle_from_matrix(M, v2, v3) def _roll_angle_from_matrix(matrix, v2, v3): X = -(matrix[2, 0] * np.cos(v2) + matrix[2, 1] * np.sin(v2)) * np.sin(v3) + matrix[2, 2] * np.cos(v3) Y = (matrix[0, 0] * matrix[1, 2] - matrix[1, 0] * matrix[0, 2]) * np.cos(v2) + \ (matrix[0, 1] * matrix[1, 2] - matrix[1, 1] * matrix[0, 2]) * np.sin(v2) new_roll = np.rad2deg(np.arctan2(Y, X)) if new_roll < 0: new_roll += 360 return new_roll if __name__ == '__main__': if len(sys.argv) <= 1: raise ValueError('missing filename argument(s)') for filename in sys.argv[1:]: logger.info('Setting pointing for {}'.format(filename)) add_wcs(filename)
spacetelescopeREPO_NAMEmiragePATH_START.@mirage_extracted@mirage-master@mirage@utils@set_telescope_pointing_separated.py@.PATH_END.py
{ "filename": "CHANGELOG.md", "repo_name": "AndrewAnnex/SpiceyPy", "repo_path": "SpiceyPy_extracted/SpiceyPy-main/CHANGELOG.md", "type": "Markdown" }
# Change Log All notable changes to SpiceyPy will be documented here The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project tries to adhere to [Semantic Versioning](http://semver.org/). ## [6.0.0] - 2023-08-31 Fixed several major typos and fortran array ordering issues in tkfram, zzdynrot as well as failing tests on arm64 macos. ### Added - SpiceNOTENOUGHDATA2 exception #466 - Context manager for SPICE kernels #458 - CITATION.cff file - DeprecationWarning for zzdynrot ### Changed - tkfram_c now used in tkfram wrapper function - updated setup.cfg - type hints for sincpt to be more correct ### Deprecated - python 3.6 - python 3.7 ### Removed - codecov as a dependency in dev ### Fixed - fixed zzdynrot and tkfram return matrix element ordering - typo in exceptions.rst #459 - fixed test test_sphlat - fixed sphlat to use correct libspice function call - fixed tests for dskx02, dskxsi, gfsntc for arm64 macos floating point issues #470 - fixed typo in test_oscelt and test_oscltx ## [5.1.2] - 2022-10-14 fix for exception error message toolkit version ### Fixed - exceptions now use dynamic lookup of toolkit version for error messages ## [5.1.1] - 2022-07-30 fixes macOS arm64 cross compile ### Fixed - updated get_spice.py to allow for arch override for macos arm64 ## [5.1.0] - 2022-07-09 adds wrappers for the majority of new function in n67 ### Added - azlcpo - azlrec - chbigr - chbint - chbval - ckfrot - ckfxfm - ckgr02 - ckgr03 - ckmeta - cknr02 - cknr03 - dafhsf - dasadc - dasadd - dasadi - dashfs - daslla - dasllc - dasonw - dasops - dasrdd - dasrdi - dasudd - dasudi - daswbr - dazldr - dlabns - dlaens - dlaopn - dnearp - drdazl - ednmpt - edpnt - evsgp4 - getfvn - hrmesp - invstm - lgresp - lgrint - qderiv - recazl - stlabx - tagnpt - tkfram - tparch - trgsep - twovxf - vprojg ### Fixed - fixed docstring for frinfo - fixed freebsd support in getspice ## [5.0.1] - 2022-03-23 minor update to make ld_library_path update safer ### Fixed - 
override of ld_library_path is now temporary ### Changed - updated copyrights for 2022 ## [5.0.0] - 2022-02-17 ### Changed - switched to N67 CSPICE, no new wrapper functions yet - removed deprecated named args mentioned in 4.0.1 release notes ### Removed - deprecation warnings for params of mtxmg, mtxvg, mxm, mxmg, mxmt, mxmtg, mxvg, vtmvg, xposeg, unormg, vaddg, vdistg, vdotg, vequg, vhatg, vminug, vnromg, vrelg, vsclg, vsepg, vsubg, vzerog - ncol/nrow params for: mtxmg, mtxvg, mxm, mxmg, mxmt, mxmtg, mxvg, vtmvg, xposeg - ndim param for: unormg, vaddg, vdistg, vdotg, vequg, vhatg, vminug, vnromg, vrelg, vsclg, vsepg, vsubg, vzerog ## [4.0.3] - 2021-11-14 ### Added - changelog now rendered in docs - runtime override of cspice via env var or ld_library_path - pyproject.toml and setup.cfg - CSPICE N66 patches from NAIF/conda-forge feedstock - builds for aarch64 and macos arm64 ### Changed - switched to src layout - switched "cspice.dll/.so" to "libcspice.dll/so" - updated get_spice.py to build cspice from source - moved most metadata to setup.cfg - updated ci workflows to build wheels for major platforms using cibuildwheels - updated install commands in docs to use pip instead of setup.py ## [4.0.2] - 2021-08-13 ### Fixed - getfat variables size #420 - safer cleanups in tests ## [4.0.1] - 2021-05-31 ### Added - docs info about ARM support, currently limited to conda-forge spiceypy - docs citation info/basic intro - hash checksums for test kernels - offline install ci tests - warn_deprecated_args function to aid future deprecations ### Deprecated - added deprecation warnings for ncol/nrow params for: mtxmg, mtxvg, mxm, mxmg, mxmt, mxmtg, mxvg, vtmvg, xposeg pending next major release. - added deprecation warnings for ndim param for: unormg, vaddg, vdistg, vdotg, vequg, vhatg, vminug, vnromg, vrelg, vsclg, vsepg, vsubg, vzerog pending next major release. 
### Changed - copyright year - a number of typehints to accept np.ndarray - changed test_wrapper to use a pytest autouse fixture to call reset/kclear automatically for most tests ### Fixed - missing docs for xf2eul - numpy bool_ deprecation warnings - numpy float warning - type hint for appndd ## [4.0.0] - 2020-12-07 ### Added - bodeul ### Changed - main branch is now the default branch - switched to use 'fromisoformat' in datetime2et ### Fixed - fixed nintvls spelling inconsistency ## [3.1.1] - 2020-05-25 ### Fixed - missing get_spice.py in manifest ## [3.1.0] - 2020-05-25 ### Added - added irfnam, irfnum, irfrot, irftrn - added kpsolv, kepleq - better exceptions, many hundred spice toolkit defined exceptions - copy button to docs codeblocks - added CSPICE_SRC_DRI envvar override to specify cspice src directory during install - added CSPICE_SHARED_LIB envvar override to specify cspice.so/.dll/.dylib during install ### Changed - switch to codecov for code coverage - various support type changes - renamed getspice.py to get_spice.py ### Fixed - fixed missing doc strings for callbacks ### Removed - coveralls - test cmd class in setup.py - direct references to deprecated numpy matrix class ## [3.0.2] - 2020-02-19 ### Added - et2datetime function - funding.yml ### Changed - changed http to https in docs/docstrings ### Fixed - many small issues with the docs - author name in joss paper - fixing SyntaxWarning in python 3.8 - year in docs - issue with urllib usage in gettestkernels ## [3.0.1] - 2020-01-10 ### Changed - removed old logic from getspice for old openssl versions ### Removed - import of six in getspice ## [3.0.0] - 2020-01-09 ### Added - Python 3.8 support ### Changed - using black for code linting - now using type hints - vectorized functions now return numpy arrays instead of lists of arrays ### Deprecated - python 3.5 - python 2.7 ## [2.3.2] - 2019-12-19 ### Added - wrapper for ev2lin - numpy string support ### Fixed - some equality checks ### Changed - 
updated MANIFEST.in to include test code - vectorization of et2utc - vectorization of scencd - vectroization of sc2e ## [2.3.1] - 2019-10-18 ### Changed - updated MANIFEST.in to include test code ## [2.3.0] - 2019-09-25 ### Added - wrapper for tkfram - wrapper for ckfrot - wrapper for zzdynrot ### Fixed - issue with dafgda absolute value problem, see issue #302 ## [2.2.1] - 2019-08-19 ### Changed - set numpy version to 1.16.4 for python 2 - other dependency changes to setup.py and requirements.txt ### Fixed - typo in a unit test fixed ## [2.2.0] - 2019-02-24 ### Added - gfevnt wrapper - easier spice cell inits - python datetime to et converter - issue template - code of conduct - NAIF python lessons to docs ### Changed - functions that modify a results spicecell now optionally create a return spicecell - convrt now "vectorized" - prioritized citation info in readme ### Removed - removed anaconda build steps from appveyor, conda-fordge replaces it ### Fixed - newlines in changelog ## [2.1.2] - 2018-08-17 ### Added - python 3.7 builds on travis / appveyor ### Changed - numpy to ctypes and back conversions improved ### Removed - a few bool related internal things in support_types - conda builds on appveyor removed in favor of conda-forge distribution of spiceypy ### Fixed - issues relating to c_bool usage. everything is now c_int ## [2.1.1] - 2018-04-24 ### Added - wrapper functions for gffove and gfocce and associated callbacks - proxymanager for spice download by B. 
Seignovert ### Changed - simplifications in libspicehelper ### Fixed - issue with cassini example in doc - termpt docstring by Marcel Stefko - various things in ci build configs - missing dll/so file issue with pip installs ## [2.1.0] - 2017-11-09 ### Added - Completed wrapping of all new N66 DSK functions - 3.6 classifier - context manager for turning on/off found flag catches - contributor guide - freebsd support - added tests for dozens of functions, wrapped almost all remaining functions ### Fixed - added six and numpy to setup_requires setup.py kwargs - bugs in some tests ### Changed - changed naming of vectorToList to cVectorToPython - Updated getspice module to use urllib3 for OpenSSL library versions older than OpenSSL 1.0.1g. - getspice module provides now a class that handles the downloading and unpacking of N066 CSPICE distribution. - Updated setup to pack the CSPICE installation code into a class that extends the setuptools.command.install command. - made vectorized functions more consistent - changed tests to point to smaller kernel set hosted on github ## [2.0.0] - 2017-06-09 ### Added - Implemented most of the new functions from N66 SPICE - IntMatrixType support type - SpiceDLADescr struct ### Changed - now backing N66 CSPICE - now builds 2.7, 3.4, 3.5, 3.6 ### Deprecated - 32 bit builds ### Fixed - toPythonString now strips whitespace ## [1.1.1] - 2017-04-23 ### Added - added python3.6 builds ### Fixed - fixed formatting on changelog - fixed issues with rtd builds ### Changed - version updated - converted all downloads to use https ## [1.1.0] - 2016-10-19 ### Added - wrapper functions and tests for fovray, fovtrg, pxfrm2, occult #158 - wrapper functions and tests for spklef, spkopa, spkpds, spksub, spkuds, spkuef #155 - tests for srxpt and sincpt #154 - a bunch of other tests for CK related functions - example added to docs - automated artifact deployments (mostly) to pypi and conda cloud ### Fixed - improved use of six api to have better spicecells 
### Changed - Start versioning based on the current English version at 0.3.0 to help - refactored tests to be cleaner with kernel files - fixed spice toolkit version to N65 pending new toolkit release. ## [1.0.0] - 2016-03-27 ### Added - DOI citation information ### Changed - updated versions for pytest, coverage, coveralls - README updates ## [0.7.0] - 2016-03-26 ### Added - python wheel builds in appveyor #117 - wrapper for gfilum function ### Changed - converted README to rst format ### Fixed - inconsistencies in doc strings #143 - issue #136 ## [0.6.8] - 2016-03-07 Got to a semi complete api here, lots of commits before things so this version can be considered a bit of a baseline ### Added - many things ### Changed - the game ### Deprecated - nothing important ### Removed - what had to go ### Fixed - it
AndrewAnnexREPO_NAMESpiceyPyPATH_START.@SpiceyPy_extracted@SpiceyPy-main@CHANGELOG.md@.PATH_END.py
{ "filename": "_legend.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/cone/_legend.py", "type": "Python" }
import _plotly_utils.basevalidators class LegendValidator(_plotly_utils.basevalidators.SubplotidValidator): def __init__(self, plotly_name="legend", parent_name="cone", **kwargs): super(LegendValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, dflt=kwargs.pop("dflt", "legend"), edit_type=kwargs.pop("edit_type", "style"), **kwargs, )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@cone@_legend.py@.PATH_END.py
{ "filename": "schema.py", "repo_name": "langchain-ai/langchain", "repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/evaluation/schema.py", "type": "Python" }
"""Interfaces to be implemented by general evaluators.""" from __future__ import annotations import logging from abc import ABC, abstractmethod from enum import Enum from typing import Any, Optional, Sequence, Tuple, Union from warnings import warn from langchain_core.agents import AgentAction from langchain_core.language_models import BaseLanguageModel from langchain_core.runnables.config import run_in_executor from langchain.chains.base import Chain logger = logging.getLogger(__name__) class EvaluatorType(str, Enum): """The types of the evaluators.""" QA = "qa" """Question answering evaluator, which grades answers to questions directly using an LLM.""" COT_QA = "cot_qa" """Chain of thought question answering evaluator, which grades answers to questions using chain of thought 'reasoning'.""" CONTEXT_QA = "context_qa" """Question answering evaluator that incorporates 'context' in the response.""" PAIRWISE_STRING = "pairwise_string" """The pairwise string evaluator, which predicts the preferred prediction from between two models.""" SCORE_STRING = "score_string" """The scored string evaluator, which gives a score between 1 and 10 to a prediction.""" LABELED_PAIRWISE_STRING = "labeled_pairwise_string" """The labeled pairwise string evaluator, which predicts the preferred prediction from between two models based on a ground truth reference label.""" LABELED_SCORE_STRING = "labeled_score_string" """The labeled scored string evaluator, which gives a score between 1 and 10 to a prediction based on a ground truth reference label.""" AGENT_TRAJECTORY = "trajectory" """The agent trajectory evaluator, which grades the agent's intermediate steps.""" CRITERIA = "criteria" """The criteria evaluator, which evaluates a model based on a custom set of criteria without any reference labels.""" LABELED_CRITERIA = "labeled_criteria" """The labeled criteria evaluator, which evaluates a model based on a custom set of criteria, with a reference label.""" STRING_DISTANCE = 
"string_distance" """Compare predictions to a reference answer using string edit distances.""" EXACT_MATCH = "exact_match" """Compare predictions to a reference answer using exact matching.""" REGEX_MATCH = "regex_match" """Compare predictions to a reference answer using regular expressions.""" PAIRWISE_STRING_DISTANCE = "pairwise_string_distance" """Compare predictions based on string edit distances.""" EMBEDDING_DISTANCE = "embedding_distance" """Compare a prediction to a reference label using embedding distance.""" PAIRWISE_EMBEDDING_DISTANCE = "pairwise_embedding_distance" """Compare two predictions using embedding distance.""" JSON_VALIDITY = "json_validity" """Check if a prediction is valid JSON.""" JSON_EQUALITY = "json_equality" """Check if a prediction is equal to a reference JSON.""" JSON_EDIT_DISTANCE = "json_edit_distance" """Compute the edit distance between two JSON strings after canonicalization.""" JSON_SCHEMA_VALIDATION = "json_schema_validation" """Check if a prediction is valid JSON according to a JSON schema.""" class LLMEvalChain(Chain): """A base class for evaluators that use an LLM.""" @classmethod @abstractmethod def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> LLMEvalChain: """Create a new evaluator from an LLM.""" class _EvalArgsMixin: """Mixin for checking evaluation arguments.""" @property def requires_reference(self) -> bool: """Whether this evaluator requires a reference label.""" return False @property def requires_input(self) -> bool: """Whether this evaluator requires an input string.""" return False @property def _skip_input_warning(self) -> str: """Warning to show when input is ignored.""" return f"Ignoring input in {self.__class__.__name__}, as it is not expected." @property def _skip_reference_warning(self) -> str: """Warning to show when reference is ignored.""" return ( f"Ignoring reference in {self.__class__.__name__}, as it is not expected." 
) def _check_evaluation_args( self, reference: Optional[str] = None, input: Optional[str] = None, ) -> None: """Check if the evaluation arguments are valid. Args: reference (Optional[str], optional): The reference label. input (Optional[str], optional): The input string. Raises: ValueError: If the evaluator requires an input string but none is provided, or if the evaluator requires a reference label but none is provided. """ if self.requires_input and input is None: raise ValueError(f"{self.__class__.__name__} requires an input string.") elif input is not None and not self.requires_input: warn(self._skip_input_warning) if self.requires_reference and reference is None: raise ValueError(f"{self.__class__.__name__} requires a reference string.") elif reference is not None and not self.requires_reference: warn(self._skip_reference_warning) class StringEvaluator(_EvalArgsMixin, ABC): """Grade, tag, or otherwise evaluate predictions relative to their inputs and/or reference labels.""" @property def evaluation_name(self) -> str: """The name of the evaluation.""" return self.__class__.__name__ @property def requires_reference(self) -> bool: """Whether this evaluator requires a reference label.""" return False @abstractmethod def _evaluate_strings( self, *, prediction: Union[str, Any], reference: Optional[Union[str, Any]] = None, input: Optional[Union[str, Any]] = None, **kwargs: Any, ) -> dict: """Evaluate Chain or LLM output, based on optional input and label. Args: prediction (str): The LLM or chain prediction to evaluate. reference (Optional[str], optional): The reference label to evaluate against. input (Optional[str], optional): The input to consider during evaluation. kwargs: Additional keyword arguments, including callbacks, tags, etc. Returns: dict: The evaluation results containing the score or value. It is recommended that the dictionary contain the following keys: - score: the score of the evaluation, if applicable. 
- value: the string value of the evaluation, if applicable. - reasoning: the reasoning for the evaluation, if applicable. """ # noqa: E501 async def _aevaluate_strings( self, *, prediction: Union[str, Any], reference: Optional[Union[str, Any]] = None, input: Optional[Union[str, Any]] = None, **kwargs: Any, ) -> dict: """Asynchronously evaluate Chain or LLM output, based on optional input and label. Args: prediction (str): The LLM or chain prediction to evaluate. reference (Optional[str], optional): The reference label to evaluate against. input (Optional[str], optional): The input to consider during evaluation. kwargs: Additional keyword arguments, including callbacks, tags, etc. Returns: dict: The evaluation results containing the score or value. It is recommended that the dictionary contain the following keys: - score: the score of the evaluation, if applicable. - value: the string value of the evaluation, if applicable. - reasoning: the reasoning for the evaluation, if applicable. """ # noqa: E501 return await run_in_executor( None, self._evaluate_strings, prediction=prediction, reference=reference, input=input, **kwargs, ) def evaluate_strings( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, **kwargs: Any, ) -> dict: """Evaluate Chain or LLM output, based on optional input and label. Args: prediction (str): The LLM or chain prediction to evaluate. reference (Optional[str], optional): The reference label to evaluate against. input (Optional[str], optional): The input to consider during evaluation. kwargs: Additional keyword arguments, including callbacks, tags, etc. Returns: dict: The evaluation results containing the score or value. 
""" # noqa: E501 self._check_evaluation_args(reference=reference, input=input) return self._evaluate_strings( prediction=prediction, reference=reference, input=input, **kwargs ) async def aevaluate_strings( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, **kwargs: Any, ) -> dict: """Asynchronously evaluate Chain or LLM output, based on optional input and label. Args: prediction (str): The LLM or chain prediction to evaluate. reference (Optional[str], optional): The reference label to evaluate against. input (Optional[str], optional): The input to consider during evaluation. kwargs: Additional keyword arguments, including callbacks, tags, etc. Returns: dict: The evaluation results containing the score or value. """ # noqa: E501 self._check_evaluation_args(reference=reference, input=input) return await self._aevaluate_strings( prediction=prediction, reference=reference, input=input, **kwargs ) class PairwiseStringEvaluator(_EvalArgsMixin, ABC): """Compare the output of two models (or two outputs of the same model).""" @abstractmethod def _evaluate_string_pairs( self, *, prediction: str, prediction_b: str, reference: Optional[str] = None, input: Optional[str] = None, **kwargs: Any, ) -> dict: """Evaluate the output string pairs. Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. reference (Optional[str], optional): The expected output / reference string. input (Optional[str], optional): The input string. kwargs: Additional keyword arguments, such as callbacks and optional reference strings. Returns: dict: A dictionary containing the preference, scores, and/or other information. """ # noqa: E501 async def _aevaluate_string_pairs( self, *, prediction: str, prediction_b: str, reference: Optional[str] = None, input: Optional[str] = None, **kwargs: Any, ) -> dict: """Asynchronously evaluate the output string pairs. 
Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. reference (Optional[str], optional): The expected output / reference string. input (Optional[str], optional): The input string. kwargs: Additional keyword arguments, such as callbacks and optional reference strings. Returns: dict: A dictionary containing the preference, scores, and/or other information. """ # noqa: E501 return await run_in_executor( None, self._evaluate_string_pairs, prediction=prediction, prediction_b=prediction_b, reference=reference, input=input, **kwargs, ) def evaluate_string_pairs( self, *, prediction: str, prediction_b: str, reference: Optional[str] = None, input: Optional[str] = None, **kwargs: Any, ) -> dict: """Evaluate the output string pairs. Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. reference (Optional[str], optional): The expected output / reference string. input (Optional[str], optional): The input string. kwargs: Additional keyword arguments, such as callbacks and optional reference strings. Returns: dict: A dictionary containing the preference, scores, and/or other information. """ # noqa: E501 self._check_evaluation_args(reference=reference, input=input) return self._evaluate_string_pairs( prediction=prediction, prediction_b=prediction_b, reference=reference, input=input, **kwargs, ) async def aevaluate_string_pairs( self, *, prediction: str, prediction_b: str, reference: Optional[str] = None, input: Optional[str] = None, **kwargs: Any, ) -> dict: """Asynchronously evaluate the output string pairs. Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. reference (Optional[str], optional): The expected output / reference string. input (Optional[str], optional): The input string. 
kwargs: Additional keyword arguments, such as callbacks and optional reference strings. Returns: dict: A dictionary containing the preference, scores, and/or other information. """ # noqa: E501 self._check_evaluation_args(reference=reference, input=input) return await self._aevaluate_string_pairs( prediction=prediction, prediction_b=prediction_b, reference=reference, input=input, **kwargs, ) class AgentTrajectoryEvaluator(_EvalArgsMixin, ABC): """Interface for evaluating agent trajectories.""" @property def requires_input(self) -> bool: """Whether this evaluator requires an input string.""" return True @abstractmethod def _evaluate_agent_trajectory( self, *, prediction: str, agent_trajectory: Sequence[Tuple[AgentAction, str]], input: str, reference: Optional[str] = None, **kwargs: Any, ) -> dict: """Evaluate a trajectory. Args: prediction (str): The final predicted response. agent_trajectory (List[Tuple[AgentAction, str]]): The intermediate steps forming the agent trajectory. input (str): The input to the agent. reference (Optional[str]): The reference answer. Returns: dict: The evaluation result. """ async def _aevaluate_agent_trajectory( self, *, prediction: str, agent_trajectory: Sequence[Tuple[AgentAction, str]], input: str, reference: Optional[str] = None, **kwargs: Any, ) -> dict: """Asynchronously evaluate a trajectory. Args: prediction (str): The final predicted response. agent_trajectory (List[Tuple[AgentAction, str]]): The intermediate steps forming the agent trajectory. input (str): The input to the agent. reference (Optional[str]): The reference answer. Returns: dict: The evaluation result. 
""" return await run_in_executor( None, self._evaluate_agent_trajectory, prediction=prediction, agent_trajectory=agent_trajectory, reference=reference, input=input, **kwargs, ) def evaluate_agent_trajectory( self, *, prediction: str, agent_trajectory: Sequence[Tuple[AgentAction, str]], input: str, reference: Optional[str] = None, **kwargs: Any, ) -> dict: """Evaluate a trajectory. Args: prediction (str): The final predicted response. agent_trajectory (List[Tuple[AgentAction, str]]): The intermediate steps forming the agent trajectory. input (str): The input to the agent. reference (Optional[str]): The reference answer. Returns: dict: The evaluation result. """ self._check_evaluation_args(reference=reference, input=input) return self._evaluate_agent_trajectory( prediction=prediction, input=input, agent_trajectory=agent_trajectory, reference=reference, **kwargs, ) async def aevaluate_agent_trajectory( self, *, prediction: str, agent_trajectory: Sequence[Tuple[AgentAction, str]], input: str, reference: Optional[str] = None, **kwargs: Any, ) -> dict: """Asynchronously evaluate a trajectory. Args: prediction (str): The final predicted response. agent_trajectory (List[Tuple[AgentAction, str]]): The intermediate steps forming the agent trajectory. input (str): The input to the agent. reference (Optional[str]): The reference answer. Returns: dict: The evaluation result. """ self._check_evaluation_args(reference=reference, input=input) return await self._aevaluate_agent_trajectory( prediction=prediction, input=input, agent_trajectory=agent_trajectory, reference=reference, **kwargs, )
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@evaluation@schema.py@.PATH_END.py
{ "filename": "writer.py", "repo_name": "langchain-ai/langchain", "repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/chat_models/writer.py", "type": "Python" }
"""Writer chat wrapper.""" from __future__ import annotations import json import logging from typing import ( Any, AsyncIterator, Callable, Dict, Iterator, List, Literal, Optional, Sequence, Tuple, Type, Union, ) from langchain_core.callbacks import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain_core.language_models import LanguageModelInput from langchain_core.language_models.chat_models import ( BaseChatModel, ) from langchain_core.messages import ( AIMessage, AIMessageChunk, BaseMessage, ChatMessage, HumanMessage, SystemMessage, ToolMessage, ) from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from langchain_core.runnables import Runnable from langchain_core.utils import get_from_dict_or_env from langchain_core.utils.function_calling import convert_to_openai_tool from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator logger = logging.getLogger(__name__) class ChatWriter(BaseChatModel): """Writer chat model. To use, you should have the ``writer-sdk`` Python package installed, and the environment variable ``WRITER_API_KEY`` set with your API key or pass 'api_key' init param. Example: .. 
code-block:: python from langchain_community.chat_models import ChatWriter chat = ChatWriter( api_key="your key" model="palmyra-x-004" ) """ client: Any = Field(default=None, exclude=True) #: :meta private: async_client: Any = Field(default=None, exclude=True) #: :meta private: api_key: Optional[SecretStr] = Field(default=None) """Writer API key.""" model_name: str = Field(default="palmyra-x-004", alias="model") """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" n: int = 1 """Number of chat completions to generate for each prompt.""" max_tokens: Optional[int] = None """Maximum number of tokens to generate.""" model_config = ConfigDict(populate_by_name=True) @property def _llm_type(self) -> str: """Return type of chat model.""" return "writer-chat" @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { "model_name": self.model_name, "temperature": self.temperature, **self.model_kwargs, } @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Writer API.""" return { "model": self.model_name, "temperature": self.temperature, "n": self.n, "max_tokens": self.max_tokens, **self.model_kwargs, } @model_validator(mode="before") @classmethod def validate_environment(cls, values: Dict) -> Any: """Validates that api key is passed and creates Writer clients.""" try: from writerai import AsyncClient, Client except ImportError as e: raise ImportError( "Could not import writerai python package. " "Please install it with `pip install writerai`." 
) from e if not values.get("client"): values.update( { "client": Client( api_key=get_from_dict_or_env( values, "api_key", "WRITER_API_KEY" ) ) } ) if not values.get("async_client"): values.update( { "async_client": AsyncClient( api_key=get_from_dict_or_env( values, "api_key", "WRITER_API_KEY" ) ) } ) if not ( type(values.get("client")) is Client and type(values.get("async_client")) is AsyncClient ): raise ValueError( "'client' attribute must be with type 'Client' and " "'async_client' must be with type 'AsyncClient' from 'writerai' package" ) return values def _create_chat_result(self, response: Any) -> ChatResult: generations = [] for choice in response.choices: message = self._convert_writer_to_langchain(choice.message) gen = ChatGeneration( message=message, generation_info=dict(finish_reason=choice.finish_reason), ) generations.append(gen) token_usage = {} if response.usage: token_usage = response.usage.__dict__ llm_output = { "token_usage": token_usage, "model_name": self.model_name, "system_fingerprint": response.system_fingerprint, } return ChatResult(generations=generations, llm_output=llm_output) @staticmethod def _convert_langchain_to_writer(message: BaseMessage) -> dict: """Convert a LangChain message to a Writer message dict.""" message_dict = {"role": "", "content": message.content} if isinstance(message, ChatMessage): message_dict["role"] = message.role elif isinstance(message, HumanMessage): message_dict["role"] = "user" elif isinstance(message, AIMessage): message_dict["role"] = "assistant" if message.tool_calls: message_dict["tool_calls"] = [ { "id": tool["id"], "type": "function", "function": {"name": tool["name"], "arguments": tool["args"]}, } for tool in message.tool_calls ] elif isinstance(message, SystemMessage): message_dict["role"] = "system" elif isinstance(message, ToolMessage): message_dict["role"] = "tool" message_dict["tool_call_id"] = message.tool_call_id else: raise ValueError(f"Got unknown message type: {type(message)}") if 
message.name: message_dict["name"] = message.name return message_dict @staticmethod def _convert_writer_to_langchain(response_message: Any) -> BaseMessage: """Convert a Writer message to a LangChain message.""" if not isinstance(response_message, dict): response_message = json.loads( json.dumps(response_message, default=lambda o: o.__dict__) ) role = response_message.get("role", "") content = response_message.get("content") if not content: content = "" if role == "user": return HumanMessage(content=content) elif role == "assistant": additional_kwargs = {} if tool_calls := response_message.get("tool_calls", []): additional_kwargs["tool_calls"] = tool_calls return AIMessageChunk(content=content, additional_kwargs=additional_kwargs) elif role == "system": return SystemMessage(content=content) elif role == "tool": return ToolMessage( content=content, tool_call_id=response_message.get("tool_call_id", ""), name=response_message.get("name", ""), ) else: return ChatMessage(content=content, role=role) def _convert_messages_to_writer( self, messages: List[BaseMessage], stop: Optional[List[str]] = None ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: """Convert a list of LangChain messages to List of Writer dicts.""" params = { "model": self.model_name, "temperature": self.temperature, "n": self.n, **self.model_kwargs, } if stop: params["stop"] = stop if self.max_tokens is not None: params["max_tokens"] = self.max_tokens message_dicts = [self._convert_langchain_to_writer(m) for m in messages] return message_dicts, params def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: message_dicts, params = self._convert_messages_to_writer(messages, stop) params = {**params, **kwargs, "stream": True} response = self.client.chat.chat(messages=message_dicts, **params) for chunk in response: delta = chunk.choices[0].delta if not delta or not 
delta.content: continue chunk = self._convert_writer_to_langchain( {"role": "assistant", "content": delta.content} ) chunk = ChatGenerationChunk(message=chunk) if run_manager: run_manager.on_llm_new_token(chunk.text) yield chunk async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: message_dicts, params = self._convert_messages_to_writer(messages, stop) params = {**params, **kwargs, "stream": True} response = await self.async_client.chat.chat(messages=message_dicts, **params) async for chunk in response: delta = chunk.choices[0].delta if not delta or not delta.content: continue chunk = self._convert_writer_to_langchain( {"role": "assistant", "content": delta.content} ) chunk = ChatGenerationChunk(message=chunk) if run_manager: await run_manager.on_llm_new_token(chunk.text) yield chunk def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: message_dicts, params = self._convert_messages_to_writer(messages, stop) params = {**params, **kwargs} response = self.client.chat.chat(messages=message_dicts, **params) return self._create_chat_result(response) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: message_dicts, params = self._convert_messages_to_writer(messages, stop) params = {**params, **kwargs} response = await self.async_client.chat.chat(messages=message_dicts, **params) return self._create_chat_result(response) def bind_tools( self, tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]], *, tool_choice: Optional[Union[str, Literal["auto", "none"]]] = None, **kwargs: Any, ) -> Runnable[LanguageModelInput, BaseMessage]: """Bind tools to the chat model. 
Args: tools: Tools to bind to the model tool_choice: Which tool to require ('auto', 'none', or specific tool name) **kwargs: Additional parameters to pass to the chat model Returns: A runnable that will use the tools """ formatted_tools = [convert_to_openai_tool(tool) for tool in tools] if tool_choice: kwargs["tool_choice"] = ( (tool_choice) if tool_choice in ("auto", "none") else {"type": "function", "function": {"name": tool_choice}} ) return super().bind(tools=formatted_tools, **kwargs)
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@chat_models@writer.py@.PATH_END.py
{ "filename": "_size.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/splom/marker/colorbar/tickfont/_size.py", "type": "Python" }
import _plotly_utils.basevalidators class SizeValidator(_plotly_utils.basevalidators.NumberValidator): def __init__( self, plotly_name="size", parent_name="splom.marker.colorbar.tickfont", **kwargs ): super(SizeValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "colorbars"), min=kwargs.pop("min", 1), **kwargs, )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@splom@marker@colorbar@tickfont@_size.py@.PATH_END.py
{ "filename": "generate_ref_ast.py", "repo_name": "waynebhayes/SpArcFiRe", "repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/coordinates/tests/accuracy/generate_ref_ast.py", "type": "Python" }
""" This series of functions are used to generate the reference CSV files used by the accuracy tests. Running this as a comand-line script will generate them all. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import os import numpy as np from ....table import Table, Column from ....extern.six.moves import range def ref_fk4_no_e_fk4(fnout='fk4_no_e_fk4.csv'): """ Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK4 conversion, with arbitrary equinoxes and epoch of observation. """ import starlink.Ast as Ast np.random.seed(12345) N = 200 # Sample uniformly on the unit sphere. These will be either the FK4 # coordinates for the transformation to FK5, or the FK5 coordinates for the # transformation to FK4. ra = np.random.uniform(0., 360., N) dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N))) # Generate random observation epoch and equinoxes obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)] ra_fk4ne, dec_fk4ne = [], [] ra_fk4, dec_fk4 = [], [] for i in range(N): # Set up frames for AST frame_fk4ne = Ast.SkyFrame('System=FK4-NO-E,Epoch={epoch},Equinox=B1950'.format(epoch=obstime[i])) frame_fk4 = Ast.SkyFrame('System=FK4,Epoch={epoch},Equinox=B1950'.format(epoch=obstime[i])) # FK4 to FK4 (no E-terms) frameset = frame_fk4.convert(frame_fk4ne) coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) ra_fk4ne.append(coords[0, 0]) dec_fk4ne.append(coords[1, 0]) # FK4 (no E-terms) to FK4 frameset = frame_fk4ne.convert(frame_fk4) coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) ra_fk4.append(coords[0, 0]) dec_fk4.append(coords[1, 0]) # Write out table to a CSV file t = Table() t.add_column(Column(name='obstime', data=obstime)) t.add_column(Column(name='ra_in', data=ra)) t.add_column(Column(name='dec_in', data=dec)) t.add_column(Column(name='ra_fk4ne', data=ra_fk4ne)) t.add_column(Column(name='dec_fk4ne', data=dec_fk4ne)) 
t.add_column(Column(name='ra_fk4', data=ra_fk4)) t.add_column(Column(name='dec_fk4', data=dec_fk4)) f = open(fnout, 'wb') f.write("# This file was generated with the {0} script, and the reference " "values were computed using AST\n".format(os.path.basename(__file__))) t.write(f, format='ascii', delimiter=',') def ref_fk4_no_e_fk5(fnout='fk4_no_e_fk5.csv'): """ Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK5 conversion, with arbitrary equinoxes and epoch of observation. """ import starlink.Ast as Ast np.random.seed(12345) N = 200 # Sample uniformly on the unit sphere. These will be either the FK4 # coordinates for the transformation to FK5, or the FK5 coordinates for the # transformation to FK4. ra = np.random.uniform(0., 360., N) dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N))) # Generate random observation epoch and equinoxes obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)] equinox_fk4 = ["B{0:7.2f}".format(x) for x in np.random.uniform(1925., 1975., N)] equinox_fk5 = ["J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)] ra_fk4, dec_fk4 = [], [] ra_fk5, dec_fk5 = [], [] for i in range(N): # Set up frames for AST frame_fk4 = Ast.SkyFrame('System=FK4-NO-E,Epoch={epoch},Equinox={equinox_fk4}'.format(epoch=obstime[i], equinox_fk4=equinox_fk4[i])) frame_fk5 = Ast.SkyFrame('System=FK5,Epoch={epoch},Equinox={equinox_fk5}'.format(epoch=obstime[i], equinox_fk5=equinox_fk5[i])) # FK4 to FK5 frameset = frame_fk4.convert(frame_fk5) coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) ra_fk5.append(coords[0, 0]) dec_fk5.append(coords[1, 0]) # FK5 to FK4 frameset = frame_fk5.convert(frame_fk4) coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) ra_fk4.append(coords[0, 0]) dec_fk4.append(coords[1, 0]) # Write out table to a CSV file t = Table() t.add_column(Column(name='equinox_fk4', data=equinox_fk4)) t.add_column(Column(name='equinox_fk5', 
data=equinox_fk5)) t.add_column(Column(name='obstime', data=obstime)) t.add_column(Column(name='ra_in', data=ra)) t.add_column(Column(name='dec_in', data=dec)) t.add_column(Column(name='ra_fk5', data=ra_fk5)) t.add_column(Column(name='dec_fk5', data=dec_fk5)) t.add_column(Column(name='ra_fk4', data=ra_fk4)) t.add_column(Column(name='dec_fk4', data=dec_fk4)) f = open(fnout, 'wb') f.write("# This file was generated with the {0} script, and the reference " "values were computed using AST\n".format(os.path.basename(__file__))) t.write(f, format='ascii', delimiter=',') def ref_galactic_fk4(fnout='galactic_fk4.csv'): """ Accuracy tests for the ICRS (with no E-terms of aberration) to/from FK5 conversion, with arbitrary equinoxes and epoch of observation. """ import starlink.Ast as Ast np.random.seed(12345) N = 200 # Sample uniformly on the unit sphere. These will be either the ICRS # coordinates for the transformation to FK5, or the FK5 coordinates for the # transformation to ICRS. lon = np.random.uniform(0., 360., N) lat = np.degrees(np.arcsin(np.random.uniform(-1., 1., N))) # Generate random observation epoch and equinoxes obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)] equinox_fk4 = ["J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)] lon_gal, lat_gal = [], [] ra_fk4, dec_fk4 = [], [] for i in range(N): # Set up frames for AST frame_gal = Ast.SkyFrame('System=Galactic,Epoch={epoch}'.format(epoch=obstime[i])) frame_fk4 = Ast.SkyFrame('System=FK4,Epoch={epoch},Equinox={equinox_fk4}'.format(epoch=obstime[i], equinox_fk4=equinox_fk4[i])) # ICRS to FK5 frameset = frame_gal.convert(frame_fk4) coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]])) ra_fk4.append(coords[0, 0]) dec_fk4.append(coords[1, 0]) # FK5 to ICRS frameset = frame_fk4.convert(frame_gal) coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]])) lon_gal.append(coords[0, 0]) lat_gal.append(coords[1, 0]) # Write out 
table to a CSV file t = Table() t.add_column(Column(name='equinox_fk4', data=equinox_fk4)) t.add_column(Column(name='obstime', data=obstime)) t.add_column(Column(name='lon_in', data=lon)) t.add_column(Column(name='lat_in', data=lat)) t.add_column(Column(name='ra_fk4', data=ra_fk4)) t.add_column(Column(name='dec_fk4', data=dec_fk4)) t.add_column(Column(name='lon_gal', data=lon_gal)) t.add_column(Column(name='lat_gal', data=lat_gal)) f = open(fnout, 'wb') f.write("# This file was generated with the {0} script, and the reference " "values were computed using AST\n".format(os.path.basename(__file__))) t.write(f, format='ascii', delimiter=',') def ref_icrs_fk5(fnout='icrs_fk5.csv'): """ Accuracy tests for the ICRS (with no E-terms of aberration) to/from FK5 conversion, with arbitrary equinoxes and epoch of observation. """ import starlink.Ast as Ast np.random.seed(12345) N = 200 # Sample uniformly on the unit sphere. These will be either the ICRS # coordinates for the transformation to FK5, or the FK5 coordinates for the # transformation to ICRS. 
ra = np.random.uniform(0., 360., N) dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N))) # Generate random observation epoch and equinoxes obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)] equinox_fk5 = ["J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)] ra_icrs, dec_icrs = [], [] ra_fk5, dec_fk5 = [], [] for i in range(N): # Set up frames for AST frame_icrs = Ast.SkyFrame('System=ICRS,Epoch={epoch}'.format(epoch=obstime[i])) frame_fk5 = Ast.SkyFrame('System=FK5,Epoch={epoch},Equinox={equinox_fk5}'.format(epoch=obstime[i], equinox_fk5=equinox_fk5[i])) # ICRS to FK5 frameset = frame_icrs.convert(frame_fk5) coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) ra_fk5.append(coords[0, 0]) dec_fk5.append(coords[1, 0]) # FK5 to ICRS frameset = frame_fk5.convert(frame_icrs) coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) ra_icrs.append(coords[0, 0]) dec_icrs.append(coords[1, 0]) # Write out table to a CSV file t = Table() t.add_column(Column(name='equinox_fk5', data=equinox_fk5)) t.add_column(Column(name='obstime', data=obstime)) t.add_column(Column(name='ra_in', data=ra)) t.add_column(Column(name='dec_in', data=dec)) t.add_column(Column(name='ra_fk5', data=ra_fk5)) t.add_column(Column(name='dec_fk5', data=dec_fk5)) t.add_column(Column(name='ra_icrs', data=ra_icrs)) t.add_column(Column(name='dec_icrs', data=dec_icrs)) f = open(fnout, 'wb') f.write("# This file was generated with the {0} script, and the reference " "values were computed using AST\n".format(os.path.basename(__file__))) t.write(f, format='ascii', delimiter=',') if __name__ == '__main__': ref_fk4_no_e_fk4() ref_fk4_no_e_fk5() ref_galactic_fk4() ref_icrs_fk5()
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@coordinates@tests@accuracy@generate_ref_ast.py@.PATH_END.py
{ "filename": "Further Examples 2 - The Leja2019 non-parametric continuity SFH Model.ipynb", "repo_name": "ACCarnall/bagpipes", "repo_path": "bagpipes_extracted/bagpipes-master/examples/Further Examples 2 - The Leja2019 non-parametric continuity SFH Model.ipynb", "type": "Jupyter Notebook" }
# Using the Leja et al. (2019) continuity non-parametric SFH model I've finally got round to implementing the [Leja et al. (2019)](https://ui.adsabs.harvard.edu/abs/2019ApJ...876....3L/abstract) continuity non-parametric SFH model in Bagpipes. This notebook should explain how to build models and run fits using this SFH. ## Setting up Let's begin by running a quick new fit to the same object from GOODSS that we used in Example 4. ```python import numpy as np import bagpipes as pipes from astropy.io import fits def load_goodss(ID): """ Load UltraVISTA photometry from catalogue. """ # load up the relevant columns from the catalogue. cat = np.loadtxt("hlsp_candels_hst_wfc3_goodss-tot-multiband_f160w_v1-1photom_cat.txt", usecols=(10, 13, 16, 19, 25, 28, 31, 34, 37, 40, 43, 46, 49, 52, 55, 11, 14, 17, 20, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 56)) # Find the correct row for the object we want. row = int(ID) - 1 # Extract the object we want from the catalogue. fluxes = cat[row, :15] fluxerrs = cat[row, 15:] # Turn these into a 2D array. photometry = np.c_[fluxes, fluxerrs] # blow up the errors associated with any missing fluxes. for i in range(len(photometry)): if (photometry[i, 0] == 0.) or (photometry[i, 1] <= 0): photometry[i,:] = [0., 9.9*10**99.] # Enforce a maximum SNR of 20, or 10 in the IRAC channels. for i in range(len(photometry)): if i < 10: max_snr = 20. else: max_snr = 10. if photometry[i, 0]/photometry[i, 1] > max_snr: photometry[i, 1] = photometry[i, 0]/max_snr return photometry goodss_filt_list = np.loadtxt("filters/goodss_filt_list.txt", dtype="str").tolist() galaxy = pipes.galaxy("17433", load_goodss, spectrum_exists=False, filt_list=goodss_filt_list) # Now make a basic fit instructions dictionary. dust = {} dust["type"] = "Calzetti" dust["eta"] = 2. dust["Av"] = (0., 4.) nebular = {} nebular["logU"] = -3. 
fit_instructions = {} fit_instructions["dust"] = dust fit_instructions["nebular"] = nebular fit_instructions["t_bc"] = 0.01 fit_instructions["redshift"] = 1.05 ``` Starting dense_basis. Failed to load FSPS, only GP-SFH module will be available. ## The continuity model Now for the continuity SFH component. You'll need to define your own age bin edges - these are counted in Myr backwards from the time of observation. You'll also need to set up fitted parameters called "dsfr1", "dsfr2", etc, which have associated Student's t distribution priors. The range from (-10, 10) is just an arbitrarily large range over which these can vary. I haven't yet got round to doing anything about bin resizing when redshift changes, so using this model whilst varying redshift freely won't work (i.e. you'll get an error if you define a bin that starts before the Big Bang). For fixed redshift or redshift varied within a narrow range (e.g. when fitting spectroscopy) the current setup should be fine. I can have a go at implementing bins that vary in width with redshift if this would be useful. ```python continuity = {} continuity["massformed"] = (0., 13.) continuity["metallicity"] = (0.01, 5.) continuity["metallicity_prior"] = "log_10" continuity["bin_edges"] = [0., 10., 100., 250., 500., 1000., 2500., 5000., 5500.] for i in range(1, len(continuity["bin_edges"])-1): continuity["dsfr" + str(i)] = (-10., 10.) continuity["dsfr" + str(i) + "_prior"] = "student_t" #continuity["dsfr" + str(i) + "_prior_scale"] = 0.3 # Defaults to this value as in Leja19, but can be set #continuity["dsfr" + str(i) + "_prior_df"] = 2 # Defaults to this value as in Leja19, but can be set fit_instructions["continuity"] = continuity ``` ```python fit = pipes.fit(galaxy, fit_instructions, run="advanced_2") fit.fit(verbose=False) fig = fit.plot_spectrum_posterior(save=False, show=True) fig = fit.plot_sfh_posterior(save=False, show=True) ``` Bagpipes: fitting object 17433 Completed in 325.6 seconds. 
Parameter Posterior percentiles 16th 50th 84th ---------------------------------------------------------- continuity:dsfr1 -0.340 -0.029 0.294 continuity:dsfr2 -0.158 0.098 0.450 continuity:dsfr3 -0.183 0.040 0.236 continuity:dsfr4 -0.728 -0.337 -0.147 continuity:dsfr5 -3.903 -2.045 -0.859 continuity:dsfr6 -0.991 -0.292 0.026 continuity:dsfr7 -0.664 -0.191 0.099 continuity:massformed 11.226 11.263 11.305 continuity:metallicity 0.538 0.936 1.385 dust:Av 0.252 0.380 0.543 ![png](output_5_1.png) ![png](output_5_2.png) ```python ```
ACCarnallREPO_NAMEbagpipesPATH_START.@bagpipes_extracted@bagpipes-master@examples@Further Examples 2 - The Leja2019 non-parametric continuity SFH Model.ipynb@.PATH_END.py
{ "filename": "sql_database.py", "repo_name": "langchain-ai/langchain", "repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/utilities/sql_database.py", "type": "Python" }
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.utilities import SQLDatabase from langchain_community.utilities.sql_database import truncate_word # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling optional imports. DEPRECATED_LOOKUP = { "truncate_word": "langchain_community.utilities.sql_database", "SQLDatabase": "langchain_community.utilities", } _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) def __getattr__(name: str) -> Any: """Look up attributes dynamically.""" return _import_attribute(name) __all__ = [ "truncate_word", "SQLDatabase", ]
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@utilities@sql_database.py@.PATH_END.py
{ "filename": "_hovertemplate.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattersmith/_hovertemplate.py", "type": "Python" }
import _plotly_utils.basevalidators class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator): def __init__( self, plotly_name="hovertemplate", parent_name="scattersmith", **kwargs ): super(HovertemplateValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, array_ok=kwargs.pop("array_ok", True), edit_type=kwargs.pop("edit_type", "none"), **kwargs, )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattersmith@_hovertemplate.py@.PATH_END.py
{ "filename": "plot_evolution.py", "repo_name": "exoclime/VULCAN", "repo_path": "VULCAN_extracted/VULCAN-master/plot_py/plot_evolution.py", "type": "Python" }
import sys sys.path.insert(0, '../') # including the upper level of directory for the path of modules import numpy as np import matplotlib.pyplot as plt import matplotlib.legend as lg import vulcan_cfg try: from PIL import Image except ImportError: try: import Image except: vulcan_cfg.use_PIL = False import os, sys import pickle # Setting the vulcan output file to read in vul_data = '../output/rtol005-continue-1e7conden-evap-cap-Earth.vul' #vul_data2 = '../output/COini-noPhoto-HD189.vul' # Setting the list of species to plot plot_spec = ['CH4', 'CO', 'H2O', 'H'] plot_spec = ['CH4','H2O', 'OH', 'O2', 'O3'] # Setting the output plot filename plot_name = 'evolution' # Setting the pressure level (cgs) to plot plot_p = 1e5 plot_dir = '../' + vulcan_cfg.plot_dir colors = ['c','b','g','r','m','y','k','orange','pink','grey','darkred','darkblue','salmon','chocolate','steelblue','plum','hotpink'] tex_labels = {'H':'H','H2':'H$_2$','O':'O','OH':'OH','H2O':'H$_2$O','CH':'CH','C':'C','CH2':'CH$_2$','CH3':'CH$_3$','CH4':'CH$_4$','HCO':'HCO','H2CO':'H$_2$CO', 'C4H2':'C$_4$H$_2$',\ 'C2':'C$_2$','C2H2':'C$_2$H$_2$','C2H3':'C$_2$H$_3$','C2H':'C$_2$H','CO':'CO','CO2':'CO$_2$','He':'He','O2':'O$_2$','CH3OH':'CH$_3$OH','C2H4':'C$_2$H$_4$','C2H5':'C$_2$H$_5$','C2H6':'C$_2$H$_6$','CH3O': 'CH$_3$O'\ ,'CH2OH':'CH$_2$OH','N2':'N$_2$','NH3':'NH$_3$', 'NO2':'NO$_2$','HCN':'HCN','NO':'NO', 'NO2':'NO$_2$','N2O':'N$_2$O','O3':'O$_3$' } with open(vul_data, 'rb') as handle: data = pickle.load(handle) species = data['variable']['species'] #plot_spec = species[20:30] # Find the index of pco closest to p_ana p_indx1 = min( range(len(data['atm']['pco'])), key=lambda i: abs(data['atm']['pco'][i]-plot_p)) try: with open(vul_data2, 'rb') as handle: data2 = pickle.load(handle) species2 = data2['variable']['species'] p_indx2 = min( range(len(data2['atm']['pco'])), key=lambda i: abs(data2['atm']['pco'][i]-plot_p)) except NameError: pass color_index = 0 for sp in plot_spec: if sp in tex_labels: sp_lab = 
tex_labels[sp] else: sp_lab = sp plt.plot(data['variable']['t_time'], np.array(data['variable']['y_time'])[:,p_indx1,species.index(sp)]/float(data['atm']['n_0'][p_indx1]), color=colors[color_index], label=sp_lab) try: plt.plot(data2['variable']['t_time'], np.array(data2['variable']['y_time'])[:,p_indx2,species2.index(sp)]/float(data2['atm']['n_0'][p_indx2]), color=colors[color_index], ls='--') except NameError: pass color_index += 1 plt.gca().set_xscale('log') plt.gca().set_yscale('log') #plt.gca().invert_yaxis() #plt.xlim((1.E-12, 1.)) plt.ylim((1e-22, 2.)) plt.legend(frameon=0, prop={'size':13}, loc='best') plt.xlabel("Time(s)", fontsize=12) #plt.ylabel("Pressure (bar)") plt.ylabel("Mixing Ratio", fontsize=12) #plt.title('Earth (CIRA equator in January 1986)', fontsize=14) plt.savefig(plot_dir + plot_name + '.png') plt.savefig(plot_dir + plot_name + '.eps') if vulcan_cfg.use_PIL == True: plot = Image.open(plot_dir + plot_name + '.png') plot.show() else: plt.show()
exoclimeREPO_NAMEVULCANPATH_START.@VULCAN_extracted@VULCAN-master@plot_py@plot_evolution.py@.PATH_END.py
{ "filename": "pyrafetiparam.py", "repo_name": "GeminiDRSoftware/DRAGONS", "repo_path": "DRAGONS_extracted/DRAGONS-master/gempy/eti_core/pyrafetiparam.py", "type": "Python" }
from .etiparam import ETIParam from ..utils import logutils log = logutils.get_logger(__name__) class PyrafETIParam(ETIParam): inputs = None params = None paramdict = None def __init__(self, inputs=None, params=None): log.debug("PyrafETIParam __init__") ETIParam.__init__(self, inputs, params) self.paramdict = {} def get_parameter(self): log.debug("PyrafETIParam get_parameter()") return self.paramdict def prepare(self): log.debug("PyrafETIParam prepare()") def recover(self): log.debug("PyrafETIParam recover(): pass") class IrafStdout(): """ This is a class to act as the standard output for the IRAF""" def __init__(self): self.log = log def flush(self): pass def write(self, out): if len(out) > 1: self.log.fullinfo(out)
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@gempy@eti_core@pyrafetiparam.py@.PATH_END.py
{ "filename": "prepare_lowl.py", "repo_name": "CosmoLike/cocoa", "repo_path": "cocoa_extracted/cocoa-main/Cocoa/external_modules/code/planck/code/spt_clik/src/python/tools/prepare_lowl.py", "type": "Python" }
#! PYTHONEXE import numpy as nm import healpy as hp import sys if __name__ == "__main__": sys.path = ["REPLACEPATH"]+sys.path def mode(l,m,nside,field,ordering="ring",all=False): if m==0: value = 1 else: value = (1+nm.sign(m)*1j)/2. nmode = ((l+1)*(l+2))/2 Ind = hp.Alm.getidx(l,l,abs(m)) dat = nm.zeros((nmode,),dtype=nm.complex) dat[Ind] = value if field.lower()=="t": res = hp.alm2map(dat,nside,l) else: datT = dat*0 if field.lower()=="e": datE = dat datB = dat*0 else: datB = dat datE = dat*0 rr = hp.alm2map((datT,datE,datB),nside,l) res = nm.array(rr[1:]).flat[:] if ordering.lower() !="ring" : #fuck reorder ! if field.lower()=="t": mres = res[hp.nest2ring(nside,list(range(12*nside**2)))] res = mres else: mres = res[nm.concatenate((hp.nest2ring(nside,list(range(12*nside**2))),hp.nest2ring(nside,list(range(12*nside**2)))+12*nside**2))] res = mres if all: return res,dat return res def cYLM(nside=16,field="t",lmax=-1,ordering="ring",ls=None): if field.lower() not in ["t","e","b"]: raise ValueError("field must be one of T, E and B") field = field.lower() npix = 12*nside**2 if field != "t": npix*=2 if lmax==-1: lmax = 3*nside lmax=int(lmax) if ls == None: ls = nm.arange(lmax+1) nmode = nm.sum(2*ls+1) res = nm.zeros((npix,nmode),dtype=nm.float64) cnt=0 for l in ls: for m in range(-l,l+1): rr = mode(l,m,nside,field,ordering) res[:,cnt]= rr cnt+=1 return res def applyMask(ylm,mask): if ylm.shape[0]==len(mask): return nm.compress(mask==1,ylm,axis=0) elif ylm.shape[0]==2*len(mask): return nm.compress(nm.tile(mask,2)==1,ylm,axis=0) def applyls(ylm,ls): rls=[] cnt=0 for l in range(max(ls)+1): if l in ls: rls+=(nm.arange(2*l+1)+cnt).tolist() cnt+=2*l+1 return ylm[:,rls] import numpy.linalg as la def svdReduce(mylm,level=1,ls=None,nside=-1,all=False): if nside==-1: pside=nm.sqrt(mylm.shape[0]/12.) 
nside=2**nm.ceil(nm.log(pside)/nm.log(2)) dlevel = (1-level)*nm.sqrt(3*nside**2/nm.pi) if ls!=None: mylm = applyls(mylm,ls) if level==1 and not all: q,r=la.qr(mylm) return q,r u,d,vt = la.svd(mylm,full_matrices=False) if level==1: complev=nm.ones(u.shape[1]) else: complev=d>=dlevel u=nm.compress(complev,u,axis=1) vt=nm.compress(complev,vt,axis=0) df=nm.compress(complev,d,axis=0) if all: return u,df[:,nm.newaxis]*vt,d,df return u,df[:,nm.newaxis]*vt def reducedYLM(ylm, mask, level): mylm=applyMask(ylm,mask) return svdReduce(mylm,level) class YLM: head="YLM" def _normalize(self,nside=0,masks=None,ls=None): if masks == None: amask = nm.ones((2,nside**2*12),dtype=nm.uint8) elif len(masks)!=2: amask = nm.array((masks,masks)).astype(nm.uint8) else: amask = nm.array(masks).astype(nm.uint8) if nside == 0: nside = nm.sqrt(amask.shape[0]/12) if ls == None : ls = tuple(nm.arange(3*nside+1)) return nside,amask,ls def _checksame(self,nside,masks,sigmask,ls,ordering,has_cl,ylm): same = (nside == 0 or ylm.nside==nside) same = same and (tuple(has_cl) == ylm.has_cl) same = same and (nside == 0 or ls ==None or tuple(ls) == ylm.ls) same = same and (ordering.lower() == ylm.ordering) same = same and (masks == None or sigmask == ylm.sigmask) return same def _store(self,nside,ls,ordering,sigmask,has_cl): self.nside = nside self.ls = tuple(ls) self.ordering = ordering.lower() self.sigmask = tuple(sigmask) self.has_cl = tuple(has_cl) def __init__(self,nside=0,masks=None,ls=None,has_cl=[1,0,0,0,0,0],ordering="RING",ylm=None,fromfile=""): nside,amasks,ls = self._normalize(nside,masks,ls) sigmask = tuple(nm.sum(amasks,axis=1)) if fromfile: self.load(fromfile) if not self._checksame(nside,masks,sigmask,ls,ordering,has_cl,self): raise Exception("Not the same ylm") return if ylm!=None: if not self._checksame(nside,masks,sigmask,ls,ordering,has_cl,ylm): raise Exception("Not the same ylm") self.ylm=ylm.ylm self._store(ylm.nside,ylm.ls,ylm.ordering,ylm.sigmask,ylm.has_cl) return 
ylmT=nm.zeros((0,0)) ylmE=nm.zeros((0,0)) ylmB=nm.zeros((0,0)) if has_cl[0] or has_cl[3] or has_cl[4]: ylmT=applyMask(cYLM(nside,"t",max(ls),ordering,ls),amasks[0]) if has_cl[1] or has_cl[3] or has_cl[5]: ylmE=applyMask(cYLM(nside,"e",max(ls),ordering,ls),amasks[1]) if ylmE.shape[0]==0: ylmE=nm.zeros((0,0)) if has_cl[2] or has_cl[4] or has_cl[5]: ylmB=applyMask(cYLM(nside,"b",max(ls),ordering,ls),amasks[1]) if ylmB.shape[0]==0: ylmB=nm.zeros((0,0)) ylm=nm.zeros((ylmT.shape[0]+max(ylmE.shape[0],ylmB.shape[0]),ylmT.shape[1]+ylmE.shape[1]+ylmB.shape[1])) #print ylm.shape,ylmT.shape,ylmE.shape,ylmB.shape ylm[:ylmT.shape[0],:ylmT.shape[1]]=ylmT if ylmE.shape[0]!=0: ylm[ylmT.shape[0]:,ylmT.shape[1]:ylmT.shape[1]+ylmE.shape[1]]=ylmE if ylmB.shape[0]!=0: ylm[ylmT.shape[0]:,ylmT.shape[1]+ylmE.shape[1]:]=ylmB self.ylm=ylm self._store(nside,ls,ordering,sigmask,has_cl) return def _saveinfo(self,f): f.write(self.head) f.write(self.ordering[0]) f.write(nm.array(self.nside,dtype=nm.int32).tostring()) f.write(nm.array(len(self.ls),dtype=nm.int32).tostring()) f.write(nm.array(self.ls,dtype=nm.int32).tostring()) f.write(nm.array(self.sigmask,dtype=nm.int32).tostring()) f.write(nm.array(self.has_cl,dtype=nm.int32).tostring()) def _saveArray(self,f,arr): f.write(nm.array(arr.shape,dtype=nm.int32).tostring()) f.write(nm.array(arr,dtype=nm.float64).tostring()) def save(self,fi): f=open(fi,"w") self._saveinfo(f) self._saveArray(f,self.ylm) f.close() def _loadinfo(self,f): if f.read(len(self.head))!=self.head: raise Exception("argl 1") self.ordering={"r":"ring","n":"nested"}[f.read(1)] self.nside = nm.frombuffer(f.read(4),dtype=nm.int32)[0] nls = nm.frombuffer(f.read(4),dtype=nm.int32)[0] self.ls = tuple(nm.frombuffer(f.read(4*nls),dtype=nm.int32)) self.sigmask = tuple(nm.frombuffer(f.read(4*2),dtype=nm.int32)) self.has_cl = tuple(nm.frombuffer(f.read(4*6),dtype=nm.int32)) def _loadArray(self,f): shape = tuple(nm.frombuffer(f.read(4*2),dtype=nm.int32)) arr = 
(nm.frombuffer(f.read(8*shape[0]*shape[1]),dtype=nm.float64)) arr.shape=shape return arr def load(self,fi): f=open(fi,"r") self._loadinfo(f) self.ylm = self._loadArray(f) f.close() class UG(YLM): head="UG" def __init__(self,nside=0,masks=None,ls=None,has_cl=[1,0,0,0,0,0],ordering="RING",level=1,ylm=None,ylm_fromfile="",ug=None,fromfile=""): nside,amasks,ls = self._normalize(nside,masks,ls) sigmask = tuple(nm.sum(amasks,axis=1)) if fromfile: self.load(fromfile) if not self._checksame(nside,masks,sigmask,ls,ordering,has_cl,level,self): raise Exception("Not the same UG") return if ug != None: if not self._checksame(nside,masks,sigmask,ls,ordering,has_cl,level,ug): raise Exception("Not the same UG") self.U=ug.U self.G=ug.G self._store(ug.nside,ug.ls,ug.ordering,ug.sigmask,ug.has_cl,ug.level) return ylm = YLM(nside,masks,ls,has_cl,ordering,ylm,ylm_fromfile) U,G = svdReduce(ylm.ylm,level) self.U = U self.G = G self._store(nside,ls,ordering,sigmask,has_cl,level) def _store(self,nside,ls,ordering,sigmask,has_cl,level): self.level=level YLM._store(self,nside,ls,ordering,sigmask,has_cl) def _checksame(self,nside,masks,sigmask,ls,ordering,has_cl,level,ug): same = YLM._checksame(self,nside,masks,sigmask,ls,ordering,has_cl,ug) same = same and (level == ug.level) return same def _saveinfo(self,f): YLM._saveinfo(self,f) f.write(nm.array(self.level,dtype=nm.float64).tostring()) def _loadinfo(self,f): YLM._loadinfo(self,f) self.level = nm.frombuffer(f.read(8),dtype=nm.float64)[0] def save(self,fi): f=open(fi,"w") self._saveinfo(f) self._saveArray(f,self.U) self._saveArray(f,self.G) f.close() def load(self,fi): f=open(fi,"r") self._loadinfo(f) self.U = self._loadArray(f) self.G = self._loadArray(f) f.close() def prepUG(nside,mask,ls,has_cl,svdCut=None): ylms = YLM(nside,mask,ls,has_cl,"ring") if svdCut!=None: UG = UG(nside,mask,ls,has_cl,"ring",svdCut,ylms) return UG.U,UG.G return ylms.ylm,None def prepdats(masks, maps,noise,ell,cl,has_cl,svdCut=None): import clik.lkl if 
len(maps)==3: nside = int(nm.sqrt(len(maps[0])/12)) else: nside = int(nm.sqrt(len(maps)/12)) U,G = prepUG(nside,masks,ell,has_cl,svdCut) if masks == None: masks = nm.ones(12*nside*nside,dtype=nm.uint8) return clik.lkl.powly_javel(masks, maps,noise,ell,cl,has_cl,U,G) def hdf_lowl(hf,cl_fid,a_bar,H): hf.attrs["neff"] = len(a_bar) hf.create_dataset('cl_fid', data=cl_fid) hf.create_dataset('a_bar', data=a_bar) hf.create_dataset('H', data=H.flat[:]) def main(argv): import clik pars = clik.miniparse(argv[1]) hascl = nm.array([int(ss) for ss in pars.str_array.has_cl]) if hascl[3]: if not (hascl[0] and hascl[1]): raise Exception("bad 1") if hascl[4]: if not (hascl[0] and hascl[2]): raise Exception("bad 2") if hascl[5]: if not (hascl[1] and hascl[2]): raise Exception("bad 3") ncl = nm.sum(hascl) if ncl == 0: raise Exception("argl") if nm.sum(hascl[1:])==0: rmaps = hp.read_map(pars.str.mapfile) print(rmaps[0]) else: rmaps = [hp.read_map(pars.str.mapfile,i) for i in range(3)] print(rmaps[0][0],rmaps[1][0],rmaps[2][0]) cl = nm.loadtxt(pars.str.clfile) wl = nm.loadtxt(pars.str.beamfile) cl.shape=[ncl,-1] allcl = nm.zeros((6,cl.shape[1]),dtype=nm.double) j=0 for i in range(6): if hascl[i]: allcl[i] = cl[j]*wl j+=1 clw = allcl.flat[:] print(clw) res = prepdats(None,rmaps,pars.float.noise,nm.arange(pars.int.lmin,pars.int.lmax+1),clw,hascl) return 0 if __name__ == "__main__": main(sys.argv)
CosmoLikeREPO_NAMEcocoaPATH_START.@cocoa_extracted@cocoa-main@Cocoa@external_modules@code@planck@code@spt_clik@src@python@tools@prepare_lowl.py@.PATH_END.py
{ "filename": "_visible.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/treemap/pathbar/_visible.py", "type": "Python" }
import _plotly_utils.basevalidators class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator): def __init__(self, plotly_name="visible", parent_name="treemap.pathbar", **kwargs): super(VisibleValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "plot"), **kwargs, )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@treemap@pathbar@_visible.py@.PATH_END.py
{ "filename": "test_ut1.py", "repo_name": "waynebhayes/SpArcFiRe", "repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/time/tests/test_ut1.py", "type": "Python" }
# Licensed under a 3-clause BSD style license - see LICENSE.rst import functools import pytest import numpy as np from ...tests.helper import remote_data from .. import Time from ...utils.iers import iers # used in testing allclose_jd = functools.partial(np.allclose, rtol=1e-15, atol=0) allclose_sec = functools.partial(np.allclose, rtol=1e-15, atol=1e-9) # 1 nanosec atol try: iers.IERS_A.open() # check if IERS_A is available except IOError: HAS_IERS_A = False else: HAS_IERS_A = True class TestTimeUT1(): """Test Time.ut1 using IERS tables""" @remote_data def test_utc_to_ut1(self): "Test conversion of UTC to UT1, making sure to include a leap second""" t = Time(['2012-06-30 12:00:00', '2012-06-30 23:59:59', '2012-06-30 23:59:60', '2012-07-01 00:00:00', '2012-07-01 12:00:00'], scale='utc') t_ut1_jd = t.ut1.jd t_comp = np.array([2456108.9999932079, 2456109.4999816339, 2456109.4999932083, 2456109.5000047823, 2456110.0000047833]) assert allclose_jd(t_ut1_jd, t_comp) t_back = t.ut1.utc assert allclose_jd(t.jd, t_back.jd) tnow = Time.now() tnow.ut1 def test_ut1_to_utc(self): """Also test the reverse, around the leap second (round-trip test closes #2077)""" t = Time(['2012-06-30 12:00:00', '2012-06-30 23:59:59', '2012-07-01 00:00:00', '2012-07-01 00:00:01', '2012-07-01 12:00:00'], scale='ut1') t_utc_jd = t.utc.jd t_comp = np.array([2456109.0000010049, 2456109.4999836441, 2456109.4999952177, 2456109.5000067917, 2456109.9999952167]) assert allclose_jd(t_utc_jd, t_comp) t_back = t.utc.ut1 assert allclose_jd(t.jd, t_back.jd) def test_delta_ut1_utc(self): """Accessing delta_ut1_utc should try to get it from IERS (closes #1924 partially)""" t = Time('2012-06-30 12:00:00', scale='utc') assert not hasattr(t, '_delta_ut1_utc') # accessing delta_ut1_utc calculates it assert allclose_sec(t.delta_ut1_utc, -0.58682110003124965) # and keeps it around assert allclose_sec(t._delta_ut1_utc, -0.58682110003124965) @pytest.mark.skipif('not HAS_IERS_A') class TestTimeUT1_IERSA(): def 
test_ut1_iers_A(self): tnow = Time.now() iers_a = iers.IERS_A.open() tnow.delta_ut1_utc, status = iers_a.ut1_utc(tnow, return_status=True) assert status == iers.FROM_IERS_A_PREDICTION tnow_ut1_jd = tnow.ut1.jd assert tnow_ut1_jd != tnow.jd @remote_data class TestTimeUT1_IERS_Auto(): def test_ut1_iers_auto(self): tnow = Time.now() iers_a = iers.IERS_Auto.open() tnow.delta_ut1_utc, status = iers_a.ut1_utc(tnow, return_status=True) assert status == iers.FROM_IERS_A_PREDICTION tnow_ut1_jd = tnow.ut1.jd assert tnow_ut1_jd != tnow.jd
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@time@tests@test_ut1.py@.PATH_END.py
{ "filename": "_align.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/table/cells/_align.py", "type": "Python" }
import _plotly_utils.basevalidators class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator): def __init__(self, plotly_name="align", parent_name="table.cells", **kwargs): super(AlignValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, array_ok=kwargs.pop("array_ok", True), edit_type=kwargs.pop("edit_type", "calc"), values=kwargs.pop("values", ["left", "center", "right"]), **kwargs, )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@table@cells@_align.py@.PATH_END.py
{ "filename": "che_diagram.py", "repo_name": "orlox/mesa_input_data", "repo_path": "mesa_input_data_extracted/mesa_input_data-master/2016_ULX/scripts/che_examples/che_diagram/che_diagram.py", "type": "Python" }
#!/usr/bin/env python import matplotlib.pyplot as plt from pylab import * import numpy as np import matplotlib.patheffects as pe from scipy.interpolate import spline params = {'backend': 'pdf', 'figure.figsize': [4.3, 3.0], 'font.family':'serif', 'font.size':10, 'font.serif': 'Times Roman', 'axes.titlesize': 'medium', 'axes.labelsize': 'medium', 'legend.fontsize': 8, 'legend.frameon' : False, 'text.usetex': True, 'figure.dpi': 600, 'lines.markersize': 2, 'lines.linewidth': 3, 'lines.antialiased': False, 'path.simplify': False, 'legend.handlelength':3, 'figure.subplot.bottom':0.15, 'figure.subplot.top':0.9, 'figure.subplot.left':0.15, 'figure.subplot.right':0.92} hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',\ '#CC6677', '#882255', '#AA4499', '#661100', '#6699CC', '#AA4466','#4477AA'] Msun = 1.99e33 cgrav = 6.67e-8 Rsun = 6.96e10 #kepler third law to obtain P in days as function of a (in Rsun) and m1, m2 in Msun def kepler3_P(a,m1,m2): return (4.*math.pi**2*(a*Rsun)**3/(cgrav*(m1+m2)*Msun))**(1./2.)/(24.*3600.) 
def rlof_a(R,q): return R*(0.6*q**(-2./3.)+np.log(1+q**(-1./3.)))/(0.49*q**(-2./3.)) mpl.rcParams.update(params) fig, axes= plt.subplots(1) log_mass = np.array([ 1.00, 1.10, 1.20, # 1.30, # 1.40, # 1.50, 1.55, # 1.60, 1.65, 1.70, 1.80, 2.00, 2.10 ]) log_R = np.array([ 4.2760535573793501E-001, 4.9106730087380623E-001, 5.5275024385163396E-001, # 6.2462515401440166E-001, # 6.8497589930410030E-001, # 7.2680359096233449E-001, 7.5452711142763518E-001, # 8.0091387703154759E-001, 8.0979203637750508E-001, 8.3743230991846984E-001, 8.9235613691518800E-001, 1.0021433395949833E+000, 1.0577850896107002E+000 ]) omega_div_omegac = np.array([ 1.00, 0.92, 0.86, # 0.78, # 0.72, # 0.66, 0.58, # 0.48, 0.46, 0.44, 0.42, 0.41, 0.41 ]) omegac = np.array([ 4.1382012380843602E-004, 3.4093089160500161E-004, 3.0480225177594429E-004, # 2.8148196102212227E-004, # 2.4658960024444325E-004, # 2.1928813238945559E-004, 2.0698775942728026E-004, # 1.9617252597888118E-004, 1.8445967748866469E-004, 1.7402582772766377E-004, 1.5394032876626964E-004, 1.1800034838321288E-004, 1.0232255503380916E-004 ]) mass = np.linspace(10,100,100) odoc = spline(log_mass,omega_div_omegac,np.log10(mass)) oc = spline(log_mass,omegac,np.log10(mass)) lr = spline(log_mass,log_R,np.log10(mass)) axes.fill_between(mass, 1, odoc, alpha=0.2) #axes.fill_between(np.power(10,log_mass), 1, omega_div_omegac, alpha=0.5) axes.plot(mass, 2*3.14/(0.7*24*3600)/oc,color=hexcols[6],label="Fixed $P_{\\rm rot,i}$") axes.plot(mass, 2*3.14/(1.0*24*3600)/oc,color=hexcols[6]) axes.plot(mass, 2*3.14/(1.5*24*3600)/oc,color=hexcols[6]) axes.plot(mass,2*3.14/24/3600/oc/ kepler3_P(rlof_a(np.power(10,lr),0.1),mass, mass*0.1),color=hexcols[2],ls="--", label="RLOF at ZAMS for fixed $q$" ) #axes.plot(mass,2*3.14/24/3600/oc/ # kepler3_P(rlof_a(np.power(10,lr),0.25),mass, mass*0.25),color=hexcols[2],ls="--" # ) axes.plot(mass,2*3.14/24/3600/oc/ kepler3_P(rlof_a(np.power(10,lr),0.9),mass, mass*0.9),color=hexcols[2],ls="--" ) axes.text(80, 0.47,'CHE', 
horizontalalignment='center', verticalalignment='top', fontsize=15, color="b", alpha=0.5, rotation=-2) axes.text(80, 0.4,'Normal evolution', horizontalalignment='center', verticalalignment='top', fontsize=15, color="k", alpha=0.7, rotation=-2) axes.text(30, 0.5,'$P_{\\rm rot,i}=0.7\;\\rm d$', horizontalalignment='center', verticalalignment='center', fontsize=10, color=hexcols[6], rotation=30) axes.text(30, 0.36,'$P_{\\rm rot,i}=1.0\;\\rm d$', horizontalalignment='center', verticalalignment='center', fontsize=10, color=hexcols[6], rotation=20) axes.text(30, 0.245,'$P_{\\rm rot,i}=1.5\;\\rm d$', horizontalalignment='center', verticalalignment='center', fontsize=10, color=hexcols[6], rotation=15) axes.text(55, 0.695,'$q=0.1$', horizontalalignment='center', verticalalignment='center', fontsize=10, color=hexcols[2], rotation=10) axes.text(55, 0.51,'$q=0.9$', horizontalalignment='center', verticalalignment='center', fontsize=10, color=hexcols[2], rotation=5) axes.set_xlim([20,100]) axes.set_ylim([0.1,0.8]) axes.set_xlabel("$M_{\\rm i}\;[M_\odot]$") axes.set_ylabel("$(\Omega/\Omega_{\\rm crit})_{\\rm surf,i}$") axes.legend(loc="lower right", title = "$Z\\simeq Z_\odot/50$") plt.savefig("../../images/che_diagram.pdf") # axarr[i].plot(profs_B[i].get("log_Teff")[3791:4273],profs_B[i].get("log_L")[3791:4273],color=hexcols[8],\ # path_effects=[pe.Stroke(linewidth=7, foreground='k'), pe.Normal()], solid_capstyle='round',lw=6, zorder=-100)
orloxREPO_NAMEmesa_input_dataPATH_START.@mesa_input_data_extracted@mesa_input_data-master@2016_ULX@scripts@che_examples@che_diagram@che_diagram.py@.PATH_END.py
{ "filename": "_colorsrc.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/bar/outsidetextfont/_colorsrc.py", "type": "Python" }
import _plotly_utils.basevalidators class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__( self, plotly_name="colorsrc", parent_name="bar.outsidetextfont", **kwargs ): super(ColorsrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "none"), role=kwargs.pop("role", "info"), **kwargs )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@bar@outsidetextfont@_colorsrc.py@.PATH_END.py
{ "filename": "test_tools.py", "repo_name": "statsmodels/statsmodels", "repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/regression/tests/test_tools.py", "type": "Python" }
import numpy as np from numpy.testing import assert_allclose import pytest from statsmodels.regression.linear_model import WLS from statsmodels.regression._tools import _MinimalWLS class TestMinimalWLS: @classmethod def setup_class(cls): rs = np.random.RandomState(1234) cls.exog1 = rs.randn(200, 5) cls.endog1 = cls.exog1.sum(1) + rs.randn(200) cls.weights1 = 1.0 + np.sin(np.arange(200.0) / 100.0 * np.pi) cls.exog2 = rs.randn(50, 1) cls.endog2 = 0.3 * cls.exog2.ravel() + rs.randn(50) cls.weights2 = 1.0 + np.log(np.arange(1.0, 51.0)) @pytest.mark.parametrize('check', [True, False]) def test_equivalence_with_wls(self, check): res = WLS(self.endog1, self.exog1).fit() minres = _MinimalWLS(self.endog1, self.exog1, check_endog=check, check_weights=check).fit() assert_allclose(res.params, minres.params) assert_allclose(res.resid, minres.resid) res = WLS(self.endog2, self.exog2).fit() minres = _MinimalWLS(self.endog2, self.exog2, check_endog=check, check_weights=check).fit() assert_allclose(res.params, minres.params) assert_allclose(res.resid, minres.resid) res = WLS(self.endog1, self.exog1, weights=self.weights1).fit() minres = _MinimalWLS(self.endog1, self.exog1, weights=self.weights1, check_endog=check, check_weights=check).fit() assert_allclose(res.params, minres.params) assert_allclose(res.resid, minres.resid) res = WLS(self.endog2, self.exog2, weights=self.weights2).fit() minres = _MinimalWLS(self.endog2, self.exog2, weights=self.weights2, check_endog=check, check_weights=check).fit() assert_allclose(res.params, minres.params) assert_allclose(res.resid, minres.resid) @pytest.mark.parametrize('bad_value', [np.nan, np.inf]) def test_inf_nan(self, bad_value): with pytest.raises( ValueError, match=r'detected in endog, estimation infeasible'): endog = self.endog1.copy() endog[0] = bad_value _MinimalWLS(endog, self.exog1, self.weights1, check_endog=True, check_weights=True).fit() with pytest.raises( ValueError, match=r'detected in weights, estimation infeasible'): weights = 
self.weights1.copy() weights[-1] = bad_value _MinimalWLS(self.endog1, self.exog1, weights, check_endog=True, check_weights=True).fit()
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@regression@tests@test_tools.py@.PATH_END.py
{ "filename": "_outlinewidth.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contourcarpet/colorbar/_outlinewidth.py", "type": "Python" }
import _plotly_utils.basevalidators class OutlinewidthValidator(_plotly_utils.basevalidators.NumberValidator): def __init__( self, plotly_name="outlinewidth", parent_name="contourcarpet.colorbar", **kwargs ): super(OutlinewidthValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "colorbars"), min=kwargs.pop("min", 0), **kwargs, )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contourcarpet@colorbar@_outlinewidth.py@.PATH_END.py
{ "filename": "plugin_setup.py", "repo_name": "jax-ml/jax", "repo_path": "jax_extracted/jax-main/jax_plugins/cuda/plugin_setup.py", "type": "Python" }
# Copyright 2023 The JAX Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os from setuptools import setup from setuptools.dist import Distribution __version__ = None cuda_version = 0 # placeholder project_name = f"jax-cuda{cuda_version}-plugin" package_name = f"jax_cuda{cuda_version}_plugin" def load_version_module(pkg_path): spec = importlib.util.spec_from_file_location( 'version', os.path.join(pkg_path, 'version.py')) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module _version_module = load_version_module(package_name) __version__ = _version_module._get_version_for_build() _cmdclass = _version_module._get_cmdclass(package_name) class BinaryDistribution(Distribution): """This class makes 'bdist_wheel' include an ABI tag on the wheel.""" def has_ext_modules(self): return True setup( name=project_name, version=__version__, cmdclass=_cmdclass, description="JAX Plugin for NVIDIA GPUs", long_description="", long_description_content_type="text/markdown", author="JAX team", author_email="jax-dev@google.com", packages=[package_name], python_requires=">=3.10", install_requires=[f"jax-cuda{cuda_version}-pjrt=={__version__}"], extras_require={ 'with_cuda': [ "nvidia-cublas-cu12>=12.1.3.1", "nvidia-cuda-cupti-cu12>=12.1.105", "nvidia-cuda-nvcc-cu12>=12.6.85", "nvidia-cuda-runtime-cu12>=12.1.105", "nvidia-cudnn-cu12>=9.1,<10.0", "nvidia-cufft-cu12>=11.0.2.54", "nvidia-cusolver-cu12>=11.4.5.107", 
"nvidia-cusparse-cu12>=12.1.0.106", "nvidia-nccl-cu12>=2.18.1", # nvjitlink is not a direct dependency of JAX, but it is a transitive # dependency via, for example, cuSOLVER. NVIDIA's cuSOLVER packages # do not have a version constraint on their dependencies, so the # package doesn't get upgraded even though not doing that can cause # problems (https://github.com/jax-ml/jax/issues/18027#issuecomment-1756305196) # Until NVIDIA add version constraints, add a version constraint # here. "nvidia-nvjitlink-cu12>=12.1.105", ], }, url="https://github.com/jax-ml/jax", license="Apache-2.0", classifiers=[ "Development Status :: 3 - Alpha", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", ], package_data={ package_name: [ "*", "nvvm/libdevice/libdevice*", ], }, zip_safe=False, distclass=BinaryDistribution, )
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax_plugins@cuda@plugin_setup.py@.PATH_END.py
{ "filename": "__init__.py", "repo_name": "USNavalResearchLaboratory/eispac", "repo_path": "eispac_extracted/eispac-main/eispac/__init__.py", "type": "Python" }
# __version__ = '0.94.1' from .version import version as __version__ from . import download from . import templates from .core import * from .extern import * from .util import * from . import data
USNavalResearchLaboratoryREPO_NAMEeispacPATH_START.@eispac_extracted@eispac-main@eispac@__init__.py@.PATH_END.py
{ "filename": "f2pysig.py", "repo_name": "toshiyan/cmblensplus", "repo_path": "cmblensplus_extracted/cmblensplus-master/py/f2pysig.py", "type": "Python" }
# # This code scan f90 files and output the signature file. # # The code first split each subroutine, and scan lines including intent. # The argument should be declared as # [data type], [intent(...)], [dimension] (<- order insensitive) :: [arguments] # For optional arguments, the code also look for a default value specified by !f2py ... # import os import argparse # read libname and modulename parser = argparse.ArgumentParser(description='scan f90 file and create the signature file for f2py') parser.add_argument('-libname',default='') parser.add_argument('-modname','--list',nargs='+',default='') args = parser.parse_args() libname = args.libname modname = args.list pyfname = libname+'.pyf' # initial import f = open(pyfname,'w') f.write('python module '+libname+'\n') f.write(' interface\n') # loop over modules for mod in modname: # read lines f90name = mod g = open(f90name) lines = g.readlines() g.close() # add module name mod = mod.replace('.f90','') f.write(' module '+mod+'\n') # add use statement for line in lines: if 'use ' in line and not '!' 
in line: f.write(' '+line) # count number of subroutines nsub = sum((line.count('end subroutine') for line in lines)) # obtain line number of each subroutine ln = os.popen('grep -n "subroutine " '+f90name+' | cut -f1 -d:').read().split("\n") # separate lines into sublines and obtain output strings for ns in range(nsub): # lines for this subroutine slines = lines[int(ln[2*ns])-1:int(ln[2*ns+1])] # extract declaration part declare = [] for line in slines: if '::' in line and 'intent' in line: dec = line.replace('\n','').split('::') declare.append(dec) # extract f2py optional part opt = [] for line in slines: if '::' in line and '!f2py' in line: op = line.replace('\n','').split('::') opt.append(op) #print(op) # extract fuction func = slines[0].replace('\n','') f.write(' '+func+'\n') # extract args from the function definition args = func[func.find('(')+1:func.find(')')].split(',') # extract args description from the lines for p in args: for dec in declare: # split to avoid confusion of e.g. "abc" and "abctype" d = dec[1].replace(',',' ').split() if p in d: # for optional args defval = '' if 'optional' in dec[0]: for op in opt: if p in op[1].split(): defval = op[1].split('=')[1] output = dec[0] # check dependence for q in args: if q in dec[0][dec[0].find('dimension'):] or q in defval: output += ', depend('+q+')' output += ' :: ' + p if defval!='': output += ' =' + defval # write to file f.write(' '+output+'\n') break f.write(' '+slines[-1].replace('\n','')+'\n') f.write(' end module '+mod+'\n') f.write(' end interface\n') f.write('end python module '+libname+'\n') f.write('\n') f.close()
toshiyanREPO_NAMEcmblensplusPATH_START.@cmblensplus_extracted@cmblensplus-master@py@f2pysig.py@.PATH_END.py
{ "filename": "_tickangle.py", "repo_name": "plotly/plotly.py", "repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/volume/colorbar/_tickangle.py", "type": "Python" }
import _plotly_utils.basevalidators class TickangleValidator(_plotly_utils.basevalidators.AngleValidator): def __init__( self, plotly_name="tickangle", parent_name="volume.colorbar", **kwargs ): super(TickangleValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), **kwargs, )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@volume@colorbar@_tickangle.py@.PATH_END.py
{ "filename": "SNIa_yield.py", "repo_name": "Azeret/galIMF", "repo_path": "galIMF_extracted/galIMF-master/yield_tables/SNIa_yield.py", "type": "Python" }
# This function returns the element mass ejected for a type Ia supernova event def function_mass_ejected(yield_reference_name, element_name): mass_ejected = 0 if yield_reference_name == 'Thielemann1993': # Reference: Thielemann et al. (1993) # Values adopted from # Gibson, B. K., Loewenstein, M., & Mushotzky, R. F. 1997, MNRAS, 290, 623, their TNH93 dataset if element_name == "O": mass_ejected = 0.148 elif element_name == "Ne": mass_ejected = 0.005 elif element_name == "Mg": mass_ejected = 0.009 elif element_name == "Si": mass_ejected = 0.158 elif element_name == "S": mass_ejected = 0.086 elif element_name == "Fe": mass_ejected = 0.744 else: print("element {} not included in SNIa yield table {}.".format(element_name, yield_reference_name)) mass_ejected = 0 elif yield_reference_name == 'Seitenzahl2013': # Reference: Seitenzahl et al. 2013, MNRAS, 429, 1156 # Below adopt the mean value of all the model results in their table 2 if element_name == "C": mass_ejected = 0.0073 # +-0.0047 elif element_name == "O": mass_ejected = 0.11 # +-0.06 elif element_name == "Ne": mass_ejected = 0.0057 # +-0.004 elif element_name == "Na": mass_ejected = 6.8288e-5 # +-0.004 elif element_name == "Mg": mass_ejected = 0.01928 # +-0.01 elif element_name == "Al": mass_ejected = 0.000785 elif element_name == "Si": mass_ejected = 0.248 # +-0.092 elif element_name == "S": mass_ejected = 0.0935 # +-0.032 elif element_name == "Ar": mass_ejected = 0.0148 # +-0.005 elif element_name == "Ca": mass_ejected = 0.012 # +-0.004 elif element_name == "Ti": mass_ejected = 2.5535e-4 elif element_name == "Cr": mass_ejected = 0.0072 # +-0.0024 elif element_name == "Mn": mass_ejected = 0.0106 # +-0.0025 elif element_name == "Fe": mass_ejected = 0.68935 # +-0.21 elif element_name == "Ni": mass_ejected = 0.065 # +-0.010 else: print("element {} not included in SNIa yield table {}.".format(element_name, yield_reference_name)) mass_ejected = 0 elif yield_reference_name == 'Iwamoto1999': # Reference: 
https://ui.adsabs.harvard.edu/abs/1999ApJS..125..439I/abstract # Below adopt the mean value of all models (W, WDD, CDD) in their table 3 if element_name == "C": mass_ejected = 0.0508 # elif element_name == "O": mass_ejected = 0.091 # (14.3+13.3+8.82+6.58+5.58+9.34+5.83)/7 # 0.133 elif element_name == "Ne": # Ne20 mass_ejected = 0.00229 # elif element_name == "Mg": mass_ejected = 0.00727 # (8.5+15.8+7.55+4.47+2.62+7.72+4.2)/7 # 0.0158 elif element_name == "Al": mass_ejected = 3.7214e-4 # (9.86+1.13+4.38+2.47+1.41+4.45+2.35)/7 * 1e-4 elif element_name == "Si": mass_ejected = 0.201 # (1.54+1.42+2.72+2.06+1.58+2.77+1.98)/7 # 0.142 elif element_name == "S": mass_ejected = 0.0914 # elif element_name == "Ar": mass_ejected = 0.0191 # elif element_name == "Ca": mass_ejected = 0.0228 # (1.19+1.81+3.1+2.43+1.88+3.18+2.38)/7 # 0.0181 elif element_name == "Ti": mass_ejected = 5.3057e-4 # (2.05+3.13+7.10+6.11+5.23+7.32+6.20)e-4/7 elif element_name == "Cr": # Cr52 mass_ejected = 0.00773 elif element_name == "Mn": mass_ejected = 0.00666 # elif element_name == "Fe": mass_ejected = 0.6747 # (6.26+6.8+5.87+7.13+7.95+5.65+7.57)/7 # 0.68 elif element_name == "Ni": # Ni 58 mass_ejected = 0.0834 # else: print("element {} not included in SNIa yield table {}.".format(element_name, yield_reference_name)) mass_ejected = 0 elif yield_reference_name == 'Iwamoto1999_W70': # Reference: https://ui.adsabs.harvard.edu/abs/1999ApJS..125..439I/abstract # Below adopt the main isotope of W70 model if element_name == "C": mass_ejected = 0.0508 elif element_name == "O": mass_ejected = 0.133 elif element_name == "Ne": # Ne20 mass_ejected = 0.00229 elif element_name == "Mg": mass_ejected = 0.0158 elif element_name == "Al": mass_ejected = 1.31e-4 elif element_name == "Si": mass_ejected = 0.142 elif element_name == "S": mass_ejected = 0.0914 elif element_name == "Ar": mass_ejected = 0.0191 elif element_name == "Ca": mass_ejected = 0.0181 elif element_name == "Ti": # Ti48 mass_ejected = 3.13e-4 elif 
element_name == "Cr": # Cr52 mass_ejected = 0.00773 elif element_name == "Mn": mass_ejected = 0.00666 elif element_name == "Fe": mass_ejected = 0.68 elif element_name == "Ni": # Ni 58 mass_ejected = 0.0834 # else: print("element {} not included in SNIa yield table {}.".format(element_name, yield_reference_name)) mass_ejected = 0 elif yield_reference_name == 'Iwamoto1999_W7': # Reference: https://ui.adsabs.harvard.edu/abs/1999ApJS..125..439I/abstract # Below adopt the main isotope of W70 model if element_name == "C": mass_ejected = 0.0483 elif element_name == "O": mass_ejected = 0.143 elif element_name == "Ne": # Ne20 mass_ejected = 0.00202 # elif element_name == "Mg": mass_ejected = 0.0085 * 5 ### Francois 2004 suggest W7 model * 5 elif element_name == "Al": mass_ejected = 9.86e-4 elif element_name == "Si": mass_ejected = 0.154 elif element_name == "S": mass_ejected = 0.0846 elif element_name == "Ar": mass_ejected = 0.0147 elif element_name == "Ca": mass_ejected = 0.0119 elif element_name == "Ti": # Ti48 mass_ejected = 2.05e-4 elif element_name == "Cr": # Cr52 mass_ejected = 0.00636 elif element_name == "Mn": mass_ejected = 0.00887 elif element_name == "Fe": mass_ejected = 0.626 elif element_name == "Ni": # Ni 58 mass_ejected = 0.11 else: print("element {} not included in SNIa yield table {}.".format(element_name, yield_reference_name)) mass_ejected = 0 elif yield_reference_name == 'Iwamoto1999_WDD3': # Reference: https://ui.adsabs.harvard.edu/abs/1999ApJS..125..439I/abstract if element_name == "C": mass_ejected = 1.66e-2 elif element_name == "O": mass_ejected = 5.58e-2 elif element_name == "Ne": # Ne20 mass_ejected = 4.55e-4 elif element_name == "Na": # Na23 mass_ejected = 3.01e-05 elif element_name == "Mg": mass_ejected = 2.62e-3 elif element_name == "Al": mass_ejected = 1.41e-4 elif element_name == "Si": mass_ejected = 1.58e-1 elif element_name == "S": mass_ejected = 9.37e-2 elif element_name == "Ar": mass_ejected = 1.87e-2 elif element_name == "Ca": 
mass_ejected = 1.88e-2 elif element_name == "Ti": # Ti48 mass_ejected = 5.23e-4 elif element_name == "Cr": # Cr52 mass_ejected = 1.13e-2 elif element_name == "Mn": mass_ejected = 6.16e-3 elif element_name == "Fe": mass_ejected = 7.95e-1 elif element_name == "Ni": # Ni 58 mass_ejected = 4.97e-2 else: print("element {} not included in SNIa yield table {}.".format(element_name, yield_reference_name)) mass_ejected = 0 else: print('Input yield reference name for SNIa, "{}", not found.'.format(yield_reference_name)) return mass_ejected # # Other yield tables: # # t86: Thielemann et al. 1986; ivo13: Seitenzahl et al. 201 # Fe_mass_eject = 0.744 # Nomoto 1984 0.613, TNH93 0.744, i99CDD1/CDD2/W7 0.56 /0.76 /0.63, ivo12/13 0.62-0.67, t03 0.74, t86 0.63 # Si_mass_eject = 0.158 # O_mass_eject = 0.148 # Nomoto 1984 0.140, TNH93 0.148, i99CDD1/CDD2/W7 0.09 /0.06, /0.14, ivo12/13 0.09-0.1, t03 0.14, t86 0.13 # S_mass_eject = 0.086 # Mg_mass_eject = 0.009 # Nomoto 1984 0.023, TNH93 0.009, i99CDD1/CDD2/W7 0.0077 /0.0042 /0.0085, ivo12/13 0.015-0.029, t03 0.013, t86 0.016 # Ne_mass_eject = 0.005 # # O/Mg_mass = # Nomoto 1984 6.0869, TNH93 16.44, i99CDD1/CDD2/W7 11.688 /14.28 /16.47, ivo12/13 6-3.448, t03 10.77, t86 8.125
AzeretREPO_NAMEgalIMFPATH_START.@galIMF_extracted@galIMF-master@yield_tables@SNIa_yield.py@.PATH_END.py
{ "filename": "TransferFunction.py", "repo_name": "classULDM/class.SFDM", "repo_path": "class.SFDM_extracted/class.SFDM-master/external/RealSpaceInterface/Calc2D/TransferFunction.py", "type": "Python" }
import os.path import pickle import uuid import numpy as np from scipy.interpolate import InterpolatedUnivariateSpline, RectBivariateSpline import sys import logging from classy import Class import Calc2D.Database as Database import config TRANSFER_QUANTITIES = ["d_g", "d_ur", "d_cdm", "d_b", "d_g/4 + psi"] def ComputeTransferData(settings, redshift): database_key = settings.copy() database_key.update({'redshift': tuple(redshift)}) database = Database.Database(config.DATABASE_DIR) if database_key in database: return database[database_key], redshift else: cosmo = Class() cosmo.set(settings) cosmo.compute() outputData = [cosmo.get_transfer(z) for z in redshift] # Calculate d_g/4+psi for transfer_function_dict in outputData: transfer_function_dict["d_g/4 + psi"] = transfer_function_dict["d_g"]/4 + transfer_function_dict["psi"] # Now filter the relevant fields fields = TRANSFER_QUANTITIES + ["k (h/Mpc)"] outputData = [{field: outputData[i][field] for field in fields} for i in range(len(redshift))] database[database_key] = outputData return outputData, redshift def ComputeTransferFunctionList(cosmologicalParameters, redshift, kperdecade=200, P_k_max=100): class_settings = cosmologicalParameters.copy() class_settings.update({ "output": "mTk", "gauge": "newtonian", "evolver": "1", "P_k_max_h/Mpc": P_k_max, "k_per_decade_for_pk": kperdecade, "z_max_pk": str(max(redshift)), }) data_dict, redshift = ComputeTransferData(class_settings, redshift) transfer_functions = {field: [] for field in TRANSFER_QUANTITIES} for i in range(len(redshift)): k_data = data_dict[0]["k (h/Mpc)"] * cosmologicalParameters["h"] #in order to get k [1/Mpc] k_data_zero = np.concatenate(([0.0], k_data)) for field in TRANSFER_QUANTITIES: data = data_dict[i][field] / data_dict[i][field][0] data_zero = np.concatenate(([1.0], data)) interpolated_func = InterpolatedUnivariateSpline(k_data_zero, data_zero) transfer_functions[field].append(interpolated_func) return transfer_functions
classULDMREPO_NAMEclass.SFDMPATH_START.@class.SFDM_extracted@class.SFDM-master@external@RealSpaceInterface@Calc2D@TransferFunction.py@.PATH_END.py
{ "filename": "wave_transform.py", "repo_name": "herjy/SLIT", "repo_path": "SLIT_extracted/SLIT-master/SLIT/wave_transform.py", "type": "Python" }
import numpy as np import scipy.signal as cp import matplotlib.pyplot as plt import scipy.ndimage.filters as sc def symmetrise(img, size): n3, n4 = np.shape(img) n1,n2 = size img[:(n3-n1)/2, :] = np.flipud(img[(n3-n1)/2:(n3-n1),:]) img[:,:(n4-n2)/2] = np.fliplr(img[:,(n4-n2)/2:(n4-n2)]) img[(n3+n1)/2:,:] = np.flipud(img[n1:(n3+n1)/2,:]) img[:,(n4+n2)/2:] = np.fliplr(img[:,n2:(n4+n2)/2]) return img def fft_convolve(X,Y, inv = 0): XF = np.fft.rfft2(X) YF = np.fft.rfft2(Y) # YF0 = np.copy(YF) # YF.imag = 0 # XF.imag = 0 if inv == 1: # plt.imshow(np.real(YF)); plt.colorbar(); plt.show() YF = np.conj(YF) SF = XF*YF S = np.fft.irfft2(SF) n1,n2 = np.shape(S) S = np.roll(S,-n1/2+1,axis = 0) S = np.roll(S,-n2/2+1,axis = 1) return np.real(S) def wave_transform(img, lvl, Filter = 'Bspline', newwave = 1, convol2d = 0): mode = 'nearest' lvl = lvl-1 sh = np.shape(img) if np.size(sh) ==3: mn = np.min(sh) wave = np.zeros([lvl+1,sh[1], sh[1],mn]) for h in np.linspace(0,mn-1, mn): if mn == sh[0]: wave[:,:,:,h] = wave_transform(img[h,:,:],lvl+1, Filter = Filter) else: wave[:,:,:,h] = wave_transform(img[:,:,h],lvl+1, Filter = Filter) return wave n1 = sh[1] n2 = sh[1] if Filter == 'Bspline': h = [1./16, 1./4, 3./8, 1./4, 1./16] else: h = [1./4,1./2,1./4] n = np.size(h) h = np.array(h) if n+2**(lvl-1)*(n-1) >= np.min([n1,n2])/2.: lvl = np.int_(np.log2((n1-1)/(n-1.))+1) c = img ## wavelet set of coefficients. 
wave = np.zeros([lvl+1,n1,n2]) for i in np.linspace(0,lvl-1,lvl): newh = np.zeros((1,n+(n-1)*(2**i-1))) newh[0,np.int_(np.linspace(0,np.size(newh)-1,len(h)))] = h H = np.dot(newh.T,newh) ######Calculates c(j+1) ###### Line convolution if convol2d == 1: cnew = cp.convolve2d(c, H, mode='same', boundary='symm') else: cnew = sc.convolve1d(c,newh[0,:],axis = 0, mode =mode) ###### Column convolution cnew = sc.convolve1d(cnew,newh[0,:],axis = 1, mode =mode) if newwave ==1: ###### hoh for g; Column convolution if convol2d == 1: hc = cp.convolve2d(cnew, H, mode='same', boundary='symm') else: hc = sc.convolve1d(cnew,newh[0,:],axis = 0, mode = mode) ###### hoh for g; Line convolution hc = sc.convolve1d(hc,newh[0,:],axis = 1, mode = mode) ###### wj+1 = cj-hcj+1 wave[i,:,:] = c-hc else: ###### wj+1 = cj-cj+1 wave[i,:,:] = c-cnew c = cnew wave[i+1,:,:] = c return wave def iuwt(wave, convol2d =0): mode = 'nearest' lvl,n1,n2 = np.shape(wave) h = np.array([1./16, 1./4, 3./8, 1./4, 1./16]) n = np.size(h) cJ = np.copy(wave[lvl-1,:,:]) for i in np.linspace(1,lvl-1,lvl-1): newh = np.zeros((1,n+(n-1)*(2**(lvl-1-i)-1))) newh[0,np.int_(np.linspace(0,np.size(newh)-1,len(h)))] = h H = np.dot(newh.T,newh) ###### Line convolution if convol2d == 1: cnew = cp.convolve2d(cJ, H, mode='same', boundary='symm') else: cnew = sc.convolve1d(cJ,newh[0,:],axis = 0, mode = mode) ###### Column convolution cnew = sc.convolve1d(cnew,newh[0,:],axis = 1, mode = mode) cJ = cnew+wave[lvl-1-i,:,:] return np.reshape(cJ,(n1,n2))
herjyREPO_NAMESLITPATH_START.@SLIT_extracted@SLIT-master@SLIT@wave_transform.py@.PATH_END.py
{ "filename": "README.md", "repo_name": "bradkav/WIMpy_NREFT", "repo_path": "WIMpy_NREFT_extracted/WIMpy_NREFT-main/old_WIMpy/FormFactors/README.md", "type": "Markdown" }
# WIMpy_NREFT ## NREFT form factors The nuclear form factors are written in the form: <p align="center"><img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/0ba18dc393652bade995a9e9cec290e4.svg?invert_in_darkmode" align=middle width=177.11595pt height=37.820475pt/></p> The definition of the dimensionless momentum variable y can be found in e.g. Appendix A.3 of [arXiv:1203.3542](https://arxiv.org/abs/1203.3542). The form factors tables give the coefficients <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/0a5ec44b76d454790dd94ab5cfe77d12.svg?invert_in_darkmode" align=middle width=14.326125pt height=14.10255pt/> from <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/8f9a0a0ee8a6345657b63f035033cc10.svg?invert_in_darkmode" align=middle width=39.101865pt height=22.74591pt/> to <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/c941f6f2f4dae7b9a82e68bafb0b6c2a.svg?invert_in_darkmode" align=middle width=39.101865pt height=22.74591pt/> on each row. For each form factor <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/30e02450834ec46ab5f2f42b4262acc6.svg?invert_in_darkmode" align=middle width=22.16247pt height=22.38192pt/> there are 4 rows, corresponding to: <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/eef18ebb844731feaf1a27db7633cf08.svg?invert_in_darkmode" align=middle width=48.65619pt height=34.27314pt/> <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/0a167665ae87a31e1d2a91aff98a0d8d.svg?invert_in_darkmode" align=middle width=50.00589pt height=34.27314pt/> <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/0790f722198b36f686cc5f344fdbc228.svg?invert_in_darkmode" align=middle width=50.00589pt height=34.27314pt/> <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/3defb8d8d10efcaad80333507ae12aad.svg?invert_in_darkmode" align=middle width=51.355425pt height=34.27314pt/> The different form factors are listed in the following order (with 4 rows for each, as described above): 
<p align="center"><img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/c2c41a290ac7e1ce59f9c6bd2239a5db.svg?invert_in_darkmode" align=middle width=312.19155pt height=21.967605pt/></p> Note that the form factor <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/ab5b08c473fe49f4447df3509a50a91d.svg?invert_in_darkmode" align=middle width=23.490885pt height=22.38192pt/> is not (yet) included, and therefore the results are only valid for operators up to <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/917244ca615745a80feccbe760feb728.svg?invert_in_darkmode" align=middle width=26.09409pt height=22.38192pt/>. ### References NREFT form factors for Xe, F and I are taken from [arXiv:1203.3542](https://arxiv.org/abs/1203.3542). The form factor for C is taken from [arXiv:1501.03729](https://arxiv.org/abs/1501.03729). Note that the <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/84c95f91a742c9ceb460a83f9b5090bf.svg?invert_in_darkmode" align=middle width=17.74179pt height=22.38192pt/> functions of arXiv:1501.03729 (and others) are related to the form factors <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/b8bc815b5e9d5177af01fd4d3d3c2f10.svg?invert_in_darkmode" align=middle width=12.80598pt height=22.38192pt/> by: <p align="center"><img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/78a41e343701df081ecdbd0ef00e4c0b.svg?invert_in_darkmode" align=middle width=194.95575pt height=34.31538pt/></p> where J is the nuclear spin. Check out Eq. 76 of arXiv:1203.3542 for how to convert between the nucleon <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/1748711522507025e0f09120e8bd4dd2.svg?invert_in_darkmode" align=middle width=53.626485pt height=24.66849pt/> and isospin <img src="https://rawgit.com/bradkav/WIMpy_NREFT/master/svgs/0a06d517787e682c533d984ce8036565.svg?invert_in_darkmode" align=middle width=41.76513pt height=24.66849pt/> bases.
bradkavREPO_NAMEWIMpy_NREFTPATH_START.@WIMpy_NREFT_extracted@WIMpy_NREFT-main@old_WIMpy@FormFactors@README.md@.PATH_END.py
{ "filename": "multiple_axes.py", "repo_name": "matplotlib/matplotlib", "repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/animation/multiple_axes.py", "type": "Python" }
""" ======================= Multiple Axes animation ======================= This example showcases: - how animation across multiple subplots works, - using a figure artist in the animation. Output generated via `matplotlib.animation.Animation.to_jshtml`. """ import matplotlib.pyplot as plt import numpy as np import matplotlib.animation as animation from matplotlib.patches import ConnectionPatch fig, (axl, axr) = plt.subplots( ncols=2, sharey=True, figsize=(6, 2), gridspec_kw=dict(width_ratios=[1, 3], wspace=0), ) axl.set_aspect(1) axr.set_box_aspect(1 / 3) axr.yaxis.set_visible(False) axr.xaxis.set_ticks([0, np.pi, 2 * np.pi], ["0", r"$\pi$", r"$2\pi$"]) # draw circle with initial point in left Axes x = np.linspace(0, 2 * np.pi, 50) axl.plot(np.cos(x), np.sin(x), "k", lw=0.3) point, = axl.plot(0, 0, "o") # draw full curve to set view limits in right Axes sine, = axr.plot(x, np.sin(x)) # draw connecting line between both graphs con = ConnectionPatch( (1, 0), (0, 0), "data", "data", axesA=axl, axesB=axr, color="C0", ls="dotted", ) fig.add_artist(con) def animate(i): x = np.linspace(0, i, int(i * 25 / np.pi)) sine.set_data(x, np.sin(x)) x, y = np.cos(i), np.sin(i) point.set_data([x], [y]) con.xy1 = x, y con.xy2 = i, y return point, sine, con ani = animation.FuncAnimation( fig, animate, interval=50, blit=False, # blitting can't be used with Figure artists frames=x, repeat_delay=100, ) plt.show() # %% # # .. admonition:: References # # The use of the following functions, methods, classes and modules is shown # in this example: # # - `matplotlib.patches.ConnectionPatch` # - `matplotlib.animation.FuncAnimation` # # .. tags:: component: axes, animation
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@animation@multiple_axes.py@.PATH_END.py
{ "filename": "test_legendre.py", "repo_name": "scipy/scipy", "repo_path": "scipy_extracted/scipy-main/scipy/special/tests/test_legendre.py", "type": "Python" }
import math import numpy as np import pytest from numpy.testing import (assert_equal, assert_almost_equal, assert_array_almost_equal, assert_allclose) from scipy import special from scipy.special import (legendre_p, legendre_p_all, assoc_legendre_p, assoc_legendre_p_all, sph_legendre_p, sph_legendre_p_all) # Base polynomials come from Abrahmowitz and Stegan class TestLegendre: def test_legendre(self): leg0 = special.legendre(0) leg1 = special.legendre(1) leg2 = special.legendre(2) leg3 = special.legendre(3) leg4 = special.legendre(4) leg5 = special.legendre(5) assert_equal(leg0.c, [1]) assert_equal(leg1.c, [1,0]) assert_almost_equal(leg2.c, np.array([3,0,-1])/2.0, decimal=13) assert_almost_equal(leg3.c, np.array([5,0,-3,0])/2.0) assert_almost_equal(leg4.c, np.array([35,0,-30,0,3])/8.0) assert_almost_equal(leg5.c, np.array([63,0,-70,0,15,0])/8.0) @pytest.mark.parametrize('n', [1, 2, 3, 4, 5]) @pytest.mark.parametrize('zr', [0.5241717, 12.80232, -9.699001, 0.5122437, 0.1714377]) @pytest.mark.parametrize('zi', [9.766818, 0.2999083, 8.24726, -22.84843, -0.8792666]) def test_lpn_against_clpmn(self, n, zr, zi): reslpn = special.lpn(n, zr + zi*1j) resclpmn = special.clpmn(0, n, zr+zi*1j) assert_allclose(reslpn[0], resclpmn[0][0]) assert_allclose(reslpn[1], resclpmn[1][0]) class TestLegendreP: @pytest.mark.parametrize("shape", [(10,), (4, 9), (3, 5, 7)]) def test_ode(self, shape): rng = np.random.default_rng(1234) n = rng.integers(0, 100, shape) x = rng.uniform(-1, 1, shape) p, p_jac, p_hess = legendre_p(n, x, diff_n=2) assert p.shape == shape assert p_jac.shape == p.shape assert p_hess.shape == p_jac.shape err = (1 - x * x) * p_hess - 2 * x * p_jac + n * (n + 1) * p np.testing.assert_allclose(err, 0, atol=1e-10) @pytest.mark.parametrize("n_max", [1, 2, 4, 8, 16, 32]) @pytest.mark.parametrize("x_shape", [(10,), (4, 9), (3, 5, 7)]) def test_all_ode(self, n_max, x_shape): rng = np.random.default_rng(1234) x = rng.uniform(-1, 1, x_shape) p, p_jac, p_hess = 
legendre_p_all(n_max, x, diff_n=2) n = np.arange(n_max + 1) n = np.expand_dims(n, axis = tuple(range(1, x.ndim + 1))) assert p.shape == (len(n),) + x.shape assert p_jac.shape == p.shape assert p_hess.shape == p_jac.shape err = (1 - x * x) * p_hess - 2 * x * p_jac + n * (n + 1) * p np.testing.assert_allclose(err, 0, atol=1e-10) def test_legacy(self): p, pd = special.lpn(2, 0.5) assert_array_almost_equal(p, [1.00000, 0.50000, -0.12500], 4) assert_array_almost_equal(pd, [0.00000, 1.00000, 1.50000], 4) class TestAssocLegendreP: @pytest.mark.parametrize("shape", [(10,), (4, 9), (3, 5, 7, 10)]) @pytest.mark.parametrize("m_max", [5, 4]) @pytest.mark.parametrize("n_max", [7, 10]) def test_lpmn(self, shape, n_max, m_max): rng = np.random.default_rng(1234) x = rng.uniform(-0.99, 0.99, shape) p_all, p_all_jac, p_all_hess = \ assoc_legendre_p_all(n_max, m_max, x, diff_n=2) n = np.arange(n_max + 1) n = np.expand_dims(n, axis = tuple(range(1, x.ndim + 2))) m = np.concatenate([np.arange(m_max + 1), np.arange(-m_max, 0)]) m = np.expand_dims(m, axis = (0,) + tuple(range(2, x.ndim + 2))) x = np.expand_dims(x, axis = (0, 1)) p, p_jac, p_hess = assoc_legendre_p(n, m, x, diff_n=2) np.testing.assert_allclose(p, p_all) np.testing.assert_allclose(p_jac, p_all_jac) np.testing.assert_allclose(p_hess, p_all_hess) @pytest.mark.parametrize("shape", [(10,), (4, 9), (3, 5, 7, 10)]) @pytest.mark.parametrize("norm", [True, False]) def test_ode(self, shape, norm): rng = np.random.default_rng(1234) n = rng.integers(0, 10, shape) m = rng.integers(-10, 10, shape) x = rng.uniform(-1, 1, shape) p, p_jac, p_hess = assoc_legendre_p(n, m, x, norm=norm, diff_n=2) assert p.shape == shape assert p_jac.shape == p.shape assert p_hess.shape == p_jac.shape np.testing.assert_allclose((1 - x * x) * p_hess, 2 * x * p_jac - (n * (n + 1) - m * m / (1 - x * x)) * p, rtol=1e-05, atol=1e-08) @pytest.mark.parametrize("shape", [(10,), (4, 9), (3, 5, 7)]) def test_all(self, shape): rng = np.random.default_rng(1234) n_max = 
20 m_max = 20 x = rng.uniform(-0.99, 0.99, shape) p, p_jac, p_hess = assoc_legendre_p_all(n_max, m_max, x, diff_n=2) m = np.concatenate([np.arange(m_max + 1), np.arange(-m_max, 0)]) n = np.arange(n_max + 1) n = np.expand_dims(n, axis = tuple(range(1, x.ndim + 2))) m = np.expand_dims(m, axis = (0,) + tuple(range(2, x.ndim + 2))) np.testing.assert_allclose((1 - x * x) * p_hess, 2 * x * p_jac - (n * (n + 1) - m * m / (1 - x * x)) * p, rtol=1e-05, atol=1e-08) @pytest.mark.parametrize("shape", [(10,), (4, 9), (3, 5, 7)]) @pytest.mark.parametrize("norm", [True, False]) def test_specific(self, shape, norm): rng = np.random.default_rng(1234) x = rng.uniform(-0.99, 0.99, shape) p, p_jac = assoc_legendre_p_all(4, 4, x, norm=norm, diff_n=1) np.testing.assert_allclose(p[0, 0], assoc_legendre_p_0_0(x, norm=norm)) np.testing.assert_allclose(p[0, 1], 0) np.testing.assert_allclose(p[0, 2], 0) np.testing.assert_allclose(p[0, 3], 0) np.testing.assert_allclose(p[0, 4], 0) np.testing.assert_allclose(p[0, -3], 0) np.testing.assert_allclose(p[0, -2], 0) np.testing.assert_allclose(p[0, -1], 0) np.testing.assert_allclose(p[1, 0], assoc_legendre_p_1_0(x, norm=norm)) np.testing.assert_allclose(p[1, 1], assoc_legendre_p_1_1(x, norm=norm)) np.testing.assert_allclose(p[1, 2], 0) np.testing.assert_allclose(p[1, 3], 0) np.testing.assert_allclose(p[1, 4], 0) np.testing.assert_allclose(p[1, -4], 0) np.testing.assert_allclose(p[1, -3], 0) np.testing.assert_allclose(p[1, -2], 0) np.testing.assert_allclose(p[1, -1], assoc_legendre_p_1_m1(x, norm=norm)) np.testing.assert_allclose(p[2, 0], assoc_legendre_p_2_0(x, norm=norm)) np.testing.assert_allclose(p[2, 1], assoc_legendre_p_2_1(x, norm=norm)) np.testing.assert_allclose(p[2, 2], assoc_legendre_p_2_2(x, norm=norm)) np.testing.assert_allclose(p[2, 3], 0) np.testing.assert_allclose(p[2, 4], 0) np.testing.assert_allclose(p[2, -4], 0) np.testing.assert_allclose(p[2, -3], 0) np.testing.assert_allclose(p[2, -2], assoc_legendre_p_2_m2(x, norm=norm)) 
np.testing.assert_allclose(p[2, -1], assoc_legendre_p_2_m1(x, norm=norm)) np.testing.assert_allclose(p[3, 0], assoc_legendre_p_3_0(x, norm=norm)) np.testing.assert_allclose(p[3, 1], assoc_legendre_p_3_1(x, norm=norm)) np.testing.assert_allclose(p[3, 2], assoc_legendre_p_3_2(x, norm=norm)) np.testing.assert_allclose(p[3, 3], assoc_legendre_p_3_3(x, norm=norm)) np.testing.assert_allclose(p[3, 4], 0) np.testing.assert_allclose(p[3, -4], 0) np.testing.assert_allclose(p[3, -3], assoc_legendre_p_3_m3(x, norm=norm)) np.testing.assert_allclose(p[3, -2], assoc_legendre_p_3_m2(x, norm=norm)) np.testing.assert_allclose(p[3, -1], assoc_legendre_p_3_m1(x, norm=norm)) np.testing.assert_allclose(p[4, 0], assoc_legendre_p_4_0(x, norm=norm)) np.testing.assert_allclose(p[4, 1], assoc_legendre_p_4_1(x, norm=norm)) np.testing.assert_allclose(p[4, 2], assoc_legendre_p_4_2(x, norm=norm)) np.testing.assert_allclose(p[4, 3], assoc_legendre_p_4_3(x, norm=norm)) np.testing.assert_allclose(p[4, 4], assoc_legendre_p_4_4(x, norm=norm)) np.testing.assert_allclose(p[4, -4], assoc_legendre_p_4_m4(x, norm=norm)) np.testing.assert_allclose(p[4, -3], assoc_legendre_p_4_m3(x, norm=norm)) np.testing.assert_allclose(p[4, -2], assoc_legendre_p_4_m2(x, norm=norm)) np.testing.assert_allclose(p[4, -1], assoc_legendre_p_4_m1(x, norm=norm)) np.testing.assert_allclose(p_jac[0, 0], assoc_legendre_p_0_0_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[0, 1], 0) np.testing.assert_allclose(p_jac[0, 2], 0) np.testing.assert_allclose(p_jac[0, 3], 0) np.testing.assert_allclose(p_jac[0, 4], 0) np.testing.assert_allclose(p_jac[0, -4], 0) np.testing.assert_allclose(p_jac[0, -3], 0) np.testing.assert_allclose(p_jac[0, -2], 0) np.testing.assert_allclose(p_jac[0, -1], 0) np.testing.assert_allclose(p_jac[1, 0], assoc_legendre_p_1_0_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[1, 1], assoc_legendre_p_1_1_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[1, 2], 0) np.testing.assert_allclose(p_jac[1, 3], 0) 
np.testing.assert_allclose(p_jac[1, 4], 0) np.testing.assert_allclose(p_jac[1, -4], 0) np.testing.assert_allclose(p_jac[1, -3], 0) np.testing.assert_allclose(p_jac[1, -2], 0) np.testing.assert_allclose(p_jac[1, -1], assoc_legendre_p_1_m1_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[2, 0], assoc_legendre_p_2_0_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[2, 1], assoc_legendre_p_2_1_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[2, 2], assoc_legendre_p_2_2_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[2, 3], 0) np.testing.assert_allclose(p_jac[2, 4], 0) np.testing.assert_allclose(p_jac[2, -4], 0) np.testing.assert_allclose(p_jac[2, -3], 0) np.testing.assert_allclose(p_jac[2, -2], assoc_legendre_p_2_m2_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[2, -1], assoc_legendre_p_2_m1_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[3, 0], assoc_legendre_p_3_0_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[3, 1], assoc_legendre_p_3_1_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[3, 2], assoc_legendre_p_3_2_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[3, 3], assoc_legendre_p_3_3_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[3, 4], 0) np.testing.assert_allclose(p_jac[3, -4], 0) np.testing.assert_allclose(p_jac[3, -3], assoc_legendre_p_3_m3_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[3, -2], assoc_legendre_p_3_m2_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[3, -1], assoc_legendre_p_3_m1_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[4, 0], assoc_legendre_p_4_0_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[4, 1], assoc_legendre_p_4_1_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[4, 2], assoc_legendre_p_4_2_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[4, 3], assoc_legendre_p_4_3_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[4, 4], assoc_legendre_p_4_4_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[4, -4], assoc_legendre_p_4_m4_jac(x, norm=norm)) 
np.testing.assert_allclose(p_jac[4, -3], assoc_legendre_p_4_m3_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[4, -2], assoc_legendre_p_4_m2_jac(x, norm=norm)) np.testing.assert_allclose(p_jac[4, -1], assoc_legendre_p_4_m1_jac(x, norm=norm)) @pytest.mark.parametrize("m_max", [7]) @pytest.mark.parametrize("n_max", [10]) @pytest.mark.parametrize("x", [1, -1]) def test_all_limits(self, m_max, n_max, x): p, p_jac = assoc_legendre_p_all(n_max, m_max, x, diff_n=1) n = np.arange(n_max + 1) np.testing.assert_allclose(p_jac[:, 0], pow(x, n + 1) * n * (n + 1) / 2) np.testing.assert_allclose(p_jac[:, 1], np.where(n >= 1, pow(x, n) * np.inf, 0)) np.testing.assert_allclose(p_jac[:, 2], np.where(n >= 2, -pow(x, n + 1) * (n + 2) * (n + 1) * n * (n - 1) / 4, 0)) np.testing.assert_allclose(p_jac[:, -2], np.where(n >= 2, -pow(x, n + 1) / 4, 0)) np.testing.assert_allclose(p_jac[:, -1], np.where(n >= 1, -pow(x, n) * np.inf, 0)) for m in range(3, m_max + 1): np.testing.assert_allclose(p_jac[:, m], 0) np.testing.assert_allclose(p_jac[:, -m], 0) @pytest.mark.parametrize("m_max", [3, 5, 10]) @pytest.mark.parametrize("n_max", [10]) def test_legacy(self, m_max, n_max): x = 0.5 p, p_jac = assoc_legendre_p_all(n_max, m_max, x, diff_n=1) p_legacy, p_jac_legacy = special.lpmn(m_max, n_max, x) for m in range(m_max + 1): np.testing.assert_allclose(p_legacy[m], p[:, m]) p_legacy, p_jac_legacy = special.lpmn(-m_max, n_max, x) for m in range(m_max + 1): np.testing.assert_allclose(p_legacy[m], p[:, -m]) class TestMultiAssocLegendreP: @pytest.mark.parametrize("shape", [(1000,), (4, 9), (3, 5, 7)]) @pytest.mark.parametrize("branch_cut", [2, 3]) @pytest.mark.parametrize("z_min, z_max", [(-10 - 10j, 10 + 10j), (-1, 1), (-10j, 10j)]) @pytest.mark.parametrize("norm", [True, False]) def test_specific(self, shape, branch_cut, z_min, z_max, norm): rng = np.random.default_rng(1234) z = rng.uniform(z_min.real, z_max.real, shape) + \ 1j * rng.uniform(z_min.imag, z_max.imag, shape) p, p_jac = 
assoc_legendre_p_all(4, 4, z, branch_cut=branch_cut, norm=norm, diff_n=1) np.testing.assert_allclose(p[0, 0], assoc_legendre_p_0_0(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[0, 1], 0) np.testing.assert_allclose(p[0, 2], 0) np.testing.assert_allclose(p[0, 3], 0) np.testing.assert_allclose(p[0, 4], 0) np.testing.assert_allclose(p[0, -4], 0) np.testing.assert_allclose(p[0, -3], 0) np.testing.assert_allclose(p[0, -2], 0) np.testing.assert_allclose(p[0, -1], 0) np.testing.assert_allclose(p[1, 0], assoc_legendre_p_1_0(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[1, 1], assoc_legendre_p_1_1(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[1, 2], 0) np.testing.assert_allclose(p[1, 3], 0) np.testing.assert_allclose(p[1, 4], 0) np.testing.assert_allclose(p[1, -4], 0) np.testing.assert_allclose(p[1, -3], 0) np.testing.assert_allclose(p[1, -2], 0) np.testing.assert_allclose(p[1, -1], assoc_legendre_p_1_m1(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[2, 0], assoc_legendre_p_2_0(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[2, 1], assoc_legendre_p_2_1(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[2, 2], assoc_legendre_p_2_2(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[2, 3], 0) np.testing.assert_allclose(p[2, 4], 0) np.testing.assert_allclose(p[2, -4], 0) np.testing.assert_allclose(p[2, -3], 0) np.testing.assert_allclose(p[2, -2], assoc_legendre_p_2_m2(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[2, -1], assoc_legendre_p_2_m1(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[3, 0], assoc_legendre_p_3_0(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[3, 1], assoc_legendre_p_3_1(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[3, 2], assoc_legendre_p_3_2(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[3, 3], assoc_legendre_p_3_3(z, 
branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[3, 4], 0) np.testing.assert_allclose(p[3, -4], 0) np.testing.assert_allclose(p[3, -3], assoc_legendre_p_3_m3(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[3, -2], assoc_legendre_p_3_m2(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[3, -1], assoc_legendre_p_3_m1(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[4, 0], assoc_legendre_p_4_0(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[4, 1], assoc_legendre_p_4_1(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[4, 2], assoc_legendre_p_4_2(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[4, 3], assoc_legendre_p_4_3(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[4, 4], assoc_legendre_p_4_4(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[4, -4], assoc_legendre_p_4_m4(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[4, -3], assoc_legendre_p_4_m3(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[4, -2], assoc_legendre_p_4_m2(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p[4, -1], assoc_legendre_p_4_m1(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[0, 0], assoc_legendre_p_0_0_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[0, 1], 0) np.testing.assert_allclose(p_jac[0, 2], 0) np.testing.assert_allclose(p_jac[0, 3], 0) np.testing.assert_allclose(p_jac[0, 4], 0) np.testing.assert_allclose(p_jac[0, -4], 0) np.testing.assert_allclose(p_jac[0, -3], 0) np.testing.assert_allclose(p_jac[0, -2], 0) np.testing.assert_allclose(p_jac[0, -1], 0) np.testing.assert_allclose(p_jac[1, 0], assoc_legendre_p_1_0_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[1, 1], assoc_legendre_p_1_1_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[1, 2], 0) 
np.testing.assert_allclose(p_jac[1, 3], 0) np.testing.assert_allclose(p_jac[1, 4], 0) np.testing.assert_allclose(p_jac[1, -4], 0) np.testing.assert_allclose(p_jac[1, -3], 0) np.testing.assert_allclose(p_jac[1, -2], 0) np.testing.assert_allclose(p_jac[1, -1], assoc_legendre_p_1_m1_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[2, 0], assoc_legendre_p_2_0_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[2, 1], assoc_legendre_p_2_1_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[2, 2], assoc_legendre_p_2_2_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[2, 3], 0) np.testing.assert_allclose(p_jac[2, 4], 0) np.testing.assert_allclose(p_jac[2, -4], 0) np.testing.assert_allclose(p_jac[2, -3], 0) np.testing.assert_allclose(p_jac[2, -2], assoc_legendre_p_2_m2_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[2, -1], assoc_legendre_p_2_m1_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[3, 0], assoc_legendre_p_3_0_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[3, 1], assoc_legendre_p_3_1_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[3, 2], assoc_legendre_p_3_2_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[3, 3], assoc_legendre_p_3_3_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[3, 4], 0) np.testing.assert_allclose(p_jac[3, -4], 0) np.testing.assert_allclose(p_jac[3, -3], assoc_legendre_p_3_m3_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[3, -2], assoc_legendre_p_3_m2_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[3, -1], assoc_legendre_p_3_m1_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[4, 0], assoc_legendre_p_4_0_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[4, 1], 
assoc_legendre_p_4_1_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[4, 2], assoc_legendre_p_4_2_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[4, 3], assoc_legendre_p_4_3_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[4, 4], assoc_legendre_p_4_4_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[4, -4], assoc_legendre_p_4_m4_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[4, -3], assoc_legendre_p_4_m3_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[4, -2], assoc_legendre_p_4_m2_jac(z, branch_cut=branch_cut, norm=norm)) np.testing.assert_allclose(p_jac[4, -1], assoc_legendre_p_4_m1_jac(z, branch_cut=branch_cut, norm=norm)) class TestSphLegendreP: @pytest.mark.parametrize("shape", [(10,), (4, 9), (3, 5, 7)]) def test_specific(self, shape): rng = np.random.default_rng(1234) theta = rng.uniform(-np.pi, np.pi, shape) p, p_jac = sph_legendre_p_all(4, 4, theta, diff_n=1) np.testing.assert_allclose(p[0, 0], sph_legendre_p_0_0(theta)) np.testing.assert_allclose(p[0, 1], 0) np.testing.assert_allclose(p[0, 2], 0) np.testing.assert_allclose(p[0, 3], 0) np.testing.assert_allclose(p[0, 4], 0) np.testing.assert_allclose(p[0, -3], 0) np.testing.assert_allclose(p[0, -2], 0) np.testing.assert_allclose(p[0, -1], 0) np.testing.assert_allclose(p[1, 0], sph_legendre_p_1_0(theta)) np.testing.assert_allclose(p[1, 1], sph_legendre_p_1_1(theta)) np.testing.assert_allclose(p[1, 2], 0) np.testing.assert_allclose(p[1, 3], 0) np.testing.assert_allclose(p[1, 4], 0) np.testing.assert_allclose(p[1, -4], 0) np.testing.assert_allclose(p[1, -3], 0) np.testing.assert_allclose(p[1, -2], 0) np.testing.assert_allclose(p[1, -1], sph_legendre_p_1_m1(theta)) np.testing.assert_allclose(p[2, 0], sph_legendre_p_2_0(theta)) np.testing.assert_allclose(p[2, 1], sph_legendre_p_2_1(theta)) np.testing.assert_allclose(p[2, 2], sph_legendre_p_2_2(theta)) 
np.testing.assert_allclose(p[2, 3], 0) np.testing.assert_allclose(p[2, 4], 0) np.testing.assert_allclose(p[2, -4], 0) np.testing.assert_allclose(p[2, -3], 0) np.testing.assert_allclose(p[2, -2], sph_legendre_p_2_m2(theta)) np.testing.assert_allclose(p[2, -1], sph_legendre_p_2_m1(theta)) np.testing.assert_allclose(p[3, 0], sph_legendre_p_3_0(theta)) np.testing.assert_allclose(p[3, 1], sph_legendre_p_3_1(theta)) np.testing.assert_allclose(p[3, 2], sph_legendre_p_3_2(theta)) np.testing.assert_allclose(p[3, 3], sph_legendre_p_3_3(theta)) np.testing.assert_allclose(p[3, 4], 0) np.testing.assert_allclose(p[3, -4], 0) np.testing.assert_allclose(p[3, -3], sph_legendre_p_3_m3(theta)) np.testing.assert_allclose(p[3, -2], sph_legendre_p_3_m2(theta)) np.testing.assert_allclose(p[3, -1], sph_legendre_p_3_m1(theta)) np.testing.assert_allclose(p[4, 0], sph_legendre_p_4_0(theta)) np.testing.assert_allclose(p[4, 1], sph_legendre_p_4_1(theta)) np.testing.assert_allclose(p[4, 2], sph_legendre_p_4_2(theta)) np.testing.assert_allclose(p[4, 3], sph_legendre_p_4_3(theta)) np.testing.assert_allclose(p[4, 4], sph_legendre_p_4_4(theta)) np.testing.assert_allclose(p[4, -4], sph_legendre_p_4_m4(theta)) np.testing.assert_allclose(p[4, -3], sph_legendre_p_4_m3(theta)) np.testing.assert_allclose(p[4, -2], sph_legendre_p_4_m2(theta)) np.testing.assert_allclose(p[4, -1], sph_legendre_p_4_m1(theta)) np.testing.assert_allclose(p_jac[0, 0], sph_legendre_p_0_0_jac(theta)) np.testing.assert_allclose(p_jac[0, 1], 0) np.testing.assert_allclose(p_jac[0, 2], 0) np.testing.assert_allclose(p_jac[0, 3], 0) np.testing.assert_allclose(p_jac[0, 4], 0) np.testing.assert_allclose(p_jac[0, -3], 0) np.testing.assert_allclose(p_jac[0, -2], 0) np.testing.assert_allclose(p_jac[0, -1], 0) np.testing.assert_allclose(p_jac[1, 0], sph_legendre_p_1_0_jac(theta)) np.testing.assert_allclose(p_jac[1, 1], sph_legendre_p_1_1_jac(theta)) np.testing.assert_allclose(p_jac[1, 2], 0) np.testing.assert_allclose(p_jac[1, 3], 0) 
np.testing.assert_allclose(p_jac[1, 4], 0) np.testing.assert_allclose(p_jac[1, -4], 0) np.testing.assert_allclose(p_jac[1, -3], 0) np.testing.assert_allclose(p_jac[1, -2], 0) np.testing.assert_allclose(p_jac[1, -1], sph_legendre_p_1_m1_jac(theta)) np.testing.assert_allclose(p_jac[2, 0], sph_legendre_p_2_0_jac(theta)) np.testing.assert_allclose(p_jac[2, 1], sph_legendre_p_2_1_jac(theta)) np.testing.assert_allclose(p_jac[2, 2], sph_legendre_p_2_2_jac(theta)) np.testing.assert_allclose(p_jac[2, 3], 0) np.testing.assert_allclose(p_jac[2, 4], 0) np.testing.assert_allclose(p_jac[2, -4], 0) np.testing.assert_allclose(p_jac[2, -3], 0) np.testing.assert_allclose(p_jac[2, -2], sph_legendre_p_2_m2_jac(theta)) np.testing.assert_allclose(p_jac[2, -1], sph_legendre_p_2_m1_jac(theta)) np.testing.assert_allclose(p_jac[3, 0], sph_legendre_p_3_0_jac(theta)) np.testing.assert_allclose(p_jac[3, 1], sph_legendre_p_3_1_jac(theta)) np.testing.assert_allclose(p_jac[3, 2], sph_legendre_p_3_2_jac(theta)) np.testing.assert_allclose(p_jac[3, 3], sph_legendre_p_3_3_jac(theta)) np.testing.assert_allclose(p_jac[3, 4], 0) np.testing.assert_allclose(p_jac[3, -4], 0) np.testing.assert_allclose(p_jac[3, -3], sph_legendre_p_3_m3_jac(theta)) np.testing.assert_allclose(p_jac[3, -2], sph_legendre_p_3_m2_jac(theta)) np.testing.assert_allclose(p_jac[3, -1], sph_legendre_p_3_m1_jac(theta)) np.testing.assert_allclose(p_jac[4, 0], sph_legendre_p_4_0_jac(theta)) np.testing.assert_allclose(p_jac[4, 1], sph_legendre_p_4_1_jac(theta)) np.testing.assert_allclose(p_jac[4, 2], sph_legendre_p_4_2_jac(theta)) np.testing.assert_allclose(p_jac[4, 3], sph_legendre_p_4_3_jac(theta)) np.testing.assert_allclose(p_jac[4, 4], sph_legendre_p_4_4_jac(theta)) np.testing.assert_allclose(p_jac[4, -4], sph_legendre_p_4_m4_jac(theta)) np.testing.assert_allclose(p_jac[4, -3], sph_legendre_p_4_m3_jac(theta)) np.testing.assert_allclose(p_jac[4, -2], sph_legendre_p_4_m2_jac(theta)) np.testing.assert_allclose(p_jac[4, -1], 
sph_legendre_p_4_m1_jac(theta)) @pytest.mark.parametrize("shape", [(10,), (4, 9), (3, 5, 7, 10)]) def test_ode(self, shape): rng = np.random.default_rng(1234) n = rng.integers(0, 10, shape) m = rng.integers(-10, 10, shape) theta = rng.uniform(-np.pi, np.pi, shape) p, p_jac, p_hess = sph_legendre_p(n, m, theta, diff_n=2) assert p.shape == shape assert p_jac.shape == p.shape assert p_hess.shape == p_jac.shape np.testing.assert_allclose(np.sin(theta) * p_hess, -np.cos(theta) * p_jac - (n * (n + 1) * np.sin(theta) - m * m / np.sin(theta)) * p, rtol=1e-05, atol=1e-08) class TestLegendreFunctions: def test_clpmn(self): z = 0.5+0.3j clp = special.clpmn(2, 2, z, 3) assert_array_almost_equal(clp, (np.array([[1.0000, z, 0.5*(3*z*z-1)], [0.0000, np.sqrt(z*z-1), 3*z*np.sqrt(z*z-1)], [0.0000, 0.0000, 3*(z*z-1)]]), np.array([[0.0000, 1.0000, 3*z], [0.0000, z/np.sqrt(z*z-1), 3*(2*z*z-1)/np.sqrt(z*z-1)], [0.0000, 0.0000, 6*z]])), 7) def test_clpmn_close_to_real_2(self): eps = 1e-10 m = 1 n = 3 x = 0.5 clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n] clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n] assert_array_almost_equal(np.array([clp_plus, clp_minus]), np.array([special.lpmv(m, n, x), special.lpmv(m, n, x)]), 7) def test_clpmn_close_to_real_3(self): eps = 1e-10 m = 1 n = 3 x = 0.5 clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n] clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n] assert_array_almost_equal(np.array([clp_plus, clp_minus]), np.array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi), special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]), 7) def test_clpmn_across_unit_circle(self): eps = 1e-7 m = 1 n = 1 x = 1j for type in [2, 3]: assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n], special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6) def test_inf(self): for z in (1, -1): for n in range(4): for m in range(1, n): lp = special.clpmn(m, n, z) assert np.isinf(lp[1][1,1:]).all() lp = special.lpmn(m, n, z) assert np.isinf(lp[1][1,1:]).all() def 
test_deriv_clpmn(self): # data inside and outside of the unit circle zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j, 1+1j, -1+1j, -1-1j, 1-1j] m = 2 n = 3 for type in [2, 3]: for z in zvals: for h in [1e-3, 1e-3j]: approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0] - special.clpmn(m, n, z-0.5*h, type)[0])/h assert_allclose(special.clpmn(m, n, z, type)[1], approx_derivative, rtol=1e-4) """ @pytest.mark.parametrize("m_max", [3]) @pytest.mark.parametrize("n_max", [5]) @pytest.mark.parametrize("z", [-1]) def test_clpmn_all_limits(self, m_max, n_max, z): rng = np.random.default_rng(1234) type = 2 p, p_jac = special.clpmn_all(m_max, n_max, type, z, diff_n=1) n = np.arange(n_max + 1) np.testing.assert_allclose(p_jac[0], pow(z, n + 1) * n * (n + 1) / 2) np.testing.assert_allclose(p_jac[1], np.where(n >= 1, pow(z, n) * np.inf, 0)) np.testing.assert_allclose(p_jac[2], np.where(n >= 2, -pow(z, n + 1) * (n + 2) * (n + 1) * n * (n - 1) / 4, 0)) np.testing.assert_allclose(p_jac[-2], np.where(n >= 2, -pow(z, n + 1) / 4, 0)) np.testing.assert_allclose(p_jac[-1], np.where(n >= 1, -pow(z, n) * np.inf, 0)) for m in range(3, m_max + 1): np.testing.assert_allclose(p_jac[m], 0) np.testing.assert_allclose(p_jac[-m], 0) """ def test_lpmv(self): lp = special.lpmv(0,2,.5) assert_almost_equal(lp,-0.125,7) lp = special.lpmv(0,40,.001) assert_almost_equal(lp,0.1252678976534484,7) # XXX: this is outside the domain of the current implementation, # so ensure it returns a NaN rather than a wrong answer. 
with np.errstate(all='ignore'): lp = special.lpmv(-1,-1,.001) assert lp != 0 or np.isnan(lp) def test_lqmn(self): lqmnf = special.lqmn(0,2,.5) lqf = special.lqn(2,.5) assert_array_almost_equal(lqmnf[0][0],lqf[0],4) assert_array_almost_equal(lqmnf[1][0],lqf[1],4) def test_lqmn_gt1(self): """algorithm for real arguments changes at 1.0001 test against analytical result for m=2, n=1 """ x0 = 1.0001 delta = 0.00002 for x in (x0-delta, x0+delta): lq = special.lqmn(2, 1, x)[0][-1, -1] expected = 2/(x*x-1) assert_almost_equal(lq, expected) def test_lqmn_shape(self): a, b = special.lqmn(4, 4, 1.1) assert_equal(a.shape, (5, 5)) assert_equal(b.shape, (5, 5)) a, b = special.lqmn(4, 0, 1.1) assert_equal(a.shape, (5, 1)) assert_equal(b.shape, (5, 1)) def test_lqn(self): lqf = special.lqn(2,.5) assert_array_almost_equal(lqf,(np.array([0.5493, -0.7253, -0.8187]), np.array([1.3333, 1.216, -0.8427])),4) @pytest.mark.parametrize("function", [special.lpn, special.lqn]) @pytest.mark.parametrize("n", [1, 2, 4, 8, 16, 32]) @pytest.mark.parametrize("z_complex", [False, True]) @pytest.mark.parametrize("z_inexact", [False, True]) @pytest.mark.parametrize( "input_shape", [ (), (1, ), (2, ), (2, 1), (1, 2), (2, 2), (2, 2, 1), (2, 2, 2) ] ) def test_array_inputs_lxn(self, function, n, z_complex, z_inexact, input_shape): """Tests for correct output shapes.""" rng = np.random.default_rng(1234) if z_inexact: z = rng.integers(-3, 3, size=input_shape) else: z = rng.uniform(-1, 1, size=input_shape) if z_complex: z = 1j * z + 0.5j * z P_z, P_d_z = function(n, z) assert P_z.shape == (n + 1, ) + input_shape assert P_d_z.shape == (n + 1, ) + input_shape @pytest.mark.parametrize("function", [special.lqmn]) @pytest.mark.parametrize( "m,n", [(0, 1), (1, 2), (1, 4), (3, 8), (11, 16), (19, 32)] ) @pytest.mark.parametrize("z_inexact", [False, True]) @pytest.mark.parametrize( "input_shape", [ (), (1, ), (2, ), (2, 1), (1, 2), (2, 2), (2, 2, 1) ] ) def test_array_inputs_lxmn(self, function, m, n, z_inexact, 
input_shape): """Tests for correct output shapes and dtypes.""" rng = np.random.default_rng(1234) if z_inexact: z = rng.integers(-3, 3, size=input_shape) else: z = rng.uniform(-1, 1, size=input_shape) P_z, P_d_z = function(m, n, z) assert P_z.shape == (m + 1, n + 1) + input_shape assert P_d_z.shape == (m + 1, n + 1) + input_shape @pytest.mark.parametrize("function", [special.clpmn, special.lqmn]) @pytest.mark.parametrize( "m,n", [(0, 1), (1, 2), (1, 4), (3, 8), (11, 16), (19, 32)] ) @pytest.mark.parametrize( "input_shape", [ (), (1, ), (2, ), (2, 1), (1, 2), (2, 2), (2, 2, 1) ] ) def test_array_inputs_clxmn(self, function, m, n, input_shape): """Tests for correct output shapes and dtypes.""" rng = np.random.default_rng(1234) z = rng.uniform(-1, 1, size=input_shape) z = 1j * z + 0.5j * z P_z, P_d_z = function(m, n, z) assert P_z.shape == (m + 1, n + 1) + input_shape assert P_d_z.shape == (m + 1, n + 1) + input_shape def assoc_legendre_factor(n, m, norm): if norm: return (math.sqrt((2 * n + 1) * math.factorial(n - m) / (2 * math.factorial(n + m)))) return 1 def assoc_legendre_p_0_0(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(0, 0, norm) return np.full_like(z, fac) def assoc_legendre_p_1_0(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(1, 0, norm) return fac * z def assoc_legendre_p_1_1(z, *, branch_cut=2, norm=False): branch_sign = np.where(branch_cut == 3, np.where(np.signbit(np.real(z)), 1, -1), -1) branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(1, 1, norm) w = np.sqrt(np.where(branch_cut == 3, z * z - 1, 1 - z * z)) return branch_cut_sign * branch_sign * fac * w def assoc_legendre_p_1_m1(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(1, -1, norm) return (-branch_cut_sign * fac * assoc_legendre_p_1_1(z, branch_cut=branch_cut) / 2) def assoc_legendre_p_2_0(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(2, 0, norm) return fac * 
(3 * z * z - 1) / 2 def assoc_legendre_p_2_1(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(2, 1, norm) return (3 * fac * z * assoc_legendre_p_1_1(z, branch_cut=branch_cut)) def assoc_legendre_p_2_2(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(2, 2, norm) return 3 * branch_cut_sign * fac * (1 - z * z) def assoc_legendre_p_2_m2(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(2, -2, norm) return branch_cut_sign * fac * (1 - z * z) / 8 def assoc_legendre_p_2_m1(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(2, -1, norm) return (-branch_cut_sign * fac * z * assoc_legendre_p_1_1(z, branch_cut=branch_cut) / 2) def assoc_legendre_p_3_0(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(3, 0, norm) return fac * (5 * z * z - 3) * z / 2 def assoc_legendre_p_3_1(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(3, 1, norm) return (3 * fac * (5 * z * z - 1) * assoc_legendre_p_1_1(z, branch_cut=branch_cut) / 2) def assoc_legendre_p_3_2(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(3, 2, norm) return 15 * branch_cut_sign * fac * (1 - z * z) * z def assoc_legendre_p_3_3(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(3, 3, norm) return (15 * branch_cut_sign * fac * (1 - z * z) * assoc_legendre_p_1_1(z, branch_cut=branch_cut)) def assoc_legendre_p_3_m3(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(3, -3, norm) return (fac * (z * z - 1) * assoc_legendre_p_1_1(z, branch_cut=branch_cut) / 48) def assoc_legendre_p_3_m2(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(3, -2, norm) return branch_cut_sign * fac * (1 - z * z) * z / 8 def 
assoc_legendre_p_3_m1(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(3, -1, norm) return (branch_cut_sign * fac * (1 - 5 * z * z) * assoc_legendre_p_1_1(z, branch_cut=branch_cut) / 8) def assoc_legendre_p_4_0(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(4, 0, norm) return fac * ((35 * z * z - 30) * z * z + 3) / 8 def assoc_legendre_p_4_1(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(4, 1, norm) return (5 * fac * (7 * z * z - 3) * z * assoc_legendre_p_1_1(z, branch_cut=branch_cut) / 2) def assoc_legendre_p_4_2(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(4, 2, norm) return 15 * branch_cut_sign * fac * ((8 - 7 * z * z) * z * z - 1) / 2 def assoc_legendre_p_4_3(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(4, 3, norm) return (105 * branch_cut_sign * fac * (1 - z * z) * z * assoc_legendre_p_1_1(z, branch_cut=branch_cut)) def assoc_legendre_p_4_4(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(4, 4, norm) return 105 * fac * np.square(z * z - 1) def assoc_legendre_p_4_m4(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(4, -4, norm) return fac * np.square(z * z - 1) / 384 def assoc_legendre_p_4_m3(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(4, -3, norm) return (fac * (z * z - 1) * z * assoc_legendre_p_1_1(z, branch_cut=branch_cut) / 48) def assoc_legendre_p_4_m2(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(4, -2, norm) return branch_cut_sign * fac * ((8 - 7 * z * z) * z * z - 1) / 48 def assoc_legendre_p_4_m1(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(4, -1, norm) return (branch_cut_sign * fac * (3 - 7 * z * z) * z * assoc_legendre_p_1_1(z, branch_cut=branch_cut) / 8) def 
assoc_legendre_p_1_1_jac_div_z(z, branch_cut=2): branch_sign = np.where(branch_cut == 3, np.where(np.signbit(np.real(z)), 1, -1), -1) out11_div_z = (-branch_sign / np.sqrt(np.where(branch_cut == 3, z * z - 1, 1 - z * z))) return out11_div_z def assoc_legendre_p_0_0_jac(z, *, branch_cut=2, norm=False): return np.zeros_like(z) def assoc_legendre_p_1_0_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(1, 0, norm) return np.full_like(z, fac) def assoc_legendre_p_1_1_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(1, 1, norm) return (fac * z * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut)) def assoc_legendre_p_1_m1_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(1, -1, norm) return (-branch_cut_sign * fac * z * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut) / 2) def assoc_legendre_p_2_0_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(2, 0, norm) return 3 * fac * z def assoc_legendre_p_2_1_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(2, 1, norm) return (3 * fac * (2 * z * z - 1) * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut)) def assoc_legendre_p_2_2_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(2, 2, norm) return -6 * branch_cut_sign * fac * z def assoc_legendre_p_2_m1_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(2, -1, norm) return (branch_cut_sign * fac * (1 - 2 * z * z) * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut) / 2) def assoc_legendre_p_2_m2_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(2, -2, norm) return -branch_cut_sign * fac * z / 4 def assoc_legendre_p_3_0_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(3, 0, norm) return 3 * fac * (5 * z * z - 1) / 2 def 
assoc_legendre_p_3_1_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(3, 1, norm) return (3 * fac * (15 * z * z - 11) * z * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut) / 2) def assoc_legendre_p_3_2_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(3, 2, norm) return 15 * branch_cut_sign * fac * (1 - 3 * z * z) def assoc_legendre_p_3_3_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(3, 3, norm) return (45 * branch_cut_sign * fac * (1 - z * z) * z * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut)) def assoc_legendre_p_3_m3_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(3, -3, norm) return (fac * (z * z - 1) * z * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut) / 16) def assoc_legendre_p_3_m2_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(3, -2, norm) return branch_cut_sign * fac * (1 - 3 * z * z) / 8 def assoc_legendre_p_3_m1_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(3, -1, norm) return (branch_cut_sign * fac * (11 - 15 * z * z) * z * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut) / 8) def assoc_legendre_p_4_0_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(4, 0, norm) return 5 * fac * (7 * z * z - 3) * z / 2 def assoc_legendre_p_4_1_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(4, 1, norm) return (5 * fac * ((28 * z * z - 27) * z * z + 3) * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut) / 2) def assoc_legendre_p_4_2_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(4, 2, norm) return 30 * branch_cut_sign * fac * (4 - 7 * z * z) * z def assoc_legendre_p_4_3_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = 
np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(4, 3, norm) return (105 * branch_cut_sign * fac * ((5 - 4 * z * z) * z * z - 1) * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut)) def assoc_legendre_p_4_4_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(4, 4, norm) return 420 * fac * (z * z - 1) * z def assoc_legendre_p_4_m4_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(4, -4, norm) return fac * (z * z - 1) * z / 96 def assoc_legendre_p_4_m3_jac(z, *, branch_cut=2, norm=False): fac = assoc_legendre_factor(4, -3, norm) return (fac * ((4 * z * z - 5) * z * z + 1) * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut) / 48) def assoc_legendre_p_4_m2_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(4, -2, norm) return branch_cut_sign * fac * (4 - 7 * z * z) * z / 12 def assoc_legendre_p_4_m1_jac(z, *, branch_cut=2, norm=False): branch_cut_sign = np.where(branch_cut == 3, -1, 1) fac = assoc_legendre_factor(4, -1, norm) return (branch_cut_sign * fac * ((27 - 28 * z * z) * z * z - 3) * assoc_legendre_p_1_1_jac_div_z(z, branch_cut=branch_cut) / 8) def sph_legendre_factor(n, m): return assoc_legendre_factor(n, m, norm=True) / np.sqrt(2 * np.pi) def sph_legendre_p_0_0(theta): fac = sph_legendre_factor(0, 0) return np.full_like(theta, fac) def sph_legendre_p_1_0(theta): fac = sph_legendre_factor(1, 0) return fac * np.cos(theta) def sph_legendre_p_1_1(theta): fac = sph_legendre_factor(1, 1) return -fac * np.abs(np.sin(theta)) def sph_legendre_p_1_m1(theta): fac = sph_legendre_factor(1, -1) return fac * np.abs(np.sin(theta)) / 2 def sph_legendre_p_2_0(theta): fac = sph_legendre_factor(2, 0) return fac * (3 * np.square(np.cos(theta)) - 1) / 2 def sph_legendre_p_2_1(theta): fac = sph_legendre_factor(2, 1) return -3 * fac * np.abs(np.sin(theta)) * np.cos(theta) def sph_legendre_p_2_2(theta): fac = sph_legendre_factor(2, 2) return 3 * fac * (1 - 
np.square(np.cos(theta))) def sph_legendre_p_2_m2(theta): fac = sph_legendre_factor(2, -2) return fac * (1 - np.square(np.cos(theta))) / 8 def sph_legendre_p_2_m1(theta): fac = sph_legendre_factor(2, -1) return fac * np.cos(theta) * np.abs(np.sin(theta)) / 2 def sph_legendre_p_3_0(theta): fac = sph_legendre_factor(3, 0) return (fac * (5 * np.square(np.cos(theta)) - 3) * np.cos(theta) / 2) def sph_legendre_p_3_1(theta): fac = sph_legendre_factor(3, 1) return (-3 * fac * (5 * np.square(np.cos(theta)) - 1) * np.abs(np.sin(theta)) / 2) def sph_legendre_p_3_2(theta): fac = sph_legendre_factor(3, 2) return (-15 * fac * (np.square(np.cos(theta)) - 1) * np.cos(theta)) def sph_legendre_p_3_3(theta): fac = sph_legendre_factor(3, 3) return -15 * fac * np.power(np.abs(np.sin(theta)), 3) def sph_legendre_p_3_m3(theta): fac = sph_legendre_factor(3, -3) return fac * np.power(np.abs(np.sin(theta)), 3) / 48 def sph_legendre_p_3_m2(theta): fac = sph_legendre_factor(3, -2) return (-fac * (np.square(np.cos(theta)) - 1) * np.cos(theta) / 8) def sph_legendre_p_3_m1(theta): fac = sph_legendre_factor(3, -1) return (fac * (5 * np.square(np.cos(theta)) - 1) * np.abs(np.sin(theta)) / 8) def sph_legendre_p_4_0(theta): fac = sph_legendre_factor(4, 0) return (fac * (35 * np.square(np.square(np.cos(theta))) - 30 * np.square(np.cos(theta)) + 3) / 8) def sph_legendre_p_4_1(theta): fac = sph_legendre_factor(4, 1) return (-5 * fac * (7 * np.square(np.cos(theta)) - 3) * np.cos(theta) * np.abs(np.sin(theta)) / 2) def sph_legendre_p_4_2(theta): fac = sph_legendre_factor(4, 2) return (-15 * fac * (7 * np.square(np.cos(theta)) - 1) * (np.square(np.cos(theta)) - 1) / 2) def sph_legendre_p_4_3(theta): fac = sph_legendre_factor(4, 3) return -105 * fac * np.power(np.abs(np.sin(theta)), 3) * np.cos(theta) def sph_legendre_p_4_4(theta): fac = sph_legendre_factor(4, 4) return 105 * fac * np.square(np.square(np.cos(theta)) - 1) def sph_legendre_p_4_m4(theta): fac = sph_legendre_factor(4, -4) return fac * 
np.square(np.square(np.cos(theta)) - 1) / 384 def sph_legendre_p_4_m3(theta): fac = sph_legendre_factor(4, -3) return (fac * np.power(np.abs(np.sin(theta)), 3) * np.cos(theta) / 48) def sph_legendre_p_4_m2(theta): fac = sph_legendre_factor(4, -2) return (-fac * (7 * np.square(np.cos(theta)) - 1) * (np.square(np.cos(theta)) - 1) / 48) def sph_legendre_p_4_m1(theta): fac = sph_legendre_factor(4, -1) return (fac * (7 * np.square(np.cos(theta)) - 3) * np.cos(theta) * np.abs(np.sin(theta)) / 8) def sph_legendre_p_0_0_jac(theta): return np.zeros_like(theta) def sph_legendre_p_1_0_jac(theta): fac = sph_legendre_factor(1, 0) return -fac * np.sin(theta) def sph_legendre_p_1_1_jac(theta): fac = sph_legendre_factor(1, 1) return -fac * np.cos(theta) * (2 * np.heaviside(np.sin(theta), 1) - 1) def sph_legendre_p_1_m1_jac(theta): fac = sph_legendre_factor(1, -1) return fac * np.cos(theta) * (2 * np.heaviside(np.sin(theta), 1) - 1) / 2 def sph_legendre_p_2_0_jac(theta): fac = sph_legendre_factor(2, 0) return -3 * fac * np.cos(theta) * np.sin(theta) def sph_legendre_p_2_1_jac(theta): fac = sph_legendre_factor(2, 1) return (3 * fac * (-np.square(np.cos(theta)) * (2 * np.heaviside(np.sin(theta), 1) - 1) + np.abs(np.sin(theta)) * np.sin(theta))) def sph_legendre_p_2_2_jac(theta): fac = sph_legendre_factor(2, 2) return 6 * fac * np.sin(theta) * np.cos(theta) def sph_legendre_p_2_m2_jac(theta): fac = sph_legendre_factor(2, -2) return fac * np.sin(theta) * np.cos(theta) / 4 def sph_legendre_p_2_m1_jac(theta): fac = sph_legendre_factor(2, -1) return (-fac * (-np.square(np.cos(theta)) * (2 * np.heaviside(np.sin(theta), 1) - 1) + np.abs(np.sin(theta)) * np.sin(theta)) / 2) def sph_legendre_p_3_0_jac(theta): fac = sph_legendre_factor(3, 0) return 3 * fac * (1 - 5 * np.square(np.cos(theta))) * np.sin(theta) / 2 def sph_legendre_p_3_1_jac(theta): fac = sph_legendre_factor(3, 1) return (3 * fac * (11 - 15 * np.square(np.cos(theta))) * np.cos(theta) * (2 * np.heaviside(np.sin(theta), 1) - 1) / 
2) def sph_legendre_p_3_2_jac(theta): fac = sph_legendre_factor(3, 2) return 15 * fac * (3 * np.square(np.cos(theta)) - 1) * np.sin(theta) def sph_legendre_p_3_3_jac(theta): fac = sph_legendre_factor(3, 3) return -45 * fac * np.abs(np.sin(theta)) * np.sin(theta) * np.cos(theta) def sph_legendre_p_3_m3_jac(theta): fac = sph_legendre_factor(3, -3) return fac * np.abs(np.sin(theta)) * np.sin(theta) * np.cos(theta) / 16 def sph_legendre_p_3_m2_jac(theta): fac = sph_legendre_factor(3, -2) return fac * (3 * np.square(np.cos(theta)) - 1) * np.sin(theta) / 8 def sph_legendre_p_3_m1_jac(theta): fac = sph_legendre_factor(3, -1) return (-fac * (11 - 15 * np.square(np.cos(theta))) * np.cos(theta) * (2 * np.heaviside(np.sin(theta), 1) - 1) / 8) def sph_legendre_p_4_0_jac(theta): fac = sph_legendre_factor(4, 0) return (-5 * fac * (7 * np.square(np.cos(theta)) - 3) * np.sin(theta) * np.cos(theta) / 2) def sph_legendre_p_4_1_jac(theta): fac = sph_legendre_factor(4, 1) return (5 * fac * (-3 + 27 * np.square(np.cos(theta)) - 28 * np.square(np.square(np.cos(theta)))) * (2 * np.heaviside(np.sin(theta), 1) - 1) / 2) def sph_legendre_p_4_2_jac(theta): fac = sph_legendre_factor(4, 2) return (30 * fac * (7 * np.square(np.cos(theta)) - 4) * np.sin(theta) * np.cos(theta)) def sph_legendre_p_4_3_jac(theta): fac = sph_legendre_factor(4, 3) return (-105 * fac * (4 * np.square(np.cos(theta)) - 1) * np.abs(np.sin(theta)) * np.sin(theta)) def sph_legendre_p_4_4_jac(theta): fac = sph_legendre_factor(4, 4) return (-420 * fac * (np.square(np.cos(theta)) - 1) * np.sin(theta) * np.cos(theta)) def sph_legendre_p_4_m4_jac(theta): fac = sph_legendre_factor(4, -4) return (-fac * (np.square(np.cos(theta)) - 1) * np.sin(theta) * np.cos(theta) / 96) def sph_legendre_p_4_m3_jac(theta): fac = sph_legendre_factor(4, -3) return (fac * (4 * np.square(np.cos(theta)) - 1) * np.abs(np.sin(theta)) * np.sin(theta) / 48) def sph_legendre_p_4_m2_jac(theta): fac = sph_legendre_factor(4, -2) return (fac * (7 * 
np.square(np.cos(theta)) - 4) * np.sin(theta) * np.cos(theta) / 12) def sph_legendre_p_4_m1_jac(theta): fac = sph_legendre_factor(4, -1) return (-fac * (-3 + 27 * np.square(np.cos(theta)) - 28 * np.square(np.square(np.cos(theta)))) * (2 * np.heaviside(np.sin(theta), 1) - 1) / 8)
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@special@tests@test_legendre.py@.PATH_END.py
{ "filename": "print_disabled.py", "repo_name": "AstroGPU/swarm", "repo_path": "swarm_extracted/swarm-master/test/monitors/close_orbits/print_disabled.py", "type": "Python" }
from sys import path path.append('lib') from libswarmng_ext import * from numpy import * from math import * from random import uniform from sys import argv def norm(l): s = 0 for x in l: s += x**2 return sqrt(s) ens = DefaultEnsemble.load_from_text(argv[1]) print "Time, Body number 1, Body number 2, Distance between bodies" for i in range(0,ens.nsys): if ens[i].state == -1: print ens[i].time, for j in range(1,ens.nbod): for k in range(1,j): a = ens[i][j].pos b = ens[i][k].pos dist = sqrt((a[0]-b[0])**2 +(a[1]-b[1])**2+(a[2]-b[2])**2) if dist < .01: print j,k, dist, print ""
AstroGPUREPO_NAMEswarmPATH_START.@swarm_extracted@swarm-master@test@monitors@close_orbits@print_disabled.py@.PATH_END.py
{ "filename": "_lightposition.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/surface/_lightposition.py", "type": "Python" }
import _plotly_utils.basevalidators class LightpositionValidator(_plotly_utils.basevalidators.CompoundValidator): def __init__(self, plotly_name="lightposition", parent_name="surface", **kwargs): super(LightpositionValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, data_class_str=kwargs.pop("data_class_str", "Lightposition"), data_docs=kwargs.pop( "data_docs", """ x Numeric vector, representing the X coordinate for each vertex. y Numeric vector, representing the Y coordinate for each vertex. z Numeric vector, representing the Z coordinate for each vertex. """, ), **kwargs )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@surface@_lightposition.py@.PATH_END.py
{ "filename": "install_egg_info.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/setuptools/py3/setuptools/_distutils/command/install_egg_info.py", "type": "Python" }
""" distutils.command.install_egg_info Implements the Distutils 'install_egg_info' command, for installing a package's PKG-INFO metadata. """ import os import re import sys from .. import dir_util from .._log import log from ..cmd import Command class install_egg_info(Command): """Install an .egg-info file for the package""" description = "Install package's PKG-INFO metadata as an .egg-info file" user_options = [ ('install-dir=', 'd', "directory to install to"), ] def initialize_options(self): self.install_dir = None @property def basename(self): """ Allow basename to be overridden by child class. Ref pypa/distutils#2. """ return "%s-%s-py%d.%d.egg-info" % ( to_filename(safe_name(self.distribution.get_name())), to_filename(safe_version(self.distribution.get_version())), *sys.version_info[:2], ) def finalize_options(self): self.set_undefined_options('install_lib', ('install_dir', 'install_dir')) self.target = os.path.join(self.install_dir, self.basename) self.outputs = [self.target] def run(self): target = self.target if os.path.isdir(target) and not os.path.islink(target): dir_util.remove_tree(target, dry_run=self.dry_run) elif os.path.exists(target): self.execute(os.unlink, (self.target,), "Removing " + target) elif not os.path.isdir(self.install_dir): self.execute( os.makedirs, (self.install_dir,), "Creating " + self.install_dir ) log.info("Writing %s", target) if not self.dry_run: with open(target, 'w', encoding='UTF-8') as f: self.distribution.metadata.write_pkg_file(f) def get_outputs(self): return self.outputs # The following routines are taken from setuptools' pkg_resources module and # can be replaced by importing them from pkg_resources once it is included # in the stdlib. def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. 
""" return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """Convert an arbitrary string to a standard version string Spaces become dots, and all other non-alphanumeric characters become dashes, with runs of multiple dashes condensed to a single dash. """ version = version.replace(' ', '.') return re.sub('[^A-Za-z0-9.]+', '-', version) def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-', '_')
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@setuptools@py3@setuptools@_distutils@command@install_egg_info.py@.PATH_END.py
{ "filename": "_familysrc.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/heatmap/hoverlabel/font/_familysrc.py", "type": "Python" }
import _plotly_utils.basevalidators class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__( self, plotly_name="familysrc", parent_name="heatmap.hoverlabel.font", **kwargs ): super(FamilysrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "none"), role=kwargs.pop("role", "info"), **kwargs )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@heatmap@hoverlabel@font@_familysrc.py@.PATH_END.py
{ "filename": "makeflat.py", "repo_name": "HiPERCAM/hipercam", "repo_path": "hipercam_extracted/hipercam-master/hipercam/scripts/makeflat.py", "type": "Python" }
import sys import os import tempfile import signal import numpy as np from trm import cline from trm.cline import Cline import hipercam as hcam from hipercam import utils, spooler __all__ = [ "makeflat", ] #################################################### # # makeflat -- makes flat fields from a set of frames # #################################################### def makeflat(args=None): """``makeflat [source] (run first last [twait tmax] | flist) bias dark ngroup ccd lower upper [clobber] output`` Averages a set of images to make a flat field. Typically flat-fields for HiPERCAM and ULTRA(CAM|SPEC) are taken with a strongly time-variable twilight sky as the Sun sets or rises. A typical flat field run may start out bright, or even saturated, but by the end be only a few thousand counts above bias. Moreover, there are very often stars visible in the images, so we usually take them while offsetting the telescope in a spiral pattern. The challenge is to combine these images while rejecting the stars and saturated frames and giving due weight to the better exposed images. This moreover has to be done for each CCD which vary significantly in sensitivity. 'makeflat' does this as follows: given an input list of files (or optionally a single run), it reads them all in, debiases them (optionally), and calculates the mean count level in each CCD, normalises by the mean and writes out the results to temporary files. For each CCD it then sorts the files by their (original) mean level, and for those that lie between defined limits it takes the median of the mean-mormalised frames in groups of defined size. Thus, say one had 75 OK images, then these would be divided into 10 groups, the first 9 having 7 frames, the last having 16. The median average of each of these would be taken. In each case the mean levels would be adjusted to be the same before taking the average to overcome the problem of taking a median of a time-variable sky. 
The assumption is that while the level may vary, the pattern of the image does not. It is up to the user to check that this is correct. Each of the medians is adjusted to have a mean equal to the sum of the means of the input frames. Finally the normal average of all of these median frames is taken and the mean level of the final output normalised to 1. The first step, taking the median in groups is designed to remove the stars assuming that the telescope was spiralled. The size of the groups ('ngroup' below is a crucial parameter in whether this works). A good strategy is to run makeflat for a succession of ever larger 'ngroup' and then to divide the results into each other to see if stars are visible. The final step, the average of the medians with adjusted mean levels, is to ensure that the flats are combined in a way that reflects the level of signal that they have, i.e. to avoid giving equal weights to the median of a series of flats with 20,000 counts per pixel and another series with 1,000 counts per pixel. This somewhat complex procedure is implemented through a series of temporary files which are written and read as the script runs, but deleted at its end. This allows very large numbers to be combined as long as there is enough memory to load 'ngroup' CCDs simultaneously, which should usually be fine. Parameters: source : str [hidden] Data source, five options: | 'hs' : HiPERCAM server | 'hl' : local HiPERCAM FITS file | 'us' : ULTRACAM server | 'ul' : local ULTRACAM .xml/.dat files | 'hf' : list of HiPERCAM hcm FITS-format files 'hf' is used to look at sets of frames generated by 'grab' or converted from foreign data formats. The standard start-off default for ``source`` can be set using the environment variable HIPERCAM_DEFAULT_SOURCE. e.g. in bash :code:`export HIPERCAM_DEFAULT_SOURCE="us"` would ensure it always started with the ULTRACAM server by default. If unspecified, it defaults to 'hl'. 
run : str [if source ends 's' or 'l'] run number to access, e.g. 'run034' first : int [if source ends 's' or 'l'] exposure number to start from. 1 = first frame ('0' is not supported). last : int [if source ends 's' or 'l'] last exposure number must be >= first or 0 for the whole lot. twait : float [if source ends 's' or 'l'; hidden] time to wait between attempts to find a new exposure, seconds. tmax : float [if source ends 's' or 'l'; hidden] maximum time to wait between attempts to find a new exposure, seconds. flist : str [if source ends 'f'] name of file list. Assumed that these are dias and dark corrected. bias : str Name of bias frame to subtract, 'none' to ignore. dark : str Name of dark frame to subtract, 'none' to ignore. Note that it is assumed all CCDs have the same exposure time when making a dark correction. ngroup : int the number of frames. Probably should be at least 5, preferably more. Experiment to see its effect. ccd : str CCD(s) to process, '0' for all, '1 3' for '1' and '3' only, etc. Would almost always expect this to be set = '0'. lower : list of floats Lower limits to the mean count level for a flat to be included. The count level is determined after bias subtraction. Should be the same number as the selected CCDs, and will be assumed to be in the same order. Use this to elminate frames that are of so low a level that the accuracy of the bias subtraction could be a worry. Suggested hipercam values: 3000 for each CCD. Enter values separated by spaces. upper : list of floats Upper limits to the mean count level for a flat to be included. The count level is determined *after* bias subtraction. Should be the same number as the selected CCDs, and will be assumed to be in the same order. Use this to eliminate saturated, peppered or non-linear frames. Suggested hipercam values: 58000, 58000, 58000, 40000 and 40000 for CCDs 1, 2, 3, 4 and 5. Enter values separated by spaces. ULTRACAM values 49000, 29000, 27000 for CCDs 1, 2 and 3. 
clobber : bool [hidden] clobber any pre-existing output files output : str output file. will be set by default to match the input name. .. Note:: This routine writes the files returned by 'grab' to automatically generated files, typically in .hipercam/tmp, to avoid polluting the working directory. These are removed at the end, but may not be if you ctrl-C. You should check .hipercam/tmp for redundant files every so often """ command, args = cline.script_args(args) # get the inputs with Cline("HIPERCAM_ENV", ".hipercam", command, args) as cl: # register parameters cl.register("source", Cline.GLOBAL, Cline.HIDE) cl.register("run", Cline.GLOBAL, Cline.PROMPT) cl.register("first", Cline.LOCAL, Cline.PROMPT) cl.register("last", Cline.LOCAL, Cline.PROMPT) cl.register("twait", Cline.LOCAL, Cline.HIDE) cl.register("tmax", Cline.LOCAL, Cline.HIDE) cl.register("flist", Cline.LOCAL, Cline.PROMPT) cl.register("bias", Cline.LOCAL, Cline.PROMPT) cl.register("dark", Cline.LOCAL, Cline.PROMPT) cl.register("ngroup", Cline.LOCAL, Cline.PROMPT) cl.register("ccd", Cline.LOCAL, Cline.PROMPT) cl.register("lower", Cline.LOCAL, Cline.PROMPT) cl.register("upper", Cline.LOCAL, Cline.PROMPT) cl.register("clobber", Cline.LOCAL, Cline.HIDE) cl.register("output", Cline.LOCAL, Cline.PROMPT) # get inputs default_source = os.environ.get('HIPERCAM_DEFAULT_SOURCE','hl') source = cl.get_value( "source", "data source [hs, hl, us, ul, hf]", default_source, lvals=("hs", "hl", "us", "ul", "hf"), ) # set a flag server_or_local = source.endswith("s") or source.endswith("l") if server_or_local: resource = cl.get_value("run", "run name", "run005") root = os.path.basename(resource) cl.set_default('output', cline.Fname(root, hcam.HCAM)) first = cl.get_value("first", "first frame to average", 1, 1) last = cl.get_value("last", "last frame to average (0 for all)", first, 0) if last < first and last != 0: sys.stderr.write("last must be >= first or 0") sys.exit(1) twait = cl.get_value( "twait", "time to wait for a 
new frame [secs]", 1.0, 0.0 ) tmax = cl.get_value( "tmax", "maximum time to wait for a new frame [secs]", 10.0, 0.0 ) else: resource = cl.get_value( "flist", "file list", cline.Fname("files.lis", hcam.LIST) ) first = 1 # bias frame (if any) bias = cl.get_value( "bias", "bias frame ['none' to ignore]", cline.Fname("bias", hcam.HCAM), ignore="none", ) # dark frame (if any) dark = cl.get_value( "dark", "dark frame ['none' to ignore]", cline.Fname("dark", hcam.HCAM), ignore="none", ) ngroup = cl.get_value( "ngroup", "number of frames per median average group", 3, 1 ) ccdinf = spooler.get_ccd_pars(source, resource) if len(ccdinf) > 1: ccd = cl.get_value("ccd", "CCD(s) to process [0 for all]", "0") if ccd == "0": ccds = list(ccdinf.keys()) else: ccds = ccd.split() else: ccds = list(ccdinf.keys()) # need to check that the default has the right number of items, if not # overr-ride it lowers = cl.get_default("lower") if lowers is not None and len(lowers) != len(ccds): cl.set_default("lower", len(ccds) * (5000,)) lowers = cl.get_value( "lower", "lower limits on mean count level for included flats, 1 per CCD", len(ccds) * (5000,) ) uppers = cl.get_default("upper") if uppers is not None and len(uppers) != len(ccds): cl.set_default("upper", len(ccds) * (50000,)) uppers = cl.get_value( "upper", "lower limits on mean count level for included flats, 1 per CCD", len(ccds) * (50000,) ) clobber = cl.get_value( "clobber", "clobber any pre-existing files on output", False ) output = cl.get_value( "output", "output average", cline.Fname( "hcam", hcam.HCAM, cline.Fname.NEW if clobber else cline.Fname.NOCLOBBER ) ) # inputs done with. 
if server_or_local or bias is not None or dark is not None: print("\nCalling 'grab' ...") args = [None, "prompt", source, "yes", resource] if server_or_local: args += [str(first), str(last),str(twait), str(tmax)] args += [ "no", "none" if bias is None else bias, "none" if dark is None else dark, "none", "none", "f32", ] resource = hcam.scripts.grab(args) # at this point 'resource' is a list of files, no matter the input # method. 'fnames' below will be used to store still more temporaries fnames = [] with CleanUp( resource, fnames, server_or_local or bias is not None or dark is not None ) as cleanup: # Read all the files to determine mean levels (after bias # subtraction) save the bias-subtracted, mean-level normalised # results to temporary files print("Reading all files in to determine their mean levels") means = {cnam: {} for cnam in ccds} medians = {cnam: {} for cnam in ccds} # We might have a load of temporaries from grab, but we are about to # make some more to save the normalised versions. tdir = utils.temp_dir() with spooler.HcamListSpool(resource) as spool: for mccd in spool: # here we determine the mean and median levels, store them # then normalise the CCDs by the mean and save the files # to disk # generate the name to save to automatically fd, fname = tempfile.mkstemp(suffix=hcam.HCAM, dir=tdir) for cnam in ccds: # its unlikely that flats would be taken with skips, but # you never know. Eliminate them from consideration now. ccd = mccd[cnam] if ccd.is_data(): cmean = ccd.mean() means[cnam][fname] = cmean cmedian = ccd.median() medians[cnam][fname] = cmedian mccd[cnam] /= cmean # write the disk, save the name, close the filehandle fnames.append(fname) mccd.write(fname) os.close(fd) # a bit of progress info print(f"Saved processed flat to {fname}") # now we go through CCD by CCD, using the first as a template # for the window names in which we will also store the results. 
template = hcam.MCCD.read(fnames[0]) for cnam, lower, upper in zip(ccds, lowers, uppers): tccd = template[cnam] # get the keys (filenames) and corresponding mean values mkeys = np.array(list(means[cnam].keys())) mvals = np.array([means[cnam][fname] for fname in mkeys]) meds = np.array([medians[cnam][fname] for fname in mkeys]) # chop down to acceptable ones ok = (mvals > lower) & (mvals < upper) & (meds > lower) & (meds < upper) mkeys = mkeys[ok] mvals = mvals[ok] # some more progress info print("Found {:d} frames for CCD {:s}".format(len(mkeys), cnam)) if len(mkeys) == 0: print( (".. cannot average 0 frames;" " will skip CCD {:s}").format(cnam) ) continue elif len(mkeys) < ngroup: print( ( "WARNING: fewer than ngroup = {:d} frames" " found. Output for CCD {:s} could be poor" ).format(ngroup, cnam) ) nchunk = len(mkeys) // ngroup if nchunk == 0: nchunk = 1 # sort by mean value isort = mvals.argsort() mvals = mvals[isort] mkeys = mkeys[isort] # wsum used to sum all the eight factors to allow overall # normalisation at the end of the loop wsum = 0.0 for n in range(nchunk): # loop through in chunks of ngroup at a time with a # potentially larger group to sweep up the end ones. n1 = ngroup * n n2 = n1 + ngroup if n == nchunk: n2 = len(mkeys) # load the CCDs of this group ccdgroup = [] with spooler.HcamListSpool(list(mkeys[n1:n2]), cnam) as spool: for ccd in spool: ccdgroup.append(ccd) # take median of the group to get rid of jumping # stars. 'weight' used to weight the results when summing the # results together. this stage is like the 'n' option of # 'combine' except we have already cut out any junk frames and # we have normalised the remainder weight = mvals[n1:n2].sum() wsum += weight for wnam, wind in tccd.items(): # go through each window, building a list of all data # arrays arrs = [ccd[wnam].data for ccd in ccdgroup] arr3d = np.stack(arrs) # at this point, arr3d is a 3D array, with the first # dimension (axis=0) running over the images. 
We take the # median over this axis. The first time through we put # this straight into the output Window. afterwards we add # it in (with the appropriate weight) if n == 0: wind.data = weight * np.median(arr3d, axis=0) else: wind.data += weight * np.median(arr3d, axis=0) # Normalise the final result to a mean = 1. tccd /= wsum # Add some history tccd.head.add_history( ("result of makeflat on {:d}" " frames, ngroup = {:d}").format( len(mkeys), ngroup ) ) # Remove any CCDs not included to avoid impression of having done # something to them dcnams = [] for cnam in template.keys(): if cnam not in ccds: dcnams.append(cnam) for cnam in dcnams: del template[cnam] # write out template.write(output, clobber) print("\nFinal result written to {:s}".format(output)) print('makeflat finished') class CleanUp: """ Context manager to handle temporary files """ def __init__(self, flist, fnames, temp): self.flist = flist self.fnames = fnames self.temp = temp def _sigint_handler(self, signal_received, frame): print("\nmakeflat aborted") sys.exit(1) def __enter__(self): signal.signal(signal.SIGINT, self._sigint_handler) def __exit__(self, type, value, traceback): if self.temp: with open(self.flist) as fp: for line in fp: os.remove(line.strip()) os.remove(self.flist) for fname in self.fnames: if os.path.exists(fname): os.remove(fname) print('temporary files removed')
HiPERCAMREPO_NAMEhipercamPATH_START.@hipercam_extracted@hipercam-master@hipercam@scripts@makeflat.py@.PATH_END.py
{ "filename": "_ticks.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnel/marker/colorbar/_ticks.py", "type": "Python" }
import _plotly_utils.basevalidators


class TicksValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``ticks`` property of ``funnel.marker.colorbar``."""

    def __init__(
        self, plotly_name="ticks", parent_name="funnel.marker.colorbar", **kwargs
    ):
        # Pull the overridable defaults out of kwargs first, then forward
        # whatever remains untouched to the enumerated-validator base class.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["outside", "inside", ""])
        super(TicksValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnel@marker@colorbar@_ticks.py@.PATH_END.py
{ "filename": "test_sampler.py", "repo_name": "minaskar/zeus", "repo_path": "zeus_extracted/zeus-main/tests/test_sampler.py", "type": "Python" }
import numpy as np
import pytest
import zeus


def logp(x):
    """Log-probability of an isotropic unit Gaussian centred at 1."""
    return -0.5 * np.sum((x - 1.0) ** 2.0)


def test_mean(logp=logp, seed=42):
    """Sample mean of the chain should land within 0.1 of the true mean (1)."""
    np.random.seed(seed)
    ndim = np.random.randint(2, 5)
    nwalkers = 2 * ndim
    nsteps = np.random.randint(3000, 5000)
    sampler = zeus.EnsembleSampler(nwalkers, ndim, logp, verbose=False)
    start = np.random.rand(nwalkers, ndim)
    sampler.run_mcmc(start, nsteps)
    flat_chain = sampler.get_chain(flat=True)
    assert np.all(np.abs(np.mean(flat_chain, axis=0) - 1.0) < 0.1)
    # Log-probabilities must be finite both flattened and per-walker.
    assert np.all(np.isfinite(sampler.get_log_prob(flat=True)))
    assert np.all(np.isfinite(sampler.get_log_prob()))


def test_std(logp=logp, seed=42):
    """Sample standard deviation should land within 0.1 of the true value (1)."""
    np.random.seed(seed)
    ndim = np.random.randint(2, 5)
    nwalkers = 2 * ndim
    nsteps = np.random.randint(3000, 5000)
    sampler = zeus.EnsembleSampler(nwalkers, ndim, logp, verbose=False)
    start = np.random.rand(nwalkers, ndim)
    sampler.run_mcmc(start, nsteps)
    flat_chain = sampler.get_chain(flat=True)
    assert np.all(np.abs(np.std(flat_chain, axis=0) - 1.0) < 0.1)


def test_ncall(seed=42):
    """The sampler's call counter must agree with actual log-like evaluations."""
    np.random.seed(seed)

    def loglike(theta):
        # Rosenbrock-style density; counts its own invocations on a
        # function attribute so the total can be compared afterwards.
        assert len(theta) == 5
        a = theta[:-1]
        b = theta[1:]
        loglike.ncalls += 1
        return -2 * (100 * (b - a**2)**2 + (1 - a)**2).sum()

    loglike.ncalls = 0
    ndim = 5
    nwalkers = 2 * ndim
    nsteps = 100
    sampler = zeus.EnsembleSampler(nwalkers, ndim, loglike, verbose=False)
    start = np.random.rand(nwalkers, ndim)
    sampler.run_mcmc(start, nsteps)
    # nwalkers extra calls come from evaluating the starting positions.
    assert loglike.ncalls == sampler.ncall + nwalkers
minaskarREPO_NAMEzeusPATH_START.@zeus_extracted@zeus-main@tests@test_sampler.py@.PATH_END.py
{ "filename": "_visible.py", "repo_name": "plotly/plotly.py", "repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/error_y/_visible.py", "type": "Python" }
import _plotly_utils.basevalidators


class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the ``visible`` property of ``histogram.error_y``."""

    def __init__(
        self, plotly_name="visible", parent_name="histogram.error_y", **kwargs
    ):
        # Pull the overridable default out of kwargs first, then forward
        # whatever remains untouched to the boolean-validator base class.
        edit_type = kwargs.pop("edit_type", "calc")
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@error_y@_visible.py@.PATH_END.py
{ "filename": "ELGtargnonsense.ipynb", "repo_name": "desihub/LSS", "repo_path": "LSS_extracted/LSS-main/Sandbox/ELGtargnonsense.ipynb", "type": "Jupyter Notebook" }
# Things that don't make sense in the distribution of ELG targets ### This notebook is meant to explain what does not make sense in the DESI ELG target distribution and leave room for the solutions. ### Ideally, we would find the explanations before targeting is finalized, and this might help fix some issues before we start amassing the full ELG survey #### import all needed python here ```python import fitsio import numpy as np #from desitarget.io import read_targets_in_hp, read_targets_in_box, read_targets_in_cap import astropy.io.fits as fits import glob import os import healpy as hp from matplotlib import pyplot as plt ``` #### The following imports all of the data generated elsewhere. This includes the DR8 versions of the ELG targets, the randoms, the pixelized maps of conditions, the depth based MC efficiency prediction... ```python #ELGs were saved here elgf = os.getenv('SCRATCH')+'/ELGtargetinfo.fits' felg = fitsio.read(elgf) lelg = len(felg) print('there are '+str(lelg)+ ' elg targets') ``` there are 47256516 elg targets ```python #full random file is available, easy to read some limited number; take 1.5x ELG to start with rall = fitsio.read('/project/projectdirs/desi/target/catalogs/dr8/0.31.0/randomsall/randoms-inside-dr8-0.31.0-all.fits',rows=np.arange(int(1.5*lelg)) ) rall_header = fitsio.read_header('/project/projectdirs/desi/target/catalogs/dr8/0.31.0/randomsall/randoms-inside-dr8-0.31.0-all.fits',ext=1) ``` ```python #cut randoms to ELG footprint keep = (rall['NOBS_G']>0) & (rall['NOBS_R']>0) & (rall['NOBS_Z']>0) print(len(rall[keep])) elgbits = [1,5,6,7,11,12,13] keepelg = keep for bit in elgbits: keepelg &= ((rall['MASKBITS'] & 2**bit)==0) print(len(rall[keepelg])) relg = rall[keepelg] ``` 67762950 64567641 ```python #Some information is in pixelized map #get nside and nest from header pixfn = '/project/projectdirs/desi/target/catalogs/dr8/0.31.1/pixweight/pixweight-dr8-0.31.1.fits' hdr = fits.getheader(pixfn,1) nside,nest = 
hdr['HPXNSIDE'],hdr['HPXNEST'] print(fits.open(pixfn)[1].columns.names) hpq = fitsio.read(pixfn) ``` ['HPXPIXEL', 'FRACAREA', 'STARDENS', 'EBV', 'PSFDEPTH_G', 'PSFDEPTH_R', 'PSFDEPTH_Z', 'GALDEPTH_G', 'GALDEPTH_R', 'GALDEPTH_Z', 'PSFDEPTH_W1', 'PSFDEPTH_W2', 'PSFSIZE_G', 'PSFSIZE_R', 'PSFSIZE_Z', 'ELG', 'LRG', 'QSO', 'BGS_ANY', 'MWS_ANY', 'ALL', 'STD_FAINT', 'STD_BRIGHT', 'LRG_1PASS', 'LRG_2PASS', 'BGS_FAINT', 'BGS_BRIGHT', 'BGS_WISE', 'MWS_BROAD', 'MWS_MAIN_RED', 'MWS_MAIN_BLUE', 'MWS_WD', 'MWS_NEARBY'] ```python #get MC efficiency map mcf = fitsio.read(os.getenv('SCRATCH')+'/ELGMCeffHSCHP.fits') mmc = np.mean(mcf['EFF']) mcl = np.zeros(12*nside*nside) for i in range(0,len(mcf)): pix = mcf['HPXPIXEL'][i] mcl[pix] = mcf['EFF'][i]/mmc ``` ```python #for healpix coordinates def radec2thphi(ra,dec): return (-dec+90.)*np.pi/180.,ra*np.pi/180. ``` ```python # put ELGs them into healpix dth,dphi = radec2thphi(felg['RA'],felg['DEC']) dpix = hp.ang2pix(nside,dth,dphi,nest) dallpix = np.zeros(12*nside*nside) for pix in dpix: dallpix[pix] += 1. ``` ```python #put randoms into healpix rth,rphi = radec2thphi(relg['RA'],relg['DEC']) rpix = hp.ang2pix(nside,rth,rphi,nest=nest) rallpix = np.zeros(12*nside*nside) for pix in rpix: rallpix[pix] += 1. 
``` #### First, to start with something that makes some sense, let's plot the density of ELGs and compare it to the MC prediction ```python #let's take a look at the density mg = len(relg)/len(felg) print(mg,sum(rallpix)/sum(dallpix)) wp = rallpix > 0 pixls = [] for i in range(0,len(rallpix)): if rallpix[i] > 0: pixls.append(i) pixls = np.array(pixls).astype(int) th,phi = hp.pix2ang(nside,pixls,nest=nest) od = dallpix[wp]/rallpix[wp]*mg def thphi2radec(theta,phi): return 180./np.pi*phi,-(180./np.pi*theta-90) ra,dec = thphi2radec(th,phi) ``` 1.3663224982561135 1.3663224982561135 ```python plt.scatter(ra,dec,c=od,vmax=2,vmin=.5,s=.1) plt.colorbar() plt.title('relative ELG target density') plt.show() ``` ![png](output_14_0.png) ```python plt.scatter(ra,dec,c=mcl[wp],vmax=1.1,vmin=.9,s=.1) plt.colorbar() plt.title('predicted relative ELG target density based on depth') plt.show() ``` ![png](output_15_0.png) The depth-based MC **does** predict we should be able to see the DES footprint and the delineation between BASS/MzLS (bmzls) and DECaLS. However, note that it predicts the **opposite** of what is seen in the high declination regions of bmzls. ### Assign quantities to data/randoms While healpix is nice, we can avoid some pixelization effects by assigning quantities directly to the data/randoms. In some cases (stellar density), the pixelization remains, but in most it does not. 
In particular, we assign the different MC predictions for the different DECaLS/bmzls selections ```python grids = np.loadtxt(os.getenv('SCRATCH')+'/ELGeffgridsouth.dat').transpose() #grids[3] = grids[3] gridn = np.loadtxt(os.getenv('SCRATCH')+'/ELGeffgridnorth.dat').transpose() #print(np.mean(gridn[3])) #gridn[3] = gridn[3]/np.mean(gridn[3]) def interpeff(gsig,rsig,zsig,south=True): md = 0 xg = 0.15 #if gsig > xg: # gsig = .99*xg xr = 0.15 #if rsig > xr: # rsig = 0.99*xr xz = 0.4 #if zsig > xz: # zsig = 0.99*xz ngp = 30 if south: grid = grids else: grid = gridn i = (ngp*gsig/(xg-md)).astype(int) j = (ngp*rsig/(xr-md)).astype(int) k = (ngp*zsig/(xz-md)).astype(int) ind = (i*ngp**2.+j*ngp+k).astype(int) #print(i,j,k,ind) #print(grid[0][ind],grid[1][ind],grid[2][ind]) #print(grid[0][ind-1],grid[1][ind-1],grid[2][ind-1]) #print(grid[0][ind+1],grid[1][ind+1],grid[2][ind+1]) return grid[3][ind] ``` ```python #Get depth values that match those used for efficiency grids depth_keyword="PSFDEPTH" R_G=3.214 # http://legacysurvey.org/dr8/catalogs/#galactic-extinction-coefficients R_R=2.165 R_Z=1.211 gsigmad=1./np.sqrt(felg[depth_keyword+"_G"]) rsigmad=1./np.sqrt(felg[depth_keyword+"_R"]) zsigmad=1./np.sqrt(felg[depth_keyword+"_Z"]) gsig = gsigmad*10**(0.4*R_G*felg["EBV"]) w = gsig >= 0.15 gsig[w] = 0.99*0.15 rsig = rsigmad*10**(0.4*R_R*felg["EBV"]) w = rsig >= 0.15 rsig[w] = 0.99*0.15 zsig = zsigmad*10**(0.4*R_Z*felg["EBV"]) w = zsig >= 0.4 zsig[w] = 0.99*0.4 ``` /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:7: RuntimeWarning: divide by zero encountered in true_divide import sys /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:8: RuntimeWarning: divide by zero encountered in true_divide /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:9: RuntimeWarning: divide by zero encountered in true_divide if __name__ == '__main__': 
```python print(min(gsig),max(gsig)) effsouthl = interpeff(gsig,rsig,zsig,south=True) ``` 0.011088011 0.14999883 ```python effnorthl = interpeff(gsig,rsig,zsig,south=False) plt.hist(effnorthl,bins=100) plt.hist(effsouthl,bins=100) plt.title('plot to confirm MC depth assignment went fine') plt.show() ``` ![png](output_21_0.png) #### assign stellar densities to data/random ```python stardensg = np.zeros(len(felg)) print(len(felg),len(dpix)) for i in range(0,len(dpix)): #if i%1000000==0 : print(i) pix = dpix[i] stardensg[i] = hpq['STARDENS'][pix] ``` 47256516 47256516 ```python stardensr = np.zeros(len(relg)) print(len(relg),len(rpix)) for i in range(0,len(rpix)): #if i%1000000==0 : print(i) pix = rpix[i] stardensr[i] = hpq['STARDENS'][pix] ``` 64567641 64567641 ### Divide into three regions: DECaLS SGC, DECaLS NGC, bmzls ```python #let's define split into bmzls, DECaLS North, DECaLS South (Anand has tools to make distinct DES region as well) #one function to do directly, the other just for the indices print(np.unique(felg['PHOTSYS'])) #bmzls = b'N' #if in desi environment bmzls = 'N' #if in Python 3; why the difference? Maybe version of fitsio? 
def splitcat(cat): NN = cat['PHOTSYS'] == bmzls d1 = (cat['PHOTSYS'] != bmzls) & (cat['RA'] < 300) & (cat['RA'] > 100) & (cat['DEC'] > -20) d2 = (d1==0) & (NN ==0) & (cat['DEC'] > -30) return cat[NN],cat[d1],cat[d2] def splitcat_ind(cat): NN = cat['PHOTSYS'] == bmzls d1 = (cat['PHOTSYS'] != bmzls) & (cat['RA'] < 300) & (cat['RA'] > 100) & (cat['DEC'] > -20) d2 = (d1==0) & (NN ==0) & (cat['DEC'] > -30) return NN,d1,d2 ``` ['N' 'S'] ```python #indices for split dbml,ddnl,ddsl = splitcat_ind(felg) rbml,rdnl,rdsl = splitcat_ind(relg) print('number in bmzls, DECaLS N, DECaLS S') print(len(felg[dbml]),len(felg[ddnl]),len(felg[ddsl])) ``` number in bmzls, DECaLS N, DECaLS S 12726178 14094848 13407623 ### Plot density dependence on different quantities, comparing different regions ```python #function to make plot comparing results in three regions def plotrel(hg1,hr1,hg2,hr2,hg3,hr3,title,quant): xl1 = [] xl2 = [] xl3 = [] for i in range(0,len(hg1[0])): xl1.append((hg1[1][i]+hg1[1][i+1])/2.) xl2.append((hg2[1][i]+hg2[1][i+1])/2.) xl3.append((hg3[1][i]+hg3[1][i+1])/2.) 
norm1 = sum(hg1[0])/sum(hr1[0]) plt.errorbar(xl1,hg1[0]/hr1[0]/norm1,np.sqrt(hg1[0])/hr1[0],fmt='ko') #plt.plot(xl1,hgn1[0]/hrn1[0]/norm1,'k:') norm2 = sum(hg2[0])/sum(hr2[0]) plt.errorbar(xl2,hg2[0]/hr2[0]/norm2,np.sqrt(hg2[0])/hr2[0],fmt='rd') #plt.plot(xl2,hgn2[0]/hrn2[0]/norm2,'r:') norm3 = sum(hg3[0])/sum(hr3[0]) plt.errorbar(xl3,hg3[0]/hr3[0]/norm3,np.sqrt(hg3[0])/hr3[0],fmt='b^') #plt.plot(xl3,hgn3[0]/hrn3[0]/norm1,'b:') plt.ylim(.7,1.3) plt.xlabel(quant) plt.ylabel('relative density') plt.legend((['bmzls','DECaLS N','DECaLS S'])) plt.plot(xl2,np.ones(len(xl2)),'k--') plt.title(title) plt.show() ``` #### Start with raw results ```python #g-band depth #bmzls hg1 = np.histogram(felg[dbml]['GALDEPTH_G']*np.exp(-R_G*felg[dbml]['EBV']),range=(0,2000)) hr1 = np.histogram(relg[rbml]['GALDEPTH_G']*np.exp(-R_G*relg[rbml]['EBV']),bins=hg1[1]) #decals N hg2 = np.histogram(felg[ddnl]['GALDEPTH_G']*np.exp(-R_G*felg[ddnl]['EBV']),range=(0,3000)) hr2 = np.histogram(relg[rdnl]['GALDEPTH_G']*np.exp(-R_G*relg[rdnl]['EBV']),bins=hg2[1]) #decals S hg3 = np.histogram(felg[ddsl]['GALDEPTH_G']*np.exp(-R_G*felg[ddsl]['EBV'])) hr3 = np.histogram(relg[rdsl]['GALDEPTH_G']*np.exp(-R_G*relg[rdsl]['EBV']),bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'raw','g-band depth') ``` ![png](output_32_0.png) ```python #r-band depth #bmzls hg1 = np.histogram(felg[dbml]['GALDEPTH_R']*np.exp(-R_R*felg[dbml]['EBV']),range=(0,500)) hr1 = np.histogram(relg[rbml]['GALDEPTH_R']*np.exp(-R_R*relg[rbml]['EBV']),bins=hg1[1]) #decals N hg2 = np.histogram(felg[ddnl]['GALDEPTH_R']*np.exp(-R_R*felg[ddnl]['EBV']),range=(0,1000)) hr2 = np.histogram(relg[rdnl]['GALDEPTH_R']*np.exp(-R_R*relg[rdnl]['EBV']),bins=hg2[1]) #decals S hg3 = np.histogram(felg[ddsl]['GALDEPTH_R']*np.exp(-R_R*felg[ddsl]['EBV']),range=(0,1000)) hr3 = np.histogram(relg[rdsl]['GALDEPTH_R']*np.exp(-R_R*relg[rdsl]['EBV']),bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'raw','r-band depth') ``` ![png](output_34_0.png) 
```python #z-band depth #bmzls hg1 = np.histogram(felg[dbml]['GALDEPTH_Z']*np.exp(-R_Z*felg[dbml]['EBV'])) hr1 = np.histogram(relg[rbml]['GALDEPTH_Z']*np.exp(-R_Z*relg[rbml]['EBV']),bins=hg1[1]) #decals N hg2 = np.histogram(felg[ddnl]['GALDEPTH_Z']*np.exp(-R_Z*felg[ddnl]['EBV'])) hr2 = np.histogram(relg[rdnl]['GALDEPTH_Z']*np.exp(-R_Z*relg[rdnl]['EBV']),bins=hg2[1]) #decals S hg3 = np.histogram(felg[ddsl]['GALDEPTH_Z']*np.exp(-R_Z*felg[ddsl]['EBV'])) hr3 = np.histogram(relg[rdsl]['GALDEPTH_Z']*np.exp(-R_Z*relg[rdsl]['EBV']),bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'raw','z-band depth') ``` ![png](output_36_0.png) #### Now apply MC, with ^2 for DECaLS ```python #g-band depth #bmzls hg1 = np.histogram(felg[dbml]['GALDEPTH_G']*np.exp(-R_G*felg[dbml]['EBV']),weights=1./effnorthl[dbml],range=(0,2000)) hr1 = np.histogram(relg[rbml]['GALDEPTH_G']*np.exp(-R_G*relg[rbml]['EBV']),bins=hg1[1]) #decals N hg2 = np.histogram(felg[ddnl]['GALDEPTH_G']*np.exp(-R_G*felg[ddnl]['EBV']),weights=1./effsouthl[ddnl]**2.,range=(0,3000)) hr2 = np.histogram(relg[rdnl]['GALDEPTH_G']*np.exp(-R_G*relg[rdnl]['EBV']),bins=hg2[1]) #decals S hg3 = np.histogram(felg[ddsl]['GALDEPTH_G']*np.exp(-R_G*felg[ddsl]['EBV']),weights=1./effsouthl[ddsl]**2.) 
hr3 = np.histogram(relg[rdsl]['GALDEPTH_G']*np.exp(-R_G*relg[rdsl]['EBV']),bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'applying depth MC','g-band depth') ``` ![png](output_39_0.png) ```python #r-band depth #bmzls hg1 = np.histogram(felg[dbml]['GALDEPTH_R']*np.exp(-R_R*felg[dbml]['EBV']),weights=1./effnorthl[dbml],range=(0,500)) hr1 = np.histogram(relg[rbml]['GALDEPTH_R']*np.exp(-R_R*relg[rbml]['EBV']),bins=hg1[1]) #decals N hg2 = np.histogram(felg[ddnl]['GALDEPTH_R']*np.exp(-R_R*felg[ddnl]['EBV']),weights=1./effsouthl[ddnl]**2.,range=(0,1000)) hr2 = np.histogram(relg[rdnl]['GALDEPTH_R']*np.exp(-R_R*relg[rdnl]['EBV']),bins=hg2[1]) #decals S hg3 = np.histogram(felg[ddsl]['GALDEPTH_R']*np.exp(-R_R*felg[ddsl]['EBV']),weights=1./effsouthl[ddsl]**2.,range=(0,1000)) hr3 = np.histogram(relg[rdsl]['GALDEPTH_R']*np.exp(-R_R*relg[rdsl]['EBV']),bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'applying depth MC','r-band depth') ``` ![png](output_41_0.png) ```python #z-band depth #bmzls hg1 = np.histogram(felg[dbml]['GALDEPTH_Z']*np.exp(-R_Z*felg[dbml]['EBV']),weights=1./effnorthl[dbml]) hr1 = np.histogram(relg[rbml]['GALDEPTH_Z']*np.exp(-R_Z*relg[rbml]['EBV']),bins=hg1[1]) #decals N hg2 = np.histogram(felg[ddnl]['GALDEPTH_Z']*np.exp(-R_Z*felg[ddnl]['EBV']),weights=1./effsouthl[ddnl]**2.) hr2 = np.histogram(relg[rdnl]['GALDEPTH_Z']*np.exp(-R_Z*relg[rdnl]['EBV']),bins=hg2[1]) #decals S hg3 = np.histogram(felg[ddsl]['GALDEPTH_Z']*np.exp(-R_Z*felg[ddsl]['EBV']),weights=1./effsouthl[ddsl]**2.) hr3 = np.histogram(relg[rdsl]['GALDEPTH_Z']*np.exp(-R_Z*relg[rdsl]['EBV']),bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'applying depth MC','z-band depth') ``` ![png](output_43_0.png) #### Big improvement for g-band, not much for r-band, and a little bit for z band #### Now check stellar density relationship after MC and after approximate linear fit ```python #bmzls slp = -0.2/4000. 
b = 1.1 wsb = 1./(slp*stardensg[dbml]+b) hg1 = np.histogram(stardensg[dbml],weights=1./effnorthl[dbml],range=(0,5000)) hgn1 = np.histogram(stardensg[dbml],bins=hg1[1]) hr1 = np.histogram(stardensr[rbml],bins=hg1[1]) #DECaLS N slp = -0.35/4000. b = 1.1 wsn = 1./(slp*stardensg[ddnl]+b) hg2 = np.histogram(stardensg[ddnl],weights=1./effsouthl[ddnl]**2.,range=(0,5000)) hgn2 = np.histogram(stardensg[ddnl],bins=hg2[1]) hr2 = np.histogram(stardensr[rdnl],bins=hg2[1]) #DECaLS S hg3 = np.histogram(stardensg[ddsl],weights=1./effsouthl[ddsl]**2.,range=(0,5000)) hgn3 = np.histogram(stardensg[ddsl],bins=hg3[1]) hr3 = np.histogram(stardensr[rdsl],bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'applying depth MC','stellar density') ``` ![png](output_46_0.png) ## Question: Why is relationship with stellar density so different between DECaLS N and S? ```python #bmzls slp = -0.2/4000. b = 1.1 wsb = 1./(slp*stardensg[dbml]+b) hg1 = np.histogram(stardensg[dbml],weights=1./effnorthl[dbml]*wsb,range=(0,5000)) hgn1 = np.histogram(stardensg[dbml],bins=hg1[1]) hr1 = np.histogram(stardensr[rbml],bins=hg1[1]) #DECaLS N slp = -0.35/4000. 
b = 1.1 wsn = 1./(slp*stardensg[ddnl]+b) hg2 = np.histogram(stardensg[ddnl],weights=1./effsouthl[ddnl]**2.*wsn,range=(0,5000)) hgn2 = np.histogram(stardensg[ddnl],bins=hg2[1]) hr2 = np.histogram(stardensr[rdnl],bins=hg2[1]) #DECaLS S hg3 = np.histogram(stardensg[ddsl],weights=1./effsouthl[ddsl]**2.,range=(0,5000)) hgn3 = np.histogram(stardensg[ddsl],bins=hg3[1]) hr3 = np.histogram(stardensr[rdsl],bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'applying depth MC and linear stellar density correction','stellar density') ``` ![png](output_49_0.png) #### apply linear stellar density relationship to depth ```python #g-band depth #bmzls hg1 = np.histogram(felg[dbml]['GALDEPTH_G']*np.exp(-R_G*felg[dbml]['EBV']),weights=1./effnorthl[dbml]*wsb,range=(0,2000)) hr1 = np.histogram(relg[rbml]['GALDEPTH_G']*np.exp(-R_G*relg[rbml]['EBV']),bins=hg1[1]) #decals N hg2 = np.histogram(felg[ddnl]['GALDEPTH_G']*np.exp(-R_G*felg[ddnl]['EBV']),weights=1./effsouthl[ddnl]**2.*wsn,range=(0,3000)) hr2 = np.histogram(relg[rdnl]['GALDEPTH_G']*np.exp(-R_G*relg[rdnl]['EBV']),bins=hg2[1]) #decals S hg3 = np.histogram(felg[ddsl]['GALDEPTH_G']*np.exp(-R_G*felg[ddsl]['EBV']),weights=1./effsouthl[ddsl]**2.) 
hr3 = np.histogram(relg[rdsl]['GALDEPTH_G']*np.exp(-R_G*relg[rdsl]['EBV']),bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'applying depth MC and stellar density','g-band depth') ``` ![png](output_52_0.png) ```python #r-band depth #bmzls hg1 = np.histogram(felg[dbml]['GALDEPTH_R']*np.exp(-R_R*felg[dbml]['EBV']),weights=1./effnorthl[dbml]*wsb,range=(0,500)) hr1 = np.histogram(relg[rbml]['GALDEPTH_R']*np.exp(-R_R*relg[rbml]['EBV']),bins=hg1[1]) #decals N hg2 = np.histogram(felg[ddnl]['GALDEPTH_R']*np.exp(-R_R*felg[ddnl]['EBV']),weights=1./effsouthl[ddnl]**2.*wsn,range=(0,1000)) hr2 = np.histogram(relg[rdnl]['GALDEPTH_R']*np.exp(-R_R*relg[rdnl]['EBV']),bins=hg2[1]) #decals S hg3 = np.histogram(felg[ddsl]['GALDEPTH_R']*np.exp(-R_R*felg[ddsl]['EBV']),weights=1./effsouthl[ddsl]**2.,range=(0,1000)) hr3 = np.histogram(relg[rdsl]['GALDEPTH_R']*np.exp(-R_R*relg[rdsl]['EBV']),bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'applying depth MC and stellar density','r-band depth') ``` ![png](output_54_0.png) ```python #z-band depth #bmzls hg1 = np.histogram(felg[dbml]['GALDEPTH_Z']*np.exp(-R_Z*felg[dbml]['EBV']),weights=1./effnorthl[dbml]*wsb) hr1 = np.histogram(relg[rbml]['GALDEPTH_Z']*np.exp(-R_Z*relg[rbml]['EBV']),bins=hg1[1]) #decals N hg2 = np.histogram(felg[ddnl]['GALDEPTH_Z']*np.exp(-R_Z*felg[ddnl]['EBV']),weights=1./effsouthl[ddnl]**2.*wsn) hr2 = np.histogram(relg[rdnl]['GALDEPTH_Z']*np.exp(-R_Z*relg[rdnl]['EBV']),bins=hg2[1]) #decals S hg3 = np.histogram(felg[ddsl]['GALDEPTH_Z']*np.exp(-R_Z*felg[ddsl]['EBV']),weights=1./effsouthl[ddsl]**2.) 
hr3 = np.histogram(relg[rdsl]['GALDEPTH_Z']*np.exp(-R_Z*relg[rdsl]['EBV']),bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'applying depth MC and stellar density','z-band depth') ``` ![png](output_56_0.png) ## So, r-band depth relationship still significant, z-band not perfect ## Keeping MC and stellar density corrections, look at other quantities ```python #Galactic extinction #bmzls hg1 = np.histogram(felg[dbml]['EBV'],weights=1./1./effnorthl[dbml]*wsb,range=(0,0.15)) hr1 = np.histogram(relg[rbml]['EBV'],bins=hg1[1]) #DECaLS N hg2 = np.histogram(felg[ddnl]['EBV'],weights=1./effsouthl[ddnl]**2.*wsn,range=(0,0.15)) hr2 = np.histogram(relg[rdnl]['EBV'],bins=hg2[1]) #DECaLS S hg3 = np.histogram(felg[ddsl]['EBV'],weights=1./effsouthl[ddsl]**2.,range=(0,0.15)) hr3 = np.histogram(relg[rdsl]['EBV'],bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'applying depth MC and stellar density','Galactic extinction (E[B-V])') ``` ![png](output_60_0.png) ## Strong residual dependence on Galactic extinction for bmzls, why? ```python sweep = fitsio.read('/global/cscratch1/sd/rongpu/temp/dr8_sky_residual_south.fits') sweepn = fitsio.read('/global/cscratch1/sd/rongpu/temp/dr8_sky_residual_north.fits') ``` ```python sweep.dtype.names ``` ('RA', 'DEC', 'MASKBITS', 'g_sky', 'r_sky', 'z_sky') ```python elgbits = [1,5,6,7,11,12,13] keeps = np.ones(len(sweep)).astype(bool) for bit in elgbits: keeps &= ((sweep['MASKBITS'] & 2**bit)==0) print(len(sweep[keeps])) sweepm = sweep[keeps] ``` 22582680 ```python plt.hist(sallpixg,bins=100,range=(-0.01,0.01)) plt.show() ``` ![png](output_65_0.png) ```python # put into healpix sth,sphi = radec2thphi(sweepm['RA'],sweepm['DEC']) spix = hp.ang2pix(nside,sth,sphi,nest) sallpix = np.zeros(12*nside*nside) sallpixg = np.zeros(12*nside*nside) for i in range(0,len(spix)): pix = spix[i] sallpix[pix] += 1. 
sallpixg[pix] += sweepm[i]['g_sky'] ``` ```python sallpixr = np.zeros(12*nside*nside) sallpixz = np.zeros(12*nside*nside) for i in range(0,len(spix)): pix = spix[i] sallpixz[pix] += sweepm[i]['z_sky'] sallpixr[pix] += sweepm[i]['r_sky'] ``` ```python w = sallpix > 0 sallpixr[w] = sallpixr[w]/sallpix[w] w = sallpix > 0 sallpixz[w] = sallpixz[w]/sallpix[w] ``` ```python w = sallpix > 0 sallpixg[w] = sallpixg[w]/sallpix[w] ``` ```python gres_r = np.zeros(len(felg)) gres_z = np.zeros(len(felg)) print(len(felg),len(dpix)) for i in range(0,len(dpix)): if i%1000000==0 : print(i) pix = dpix[i] gres_r[i] = sallpixr[pix] gres_z[i] = sallpixz[pix] ``` ```python rres_r = np.zeros(len(relg)) rres_z = np.zeros(len(relg)) print(len(relg),len(rpix)) for i in range(0,len(rpix)): #if i%1000000==0 : print(i) pix = rpix[i] rres_r[i] = sallpixr[pix] rres_z[i] = sallpixz[pix] ``` 64567641 64567641 ```python gres_g = np.zeros(len(felg)) print(len(felg),len(dpix)) for i in range(0,len(dpix)): if i%1000000==0 : print(i) pix = dpix[i] gres_g[i] = sallpixg[pix] ``` 47256516 47256516 0 1000000 2000000 3000000 4000000 5000000 6000000 7000000 8000000 9000000 10000000 11000000 12000000 13000000 14000000 15000000 16000000 17000000 18000000 19000000 20000000 21000000 22000000 23000000 24000000 25000000 26000000 27000000 28000000 29000000 30000000 31000000 32000000 33000000 34000000 35000000 36000000 37000000 38000000 39000000 40000000 41000000 42000000 43000000 44000000 45000000 46000000 47000000 ```python rres_g = np.zeros(len(relg)) print(len(relg),len(rpix)) for i in range(0,len(rpix)): #if i%1000000==0 : print(i) pix = rpix[i] rres_g[i] = sallpixg[pix] ``` 64567641 64567641 ```python #DECaLS N hg2 = np.histogram(gres_g[ddnl],range=(-.002,-.001))#,weights=1./effsouthl[ddnl]**2.,range=(0,5000)) #hgn2 = np.histogram(stardensg[ddnl],bins=hg2[1]) hr2 = np.histogram(rres_g[rdnl],bins=hg2[1]) #DECaLS S hg3 = 
np.histogram(gres_g[ddsl],range=(-.002,-.001))#,range=(-.1,.1))#,weights=1./effsouthl[ddsl]**2.,range=(0,5000)) #hgn3 = np.histogram(stardensg[ddsl],bins=hg3[1]) hr3 = np.histogram(rres_g[rdsl],bins=hg3[1]) #xl1 = [] xl2 = [] xl3 = [] for i in range(0,len(hg2[0])): #xl1.append((hg1[1][i]+hg1[1][i+1])/2.) xl2.append((hg2[1][i]+hg2[1][i+1])/2.) xl3.append((hg3[1][i]+hg3[1][i+1])/2.) #norm1 = sum(hg1[0])/sum(hr1[0]) #plt.errorbar(xl1,hg1[0]/hr1[0]/norm1,np.sqrt(hg1[0])/hr1[0],fmt='ko') #plt.plot(xl1,hgn1[0]/hrn1[0]/norm1,'k:') norm2 = sum(hg2[0])/sum(hr2[0]) plt.errorbar(xl2,hg2[0]/hr2[0]/norm2,np.sqrt(hg2[0])/hr2[0],fmt='rd') #plt.plot(xl2,hgn2[0]/hrn2[0]/norm2,'r:') norm3 = sum(hg3[0])/sum(hr3[0]) plt.errorbar(xl3,hg3[0]/hr3[0]/norm3,np.sqrt(hg3[0])/hr3[0],fmt='b^') #plt.plot(xl3,hgn3[0]/hrn3[0]/norm1,'b:') #plt.ylim(.7,1.3) plt.xlabel('g_res') plt.ylabel('relative density') plt.legend((['DECaLS N','DECaLS S'])) plt.plot(xl2,np.ones(len(xl2)),'k--') #plt.title(title) plt.show() ``` ![png](output_74_0.png) ```python w = gres_g < -0.0015 ``` ```python len(felg[w]) ``` 5460993 ```python len(felg) ``` 47256516 ```python #DECaLS N hg2 = np.histogram(gres_g[ddnl]+gres_r[ddnl]-gres_z[ddnl],range=(-.006,.003))#,weights=1./effsouthl[ddnl]**2.,range=(0,5000)) #hgn2 = np.histogram(stardensg[ddnl],bins=hg2[1]) hr2 = np.histogram(rres_g[rdnl]+rres_r[rdnl]-rres_z[rdnl],bins=hg2[1]) #DECaLS S hg3 = np.histogram(gres_g[ddsl]+gres_r[ddsl]-gres_z[ddsl],range=(-.006,.003))#,range=(-.1,.1))#,weights=1./effsouthl[ddsl]**2.,range=(0,5000)) #hgn3 = np.histogram(stardensg[ddsl],bins=hg3[1]) hr3 = np.histogram(rres_g[rdsl]+rres_r[rdsl]-rres_z[rdsl],bins=hg3[1]) #xl1 = [] xl2 = [] xl3 = [] for i in range(0,len(hg2[0])): #xl1.append((hg1[1][i]+hg1[1][i+1])/2.) xl2.append((hg2[1][i]+hg2[1][i+1])/2.) xl3.append((hg3[1][i]+hg3[1][i+1])/2.) 
#norm1 = sum(hg1[0])/sum(hr1[0]) #plt.errorbar(xl1,hg1[0]/hr1[0]/norm1,np.sqrt(hg1[0])/hr1[0],fmt='ko') #plt.plot(xl1,hgn1[0]/hrn1[0]/norm1,'k:') norm2 = sum(hg2[0])/sum(hr2[0]) plt.errorbar(xl2,hg2[0]/hr2[0]/norm2,np.sqrt(hg2[0])/hr2[0],fmt='rd') #plt.plot(xl2,hgn2[0]/hrn2[0]/norm2,'r:') norm3 = sum(hg3[0])/sum(hr3[0]) plt.errorbar(xl3,hg3[0]/hr3[0]/norm3,np.sqrt(hg3[0])/hr3[0],fmt='b^') #plt.plot(xl3,hgn3[0]/hrn3[0]/norm1,'b:') #plt.ylim(.7,1.3) plt.xlabel('(g-r)_res') plt.ylabel('relative density') plt.legend((['DECaLS N','DECaLS S'])) plt.plot(xl2,np.ones(len(xl2)),'k--') #plt.title(title) plt.show() ``` ![png](output_78_0.png) ```python xl2 ``` [-0.00555, -0.00465, -0.00375, -0.00285, -0.0019499999999999997, -0.0010499999999999997, -0.00014999999999999953, 0.0007500000000000007, 0.0016500000000000009, 0.0025500000000000006] ```python #DECaLS N hg2 = np.histogram(stardensg[ddnl],weights=gres_g[ddnl],range=(0,5000)) #hgn2 = np.histogram(stardensg[ddnl],bins=hg2[1]) hr2 = np.histogram(stardensr[rdnl],bins=hg2[1]) #DECaLS S hg3 = np.histogram(stardensg[ddsl],weights=gres_g[ddsl],range=(0,5000)) #hgn3 = np.histogram(stardensg[ddsl],bins=hg3[1]) hr3 = np.histogram(stardensr[rdsl],bins=hg3[1]) #xl1 = [] xl2 = [] xl3 = [] for i in range(0,len(hg2[0])): #xl1.append((hg1[1][i]+hg1[1][i+1])/2.) xl2.append((hg2[1][i]+hg2[1][i+1])/2.) xl3.append((hg3[1][i]+hg3[1][i+1])/2.) 
#norm1 = sum(hg1[0])/sum(hr1[0]) #plt.errorbar(xl1,hg1[0]/hr1[0]/norm1,np.sqrt(hg1[0])/hr1[0],fmt='ko') #plt.plot(xl1,hgn1[0]/hrn1[0]/norm1,'k:') #norm2 = sum(hg2[0])/sum(hr2[0]) norm2 = len(stardensg)/len(stardensr) plt.errorbar(xl2,hg2[0]/hr2[0]/norm2,np.sqrt(hg2[0])/hr2[0],fmt='rd') #plt.plot(xl2,hgn2[0]/hrn2[0]/norm2,'r:') #norm3 = sum(hg3[0])/sum(hr3[0]) norm3 = norm2 plt.errorbar(xl3,hg3[0]/hr3[0]/norm3,np.sqrt(hg3[0])/hr3[0],fmt='b^') #plt.plot(xl3,hgn3[0]/hrn3[0]/norm1,'b:') #plt.ylim(.7,1.3) plt.xlabel('stellar density') plt.ylabel('g-band res') plt.legend((['DECaLS N','DECaLS S'])) #plt.plot(xl2,np.ones(len(xl2)),'k--') #plt.title(title) plt.show() ``` /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:22: RuntimeWarning: invalid value encountered in sqrt /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:26: RuntimeWarning: invalid value encountered in sqrt ![png](output_80_1.png) ```python #DECaLS N hg2 = np.histogram(felg[ddnl]['EBV'],weights=gres_g[ddnl],range=(0,0.15)) #hgn2 = np.histogram(stardensg[ddnl],bins=hg2[1]) hr2 = np.histogram(relg[rdnl]['EBV'],bins=hg2[1]) #DECaLS S hg3 = np.histogram(felg[ddsl]['EBV'],weights=gres_g[ddsl],range=(0,.15000)) #hgn3 = np.histogram(stardensg[ddsl],bins=hg3[1]) hr3 = np.histogram(relg[rdsl]['EBV'],bins=hg3[1]) #xl1 = [] xl2 = [] xl3 = [] for i in range(0,len(hg2[0])): #xl1.append((hg1[1][i]+hg1[1][i+1])/2.) xl2.append((hg2[1][i]+hg2[1][i+1])/2.) xl3.append((hg3[1][i]+hg3[1][i+1])/2.) 
#norm1 = sum(hg1[0])/sum(hr1[0]) #plt.errorbar(xl1,hg1[0]/hr1[0]/norm1,np.sqrt(hg1[0])/hr1[0],fmt='ko') #plt.plot(xl1,hgn1[0]/hrn1[0]/norm1,'k:') #norm2 = sum(hg2[0])/sum(hr2[0]) norm2 = len(stardensg)/len(stardensr) plt.errorbar(xl2,hg2[0]/hr2[0]/norm2,np.sqrt(hg2[0])/hr2[0],fmt='rd') #plt.plot(xl2,hgn2[0]/hrn2[0]/norm2,'r:') #norm3 = sum(hg3[0])/sum(hr3[0]) norm3 = norm2 plt.errorbar(xl3,hg3[0]/hr3[0]/norm3,np.sqrt(hg3[0])/hr3[0],fmt='b^') #plt.plot(xl3,hgn3[0]/hrn3[0]/norm1,'b:') #plt.ylim(.7,1.3) plt.xlabel('EBV') plt.ylabel('g-band res') plt.legend((['DECaLS N','DECaLS S'])) #plt.plot(xl2,np.ones(len(xl2)),'k--') #plt.title(title) plt.show() ``` /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:22: RuntimeWarning: invalid value encountered in sqrt /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:26: RuntimeWarning: invalid value encountered in sqrt ![png](output_81_1.png) ```python #DECaLS N hg2 = np.histogram(felg[ddnl]['GALDEPTH_R'],weights=gres_g[ddnl])#,range=(0,0.15)) #hgn2 = np.histogram(stardensg[ddnl],bins=hg2[1]) hr2 = np.histogram(relg[rdnl]['GALDEPTH_R'],bins=hg2[1]) #DECaLS S hg3 = np.histogram(felg[ddsl]['GALDEPTH_R'],weights=gres_g[ddsl])#,range=(0,.15000)) #hgn3 = np.histogram(stardensg[ddsl],bins=hg3[1]) hr3 = np.histogram(relg[rdsl]['GALDEPTH_R'],bins=hg3[1]) #xl1 = [] xl2 = [] xl3 = [] for i in range(0,len(hg2[0])): #xl1.append((hg1[1][i]+hg1[1][i+1])/2.) xl2.append((hg2[1][i]+hg2[1][i+1])/2.) xl3.append((hg3[1][i]+hg3[1][i+1])/2.) 
#norm1 = sum(hg1[0])/sum(hr1[0]) #plt.errorbar(xl1,hg1[0]/hr1[0]/norm1,np.sqrt(hg1[0])/hr1[0],fmt='ko') #plt.plot(xl1,hgn1[0]/hrn1[0]/norm1,'k:') #norm2 = sum(hg2[0])/sum(hr2[0]) norm2 = len(stardensg)/len(stardensr) plt.errorbar(xl2,hg2[0]/hr2[0]/norm2,np.sqrt(hg2[0])/hr2[0],fmt='rd') #plt.plot(xl2,hgn2[0]/hrn2[0]/norm2,'r:') #norm3 = sum(hg3[0])/sum(hr3[0]) norm3 = norm2 plt.errorbar(xl3,hg3[0]/hr3[0]/norm3,np.sqrt(hg3[0])/hr3[0],fmt='b^') #plt.plot(xl3,hgn3[0]/hrn3[0]/norm1,'b:') #plt.ylim(.7,1.3) plt.xlabel('GALDEPTH_R') plt.ylabel('g-band res') plt.legend((['DECaLS N','DECaLS S'])) #plt.plot(xl2,np.ones(len(xl2)),'k--') #plt.title(title) plt.show() ``` /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:22: RuntimeWarning: invalid value encountered in sqrt /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:26: RuntimeWarning: invalid value encountered in sqrt ![png](output_82_1.png) ```python #DECaLS N hg2 = np.histogram(felg[ddnl]['GALDEPTH_Z'],weights=gres_g[ddnl])#,range=(0,0.15)) #hgn2 = np.histogram(stardensg[ddnl],bins=hg2[1]) hr2 = np.histogram(relg[rdnl]['GALDEPTH_Z'],bins=hg2[1]) #DECaLS S hg3 = np.histogram(felg[ddsl]['GALDEPTH_Z'],weights=gres_g[ddsl])#,range=(0,.15000)) #hgn3 = np.histogram(stardensg[ddsl],bins=hg3[1]) hr3 = np.histogram(relg[rdsl]['GALDEPTH_Z'],bins=hg3[1]) #xl1 = [] xl2 = [] xl3 = [] for i in range(0,len(hg2[0])): #xl1.append((hg1[1][i]+hg1[1][i+1])/2.) xl2.append((hg2[1][i]+hg2[1][i+1])/2.) xl3.append((hg3[1][i]+hg3[1][i+1])/2.) 
#norm1 = sum(hg1[0])/sum(hr1[0]) #plt.errorbar(xl1,hg1[0]/hr1[0]/norm1,np.sqrt(hg1[0])/hr1[0],fmt='ko') #plt.plot(xl1,hgn1[0]/hrn1[0]/norm1,'k:') #norm2 = sum(hg2[0])/sum(hr2[0]) norm2 = len(stardensg)/len(stardensr) plt.errorbar(xl2,hg2[0]/hr2[0]/norm2,np.sqrt(hg2[0])/hr2[0],fmt='rd') #plt.plot(xl2,hgn2[0]/hrn2[0]/norm2,'r:') #norm3 = sum(hg3[0])/sum(hr3[0]) norm3 = norm2 plt.errorbar(xl3,hg3[0]/hr3[0]/norm3,np.sqrt(hg3[0])/hr3[0],fmt='b^') #plt.plot(xl3,hgn3[0]/hrn3[0]/norm1,'b:') #plt.ylim(.7,1.3) plt.xlabel('GALDEPTH_Z') plt.ylabel('g-band res') plt.legend((['DECaLS N','DECaLS S'])) #plt.plot(xl2,np.ones(len(xl2)),'k--') #plt.title(title) plt.show() ``` /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:22: RuntimeWarning: invalid value encountered in sqrt /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:26: RuntimeWarning: invalid value encountered in sqrt ![png](output_83_1.png) ```python #DECaLS N hg2 = np.histogram(felg[ddnl]['GALDEPTH_G'],weights=gres_g[ddnl])#,range=(0,0.15)) #hgn2 = np.histogram(stardensg[ddnl],bins=hg2[1]) hr2 = np.histogram(relg[rdnl]['GALDEPTH_G'],bins=hg2[1]) #DECaLS S hg3 = np.histogram(felg[ddsl]['GALDEPTH_G'],weights=gres_g[ddsl])#,range=(0,.15000)) #hgn3 = np.histogram(stardensg[ddsl],bins=hg3[1]) hr3 = np.histogram(relg[rdsl]['GALDEPTH_G'],bins=hg3[1]) #xl1 = [] xl2 = [] xl3 = [] for i in range(0,len(hg2[0])): #xl1.append((hg1[1][i]+hg1[1][i+1])/2.) xl2.append((hg2[1][i]+hg2[1][i+1])/2.) xl3.append((hg3[1][i]+hg3[1][i+1])/2.) 
#norm1 = sum(hg1[0])/sum(hr1[0]) #plt.errorbar(xl1,hg1[0]/hr1[0]/norm1,np.sqrt(hg1[0])/hr1[0],fmt='ko') #plt.plot(xl1,hgn1[0]/hrn1[0]/norm1,'k:') #norm2 = sum(hg2[0])/sum(hr2[0]) norm2 = len(stardensg)/len(stardensr) plt.errorbar(xl2,hg2[0]/hr2[0]/norm2,np.sqrt(hg2[0])/hr2[0],fmt='rd') #plt.plot(xl2,hgn2[0]/hrn2[0]/norm2,'r:') #norm3 = sum(hg3[0])/sum(hr3[0]) norm3 = norm2 plt.errorbar(xl3,hg3[0]/hr3[0]/norm3,np.sqrt(hg3[0])/hr3[0],fmt='b^') #plt.plot(xl3,hgn3[0]/hrn3[0]/norm1,'b:') #plt.ylim(.7,1.3) plt.xlabel('GALDEPTH_G') plt.ylabel('g-band res') plt.legend((['DECaLS N','DECaLS S'])) #plt.plot(xl2,np.ones(len(xl2)),'k--') #plt.title(title) plt.show() ``` /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:22: RuntimeWarning: invalid value encountered in sqrt /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:26: RuntimeWarning: invalid value encountered in sqrt ![png](output_84_1.png) ```python felg.dtype.names ``` ('RA', 'DEC', 'BRICKNAME', 'MORPHTYPE', 'DCHISQ', 'FLUX_G', 'FLUX_R', 'FLUX_Z', 'MW_TRANSMISSION_G', 'MW_TRANSMISSION_R', 'MW_TRANSMISSION_Z', 'NOBS_G', 'NOBS_R', 'NOBS_Z', 'PSFDEPTH_G', 'PSFDEPTH_R', 'PSFDEPTH_Z', 'GALDEPTH_G', 'GALDEPTH_R', 'GALDEPTH_Z', 'FIBERFLUX_G', 'FIBERFLUX_R', 'FIBERFLUX_Z', 'FIBERTOTFLUX_G', 'FIBERTOTFLUX_R', 'FIBERTOTFLUX_Z', 'MASKBITS', 'EBV', 'PHOTSYS', 'TARGETID', 'DESI_TARGET') ```python print(len(felg[ddnl])) w = ddnl & (gres_g < -0.002) wr = rdnl & (rres_g < -0.002) nr = len(relg[wr]) ng = len(felg[w]) nl = np.ones(ng)/nr print(ng,nr) plt.hist(felg[w]['FLUX_G']/felg[w]['MW_TRANSMISSION_G'],weights=nl,bins=100,histtype='step',range=(.35,.6)) print(len(felg[ddnl])) w = ddnl & (gres_g > -0.002) & (gres_g < -0.001) wr = rdnl & (rres_g > -0.002) & (rres_g < -0.001) nr = len(relg[wr]) ng = len(felg[w]) nl = np.ones(ng)/nr print(ng,nr) 
plt.hist(felg[w]['FLUX_G']/felg[w]['MW_TRANSMISSION_G'],weights=nl,bins=100,histtype='step',range=(.35,.6)) print(len(felg[ddnl])) w = ddnl & (gres_g > -0.001) & (gres_g < 0.00) wr = rdnl & (rres_g > -0.001) & (rres_g < 0.00) nr = len(relg[wr]) ng = len(felg[w]) nl = np.ones(ng)/nr print(ng,nr) plt.hist(felg[w]['FLUX_G']/felg[w]['MW_TRANSMISSION_G'],weights=nl,bins=100,histtype='step',range=(.35,.6),color='k') print(len(felg[ddnl])) w = ddnl & (gres_g > 0.00) & (gres_g < 0.001) wr = rdnl & (rres_g > 0.00) & (rres_g < 0.001) nr = len(relg[wr]) ng = len(felg[w]) nl = np.ones(ng)/nr print(ng,nr) plt.hist(felg[w]['FLUX_G']/felg[w]['MW_TRANSMISSION_G'],weights=nl,bins=100,histtype='step',range=(.35,.6)) print(len(felg[ddnl])) print(len(felg[ddnl])) w = ddnl & (gres_g > 0.001)# & (gres_g < 0.001) wr = rdnl & (rres_g > 0.001)# & (rres_g < 0.001) nr = len(relg[wr]) ng = len(felg[w]) nl = np.ones(ng)/nr print(ng,nr) plt.hist(felg[w]['FLUX_G']/felg[w]['MW_TRANSMISSION_G'],weights=nl,bins=100,histtype='step',range=(.35,.6)) print(len(felg[ddnl])) plt.show() ``` 14094848 237329 325063 14094848 3809444 5297581 14094848 9113646 12233349 14094848 737930 930583 14094848 14094848 107904 108121 14094848 ![png](output_86_1.png) ```python wg = ddnl & (gres_g < 0) wr = rdnl & (rres_g < 0) #DECaLS N hg2 = np.histogram(stardensg[wg],range=(0,5000)) #hgn2 = np.histogram(stardensg[ddnl],bins=hg2[1]) hr2 = np.histogram(stardensr[wr],bins=hg2[1]) #DECaLS S wg = ddsl & (gres_g < 0) wr = rdsl & (rres_g < 0) hg3 = np.histogram(stardensg[wg],range=(0,5000)) #hgn3 = np.histogram(stardensg[ddsl],bins=hg3[1]) hr3 = np.histogram(stardensr[wr],bins=hg2[1]) #xl1 = [] xl2 = [] xl3 = [] for i in range(0,len(hg2[0])): #xl1.append((hg1[1][i]+hg1[1][i+1])/2.) xl2.append((hg2[1][i]+hg2[1][i+1])/2.) xl3.append((hg3[1][i]+hg3[1][i+1])/2.) 
#norm1 = sum(hg1[0])/sum(hr1[0]) #plt.errorbar(xl1,hg1[0]/hr1[0]/norm1,np.sqrt(hg1[0])/hr1[0],fmt='ko') #plt.plot(xl1,hgn1[0]/hrn1[0]/norm1,'k:') #norm2 = sum(hg2[0])/sum(hr2[0]) norm2 = len(stardensg)/len(stardensr) plt.errorbar(xl2,hg2[0]/hr2[0]/norm2,np.sqrt(hg2[0])/hr2[0],fmt='rd') #plt.plot(xl2,hgn2[0]/hrn2[0]/norm2,'r:') #norm3 = sum(hg3[0])/sum(hr3[0]) norm3 = norm2 plt.errorbar(xl3,hg3[0]/hr3[0]/norm3,np.sqrt(hg3[0])/hr3[0],fmt='b^') #plt.plot(xl3,hgn3[0]/hrn3[0]/norm1,'b:') #plt.ylim(.7,1.3) plt.xlabel('stellar density') plt.ylabel('g-band res') plt.legend((['DECaLS N','DECaLS S'])) #plt.plot(xl2,np.ones(len(xl2)),'k--') #plt.title(title) plt.show() ``` /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:26: RuntimeWarning: invalid value encountered in true_divide /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:30: RuntimeWarning: invalid value encountered in true_divide ![png](output_87_1.png) ```python import pandas as pd ``` ```python smf = os.getenv('SCRATCH')+'/dr8_combined256.h5' ``` ```python sm = pd.read_hdf(smf) ``` ```python len(sm) ``` 786432 ```python sm['airmass_g_mean'][0] ``` nan ```python wp = rallpix > 0 pixls = [] for i in range(0,len(rallpix)): if rallpix[i] > 0: pixls.append(i) pixls = np.array(pixls).astype(int) th,phi = hp.pix2ang(nside,pixls,nest=nest) od = hp.reorder(sm['hits_g_total'],r2n=True)[wp] def thphi2radec(theta,phi): return 180./np.pi*phi,-(180./np.pi*theta-90) ra,dec = thphi2radec(th,phi) ``` ```python sm.columns ``` Index(['airmass_g_mean', 'ccdskymag_g_mean', 'exptime_g_total', 'fwhm_g_mean', 'hits_g_fracdet', 'hits_g_total', 'depth_g_total', 'mjd_g_min', 'airmass_r_mean', 'ccdskymag_r_mean', 'exptime_r_total', 'fwhm_r_mean', 'hits_r_fracdet', 'hits_r_total', 'depth_r_total', 'mjd_r_min', 'airmass_z_mean', 'ccdskymag_z_mean', 'exptime_z_total', 'fwhm_z_mean', 'hits_z_fracdet', 'hits_z_total', 'depth_z_total', 'mjd_z_min', 
'nstar', 'ebv', 'loghi'], dtype='object') ```python plt.scatter(ra,dec,c=od,s=.1,vmax=20) plt.colorbar() plt.title('relative ELG target density') plt.show() ``` ![png](output_95_0.png) ```python #wp &= hp.reorder(sm['hits_g_total'],r2n=True)*0 == 0 od = hp.reorder(sm['hits_g_total'],r2n=True)#[wp] np.percentile(od, 99) ``` nan ```python wn = (od*0 != 0) & (rallpix > 0) ``` ```python wg = np.isnan(od) np.mean(od[~wg]) od[wn] = np.mean(od[~wg]) od[wn] ``` array([14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 
14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 
14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 
14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 
14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 
14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455, 14.629455]) ```python def doplotmmap(name): od = hp.reorder(sm[name],r2n=True) wn = (od*0 != 0) & ((rallpix > 0) | (dallpix > 0)) wg = np.isnan(od) print(len(od[wn])) od[wn] = np.mean(od[~wg]) print(np.mean(od[~wg])) odg = np.zeros(len(felg)) print(len(felg),len(dpix)) for i in range(0,len(dpix)): #if i%1000000==0 : print(i) pix = dpix[i] odg[i] = od[pix] print('gal done') odr = np.zeros(len(relg)) for i in range(0,len(rpix)): #if i%1000000==0 : print(i) pix = rpix[i] odr[i] = od[pix] print('ran done') #bmzls hg1 = np.histogram(odg[dbml],weights=1./effnorthl[dbml],range=(np.percentile(odg[dbml],2.5),np.percentile(odg[dbml],97.5))) hr1 = np.histogram(odr[rbml],bins=hg1[1]) #DECaLS N hg2 = np.histogram(odg[ddnl],weights=1./effsouthl[ddnl]**2.,range=(np.percentile(odg[ddnl],2.5),np.percentile(odg[ddnl],97.5))) hr2 = np.histogram(odr[rdnl],bins=hg2[1]) #DECaLS S hg3 = np.histogram(odg[ddsl],weights=1./effsouthl[ddsl]**2.,range=(np.percentile(odg[ddsl],2.5),np.percentile(odg[ddsl],97.5))) hr3 = np.histogram(odr[rdsl],bins=hg3[1]) 
plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'with depth correction',name) ``` ```python doplotmmap('hits_g_total') ``` 992 14.629454996806823 47256516 47256516 gal done ran done ![png](output_100_1.png) ```python for name in sm.columns: doplotmmap(name) ``` 992 1.3408565629045797 47256516 47256516 gal done ran done ![png](output_101_1.png) 1118 26.43684443559328 47256516 47256516 gal done ran done ![png](output_101_3.png) 992 1814.8150981560948 47256516 47256516 gal done ran done ![png](output_101_5.png) 992 1.4293593620615017 47256516 47256516 gal done ran done ![png](output_101_7.png) 992 1.0 47256516 47256516 gal done ran done /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:11: RuntimeWarning: invalid value encountered in true_divide # This is added back by InteractiveShellApp.init_path() /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:14: RuntimeWarning: invalid value encountered in true_divide /usr/common/software/python/3.7-anaconda-2019.07/lib/python3.7/site-packages/ipykernel_launcher.py:17: RuntimeWarning: invalid value encountered in true_divide ![png](output_101_10.png) 992 14.629454996806823 47256516 47256516 gal done ran done ![png](output_101_12.png) 992 57437.56121851446 47256516 47256516 gal done ran done ![png](output_101_14.png) 916 1.3428321561254157 47256516 47256516 gal done ran done ![png](output_101_16.png) 1030 25.06775107932448 47256516 47256516 gal done ran done ![png](output_101_18.png) 916 1446.187270174255 47256516 47256516 gal done ran done ![png](output_101_20.png) 916 1.296672437723619 47256516 47256516 gal done ran done ![png](output_101_22.png) 916 1.0 47256516 47256516 gal done ran done ![png](output_101_24.png) 916 13.428957107865047 47256516 47256516 gal done ran done ![png](output_101_26.png) 916 24.262318497543955 47256516 47256516 gal done ran done ![png](output_101_28.png) 916 57359.53387586379 47256516 47256516 gal done ran done 
![png](output_101_30.png) 845 1.3991432063582103 47256516 47256516 gal done ran done ![png](output_101_32.png) 998 23.076823242050757 47256516 47256516 gal done ran done ![png](output_101_34.png) 845 1917.0951113335057 47256516 47256516 gal done ran done ![png](output_101_36.png) 845 1.1221467816189927 47256516 47256516 gal done ran done ![png](output_101_38.png) 845 1.0 47256516 47256516 gal done ran done ![png](output_101_40.png) 845 13.87963344237306 47256516 47256516 gal done ran done ![png](output_101_42.png) 845 23.38037449501416 47256516 47256516 gal done ran done ![png](output_101_44.png) 845 57229.14232635461 47256516 47256516 gal done ran done ![png](output_101_46.png) 0 1405.9876000552176 47256516 47256516 gal done ran done ![png](output_101_48.png) 0 0.30985871034917145 47256516 47256516 gal done ran done ![png](output_101_50.png) 0 20.78052943080509 47256516 47256516 gal done ran done ![png](output_101_52.png) ```python #Galactic extinction #bmzls hg1 = np.histogram(felg[dbml]['EBV'],weights=1./1./effnorthl[dbml],range=(0,0.15)) hr1 = np.histogram(relg[rbml]['EBV'],bins=hg1[1]) #DECaLS N hg2 = np.histogram(felg[ddnl]['EBV'],weights=1./effsouthl[ddnl]**2.,range=(0,0.15)) hr2 = np.histogram(relg[rdnl]['EBV'],bins=hg2[1]) #DECaLS S hg3 = np.histogram(felg[ddsl]['EBV'],weights=1./effsouthl[ddsl]**2.,range=(0,0.15)) hr3 = np.histogram(relg[rdsl]['EBV'],bins=hg3[1]) ``` ```python plotrel(hg1,hr1,hg2,hr2,hg3,hr3,'applying depth MC','Galactic extinction (E[B-V])') ``` ![png](output_103_0.png) ```python ```
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@Sandbox@ELGtargnonsense.ipynb@.PATH_END.py
{ "filename": "_namelengthsrc.py", "repo_name": "catboost/catboost", "repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/densitymapbox/hoverlabel/_namelengthsrc.py", "type": "Python" }
import _plotly_utils.basevalidators


class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``namelengthsrc`` property of
    ``densitymapbox.hoverlabel``.

    Delegates all validation logic to the shared ``SrcValidator`` base
    class; this subclass only pins the property/parent names and the
    generated defaults.
    """

    def __init__(
        self,
        plotly_name="namelengthsrc",
        parent_name="densitymapbox.hoverlabel",
        **kwargs
    ):
        # Use the standard defaults for generated validator modules
        # unless the caller explicitly overrides them via kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(NamelengthsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@densitymapbox@hoverlabel@_namelengthsrc.py@.PATH_END.py
{ "filename": "distutilsLib.py", "repo_name": "ggobi/ggobi", "repo_path": "ggobi_extracted/ggobi-master/plugins/Python/Install/distutilsLib.py", "type": "Python" }
"""Print the path of the interpreter's ``config`` directory.

Emits ``<LIBP>/config`` (e.g. ``/usr/lib/python2.x/config``) so a build
script can capture it on stdout — presumably to locate Makefile/link
artifacts for embedding Python (TODO confirm against the caller).
"""
from distutils.sysconfig import get_config_vars

# 'LIBP' is the platform library prefix from the interpreter's build
# Makefile.  NOTE(review): it is only defined on some platforms/Python
# versions; a missing key raises KeyError, which is the desired loud
# failure for a build probe.
# Single-argument print(...) is valid, identical output on both
# Python 2 and Python 3 (the original `print x` statement was py2-only).
print(get_config_vars()['LIBP'] + "/config")

# Alternative variable kept for reference:
# print(get_config_vars()['LIBRARY'])
ggobiREPO_NAMEggobiPATH_START.@ggobi_extracted@ggobi-master@plugins@Python@Install@distutilsLib.py@.PATH_END.py