hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c38b0c4ef7bf695efea339bdab4d6094cc2c8b8 | 2,092 | py | Python | huji_lab/Display.py | stormage2/huji_lab | 07734f1891d05177dd1395a4a3e4694e258ca0f8 | [
"MIT"
] | null | null | null | huji_lab/Display.py | stormage2/huji_lab | 07734f1891d05177dd1395a4a3e4694e258ca0f8 | [
"MIT"
] | 7 | 2018-10-16T17:02:15.000Z | 2018-10-31T13:39:30.000Z | build/lib/huji_lab/Display.py | stormage2/huji_lab | 07734f1891d05177dd1395a4a3e4694e258ca0f8 | [
"MIT"
] | 1 | 2018-10-21T11:43:55.000Z | 2018-10-21T11:43:55.000Z | import matplotlib.pyplot as _plt
from IPython.display import Image as _Image
from IPython.display import display as _display
from IPython.display import Markdown as _Markdown
from IPython.display import Latex as _Latex
"""
# A dangerous override function, currently unimplemented.
from uncertainties.core import Variable as _varu
def is_ufloat(num):
if type(num) is _varu:
if num.std_dev / abs(num.nominal_value) > 0.02:
print_color_bold(num, 'red')
else:
print_color_bold(num, 'green')
else:
print_color_bold(num, 'none')
def print_color_bold(string, color):
if color != 'none':
num = str(string)
text_line = _Markdown("<span style=\"color: " + color + "\">**" + num + "**</span>") # type: tuple
_display(text_line)
else:
_display(string)
global print
print = is_ufloat
"""
def print_color_bold(string, color='black'):
    """
    Display ``string`` in bold, in the given CSS ``color``, in a notebook.

    :param string: text to display (must be a ``str``; it is concatenated
        into an HTML span).
    :param color: any CSS color name; defaults to ``'black'``.
    :return: None.
    """
    # Render through IPython Markdown so the HTML span shows inline.
    # (The previous `# type: tuple` comment was wrong — this is a Markdown
    # display object, not a tuple.)
    text_line = _Markdown("<span style=\"color: " + color + "\">**" + string + "**</span>")
    _display(text_line)
def _print_latex_old(text_to_print):
    """
    DEPRECATED, Please don't use.
    Nicely prints LaTeX syntax, inline with python output.

    :param text_to_print: LaTeX source to render.
    :return: None.
    """
    # Draw the LaTeX as figure text on an otherwise empty, axis-less figure.
    fig, ax = _plt.subplots(figsize=(1, 1))
    _plt.rc('text', usetex=True)  # NOTE: requires a working LaTeX install
    _plt.tight_layout()
    _plt.axis('off')
    ax.grid(False)
    _plt.figtext(0, 0, text_to_print, fontsize=40, bbox=dict(facecolor='white', linewidth=0))
def print_latex(text_to_print):
    """
    Nicely renders LaTeX syntax inline with notebook output.

    :param text_to_print: LaTeX source to render.
    :return: an ``IPython.display.Latex`` display object.
    """
    latex_obj = _Latex(text_to_print)
    return latex_obj
def print_wolfram(wolf_query):
    """
    Nicely prints a wolframAlpha query as a series of photos.

    :param wolf_query: A wolfram_query() object (dict-like, with a 'pod'
        list whose entries carry a 'subpod' that is either a dict or a
        list of dicts).
    :return: None.
    """
    for result in wolf_query['pod']:
        outer = result['subpod']
        # Normalise: a pod's 'subpod' is a single dict or a list of dicts.
        # (Replaces the old `type(outer) is dict` check and index loop.)
        subpods = [outer] if isinstance(outer, dict) else outer
        for subpod in subpods:
            _display(_Image(url=subpod['img']['@src']))
| 28.27027 | 107 | 0.628107 | import matplotlib.pyplot as _plt
from IPython.display import Image as _Image
from IPython.display import display as _display
from IPython.display import Markdown as _Markdown
from IPython.display import Latex as _Latex
def print_color_bold(string, color='black'):
text_line = _Markdown("<span style=\"color: " + color + "\">**" + string + "**</span>")
_display(text_line)
def _print_latex_old(text_to_print):
fig, ax = _plt.subplots(figsize=(1, 1))
_plt.rc('text', usetex=True)
_plt.tight_layout()
_plt.axis('off')
ax.grid(False)
_plt.figtext(0, 0, text_to_print, fontsize=40, bbox=dict(facecolor='white', linewidth=0))
def print_latex(text_to_print):
return _Latex(text_to_print)
def print_wolfram(wolf_query):
for result in wolf_query['pod']:
outer = result['subpod']
if type(outer) is dict:
disp = _Image(url=outer['img']['@src'])
_display(disp)
else:
for i in range(len(outer)):
disp = _Image(url=outer[i]['img']['@src'])
_display(disp)
| true | true |
1c38b24548b3a8a34d4aa01e684231ba2be1e09f | 26,473 | py | Python | xrviz/dashboard.py | jhamman/xrviz | 7b933f29330fed8c9ee6c11822058478f58d51a1 | [
"BSD-3-Clause"
] | null | null | null | xrviz/dashboard.py | jhamman/xrviz | 7b933f29330fed8c9ee6c11822058478f58d51a1 | [
"BSD-3-Clause"
] | null | null | null | xrviz/dashboard.py | jhamman/xrviz | 7b933f29330fed8c9ee6c11822058478f58d51a1 | [
"BSD-3-Clause"
] | 1 | 2021-01-06T17:07:43.000Z | 2021-01-06T17:07:43.000Z | import ast
import dask
import panel as pn
import pandas as pd
import numpy as np
import xarray as xr
import hvplot.xarray
import hvplot.pandas
import holoviews as hv
from holoviews import streams
from bokeh.models import HoverTool
import warnings
from itertools import cycle
import numpy
from .sigslot import SigSlot
from .control import Control
from .utils import convert_widget, player_with_name_and_value, is_float
from .compatibility import ccrs, gv, gf, has_cartopy, logger, has_crick_tdigest
class Dashboard(SigSlot):
"""
Main entry point to XrViz, an interactive GUI for a given dataset.
Parameters
----------
data: xarray.DataSet
The data to be visualised
initial_params: `dict`
To pre-select values of widgets upon initialization. The keys are
generally names of widgets within the input area of the interface.
For more details, refer to
`Set Initial Parameters <../html/set_initial_parameters.html>`_ .
Attributes
----------
1. panel:
A ``panel.Tabs`` instance containing the user input panes and
output graphs of the interface.
2. control:
A ``Control`` instance responsible for input panes (control panel).
3. plot_button:
A ``pn.widgets.Button`` that generates graph according to values
selected in input panes, upon click.
4. graph:
A ``HoloViews(DynamicMap)`` instance containing the main graph.
5. output:
The ``graph`` along with the select widgets for index selection.
6. taps_graph:
A ``holoviews.Points`` instance to record the location of taps.
7. series_graph:
A ``HoloViews(Overlay)`` instance having series extracted.
8. clear_series_button:
A ``pn.widgets.Button`` to clear the `taps_graph` and
`series_graph`.
"""
def __init__(self, data, initial_params=None):
    """
    Build the dashboard: control panel, plot/clear buttons, output panes.

    Parameters
    ----------
    data: xarray.Dataset or xarray.DataArray
        The data to be visualised.
    initial_params: dict, optional
        Widget values to pre-select upon initialization.
    """
    super().__init__()
    if not isinstance(data, xr.core.dataarray.DataWithCoords):
        raise ValueError("Input should be an xarray data object, not %s" % type(data))
    self.set_data(data)
    # BUGFIX: was a mutable default argument (`initial_params={}`), which
    # would be shared across all Dashboard instances; use a None sentinel.
    self.initial_params = {} if initial_params is None else initial_params
    self.control = Control(self.data)
    self.plot_button = pn.widgets.Button(name='Plot', width=200,
                                         disabled=True)
    self.index_selectors = []
    self.graph = pn.Spacer(name='Graph')
    self.taps_graph = hv.Points([])
    self.series_graph = pn.Row(pn.Spacer(name='Series Graph'))
    self.clear_series_button = pn.widgets.Button(name='Clear',
                                                 width=200,
                                                 disabled=True)
    self.output = pn.Row(self.graph,
                         pn.Column(name='Index_selectors'))

    # Wire the widgets to their handlers via the SigSlot machinery.
    self._register(self.plot_button, 'plot_clicked', 'clicks')
    self.connect('plot_clicked', self.create_graph)

    self._register(self.control.coord_setter.coord_selector, 'set_coords')
    self.connect("set_coords", self.set_coords)

    self._register(self.clear_series_button, 'clear_series', 'clicks')
    self.connect('clear_series', self.clear_series)

    self.control.displayer.connect('variable_selected',
                                   self.check_is_plottable)
    self.control.displayer.connect('variable_selected',
                                   self._link_aggregation_selectors)
    self.control.fields.connect('x', self._link_aggregation_selectors)
    self.control.fields.connect('y', self._link_aggregation_selectors)

    self.panel = pn.Column(self.control.panel,
                           pn.Row(self.plot_button,
                                  self.clear_series_button),
                           self.output,
                           self.series_graph, width_policy='max')

    # To auto-select in case of single variable
    if len(list(self.data.variables)) == 1:
        self.control.displayer.select.value = list(self.data.variables)

    self.control.setup_initial_values(self.initial_params)
    self.taps = []
    self.tap_stream = streams.Tap(transient=True)
    # Distinct marker/series colors, cycled through on each tap.
    colors = ['#60fffc', '#6da252', '#ff60d4', '#ff9400', '#f4e322',
              '#229cf4', '#af9862', '#629baf', '#7eed5a', '#e29ec8',
              '#ff4300']
    self.color_pool = cycle(colors)
    self.clear_points = hv.streams.Stream.define(
        'Clear_points', clear=False)(transient=True)
def clear_series(self, *args):
    """
    Remove the tap markers from the image and drop the extracted series.

    Does nothing while the clear button is disabled (i.e. when no series
    extraction is active).
    """
    if self.clear_series_button.disabled:
        return
    self.series_graph[0] = pn.Spacer(name='Series Graph')
    self.series = hv.Points([]).opts(height=self.kwargs['height'],
                                     width=self.kwargs['width'])
    del self.taps[:]
    self.clear_points.event(clear=True)
def _link_aggregation_selectors(self, *args):
    """Re-wire style setup to every remaining-dimension selector."""
    for dim_name in self.control.kwargs['remaining_dims']:
        self.control.fields.connect(dim_name, self.control.style.setup)
def create_graph(self, *args):
    """
    Creates a graph according to the values selected in the widgets.

    This method is usually invoked by the user clicking "Plot".
    It handles the following two cases:

    1. Both `x`, `y` are present in selected variable's coordinates.
       Geographic projection is possible only in this case. It uses
       ``create_selectors_players`` method for creation of the graph.
       Here the selectors generated automatically by hvplot are used.
    2. One or both of `x`, `y` are NOT present in selected variable's
       coordinates (both `x` and `y` are considered as dimensions). It
       uses ``create_indexed_graph`` method for creation of the graph.
       The selectors are created and linked with graph by XrViz.
    """
    self.kwargs = self.control.kwargs
    self.var = self.kwargs['Variables']
    if self.index_selectors:
        for selector in self.index_selectors:
            del selector
    self.index_selectors = []
    self.output[1].clear()  # clears Index_selectors
    self.series_graph[0] = pn.Spacer(name='Series Graph')
    self.series = hv.Points([]).opts(height=self.kwargs['height'],
                                     width=self.kwargs['width'])
    self.taps.clear()
    self.control.fields.connect('extract_along', self.clear_series)

    are_var_coords = self.kwargs['are_var_coords']
    if are_var_coords:
        graph_opts = {'x': self.kwargs['x'],
                      'y': self.kwargs['y'],
                      'title': self.var,
                      'height': self.kwargs['height'],
                      'width': self.kwargs['width'],
                      'cmap': self.kwargs['cmap'],
                      'colorbar': self.kwargs['colorbar'],
                      'rasterize': self.kwargs['rasterize']}
        color_scale = self.kwargs['color_scale']
        dims_to_agg = self.kwargs['dims_to_agg']
        use_all_data = self.kwargs['compute min/max from all data']
        sel_data = self.data[self.var]

        if has_cartopy:
            is_geo = self.kwargs['is_geo']
            base_map = self.kwargs['basemap']
            # BUGFIX: was `True if base_map != None else False`;
            # compare with None by identity.
            show_map = base_map is not None
            if is_geo:
                crs_params = self.kwargs['crs params']
                crs_params = process_proj_params(crs_params)
                crs = getattr(ccrs, self.kwargs['crs'])(**crs_params)
                geo_ops = {'alpha': self.kwargs['alpha'],
                           'project': self.kwargs['project'],
                           'global_extent': self.kwargs['global_extent'],
                           'geo': True,
                           'crs': crs}
                if not show_map:
                    # find projection and crs, add it to geo_ops
                    proj_val = self.kwargs['projection']
                    if proj_val:
                        proj_params = self.kwargs['projection params']
                        proj_params = process_proj_params(proj_params)
                        projection = getattr(ccrs, self.kwargs['projection'])(**proj_params)
                        geo_ops.update({'projection': projection})
                graph_opts.update(geo_ops)
                # BUGFIX: was `feat is not 'None'` — identity comparison
                # with a string literal; use `!=`.
                feature_map = gv.Overlay([getattr(gf, feat) for feat in self.kwargs['features'] if feat != 'None'])

        for dim in dims_to_agg:
            if self.kwargs[dim] == 'count':
                sel_data = (~ sel_data.isnull()).sum(dim)
            else:
                agg = self.kwargs[dim]
                sel_data = getattr(sel_data, agg)(dim)

        if self.var in list(sel_data.coords):  # When a var(coord) is plotted wrt itself
            sel_data = sel_data.to_dataset(name=f'{sel_data.name}_')

        # BUGFIX: was `color_scale is not 'linear'` — identity comparison
        # with a string literal; use `!=`.
        if color_scale != 'linear':
            sel_data = getattr(numpy, color_scale)(sel_data)  # Color Scaling

        if not use_all_data:
            # sel the values at first step, to use for cmap limits
            sels = {dim: 0 for dim in self.kwargs['dims_to_select_animate']}
            sel_data_for_cmap = sel_data.isel(**sels, drop=True)
        else:
            sel_data_for_cmap = sel_data

        cmin, cmax = self.kwargs['cmap lower limit'], self.kwargs['cmap upper limit']
        cmin, cmax = (cmin, cmax) if is_float(cmin) and is_float(cmax) else ('', '')

        # It is better to set initial values as 0.1,0.9 rather than
        # 0,1(min, max) to get a color balance graph
        c_lim_lower, c_lim_upper = (
            (float(cmin), float(cmax)) if cmin and cmax
            else find_cmap_limits(sel_data_for_cmap))

        color_range = {sel_data.name: (c_lim_lower, c_lim_upper)}
        if not cmin:  # if user left blank or initial values are empty
            self.control.style.lower_limit.value = str(round(c_lim_lower, 5))
            self.control.style.upper_limit.value = str(round(c_lim_upper, 5))

        assign_opts = {dim: self.data[dim] for dim in sel_data.dims}

        # Following tasks are happening here:
        # 1. assign_opts: reassignment of coords(if not done result in
        #    errors for some of the selections in fields panel)
        # 2. graph_opts: customise the plot according to selections in
        #    style and projection(if available)
        # 3. color_range: customise the colormap range according to cmap
        #    lower and upper limits
        # 4. active_tools: activate the tools required such as 'wheel_zoom',
        #    'pan'
        graph = sel_data.assign_coords(
            **assign_opts).hvplot.quadmesh(
            **graph_opts).redim.range(**color_range).opts(
            active_tools=['wheel_zoom', 'pan'])

        self.tap_stream.source = graph

        if has_cartopy and is_geo:
            graph = (
                feature_map * graph
                if self.kwargs['features'] != ['None'] else graph
            )
            if show_map:
                graph = base_map * graph
            self.tap_stream.source = graph

        self.create_selectors_players(graph)

    else:  # if one or both x,y are var_dims
        self.var_dims = list(self.data[self.var].dims)
        # var_selector_dims refers to dims for which index_selectors
        # would be created
        self.var_selector_dims = self.kwargs['dims_to_select_animate']

        for dim in self.var_selector_dims:
            ops = list(self.data[self.var][dim].values)
            if self.kwargs[dim] == 'select':
                selector = pn.widgets.Select(name=dim, options=ops)
            else:
                selector = pn.widgets.DiscretePlayer(name=dim,
                                                     value=ops[0],
                                                     options=ops)
            self.index_selectors.append(selector)
            self._register(selector, selector.name)
            self.connect(selector.name, self.create_indexed_graph)

        self.create_indexed_graph()
        for selector in self.index_selectors:
            if isinstance(selector, pn.widgets.Select):
                self.output[1].append(selector)
            else:
                player = player_with_name_and_value(selector)
                self.output[1].append(player)
def create_indexed_graph(self, *args):
    """
    Creates a graph for the dimensions selected in widgets `x` and `y`.

    This is used when values selected in `x` and `y` are not data
    coordinates (i.e. one or both values are data dimensions).
    """
    self.kwargs = self.control.kwargs
    selection = {}  # to collect the value of index selectors
    for i, dim in enumerate(list(self.var_selector_dims)):
        selection[dim] = self.index_selectors[i].value
    graph_opts = {'x': self.kwargs['x'],
                  'y': self.kwargs['y'],
                  'title': self.var,
                  'height': self.kwargs['height'],
                  'width': self.kwargs['width'],
                  'cmap': self.kwargs['cmap'],
                  'colorbar': self.kwargs['colorbar'],
                  'rasterize': self.kwargs['rasterize']}
    dims_to_agg = self.kwargs['dims_to_agg']
    color_scale = self.kwargs['color_scale']
    use_all_data = self.kwargs['compute min/max from all data']

    sel_data = self.data[self.var]

    for dim in dims_to_agg:
        if self.kwargs[dim] == 'count':
            sel_data = (~ sel_data.isnull()).sum(dim)
        else:
            agg = self.kwargs[dim]
            sel_data = getattr(sel_data, agg)(dim)

    # rename the sel_data in case it is a coordinate, because we
    # cannot create a Dataset from a DataArray with the same name
    # as one of its coordinates
    if sel_data.name in self.data.coords:
        sel_data = sel_data.to_dataset(name=f'{sel_data.name}_')

    if not use_all_data:  # do the selection earlier
        sel_data = sel_data.sel(**selection, drop=True)

    # BUGFIX: was `color_scale is not 'linear'` — identity comparison
    # with a string literal; use `!=`.
    if color_scale != 'linear':
        sel_data = getattr(numpy, color_scale)(sel_data)  # Color Scaling

    cmin, cmax = self.kwargs['cmap lower limit'], self.kwargs['cmap upper limit']
    cmin, cmax = (cmin, cmax) if is_float(cmin) and is_float(cmax) else ('', '')

    # It is better to set initial values as 0.1,0.9 rather than
    # 0,1(min, max) to get a color balance graph
    c_lim_lower, c_lim_upper = (
        (float(cmin), float(cmax)) if cmin and cmax
        else find_cmap_limits(sel_data))

    color_range = {sel_data.name: (c_lim_lower, c_lim_upper)}
    if not cmin:  # if user left blank or initial values are empty
        self.control.style.lower_limit.value = str(round(c_lim_lower, 5))
        self.control.style.upper_limit.value = str(round(c_lim_upper, 5))

    if use_all_data:  # do the selection later
        sel_data = sel_data.sel(**selection, drop=True)

    assign_opts = {dim: self.data[dim] for dim in sel_data.dims}
    graph = sel_data.assign_coords(
        **assign_opts).hvplot.quadmesh(**graph_opts).redim.range(
        **color_range).opts(active_tools=['wheel_zoom', 'pan'])
    self.graph = graph
    if len(self.data[self.var].dims) > 2 and self.kwargs['extract along']:
        # Series extraction is possible: overlay the tap layer.
        self.tap_stream.source = graph
        self.taps_graph = hv.DynamicMap(
            self.create_taps_graph,
            streams=[self.tap_stream, self.clear_points])
        self.output[0] = self.graph * self.taps_graph
        self.clear_series_button.disabled = False
    else:
        self.output[0] = self.graph
        self.clear_series_button.disabled = True
def create_taps_graph(self, x, y, clear=False):
    """
    Create an output layer in the graph which responds to taps.

    Whenever the user taps (or clicks) the graph, a glyph is overlaid
    at that location and a series is extracted there.
    """
    # `color_pool` is an itertools.cycle (already an iterator); the
    # original wrapped it in a redundant `iter()` call.
    color = next(self.color_pool)
    if None not in [x, y]:
        self.taps.append((x, y, color))
    if self.control.kwargs['extract along'] is None:
        self.taps = []
    is_geo = self.kwargs['is_geo'] if 'is_geo' in self.kwargs else None
    geo_disabled = self.control.projection.is_geo.disabled if is_geo else None
    # Choose between gv.Points and hv.Points
    if is_geo and geo_disabled is False:
        tapped_map = gv.Points(self.taps, vdims=['z'])
    else:
        tapped_map = hv.Points(self.taps, vdims=['z'])
    tapped_map.opts(color='z', marker='triangle', line_color='black',
                    size=8)
    self.series_graph[0] = self.create_series_graph(x, y, color, clear)
    return tapped_map
def create_series_graph(self, x, y, color, clear=False):
    """
    Extract a series at a given point, and plot it.

    The series plotted has same color as that of the marker depicting
    the location of the tap.

    The following cases have been handled:

    `Case 1`: When both x and y are NOT coords (i.e. are dims)
    `Case 2`: When both x and y are coords
        ``2a``: Both are 1-dimensional
        ``2b``: Both are 2-dimensional with same dimensions.
        ``2c``: Both are 2-dimensional with different dims or are
                multi-dimensional coordinates. Here we are unable to
                extract.

    Note that ``Case 1`` and ``Case 2a`` can be handled with the same
    code.
    """
    extract_along = self.control.kwargs['extract along']
    if None not in [x, y] and extract_along:
        color = self.taps[-1][-1] if self.taps[-1][-1] else None
        # BUGFIX: was `dim is not extract_along` — identity comparison on
        # strings; equal-but-distinct string objects would slip through.
        other_dims = [dim for dim in self.kwargs['remaining_dims'] if
                      dim != extract_along]
        # to use the value selected in index selector for selecting
        # data to create series. In case of aggregation, plot is
        # created along 0th val of the dim.
        if len(other_dims):
            other_dim_sels = {}
            for dim in other_dims:
                dim_found = False
                for dim_sel in self.index_selectors:
                    long_name = self.data[dim].long_name if hasattr(
                        self.data[dim], 'long_name') else None
                    if dim_sel.name == dim or dim_sel.name == long_name:
                        val = dim_sel.value
                        other_dim_sels.update({dim: val})
                        dim_found = True
                if not dim_found:  # when dim is used for aggregation
                    val = self.data[dim][0].values
                    other_dim_sels.update({dim: val})

        # Case 1 and 2a
        if not self.kwargs['are_var_coords'] or self.both_coords_1d():
            series_sel = {
                self.kwargs['x']: self.correct_val(self.kwargs['x'], x),
                self.kwargs['y']: self.correct_val(self.kwargs['y'], y)}
        # Case 2b
        elif self.both_coords_2d_with_same_dims():
            y_dim, x_dim = self.data[self.kwargs['x']].dims
            # Nearest grid point on the 2-d mesh; presumably lat/lon in
            # degrees (hence the cos-weighted distance) — TODO confirm.
            y_mean = self.data[self.kwargs['y']].mean() * np.pi / 180.
            a = (self.data[self.kwargs['y']] - y) ** 2 + (
                (self.data[self.kwargs['x']] - x) * np.cos(
                    y_mean)) ** 2
            j, i = np.unravel_index(a.argmin(), a.shape)
            series_sel = {x_dim: self.correct_val(x_dim, i),
                          y_dim: self.correct_val(y_dim, j)}
        # Case 2c
        else:
            logger.debug("Cannot extract 2d coords with different dims and"
                         " multi-dimensional coords.")
            return self.series

        if len(other_dims):
            series_sel.update(other_dim_sels)

        sel_series_data = self.data[self.var]
        for dim, val in series_sel.items():
            sel_series_data = sel_val_from_dim(sel_series_data, dim, val)

        series_df = pd.DataFrame({extract_along: self.data[extract_along],
                                  self.var: np.asarray(sel_series_data)})
        tooltips = [(extract_along, f"@{extract_along}"),
                    (self.var, f"@{self.var}")]
        if len(other_dims):
            for dim, val in other_dim_sels.items():
                tooltips.append((dim, str(val)))
        hover = HoverTool(tooltips=tooltips)

        series_map = series_df.hvplot(x=extract_along, y=self.var,
                                      height=self.kwargs['height'],
                                      width=self.kwargs['width'],
                                      tools=[hover])
        self.series = series_map.opts(color=color) * self.series

    return self.series
def create_selectors_players(self, graph):
    """
    Converts the sliders generated by hvplot into selectors/players.

    This is applicable only when both `x` and `y` are present in
    variable coordinates. It converts any sliders generated by hvplot
    into selectors/players and moves them to the bottom of graph.
    """
    if len(self.data[self.var].dims) > 2 and self.kwargs['extract along']:
        self.taps_graph = hv.DynamicMap(self.create_taps_graph,
                                        streams=[self.tap_stream,
                                                 self.clear_points])
        self.clear_series_button.disabled = False
        graph = graph * self.taps_graph
    else:
        self.clear_series_button.disabled = True
    graph = pn.Row(graph)
    try:
        if graph[0][1]:  # if sliders are generated
            self.output[0] = graph[0][0]

            # link the generated slider with agg selector in fields
            for slider in graph[0][1]:
                for dim in self.kwargs['dims_to_select_animate']:
                    long_name = self.data[dim].long_name if hasattr(
                        self.data[dim], 'long_name') else None
                    if slider.name == dim or slider.name == long_name:
                        if self.kwargs[dim] == 'select':
                            selector = convert_widget(slider,
                                                      pn.widgets.Select())
                        else:
                            selector = convert_widget(
                                slider, pn.widgets.DiscretePlayer())
                        self.index_selectors.append(selector)

            for selector in self.index_selectors:
                if isinstance(selector, pn.widgets.Select):
                    self.output[1].append(selector)
                else:
                    player = player_with_name_and_value(selector)
                    self.output[1].append(player)
    except Exception:
        # BUGFIX: was a bare `except:` (would also swallow SystemExit and
        # KeyboardInterrupt). No sliders were generated: show plain graph.
        self.output[0] = graph
def set_data(self, data):
    """Store *data*, wrapping a DataArray into a one-variable Dataset."""
    if isinstance(data, xr.DataArray):
        self.data = xr.Dataset({f'{data.name}': data})
    else:
        self.data = data
def set_coords(self, *args):
    """
    Reset the dataset's coordinates to the user's selection.

    Indexed (dimension) coordinates cannot be reset in xarray, so they
    are always added back into the new coordinate set.
    """
    self.data = self.data.reset_coords()
    indexed = set(self.data.dims) & set(self.data.coords)
    selected = set(args[0]) | indexed
    # This `set_coords` is xarray's Dataset.set_coords, not this method.
    self.data = self.data.set_coords(selected)
    self.control.set_coords(self.data)
def check_is_plottable(self, var):
    """
    Enable or disable the plot button for the selected variable.

    Variables with one (or zero) dimensions cannot be plotted, so the
    button is disabled for them.
    """
    # Re-enable first (important once it has been disabled), then
    # disable again if the variable is not plottable.
    self.plot_button.disabled = False
    selected = self.data[var[0]]
    self.plot_button.disabled = len(selected.dims) <= 1
def correct_val(self, dim, x):
    """
    Cast a tapped coordinate value to the dtype of dimension *dim*.

    Integer and float dtypes are converted accordingly; anything else
    (e.g. time-like values) is passed on as a string.
    """
    kind = self.data[dim].dtype.kind
    if kind == 'i':
        return int(x)
    if kind == 'f':
        return float(x)
    return str(x)
def both_coords_1d(self):
    """True when the selected x and y coordinates are both 1-dimensional."""
    ndims = [len(self.data[self.kwargs[axis]].dims) for axis in ('x', 'y')]
    return ndims == [1, 1]
def both_coords_2d_with_same_dims(self):
    """True when x and y are both 2-d coordinates over the same dims."""
    x_dims = self.data[self.kwargs['x']].dims
    y_dims = self.data[self.kwargs['y']].dims
    share_dims = sorted(x_dims) == sorted(y_dims)
    return len(x_dims) == 2 and len(y_dims) == 2 and share_dims
def find_cmap_limits(sel_data):
    """Return the (10th, 90th) percentiles of *sel_data* for cmap limits."""
    if isinstance(sel_data.data, dask.array.core.Array):
        # Lazy (dask-backed) data: compute the percentiles with dask.
        method = 'tdigest' if has_crick_tdigest else 'default'
        return dask.array.percentile(sel_data.data.ravel(), (10, 90),
                                     method=method).compute()
    # Eager (numpy-backed) data: use the xarray quantile machinery.
    return [float(q) for q in sel_data.quantile([0.1, 0.9])]
def sel_val_from_dim(data, dim, x):
    """
    Select value *x* from dimension *dim* of *data*.

    Tries an exact label match first; when that fails (e.g. a tapped
    coordinate that lies between grid points), falls back to the
    nearest-neighbour lookup.
    """
    try:
        return data.sel({dim: x})
    except Exception:
        # BUGFIX: was a bare `except:` (would also swallow SystemExit and
        # KeyboardInterrupt). Fall back to nearest-neighbour selection.
        return data.sel({dim: x}, method='nearest')
def process_proj_params(params):
    """
    Parse projection parameters from their string form.

    *params* is a Python-literal string of a dict; a truthy 'globe'
    entry is replaced by a ``cartopy.crs.Globe`` built from its value.
    """
    parsed = ast.literal_eval(params)
    globe_spec = parsed.get('globe')
    if globe_spec:
        parsed['globe'] = ccrs.Globe(**globe_spec)
    return parsed
| 43.82947 | 126 | 0.55736 | import ast
import dask
import panel as pn
import pandas as pd
import numpy as np
import xarray as xr
import hvplot.xarray
import hvplot.pandas
import holoviews as hv
from holoviews import streams
from bokeh.models import HoverTool
import warnings
from itertools import cycle
import numpy
from .sigslot import SigSlot
from .control import Control
from .utils import convert_widget, player_with_name_and_value, is_float
from .compatibility import ccrs, gv, gf, has_cartopy, logger, has_crick_tdigest
class Dashboard(SigSlot):
def __init__(self, data, initial_params={}):
super().__init__()
if not isinstance(data, xr.core.dataarray.DataWithCoords):
raise ValueError("Input should be an xarray data object, not %s" % type(data))
self.set_data(data)
self.initial_params = initial_params
self.control = Control(self.data)
self.plot_button = pn.widgets.Button(name='Plot', width=200,
disabled=True)
self.index_selectors = []
self.graph = pn.Spacer(name='Graph')
self.taps_graph = hv.Points([])
self.series_graph = pn.Row(pn.Spacer(name='Series Graph'))
self.clear_series_button = pn.widgets.Button(name='Clear',
width=200,
disabled=True)
self.output = pn.Row(self.graph,
pn.Column(name='Index_selectors'))
self._register(self.plot_button, 'plot_clicked', 'clicks')
self.connect('plot_clicked', self.create_graph)
self._register(self.control.coord_setter.coord_selector, 'set_coords')
self.connect("set_coords", self.set_coords)
self._register(self.clear_series_button, 'clear_series', 'clicks')
self.connect('clear_series', self.clear_series)
self.control.displayer.connect('variable_selected',
self.check_is_plottable)
self.control.displayer.connect('variable_selected',
self._link_aggregation_selectors)
self.control.fields.connect('x', self._link_aggregation_selectors)
self.control.fields.connect('y', self._link_aggregation_selectors)
self.panel = pn.Column(self.control.panel,
pn.Row(self.plot_button,
self.clear_series_button),
self.output,
self.series_graph, width_policy='max')
if len(list(self.data.variables)) == 1:
self.control.displayer.select.value = list(self.data.variables)
self.control.setup_initial_values(self.initial_params)
self.taps = []
self.tap_stream = streams.Tap(transient=True)
colors = ['#60fffc', '#6da252', '#ff60d4', '#ff9400', '#f4e322',
'#229cf4', '#af9862', '#629baf', '#7eed5a', '#e29ec8',
'#ff4300']
self.color_pool = cycle(colors)
self.clear_points = hv.streams.Stream.define(
'Clear_points', clear=False)(transient=True)
def clear_series(self, *args):
if not self.clear_series_button.disabled:
self.series_graph[0] = pn.Spacer(name='Series Graph')
self.series = hv.Points([]).opts(height=self.kwargs['height'],
width=self.kwargs['width'])
self.taps.clear()
self.clear_points.event(clear=True)
def _link_aggregation_selectors(self, *args):
for dim_selector in self.control.kwargs['remaining_dims']:
self.control.fields.connect(dim_selector, self.control.style.setup)
def create_graph(self, *args):
self.kwargs = self.control.kwargs
self.var = self.kwargs['Variables']
if self.index_selectors:
for selector in self.index_selectors:
del selector
self.index_selectors = []
self.output[1].clear()
self.series_graph[0] = pn.Spacer(name='Series Graph')
self.series = hv.Points([]).opts(height=self.kwargs['height'],
width=self.kwargs['width'])
self.taps.clear()
self.control.fields.connect('extract_along', self.clear_series)
are_var_coords = self.kwargs['are_var_coords']
if are_var_coords:
graph_opts = {'x': self.kwargs['x'],
'y': self.kwargs['y'],
'title': self.var,
'height': self.kwargs['height'],
'width': self.kwargs['width'],
'cmap': self.kwargs['cmap'],
'colorbar': self.kwargs['colorbar'],
'rasterize': self.kwargs['rasterize']}
color_scale = self.kwargs['color_scale']
dims_to_agg = self.kwargs['dims_to_agg']
use_all_data = self.kwargs['compute min/max from all data']
sel_data = self.data[self.var]
if has_cartopy:
is_geo = self.kwargs['is_geo']
base_map = self.kwargs['basemap']
show_map = True if base_map != None else False
if is_geo:
crs_params = self.kwargs['crs params']
crs_params = process_proj_params(crs_params)
crs = getattr(ccrs, self.kwargs['crs'])(**crs_params)
geo_ops = {'alpha': self.kwargs['alpha'],
'project': self.kwargs['project'],
'global_extent': self.kwargs['global_extent'],
'geo': True,
'crs': crs}
if not show_map:
proj_val = self.kwargs['projection']
if proj_val:
proj_params = self.kwargs['projection params']
proj_params = process_proj_params(proj_params)
projection = getattr(ccrs, self.kwargs['projection'])(**proj_params)
geo_ops.update({'projection': projection})
graph_opts.update(geo_ops)
feature_map = gv.Overlay([getattr(gf, feat) for feat in self.kwargs['features'] if feat is not 'None'])
for dim in dims_to_agg:
if self.kwargs[dim] == 'count':
sel_data = (~ sel_data.isnull()).sum(dim)
else:
agg = self.kwargs[dim]
sel_data = getattr(sel_data, agg)(dim)
if self.var in list(sel_data.coords):
sel_data = sel_data.to_dataset(name=f'{sel_data.name}_')
if color_scale is not 'linear':
sel_data = getattr(numpy, color_scale)(sel_data)
if not use_all_data:
sels = {dim: 0 for dim in self.kwargs['dims_to_select_animate']}
sel_data_for_cmap = sel_data.isel(**sels, drop=True)
else:
sel_data_for_cmap = sel_data
cmin, cmax = self.kwargs['cmap lower limit'], self.kwargs['cmap upper limit']
cmin, cmax = (cmin, cmax) if is_float(cmin) and is_float(cmax) else ('', '')
c_lim_lower, c_lim_upper = (
(float(cmin), float(cmax)) if cmin and cmax
else find_cmap_limits(sel_data_for_cmap))
color_range = {sel_data.name: (c_lim_lower, c_lim_upper)}
if not cmin:
self.control.style.lower_limit.value = str(round(c_lim_lower, 5))
self.control.style.upper_limit.value = str(round(c_lim_upper, 5))
assign_opts = {dim: self.data[dim] for dim in sel_data.dims}
graph = sel_data.assign_coords(
**assign_opts).hvplot.quadmesh(
**graph_opts).redim.range(**color_range).opts(
active_tools=['wheel_zoom', 'pan'])
self.tap_stream.source = graph
if has_cartopy and is_geo:
graph = (
feature_map * graph
if self.kwargs['features'] != ['None'] else graph
)
if show_map:
graph = base_map * graph
self.tap_stream.source = graph
self.create_selectors_players(graph)
else:
self.var_dims = list(self.data[self.var].dims)
self.var_selector_dims = self.kwargs['dims_to_select_animate']
for dim in self.var_selector_dims:
ops = list(self.data[self.var][dim].values)
if self.kwargs[dim] == 'select':
selector = pn.widgets.Select(name=dim, options=ops)
else:
selector = pn.widgets.DiscretePlayer(name=dim,
value=ops[0],
options=ops)
self.index_selectors.append(selector)
self._register(selector, selector.name)
self.connect(selector.name, self.create_indexed_graph)
self.create_indexed_graph()
for selector in self.index_selectors:
if isinstance(selector, pn.widgets.Select):
self.output[1].append(selector)
else:
player = player_with_name_and_value(selector)
self.output[1].append(player)
def create_indexed_graph(self, *args):
    """Render the quadmesh for the current selector/player values.

    Reads widget state from ``self.control.kwargs``, aggregates the
    dimensions marked for aggregation, applies the optional color-scale
    transform, computes (or reuses) colormap limits, and pushes the
    resulting plot — plus the tap overlay when series extraction is
    enabled — into ``self.output``.
    """
    self.kwargs = self.control.kwargs
    # Current value of every select/animate widget, keyed by dimension.
    selection = {}
    for i, dim in enumerate(list(self.var_selector_dims)):
        selection[dim] = self.index_selectors[i].value
    graph_opts = {'x': self.kwargs['x'],
                  'y': self.kwargs['y'],
                  'title': self.var,
                  'height': self.kwargs['height'],
                  'width': self.kwargs['width'],
                  'cmap': self.kwargs['cmap'],
                  'colorbar': self.kwargs['colorbar'],
                  'rasterize': self.kwargs['rasterize']}
    dims_to_agg = self.kwargs['dims_to_agg']
    color_scale = self.kwargs['color_scale']
    use_all_data = self.kwargs['compute min/max from all data']
    sel_data = self.data[self.var]
    for dim in dims_to_agg:
        if self.kwargs[dim] == 'count':
            # 'count' tallies non-null values rather than aggregating them.
            sel_data = (~ sel_data.isnull()).sum(dim)
        else:
            agg = self.kwargs[dim]
            sel_data = getattr(sel_data, agg)(dim)
    if sel_data.name in self.data.coords:
        # Rename to avoid clashing with a coordinate of the same name.
        sel_data = sel_data.to_dataset(name=f'{sel_data.name}_')
    if not use_all_data:
        # Slice first so colormap limits reflect only the selected slab.
        sel_data = sel_data.sel(**selection, drop=True)
    # BUG FIX: the original used `is not`, an identity check that only
    # works because of CPython string interning; compare by value.
    if color_scale != 'linear':
        sel_data = getattr(numpy, color_scale)(sel_data)
    cmin, cmax = self.kwargs['cmap lower limit'], self.kwargs['cmap upper limit']
    cmin, cmax = (cmin, cmax) if is_float(cmin) and is_float(cmax) else ('', '')
    c_lim_lower, c_lim_upper = (
        (float(cmin), float(cmax)) if cmin and cmax
        else find_cmap_limits(sel_data))
    color_range = {sel_data.name: (c_lim_lower, c_lim_upper)}
    if not cmin:  # auto limits: reflect them back into the style widgets
        self.control.style.lower_limit.value = str(round(c_lim_lower, 5))
        self.control.style.upper_limit.value = str(round(c_lim_upper, 5))
    if use_all_data:
        # Slice *after* computing limits so they cover the whole variable.
        sel_data = sel_data.sel(**selection, drop=True)
    assign_opts = {dim: self.data[dim] for dim in sel_data.dims}
    graph = sel_data.assign_coords(
        **assign_opts).hvplot.quadmesh(**graph_opts).redim.range(
        **color_range).opts(active_tools=['wheel_zoom', 'pan'])
    self.graph = graph
    if len(self.data[self.var].dims) > 2 and self.kwargs['extract along']:
        # Overlay tap markers so users can extract 1-D series by clicking.
        self.tap_stream.source = graph
        self.taps_graph = hv.DynamicMap(
            self.create_taps_graph,
            streams=[self.tap_stream, self.clear_points])
        self.output[0] = self.graph * self.taps_graph
        self.clear_series_button.disabled = False
    else:
        self.output[0] = self.graph
        self.clear_series_button.disabled = True
def create_taps_graph(self, x, y, clear=False):
    """Return a Points overlay of every location tapped so far.

    Invoked by the DynamicMap for each tap ``(x, y)`` and for the
    clear-points stream (*clear*); also refreshes the extracted-series
    plot as a side effect via ``create_series_graph``.
    """
    # NOTE(review): `color_pool` is presumably an infinite iterator (e.g.
    # itertools.cycle) — `iter()` on one returns the same object, so this
    # advances it; with a plain list it would always yield the first color.
    color = next(iter(self.color_pool))
    if None not in [x, y]:
        self.taps.append((x, y, color))
    if self.control.kwargs['extract along'] is None:
        # No extraction dimension selected: discard accumulated taps.
        self.taps = []
    is_geo = self.kwargs['is_geo'] if 'is_geo' in self.kwargs else None
    geo_disabled = self.control.projection.is_geo.disabled if is_geo else None
    # Use geoviews points only when geographic plotting is actually active.
    if is_geo and geo_disabled is False:
        tapped_map = gv.Points(self.taps, vdims=['z'])
    else:
        tapped_map = hv.Points(self.taps, vdims=['z'])
    # The 'z' value dimension carries each tap's color, so markers match
    # the series they produced.
    tapped_map.opts(color='z', marker='triangle', line_color='black',
                    size=8)
    self.series_graph[0] = self.create_series_graph(x, y, color, clear)
    return tapped_map
def create_series_graph(self, x, y, color, clear=False):
    """Extract and overlay a 1-D series at the tapped point ``(x, y)``.

    The series runs along the 'extract along' dimension; all remaining
    dimensions are pinned to their current selector values (or the first
    coordinate value when no selector exists). Returns the accumulated
    holoviews overlay of every extracted series.
    """
    extract_along = self.control.kwargs['extract along']
    if None not in [x, y] and extract_along:
        color = self.taps[-1][-1] if self.taps[-1][-1] else None
        # BUG FIX: the original filtered with `dim is not extract_along`,
        # an identity test that only works via CPython string interning;
        # compare by value instead.
        other_dims = [dim for dim in self.kwargs['remaining_dims'] if
                      dim != extract_along]
        if len(other_dims):
            # Pin every non-extraction dimension from its widget, falling
            # back to the first coordinate value when no widget matches.
            other_dim_sels = {}
            for dim in other_dims:
                dim_found = False
                for dim_sel in self.index_selectors:
                    long_name = self.data[dim].long_name if hasattr(
                        self.data[dim], 'long_name') else None
                    if dim_sel.name == dim or dim_sel.name == long_name:
                        val = dim_sel.value
                        other_dim_sels.update({dim: val})
                        dim_found = True
                if not dim_found:
                    val = self.data[dim][0].values
                    other_dim_sels.update({dim: val})
        if not self.kwargs['are_var_coords'] or self.both_coords_1d():
            series_sel = {
                self.kwargs['x']: self.correct_val(self.kwargs['x'], x),
                self.kwargs['y']: self.correct_val(self.kwargs['y'], y)}
        elif self.both_coords_2d_with_same_dims():
            # Nearest grid cell to the tap: squared distance with the
            # x-offset weighted by cos(mean y, degrees -> radians).
            y_dim, x_dim = self.data[self.kwargs['x']].dims
            y_mean = self.data[self.kwargs['y']].mean() * np.pi / 180.
            a = (self.data[self.kwargs['y']] - y) ** 2 + (
                (self.data[self.kwargs['x']] - x) * np.cos(
                    y_mean)) ** 2
            j, i = np.unravel_index(a.argmin(), a.shape)
            series_sel = {x_dim: self.correct_val(x_dim, i),
                          y_dim: self.correct_val(y_dim, j)}
        else:
            logger.debug("Cannot extract 2d coords with different dims and"
                         " multi-dimensional coords.")
            return self.series
        if len(other_dims):
            series_sel.update(other_dim_sels)
        sel_series_data = self.data[self.var]
        for dim, val in series_sel.items():
            sel_series_data = sel_val_from_dim(sel_series_data, dim, val)
        series_df = pd.DataFrame({extract_along: self.data[extract_along],
                                  self.var: np.asarray(sel_series_data)})
        tooltips = [(extract_along, f"@{extract_along}"),
                    (self.var, f"@{self.var}")]
        if len(other_dims):
            # Show the pinned dimension values in the hover tooltip.
            for dim, val in other_dim_sels.items():
                tooltips.append((dim, str(val)))
        hover = HoverTool(tooltips=tooltips)
        series_map = series_df.hvplot(x=extract_along, y=self.var,
                                      height=self.kwargs['height'],
                                      width=self.kwargs['width'],
                                      tools=[hover])
        self.series = series_map.opts(color=color) * self.series
    return self.series
def create_selectors_players(self, graph):
    """Convert hvplot's auto-generated sliders into Select/Player widgets.

    The quadmesh for >2-D data comes back as a panel layout whose second
    element holds one slider per non-plotted dimension; each slider is
    replaced by a Select or DiscretePlayer widget according to the user's
    per-dimension choice, and the widgets are appended to ``self.output[1]``.
    """
    if len(self.data[self.var].dims) > 2 and self.kwargs['extract along']:
        self.taps_graph = hv.DynamicMap(self.create_taps_graph,
                                        streams=[self.tap_stream,
                                                 self.clear_points])
        self.clear_series_button.disabled = False
        graph = graph * self.taps_graph
    else:
        self.clear_series_button.disabled = True
    graph = pn.Row(graph)
    try:
        if graph[0][1]:  # widget column present only when dims remain
            self.output[0] = graph[0][0]
            for slider in graph[0][1]:
                for dim in self.kwargs['dims_to_select_animate']:
                    long_name = self.data[dim].long_name if hasattr(
                        self.data[dim], 'long_name') else None
                    if slider.name == dim or slider.name == long_name:
                        if self.kwargs[dim] == 'select':
                            selector = convert_widget(slider,
                                                      pn.widgets.Select())
                        else:
                            selector = convert_widget(
                                slider, pn.widgets.DiscretePlayer())
                        self.index_selectors.append(selector)
            for selector in self.index_selectors:
                if isinstance(selector, pn.widgets.Select):
                    self.output[1].append(selector)
                else:
                    player = player_with_name_and_value(selector)
                    self.output[1].append(player)
    # BUG FIX: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; keep the best-effort fallback, but only for
    # ordinary exceptions raised while probing the layout structure.
    except Exception:
        # No widget column in the layout: show the graph as-is.
        self.output[0] = graph
def set_data(self, data):
    """Store *data*, promoting a bare DataArray to a one-variable Dataset."""
    if isinstance(data, xr.DataArray):
        self.data = xr.Dataset({f'{data.name}': data})
    else:
        self.data = data
def set_coords(self, *args):
    """Reset which dataset variables are treated as coordinates.

    ``args[0]`` holds the names currently chosen in the coordinate
    selector widget; dimension (indexed) coordinates are always kept.
    """
    # Demote everything to plain variables first, then re-promote.
    self.data = self.data.reset_coords()
    # Dimension coordinates must stay coordinates even if deselected.
    indexed_coords = set(self.data.dims).intersection(set(self.data.coords))
    new_coords = set(args[0]).union(indexed_coords)
    self.data = self.data.set_coords(new_coords)  # xr.Dataset.set_coords, not this method
    self.control.set_coords(self.data)
def check_is_plottable(self, var):
    """Enable the plot button only when the chosen variable has 2+ dims."""
    # Re-enable first so a previously disabled button can recover.
    self.plot_button.disabled = False
    selected = self.data[var[0]]
    if len(selected.dims) <= 1:
        self.plot_button.disabled = True
def correct_val(self, dim, x):
    """Cast *x* to the Python type matching coordinate *dim*'s dtype kind."""
    kind = self.data[dim].dtype.kind
    if kind == 'i':
        return int(x)
    if kind == 'f':
        return float(x)
    # Anything else (strings, datetimes, ...) is handed over as text.
    return str(x)
def both_coords_1d(self):
    """Return True when both plotted coordinates are one-dimensional."""
    # all() short-circuits, matching the original `and` evaluation order.
    return all(
        len(self.data[self.kwargs[axis]].dims) == 1 for axis in ('x', 'y')
    )
def both_coords_2d_with_same_dims(self):
    """Return True when both plotted coords are 2-D over the same dim pair."""
    dims_x = self.data[self.kwargs['x']].dims
    dims_y = self.data[self.kwargs['y']].dims
    if len(dims_x) != 2 or len(dims_y) != 2:
        return False
    # Order does not matter, only that the two dimension sets agree.
    return sorted(dims_x) == sorted(dims_y)
def find_cmap_limits(sel_data):
    """Return (lower, upper) colormap limits as the 10th/90th percentiles."""
    if isinstance(sel_data.data, dask.array.core.Array):
        # Lazy data: compute percentiles with dask; 'tdigest' is used when
        # the crick package is available (see `has_crick_tdigest` flag).
        method = 'tdigest' if has_crick_tdigest else 'default'
        return dask.array.percentile(sel_data.data.ravel(), (10, 90),
                                     method=method).compute()
    else:  # if sel_data.data is numpy.ndarray
        return [float(q) for q in sel_data.quantile([0.1, 0.9])]
def sel_val_from_dim(data, dim, x):
    """Select the value at ``dim == x``, falling back to nearest-neighbour.

    Tries an exact label lookup first; if that raises (e.g. *x* came from
    a mouse tap and does not match any coordinate label exactly), retries
    with ``method='nearest'``.
    """
    try:
        return data.sel({dim: x})
    # BUG FIX: a bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only ordinary lookup failures should fall back.
    except Exception:
        return data.sel({dim: x}, method='nearest')
def process_proj_params(params):
    """Parse a repr'd projection-parameter dict, building any Globe object.

    *params* is the string representation of a dict; if it contains a
    truthy 'globe' entry, that entry's kwargs are replaced by a
    ``ccrs.Globe`` instance.
    """
    parsed = ast.literal_eval(params)
    globe_kwargs = parsed.get('globe')
    if globe_kwargs:
        parsed['globe'] = ccrs.Globe(**globe_kwargs)
    return parsed
| true | true |
1c38b3278269bec67cf467d295393f336f4b67c6 | 923 | py | Python | topics/urls.py | techstoreclub/codebook | b9b471403984189c890202bc8d7f735129115609 | [
"MIT"
] | 1 | 2020-06-09T06:04:16.000Z | 2020-06-09T06:04:16.000Z | topics/urls.py | techstoreclub/codebook | b9b471403984189c890202bc8d7f735129115609 | [
"MIT"
] | 14 | 2020-03-30T20:08:30.000Z | 2021-12-22T09:06:58.000Z | topics/urls.py | andychase/codebook | b9b471403984189c890202bc8d7f735129115609 | [
"MIT"
] | 1 | 2016-01-03T18:40:34.000Z | 2016-01-03T18:40:34.000Z | from django.urls import re_path
from django.contrib.auth import views as auth_views
from topics.views import sso
from .views import users
from .views import topics
app_name = 'topics'
urlpatterns = [
    # User authentication
    re_path(r'^_login/$', users.login_view, name='login'),
    re_path(r'^_login/sso$', sso.sso),
    re_path(r'^_accounts/login/$', users.login_view, name='login'),
    re_path(r'^_create_account/$', users.create_account_view, name='create_account'),
    re_path(r'^_logout/$', users.logout_view, name='logout'),
    # BUG FIX: class-based views must be routed via .as_view(); passing the
    # class itself raises an error when the URL is resolved.
    re_path(r'^_reset/done/$', auth_views.PasswordResetCompleteView.as_view(),
            name='password_reset_complete'),
    # Topics
    re_path(r'^_tag/$', topics.tag_topic, name='tag_topic'),
    re_path(r'^_icon/([0-9]+)\.ico$', topics.get_link_icon, name='get_link_icon'),
    # Catch-alls: any remaining (or empty) path resolves to a topic page.
    re_path(r'^(.*)/$', topics.get_topic, name='get_topic'),
    re_path(r'^()$', topics.get_topic, name='get_topic'),
]
| 38.458333 | 101 | 0.697725 | from django.urls import re_path
from django.contrib.auth import views as auth_views
from topics.views import sso
from .views import users
from .views import topics
app_name = 'topics'
urlpatterns = [
re_path(r'^_login/$', users.login_view, name='login'),
re_path(r'^_login/sso$', sso.sso),
re_path(r'^_accounts/login/$', users.login_view, name='login'),
re_path(r'^_create_account/$', users.create_account_view, name='create_account'),
re_path(r'^_logout/$', users.logout_view, name='logout'),
re_path(r'^_reset/done/$', auth_views.PasswordResetCompleteView, name='password_reset_complete'),
re_path(r'^_tag/$', topics.tag_topic, name='tag_topic'),
re_path(r'^_icon/([0-9]+)\.ico$', topics.get_link_icon, name='get_link_icon'),
re_path(r'^(.*)/$', topics.get_topic, name='get_topic'),
re_path(r'^()$', topics.get_topic, name='get_topic'),
]
| true | true |
1c38b37aeb114f496749e633a91ea5c09f773f91 | 373 | py | Python | src/interpreter.py | SatishS11/Qrangen | 917b6b9b09669b58251bbdc15427ea0105dbc521 | [
"Apache-2.0"
] | 2 | 2019-09-08T00:05:06.000Z | 2020-07-22T18:39:41.000Z | src/interpreter.py | SatishS11/Qrangen | 917b6b9b09669b58251bbdc15427ea0105dbc521 | [
"Apache-2.0"
] | 9 | 2020-03-24T17:07:02.000Z | 2022-03-11T23:48:38.000Z | src/interpreter.py | SatishS11/Qrangen | 917b6b9b09669b58251bbdc15427ea0105dbc521 | [
"Apache-2.0"
] | 4 | 2019-05-28T19:32:10.000Z | 2021-03-04T09:50:04.000Z | class Interpreter:
def __init__(self, n=1, mode='decimal'):
self.mode = mode
self.n = n
def extract_random_number(self, result):
numbers = result.get_memory()
if self.mode == 'binary':
return numbers
else:
return [int(number, 2) for number in numbers]
def binary_to_decimal(b):
    """Convert a binary string such as '101' to its integer value."""
    return int(b, 2)
| 23.3125 | 57 | 0.58445 | class Interpreter:
def __init__(self, n=1, mode='decimal'):
self.mode = mode
self.n = n
def extract_random_number(self, result):
numbers = result.get_memory()
if self.mode == 'binary':
return numbers
else:
return [int(number, 2) for number in numbers]
def binary_to_decimal(b):
return int(b,2)
| true | true |
1c38b4e2c6d0bbdea0a300cfd8975351afde846c | 580 | py | Python | backend/server/restapi/views.py | luismsanchez/RestaurantApp | 3793be5ec814463a37dd8da7d1b2343a1d69e28b | [
"MIT"
] | null | null | null | backend/server/restapi/views.py | luismsanchez/RestaurantApp | 3793be5ec814463a37dd8da7d1b2343a1d69e28b | [
"MIT"
] | null | null | null | backend/server/restapi/views.py | luismsanchez/RestaurantApp | 3793be5ec814463a37dd8da7d1b2343a1d69e28b | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import viewsets
from .models import Cliente, Producto, Pedido
from .serializer import ClienteSerializer, ProductoSerializer, PedidoSerializer
class ClienteViewSet(viewsets.ModelViewSet):
    """DRF ModelViewSet exposing CRUD operations over Cliente records."""
    queryset = Cliente.objects.all()
    serializer_class = ClienteSerializer
class ProductoViewSet(viewsets.ModelViewSet):
    """DRF ModelViewSet exposing CRUD operations over Producto records."""
    queryset = Producto.objects.all()
    serializer_class = ProductoSerializer
class PedidoViewSet(viewsets.ModelViewSet):
    """DRF ModelViewSet exposing CRUD operations over Pedido records."""
    queryset = Pedido.objects.all()
    serializer_class = PedidoSerializer
from rest_framework import viewsets
from .models import Cliente, Producto, Pedido
from .serializer import ClienteSerializer, ProductoSerializer, PedidoSerializer
class ClienteViewSet(viewsets.ModelViewSet):
queryset = Cliente.objects.all()
serializer_class = ClienteSerializer
class ProductoViewSet(viewsets.ModelViewSet):
queryset = Producto.objects.all()
serializer_class = ProductoSerializer
class PedidoViewSet(viewsets.ModelViewSet):
queryset = Pedido.objects.all()
serializer_class = PedidoSerializer | true | true |
1c38b5053ed48e42abbabd6171297061ed1cb8f3 | 1,521 | py | Python | real_model.py | bryanlincoln/curiosity-driven-exploration | 2c099eb851a9319845f06ca030fee1bf4d587de2 | [
"MIT"
] | null | null | null | real_model.py | bryanlincoln/curiosity-driven-exploration | 2c099eb851a9319845f06ca030fee1bf4d587de2 | [
"MIT"
] | null | null | null | real_model.py | bryanlincoln/curiosity-driven-exploration | 2c099eb851a9319845f06ca030fee1bf4d587de2 | [
"MIT"
] | null | null | null | from chainer import Link, Chain, ChainList
import chainer.links as L
import chainer.functions as F
import chainerrl
class RealPPOModel(Chain):
    """Actor-critic network for a chainerrl PPO agent.

    A shared conv + linear trunk processes the 'retina' image; its features
    are concatenated with proprioceptive inputs and fed to separate policy
    (Gaussian) and value heads.
    """

    def __init__(self, n_actions):
        super(RealPPOModel, self).__init__()
        with self.init_scope():
            # Convolutional feature extractor over the retina image.
            self.conv1 = L.Convolution2D(None, 32, 8, stride=4)
            self.conv2 = L.Convolution2D(None, 64, 4, stride=2)
            #self.conv3 = L.Convolution2D(None, 64, 3, stride=1)
            self.l1 = L.Linear(None, 512)
            # Separate hidden layers for the policy and value heads.
            self.l2_pi = L.Linear(None, 256)
            self.l2_val = L.Linear(None, 256)
            self.pi = L.Linear(None, n_actions)
            self.val = L.Linear(None, 1)
            self.gaussianPolicy = chainerrl.policies.GaussianHeadWithStateIndependentCovariance(
                action_size=n_actions,
                var_type='diagonal',
                var_func=lambda x: F.exp(2 * x),  # Parameterize log std
                var_param_init=0,  # log std = 0 => std = 1
            )

    def forward(self, x):
        """Return (policy distribution, state value) for observation *x*.

        NOTE(review): *x* appears to be a mapping with 'retina' (image),
        'joint_positions' and 'touch_sensors' arrays — confirm against the
        environment's observation space.
        """
        # shared layers
        im = x['retina']
        im = F.relu(self.conv1(im))
        im = F.relu(self.conv2(im))
        #im = F.relu(self.conv3(im))
        im = self.l1(im)
        imx = F.concat([im, x['joint_positions'], x['touch_sensors']])
        # pi layers
        l2_pi = F.relu(self.l2_pi(imx))
        pi = self.pi(l2_pi)
        pi = self.gaussianPolicy(pi)
        # value layers
        value = F.relu(self.l2_val(imx))
        value = self.val(value)
        return pi, value
| 34.568182 | 96 | 0.566075 | from chainer import Link, Chain, ChainList
import chainer.links as L
import chainer.functions as F
import chainerrl
class RealPPOModel(Chain):
def __init__(self, n_actions):
super(RealPPOModel, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(None, 32, 8, stride=4)
self.conv2 = L.Convolution2D(None, 64, 4, stride=2)
self.l1 = L.Linear(None, 512)
self.l2_pi = L.Linear(None, 256)
self.l2_val = L.Linear(None, 256)
self.pi = L.Linear(None, n_actions)
self.val = L.Linear(None, 1)
self.gaussianPolicy = chainerrl.policies.GaussianHeadWithStateIndependentCovariance(
action_size=n_actions,
var_type='diagonal',
var_func=lambda x: F.exp(2 * x),
var_param_init=0,
)
def forward(self, x):
im = x['retina']
im = F.relu(self.conv1(im))
im = F.relu(self.conv2(im))
im = self.l1(im)
imx = F.concat([im, x['joint_positions'], x['touch_sensors']])
l2_pi = F.relu(self.l2_pi(imx))
pi = self.pi(l2_pi)
pi = self.gaussianPolicy(pi)
value = F.relu(self.l2_val(imx))
value = self.val(value)
return pi, value
| true | true |
1c38b519a8a78124e3beaaa80e76163684eca1a6 | 813 | py | Python | tf_encrypted/keras/layers/__init__.py | redshiftzero/tf-encrypted | d48de7ab37e270fac2a0ae1fc6de87cb6bd9ce6c | [
"Apache-2.0"
] | 3 | 2018-10-18T19:36:02.000Z | 2020-07-05T19:46:23.000Z | tf_encrypted/keras/layers/__init__.py | redshiftzero/tf-encrypted | d48de7ab37e270fac2a0ae1fc6de87cb6bd9ce6c | [
"Apache-2.0"
] | null | null | null | tf_encrypted/keras/layers/__init__.py | redshiftzero/tf-encrypted | d48de7ab37e270fac2a0ae1fc6de87cb6bd9ce6c | [
"Apache-2.0"
] | null | null | null | """Higher-level layer abstractions built on TF Encrypted."""
from __future__ import absolute_import
from tf_encrypted.keras.engine.input_layer import Input
from tf_encrypted.keras.layers.activation import Activation
from tf_encrypted.keras.layers.convolutional import Conv2D
from tf_encrypted.keras.layers.dense import Dense
from tf_encrypted.keras.layers.flatten import Flatten
from tf_encrypted.keras.layers.pooling import AveragePooling2D, MaxPooling2D
from tf_encrypted.keras.layers.relu import ReLU
from tf_encrypted.keras.layers.normalization import BatchNormalization
from tf_encrypted.keras.layers.core import Reshape
# Public API of `tf_encrypted.keras.layers`, as re-exported above.
__all__ = [
    'Input',
    'Activation',
    'Conv2D',
    'Dense',
    'Flatten',
    'AveragePooling2D',
    'MaxPooling2D',
    'ReLU',
    'BatchNormalization',
    'Reshape',
]
| 30.111111 | 76 | 0.785978 | from __future__ import absolute_import
from tf_encrypted.keras.engine.input_layer import Input
from tf_encrypted.keras.layers.activation import Activation
from tf_encrypted.keras.layers.convolutional import Conv2D
from tf_encrypted.keras.layers.dense import Dense
from tf_encrypted.keras.layers.flatten import Flatten
from tf_encrypted.keras.layers.pooling import AveragePooling2D, MaxPooling2D
from tf_encrypted.keras.layers.relu import ReLU
from tf_encrypted.keras.layers.normalization import BatchNormalization
from tf_encrypted.keras.layers.core import Reshape
__all__ = [
'Input',
'Activation',
'Conv2D',
'Dense',
'Flatten',
'AveragePooling2D',
'MaxPooling2D',
'ReLU',
'BatchNormalization',
'Reshape',
]
| true | true |
1c38b64d2e426a225f775a07e698d2709589746f | 1,362 | py | Python | componere/area.py | abramsimon/componere | 910f714eb049efd8b358dd96772bc5f6187af627 | [
"Apache-2.0"
] | null | null | null | componere/area.py | abramsimon/componere | 910f714eb049efd8b358dd96772bc5f6187af627 | [
"Apache-2.0"
] | null | null | null | componere/area.py | abramsimon/componere | 910f714eb049efd8b358dd96772bc5f6187af627 | [
"Apache-2.0"
] | null | null | null | # Copyright 2o18 Premise Data
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Area:
    """A node in an area hierarchy, linked to its parent by identifier."""

    identifier = None
    name = None
    parent_identifier = None

    def __init__(self, identifier, name=None, parent_identifier=None):
        self.identifier = identifier
        self.name = name
        self.parent_identifier = parent_identifier

    @classmethod
    def from_values_dict(cls, identifier, values_dict):
        """Build an Area from a {'name', 'parent'} mapping; None passes through."""
        if values_dict is None:
            return None
        # Construct via cls (not Area) so subclasses build themselves.
        return cls(identifier,
                   values_dict.get("name"),
                   values_dict.get("parent"))

    @classmethod
    def from_collection_dict(cls, collection_dict):
        """Build {identifier: Area} from nested value dicts; None passes through."""
        if collection_dict is None:
            return None
        # BUG FIX: .iteritems() is Python-2-only and raises AttributeError
        # under Python 3; .items() behaves the same on both. Also avoid
        # shadowing the builtins `dict` and `object` as the original did.
        return {identifier: cls.from_values_dict(identifier, values)
                for identifier, values in collection_dict.items()}
| 31.674419 | 74 | 0.762849 |
class Area:
identifier = None
name = None
parent_identifier = None
def __init__(self, identifier, name=None, parent_identifier=None):
self.identifier = identifier
self.name = name
self.parent_identifier = parent_identifier
@classmethod
def from_values_dict(cls, identifier, values_dict):
if values_dict is None:
return None
identifier = identifier
name = values_dict.get("name")
parent_identifier = values_dict.get("parent")
return Area(identifier, name, parent_identifier)
@classmethod
def from_collection_dict(cls, collection_dict):
if collection_dict is None:
return None
dict = {}
for identifier, values_dict in collection_dict.iteritems():
object = Area.from_values_dict(identifier, values_dict)
dict[identifier] = object
return dict
| true | true |
1c38b6a146e1e25917896bb3994602aa15970f79 | 3,401 | py | Python | scenario2.py | lvreynoso/Call-Routing-Project | 8aeafe6c26d92ed93f32a0fc830bb53e339cb83a | [
"MIT"
] | null | null | null | scenario2.py | lvreynoso/Call-Routing-Project | 8aeafe6c26d92ed93f32a0fc830bb53e339cb83a | [
"MIT"
] | null | null | null | scenario2.py | lvreynoso/Call-Routing-Project | 8aeafe6c26d92ed93f32a0fc830bb53e339cb83a | [
"MIT"
] | null | null | null | import time
import resource
import platform
# check if prefix is a valid prefix for phoneNumber
def isPrefix(phoneNumber, prefix):
    """Return True if *prefix* is a leading substring of *phoneNumber*.

    Delegates to str.startswith, which already returns False when the
    prefix is longer than the number — no manual length/char loop needed.
    """
    return phoneNumber.startswith(prefix)
# find the price for the longest route with smallest price
# in input array containing tuples: (prefix, price)
def findBestSolution(solutions):
    """Return the price of the longest matching prefix, cheapest on ties.

    *solutions* is a list of ``(prefix, price)`` tuples where price is a
    string. The price of the longest prefix wins; when several prefixes
    share the greatest length, the numerically smallest price is kept.
    Returns None for an empty input.
    """
    longest_prefix = ''
    best_price = ''
    for prefix, price in solutions:
        if len(prefix) > len(longest_prefix):
            # Strictly longer prefix always wins outright.
            longest_prefix = prefix
            best_price = price
        elif len(prefix) == len(longest_prefix) and best_price:
            # BUG FIX: the original kept the *larger* price on ties
            # (`bestPrice < cost`) and compared the strings
            # lexicographically ('9' > '10'); compare numerically and
            # keep the smaller (cheaper) price, matching the stated
            # "longest route with smallest price" intent.
            if float(price) < float(best_price):
                longest_prefix = prefix
                best_price = price
    if not best_price:
        return None
    return best_price
# finds the cost for one phone number
def findCost(routePath, phoneNumber):
    """Return the best price for *phoneNumber* from the routes file.

    Scans every "<prefix>,<price>" line in *routePath*, keeps those whose
    prefix matches the number, and delegates the longest/cheapest choice
    to findBestSolution. Stops at the first empty line, like the original.
    """
    with open(routePath, 'r') as routes_file:
        lines = routes_file.read().split('\n')
    candidates = []
    for line in lines:
        if not line:
            break
        fields = line.split(",")  # [prefix, price]
        if isPrefix(phoneNumber, fields[0]):
            candidates.append((fields[0], fields[1]))
    return findBestSolution(candidates)
# writes all solutions to solution file
def writeToFile(solution):
    """Overwrite scenario2_solution.txt with *solution* (stringified)."""
    with open('scenario2_solution.txt', 'w') as out_file:
        out_file.write(str(solution))
# find costs for all phone numbers
def main(routePath, phonePath):
    """Look up the best cost for every number in *phonePath* and write results.

    Note: findCost re-reads the whole routes file once per phone number,
    so this is O(numbers * routes) file reads.
    """
    with open(phonePath, 'r') as f:
        content = f.read()
    numbers = content.split('\n')
    solution = ''
    # loop over all phone numbers
    for number in numbers:
        if (len(number) == 0):
            break
        # find its best cost
        cost = findCost(routePath, number)
        # add it to the solution string
        solution += "{number}: {cost}\n".format(number = number, cost = cost)
    writeToFile(solution)
# Cite: get_mem from KJ's code
def get_mem():
    """Return this process's peak memory usage in MB, rounded to 2 places."""
    peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    # ru_maxrss is reported in kilobytes on Linux but in bytes elsewhere
    # (e.g. macOS), hence the platform-dependent divisor.
    shift = 10 if platform.system() == 'Linux' else 20
    return round(peak / float(1 << shift), 2)
if __name__ == '__main__':
    # route paths to try
    # routePath = 'data/route-costs-10.txt'
    routePath = 'data/route-costs-100.txt'
    # routePath = 'data/route-costs-600.txt'
    # routePath = 'data/route-costs-35000.txt'
    # routePath = 'data/route-costs-106000.txt'
    # routePath = 'data/route-costs-1000000.txt'
    # routePath = 'data/route-costs-1000000.txt'
    # phone paths to try
    # phonePath = 'data/phone-numbers-3.txt'
    phonePath = 'data/phone-numbers-10.txt'
    # phonePath = 'data/phone-numbers-100.txt'
    # phonePath = 'data/phone-numbers-10000.txt'
    print("Find cost for {phonePath}: in {path}".format(phonePath = phonePath, path = routePath))
    # Time the whole run and report peak memory afterwards.
    start = time.time()
    main(routePath, phonePath)
    end = time.time()
    print("Found costs in {time} seconds".format(time = end-start))
    print("Memory used: {mem} mb".format(mem = get_mem()))
| 30.918182 | 97 | 0.6207 | import time
import resource
import platform
def isPrefix(phoneNumber, prefix):
if len(prefix) > len(phoneNumber):
return False
for i in range(len(prefix)):
if (phoneNumber[i] != prefix[i]):
return False
return True
def findBestSolution(solutions):
longestString = ''
bestPrice = ''
for i, rc in enumerate(solutions):
route = rc[0]
cost = rc[1]
if (len(route) > len(longestString)):
longestString = route
bestPrice = cost
elif (len(route) == len(longestString) and bestPrice < cost):
longestString = route
bestPrice = cost
if (len(bestPrice) == 0):
return None
return bestPrice
def findCost(routePath, phoneNumber):
with open(routePath, 'r') as f:
content = f.read()
solutions = []
for line in content.split('\n'):
if (len(line) == 0):
break
data = line.split(",")
if (isPrefix(phoneNumber, data[0])):
solutions.append((data[0], data[1]))
return findBestSolution(solutions)
def writeToFile(solution):
path = 'scenario2_solution.txt'
with open(path, 'w') as f:
f.write(str(solution))
def main(routePath, phonePath):
with open(phonePath, 'r') as f:
content = f.read()
numbers = content.split('\n')
solution = ''
for number in numbers:
if (len(number) == 0):
break
cost = findCost(routePath, number)
solution += "{number}: {cost}\n".format(number = number, cost = cost)
writeToFile(solution)
def get_mem():
usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if platform.system() == 'Linux':
return round(usage/float(1 << 10), 2)
return round(usage/float(1 << 20), 2)
if __name__ == '__main__':
# route paths to try
# routePath = 'data/route-costs-10.txt'
routePath = 'data/route-costs-100.txt'
# routePath = 'data/route-costs-600.txt'
# routePath = 'data/route-costs-35000.txt'
# routePath = 'data/route-costs-106000.txt'
# routePath = 'data/route-costs-1000000.txt'
# routePath = 'data/route-costs-1000000.txt'
# phone paths to true
# phonePath = 'data/phone-numbers-3.txt'
phonePath = 'data/phone-numbers-10.txt'
# phonePath = 'data/phone-numbers-100.txt'
# phonePath = 'data/phone-numbers-10000.txt'
print("Find cost for {phonePath}: in {path}".format(phonePath = phonePath, path = routePath))
start = time.time()
main(routePath, phonePath)
end = time.time()
print("Found costs in {time} seconds".format(time = end-start))
print("Memory used: {mem} mb".format(mem = get_mem()))
| true | true |
1c38b6ae57e0008c1f492bc4516b097b764c9bc0 | 5,829 | py | Python | awx/main/dispatch/worker/callback.py | Sicaine/awx | a1fe60da78445c7970eaa823e88ce21860053785 | [
"Apache-2.0"
] | 1 | 2019-03-07T11:54:50.000Z | 2019-03-07T11:54:50.000Z | awx/main/dispatch/worker/callback.py | Sicaine/awx | a1fe60da78445c7970eaa823e88ce21860053785 | [
"Apache-2.0"
] | 1 | 2020-06-18T14:53:26.000Z | 2020-06-18T14:53:26.000Z | awx/main/dispatch/worker/callback.py | Sicaine/awx | a1fe60da78445c7970eaa823e88ce21860053785 | [
"Apache-2.0"
] | null | null | null | import logging
import time
import os
import signal
import traceback
from django.conf import settings
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError
from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
InventoryUpdateEvent, SystemJobEvent, UnifiedJob)
from .base import BaseWorker
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class CallbackBrokerWorker(BaseWorker):
    '''
    A worker implementation that deserializes callback event data and persists
    it into the database.

    The code that *builds* these types of messages is found in the AWX display
    callback (`awx.lib.awx_display_callback`).
    '''

    # How many times a failed event save is retried after a DB outage.
    MAX_RETRIES = 2

    def perform_work(self, body):
        '''Persist one callback event payload (a dict) to the database.

        *body* must carry exactly one job-identifier key (e.g. 'job_id');
        EOF markers are not persisted but instead trigger a websocket
        summary and, when the job has finished, notification templates.
        '''
        try:
            # Maps the identifier key in the payload to its event model.
            event_map = {
                'job_id': JobEvent,
                'ad_hoc_command_id': AdHocCommandEvent,
                'project_update_id': ProjectUpdateEvent,
                'inventory_update_id': InventoryUpdateEvent,
                'system_job_id': SystemJobEvent,
            }

            if not any([key in body for key in event_map]):
                raise Exception('Payload does not have a job identifier')

            def _save_event_data():
                # Persist via whichever event class matches the payload key.
                for key, cls in event_map.items():
                    if key in body:
                        cls.create_from_data(**body)

            job_identifier = 'unknown job'
            job_key = 'unknown'
            for key in event_map.keys():
                if key in body:
                    job_identifier = body[key]
                    job_key = key
                    break

            if settings.DEBUG:
                # Pretty-print and syntax-highlight the payload (truncated
                # to 4 KB) for local debugging only.
                from pygments import highlight
                from pygments.lexers import PythonLexer
                from pygments.formatters import Terminal256Formatter
                from pprint import pformat
                if body.get('event') == 'EOF':
                    event_thing = 'EOF event'
                else:
                    event_thing = 'event {}'.format(body.get('counter', 'unknown'))
                logger.info('Callback worker received {} for {} {}'.format(
                    event_thing, job_key[:-len('_id')], job_identifier
                ))
                logger.debug('Body: {}'.format(
                    highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
                )[:1024 * 4])

            if body.get('event') == 'EOF':
                try:
                    final_counter = body.get('final_counter', 0)
                    logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
                    # EOF events are sent when stdout for the running task is
                    # closed. don't actually persist them to the database; we
                    # just use them to report `summary` websocket events as an
                    # approximation for when a job is "done"
                    emit_channel_notification(
                        'jobs-summary',
                        dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
                    )
                    # Additionally, when we've processed all events, we should
                    # have all the data we need to send out success/failure
                    # notification templates
                    uj = UnifiedJob.objects.get(pk=job_identifier)
                    if hasattr(uj, 'send_notification_templates'):
                        retries = 0
                        while retries < 5:
                            if uj.finished:
                                uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
                                break
                            else:
                                # wait a few seconds to avoid a race where the
                                # events are persisted _before_ the UJ.status
                                # changes from running -> successful
                                retries += 1
                                time.sleep(1)
                                uj = UnifiedJob.objects.get(pk=job_identifier)
                except Exception:
                    logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
                return

            retries = 0
            while retries <= self.MAX_RETRIES:
                try:
                    _save_event_data()
                    break
                except (OperationalError, InterfaceError, InternalError):
                    # Connection-level failure: back off, reconnect, retry;
                    # give up (and restart the dispatcher) after MAX_RETRIES.
                    if retries >= self.MAX_RETRIES:
                        logger.exception('Worker could not re-establish database connectivity, shutting down gracefully: Job {}'.format(job_identifier))
                        os.kill(os.getppid(), signal.SIGINT)
                        return
                    delay = 60 * retries
                    logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
                        i=retries + 1,
                        delay=delay
                    ))
                    django_connection.close()
                    time.sleep(delay)
                    retries += 1
                except DatabaseError:
                    # Non-transient database error: log and drop this event.
                    logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
                    break
        except Exception as exc:
            tb = traceback.format_exc()
            logger.error('Callback Task Processor Raised Exception: %r', exc)
            logger.error('Detail: {}'.format(tb))
| 44.496183 | 152 | 0.532853 | import logging
import time
import os
import signal
import traceback
from django.conf import settings
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError
from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
InventoryUpdateEvent, SystemJobEvent, UnifiedJob)
from .base import BaseWorker
# Module-level logger shared by the callback-receiver worker below.
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class CallbackBrokerWorker(BaseWorker):
    """Worker that persists job-event payloads received from the callback queue.

    Each payload (``body``) is a dict describing one event; the key that
    identifies the owning job (e.g. ``job_id``) selects which event model to
    write. On transient database-connectivity errors the save is retried with
    a linear back-off; if connectivity cannot be restored, the worker signals
    its parent process to shut down gracefully.
    """
    # Number of additional attempts after the first failed save before
    # giving up and asking the parent process to shut down.
    MAX_RETRIES = 2
    def perform_work(self, body):
        """Persist one event payload and, on EOF, emit summary notifications.

        ``body`` must contain exactly one of the ``*_id`` keys in ``event_map``;
        otherwise the payload is rejected. All exceptions are caught and logged
        so a bad payload never kills the worker loop.
        """
        try:
            # Maps the job-identifier key present in the payload to the
            # event model class used to store it.
            event_map = {
                'job_id': JobEvent,
                'ad_hoc_command_id': AdHocCommandEvent,
                'project_update_id': ProjectUpdateEvent,
                'inventory_update_id': InventoryUpdateEvent,
                'system_job_id': SystemJobEvent,
            }
            if not any([key in body for key in event_map]):
                raise Exception('Payload does not have a job identifier')
            def _save_event_data():
                # Persist the payload via the model matching its identifier key.
                for key, cls in event_map.items():
                    if key in body:
                        cls.create_from_data(**body)
            # Resolve which job this payload belongs to (for logging and
            # notification routing below).
            job_identifier = 'unknown job'
            job_key = 'unknown'
            for key in event_map.keys():
                if key in body:
                    job_identifier = body[key]
                    job_key = key
                    break
            if settings.DEBUG:
                # Verbose, colorized payload dump — debug builds only; imports
                # are local so production never pays for pygments.
                from pygments import highlight
                from pygments.lexers import PythonLexer
                from pygments.formatters import Terminal256Formatter
                from pprint import pformat
                if body.get('event') == 'EOF':
                    event_thing = 'EOF event'
                else:
                    event_thing = 'event {}'.format(body.get('counter', 'unknown'))
                logger.info('Callback worker received {} for {} {}'.format(
                    event_thing, job_key[:-len('_id')], job_identifier
                ))
                # Body dump is capped at 4 KiB to keep log lines bounded.
                logger.debug('Body: {}'.format(
                    highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
                )[:1024 * 4])
            if body.get('event') == 'EOF':
                try:
                    final_counter = body.get('final_counter', 0)
                    logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
                    # just use them to report `summary` websocket events as an
                    # approximation for when a job is "done"
                    emit_channel_notification(
                        'jobs-summary',
                        dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
                    )
                    # Additionally, when we've processed all events, we should
                    uj = UnifiedJob.objects.get(pk=job_identifier)
                    if hasattr(uj, 'send_notification_templates'):
                        retries = 0
                        # The job row may not be marked finished yet; poll up
                        # to 5 times (1s apart) before sending notifications.
                        while retries < 5:
                            if uj.finished:
                                uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
                                break
                            else:
                                retries += 1
                                time.sleep(1)
                                uj = UnifiedJob.objects.get(pk=job_identifier)
                except Exception:
                    # Notification failure is non-fatal; the events themselves
                    # were already processed.
                    logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
                return
            # Save the event, retrying on transient connectivity errors.
            retries = 0
            while retries <= self.MAX_RETRIES:
                try:
                    _save_event_data()
                    break
                except (OperationalError, InterfaceError, InternalError):
                    if retries >= self.MAX_RETRIES:
                        logger.exception('Worker could not re-establish database connectivity, shutting down gracefully: Job {}'.format(job_identifier))
                        # Ask the parent process to shut the service down.
                        os.kill(os.getppid(), signal.SIGINT)
                        return
                    delay = 60 * retries
                    logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
                        i=retries + 1,
                        delay=delay
                    ))
                    # Drop the broken connection so Django reconnects on the
                    # next ORM call.
                    django_connection.close()
                    time.sleep(delay)
                    retries += 1
                except DatabaseError:
                    # Non-transient DB error: log and give up on this event.
                    logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
                    break
        except Exception as exc:
            # Catch-all so one bad payload never crashes the worker loop.
            tb = traceback.format_exc()
            logger.error('Callback Task Processor Raised Exception: %r', exc)
            logger.error('Detail: {}'.format(tb))
1c38b6e00ca40fee87d35cf4853cec01dcdb1aec | 44,135 | py | Python | xrft/xrft.py | RichardScottOZ/xrft | 5b18b88957661b8f5e1967ec28e81c552c63834f | [
"MIT"
] | null | null | null | xrft/xrft.py | RichardScottOZ/xrft | 5b18b88957661b8f5e1967ec28e81c552c63834f | [
"MIT"
] | null | null | null | xrft/xrft.py | RichardScottOZ/xrft | 5b18b88957661b8f5e1967ec28e81c552c63834f | [
"MIT"
] | null | null | null | import warnings
import operator
import sys
import functools as ft
from functools import reduce
import numpy as np
import xarray as xr
import pandas as pd
import dask.array as dsar
from dask import delayed
import scipy.signal as sps
import scipy.linalg as spl
from .detrend import detrend as _detrend
# Public API of this module. `dft`/`idft` and the `isotropic_*spectrum`
# spellings are deprecated aliases kept for backward compatibility; prefer
# `fft`/`ifft` and `isotropic_*_spectrum`.
__all__ = [
    "fft",
    "ifft",
    "dft",
    "idft",
    "power_spectrum",
    "cross_spectrum",
    "cross_phase",
    "isotropize",
    "isotropic_power_spectrum",
    "isotropic_cross_spectrum",
    "isotropic_powerspectrum",
    "isotropic_crossspectrum",
    "fit_loglog",
]
def _fft_module(da):
if da.chunks:
return dsar.fft
else:
return np.fft
def _apply_window(da, dims, window_type="hann"):
"""Creating windows in dimensions dims."""
if window_type == True:
window_type = "hann"
warnings.warn(
"Please provide the name of window adhering to scipy.signal.windows. The boolean option will be deprecated in future releases.",
FutureWarning,
)
elif window_type not in [
"hann",
"hamming",
"kaiser",
"tukey",
"parzen",
"taylor",
"boxcar",
"barthann",
"bartlett",
"blackman",
"blackmanharris",
"bohman",
"chebwin",
"cosine",
"dpss",
"exponential",
"flattop",
"gaussian",
"general_cosine",
"general_gaussian",
"general_hamming",
"triang",
"nuttall",
]:
raise NotImplementedError(
"Window type {window_type} not supported. Please adhere to scipy.signal.windows for naming convention."
)
if dims is None:
dims = list(da.dims)
else:
if isinstance(dims, str):
dims = [dims]
scipy_win_func = getattr(sps.windows, window_type)
if da.chunks:
def dask_win_func(n, sym=False):
return dsar.from_delayed(
delayed(scipy_win_func, pure=True)(n, sym=sym), (n,), float
)
win_func = dask_win_func
else:
win_func = scipy_win_func
windows = [
xr.DataArray(
win_func(len(da[d]), sym=False), dims=da[d].dims, coords=da[d].coords
)
for d in dims
]
return reduce(operator.mul, windows[::-1]), da * reduce(operator.mul, windows[::-1])
def _stack_chunks(da, dim, suffix="_segment"):
    """Reshape a DataArray so there is only one chunk along dimension `dim`

    Each dimension in ``dim`` of length ``n`` with uniform chunk length
    ``c`` is split into two dimensions: ``d + suffix`` (the segment index,
    length ``n // c``) and ``d`` (within-segment position, length ``c``).
    Other dimensions are passed through unchanged. Raises ``ValueError``
    when the chunks along a target dimension are not all the same length.
    """
    data = da.data
    attr = da.attrs
    newdims = []
    newcoords = {}
    newshape = []
    for d in da.dims:
        if d in dim:
            axis_num = da.get_axis_num(d)
            # All chunks along this axis must be equal so the reshape below
            # maps each chunk onto one segment.
            if np.diff(da.chunks[axis_num]).sum() != 0:
                raise ValueError("Chunk lengths need to be the same.")
            n = len(da[d])
            chunklen = da.chunks[axis_num][0]
            # Reshape the 1-D coordinate to (segments, chunklen); the first
            # row becomes the within-segment coordinate for all segments.
            coord_rs = da[d].data.reshape((int(n / chunklen), int(chunklen)))
            newdims.append(d + suffix)
            newdims.append(d)
            newshape.append(int(n / chunklen))
            newshape.append(int(chunklen))
            newcoords[d + suffix] = range(int(n / chunklen))
            newcoords[d] = coord_rs[0]
        else:
            # Untouched dimension: carry the shape and coordinate through.
            newdims.append(d)
            newshape.append(len(da[d]))
            newcoords[d] = da[d].data
    da = xr.DataArray(
        data.reshape(newshape), dims=newdims, coords=newcoords, attrs=attr
    )
    return da
def _freq(N, delta_x, real, shift):
# calculate frequencies from coordinates
# coordinates are always loaded eagerly, so we use numpy
if real is None:
fftfreq = [np.fft.fftfreq] * len(N)
else:
# Discard negative frequencies from transform along last axis to be
# consistent with np.fft.rfftn
fftfreq = [np.fft.fftfreq] * (len(N) - 1)
fftfreq.append(np.fft.rfftfreq)
k = [fftfreq(Nx, dx) for (fftfreq, Nx, dx) in zip(fftfreq, N, delta_x)]
if shift:
k = [np.fft.fftshift(l) for l in k]
return k
def _ifreq(N, delta_x, real, shift):
# calculate frequencies from coordinates
# coordinates are always loaded eagerly, so we use numpy
if real is None:
fftfreq = [np.fft.fftfreq] * len(N)
else:
irfftfreq = lambda Nx, dx: np.fft.fftfreq(
2 * (Nx - 1), dx
) # Not in standard numpy !
fftfreq = [np.fft.fftfreq] * (len(N) - 1)
fftfreq.append(irfftfreq)
k = [fftfreq(Nx, dx) for (fftfreq, Nx, dx) in zip(fftfreq, N, delta_x)]
if shift:
k = [np.fft.fftshift(l) for l in k]
return k
def _new_dims_and_coords(da, dim, wavenm, prefix):
    """Build frequency coordinates and a dim-rename mapping for the output.

    For each transformed dimension ``d`` with frequency array ``wavenm[d]``,
    the new dimension name toggles the ``prefix``: ``x`` -> ``freq_x`` and
    ``freq_x`` -> ``x``. Each new coordinate stores the (assumed uniform)
    grid step in its ``spacing`` attribute. Returns ``(new_coords,
    swap_dims)``.

    NOTE(review): the ``da`` parameter is unused in this body — presumably
    kept for interface symmetry with callers; confirm before removing.
    """
    # set up new dimensions and coordinates for dataarray
    swap_dims = dict()
    new_coords = dict()
    wavenm = dict(zip(dim, wavenm))
    for d in dim:
        k = wavenm[d]
        # Toggle the prefix: add it when absent, strip it when present.
        new_name = prefix + d if d[: len(prefix)] != prefix else d[len(prefix) :]
        new_dim = xr.DataArray(k, dims=new_name, coords={new_name: k}, name=new_name)
        # Record the frequency-grid step for downstream scaling (e.g.
        # spectral density normalization reads `.spacing`).
        new_dim.attrs.update({"spacing": k[1] - k[0]})
        new_coords[new_name] = new_dim
        swap_dims[d] = new_name
    return new_coords, swap_dims
def _diff_coord(coord):
"""Returns the difference as a xarray.DataArray."""
v0 = coord.values[0]
calendar = getattr(v0, "calendar", None)
if calendar:
import cftime
ref_units = "seconds since 1800-01-01 00:00:00"
decoded_time = cftime.date2num(coord, ref_units, calendar)
coord = xr.DataArray(decoded_time, dims=coord.dims, coords=coord.coords)
return np.diff(coord)
elif pd.api.types.is_datetime64_dtype(v0):
return np.diff(coord).astype("timedelta64[s]").astype("f8")
else:
return np.diff(coord)
def _lag_coord(coord):
    """Returns the coordinate lag

    The "lag" is the middle element of the coordinate (after flipping a
    decreasing coordinate to increasing order), i.e. the offset of the
    grid's center from zero. cftime values are decoded to seconds since a
    fixed epoch; datetime64 values are converted to float seconds.

    NOTE(review): in the plain-numeric branch this returns ``lag.data``,
    which for a numpy scalar is a memoryview — the caller in ``fft``
    later reads ``lag.obj`` off it when storing the ``direct_lag``
    attribute. Presumably intentional, but worth confirming; the cftime
    branch returns a plain number instead.
    """
    v0 = coord.values[0]
    calendar = getattr(v0, "calendar", None)
    # Work on an increasing copy so the "middle" element is well defined.
    if coord[-1] > coord[0]:
        coord_data = coord.data
    else:
        coord_data = np.flip(coord.data, axis=-1)
    lag = coord_data[len(coord.data) // 2]
    if calendar:
        import cftime

        ref_units = "seconds since 1800-01-01 00:00:00"
        decoded_time = cftime.date2num(lag, ref_units, calendar)
        return decoded_time
    elif pd.api.types.is_datetime64_dtype(v0):
        return lag.astype("timedelta64[s]").astype("f8").data
    else:
        return lag.data
def dft(
    da, dim=None, true_phase=False, true_amplitude=False, **kwargs
):  # pragma: no cover
    """Deprecated alias of :func:`fft`; emits a ``FutureWarning``."""
    warnings.warn(
        "This function has been renamed and will disappear in the future."
        " Please use `fft` instead",
        FutureWarning,
    )
    return fft(
        da,
        dim=dim,
        true_phase=true_phase,
        true_amplitude=true_amplitude,
        **kwargs,
    )
def idft(
    daft, dim=None, true_phase=False, true_amplitude=False, **kwargs
):  # pragma: no cover
    """Deprecated alias of :func:`ifft`; emits a ``FutureWarning``."""
    warnings.warn(
        "This function has been renamed and will disappear in the future."
        " Please use `ifft` instead",
        FutureWarning,
    )
    return ifft(
        daft,
        dim=dim,
        true_phase=true_phase,
        true_amplitude=true_amplitude,
        **kwargs,
    )
def fft(
    da,
    spacing_tol=1e-3,
    dim=None,
    real_dim=None,
    shift=True,
    detrend=None,
    window=None,
    true_phase=False,
    true_amplitude=False,
    chunks_to_segments=False,
    prefix="freq_",
    **kwargs,
):
    """
    Perform discrete Fourier transform of xarray data-array `da` along the
    specified dimensions.

    .. math::
        daft = \mathbb{F}(da - \overline{da})

    Parameters
    ----------
    da : `xarray.DataArray`
        The data to be transformed
    spacing_tol: float, optional
        Spacing tolerance. Fourier transform should not be applied to uneven grid but
        this restriction can be relaxed with this setting. Use caution.
    dim : str or sequence of str, optional
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed. If the inputs are dask arrays, the
        arrays must not be chunked along these dimensions.
    real_dim : str, optional
        Real Fourier transform will be taken along this dimension.
    shift : bool, default
        Whether to shift the fft output. Default is `True`, unless `real_dim is not None`,
        in which case shift will be set to False always.
    detrend : {None, 'constant', 'linear'}
        If `constant`, the mean across the transform dimensions will be
        subtracted before calculating the Fourier transform (FT).
        If `linear`, the linear least-square fit will be subtracted before
        the FT. For `linear`, only dims of length 1 and 2 are supported.
    window : str, optional
        Whether to apply a window to the data before the Fourier
        transform is taken. A window will be applied to all the dimensions in
        dim. Please follow `scipy.signal.windows`' naming convention.
    true_phase : bool, optional
        If set to False, standard fft algorithm is applied on signal without consideration of coordinates.
        If set to True, coordinates location are correctly taken into account to evaluate Fourier Transform phase and
        fftshift is applied on input signal prior to fft (fft algorithm intrinsically considers that input signal is on fftshifted grid).
    true_amplitude : bool, optional
        If set to True, output is multiplied by the spacing of the transformed variables to match theoretical FT amplitude.
        If set to False, amplitude regularisation by spacing is not applied (as in numpy.fft)
    chunks_to_segments : bool, optional
        Whether the data is chunked along the axis to take FFT.
    prefix : str
        The prefix for the new transformed dimensions.

    Returns
    -------
    daft : `xarray.DataArray`
        The output of the Fourier transformation, with appropriate dimensions.
    """
    if not true_phase and not true_amplitude:
        msg = "Flags true_phase and true_amplitude will be set to True in future versions of xrft.dft to preserve the theoretical phasing and amplitude of Fourier Transform. Consider using xrft.fft to ensure future compatibility with numpy.fft like behavior and to deactivate this warning."
        warnings.warn(msg, FutureWarning)
    if dim is None:
        dim = list(da.dims)
    else:
        if isinstance(dim, str):
            dim = [dim]
    if "real" in kwargs:
        # Legacy spelling of `real_dim`; still honored, with a warning.
        real_dim = kwargs.get("real")
        msg = "`real` flag will be deprecated in future version of xrft.dft and replaced by `real_dim` flag."
        warnings.warn(msg, FutureWarning)
    if real_dim is not None:
        if real_dim not in da.dims:
            raise ValueError(
                "The dimension along which real FT is taken must be one of the existing dimensions."
            )
        else:
            dim = [d for d in dim if d != real_dim] + [
                real_dim
            ]  # real dim has to be moved or added at the end !
    if chunks_to_segments:
        da = _stack_chunks(da, dim)
    rawdims = da.dims  # take care of segmented dimesions, if any
    if real_dim is not None:
        da = da.transpose(
            *[d for d in da.dims if d not in [real_dim]] + [real_dim]
        )  # dimension for real transformed is moved at the end
    fftm = _fft_module(da)
    if real_dim is None:
        fft_fn = fftm.fftn
    else:
        # rfftn output is already one-sided; shifting would be meaningless.
        shift = False
        fft_fn = fftm.rfftn
    # the axes along which to take ffts
    axis_num = [
        da.get_axis_num(d) for d in dim
    ]  # if there is a real dim , it has to be the last one
    N = [da.shape[n] for n in axis_num]
    # raise error if there are multiple coordinates attached to the dimension(s) over which the FFT is taken
    for d in dim:
        bad_coords = [
            cname for cname in da.coords if cname != d and d in da[cname].dims
        ]
        if bad_coords:
            raise ValueError(
                f"The input array contains coordinate variable(s) ({bad_coords}) whose dims include the transform dimension(s) `{d}`. "
                f"Please drop these coordinates (`.drop({bad_coords}`) before invoking xrft."
            )
    # verify even spacing of input coordinates
    # NOTE(review): the "coodinate" typo below is in the released error
    # string and is deliberately left unchanged here.
    delta_x = []
    lag_x = []
    for d in dim:
        diff = _diff_coord(da[d])
        delta = np.abs(diff[0])
        lag = _lag_coord(da[d])
        if not np.allclose(diff, diff[0], rtol=spacing_tol):
            raise ValueError(
                "Can't take Fourier transform because "
                "coodinate %s is not evenly spaced" % d
            )
        if delta == 0.0:
            raise ValueError(
                "Can't take Fourier transform because spacing in coordinate %s is zero"
                % d
            )
        delta_x.append(delta)
        lag_x.append(lag)
    if detrend is not None:
        if detrend == "linear":
            # Linear detrending may reorder dims internally; restore them.
            orig_dims = da.dims
            da = _detrend(da, dim, detrend_type=detrend).transpose(*orig_dims)
        else:
            da = _detrend(da, dim, detrend_type=detrend)
    if window is not None:
        _, da = _apply_window(da, dim, window_type=window)
    if true_phase:
        reversed_axis = [
            da.get_axis_num(d) for d in dim if da[d][-1] < da[d][0]
        ]  # handling decreasing coordinates
        # fft assumes the input sits on an fftshifted grid, hence ifftshift.
        f = fft_fn(
            fftm.ifftshift(np.flip(da, axis=reversed_axis), axes=axis_num),
            axes=axis_num,
        )
    else:
        f = fft_fn(da.data, axes=axis_num)
    if shift:
        f = fftm.fftshift(f, axes=axis_num)
    k = _freq(N, delta_x, real_dim, shift)
    newcoords, swap_dims = _new_dims_and_coords(da, dim, k, prefix)
    daft = xr.DataArray(
        f, dims=da.dims, coords=dict([c for c in da.coords.items() if c[0] not in dim])
    )
    daft = daft.swap_dims(swap_dims).assign_coords(newcoords)
    daft = daft.drop([d for d in dim if d in daft.coords])
    updated_dims = [
        daft.dims[i] for i in da.get_axis_num(dim)
    ]  # List of transformed dimensions
    if true_phase:
        # Apply the phase shift implied by each coordinate's offset (lag)
        # so the spectrum's phase matches the true coordinate locations.
        for up_dim, lag in zip(updated_dims, lag_x):
            daft = daft * xr.DataArray(
                np.exp(-1j * 2.0 * np.pi * newcoords[up_dim] * lag),
                dims=up_dim,
                coords={up_dim: newcoords[up_dim]},
            )  # taking advantage of xarray broadcasting and ordered coordinates
            daft[up_dim].attrs.update({"direct_lag": lag.obj})
    if true_amplitude:
        # Multiply by the grid-cell volume so amplitudes match the
        # continuous-FT convention.
        daft = daft * np.prod(delta_x)
    return daft.transpose(
        *[swap_dims.get(d, d) for d in rawdims]
    )  # Do nothing if da was not transposed
def ifft(
    daft,
    spacing_tol=1e-3,
    dim=None,
    real_dim=None,
    shift=True,
    true_phase=False,
    true_amplitude=False,
    chunks_to_segments=False,
    prefix="freq_",
    lag=None,
    **kwargs,
):
    """
    Perform inverse discrete Fourier transform of xarray data-array `daft` along the
    specified dimensions.

    .. math::
        da = \mathbb{F}(daft - \overline{daft})

    Parameters
    ----------
    daft : `xarray.DataArray`
        The data to be transformed
    spacing_tol: float, optional
        Spacing tolerance. Fourier transform should not be applied to uneven grid but
        this restriction can be relaxed with this setting. Use caution.
    dim : str or sequence of str, optional
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed.
    real_dim : str, optional
        Real Fourier transform will be taken along this dimension.
    shift : bool, default
        Whether to shift the fft output. Default is `True`.
    chunks_to_segments : bool, optional
        Whether the data is chunked along the axis to take FFT.
    prefix : str
        The prefix for the new transformed dimensions.
    true_phase : bool, optional
        If set to False, standard ifft algorithm is applied on signal without consideration of coordinates order.
        If set to True, coordinates are correctly taken into account to evaluate Inverse Fourier Transform phase and
        fftshift is applied on input signal prior to ifft (ifft algorithm intrinsically considers that input signal is on fftshifted grid).
    true_amplitude : bool, optional
        If set to True, output is divided by the spacing of the transformed variables to match theoretical IFT amplitude.
        If set to False, amplitude regularisation by spacing is not applied (as in numpy.ifft)
    lag : None, float or sequence of float and/or None, optional
        Output coordinates of transformed dimensions will be shifted by corresponding lag values and correct signal phasing will be preserved if true_phase is set to True.
        If lag is None (default), 'direct_lag' attributes of each dimension is used (or set to zero if not found).
        If defined, lag must have same length as dim.
        If lag is a sequence, a None element means that 'direct_lag' attribute will be used for the corresponding dimension
        Manually set lag to zero to get output coordinates centered on zero.

    Returns
    -------
    da : `xarray.DataArray`
        The output of the Inverse Fourier transformation, with appropriate dimensions.
    """
    if not true_phase and not true_amplitude:
        msg = "Flags true_phase and true_amplitude will be set to True in future versions of xrft.idft to preserve the theoretical phasing and amplitude of Inverse Fourier Transform. Consider using xrft.ifft to ensure future compatibility with numpy.ifft like behavior and to deactivate this warning."
        warnings.warn(msg, FutureWarning)
    if dim is None:
        dim = list(daft.dims)
    else:
        if isinstance(dim, str):
            dim = [dim]
    if "real" in kwargs:
        # Legacy spelling of `real_dim`; still honored, with a warning.
        real_dim = kwargs.get("real")
        msg = "`real` flag will be deprecated in future version of xrft.idft and replaced by `real_dim` flag."
        warnings.warn(msg, FutureWarning)
    if real_dim is not None:
        if real_dim not in daft.dims:
            raise ValueError(
                "The dimension along which real IFT is taken must be one of the existing dimensions."
            )
        else:
            dim = [d for d in dim if d != real_dim] + [
                real_dim
            ]  # real dim has to be moved or added at the end !
    if lag is None:
        # Default: undo the phase offset recorded by the forward transform.
        lag = [daft[d].attrs.get("direct_lag", 0.0) for d in dim]
        msg = "Default idft's behaviour (lag=None) changed! Default value of lag was zero (centered output coordinates) and is now set to transformed coordinate's attribute: 'direct_lag'."
        warnings.warn(msg, FutureWarning)
    else:
        if isinstance(lag, float) or isinstance(lag, int):
            lag = [lag]
        if len(dim) != len(lag):
            raise ValueError("dim and lag must have the same length.")
        if not true_phase:
            msg = "Setting lag with true_phase=False does not guarantee accurate idft."
            warnings.warn(msg, Warning)
        lag = [
            daft[d].attrs.get("direct_lag") if l is None else l
            for d, l in zip(dim, lag)
        ]  # enable lag of the form [3.2, None, 7]
    if true_phase:
        # Pre-multiply by the conjugate phase ramp so the output lands on
        # the lag-shifted coordinate grid.
        for d, l in zip(dim, lag):
            daft = daft * np.exp(1j * 2.0 * np.pi * daft[d] * l)
    if chunks_to_segments:
        daft = _stack_chunks(daft, dim)
    rawdims = daft.dims  # take care of segmented dimensions, if any
    if real_dim is not None:
        daft = daft.transpose(
            *[d for d in daft.dims if d not in [real_dim]] + [real_dim]
        )  # dimension for real transformed is moved at the end
    fftm = _fft_module(daft)
    if real_dim is None:
        fft_fn = fftm.ifftn
    else:
        fft_fn = fftm.irfftn
    # the axes along which to take ffts
    axis_num = [daft.get_axis_num(d) for d in dim]
    N = [daft.shape[n] for n in axis_num]
    # verify even spacing of input coordinates (It handle fftshifted grids)
    delta_x = []
    for d in dim:
        diff = _diff_coord(daft[d])
        delta = np.abs(diff[0])
        l = _lag_coord(daft[d]) if d is not real_dim else daft[d][0].data
        if not np.allclose(
            diff, delta, rtol=spacing_tol
        ):  # means that input is not on regular increasing grid
            # Try again on a sorted copy: an fftshifted grid becomes evenly
            # spaced once sorted, anything else is genuinely uneven.
            reordered_coord = daft[d].copy()
            reordered_coord = reordered_coord.sortby(d)
            diff = _diff_coord(reordered_coord)
            l = _lag_coord(reordered_coord)
            if np.allclose(
                diff, diff[0], rtol=spacing_tol
            ):  # means that input is on fftshifted grid
                daft = daft.sortby(d)  # reordering the input
            else:
                raise ValueError(
                    "Can't take Fourier transform because "
                    "coodinate %s is not evenly spaced" % d
                )
        if np.abs(l) > spacing_tol:
            raise ValueError(
                "Inverse Fourier Transform can not be computed because coordinate %s is not centered on zero frequency"
                % d
            )
        if delta == 0.0:
            raise ValueError(
                "Can't take Inverse Fourier transform because spacing in coordinate %s is zero"
                % d
            )
        delta_x.append(delta)
    axis_shift = [
        daft.get_axis_num(d) for d in dim if d is not real_dim
    ]  # remove real dim of the list
    f = fftm.ifftshift(
        daft.data, axes=axis_shift
    )  # Force to be on fftshift grid before Fourier Transform
    f = fft_fn(f, axes=axis_num)
    if not true_phase:
        f = fftm.ifftshift(f, axes=axis_num)
    if shift:
        f = fftm.fftshift(f, axes=axis_num)
    k = _ifreq(N, delta_x, real_dim, shift)
    newcoords, swap_dims = _new_dims_and_coords(daft, dim, k, prefix)
    da = xr.DataArray(
        f,
        dims=daft.dims,
        coords=dict([c for c in daft.coords.items() if c[0] not in dim]),
    )
    da = da.swap_dims(swap_dims).assign_coords(newcoords)
    da = da.drop([d for d in dim if d in da.coords])
    with xr.set_options(
        keep_attrs=True
    ):  # This line ensures keeping spacing attribute in output coordinates
        # Shift the output coordinates by the requested lags.
        for d, l in zip(dim, lag):
            tfd = swap_dims[d]
            da = da.assign_coords({tfd: da[tfd] + l})
    if true_amplitude:
        # Divide by the output grid-cell volume so amplitudes match the
        # continuous inverse-FT convention.
        da = da / np.prod([float(da[up_dim].spacing) for up_dim in swap_dims.values()])
    return da.transpose(
        *[swap_dims.get(d, d) for d in rawdims]
    )  # Do nothing if daft was not transposed
def power_spectrum(
    da, dim=None, real_dim=None, scaling="density", window_correction=False, **kwargs
):
    """
    Calculates the power spectrum of da.

    .. math::
        da' = da - \overline{da}
    .. math::
        ps = \mathbb{F}(da') {\mathbb{F}(da')}^*

    Parameters
    ----------
    da : `xarray.DataArray`
        The data to be transformed
    dim : str or sequence of str, optional
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed.
    real_dim : str, optional
        Real Fourier transform will be taken along this dimension.
    scaling : str, optional
        If 'density', it will normalize the output to power spectral density
        If 'spectrum', it will normalize the output to power spectrum
    window_correction : boolean
        If True, it will correct for the energy reduction resulting from applying a non-uniform window.
        This is the default behaviour of many tools for computing power spectrum (e.g scipy.signal.welch and scipy.signal.periodogram).
        If scaling = 'spectrum', correct the amplitude of peaks in the spectrum. This ensures, for example, that the peak in the one-sided power spectrum of a 10 Hz sine wave with RMS**2 = 10 has a magnitude of 10.
        If scaling = 'density', correct for the energy (integral) of the spectrum. This ensures, for example, that the power spectral density integrates to the square of the RMS of the signal (ie that Parseval's theorem is satisfied). Note that in most cases, Parseval's theorem will only be approximately satisfied with this correction as it assumes that the signal being windowed is independent of the window. The correction becomes more accurate as the width of the window gets large in comparison with any noticeable period in the signal.
        If False, the spectrum gives a representation of the power in the windowed signal.
        Note that when True, Parseval's theorem may only be approximately satisfied.
    kwargs : dict : see xrft.dft for argument list
    """
    if "density" in kwargs:
        # Legacy boolean `density` flag; translated into `scaling`.
        density = kwargs.pop("density")
        msg = (
            "density flag will be deprecated in future version of xrft.power_spectrum and replaced by scaling flag. "
            + 'density=True should be replaced by scaling="density" and '
            + "density=False will not be maintained.\nscaling flag is ignored !"
        )
        warnings.warn(msg, FutureWarning)
        scaling = "density" if density else "false_density"
    if "real" in kwargs:
        real_dim = kwargs.get("real")
        msg = "`real` flag will be deprecated in future version of xrft.power_spectrum and replaced by `real_dim` flag."
        warnings.warn(msg, FutureWarning)
    kwargs.update(
        {"true_amplitude": True, "true_phase": False}
    )  # true_phase do not matter in power_spectrum
    daft = fft(da, dim=dim, real_dim=real_dim, **kwargs)
    updated_dims = [
        d for d in daft.dims if (d not in da.dims and "segment" not in d)
    ]  # Transformed dimensions
    ps = np.abs(daft) ** 2
    if real_dim is not None:
        # One-sided (rfft) spectrum: double every bin except the zero
        # frequency and, for even input lengths, the Nyquist bin.
        real = [d for d in updated_dims if real_dim == d[-len(real_dim) :]][
            0
        ]  # find transformed real dimension
        f = np.full(ps.sizes[real], 2.0)
        if len(da[real_dim]) % 2 == 0:
            f[0], f[-1] = 1.0, 1.0
        else:
            f[0] = 1.0
        ps = ps * xr.DataArray(f, dims=real, coords=ps[real].coords)
    if scaling == "density":
        if window_correction:
            if kwargs.get("window") == None:
                raise ValueError(
                    "window_correction can only be applied when windowing is turned on."
                )
            else:
                # Energy correction: divide by the mean squared window.
                windows, _ = _apply_window(da, dim, window_type=kwargs.get("window"))
                ps = ps / (windows ** 2).mean()
        fs = np.prod([float(ps[d].spacing) for d in updated_dims])
        ps *= fs
    elif scaling == "spectrum":
        if window_correction:
            if kwargs.get("window") == None:
                raise ValueError(
                    "window_correction can only be applied when windowing is turned on."
                )
            else:
                # Amplitude correction: divide by the squared mean window.
                windows, _ = _apply_window(da, dim, window_type=kwargs.get("window"))
                ps = ps / windows.mean() ** 2
        fs = np.prod([float(ps[d].spacing) for d in updated_dims])
        ps *= fs ** 2
    elif scaling == "false_density":  # Corresponds to density=False
        pass
    else:
        raise ValueError("Unknown {} scaling flag".format(scaling))
    return ps
def cross_spectrum(
    da1,
    da2,
    dim=None,
    real_dim=None,
    scaling="density",
    window_correction=False,
    true_phase=False,
    **kwargs,
):
    """
    Calculates the cross spectra of da1 and da2.

    .. math::
        da1' = da1 - \overline{da1};\ \ da2' = da2 - \overline{da2}
    .. math::
        cs = \mathbb{F}(da1') {\mathbb{F}(da2')}^*

    Parameters
    ----------
    da1 : `xarray.DataArray`
        The data to be transformed
    da2 : `xarray.DataArray`
        The data to be transformed
    dim : str or sequence of str, optional
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed.
    real_dim : str, optional
        Real Fourier transform will be taken along this dimension.
    scaling : str, optional
        If 'density', it will normalize the output to power spectral density
        If 'spectrum', it will normalize the output to power spectrum
    window_correction : boolean
        If True, it will correct for the energy reduction resulting from applying a non-uniform window.
        This is the default behaviour of many tools for computing power spectrum (e.g scipy.signal.welch and scipy.signal.periodogram).
        If scaling = 'spectrum', correct the amplitude of peaks in the spectrum. This ensures, for example, that the peak in the one-sided power spectrum of a 10 Hz sine wave with RMS**2 = 10 has a magnitude of 10.
        If scaling = 'density', correct for the energy (integral) of the spectrum. This ensures, for example, that the power spectral density integrates to the square of the RMS of the signal (ie that Parseval's theorem is satisfied). Note that in most cases, Parseval's theorem will only be approximately satisfied with this correction as it assumes that the signal being windowed is independent of the window. The correction becomes more accurate as the width of the window gets large in comparison with any noticeable period in the signal.
        If False, the spectrum gives a representation of the power in the windowed signal.
        Note that when True, Parseval's theorem may only be approximately satisfied.
    true_phase : boolean
        Passed through to the underlying forward transforms; when True the
        coordinate locations are taken into account in the transform phase
        (see `fft`). Defaults to False, with a FutureWarning.
    kwargs : dict : see xrft.dft for argument list
    """
    if not true_phase:
        msg = (
            "true_phase flag will be set to True in future version of xrft.dft possibly impacting cross_spectrum output. "
            + "Set explicitely true_phase = False in cross_spectrum arguments list to ensure future compatibility "
            + "with numpy-like behavior where the coordinates are disregarded."
        )
        warnings.warn(msg, FutureWarning)
    if "real" in kwargs:
        # Legacy spelling of `real_dim`; still honored, with a warning.
        real_dim = kwargs.get("real")
        msg = "`real` flag will be deprecated in future version of xrft.cross_spectrum and replaced by `real_dim` flag."
        warnings.warn(msg, FutureWarning)
    if "density" in kwargs:
        # Legacy boolean `density` flag; translated into `scaling`.
        density = kwargs.pop("density")
        msg = (
            "density flag will be deprecated in future version of xrft.cross_spectrum and replaced by scaling flag. "
            + 'density=True should be replaced by scaling="density" and '
            + "density=False will not be maintained.\nscaling flag is ignored !"
        )
        warnings.warn(msg, FutureWarning)
        scaling = "density" if density else "false_density"
    kwargs.update({"true_amplitude": True})
    daft1 = fft(da1, dim=dim, real_dim=real_dim, true_phase=true_phase, **kwargs)
    daft2 = fft(da2, dim=dim, real_dim=real_dim, true_phase=true_phase, **kwargs)
    if daft1.dims != daft2.dims:
        raise ValueError("The two datasets have different dimensions")
    updated_dims = [
        d for d in daft1.dims if (d not in da1.dims and "segment" not in d)
    ]  # Transformed dimensions
    cs = daft1 * np.conj(daft2)
    if real_dim is not None:
        # One-sided (rfft) spectrum: double every bin except the zero
        # frequency and, for even input lengths, the Nyquist bin.
        real = [d for d in updated_dims if real_dim == d[-len(real_dim) :]][
            0
        ]  # find transformed real dimension
        f = np.full(cs.sizes[real], 2.0)
        if len(da1[real_dim]) % 2 == 0:
            f[0], f[-1] = 1.0, 1.0
        else:
            f[0] = 1.0
        cs = cs * xr.DataArray(f, dims=real, coords=cs[real].coords)
    if scaling == "density":
        if window_correction:
            if kwargs.get("window") is None:
                raise ValueError(
                    "window_correction can only be applied when windowing is turned on."
                )
            # BUG FIX: the original referenced an undefined name `da` here
            # (NameError at runtime); the window is rebuilt from `da1`,
            # whose dims were checked above to match `da2`.
            windows, _ = _apply_window(da1, dim, window_type=kwargs.get("window"))
            # Energy correction: divide by the mean squared window.
            cs = cs / (windows ** 2).mean()
        fs = np.prod([float(cs[d].spacing) for d in updated_dims])
        cs *= fs
    elif scaling == "spectrum":
        if window_correction:
            if kwargs.get("window") is None:
                raise ValueError(
                    "window_correction can only be applied when windowing is turned on."
                )
            # BUG FIX: same undefined-`da` reference as above.
            windows, _ = _apply_window(da1, dim, window_type=kwargs.get("window"))
            # Amplitude correction: divide by the squared mean window.
            cs = cs / windows.mean() ** 2
        fs = np.prod([float(cs[d].spacing) for d in updated_dims])
        cs *= fs ** 2
    elif scaling == "false_density":  # Corresponds to density=False
        pass
    else:
        raise ValueError("Unknown {} scaling flag".format(scaling))
    return cs
def cross_phase(da1, da2, dim=None, true_phase=False, **kwargs):
    """
    Calculates the cross-phase between da1 and da2.

    Returned values are in [-pi, pi].

    .. math::
        da1' = da1 - \overline{da1};\ \ da2' = da2 - \overline{da2}
    .. math::
        cp = \text{Arg} [\mathbb{F}(da1')^*, \mathbb{F}(da2')]

    Parameters
    ----------
    da1 : `xarray.DataArray`
        The data to be transformed
    da2 : `xarray.DataArray`
        The data to be transformed
    kwargs : dict : see xrft.dft for argument list
    """
    if not true_phase:
        msg = (
            "true_phase flag will be set to True in future version of xrft.dft possibly impacting cross_phase output. "
            + "Set explicitely true_phase = False in cross_spectrum arguments list to ensure future compatibility "
            + "with numpy-like behavior where the coordinates are disregarded."
        )
        warnings.warn(msg, FutureWarning)
    # NOTE(review): `xr.ufuncs` was removed in modern xarray releases;
    # this call presumably needs migrating to `np.angle` / `xr.apply_ufunc`
    # when the pinned xarray version is raised — confirm against the
    # project's xarray requirement.
    cp = xr.ufuncs.angle(
        cross_spectrum(da1, da2, dim=dim, true_phase=true_phase, **kwargs)
    )
    if da1.name and da2.name:
        cp.name = "{}_{}_phase".format(da1.name, da2.name)
    return cp
def _binned_agg(
array: np.ndarray,
indices: np.ndarray,
num_bins: int,
*,
func,
fill_value,
dtype,
) -> np.ndarray:
"""NumPy helper function for aggregating over bins."""
try:
import numpy_groupies
except ImportError:
raise ImportError(
"This function requires the `numpy_groupies` package to be installed. Please install it with pip or conda."
)
mask = np.logical_not(np.isnan(indices))
int_indices = indices[mask].astype(int)
shape = array.shape[: -indices.ndim] + (num_bins,)
result = numpy_groupies.aggregate(
int_indices,
array[..., mask],
func=func,
size=num_bins,
fill_value=fill_value,
dtype=dtype,
axis=-1,
)
return result
def _groupby_bins_agg(
    array: xr.DataArray,
    group: xr.DataArray,
    bins,
    func="sum",
    fill_value=0,
    dtype=None,
    **cut_kwargs,
) -> xr.DataArray:
    """Faster equivalent of Xarray's groupby_bins(...).sum().

    Bins the values of ``group`` with ``pd.cut`` and aggregates ``array``
    over the resulting bin indices via ``_binned_agg`` (numpy_groupies),
    dispatched through ``xr.apply_ufunc`` so dask-backed inputs stay lazy.
    The output gains a ``<group.name>_bins`` dimension whose coordinate is
    the pandas interval categories.
    """
    # https://github.com/pydata/xarray/issues/4473
    binned = pd.cut(np.ravel(group), bins, **cut_kwargs)
    new_dim_name = group.name + "_bins"
    # Carry the integer bin codes on the same dims/coords as `group`.
    # NOTE(review): pd.cut marks out-of-range values with code -1, while
    # `_binned_agg` only filters NaN indices — presumably inputs are
    # expected to fall inside the bins; confirm before relying on edges.
    indices = group.copy(data=binned.codes.reshape(group.shape))
    result = xr.apply_ufunc(
        _binned_agg,
        array,
        indices,
        input_core_dims=[indices.dims, indices.dims],
        output_core_dims=[[new_dim_name]],
        output_dtypes=[array.dtype],
        dask_gufunc_kwargs=dict(
            allow_rechunk=True,
            output_sizes={new_dim_name: binned.categories.size},
        ),
        kwargs={
            "num_bins": binned.categories.size,
            "func": func,
            "fill_value": fill_value,
            "dtype": dtype,
        },
        dask="parallelized",
    )
    result.coords[new_dim_name] = binned.categories
    return result
def isotropize(ps, fftdim, nfactor=4, truncate=False):
    r"""
    Isotropize a 2D power spectrum or cross spectrum
    by taking an azimuthal average.

    .. math::

        \text{iso}_{ps} = k_r N^{-1} \sum_{N} |\mathbb{F}(da')|^2

    where :math:`N` is the number of azimuthal bins.

    Parameters
    ----------
    ps : `xarray.DataArray`
        The power spectrum or cross spectrum to be isotropized.
    fftdim : list
        The fft dimensions overwhich the isotropization must be performed.
    nfactor : int, optional
        Ratio of number of bins to take the azimuthal averaging with the
        data size. Default is 4.
    truncate : bool, optional
        If True, the spectrum will be truncated for wavenumbers larger than
        the Nyquist wavenumber.

    Returns
    -------
    `xarray.DataArray`
        Spectrum averaged over azimuth, with a new radial wavenumber
        coordinate ``freq_r``.
    """
    # compute radial wavenumber bins
    k = ps[fftdim[1]]
    l = ps[fftdim[0]]
    N = [k.size, l.size]
    # Number of radial bins scales with the smaller spectral dimension.
    nbins = int(min(N) / nfactor)
    freq_r = np.sqrt(k ** 2 + l ** 2).rename("freq_r")
    # Mean radial wavenumber of each bin, used to label the output axis.
    kr = _groupby_bins_agg(freq_r, freq_r, bins=nbins, func="mean")
    if truncate:
        # Truncate at the smaller of the two Nyquist wavenumbers so the
        # azimuthal average only uses fully sampled rings.
        if k.max() > l.max():
            kmax = l.max()
        else:
            kmax = k.max()
        kr = kr.where(kr <= kmax)
    else:
        msg = (
            "The flag `truncate` will be set to True by default in future version "
            + "in order to truncate the isotropic wavenumber larger than the "
            + "Nyquist wavenumber."
        )
        warnings.warn(msg, FutureWarning)
    # Azimuthal mean of the spectrum over the same radial bins.
    iso_ps = (
        _groupby_bins_agg(ps, freq_r, bins=nbins, func="mean")
        .rename({"freq_r_bins": "freq_r"})
        .drop_vars("freq_r")
    )
    iso_ps.coords["freq_r"] = kr.data
    if truncate:
        # Weight by k_r and drop bins masked out by the truncation above.
        return (iso_ps * iso_ps.freq_r).dropna("freq_r")
    else:
        return iso_ps * iso_ps.freq_r
def isotropic_powerspectrum(*args, **kwargs):  # pragma: no cover
    """Deprecated alias kept for backward compatibility.

    See :func:`isotropic_power_spectrum` for documentation.
    """
    warnings.warn(
        "This function has been renamed and will disappear in the future."
        " Please use isotropic_power_spectrum instead",
        Warning,
    )
    return isotropic_power_spectrum(*args, **kwargs)
def isotropic_power_spectrum(
    da,
    spacing_tol=1e-3,
    dim=None,
    shift=True,
    detrend=None,
    scaling="density",
    window=None,
    window_correction=False,
    nfactor=4,
    truncate=False,
    **kwargs,
):
    r"""
    Calculates the isotropic spectrum from the
    two-dimensional power spectrum by taking the
    azimuthal average.

    .. math::

        \text{iso}_{ps} = k_r N^{-1} \sum_{N} |\mathbb{F}(da')|^2

    where :math:`N` is the number of azimuthal bins.

    Note: the method is not lazy does trigger computations.

    Parameters
    ----------
    da : `xarray.DataArray`
        The data to be transformed
    spacing_tol: float, optional
        Spacing tolerance. Fourier transform should not be applied to uneven grid but
        this restriction can be relaxed with this setting. Use caution.
    dim : list, optional
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed. Exactly two dimensions are required.
    shift : bool, optional
        Whether to shift the fft output.
    detrend : str, optional
        If `constant`, the mean across the transform dimensions will be
        subtracted before calculating the Fourier transform (FT).
        If `linear`, the linear least-square fit will be subtracted before
        the FT.
    scaling : str, optional
        Normalization of the spectrum, forwarded to ``power_spectrum``
        (e.g. ``"density"`` or ``"spectrum"``). The deprecated boolean
        keyword ``density`` may still be passed via ``**kwargs`` and
        overrides this flag.
    window : str, optional
        Whether to apply a window to the data before the Fourier
        transform is taken. Please adhere to scipy.signal.windows for naming convention.
    window_correction : bool, optional
        If True, the power spectrum is corrected for the windowing
        (requires ``window`` to be set); forwarded to ``power_spectrum``.
    nfactor : int, optional
        Ratio of number of bins to take the azimuthal averaging with the
        data size. Default is 4.
    truncate : bool, optional
        If True, the spectrum will be truncated for wavenumbers larger than
        the Nyquist wavenumber.

    Returns
    -------
    iso_ps : `xarray.DataArray`
        Isotropic power spectrum
    """
    # Deprecated boolean `density` keyword takes precedence over `scaling`.
    if "density" in kwargs:
        density = kwargs.pop("density")
        scaling = "density" if density else "false_density"
    if dim is None:
        dim = da.dims
    if len(dim) != 2:
        raise ValueError("The Fourier transform should be two dimensional")
    ps = power_spectrum(
        da,
        spacing_tol=spacing_tol,
        dim=dim,
        shift=shift,
        detrend=detrend,
        scaling=scaling,
        window_correction=window_correction,
        window=window,
        **kwargs,
    )
    # power_spectrum renames transformed dims with a "freq_" prefix.
    fftdim = ["freq_" + d for d in dim]
    return isotropize(ps, fftdim, nfactor=nfactor, truncate=truncate)
def isotropic_crossspectrum(*args, **kwargs):  # pragma: no cover
    """Deprecated alias kept for backward compatibility.

    See :func:`isotropic_cross_spectrum` for documentation.
    """
    warnings.warn(
        "This function has been renamed and will disappear in the future."
        " Please use isotropic_cross_spectrum instead",
        Warning,
    )
    return isotropic_cross_spectrum(*args, **kwargs)
def isotropic_cross_spectrum(
    da1,
    da2,
    spacing_tol=1e-3,
    dim=None,
    shift=True,
    detrend=None,
    scaling="density",
    window=None,
    window_correction=False,
    nfactor=4,
    truncate=False,
    **kwargs,
):
    r"""
    Calculates the isotropic spectrum from the
    two-dimensional power spectrumby taking the
    azimuthal average.

    .. math::

        \text{iso}_{cs} = k_r N^{-1} \sum_{N} (\mathbb{F}(da1') {\mathbb{F}(da2')}^*)

    where :math:`N` is the number of azimuthal bins.

    Note: the method is not lazy does trigger computations.

    Parameters
    ----------
    da1 : `xarray.DataArray`
        The data to be transformed
    da2 : `xarray.DataArray`
        The data to be transformed; must share the dimensions of ``da1``.
    spacing_tol: float (default)
        Spacing tolerance. Fourier transform should not be applied to uneven grid but
        this restriction can be relaxed with this setting. Use caution.
    dim : list (optional)
        The dimensions along which to take the transformation. If `None`, all
        dimensions will be transformed. Exactly two dimensions are required.
    shift : bool (optional)
        Whether to shift the fft output.
    detrend : str (optional)
        If `constant`, the mean across the transform dimensions will be
        subtracted before calculating the Fourier transform (FT).
        If `linear`, the linear least-square fit will be subtracted before
        the FT.
    scaling : str (optional)
        Normalization of the spectrum, forwarded to ``cross_spectrum``
        (e.g. ``"density"`` or ``"spectrum"``). The deprecated boolean
        keyword ``density`` may still be passed via ``**kwargs`` and
        overrides this flag.
    window : str (optional)
        Whether to apply a window to the data before the Fourier
        transform is taken. Please adhere to scipy.signal.windows for naming convention.
    window_correction : bool (optional)
        If True, the spectrum is corrected for the windowing (requires
        ``window`` to be set); forwarded to ``cross_spectrum``.
    nfactor : int (optional)
        Ratio of number of bins to take the azimuthal averaging with the
        data size. Default is 4.
    truncate : bool, optional
        If True, the spectrum will be truncated for wavenumbers larger than
        the Nyquist wavenumber.

    Returns
    -------
    iso_cs : `xarray.DataArray`
        Isotropic cross spectrum
    """
    # Deprecated boolean `density` keyword takes precedence over `scaling`.
    if "density" in kwargs:
        density = kwargs.pop("density")
        scaling = "density" if density else "false_density"
    if dim is None:
        dim = da1.dims
        dim2 = da2.dims
        if dim != dim2:
            raise ValueError("The two datasets have different dimensions")
    if len(dim) != 2:
        raise ValueError("The Fourier transform should be two dimensional")
    cs = cross_spectrum(
        da1,
        da2,
        spacing_tol=spacing_tol,
        dim=dim,
        shift=shift,
        detrend=detrend,
        scaling=scaling,
        window_correction=window_correction,
        window=window,
        **kwargs,
    )
    # cross_spectrum renames transformed dims with a "freq_" prefix.
    fftdim = ["freq_" + d for d in dim]
    return isotropize(cs, fftdim, nfactor=nfactor, truncate=truncate)
def fit_loglog(x, y):
    """
    Fit a line to isotropic spectra in log-log space

    Parameters
    ----------
    x : `numpy.array`
        Coordinate of the data
    y : `numpy.array`
        data

    Returns
    -------
    y_fit : `numpy.array`
        The linear fit
    a : float64
        Slope of the fit
    b : float64
        Intercept of the fit
    """
    # Least-squares fit of log2(y) vs log2(x), i.e. y ~ 2**b * x**a.
    log_x = np.log2(x)
    slope, intercept = np.polyfit(log_x, np.log2(y), 1)
    # Map the fitted line back from log-log space.
    y_fit = 2 ** (slope * log_x + intercept)
    return y_fit, slope, intercept
| 34.453552 | 542 | 0.620347 | import warnings
import operator
import sys
import functools as ft
from functools import reduce
import numpy as np
import xarray as xr
import pandas as pd
import dask.array as dsar
from dask import delayed
import scipy.signal as sps
import scipy.linalg as spl
from .detrend import detrend as _detrend
__all__ = [
"fft",
"ifft",
"dft",
"idft",
"power_spectrum",
"cross_spectrum",
"cross_phase",
"isotropize",
"isotropic_power_spectrum",
"isotropic_cross_spectrum",
"isotropic_powerspectrum",
"isotropic_crossspectrum",
"fit_loglog",
]
def _fft_module(da):
if da.chunks:
return dsar.fft
else:
return np.fft
def _apply_window(da, dims, window_type="hann"):
if window_type == True:
window_type = "hann"
warnings.warn(
"Please provide the name of window adhering to scipy.signal.windows. The boolean option will be deprecated in future releases.",
FutureWarning,
)
elif window_type not in [
"hann",
"hamming",
"kaiser",
"tukey",
"parzen",
"taylor",
"boxcar",
"barthann",
"bartlett",
"blackman",
"blackmanharris",
"bohman",
"chebwin",
"cosine",
"dpss",
"exponential",
"flattop",
"gaussian",
"general_cosine",
"general_gaussian",
"general_hamming",
"triang",
"nuttall",
]:
raise NotImplementedError(
"Window type {window_type} not supported. Please adhere to scipy.signal.windows for naming convention."
)
if dims is None:
dims = list(da.dims)
else:
if isinstance(dims, str):
dims = [dims]
scipy_win_func = getattr(sps.windows, window_type)
if da.chunks:
def dask_win_func(n, sym=False):
return dsar.from_delayed(
delayed(scipy_win_func, pure=True)(n, sym=sym), (n,), float
)
win_func = dask_win_func
else:
win_func = scipy_win_func
windows = [
xr.DataArray(
win_func(len(da[d]), sym=False), dims=da[d].dims, coords=da[d].coords
)
for d in dims
]
return reduce(operator.mul, windows[::-1]), da * reduce(operator.mul, windows[::-1])
def _stack_chunks(da, dim, suffix="_segment"):
data = da.data
attr = da.attrs
newdims = []
newcoords = {}
newshape = []
for d in da.dims:
if d in dim:
axis_num = da.get_axis_num(d)
if np.diff(da.chunks[axis_num]).sum() != 0:
raise ValueError("Chunk lengths need to be the same.")
n = len(da[d])
chunklen = da.chunks[axis_num][0]
coord_rs = da[d].data.reshape((int(n / chunklen), int(chunklen)))
newdims.append(d + suffix)
newdims.append(d)
newshape.append(int(n / chunklen))
newshape.append(int(chunklen))
newcoords[d + suffix] = range(int(n / chunklen))
newcoords[d] = coord_rs[0]
else:
newdims.append(d)
newshape.append(len(da[d]))
newcoords[d] = da[d].data
da = xr.DataArray(
data.reshape(newshape), dims=newdims, coords=newcoords, attrs=attr
)
return da
def _freq(N, delta_x, real, shift):
if real is None:
fftfreq = [np.fft.fftfreq] * len(N)
else:
fftfreq = [np.fft.fftfreq] * (len(N) - 1)
fftfreq.append(np.fft.rfftfreq)
k = [fftfreq(Nx, dx) for (fftfreq, Nx, dx) in zip(fftfreq, N, delta_x)]
if shift:
k = [np.fft.fftshift(l) for l in k]
return k
def _ifreq(N, delta_x, real, shift):
if real is None:
fftfreq = [np.fft.fftfreq] * len(N)
else:
irfftfreq = lambda Nx, dx: np.fft.fftfreq(
2 * (Nx - 1), dx
)
fftfreq = [np.fft.fftfreq] * (len(N) - 1)
fftfreq.append(irfftfreq)
k = [fftfreq(Nx, dx) for (fftfreq, Nx, dx) in zip(fftfreq, N, delta_x)]
if shift:
k = [np.fft.fftshift(l) for l in k]
return k
def _new_dims_and_coords(da, dim, wavenm, prefix):
swap_dims = dict()
new_coords = dict()
wavenm = dict(zip(dim, wavenm))
for d in dim:
k = wavenm[d]
new_name = prefix + d if d[: len(prefix)] != prefix else d[len(prefix) :]
new_dim = xr.DataArray(k, dims=new_name, coords={new_name: k}, name=new_name)
new_dim.attrs.update({"spacing": k[1] - k[0]})
new_coords[new_name] = new_dim
swap_dims[d] = new_name
return new_coords, swap_dims
def _diff_coord(coord):
v0 = coord.values[0]
calendar = getattr(v0, "calendar", None)
if calendar:
import cftime
ref_units = "seconds since 1800-01-01 00:00:00"
decoded_time = cftime.date2num(coord, ref_units, calendar)
coord = xr.DataArray(decoded_time, dims=coord.dims, coords=coord.coords)
return np.diff(coord)
elif pd.api.types.is_datetime64_dtype(v0):
return np.diff(coord).astype("timedelta64[s]").astype("f8")
else:
return np.diff(coord)
def _lag_coord(coord):
v0 = coord.values[0]
calendar = getattr(v0, "calendar", None)
if coord[-1] > coord[0]:
coord_data = coord.data
else:
coord_data = np.flip(coord.data, axis=-1)
lag = coord_data[len(coord.data) // 2]
if calendar:
import cftime
ref_units = "seconds since 1800-01-01 00:00:00"
decoded_time = cftime.date2num(lag, ref_units, calendar)
return decoded_time
elif pd.api.types.is_datetime64_dtype(v0):
return lag.astype("timedelta64[s]").astype("f8").data
else:
return lag.data
def dft(
da, dim=None, true_phase=False, true_amplitude=False, **kwargs
):
msg = (
"This function has been renamed and will disappear in the future."
+ " Please use `fft` instead"
)
warnings.warn(msg, FutureWarning)
return fft(
da, dim=dim, true_phase=true_phase, true_amplitude=true_amplitude, **kwargs
)
def idft(
daft, dim=None, true_phase=False, true_amplitude=False, **kwargs
):
msg = (
"This function has been renamed and will disappear in the future."
+ " Please use `ifft` instead"
)
warnings.warn(msg, FutureWarning)
return ifft(
daft, dim=dim, true_phase=true_phase, true_amplitude=true_amplitude, **kwargs
)
def fft(
da,
spacing_tol=1e-3,
dim=None,
real_dim=None,
shift=True,
detrend=None,
window=None,
true_phase=False,
true_amplitude=False,
chunks_to_segments=False,
prefix="freq_",
**kwargs,
):
if not true_phase and not true_amplitude:
msg = "Flags true_phase and true_amplitude will be set to True in future versions of xrft.dft to preserve the theoretical phasing and amplitude of Fourier Transform. Consider using xrft.fft to ensure future compatibility with numpy.fft like behavior and to deactivate this warning."
warnings.warn(msg, FutureWarning)
if dim is None:
dim = list(da.dims)
else:
if isinstance(dim, str):
dim = [dim]
if "real" in kwargs:
real_dim = kwargs.get("real")
msg = "`real` flag will be deprecated in future version of xrft.dft and replaced by `real_dim` flag."
warnings.warn(msg, FutureWarning)
if real_dim is not None:
if real_dim not in da.dims:
raise ValueError(
"The dimension along which real FT is taken must be one of the existing dimensions."
)
else:
dim = [d for d in dim if d != real_dim] + [
real_dim
]
if chunks_to_segments:
da = _stack_chunks(da, dim)
rawdims = da.dims
if real_dim is not None:
da = da.transpose(
*[d for d in da.dims if d not in [real_dim]] + [real_dim]
)
fftm = _fft_module(da)
if real_dim is None:
fft_fn = fftm.fftn
else:
shift = False
fft_fn = fftm.rfftn
axis_num = [
da.get_axis_num(d) for d in dim
]
N = [da.shape[n] for n in axis_num]
for d in dim:
bad_coords = [
cname for cname in da.coords if cname != d and d in da[cname].dims
]
if bad_coords:
raise ValueError(
f"The input array contains coordinate variable(s) ({bad_coords}) whose dims include the transform dimension(s) `{d}`. "
f"Please drop these coordinates (`.drop({bad_coords}`) before invoking xrft."
)
delta_x = []
lag_x = []
for d in dim:
diff = _diff_coord(da[d])
delta = np.abs(diff[0])
lag = _lag_coord(da[d])
if not np.allclose(diff, diff[0], rtol=spacing_tol):
raise ValueError(
"Can't take Fourier transform because "
"coodinate %s is not evenly spaced" % d
)
if delta == 0.0:
raise ValueError(
"Can't take Fourier transform because spacing in coordinate %s is zero"
% d
)
delta_x.append(delta)
lag_x.append(lag)
if detrend is not None:
if detrend == "linear":
orig_dims = da.dims
da = _detrend(da, dim, detrend_type=detrend).transpose(*orig_dims)
else:
da = _detrend(da, dim, detrend_type=detrend)
if window is not None:
_, da = _apply_window(da, dim, window_type=window)
if true_phase:
reversed_axis = [
da.get_axis_num(d) for d in dim if da[d][-1] < da[d][0]
]
f = fft_fn(
fftm.ifftshift(np.flip(da, axis=reversed_axis), axes=axis_num),
axes=axis_num,
)
else:
f = fft_fn(da.data, axes=axis_num)
if shift:
f = fftm.fftshift(f, axes=axis_num)
k = _freq(N, delta_x, real_dim, shift)
newcoords, swap_dims = _new_dims_and_coords(da, dim, k, prefix)
daft = xr.DataArray(
f, dims=da.dims, coords=dict([c for c in da.coords.items() if c[0] not in dim])
)
daft = daft.swap_dims(swap_dims).assign_coords(newcoords)
daft = daft.drop([d for d in dim if d in daft.coords])
updated_dims = [
daft.dims[i] for i in da.get_axis_num(dim)
]
if true_phase:
for up_dim, lag in zip(updated_dims, lag_x):
daft = daft * xr.DataArray(
np.exp(-1j * 2.0 * np.pi * newcoords[up_dim] * lag),
dims=up_dim,
coords={up_dim: newcoords[up_dim]},
)
daft[up_dim].attrs.update({"direct_lag": lag.obj})
if true_amplitude:
daft = daft * np.prod(delta_x)
return daft.transpose(
*[swap_dims.get(d, d) for d in rawdims]
)
def ifft(
daft,
spacing_tol=1e-3,
dim=None,
real_dim=None,
shift=True,
true_phase=False,
true_amplitude=False,
chunks_to_segments=False,
prefix="freq_",
lag=None,
**kwargs,
):
if not true_phase and not true_amplitude:
msg = "Flags true_phase and true_amplitude will be set to True in future versions of xrft.idft to preserve the theoretical phasing and amplitude of Inverse Fourier Transform. Consider using xrft.ifft to ensure future compatibility with numpy.ifft like behavior and to deactivate this warning."
warnings.warn(msg, FutureWarning)
if dim is None:
dim = list(daft.dims)
else:
if isinstance(dim, str):
dim = [dim]
if "real" in kwargs:
real_dim = kwargs.get("real")
msg = "`real` flag will be deprecated in future version of xrft.idft and replaced by `real_dim` flag."
warnings.warn(msg, FutureWarning)
if real_dim is not None:
if real_dim not in daft.dims:
raise ValueError(
"The dimension along which real IFT is taken must be one of the existing dimensions."
)
else:
dim = [d for d in dim if d != real_dim] + [
real_dim
]
if lag is None:
lag = [daft[d].attrs.get("direct_lag", 0.0) for d in dim]
msg = "Default idft's behaviour (lag=None) changed! Default value of lag was zero (centered output coordinates) and is now set to transformed coordinate's attribute: 'direct_lag'."
warnings.warn(msg, FutureWarning)
else:
if isinstance(lag, float) or isinstance(lag, int):
lag = [lag]
if len(dim) != len(lag):
raise ValueError("dim and lag must have the same length.")
if not true_phase:
msg = "Setting lag with true_phase=False does not guarantee accurate idft."
warnings.warn(msg, Warning)
lag = [
daft[d].attrs.get("direct_lag") if l is None else l
for d, l in zip(dim, lag)
]
if true_phase:
for d, l in zip(dim, lag):
daft = daft * np.exp(1j * 2.0 * np.pi * daft[d] * l)
if chunks_to_segments:
daft = _stack_chunks(daft, dim)
rawdims = daft.dims
if real_dim is not None:
daft = daft.transpose(
*[d for d in daft.dims if d not in [real_dim]] + [real_dim]
)
fftm = _fft_module(daft)
if real_dim is None:
fft_fn = fftm.ifftn
else:
fft_fn = fftm.irfftn
axis_num = [daft.get_axis_num(d) for d in dim]
N = [daft.shape[n] for n in axis_num]
delta_x = []
for d in dim:
diff = _diff_coord(daft[d])
delta = np.abs(diff[0])
l = _lag_coord(daft[d]) if d is not real_dim else daft[d][0].data
if not np.allclose(
diff, delta, rtol=spacing_tol
):
reordered_coord = daft[d].copy()
reordered_coord = reordered_coord.sortby(d)
diff = _diff_coord(reordered_coord)
l = _lag_coord(reordered_coord)
if np.allclose(
diff, diff[0], rtol=spacing_tol
):
daft = daft.sortby(d)
else:
raise ValueError(
"Can't take Fourier transform because "
"coodinate %s is not evenly spaced" % d
)
if np.abs(l) > spacing_tol:
raise ValueError(
"Inverse Fourier Transform can not be computed because coordinate %s is not centered on zero frequency"
% d
)
if delta == 0.0:
raise ValueError(
"Can't take Inverse Fourier transform because spacing in coordinate %s is zero"
% d
)
delta_x.append(delta)
axis_shift = [
daft.get_axis_num(d) for d in dim if d is not real_dim
]
f = fftm.ifftshift(
daft.data, axes=axis_shift
)
f = fft_fn(f, axes=axis_num)
if not true_phase:
f = fftm.ifftshift(f, axes=axis_num)
if shift:
f = fftm.fftshift(f, axes=axis_num)
k = _ifreq(N, delta_x, real_dim, shift)
newcoords, swap_dims = _new_dims_and_coords(daft, dim, k, prefix)
da = xr.DataArray(
f,
dims=daft.dims,
coords=dict([c for c in daft.coords.items() if c[0] not in dim]),
)
da = da.swap_dims(swap_dims).assign_coords(newcoords)
da = da.drop([d for d in dim if d in da.coords])
with xr.set_options(
keep_attrs=True
):
for d, l in zip(dim, lag):
tfd = swap_dims[d]
da = da.assign_coords({tfd: da[tfd] + l})
if true_amplitude:
da = da / np.prod([float(da[up_dim].spacing) for up_dim in swap_dims.values()])
return da.transpose(
*[swap_dims.get(d, d) for d in rawdims]
)
def power_spectrum(
da, dim=None, real_dim=None, scaling="density", window_correction=False, **kwargs
):
if "density" in kwargs:
density = kwargs.pop("density")
msg = (
"density flag will be deprecated in future version of xrft.power_spectrum and replaced by scaling flag. "
+ 'density=True should be replaced by scaling="density" and '
+ "density=False will not be maintained.\nscaling flag is ignored !"
)
warnings.warn(msg, FutureWarning)
scaling = "density" if density else "false_density"
if "real" in kwargs:
real_dim = kwargs.get("real")
msg = "`real` flag will be deprecated in future version of xrft.power_spectrum and replaced by `real_dim` flag."
warnings.warn(msg, FutureWarning)
kwargs.update(
{"true_amplitude": True, "true_phase": False}
)
daft = fft(da, dim=dim, real_dim=real_dim, **kwargs)
updated_dims = [
d for d in daft.dims if (d not in da.dims and "segment" not in d)
]
ps = np.abs(daft) ** 2
if real_dim is not None:
real = [d for d in updated_dims if real_dim == d[-len(real_dim) :]][
0
]
f = np.full(ps.sizes[real], 2.0)
if len(da[real_dim]) % 2 == 0:
f[0], f[-1] = 1.0, 1.0
else:
f[0] = 1.0
ps = ps * xr.DataArray(f, dims=real, coords=ps[real].coords)
if scaling == "density":
if window_correction:
if kwargs.get("window") == None:
raise ValueError(
"window_correction can only be applied when windowing is turned on."
)
else:
windows, _ = _apply_window(da, dim, window_type=kwargs.get("window"))
ps = ps / (windows ** 2).mean()
fs = np.prod([float(ps[d].spacing) for d in updated_dims])
ps *= fs
elif scaling == "spectrum":
if window_correction:
if kwargs.get("window") == None:
raise ValueError(
"window_correction can only be applied when windowing is turned on."
)
else:
windows, _ = _apply_window(da, dim, window_type=kwargs.get("window"))
ps = ps / windows.mean() ** 2
fs = np.prod([float(ps[d].spacing) for d in updated_dims])
ps *= fs ** 2
elif scaling == "false_density":
pass
else:
raise ValueError("Unknown {} scaling flag".format(scaling))
return ps
def cross_spectrum(
da1,
da2,
dim=None,
real_dim=None,
scaling="density",
window_correction=False,
true_phase=False,
**kwargs,
):
if not true_phase:
msg = (
"true_phase flag will be set to True in future version of xrft.dft possibly impacting cross_spectrum output. "
+ "Set explicitely true_phase = False in cross_spectrum arguments list to ensure future compatibility "
+ "with numpy-like behavior where the coordinates are disregarded."
)
warnings.warn(msg, FutureWarning)
if "real" in kwargs:
real_dim = kwargs.get("real")
msg = "`real` flag will be deprecated in future version of xrft.cross_spectrum and replaced by `real_dim` flag."
warnings.warn(msg, FutureWarning)
if "density" in kwargs:
density = kwargs.pop("density")
msg = (
"density flag will be deprecated in future version of xrft.cross_spectrum and replaced by scaling flag. "
+ 'density=True should be replaced by scaling="density" and '
+ "density=False will not be maintained.\nscaling flag is ignored !"
)
warnings.warn(msg, FutureWarning)
scaling = "density" if density else "false_density"
kwargs.update({"true_amplitude": True})
daft1 = fft(da1, dim=dim, real_dim=real_dim, true_phase=true_phase, **kwargs)
daft2 = fft(da2, dim=dim, real_dim=real_dim, true_phase=true_phase, **kwargs)
if daft1.dims != daft2.dims:
raise ValueError("The two datasets have different dimensions")
updated_dims = [
d for d in daft1.dims if (d not in da1.dims and "segment" not in d)
]
cs = daft1 * np.conj(daft2)
if real_dim is not None:
real = [d for d in updated_dims if real_dim == d[-len(real_dim) :]][
0
]
f = np.full(cs.sizes[real], 2.0)
if len(da1[real_dim]) % 2 == 0:
f[0], f[-1] = 1.0, 1.0
else:
f[0] = 1.0
cs = cs * xr.DataArray(f, dims=real, coords=cs[real].coords)
if scaling == "density":
if window_correction:
if kwargs.get("window") == None:
raise ValueError(
"window_correction can only be applied when windowing is turned on."
)
else:
windows, _ = _apply_window(da, dim, window_type=kwargs.get("window"))
cs = cs / (windows ** 2).mean()
fs = np.prod([float(cs[d].spacing) for d in updated_dims])
cs *= fs
elif scaling == "spectrum":
if window_correction:
if kwargs.get("window") == None:
raise ValueError(
"window_correction can only be applied when windowing is turned on."
)
else:
windows, _ = _apply_window(da, dim, window_type=kwargs.get("window"))
cs = cs / windows.mean() ** 2
fs = np.prod([float(cs[d].spacing) for d in updated_dims])
cs *= fs ** 2
elif scaling == "false_density":
pass
else:
raise ValueError("Unknown {} scaling flag".format(scaling))
return cs
def cross_phase(da1, da2, dim=None, true_phase=False, **kwargs):
if not true_phase:
msg = (
"true_phase flag will be set to True in future version of xrft.dft possibly impacting cross_phase output. "
+ "Set explicitely true_phase = False in cross_spectrum arguments list to ensure future compatibility "
+ "with numpy-like behavior where the coordinates are disregarded."
)
warnings.warn(msg, FutureWarning)
cp = xr.ufuncs.angle(
cross_spectrum(da1, da2, dim=dim, true_phase=true_phase, **kwargs)
)
if da1.name and da2.name:
cp.name = "{}_{}_phase".format(da1.name, da2.name)
return cp
def _binned_agg(
array: np.ndarray,
indices: np.ndarray,
num_bins: int,
*,
func,
fill_value,
dtype,
) -> np.ndarray:
try:
import numpy_groupies
except ImportError:
raise ImportError(
"This function requires the `numpy_groupies` package to be installed. Please install it with pip or conda."
)
mask = np.logical_not(np.isnan(indices))
int_indices = indices[mask].astype(int)
shape = array.shape[: -indices.ndim] + (num_bins,)
result = numpy_groupies.aggregate(
int_indices,
array[..., mask],
func=func,
size=num_bins,
fill_value=fill_value,
dtype=dtype,
axis=-1,
)
return result
def _groupby_bins_agg(
array: xr.DataArray,
group: xr.DataArray,
bins,
func="sum",
fill_value=0,
dtype=None,
**cut_kwargs,
) -> xr.DataArray:
binned = pd.cut(np.ravel(group), bins, **cut_kwargs)
new_dim_name = group.name + "_bins"
indices = group.copy(data=binned.codes.reshape(group.shape))
result = xr.apply_ufunc(
_binned_agg,
array,
indices,
input_core_dims=[indices.dims, indices.dims],
output_core_dims=[[new_dim_name]],
output_dtypes=[array.dtype],
dask_gufunc_kwargs=dict(
allow_rechunk=True,
output_sizes={new_dim_name: binned.categories.size},
),
kwargs={
"num_bins": binned.categories.size,
"func": func,
"fill_value": fill_value,
"dtype": dtype,
},
dask="parallelized",
)
result.coords[new_dim_name] = binned.categories
return result
def isotropize(ps, fftdim, nfactor=4, truncate=False):
k = ps[fftdim[1]]
l = ps[fftdim[0]]
N = [k.size, l.size]
nbins = int(min(N) / nfactor)
freq_r = np.sqrt(k ** 2 + l ** 2).rename("freq_r")
kr = _groupby_bins_agg(freq_r, freq_r, bins=nbins, func="mean")
if truncate:
if k.max() > l.max():
kmax = l.max()
else:
kmax = k.max()
kr = kr.where(kr <= kmax)
else:
msg = (
"The flag `truncate` will be set to True by default in future version "
+ "in order to truncate the isotropic wavenumber larger than the "
+ "Nyquist wavenumber."
)
warnings.warn(msg, FutureWarning)
iso_ps = (
_groupby_bins_agg(ps, freq_r, bins=nbins, func="mean")
.rename({"freq_r_bins": "freq_r"})
.drop_vars("freq_r")
)
iso_ps.coords["freq_r"] = kr.data
if truncate:
return (iso_ps * iso_ps.freq_r).dropna("freq_r")
else:
return iso_ps * iso_ps.freq_r
def isotropic_powerspectrum(*args, **kwargs):
msg = (
"This function has been renamed and will disappear in the future."
+ " Please use isotropic_power_spectrum instead"
)
warnings.warn(msg, Warning)
return isotropic_power_spectrum(*args, **kwargs)
def isotropic_power_spectrum(
da,
spacing_tol=1e-3,
dim=None,
shift=True,
detrend=None,
scaling="density",
window=None,
window_correction=False,
nfactor=4,
truncate=False,
**kwargs,
):
if "density" in kwargs:
density = kwargs.pop("density")
scaling = "density" if density else "false_density"
if dim is None:
dim = da.dims
if len(dim) != 2:
raise ValueError("The Fourier transform should be two dimensional")
ps = power_spectrum(
da,
spacing_tol=spacing_tol,
dim=dim,
shift=shift,
detrend=detrend,
scaling=scaling,
window_correction=window_correction,
window=window,
**kwargs,
)
fftdim = ["freq_" + d for d in dim]
return isotropize(ps, fftdim, nfactor=nfactor, truncate=truncate)
def isotropic_crossspectrum(*args, **kwargs):
msg = (
"This function has been renamed and will disappear in the future."
+ " Please use isotropic_cross_spectrum instead"
)
warnings.warn(msg, Warning)
return isotropic_cross_spectrum(*args, **kwargs)
def isotropic_cross_spectrum(
da1,
da2,
spacing_tol=1e-3,
dim=None,
shift=True,
detrend=None,
scaling="density",
window=None,
window_correction=False,
nfactor=4,
truncate=False,
**kwargs,
):
if "density" in kwargs:
density = kwargs.pop("density")
scaling = "density" if density else "false_density"
if dim is None:
dim = da1.dims
dim2 = da2.dims
if dim != dim2:
raise ValueError("The two datasets have different dimensions")
if len(dim) != 2:
raise ValueError("The Fourier transform should be two dimensional")
cs = cross_spectrum(
da1,
da2,
spacing_tol=spacing_tol,
dim=dim,
shift=shift,
detrend=detrend,
scaling=scaling,
window_correction=window_correction,
window=window,
**kwargs,
)
fftdim = ["freq_" + d for d in dim]
return isotropize(cs, fftdim, nfactor=nfactor, truncate=truncate)
def fit_loglog(x, y):
p = np.polyfit(np.log2(x), np.log2(y), 1)
y_fit = 2 ** (np.log2(x) * p[0] + p[1])
return y_fit, p[0], p[1]
| true | true |
1c38b757134a77c1004ddbc6823505175387eb23 | 4,674 | py | Python | tests/unit/models/test_job.py | amolkahat/cibyl | 586c3c0a6b21a8f1b71db0c5b29e7d60f9cf0def | [
"Apache-2.0"
] | null | null | null | tests/unit/models/test_job.py | amolkahat/cibyl | 586c3c0a6b21a8f1b71db0c5b29e7d60f9cf0def | [
"Apache-2.0"
] | null | null | null | tests/unit/models/test_job.py | amolkahat/cibyl | 586c3c0a6b21a8f1b71db0c5b29e7d60f9cf0def | [
"Apache-2.0"
] | null | null | null | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
import unittest
from cibyl.models.ci.build import Build
from cibyl.models.ci.job import Job
# pylint: disable=no-member
class TestJob(unittest.TestCase):
"""Testing Job CI model"""
def setUp(self):
self.job_name = 'test-job'
self.job_status = 'FAILURE'
self.job_url = 'http://ci_system/test-job'
self.builds = [Build("1")]
self.job = Job(name=self.job_name)
self.second_job = Job(name=self.job_name)
def test_job_name(self):
"""Testing new Job name attribute"""
self.assertTrue(
hasattr(self.job, 'name'), msg="Job lacks name attribute")
self.assertEqual(
self.job.name.value, self.job_name,
msg=f"Job name is {self.job.name.value}. \
Should be {self.job_name}")
def test_job_builds(self):
"""Testing new Job builds attribute"""
self.assertTrue(
hasattr(self.job, 'builds'), msg="Job lacks builds attribute")
self.assertEqual(
self.job.builds.value, {},
msg=f"Job default builds is {self.job.builds.value}. \
Should be []")
self.assertEqual(
self.second_job.builds.value, {},
msg="Job default builds are {self.second_job.builds.value}.\
Should be []")
self.job.builds.value = self.builds
self.assertEqual(
self.job.builds.value, self.builds,
msg="New job builds are {self.job.builds.value}. \
Should be {self.builds}")
def test_job_url(self):
"""Testing new Job url attribute"""
self.assertTrue(
hasattr(self.job, 'url'), msg="Job lacks url attribute")
self.assertEqual(
self.job.url.value, None,
msg=f"Job default url is {self.job.url.value}. Should be {None}")
self.job.url.value = self.job_url
self.assertEqual(
self.job.url.value, self.job_url,
msg=f"New job url is {self.job.url.value}. \
Should be {self.job_url}")
def test_jobs_comparison(self):
"""Testing new Job instances comparison."""
self.assertEqual(
self.job, self.second_job,
msg=f"Jobs {self.job.name.value} and \
{self.second_job.name.value} are not equal")
def test_jobs_comparison_other_type(self):
"""Testing new Job instances comparison."""
self.assertNotEqual(
self.job, "hello",
msg=f"Job {self.job.name.value} should be different from str")
def test_job_str(self):
"""Testing Job __str__ method."""
self.assertIn('Job: ', str(self.job))
self.assertIn('Job: ', str(self.second_job))
self.assertIn(self.job.name.value, str(self.job))
self.assertIn(self.job_name, str(self.second_job))
self.second_job.url.value = self.job_url
self.assertIn('Job: ', str(self.second_job))
self.assertIn(self.job_name, str(self.second_job))
def test_jobs_add_build(self):
"""Testing Job add_build method."""
build2 = Build("2", "SUCCESS")
self.job.add_build(build2)
self.assertEqual(1, len(self.job.builds.value))
self.assertEqual(build2, self.job.builds.value["2"])
def test_jobs_add_build_with_merge(self):
"""Testing Job add_build method."""
build2 = Build("2")
build3 = Build("2", "SUCCESS")
self.job.add_build(build2)
self.job.add_build(build3)
self.assertEqual(1, len(self.job.builds.value))
build = self.job.builds.value["2"]
self.assertEqual(build.status.value, build3.status.value)
def test_jobs_merge(self):
"""Testing Job merge method."""
build = Build("2", "SUCCESS")
self.second_job.add_build(build)
self.second_job.url.value = self.job_url
self.job.merge(self.second_job)
self.assertEqual(self.job.url.value, self.job_url)
self.assertEqual(1, len(self.job.builds.value))
build_obj = self.job.builds.value["2"]
self.assertEqual(build_obj.status.value, build.status.value)
| 34.880597 | 78 | 0.629226 | import unittest
from cibyl.models.ci.build import Build
from cibyl.models.ci.job import Job
class TestJob(unittest.TestCase):
def setUp(self):
self.job_name = 'test-job'
self.job_status = 'FAILURE'
self.job_url = 'http://ci_system/test-job'
self.builds = [Build("1")]
self.job = Job(name=self.job_name)
self.second_job = Job(name=self.job_name)
def test_job_name(self):
self.assertTrue(
hasattr(self.job, 'name'), msg="Job lacks name attribute")
self.assertEqual(
self.job.name.value, self.job_name,
msg=f"Job name is {self.job.name.value}. \
Should be {self.job_name}")
def test_job_builds(self):
self.assertTrue(
hasattr(self.job, 'builds'), msg="Job lacks builds attribute")
self.assertEqual(
self.job.builds.value, {},
msg=f"Job default builds is {self.job.builds.value}. \
Should be []")
self.assertEqual(
self.second_job.builds.value, {},
msg="Job default builds are {self.second_job.builds.value}.\
Should be []")
self.job.builds.value = self.builds
self.assertEqual(
self.job.builds.value, self.builds,
msg="New job builds are {self.job.builds.value}. \
Should be {self.builds}")
def test_job_url(self):
self.assertTrue(
hasattr(self.job, 'url'), msg="Job lacks url attribute")
self.assertEqual(
self.job.url.value, None,
msg=f"Job default url is {self.job.url.value}. Should be {None}")
self.job.url.value = self.job_url
self.assertEqual(
self.job.url.value, self.job_url,
msg=f"New job url is {self.job.url.value}. \
Should be {self.job_url}")
def test_jobs_comparison(self):
self.assertEqual(
self.job, self.second_job,
msg=f"Jobs {self.job.name.value} and \
{self.second_job.name.value} are not equal")
def test_jobs_comparison_other_type(self):
self.assertNotEqual(
self.job, "hello",
msg=f"Job {self.job.name.value} should be different from str")
def test_job_str(self):
self.assertIn('Job: ', str(self.job))
self.assertIn('Job: ', str(self.second_job))
self.assertIn(self.job.name.value, str(self.job))
self.assertIn(self.job_name, str(self.second_job))
self.second_job.url.value = self.job_url
self.assertIn('Job: ', str(self.second_job))
self.assertIn(self.job_name, str(self.second_job))
def test_jobs_add_build(self):
build2 = Build("2", "SUCCESS")
self.job.add_build(build2)
self.assertEqual(1, len(self.job.builds.value))
self.assertEqual(build2, self.job.builds.value["2"])
def test_jobs_add_build_with_merge(self):
build2 = Build("2")
build3 = Build("2", "SUCCESS")
self.job.add_build(build2)
self.job.add_build(build3)
self.assertEqual(1, len(self.job.builds.value))
build = self.job.builds.value["2"]
self.assertEqual(build.status.value, build3.status.value)
def test_jobs_merge(self):
build = Build("2", "SUCCESS")
self.second_job.add_build(build)
self.second_job.url.value = self.job_url
self.job.merge(self.second_job)
self.assertEqual(self.job.url.value, self.job_url)
self.assertEqual(1, len(self.job.builds.value))
build_obj = self.job.builds.value["2"]
self.assertEqual(build_obj.status.value, build.status.value)
| true | true |
1c38b83aead408389e0116016b8e5521e4063ab7 | 88 | py | Python | output/models/ms_data/regex/re_l21_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/regex/re_l21_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/regex/re_l21_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.regex.re_l21_xsd.re_l21 import Doc
__all__ = [
"Doc",
]
| 14.666667 | 61 | 0.715909 | from output.models.ms_data.regex.re_l21_xsd.re_l21 import Doc
__all__ = [
"Doc",
]
| true | true |
1c38b861973e77d69b84bb9a7ed9781a38aa4742 | 1,441 | py | Python | src/gui/MainLayout.py | tinfins/CMSC495-Group-3 | 0b7ea3c885322631d6dd3ef7ee96b6a98ba2392e | [
"MIT"
] | 1 | 2021-02-11T01:18:08.000Z | 2021-02-11T01:18:08.000Z | src/gui/MainLayout.py | tinfins/CMSC495-Group-3 | 0b7ea3c885322631d6dd3ef7ee96b6a98ba2392e | [
"MIT"
] | 1 | 2021-01-28T00:03:15.000Z | 2021-01-28T00:03:15.000Z | src/gui/MainLayout.py | tinfins/CMSC495-Group-3 | 0b7ea3c885322631d6dd3ef7ee96b6a98ba2392e | [
"MIT"
] | 3 | 2021-02-13T22:45:14.000Z | 2021-02-16T02:57:11.000Z | # Standard imports
import logging.config
import tkinter as tk
from tkinter import ttk
# Big Teacher module imports:
import src.gui.MenuStatus as MenuStatus
class MainLayout(tk.Frame):
'''
Main GUI layout of application. Contains Menu Bar and Status Bar
'''
def __init__(self, master, controller):
'''
Initialize Main Layout page
'''
ttk.Frame.__init__(self, master)
self.logger = logging.getLogger(__name__)
self.master = master
self.controller = controller
self.master.title('Big Teacher')
self.pack()
# Menu bar Frame
self.menu_frame = ttk.Frame(self.master)
# Main content frame
self.content_frame = ttk.Frame(self.master)
# Status bar frame
self.status_frame = ttk.Frame(self.master)
# Pack frames root window
self.menu_frame.pack(side=tk.TOP, fill=tk.X)
self.status_frame.pack(side=tk.BOTTOM, fill=tk.X)
self.content_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
# Instantiate menu bar
menu_bar = MenuStatus.MenuBarGui(self.menu_frame, self.master)
# Filler label
filler_label = ttk.Label(self.content_frame)
# Instantiate status bar
self.status_bar = MenuStatus.StatusBar(self.status_frame)
# Pack frames in root window
menu_bar.pack()
filler_label.pack()
self.status_bar.pack()
| 30.659574 | 70 | 0.644691 |
import logging.config
import tkinter as tk
from tkinter import ttk
import src.gui.MenuStatus as MenuStatus
class MainLayout(tk.Frame):
def __init__(self, master, controller):
ttk.Frame.__init__(self, master)
self.logger = logging.getLogger(__name__)
self.master = master
self.controller = controller
self.master.title('Big Teacher')
self.pack()
self.menu_frame = ttk.Frame(self.master)
self.content_frame = ttk.Frame(self.master)
self.status_frame = ttk.Frame(self.master)
self.menu_frame.pack(side=tk.TOP, fill=tk.X)
self.status_frame.pack(side=tk.BOTTOM, fill=tk.X)
self.content_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
menu_bar = MenuStatus.MenuBarGui(self.menu_frame, self.master)
filler_label = ttk.Label(self.content_frame)
self.status_bar = MenuStatus.StatusBar(self.status_frame)
menu_bar.pack()
filler_label.pack()
self.status_bar.pack()
| true | true |
1c38b86b63961d39979e82afb0da3ab99a95bef1 | 2,782 | py | Python | examples/daal4py/decision_tree_classification_batch.py | owerbat/scikit-learn-intelex | 986637668853a00f0047b7a8854ddb9fb3620549 | [
"Apache-2.0"
] | 336 | 2021-04-07T04:25:28.000Z | 2022-03-29T09:18:44.000Z | examples/daal4py/decision_tree_classification_batch.py | owerbat/scikit-learn-intelex | 986637668853a00f0047b7a8854ddb9fb3620549 | [
"Apache-2.0"
] | 213 | 2021-03-25T20:51:24.000Z | 2022-03-31T09:04:13.000Z | examples/daal4py/decision_tree_classification_batch.py | owerbat/scikit-learn-intelex | 986637668853a00f0047b7a8854ddb9fb3620549 | [
"Apache-2.0"
] | 43 | 2021-05-23T14:29:23.000Z | 2022-03-04T08:12:53.000Z | #===============================================================================
# Copyright 2014-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# daal4py Decision Tree Classification example for shared memory systems
import daal4py as d4p
import numpy as np
# let's try to use pandas' fast csv reader
try:
import pandas
def read_csv(f, c, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=np.float32)
except ImportError:
# fall back to numpy loadtxt
def read_csv(f, c, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2, dtype=np.float32)
def main(readcsv=read_csv, method='defaultDense'):
# input data file
infile = "./data/batch/decision_tree_train.csv"
prunefile = "./data/batch/decision_tree_prune.csv"
testfile = "./data/batch/decision_tree_test.csv"
# Configure a training object (5 classes)
train_algo = d4p.decision_tree_classification_training(5)
# Read data. Let's use 5 features per observation
data = readcsv(infile, range(5), t=np.float32)
labels = readcsv(infile, range(5, 6), t=np.float32)
prunedata = readcsv(prunefile, range(5), t=np.float32)
prunelabels = readcsv(prunefile, range(5, 6), t=np.float32)
train_result = train_algo.compute(data, labels, prunedata, prunelabels)
# Now let's do some prediction
predict_algo = d4p.decision_tree_classification_prediction()
# read test data (with same #features)
pdata = readcsv(testfile, range(5), t=np.float32)
plabels = readcsv(testfile, range(5, 6), t=np.float32)
# now predict using the model from the training above
predict_result = predict_algo.compute(pdata, train_result.model)
# Prediction result provides prediction
assert(predict_result.prediction.shape == (pdata.shape[0], 1))
return (train_result, predict_result, plabels)
if __name__ == "__main__":
(train_result, predict_result, plabels) = main()
print(
"\nDecision tree prediction results (first 20 rows):\n",
predict_result.prediction[0:20]
)
print("\nGround truth (first 20 rows):\n", plabels[0:20])
print('All looks good!')
| 38.638889 | 90 | 0.673976 |
import daal4py as d4p
import numpy as np
try:
import pandas
def read_csv(f, c, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=np.float32)
except ImportError:
def read_csv(f, c, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2, dtype=np.float32)
def main(readcsv=read_csv, method='defaultDense'):
infile = "./data/batch/decision_tree_train.csv"
prunefile = "./data/batch/decision_tree_prune.csv"
testfile = "./data/batch/decision_tree_test.csv"
train_algo = d4p.decision_tree_classification_training(5)
data = readcsv(infile, range(5), t=np.float32)
labels = readcsv(infile, range(5, 6), t=np.float32)
prunedata = readcsv(prunefile, range(5), t=np.float32)
prunelabels = readcsv(prunefile, range(5, 6), t=np.float32)
train_result = train_algo.compute(data, labels, prunedata, prunelabels)
# Now let's do some prediction
predict_algo = d4p.decision_tree_classification_prediction()
= readcsv(testfile, range(5), t=np.float32)
plabels = readcsv(testfile, range(5, 6), t=np.float32)
predict_result = predict_algo.compute(pdata, train_result.model)
assert(predict_result.prediction.shape == (pdata.shape[0], 1))
return (train_result, predict_result, plabels)
if __name__ == "__main__":
(train_result, predict_result, plabels) = main()
print(
"\nDecision tree prediction results (first 20 rows):\n",
predict_result.prediction[0:20]
)
print("\nGround truth (first 20 rows):\n", plabels[0:20])
print('All looks good!')
| true | true |
1c38b9842852205b95892006d267eb3f51cd2da7 | 6,384 | py | Python | mkt/commonplace/views.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | mkt/commonplace/views.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | mkt/commonplace/views.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | import datetime
import importlib
import json
import os
from urlparse import urlparse
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import resolve
from django.http import HttpResponse, Http404
from django.shortcuts import render
from django.views.decorators.gzip import gzip_page
import jingo
import jinja2
import newrelic.agent
import waffle
from cache_nuggets.lib import memoize
from mkt.webapps.models import Webapp
def get_whitelisted_origins(request, include_loop=True):
current_domain = settings.DOMAIN
current_origin = '%s://%s' % ('https' if request.is_secure() else 'http',
current_domain)
development_server = (settings.DEBUG or
current_domain == 'marketplace-dev.allizom.org')
origin_whitelist = [
# Start by whitelisting the 2 app:// variants for the current domain,
# and then whitelist the current http or https origin.
'app://packaged.%s' % current_domain,
'app://%s' % current_domain,
current_origin,
]
# On dev, also allow localhost/mp.dev.
if development_server:
origin_whitelist.extend([
'http://localhost:8675',
'https://localhost:8675',
'http://localhost',
'https://localhost',
'http://mp.dev',
'https://mp.dev',
])
if include_loop:
# Include loop origins if necessary.
origin_whitelist.extend([
'https://hello.firefox.com',
'https://call.firefox.com',
])
# On dev, include loop dev origin as well.
if development_server:
origin_whitelist.extend([
'http://loop-webapp.dev.mozaws.net',
])
return json.dumps(origin_whitelist)
def get_build_id(repo):
try:
# This is where the `build_{repo}.py` files get written to after
# compiling and minifying our assets.
# Get the `BUILD_ID` from `build_{repo}.py` and use that to
# cache-bust the assets for this repo's CSS/JS minified bundles.
module = 'build_%s' % repo
return importlib.import_module(module).BUILD_ID
except (ImportError, AttributeError):
try:
build_id_fn = os.path.join(settings.MEDIA_ROOT, repo,
'build_id.txt')
with storage.open(build_id_fn) as fh:
return fh.read()
except:
# Either `build_{repo}.py` does not exist or `build_{repo}.py`
# exists but does not contain `BUILD_ID`. Fall back to
# `BUILD_ID_JS` which is written to `build.py` by jingo-minify.
try:
from build import BUILD_ID_CSS
return BUILD_ID_CSS
except ImportError:
return 'dev'
def get_imgurls(repo):
imgurls_fn = os.path.join(settings.MEDIA_ROOT, repo, 'imgurls.txt')
with storage.open(imgurls_fn) as fh:
return list(set(fh.readlines()))
@gzip_page
def commonplace(request, repo, **kwargs):
if repo not in settings.COMMONPLACE_REPOS:
raise Http404
BUILD_ID = get_build_id(repo)
ua = request.META.get('HTTP_USER_AGENT', '').lower()
include_persona = True
include_splash = False
if repo == 'fireplace':
include_splash = True
if (request.GET.get('nativepersona') or
'mccs' in request.GET or
('mcc' in request.GET and 'mnc' in request.GET)):
include_persona = False
elif repo == 'discoplace':
include_persona = False
include_splash = True
if waffle.switch_is_active('firefox-accounts'):
# We never want to include persona shim if firefox accounts is enabled:
# native fxa already provides navigator.id, and fallback fxa doesn't
# need it.
include_persona = False
site_settings = {}
else:
site_settings = {
'persona_unverified_issuer': settings.BROWSERID_DOMAIN,
}
site_settings['fxa_css_path'] = settings.FXA_CSS_PATH
ctx = {
'BUILD_ID': BUILD_ID,
'appcache': repo in settings.COMMONPLACE_REPOS_APPCACHED,
'include_persona': include_persona,
'include_splash': include_splash,
'repo': repo,
'robots': 'googlebot' in ua,
'site_settings': site_settings,
'newrelic_header': newrelic.agent.get_browser_timing_header,
'newrelic_footer': newrelic.agent.get_browser_timing_footer,
}
# For OpenGraph stuff.
resolved_url = resolve(request.path)
if repo == 'fireplace' and resolved_url.url_name == 'detail':
ctx = add_app_ctx(ctx, resolved_url.kwargs['app_slug'])
media_url = urlparse(settings.MEDIA_URL)
if media_url.netloc:
ctx['media_origin'] = media_url.scheme + '://' + media_url.netloc
return render(request, 'commonplace/index.html', ctx)
def add_app_ctx(ctx, app_slug):
"""
If we are hitting the Fireplace detail page, get the app for Open Graph
tags.
"""
try:
app = Webapp.objects.get(app_slug=app_slug)
ctx['app'] = app
except Webapp.DoesNotExist:
pass
return ctx
@gzip_page
def appcache_manifest(request):
"""Serves the appcache manifest."""
repo = request.GET.get('repo')
if not repo or repo not in settings.COMMONPLACE_REPOS_APPCACHED:
raise Http404
template = _appcache_manifest_template(repo)
return HttpResponse(template, content_type='text/cache-manifest')
@memoize('appcache-manifest-template')
def _appcache_manifest_template(repo):
ctx = {
'BUILD_ID': get_build_id(repo),
'imgurls': get_imgurls(repo),
'repo': repo,
'timestamp': datetime.datetime.now(),
}
t = jingo.env.get_template('commonplace/manifest.appcache').render(ctx)
return unicode(jinja2.Markup(t))
@gzip_page
def iframe_install(request):
return render(request, 'commonplace/iframe-install.html', {
'whitelisted_origins': get_whitelisted_origins(request)
})
@gzip_page
def potatolytics(request):
return render(request, 'commonplace/potatolytics.html', {
'whitelisted_origins': get_whitelisted_origins(request,
include_loop=False)
})
| 31.761194 | 79 | 0.636122 | import datetime
import importlib
import json
import os
from urlparse import urlparse
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import resolve
from django.http import HttpResponse, Http404
from django.shortcuts import render
from django.views.decorators.gzip import gzip_page
import jingo
import jinja2
import newrelic.agent
import waffle
from cache_nuggets.lib import memoize
from mkt.webapps.models import Webapp
def get_whitelisted_origins(request, include_loop=True):
current_domain = settings.DOMAIN
current_origin = '%s://%s' % ('https' if request.is_secure() else 'http',
current_domain)
development_server = (settings.DEBUG or
current_domain == 'marketplace-dev.allizom.org')
origin_whitelist = [
'app://packaged.%s' % current_domain,
'app://%s' % current_domain,
current_origin,
]
if development_server:
origin_whitelist.extend([
'http://localhost:8675',
'https://localhost:8675',
'http://localhost',
'https://localhost',
'http://mp.dev',
'https://mp.dev',
])
if include_loop:
origin_whitelist.extend([
'https://hello.firefox.com',
'https://call.firefox.com',
])
if development_server:
origin_whitelist.extend([
'http://loop-webapp.dev.mozaws.net',
])
return json.dumps(origin_whitelist)
def get_build_id(repo):
try:
module = 'build_%s' % repo
return importlib.import_module(module).BUILD_ID
except (ImportError, AttributeError):
try:
build_id_fn = os.path.join(settings.MEDIA_ROOT, repo,
'build_id.txt')
with storage.open(build_id_fn) as fh:
return fh.read()
except:
# Either `build_{repo}.py` does not exist or `build_{repo}.py`
# exists but does not contain `BUILD_ID`. Fall back to
# `BUILD_ID_JS` which is written to `build.py` by jingo-minify.
try:
from build import BUILD_ID_CSS
return BUILD_ID_CSS
except ImportError:
return 'dev'
def get_imgurls(repo):
imgurls_fn = os.path.join(settings.MEDIA_ROOT, repo, 'imgurls.txt')
with storage.open(imgurls_fn) as fh:
return list(set(fh.readlines()))
@gzip_page
def commonplace(request, repo, **kwargs):
if repo not in settings.COMMONPLACE_REPOS:
raise Http404
BUILD_ID = get_build_id(repo)
ua = request.META.get('HTTP_USER_AGENT', '').lower()
include_persona = True
include_splash = False
if repo == 'fireplace':
include_splash = True
if (request.GET.get('nativepersona') or
'mccs' in request.GET or
('mcc' in request.GET and 'mnc' in request.GET)):
include_persona = False
elif repo == 'discoplace':
include_persona = False
include_splash = True
if waffle.switch_is_active('firefox-accounts'):
# We never want to include persona shim if firefox accounts is enabled:
# native fxa already provides navigator.id, and fallback fxa doesn't
include_persona = False
site_settings = {}
else:
site_settings = {
'persona_unverified_issuer': settings.BROWSERID_DOMAIN,
}
site_settings['fxa_css_path'] = settings.FXA_CSS_PATH
ctx = {
'BUILD_ID': BUILD_ID,
'appcache': repo in settings.COMMONPLACE_REPOS_APPCACHED,
'include_persona': include_persona,
'include_splash': include_splash,
'repo': repo,
'robots': 'googlebot' in ua,
'site_settings': site_settings,
'newrelic_header': newrelic.agent.get_browser_timing_header,
'newrelic_footer': newrelic.agent.get_browser_timing_footer,
}
resolved_url = resolve(request.path)
if repo == 'fireplace' and resolved_url.url_name == 'detail':
ctx = add_app_ctx(ctx, resolved_url.kwargs['app_slug'])
media_url = urlparse(settings.MEDIA_URL)
if media_url.netloc:
ctx['media_origin'] = media_url.scheme + '://' + media_url.netloc
return render(request, 'commonplace/index.html', ctx)
def add_app_ctx(ctx, app_slug):
try:
app = Webapp.objects.get(app_slug=app_slug)
ctx['app'] = app
except Webapp.DoesNotExist:
pass
return ctx
@gzip_page
def appcache_manifest(request):
repo = request.GET.get('repo')
if not repo or repo not in settings.COMMONPLACE_REPOS_APPCACHED:
raise Http404
template = _appcache_manifest_template(repo)
return HttpResponse(template, content_type='text/cache-manifest')
@memoize('appcache-manifest-template')
def _appcache_manifest_template(repo):
ctx = {
'BUILD_ID': get_build_id(repo),
'imgurls': get_imgurls(repo),
'repo': repo,
'timestamp': datetime.datetime.now(),
}
t = jingo.env.get_template('commonplace/manifest.appcache').render(ctx)
return unicode(jinja2.Markup(t))
@gzip_page
def iframe_install(request):
return render(request, 'commonplace/iframe-install.html', {
'whitelisted_origins': get_whitelisted_origins(request)
})
@gzip_page
def potatolytics(request):
return render(request, 'commonplace/potatolytics.html', {
'whitelisted_origins': get_whitelisted_origins(request,
include_loop=False)
})
| true | true |
1c38b99bfb1848d9aba4d4d4a92c531feb23e7ae | 2,226 | py | Python | src/movieMain.py | o-x-y-g-e-n/Web-Scraping-With-Sentiment-Analysis | 4f0faaefdbdc515165a58fd53942c2902c3e7372 | [
"MIT"
] | 3 | 2020-12-23T14:57:59.000Z | 2021-06-14T07:30:25.000Z | src/movieMain.py | o-x-y-g-e-n/Web-Scraping-With-Sentiment-Analysis | 4f0faaefdbdc515165a58fd53942c2902c3e7372 | [
"MIT"
] | 2 | 2021-04-06T18:21:56.000Z | 2021-06-02T03:36:15.000Z | src/movieMain.py | o-x-y-g-e-n/Web-Scraping-With-Sentiment-Analysis | 4f0faaefdbdc515165a58fd53942c2902c3e7372 | [
"MIT"
] | 1 | 2021-06-14T07:30:40.000Z | 2021-06-14T07:30:40.000Z | import nltk,re,time,sys ,codecs,os,requests
import xml.etree.ElementTree as ET
from lxml import etree
from nltk.corpus import stopwords
from textblob import TextBlob
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from xml.dom import minidom
from tqdm import tqdm
import pandas as pd
from pandas import ExcelWriter
from bs4 import BeautifulSoup
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from contextlib import closing
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver import Firefox
from src.majority import find_majority_terms
from src.percentage import percentage
from src.remove_non_ascii import remove_non_ascii_1
def movie_main():
major_terms = find_majority_terms("movie_reviews")
print(major_terms)
total_count = [0]*len(major_terms)
positive = [0]*len(major_terms)
negative = [0]*len(major_terms)
neutral = [0]*len(major_terms)
positive_tweet = []
negative_tweet = []
neutral_tweet = []
total_tweets=[]
for i in range(0,len(major_terms)):
total_tweets.append([])
for z in total_tweets:
z.append([])
z.append([])
z.append([])
doc = etree.XMLParser(recover=True)
tree = etree.parse('movie_reviews.xml',doc)
for df in tree.xpath('//review'):
subfields = df.getchildren()
i=0
sentences = nltk.sent_tokenize(str(subfields[0].text))
for term in major_terms:
for sentence in sentences:
words = nltk.word_tokenize(sentence)
if term in words:
analysis = TextBlob(sentence)
if(analysis.sentiment.polarity == 0):
neutral[i] +=1
total_tweets[i][0].append(sentence)
elif(analysis.sentiment.polarity > 0 and analysis.sentiment.polarity <=1):
positive[i] +=1
total_tweets[i][1].append(sentence)
elif(analysis.sentiment.polarity > -1 and analysis.sentiment.polarity <=0):
negative[i]+=1
total_tweets[i][2].append(sentence)
total_count[i] +=1
i+=1
return total_tweets,positive,negative,neutral,major_terms | 32.735294 | 81 | 0.752022 | import nltk,re,time,sys ,codecs,os,requests
import xml.etree.ElementTree as ET
from lxml import etree
from nltk.corpus import stopwords
from textblob import TextBlob
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from xml.dom import minidom
from tqdm import tqdm
import pandas as pd
from pandas import ExcelWriter
from bs4 import BeautifulSoup
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from contextlib import closing
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver import Firefox
from src.majority import find_majority_terms
from src.percentage import percentage
from src.remove_non_ascii import remove_non_ascii_1
def movie_main():
major_terms = find_majority_terms("movie_reviews")
print(major_terms)
total_count = [0]*len(major_terms)
positive = [0]*len(major_terms)
negative = [0]*len(major_terms)
neutral = [0]*len(major_terms)
positive_tweet = []
negative_tweet = []
neutral_tweet = []
total_tweets=[]
for i in range(0,len(major_terms)):
total_tweets.append([])
for z in total_tweets:
z.append([])
z.append([])
z.append([])
doc = etree.XMLParser(recover=True)
tree = etree.parse('movie_reviews.xml',doc)
for df in tree.xpath('//review'):
subfields = df.getchildren()
i=0
sentences = nltk.sent_tokenize(str(subfields[0].text))
for term in major_terms:
for sentence in sentences:
words = nltk.word_tokenize(sentence)
if term in words:
analysis = TextBlob(sentence)
if(analysis.sentiment.polarity == 0):
neutral[i] +=1
total_tweets[i][0].append(sentence)
elif(analysis.sentiment.polarity > 0 and analysis.sentiment.polarity <=1):
positive[i] +=1
total_tweets[i][1].append(sentence)
elif(analysis.sentiment.polarity > -1 and analysis.sentiment.polarity <=0):
negative[i]+=1
total_tweets[i][2].append(sentence)
total_count[i] +=1
i+=1
return total_tweets,positive,negative,neutral,major_terms | true | true |
1c38ba494a7048305333a4416b8225cce6de2a25 | 3,027 | py | Python | vega/core/pipeline/multi_task_pipe_step.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | vega/core/pipeline/multi_task_pipe_step.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | vega/core/pipeline/multi_task_pipe_step.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-task pipe step."""
import logging
from vega.common.general import General
from vega.common.class_factory import ClassFactory, ClassType
from vega.report import ReportServer, ReportRecord, ReportClient
from vega.common import Status
from vega.core.scheduler import create_master
from vega.core.pipeline.conf import PipeStepConfig
from vega.core.pipeline.train_pipe_step import TrainPipeStep
from vega.trainer.conf import TrainerConfig
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.PIPE_STEP)
class MultiTaskPipeStep(TrainPipeStep):
"""TrainPipeStep is the implementation class of PipeStep.
Fully train is the last pipe step in pipeline, we provide horovrd or local trainer
for user to choose.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
logger.info("init MultiTaskPipeStep...")
def do(self):
"""Start to run fully train with horovod or local trainer."""
logger.info("MultiTaskPipeStep started...")
self.update_status(Status.running)
self.master = create_master()
self._train_multi_task()
self.master.join()
ReportServer().output_step_all_records(step_name=self.task.step_name)
self.master.close()
ReportServer().backup_output_path()
self.update_status(Status.finished)
def _train_single_model(self, model_desc, model_id, hps, multi_task):
cls_trainer = ClassFactory.get_cls(ClassType.TRAINER, PipeStepConfig.trainer.type)
step_name = self.task.step_name
sample = dict(worker_id=model_id, desc=model_desc, step_name=step_name)
record = ReportRecord().load_dict(sample)
logging.debug("update record=%s", str(record))
trainer = cls_trainer(model_desc=model_desc, id=model_id, hps=hps, multi_task=multi_task)
ReportClient().update(**record.to_dict())
self.train_model(trainer)
def _train_multi_task(self):
from copy import deepcopy
for epoch in range(0, PipeStepConfig.pipe_step.multi_task_epochs):
for alg in PipeStepConfig.pipe_step.tasks:
desc = deepcopy(PipeStepConfig().to_dict()[alg])
model_desc = desc.model.model_desc
desc.pop('model')
self._train_single_model(model_desc=model_desc, model_id=0, hps=desc, multi_task=alg)
| 40.36 | 101 | 0.718203 |
import logging
from vega.common.general import General
from vega.common.class_factory import ClassFactory, ClassType
from vega.report import ReportServer, ReportRecord, ReportClient
from vega.common import Status
from vega.core.scheduler import create_master
from vega.core.pipeline.conf import PipeStepConfig
from vega.core.pipeline.train_pipe_step import TrainPipeStep
from vega.trainer.conf import TrainerConfig
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.PIPE_STEP)
class MultiTaskPipeStep(TrainPipeStep):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
logger.info("init MultiTaskPipeStep...")
def do(self):
logger.info("MultiTaskPipeStep started...")
self.update_status(Status.running)
self.master = create_master()
self._train_multi_task()
self.master.join()
ReportServer().output_step_all_records(step_name=self.task.step_name)
self.master.close()
ReportServer().backup_output_path()
self.update_status(Status.finished)
def _train_single_model(self, model_desc, model_id, hps, multi_task):
cls_trainer = ClassFactory.get_cls(ClassType.TRAINER, PipeStepConfig.trainer.type)
step_name = self.task.step_name
sample = dict(worker_id=model_id, desc=model_desc, step_name=step_name)
record = ReportRecord().load_dict(sample)
logging.debug("update record=%s", str(record))
trainer = cls_trainer(model_desc=model_desc, id=model_id, hps=hps, multi_task=multi_task)
ReportClient().update(**record.to_dict())
self.train_model(trainer)
def _train_multi_task(self):
from copy import deepcopy
for epoch in range(0, PipeStepConfig.pipe_step.multi_task_epochs):
for alg in PipeStepConfig.pipe_step.tasks:
desc = deepcopy(PipeStepConfig().to_dict()[alg])
model_desc = desc.model.model_desc
desc.pop('model')
self._train_single_model(model_desc=model_desc, model_id=0, hps=desc, multi_task=alg)
| true | true |
1c38bbb507524c08538e3489e0f8293d4dab2320 | 3,169 | py | Python | user/views.py | myl7/conus | 6eb2c9ee55c45d8b9c7f9de0e5998f94954b1a4e | [
"MIT"
] | null | null | null | user/views.py | myl7/conus | 6eb2c9ee55c45d8b9c7f9de0e5998f94954b1a4e | [
"MIT"
] | null | null | null | user/views.py | myl7/conus | 6eb2c9ee55c45d8b9c7f9de0e5998f94954b1a4e | [
"MIT"
] | null | null | null | import uuid
import re
from urllib.parse import urlencode
from xml.etree import ElementTree
import json
from django.shortcuts import redirect, reverse
from django.conf import settings
from django.http.response import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.views.generic import FormView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
import requests
from .models import UstcCasCredential
from . import forms
from .utils.email import validate_email
def login_view(request):
if request.user.is_anonymous:
url = settings.USTC_CAS_LOGIN_URL + '?' + urlencode({
'service': settings.USTC_CAS_CALLBACK_URL
})
return redirect(url)
else:
return redirect(reverse('notice:list_recv'))
def _validate_ticket(ticket):
url = settings.USTC_CAS_VALIDATE_URL + '?' + urlencode({
'service': settings.USTC_CAS_CALLBACK_URL,
'ticket': ticket
})
response = requests.get(url)
tree = ElementTree.fromstring(response.content.decode())[0]
cas_tag = '{http://www.yale.edu/tp/cas}'
if tree.tag != f'{cas_tag}authenticationSuccess':
return None
gid = tree.find('attributes').find(f'{cas_tag}gid').text.strip()
uid = tree.find(f'{cas_tag}user').text.strip()
return gid, uid
def validate_view(request):
error = '登录失败,请重试'
ticket = request.GET.get('ticket', None)
if not ticket:
return HttpResponse(error, status=401)
res = _validate_ticket(ticket)
if not res:
return HttpResponse(error, status=401)
gid, uid = res
try:
user = User.objects.get(username=uid)
except User.DoesNotExist:
user = User.objects.create_user(username=uid, password=uuid.uuid4().hex, first_name=uid)
UstcCasCredential.objects.create(user=user, gid=gid)
if not re.match(r'^[A-Z]{2}[0-9]{8}$', uid):
user.user_permissions.add('notice.add_notice')
login(request, user)
return redirect(reverse('notice:list_recv'))
def logout_view(request):
logout(request)
return redirect(settings.USTC_CAS_LOGOUT_URL)
@login_required
def validate_email_view(request):
error = '验证邮箱失败,请重试'
token = request.GET.get('token', None)
if not token:
return HttpResponse(error, status=400)
key = f'user:{request.user.pk}:validate_email'
email = json.loads(cache.get(key)).get(token, None)
if not email:
return HttpResponse(error, status=400)
contact_info = request.user.contactinfo
contact_info.email = email
contact_info.save()
cache.delete(key)
return redirect(reverse('notice:list_recv'))
class ContactInfoUpdateEmailView(LoginRequiredMixin, FormView):
form_class = forms.ContactInfoUpdateEmailForm
template_name = 'user/update_email.html'
success_url = reverse_lazy('user:update_email')
def form_valid(self, form):
validate_email(self.request.user, form.cleaned_data['email'])
return super().form_valid(form)
| 32.010101 | 96 | 0.71095 | import uuid
import re
from urllib.parse import urlencode
from xml.etree import ElementTree
import json
from django.shortcuts import redirect, reverse
from django.conf import settings
from django.http.response import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.views.generic import FormView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
import requests
from .models import UstcCasCredential
from . import forms
from .utils.email import validate_email
def login_view(request):
if request.user.is_anonymous:
url = settings.USTC_CAS_LOGIN_URL + '?' + urlencode({
'service': settings.USTC_CAS_CALLBACK_URL
})
return redirect(url)
else:
return redirect(reverse('notice:list_recv'))
def _validate_ticket(ticket):
url = settings.USTC_CAS_VALIDATE_URL + '?' + urlencode({
'service': settings.USTC_CAS_CALLBACK_URL,
'ticket': ticket
})
response = requests.get(url)
tree = ElementTree.fromstring(response.content.decode())[0]
cas_tag = '{http://www.yale.edu/tp/cas}'
if tree.tag != f'{cas_tag}authenticationSuccess':
return None
gid = tree.find('attributes').find(f'{cas_tag}gid').text.strip()
uid = tree.find(f'{cas_tag}user').text.strip()
return gid, uid
def validate_view(request):
error = '登录失败,请重试'
ticket = request.GET.get('ticket', None)
if not ticket:
return HttpResponse(error, status=401)
res = _validate_ticket(ticket)
if not res:
return HttpResponse(error, status=401)
gid, uid = res
try:
user = User.objects.get(username=uid)
except User.DoesNotExist:
user = User.objects.create_user(username=uid, password=uuid.uuid4().hex, first_name=uid)
UstcCasCredential.objects.create(user=user, gid=gid)
if not re.match(r'^[A-Z]{2}[0-9]{8}$', uid):
user.user_permissions.add('notice.add_notice')
login(request, user)
return redirect(reverse('notice:list_recv'))
def logout_view(request):
logout(request)
return redirect(settings.USTC_CAS_LOGOUT_URL)
@login_required
def validate_email_view(request):
error = '验证邮箱失败,请重试'
token = request.GET.get('token', None)
if not token:
return HttpResponse(error, status=400)
key = f'user:{request.user.pk}:validate_email'
email = json.loads(cache.get(key)).get(token, None)
if not email:
return HttpResponse(error, status=400)
contact_info = request.user.contactinfo
contact_info.email = email
contact_info.save()
cache.delete(key)
return redirect(reverse('notice:list_recv'))
class ContactInfoUpdateEmailView(LoginRequiredMixin, FormView):
form_class = forms.ContactInfoUpdateEmailForm
template_name = 'user/update_email.html'
success_url = reverse_lazy('user:update_email')
def form_valid(self, form):
validate_email(self.request.user, form.cleaned_data['email'])
return super().form_valid(form)
| true | true |
1c38bc5683d427dfa31b3e03713eb31a5e6faef6 | 1,987 | py | Python | test/test_new_password.py | talon-one/talon_one.py | f863bb3c2cc5ddc94d9227adcf14947b2ea7db41 | [
"MIT"
] | 1 | 2021-03-05T06:41:26.000Z | 2021-03-05T06:41:26.000Z | test/test_new_password.py | talon-one/talon_one.py | f863bb3c2cc5ddc94d9227adcf14947b2ea7db41 | [
"MIT"
] | 1 | 2021-09-07T08:56:58.000Z | 2021-09-07T08:56:58.000Z | test/test_new_password.py | talon-one/talon_one.py | f863bb3c2cc5ddc94d9227adcf14947b2ea7db41 | [
"MIT"
] | 1 | 2019-05-21T10:27:54.000Z | 2019-05-21T10:27:54.000Z | # coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.new_password import NewPassword # noqa: E501
from talon_one.rest import ApiException
class TestNewPassword(unittest.TestCase):
"""NewPassword unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test NewPassword
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = talon_one.models.new_password.NewPassword() # noqa: E501
if include_optional :
return NewPassword(
password = '0',
reset_token = '0'
)
else :
return NewPassword(
password = '0',
reset_token = '0',
)
def testNewPassword(self):
"""Test NewPassword"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 35.482143 | 647 | 0.67841 |
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.new_password import NewPassword
from talon_one.rest import ApiException
class TestNewPassword(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
include_optional :
return NewPassword(
password = '0',
reset_token = '0'
)
else :
return NewPassword(
password = '0',
reset_token = '0',
)
def testNewPassword(self):
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| true | true |
1c38bcecf5d2b10709950ef3d19204f26c5d8e6f | 607 | py | Python | var/spack/repos/builtin.mock/packages/indirect-mpich/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin.mock/packages/indirect-mpich/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin.mock/packages/indirect-mpich/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class IndirectMpich(Package):
"""Test case for a package that depends on MPI and one of its
dependencies requires a *particular version* of MPI.
"""
homepage = "http://www.example.com"
url = "http://www.example.com/indirect_mpich-1.0.tar.gz"
version(1.0, '0123456789abcdef0123456789abcdef')
depends_on('mpi')
depends_on('direct-mpich')
| 28.904762 | 73 | 0.710049 |
from spack.package import *
class IndirectMpich(Package):
homepage = "http://www.example.com"
url = "http://www.example.com/indirect_mpich-1.0.tar.gz"
version(1.0, '0123456789abcdef0123456789abcdef')
depends_on('mpi')
depends_on('direct-mpich')
| true | true |
1c38be7b5959d2a3c502984d191233707a0c0b2a | 624 | py | Python | setup.py | k4m454k/VerbariusRus | 54bb20570d3013a93340df3be5592a20c5576322 | [
"MIT"
] | 2 | 2020-11-15T19:41:20.000Z | 2020-11-15T21:22:05.000Z | setup.py | k4m454k/VerbariusRus | 54bb20570d3013a93340df3be5592a20c5576322 | [
"MIT"
] | null | null | null | setup.py | k4m454k/VerbariusRus | 54bb20570d3013a93340df3be5592a20c5576322 | [
"MIT"
] | 1 | 2020-12-20T13:35:19.000Z | 2020-12-20T13:35:19.000Z | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="verbarius",
version="0.7",
author="Vadim Apenko",
author_email="k4m454k@gmail.com",
description="VerbariusRus",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/k4m454k/Verbarius",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 27.130435 | 50 | 0.658654 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="verbarius",
version="0.7",
author="Vadim Apenko",
author_email="k4m454k@gmail.com",
description="VerbariusRus",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/k4m454k/Verbarius",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| true | true |
1c38c04a0ea5aaf4c7610e0a4db970bfce3dc8d0 | 3,895 | py | Python | tests/test_utils.py | Algebra8/pyopenapi3 | ebfb2b99e185277845ac9fc53aafaa1505339c11 | [
"MIT"
] | null | null | null | tests/test_utils.py | Algebra8/pyopenapi3 | ebfb2b99e185277845ac9fc53aafaa1505339c11 | [
"MIT"
] | 21 | 2021-03-27T22:32:51.000Z | 2021-04-13T05:37:17.000Z | tests/test_utils.py | Algebra8/pyopenapi3 | ebfb2b99e185277845ac9fc53aafaa1505339c11 | [
"MIT"
] | null | null | null | from unittest import mock
from pyopenapi3.objects import (
TextPlainMediaType,
JSONMediaType,
)
from pyopenapi3.data_types import (
Email,
Int64,
String,
Component,
Array
)
from pyopenapi3.schemas import (
MediaTypeObject,
Int64DTSchema,
ReferenceObject,
ArrayDTSchema,
AnyTypeArrayDTSchema,
MixedTypeArrayDTSchema
)
from pyopenapi3.types import MediaTypeEnum
from pyopenapi3.utils import (
build_mediatype_schema_from_content,
convert_primitive_to_schema,
convert_objects_to_schema,
convert_array_to_schema,
create_schema,
parse_name_and_type_from_fmt_str
)
def test_build_mediatype_from_object__success():
class Customer(Component): # Test custom components.
...
text = TextPlainMediaType(String)
json = JSONMediaType(Customer)
b = build_mediatype_schema_from_content([text, json])
should_be = {
MediaTypeEnum.JSON: MediaTypeObject(
schema={'ref': '#/components/schemas/Customer'}),
MediaTypeEnum.PLAIN: MediaTypeObject(
schema={'type': 'string'})
}
assert b == should_be
def test_build_mediatype_from_tuples__success():
content = [
(MediaTypeEnum.PNG, Int64, None, None, None),
(MediaTypeEnum.JSON, String, None, None, None),
(MediaTypeEnum.PLAIN, Email, None, None, None)
]
b = build_mediatype_schema_from_content(content)
should_be = {
MediaTypeEnum.PNG: MediaTypeObject(
schema={'type': 'integer', 'format': 'int64'}),
MediaTypeEnum.JSON: MediaTypeObject(
schema={'type': 'string'}),
MediaTypeEnum.PLAIN: MediaTypeObject(
schema={'type': 'string', 'format': 'email'})
}
assert b == should_be
def test_convert_primitive_to_schema():
p = convert_primitive_to_schema(Int64)
assert p == Int64DTSchema()
def test_convert_objects_to_schema():
class Pet(Component):
...
r = convert_objects_to_schema(Pet)
assert r == ReferenceObject(ref='#/components/schemas/Pet')
def test_array_to_schema():
kwargs = {'min_length': 1, 'max_length': 10}
arb_array = convert_array_to_schema(Array[...], **kwargs)
assert arb_array == AnyTypeArrayDTSchema(**kwargs)
single_array = convert_array_to_schema(Array[Int64], **kwargs)
assert single_array == ArrayDTSchema(
items={'type': 'integer', 'format': 'int64'},
**kwargs
)
mixed_array = convert_array_to_schema(Array[Int64, Email], **kwargs)
assert mixed_array == MixedTypeArrayDTSchema(
items={'oneOf': [
{'type': 'integer', 'format': 'int64'},
{'type': 'string', 'format': 'email'}
]},
**kwargs
)
@mock.patch('pyopenapi3.utils.convert_array_to_schema')
@mock.patch('pyopenapi3.utils.convert_primitive_to_schema')
@mock.patch('pyopenapi3.utils.convert_objects_to_schema')
def test_create_schema(
mock_objects_to_schema,
mock_prim_to_schema,
mock_arr_to_schema
):
kwargs = {'a': 1, 'b': 2}
# objects to schema
class Customer(Component):
...
create_schema(Customer, **kwargs)
# create_schema will only create references for custom components,
# so kwargs should not be passed to it.
mock_objects_to_schema.assert_called_once_with(Customer)
# primitive to schema
create_schema(Int64, **kwargs)
mock_prim_to_schema.assert_called_once_with(Int64, **kwargs)
# array to schema
arr = Array[Int64, Email, Customer]
create_schema(arr, **kwargs)
mock_arr_to_schema.assert_called_once_with(arr, **kwargs)
def test_parse_name_and_type():
fmt_str = "{id:Int64}/{email:Email}/"
parsed_gen = parse_name_and_type_from_fmt_str(fmt_str)
_id = next(parsed_gen)
assert _id == ("id", Int64)
email = next(parsed_gen)
assert email == ("email", Email)
| 26.14094 | 72 | 0.674711 | from unittest import mock
from pyopenapi3.objects import (
TextPlainMediaType,
JSONMediaType,
)
from pyopenapi3.data_types import (
Email,
Int64,
String,
Component,
Array
)
from pyopenapi3.schemas import (
MediaTypeObject,
Int64DTSchema,
ReferenceObject,
ArrayDTSchema,
AnyTypeArrayDTSchema,
MixedTypeArrayDTSchema
)
from pyopenapi3.types import MediaTypeEnum
from pyopenapi3.utils import (
build_mediatype_schema_from_content,
convert_primitive_to_schema,
convert_objects_to_schema,
convert_array_to_schema,
create_schema,
parse_name_and_type_from_fmt_str
)
def test_build_mediatype_from_object__success():
class Customer(Component):
...
text = TextPlainMediaType(String)
json = JSONMediaType(Customer)
b = build_mediatype_schema_from_content([text, json])
should_be = {
MediaTypeEnum.JSON: MediaTypeObject(
schema={'ref': '#/components/schemas/Customer'}),
MediaTypeEnum.PLAIN: MediaTypeObject(
schema={'type': 'string'})
}
assert b == should_be
def test_build_mediatype_from_tuples__success():
content = [
(MediaTypeEnum.PNG, Int64, None, None, None),
(MediaTypeEnum.JSON, String, None, None, None),
(MediaTypeEnum.PLAIN, Email, None, None, None)
]
b = build_mediatype_schema_from_content(content)
should_be = {
MediaTypeEnum.PNG: MediaTypeObject(
schema={'type': 'integer', 'format': 'int64'}),
MediaTypeEnum.JSON: MediaTypeObject(
schema={'type': 'string'}),
MediaTypeEnum.PLAIN: MediaTypeObject(
schema={'type': 'string', 'format': 'email'})
}
assert b == should_be
def test_convert_primitive_to_schema():
p = convert_primitive_to_schema(Int64)
assert p == Int64DTSchema()
def test_convert_objects_to_schema():
class Pet(Component):
...
r = convert_objects_to_schema(Pet)
assert r == ReferenceObject(ref='#/components/schemas/Pet')
def test_array_to_schema():
kwargs = {'min_length': 1, 'max_length': 10}
arb_array = convert_array_to_schema(Array[...], **kwargs)
assert arb_array == AnyTypeArrayDTSchema(**kwargs)
single_array = convert_array_to_schema(Array[Int64], **kwargs)
assert single_array == ArrayDTSchema(
items={'type': 'integer', 'format': 'int64'},
**kwargs
)
mixed_array = convert_array_to_schema(Array[Int64, Email], **kwargs)
assert mixed_array == MixedTypeArrayDTSchema(
items={'oneOf': [
{'type': 'integer', 'format': 'int64'},
{'type': 'string', 'format': 'email'}
]},
**kwargs
)
@mock.patch('pyopenapi3.utils.convert_array_to_schema')
@mock.patch('pyopenapi3.utils.convert_primitive_to_schema')
@mock.patch('pyopenapi3.utils.convert_objects_to_schema')
def test_create_schema(
mock_objects_to_schema,
mock_prim_to_schema,
mock_arr_to_schema
):
kwargs = {'a': 1, 'b': 2}
class Customer(Component):
...
create_schema(Customer, **kwargs)
mock_objects_to_schema.assert_called_once_with(Customer)
create_schema(Int64, **kwargs)
mock_prim_to_schema.assert_called_once_with(Int64, **kwargs)
arr = Array[Int64, Email, Customer]
create_schema(arr, **kwargs)
mock_arr_to_schema.assert_called_once_with(arr, **kwargs)
def test_parse_name_and_type():
fmt_str = "{id:Int64}/{email:Email}/"
parsed_gen = parse_name_and_type_from_fmt_str(fmt_str)
_id = next(parsed_gen)
assert _id == ("id", Int64)
email = next(parsed_gen)
assert email == ("email", Email)
| true | true |
1c38c159ae02da710e19c4593c15a0eaa92ab01e | 3,528 | py | Python | python-midonetclient/src/midonetclient/neutron/chain_rule.py | duarten/midonet | c7a5aa352a8038bdc6a463c68abc47bb411a1e7c | [
"Apache-2.0"
] | null | null | null | python-midonetclient/src/midonetclient/neutron/chain_rule.py | duarten/midonet | c7a5aa352a8038bdc6a463c68abc47bb411a1e7c | [
"Apache-2.0"
] | null | null | null | python-midonetclient/src/midonetclient/neutron/chain_rule.py | duarten/midonet | c7a5aa352a8038bdc6a463c68abc47bb411a1e7c | [
"Apache-2.0"
] | 1 | 2018-10-25T05:52:05.000Z | 2018-10-25T05:52:05.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014 Midokura Europe SARL, All Rights Reserved.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from midonetclient import url_provider
from midonetclient import util
from midonetclient import vendor_media_type as mt
LOG = logging.getLogger(__name__)
class ChainRuleUrlProviderMixin(url_provider.UrlProviderMixin):
"""ChainRule URL provider mixin
This mixin provides URLs for chain rules.
"""
def chain_url(self, chain_id):
return self.template_url("chain_template", chain_id)
def chains_url(self):
return self.resource_url("chains")
def rule_url(self, rule_id):
return self.template_url("rule_template", rule_id)
def rules_url(self, chain_id):
return self.chain_url(chain_id) + "/rules"
class ChainRuleClientMixin(ChainRuleUrlProviderMixin):
"""ChainRule mixin
Mixin that defines all the Neutron chain rule operations in MidoNet API.
"""
@util.convert_case
def create_chain(self, chain):
LOG.info("create_chain %r", chain)
return self.client.post(self.chains_url(),
mt.APPLICATION_CHAIN_JSON, body=chain)
def delete_chain(self, chain_id):
LOG.info("delete_chain %r", chain_id)
self.client.delete(self.chain_url(chain_id))
@util.convert_case
def get_chain(self, chain_id, fields=None):
LOG.info("get_chain %r", chain_id)
return self.client.get(self.chain_url(chain_id),
mt.APPLICATION_CHAIN_JSON)
@util.convert_case
def get_chains(self, filters=None, fields=None, sorts=None, limit=None,
marker=None, page_reverse=False):
LOG.info("get_chains")
return self.client.get(self.chains_url(),
mt.APPLICATION_CHAIN_COLLECTION_JSON)
@util.convert_case
def update_chain(self, chain):
LOG.info("update_chain %r", chain)
return self.client.put(self.chain_url(chain["id"]),
mt.APPLICATION_CHAIN_JSON, chain)
@util.convert_case
def create_chain_rule(self, rule):
LOG.info("create_chain_rule %r", rule)
# convert_case converted to camel
return self.client.post(self.rules_url(rule["chainId"]),
mt.APPLICATION_RULE_JSON, body=rule)
def delete_chain_rule(self, rule_id):
LOG.info("delete_chain_rule %r", rule_id)
self.client.delete(self.rule_url(rule_id))
@util.convert_case
def get_chain_rule(self, rule_id):
LOG.info("get_chain_rule %r", rule_id)
return self.client.get(self.rule_url(rule_id),
mt.APPLICATION_RULE_JSON)
@util.convert_case
def get_chain_rules(self, chain_id):
LOG.info("get_chain_rules %r", chain_id)
return self.client.get(self.rules_url(chain_id),
mt.APPLICATION_RULE_COLLECTION_JSON)
| 34.252427 | 76 | 0.673186 |
import logging
from midonetclient import url_provider
from midonetclient import util
from midonetclient import vendor_media_type as mt
LOG = logging.getLogger(__name__)
class ChainRuleUrlProviderMixin(url_provider.UrlProviderMixin):
def chain_url(self, chain_id):
return self.template_url("chain_template", chain_id)
def chains_url(self):
return self.resource_url("chains")
def rule_url(self, rule_id):
return self.template_url("rule_template", rule_id)
def rules_url(self, chain_id):
return self.chain_url(chain_id) + "/rules"
class ChainRuleClientMixin(ChainRuleUrlProviderMixin):
@util.convert_case
def create_chain(self, chain):
LOG.info("create_chain %r", chain)
return self.client.post(self.chains_url(),
mt.APPLICATION_CHAIN_JSON, body=chain)
def delete_chain(self, chain_id):
LOG.info("delete_chain %r", chain_id)
self.client.delete(self.chain_url(chain_id))
@util.convert_case
def get_chain(self, chain_id, fields=None):
LOG.info("get_chain %r", chain_id)
return self.client.get(self.chain_url(chain_id),
mt.APPLICATION_CHAIN_JSON)
@util.convert_case
def get_chains(self, filters=None, fields=None, sorts=None, limit=None,
marker=None, page_reverse=False):
LOG.info("get_chains")
return self.client.get(self.chains_url(),
mt.APPLICATION_CHAIN_COLLECTION_JSON)
@util.convert_case
def update_chain(self, chain):
LOG.info("update_chain %r", chain)
return self.client.put(self.chain_url(chain["id"]),
mt.APPLICATION_CHAIN_JSON, chain)
@util.convert_case
def create_chain_rule(self, rule):
LOG.info("create_chain_rule %r", rule)
return self.client.post(self.rules_url(rule["chainId"]),
mt.APPLICATION_RULE_JSON, body=rule)
def delete_chain_rule(self, rule_id):
LOG.info("delete_chain_rule %r", rule_id)
self.client.delete(self.rule_url(rule_id))
@util.convert_case
def get_chain_rule(self, rule_id):
LOG.info("get_chain_rule %r", rule_id)
return self.client.get(self.rule_url(rule_id),
mt.APPLICATION_RULE_JSON)
@util.convert_case
def get_chain_rules(self, chain_id):
LOG.info("get_chain_rules %r", chain_id)
return self.client.get(self.rules_url(chain_id),
mt.APPLICATION_RULE_COLLECTION_JSON)
| true | true |
1c38c2d8958f5e1c652a51619a36b079c004fd36 | 37,135 | py | Python | cinder/volume/drivers/pure.py | tlakshman26/cinder-https-changes | c688a0af521e8679ac8f68d3dd035fe998e736d3 | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/pure.py | tlakshman26/cinder-https-changes | c688a0af521e8679ac8f68d3dd035fe998e736d3 | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/pure.py | tlakshman26/cinder-https-changes | c688a0af521e8679ac8f68d3dd035fe998e736d3 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 Pure Storage, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Pure Storage FlashArray storage system.
This driver requires Purity version 4.0.0 or later.
"""
import functools
import math
import re
import uuid

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units

from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import objects
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
try:
import purestorage
except ImportError:
purestorage = None
# Module-level logger shared by all Pure driver classes in this file.
LOG = logging.getLogger(__name__)

PURE_OPTS = [
    cfg.StrOpt("pure_api_token",
               default=None,
               help="REST API authorization token."),
]

CONF = cfg.CONF
CONF.register_opts(PURE_OPTS)

# Characters Purity does not allow in object names; presumably applied
# when sanitizing generated names -- confirm against the name helpers.
INVALID_CHARACTERS = re.compile(r"[^-a-zA-Z0-9]")
# Matches host names this driver generated itself
# ("<base>-<32 hex digits>-cinder"); only such hosts are candidates for
# automatic deletion once their last private connection is removed.
GENERATED_NAME = re.compile(r".*-[a-f0-9]{32}-cinder$")

CHAP_SECRET_KEY = "PURE_TARGET_CHAP_SECRET"

# Substrings of Purity HTTP 400 error bodies, used to classify failures
# (e.g. distinguishing "already gone" from genuine errors on delete).
ERR_MSG_NOT_EXIST = "does not exist"
ERR_MSG_PENDING_ERADICATION = "has been destroyed"

# Name of the inter-process lock serializing connect/disconnect calls.
CONNECT_LOCK_NAME = 'PureVolumeDriver_connect'
def log_debug_trace(f):
    """Decorator that logs entry to and exit from the wrapped method.

    Emits "Enter <Class>.<method>" before invoking the method and
    "Leave <Class>.<method>" after it returns, both at debug level.

    ``functools.wraps`` preserves the wrapped function's name and
    docstring so decorated methods remain introspectable.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # args[0] is ``self``: these are instance methods.
        cls_name = args[0].__class__.__name__
        # f.__name__ works on both Python 2 and 3 (func_name is py2-only).
        method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name,
                                                   "method": f.__name__}
        LOG.debug("Enter " + method_name)
        result = f(*args, **kwargs)
        LOG.debug("Leave " + method_name)
        return result

    return wrapper
class PureBaseVolumeDriver(san.SanDriver):
"""Performs volume management on Pure Storage FlashArray."""
SUPPORTED_REST_API_VERSIONS = ['1.2', '1.3', '1.4']
def __init__(self, *args, **kwargs):
execute = kwargs.pop("execute", utils.execute)
super(PureBaseVolumeDriver, self).__init__(execute=execute, *args,
**kwargs)
self.configuration.append_config_values(PURE_OPTS)
self._array = None
self._storage_protocol = None
self._backend_name = (self.configuration.volume_backend_name or
self.__class__.__name__)
def do_setup(self, context):
"""Performs driver initialization steps that could raise exceptions."""
if purestorage is None:
msg = _("Missing 'purestorage' python module, ensure the library"
" is installed and available.")
raise exception.PureDriverException(msg)
# Raises PureDriverException if unable to connect and PureHTTPError
# if unable to authenticate.
purestorage.FlashArray.supported_rest_versions = \
self.SUPPORTED_REST_API_VERSIONS
self._array = purestorage.FlashArray(
self.configuration.san_ip,
api_token=self.configuration.pure_api_token)
    def check_for_setup_error(self):
        """Intentionally a no-op for this driver."""
        # Avoid inheriting check_for_setup_error from SanDriver, which checks
        # for san_password or san_private_key, not relevant to our driver.
        # Connectivity/authentication problems surface from do_setup()
        # instead, when the REST session is established.
        pass
@log_debug_trace
def create_volume(self, volume):
"""Creates a volume."""
vol_name = self._get_vol_name(volume)
vol_size = volume["size"] * units.Gi
self._array.create_volume(vol_name, vol_size)
if volume['consistencygroup_id']:
self._add_volume_to_consistency_group(
volume['consistencygroup_id'],
vol_name
)
@log_debug_trace
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
vol_name = self._get_vol_name(volume)
if snapshot['cgsnapshot_id']:
snap_name = self._get_pgroup_snap_name_from_snapshot(snapshot)
else:
snap_name = self._get_snap_name(snapshot)
self._array.copy_volume(snap_name, vol_name)
self._extend_if_needed(vol_name, snapshot["volume_size"],
volume["size"])
if volume['consistencygroup_id']:
self._add_volume_to_consistency_group(
volume['consistencygroup_id'],
vol_name
)
@log_debug_trace
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol_name = self._get_vol_name(volume)
src_name = self._get_vol_name(src_vref)
self._array.copy_volume(src_name, vol_name)
self._extend_if_needed(vol_name, src_vref["size"], volume["size"])
if volume['consistencygroup_id']:
self._add_volume_to_consistency_group(
volume['consistencygroup_id'],
vol_name
)
def _extend_if_needed(self, vol_name, src_size, vol_size):
"""Extend the volume from size src_size to size vol_size."""
if vol_size > src_size:
vol_size = vol_size * units.Gi
self._array.extend_volume(vol_name, vol_size)
@log_debug_trace
def delete_volume(self, volume):
"""Disconnect all hosts and delete the volume"""
vol_name = self._get_vol_name(volume)
try:
connected_hosts = \
self._array.list_volume_private_connections(vol_name)
for host_info in connected_hosts:
host_name = host_info["host"]
self._disconnect_host(host_name, vol_name)
self._array.destroy_volume(vol_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and \
ERR_MSG_NOT_EXIST in err.text:
# Happens if the volume does not exist.
ctxt.reraise = False
LOG.warning(_LW("Volume deletion failed with message: %s"),
err.text)
@log_debug_trace
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
vol_name, snap_suff = self._get_snap_name(snapshot).split(".")
self._array.create_snapshot(vol_name, suffix=snap_suff)
    @log_debug_trace
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        snap_name = self._get_snap_name(snapshot)
        try:
            # Purity snapshots are destroyed through the volume API.
            self._array.destroy_volume(snap_name)
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if err.code == 400:
                    # Happens if the snapshot does not exist.
                    ctxt.reraise = False
                    LOG.error(_LE("Snapshot deletion failed with message:"
                                  " %s"), err.text)
    def ensure_export(self, context, volume):
        # No export step is required; connections are made on attach.
        pass
    def create_export(self, context, volume, connector):
        # No export step is required; connections are made on attach.
        pass
    def _get_host(self, connector):
        """Get a Purity Host that corresponds to the host in the connector.

        This implementation is specific to the host type (iSCSI, FC, etc).
        """
        # Protocol subclasses (iSCSI/FC) must override this.
        raise NotImplementedError
@utils.synchronized(CONNECT_LOCK_NAME, external=True)
def _disconnect(self, volume, connector, **kwargs):
vol_name = self._get_vol_name(volume)
host = self._get_host(connector)
if host:
host_name = host["name"]
result = self._disconnect_host(host_name, vol_name)
else:
LOG.error(_LE("Unable to disconnect host from volume."))
result = False
return result
    @log_debug_trace
    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate connection."""
        # Shared by the protocol drivers; the FC subclass overrides this
        # to also return zoning information.
        self._disconnect(volume, connector, **kwargs)
    @log_debug_trace
    def _disconnect_host(self, host_name, vol_name):
        """Return value indicates if host was deleted on array or not"""
        try:
            self._array.disconnect_host(host_name, vol_name)
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if err.code == 400:
                    # Happens if the host and volume are not connected.
                    ctxt.reraise = False
                    LOG.error(_LE("Disconnection failed with message: "
                                  "%(msg)s."), {"msg": err.text})
        # Clean up host objects this driver created itself (their names
        # match the generated pattern) once they have no connections left.
        if (GENERATED_NAME.match(host_name) and
                not self._array.list_host_connections(host_name,
                                                      private=True)):
            LOG.info(_LI("Deleting unneeded host %(host_name)r."),
                     {"host_name": host_name})
            try:
                self._array.delete_host(host_name)
            except purestorage.PureHTTPError as err:
                with excutils.save_and_reraise_exception() as ctxt:
                    if err.code == 400 and ERR_MSG_NOT_EXIST in err.text:
                        # Happens if the host is already deleted.
                        # This is fine though, just treat it as a warning.
                        ctxt.reraise = False
                        LOG.warning(_LW("Purity host deletion failed: "
                                        "%(msg)s."), {"msg": err.text})
            return True
        return False
@log_debug_trace
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service.
If 'refresh' is True, run the update first.
"""
if refresh:
LOG.debug("Updating volume stats.")
self._update_stats()
return self._stats
    def _update_stats(self):
        """Set self._stats with relevant information."""
        info = self._array.get(space=True)
        # Purity reports space figures in bytes; Cinder wants GiB.
        total_capacity = float(info["capacity"]) / units.Gi
        used_space = float(info["total"]) / units.Gi
        free_space = float(total_capacity - used_space)
        prov_space, total_vols = self._get_provisioned_space()
        provisioned_space = float(prov_space) / units.Gi
        # If array is empty we can not calculate a max oversubscription ratio.
        # In this case we choose 20 as a default value for the ratio.  Once
        # some volumes are actually created and some data is stored on the
        # array a much more accurate number will be presented based on current
        # usage.
        if used_space == 0 or provisioned_space == 0:
            thin_provisioning = 20
        else:
            thin_provisioning = provisioned_space / used_space
        data = {
            "volume_backend_name": self._backend_name,
            "vendor_name": "Pure Storage",
            "driver_version": self.VERSION,
            "storage_protocol": self._storage_protocol,
            "total_capacity_gb": total_capacity,
            "free_capacity_gb": free_space,
            "reserved_percentage": 0,
            "consistencygroup_support": True,
            "thin_provisioning_support": True,
            "provisioned_capacity": provisioned_space,
            "max_over_subscription_ratio": thin_provisioning,
            "total_volumes": total_vols,
            "filter_function": self.get_filter_function(),
        }
        self._stats = data
def _get_provisioned_space(self):
"""Sum up provisioned size of all volumes on array"""
volumes = self._array.list_volumes(pending=True)
return sum(item["size"] for item in volumes), len(volumes)
@log_debug_trace
def extend_volume(self, volume, new_size):
"""Extend volume to new_size."""
vol_name = self._get_vol_name(volume)
new_size = new_size * units.Gi
self._array.extend_volume(vol_name, new_size)
def _add_volume_to_consistency_group(self, consistencygroup_id, vol_name):
pgroup_name = self._get_pgroup_name_from_id(consistencygroup_id)
self._array.set_pgroup(pgroup_name, addvollist=[vol_name])
    @log_debug_trace
    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        # A Cinder consistency group is backed by a Purity Protection Group.
        self._array.create_pgroup(self._get_pgroup_name_from_id(group.id))
        model_update = {'status': 'available'}
        return model_update
def _create_cg_from_cgsnap(self, volumes, snapshots):
"""Creates a new consistency group from a cgsnapshot.
The new volumes will be consistent with the snapshot.
"""
for volume, snapshot in zip(volumes, snapshots):
self.create_volume_from_snapshot(volume, snapshot)
    def _create_cg_from_cg(self, group, source_group, volumes, source_vols):
        """Creates a new consistency group from an existing cg.

        The new volumes will be in a consistent state, but this requires
        taking a new temporary group snapshot and cloning from that.
        """
        pgroup_name = self._get_pgroup_name_from_id(source_group.id)
        tmp_suffix = '%s-tmp' % uuid.uuid4()
        tmp_pgsnap_name = '%(pgroup_name)s.%(pgsnap_suffix)s' % {
            'pgroup_name': pgroup_name,
            'pgsnap_suffix': tmp_suffix,
        }
        LOG.debug('Creating temporary Protection Group snapshot %(snap_name)s '
                  'while cloning Consistency Group %(source_group)s.',
                  {'snap_name': tmp_pgsnap_name,
                   'source_group': source_group.id})
        self._array.create_pgroup_snapshot(pgroup_name, suffix=tmp_suffix)
        try:
            # Copy each volume out of the temporary group snapshot and put
            # the clone into the destination consistency group.
            for source_vol, cloned_vol in zip(source_vols, volumes):
                source_snap_name = self._get_pgroup_vol_snap_name(
                    pgroup_name,
                    tmp_suffix,
                    self._get_vol_name(source_vol)
                )
                cloned_vol_name = self._get_vol_name(cloned_vol)
                self._array.copy_volume(source_snap_name, cloned_vol_name)
                self._add_volume_to_consistency_group(
                    group.id,
                    cloned_vol_name
                )
        finally:
            # Always remove the temporary group snapshot, even on failure.
            self._delete_pgsnapshot(tmp_pgsnap_name)
    @log_debug_trace
    def create_consistencygroup_from_src(self, context, group, volumes,
                                         cgsnapshot=None, snapshots=None,
                                         source_cg=None, source_vols=None):
        """Create a CG and its volumes from a cgsnapshot or an existing CG."""
        self.create_consistencygroup(context, group)
        if cgsnapshot and snapshots:
            self._create_cg_from_cgsnap(volumes,
                                        snapshots)
        elif source_cg:
            self._create_cg_from_cg(group, source_cg, volumes, source_vols)
        # No model updates for the group or its volumes.
        return None, None
    @log_debug_trace
    def delete_consistencygroup(self, context, group):
        """Deletes a consistency group."""
        try:
            self._array.destroy_pgroup(self._get_pgroup_name_from_id(group.id))
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and
                        (ERR_MSG_PENDING_ERADICATION in err.text or
                         ERR_MSG_NOT_EXIST in err.text)):
                    # Treat these as a "success" case since we are trying
                    # to delete them anyway.
                    ctxt.reraise = False
                    LOG.warning(_LW("Unable to delete Protection Group: %s"),
                                err.text)
        volumes = self.db.volume_get_all_by_group(context, group.id)
        for volume in volumes:
            self.delete_volume(volume)
            # NOTE(review): the status change is not saved here; presumably
            # the caller persists the returned volume list -- confirm.
            volume.status = 'deleted'
        model_update = {'status': group['status']}
        return model_update, volumes
@log_debug_trace
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
pgroup_name = self._get_pgroup_name_from_id(group.id)
if add_volumes:
addvollist = [self._get_vol_name(vol) for vol in add_volumes]
else:
addvollist = []
if remove_volumes:
remvollist = [self._get_vol_name(vol) for vol in remove_volumes]
else:
remvollist = []
self._array.set_pgroup(pgroup_name, addvollist=addvollist,
remvollist=remvollist)
return None, None, None
    @log_debug_trace
    def create_cgsnapshot(self, context, cgsnapshot):
        """Creates a cgsnapshot."""
        cg_id = cgsnapshot.consistencygroup_id
        pgroup_name = self._get_pgroup_name_from_id(cg_id)
        pgsnap_suffix = self._get_pgroup_snap_suffix(cgsnapshot)
        # A single Protection Group snapshot covers all volumes in the CG.
        self._array.create_pgroup_snapshot(pgroup_name, suffix=pgsnap_suffix)
        snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
            context, cgsnapshot.id)
        for snapshot in snapshots:
            snapshot.status = 'available'
        model_update = {'status': 'available'}
        return model_update, snapshots
    def _delete_pgsnapshot(self, pgsnap_name):
        """Delete a pgroup snapshot, tolerating already-deleted targets."""
        try:
            # FlashArray.destroy_pgroup is also used for deleting
            # pgroup snapshots. The underlying REST API is identical.
            self._array.destroy_pgroup(pgsnap_name)
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and
                        (ERR_MSG_PENDING_ERADICATION in err.text or
                         ERR_MSG_NOT_EXIST in err.text)):
                    # Treat these as a "success" case since we are trying
                    # to delete them anyway.
                    ctxt.reraise = False
                    LOG.warning(_LW("Unable to delete Protection Group "
                                    "Snapshot: %s"), err.text)
    @log_debug_trace
    def delete_cgsnapshot(self, context, cgsnapshot):
        """Deletes a cgsnapshot."""
        pgsnap_name = self._get_pgroup_snap_name(cgsnapshot)
        self._delete_pgsnapshot(pgsnap_name)
        snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
            context, cgsnapshot.id)
        for snapshot in snapshots:
            snapshot.status = 'deleted'
        model_update = {'status': cgsnapshot.status}
        return model_update, snapshots
    def _validate_manage_existing_ref(self, existing_ref):
        """Ensure that an existing_ref is valid and return volume info.

        If the ref is not valid throw a ManageExistingInvalidReference
        exception with an appropriate error.
        """
        if "name" not in existing_ref or not existing_ref["name"]:
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref,
                reason=_("manage_existing requires a 'name'"
                         " key to identify an existing volume."))
        ref_vol_name = existing_ref['name']
        try:
            volume_info = self._array.get_volume(ref_vol_name)
            if volume_info:
                return volume_info
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and
                        ERR_MSG_NOT_EXIST in err.text):
                    # Fall through to raise the InvalidReference below.
                    ctxt.reraise = False
        # If volume information was unable to be retrieved we need
        # to throw a Invalid Reference exception
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref,
            reason=_("Unable to find volume with name=%s") % ref_vol_name)
    @log_debug_trace
    def manage_existing(self, volume, existing_ref):
        """Brings an existing backend storage object under Cinder management.

        We expect a volume name in the existing_ref that matches one in Purity.
        """
        self._validate_manage_existing_ref(existing_ref)
        ref_vol_name = existing_ref['name']
        # Refuse to adopt a volume that is still attached to any host.
        connected_hosts = \
            self._array.list_volume_private_connections(ref_vol_name)
        if len(connected_hosts) > 0:
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref,
                reason=_("%(driver)s manage_existing cannot manage a volume "
                         "connected to hosts. Please disconnect this volume "
                         "from existing hosts before importing"
                         ) % {'driver': self.__class__.__name__})
        new_vol_name = self._get_vol_name(volume)
        LOG.info(_LI("Renaming existing volume %(ref_name)s to %(new_name)s"),
                 {"ref_name": ref_vol_name, "new_name": new_vol_name})
        self._array.rename_volume(ref_vol_name, new_vol_name)
        return None
@log_debug_trace
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
We expect a volume name in the existing_ref that matches one in Purity.
"""
volume_info = self._validate_manage_existing_ref(existing_ref)
size = math.ceil(float(volume_info["size"]) / units.Gi)
return size
    @log_debug_trace
    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.

        Does not delete the underlying backend storage object.
        The volume will be renamed with "-unmanaged" as a suffix
        """
        vol_name = self._get_vol_name(volume)
        unmanaged_vol_name = vol_name + "-unmanaged"
        LOG.info(_LI("Renaming existing volume %(ref_name)s to %(new_name)s"),
                 {"ref_name": vol_name, "new_name": unmanaged_vol_name})
        try:
            self._array.rename_volume(vol_name, unmanaged_vol_name)
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and
                        ERR_MSG_NOT_EXIST in err.text):
                    # Happens if the volume is already gone; nothing to do.
                    ctxt.reraise = False
                    LOG.warning(_LW("Volume unmanage was unable to rename "
                                    "the volume, error message: %s"), err.text)
@staticmethod
def _get_vol_name(volume):
"""Return the name of the volume Purity will use."""
return volume["name"] + "-cinder"
@staticmethod
def _get_snap_name(snapshot):
"""Return the name of the snapshot that Purity will use."""
return "%s-cinder.%s" % (snapshot["volume_name"], snapshot["name"])
@staticmethod
def _get_pgroup_name_from_id(id):
return "consisgroup-%s-cinder" % id
@staticmethod
def _get_pgroup_snap_suffix(cgsnapshot):
return "cgsnapshot-%s-cinder" % cgsnapshot.id
@classmethod
def _get_pgroup_snap_name(cls, cgsnapshot):
"""Return the name of the pgroup snapshot that Purity will use"""
cg_id = cgsnapshot.consistencygroup_id
return "%s.%s" % (cls._get_pgroup_name_from_id(cg_id),
cls._get_pgroup_snap_suffix(cgsnapshot))
@staticmethod
def _get_pgroup_vol_snap_name(pg_name, pgsnap_suffix, volume_name):
return "%(pgroup_name)s.%(pgsnap_suffix)s.%(volume_name)s" % {
'pgroup_name': pg_name,
'pgsnap_suffix': pgsnap_suffix,
'volume_name': volume_name,
}
    def _get_pgroup_snap_name_from_snapshot(self, snapshot):
        """Return the name of the snapshot that Purity will use."""
        cg_id = snapshot.cgsnapshot.consistencygroup_id
        cg_name = self._get_pgroup_name_from_id(cg_id)
        cgsnapshot_id = self._get_pgroup_snap_suffix(snapshot.cgsnapshot)
        volume_name = snapshot.volume_name
        # The trailing "-cinder" mirrors _get_vol_name()'s volume naming.
        return "%s.%s.%s-cinder" % (cg_name, cgsnapshot_id, volume_name)
@staticmethod
def _generate_purity_host_name(name):
"""Return a valid Purity host name based on the name passed in."""
if len(name) > 23:
name = name[0:23]
name = INVALID_CHARACTERS.sub("-", name)
name = name.lstrip("-")
return "{name}-{uuid}-cinder".format(name=name, uuid=uuid.uuid4().hex)
    def _connect_host_to_vol(self, host_name, vol_name):
        """Connect host to volume on the array; return the connection info."""
        connection = None
        try:
            connection = self._array.connect_host(host_name, vol_name)
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and
                        "Connection already exists" in err.text):
                    # Happens if the volume is already connected to the host.
                    # Treat this as a success.
                    ctxt.reraise = False
                    LOG.debug("Volume connection already exists for Purity "
                              "host with message: %s", err.text)
                    # Get the info for the existing connection
                    connected_hosts = \
                        self._array.list_volume_private_connections(vol_name)
                    for host_info in connected_hosts:
                        if host_info["host"] == host_name:
                            connection = host_info
                            break
        if not connection:
            raise exception.PureDriverException(
                reason=_("Unable to connect or find connection to host"))
        return connection
def retype(self, context, volume, new_type, diff, host):
"""Retype from one volume type to another on the same backend.
For a Pure Array there is currently no differentiation between types
of volumes. This means that changing from one type to another on the
same array should be a no-op.
"""
return True, None
class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver):
    """Pure Storage FlashArray volume driver for the iSCSI protocol."""
    VERSION = "3.0.0"
    def __init__(self, *args, **kwargs):
        execute = kwargs.pop("execute", utils.execute)
        super(PureISCSIDriver, self).__init__(execute=execute, *args, **kwargs)
        self._storage_protocol = "iSCSI"
    def do_setup(self, context):
        super(PureISCSIDriver, self).do_setup(context)
    def _get_host(self, connector):
        """Return dict describing existing Purity host object or None."""
        hosts = self._array.list_hosts()
        for host in hosts:
            # Match hosts by the connector's initiator IQN.
            if connector["initiator"] in host["iqn"]:
                return host
        return None
    @log_debug_trace
    def initialize_connection(self, volume, connector, initiator_data=None):
        """Allow connection to connector and return connection info."""
        connection = self._connect(volume, connector, initiator_data)
        target_ports = self._get_target_iscsi_ports()
        multipath = connector.get("multipath", False)
        properties = self._build_connection_properties(connection,
                                                       target_ports,
                                                       multipath)
        if self.configuration.use_chap_auth:
            properties["data"]["auth_method"] = "CHAP"
            properties["data"]["auth_username"] = connection["auth_username"]
            properties["data"]["auth_password"] = connection["auth_password"]
        initiator_update = connection.get("initiator_update", False)
        if initiator_update:
            # Ask the caller to persist newly generated CHAP credentials.
            properties["initiator_update"] = initiator_update
        return properties
    def _build_connection_properties(self, connection, target_ports,
                                     multipath):
        """Build the iSCSI connection-properties dict for os-brick."""
        # NOTE(review): 'multipath' is accepted but never used here; all
        # target ports are always reported -- confirm this is intended.
        props = {
            "driver_volume_type": "iscsi",
            "data": {
                "target_discovered": False,
                "access_mode": "rw",
                "discard": True,
            },
        }
        port_iter = iter(target_ports)
        target_luns = []
        target_iqns = []
        target_portals = []
        for port in port_iter:
            # Same LUN is used on every portal of the array.
            target_luns.append(connection["lun"])
            target_iqns.append(port["iqn"])
            target_portals.append(port["portal"])
        # If we have multiple ports always report them
        if target_luns and target_iqns and target_portals:
            props["data"]["target_luns"] = target_luns
            props["data"]["target_iqns"] = target_iqns
            props["data"]["target_portals"] = target_portals
        return props
    def _get_target_iscsi_ports(self):
        """Return list of iSCSI-enabled port descriptions."""
        ports = self._array.list_ports()
        iscsi_ports = [port for port in ports if port["iqn"]]
        if not iscsi_ports:
            raise exception.PureDriverException(
                reason=_("No iSCSI-enabled ports on target array."))
        return iscsi_ports
    @staticmethod
    def _generate_chap_secret():
        # Random secret; persisted via the initiator database on first use.
        return volume_utils.generate_password()
    @classmethod
    def _get_chap_credentials(cls, host, data):
        """Return (username, password, initiator_updates) for CHAP auth."""
        initiator_updates = None
        username = host
        password = None
        if data:
            for d in data:
                if d["key"] == CHAP_SECRET_KEY:
                    password = d["value"]
                    break
        if not password:
            # No stored secret yet: generate one and return an update dict
            # so the caller can save it for future connections.
            password = cls._generate_chap_secret()
            initiator_updates = {
                "set_values": {
                    CHAP_SECRET_KEY: password
                }
            }
        return username, password, initiator_updates
    @utils.synchronized(CONNECT_LOCK_NAME, external=True)
    def _connect(self, volume, connector, initiator_data):
        """Connect the host and volume; return dict describing connection."""
        iqn = connector["initiator"]
        if self.configuration.use_chap_auth:
            (chap_username, chap_password, initiator_update) = \
                self._get_chap_credentials(connector['host'], initiator_data)
        vol_name = self._get_vol_name(volume)
        host = self._get_host(connector)
        if host:
            host_name = host["name"]
            LOG.info(_LI("Re-using existing purity host %(host_name)r"),
                     {"host_name": host_name})
            if self.configuration.use_chap_auth:
                # Only hosts this driver created (generated names) can have
                # their CHAP credentials managed by Cinder.
                if not GENERATED_NAME.match(host_name):
                    LOG.error(_LE("Purity host %(host_name)s is not managed "
                                  "by Cinder and can't have CHAP credentials "
                                  "modified. Remove IQN %(iqn)s from the host "
                                  "to resolve this issue."),
                              {"host_name": host_name,
                               "iqn": connector["initiator"]})
                    raise exception.PureDriverException(
                        reason=_("Unable to re-use a host that is not "
                                 "managed by Cinder with use_chap_auth=True,"))
                elif chap_username is None or chap_password is None:
                    LOG.error(_LE("Purity host %(host_name)s is managed by "
                                  "Cinder but CHAP credentials could not be "
                                  "retrieved from the Cinder database."),
                              {"host_name": host_name})
                    raise exception.PureDriverException(
                        reason=_("Unable to re-use host with unknown CHAP "
                                 "credentials configured."))
        else:
            host_name = self._generate_purity_host_name(connector["host"])
            LOG.info(_LI("Creating host object %(host_name)r with IQN:"
                         " %(iqn)s."), {"host_name": host_name, "iqn": iqn})
            self._array.create_host(host_name, iqnlist=[iqn])
            if self.configuration.use_chap_auth:
                self._array.set_host(host_name,
                                     host_user=chap_username,
                                     host_password=chap_password)
        connection = self._connect_host_to_vol(host_name, vol_name)
        if self.configuration.use_chap_auth:
            connection["auth_username"] = chap_username
            connection["auth_password"] = chap_password
            if initiator_update:
                connection["initiator_update"] = initiator_update
        return connection
class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver):
    """Pure Storage FlashArray volume driver for Fibre Channel."""
    VERSION = "1.0.0"
    def __init__(self, *args, **kwargs):
        execute = kwargs.pop("execute", utils.execute)
        super(PureFCDriver, self).__init__(execute=execute, *args, **kwargs)
        self._storage_protocol = "FC"
        self._lookup_service = fczm_utils.create_lookup_service()
    def do_setup(self, context):
        super(PureFCDriver, self).do_setup(context)
    def _get_host(self, connector):
        """Return dict describing existing Purity host object or None."""
        hosts = self._array.list_hosts()
        for host in hosts:
            for wwn in connector["wwpns"]:
                # Compare lowercased since the array's WWN casing may vary.
                if wwn in str(host["wwn"]).lower():
                    return host
        # Implicitly returns None when no host matched.
    def _get_array_wwns(self):
        """Return list of wwns from the array"""
        ports = self._array.list_ports()
        return [port["wwn"] for port in ports if port["wwn"]]
    @log_debug_trace
    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector, initiator_data=None):
        """Allow connection to connector and return connection info."""
        connection = self._connect(volume, connector)
        target_wwns = self._get_array_wwns()
        init_targ_map = self._build_initiator_target_map(target_wwns,
                                                         connector)
        properties = {
            "driver_volume_type": "fibre_channel",
            "data": {
                'target_discovered': True,
                "target_lun": connection["lun"],
                "target_wwn": target_wwns,
                'access_mode': 'rw',
                'initiator_target_map': init_targ_map,
                "discard": True,
            }
        }
        return properties
    @utils.synchronized(CONNECT_LOCK_NAME, external=True)
    def _connect(self, volume, connector):
        """Connect the host and volume; return dict describing connection."""
        wwns = connector["wwpns"]
        vol_name = self._get_vol_name(volume)
        host = self._get_host(connector)
        if host:
            host_name = host["name"]
            LOG.info(_LI("Re-using existing purity host %(host_name)r"),
                     {"host_name": host_name})
        else:
            host_name = self._generate_purity_host_name(connector["host"])
            LOG.info(_LI("Creating host object %(host_name)r with WWN:"
                         " %(wwn)s."), {"host_name": host_name, "wwn": wwns})
            self._array.create_host(host_name, wwnlist=wwns)
        return self._connect_host_to_vol(host_name, vol_name)
    def _build_initiator_target_map(self, target_wwns, connector):
        """Build the target_wwns and the initiator target map."""
        init_targ_map = {}
        if self._lookup_service:
            # use FC san lookup to determine which NSPs to use
            # for the new VLUN.
            dev_map = self._lookup_service.get_device_mapping_from_network(
                connector['wwpns'],
                target_wwns)
            for fabric_name in dev_map:
                fabric = dev_map[fabric_name]
                for initiator in fabric['initiator_port_wwn_list']:
                    if initiator not in init_targ_map:
                        init_targ_map[initiator] = []
                    init_targ_map[initiator] += fabric['target_port_wwn_list']
                    # De-duplicate targets accumulated across fabrics.
                    init_targ_map[initiator] = list(set(
                        init_targ_map[initiator]))
        else:
            # No lookup service: map every initiator to every target.
            init_targ_map = dict.fromkeys(connector["wwpns"], target_wwns)
        return init_targ_map
    @log_debug_trace
    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate connection."""
        no_more_connections = self._disconnect(volume, connector, **kwargs)
        properties = {"driver_volume_type": "fibre_channel", "data": {}}
        if no_more_connections:
            # Only hand unzoning data to the zone manager once the host's
            # last connection is gone.
            target_wwns = self._get_array_wwns()
            init_targ_map = self._build_initiator_target_map(target_wwns,
                                                             connector)
            properties["data"] = {"target_wwn": target_wwns,
                                  "initiator_target_map": init_targ_map}
        return properties
| 39.547391 | 79 | 0.604632 |
import functools
import math
import re
import uuid

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units

from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import objects
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils

try:
    import purestorage
except ImportError:
    purestorage = None
LOG = logging.getLogger(__name__)
# Driver-specific configuration options, registered below.
PURE_OPTS = [
    cfg.StrOpt("pure_api_token",
               default=None,
               help="REST API authorization token."),
]
CONF = cfg.CONF
CONF.register_opts(PURE_OPTS)
# Characters Purity does not accept in object names.
INVALID_CHARACTERS = re.compile(r"[^-a-zA-Z0-9]")
# Matches host names this driver generated itself:
# "<name>-<32 hex chars>-cinder" (see _generate_purity_host_name).
GENERATED_NAME = re.compile(r".*-[a-f0-9]{32}-cinder$")
# Key under which the CHAP secret is stored in Cinder's initiator data.
CHAP_SECRET_KEY = "PURE_TARGET_CHAP_SECRET"
# Substrings of Purity error messages used to classify HTTP 400 errors.
ERR_MSG_NOT_EXIST = "does not exist"
ERR_MSG_PENDING_ERADICATION = "has been destroyed"
# Lock name serializing connect/disconnect operations across processes.
CONNECT_LOCK_NAME = 'PureVolumeDriver_connect'
def log_debug_trace(f):
    """Decorator that logs method entry/exit at DEBUG level.

    Expects to wrap bound methods (args[0] is the instance) and logs
    "Enter/Leave <Class>.<method>" around the call.
    """
    # functools.wraps preserves the wrapped function's metadata;
    # f.__name__ (unlike the Python 2-only f.func_name) works on both
    # Python 2 and Python 3.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        cls_name = args[0].__class__.__name__
        method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name,
                                                   "method": f.__name__}
        LOG.debug("Enter " + method_name)
        result = f(*args, **kwargs)
        LOG.debug("Leave " + method_name)
        return result
    return wrapper
class PureBaseVolumeDriver(san.SanDriver):
SUPPORTED_REST_API_VERSIONS = ['1.2', '1.3', '1.4']
    def __init__(self, *args, **kwargs):
        # Allow callers/tests to inject an execute function; default to
        # the standard cinder utils.execute.
        execute = kwargs.pop("execute", utils.execute)
        super(PureBaseVolumeDriver, self).__init__(execute=execute, *args,
                                                   **kwargs)
        self.configuration.append_config_values(PURE_OPTS)
        # Set later: _array in do_setup(), _storage_protocol by subclasses.
        self._array = None
        self._storage_protocol = None
        self._backend_name = (self.configuration.volume_backend_name or
                              self.__class__.__name__)
    def do_setup(self, context):
        """Create the REST client used to talk to the backing FlashArray."""
        if purestorage is None:
            msg = _("Missing 'purestorage' python module, ensure the library"
                    " is installed and available.")
            raise exception.PureDriverException(msg)
        # Restrict the client to the REST API versions this driver supports.
        purestorage.FlashArray.supported_rest_versions = \
            self.SUPPORTED_REST_API_VERSIONS
        self._array = purestorage.FlashArray(
            self.configuration.san_ip,
            api_token=self.configuration.pure_api_token)
    def check_for_setup_error(self):
        # No additional checks beyond what do_setup() already performs.
        pass
    @log_debug_trace
    def create_volume(self, volume):
        """Create a new volume on the array; Cinder sizes are in GiB."""
        vol_name = self._get_vol_name(volume)
        # The array API expects the size in bytes.
        vol_size = volume["size"] * units.Gi
        self._array.create_volume(vol_name, vol_size)
        if volume['consistencygroup_id']:
            self._add_volume_to_consistency_group(
                volume['consistencygroup_id'],
                vol_name
            )
@log_debug_trace
def create_volume_from_snapshot(self, volume, snapshot):
vol_name = self._get_vol_name(volume)
if snapshot['cgsnapshot_id']:
snap_name = self._get_pgroup_snap_name_from_snapshot(snapshot)
else:
snap_name = self._get_snap_name(snapshot)
self._array.copy_volume(snap_name, vol_name)
self._extend_if_needed(vol_name, snapshot["volume_size"],
volume["size"])
if volume['consistencygroup_id']:
self._add_volume_to_consistency_group(
volume['consistencygroup_id'],
vol_name
)
@log_debug_trace
def create_cloned_volume(self, volume, src_vref):
vol_name = self._get_vol_name(volume)
src_name = self._get_vol_name(src_vref)
self._array.copy_volume(src_name, vol_name)
self._extend_if_needed(vol_name, src_vref["size"], volume["size"])
if volume['consistencygroup_id']:
self._add_volume_to_consistency_group(
volume['consistencygroup_id'],
vol_name
)
def _extend_if_needed(self, vol_name, src_size, vol_size):
if vol_size > src_size:
vol_size = vol_size * units.Gi
self._array.extend_volume(vol_name, vol_size)
@log_debug_trace
def delete_volume(self, volume):
vol_name = self._get_vol_name(volume)
try:
connected_hosts = \
self._array.list_volume_private_connections(vol_name)
for host_info in connected_hosts:
host_name = host_info["host"]
self._disconnect_host(host_name, vol_name)
self._array.destroy_volume(vol_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and \
ERR_MSG_NOT_EXIST in err.text:
ctxt.reraise = False
LOG.warning(_LW("Volume deletion failed with message: %s"),
err.text)
@log_debug_trace
def create_snapshot(self, snapshot):
vol_name, snap_suff = self._get_snap_name(snapshot).split(".")
self._array.create_snapshot(vol_name, suffix=snap_suff)
@log_debug_trace
def delete_snapshot(self, snapshot):
snap_name = self._get_snap_name(snapshot)
try:
self._array.destroy_volume(snap_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400:
ctxt.reraise = False
LOG.error(_LE("Snapshot deletion failed with message:"
" %s"), err.text)
def ensure_export(self, context, volume):
pass
def create_export(self, context, volume, connector):
pass
def _get_host(self, connector):
raise NotImplementedError
@utils.synchronized(CONNECT_LOCK_NAME, external=True)
def _disconnect(self, volume, connector, **kwargs):
vol_name = self._get_vol_name(volume)
host = self._get_host(connector)
if host:
host_name = host["name"]
result = self._disconnect_host(host_name, vol_name)
else:
LOG.error(_LE("Unable to disconnect host from volume."))
result = False
return result
@log_debug_trace
def terminate_connection(self, volume, connector, **kwargs):
self._disconnect(volume, connector, **kwargs)
@log_debug_trace
def _disconnect_host(self, host_name, vol_name):
try:
self._array.disconnect_host(host_name, vol_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400:
ctxt.reraise = False
LOG.error(_LE("Disconnection failed with message: "
"%(msg)s."), {"msg": err.text})
if (GENERATED_NAME.match(host_name) and
not self._array.list_host_connections(host_name,
private=True)):
LOG.info(_LI("Deleting unneeded host %(host_name)r."),
{"host_name": host_name})
try:
self._array.delete_host(host_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and ERR_MSG_NOT_EXIST in err.text:
ctxt.reraise = False
LOG.warning(_LW("Purity host deletion failed: "
"%(msg)s."), {"msg": err.text})
return True
return False
@log_debug_trace
def get_volume_stats(self, refresh=False):
if refresh:
LOG.debug("Updating volume stats.")
self._update_stats()
return self._stats
def _update_stats(self):
info = self._array.get(space=True)
total_capacity = float(info["capacity"]) / units.Gi
used_space = float(info["total"]) / units.Gi
free_space = float(total_capacity - used_space)
prov_space, total_vols = self._get_provisioned_space()
provisioned_space = float(prov_space) / units.Gi
if used_space == 0 or provisioned_space == 0:
thin_provisioning = 20
else:
thin_provisioning = provisioned_space / used_space
data = {
"volume_backend_name": self._backend_name,
"vendor_name": "Pure Storage",
"driver_version": self.VERSION,
"storage_protocol": self._storage_protocol,
"total_capacity_gb": total_capacity,
"free_capacity_gb": free_space,
"reserved_percentage": 0,
"consistencygroup_support": True,
"thin_provisioning_support": True,
"provisioned_capacity": provisioned_space,
"max_over_subscription_ratio": thin_provisioning,
"total_volumes": total_vols,
"filter_function": self.get_filter_function(),
}
self._stats = data
def _get_provisioned_space(self):
volumes = self._array.list_volumes(pending=True)
return sum(item["size"] for item in volumes), len(volumes)
@log_debug_trace
def extend_volume(self, volume, new_size):
vol_name = self._get_vol_name(volume)
new_size = new_size * units.Gi
self._array.extend_volume(vol_name, new_size)
def _add_volume_to_consistency_group(self, consistencygroup_id, vol_name):
pgroup_name = self._get_pgroup_name_from_id(consistencygroup_id)
self._array.set_pgroup(pgroup_name, addvollist=[vol_name])
@log_debug_trace
def create_consistencygroup(self, context, group):
self._array.create_pgroup(self._get_pgroup_name_from_id(group.id))
model_update = {'status': 'available'}
return model_update
def _create_cg_from_cgsnap(self, volumes, snapshots):
for volume, snapshot in zip(volumes, snapshots):
self.create_volume_from_snapshot(volume, snapshot)
def _create_cg_from_cg(self, group, source_group, volumes, source_vols):
pgroup_name = self._get_pgroup_name_from_id(source_group.id)
tmp_suffix = '%s-tmp' % uuid.uuid4()
tmp_pgsnap_name = '%(pgroup_name)s.%(pgsnap_suffix)s' % {
'pgroup_name': pgroup_name,
'pgsnap_suffix': tmp_suffix,
}
LOG.debug('Creating temporary Protection Group snapshot %(snap_name)s '
'while cloning Consistency Group %(source_group)s.',
{'snap_name': tmp_pgsnap_name,
'source_group': source_group.id})
self._array.create_pgroup_snapshot(pgroup_name, suffix=tmp_suffix)
try:
for source_vol, cloned_vol in zip(source_vols, volumes):
source_snap_name = self._get_pgroup_vol_snap_name(
pgroup_name,
tmp_suffix,
self._get_vol_name(source_vol)
)
cloned_vol_name = self._get_vol_name(cloned_vol)
self._array.copy_volume(source_snap_name, cloned_vol_name)
self._add_volume_to_consistency_group(
group.id,
cloned_vol_name
)
finally:
self._delete_pgsnapshot(tmp_pgsnap_name)
@log_debug_trace
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
self.create_consistencygroup(context, group)
if cgsnapshot and snapshots:
self._create_cg_from_cgsnap(volumes,
snapshots)
elif source_cg:
self._create_cg_from_cg(group, source_cg, volumes, source_vols)
return None, None
    @log_debug_trace
    def delete_consistencygroup(self, context, group):
        """Delete the CG's protection group and all volumes in the group.

        :returns: (model_update, volumes) with each volume marked deleted.
        """
        try:
            self._array.destroy_pgroup(self._get_pgroup_name_from_id(group.id))
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and
                        (ERR_MSG_PENDING_ERADICATION in err.text or
                         ERR_MSG_NOT_EXIST in err.text)):
                    # Already deleted (or pending eradication) on the array:
                    # treat as success and continue deleting the volumes.
                    ctxt.reraise = False
                    LOG.warning(_LW("Unable to delete Protection Group: %s"),
                                err.text)
        volumes = self.db.volume_get_all_by_group(context, group.id)
        for volume in volumes:
            self.delete_volume(volume)
            volume.status = 'deleted'
        model_update = {'status': group['status']}
        return model_update, volumes
@log_debug_trace
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
pgroup_name = self._get_pgroup_name_from_id(group.id)
if add_volumes:
addvollist = [self._get_vol_name(vol) for vol in add_volumes]
else:
addvollist = []
if remove_volumes:
remvollist = [self._get_vol_name(vol) for vol in remove_volumes]
else:
remvollist = []
self._array.set_pgroup(pgroup_name, addvollist=addvollist,
remvollist=remvollist)
return None, None, None
    @log_debug_trace
    def create_cgsnapshot(self, context, cgsnapshot):
        """Snapshot every volume in a CG via one protection-group snapshot.

        :returns: (model_update, snapshots) with everything marked available.
        """
        cg_id = cgsnapshot.consistencygroup_id
        pgroup_name = self._get_pgroup_name_from_id(cg_id)
        pgsnap_suffix = self._get_pgroup_snap_suffix(cgsnapshot)
        self._array.create_pgroup_snapshot(pgroup_name, suffix=pgsnap_suffix)
        snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
            context, cgsnapshot.id)
        for snapshot in snapshots:
            snapshot.status = 'available'
        model_update = {'status': 'available'}
        return model_update, snapshots
    def _delete_pgsnapshot(self, pgsnap_name):
        """Destroy a pgroup snapshot, ignoring already-deleted errors."""
        try:
            self._array.destroy_pgroup(pgsnap_name)
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and
                        (ERR_MSG_PENDING_ERADICATION in err.text or
                         ERR_MSG_NOT_EXIST in err.text)):
                    # Snapshot is already gone (or pending eradication):
                    # log and swallow instead of failing the operation.
                    ctxt.reraise = False
                    LOG.warning(_LW("Unable to delete Protection Group "
                                    "Snapshot: %s"), err.text)
    @log_debug_trace
    def delete_cgsnapshot(self, context, cgsnapshot):
        """Delete the protection-group snapshot backing *cgsnapshot*.

        :returns: (model_update, snapshots) with snapshots marked deleted.
        """
        pgsnap_name = self._get_pgroup_snap_name(cgsnapshot)
        self._delete_pgsnapshot(pgsnap_name)
        snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
            context, cgsnapshot.id)
        for snapshot in snapshots:
            snapshot.status = 'deleted'
        model_update = {'status': cgsnapshot.status}
        return model_update, snapshots
    def _validate_manage_existing_ref(self, existing_ref):
        """Ensure *existing_ref* names a real volume on the array.

        :returns: the array's volume-info dict for the referenced volume.
        :raises ManageExistingInvalidReference: if the ref has no 'name'
            key or no volume with that name exists on the array.
        """
        if "name" not in existing_ref or not existing_ref["name"]:
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref,
                reason=_("manage_existing requires a 'name'"
                         " key to identify an existing volume."))
        ref_vol_name = existing_ref['name']
        try:
            volume_info = self._array.get_volume(ref_vol_name)
            if volume_info:
                return volume_info
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and
                        ERR_MSG_NOT_EXIST in err.text):
                    # Turn the array's "no such volume" error into the
                    # reference-validation error raised below.
                    ctxt.reraise = False
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref,
            reason=_("Unable to find volume with name=%s") % ref_vol_name)
    @log_debug_trace
    def manage_existing(self, volume, existing_ref):
        """Bring an existing array volume under Cinder management.

        Renames the referenced array volume to the Cinder naming scheme.
        Volumes that still have host connections are refused.

        :raises ManageExistingInvalidReference: if the ref is invalid or
            the volume is connected to one or more hosts.
        """
        self._validate_manage_existing_ref(existing_ref)
        ref_vol_name = existing_ref['name']
        connected_hosts = \
            self._array.list_volume_private_connections(ref_vol_name)
        if len(connected_hosts) > 0:
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref,
                reason=_("%(driver)s manage_existing cannot manage a volume "
                         "connected to hosts. Please disconnect this volume "
                         "from existing hosts before importing"
                         ) % {'driver': self.__class__.__name__})
        new_vol_name = self._get_vol_name(volume)
        LOG.info(_LI("Renaming existing volume %(ref_name)s to %(new_name)s"),
                 {"ref_name": ref_vol_name, "new_name": new_vol_name})
        self._array.rename_volume(ref_vol_name, new_vol_name)
        return None
@log_debug_trace
def manage_existing_get_size(self, volume, existing_ref):
volume_info = self._validate_manage_existing_ref(existing_ref)
size = math.ceil(float(volume_info["size"]) / units.Gi)
return size
    @log_debug_trace
    def unmanage(self, volume):
        """Stop managing a volume without destroying its data.

        The array volume is renamed with an "-unmanaged" suffix so it no
        longer collides with the Cinder naming scheme.
        """
        vol_name = self._get_vol_name(volume)
        unmanaged_vol_name = vol_name + "-unmanaged"
        LOG.info(_LI("Renaming existing volume %(ref_name)s to %(new_name)s"),
                 {"ref_name": vol_name, "new_name": unmanaged_vol_name})
        try:
            self._array.rename_volume(vol_name, unmanaged_vol_name)
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and
                        ERR_MSG_NOT_EXIST in err.text):
                    # The volume is already gone from the array; just warn.
                    ctxt.reraise = False
                    LOG.warning(_LW("Volume unmanage was unable to rename "
                                    "the volume, error message: %s"), err.text)
@staticmethod
def _get_vol_name(volume):
return volume["name"] + "-cinder"
@staticmethod
def _get_snap_name(snapshot):
return "%s-cinder.%s" % (snapshot["volume_name"], snapshot["name"])
@staticmethod
def _get_pgroup_name_from_id(id):
return "consisgroup-%s-cinder" % id
@staticmethod
def _get_pgroup_snap_suffix(cgsnapshot):
return "cgsnapshot-%s-cinder" % cgsnapshot.id
@classmethod
def _get_pgroup_snap_name(cls, cgsnapshot):
cg_id = cgsnapshot.consistencygroup_id
return "%s.%s" % (cls._get_pgroup_name_from_id(cg_id),
cls._get_pgroup_snap_suffix(cgsnapshot))
@staticmethod
def _get_pgroup_vol_snap_name(pg_name, pgsnap_suffix, volume_name):
return "%(pgroup_name)s.%(pgsnap_suffix)s.%(volume_name)s" % {
'pgroup_name': pg_name,
'pgsnap_suffix': pgsnap_suffix,
'volume_name': volume_name,
}
def _get_pgroup_snap_name_from_snapshot(self, snapshot):
cg_id = snapshot.cgsnapshot.consistencygroup_id
cg_name = self._get_pgroup_name_from_id(cg_id)
cgsnapshot_id = self._get_pgroup_snap_suffix(snapshot.cgsnapshot)
volume_name = snapshot.volume_name
return "%s.%s.%s-cinder" % (cg_name, cgsnapshot_id, volume_name)
@staticmethod
def _generate_purity_host_name(name):
if len(name) > 23:
name = name[0:23]
name = INVALID_CHARACTERS.sub("-", name)
name = name.lstrip("-")
return "{name}-{uuid}-cinder".format(name=name, uuid=uuid.uuid4().hex)
    def _connect_host_to_vol(self, host_name, vol_name):
        """Connect a Purity host to a volume; return the connection info.

        If Purity reports the connection already exists (e.g. created by a
        concurrent request) the existing connection is looked up instead of
        failing.

        :raises PureDriverException: if no connection could be made or found.
        """
        connection = None
        try:
            connection = self._array.connect_host(host_name, vol_name)
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and
                        "Connection already exists" in err.text):
                    # Benign race: look up the existing connection instead.
                    ctxt.reraise = False
                    LOG.debug("Volume connection already exists for Purity "
                              "host with message: %s", err.text)
                    connected_hosts = \
                        self._array.list_volume_private_connections(vol_name)
                    for host_info in connected_hosts:
                        if host_info["host"] == host_name:
                            connection = host_info
                            break
        if not connection:
            raise exception.PureDriverException(
                reason=_("Unable to connect or find connection to host"))
        return connection
    def retype(self, context, volume, new_type, diff, host):
        """Accept any retype request without moving data.

        :returns: (True, None) -- tells Cinder the retype is handled here
            with no model update.
        """
        return True, None
class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver):
    """iSCSI transport flavor of the Pure Storage volume driver.

    Adds host/IQN management, target-port discovery and optional CHAP
    authentication on top of the shared base driver.
    """
    VERSION = "3.0.0"
    def __init__(self, *args, **kwargs):
        execute = kwargs.pop("execute", utils.execute)
        super(PureISCSIDriver, self).__init__(execute=execute, *args, **kwargs)
        self._storage_protocol = "iSCSI"
    def do_setup(self, context):
        super(PureISCSIDriver, self).do_setup(context)
    def _get_host(self, connector):
        """Return the Purity host whose IQN list contains the connector's
        initiator IQN, or None if no such host exists."""
        hosts = self._array.list_hosts()
        for host in hosts:
            if connector["initiator"] in host["iqn"]:
                return host
        return None
    @log_debug_trace
    def initialize_connection(self, volume, connector, initiator_data=None):
        """Connect the volume and build the iSCSI connection properties."""
        connection = self._connect(volume, connector, initiator_data)
        target_ports = self._get_target_iscsi_ports()
        multipath = connector.get("multipath", False)
        properties = self._build_connection_properties(connection,
                                                       target_ports,
                                                       multipath)
        if self.configuration.use_chap_auth:
            properties["data"]["auth_method"] = "CHAP"
            properties["data"]["auth_username"] = connection["auth_username"]
            properties["data"]["auth_password"] = connection["auth_password"]
        initiator_update = connection.get("initiator_update", False)
        if initiator_update:
            properties["initiator_update"] = initiator_update
        return properties
    def _build_connection_properties(self, connection, target_ports,
                                     multipath):
        """Assemble the connection-info dict Cinder/os-brick expects.

        NOTE(review): *multipath* is accepted but not read in this body --
        presumably kept for interface compatibility; confirm with callers.
        """
        props = {
            "driver_volume_type": "iscsi",
            "data": {
                "target_discovered": False,
                "access_mode": "rw",
                "discard": True,
            },
        }
        port_iter = iter(target_ports)
        target_luns = []
        target_iqns = []
        target_portals = []
        # Every target port advertises the same LUN for this connection.
        for port in port_iter:
            target_luns.append(connection["lun"])
            target_iqns.append(port["iqn"])
            target_portals.append(port["portal"])
        if target_luns and target_iqns and target_portals:
            props["data"]["target_luns"] = target_luns
            props["data"]["target_iqns"] = target_iqns
            props["data"]["target_portals"] = target_portals
        return props
    def _get_target_iscsi_ports(self):
        """Return all array ports with an IQN; fail if there are none."""
        ports = self._array.list_ports()
        iscsi_ports = [port for port in ports if port["iqn"]]
        if not iscsi_ports:
            raise exception.PureDriverException(
                reason=_("No iSCSI-enabled ports on target array."))
        return iscsi_ports
    @staticmethod
    def _generate_chap_secret():
        return volume_utils.generate_password()
    @classmethod
    def _get_chap_credentials(cls, host, data):
        """Return (username, password, initiator_updates) for CHAP.

        The username is the connector's host name. The password is pulled
        from the initiator *data* entries if present; otherwise a new secret
        is generated and returned via *initiator_updates* so the caller can
        persist it.
        """
        initiator_updates = None
        username = host
        password = None
        if data:
            for d in data:
                if d["key"] == CHAP_SECRET_KEY:
                    password = d["value"]
                    break
        if not password:
            password = cls._generate_chap_secret()
            initiator_updates = {
                "set_values": {
                    CHAP_SECRET_KEY: password
                }
            }
        return username, password, initiator_updates
    @utils.synchronized(CONNECT_LOCK_NAME, external=True)
    def _connect(self, volume, connector, initiator_data):
        """Find or create a Purity host for the initiator and connect it.

        Serialized via an external lock to avoid racing host creation.
        """
        iqn = connector["initiator"]
        if self.configuration.use_chap_auth:
            (chap_username, chap_password, initiator_update) = \
                self._get_chap_credentials(connector['host'], initiator_data)
        vol_name = self._get_vol_name(volume)
        host = self._get_host(connector)
        if host:
            host_name = host["name"]
            LOG.info(_LI("Re-using existing purity host %(host_name)r"),
                     {"host_name": host_name})
            if self.configuration.use_chap_auth:
                # Only hosts created by this driver (matching the generated
                # naming pattern) may have their CHAP credentials touched.
                if not GENERATED_NAME.match(host_name):
                    LOG.error(_LE("Purity host %(host_name)s is not managed "
                                  "by Cinder and can't have CHAP credentials "
                                  "modified. Remove IQN %(iqn)s from the host "
                                  "to resolve this issue."),
                              {"host_name": host_name,
                               "iqn": connector["initiator"]})
                    raise exception.PureDriverException(
                        reason=_("Unable to re-use a host that is not "
                                 "managed by Cinder with use_chap_auth=True,"))
                elif chap_username is None or chap_password is None:
                    LOG.error(_LE("Purity host %(host_name)s is managed by "
                                  "Cinder but CHAP credentials could not be "
                                  "retrieved from the Cinder database."),
                              {"host_name": host_name})
                    raise exception.PureDriverException(
                        reason=_("Unable to re-use host with unknown CHAP "
                                 "credentials configured."))
        else:
            host_name = self._generate_purity_host_name(connector["host"])
            LOG.info(_LI("Creating host object %(host_name)r with IQN:"
                         " %(iqn)s."), {"host_name": host_name, "iqn": iqn})
            self._array.create_host(host_name, iqnlist=[iqn])
            if self.configuration.use_chap_auth:
                self._array.set_host(host_name,
                                     host_user=chap_username,
                                     host_password=chap_password)
        connection = self._connect_host_to_vol(host_name, vol_name)
        if self.configuration.use_chap_auth:
            connection["auth_username"] = chap_username
            connection["auth_password"] = chap_password
            if initiator_update:
                connection["initiator_update"] = initiator_update
        return connection
class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver):
    """Fibre Channel transport flavor of the Pure Storage volume driver.

    Matches hosts by WWPN and builds initiator/target maps, optionally via
    an FC SAN lookup (zone manager) service.
    """
    VERSION = "1.0.0"
    def __init__(self, *args, **kwargs):
        execute = kwargs.pop("execute", utils.execute)
        super(PureFCDriver, self).__init__(execute=execute, *args, **kwargs)
        self._storage_protocol = "FC"
        self._lookup_service = fczm_utils.create_lookup_service()
    def do_setup(self, context):
        super(PureFCDriver, self).do_setup(context)
    def _get_host(self, connector):
        """Return the Purity host matching any of the connector's WWPNs.

        Returns None implicitly when no host matches.
        """
        hosts = self._array.list_hosts()
        for host in hosts:
            for wwn in connector["wwpns"]:
                if wwn in str(host["wwn"]).lower():
                    return host
    def _get_array_wwns(self):
        """Return the non-empty WWNs of all array ports."""
        ports = self._array.list_ports()
        return [port["wwn"] for port in ports if port["wwn"]]
    @log_debug_trace
    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector, initiator_data=None):
        """Connect the volume and build the FC connection properties."""
        connection = self._connect(volume, connector)
        target_wwns = self._get_array_wwns()
        init_targ_map = self._build_initiator_target_map(target_wwns,
                                                         connector)
        properties = {
            "driver_volume_type": "fibre_channel",
            "data": {
                'target_discovered': True,
                "target_lun": connection["lun"],
                "target_wwn": target_wwns,
                'access_mode': 'rw',
                'initiator_target_map': init_targ_map,
                "discard": True,
            }
        }
        return properties
    @utils.synchronized(CONNECT_LOCK_NAME, external=True)
    def _connect(self, volume, connector):
        """Find or create a Purity host for the WWPNs and connect it.

        Serialized via an external lock to avoid racing host creation.
        """
        wwns = connector["wwpns"]
        vol_name = self._get_vol_name(volume)
        host = self._get_host(connector)
        if host:
            host_name = host["name"]
            LOG.info(_LI("Re-using existing purity host %(host_name)r"),
                     {"host_name": host_name})
        else:
            host_name = self._generate_purity_host_name(connector["host"])
            LOG.info(_LI("Creating host object %(host_name)r with WWN:"
                         " %(wwn)s."), {"host_name": host_name, "wwn": wwns})
            self._array.create_host(host_name, wwnlist=wwns)
        return self._connect_host_to_vol(host_name, vol_name)
    def _build_initiator_target_map(self, target_wwns, connector):
        """Map each initiator WWPN to the target WWNs it can reach."""
        init_targ_map = {}
        if self._lookup_service:
            # use FC san lookup to determine which NSPs to use
            # for the new VLUN.
            dev_map = self._lookup_service.get_device_mapping_from_network(
                connector['wwpns'],
                target_wwns)
            for fabric_name in dev_map:
                fabric = dev_map[fabric_name]
                for initiator in fabric['initiator_port_wwn_list']:
                    if initiator not in init_targ_map:
                        init_targ_map[initiator] = []
                    init_targ_map[initiator] += fabric['target_port_wwn_list']
                    # De-duplicate targets seen on multiple fabrics.
                    init_targ_map[initiator] = list(set(
                        init_targ_map[initiator]))
        else:
            # No lookup service: assume every initiator sees every target.
            init_targ_map = dict.fromkeys(connector["wwpns"], target_wwns)
        return init_targ_map
    @log_debug_trace
    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Disconnect the volume; include zoning info when the host is done.

        The initiator/target map is only returned once the host has no more
        connections, so the zone manager can tear the zone down.
        """
        no_more_connections = self._disconnect(volume, connector, **kwargs)
        properties = {"driver_volume_type": "fibre_channel", "data": {}}
        if no_more_connections:
            target_wwns = self._get_array_wwns()
            init_targ_map = self._build_initiator_target_map(target_wwns,
                                                             connector)
            properties["data"] = {"target_wwn": target_wwns,
                                  "initiator_target_map": init_targ_map}
        return properties
| true | true |
1c38c2dcd187ff71b618077eaaff3992fe8344e0 | 7,427 | py | Python | tests/micro/qemu/test_zephyr.py | zhenlohuang/tvm | fd2e6d17120a79533852c6bb705429d9c7bc286b | [
"Apache-2.0"
] | 1 | 2021-06-08T11:55:55.000Z | 2021-06-08T11:55:55.000Z | tests/micro/qemu/test_zephyr.py | zhenlohuang/tvm | fd2e6d17120a79533852c6bb705429d9c7bc286b | [
"Apache-2.0"
] | null | null | null | tests/micro/qemu/test_zephyr.py | zhenlohuang/tvm | fd2e6d17120a79533852c6bb705429d9c7bc286b | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import datetime
import glob
import os
import subprocess
import sys
import pytest
import numpy as np
import tvm
import tvm.rpc
import tvm.micro
import tvm.relay as relay
from tvm.micro.contrib import zephyr
from tvm.contrib import utils
BUILD = True
DEBUG = False
TARGET = None
def _make_sess_from_op(model, zephyr_board, op_name, sched, arg_bufs):
    """Build one TE schedule for the micro target and open a device session."""
    target = tvm.target.target.micro(model)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.build(sched, arg_bufs, target, target_host=target, name=op_name)
    return _make_session(model, target, zephyr_board, mod)
def _make_session(model, target, zephyr_board, mod):
    """Compile (or reuse) a Zephyr micro binary for *mod* and open a session.

    When the module-level BUILD flag is set, the runtime is built into a
    timestamped workspace next to this test and archived as the "last
    build"; otherwise the previously archived binary is unarchived and
    reused. DEBUG attaches an RPC debug session to the flasher.
    """
    test_name = f"{os.path.splitext(os.path.abspath(__file__))[0]}-{model}"
    prev_build = f"{test_name}-last-build.micro-binary"
    workspace_root = (
        f'{test_name}-workspace/{datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")}'
    )
    workspace_parent = os.path.dirname(workspace_root)
    if not os.path.exists(workspace_parent):
        os.makedirs(workspace_parent)
    workspace = tvm.micro.Workspace(debug=True, root=workspace_root)
    project_dir = os.path.join(os.path.dirname(__file__) or ".", "zephyr-runtime")
    compiler = zephyr.ZephyrCompiler(
        project_dir=project_dir,
        board="nucleo_f746zg" if "stm32f746" in str(target) else "qemu_x86",
        zephyr_toolchain_variant="zephyr",
    )
    opts = tvm.micro.default_options(f"{project_dir}/crt")
    # TODO(weberlo) verify this is necessary
    opts["bin_opts"]["ccflags"] = ["-std=gnu++14"]
    opts["lib_opts"]["ccflags"] = ["-std=gnu++14"]
    flasher_kw = {}
    if DEBUG:
        flasher_kw["debug_rpc_session"] = tvm.rpc.connect("127.0.0.1", 9090)
    session_kw = {
        "flasher": compiler.flasher(**flasher_kw),
    }
    if BUILD:
        session_kw["binary"] = tvm.micro.build_static_runtime(
            # the x86 compiler *expects* you to give the exact same dictionary for both
            # lib_opts and bin_opts. so the library compiler is mutating lib_opts and
            # the binary compiler is expecting those mutations to be in bin_opts.
            # TODO(weberlo) fix this very bizarre behavior
            workspace,
            compiler,
            mod,
            lib_opts=opts["lib_opts"],
            bin_opts=opts["bin_opts"],
        )
        if os.path.exists(prev_build):
            os.unlink(prev_build)
        session_kw["binary"].archive(prev_build, metadata_only=True)
    else:
        unarchive_dir = utils.tempdir()
        session_kw["binary"] = tvm.micro.MicroBinary.unarchive(
            prev_build, unarchive_dir.relpath("binary")
        )
    return tvm.micro.Session(**session_kw)
def _make_add_sess(model, zephyr_board):
    """Open a device session around a tiny 'add' op: C[i] = A[i] + B[0]."""
    A = tvm.te.placeholder((2,), dtype="int8")
    B = tvm.te.placeholder((1,), dtype="int8")
    C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
    sched = tvm.te.create_schedule(C.op)
    return _make_sess_from_op(model, zephyr_board, "add", sched, [A, B, C])
# The models that should pass this configuration. Maps a short, identifying platform string to
# (model, zephyr_board).
PLATFORMS = {
"host": ("host", "qemu_x86"),
"stm32f746xx": ("stm32f746xx", "nucleo_f746zg"),
}
# The same test code can be executed on both the QEMU simulation and on real hardware.
def test_compile_runtime(platform):
"""Test compiling the on-device runtime."""
model, zephyr_board = PLATFORMS[platform]
# NOTE: run test in a nested function so cPython will delete arrays before closing the session.
def test_basic_add(sess):
A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), ctx=sess.context)
assert (A_data.asnumpy() == np.array([2, 3])).all()
B_data = tvm.nd.array(np.array([4], dtype="int8"), ctx=sess.context)
assert (B_data.asnumpy() == np.array([4])).all()
C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), ctx=sess.context)
assert (C_data.asnumpy() == np.array([0, 0])).all()
system_lib = sess.get_system_lib()
system_lib.get_function("add")(A_data, B_data, C_data)
assert (C_data.asnumpy() == np.array([6, 7])).all()
with _make_add_sess(model, zephyr_board) as sess:
test_basic_add(sess)
def test_platform_timer(platform):
    """Exercise the on-device platform timer via time_evaluator."""
    model, zephyr_board = PLATFORMS[platform]
    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), ctx=sess.context)
        assert (A_data.asnumpy() == np.array([2, 3])).all()
        B_data = tvm.nd.array(np.array([4], dtype="int8"), ctx=sess.context)
        assert (B_data.asnumpy() == np.array([4])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), ctx=sess.context)
        assert (C_data.asnumpy() == np.array([0, 0])).all()
        system_lib = sess.get_system_lib()
        time_eval_f = system_lib.time_evaluator(
            "add", sess.context, number=20, repeat=3, min_repeat_ms=40
        )
        result = time_eval_f(A_data, B_data, C_data)
        assert (C_data.asnumpy() == np.array([6, 7])).all()
        # A positive mean and one entry per repeat shows the timer works.
        assert result.mean > 0
        assert len(result.results) == 3
    with _make_add_sess(model, zephyr_board) as sess:
        test_basic_add(sess)
def test_relay(platform):
    """Compile and run a simple Relay graph (x*x + 1) on the device."""
    model, zephyr_board = PLATFORMS[platform]
    shape = (10,)
    dtype = "int8"
    # Construct Relay program.
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)
    target = tvm.target.target.micro(model)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        graph, mod, params = tvm.relay.build(func, target=target)
    with _make_session(model, target, zephyr_board, mod) as session:
        graph_mod = tvm.micro.create_local_graph_runtime(
            graph, session.get_system_lib(), session.context
        )
        graph_mod.set_input(**params)
        # Inputs in [0, 10) keep x*x + 1 within int8 range.
        x_in = np.random.randint(10, size=shape[0], dtype=dtype)
        graph_mod.run(x=x_in)
        result = graph_mod.get_output(0).asnumpy()
        tvm.testing.assert_allclose(graph_mod.get_input(0).asnumpy(), x_in)
        tvm.testing.assert_allclose(result, x_in * x_in + 1)
if __name__ == "__main__":
sys.exit(pytest.main([os.path.dirname(__file__)] + sys.argv[1:]))
| 36.586207 | 99 | 0.667834 |
import contextlib
import copy
import datetime
import glob
import os
import subprocess
import sys
import pytest
import numpy as np
import tvm
import tvm.rpc
import tvm.micro
import tvm.relay as relay
from tvm.micro.contrib import zephyr
from tvm.contrib import utils
BUILD = True
DEBUG = False
TARGET = None
def _make_sess_from_op(model, zephyr_board, op_name, sched, arg_bufs):
target = tvm.target.target.micro(model)
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
mod = tvm.build(sched, arg_bufs, target, target_host=target, name=op_name)
return _make_session(model, target, zephyr_board, mod)
def _make_session(model, target, zephyr_board, mod):
test_name = f"{os.path.splitext(os.path.abspath(__file__))[0]}-{model}"
prev_build = f"{test_name}-last-build.micro-binary"
workspace_root = (
f'{test_name}-workspace/{datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")}'
)
workspace_parent = os.path.dirname(workspace_root)
if not os.path.exists(workspace_parent):
os.makedirs(workspace_parent)
workspace = tvm.micro.Workspace(debug=True, root=workspace_root)
project_dir = os.path.join(os.path.dirname(__file__) or ".", "zephyr-runtime")
compiler = zephyr.ZephyrCompiler(
project_dir=project_dir,
board="nucleo_f746zg" if "stm32f746" in str(target) else "qemu_x86",
zephyr_toolchain_variant="zephyr",
)
opts = tvm.micro.default_options(f"{project_dir}/crt")
opts["bin_opts"]["ccflags"] = ["-std=gnu++14"]
opts["lib_opts"]["ccflags"] = ["-std=gnu++14"]
flasher_kw = {}
if DEBUG:
flasher_kw["debug_rpc_session"] = tvm.rpc.connect("127.0.0.1", 9090)
session_kw = {
"flasher": compiler.flasher(**flasher_kw),
}
if BUILD:
session_kw["binary"] = tvm.micro.build_static_runtime(
workspace,
compiler,
mod,
lib_opts=opts["lib_opts"],
bin_opts=opts["bin_opts"],
)
if os.path.exists(prev_build):
os.unlink(prev_build)
session_kw["binary"].archive(prev_build, metadata_only=True)
else:
unarchive_dir = utils.tempdir()
session_kw["binary"] = tvm.micro.MicroBinary.unarchive(
prev_build, unarchive_dir.relpath("binary")
)
return tvm.micro.Session(**session_kw)
def _make_add_sess(model, zephyr_board):
A = tvm.te.placeholder((2,), dtype="int8")
B = tvm.te.placeholder((1,), dtype="int8")
C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
sched = tvm.te.create_schedule(C.op)
return _make_sess_from_op(model, zephyr_board, "add", sched, [A, B, C])
PLATFORMS = {
"host": ("host", "qemu_x86"),
"stm32f746xx": ("stm32f746xx", "nucleo_f746zg"),
}
def test_compile_runtime(platform):
model, zephyr_board = PLATFORMS[platform]
def test_basic_add(sess):
A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), ctx=sess.context)
assert (A_data.asnumpy() == np.array([2, 3])).all()
B_data = tvm.nd.array(np.array([4], dtype="int8"), ctx=sess.context)
assert (B_data.asnumpy() == np.array([4])).all()
C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), ctx=sess.context)
assert (C_data.asnumpy() == np.array([0, 0])).all()
system_lib = sess.get_system_lib()
system_lib.get_function("add")(A_data, B_data, C_data)
assert (C_data.asnumpy() == np.array([6, 7])).all()
with _make_add_sess(model, zephyr_board) as sess:
test_basic_add(sess)
def test_platform_timer(platform):
model, zephyr_board = PLATFORMS[platform]
def test_basic_add(sess):
A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), ctx=sess.context)
assert (A_data.asnumpy() == np.array([2, 3])).all()
B_data = tvm.nd.array(np.array([4], dtype="int8"), ctx=sess.context)
assert (B_data.asnumpy() == np.array([4])).all()
C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), ctx=sess.context)
assert (C_data.asnumpy() == np.array([0, 0])).all()
system_lib = sess.get_system_lib()
time_eval_f = system_lib.time_evaluator(
"add", sess.context, number=20, repeat=3, min_repeat_ms=40
)
result = time_eval_f(A_data, B_data, C_data)
assert (C_data.asnumpy() == np.array([6, 7])).all()
assert result.mean > 0
assert len(result.results) == 3
with _make_add_sess(model, zephyr_board) as sess:
test_basic_add(sess)
def test_relay(platform):
model, zephyr_board = PLATFORMS[platform]
shape = (10,)
dtype = "int8"
x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
xx = relay.multiply(x, x)
z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
func = relay.Function([x], z)
target = tvm.target.target.micro(model)
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
graph, mod, params = tvm.relay.build(func, target=target)
with _make_session(model, target, zephyr_board, mod) as session:
graph_mod = tvm.micro.create_local_graph_runtime(
graph, session.get_system_lib(), session.context
)
graph_mod.set_input(**params)
x_in = np.random.randint(10, size=shape[0], dtype=dtype)
graph_mod.run(x=x_in)
result = graph_mod.get_output(0).asnumpy()
tvm.testing.assert_allclose(graph_mod.get_input(0).asnumpy(), x_in)
tvm.testing.assert_allclose(result, x_in * x_in + 1)
if __name__ == "__main__":
sys.exit(pytest.main([os.path.dirname(__file__)] + sys.argv[1:]))
| true | true |
1c38c3384d90c21d61e7e321827695822dca8ba3 | 1,590 | py | Python | tests/performance_boundaries.py | noah-ziethen/py-pde | b88e86439290c31284a4ac665a8e9ff34d08b494 | [
"MIT"
] | null | null | null | tests/performance_boundaries.py | noah-ziethen/py-pde | b88e86439290c31284a4ac665a8e9ff34d08b494 | [
"MIT"
] | null | null | null | tests/performance_boundaries.py | noah-ziethen/py-pde | b88e86439290c31284a4ac665a8e9ff34d08b494 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from pathlib import Path
PACKAGE_PATH = Path(__file__).resolve().parents[1]
sys.path.append(str(PACKAGE_PATH))
import numpy as np
from pde import UnitGrid, ScalarField
from pde.grids.boundaries.axes import Boundaries
from pde.tools.misc import estimate_computation_speed
def main():
    """Benchmark the Laplace operator for different boundary-condition kinds.

    For two Cartesian grid sizes, reports calls-per-second of the compiled
    laplace operator with scalar-valued, array-valued and linked boundary
    conditions. Since bc_value is all ones, every variant must reproduce the
    reference result computed with a scalar boundary value of 1.
    """
    print('Reports calls-per-second (larger is better)\n')
    # Cartesian grid with different shapes and boundary conditions
    for size in [32, 512]:
        grid = UnitGrid((size, size), periodic=False)
        print(grid)
        field = ScalarField.random_normal(grid)
        bc_value = np.ones(size)
        # reference result used to sanity-check each operator below
        result = field.laplace(bc={'value': 1}).data
        for bc in ['scalar', 'array', 'linked']:
            if bc == 'scalar':
                bcs = {'value': 1}
            elif bc == 'array':
                bcs = {'value': bc_value}
            elif bc == 'linked':
                bcs = Boundaries.from_data(grid, {'value': bc_value}, rank=0)
                for ax, upper in grid._iter_boundaries():
                    bcs[ax][upper].link_value(bc_value)
            laplace = grid.get_operator("laplace", bc=bcs)
            # call once to pre-compile and test result
            np.testing.assert_allclose(laplace(field.data), result)
            speed = estimate_computation_speed(laplace, field.data)
            print(f'{bc:>6s}:{int(speed):>9d}')
        print()
if __name__ == '__main__':
main()
| 30.576923 | 77 | 0.591824 |
import sys
from pathlib import Path
PACKAGE_PATH = Path(__file__).resolve().parents[1]
sys.path.append(str(PACKAGE_PATH))
import numpy as np
from pde import UnitGrid, ScalarField
from pde.grids.boundaries.axes import Boundaries
from pde.tools.misc import estimate_computation_speed
def main():
print('Reports calls-per-second (larger is better)\n')
for size in [32, 512]:
grid = UnitGrid((size, size), periodic=False)
print(grid)
field = ScalarField.random_normal(grid)
bc_value = np.ones(size)
result = field.laplace(bc={'value': 1}).data
for bc in ['scalar', 'array', 'linked']:
if bc == 'scalar':
bcs = {'value': 1}
elif bc == 'array':
bcs = {'value': bc_value}
elif bc == 'linked':
bcs = Boundaries.from_data(grid, {'value': bc_value}, rank=0)
for ax, upper in grid._iter_boundaries():
bcs[ax][upper].link_value(bc_value)
laplace = grid.get_operator("laplace", bc=bcs)
np.testing.assert_allclose(laplace(field.data), result)
speed = estimate_computation_speed(laplace, field.data)
print(f'{bc:>6s}:{int(speed):>9d}')
print()
if __name__ == '__main__':
main()
| true | true |
1c38c3c9be5af7fa0cb76dea02e8fa8c675d90e3 | 1,466 | py | Python | lsml/util/test/test_distance_transform.py | sandeepdas05/lsm-crack-width | 38460e514d48f3424bb8d3bd58cb3eb330153e64 | [
"BSD-3-Clause"
] | 24 | 2020-01-30T15:53:33.000Z | 2022-01-15T09:46:24.000Z | lsml/util/test/test_distance_transform.py | sandeepdas05/lsm-crack-width | 38460e514d48f3424bb8d3bd58cb3eb330153e64 | [
"BSD-3-Clause"
] | null | null | null | lsml/util/test/test_distance_transform.py | sandeepdas05/lsm-crack-width | 38460e514d48f3424bb8d3bd58cb3eb330153e64 | [
"BSD-3-Clause"
] | 13 | 2019-12-05T08:32:11.000Z | 2022-03-20T03:12:03.000Z | import unittest
import numpy as np
from lsml.util.distance_transform import (
distance_transform)
class TestDistanceTransform(unittest.TestCase):
    """Unit tests for :func:`distance_transform`."""
    def test_distance_transform(self):
        """Signed distances and band mask around one interior region."""
        arr = np.r_[-1, -1, 1, -1, -1.]
        dist, mask = distance_transform(arr, band=1, dx=[1.])
        true_dist = np.r_[0., -0.5, 0.5, -0.5, 0.]
        true_mask = np.r_[False, True, True, True, False]
        # Floating point comparison is okay here, numbers are 0, 0.5, 1, etc
        self.assertTrue((dist == true_dist).all())
        self.assertTrue((mask == true_mask).all())
    def test_all_positive(self):
        """No zero crossing: empty band and +inf distances everywhere."""
        arr = np.ones((4, 5, 6))
        dist, mask = distance_transform(arr, band=0, dx=[1, 1, 1.])
        self.assertEqual(0, mask.sum())
        self.assertTrue((dist == np.inf).all())
    def test_all_negative(self):
        """No zero crossing: empty band and -inf distances everywhere."""
        arr = -np.ones((4, 5, 6))
        dist, mask = distance_transform(arr, band=0, dx=[1, 1, 1.])
        self.assertEqual(0, mask.sum())
        self.assertTrue((dist == -np.inf).all())
    def test_band_zero(self):
        """band=0 with an interface present masks the whole array.

        NOTE(review): this suggests band=0 means "no band limit" -- confirm
        against the distance_transform implementation.
        """
        arr = np.ones((4, 5, 6))
        arr[2] = -1
        dist, mask = distance_transform(arr, band=0, dx=[1, 1, 1.])
        self.assertEqual(arr.size, mask.sum())
    def test_input_zero(self):
        """All-zero input: every point is on the interface (dist 0, masked)."""
        arr = np.zeros((4, 5, 6))
        dist, mask = distance_transform(arr, band=0, dx=[1, 1, 1.])
        self.assertEqual(arr.size, mask.sum())
        self.assertTrue((dist == 0).all())
| 27.148148 | 76 | 0.579127 | import unittest
import numpy as np
from lsml.util.distance_transform import (
distance_transform)
class TestDistanceTransform(unittest.TestCase):
    """Tests for lsml.util.distance_transform.distance_transform."""

    def test_distance_transform(self):
        # One positive sample surrounded by negatives: the zero level set sits
        # halfway between samples, so distances come out as multiples of 0.5.
        phi = np.array([-1.0, -1.0, 1.0, -1.0, -1.0])
        dist, mask = distance_transform(phi, band=1, dx=[1.])
        expected_dist = np.array([0.0, -0.5, 0.5, -0.5, 0.0])
        expected_mask = np.array([False, True, True, True, False])
        # Exact float comparison is intentional; values are 0, +/-0.5, etc.
        self.assertTrue(np.array_equal(dist, expected_dist))
        self.assertTrue(np.array_equal(mask, expected_mask))

    def test_all_positive(self):
        phi = np.ones((4, 5, 6))
        dist, mask = distance_transform(phi, band=0, dx=[1, 1, 1.])
        self.assertEqual(mask.sum(), 0)
        self.assertTrue(np.all(dist == np.inf))

    def test_all_negative(self):
        phi = -np.ones((4, 5, 6))
        dist, mask = distance_transform(phi, band=0, dx=[1, 1, 1.])
        self.assertEqual(mask.sum(), 0)
        self.assertTrue(np.all(dist == -np.inf))

    def test_band_zero(self):
        phi = np.ones((4, 5, 6))
        phi[2] = -1
        dist, mask = distance_transform(phi, band=0, dx=[1, 1, 1.])
        # With a sign change present and band=0, every voxel is in the band.
        self.assertEqual(mask.sum(), phi.size)

    def test_input_zero(self):
        phi = np.zeros((4, 5, 6))
        dist, mask = distance_transform(phi, band=0, dx=[1, 1, 1.])
        self.assertEqual(mask.sum(), phi.size)
        self.assertTrue(np.all(dist == 0))
| true | true |
1c38c4f6facdf89d6944b5e36cd8a47770f49d78 | 1,735 | py | Python | yzrpc/schema.py | ml444/yz-rpc | f3b6cb76dab72e1763d759080854c11aa6ade872 | [
"Apache-2.0"
] | 5 | 2021-04-28T09:12:04.000Z | 2021-11-25T13:50:32.000Z | yzrpc/schema.py | ml444/yz-rpc | f3b6cb76dab72e1763d759080854c11aa6ade872 | [
"Apache-2.0"
] | null | null | null | yzrpc/schema.py | ml444/yz-rpc | f3b6cb76dab72e1763d759080854c11aa6ade872 | [
"Apache-2.0"
] | 2 | 2021-07-27T04:11:51.000Z | 2022-01-06T09:36:06.000Z | #!/usr/bin/python3.7+
# -*- coding:utf-8 -*-
from typing import Tuple
from pydantic.main import BaseModel
from yzrpc.proto import __proto_meta__, Protobuf
class SchemaMeta(type):
    """Metaclass that records schema field annotations for proto generation.

    When a concrete schema subclass is created (non-empty bases that do not
    directly include ``object``), its public annotations are stored in
    ``__proto_meta__`` under the app name taken from the defining module path
    (expected shape: ``src.apps.<app_name>.schemas``).
    """

    def __new__(cls, cls_name: str, bases: Tuple, cls_dict: dict):
        # Abstract/base classes (no bases, or direct subclasses of object)
        # are not registered.
        if not bases or object in bases:
            return super().__new__(cls, cls_name, bases, cls_dict)
        # Collect public field annotations; underscore-prefixed names are
        # treated as private and excluded from the proto message.
        _meta = {
            k: v
            for k, v in cls_dict.get("__annotations__", {}).items()
            if not k.startswith('_')
        }
        if _meta:
            # 'src.apps.appname.schemas' -> app name is the second-to-last part
            _module = cls_dict.get("__module__", '')
            parts = _module.split('.')
            # Guard: a top-level module (e.g. '__main__') has no package
            # component; parts[-2] previously raised IndexError for it.
            if len(parts) >= 2:
                app_name = parts[-2]
                if app_name not in ['src', 'apps']:
                    if app_name in __proto_meta__:
                        package_meta = __proto_meta__[app_name]
                    else:
                        package_meta = Protobuf()
                    package_meta.messages[cls_name] = _meta
                    __proto_meta__[app_name] = package_meta
        return super().__new__(cls, cls_name, bases, cls_dict)
def metaclass_resolver(*classes):
    """Create a class inheriting from *classes*, synthesizing a combined
    metaclass when the given classes have conflicting metaclasses."""
    metaclasses = tuple(set(type(cls) for cls in classes))
    if len(metaclasses) == 1:
        meta = metaclasses[0]
    else:
        # Derive one metaclass (e.g. 'M_C') from all distinct metaclasses.
        meta_name = "_".join(m.__name__ for m in metaclasses)
        meta = type(meta_name, metaclasses, {})
    combined_name = "_".join(cls.__name__ for cls in classes)
    return meta(combined_name, classes, {})
class _SchemaBase(metaclass=SchemaMeta):
    """Mixin that routes subclasses through SchemaMeta's proto registration."""
    pass
class SchemaBase(metaclass_resolver(BaseModel, _SchemaBase)):
    """Base schema: pydantic ``BaseModel`` combined with ``_SchemaBase`` via a
    merged metaclass, so subclasses are validated models and proto-registered."""
    pass
# class ModelSchemaMeta(ModelMetaclass, SchemaMeta):
# pass
#
# from pydantic.main import ModelMetaclass
# class SchemaBase(BaseModel, metaclass=ModelSchemaMeta):
# """"""
| 30.438596 | 83 | 0.618444 |
from typing import Tuple
from pydantic.main import BaseModel
from yzrpc.proto import __proto_meta__, Protobuf
class SchemaMeta(type):
    """Metaclass that stores public field annotations of concrete schema
    subclasses in ``__proto_meta__``, keyed by app name from the module path."""

    def __new__(cls, cls_name: str, bases: Tuple, cls_dict: dict):
        # Base/abstract classes (no bases, or direct subclasses of object)
        # are created without being registered.
        if not bases or object in bases:
            return super().__new__(cls, cls_name, bases, cls_dict)
        # Public annotations only; leading-underscore names are excluded.
        _meta = {
            k: v
            for k, v in cls_dict.get("__annotations__", {}).items()
            if not k.startswith('_')
        }
        if _meta:
            # Module path is expected to look like 'src.apps.<app>.schemas'.
            _module = cls_dict.get("__module__", '')
            # NOTE(review): raises IndexError when the module path has fewer
            # than two dot-separated parts (e.g. '__main__') — confirm intended.
            app_name = _module.split('.')[-2]
            if app_name not in ['src', 'apps']:
                if app_name in __proto_meta__:
                    package_meta = __proto_meta__.get(app_name)
                else:
                    package_meta = Protobuf()
                # File this class's fields as a message under the app's entry.
                package_meta.messages[cls_name] = _meta
                __proto_meta__[app_name] = package_meta
        return super().__new__(cls, cls_name, bases, cls_dict)
def metaclass_resolver(*classes):
    """Return a class inheriting from *classes*, building a combined metaclass
    (named e.g. 'M_C') when their metaclasses differ."""
    metaclass = tuple(set(type(cls) for cls in classes))
    # Single shared metaclass: use it directly; otherwise derive a new one.
    metaclass = metaclass[0] if len(metaclass) == 1 else type(
        "_".join(mcls.__name__ for mcls in metaclass), metaclass, {})
    return metaclass("_".join(cls.__name__ for cls in classes), classes, {})
class _SchemaBase(metaclass=SchemaMeta):
    """Mixin that routes subclasses through SchemaMeta's proto registration."""
    pass
class SchemaBase(metaclass_resolver(BaseModel, _SchemaBase)):
    """Base schema combining pydantic ``BaseModel`` with ``_SchemaBase`` via a
    merged metaclass from ``metaclass_resolver``."""
    pass
| true | true |
1c38c50dc1a147195769659ce2b2a6375f5fd9a1 | 101,789 | py | Python | tests/unit/conftest.py | biorack/metatlas | ce3f03817a0a7c5500452689ad587ce94af26a1a | [
"BSD-3-Clause"
] | 8 | 2016-08-10T22:29:39.000Z | 2021-04-06T22:49:46.000Z | tests/unit/conftest.py | biorack/metatlas | ce3f03817a0a7c5500452689ad587ce94af26a1a | [
"BSD-3-Clause"
] | 111 | 2016-04-27T23:18:12.000Z | 2022-03-15T23:55:52.000Z | tests/unit/conftest.py | biorack/metatlas | ce3f03817a0a7c5500452689ad587ce94af26a1a | [
"BSD-3-Clause"
] | 9 | 2016-08-21T16:23:02.000Z | 2021-04-06T22:49:50.000Z | """
per-directory pytest configuration
fixtures used across multiple files should go in here
"""
# pylint: disable=missing-function-docstring,unused-argument,line-too-long,too-many-lines,too-many-arguments
import getpass
import logging
import os
import sqlite3
import threading
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
from metatlas.datastructures import metatlas_dataset as mads
from metatlas.datastructures import metatlas_objects as metob
from metatlas.datastructures import object_helpers as metoh
logger = logging.getLogger(__name__)
def date_str_to_int(date_str):
    """Convert an ISO-8601 datetime string to an integer Unix timestamp.

    The string is parsed as a naive datetime, so ``timestamp()`` interprets
    it in the local timezone.
    """
    parsed = datetime.fromisoformat(date_str)
    return int(parsed.timestamp())
@pytest.fixture(name="username", scope="session")
def fixture_username():
    """Current OS user; per-user atlas names and the sqlite db name embed it."""
    return getpass.getuser()
@pytest.fixture(name="analysis_ids")
def fixture_analysis_ids(sqlite_with_atlas, username, lcmsrun, mocker, groups_controlled_vocab):
    """AnalysisIdentifiers backed by the single-compound atlas stored in sqlite."""
    # Stub out the LCMS-run lookup so only the canned lcmsrun fixture is seen.
    mocker.patch("metatlas.plots.dill2plots.get_metatlas_files", return_value=[lcmsrun])
    return mads.AnalysisIdentifiers(
        source_atlas=f"HILICz150_ANT20190824_PRD_EMA_Unlab_POS_20201106_505892_{username}0",
        experiment="20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583",
        output_type="FinalEMA-HILIC",
        polarity="positive",
        analysis_number=0,
        project_directory=str(os.getcwd()),
        groups_controlled_vocab=groups_controlled_vocab,
    )
@pytest.fixture(name="analysis_ids_with_2_cids")
def fixture_analysis_ids_with_2_cids(
    sqlite_with_atlas_with_2_cids, username, lcmsrun, mocker, groups_controlled_vocab
):
    """AnalysisIdentifiers backed by the two-compound atlas stored in sqlite."""
    # Stub out the LCMS-run lookup so only the canned lcmsrun fixture is seen.
    mocker.patch("metatlas.plots.dill2plots.get_metatlas_files", return_value=[lcmsrun])
    return mads.AnalysisIdentifiers(
        source_atlas=f"HILICz150_ANT20190824_PRD_EMA_Unlab_POS_20201106_505892_{username}1",
        experiment="20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583",
        output_type="FinalEMA-HILIC",
        polarity="positive",
        analysis_number=0,
        project_directory=str(os.getcwd()),
        groups_controlled_vocab=groups_controlled_vocab,
    )
@pytest.fixture(name="sqlite")
def fixture_sqlite(username, change_test_dir, atlas):
    """Create an empty per-user sqlite workspace in the test's working directory.

    Storing one empty object of each metatlas type forces table creation.
    On teardown the workspace connection is closed and the singleton reset so
    the next test starts clean.
    """
    # Use the module-level logger (was logging.debug, which hit the root
    # logger and was inconsistent with the rest of this file).
    logger.debug("creating database file in %s", os.getcwd())
    assert not os.path.exists(f"{username}_workspace.db")
    sqlite3.connect(f"{username}_workspace.db").close()
    logger.debug("Storing empty objects to create tables")
    metob.store(metob.Atlas())
    metob.store(metob.CompoundIdentification())
    metob.store(metob.Compound())
    metob.store(metob.MzReference())
    metob.store(metob.RtReference())
    metob.store(metob.Reference())
    metob.store(metob.LcmsRun())
    logger.debug("Done storing empty objects to create tables")
    yield
    metoh.Workspace.get_instance().close_connection()
    metoh.Workspace.instance = None
@pytest.fixture(name="sqlite_with_atlas")
def fixture_sqlite_with_atlas(sqlite, atlas, username):
    """Sqlite workspace with the single-compound atlas stored under a per-user name."""
    atlas.name = f"HILICz150_ANT20190824_PRD_EMA_Unlab_POS_20201106_505892_{username}0"
    logger.debug("Saving atlas %s", atlas.name)
    metob.store(atlas)
@pytest.fixture(name="sqlite_with_atlas_with_2_cids")
def fixture_sqlite_with_atlas_with_2_cids(sqlite, atlas_with_2_cids, username):
    """Sqlite workspace with the two-compound atlas stored under a per-user name."""
    atlas_with_2_cids.name = f"HILICz150_ANT20190824_PRD_EMA_Unlab_POS_20201106_505892_{username}1"
    logger.debug("Saving atlas %s", atlas_with_2_cids.name)
    metob.store(atlas_with_2_cids)
@pytest.fixture(name="change_test_dir", scope="function", autouse=True)
def fixture_change_test_dir(request, tmp_path):
    """Run each test from its own tmp_path; restore the invocation dir after.

    Thread counts are logged on entry/exit to help spot tests leaking threads.
    """
    logger.info("Incoming thread count %d", threading.active_count())
    os.chdir(tmp_path)
    logger.debug("changing dir to %s", tmp_path)
    yield
    os.chdir(request.config.invocation_dir)
    logger.info("Outgoing thread count %d", threading.active_count())
@pytest.fixture(name="ms1_pos")
def fixture_ms1_pos():
return pd.DataFrame(
data={
"mz": {
"0": 252.1089324951,
"1": 252.1090087891,
"2": 252.1088104248,
"3": 252.1090087891,
"4": 252.10887146,
"5": 252.1089324951,
"6": 252.1089324951,
"7": 252.1088256836,
"8": 252.1088867188,
"9": 252.1090393066,
"10": 252.1089782715,
"11": 252.1089630127,
"12": 252.1089630127,
"13": 252.1089782715,
"14": 252.1090240479,
"15": 252.1089782715,
"16": 252.1090240479,
"17": 252.1089324951,
"18": 252.1090393066,
"19": 252.1088867188,
"20": 252.10887146,
"21": 252.1089324951,
"22": 252.1089630127,
"23": 252.1089935303,
"24": 252.1089172363,
"25": 252.1089477539,
"26": 252.1090545654,
"27": 252.1089630127,
"28": 252.1090240479,
"29": 252.1090087891,
"30": 252.1090393066,
"31": 252.1090240479,
"32": 252.1089935303,
"33": 252.1090240479,
"34": 252.1089630127,
"35": 252.1090087891,
"36": 252.1090240479,
"37": 252.1089172363,
"38": 252.1089019775,
"39": 252.1089477539,
"40": 252.1089324951,
"41": 252.1089477539,
"42": 252.1089477539,
"43": 252.1089477539,
"44": 252.1089782715,
"45": 252.1088867188,
"46": 252.1089172363,
"47": 252.1089324951,
"48": 252.1089782715,
"49": 252.1089477539,
"50": 252.1089172363,
"51": 252.1089324951,
"52": 252.1089630127,
"53": 252.1088867188,
"54": 252.1089630127,
"55": 252.1085205078,
"56": 252.1090545654,
"57": 252.1089935303,
"58": 252.1088104248,
"59": 252.1086578369,
"60": 252.1089935303,
"61": 252.1085510254,
"62": 252.1082763672,
"63": 252.1082458496,
"64": 252.1084136963,
"65": 252.1092224121,
"66": 252.1091766357,
"67": 252.1092834473,
"68": 252.1087493896,
"69": 252.1112518311,
"70": 252.1088409424,
"71": 252.1086425781,
"72": 252.1091766357,
"73": 252.1094055176,
},
"i": {
"0": 312203.5,
"1": 387914.59375,
"2": 308308.5,
"3": 334653.59375,
"4": 339521.625,
"5": 345527.21875,
"6": 292437.34375,
"7": 413614.53125,
"8": 300285.28125,
"9": 383848.71875,
"10": 404313.21875,
"11": 377231.34375,
"12": 453965.5625,
"13": 431327.0,
"14": 523180.0625,
"15": 510239.8125,
"16": 631459.1875,
"17": 807419.5,
"18": 842647.5625,
"19": 1053031.625,
"20": 1082361.625,
"21": 1198966.625,
"22": 1109162.375,
"23": 1126347.125,
"24": 1373071.5,
"25": 1589018.375,
"26": 1281309.875,
"27": 1660166.75,
"28": 1492912.25,
"29": 2029801.5,
"30": 2029874.125,
"31": 2035966.625,
"32": 2010867.875,
"33": 2036981.375,
"34": 2148879.25,
"35": 2359861.25,
"36": 2054066.125,
"37": 1691976.0,
"38": 1778159.125,
"39": 1776166.125,
"40": 1752154.125,
"41": 1575676.875,
"42": 1199910.625,
"43": 1259708.25,
"44": 1087384.375,
"45": 826077.125,
"46": 802296.875,
"47": 547785.125,
"48": 545340.0625,
"49": 584624.4375,
"50": 468524.8125,
"51": 305931.1875,
"52": 330310.34375,
"53": 309740.625,
"54": 289212.71875,
"55": 230440.9375,
"56": 210549.390625,
"57": 169972.390625,
"58": 140521.234375,
"59": 116637.953125,
"60": 117197.625,
"61": 84652.1171875,
"62": 117615.578125,
"63": 103500.921875,
"64": 89320.9453125,
"65": 76313.9296875,
"66": 55575.00390625,
"67": 76784.6796875,
"68": 28829.162109375,
"69": 26051.6171875,
"70": 42957.18359375,
"71": 50342.6953125,
"72": 37611.33984375,
"73": 38202.83203125,
},
"rt": {
"0": 2.1030805111,
"1": 2.1084616184,
"2": 2.1139531136,
"3": 2.1193552017,
"4": 2.1248509884,
"5": 2.1302509308,
"6": 2.135682106,
"7": 2.1411821842,
"8": 2.1459801197,
"9": 2.1513926983,
"10": 2.1568279266,
"11": 2.1622362137,
"12": 2.1676549911,
"13": 2.1730883121,
"14": 2.179015398,
"15": 2.1845297813,
"16": 2.1900422573,
"17": 2.1949694157,
"18": 2.20002985,
"19": 2.2055358887,
"20": 2.2110378742,
"21": 2.2165191174,
"22": 2.2219588757,
"23": 2.2273921967,
"24": 2.2328462601,
"25": 2.2382712364,
"26": 2.2437169552,
"27": 2.2492566109,
"28": 2.2547125816,
"29": 2.2601687908,
"30": 2.2656960487,
"31": 2.2704958916,
"32": 2.2758042812,
"33": 2.2813498974,
"34": 2.2868082523,
"35": 2.2922415733,
"36": 2.2976748943,
"37": 2.3031060696,
"38": 2.308131218,
"39": 2.313628912,
"40": 2.3185498714,
"41": 2.3239560127,
"42": 2.3293914795,
"43": 2.3349123001,
"44": 2.3403663635,
"45": 2.346799612,
"46": 2.3522267342,
"47": 2.3576600552,
"48": 2.3631224632,
"49": 2.3685662746,
"50": 2.3740911484,
"51": 2.3794057369,
"52": 2.3848536015,
"53": 2.3903660774,
"54": 2.3953785896,
"55": 2.4006638527,
"56": 2.4062638283,
"57": 2.411709547,
"58": 2.4171659946,
"59": 2.4226117134,
"60": 2.4302260876,
"61": 2.4357616901,
"62": 2.4407405853,
"63": 2.4461927414,
"64": 2.451615572,
"65": 2.4571509361,
"66": 2.4627010822,
"67": 2.4681572914,
"68": 2.4735822678,
"69": 2.4735822678,
"70": 2.4787945747,
"71": 2.4842174053,
"72": 2.4896612167,
"73": 2.495146513,
},
"polarity": {
"0": 1,
"1": 1,
"2": 1,
"3": 1,
"4": 1,
"5": 1,
"6": 1,
"7": 1,
"8": 1,
"9": 1,
"10": 1,
"11": 1,
"12": 1,
"13": 1,
"14": 1,
"15": 1,
"16": 1,
"17": 1,
"18": 1,
"19": 1,
"20": 1,
"21": 1,
"22": 1,
"23": 1,
"24": 1,
"25": 1,
"26": 1,
"27": 1,
"28": 1,
"29": 1,
"30": 1,
"31": 1,
"32": 1,
"33": 1,
"34": 1,
"35": 1,
"36": 1,
"37": 1,
"38": 1,
"39": 1,
"40": 1,
"41": 1,
"42": 1,
"43": 1,
"44": 1,
"45": 1,
"46": 1,
"47": 1,
"48": 1,
"49": 1,
"50": 1,
"51": 1,
"52": 1,
"53": 1,
"54": 1,
"55": 1,
"56": 1,
"57": 1,
"58": 1,
"59": 1,
"60": 1,
"61": 1,
"62": 1,
"63": 1,
"64": 1,
"65": 1,
"66": 1,
"67": 1,
"68": 1,
"69": 1,
"70": 1,
"71": 1,
"72": 1,
"73": 1,
},
}
)
@pytest.fixture(name="ms2_pos")
def fixture_ms2_pos():
return pd.DataFrame(
data={
"mz": {
"0": 252.1081695557,
"1": 252.1564941406,
"2": 252.1087036133,
"3": 252.1572875977,
"4": 252.1089019775,
"5": 252.1550292969,
"6": 252.1090698242,
"7": 252.1557617188,
},
"i": {
"0": 32103.3515625,
"1": 6470.0009765625,
"2": 93112.0859375,
"3": 7624.11328125,
"4": 131062.0,
"5": 6535.4560546875,
"6": 76976.7265625,
"7": 6090.6440429688,
},
"rt": {
"0": 2.0097544193,
"1": 2.0097544193,
"2": 2.2203779221,
"3": 2.2203779221,
"4": 2.327804327,
"5": 2.327804327,
"6": 2.3452186584,
"7": 2.3452186584,
},
"polarity": {"0": 1, "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1},
"precursor_MZ": {
"0": 252.0195159912,
"1": 252.0195159912,
"2": 252.10887146,
"3": 252.10887146,
"4": 252.0194854736,
"5": 252.0194854736,
"6": 252.1089477539,
"7": 252.1089477539,
},
"precursor_intensity": {
"0": 2748235.5,
"1": 2748235.5,
"2": 2872807.5,
"3": 2872807.5,
"4": 3536752.25,
"5": 3536752.25,
"6": 3046732.75,
"7": 3046732.75,
},
"collision_energy": {
"0": 23.3333339691,
"1": 23.3333339691,
"2": 23.3333339691,
"3": 23.3333339691,
"4": 23.3333339691,
"5": 23.3333339691,
"6": 23.3333339691,
"7": 23.3333339691,
},
}
)
@pytest.fixture(name="ms1_neg_empty")
def fixture_ms1_neg_empty():
    """Empty MS1 DataFrame with the negative-mode column layout (no rows)."""
    return pd.DataFrame(data={"mz": {}, "i": {}, "rt": {}, "polarity": {}})
@pytest.fixture(name="ms2_neg_empty")
def fixture_ms2_neg_empty():
    """Empty MS2 DataFrame with the negative-mode column layout (no rows)."""
    return pd.DataFrame(
        data={
            "mz": {},
            "i": {},
            "rt": {},
            "polarity": {},
            "precursor_MZ": {},
            "precursor_intensity": {},
            "collision_energy": {},
        }
    )
@pytest.fixture(name="df_container")
def fixture_df_container(ms1_pos, ms2_pos, ms1_neg_empty, ms2_neg_empty):
    """Per-file data container: MS1/MS2 DataFrames keyed by level and polarity."""
    return {"ms1_neg": ms1_neg_empty, "ms1_pos": ms1_pos, "ms2_neg": ms2_neg_empty, "ms2_pos": ms2_pos}
@pytest.fixture(name="ms1_summary")
def fixture_ms1_summary():
    """Expected MS1 summary statistics (peak/centroid m/z and RT, height, area)."""
    return {
        "num_ms1_datapoints": 85.0,
        "mz_peak": 252.1092987060547,
        "rt_peak": 2.2775044441223145,
        "mz_centroid": 252.10915042669814,
        "rt_centroid": 2.218492414487913,
        "peak_height": 304761.90625,
        "peak_area": 7696977.46875,
    }
@pytest.fixture(name="msms")
def fixture_msms():
    """MSMS record whose spectrum arrays are all empty (structure only)."""
    return {
        "data": {
            "mz": np.array([], dtype=np.float64),
            "i": np.array([], dtype=np.float64),
            "rt": np.array([], dtype=np.float64),
            "polarity": np.array([], dtype=np.float64),
            "precursor_MZ": np.array([], dtype=np.float64),
            "precursor_intensity": np.array([], dtype=np.float64),
            "collision_energy": np.array([], dtype=np.float64),
        }
    }
@pytest.fixture(name="groups_controlled_vocab")
def fixture_groups_controlled_vocab():
    """Vocabulary tokens passed to AnalysisIdentifiers; presumably substrings
    used to classify runs into groups (QC, injection blank, internal standard)
    — TODO confirm against AnalysisIdentifiers."""
    return ["QC", "InjBl", "ISTD"]
@pytest.fixture(name="metatlas_dataset")
def fixture_metatlas_dataset(mocker, df_container, analysis_ids, lcmsrun, sqlite_with_atlas):
    """MetatlasDataset built from the canned df_container instead of real h5 files."""
    mocker.patch(
        "metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file", return_value=df_container
    )
    mocker.patch("metatlas.plots.dill2plots.get_metatlas_files", return_value=[lcmsrun])
    return mads.MetatlasDataset(ids=analysis_ids, save_metadata=False)
@pytest.fixture(name="metatlas_dataset_with_2_cids")
def fixture_metatlas_dataset_with_2_cids(
    mocker,
    df_container,
    analysis_ids_with_2_cids,
    lcmsrun,
    sqlite_with_atlas_with_2_cids,
):
    """MetatlasDataset over the two-compound atlas, fed by the canned df_container."""
    mocker.patch(
        "metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file", return_value=df_container
    )
    mocker.patch("metatlas.plots.dill2plots.get_metatlas_files", return_value=[lcmsrun])
    return mads.MetatlasDataset(ids=analysis_ids_with_2_cids, save_metadata=False)
@pytest.fixture(name="metatlas_dataset_with_qc_runs")
def fixture_metatlas_dataset_with_qc_runs(
    mocker, df_container, analysis_ids, lcmsrun, sqlite_with_atlas, qc_lcmsruns
):
    """MetatlasDataset whose LCMS-run lookup returns the QC runs fixture."""
    mocker.patch(
        "metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file", return_value=df_container
    )
    mocker.patch("metatlas.plots.dill2plots.get_metatlas_files", return_value=qc_lcmsruns)
    return mads.MetatlasDataset(ids=analysis_ids, save_metadata=False)
@pytest.fixture(name="eic")
def fixture_eic():
return {
"mz": [
252.1089324951172,
252.10943603515625,
252.10926818847656,
252.109375,
252.10923767089844,
252.10910034179688,
252.10914611816406,
252.1089630126953,
252.10971069335938,
252.1093292236328,
252.10934448242188,
252.109130859375,
252.10935974121094,
252.10939025878906,
252.1090545654297,
252.10916137695312,
252.10946655273438,
252.10923767089844,
252.1093292236328,
252.10919189453125,
252.10914611816406,
252.10897827148438,
252.10934448242188,
252.10928344726562,
252.10888671875,
252.10926818847656,
252.109130859375,
252.1090087890625,
252.10934448242188,
252.10939025878906,
252.1093292236328,
252.1091766357422,
252.109130859375,
252.1095428466797,
252.10890197753906,
252.1095428466797,
252.109130859375,
252.10911560058594,
252.1091766357422,
252.1088409423828,
252.10916137695312,
252.10935974121094,
252.10928344726562,
252.10922241210938,
252.10914611816406,
252.10922241210938,
252.10894775390625,
252.10906982421875,
252.10914611816406,
252.10916137695312,
252.10910034179688,
252.10916137695312,
252.10934448242188,
252.10899353027344,
252.10928344726562,
252.10897827148438,
252.10916137695312,
252.10928344726562,
252.1092987060547,
252.1089324951172,
252.10914611816406,
252.1090545654297,
252.10914611816406,
252.1090850830078,
252.10894775390625,
252.10914611816406,
252.10911560058594,
252.1090850830078,
252.109130859375,
252.10903930664062,
252.10890197753906,
252.109130859375,
252.10885620117188,
252.10914611816406,
252.10926818847656,
252.10888671875,
252.109619140625,
252.10922241210938,
252.1092529296875,
252.1099853515625,
252.10972595214844,
252.10910034179688,
252.10935974121094,
252.1088409423828,
252.10838317871094,
252.11212158203125,
],
"rt": [
1.7180122137069702,
1.8222843408584595,
1.838305115699768,
1.8444031476974487,
1.8705799579620361,
1.875998616218567,
1.8913277387619019,
1.9020838737487793,
1.9127358198165894,
1.9397128820419312,
1.9451169967651367,
1.9505127668380737,
1.955920934677124,
1.966427206993103,
1.9718105792999268,
1.9769750833511353,
1.9823375940322876,
1.987752079963684,
1.9932082891464233,
1.9986457824707031,
2.0094456672668457,
2.019866466522217,
2.030582904815674,
2.036003589630127,
2.0568389892578125,
2.062201499938965,
2.0675911903381348,
2.0834577083587646,
2.088857650756836,
2.0939910411834717,
2.099109649658203,
2.104536771774292,
2.1208388805389404,
2.1262447834014893,
2.1420176029205322,
2.152921676635742,
2.15836763381958,
2.163788318634033,
2.169198751449585,
2.1755259037017822,
2.180954933166504,
2.18635892868042,
2.191038131713867,
2.1964569091796875,
2.2018840312957764,
2.2069132328033447,
2.21236515045166,
2.2177650928497314,
2.2228589057922363,
2.2283151149749756,
2.2338151931762695,
2.239321231842041,
2.244842052459717,
2.250317096710205,
2.255610704421997,
2.261033535003662,
2.2665293216705322,
2.2720251083374023,
2.2775044441223145,
2.28295636177063,
2.288454294204712,
2.29386043548584,
2.299298048019409,
2.304720878601074,
2.310127019882202,
2.3155603408813477,
2.320981025695801,
2.326420545578003,
2.33160400390625,
2.3370935916900635,
2.3428516387939453,
2.3483099937438965,
2.3535475730895996,
2.3589975833892822,
2.364443302154541,
2.3699119091033936,
2.375347375869751,
2.3808369636535645,
2.3862972259521484,
2.3917577266693115,
2.397282600402832,
2.402780294418335,
2.4081971645355225,
2.419055461883545,
2.457223892211914,
3.3080079555511475,
],
"intensity": [
34249.71484375,
28511.658203125,
41718.13671875,
33448.546875,
40190.94140625,
32525.16015625,
37058.60546875,
51132.91015625,
36473.0546875,
42659.0859375,
45187.6171875,
51186.30078125,
58456.5859375,
43299.24609375,
52062.02734375,
42501.8671875,
39734.91015625,
41848.02734375,
48979.640625,
42957.48046875,
54214.27734375,
63583.64453125,
38661.046875,
47146.54296875,
36974.3046875,
37674.35546875,
37412.4609375,
47036.44921875,
32295.888671875,
39751.12109375,
47359.0,
57496.41796875,
33690.4765625,
36853.53515625,
33045.0703125,
33235.64453125,
52481.1015625,
48210.37109375,
62178.734375,
73049.2109375,
52741.03125,
88225.1953125,
101593.296875,
127965.625,
124079.859375,
134410.46875,
148749.0,
134068.8125,
141625.515625,
202721.015625,
204341.703125,
172160.484375,
185859.765625,
195729.234375,
216657.453125,
239248.65625,
172232.296875,
195105.046875,
304761.90625,
181052.265625,
222467.5625,
251571.53125,
205874.765625,
224279.0625,
173697.359375,
236325.078125,
153999.28125,
156835.59375,
118963.8046875,
105766.234375,
103081.484375,
97180.5625,
95681.4140625,
74239.0703125,
69208.8984375,
60604.1484375,
37020.84765625,
32874.484375,
24641.875,
23305.75,
23413.94140625,
42582.77734375,
35980.16796875,
25743.97265625,
21777.99609375,
59454.40234375,
],
}
@pytest.fixture(name="atlas_df")
def fixture_atlas_df(metatlas_dataset):
    """Atlas DataFrame taken from the single-compound metatlas_dataset fixture."""
    return metatlas_dataset.atlas_df
@pytest.fixture(name="compound")
def fixture_compound(username):
compound = metob.Compound()
compound.unique_id = "60cd6743e56545c6a6cb066ec3553450"
compound.mono_isotopic_molecular_weight = 251.101839276
compound.creation_time = 1466212395
compound.synonyms = "2'-deoxyadenosine" # value was pruned down
compound.inchi_key = "OLXZPDWKRNYJJZ-RRKCRQDMSA-N"
compound.chebi_url = "http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:17256"
compound.permanent_charge = 0
compound.img_abc_id = ""
compound.neutralized_2d_inchi = "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)" # noqa: E501
compound.lipidmaps_url = ""
compound.source = "gnps///chebi///metacyc///hmdb"
compound.kegg_url = "http://www.genome.jp/dbget-bin/www_bget?C00559"
compound.hmdb_url = "http://www.hmdb.ca/metabolites/HMDB00101"
compound.wikipedia_url = ""
compound.head_id = "60cd6743e56545c6a6cb066ec3553450"
compound.formula = "C10H13N5O3"
compound.number_components = 1
compound.iupac_name = ""
compound.username = username
compound.pubchem_compound_id = "13730"
compound.description = "A purine 2'-deoxyribonucleoside having adenine as the nucleobase."
compound.metacyc_id = "DEOXYADENOSINE"
compound.kegg_id = "C00559"
compound.hmdb_id = "HMDB00101"
compound.chebi_id = "CHEBI:17256"
compound.inchi = "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1" # noqa: E501
compound.neutralized_inchi_key = "OLXZPDWKRNYJJZ-RRKCRQDMSA-N"
compound.prev_uid = "origin"
compound.neutralized_inchi = "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1" # noqa: E501
compound.name = "2'-deoxyadenosine"
compound.neutralized_2d_inchi_key = "OLXZPDWKRNYJJZ-UHFFFAOYSA-N"
compound.num_free_radicals = 0
compound.lipidmaps_id = ""
compound.last_modified = 1612996604
compound.pubchem_url = "http://pubchem.ncbi.nlm.nih.gov/compound/13730"
return compound
@pytest.fixture(name="rt_reference")
def fixture_rt_reference(username):
rt_ref = metob.RtReference()
rt_ref.unique_id = "a845ddfdf8ef4713bcef3bdb84999030"
rt_ref.username = username
rt_ref.rt_units = "min"
rt_ref.description = "No description"
rt_ref.rt_peak = "2.1964640053707174"
rt_ref.enabled = True
rt_ref.creation_time = 1613002850
rt_ref.lcms_run = None
rt_ref.rt_min = 1.6964640053707174
rt_ref.last_modified = 1613002979
rt_ref.ref_type = ""
rt_ref.prev_uid = "origin"
rt_ref.rt_max = 2.6964640053707174
rt_ref.name = "Untitled"
rt_ref.head_id = "a845ddfdf8ef4713bcef3bdb84999030"
return rt_ref
@pytest.fixture(name="mz_reference")
def fixture_mz_reference(username):
mz_ref = metob.MzReference()
mz_ref.unique_id = "eb6d03c9ef574051b92dad7b2fc259a2"
mz_ref.username = username
mz_ref.adduct = "[M+H]+"
mz_ref.description = "No description"
mz_ref.mz_tolerance_units = "ppm"
mz_ref.enabled = True
mz_ref.mz = 252.1091393
mz_ref.creation_time = 1613002850
mz_ref.lcms_run = None
mz_ref.mz_tolerance = 20.0
mz_ref.last_modified = 1613002979
mz_ref.detected_polarity = "positive"
mz_ref.modification = ""
mz_ref.ref_type = ""
mz_ref.observed_formula = ""
mz_ref.prev_uid = "origin"
mz_ref.name = "Untitled"
mz_ref.head_id = "eb6d03c9ef574051b92dad7b2fc259a2"
return mz_ref
@pytest.fixture(name="compound_identification")
def fixture_compound_identification(compound, rt_reference, mz_reference, username):
ident = metob.CompoundIdentification()
ident.unique_id = "18737c7141cc4efaa4545bead13ac751"
ident.username = username
ident.description = "No description"
ident.creation_time = 1613002849
ident.last_modified = 1613002979
ident.identification_grade = None
ident.prev_uid = "origin"
ident.name = "2'-deoxyadenosine"
ident.head_id = "18737c7141cc4efaa4545bead13ac751"
ident.internal_standard_to_use = ""
ident.internal_standard_id = ""
ident.do_normalization = False
ident.identification_notes = "my id note"
ident.ms2_notes = "-1,bad match to ref"
ident.ms1_notes = "keep"
ident.frag_references = []
ident.intensity_references = []
ident.compound = [compound]
ident.mz_references = [mz_reference]
ident.rt_references = [rt_reference]
return ident
@pytest.fixture(name="atlas")
def fixture_atlas(compound_identification):
    """Atlas containing exactly one compound identification."""
    small_atlas = metob.Atlas()
    small_atlas.compound_identifications = [compound_identification]
    return small_atlas
@pytest.fixture(name="compound_2")
def fixture_compound_2(username):
compound = metob.Compound()
compound.chebi_id = "CHEBI:16335"
compound.chebi_url = "http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:16335"
compound.creation_time = 1466212384
compound.description = "A ribonucleoside composed of a molecule of adenine attached to a ribofuranose moiety via a beta1N9-glycosidic bond."
compound.formula = "C10H13N5O4"
compound.head_id = "1ad02275f47b4033a451e99874f4764f"
compound.hmdb_id = "HMDB00050"
compound.hmdb_url = "http://www.hmdb.ca/metabolites/HMDB00050"
compound.img_abc_id = ""
compound.inchi = "InChI=1S/C10H13N5O4/c11-8-5-9(13-2-12-8)15(3-14-5)10-7(18)6(17)4(1-16)19-10/h2-4,6-7,10,16-18H,1H2,(H2,11,12,13)/t4-,6-,7-,10-/m1/s1"
compound.inchi_key = "OIRDTQYFTABQOQ-KQYNXXCUSA-N"
compound.iupac_name = ""
compound.kegg_id = "C00212"
compound.kegg_url = "http://www.genome.jp/dbget-bin/www_bget?C00212"
compound.last_modified = 1612996604
compound.lipidmaps_id = ""
compound.lipidmaps_url = ""
compound.metacyc_id = "ADENOSINE"
compound.mono_isotopic_molecular_weight = 267.096753896
compound.name = "adenosine"
compound.neutralized_2d_inchi = "InChI=1S/C10H13N5O4/c11-8-5-9(13-2-12-8)15(3-14-5)10-7(18)6(17)4(1-16)19-10/h2-4,6-7,10,16-18H,1H2,(H2,11,12,13)"
compound.neutralized_2d_inchi_key = "OIRDTQYFTABQOQ-UHFFFAOYSA-N"
compound.neutralized_inchi = "InChI=1S/C10H13N5O4/c11-8-5-9(13-2-12-8)15(3-14-5)10-7(18)6(17)4(1-16)19-10/h2-4,6-7,10,16-18H,1H2,(H2,11,12,13)/t4-,6-,7-,10-/m1/s1"
compound.neutralized_inchi_key = "OIRDTQYFTABQOQ-KQYNXXCUSA-N"
compound.num_free_radicals = 0
compound.number_components = 1
compound.permanent_charge = 0
compound.prev_uid = "origin"
compound.pubchem_compound_id = "60961"
compound.pubchem_url = "http://pubchem.ncbi.nlm.nih.gov/compound/60961"
compound.source = "chebi///wikidata///metacyc///gnps///hmdb"
compound.synonyms = "adenosine///58-61-7///Adenocard///Adenoscan" # this value was pruned down
compound.unique_id = "1ad02275f47b4033a451e99874f4764f"
compound.username = username
compound.wikipedia_url = ""
return compound
@pytest.fixture(name="rt_reference_2")
def fixture_rt_reference_2(username):
rt_ref = metob.RtReference()
rt_ref.creation_time = 1613002857
rt_ref.description = "No description"
rt_ref.enabled = True
rt_ref.head_id = "f74622bcef924f5390ba6e127633e731"
rt_ref.last_modified = 1613002980
rt_ref.lcms_run = None
rt_ref.name = "Untitled"
rt_ref.prev_uid = "origin"
rt_ref.ref_type = ""
rt_ref.rt_max = 3.5233184079926665
rt_ref.rt_min = 2.5233184079926665
rt_ref.rt_peak = 3.0233184079926665
rt_ref.rt_units = "min"
rt_ref.unique_id = "f74622bcef924f5390ba6e127633e731"
rt_ref.username = username
return rt_ref
@pytest.fixture(name="mz_reference_2")
def fixture_mz_reference_2(username):
mz_ref = metob.MzReference()
mz_ref.adduct = "[M+H]+"
mz_ref.creation_time = 1613002857
mz_ref.description = "No description"
mz_ref.detected_polarity = "positive"
mz_ref.enabled = True
mz_ref.head_id = "b0e3cf0df44a4079be7908c6b525d3ac"
mz_ref.last_modified = 1613002980
mz_ref.lcms_run = None
mz_ref.modification = ""
mz_ref.mz = 268.1040539
mz_ref.mz_tolerance = 20.0
mz_ref.mz_tolerance_units = "ppm"
mz_ref.name = "Untitled"
mz_ref.observed_formula = ""
mz_ref.prev_uid = "origin"
mz_ref.ref_type = ""
mz_ref.unique_id = "b0e3cf0df44a4079be7908c6b525d3ac"
mz_ref.username = username
return mz_ref
@pytest.fixture(name="compound_identification_2")
def fixture_compound_identification_2(compound_2, rt_reference_2, mz_reference_2, username):
ident = metob.CompoundIdentification()
ident.creation_time = 1613002856
ident.description = "No description"
ident.do_normalization = False
ident.frag_references = []
ident.head_id = "6cca7aa44c0e4a109f695ba980d69472"
ident.identification_grade = None
ident.identification_notes = ""
ident.intensity_references = []
ident.internal_standard_id = ""
ident.internal_standard_to_use = ""
ident.last_modified = 1613002980
ident.ms1_notes = ""
ident.ms2_notes = ""
ident.name = "adenosine"
ident.prev_uid = "origin"
ident.unique_id = "6cca7aa44c0e4a109f695ba980d69472"
ident.username = username
ident.frag_references = []
ident.intensity_references = []
ident.compound = [compound_2]
ident.mz_references = [mz_reference_2]
ident.rt_references = [rt_reference_2]
return ident
@pytest.fixture(name="atlas_with_2_cids")
def fixture_atlas_with_2_cids(compound_identification, compound_identification_2):
    """Atlas containing two compound identifications."""
    small_atlas = metob.Atlas()
    small_atlas.compound_identifications = [
        compound_identification,
        compound_identification_2,
    ]
    return small_atlas
@pytest.fixture(name="lcmsrun")
def fixture_lcmsrun(username):
run = metob.LcmsRun()
run.unique_id = "7ce51039cfca4426b4e51999ac45d018"
run.username = username
run.hdf5_file = "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5" # noqa: E501
run.description = "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583 20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.mzML" # noqa: E501
run.creation_time = 1605311923
run.sample = None
run.last_modified = 1620101765
run.mzml_file = "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.mzML" # noqa: E501
run.prev_uid = "28323058b6e84a9db0f9e802544764e3"
run.method = None
run.name = "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.mzML" # noqa: E501
run.head_id = "7ce51039cfca4426b4e51999ac45d018"
run.experiment = "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583"
run.injection_volume = 0.0
run.injection_volume_units = "uL"
run.acquisition_time = 1604770080
run.pass_qc = False
return run
@pytest.fixture(name="qc_lcmsruns")
def fixture_qc_lcmsruns(username):
    """
    Return six QC LcmsRun objects: pre/post QC runs in POS, NEG, and FPS modes.

    All six runs share the same experiment, directory layout, and constant fields;
    they differ only in the file stem, unique id, and timestamps, so each run is
    described by one tuple and the common fields are filled in once.
    """
    experiment = "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583"
    raw_dir = f"/project/projectdirs/metatlas/raw_data/akuftin/{experiment}"
    # (file stem, unique/head id, acquisition_time, creation_time str, last_modified str)
    run_specs = [
        (
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_0_QC_Pre_Rg70to1050-CE102040--QC_Run7",
            "c0459a277f654fdeacf48243a34207b4",
            1604734158,
            "2020-11-13T16:05:46",
            "2021-02-16T19:40:27",
        ),
        (
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_NEG_MSMS_0_QC_Post_Rg70to1050-CE102040--QC_Run309",
            "9f33a0c1793e46fc9c70a19b587a0117",
            1605168081,
            "2020-11-13T15:57:27",
            "2021-02-16T19:39:25",
        ),
        (
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_0_QC_Post_Rg70to1050-CE102040--QC_Run308",
            "8c93ee10f2af4238ae905d86debc87ce",
            1605166749,
            "2020-11-13T15:42:04",
            "2021-02-16T19:40:27",
        ),
        (
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_NEG_MSMS_0_QC_Pre_Rg70to1050-CE102040--QC_Run8",
            "855e0081dbb2473c8970f40db129d8f7",
            1604735488,
            "2020-11-13T15:52:48",
            "2021-02-16T19:39:25",
        ),
        (
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_FPS_MS1_0_QC_Post_Rg70to1050-CE102040--QC_Run307",
            "58905ea702f44d9199be928bc46fdb20",
            1605165417,
            "2020-11-13T16:03:25",
            "2021-02-16T19:38:49",
        ),
        (
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_FPS_MS1_0_QC_Pre_Rg70to1050-CE102040--QC_Run6",
            "392b1a859ed54e07bc34b55e06459db2",
            1604732826,
            "2020-11-13T16:15:04",
            "2021-02-16T19:38:49",
        ),
    ]
    return [
        metob.LcmsRun(
            name=f"{stem}.mzML",
            description=f"{experiment} {stem}.mzML",
            experiment=experiment,
            hdf5_file=f"{raw_dir}/{stem}.h5",
            mzml_file=f"{raw_dir}/{stem}.mzML",
            unique_id=uid,
            head_id=uid,
            prev_uid="origin",
            username=username,
            acquisition_time=acquired,
            creation_time=date_str_to_int(created),
            last_modified=date_str_to_int(modified),
            injection_volume=0.0,
            injection_volume_units="uL",
            method=None,
            sample=None,
            pass_qc=False,
        )
        for stem, uid, acquired, created, modified in run_specs
    ]
@pytest.fixture(name="group")
def fixture_group(lcmsrun, username):
    """Return a metob.Group containing the single lcmsrun fixture."""
    group_id = "61041d07b5a24ca5b88efbda8f319654"
    out = metob.Group()
    out.name = (
        f"20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_{username}0_Cone-S1"
    )
    out.short_name = "POS_Cone-S1"
    out.description = "No description"
    out.items = [lcmsrun]
    # unique_id and head_id are intentionally the same value.
    out.unique_id = group_id
    out.head_id = group_id
    out.prev_uid = "origin"
    out.username = username
    out.creation_time = 1620146477
    out.last_modified = 1620146477
    return out
@pytest.fixture(name="group_with_2_lcmsruns")
def fixture_group_with_2_lcmsruns(lcmsrun, username):
    """Return a metob.Group whose items list contains the lcmsrun fixture twice."""
    group_id = "61041d07b5a24ca5b88efbda8f319654"
    out = metob.Group()
    out.name = (
        f"20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_{username}0_Cone-S1"
    )
    out.short_name = "POS_Cone-S1"
    out.description = "No description"
    # The same run appears twice on purpose (duplicate-handling test case).
    out.items = [lcmsrun, lcmsrun]
    # unique_id and head_id are intentionally the same value.
    out.unique_id = group_id
    out.head_id = group_id
    out.prev_uid = "origin"
    out.username = username
    out.creation_time = 1620146477
    out.last_modified = 1620146477
    return out
@pytest.fixture(name="hits")
def fixture_hits():
    """
    MSMS hits DataFrame: four adenine spectral-library hits against one query scan.

    The numeric values were originally generated by:
    1. Running the docker testing image docker/local_jupyter.sh
    2. open /src/notebooks/reference/Targeted.ipynb
    3. Put the following in the second code block:
        source_atlas = 'HILICz150_ANT20190824_PRD_EMA_Unlab_POS_20201106_505892_root0'
        metatlas_repo_path = '/src'
        project_directory = '/out'
        max_cpus = 2
        experiment = '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583'
    4. After metatlas_dataset has been created, dumping the four rows of
       metatlas_dataset.hits for the ..._Run34.h5 file to JSON.

    The raw JSON dump was refactored here to remove duplication: the query spectrum
    is identical for all four hits except for the number of leading NaNs that align
    it with each reference spectrum, so it is stored once and padded per hit, and
    the MultiIndex tuples are built directly instead of via a copy_index column.
    """
    h5_file = (
        "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583"
        "_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5"
    )
    nan = np.nan
    # Query MSMS spectrum (m/z values and the matching intensities) shared by all
    # four hits; each hit prepends its own run of NaNs for alignment.
    query_mz = [
        53.2601699829, 59.4822044373, 65.2955932617, 66.7956771851, 75.0065155029,
        75.0689544678, 75.4281921387, 84.2779464722, 91.0504608154, 94.0367355347,
        102.1198806763, 108.4924850464, 119.0352630615, 121.0889511108, 123.1165771484,
        135.7551269531, 136.0224761963, 136.0620117188, 136.1121368408, 136.3276824951,
        137.046295166,
    ]
    query_intensity = [
        2901.2893066406, 3058.2041015625, 2817.9626464844, 3278.6765136719,
        3068.3347167969, 8541.603515625, 2778.4802246094, 2839.1333007812,
        4060.1638183594, 5292.673828125, 3443.1560058594, 3947.8520507812,
        8919.974609375, 5798.638671875, 3330.2827148438, 2859.4689941406,
        18918.111328125, 625742.3125, 91467.8984375, 4438.6645507812, 11957.54296875,
    ]
    # Per-hit data, in the original row order:
    # (compound id, query NaN pad length, num_matches, score,
    #  aligned reference m/z list, aligned reference intensity list).
    # Each reference list has length pad + len(query_mz).
    hit_rows = [
        (
            "29247268c3cf4acfb649ebce7b0c9e0c",
            1,
            7,
            0.7861480398,
            [51.3947] + [nan] * 8
            + [91.0548, 94.0404, nan, nan, 119.035, nan, nan, nan,
               136.022, 136.062, 136.112, nan, 137.046],
            [1870.1] + [nan] * 8
            + [3051.11, 13543.2, nan, nan, 28284.0, nan, nan, nan,
               55585.3, 1607820.0, 17469.6, nan, 43758.8],
        ),
        (
            "50334867a31f4cab973459a59d5731c4",
            14,
            4,
            0.8248297009,
            [52.1001, 53.5537, 54.6096, 57.8238, 63.3067, 64.108, 82.7587, 93.0862,
             94.6115, 111.471, 113.584, 115.21, 137.067, 137.476] + [nan] * 9
            + [94.0407, nan, nan, 119.036, nan, nan, nan, nan,
               136.062, nan, nan, 137.046],
            [491091.0, 614205.0, 486992.0, 569335.0, 2513570.0, 554436.0, 577010.0,
             580100.0, 930338.0, 567270.0, 515519.0, 616418.0, 17234000.0, 693366.0]
            + [nan] * 9
            + [2437690.0, nan, nan, 7680000.0, nan, nan, nan, nan,
               514804000.0, nan, nan, 4940020.0],
        ),
        (
            "8ba70c0f245247eeb6ba90011026763a",
            9,
            4,
            0.8078499983,
            [59.3596, 62.4513, 63.2027, 76.4601, 86.8208, 115.912, 115.975, 123.375,
             137.067] + [nan] * 9
            + [94.0407, nan, nan, 119.036, nan, nan, nan, nan,
               136.062, nan, nan, 137.046],
            [55769.1, 43616.3, 118692.0, 54358.0, 48393.1, 45996.2, 55157.9, 61623.1,
             1357390.0] + [nan] * 9
            + [121260.0, nan, nan, 306316.0, nan, nan, nan, nan,
               41864400.0, nan, nan, 370525.0],
        ),
        (
            "9d53a44c42004e16a468e92e2b0a7009",
            15,
            4,
            0.8274397807,
            [55.0301, 56.3854, 66.7513, 67.0298, 81.1529, 82.4076, 92.0251, 92.3892,
             104.302, 109.051, 112.051, 135.054, 135.653, 136.227, 136.474] + [nan] * 9
            + [94.0405, nan, nan, 119.035, nan, nan, nan, nan,
               136.062, nan, nan, 137.046],
            [246689.0, 186484.0, 198526.0, 974057.0, 232546.0, 306008.0, 388476.0,
             265393.0, 246201.0, 1625240.0, 1318880.0, 345780.0, 925801.0, 254046.0,
             715569.0] + [nan] * 9
            + [7436560.0, nan, nan, 23732500.0, nan, nan, nan, nan,
               884493000.0, nan, nan, 23845700.0],
        ),
    ]
    records = []
    index_tuples = []
    for cid, pad_len, num_matches, score, ref_mz, ref_intensity in hit_rows:
        pad = [nan] * pad_len
        records.append(
            {
                # Column insertion order matches the original DataFrame's
                # post-drop column order.
                "adduct": "[M+H]+",
                "inchi_key": "GFFGJBXGBJISGV-UHFFFAOYSA-N",
                "measured_precursor_intensity": 1779719.0,
                "measured_precursor_mz": 136.06199646,
                "msv_query_aligned": [pad + query_mz, pad + query_intensity],
                "msv_ref_aligned": [ref_mz, ref_intensity],
                "name": "adenine",
                "num_matches": num_matches,
                "precursor_mz": 136.0617952,
                "score": score,
            }
        )
        # msms_scan value matches the rounded copy_index value of the original dump.
        index_tuples.append(("metatlas", cid, h5_file, 2.6239302158))
    hits_plus = pd.DataFrame(records)
    hits_plus.index = pd.MultiIndex.from_tuples(
        index_tuples, names=["database", "id", "file_name", "msms_scan"]
    )
    return hits_plus
@pytest.fixture(name="msms_refs")
def fixture_msms_refs():
    """Reference MS/MS spectra table for 2'-deoxyadenosine.

    Builds a DataFrame of library spectra (metatlas and MoNA entries) keyed by a
    ("database", "id") MultiIndex, then returns ONLY the first row via
    ``.iloc[0:1]`` — i.e. the single metatlas entry
    ("metatlas", "c7dddd297e104ca79caea72a90150532").
    Columns include name, spectrum (stringified [[mz...], [intensity...]] pairs),
    decimal, precursor_mz, polarity, adduct, fragmentation_method,
    collision_energy, instrument(_type), formula, exact_mass, inchi_key,
    inchi, and smiles.
    """
    return (
        pd.DataFrame(
            data={
                "name": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): "2'-deoxyadenosine",
                    ("mona", "KO002730"): "2'-Deoxyadenosine",
                    ("mona", "KO002729"): "2'-Deoxyadenosine",
                    ("mona", "KO008947"): "2'-Deoxyadenosine",
                    ("mona", "KO002727"): "2'-Deoxyadenosine",
                    ("mona", "KO002728"): "2'-Deoxyadenosine",
                    ("mona", "KO002726"): "2'-Deoxyadenosine",
                    ("mona", "PR100081"): "2'-Deoxyadenosine",
                    ("mona", "PR100080"): "2'-Deoxyadenosine",
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): "2'-deoxyadenosine",
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "2'-deoxyadenosine",
                },
                "spectrum": {
                    (
                        "metatlas",
                        "c7dddd297e104ca79caea72a90150532",
                    ): "[[57.0345, 63.3177, 63.3205, 69.0344, 71.0499, 73.0292, 84.9778, 99.0447, 117.055, 118.059, 136.062, 137.066, 236.709, 252.109, 253.112], [176328.0, 328818.0, 274432.0, 197637.0, 896360.0, 1192020.0, 378547.0, 3921880.0, 15737700.0, 266131.0, 144220000.0, 3455270.0, 185227.0, 20960800.0, 1284450.0]]",
                    (
                        "mona",
                        "KO002730",
                    ): "[[40.9, 43.1, 45.0, 57.1, 67.1, 69.1, 71.1, 72.7, 76.8, 79.0, 80.8, 83.2, 91.8, 92.4, 93.2, 94.1, 95.0, 102.8, 105.3, 107.3, 109.1, 116.8, 119.2, 123.0, 129.9, 136.2, 165.9], [3.501946, 10.700389, 5.447471, 16.536965, 1.945525, 9.727626, 5.642023, 8.171206, 24.513619, 66.731518, 2.918288, 4.474708, 2.529183, 1.750973, 0.583658, 9.533074, 3.891051, 0.972763, 12.062257, 2.140078, 5.058366, 0.389105, 48.44358, 2.529183, 14.007782, 100.0, 0.389105]]",
                    (
                        "mona",
                        "KO002729",
                    ): "[[35.8, 41.0, 43.1, 45.2, 52.9, 55.2, 57.4, 59.1, 61.4, 69.2, 71.1, 73.0, 77.0, 79.0, 81.3, 83.1, 91.2, 94.0, 99.3, 99.9, 101.1, 103.1, 105.0, 106.7, 107.4, 108.9, 111.1, 115.0, 117.2, 119.1, 120.4, 123.1, 130.1, 135.1, 136.0, 136.9, 141.3, 147.1, 166.0, 170.7], [0.170503, 0.383632, 3.665814, 0.937766, 0.127877, 0.895141, 9.079284, 0.852515, 0.341006, 4.390452, 7.1185, 5.242967, 1.960784, 32.139812, 1.875533, 2.429668, 1.278772, 1.491901, 2.216539, 1.364024, 1.364024, 0.511509, 8.01364, 0.468883, 0.255754, 1.321398, 0.426257, 0.255754, 1.193521, 6.734868, 0.170503, 6.990622, 8.823529, 0.213129, 100.0, 0.468883, 0.085251, 0.29838, 0.639386, 0.127877]]",
                    (
                        "mona",
                        "KO008947",
                    ): "[[71.1, 73.2, 81.1, 89.2, 94.1, 99.1, 101.0, 109.0, 117.1, 119.1, 128.9, 130.0, 133.3, 136.1, 136.9, 137.8, 149.3, 156.5, 165.1, 187.1, 195.1, 213.8, 215.1, 216.1, 217.1, 223.9, 234.1, 251.0, 252.1, 253.0, 270.9], [0.01998, 0.014577, 0.003889, 0.047639, 0.031539, 0.085402, 0.011502, 0.010675, 0.361156, 0.125255, 0.051259, 0.022955, 0.011046, 100.0, 0.116678, 0.01325, 0.029859, 0.006369, 0.003048, 0.01887, 0.066214, 0.003726, 0.011393, 0.013584, 0.013105, 0.010913, 0.080999, 0.012124, 0.179916, 0.010441, 0.005516]]",
                    (
                        "mona",
                        "KO002727",
                    ): "[[54.2, 57.3, 59.1, 69.2, 71.1, 72.2, 72.8, 74.9, 78.9, 80.1, 80.8, 83.1, 85.4, 87.0, 88.9, 91.1, 93.8, 95.2, 99.0, 100.0, 101.0, 105.0, 107.0, 109.0, 111.5, 113.0, 115.2, 116.3, 117.2, 119.1, 121.3, 122.2, 123.2, 124.4, 129.1, 130.0, 133.0, 135.1, 136.1, 139.4, 145.7, 149.4, 153.0, 157.4, 158.4, 163.0, 165.3, 166.4, 175.1, 176.4, 179.3, 181.1, 184.0, 184.7, 189.2, 191.5, 199.3, 203.5, 207.2, 217.3, 220.1, 235.3, 252.2], [2.60144, 3.583115, 0.098168, 0.179974, 9.080497, 0.294503, 0.507199, 0.081806, 1.014398, 0.13089, 0.114529, 0.13089, 0.098168, 0.212696, 0.229058, 0.490838, 0.065445, 0.196335, 0.998037, 5.039267, 4.744764, 1.210733, 0.147251, 0.376309, 1.963351, 1.259817, 0.081806, 0.065445, 5.611911, 0.114529, 0.556283, 1.194372, 35.02945, 0.049084, 0.91623, 1.996073, 0.114529, 0.556283, 100.0, 0.114529, 0.081806, 0.147251, 0.098168, 0.081806, 0.179974, 0.114529, 0.147251, 0.768979, 6.25, 0.114529, 0.343586, 0.032723, 0.310864, 0.163613, 0.310864, 0.278141, 0.65445, 0.39267, 0.212696, 1.897906, 0.294503, 7.509817, 3.043194]]",
                    (
                        "mona",
                        "KO002728",
                    ): "[[36.0, 42.8, 55.4, 57.3, 59.3, 60.8, 68.8, 71.0, 72.8, 76.2, 77.4, 79.1, 80.9, 83.4, 85.3, 87.3, 88.9, 91.0, 93.2, 95.0, 97.0, 99.1, 100.2, 101.1, 102.4, 105.1, 107.0, 109.2, 111.2, 112.9, 117.0, 119.4, 121.0, 122.5, 123.2, 128.9, 130.2, 133.2, 136.2, 150.9, 158.0, 161.1, 163.0, 166.3, 175.2, 179.2, 189.0, 191.2, 207.1, 217.5, 235.3], [0.804783, 0.66682, 0.229938, 6.829156, 0.459876, 0.091975, 2.230398, 10.255231, 3.173143, 0.137963, 0.160957, 13.152449, 0.896758, 1.425615, 0.206944, 0.091975, 0.436882, 0.413888, 0.137963, 0.551851, 0.18395, 3.885951, 2.644286, 2.943205, 0.091975, 4.828696, 0.275926, 0.505863, 1.241665, 0.229938, 4.621752, 0.804783, 0.252932, 0.252932, 20.303518, 0.298919, 6.36928, 0.229938, 100.0, 0.045988, 0.321913, 0.229938, 0.068981, 1.172683, 1.057714, 1.034721, 0.298919, 0.068981, 0.114969, 0.344907, 2.023454]]",
                    (
                        "mona",
                        "KO002726",
                    ): "[[54.0, 57.2, 71.1, 72.2, 73.5, 77.7, 80.2, 82.4, 87.0, 90.3, 100.0, 101.2, 104.6, 106.0, 108.3, 109.4, 111.1, 112.3, 113.3, 116.4, 117.3, 118.2, 121.3, 122.3, 123.2, 125.9, 129.0, 129.9, 131.2, 135.1, 136.2, 137.4, 139.4, 140.9, 143.8, 146.3, 148.2, 152.5, 153.1, 159.7, 162.1, 166.3, 171.1, 175.2, 177.1, 178.0, 179.0, 180.1, 184.1, 185.5, 188.0, 192.2, 198.2, 199.2, 202.6, 203.1, 206.9, 207.4, 216.3, 217.6, 220.2, 224.2, 234.3, 235.2, 252.3], [2.518936, 0.334684, 3.399683, 11.044566, 0.052845, 0.334684, 0.193764, 0.088075, 0.07046, 2.096178, 7.02836, 1.514885, 0.10569, 0.052845, 0.546063, 0.140919, 0.140919, 0.10569, 24.255769, 0.140919, 0.352299, 0.211379, 0.334684, 4.192355, 38.400564, 0.176149, 0.123305, 0.052845, 0.140919, 0.123305, 37.819271, 0.07046, 0.052845, 0.123305, 0.228994, 0.07046, 0.10569, 0.669368, 1.638189, 0.07046, 0.123305, 1.092126, 0.334684, 10.991721, 0.10569, 0.07046, 0.07046, 0.211379, 2.378017, 0.052845, 0.123305, 5.302096, 0.246609, 0.387529, 0.211379, 0.634138, 0.123305, 0.123305, 0.07046, 7.592038, 1.46204, 0.088075, 1.726264, 59.098115, 100.0]]",
                    ("mona", "PR100081"): "[[117.0574, 136.0651, 252.1096], [15.868531, 100.0, 48.929209]]",
                    ("mona", "PR100080"): "[[136.0631, 252.1096], [39.169289, 100.0]]",
                    (
                        "metatlas",
                        "e0025042a1a844d6b6926252edce91e5",
                    ): "[[66.7578, 70.38, 73.6972, 73.9685, 82.2146, 92.3969, 102.12, 104.312, 111.673, 136.062, 139.036, 158.043, 161.337, 168.39, 202.526, 235.987, 246.005, 274.002, 274.091, 274.273], [2649.93, 1977.51, 2080.95, 2643.01, 2450.61, 2214.72, 2214.78, 2349.55, 2163.28, 2982.16, 9507.9, 29909.8, 2525.4, 2199.08, 2170.93, 2443.12, 3793.61, 24676.1, 534389.0, 2775.85]]",
                    (
                        "metatlas",
                        "0568278b45d244fcb5787792fc17b3ec",
                    ): "[[51.5615, 57.0342, 64.0128, 69.0341, 71.0498, 73.029, 73.9804, 81.0338, 82.4275, 88.5237, 93.5638, 99.0444, 105.478, 117.055, 118.698, 126.793, 136.062, 252.108, 252.133], [845648.0, 896704.0, 912599.0, 2052520.0, 5955880.0, 8407590.0, 965782.0, 1548360.0, 1093910.0, 924679.0, 809760.0, 17986900.0, 949617.0, 56688000.0, 1347680.0, 891451.0, 468230000.0, 73715000.0, 1526730.0]]",
                },
                "decimal": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): 4,
                    ("mona", "KO002730"): 3,
                    ("mona", "KO002729"): 3,
                    ("mona", "KO008947"): 1,
                    ("mona", "KO002727"): 3,
                    ("mona", "KO002728"): 3,
                    ("mona", "KO002726"): 3,
                    ("mona", "PR100081"): 4,
                    ("mona", "PR100080"): 4,
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): 4,
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): 4,
                },
                "precursor_mz": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): 252.109,
                    ("mona", "KO002730"): 252.0,
                    ("mona", "KO002729"): 252.0,
                    ("mona", "KO008947"): 252.0,
                    ("mona", "KO002727"): 252.0,
                    ("mona", "KO002728"): 252.0,
                    ("mona", "KO002726"): 252.0,
                    ("mona", "PR100081"): 252.10963,
                    ("mona", "PR100080"): 252.10963,
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): 274.091,
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): 252.109,
                },
                "polarity": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): "positive",
                    ("mona", "KO002730"): "positive",
                    ("mona", "KO002729"): "positive",
                    ("mona", "KO008947"): "positive",
                    ("mona", "KO002727"): "positive",
                    ("mona", "KO002728"): "positive",
                    ("mona", "KO002726"): "positive",
                    ("mona", "PR100081"): "positive",
                    ("mona", "PR100080"): "positive",
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): "positive",
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "positive",
                },
                "adduct": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): np.nan,
                    ("mona", "KO002730"): "[M+H]+",
                    ("mona", "KO002729"): "[M+H]+",
                    ("mona", "KO008947"): "[M+H]+",
                    ("mona", "KO002727"): "[M+H]+",
                    ("mona", "KO002728"): "[M+H]+",
                    ("mona", "KO002726"): "[M+H]+",
                    ("mona", "PR100081"): "[M+H]+",
                    ("mona", "PR100080"): "[M+H]+",
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): "[M+Na]+",
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "[M+H]+",
                },
                "fragmentation_method": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): "cid",
                    ("mona", "KO002730"): np.nan,
                    ("mona", "KO002729"): np.nan,
                    ("mona", "KO008947"): np.nan,
                    ("mona", "KO002727"): np.nan,
                    ("mona", "KO002728"): np.nan,
                    ("mona", "KO002726"): np.nan,
                    ("mona", "PR100081"): "LOW-ENERGY CID",
                    ("mona", "PR100080"): "LOW-ENERGY CID",
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): "cid",
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "cid",
                },
                "collision_energy": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): "0",
                    ("mona", "KO002730"): "50 V",
                    ("mona", "KO002729"): "40 V",
                    ("mona", "KO008947"): "0.65",
                    ("mona", "KO002727"): "20 V",
                    ("mona", "KO002728"): "30 V",
                    ("mona", "KO002726"): "10 V",
                    ("mona", "PR100081"): "30 V",
                    ("mona", "PR100080"): "Ramp 5-60 V",
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): np.nan,
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): np.nan,
                },
                "instrument": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): np.nan,
                    ("mona", "KO002730"): np.nan,
                    ("mona", "KO002729"): np.nan,
                    ("mona", "KO008947"): np.nan,
                    ("mona", "KO002727"): np.nan,
                    ("mona", "KO002728"): np.nan,
                    ("mona", "KO002726"): np.nan,
                    ("mona", "PR100081"): np.nan,
                    ("mona", "PR100080"): np.nan,
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): np.nan,
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): np.nan,
                },
                "instrument_type": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): np.nan,
                    ("mona", "KO002730"): "LC-ESI-QQ",
                    ("mona", "KO002729"): "LC-ESI-QQ",
                    ("mona", "KO008947"): "LC-ESI-IT",
                    ("mona", "KO002727"): "LC-ESI-QQ",
                    ("mona", "KO002728"): "LC-ESI-QQ",
                    ("mona", "KO002726"): "LC-ESI-QQ",
                    ("mona", "PR100081"): "LC-ESI-QTOF",
                    ("mona", "PR100080"): "LC-ESI-QTOF",
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): np.nan,
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): np.nan,
                },
                "formula": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): "C10H13N5O3",
                    ("mona", "KO002730"): "C10H13N5O3",
                    ("mona", "KO002729"): "C10H13N5O3",
                    ("mona", "KO008947"): "C10H13N5O3",
                    ("mona", "KO002727"): "C10H13N5O3",
                    ("mona", "KO002728"): "C10H13N5O3",
                    ("mona", "KO002726"): "C10H13N5O3",
                    ("mona", "PR100081"): "C10H13N5O3",
                    ("mona", "PR100080"): "C10H13N5O3",
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): "C10H13N5O3",
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "C10H13N5O3",
                },
                "exact_mass": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): 251.101839276,
                    ("mona", "KO002730"): 251.101839276,
                    ("mona", "KO002729"): 251.101839276,
                    ("mona", "KO008947"): 251.101839276,
                    ("mona", "KO002727"): 251.101839276,
                    ("mona", "KO002728"): 251.101839276,
                    ("mona", "KO002726"): 251.101839276,
                    ("mona", "PR100081"): 251.101839276,
                    ("mona", "PR100080"): 251.101839276,
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): 251.101839276,
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): 251.101839276,
                },
                "inchi_key": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                    ("mona", "KO002730"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                    ("mona", "KO002729"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                    ("mona", "KO008947"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                    ("mona", "KO002727"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                    ("mona", "KO002728"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                    ("mona", "KO002726"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                    ("mona", "PR100081"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                    ("mona", "PR100080"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
                },
                "inchi": {
                    (
                        "metatlas",
                        "c7dddd297e104ca79caea72a90150532",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                    (
                        "mona",
                        "KO002730",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                    (
                        "mona",
                        "KO002729",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                    (
                        "mona",
                        "KO008947",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                    (
                        "mona",
                        "KO002727",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                    (
                        "mona",
                        "KO002728",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                    (
                        "mona",
                        "KO002726",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                    (
                        "mona",
                        "PR100081",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                    (
                        "mona",
                        "PR100080",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                    (
                        "metatlas",
                        "e0025042a1a844d6b6926252edce91e5",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                    (
                        "metatlas",
                        "0568278b45d244fcb5787792fc17b3ec",
                    ): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
                },
                "smiles": {
                    ("metatlas", "c7dddd297e104ca79caea72a90150532"): np.nan,
                    (
                        "mona",
                        "KO002730",
                    ): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
                    (
                        "mona",
                        "KO002729",
                    ): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
                    (
                        "mona",
                        "KO008947",
                    ): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
                    (
                        "mona",
                        "KO002727",
                    ): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
                    (
                        "mona",
                        "KO002728",
                    ): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
                    (
                        "mona",
                        "KO002726",
                    ): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
                    (
                        "mona",
                        "PR100081",
                    ): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
                    (
                        "mona",
                        "PR100080",
                    ): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
                    ("metatlas", "e0025042a1a844d6b6926252edce91e5"): np.nan,
                    ("metatlas", "0568278b45d244fcb5787792fc17b3ec"): np.nan,
                },
            }
        )
        .rename_axis(index=["database", "id"])
        .iloc[0:1]
    )
| 45.645291 | 1,115 | 0.518199 |
import getpass
import logging
import os
import sqlite3
import threading
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
from metatlas.datastructures import metatlas_dataset as mads
from metatlas.datastructures import metatlas_objects as metob
from metatlas.datastructures import object_helpers as metoh
logger = logging.getLogger(__name__)
def date_str_to_int(date_str):
    """Convert an ISO-8601 datetime string to integer Unix epoch seconds.

    NOTE: a string without a UTC offset yields a naive datetime, which
    ``datetime.timestamp()`` interprets in the local timezone.
    """
    moment = datetime.fromisoformat(date_str)
    return int(moment.timestamp())
@pytest.fixture(name="username", scope="session")
def fixture_username():
    """Session-scoped fixture: the OS login name of the user running the tests."""
    current_user = getpass.getuser()
    return current_user
@pytest.fixture(name="analysis_ids")
def fixture_analysis_ids(sqlite_with_atlas, username, lcmsrun, mocker, groups_controlled_vocab):
    """AnalysisIdentifiers for the per-user test atlas, with the LCMS file lookup
    patched to return the single test run."""
    mocker.patch("metatlas.plots.dill2plots.get_metatlas_files", return_value=[lcmsrun])
    ids_kwargs = {
        "source_atlas": f"HILICz150_ANT20190824_PRD_EMA_Unlab_POS_20201106_505892_{username}0",
        "experiment": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583",
        "output_type": "FinalEMA-HILIC",
        "polarity": "positive",
        "analysis_number": 0,
        "project_directory": str(os.getcwd()),
        "groups_controlled_vocab": groups_controlled_vocab,
    }
    return mads.AnalysisIdentifiers(**ids_kwargs)
@pytest.fixture(name="analysis_ids_with_2_cids")
def fixture_analysis_ids_with_2_cids(
    sqlite_with_atlas_with_2_cids, username, lcmsrun, mocker, groups_controlled_vocab
):
    """AnalysisIdentifiers for the two-compound test atlas, with the LCMS file
    lookup patched to return the single test run."""
    mocker.patch("metatlas.plots.dill2plots.get_metatlas_files", return_value=[lcmsrun])
    ids_kwargs = {
        "source_atlas": f"HILICz150_ANT20190824_PRD_EMA_Unlab_POS_20201106_505892_{username}1",
        "experiment": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583",
        "output_type": "FinalEMA-HILIC",
        "polarity": "positive",
        "analysis_number": 0,
        "project_directory": str(os.getcwd()),
        "groups_controlled_vocab": groups_controlled_vocab,
    }
    return mads.AnalysisIdentifiers(**ids_kwargs)
@pytest.fixture(name="sqlite")
def fixture_sqlite(username, change_test_dir, atlas):
    """Create an empty per-user sqlite workspace with all metob tables.

    Creates ``{username}_workspace.db`` in the current (tmp) directory, stores
    one empty instance of each metob type so the corresponding tables exist,
    then yields. Teardown closes the Workspace connection and resets the
    singleton so the next test gets a fresh Workspace.
    """
    # Fix: use the module-level logger (as the rest of this file does) instead
    # of the root-logger logging.debug, so the message is attributed correctly.
    logger.debug("creating database file in %s", os.getcwd())
    assert not os.path.exists(f"{username}_workspace.db")
    sqlite3.connect(f"{username}_workspace.db").close()
    logger.debug("Storing empty objects to create tables")
    # One empty instance per type is enough to make metob create its table.
    for empty_object in (
        metob.Atlas(),
        metob.CompoundIdentification(),
        metob.Compound(),
        metob.MzReference(),
        metob.RtReference(),
        metob.Reference(),
        metob.LcmsRun(),
    ):
        metob.store(empty_object)
    logger.debug("Done storing empty objects to create tables")
    yield
    metoh.Workspace.get_instance().close_connection()
    metoh.Workspace.instance = None
@pytest.fixture(name="sqlite_with_atlas")
def fixture_sqlite_with_atlas(sqlite, atlas, username):
    """Store the single-compound test atlas under a per-user name in the sqlite workspace."""
    atlas_name = f"HILICz150_ANT20190824_PRD_EMA_Unlab_POS_20201106_505892_{username}0"
    atlas.name = atlas_name
    logger.debug("Saving atlas %s", atlas_name)
    metob.store(atlas)
@pytest.fixture(name="sqlite_with_atlas_with_2_cids")
def fixture_sqlite_with_atlas_with_2_cids(sqlite, atlas_with_2_cids, username):
    """Store the two-compound test atlas under a per-user name in the sqlite workspace."""
    atlas_name = f"HILICz150_ANT20190824_PRD_EMA_Unlab_POS_20201106_505892_{username}1"
    atlas_with_2_cids.name = atlas_name
    logger.debug("Saving atlas %s", atlas_name)
    metob.store(atlas_with_2_cids)
@pytest.fixture(name="change_test_dir", scope="function", autouse=True)
def fixture_change_test_dir(request, tmp_path):
    """Autouse fixture: run each test from its own tmp_path, then restore the
    original working directory. Thread counts are logged on entry/exit."""
    logger.info("Incoming thread count %d", threading.active_count())
    os.chdir(tmp_path)
    logger.debug("changing dir to %s", tmp_path)
    yield
    # NOTE(review): request.config.invocation_dir is deprecated in newer pytest
    # (see invocation_params.dir / rootpath) — confirm the pinned pytest version.
    os.chdir(request.config.invocation_dir)
    logger.info("Outgoing thread count %d", threading.active_count())
@pytest.fixture(name="ms1_pos")
def fixture_ms1_pos():
    """Positive-mode MS1 data: 74 points with columns mz (~252.109),
    i (intensity), rt (~2.10-2.50 min) and polarity (all 1)."""
    return pd.DataFrame(
        data={
            "mz": {
                "0": 252.1089324951,
                "1": 252.1090087891,
                "2": 252.1088104248,
                "3": 252.1090087891,
                "4": 252.10887146,
                "5": 252.1089324951,
                "6": 252.1089324951,
                "7": 252.1088256836,
                "8": 252.1088867188,
                "9": 252.1090393066,
                "10": 252.1089782715,
                "11": 252.1089630127,
                "12": 252.1089630127,
                "13": 252.1089782715,
                "14": 252.1090240479,
                "15": 252.1089782715,
                "16": 252.1090240479,
                "17": 252.1089324951,
                "18": 252.1090393066,
                "19": 252.1088867188,
                "20": 252.10887146,
                "21": 252.1089324951,
                "22": 252.1089630127,
                "23": 252.1089935303,
                "24": 252.1089172363,
                "25": 252.1089477539,
                "26": 252.1090545654,
                "27": 252.1089630127,
                "28": 252.1090240479,
                "29": 252.1090087891,
                "30": 252.1090393066,
                "31": 252.1090240479,
                "32": 252.1089935303,
                "33": 252.1090240479,
                "34": 252.1089630127,
                "35": 252.1090087891,
                "36": 252.1090240479,
                "37": 252.1089172363,
                "38": 252.1089019775,
                "39": 252.1089477539,
                "40": 252.1089324951,
                "41": 252.1089477539,
                "42": 252.1089477539,
                "43": 252.1089477539,
                "44": 252.1089782715,
                "45": 252.1088867188,
                "46": 252.1089172363,
                "47": 252.1089324951,
                "48": 252.1089782715,
                "49": 252.1089477539,
                "50": 252.1089172363,
                "51": 252.1089324951,
                "52": 252.1089630127,
                "53": 252.1088867188,
                "54": 252.1089630127,
                "55": 252.1085205078,
                "56": 252.1090545654,
                "57": 252.1089935303,
                "58": 252.1088104248,
                "59": 252.1086578369,
                "60": 252.1089935303,
                "61": 252.1085510254,
                "62": 252.1082763672,
                "63": 252.1082458496,
                "64": 252.1084136963,
                "65": 252.1092224121,
                "66": 252.1091766357,
                "67": 252.1092834473,
                "68": 252.1087493896,
                "69": 252.1112518311,
                "70": 252.1088409424,
                "71": 252.1086425781,
                "72": 252.1091766357,
                "73": 252.1094055176,
            },
            "i": {
                "0": 312203.5,
                "1": 387914.59375,
                "2": 308308.5,
                "3": 334653.59375,
                "4": 339521.625,
                "5": 345527.21875,
                "6": 292437.34375,
                "7": 413614.53125,
                "8": 300285.28125,
                "9": 383848.71875,
                "10": 404313.21875,
                "11": 377231.34375,
                "12": 453965.5625,
                "13": 431327.0,
                "14": 523180.0625,
                "15": 510239.8125,
                "16": 631459.1875,
                "17": 807419.5,
                "18": 842647.5625,
                "19": 1053031.625,
                "20": 1082361.625,
                "21": 1198966.625,
                "22": 1109162.375,
                "23": 1126347.125,
                "24": 1373071.5,
                "25": 1589018.375,
                "26": 1281309.875,
                "27": 1660166.75,
                "28": 1492912.25,
                "29": 2029801.5,
                "30": 2029874.125,
                "31": 2035966.625,
                "32": 2010867.875,
                "33": 2036981.375,
                "34": 2148879.25,
                "35": 2359861.25,
                "36": 2054066.125,
                "37": 1691976.0,
                "38": 1778159.125,
                "39": 1776166.125,
                "40": 1752154.125,
                "41": 1575676.875,
                "42": 1199910.625,
                "43": 1259708.25,
                "44": 1087384.375,
                "45": 826077.125,
                "46": 802296.875,
                "47": 547785.125,
                "48": 545340.0625,
                "49": 584624.4375,
                "50": 468524.8125,
                "51": 305931.1875,
                "52": 330310.34375,
                "53": 309740.625,
                "54": 289212.71875,
                "55": 230440.9375,
                "56": 210549.390625,
                "57": 169972.390625,
                "58": 140521.234375,
                "59": 116637.953125,
                "60": 117197.625,
                "61": 84652.1171875,
                "62": 117615.578125,
                "63": 103500.921875,
                "64": 89320.9453125,
                "65": 76313.9296875,
                "66": 55575.00390625,
                "67": 76784.6796875,
                "68": 28829.162109375,
                "69": 26051.6171875,
                "70": 42957.18359375,
                "71": 50342.6953125,
                "72": 37611.33984375,
                "73": 38202.83203125,
            },
            "rt": {
                "0": 2.1030805111,
                "1": 2.1084616184,
                "2": 2.1139531136,
                "3": 2.1193552017,
                "4": 2.1248509884,
                "5": 2.1302509308,
                "6": 2.135682106,
                "7": 2.1411821842,
                "8": 2.1459801197,
                "9": 2.1513926983,
                "10": 2.1568279266,
                "11": 2.1622362137,
                "12": 2.1676549911,
                "13": 2.1730883121,
                "14": 2.179015398,
                "15": 2.1845297813,
                "16": 2.1900422573,
                "17": 2.1949694157,
                "18": 2.20002985,
                "19": 2.2055358887,
                "20": 2.2110378742,
                "21": 2.2165191174,
                "22": 2.2219588757,
                "23": 2.2273921967,
                "24": 2.2328462601,
                "25": 2.2382712364,
                "26": 2.2437169552,
                "27": 2.2492566109,
                "28": 2.2547125816,
                "29": 2.2601687908,
                "30": 2.2656960487,
                "31": 2.2704958916,
                "32": 2.2758042812,
                "33": 2.2813498974,
                "34": 2.2868082523,
                "35": 2.2922415733,
                "36": 2.2976748943,
                "37": 2.3031060696,
                "38": 2.308131218,
                "39": 2.313628912,
                "40": 2.3185498714,
                "41": 2.3239560127,
                "42": 2.3293914795,
                "43": 2.3349123001,
                "44": 2.3403663635,
                "45": 2.346799612,
                "46": 2.3522267342,
                "47": 2.3576600552,
                "48": 2.3631224632,
                "49": 2.3685662746,
                "50": 2.3740911484,
                "51": 2.3794057369,
                "52": 2.3848536015,
                "53": 2.3903660774,
                "54": 2.3953785896,
                "55": 2.4006638527,
                "56": 2.4062638283,
                "57": 2.411709547,
                "58": 2.4171659946,
                "59": 2.4226117134,
                "60": 2.4302260876,
                "61": 2.4357616901,
                "62": 2.4407405853,
                "63": 2.4461927414,
                "64": 2.451615572,
                "65": 2.4571509361,
                "66": 2.4627010822,
                "67": 2.4681572914,
                "68": 2.4735822678,
                "69": 2.4735822678,
                "70": 2.4787945747,
                "71": 2.4842174053,
                "72": 2.4896612167,
                "73": 2.495146513,
            },
            "polarity": {
                "0": 1,
                "1": 1,
                "2": 1,
                "3": 1,
                "4": 1,
                "5": 1,
                "6": 1,
                "7": 1,
                "8": 1,
                "9": 1,
                "10": 1,
                "11": 1,
                "12": 1,
                "13": 1,
                "14": 1,
                "15": 1,
                "16": 1,
                "17": 1,
                "18": 1,
                "19": 1,
                "20": 1,
                "21": 1,
                "22": 1,
                "23": 1,
                "24": 1,
                "25": 1,
                "26": 1,
                "27": 1,
                "28": 1,
                "29": 1,
                "30": 1,
                "31": 1,
                "32": 1,
                "33": 1,
                "34": 1,
                "35": 1,
                "36": 1,
                "37": 1,
                "38": 1,
                "39": 1,
                "40": 1,
                "41": 1,
                "42": 1,
                "43": 1,
                "44": 1,
                "45": 1,
                "46": 1,
                "47": 1,
                "48": 1,
                "49": 1,
                "50": 1,
                "51": 1,
                "52": 1,
                "53": 1,
                "54": 1,
                "55": 1,
                "56": 1,
                "57": 1,
                "58": 1,
                "59": 1,
                "60": 1,
                "61": 1,
                "62": 1,
                "63": 1,
                "64": 1,
                "65": 1,
                "66": 1,
                "67": 1,
                "68": 1,
                "69": 1,
                "70": 1,
                "71": 1,
                "72": 1,
                "73": 1,
            },
        }
    )
@pytest.fixture(name="ms2_pos")
def fixture_ms2_pos():
    """Positive-mode MS2 data: 8 fragment points (rt values come in pairs, i.e.
    two fragments per scan) with precursor m/z/intensity and collision energy."""
    return pd.DataFrame(
        data={
            "mz": {
                "0": 252.1081695557,
                "1": 252.1564941406,
                "2": 252.1087036133,
                "3": 252.1572875977,
                "4": 252.1089019775,
                "5": 252.1550292969,
                "6": 252.1090698242,
                "7": 252.1557617188,
            },
            "i": {
                "0": 32103.3515625,
                "1": 6470.0009765625,
                "2": 93112.0859375,
                "3": 7624.11328125,
                "4": 131062.0,
                "5": 6535.4560546875,
                "6": 76976.7265625,
                "7": 6090.6440429688,
            },
            "rt": {
                "0": 2.0097544193,
                "1": 2.0097544193,
                "2": 2.2203779221,
                "3": 2.2203779221,
                "4": 2.327804327,
                "5": 2.327804327,
                "6": 2.3452186584,
                "7": 2.3452186584,
            },
            "polarity": {"0": 1, "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1},
            "precursor_MZ": {
                "0": 252.0195159912,
                "1": 252.0195159912,
                "2": 252.10887146,
                "3": 252.10887146,
                "4": 252.0194854736,
                "5": 252.0194854736,
                "6": 252.1089477539,
                "7": 252.1089477539,
            },
            "precursor_intensity": {
                "0": 2748235.5,
                "1": 2748235.5,
                "2": 2872807.5,
                "3": 2872807.5,
                "4": 3536752.25,
                "5": 3536752.25,
                "6": 3046732.75,
                "7": 3046732.75,
            },
            "collision_energy": {
                "0": 23.3333339691,
                "1": 23.3333339691,
                "2": 23.3333339691,
                "3": 23.3333339691,
                "4": 23.3333339691,
                "5": 23.3333339691,
                "6": 23.3333339691,
                "7": 23.3333339691,
            },
        }
    )
@pytest.fixture(name="ms1_neg_empty")
def fixture_ms1_neg_empty():
    """Empty negative-mode MS1 table with the standard mz/i/rt/polarity columns."""
    return pd.DataFrame(data={column: {} for column in ("mz", "i", "rt", "polarity")})
@pytest.fixture(name="ms2_neg_empty")
def fixture_ms2_neg_empty():
    """Empty negative-mode MS2 table with the standard MS2 columns."""
    columns = (
        "mz",
        "i",
        "rt",
        "polarity",
        "precursor_MZ",
        "precursor_intensity",
        "collision_energy",
    )
    return pd.DataFrame(data={column: {} for column in columns})
@pytest.fixture(name="df_container")
def fixture_df_container(ms1_pos, ms2_pos, ms1_neg_empty, ms2_neg_empty):
    """Bundle the per-mode MS1/MS2 DataFrames under their container keys."""
    container = {}
    container["ms1_neg"] = ms1_neg_empty
    container["ms1_pos"] = ms1_pos
    container["ms2_neg"] = ms2_neg_empty
    container["ms2_pos"] = ms2_pos
    return container
@pytest.fixture(name="ms1_summary")
def fixture_ms1_summary():
    """Summary statistics for a single MS1 peak (peak position, centroid, height, area)."""
    return dict(
        num_ms1_datapoints=85.0,
        mz_peak=252.1092987060547,
        rt_peak=2.2775044441223145,
        mz_centroid=252.10915042669814,
        rt_centroid=2.218492414487913,
        peak_height=304761.90625,
        peak_area=7696977.46875,
    )
@pytest.fixture(name="msms")
def fixture_msms():
    """MSMS structure whose "data" entry holds an empty float64 array per spectrum field."""
    fields = (
        "mz",
        "i",
        "rt",
        "polarity",
        "precursor_MZ",
        "precursor_intensity",
        "collision_energy",
    )
    return {"data": {field: np.array([], dtype=np.float64) for field in fields}}
@pytest.fixture(name="groups_controlled_vocab")
def fixture_groups_controlled_vocab():
    """Controlled-vocabulary substrings used when grouping LCMS runs
    (presumably QC, injection blank, internal standard — inferred from names)."""
    return ["QC", "InjBl", "ISTD"]
@pytest.fixture(name="metatlas_dataset")
def fixture_metatlas_dataset(mocker, df_container, analysis_ids, lcmsrun, sqlite_with_atlas):
    """MetatlasDataset built from the stored test atlas with file I/O patched out."""
    patch_values = {
        "metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file": df_container,
        "metatlas.plots.dill2plots.get_metatlas_files": [lcmsrun],
    }
    for target, value in patch_values.items():
        mocker.patch(target, return_value=value)
    return mads.MetatlasDataset(ids=analysis_ids, save_metadata=False)
@pytest.fixture(name="metatlas_dataset_with_2_cids")
def fixture_metatlas_dataset_with_2_cids(
    mocker,
    df_container,
    analysis_ids_with_2_cids,
    lcmsrun,
    sqlite_with_atlas_with_2_cids,
):
    """MetatlasDataset built from the two-compound test atlas with file I/O patched out."""
    patch_values = {
        "metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file": df_container,
        "metatlas.plots.dill2plots.get_metatlas_files": [lcmsrun],
    }
    for target, value in patch_values.items():
        mocker.patch(target, return_value=value)
    return mads.MetatlasDataset(ids=analysis_ids_with_2_cids, save_metadata=False)
@pytest.fixture(name="metatlas_dataset_with_qc_runs")
def fixture_metatlas_dataset_with_qc_runs(
    mocker, df_container, analysis_ids, lcmsrun, sqlite_with_atlas, qc_lcmsruns
):
    """MetatlasDataset where the patched file lookup returns the QC run list."""
    patch_values = {
        "metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file": df_container,
        "metatlas.plots.dill2plots.get_metatlas_files": qc_lcmsruns,
    }
    for target, value in patch_values.items():
        mocker.patch(target, return_value=value)
    return mads.MetatlasDataset(ids=analysis_ids, save_metadata=False)
@pytest.fixture(name="eic")
def fixture_eic():
    """Extracted-ion chromatogram: parallel mz / rt / intensity lists (85 points each)."""
    return {
        "mz": [
            252.1089324951172,
            252.10943603515625,
            252.10926818847656,
            252.109375,
            252.10923767089844,
            252.10910034179688,
            252.10914611816406,
            252.1089630126953,
            252.10971069335938,
            252.1093292236328,
            252.10934448242188,
            252.109130859375,
            252.10935974121094,
            252.10939025878906,
            252.1090545654297,
            252.10916137695312,
            252.10946655273438,
            252.10923767089844,
            252.1093292236328,
            252.10919189453125,
            252.10914611816406,
            252.10897827148438,
            252.10934448242188,
            252.10928344726562,
            252.10888671875,
            252.10926818847656,
            252.109130859375,
            252.1090087890625,
            252.10934448242188,
            252.10939025878906,
            252.1093292236328,
            252.1091766357422,
            252.109130859375,
            252.1095428466797,
            252.10890197753906,
            252.1095428466797,
            252.109130859375,
            252.10911560058594,
            252.1091766357422,
            252.1088409423828,
            252.10916137695312,
            252.10935974121094,
            252.10928344726562,
            252.10922241210938,
            252.10914611816406,
            252.10922241210938,
            252.10894775390625,
            252.10906982421875,
            252.10914611816406,
            252.10916137695312,
            252.10910034179688,
            252.10916137695312,
            252.10934448242188,
            252.10899353027344,
            252.10928344726562,
            252.10897827148438,
            252.10916137695312,
            252.10928344726562,
            252.1092987060547,
            252.1089324951172,
            252.10914611816406,
            252.1090545654297,
            252.10914611816406,
            252.1090850830078,
            252.10894775390625,
            252.10914611816406,
            252.10911560058594,
            252.1090850830078,
            252.109130859375,
            252.10903930664062,
            252.10890197753906,
            252.109130859375,
            252.10885620117188,
            252.10914611816406,
            252.10926818847656,
            252.10888671875,
            252.109619140625,
            252.10922241210938,
            252.1092529296875,
            252.1099853515625,
            252.10972595214844,
            252.10910034179688,
            252.10935974121094,
            252.1088409423828,
            252.10838317871094,
            252.11212158203125,
        ],
        "rt": [
            1.7180122137069702,
            1.8222843408584595,
            1.838305115699768,
            1.8444031476974487,
            1.8705799579620361,
            1.875998616218567,
            1.8913277387619019,
            1.9020838737487793,
            1.9127358198165894,
            1.9397128820419312,
            1.9451169967651367,
            1.9505127668380737,
            1.955920934677124,
            1.966427206993103,
            1.9718105792999268,
            1.9769750833511353,
            1.9823375940322876,
            1.987752079963684,
            1.9932082891464233,
            1.9986457824707031,
            2.0094456672668457,
            2.019866466522217,
            2.030582904815674,
            2.036003589630127,
            2.0568389892578125,
            2.062201499938965,
            2.0675911903381348,
            2.0834577083587646,
            2.088857650756836,
            2.0939910411834717,
            2.099109649658203,
            2.104536771774292,
            2.1208388805389404,
            2.1262447834014893,
            2.1420176029205322,
            2.152921676635742,
            2.15836763381958,
            2.163788318634033,
            2.169198751449585,
            2.1755259037017822,
            2.180954933166504,
            2.18635892868042,
            2.191038131713867,
            2.1964569091796875,
            2.2018840312957764,
            2.2069132328033447,
            2.21236515045166,
            2.2177650928497314,
            2.2228589057922363,
            2.2283151149749756,
            2.2338151931762695,
            2.239321231842041,
            2.244842052459717,
            2.250317096710205,
            2.255610704421997,
            2.261033535003662,
            2.2665293216705322,
            2.2720251083374023,
            2.2775044441223145,
            2.28295636177063,
            2.288454294204712,
            2.29386043548584,
            2.299298048019409,
            2.304720878601074,
            2.310127019882202,
            2.3155603408813477,
            2.320981025695801,
            2.326420545578003,
            2.33160400390625,
            2.3370935916900635,
            2.3428516387939453,
            2.3483099937438965,
            2.3535475730895996,
            2.3589975833892822,
            2.364443302154541,
            2.3699119091033936,
            2.375347375869751,
            2.3808369636535645,
            2.3862972259521484,
            2.3917577266693115,
            2.397282600402832,
            2.402780294418335,
            2.4081971645355225,
            2.419055461883545,
            2.457223892211914,
            3.3080079555511475,
        ],
        "intensity": [
            34249.71484375,
            28511.658203125,
            41718.13671875,
            33448.546875,
            40190.94140625,
            32525.16015625,
            37058.60546875,
            51132.91015625,
            36473.0546875,
            42659.0859375,
            45187.6171875,
            51186.30078125,
            58456.5859375,
            43299.24609375,
            52062.02734375,
            42501.8671875,
            39734.91015625,
            41848.02734375,
            48979.640625,
            42957.48046875,
            54214.27734375,
            63583.64453125,
            38661.046875,
            47146.54296875,
            36974.3046875,
            37674.35546875,
            37412.4609375,
            47036.44921875,
            32295.888671875,
            39751.12109375,
            47359.0,
            57496.41796875,
            33690.4765625,
            36853.53515625,
            33045.0703125,
            33235.64453125,
            52481.1015625,
            48210.37109375,
            62178.734375,
            73049.2109375,
            52741.03125,
            88225.1953125,
            101593.296875,
            127965.625,
            124079.859375,
            134410.46875,
            148749.0,
            134068.8125,
            141625.515625,
            202721.015625,
            204341.703125,
            172160.484375,
            185859.765625,
            195729.234375,
            216657.453125,
            239248.65625,
            172232.296875,
            195105.046875,
            304761.90625,
            181052.265625,
            222467.5625,
            251571.53125,
            205874.765625,
            224279.0625,
            173697.359375,
            236325.078125,
            153999.28125,
            156835.59375,
            118963.8046875,
            105766.234375,
            103081.484375,
            97180.5625,
            95681.4140625,
            74239.0703125,
            69208.8984375,
            60604.1484375,
            37020.84765625,
            32874.484375,
            24641.875,
            23305.75,
            23413.94140625,
            42582.77734375,
            35980.16796875,
            25743.97265625,
            21777.99609375,
            59454.40234375,
        ],
    }
@pytest.fixture(name="atlas_df")
def fixture_atlas_df(metatlas_dataset):
    """Expose the test dataset's atlas as a DataFrame."""
    frame = metatlas_dataset.atlas_df
    return frame
@pytest.fixture(name="compound")
def fixture_compound(username):
    """Compound record for 2'-deoxyadenosine, mirroring a real database entry.

    All attribute values (IDs, URLs, InChI strings, timestamps) are verbatim
    copies of a stored record; only ``username`` varies per test session.
    """
    compound = metob.Compound()
    compound.unique_id = "60cd6743e56545c6a6cb066ec3553450"
    compound.mono_isotopic_molecular_weight = 251.101839276
    compound.creation_time = 1466212395
    compound.synonyms = "2'-deoxyadenosine" # value was pruned down
    compound.inchi_key = "OLXZPDWKRNYJJZ-RRKCRQDMSA-N"
    compound.chebi_url = "http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:17256"
    compound.permanent_charge = 0
    compound.img_abc_id = ""
    compound.neutralized_2d_inchi = "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)" # noqa: E501
    compound.lipidmaps_url = ""
    compound.source = "gnps///chebi///metacyc///hmdb"
    compound.kegg_url = "http://www.genome.jp/dbget-bin/www_bget?C00559"
    compound.hmdb_url = "http://www.hmdb.ca/metabolites/HMDB00101"
    compound.wikipedia_url = ""
    compound.head_id = "60cd6743e56545c6a6cb066ec3553450"
    compound.formula = "C10H13N5O3"
    compound.number_components = 1
    compound.iupac_name = ""
    compound.username = username
    compound.pubchem_compound_id = "13730"
    compound.description = "A purine 2'-deoxyribonucleoside having adenine as the nucleobase."
    compound.metacyc_id = "DEOXYADENOSINE"
    compound.kegg_id = "C00559"
    compound.hmdb_id = "HMDB00101"
    compound.chebi_id = "CHEBI:17256"
    compound.inchi = "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1"
    compound.neutralized_inchi_key = "OLXZPDWKRNYJJZ-RRKCRQDMSA-N"
    compound.prev_uid = "origin"
    compound.neutralized_inchi = "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1"
    compound.name = "2'-deoxyadenosine"
    compound.neutralized_2d_inchi_key = "OLXZPDWKRNYJJZ-UHFFFAOYSA-N"
    compound.num_free_radicals = 0
    compound.lipidmaps_id = ""
    compound.last_modified = 1612996604
    compound.pubchem_url = "http://pubchem.ncbi.nlm.nih.gov/compound/13730"
    return compound
@pytest.fixture(name="rt_reference")
def fixture_rt_reference(username):
    """Retention-time reference for 2'-deoxyadenosine (peak near 2.20 min)."""
    rt_ref = metob.RtReference()
    rt_ref.unique_id = "a845ddfdf8ef4713bcef3bdb84999030"
    rt_ref.username = username
    rt_ref.rt_units = "min"
    rt_ref.description = "No description"
    # Was previously a str ("2.1964640053707174") while rt_min/rt_max here and
    # rt_peak in fixture_rt_reference_2 are floats; normalized to float for
    # consistency. NOTE(review): confirm no test asserts the string form.
    rt_ref.rt_peak = 2.1964640053707174
    rt_ref.enabled = True
    rt_ref.creation_time = 1613002850
    rt_ref.lcms_run = None
    rt_ref.rt_min = 1.6964640053707174
    rt_ref.last_modified = 1613002979
    rt_ref.ref_type = ""
    rt_ref.prev_uid = "origin"
    rt_ref.rt_max = 2.6964640053707174
    rt_ref.name = "Untitled"
    rt_ref.head_id = "a845ddfdf8ef4713bcef3bdb84999030"
    return rt_ref
@pytest.fixture(name="mz_reference")
def fixture_mz_reference(username):
    """m/z reference for the [M+H]+ adduct at m/z 252.1091393."""
    # Field values applied in bulk; order matches the original assignments.
    values = {
        "unique_id": "eb6d03c9ef574051b92dad7b2fc259a2",
        "username": username,
        "adduct": "[M+H]+",
        "description": "No description",
        "mz_tolerance_units": "ppm",
        "enabled": True,
        "mz": 252.1091393,
        "creation_time": 1613002850,
        "lcms_run": None,
        "mz_tolerance": 20.0,
        "last_modified": 1613002979,
        "detected_polarity": "positive",
        "modification": "",
        "ref_type": "",
        "observed_formula": "",
        "prev_uid": "origin",
        "name": "Untitled",
        "head_id": "eb6d03c9ef574051b92dad7b2fc259a2",
    }
    mz_ref = metob.MzReference()
    for attr, val in values.items():
        setattr(mz_ref, attr, val)
    return mz_ref
@pytest.fixture(name="compound_identification")
def fixture_compound_identification(compound, rt_reference, mz_reference, username):
    """CompoundIdentification tying the compound fixture to its RT and m/z references."""
    # Field values applied in bulk; order matches the original assignments.
    field_values = {
        "unique_id": "18737c7141cc4efaa4545bead13ac751",
        "username": username,
        "description": "No description",
        "creation_time": 1613002849,
        "last_modified": 1613002979,
        "identification_grade": None,
        "prev_uid": "origin",
        "name": "2'-deoxyadenosine",
        "head_id": "18737c7141cc4efaa4545bead13ac751",
        "internal_standard_to_use": "",
        "internal_standard_id": "",
        "do_normalization": False,
        "identification_notes": "my id note",
        "ms2_notes": "-1,bad match to ref",
        "ms1_notes": "keep",
        "frag_references": [],
        "intensity_references": [],
        "compound": [compound],
        "mz_references": [mz_reference],
        "rt_references": [rt_reference],
    }
    ident = metob.CompoundIdentification()
    for field, value in field_values.items():
        setattr(ident, field, value)
    return ident
@pytest.fixture(name="atlas")
def fixture_atlas(compound_identification):
    """Atlas holding exactly one compound identification."""
    atlas = metob.Atlas()
    atlas.compound_identifications = [compound_identification]
    return atlas
@pytest.fixture(name="compound_2")
def fixture_compound_2(username):
    """Compound record for adenosine, mirroring a real database entry.

    All attribute values (IDs, URLs, InChI strings, timestamps) are verbatim
    copies of a stored record; only ``username`` varies per test session.
    """
    compound = metob.Compound()
    compound.chebi_id = "CHEBI:16335"
    compound.chebi_url = "http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:16335"
    compound.creation_time = 1466212384
    compound.description = "A ribonucleoside composed of a molecule of adenine attached to a ribofuranose moiety via a beta1N9-glycosidic bond."
    compound.formula = "C10H13N5O4"
    compound.head_id = "1ad02275f47b4033a451e99874f4764f"
    compound.hmdb_id = "HMDB00050"
    compound.hmdb_url = "http://www.hmdb.ca/metabolites/HMDB00050"
    compound.img_abc_id = ""
    compound.inchi = "InChI=1S/C10H13N5O4/c11-8-5-9(13-2-12-8)15(3-14-5)10-7(18)6(17)4(1-16)19-10/h2-4,6-7,10,16-18H,1H2,(H2,11,12,13)/t4-,6-,7-,10-/m1/s1"
    compound.inchi_key = "OIRDTQYFTABQOQ-KQYNXXCUSA-N"
    compound.iupac_name = ""
    compound.kegg_id = "C00212"
    compound.kegg_url = "http://www.genome.jp/dbget-bin/www_bget?C00212"
    compound.last_modified = 1612996604
    compound.lipidmaps_id = ""
    compound.lipidmaps_url = ""
    compound.metacyc_id = "ADENOSINE"
    compound.mono_isotopic_molecular_weight = 267.096753896
    compound.name = "adenosine"
    compound.neutralized_2d_inchi = "InChI=1S/C10H13N5O4/c11-8-5-9(13-2-12-8)15(3-14-5)10-7(18)6(17)4(1-16)19-10/h2-4,6-7,10,16-18H,1H2,(H2,11,12,13)"
    compound.neutralized_2d_inchi_key = "OIRDTQYFTABQOQ-UHFFFAOYSA-N"
    compound.neutralized_inchi = "InChI=1S/C10H13N5O4/c11-8-5-9(13-2-12-8)15(3-14-5)10-7(18)6(17)4(1-16)19-10/h2-4,6-7,10,16-18H,1H2,(H2,11,12,13)/t4-,6-,7-,10-/m1/s1"
    compound.neutralized_inchi_key = "OIRDTQYFTABQOQ-KQYNXXCUSA-N"
    compound.num_free_radicals = 0
    compound.number_components = 1
    compound.permanent_charge = 0
    compound.prev_uid = "origin"
    compound.pubchem_compound_id = "60961"
    compound.pubchem_url = "http://pubchem.ncbi.nlm.nih.gov/compound/60961"
    compound.source = "chebi///wikidata///metacyc///gnps///hmdb"
    compound.synonyms = "adenosine///58-61-7///Adenocard///Adenoscan"
    compound.unique_id = "1ad02275f47b4033a451e99874f4764f"
    compound.username = username
    compound.wikipedia_url = ""
    return compound
@pytest.fixture(name="rt_reference_2")
def fixture_rt_reference_2(username):
    """Retention-time reference for adenosine (peak near 3.02 min)."""
    # Field values applied in bulk; order matches the original assignments.
    values = {
        "creation_time": 1613002857,
        "description": "No description",
        "enabled": True,
        "head_id": "f74622bcef924f5390ba6e127633e731",
        "last_modified": 1613002980,
        "lcms_run": None,
        "name": "Untitled",
        "prev_uid": "origin",
        "ref_type": "",
        "rt_max": 3.5233184079926665,
        "rt_min": 2.5233184079926665,
        "rt_peak": 3.0233184079926665,
        "rt_units": "min",
        "unique_id": "f74622bcef924f5390ba6e127633e731",
        "username": username,
    }
    rt_ref = metob.RtReference()
    for attr, val in values.items():
        setattr(rt_ref, attr, val)
    return rt_ref
@pytest.fixture(name="mz_reference_2")
def fixture_mz_reference_2(username):
    """m/z reference for the [M+H]+ adduct at m/z 268.1040539."""
    # Field values applied in bulk; order matches the original assignments.
    values = {
        "adduct": "[M+H]+",
        "creation_time": 1613002857,
        "description": "No description",
        "detected_polarity": "positive",
        "enabled": True,
        "head_id": "b0e3cf0df44a4079be7908c6b525d3ac",
        "last_modified": 1613002980,
        "lcms_run": None,
        "modification": "",
        "mz": 268.1040539,
        "mz_tolerance": 20.0,
        "mz_tolerance_units": "ppm",
        "name": "Untitled",
        "observed_formula": "",
        "prev_uid": "origin",
        "ref_type": "",
        "unique_id": "b0e3cf0df44a4079be7908c6b525d3ac",
        "username": username,
    }
    mz_ref = metob.MzReference()
    for attr, val in values.items():
        setattr(mz_ref, attr, val)
    return mz_ref
@pytest.fixture(name="compound_identification_2")
def fixture_compound_identification_2(compound_2, rt_reference_2, mz_reference_2, username):
    """CompoundIdentification for adenosine, linking compound_2 with its RT and m/z references."""
    ident = metob.CompoundIdentification()
    ident.creation_time = 1613002856
    ident.description = "No description"
    ident.do_normalization = False
    # frag_references and intensity_references were each assigned twice with
    # the same empty-list value; the redundant second assignments were removed.
    ident.frag_references = []
    ident.intensity_references = []
    ident.head_id = "6cca7aa44c0e4a109f695ba980d69472"
    ident.identification_grade = None
    ident.identification_notes = ""
    ident.internal_standard_id = ""
    ident.internal_standard_to_use = ""
    ident.last_modified = 1613002980
    ident.ms1_notes = ""
    ident.ms2_notes = ""
    ident.name = "adenosine"
    ident.prev_uid = "origin"
    ident.unique_id = "6cca7aa44c0e4a109f695ba980d69472"
    ident.username = username
    ident.compound = [compound_2]
    ident.mz_references = [mz_reference_2]
    ident.rt_references = [rt_reference_2]
    return ident
@pytest.fixture(name="atlas_with_2_cids")
def fixture_atlas_with_2_cids(compound_identification, compound_identification_2):
    """Atlas holding two compound identifications."""
    atlas = metob.Atlas()
    atlas.compound_identifications = [compound_identification, compound_identification_2]
    return atlas
@pytest.fixture(name="lcmsrun")
def fixture_lcmsrun(username):
    """LcmsRun record for one positive-mode MSMS sample file.

    File paths, timestamps, and IDs are verbatim copies of a stored record
    from the OakGall experiment; only ``username`` varies per test session.
    """
    run = metob.LcmsRun()
    run.unique_id = "7ce51039cfca4426b4e51999ac45d018"
    run.username = username
    run.hdf5_file = "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5"
    run.description = "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583 20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.mzML"
    run.creation_time = 1605311923
    run.sample = None
    run.last_modified = 1620101765
    run.mzml_file = "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.mzML"
    run.prev_uid = "28323058b6e84a9db0f9e802544764e3"
    run.method = None
    run.name = "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.mzML"
    run.head_id = "7ce51039cfca4426b4e51999ac45d018"
    run.experiment = "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583"
    run.injection_volume = 0.0
    run.injection_volume_units = "uL"
    run.acquisition_time = 1604770080
    run.pass_qc = False
    return run
@pytest.fixture(name="qc_lcmsruns")
def fixture_qc_lcmsruns(username):
    """Six QC LcmsRun records: Pre and Post runs for POS, NEG, and FPS files.

    Each dict is a verbatim copy of a stored record (paths, IDs, timestamps);
    only ``username`` varies per test session. The dicts are expanded into
    metob.LcmsRun keyword arguments at the end.
    """
    json = [
        {
            "acquisition_time": 1604734158,
            "creation_time": date_str_to_int("2020-11-13T16:05:46"),
            "description": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583 "
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_0_QC_Pre_Rg70to1050-CE102040--QC_Run7.mzML",
            "experiment": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583",
            "hdf5_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_0_QC_Pre_Rg70to1050-CE102040--QC_Run7.h5",
            "head_id": "c0459a277f654fdeacf48243a34207b4",
            "injection_volume": 0.0,
            "injection_volume_units": "uL",
            "last_modified": date_str_to_int("2021-02-16T19:40:27"),
            "method": None,
            "mzml_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_0_QC_Pre_Rg70to1050-CE102040--QC_Run7.mzML",
            "name": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_0_QC_Pre_Rg70to1050-CE102040--QC_Run7.mzML",
            "pass_qc": False,
            "prev_uid": "origin",
            "sample": None,
            "unique_id": "c0459a277f654fdeacf48243a34207b4",
            "username": username,
        },
        {
            "acquisition_time": 1605168081,
            "creation_time": date_str_to_int("2020-11-13T15:57:27"),
            "description": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583 "
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_NEG_MSMS_0_QC_Post_Rg70to1050-CE102040--QC_Run309.mzML",
            "experiment": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583",
            "hdf5_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_NEG_MSMS_0_QC_Post_Rg70to1050-CE102040--QC_Run309.h5",
            "head_id": "9f33a0c1793e46fc9c70a19b587a0117",
            "injection_volume": 0.0,
            "injection_volume_units": "uL",
            "last_modified": date_str_to_int("2021-02-16T19:39:25"),
            "method": None,
            "mzml_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_NEG_MSMS_0_QC_Post_Rg70to1050-CE102040--QC_Run309.mzML",
            "name": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_NEG_MSMS_0_QC_Post_Rg70to1050-CE102040--QC_Run309.mzML",
            "pass_qc": False,
            "prev_uid": "origin",
            "sample": None,
            "unique_id": "9f33a0c1793e46fc9c70a19b587a0117",
            "username": username,
        },
        {
            "acquisition_time": 1605166749,
            "creation_time": date_str_to_int("2020-11-13T15:42:04"),
            "description": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583 "
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_0_QC_Post_Rg70to1050-CE102040--QC_Run308.mzML",
            "experiment": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583",
            "hdf5_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_0_QC_Post_Rg70to1050-CE102040--QC_Run308.h5",
            "head_id": "8c93ee10f2af4238ae905d86debc87ce",
            "injection_volume": 0.0,
            "injection_volume_units": "uL",
            "last_modified": date_str_to_int("2021-02-16T19:40:27"),
            "method": None,
            "mzml_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_0_QC_Post_Rg70to1050-CE102040--QC_Run308.mzML",
            "name": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_0_QC_Post_Rg70to1050-CE102040--QC_Run308.mzML",
            "pass_qc": False,
            "prev_uid": "origin",
            "sample": None,
            "unique_id": "8c93ee10f2af4238ae905d86debc87ce",
            "username": username,
        },
        {
            "acquisition_time": 1604735488,
            "creation_time": date_str_to_int("2020-11-13T15:52:48"),
            "description": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583 "
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_NEG_MSMS_0_QC_Pre_Rg70to1050-CE102040--QC_Run8.mzML",
            "experiment": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583",
            "hdf5_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_NEG_MSMS_0_QC_Pre_Rg70to1050-CE102040--QC_Run8.h5",
            "head_id": "855e0081dbb2473c8970f40db129d8f7",
            "injection_volume": 0.0,
            "injection_volume_units": "uL",
            "last_modified": date_str_to_int("2021-02-16T19:39:25"),
            "method": None,
            "mzml_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_NEG_MSMS_0_QC_Pre_Rg70to1050-CE102040--QC_Run8.mzML",
            "name": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_NEG_MSMS_0_QC_Pre_Rg70to1050-CE102040--QC_Run8.mzML",
            "pass_qc": False,
            "prev_uid": "origin",
            "sample": None,
            "unique_id": "855e0081dbb2473c8970f40db129d8f7",
            "username": username,
        },
        {
            "acquisition_time": 1605165417,
            "creation_time": date_str_to_int("2020-11-13T16:03:25"),
            "description": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583 "
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_FPS_MS1_0_QC_Post_Rg70to1050-CE102040--QC_Run307.mzML",
            "experiment": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583",
            "hdf5_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_FPS_MS1_0_QC_Post_Rg70to1050-CE102040--QC_Run307.h5",
            "head_id": "58905ea702f44d9199be928bc46fdb20",
            "injection_volume": 0.0,
            "injection_volume_units": "uL",
            "last_modified": date_str_to_int("2021-02-16T19:38:49"),
            "method": None,
            "mzml_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_FPS_MS1_0_QC_Post_Rg70to1050-CE102040--QC_Run307.mzML",
            "name": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_FPS_MS1_0_QC_Post_Rg70to1050-CE102040--QC_Run307.mzML",
            "pass_qc": False,
            "prev_uid": "origin",
            "sample": None,
            "unique_id": "58905ea702f44d9199be928bc46fdb20",
            "username": username,
        },
        {
            "acquisition_time": 1604732826,
            "creation_time": date_str_to_int("2020-11-13T16:15:04"),
            "description": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583 "
            "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_FPS_MS1_0_QC_Pre_Rg70to1050-CE102040--QC_Run6.mzML",
            "experiment": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583",
            "hdf5_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_FPS_MS1_0_QC_Pre_Rg70to1050-CE102040--QC_Run6.h5",
            "head_id": "392b1a859ed54e07bc34b55e06459db2",
            "injection_volume": 0.0,
            "injection_volume_units": "uL",
            "last_modified": date_str_to_int("2021-02-16T19:38:49"),
            "method": None,
            "mzml_file": "/project/projectdirs/metatlas/raw_data/akuftin/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583/20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_FPS_MS1_0_QC_Pre_Rg70to1050-CE102040--QC_Run6.mzML",
            "name": "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_FPS_MS1_0_QC_Pre_Rg70to1050-CE102040--QC_Run6.mzML",
            "pass_qc": False,
            "prev_uid": "origin",
            "sample": None,
            "unique_id": "392b1a859ed54e07bc34b55e06459db2",
            "username": username,
        },
    ]
    return [metob.LcmsRun(**run) for run in json]
@pytest.fixture(name="group")
def fixture_group(lcmsrun, username):
    """Group (POS Cone-S1) containing a single LCMS run."""
    # Field values applied in bulk; order matches the original assignments.
    settings = {
        "items": [lcmsrun],
        "unique_id": "61041d07b5a24ca5b88efbda8f319654",
        "username": username,
        "description": "No description",
        "creation_time": 1620146477,
        "last_modified": 1620146477,
        "prev_uid": "origin",
        "name": (
            f"20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_{username}0_Cone-S1"
        ),
        "head_id": "61041d07b5a24ca5b88efbda8f319654",
        "short_name": "POS_Cone-S1",
    }
    grp = metob.Group()
    for attr, value in settings.items():
        setattr(grp, attr, value)
    return grp
@pytest.fixture(name="group_with_2_lcmsruns")
def fixture_group_with_2_lcmsruns(lcmsrun, username):
    """Group (POS Cone-S1) containing the same LCMS run twice."""
    # Field values applied in bulk; order matches the original assignments.
    settings = {
        "items": [lcmsrun, lcmsrun],
        "unique_id": "61041d07b5a24ca5b88efbda8f319654",
        "username": username,
        "description": "No description",
        "creation_time": 1620146477,
        "last_modified": 1620146477,
        "prev_uid": "origin",
        "name": (
            f"20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_{username}0_Cone-S1"
        ),
        "head_id": "61041d07b5a24ca5b88efbda8f319654",
        "short_name": "POS_Cone-S1",
    }
    grp = metob.Group()
    for attr, value in settings.items():
        setattr(grp, attr, value)
    return grp
@pytest.fixture(name="hits")
def fixture_hits():
hits_plus = pd.DataFrame(
data={
"adduct": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "[M+H]+",
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "[M+H]+",
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "[M+H]+",
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "[M+H]+",
},
"copy_index": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
"metatlas",
"29247268c3cf4acfb649ebce7b0c9e0c",
"20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5",
2.6239302158,
],
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
"metatlas",
"50334867a31f4cab973459a59d5731c4",
"20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5",
2.6239302158,
],
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
"metatlas",
"8ba70c0f245247eeb6ba90011026763a",
"20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5",
2.6239302158,
],
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
"metatlas",
"9d53a44c42004e16a468e92e2b0a7009",
"20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5",
2.6239302158,
],
},
"inchi_key": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "GFFGJBXGBJISGV-UHFFFAOYSA-N",
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "GFFGJBXGBJISGV-UHFFFAOYSA-N",
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "GFFGJBXGBJISGV-UHFFFAOYSA-N",
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "GFFGJBXGBJISGV-UHFFFAOYSA-N",
},
"measured_precursor_intensity": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 1779719.0,
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 1779719.0,
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 1779719.0,
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 1779719.0,
},
"measured_precursor_mz": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 136.06199646,
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 136.06199646,
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 136.06199646,
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 136.06199646,
},
"msv_query_aligned": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
[
np.nan,
53.2601699829,
59.4822044373,
65.2955932617,
66.7956771851,
75.0065155029,
75.0689544678,
75.4281921387,
84.2779464722,
91.0504608154,
94.0367355347,
102.1198806763,
108.4924850464,
119.0352630615,
121.0889511108,
123.1165771484,
135.7551269531,
136.0224761963,
136.0620117188,
136.1121368408,
136.3276824951,
137.046295166,
],
[
np.nan,
2901.2893066406,
3058.2041015625,
2817.9626464844,
3278.6765136719,
3068.3347167969,
8541.603515625,
2778.4802246094,
2839.1333007812,
4060.1638183594,
5292.673828125,
3443.1560058594,
3947.8520507812,
8919.974609375,
5798.638671875,
3330.2827148438,
2859.4689941406,
18918.111328125,
625742.3125,
91467.8984375,
4438.6645507812,
11957.54296875,
],
],
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
[
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
53.2601699829,
59.4822044373,
65.2955932617,
66.7956771851,
75.0065155029,
75.0689544678,
75.4281921387,
84.2779464722,
91.0504608154,
94.0367355347,
102.1198806763,
108.4924850464,
119.0352630615,
121.0889511108,
123.1165771484,
135.7551269531,
136.0224761963,
136.0620117188,
136.1121368408,
136.3276824951,
137.046295166,
],
[
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
2901.2893066406,
3058.2041015625,
2817.9626464844,
3278.6765136719,
3068.3347167969,
8541.603515625,
2778.4802246094,
2839.1333007812,
4060.1638183594,
5292.673828125,
3443.1560058594,
3947.8520507812,
8919.974609375,
5798.638671875,
3330.2827148438,
2859.4689941406,
18918.111328125,
625742.3125,
91467.8984375,
4438.6645507812,
11957.54296875,
],
],
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
[
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
53.2601699829,
59.4822044373,
65.2955932617,
66.7956771851,
75.0065155029,
75.0689544678,
75.4281921387,
84.2779464722,
91.0504608154,
94.0367355347,
102.1198806763,
108.4924850464,
119.0352630615,
121.0889511108,
123.1165771484,
135.7551269531,
136.0224761963,
136.0620117188,
136.1121368408,
136.3276824951,
137.046295166,
],
[
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
2901.2893066406,
3058.2041015625,
2817.9626464844,
3278.6765136719,
3068.3347167969,
8541.603515625,
2778.4802246094,
2839.1333007812,
4060.1638183594,
5292.673828125,
3443.1560058594,
3947.8520507812,
8919.974609375,
5798.638671875,
3330.2827148438,
2859.4689941406,
18918.111328125,
625742.3125,
91467.8984375,
4438.6645507812,
11957.54296875,
],
],
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
[
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
53.2601699829,
59.4822044373,
65.2955932617,
66.7956771851,
75.0065155029,
75.0689544678,
75.4281921387,
84.2779464722,
91.0504608154,
94.0367355347,
102.1198806763,
108.4924850464,
119.0352630615,
121.0889511108,
123.1165771484,
135.7551269531,
136.0224761963,
136.0620117188,
136.1121368408,
136.3276824951,
137.046295166,
],
[
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
2901.2893066406,
3058.2041015625,
2817.9626464844,
3278.6765136719,
3068.3347167969,
8541.603515625,
2778.4802246094,
2839.1333007812,
4060.1638183594,
5292.673828125,
3443.1560058594,
3947.8520507812,
8919.974609375,
5798.638671875,
3330.2827148438,
2859.4689941406,
18918.111328125,
625742.3125,
91467.8984375,
4438.6645507812,
11957.54296875,
],
],
},
"msv_ref_aligned": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
[
51.3947,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
91.0548,
94.0404,
np.nan,
np.nan,
119.035,
np.nan,
np.nan,
np.nan,
136.022,
136.062,
136.112,
np.nan,
137.046,
],
[
1870.1,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
3051.11,
13543.2,
np.nan,
np.nan,
28284.0,
np.nan,
np.nan,
np.nan,
55585.3,
1607820.0,
17469.6,
np.nan,
43758.8,
],
],
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
[
52.1001,
53.5537,
54.6096,
57.8238,
63.3067,
64.108,
82.7587,
93.0862,
94.6115,
111.471,
113.584,
115.21,
137.067,
137.476,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
94.0407,
np.nan,
np.nan,
119.036,
np.nan,
np.nan,
np.nan,
np.nan,
136.062,
np.nan,
np.nan,
137.046,
],
[
491091.0,
614205.0,
486992.0,
569335.0,
2513570.0,
554436.0,
577010.0,
580100.0,
930338.0,
567270.0,
515519.0,
616418.0,
17234000.0,
693366.0,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
2437690.0,
np.nan,
np.nan,
7680000.0,
np.nan,
np.nan,
np.nan,
np.nan,
514804000.0,
np.nan,
np.nan,
4940020.0,
],
],
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
[
59.3596,
62.4513,
63.2027,
76.4601,
86.8208,
115.912,
115.975,
123.375,
137.067,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
94.0407,
np.nan,
np.nan,
119.036,
np.nan,
np.nan,
np.nan,
np.nan,
136.062,
np.nan,
np.nan,
137.046,
],
[
55769.1,
43616.3,
118692.0,
54358.0,
48393.1,
45996.2,
55157.9,
61623.1,
1357390.0,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
121260.0,
np.nan,
np.nan,
306316.0,
np.nan,
np.nan,
np.nan,
np.nan,
41864400.0,
np.nan,
np.nan,
370525.0,
],
],
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": [
[
55.0301,
56.3854,
66.7513,
67.0298,
81.1529,
82.4076,
92.0251,
92.3892,
104.302,
109.051,
112.051,
135.054,
135.653,
136.227,
136.474,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
94.0405,
np.nan,
np.nan,
119.035,
np.nan,
np.nan,
np.nan,
np.nan,
136.062,
np.nan,
np.nan,
137.046,
],
[
246689.0,
186484.0,
198526.0,
974057.0,
232546.0,
306008.0,
388476.0,
265393.0,
246201.0,
1625240.0,
1318880.0,
345780.0,
925801.0,
254046.0,
715569.0,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
7436560.0,
np.nan,
np.nan,
23732500.0,
np.nan,
np.nan,
np.nan,
np.nan,
884493000.0,
np.nan,
np.nan,
23845700.0,
],
],
},
"name": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "adenine",
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "adenine",
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "adenine",
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": "adenine",
},
"num_matches": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 7,
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 4,
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 4,
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 4,
},
"precursor_mz": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 136.0617952,
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 136.0617952,
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 136.0617952,
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 136.0617952,
},
"score": {
"('metatlas', '29247268c3cf4acfb649ebce7b0c9e0c', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 0.7861480398,
"('metatlas', '50334867a31f4cab973459a59d5731c4', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 0.8248297009,
"('metatlas', '8ba70c0f245247eeb6ba90011026763a', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 0.8078499983,
"('metatlas', '9d53a44c42004e16a468e92e2b0a7009', '20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_49_Cone-S1_1_Rg70to1050-CE102040-QlobataAkingi-S1_Run34.h5', 2.6239302158355713)": 0.8274397807,
},
}
)
hits_plus.index = pd.MultiIndex.from_tuples(
hits_plus["copy_index"], names=["database", "id", "file_name", "msms_scan"]
)
hits_plus.drop(columns=["copy_index"], inplace=True)
return hits_plus
@pytest.fixture(name="msms_refs")
def fixture_msms_refs():
return (
pd.DataFrame(
data={
"name": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): "2'-deoxyadenosine",
("mona", "KO002730"): "2'-Deoxyadenosine",
("mona", "KO002729"): "2'-Deoxyadenosine",
("mona", "KO008947"): "2'-Deoxyadenosine",
("mona", "KO002727"): "2'-Deoxyadenosine",
("mona", "KO002728"): "2'-Deoxyadenosine",
("mona", "KO002726"): "2'-Deoxyadenosine",
("mona", "PR100081"): "2'-Deoxyadenosine",
("mona", "PR100080"): "2'-Deoxyadenosine",
("metatlas", "e0025042a1a844d6b6926252edce91e5"): "2'-deoxyadenosine",
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "2'-deoxyadenosine",
},
"spectrum": {
(
"metatlas",
"c7dddd297e104ca79caea72a90150532",
): "[[57.0345, 63.3177, 63.3205, 69.0344, 71.0499, 73.0292, 84.9778, 99.0447, 117.055, 118.059, 136.062, 137.066, 236.709, 252.109, 253.112], [176328.0, 328818.0, 274432.0, 197637.0, 896360.0, 1192020.0, 378547.0, 3921880.0, 15737700.0, 266131.0, 144220000.0, 3455270.0, 185227.0, 20960800.0, 1284450.0]]",
(
"mona",
"KO002730",
): "[[40.9, 43.1, 45.0, 57.1, 67.1, 69.1, 71.1, 72.7, 76.8, 79.0, 80.8, 83.2, 91.8, 92.4, 93.2, 94.1, 95.0, 102.8, 105.3, 107.3, 109.1, 116.8, 119.2, 123.0, 129.9, 136.2, 165.9], [3.501946, 10.700389, 5.447471, 16.536965, 1.945525, 9.727626, 5.642023, 8.171206, 24.513619, 66.731518, 2.918288, 4.474708, 2.529183, 1.750973, 0.583658, 9.533074, 3.891051, 0.972763, 12.062257, 2.140078, 5.058366, 0.389105, 48.44358, 2.529183, 14.007782, 100.0, 0.389105]]",
(
"mona",
"KO002729",
): "[[35.8, 41.0, 43.1, 45.2, 52.9, 55.2, 57.4, 59.1, 61.4, 69.2, 71.1, 73.0, 77.0, 79.0, 81.3, 83.1, 91.2, 94.0, 99.3, 99.9, 101.1, 103.1, 105.0, 106.7, 107.4, 108.9, 111.1, 115.0, 117.2, 119.1, 120.4, 123.1, 130.1, 135.1, 136.0, 136.9, 141.3, 147.1, 166.0, 170.7], [0.170503, 0.383632, 3.665814, 0.937766, 0.127877, 0.895141, 9.079284, 0.852515, 0.341006, 4.390452, 7.1185, 5.242967, 1.960784, 32.139812, 1.875533, 2.429668, 1.278772, 1.491901, 2.216539, 1.364024, 1.364024, 0.511509, 8.01364, 0.468883, 0.255754, 1.321398, 0.426257, 0.255754, 1.193521, 6.734868, 0.170503, 6.990622, 8.823529, 0.213129, 100.0, 0.468883, 0.085251, 0.29838, 0.639386, 0.127877]]",
(
"mona",
"KO008947",
): "[[71.1, 73.2, 81.1, 89.2, 94.1, 99.1, 101.0, 109.0, 117.1, 119.1, 128.9, 130.0, 133.3, 136.1, 136.9, 137.8, 149.3, 156.5, 165.1, 187.1, 195.1, 213.8, 215.1, 216.1, 217.1, 223.9, 234.1, 251.0, 252.1, 253.0, 270.9], [0.01998, 0.014577, 0.003889, 0.047639, 0.031539, 0.085402, 0.011502, 0.010675, 0.361156, 0.125255, 0.051259, 0.022955, 0.011046, 100.0, 0.116678, 0.01325, 0.029859, 0.006369, 0.003048, 0.01887, 0.066214, 0.003726, 0.011393, 0.013584, 0.013105, 0.010913, 0.080999, 0.012124, 0.179916, 0.010441, 0.005516]]",
(
"mona",
"KO002727",
): "[[54.2, 57.3, 59.1, 69.2, 71.1, 72.2, 72.8, 74.9, 78.9, 80.1, 80.8, 83.1, 85.4, 87.0, 88.9, 91.1, 93.8, 95.2, 99.0, 100.0, 101.0, 105.0, 107.0, 109.0, 111.5, 113.0, 115.2, 116.3, 117.2, 119.1, 121.3, 122.2, 123.2, 124.4, 129.1, 130.0, 133.0, 135.1, 136.1, 139.4, 145.7, 149.4, 153.0, 157.4, 158.4, 163.0, 165.3, 166.4, 175.1, 176.4, 179.3, 181.1, 184.0, 184.7, 189.2, 191.5, 199.3, 203.5, 207.2, 217.3, 220.1, 235.3, 252.2], [2.60144, 3.583115, 0.098168, 0.179974, 9.080497, 0.294503, 0.507199, 0.081806, 1.014398, 0.13089, 0.114529, 0.13089, 0.098168, 0.212696, 0.229058, 0.490838, 0.065445, 0.196335, 0.998037, 5.039267, 4.744764, 1.210733, 0.147251, 0.376309, 1.963351, 1.259817, 0.081806, 0.065445, 5.611911, 0.114529, 0.556283, 1.194372, 35.02945, 0.049084, 0.91623, 1.996073, 0.114529, 0.556283, 100.0, 0.114529, 0.081806, 0.147251, 0.098168, 0.081806, 0.179974, 0.114529, 0.147251, 0.768979, 6.25, 0.114529, 0.343586, 0.032723, 0.310864, 0.163613, 0.310864, 0.278141, 0.65445, 0.39267, 0.212696, 1.897906, 0.294503, 7.509817, 3.043194]]",
(
"mona",
"KO002728",
): "[[36.0, 42.8, 55.4, 57.3, 59.3, 60.8, 68.8, 71.0, 72.8, 76.2, 77.4, 79.1, 80.9, 83.4, 85.3, 87.3, 88.9, 91.0, 93.2, 95.0, 97.0, 99.1, 100.2, 101.1, 102.4, 105.1, 107.0, 109.2, 111.2, 112.9, 117.0, 119.4, 121.0, 122.5, 123.2, 128.9, 130.2, 133.2, 136.2, 150.9, 158.0, 161.1, 163.0, 166.3, 175.2, 179.2, 189.0, 191.2, 207.1, 217.5, 235.3], [0.804783, 0.66682, 0.229938, 6.829156, 0.459876, 0.091975, 2.230398, 10.255231, 3.173143, 0.137963, 0.160957, 13.152449, 0.896758, 1.425615, 0.206944, 0.091975, 0.436882, 0.413888, 0.137963, 0.551851, 0.18395, 3.885951, 2.644286, 2.943205, 0.091975, 4.828696, 0.275926, 0.505863, 1.241665, 0.229938, 4.621752, 0.804783, 0.252932, 0.252932, 20.303518, 0.298919, 6.36928, 0.229938, 100.0, 0.045988, 0.321913, 0.229938, 0.068981, 1.172683, 1.057714, 1.034721, 0.298919, 0.068981, 0.114969, 0.344907, 2.023454]]",
(
"mona",
"KO002726",
): "[[54.0, 57.2, 71.1, 72.2, 73.5, 77.7, 80.2, 82.4, 87.0, 90.3, 100.0, 101.2, 104.6, 106.0, 108.3, 109.4, 111.1, 112.3, 113.3, 116.4, 117.3, 118.2, 121.3, 122.3, 123.2, 125.9, 129.0, 129.9, 131.2, 135.1, 136.2, 137.4, 139.4, 140.9, 143.8, 146.3, 148.2, 152.5, 153.1, 159.7, 162.1, 166.3, 171.1, 175.2, 177.1, 178.0, 179.0, 180.1, 184.1, 185.5, 188.0, 192.2, 198.2, 199.2, 202.6, 203.1, 206.9, 207.4, 216.3, 217.6, 220.2, 224.2, 234.3, 235.2, 252.3], [2.518936, 0.334684, 3.399683, 11.044566, 0.052845, 0.334684, 0.193764, 0.088075, 0.07046, 2.096178, 7.02836, 1.514885, 0.10569, 0.052845, 0.546063, 0.140919, 0.140919, 0.10569, 24.255769, 0.140919, 0.352299, 0.211379, 0.334684, 4.192355, 38.400564, 0.176149, 0.123305, 0.052845, 0.140919, 0.123305, 37.819271, 0.07046, 0.052845, 0.123305, 0.228994, 0.07046, 0.10569, 0.669368, 1.638189, 0.07046, 0.123305, 1.092126, 0.334684, 10.991721, 0.10569, 0.07046, 0.07046, 0.211379, 2.378017, 0.052845, 0.123305, 5.302096, 0.246609, 0.387529, 0.211379, 0.634138, 0.123305, 0.123305, 0.07046, 7.592038, 1.46204, 0.088075, 1.726264, 59.098115, 100.0]]",
("mona", "PR100081"): "[[117.0574, 136.0651, 252.1096], [15.868531, 100.0, 48.929209]]",
("mona", "PR100080"): "[[136.0631, 252.1096], [39.169289, 100.0]]",
(
"metatlas",
"e0025042a1a844d6b6926252edce91e5",
): "[[66.7578, 70.38, 73.6972, 73.9685, 82.2146, 92.3969, 102.12, 104.312, 111.673, 136.062, 139.036, 158.043, 161.337, 168.39, 202.526, 235.987, 246.005, 274.002, 274.091, 274.273], [2649.93, 1977.51, 2080.95, 2643.01, 2450.61, 2214.72, 2214.78, 2349.55, 2163.28, 2982.16, 9507.9, 29909.8, 2525.4, 2199.08, 2170.93, 2443.12, 3793.61, 24676.1, 534389.0, 2775.85]]",
(
"metatlas",
"0568278b45d244fcb5787792fc17b3ec",
): "[[51.5615, 57.0342, 64.0128, 69.0341, 71.0498, 73.029, 73.9804, 81.0338, 82.4275, 88.5237, 93.5638, 99.0444, 105.478, 117.055, 118.698, 126.793, 136.062, 252.108, 252.133], [845648.0, 896704.0, 912599.0, 2052520.0, 5955880.0, 8407590.0, 965782.0, 1548360.0, 1093910.0, 924679.0, 809760.0, 17986900.0, 949617.0, 56688000.0, 1347680.0, 891451.0, 468230000.0, 73715000.0, 1526730.0]]",
},
"decimal": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): 4,
("mona", "KO002730"): 3,
("mona", "KO002729"): 3,
("mona", "KO008947"): 1,
("mona", "KO002727"): 3,
("mona", "KO002728"): 3,
("mona", "KO002726"): 3,
("mona", "PR100081"): 4,
("mona", "PR100080"): 4,
("metatlas", "e0025042a1a844d6b6926252edce91e5"): 4,
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): 4,
},
"precursor_mz": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): 252.109,
("mona", "KO002730"): 252.0,
("mona", "KO002729"): 252.0,
("mona", "KO008947"): 252.0,
("mona", "KO002727"): 252.0,
("mona", "KO002728"): 252.0,
("mona", "KO002726"): 252.0,
("mona", "PR100081"): 252.10963,
("mona", "PR100080"): 252.10963,
("metatlas", "e0025042a1a844d6b6926252edce91e5"): 274.091,
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): 252.109,
},
"polarity": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): "positive",
("mona", "KO002730"): "positive",
("mona", "KO002729"): "positive",
("mona", "KO008947"): "positive",
("mona", "KO002727"): "positive",
("mona", "KO002728"): "positive",
("mona", "KO002726"): "positive",
("mona", "PR100081"): "positive",
("mona", "PR100080"): "positive",
("metatlas", "e0025042a1a844d6b6926252edce91e5"): "positive",
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "positive",
},
"adduct": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): np.nan,
("mona", "KO002730"): "[M+H]+",
("mona", "KO002729"): "[M+H]+",
("mona", "KO008947"): "[M+H]+",
("mona", "KO002727"): "[M+H]+",
("mona", "KO002728"): "[M+H]+",
("mona", "KO002726"): "[M+H]+",
("mona", "PR100081"): "[M+H]+",
("mona", "PR100080"): "[M+H]+",
("metatlas", "e0025042a1a844d6b6926252edce91e5"): "[M+Na]+",
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "[M+H]+",
},
"fragmentation_method": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): "cid",
("mona", "KO002730"): np.nan,
("mona", "KO002729"): np.nan,
("mona", "KO008947"): np.nan,
("mona", "KO002727"): np.nan,
("mona", "KO002728"): np.nan,
("mona", "KO002726"): np.nan,
("mona", "PR100081"): "LOW-ENERGY CID",
("mona", "PR100080"): "LOW-ENERGY CID",
("metatlas", "e0025042a1a844d6b6926252edce91e5"): "cid",
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "cid",
},
"collision_energy": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): "0",
("mona", "KO002730"): "50 V",
("mona", "KO002729"): "40 V",
("mona", "KO008947"): "0.65",
("mona", "KO002727"): "20 V",
("mona", "KO002728"): "30 V",
("mona", "KO002726"): "10 V",
("mona", "PR100081"): "30 V",
("mona", "PR100080"): "Ramp 5-60 V",
("metatlas", "e0025042a1a844d6b6926252edce91e5"): np.nan,
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): np.nan,
},
"instrument": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): np.nan,
("mona", "KO002730"): np.nan,
("mona", "KO002729"): np.nan,
("mona", "KO008947"): np.nan,
("mona", "KO002727"): np.nan,
("mona", "KO002728"): np.nan,
("mona", "KO002726"): np.nan,
("mona", "PR100081"): np.nan,
("mona", "PR100080"): np.nan,
("metatlas", "e0025042a1a844d6b6926252edce91e5"): np.nan,
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): np.nan,
},
"instrument_type": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): np.nan,
("mona", "KO002730"): "LC-ESI-QQ",
("mona", "KO002729"): "LC-ESI-QQ",
("mona", "KO008947"): "LC-ESI-IT",
("mona", "KO002727"): "LC-ESI-QQ",
("mona", "KO002728"): "LC-ESI-QQ",
("mona", "KO002726"): "LC-ESI-QQ",
("mona", "PR100081"): "LC-ESI-QTOF",
("mona", "PR100080"): "LC-ESI-QTOF",
("metatlas", "e0025042a1a844d6b6926252edce91e5"): np.nan,
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): np.nan,
},
"formula": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): "C10H13N5O3",
("mona", "KO002730"): "C10H13N5O3",
("mona", "KO002729"): "C10H13N5O3",
("mona", "KO008947"): "C10H13N5O3",
("mona", "KO002727"): "C10H13N5O3",
("mona", "KO002728"): "C10H13N5O3",
("mona", "KO002726"): "C10H13N5O3",
("mona", "PR100081"): "C10H13N5O3",
("mona", "PR100080"): "C10H13N5O3",
("metatlas", "e0025042a1a844d6b6926252edce91e5"): "C10H13N5O3",
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "C10H13N5O3",
},
"exact_mass": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): 251.101839276,
("mona", "KO002730"): 251.101839276,
("mona", "KO002729"): 251.101839276,
("mona", "KO008947"): 251.101839276,
("mona", "KO002727"): 251.101839276,
("mona", "KO002728"): 251.101839276,
("mona", "KO002726"): 251.101839276,
("mona", "PR100081"): 251.101839276,
("mona", "PR100080"): 251.101839276,
("metatlas", "e0025042a1a844d6b6926252edce91e5"): 251.101839276,
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): 251.101839276,
},
"inchi_key": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
("mona", "KO002730"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
("mona", "KO002729"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
("mona", "KO008947"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
("mona", "KO002727"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
("mona", "KO002728"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
("mona", "KO002726"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
("mona", "PR100081"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
("mona", "PR100080"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
("metatlas", "e0025042a1a844d6b6926252edce91e5"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): "OLXZPDWKRNYJJZ-RRKCRQDMSA-N",
},
"inchi": {
(
"metatlas",
"c7dddd297e104ca79caea72a90150532",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
(
"mona",
"KO002730",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
(
"mona",
"KO002729",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
(
"mona",
"KO008947",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
(
"mona",
"KO002727",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
(
"mona",
"KO002728",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
(
"mona",
"KO002726",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
(
"mona",
"PR100081",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
(
"mona",
"PR100080",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
(
"metatlas",
"e0025042a1a844d6b6926252edce91e5",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
(
"metatlas",
"0568278b45d244fcb5787792fc17b3ec",
): "InChI=1S/C10H13N5O3/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(17)6(2-16)18-7/h3-7,16-17H,1-2H2,(H2,11,12,13)/t5-,6+,7+/m0/s1",
},
"smiles": {
("metatlas", "c7dddd297e104ca79caea72a90150532"): np.nan,
(
"mona",
"KO002730",
): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
(
"mona",
"KO002729",
): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
(
"mona",
"KO008947",
): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
(
"mona",
"KO002727",
): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
(
"mona",
"KO002728",
): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
(
"mona",
"KO002726",
): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
(
"mona",
"PR100081",
): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
(
"mona",
"PR100080",
): "[H]OC([H])([H])C1([H])OC([H])(N2C([H])=NC=3C(=NC([H])=NC32)N([H])[H])C([H])([H])C1([H])O[H]",
("metatlas", "e0025042a1a844d6b6926252edce91e5"): np.nan,
("metatlas", "0568278b45d244fcb5787792fc17b3ec"): np.nan,
},
}
)
.rename_axis(index=["database", "id"])
.iloc[0:1]
)
| true | true |
1c38c6e2555cdc9fef807ccf4fe2adf10311bc9a | 13,688 | py | Python | tensorflow_text/python/ops/bert_tokenizer_test.py | hashim361/text | 141ed3ae72078a5da431831ce718c8d09fbf4f92 | [
"Apache-2.0"
] | 1 | 2020-10-10T14:10:07.000Z | 2020-10-10T14:10:07.000Z | tensorflow_text/python/ops/bert_tokenizer_test.py | pranayjoshi/text | 5a12211ac370f989ca359d232d3081a889e859dd | [
"Apache-2.0"
] | null | null | null | tensorflow_text/python/ops/bert_tokenizer_test.py | pranayjoshi/text | 5a12211ac370f989ca359d232d3081a889e859dd | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding=utf-8
r"""Tests for BertTokenizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow_text.python.ops import bert_tokenizer
def _utf8(x):
return x.encode('utf-8')
# TODO(thuang513): It appears there isn't a Ragged version of substr; consider
# checking this into core TF.
def _ragged_substr(text_input, begin, end):
text_input_flat = None
if ragged_tensor.is_ragged(text_input):
text_input_flat = text_input.flat_values
else:
text_input_flat = text_input
def _ragged_tile(x):
input_text, indices = x
multiple = math_ops.reduce_sum(indices.row_lengths())
return array_ops.tile([input_text], [multiple])
broadcasted_text = ragged_map_ops.map_fn(
_ragged_tile,
(text_input_flat, begin),
dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.string, ragged_rank=1),
infer_shape=False,
)
size = math_ops.sub(
array_ops.squeeze(end.flat_values), array_ops.squeeze(begin.flat_values))
new_tokens = string_ops.substr_v2(broadcasted_text,
array_ops.squeeze(begin.flat_values), size)
return begin.with_flat_values(new_tokens.flat_values)
_VOCAB = [
b'[unused1]',
b'[unused23]',
b"'",
b'##%',
b'##af',
b'##book',
b'##c',
b'##fr',
b'##hey',
b'##is',
b'##o',
b'##ost',
b'##s',
b'##tri',
b'##y',
b'$',
b'%',
b'&',
b'(',
b')',
b'*',
b'-',
b'.',
b'20',
b':',
b'?',
b'[CLS]',
b'[SEP]',
_utf8(u'國'),
_utf8(u'暐'),
_utf8(u'瀚'),
_utf8(u'韓'),
_utf8(u'食'),
_utf8(u'黃'),
_utf8(u'🤔'),
_utf8(u'🤣'),
b'^',
b'a',
b'ago',
b'among',
b'an',
b'and',
b'are',
b'aren',
b'awesome',
b'between',
b'candy',
b'china',
b'companies',
b'company',
b'crushed',
b'dug',
b'earnings',
b'engaged',
b'even',
b'few',
b'forecast',
b'getting',
b'had',
b'han',
b'has',
b'hers',
b'high',
b'hit',
b'hs',
b'hurting',
b'in',
b'indie',
b'is',
b'isn',
b'ka',
b'ku',
b'major',
b'maker',
b'moth',
b'nearly',
b'new',
b'now',
b'president',
b'record',
b'regulators',
b'reported',
b'rift',
b'rust',
b'sales',
b'shares',
b'slightly',
b'sprint',
b'states',
b'stock',
b't',
b'taste',
b'tension',
b'that',
b'the',
b'this',
b'today',
b'told',
b'topped',
b'trade',
b'trump',
b'united',
b'up',
b'weeks',
b'what',
b'why',
b'with',
b'year',
b'yo',
b'yu',
_utf8(u'\u7231'),
_utf8(u'\u4e0a'),
_utf8(u'\u4e00'),
_utf8(u'\u4e2a'),
_utf8(u'\u4e0d'),
_utf8(u'\u56de'),
_utf8(u'\u5bb6'),
_utf8(u'\u7684'),
_utf8(u'\u4eba'),
]
def _create_table(vocab, num_oov=1):
init = lookup_ops.KeyValueTensorInitializer(
vocab,
math_ops.range(
array_ops.size(vocab, out_type=dtypes.int64), dtype=dtypes.int64),
key_dtype=dtypes.string,
value_dtype=dtypes.int64)
return lookup_ops.StaticVocabularyTableV1(
init, num_oov, lookup_key_dtype=dtypes.string)
class BertTokenizerTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def test_bert_tokenizer_outputs(self):
text_inputs = constant_op.constant([_utf8('Test')])
vocab = _VOCAB
table = _create_table(vocab, 2)
self.evaluate(table.initializer)
tokenizer = bert_tokenizer.BertTokenizer(
table,
token_out_type=dtypes.int32)
results = tokenizer.tokenize(text_inputs)
self.assertAllEqual(results.dtype, dtypes.int32)
@parameterized.parameters([
dict(
text_inputs=[
_utf8(u'taste the rustisc indiefrost'),
_utf8(u'Han Kuo-yu (韓國食)🤔'),
_utf8(u'Añade la información del formulario y tus preguntas'),
],
expected_tokens=[[b'taste', b'the', b'rustisc', b'indiefrost'],
[
b'Han', b'Kuo', b'-', b'yu', b'(',
b'\xe9\x9f\x93', b'\xe5\x9c\x8b',
b'\xe9\xa3\x9f', b')', b'\xf0\x9f\xa4\x94'
],
[
b'A\xc3\xb1ade', b'la', b'informaci\xc3\xb3n',
b'del', b'formulario', b'y', b'tus', b'preguntas'
]],
),
dict(
text_inputs=[
_utf8(u'UNwant\u00E9d,running'),
_utf8(u'Añade la información del formulario y tus preguntas'),
],
expected_tokens=[[b'unwanted', b',', b'running'],
[
b'anade', b'la', b'informacion', b'del',
b'formulario', b'y', b'tus', b'preguntas'
]],
lower_case=True,
),
dict(
text_inputs=[
_utf8(u'Añade la información del formulario y tus preguntas')
],
expected_tokens=[[
b'An\xcc\x83ade', b'la', b'informacio\xcc\x81n', b'del',
b'formulario', b'y', b'tus', b'preguntas'
]],
normalization_form='NFD',
),
# Test CJK are tokenized by unicode characters
dict(
text_inputs=[
_utf8(u'香港では4日'),
_utf8(u'영어독해 자만심 왜 문제일까'),
_utf8(u'據港媒《東網》報導')
],
expected_tokens=[
[_utf8(u'香'),
_utf8(u'港'),
_utf8(u'では4'),
_utf8(u'日')],
[
_utf8(u'영어독해'),
_utf8(u'자만심'),
_utf8(u'왜'),
_utf8(u'문제일까'),
],
[
_utf8(u'據'),
_utf8(u'港'),
_utf8(u'媒'),
_utf8(u'《'),
_utf8(u'東'),
_utf8(u'網'),
_utf8(u'》'),
_utf8(u'報'),
_utf8(u'導')
],
],
normalization_form=None,
),
# Test Katakana followed by Hiragana.
dict(
text_inputs=[_utf8(u'のテキストとして')],
expected_tokens=[
[_utf8(u'のテキストとして')],
],
normalization_form=None,
),
])
@test_util.run_in_graph_and_eager_modes
def test_basic_tokenize(self,
text_inputs,
expected_tokens,
lower_case=False,
normalization_form='NFC'):
text_inputs = ragged_factory_ops.constant(text_inputs)
tokenizer = bert_tokenizer.BasicTokenizer(
lower_case=lower_case, normalization_form=normalization_form)
tokens = tokenizer.tokenize(text_inputs)
self.assertAllEqual(tokens, expected_tokens)
@parameterized.parameters([
dict(
text_inputs=[
b'taste the rustisc indiefrost',
_utf8(u'Han Kuo-yu (韓國食)🤔'),
_utf8(u'dugtrio had an awesome 🤣 dugbook'),
b'yo^what$is*up?',
b'mothaf*&%ka',
],
expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],
[b'indie', b'##fr', b'##ost']],
[[b'han'], [b'ku', b'##o'], [b'-'], [b'yu'], [b'('],
[_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')], [b')'],
[_utf8(u'🤔')]],
[[b'dug', b'##tri', b'##o'], [b'had'], [b'an'],
[b'awesome'], [_utf8(u'🤣')], [b'dug', b'##book']],
[[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'], [b'*'],
[b'up'], [b'?']],
[[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka']]],
expected_extracted=[[[b'taste'], [b'the'], [b'rust', b'is', b'c'],
[b'indie', b'fr', b'ost']],
[[b'Han'], [b'Ku', b'o'], [b'-'], [b'yu'], [b'('],
[_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')],
[b')'], [_utf8(u'🤔')]],
[[b'dug', b'tri', b'o'], [b'had'], [b'an'],
[b'awesome'], [_utf8(u'🤣')], [b'dug', b'book']],
[[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'],
[b'*'], [b'up'], [b'?']],
[[b'moth', b'af'], [b'*'], [b'&'], [b'%'],
[b'ka']]],
lower_case=True,
),
# Test when we are expecting multiple OOV vocab ids and tf.string just
# maps out [UNK] token.
dict(
text_inputs=[
b'mothaf*&%ka cantfindme whodis',
],
expected=[[[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka'],
[b'[UNK]'], [b'[UNK]']]],
expected_extracted=[[[b'moth', b'af'], [b'*'], [b'&'], [b'%'],
[b'ka'], [b'cantfindme'], [b'whodis']]],
lower_case=True,
num_oov=2,
),
dict(
text_inputs=[
b'candy',
],
expected=[[[b'candy']]],
lower_case=True,
num_oov=2,
),
dict(
text_inputs=[
_utf8(u'爱上一个不回家的人'),
],
expected=[[[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],
[_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],
[_utf8(u'人')]]],
lower_case=True,
num_oov=2,
),
# Test 'preserve_unused_token' option
dict(
text_inputs=[
b'taste the rustisc indiefrost [unused1]',
_utf8(u'爱上一个不回家的人[unused23]'),
],
expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],
[b'indie', b'##fr', b'##ost'], [b'[unused1]']],
[[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],
[_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],
[_utf8(u'人')], [b'[unused23]']]],
preserve_unused_token=True,
),
])
@test_util.run_in_graph_and_eager_modes
def test_bert_tokenizer(self,
text_inputs,
expected,
vocab=None,
expected_extracted=None,
lower_case=True,
num_oov=1,
preserve_unused_token=False):
text_inputs = constant_op.constant(text_inputs)
if not vocab:
vocab = _VOCAB
table = _create_table(vocab, num_oov)
self.evaluate(table.initializer)
tokenizer = bert_tokenizer.BertTokenizer(
table,
token_out_type=dtypes.string,
lower_case=lower_case,
preserve_unused_token=preserve_unused_token)
results = tokenizer.tokenize(text_inputs)
self.assertAllEqual(results, expected)
# Verify that the int ids are the same.
expected_rt = ragged_factory_ops.constant(expected)
expected_int = table.lookup(expected_rt.flat_values)
expected_int_rt = ragged_tensor.RaggedTensor.from_nested_row_splits(
expected_int, expected_rt.nested_row_splits)
int_tokenizer = bert_tokenizer.BertTokenizer(
vocab_lookup_table=table,
token_out_type=dtypes.int64,
lower_case=lower_case,
preserve_unused_token=preserve_unused_token)
results_int = int_tokenizer.tokenize(text_inputs)
self.assertAllEqual(results_int, expected_int_rt)
# Verify that the offsets can extract the expected tokens
_, begin, end = tokenizer.tokenize_with_offsets(text_inputs)
extracted_wordpieces = _ragged_substr(text_inputs, begin, end)
if expected_extracted:
self.assertAllEqual(extracted_wordpieces, expected_extracted)
else:
# The extracted won't have any wordpieces with '##' prefix. Strip them
# out.
stripped_prefix_flat = string_ops.regex_replace(expected_rt.flat_values,
'##', '')
stripped_prefix = expected_rt.with_flat_values(stripped_prefix_flat)
self.assertAllEqual(extracted_wordpieces, stripped_prefix)
if __name__ == '__main__':
test.main()
| 31.179954 | 80 | 0.512566 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow_text.python.ops import bert_tokenizer
def _utf8(x):
return x.encode('utf-8')
# checking this into core TF.
def _ragged_substr(text_input, begin, end):
text_input_flat = None
if ragged_tensor.is_ragged(text_input):
text_input_flat = text_input.flat_values
else:
text_input_flat = text_input
def _ragged_tile(x):
input_text, indices = x
multiple = math_ops.reduce_sum(indices.row_lengths())
return array_ops.tile([input_text], [multiple])
broadcasted_text = ragged_map_ops.map_fn(
_ragged_tile,
(text_input_flat, begin),
dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.string, ragged_rank=1),
infer_shape=False,
)
size = math_ops.sub(
array_ops.squeeze(end.flat_values), array_ops.squeeze(begin.flat_values))
new_tokens = string_ops.substr_v2(broadcasted_text,
array_ops.squeeze(begin.flat_values), size)
return begin.with_flat_values(new_tokens.flat_values)
_VOCAB = [
b'[unused1]',
b'[unused23]',
b"'",
b'##%',
b'##af',
b'##book',
b'##c',
b'##fr',
b'##hey',
b'##is',
b'##o',
b'##ost',
b'##s',
b'##tri',
b'##y',
b'$',
b'%',
b'&',
b'(',
b')',
b'*',
b'-',
b'.',
b'20',
b':',
b'?',
b'[CLS]',
b'[SEP]',
_utf8(u'國'),
_utf8(u'暐'),
_utf8(u'瀚'),
_utf8(u'韓'),
_utf8(u'食'),
_utf8(u'黃'),
_utf8(u'🤔'),
_utf8(u'🤣'),
b'^',
b'a',
b'ago',
b'among',
b'an',
b'and',
b'are',
b'aren',
b'awesome',
b'between',
b'candy',
b'china',
b'companies',
b'company',
b'crushed',
b'dug',
b'earnings',
b'engaged',
b'even',
b'few',
b'forecast',
b'getting',
b'had',
b'han',
b'has',
b'hers',
b'high',
b'hit',
b'hs',
b'hurting',
b'in',
b'indie',
b'is',
b'isn',
b'ka',
b'ku',
b'major',
b'maker',
b'moth',
b'nearly',
b'new',
b'now',
b'president',
b'record',
b'regulators',
b'reported',
b'rift',
b'rust',
b'sales',
b'shares',
b'slightly',
b'sprint',
b'states',
b'stock',
b't',
b'taste',
b'tension',
b'that',
b'the',
b'this',
b'today',
b'told',
b'topped',
b'trade',
b'trump',
b'united',
b'up',
b'weeks',
b'what',
b'why',
b'with',
b'year',
b'yo',
b'yu',
_utf8(u'\u7231'),
_utf8(u'\u4e0a'),
_utf8(u'\u4e00'),
_utf8(u'\u4e2a'),
_utf8(u'\u4e0d'),
_utf8(u'\u56de'),
_utf8(u'\u5bb6'),
_utf8(u'\u7684'),
_utf8(u'\u4eba'),
]
def _create_table(vocab, num_oov=1):
  """Builds a static string->id lookup table with `num_oov` OOV buckets."""
  ids = math_ops.range(
      array_ops.size(vocab, out_type=dtypes.int64), dtype=dtypes.int64)
  initializer = lookup_ops.KeyValueTensorInitializer(
      vocab,
      ids,
      key_dtype=dtypes.string,
      value_dtype=dtypes.int64)
  return lookup_ops.StaticVocabularyTableV1(
      initializer, num_oov, lookup_key_dtype=dtypes.string)
class BertTokenizerTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """Tests for BasicTokenizer and BertTokenizer against a small vocabulary."""

  def test_bert_tokenizer_outputs(self):
    """Tokenizing with token_out_type=int32 yields int32 ids."""
    text_inputs = constant_op.constant([_utf8('Test')])
    vocab = _VOCAB
    table = _create_table(vocab, 2)
    self.evaluate(table.initializer)
    tokenizer = bert_tokenizer.BertTokenizer(
        table,
        token_out_type=dtypes.int32)
    results = tokenizer.tokenize(text_inputs)
    self.assertAllEqual(results.dtype, dtypes.int32)

  @parameterized.parameters([
      dict(
          text_inputs=[
              _utf8(u'taste the rustisc indiefrost'),
              _utf8(u'Han Kuo-yu (韓國食)🤔'),
              _utf8(u'Añade la información del formulario y tus preguntas'),
          ],
          expected_tokens=[[b'taste', b'the', b'rustisc', b'indiefrost'],
                           [
                               b'Han', b'Kuo', b'-', b'yu', b'(',
                               b'\xe9\x9f\x93', b'\xe5\x9c\x8b',
                               b'\xe9\xa3\x9f', b')', b'\xf0\x9f\xa4\x94'
                           ],
                           [
                               b'A\xc3\xb1ade', b'la', b'informaci\xc3\xb3n',
                               b'del', b'formulario', b'y', b'tus', b'preguntas'
                           ]],
      ),
      dict(
          text_inputs=[
              _utf8(u'UNwant\u00E9d,running'),
              _utf8(u'Añade la información del formulario y tus preguntas'),
          ],
          expected_tokens=[[b'unwanted', b',', b'running'],
                           [
                               b'anade', b'la', b'informacion', b'del',
                               b'formulario', b'y', b'tus', b'preguntas'
                           ]],
          lower_case=True,
      ),
      dict(
          text_inputs=[
              _utf8(u'Añade la información del formulario y tus preguntas')
          ],
          expected_tokens=[[
              b'An\xcc\x83ade', b'la', b'informacio\xcc\x81n', b'del',
              b'formulario', b'y', b'tus', b'preguntas'
          ]],
          normalization_form='NFD',
      ),
      dict(
          text_inputs=[
              _utf8(u'香港では4日'),
              _utf8(u'영어독해 자만심 왜 문제일까'),
              _utf8(u'據港媒《東網》報導')
          ],
          expected_tokens=[
              [_utf8(u'香'),
               _utf8(u'港'),
               _utf8(u'では4'),
               _utf8(u'日')],
              [
                  _utf8(u'영어독해'),
                  _utf8(u'자만심'),
                  _utf8(u'왜'),
                  _utf8(u'문제일까'),
              ],
              [
                  _utf8(u'據'),
                  _utf8(u'港'),
                  _utf8(u'媒'),
                  _utf8(u'《'),
                  _utf8(u'東'),
                  _utf8(u'網'),
                  _utf8(u'》'),
                  _utf8(u'報'),
                  _utf8(u'導')
              ],
          ],
          normalization_form=None,
      ),
      dict(
          text_inputs=[_utf8(u'のテキストとして')],
          expected_tokens=[
              [_utf8(u'のテキストとして')],
          ],
          normalization_form=None,
      ),
  ])
  @test_util.run_in_graph_and_eager_modes
  def test_basic_tokenize(self,
                          text_inputs,
                          expected_tokens,
                          lower_case=False,
                          normalization_form='NFC'):
    """BasicTokenizer splits on whitespace/punctuation/CJK as expected."""
    text_inputs = ragged_factory_ops.constant(text_inputs)
    tokenizer = bert_tokenizer.BasicTokenizer(
        lower_case=lower_case, normalization_form=normalization_form)
    tokens = tokenizer.tokenize(text_inputs)
    self.assertAllEqual(tokens, expected_tokens)

  @parameterized.parameters([
      dict(
          text_inputs=[
              b'taste the rustisc indiefrost',
              _utf8(u'Han Kuo-yu (韓國食)🤔'),
              _utf8(u'dugtrio had an awesome 🤣 dugbook'),
              b'yo^what$is*up?',
              b'mothaf*&%ka',
          ],
          expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],
                     [b'indie', b'##fr', b'##ost']],
                    [[b'han'], [b'ku', b'##o'], [b'-'], [b'yu'], [b'('],
                     [_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')], [b')'],
                     [_utf8(u'🤔')]],
                    [[b'dug', b'##tri', b'##o'], [b'had'], [b'an'],
                     [b'awesome'], [_utf8(u'🤣')], [b'dug', b'##book']],
                    [[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'], [b'*'],
                     [b'up'], [b'?']],
                    [[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka']]],
          expected_extracted=[[[b'taste'], [b'the'], [b'rust', b'is', b'c'],
                               [b'indie', b'fr', b'ost']],
                              [[b'Han'], [b'Ku', b'o'], [b'-'], [b'yu'], [b'('],
                               [_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')],
                               [b')'], [_utf8(u'🤔')]],
                              [[b'dug', b'tri', b'o'], [b'had'], [b'an'],
                               [b'awesome'], [_utf8(u'🤣')], [b'dug', b'book']],
                              [[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'],
                               [b'*'], [b'up'], [b'?']],
                              [[b'moth', b'af'], [b'*'], [b'&'], [b'%'],
                               [b'ka']]],
          lower_case=True,
      ),
      dict(
          text_inputs=[
              b'mothaf*&%ka cantfindme whodis',
          ],
          expected=[[[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka'],
                     [b'[UNK]'], [b'[UNK]']]],
          expected_extracted=[[[b'moth', b'af'], [b'*'], [b'&'], [b'%'],
                               [b'ka'], [b'cantfindme'], [b'whodis']]],
          lower_case=True,
          num_oov=2,
      ),
      dict(
          text_inputs=[
              b'candy',
          ],
          expected=[[[b'candy']]],
          lower_case=True,
          num_oov=2,
      ),
      dict(
          text_inputs=[
              _utf8(u'爱上一个不回家的人'),
          ],
          expected=[[[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],
                     [_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],
                     [_utf8(u'人')]]],
          lower_case=True,
          num_oov=2,
      ),
      dict(
          text_inputs=[
              b'taste the rustisc indiefrost [unused1]',
              _utf8(u'爱上一个不回家的人[unused23]'),
          ],
          expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],
                     [b'indie', b'##fr', b'##ost'], [b'[unused1]']],
                    [[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],
                     [_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],
                     [_utf8(u'人')], [b'[unused23]']]],
          preserve_unused_token=True,
      ),
  ])
  @test_util.run_in_graph_and_eager_modes
  def test_bert_tokenizer(self,
                          text_inputs,
                          expected,
                          vocab=None,
                          expected_extracted=None,
                          lower_case=True,
                          num_oov=1,
                          preserve_unused_token=False):
    """Full BertTokenizer round trip: string tokens, int ids, and offsets."""
    text_inputs = constant_op.constant(text_inputs)
    if not vocab:
      vocab = _VOCAB
    table = _create_table(vocab, num_oov)
    self.evaluate(table.initializer)
    tokenizer = bert_tokenizer.BertTokenizer(
        table,
        token_out_type=dtypes.string,
        lower_case=lower_case,
        preserve_unused_token=preserve_unused_token)
    results = tokenizer.tokenize(text_inputs)
    self.assertAllEqual(results, expected)

    # Re-tokenizing with token_out_type=int64 must agree with looking the
    # expected string tokens up in the same table.
    expected_rt = ragged_factory_ops.constant(expected)
    expected_int = table.lookup(expected_rt.flat_values)
    expected_int_rt = ragged_tensor.RaggedTensor.from_nested_row_splits(
        expected_int, expected_rt.nested_row_splits)
    int_tokenizer = bert_tokenizer.BertTokenizer(
        vocab_lookup_table=table,
        token_out_type=dtypes.int64,
        lower_case=lower_case,
        preserve_unused_token=preserve_unused_token)
    results_int = int_tokenizer.tokenize(text_inputs)
    self.assertAllEqual(results_int, expected_int_rt)

    # The begin/end offsets must point back at the wordpieces in the source.
    _, begin, end = tokenizer.tokenize_with_offsets(text_inputs)
    extracted_wordpieces = _ragged_substr(text_inputs, begin, end)
    if expected_extracted:
      self.assertAllEqual(extracted_wordpieces, expected_extracted)
    else:
      # Bug fix: this branch was garbled in this copy of the file (two
      # statements fused and the regex arguments lost).  Substrings taken
      # from the source text carry no '##' continuation markers, so strip
      # them from the expected tokens before comparing.
      stripped_prefix_flat = string_ops.regex_replace(expected_rt.flat_values,
                                                      '^##', '')
      stripped_prefix = expected_rt.with_flat_values(stripped_prefix_flat)
      self.assertAllEqual(extracted_wordpieces, stripped_prefix)
# Allow running this test module directly.
if __name__ == '__main__':
  test.main()
| true | true |
1c38c88dcbb721938ada5fea44e2f9d10ead5848 | 1,957 | py | Python | src/remake/operators/condition.py | MVilstrup/remake | ddeacae55ef3c329a7780c9ad4e508833cd9112e | [
"MIT"
] | null | null | null | src/remake/operators/condition.py | MVilstrup/remake | ddeacae55ef3c329a7780c9ad4e508833cd9112e | [
"MIT"
] | null | null | null | src/remake/operators/condition.py | MVilstrup/remake | ddeacae55ef3c329a7780c9ad4e508833cd9112e | [
"MIT"
] | null | null | null | import sys
from functools import partial, lru_cache
from remake.errors import ConditionRuntimeError, ConditionTooBroadError, InvalidConditionLocationError
from remake.operators.base import BaseOperator
from remake.utils import get_info
class ConditionOperator(BaseOperator):
    """Operator wrapping a user-supplied predicate ("condition") transform.

    Calling the operator runs the transform and re-raises any failure as a
    ConditionRuntimeError; an all-False result is rejected because it would
    filter out every row downstream.
    """

    # Valid attachment points for a condition (see the `condition` decorator).
    START = "inputs"
    GROUPS = "groups"
    END = "outputs"

    def __init__(self, name, sources, transform, on, vectorized=False):
        super().__init__(name, sources, priority="GREEDY", order=1, type="SEQUENTIAL")
        self.transform = transform
        self.on = on
        self.vectorized = vectorized

    def __call__(self, *args, **kwargs):
        try:
            results = self.transform(*args, **kwargs)
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should be converted.
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            msg = f"Failed to compute condition '{self.name}'. Reason: {exc_type(exc_value)}"
            raise ConditionRuntimeError(msg).with_traceback(exc_traceback)
        if sum(results) == 0:
            # An empty mask means the condition filtered everything out.
            raise ConditionTooBroadError(f"Condition '{self.name}' leads to empty DataFrame")
        return results
def condition(function=None, on=ConditionOperator.START, cache=False, vectorized=False):
    """Decorator that registers `function` as a condition on its transformer.

    Usable bare (``@condition``) or with arguments
    (``@condition(on=..., cache=True)``).

    Args:
        function: the predicate to register; None when called with arguments.
        on: attachment point -- one of ConditionOperator.START,
            ConditionOperator.GROUPS or ConditionOperator.END.
        cache: memoize the predicate with functools.lru_cache.
        vectorized: forwarded to ConditionOperator.

    Returns:
        The (possibly cached) predicate, so normal call syntax still works.

    Raises:
        InvalidConditionLocationError: if `on` is not a valid location.
    """
    from remake.schematic.cache import SchematicCache
    if function is None:
        # Called with arguments: return a decorator that awaits the function.
        return partial(condition, on=on, cache=cache, vectorized=vectorized)
    transformer, name, sources = get_info(function)
    function = function if not cache else lru_cache(function)
    valid_locations = [ConditionOperator.START, ConditionOperator.END, ConditionOperator.GROUPS]
    if on not in valid_locations:
        # Bug fix: the message named a non-existent `at=` keyword (the real
        # parameter is `on`) and said "condition error" instead of
        # "condition location".
        msg = f"{name}(..., on={on}) is not a valid condition location. Please use one of {valid_locations}"
        raise InvalidConditionLocationError(msg)
    spec = ConditionOperator(name=name, on=on, sources=sources, transform=function,
                             vectorized=vectorized)
    SchematicCache.add_condition(transformer, spec)
    return function
| 36.924528 | 105 | 0.701073 | import sys
from functools import partial, lru_cache
from remake.errors import ConditionRuntimeError, ConditionTooBroadError, InvalidConditionLocationError
from remake.operators.base import BaseOperator
from remake.utils import get_info
class ConditionOperator(BaseOperator):
    """Operator wrapping a user-supplied predicate ("condition") transform.

    Calling the operator runs the transform and re-raises any failure as a
    ConditionRuntimeError; an all-False result is rejected because it would
    filter out every row downstream.
    """

    # Valid attachment points for a condition (see the `condition` decorator).
    START = "inputs"
    GROUPS = "groups"
    END = "outputs"

    def __init__(self, name, sources, transform, on, vectorized=False):
        super().__init__(name, sources, priority="GREEDY", order=1, type="SEQUENTIAL")
        self.transform = transform
        self.on = on
        self.vectorized = vectorized

    def __call__(self, *args, **kwargs):
        try:
            results = self.transform(*args, **kwargs)
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should be converted.
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            msg = f"Failed to compute condition '{self.name}'. Reason: {exc_type(exc_value)}"
            raise ConditionRuntimeError(msg).with_traceback(exc_traceback)
        if sum(results) == 0:
            # An empty mask means the condition filtered everything out.
            raise ConditionTooBroadError(f"Condition '{self.name}' leads to empty DataFrame")
        return results
def condition(function=None, on=ConditionOperator.START, cache=False, vectorized=False):
    """Decorator that registers `function` as a condition on its transformer.

    Usable bare (``@condition``) or with arguments
    (``@condition(on=..., cache=True)``).

    Args:
        function: the predicate to register; None when called with arguments.
        on: attachment point -- one of ConditionOperator.START,
            ConditionOperator.GROUPS or ConditionOperator.END.
        cache: memoize the predicate with functools.lru_cache.
        vectorized: forwarded to ConditionOperator.

    Returns:
        The (possibly cached) predicate, so normal call syntax still works.

    Raises:
        InvalidConditionLocationError: if `on` is not a valid location.
    """
    from remake.schematic.cache import SchematicCache
    if function is None:
        # Called with arguments: return a decorator that awaits the function.
        return partial(condition, on=on, cache=cache, vectorized=vectorized)
    transformer, name, sources = get_info(function)
    function = function if not cache else lru_cache(function)
    valid_locations = [ConditionOperator.START, ConditionOperator.END, ConditionOperator.GROUPS]
    if on not in valid_locations:
        # Bug fix: the message named a non-existent `at=` keyword (the real
        # parameter is `on`) and said "condition error" instead of
        # "condition location".
        msg = f"{name}(..., on={on}) is not a valid condition location. Please use one of {valid_locations}"
        raise InvalidConditionLocationError(msg)
    spec = ConditionOperator(name=name, on=on, sources=sources, transform=function,
                             vectorized=vectorized)
    SchematicCache.add_condition(transformer, spec)
    return function
| true | true |
1c38c8be779aad6c2630ee32ebf7313bb888d8e3 | 869 | py | Python | drfexample/bookmarks/migrations/0002_auto_20190306_1630.py | craigderington/django-drf-examples | 24e3abbdfcd5e5d3fbea9df54f80e41157194339 | [
"Apache-2.0"
] | null | null | null | drfexample/bookmarks/migrations/0002_auto_20190306_1630.py | craigderington/django-drf-examples | 24e3abbdfcd5e5d3fbea9df54f80e41157194339 | [
"Apache-2.0"
] | 4 | 2019-09-17T05:45:01.000Z | 2021-06-10T21:15:07.000Z | drfexample/bookmarks/migrations/0002_auto_20190306_1630.py | craigderington/django-drf-examples | 24e3abbdfcd5e5d3fbea9df54f80e41157194339 | [
"Apache-2.0"
] | 1 | 2020-04-25T06:48:34.000Z | 2020-04-25T06:48:34.000Z | # Generated by Django 2.1.7 on 2019-03-06 16:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated field adjustments for the Bookmark model."""

    dependencies = [
        ('bookmarks', '0001_initial'),
    ]

    operations = [
        # full_url becomes a required CharField capped at 255 characters.
        migrations.AlterField(
            model_name='bookmark',
            name='full_url',
            field=models.CharField(max_length=255),
        ),
        # owner gains the 'bookmarks' reverse accessor; bookmarks are
        # deleted together with their owning user (CASCADE).
        migrations.AlterField(
            model_name='bookmark',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bookmarks', to=settings.AUTH_USER_MODEL),
        ),
        # short_url becomes optional (blank/null allowed), max 50 characters.
        migrations.AlterField(
            model_name='bookmark',
            name='short_url',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
| 28.032258 | 136 | 0.61565 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated field adjustments for the Bookmark model."""

    dependencies = [
        ('bookmarks', '0001_initial'),
    ]

    operations = [
        # full_url becomes a required CharField capped at 255 characters.
        migrations.AlterField(
            model_name='bookmark',
            name='full_url',
            field=models.CharField(max_length=255),
        ),
        # owner gains the 'bookmarks' reverse accessor; bookmarks are
        # deleted together with their owning user (CASCADE).
        migrations.AlterField(
            model_name='bookmark',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bookmarks', to=settings.AUTH_USER_MODEL),
        ),
        # short_url becomes optional (blank/null allowed), max 50 characters.
        migrations.AlterField(
            model_name='bookmark',
            name='short_url',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
| true | true |
1c38c8e85d5428ec424cf33acc88a75d728a55d7 | 1,180 | py | Python | chapter5/demo/code/5-3_neural_network.py | hitaitengteng/python | 4e07fe6755ef1e0e0c1193249446e5246c89236e | [
"MIT"
] | null | null | null | chapter5/demo/code/5-3_neural_network.py | hitaitengteng/python | 4e07fe6755ef1e0e0c1193249446e5246c89236e | [
"MIT"
] | null | null | null | chapter5/demo/code/5-3_neural_network.py | hitaitengteng/python | 4e07fe6755ef1e0e0c1193249446e5246c89236e | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
# Predict whether sales volume is high or low with a small neural network.
import pandas as pd

# Parameter initialisation.
inputfile = '../data/sales_data.xls'
data = pd.read_excel(inputfile, index_col=u'序号')  # load the data

# The raw data are categorical labels; map them onto numbers:
# 1 encodes "good" / "yes" / "high"; everything else becomes 0.
data[data == u'好'] = 1
data[data == u'是'] = 1
data[data == u'高'] = 1
data[data != 1] = 0
x = data.iloc[:, :3].as_matrix().astype(int)
y = data.iloc[:, 3].as_matrix().astype(int)

from keras.models import Sequential
from keras.layers.core import Dense, Activation

model = Sequential()  # build the model
model.add(Dense(input_dim=3, output_dim=10))
model.add(Activation('relu'))  # relu activation markedly improves accuracy
model.add(Dense(input_dim=10, output_dim=1))
model.add(Activation('sigmoid'))  # sigmoid for the 0/1 output
model.compile(loss='binary_crossentropy',
              optimizer='adam', class_mode='binary')
# Compile the model.  This is binary classification, hence the
# binary_crossentropy loss and 'binary' mode; other common losses include
# mean_squared_error and categorical_crossentropy (see the Keras docs).
# 'adam' is the solver; sgd and rmsprop are alternatives.
model.fit(x, y, nb_epoch=1000, batch_size=10)  # train for 1000 epochs
yp = model.predict_classes(x).reshape(len(y))  # class predictions

from cm_plot import *  # self-written confusion-matrix plotting helper
cm_plot(y, yp).show()  # display the confusion matrix
| 30.25641 | 66 | 0.717797 |
# Predict whether sales volume is high or low with a small neural network.
import pandas as pd

# Parameter initialisation: load the spreadsheet indexed by its 序号 column.
inputfile = '../data/sales_data.xls'
data = pd.read_excel(inputfile, index_col=u'序号')

# The raw data are categorical labels; map them onto numbers:
# 1 encodes "good" / "yes" / "high"; everything else becomes 0.
data[data == u'好'] = 1
data[data == u'是'] = 1
data[data == u'高'] = 1
data[data != 1] = 0
x = data.iloc[:, :3].as_matrix().astype(int)
y = data.iloc[:, 3].as_matrix().astype(int)

from keras.models import Sequential
from keras.layers.core import Dense, Activation

# 3-input -> 10-unit relu hidden layer -> single sigmoid output (0/1).
model = Sequential()
model.add(Dense(input_dim=3, output_dim=10))
model.add(Activation('relu'))
model.add(Dense(input_dim=10, output_dim=1))
model.add(Activation('sigmoid'))
# Binary classification: binary_crossentropy loss, adam optimiser.
model.compile(loss='binary_crossentropy',
              optimizer='adam', class_mode='binary')
model.fit(x, y, nb_epoch=1000, batch_size=10)  # train for 1000 epochs
yp = model.predict_classes(x).reshape(len(y))  # class predictions

from cm_plot import *  # project-local confusion-matrix plotting helper
cm_plot(y, yp).show()  # display the confusion matrix
| true | true |
1c38c9ad283531a271d1e7b3025f31ceae3f7b89 | 3,629 | py | Python | playbooks/enumerate/interesting_files.py | Pheelbert/phcat | 5a97b2033a733f9be217863f33bc442aee8a419b | [
"MIT"
] | null | null | null | playbooks/enumerate/interesting_files.py | Pheelbert/phcat | 5a97b2033a733f9be217863f33bc442aee8a419b | [
"MIT"
] | null | null | null | playbooks/enumerate/interesting_files.py | Pheelbert/phcat | 5a97b2033a733f9be217863f33bc442aee8a419b | [
"MIT"
] | null | null | null | from typing import List, Tuple
from pheelshell import Pheelshell
from playbooks.playbook import Playbook
class EnumerateInterestingFiles(Playbook):
@staticmethod
def description():
return 'Finds interesting files that your user has some access to.'
def __init__(self):
super().__init__()
self.readable_files: List[str] = []
self.writable_files: List[str] = []
self.executable_files: List[str] = []
self.interesting_directories = [
'/home/'
]
def __str__(self):
output = '[interesting files]\n'
if self.readable_files:
output += '[readable]\n'
output += '\n'.join(self.readable_files)
output += '\n'
if self.writable_files:
output += '[writable]\n'
output += '\n'.join(self.writable_files)
output += '\n'
if self.executable_files:
output += '[executable]\n'
output += '\n'.join(self.executable_files)
return output
def _parse_paths(self, output):
lines = []
for line in output.split('\n'):
if line and ': Permission denied' not in line:
lines.append(line)
return lines
def run(self, shell: Pheelshell):
for interesting_directory in self.interesting_directories:
readable_command = f'find {interesting_directory} -readable -type f'
print(f'Finding readable files in \'{interesting_directory}\' by running \'{readable_command}\'')
output = shell.execute_command(readable_command)
parsed_output = self._parse_paths(output)
if parsed_output:
self.readable_files.extend(parsed_output)
for filepath in parsed_output:
if filepath.endswith('user.txt') or filepath.endswith('root.txt'):
shell.add_hint(f'Found readable HTB flag file: \'{filepath}\'')
writable_command = f'find {interesting_directory} -writable -type f'
print(f'Finding writable files in \'{interesting_directory}\' by running \'{writable_command}\'')
output = shell.execute_command(writable_command)
parsed_output = self._parse_paths(output)
if parsed_output:
self.writable_files.extend(parsed_output)
executable_command = f'find {interesting_directory} -executable -type f'
print(f'Finding executable files in \'{interesting_directory}\' by running \'{executable_command}\'')
output = shell.execute_command(executable_command)
parsed_output = self._parse_paths(output)
if parsed_output:
self.executable_files.extend(parsed_output)
unreadable_executable_directory_command = f'find {interesting_directory} -executable -type d ! -readable'
print(f'Finding executable directories that aren\'t readable in \'{interesting_directory}\' by running \'{unreadable_executable_directory_command}\'')
output = shell.execute_command(unreadable_executable_directory_command)
parsed_output = self._parse_paths(output)
if parsed_output:
self.executable_files.extend(parsed_output)
for directory in parsed_output:
hint = (f'Current user has execute rights on directory \'{directory}\'\n'
f'This means you can guess filenames in the directory and run (for example) \'cat {directory}secret.txt\'.')
shell.add_hint(hint)
self._has_run = True
| 43.202381 | 162 | 0.622761 | from typing import List, Tuple
from pheelshell import Pheelshell
from playbooks.playbook import Playbook
class EnumerateInterestingFiles(Playbook):
@staticmethod
def description():
return 'Finds interesting files that your user has some access to.'
def __init__(self):
super().__init__()
self.readable_files: List[str] = []
self.writable_files: List[str] = []
self.executable_files: List[str] = []
self.interesting_directories = [
'/home/'
]
def __str__(self):
output = '[interesting files]\n'
if self.readable_files:
output += '[readable]\n'
output += '\n'.join(self.readable_files)
output += '\n'
if self.writable_files:
output += '[writable]\n'
output += '\n'.join(self.writable_files)
output += '\n'
if self.executable_files:
output += '[executable]\n'
output += '\n'.join(self.executable_files)
return output
def _parse_paths(self, output):
lines = []
for line in output.split('\n'):
if line and ': Permission denied' not in line:
lines.append(line)
return lines
def run(self, shell: Pheelshell):
for interesting_directory in self.interesting_directories:
readable_command = f'find {interesting_directory} -readable -type f'
print(f'Finding readable files in \'{interesting_directory}\' by running \'{readable_command}\'')
output = shell.execute_command(readable_command)
parsed_output = self._parse_paths(output)
if parsed_output:
self.readable_files.extend(parsed_output)
for filepath in parsed_output:
if filepath.endswith('user.txt') or filepath.endswith('root.txt'):
shell.add_hint(f'Found readable HTB flag file: \'{filepath}\'')
writable_command = f'find {interesting_directory} -writable -type f'
print(f'Finding writable files in \'{interesting_directory}\' by running \'{writable_command}\'')
output = shell.execute_command(writable_command)
parsed_output = self._parse_paths(output)
if parsed_output:
self.writable_files.extend(parsed_output)
executable_command = f'find {interesting_directory} -executable -type f'
print(f'Finding executable files in \'{interesting_directory}\' by running \'{executable_command}\'')
output = shell.execute_command(executable_command)
parsed_output = self._parse_paths(output)
if parsed_output:
self.executable_files.extend(parsed_output)
unreadable_executable_directory_command = f'find {interesting_directory} -executable -type d ! -readable'
print(f'Finding executable directories that aren\'t readable in \'{interesting_directory}\' by running \'{unreadable_executable_directory_command}\'')
output = shell.execute_command(unreadable_executable_directory_command)
parsed_output = self._parse_paths(output)
if parsed_output:
self.executable_files.extend(parsed_output)
for directory in parsed_output:
hint = (f'Current user has execute rights on directory \'{directory}\'\n'
f'This means you can guess filenames in the directory and run (for example) \'cat {directory}secret.txt\'.')
shell.add_hint(hint)
self._has_run = True
| true | true |
1c38c9cd1a04d923a16609354e9b9b3da40f76c4 | 602 | py | Python | var/spack/repos/builtin/packages/ngmlr/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/ngmlr/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/ngmlr/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ngmlr(CMakePackage):
    """Ngmlr is a long-read mapper designed to align PacBio or Oxford
    Nanopore reads to a reference genome with a focus on reads that span
    structural variations."""

    homepage = "https://github.com/philres/ngmlr"
    url = "https://github.com/philres/ngmlr/archive/v0.2.5.tar.gz"

    # version checksum is the release tarball's md5 digest.
    version('0.2.5', '1b2b1aaeb6a3accc8b9f3e5c29e77037')
| 33.444444 | 73 | 0.722591 |
from spack import *
class Ngmlr(CMakePackage):
    """Spack package for ngmlr, a long-read (PacBio/Oxford Nanopore)
    reference mapper built with CMake."""

    homepage = "https://github.com/philres/ngmlr"
    url = "https://github.com/philres/ngmlr/archive/v0.2.5.tar.gz"

    # version checksum is the release tarball's md5 digest.
    version('0.2.5', '1b2b1aaeb6a3accc8b9f3e5c29e77037')
| true | true |
1c38ca4dc80aa711ec0a11c3a3887d15fac2da33 | 195 | py | Python | micasa/admin.py | kode-ai/JIRANI | cc31397fa9834da17d8fbba210fd6e29b904b118 | [
"MIT"
] | null | null | null | micasa/admin.py | kode-ai/JIRANI | cc31397fa9834da17d8fbba210fd6e29b904b118 | [
"MIT"
] | 2 | 2021-06-10T22:20:59.000Z | 2021-09-08T01:28:01.000Z | micasa/admin.py | kode-ai/JIRANI | cc31397fa9834da17d8fbba210fd6e29b904b118 | [
"MIT"
] | 1 | 2020-02-27T07:05:21.000Z | 2020-02-27T07:05:21.000Z | from django.contrib import admin
from .models import *  # NOTE(review): wildcard import -- prefer listing models explicitly.
# Register your models here.
# Expose the neighbourhood models in the Django admin site.
admin.site.register(Hood)
admin.site.register(Profile)
admin.site.register(Business)
admin.site.register(Post) | 24.375 | 32 | 0.805128 | from django.contrib import admin
from .models import *  # NOTE(review): wildcard import -- prefer listing models explicitly.
# Expose the neighbourhood models in the Django admin site.
admin.site.register(Hood)
admin.site.register(Profile)
admin.site.register(Business)
admin.site.register(Post) | true | true |
1c38ca4f0b0b985f7ba75056ec5c7c6e2582f6e6 | 1,120 | py | Python | tests/funding_sources/test_funding_sources_program_gateway_create.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 21 | 2019-04-12T09:02:17.000Z | 2022-02-18T11:39:06.000Z | tests/funding_sources/test_funding_sources_program_gateway_create.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 1 | 2020-07-22T21:27:40.000Z | 2020-07-23T17:38:43.000Z | tests/funding_sources/test_funding_sources_program_gateway_create.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 10 | 2019-05-08T14:20:37.000Z | 2021-09-20T18:09:26.000Z | import unittest
from tests.lib.client import get_client
from tests.lib.funding_sources import FundingSources
from tests.lib.funding_source_verifications import verify_gateway_program_funding_source_response
from marqeta.errors import MarqetaError
class TestFundingSourceProgramGatewayCreate(unittest.TestCase):
    """Tests for the funding_source.program_gateway.create endpoint."""

    @classmethod
    def setUpClass(cls):
        """Builds the shared API client once for every test in the class."""
        cls.client = get_client()

    def test_program_gateway_create_success(self):
        """A well-formed request creates a program gateway funding source."""
        request = FundingSources.get_program_gateway_funding_request()
        created = self.client.funding_sources.program_gateway.create(request)
        verify_gateway_program_funding_source_response(self, created, request)

    def test_program_gateway_create_fail(self):
        """An empty funding request is rejected with MarqetaError."""
        with self.assertRaises(MarqetaError):
            self.client.funding_sources.program_gateway.create({})
| 32.941176 | 97 | 0.748214 | import unittest
from tests.lib.client import get_client
from tests.lib.funding_sources import FundingSources
from tests.lib.funding_source_verifications import verify_gateway_program_funding_source_response
from marqeta.errors import MarqetaError
class TestFundingSourceProgramGatewayCreate(unittest.TestCase):
    """Tests for the funding_source.program_gateway.create endpoint."""

    @classmethod
    def setUpClass(cls):
        """Builds the shared API client once for every test in the class."""
        cls.client = get_client()

    def test_program_gateway_create_success(self):
        """A well-formed request creates a program gateway funding source."""
        funding_request = FundingSources.get_program_gateway_funding_request()
        source = self.client.funding_sources.program_gateway.create(
            funding_request)
        verify_gateway_program_funding_source_response(
            self, source, funding_request)

    def test_program_gateway_create_fail(self):
        """An empty funding request is rejected with MarqetaError."""
        with self.assertRaises(MarqetaError):
            self.client.funding_sources.program_gateway.create({})
| true | true |
1c38ca8010cf51027ddcb508bb923252c7aac578 | 2,771 | py | Python | scripts/run_frontend_tests.py | carolinajimenez26/oppia | 8a8f07e37e11beb441a53635530e1d3cebe6beef | [
"Apache-2.0"
] | 1 | 2019-11-05T09:32:39.000Z | 2019-11-05T09:32:39.000Z | scripts/run_frontend_tests.py | TakorLucila/oppia | 393a81474ddc02b7141eed80f33c720cde447517 | [
"Apache-2.0"
] | 13 | 2019-03-04T18:29:27.000Z | 2019-08-24T23:27:36.000Z | scripts/run_frontend_tests.py | TakorLucila/oppia | 393a81474ddc02b7141eed80f33c720cde447517 | [
"Apache-2.0"
] | 1 | 2020-10-04T06:54:41.000Z | 2020-10-04T06:54:41.000Z | # Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script runs unit tests for frontend JavaScript code (using Karma)."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import os
import subprocess
import python_utils
from . import build
from . import common
from . import install_third_party_libs
from . import setup
from . import setup_gae
_PARSER = argparse.ArgumentParser(description="""
Run this script from the oppia root folder:
python -m scripts.run_frontend_tests
The root folder MUST be named 'oppia'.
Note: You can replace 'it' with 'fit' or 'describe' with 'fdescribe' to run
a single test or test suite.
""")
_PARSER.add_argument(
'--skip_install',
help='optional; if specified, skips installing dependencies',
action='store_true')
_PARSER.add_argument(
'--run_minified_tests',
help='optional; if specified, runs frontend karma tests on both minified '
'and non-minified code',
action='store_true')
def main(args=None):
    """Runs the frontend (Karma) tests.

    Args:
        args: list(str)|None. Command-line arguments; defaults to sys.argv
            when None (argparse's behaviour).
    """
    parsed_args = _PARSER.parse_args(args=args)
    setup.main(args=[])
    setup_gae.main(args=[])
    if not parsed_args.skip_install:
        install_third_party_libs.main()

    common.print_each_string_after_two_new_lines([
        'View interactive frontend test coverage reports by navigating to',
        '../karma_coverage_reports',
        'on your filesystem.',
        'Running test in development environment'])

    build.main(args=[])
    # Hoisted: both karma invocations share the same binary and config paths.
    karma_binary = os.path.join(
        common.NODE_MODULES_PATH, 'karma', 'bin', 'karma')
    karma_config = os.path.join('core', 'tests', 'karma.conf.ts')
    subprocess.call([karma_binary, 'start', karma_config])

    # Bug fix: previously compared with `is True`, which tests identity
    # rather than truth; argparse's store_true always yields a bool, so a
    # plain truth test is correct and idiomatic.
    if parsed_args.run_minified_tests:
        python_utils.PRINT('Running test in production environment')
        build.main(args=['--prod_env', '--minify_third_party_libs_only'])
        subprocess.call([karma_binary, 'start', karma_config, '--prodEnv'])

    python_utils.PRINT('Done!')
# Entry point when invoked as `python -m scripts.run_frontend_tests`.
if __name__ == '__main__':
    main()
| 31.850575 | 78 | 0.708409 |
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import os
import subprocess
import python_utils
from . import build
from . import common
from . import install_third_party_libs
from . import setup
from . import setup_gae
_PARSER = argparse.ArgumentParser(description="""
Run this script from the oppia root folder:
python -m scripts.run_frontend_tests
The root folder MUST be named 'oppia'.
Note: You can replace 'it' with 'fit' or 'describe' with 'fdescribe' to run
a single test or test suite.
""")
_PARSER.add_argument(
'--skip_install',
help='optional; if specified, skips installing dependencies',
action='store_true')
_PARSER.add_argument(
'--run_minified_tests',
help='optional; if specified, runs frontend karma tests on both minified '
'and non-minified code',
action='store_true')
def main(args=None):
    """Runs the frontend (Karma) tests.

    Args:
        args: list(str)|None. Command-line arguments; defaults to sys.argv
            when None (argparse's behaviour).
    """
    parsed_args = _PARSER.parse_args(args=args)
    setup.main(args=[])
    setup_gae.main(args=[])
    if not parsed_args.skip_install:
        install_third_party_libs.main()

    common.print_each_string_after_two_new_lines([
        'View interactive frontend test coverage reports by navigating to',
        '../karma_coverage_reports',
        'on your filesystem.',
        'Running test in development environment'])

    build.main(args=[])
    # Hoisted: both karma invocations share the same binary and config paths.
    karma_binary = os.path.join(
        common.NODE_MODULES_PATH, 'karma', 'bin', 'karma')
    karma_config = os.path.join('core', 'tests', 'karma.conf.ts')
    subprocess.call([karma_binary, 'start', karma_config])

    # Bug fix: previously compared with `is True`, which tests identity
    # rather than truth; argparse's store_true always yields a bool, so a
    # plain truth test is correct and idiomatic.
    if parsed_args.run_minified_tests:
        python_utils.PRINT('Running test in production environment')
        build.main(args=['--prod_env', '--minify_third_party_libs_only'])
        subprocess.call([karma_binary, 'start', karma_config, '--prodEnv'])

    python_utils.PRINT('Done!')
# Entry point when invoked as `python -m scripts.run_frontend_tests`.
if __name__ == '__main__':
    main()
| true | true |
1c38cac45be561ac81e1260eb4cda1d43f3cefaa | 5,525 | py | Python | xero_python/payrolluk/models/earnings_order.py | sromero84/xero-python | 89558c0baa8080c3f522701eb1b94f909248dbd7 | [
"MIT"
] | null | null | null | xero_python/payrolluk/models/earnings_order.py | sromero84/xero-python | 89558c0baa8080c3f522701eb1b94f909248dbd7 | [
"MIT"
] | null | null | null | xero_python/payrolluk/models/earnings_order.py | sromero84/xero-python | 89558c0baa8080c3f522701eb1b94f909248dbd7 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
OpenAPI spec version: 2.3.4
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class EarningsOrder(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"id": "str",
"name": "str",
"statutory_deduction_category": "StatutoryDeductionCategory",
"liability_account_id": "str",
"current_record": "bool",
}
attribute_map = {
"id": "id",
"name": "name",
"statutory_deduction_category": "statutoryDeductionCategory",
"liability_account_id": "liabilityAccountId",
"current_record": "currentRecord",
}
def __init__(
self,
id=None,
name=None,
statutory_deduction_category=None,
liability_account_id=None,
current_record=True,
): # noqa: E501
"""EarningsOrder - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._name = None
self._statutory_deduction_category = None
self._liability_account_id = None
self._current_record = None
self.discriminator = None
if id is not None:
self.id = id
self.name = name
if statutory_deduction_category is not None:
self.statutory_deduction_category = statutory_deduction_category
if liability_account_id is not None:
self.liability_account_id = liability_account_id
if current_record is not None:
self.current_record = current_record
@property
def id(self):
"""Gets the id of this EarningsOrder. # noqa: E501
Xero unique identifier for an earning rate # noqa: E501
:return: The id of this EarningsOrder. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EarningsOrder.
Xero unique identifier for an earning rate # noqa: E501
:param id: The id of this EarningsOrder. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this EarningsOrder. # noqa: E501
Name of the earning order # noqa: E501
:return: The name of this EarningsOrder. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EarningsOrder.
Name of the earning order # noqa: E501
:param name: The name of this EarningsOrder. # noqa: E501
:type: str
"""
if name is None:
raise ValueError(
"Invalid value for `name`, must not be `None`"
) # noqa: E501
self._name = name
@property
def statutory_deduction_category(self):
"""Gets the statutory_deduction_category of this EarningsOrder. # noqa: E501
:return: The statutory_deduction_category of this EarningsOrder. # noqa: E501
:rtype: StatutoryDeductionCategory
"""
return self._statutory_deduction_category
@statutory_deduction_category.setter
def statutory_deduction_category(self, statutory_deduction_category):
"""Sets the statutory_deduction_category of this EarningsOrder.
:param statutory_deduction_category: The statutory_deduction_category of this EarningsOrder. # noqa: E501
:type: StatutoryDeductionCategory
"""
self._statutory_deduction_category = statutory_deduction_category
@property
def liability_account_id(self):
"""Gets the liability_account_id of this EarningsOrder. # noqa: E501
Xero identifier for Liability Account # noqa: E501
:return: The liability_account_id of this EarningsOrder. # noqa: E501
:rtype: str
"""
return self._liability_account_id
@liability_account_id.setter
def liability_account_id(self, liability_account_id):
"""Sets the liability_account_id of this EarningsOrder.
Xero identifier for Liability Account # noqa: E501
:param liability_account_id: The liability_account_id of this EarningsOrder. # noqa: E501
:type: str
"""
self._liability_account_id = liability_account_id
@property
def current_record(self):
"""Gets the current_record of this EarningsOrder. # noqa: E501
Identifier of a record is active or not. # noqa: E501
:return: The current_record of this EarningsOrder. # noqa: E501
:rtype: bool
"""
return self._current_record
@current_record.setter
def current_record(self, current_record):
"""Sets the current_record of this EarningsOrder.
Identifier of a record is active or not. # noqa: E501
:param current_record: The current_record of this EarningsOrder. # noqa: E501
:type: bool
"""
self._current_record = current_record
| 28.776042 | 114 | 0.628959 |
import re
from xero_python.models import BaseModel
class EarningsOrder(BaseModel):
openapi_types = {
"id": "str",
"name": "str",
"statutory_deduction_category": "StatutoryDeductionCategory",
"liability_account_id": "str",
"current_record": "bool",
}
attribute_map = {
"id": "id",
"name": "name",
"statutory_deduction_category": "statutoryDeductionCategory",
"liability_account_id": "liabilityAccountId",
"current_record": "currentRecord",
}
def __init__(
self,
id=None,
name=None,
statutory_deduction_category=None,
liability_account_id=None,
current_record=True,
):
self._id = None
self._name = None
self._statutory_deduction_category = None
self._liability_account_id = None
self._current_record = None
self.discriminator = None
if id is not None:
self.id = id
self.name = name
if statutory_deduction_category is not None:
self.statutory_deduction_category = statutory_deduction_category
if liability_account_id is not None:
self.liability_account_id = liability_account_id
if current_record is not None:
self.current_record = current_record
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if name is None:
raise ValueError(
"Invalid value for `name`, must not be `None`"
)
self._name = name
@property
def statutory_deduction_category(self):
return self._statutory_deduction_category
@statutory_deduction_category.setter
def statutory_deduction_category(self, statutory_deduction_category):
self._statutory_deduction_category = statutory_deduction_category
@property
def liability_account_id(self):
return self._liability_account_id
@liability_account_id.setter
def liability_account_id(self, liability_account_id):
self._liability_account_id = liability_account_id
@property
def current_record(self):
return self._current_record
@current_record.setter
def current_record(self, current_record):
self._current_record = current_record
| true | true |
1c38cb38e1d009b37783b5306145cc7c6a958358 | 3,311 | py | Python | pyteiser/wrappers/preprocess_custom_expression_profile.py | goodarzilab/pyteiser | 3ac78604c768957022cc7751ccdd337960a816f2 | [
"MIT"
] | 6 | 2020-12-01T08:10:07.000Z | 2022-01-17T02:09:13.000Z | pyteiser/wrappers/preprocess_custom_expression_profile.py | goodarzilab/pyteiser | 3ac78604c768957022cc7751ccdd337960a816f2 | [
"MIT"
] | 4 | 2021-05-19T06:24:30.000Z | 2022-01-27T20:18:44.000Z | pyteiser/wrappers/preprocess_custom_expression_profile.py | goodarzilab/pyteiser | 3ac78604c768957022cc7751ccdd337960a816f2 | [
"MIT"
] | 5 | 2020-07-04T02:05:30.000Z | 2021-06-26T10:24:16.000Z | import numpy as np
import pandas as pd
import argparse
from .. import IO
def handler(raw_args = None):
parser = argparse.ArgumentParser()
parser.add_argument("--rna_bin_file", help="binarized sequence file", type=str)
parser.add_argument("--exp_values_file", help="expression values in a csv format", type=str)
parser.add_argument("--exp_mask_file", help="output file: indicates which sequences are present in the "
"expression file and the expression values for these sequences", type=str)
parser.add_argument("--anno_name_column", help="column name in exp_values file that contains annotations", type=str)
parser.add_argument("--measur_column", help="column name in exp_values file that contains expression measurements", type=str)
parser.set_defaults(
rna_bin_file='/Users/student/Documents/hani/iTEISER/step_2_preprocessing/reference_files/reference_transcriptomes/binarized/SNRNPA1_SE.hg38.fl250.bin',
exp_values_file='/Users/student/Documents/hani/programs/pyteiser/data/expression_data/hg38_miso_se.txt',
exp_mask_file='/Users/student/Documents/hani/programs/pyteiser/data/mask_files/SNRNPA1_PSI_mask.bin',
anno_name_column='eid',
measur_column='diff',
)
args = parser.parse_args(raw_args)
return args
def read_exp_values_file(args, return_meas_dict = True):
exp_df = pd.read_csv(args.exp_values_file, sep='\t',
dtype = {args.anno_name_column : str})
exp_df.index = exp_df[args.anno_name_column]
if return_meas_dict:
measurements_dict_full = exp_df.to_dict()
measurements_dict = measurements_dict_full[args.measur_column]
return exp_df, measurements_dict
else:
return exp_df
def construct_mask_arrays(args):
seqs_dict, seqs_order = IO.read_rna_bin_file(args.rna_bin_file)
exp_df, measurements_dict = read_exp_values_file(args)
transcripts_measured_list = exp_df[args.anno_name_column].tolist()
transcripts_measured_set = set(transcripts_measured_list)
list_indices_occuring = [1 if x in transcripts_measured_set else 0 for x in seqs_order]
list_measurement_values = [measurements_dict[x] if x in transcripts_measured_set else 0 for x in
seqs_order]
array_indices_occuring = np.array(list_indices_occuring, dtype=np.bool)
array_measurement_values = np.array(list_measurement_values, dtype=np.float32)
return array_indices_occuring, array_measurement_values
def compress_write_mask_arrays(index_array, values_array, args):
assert(index_array.shape == values_array.shape)
length_uint32 = np.array([index_array.shape], dtype=np.uint32)
length_bitstring = length_uint32.tobytes()
index_array_bytes = index_array.tobytes()
values_array_bytes = values_array.tobytes()
full_bytes_string = length_bitstring + index_array_bytes + values_array_bytes
with open(args.exp_mask_file, 'wb') as wb:
wb.write(full_bytes_string)
def main(raw_args = None):
args = handler(raw_args)
array_indices_occuring, array_measurement_values = construct_mask_arrays(args)
compress_write_mask_arrays(array_indices_occuring, array_measurement_values, args)
if __name__ == '__main__':
main()
| 39.891566 | 159 | 0.740562 | import numpy as np
import pandas as pd
import argparse
from .. import IO
def handler(raw_args = None):
parser = argparse.ArgumentParser()
parser.add_argument("--rna_bin_file", help="binarized sequence file", type=str)
parser.add_argument("--exp_values_file", help="expression values in a csv format", type=str)
parser.add_argument("--exp_mask_file", help="output file: indicates which sequences are present in the "
"expression file and the expression values for these sequences", type=str)
parser.add_argument("--anno_name_column", help="column name in exp_values file that contains annotations", type=str)
parser.add_argument("--measur_column", help="column name in exp_values file that contains expression measurements", type=str)
parser.set_defaults(
rna_bin_file='/Users/student/Documents/hani/iTEISER/step_2_preprocessing/reference_files/reference_transcriptomes/binarized/SNRNPA1_SE.hg38.fl250.bin',
exp_values_file='/Users/student/Documents/hani/programs/pyteiser/data/expression_data/hg38_miso_se.txt',
exp_mask_file='/Users/student/Documents/hani/programs/pyteiser/data/mask_files/SNRNPA1_PSI_mask.bin',
anno_name_column='eid',
measur_column='diff',
)
args = parser.parse_args(raw_args)
return args
def read_exp_values_file(args, return_meas_dict = True):
exp_df = pd.read_csv(args.exp_values_file, sep='\t',
dtype = {args.anno_name_column : str})
exp_df.index = exp_df[args.anno_name_column]
if return_meas_dict:
measurements_dict_full = exp_df.to_dict()
measurements_dict = measurements_dict_full[args.measur_column]
return exp_df, measurements_dict
else:
return exp_df
def construct_mask_arrays(args):
seqs_dict, seqs_order = IO.read_rna_bin_file(args.rna_bin_file)
exp_df, measurements_dict = read_exp_values_file(args)
transcripts_measured_list = exp_df[args.anno_name_column].tolist()
transcripts_measured_set = set(transcripts_measured_list)
list_indices_occuring = [1 if x in transcripts_measured_set else 0 for x in seqs_order]
list_measurement_values = [measurements_dict[x] if x in transcripts_measured_set else 0 for x in
seqs_order]
array_indices_occuring = np.array(list_indices_occuring, dtype=np.bool)
array_measurement_values = np.array(list_measurement_values, dtype=np.float32)
return array_indices_occuring, array_measurement_values
def compress_write_mask_arrays(index_array, values_array, args):
assert(index_array.shape == values_array.shape)
length_uint32 = np.array([index_array.shape], dtype=np.uint32)
length_bitstring = length_uint32.tobytes()
index_array_bytes = index_array.tobytes()
values_array_bytes = values_array.tobytes()
full_bytes_string = length_bitstring + index_array_bytes + values_array_bytes
with open(args.exp_mask_file, 'wb') as wb:
wb.write(full_bytes_string)
def main(raw_args = None):
args = handler(raw_args)
array_indices_occuring, array_measurement_values = construct_mask_arrays(args)
compress_write_mask_arrays(array_indices_occuring, array_measurement_values, args)
if __name__ == '__main__':
main()
| true | true |
1c38cc555b631ea1f4f20f9527e9744aeeb5bda1 | 2,928 | py | Python | font.py | SnorlaxH/neodgm | 72844d6a32a6ba6c4064b03bc19ebe21d92037ed | [
"WTFPL"
] | null | null | null | font.py | SnorlaxH/neodgm | 72844d6a32a6ba6c4064b03bc19ebe21d92037ed | [
"WTFPL"
] | null | null | null | font.py | SnorlaxH/neodgm | 72844d6a32a6ba6c4064b03bc19ebe21d92037ed | [
"WTFPL"
] | null | null | null | import os
import sys
import glob
import fontforge
if len(sys.argv) < 2:
print('Expected a version string as the first argument.')
sys.exit(1)
#
# These tables are used to combine hangul syllable characters.
#
cho_tbl = [
[0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 3, 3, 1, 2, 4, 4, 4, 2, 1, 3, 0],
[5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 6, 6, 7, 7, 7, 6, 6, 7, 5]
]
jung_tbl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
jong_tbl = [0, 2, 0, 2, 1, 2, 1, 2, 3, 0, 2, 1, 3, 3, 1, 2, 1, 3, 3, 1, 1]
#
# Create an instance of FontForge font and set metadata.
#
# Font names
name = 'NeoDunggeunmo'
font = fontforge.font()
font.familyname = name
font.fontname = name
font.fullname = name
font.version = sys.argv[1]
font.copyright = \
"""Original font was released under the public domain by Jungtae Kim in 1990s.
Conversion & additional character design by Dalgona. <dalgona@hontou.moe>"""
font.appendSFNTName(0x409, 14, 'http://scripts.sil.org/OFL')
with open('ofl_raw.txt', 'r') as f:
font.appendSFNTName(0x409, 13, f.read())
# Font metrics
font.ascent = 12
font.descent = 4
font.upos = -4
font.uwidth = 1
panose = list(font.os2_panose)
panose[3] = 9
font.os2_panose = tuple(panose)
svg_uni = glob.glob('svg/U*.svg')
svg_named = glob.glob('svg/_*.svg')
for path in svg_uni:
code_str, width_str = path[5:-4].split('@')
code = int(code_str, 16)
width = int(width_str)
print('Creating Unicode glyph %d...' % code)
g = font.createChar(code)
g.width = width
g.importOutlines(path)
g.removeOverlap()
g.simplify()
for path in svg_named:
name, width_str = path[5:-4].split('@')
width = int(width_str)
print('Creating named glyph "%s"...' % name)
g = font.createChar(-1, name)
g.width = width
g.importOutlines(path)
g.removeOverlap()
g.simplify()
print('Filling `Hangul Jamo` Unicode block...')
# Fill Hangul Choseong
for i in range(0, 19):
g = font.createChar(0x1100 + i)
g.width = 16
g.addReference('cho_%d_0' % i)
# Fill Hangul Jungseong
for i in range(0, 21):
g = font.createChar(0x1161 + i)
g.width = 16
g.addReference('jung_%d_0' % i)
# Fill Hangul Jongseong
for i in range(0, 27):
g = font.createChar(0x11A8 + i)
g.width = 16
g.addReference('jong_%d_0' % (i + 1))
print('Filling `Hangul Syllables` Unicode block. This may take a while...')
# Compose 11172 glyphs
for i in range(0xAC00, 0xD7A4):
g = font.createChar(i)
g.width = 16
g.clear()
a = (i - 0xAC00) // (21 * 28)
b = ((i - 0xAC00) % (21 * 28)) // 28
c = (i - 0xAC00) % 28
x = cho_tbl[1 if c else 0][b]
y = jung_tbl[a] + (2 if c else 0)
z = jong_tbl[b]
g.addReference('cho_%d_%d' % (a, x))
g.addReference('jung_%d_%d' % (b, y))
if c != 0:
g.addReference('jong_%d_%d' % (c, z))
# all done!
print('Generating TTF...')
font.save('neodgm.sfd')
font.generate('neodgm.ttf')
print('Done.')
| 25.911504 | 78 | 0.612363 | import os
import sys
import glob
import fontforge
if len(sys.argv) < 2:
print('Expected a version string as the first argument.')
sys.exit(1)
cho_tbl = [
[0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 3, 3, 1, 2, 4, 4, 4, 2, 1, 3, 0],
[5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 6, 6, 7, 7, 7, 6, 6, 7, 5]
]
jung_tbl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
jong_tbl = [0, 2, 0, 2, 1, 2, 1, 2, 3, 0, 2, 1, 3, 3, 1, 2, 1, 3, 3, 1, 1]
name = 'NeoDunggeunmo'
font = fontforge.font()
font.familyname = name
font.fontname = name
font.fullname = name
font.version = sys.argv[1]
font.copyright = \
"""Original font was released under the public domain by Jungtae Kim in 1990s.
Conversion & additional character design by Dalgona. <dalgona@hontou.moe>"""
font.appendSFNTName(0x409, 14, 'http://scripts.sil.org/OFL')
with open('ofl_raw.txt', 'r') as f:
font.appendSFNTName(0x409, 13, f.read())
font.ascent = 12
font.descent = 4
font.upos = -4
font.uwidth = 1
panose = list(font.os2_panose)
panose[3] = 9
font.os2_panose = tuple(panose)
svg_uni = glob.glob('svg/U*.svg')
svg_named = glob.glob('svg/_*.svg')
for path in svg_uni:
code_str, width_str = path[5:-4].split('@')
code = int(code_str, 16)
width = int(width_str)
print('Creating Unicode glyph %d...' % code)
g = font.createChar(code)
g.width = width
g.importOutlines(path)
g.removeOverlap()
g.simplify()
for path in svg_named:
name, width_str = path[5:-4].split('@')
width = int(width_str)
print('Creating named glyph "%s"...' % name)
g = font.createChar(-1, name)
g.width = width
g.importOutlines(path)
g.removeOverlap()
g.simplify()
print('Filling `Hangul Jamo` Unicode block...')
for i in range(0, 19):
g = font.createChar(0x1100 + i)
g.width = 16
g.addReference('cho_%d_0' % i)
for i in range(0, 21):
g = font.createChar(0x1161 + i)
g.width = 16
g.addReference('jung_%d_0' % i)
for i in range(0, 27):
g = font.createChar(0x11A8 + i)
g.width = 16
g.addReference('jong_%d_0' % (i + 1))
print('Filling `Hangul Syllables` Unicode block. This may take a while...')
for i in range(0xAC00, 0xD7A4):
g = font.createChar(i)
g.width = 16
g.clear()
a = (i - 0xAC00) // (21 * 28)
b = ((i - 0xAC00) % (21 * 28)) // 28
c = (i - 0xAC00) % 28
x = cho_tbl[1 if c else 0][b]
y = jung_tbl[a] + (2 if c else 0)
z = jong_tbl[b]
g.addReference('cho_%d_%d' % (a, x))
g.addReference('jung_%d_%d' % (b, y))
if c != 0:
g.addReference('jong_%d_%d' % (c, z))
print('Generating TTF...')
font.save('neodgm.sfd')
font.generate('neodgm.ttf')
print('Done.')
| true | true |
1c38cc6e68364eecde6dbfe7df3828874708e5bf | 1,335 | py | Python | setup.py | KingNonso/django-paystack | 15cfccd54cf4d6aa45d52c492394280339aca3ca | [
"MIT"
] | null | null | null | setup.py | KingNonso/django-paystack | 15cfccd54cf4d6aa45d52c492394280339aca3ca | [
"MIT"
] | null | null | null | setup.py | KingNonso/django-paystack | 15cfccd54cf4d6aa45d52c492394280339aca3ca | [
"MIT"
] | null | null | null | import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-paystack',
version='0.2',
packages=find_packages(),
include_package_data=True,
license='MIT License', # example license
description='A reusable app for making online payments with paystack',
long_description=README,
url='https://www.example.com/',
author='Biola Oyeniyi',
author_email='gbozee@gmail.com',
install_requires=[
'requests','future'
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: X.Y', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
) | 35.131579 | 78 | 0.6397 | import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-paystack',
version='0.2',
packages=find_packages(),
include_package_data=True,
license='MIT License',
description='A reusable app for making online payments with paystack',
long_description=README,
url='https://www.example.com/',
author='Biola Oyeniyi',
author_email='gbozee@gmail.com',
install_requires=[
'requests','future'
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: X.Y',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
) | true | true |
1c38cf6402df531be865106d96fa9846fd8c5086 | 1,622 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/wsgi.py | huogerac/cookiecutter-django-simplesite | 1b332aa1668b85b34dd486c451d1489abc8a719e | [
"BSD-3-Clause"
] | 1 | 2016-10-22T22:42:24.000Z | 2016-10-22T22:42:24.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/wsgi.py | huogerac/cookiecutter-django-simplesite | 1b332aa1668b85b34dd486c451d1489abc8a719e | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/wsgi.py | huogerac/cookiecutter-django-simplesite | 1b332aa1668b85b34dd486c451d1489abc8a719e | [
"BSD-3-Clause"
] | null | null | null | """
WSGI config for {{ cookiecutter.project_slug }} project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ cookiecutter.project_slug }}.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application) | 41.589744 | 102 | 0.800863 | import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ cookiecutter.project_slug }}.settings.production")
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application) | true | true |
1c38d23c0dff02c34905f15172951975a609a35b | 4,537 | py | Python | eval_segmentation.py | CheesyB/cpointnet | bcb3eaa44e50fc643e2b226457f4f583989664a9 | [
"MIT"
] | null | null | null | eval_segmentation.py | CheesyB/cpointnet | bcb3eaa44e50fc643e2b226457f4f583989664a9 | [
"MIT"
] | null | null | null | eval_segmentation.py | CheesyB/cpointnet | bcb3eaa44e50fc643e2b226457f4f583989664a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import glob
import os
import numpy as np
import random
import time
import datetime
import torch
import torch.optim as optim
from torch.autograd import Variable
from pathlib import Path
from torch.autograd import Variable
from dataset.scenedataset import SceneDataset
from pointnet import PointNetDenseCls
import torch.nn.functional as F
from pcgen.util import tictoc # brauchen wir das?
from pcgen.util import utils # brauchen wir das?
from logger import Logger
from dataset import renderdataset
def eval_segmentation(folder_path,params):
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('eval.segmentation')
logging.getLogger('pointnet.SceneDataset').setLevel(level=logging.CRITICAL)
""" where to save the net predictions """
now = datetime.datetime.now()
folder_path_eval = folder_path + '/eval_{}'.format(now.strftime('%d.%m_%H:%M'))
os.makedirs(folder_path_eval,exist_ok=True)
assert os.path.isdir(folder_path_eval) , 'folder path does not exist'
logger.info('predicion output folder: {}'.format(folder_path_eval))
""" clean setup """
torch.cuda.empty_cache()
manualSeed = random.randint(1, 10000) # fix seed
random.seed(manualSeed)
torch.manual_seed(manualSeed)
""" get dataset """
batch_size = 20
eval_dataset = SceneDataset(params['testset_path'],2500)
eval_dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=batch_size,
shuffle=False, num_workers=0)
logger.info('length dataset: {}\n'
'length training set: {}'.format(len(eval_dataset),len(eval_dataset)))
""" render dataset """
renderdataset.render_dataset(params['testset_path'],folder_path_eval)
""" pararmeter """
num_classes = params['number of classes']
#num_batch = int(len(eval_dataset)/batch_size)
num_batch = 1
logger.info('We are looking for {} classes'.format(num_classes))
""" setup net and load trained weights
search for the latest modified file which is the last model checkpoint """
list_of_files = glob.glob(params['folder_path_chekp'] + '/*')
latest_file = max(list_of_files, key=os.path.getctime)
classifier = PointNetDenseCls(k = num_classes)
classifier.load_state_dict(torch.load(latest_file))
classifier.cuda()
optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
""" for one epoch, why should we go for more:) """
tick = time.time()
for idx in range(num_batch):
tack = time.time() - tick
tick = time.time()
_,data = next(enumerate(eval_dataloader, 0))
points, target = data
points = points.transpose(2,1)
""" tell classifier that its exam today"""
classifier = classifier.eval()
pred, _ = classifier(points)
""" view is better than reshape (not sure) """
pred = pred.view(-1, num_classes)
target = target.view(-1,1)[:,0] - 1
loss = F.nll_loss(pred, target)
""" reshape until comparison is easy and render"""
pred_choice = pred.data.max(1)[1]
np_pred_choice = pred_choice.cpu().detach().numpy()
np_points = data[0].cpu().detach().numpy()
np_target = data[1].cpu().detach().numpy() - 1
utils.render_batch(np_points,np_pred_choice,
folder_path_eval)
utils.render_batch_bool(np_points,np_pred_choice,np_target,
folder_path_eval)
correct = pred_choice.eq(target.data).cpu().sum()
accuracy = correct.item()/float(batch_size * 2500)
""" tensorflow logger """
info = { 'test_loss': loss.item(), 'test_accuracy': accuracy }
""" console logger """
logger.info('[{}: {}/{}] {} loss: {:2.3f} accuracy: {:2.3f}'.format(1, idx,
num_batch, 'test', loss.item(),accuracy))
if __name__ == "__main__":
now = datetime.datetime.now()
folder_path = '/home/tbreu/workbench/cpointnet/seg/08.12_18:08_run'
params = {'testset_path':'dataset/data/current/test_C11_S15.hd5f',
'folder_path_chekp':'/home/tbreu/workbench/cpointnet/seg/08.12_18:08_run/chekp',
'number of classes':11,}
eval_segmentation(folder_path,params)
#tensorsizes out of the net...
#torch.Size([32, 3, 2500])
#torch.Size([32, 2500])
| 34.633588 | 100 | 0.642495 |
import logging
import glob
import os
import numpy as np
import random
import time
import datetime
import torch
import torch.optim as optim
from torch.autograd import Variable
from pathlib import Path
from torch.autograd import Variable
from dataset.scenedataset import SceneDataset
from pointnet import PointNetDenseCls
import torch.nn.functional as F
from pcgen.util import tictoc
from pcgen.util import utils
from logger import Logger
from dataset import renderdataset
def eval_segmentation(folder_path,params):
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('eval.segmentation')
logging.getLogger('pointnet.SceneDataset').setLevel(level=logging.CRITICAL)
now = datetime.datetime.now()
folder_path_eval = folder_path + '/eval_{}'.format(now.strftime('%d.%m_%H:%M'))
os.makedirs(folder_path_eval,exist_ok=True)
assert os.path.isdir(folder_path_eval) , 'folder path does not exist'
logger.info('predicion output folder: {}'.format(folder_path_eval))
torch.cuda.empty_cache()
manualSeed = random.randint(1, 10000)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
batch_size = 20
eval_dataset = SceneDataset(params['testset_path'],2500)
eval_dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=batch_size,
shuffle=False, num_workers=0)
logger.info('length dataset: {}\n'
'length training set: {}'.format(len(eval_dataset),len(eval_dataset)))
renderdataset.render_dataset(params['testset_path'],folder_path_eval)
num_classes = params['number of classes']
num_batch = 1
logger.info('We are looking for {} classes'.format(num_classes))
list_of_files = glob.glob(params['folder_path_chekp'] + '/*')
latest_file = max(list_of_files, key=os.path.getctime)
classifier = PointNetDenseCls(k = num_classes)
classifier.load_state_dict(torch.load(latest_file))
classifier.cuda()
optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
tick = time.time()
for idx in range(num_batch):
tack = time.time() - tick
tick = time.time()
_,data = next(enumerate(eval_dataloader, 0))
points, target = data
points = points.transpose(2,1)
classifier = classifier.eval()
pred, _ = classifier(points)
pred = pred.view(-1, num_classes)
target = target.view(-1,1)[:,0] - 1
loss = F.nll_loss(pred, target)
pred_choice = pred.data.max(1)[1]
np_pred_choice = pred_choice.cpu().detach().numpy()
np_points = data[0].cpu().detach().numpy()
np_target = data[1].cpu().detach().numpy() - 1
utils.render_batch(np_points,np_pred_choice,
folder_path_eval)
utils.render_batch_bool(np_points,np_pred_choice,np_target,
folder_path_eval)
correct = pred_choice.eq(target.data).cpu().sum()
accuracy = correct.item()/float(batch_size * 2500)
info = { 'test_loss': loss.item(), 'test_accuracy': accuracy }
logger.info('[{}: {}/{}] {} loss: {:2.3f} accuracy: {:2.3f}'.format(1, idx,
num_batch, 'test', loss.item(),accuracy))
if __name__ == "__main__":
now = datetime.datetime.now()
folder_path = '/home/tbreu/workbench/cpointnet/seg/08.12_18:08_run'
params = {'testset_path':'dataset/data/current/test_C11_S15.hd5f',
'folder_path_chekp':'/home/tbreu/workbench/cpointnet/seg/08.12_18:08_run/chekp',
'number of classes':11,}
eval_segmentation(folder_path,params)
| true | true |
1c38d3247046cb64c3723310257d23830803db72 | 7,216 | py | Python | src/resources/crawlerResource.py | juliangruendner/ketos_preprocessing | b56f2fd6efe3e9156959b9bafe9b5550214ea65a | [
"MIT"
] | 2 | 2018-07-05T15:37:47.000Z | 2018-09-24T18:11:20.000Z | src/resources/crawlerResource.py | juliangruendner/ketos_preprocessing | b56f2fd6efe3e9156959b9bafe9b5550214ea65a | [
"MIT"
] | null | null | null | src/resources/crawlerResource.py | juliangruendner/ketos_preprocessing | b56f2fd6efe3e9156959b9bafe9b5550214ea65a | [
"MIT"
] | null | null | null | from flask_restful import reqparse, abort
from flask_restful_swagger_2 import swagger, Resource
from resources import aggregationResource
import configuration
from flask import request
from lib import mongodbConnection, crawler
from bson.objectid import ObjectId
from bson import json_util
from datetime import datetime
from cerberus import Validator
import json
import copy
import urllib.parse
import logging
logger = logging.getLogger(__name__)
import sys
from lib.brainApiAccess import BrainApiAccess
NO_PATIENTS_STR = "No patients provided"
FEATURE_SET_SCHEMA = {
'resource': {'required': True, 'type': 'string'}, # Name of resource
'key': {'required': True, 'type': 'string'}, # Key of resource to apply search query, e.g. "code"
'value': {'required': True, 'type': 'string'}, # Value that key of resource must have to be retrieved, e.g. find code "21522001"
'name': {'type': 'string'}, # Human readable name of the value and to be column name of table, e.g. "Abdominal Pain"
'resource_val_path': {'type': 'string'}
}
RESOURCE_CONFIG_SCHEMA = {
'resource_name': {'required': True, 'type': 'string'}, # Name of resource, e.g. "Condition"
'resource_value_relative_path': {'required': True, 'type': 'string'}, # Path to look for actual value of a resource, e.g. "category/coding/code"
'sort_order': {'type': 'list', 'schema': {'type': 'string'}} # Order to sort retrieved values after, necessary for sorting for newest value
}
from swagger_resources import crawlerResourceSwagger
def feature_set_validator(value):
v = Validator(FEATURE_SET_SCHEMA)
if v.validate(value):
return value
else:
raise ValueError(json.dumps(v.errors))
def resource_config_validator(value):
v = Validator(RESOURCE_CONFIG_SCHEMA)
if v.validate(value):
return value
else:
raise ValueError(json.dumps(v.errors))
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument('feature_set', type = feature_set_validator, action = 'append', required = True, location = 'json')
parser.add_argument('aggregation_type', type = str, default="latest", location = 'json')
parser.add_argument('resource_configs', type = resource_config_validator, action = 'append', location = 'json')
class Crawler(Resource):
crawler_parser = parser.copy()
crawler_parser.add_argument('patient', type = str, required = True, action='append', help = NO_PATIENTS_STR, location = 'json')
def __init__(self):
super(Crawler, self).__init__()
@swagger.doc({
"description": "Start a Crawler Job for a single patient and wait until it's finished.",
"tags": ["crawler"],
"parameters": crawlerResourceSwagger.swagger_params_patient,
"responses": {
"200": {
"description": "Retrieved a json with a URL to the generated CSV and the exit status of the Crawler.",
"schema": {
"type": "object",
"properties": {
"csv_url": {
"type": "string"
},
"crawler_status": {
"type": "string"
}
},
"example": {
"csv_url": "URL",
"crawler_status": "One of [success, error]"
}
}
}
}
})
@BrainApiAccess()
def post(self):
args = self.crawler_parser.parse_args()
crawler_id = str(ObjectId())
logger.debug(args["resource_configs"])
crawlerJob = crawler.createCrawlerJob(crawler_id, "running", args["patient"], args["feature_set"], args["aggregation_type"], args["resource_configs"])
crawlerStatus = crawler.executeCrawlerJob(crawlerJob)
return {"crawler_id": crawler_id, "csv_url": crawlerJob["url"], "crawler_status": crawlerStatus}
class CrawlerJobs(Resource):
crawler_jobs_parser = parser.copy()
crawler_jobs_parser.add_argument('patient_ids', type = str, action = 'append', required = True, help = NO_PATIENTS_STR, location = 'json')
def __init__(self):
super(CrawlerJobs, self).__init__()
@swagger.doc({
"description": "Get all submitted Crawler Jobs.",
"tags": ["crawler"],
"responses": {
"200": {
"description": "Retrieved Crawler Job(s) as json.",
"schema": {
"type": "array",
"items": crawlerResourceSwagger.swagger_crawler_schema,
}
}
}
})
@BrainApiAccess()
def get(self):
print(request.environ['REMOTE_ADDR'], file=sys.stderr)
return list(mongodbConnection.get_db().crawlerJobs.find())
@swagger.doc({
"description": "Submit a Crawler Job.",
"tags": ["crawler"],
"parameters": crawlerResourceSwagger.swagger_params_patients,
"responses": {
"200": {
"description": "Retrieved a json with the created Crawler ID.",
"schema": {
"type": "object",
"properties": {
"id": {
"type": "string"
}
}
}
},
"400": {
"description": NO_PATIENTS_STR
}
}
})
@BrainApiAccess()
def post(self):
args = self.crawler_jobs_parser.parse_args()
crawlerJob = crawler.createCrawlerJob(str(ObjectId()), "queued", args["patient_ids"], args["feature_set"], args["aggregation_type"], args["resource_configs"])
return {"id": crawlerJob["_id"]}
@swagger.doc({
"description": "Remove all submitted Crawler Jobs.",
"tags": ["crawler"],
"responses": {
"200": {
"description": "Number of deleted Crawler Jobs."
}
}
})
def delete(self):
ret = mongodbConnection.get_db().crawlerJobs.delete_many({})
return ret.deleted_count
class CrawlerJob(Resource):
@BrainApiAccess()
def __init__(self):
super(CrawlerJob, self).__init__()
@swagger.doc({
"description": "Retrieve a single Crawler Job.",
"tags": ["crawler"],
"parameters":[
{
"name": "crawler_id",
"in": "path",
"type": "string",
"description": "The ID of the crawler to be retrieved.",
"required": True
}
],
"responses": {
"200": {
"description": "Retrieved Crawler Job as json.",
"schema": crawlerResourceSwagger.swagger_crawler_schema
}
}
})
def get(self, crawler_id):
return mongodbConnection.get_db().crawlerJobs.find_one({"_id": crawler_id})
def delete(self, crawler_id):
ret = mongodbConnection.get_db().crawlerJobs.delete_many({"_id": crawler_id})
return ret.deleted_count | 34.859903 | 166 | 0.575388 | from flask_restful import reqparse, abort
from flask_restful_swagger_2 import swagger, Resource
from resources import aggregationResource
import configuration
from flask import request
from lib import mongodbConnection, crawler
from bson.objectid import ObjectId
from bson import json_util
from datetime import datetime
from cerberus import Validator
import json
import copy
import urllib.parse
import logging
logger = logging.getLogger(__name__)
import sys
from lib.brainApiAccess import BrainApiAccess
NO_PATIENTS_STR = "No patients provided"
FEATURE_SET_SCHEMA = {
'resource': {'required': True, 'type': 'string'},
'key': {'required': True, 'type': 'string'},
'value': {'required': True, 'type': 'string'},
'name': {'type': 'string'},
'resource_val_path': {'type': 'string'}
}
RESOURCE_CONFIG_SCHEMA = {
'resource_name': {'required': True, 'type': 'string'},
'resource_value_relative_path': {'required': True, 'type': 'string'},
'sort_order': {'type': 'list', 'schema': {'type': 'string'}}
}
from swagger_resources import crawlerResourceSwagger
def feature_set_validator(value):
v = Validator(FEATURE_SET_SCHEMA)
if v.validate(value):
return value
else:
raise ValueError(json.dumps(v.errors))
def resource_config_validator(value):
v = Validator(RESOURCE_CONFIG_SCHEMA)
if v.validate(value):
return value
else:
raise ValueError(json.dumps(v.errors))
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument('feature_set', type = feature_set_validator, action = 'append', required = True, location = 'json')
parser.add_argument('aggregation_type', type = str, default="latest", location = 'json')
parser.add_argument('resource_configs', type = resource_config_validator, action = 'append', location = 'json')
class Crawler(Resource):
crawler_parser = parser.copy()
crawler_parser.add_argument('patient', type = str, required = True, action='append', help = NO_PATIENTS_STR, location = 'json')
def __init__(self):
super(Crawler, self).__init__()
@swagger.doc({
"description": "Start a Crawler Job for a single patient and wait until it's finished.",
"tags": ["crawler"],
"parameters": crawlerResourceSwagger.swagger_params_patient,
"responses": {
"200": {
"description": "Retrieved a json with a URL to the generated CSV and the exit status of the Crawler.",
"schema": {
"type": "object",
"properties": {
"csv_url": {
"type": "string"
},
"crawler_status": {
"type": "string"
}
},
"example": {
"csv_url": "URL",
"crawler_status": "One of [success, error]"
}
}
}
}
})
@BrainApiAccess()
def post(self):
args = self.crawler_parser.parse_args()
crawler_id = str(ObjectId())
logger.debug(args["resource_configs"])
crawlerJob = crawler.createCrawlerJob(crawler_id, "running", args["patient"], args["feature_set"], args["aggregation_type"], args["resource_configs"])
crawlerStatus = crawler.executeCrawlerJob(crawlerJob)
return {"crawler_id": crawler_id, "csv_url": crawlerJob["url"], "crawler_status": crawlerStatus}
class CrawlerJobs(Resource):
crawler_jobs_parser = parser.copy()
crawler_jobs_parser.add_argument('patient_ids', type = str, action = 'append', required = True, help = NO_PATIENTS_STR, location = 'json')
def __init__(self):
super(CrawlerJobs, self).__init__()
@swagger.doc({
"description": "Get all submitted Crawler Jobs.",
"tags": ["crawler"],
"responses": {
"200": {
"description": "Retrieved Crawler Job(s) as json.",
"schema": {
"type": "array",
"items": crawlerResourceSwagger.swagger_crawler_schema,
}
}
}
})
@BrainApiAccess()
def get(self):
print(request.environ['REMOTE_ADDR'], file=sys.stderr)
return list(mongodbConnection.get_db().crawlerJobs.find())
@swagger.doc({
"description": "Submit a Crawler Job.",
"tags": ["crawler"],
"parameters": crawlerResourceSwagger.swagger_params_patients,
"responses": {
"200": {
"description": "Retrieved a json with the created Crawler ID.",
"schema": {
"type": "object",
"properties": {
"id": {
"type": "string"
}
}
}
},
"400": {
"description": NO_PATIENTS_STR
}
}
})
@BrainApiAccess()
def post(self):
args = self.crawler_jobs_parser.parse_args()
crawlerJob = crawler.createCrawlerJob(str(ObjectId()), "queued", args["patient_ids"], args["feature_set"], args["aggregation_type"], args["resource_configs"])
return {"id": crawlerJob["_id"]}
@swagger.doc({
"description": "Remove all submitted Crawler Jobs.",
"tags": ["crawler"],
"responses": {
"200": {
"description": "Number of deleted Crawler Jobs."
}
}
})
def delete(self):
ret = mongodbConnection.get_db().crawlerJobs.delete_many({})
return ret.deleted_count
class CrawlerJob(Resource):
@BrainApiAccess()
def __init__(self):
super(CrawlerJob, self).__init__()
@swagger.doc({
"description": "Retrieve a single Crawler Job.",
"tags": ["crawler"],
"parameters":[
{
"name": "crawler_id",
"in": "path",
"type": "string",
"description": "The ID of the crawler to be retrieved.",
"required": True
}
],
"responses": {
"200": {
"description": "Retrieved Crawler Job as json.",
"schema": crawlerResourceSwagger.swagger_crawler_schema
}
}
})
def get(self, crawler_id):
return mongodbConnection.get_db().crawlerJobs.find_one({"_id": crawler_id})
def delete(self, crawler_id):
ret = mongodbConnection.get_db().crawlerJobs.delete_many({"_id": crawler_id})
return ret.deleted_count | true | true |
1c38d5197986669398b23b7516d88f9ff6dafa61 | 18,451 | py | Python | python/paddle/__init__.py | heliqi/Paddle | feb0ed1b912b66de55c30d8af1ccc61cc1fdf59b | [
"Apache-2.0"
] | null | null | null | python/paddle/__init__.py | heliqi/Paddle | feb0ed1b912b66de55c30d8af1ccc61cc1fdf59b | [
"Apache-2.0"
] | 9 | 2021-08-03T11:39:03.000Z | 2021-09-16T08:03:58.000Z | python/paddle/__init__.py | heliqi/Paddle | feb0ed1b912b66de55c30d8af1ccc61cc1fdf59b | [
"Apache-2.0"
] | 1 | 2021-07-15T09:23:23.000Z | 2021-07-15T09:23:23.000Z | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from paddle.version import full_version as __version__
from paddle.version import commit as __git_commit__
except ImportError:
import sys
sys.stderr.write('''Warning with import paddle: you should not
import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
)
from .batch import batch # noqa: F401
from .fluid import monkey_patch_variable
from .fluid.dygraph import monkey_patch_math_varbase
monkey_patch_variable()
monkey_patch_math_varbase()
from .framework.dtype import dtype as dtype # noqa: F401
from paddle.framework.dtype import uint8 # noqa: F401
from paddle.framework.dtype import int8 # noqa: F401
from paddle.framework.dtype import int16 # noqa: F401
from paddle.framework.dtype import int32 # noqa: F401
from paddle.framework.dtype import int64 # noqa: F401
from paddle.framework.dtype import float16 # noqa: F401
from paddle.framework.dtype import float32 # noqa: F401
from paddle.framework.dtype import float64 # noqa: F401
from paddle.framework.dtype import bfloat16 # noqa: F401
from paddle.framework.dtype import bool # noqa: F401
from paddle.framework.dtype import complex64 # noqa: F401
from paddle.framework.dtype import complex128 # noqa: F401
from .framework import VarBase as Tensor # noqa: F401
Tensor.__qualname__ = 'Tensor' # noqa: F401
import paddle.compat # noqa: F401
import paddle.distributed # noqa: F401
import paddle.sysconfig # noqa: F401
import paddle.distribution # noqa: F401
import paddle.nn # noqa: F401
import paddle.distributed.fleet # noqa: F401
import paddle.optimizer # noqa: F401
import paddle.metric # noqa: F401
import paddle.regularizer # noqa: F401
import paddle.incubate # noqa: F401
import paddle.autograd # noqa: F401
import paddle.device # noqa: F401
import paddle.jit # noqa: F401
import paddle.amp # noqa: F401
import paddle.dataset # noqa: F401
import paddle.inference # noqa: F401
import paddle.io # noqa: F401
import paddle.onnx # noqa: F401
import paddle.reader # noqa: F401
import paddle.static # noqa: F401
import paddle.vision # noqa: F401
from .tensor.random import bernoulli # noqa: F401
from .tensor.attribute import rank # noqa: F401
from .tensor.attribute import shape # noqa: F401
from .tensor.attribute import real # noqa: F401
from .tensor.attribute import imag # noqa: F401
from .tensor.creation import to_tensor # noqa: F401
from .tensor.creation import diag # noqa: F401
from .tensor.creation import diagflat # noqa: F401
from .tensor.creation import eye # noqa: F401
from .tensor.creation import linspace # noqa: F401
from .tensor.creation import ones # noqa: F401
from .tensor.creation import ones_like # noqa: F401
from .tensor.creation import zeros # noqa: F401
from .tensor.creation import zeros_like # noqa: F401
from .tensor.creation import arange # noqa: F401
from .tensor.creation import full # noqa: F401
from .tensor.creation import full_like # noqa: F401
from .tensor.creation import triu # noqa: F401
from .tensor.creation import tril # noqa: F401
from .tensor.creation import meshgrid # noqa: F401
from .tensor.creation import empty # noqa: F401
from .tensor.creation import empty_like # noqa: F401
from .tensor.creation import assign # noqa: F401
from .tensor.linalg import matmul # noqa: F401
from .tensor.linalg import dot # noqa: F401
from .tensor.linalg import norm # noqa: F401
from .tensor.linalg import transpose # noqa: F401
from .tensor.linalg import dist # noqa: F401
from .tensor.linalg import t # noqa: F401
from .tensor.linalg import cross # noqa: F401
from .tensor.linalg import cholesky # noqa: F401
from .tensor.linalg import bmm # noqa: F401
from .tensor.linalg import histogram # noqa: F401
from .tensor.linalg import mv # noqa: F401
from .tensor.linalg import matrix_power # noqa: F401
from .tensor.logic import equal # noqa: F401
from .tensor.logic import greater_equal # noqa: F401
from .tensor.logic import greater_than # noqa: F401
from .tensor.logic import is_empty # noqa: F401
from .tensor.logic import less_equal # noqa: F401
from .tensor.logic import less_than # noqa: F401
from .tensor.logic import logical_and # noqa: F401
from .tensor.logic import logical_not # noqa: F401
from .tensor.logic import logical_or # noqa: F401
from .tensor.logic import logical_xor # noqa: F401
from .tensor.logic import bitwise_and # noqa: F401
from .tensor.logic import bitwise_not # noqa: F401
from .tensor.logic import bitwise_or # noqa: F401
from .tensor.logic import bitwise_xor # noqa: F401
from .tensor.logic import not_equal # noqa: F401
from .tensor.logic import allclose # noqa: F401
from .tensor.logic import equal_all # noqa: F401
from .tensor.logic import is_tensor # noqa: F401
from .tensor.manipulation import cast # noqa: F401
from .tensor.manipulation import concat # noqa: F401
from .tensor.manipulation import broadcast_tensors # noqa: F401
from .tensor.manipulation import expand # noqa: F401
from .tensor.manipulation import broadcast_to # noqa: F401
from .tensor.manipulation import expand_as # noqa: F401
from .tensor.manipulation import tile # noqa: F401
from .tensor.manipulation import flatten # noqa: F401
from .tensor.manipulation import gather # noqa: F401
from .tensor.manipulation import gather_nd # noqa: F401
from .tensor.manipulation import reshape # noqa: F401
from .tensor.manipulation import reshape_ # noqa: F401
from .tensor.manipulation import flip as reverse # noqa: F401
from .tensor.manipulation import scatter # noqa: F401
from .tensor.manipulation import scatter_ # noqa: F401
from .tensor.manipulation import scatter_nd_add # noqa: F401
from .tensor.manipulation import scatter_nd # noqa: F401
from .tensor.manipulation import shard_index # noqa: F401
from .tensor.manipulation import slice # noqa: F401
from .tensor.manipulation import split # noqa: F401
from .tensor.manipulation import squeeze # noqa: F401
from .tensor.manipulation import squeeze_ # noqa: F401
from .tensor.manipulation import stack # noqa: F401
from .tensor.manipulation import strided_slice # noqa: F401
from .tensor.manipulation import unique # noqa: F401
from .tensor.manipulation import unique_consecutive # noqa: F401
from .tensor.manipulation import unsqueeze # noqa: F401
from .tensor.manipulation import unsqueeze_ # noqa: F401
from .tensor.manipulation import unstack # noqa: F401
from .tensor.manipulation import flip # noqa: F401
from .tensor.manipulation import unbind # noqa: F401
from .tensor.manipulation import roll # noqa: F401
from .tensor.manipulation import chunk # noqa: F401
from .tensor.manipulation import tolist # noqa: F401
from .tensor.math import abs # noqa: F401
from .tensor.math import acos # noqa: F401
from .tensor.math import asin # noqa: F401
from .tensor.math import atan # noqa: F401
from .tensor.math import atan2 # noqa: F401
from .tensor.math import ceil # noqa: F401
from .tensor.math import cos # noqa: F401
from .tensor.math import tan # noqa: F401
from .tensor.math import cosh # noqa: F401
from .tensor.math import cumsum # noqa: F401
from .tensor.math import exp # noqa: F401
from .tensor.math import expm1 # noqa: F401
from .tensor.math import floor # noqa: F401
from .tensor.math import increment # noqa: F401
from .tensor.math import log # noqa: F401
from .tensor.math import log2 # noqa: F401
from .tensor.math import log10 # noqa: F401
from .tensor.math import multiplex # noqa: F401
from .tensor.math import pow # noqa: F401
from .tensor.math import reciprocal # noqa: F401
from .tensor.math import all # noqa: F401
from .tensor.math import any # noqa: F401
from .tensor.math import round # noqa: F401
from .tensor.math import rsqrt # noqa: F401
from .tensor.math import scale # noqa: F401
from .tensor.math import sign # noqa: F401
from .tensor.math import sin # noqa: F401
from .tensor.math import sinh # noqa: F401
from .tensor.math import sqrt # noqa: F401
from .tensor.math import square # noqa: F401
from .tensor.math import stanh # noqa: F401
from .tensor.math import sum # noqa: F401
from .tensor.math import tanh # noqa: F401
from .tensor.math import tanh_ # noqa: F401
from .tensor.math import add_n # noqa: F401
from .tensor.math import max # noqa: F401
from .tensor.math import maximum # noqa: F401
from .tensor.math import min # noqa: F401
from .tensor.math import minimum # noqa: F401
from .tensor.math import mm # noqa: F401
from .tensor.math import divide # noqa: F401
from .tensor.math import floor_divide # noqa: F401
from .tensor.math import remainder # noqa: F401
from .tensor.math import mod # noqa: F401
from .tensor.math import floor_mod # noqa: F401
from .tensor.math import multiply # noqa: F401
from .tensor.math import add # noqa: F401
from .tensor.math import subtract # noqa: F401
from .tensor.math import logsumexp # noqa: F401
from .tensor.math import inverse # noqa: F401
from .tensor.math import log1p # noqa: F401
from .tensor.math import erf # noqa: F401
from .tensor.math import addmm # noqa: F401
from .tensor.math import clip # noqa: F401
from .tensor.math import trace # noqa: F401
from .tensor.math import diagonal # noqa: F401
from .tensor.math import kron # noqa: F401
from .tensor.math import isfinite # noqa: F401
from .tensor.math import isinf # noqa: F401
from .tensor.math import isnan # noqa: F401
from .tensor.math import prod # noqa: F401
from .tensor.math import broadcast_shape # noqa: F401
from .tensor.math import conj # noqa: F401
from .tensor.math import trunc # noqa: F401
from .tensor.math import digamma # noqa: F401
from .tensor.math import neg # noqa: F401
from .tensor.math import lgamma # noqa: F401
from .tensor.random import multinomial # noqa: F401
from .tensor.random import standard_normal # noqa: F401
from .tensor.random import normal # noqa: F401
from .tensor.random import uniform # noqa: F401
from .tensor.random import randn # noqa: F401
from .tensor.random import rand # noqa: F401
from .tensor.random import randint # noqa: F401
from .tensor.random import randperm # noqa: F401
from .tensor.search import argmax # noqa: F401
from .tensor.search import argmin # noqa: F401
from .tensor.search import argsort # noqa: F401
from .tensor.search import masked_select # noqa: F401
from .tensor.search import topk # noqa: F401
from .tensor.search import where # noqa: F401
from .tensor.search import index_select # noqa: F401
from .tensor.search import nonzero # noqa: F401
from .tensor.search import sort # noqa: F401
from .tensor.to_string import set_printoptions # noqa: F401
from .tensor.einsum import einsum # noqa: F401
from .framework.random import seed # noqa: F401
from .framework.random import get_cuda_rng_state # noqa: F401
from .framework.random import set_cuda_rng_state # noqa: F401
from .framework import ParamAttr # noqa: F401
from .framework import create_parameter # noqa: F401
from .framework import CPUPlace # noqa: F401
from .framework import CUDAPlace # noqa: F401
from .framework import NPUPlace # noqa: F401
from .framework import CUDAPinnedPlace # noqa: F401
from .framework import grad # noqa: F401
from .framework import no_grad # noqa: F401
from .framework import set_grad_enabled # noqa: F401
from .framework import save # noqa: F401
from .framework import load # noqa: F401
from .framework import DataParallel # noqa: F401
from .framework import set_default_dtype # noqa: F401
from .framework import get_default_dtype # noqa: F401
from .tensor.search import index_sample # noqa: F401
from .tensor.stat import mean # noqa: F401
from .tensor.stat import std # noqa: F401
from .tensor.stat import var # noqa: F401
from .tensor.stat import numel # noqa: F401
from .tensor.stat import median # noqa: F401
from .device import get_cudnn_version # noqa: F401
from .device import set_device # noqa: F401
from .device import get_device # noqa: F401
from .fluid.framework import is_compiled_with_cuda # noqa: F401
from .fluid.framework import is_compiled_with_rocm # noqa: F401
from .fluid.framework import disable_signal_handler # noqa: F401
from .device import is_compiled_with_xpu # noqa: F401
from .device import is_compiled_with_npu # noqa: F401
from .device import XPUPlace # noqa: F401
from .fluid.dygraph.base import enable_dygraph as disable_static # noqa: F401
from .fluid.dygraph.base import disable_dygraph as enable_static # noqa: F401
from .fluid.framework import in_dygraph_mode as in_dynamic_mode # noqa: F401
from .fluid.layers import crop_tensor as crop # noqa: F401
# high-level api
from .hapi import Model # noqa: F401
from . import callbacks # noqa: F401
from .hapi import summary # noqa: F401
from .hapi import flops # noqa: F401
from . import hub # noqa: F401
from . import linalg # noqa: F401
import paddle.text # noqa: F401
import paddle.vision # noqa: F401
from .tensor.random import check_shape # noqa: F401
disable_static()
__all__ = [ # noqa
'dtype',
'uint8',
'int8',
'int16',
'int32',
'int64',
'float16',
'float32',
'float64',
'bfloat16',
'bool',
'complex64',
'complex128',
'addmm',
'allclose',
't',
'add',
'subtract',
'diag',
'diagflat',
'isnan',
'scatter_nd_add',
'unstack',
'get_default_dtype',
'save',
'multinomial',
'get_cuda_rng_state',
'rank',
'empty_like',
'eye',
'cumsum',
'sign',
'is_empty',
'equal',
'equal_all',
'is_tensor',
'cross',
'where',
'log1p',
'cos',
'tan',
'mean',
'mv',
'in_dynamic_mode',
'min',
'any',
'slice',
'normal',
'logsumexp',
'full',
'unsqueeze',
'unsqueeze_',
'argmax',
'Model',
'summary',
'flops',
'sort',
'split',
'logical_and',
'full_like',
'less_than',
'kron',
'clip',
'Tensor',
'crop',
'ParamAttr',
'stanh',
'randint',
'assign',
'gather',
'scale',
'zeros',
'rsqrt',
'squeeze',
'squeeze_',
'to_tensor',
'gather_nd',
'isinf',
'uniform',
'floor_divide',
'remainder',
'floor_mod',
'roll',
'batch',
'max',
'norm',
'logical_or',
'bitwise_and',
'bitwise_or',
'bitwise_xor',
'bitwise_not',
'mm',
'flip',
'histogram',
'multiplex',
'CUDAPlace',
'NPUPlace',
'empty',
'shape',
'real',
'imag',
'reciprocal',
'rand',
'less_equal',
'triu',
'sin',
'dist',
'unbind',
'meshgrid',
'arange',
'load',
'numel',
'median',
'inverse',
'no_grad',
'set_grad_enabled',
'mod',
'abs',
'tril',
'pow',
'zeros_like',
'maximum',
'topk',
'index_select',
'CPUPlace',
'matmul',
'seed',
'acos',
'logical_xor',
'exp',
'expm1',
'bernoulli',
'sinh',
'round',
'DataParallel',
'argmin',
'prod',
'broadcast_shape',
'conj',
'neg',
'lgamma',
'square',
'divide',
'ceil',
'atan',
'atan2',
'expand',
'broadcast_to',
'ones_like',
'index_sample',
'cast',
'grad',
'all',
'ones',
'not_equal',
'sum',
'tile',
'greater_equal',
'isfinite',
'create_parameter',
'dot',
'increment',
'erf',
'bmm',
'chunk',
'tolist',
'greater_than',
'shard_index',
'argsort',
'tanh',
'tanh_',
'transpose',
'randn',
'strided_slice',
'unique',
'unique_consecutive',
'set_cuda_rng_state',
'set_printoptions',
'std',
'flatten',
'asin',
'multiply',
'disable_static',
'masked_select',
'var',
'trace',
'enable_static',
'scatter_nd',
'set_default_dtype',
'disable_signal_handler',
'expand_as',
'stack',
'sqrt',
'cholesky',
'matrix_power',
'randperm',
'linspace',
'reshape',
'reshape_',
'reverse',
'nonzero',
'CUDAPinnedPlace',
'logical_not',
'add_n',
'minimum',
'scatter',
'scatter_',
'floor',
'cosh',
'log',
'log2',
'log10',
'concat',
'check_shape',
'trunc',
'digamma',
'standard_normal',
'diagonal',
'broadcast_tensors',
'einsum'
]
| 35.346743 | 90 | 0.643813 |
try:
from paddle.version import full_version as __version__
from paddle.version import commit as __git_commit__
except ImportError:
import sys
sys.stderr.write('''Warning with import paddle: you should not
import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
)
from .batch import batch
from .fluid import monkey_patch_variable
from .fluid.dygraph import monkey_patch_math_varbase
monkey_patch_variable()
monkey_patch_math_varbase()
from .framework.dtype import dtype as dtype
from paddle.framework.dtype import uint8
from paddle.framework.dtype import int8
from paddle.framework.dtype import int16
from paddle.framework.dtype import int32
from paddle.framework.dtype import int64
from paddle.framework.dtype import float16
from paddle.framework.dtype import float32
from paddle.framework.dtype import float64
from paddle.framework.dtype import bfloat16
from paddle.framework.dtype import bool
from paddle.framework.dtype import complex64
from paddle.framework.dtype import complex128
from .framework import VarBase as Tensor
Tensor.__qualname__ = 'Tensor'
import paddle.compat
import paddle.distributed
import paddle.sysconfig
import paddle.distribution
import paddle.nn
import paddle.distributed.fleet
import paddle.optimizer
import paddle.metric
import paddle.regularizer
import paddle.incubate
import paddle.autograd
import paddle.device
import paddle.jit
import paddle.amp
import paddle.dataset
import paddle.inference
import paddle.io
import paddle.onnx
import paddle.reader
import paddle.static
import paddle.vision
from .tensor.random import bernoulli
from .tensor.attribute import rank
from .tensor.attribute import shape
from .tensor.attribute import real
from .tensor.attribute import imag
from .tensor.creation import to_tensor
from .tensor.creation import diag
from .tensor.creation import diagflat
from .tensor.creation import eye
from .tensor.creation import linspace
from .tensor.creation import ones
from .tensor.creation import ones_like
from .tensor.creation import zeros
from .tensor.creation import zeros_like
from .tensor.creation import arange
from .tensor.creation import full
from .tensor.creation import full_like
from .tensor.creation import triu
from .tensor.creation import tril
from .tensor.creation import meshgrid
from .tensor.creation import empty
from .tensor.creation import empty_like
from .tensor.creation import assign
from .tensor.linalg import matmul
from .tensor.linalg import dot
from .tensor.linalg import norm
from .tensor.linalg import transpose
from .tensor.linalg import dist
from .tensor.linalg import t
from .tensor.linalg import cross
from .tensor.linalg import cholesky
from .tensor.linalg import bmm
from .tensor.linalg import histogram
from .tensor.linalg import mv
from .tensor.linalg import matrix_power
from .tensor.logic import equal
from .tensor.logic import greater_equal
from .tensor.logic import greater_than
from .tensor.logic import is_empty
from .tensor.logic import less_equal
from .tensor.logic import less_than
from .tensor.logic import logical_and
from .tensor.logic import logical_not
from .tensor.logic import logical_or
from .tensor.logic import logical_xor
from .tensor.logic import bitwise_and
from .tensor.logic import bitwise_not
from .tensor.logic import bitwise_or
from .tensor.logic import bitwise_xor
from .tensor.logic import not_equal
from .tensor.logic import allclose
from .tensor.logic import equal_all
from .tensor.logic import is_tensor
from .tensor.manipulation import cast
from .tensor.manipulation import concat
from .tensor.manipulation import broadcast_tensors
from .tensor.manipulation import expand
from .tensor.manipulation import broadcast_to
from .tensor.manipulation import expand_as
from .tensor.manipulation import tile
from .tensor.manipulation import flatten
from .tensor.manipulation import gather
from .tensor.manipulation import gather_nd
from .tensor.manipulation import reshape
from .tensor.manipulation import reshape_
from .tensor.manipulation import flip as reverse
from .tensor.manipulation import scatter
from .tensor.manipulation import scatter_
from .tensor.manipulation import scatter_nd_add
from .tensor.manipulation import scatter_nd
from .tensor.manipulation import shard_index
from .tensor.manipulation import slice
from .tensor.manipulation import split
from .tensor.manipulation import squeeze
from .tensor.manipulation import squeeze_
from .tensor.manipulation import stack
from .tensor.manipulation import strided_slice
from .tensor.manipulation import unique
from .tensor.manipulation import unique_consecutive
from .tensor.manipulation import unsqueeze
from .tensor.manipulation import unsqueeze_
from .tensor.manipulation import unstack
from .tensor.manipulation import flip
from .tensor.manipulation import unbind
from .tensor.manipulation import roll
from .tensor.manipulation import chunk
from .tensor.manipulation import tolist
from .tensor.math import abs
from .tensor.math import acos
from .tensor.math import asin
from .tensor.math import atan
from .tensor.math import atan2
from .tensor.math import ceil
from .tensor.math import cos
from .tensor.math import tan
from .tensor.math import cosh
from .tensor.math import cumsum
from .tensor.math import exp
from .tensor.math import expm1
from .tensor.math import floor
from .tensor.math import increment
from .tensor.math import log
from .tensor.math import log2
from .tensor.math import log10
from .tensor.math import multiplex
from .tensor.math import pow
from .tensor.math import reciprocal
from .tensor.math import all
from .tensor.math import any
from .tensor.math import round
from .tensor.math import rsqrt
from .tensor.math import scale
from .tensor.math import sign
from .tensor.math import sin
from .tensor.math import sinh
from .tensor.math import sqrt
from .tensor.math import square
from .tensor.math import stanh
from .tensor.math import sum
from .tensor.math import tanh
from .tensor.math import tanh_
from .tensor.math import add_n
from .tensor.math import max
from .tensor.math import maximum
from .tensor.math import min
from .tensor.math import minimum
from .tensor.math import mm
from .tensor.math import divide
from .tensor.math import floor_divide
from .tensor.math import remainder
from .tensor.math import mod
from .tensor.math import floor_mod
from .tensor.math import multiply
from .tensor.math import add
from .tensor.math import subtract
from .tensor.math import logsumexp
from .tensor.math import inverse
from .tensor.math import log1p
from .tensor.math import erf
from .tensor.math import addmm
from .tensor.math import clip
from .tensor.math import trace
from .tensor.math import diagonal
from .tensor.math import kron
from .tensor.math import isfinite
from .tensor.math import isinf
from .tensor.math import isnan
from .tensor.math import prod
from .tensor.math import broadcast_shape
from .tensor.math import conj
from .tensor.math import trunc
from .tensor.math import digamma
from .tensor.math import neg
from .tensor.math import lgamma
from .tensor.random import multinomial
from .tensor.random import standard_normal
from .tensor.random import normal
from .tensor.random import uniform
from .tensor.random import randn
from .tensor.random import rand
from .tensor.random import randint
from .tensor.random import randperm
from .tensor.search import argmax
from .tensor.search import argmin
from .tensor.search import argsort
from .tensor.search import masked_select
from .tensor.search import topk
from .tensor.search import where
from .tensor.search import index_select
from .tensor.search import nonzero
from .tensor.search import sort
from .tensor.to_string import set_printoptions
from .tensor.einsum import einsum
from .framework.random import seed
from .framework.random import get_cuda_rng_state
from .framework.random import set_cuda_rng_state
from .framework import ParamAttr
from .framework import create_parameter
from .framework import CPUPlace
from .framework import CUDAPlace
from .framework import NPUPlace
from .framework import CUDAPinnedPlace
from .framework import grad
from .framework import no_grad
from .framework import set_grad_enabled
from .framework import save
from .framework import load
from .framework import DataParallel
from .framework import set_default_dtype
from .framework import get_default_dtype
from .tensor.search import index_sample
from .tensor.stat import mean
from .tensor.stat import std
from .tensor.stat import var
from .tensor.stat import numel
from .tensor.stat import median
from .device import get_cudnn_version
from .device import set_device
from .device import get_device
from .fluid.framework import is_compiled_with_cuda
from .fluid.framework import is_compiled_with_rocm
from .fluid.framework import disable_signal_handler
from .device import is_compiled_with_xpu
from .device import is_compiled_with_npu
from .device import XPUPlace
from .fluid.dygraph.base import enable_dygraph as disable_static
from .fluid.dygraph.base import disable_dygraph as enable_static
from .fluid.framework import in_dygraph_mode as in_dynamic_mode
from .fluid.layers import crop_tensor as crop
from .hapi import Model
from . import callbacks
from .hapi import summary
from .hapi import flops
from . import hub
from . import linalg
import paddle.text
import paddle.vision
from .tensor.random import check_shape
# Executed at import time: ``disable_static`` is the alias of
# ``fluid.dygraph.base.enable_dygraph`` imported above, so the package
# starts in dynamic-graph (imperative) mode by default.
disable_static()
__all__ = [
'dtype',
'uint8',
'int8',
'int16',
'int32',
'int64',
'float16',
'float32',
'float64',
'bfloat16',
'bool',
'complex64',
'complex128',
'addmm',
'allclose',
't',
'add',
'subtract',
'diag',
'diagflat',
'isnan',
'scatter_nd_add',
'unstack',
'get_default_dtype',
'save',
'multinomial',
'get_cuda_rng_state',
'rank',
'empty_like',
'eye',
'cumsum',
'sign',
'is_empty',
'equal',
'equal_all',
'is_tensor',
'cross',
'where',
'log1p',
'cos',
'tan',
'mean',
'mv',
'in_dynamic_mode',
'min',
'any',
'slice',
'normal',
'logsumexp',
'full',
'unsqueeze',
'unsqueeze_',
'argmax',
'Model',
'summary',
'flops',
'sort',
'split',
'logical_and',
'full_like',
'less_than',
'kron',
'clip',
'Tensor',
'crop',
'ParamAttr',
'stanh',
'randint',
'assign',
'gather',
'scale',
'zeros',
'rsqrt',
'squeeze',
'squeeze_',
'to_tensor',
'gather_nd',
'isinf',
'uniform',
'floor_divide',
'remainder',
'floor_mod',
'roll',
'batch',
'max',
'norm',
'logical_or',
'bitwise_and',
'bitwise_or',
'bitwise_xor',
'bitwise_not',
'mm',
'flip',
'histogram',
'multiplex',
'CUDAPlace',
'NPUPlace',
'empty',
'shape',
'real',
'imag',
'reciprocal',
'rand',
'less_equal',
'triu',
'sin',
'dist',
'unbind',
'meshgrid',
'arange',
'load',
'numel',
'median',
'inverse',
'no_grad',
'set_grad_enabled',
'mod',
'abs',
'tril',
'pow',
'zeros_like',
'maximum',
'topk',
'index_select',
'CPUPlace',
'matmul',
'seed',
'acos',
'logical_xor',
'exp',
'expm1',
'bernoulli',
'sinh',
'round',
'DataParallel',
'argmin',
'prod',
'broadcast_shape',
'conj',
'neg',
'lgamma',
'square',
'divide',
'ceil',
'atan',
'atan2',
'expand',
'broadcast_to',
'ones_like',
'index_sample',
'cast',
'grad',
'all',
'ones',
'not_equal',
'sum',
'tile',
'greater_equal',
'isfinite',
'create_parameter',
'dot',
'increment',
'erf',
'bmm',
'chunk',
'tolist',
'greater_than',
'shard_index',
'argsort',
'tanh',
'tanh_',
'transpose',
'randn',
'strided_slice',
'unique',
'unique_consecutive',
'set_cuda_rng_state',
'set_printoptions',
'std',
'flatten',
'asin',
'multiply',
'disable_static',
'masked_select',
'var',
'trace',
'enable_static',
'scatter_nd',
'set_default_dtype',
'disable_signal_handler',
'expand_as',
'stack',
'sqrt',
'cholesky',
'matrix_power',
'randperm',
'linspace',
'reshape',
'reshape_',
'reverse',
'nonzero',
'CUDAPinnedPlace',
'logical_not',
'add_n',
'minimum',
'scatter',
'scatter_',
'floor',
'cosh',
'log',
'log2',
'log10',
'concat',
'check_shape',
'trunc',
'digamma',
'standard_normal',
'diagonal',
'broadcast_tensors',
'einsum'
]
| true | true |
1c38d75d08ff1af24cc784a0ef6f11ff30d07729 | 273 | py | Python | python/design_patterns/env/lib/python3.7/site-packages/sphinxcontrib/htmlhelp/version.py | lmregus/Portfolio | 9a751443edbfe5ff2b47cdeacca86761ed03e81f | [
"MIT"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | venv/lib/python3.6/site-packages/sphinxcontrib/htmlhelp/version.py | metu-sparg/higrid | ebee0f35ea1712a01f3fdbaae132127ce4833baf | [
"BSD-3-Clause"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | python/design_patterns/env/lib/python3.7/site-packages/sphinxcontrib/htmlhelp/version.py | lmregus/Portfolio | 9a751443edbfe5ff2b47cdeacca86761ed03e81f | [
"MIT"
] | 5 | 2019-04-27T01:19:47.000Z | 2020-09-20T15:15:19.000Z | """
sphinxcontrib.htmlhelp.version
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2019 by the Sphinx team, see README.
:license: BSD, see LICENSE for details.
"""
__version__ = '1.0.1'
__version_info__ = tuple(map(int, __version__.split('.')))
| 24.818182 | 67 | 0.604396 |
__version__ = '1.0.1'
__version_info__ = tuple(map(int, __version__.split('.')))
| true | true |
1c38d7a1a902eec01def757b468cfc26b6556730 | 8,788 | py | Python | plugins/callbacks.py | midhunsaji/emmawatsonfilter1bot | 947c1649653f8421adb6ce4c1cfd64624ce43835 | [
"MIT"
] | null | null | null | plugins/callbacks.py | midhunsaji/emmawatsonfilter1bot | 947c1649653f8421adb6ce4c1cfd64624ce43835 | [
"MIT"
] | null | null | null | plugins/callbacks.py | midhunsaji/emmawatsonfilter1bot | 947c1649653f8421adb6ce4c1cfd64624ce43835 | [
"MIT"
] | 1 | 2021-12-31T02:30:44.000Z | 2021-12-31T02:30:44.000Z | import os
import ast
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from script import Script
from database.filters_mdb import del_all, find_filter
from database.connections_mdb import(
all_connections,
active_connection,
if_active,
delete_connection,
make_active,
make_inactive
)
@trojanz.on_callback_query()
async def cb_handler(client, query):
    """Route every inline-keyboard callback query to its handler.

    ``query.data`` encodes the action.  Fixed strings select the
    start/help/about navigation screens and the "delete all filters"
    confirmation flow; parameterised payloads look like
    ``<action>:<arg1>:<arg2>...`` and are matched by substring.  The
    ``groupcb`` branch is deliberately tested before ``connectcb`` /
    ``disconnect`` / ``deletecb`` so the composite per-group menu payload
    is not shadowed by the shorter action names.

    Fixes over the previous revision: bare ``except:`` clauses narrowed
    to ``except Exception:`` (a bare except would also swallow
    ``CancelledError``/``KeyboardInterrupt``), and typos in user-facing
    messages corrected ("occured", "Thats").
    """
    if query.data == "start_data":
        # Welcome screen with a single "Command Help" button.
        await query.answer()
        keyboard = InlineKeyboardMarkup(
            [
                [
                    InlineKeyboardButton("Command Help", callback_data="help_data")
                ]
            ]
        )
        await query.message.edit_text(
            Script.START_MSG.format(query.from_user.mention),
            reply_markup=keyboard,
            disable_web_page_preview=True
        )
        return

    elif query.data == "help_data":
        await query.answer()
        keyboard = InlineKeyboardMarkup(
            [
                [
                    InlineKeyboardButton("Creator", url="https://t.me/Curio_VolvEX"),
                    InlineKeyboardButton("About Me", callback_data="about_data")
                ],
                [
                    InlineKeyboardButton("Channel", url="https://t.me/MoviePlayr"),
                    InlineKeyboardButton("Support Group", url="https://t.me/MoviePlayr")
                ]
            ]
        )
        await query.message.edit_text(
            Script.HELP_MSG,
            reply_markup=keyboard,
            disable_web_page_preview=True
        )
        return

    elif query.data == "about_data":
        await query.answer()
        keyboard = InlineKeyboardMarkup(
            [
                [
                    InlineKeyboardButton(
                        "SOURCE CODE", url="https://t.me/ok_bie_1223/4")
                ],
                [
                    InlineKeyboardButton("BACK", callback_data="help_data"),
                    InlineKeyboardButton("CLOSE", callback_data="close_data"),
                ]
            ]
        )
        await query.message.edit_text(
            Script.ABOUT_MSG,
            reply_markup=keyboard,
            disable_web_page_preview=True
        )
        return

    elif query.data == "close_data":
        await query.message.delete()

    elif query.data == "delallconfirm":
        # "Delete all filters" confirmed.  Resolve which group to wipe:
        # in a PM we use the user's active connection, in a group the
        # chat itself.  Only the group creator or a configured
        # AUTH_USER may proceed.
        userid = query.from_user.id
        chat_type = query.message.chat.type

        if chat_type == "private":
            grpid = await active_connection(str(userid))
            if grpid is not None:
                grp_id = grpid
                try:
                    chat = await client.get_chat(grpid)
                    title = chat.title
                except Exception:
                    # Bot was removed from (or never added to) the group.
                    await query.message.edit_text("Make sure I'm present in your group!!", quote=True)
                    return
            else:
                await query.message.edit_text(
                    "I'm not connected to any groups!\nCheck /connections or connect to any groups",
                    quote=True
                )
                return

        elif (chat_type == "group") or (chat_type == "supergroup"):
            grp_id = query.message.chat.id
            title = query.message.chat.title

        else:
            # Channels etc. are not supported.
            return

        st = await client.get_chat_member(grp_id, userid)
        if (st.status == "creator") or (str(userid) in Config.AUTH_USERS):
            await del_all(query.message, grp_id, title)
        else:
            await query.answer("You need to be Group Owner or an Auth User to do that!", show_alert=True)

    elif query.data == "delallcancel":
        # "Delete all filters" aborted -> clean up the prompt messages.
        userid = query.from_user.id
        chat_type = query.message.chat.type

        if chat_type == "private":
            await query.message.reply_to_message.delete()
            await query.message.delete()

        elif (chat_type == "group") or (chat_type == "supergroup"):
            grp_id = query.message.chat.id
            st = await client.get_chat_member(grp_id, userid)
            if (st.status == "creator") or (str(userid) in Config.AUTH_USERS):
                await query.message.delete()
                try:
                    await query.message.reply_to_message.delete()
                except Exception:
                    # The triggering command message may already be gone.
                    pass
            else:
                await query.answer("That's not for you!!", show_alert=True)

    elif "groupcb" in query.data:
        # Payload: "groupcb:<group_id>:<title>:<act>" -> per-group menu.
        # <act> is " - ACTIVE" when this group is the active connection,
        # otherwise empty.
        await query.answer()

        group_id = query.data.split(":")[1]
        title = query.data.split(":")[2]
        act = query.data.split(":")[3]

        if act == "":
            stat = "CONNECT"
            cb = "connectcb"
        else:
            stat = "DISCONNECT"
            cb = "disconnect"

        keyboard = InlineKeyboardMarkup([
            [InlineKeyboardButton(f"{stat}", callback_data=f"{cb}:{group_id}:{title}"),
             InlineKeyboardButton("DELETE", callback_data=f"deletecb:{group_id}")],
            [InlineKeyboardButton("BACK", callback_data="backcb")]
        ])

        await query.message.edit_text(
            f"Group Name : **{title}**\nGroup ID : `{group_id}`",
            reply_markup=keyboard,
            parse_mode="md"
        )
        return

    elif "connectcb" in query.data:
        # Payload: "connectcb:<group_id>:<title>" -> make this group the
        # user's active connection.
        await query.answer()

        group_id = query.data.split(":")[1]
        title = query.data.split(":")[2]
        user_id = query.from_user.id

        mkact = await make_active(str(user_id), str(group_id))
        if mkact:
            await query.message.edit_text(
                f"Connected to **{title}**",
                parse_mode="md"
            )
        else:
            await query.message.edit_text(
                "Some error occurred!!",
                parse_mode="md"
            )
        return

    elif "disconnect" in query.data:
        # Payload: "disconnect:<group_id>:<title>" -> clear the active
        # connection (the stored connections themselves are kept).
        await query.answer()

        title = query.data.split(":")[2]
        user_id = query.from_user.id

        mkinact = await make_inactive(str(user_id))
        if mkinact:
            await query.message.edit_text(
                f"Disconnected from **{title}**",
                parse_mode="md"
            )
        else:
            await query.message.edit_text(
                "Some error occurred!!",
                parse_mode="md"
            )
        return

    elif "deletecb" in query.data:
        # Payload: "deletecb:<group_id>" -> forget the connection entirely.
        await query.answer()

        user_id = query.from_user.id
        group_id = query.data.split(":")[1]

        delcon = await delete_connection(str(user_id), str(group_id))
        if delcon:
            await query.message.edit_text(
                "Successfully deleted connection"
            )
        else:
            await query.message.edit_text(
                "Some error occurred!!",
                parse_mode="md"
            )
        return

    elif query.data == "backcb":
        # Back to the list of all of this user's connected groups.
        await query.answer()

        userid = query.from_user.id

        groupids = await all_connections(str(userid))
        if groupids is None:
            await query.message.edit_text(
                "There are no active connections!! Connect to some groups first.",
            )
            return

        buttons = []
        for groupid in groupids:
            try:
                ttl = await client.get_chat(int(groupid))
                title = ttl.title
                active = await if_active(str(userid), str(groupid))
                act = " - ACTIVE" if active else ""
                buttons.append(
                    [
                        InlineKeyboardButton(
                            text=f"{title}{act}", callback_data=f"groupcb:{groupid}:{title}:{act}"
                        )
                    ]
                )
            except Exception:
                # Stale connection (bot kicked / group deleted): skip it.
                pass
        if buttons:
            await query.message.edit_text(
                "Your connected group details ;\n\n",
                reply_markup=InlineKeyboardMarkup(buttons)
            )

    elif "alertmessage" in query.data:
        # Payload: "alertmessage:<index>:<keyword>" -> pop up the stored
        # alert text attached to the <index>-th button of filter <keyword>.
        grp_id = query.message.chat.id
        i = query.data.split(":")[1]
        keyword = query.data.split(":")[2]
        reply_text, btn, alerts, fileid = await find_filter(grp_id, keyword)
        if alerts is not None:
            alerts = ast.literal_eval(alerts)
            alert = alerts[int(i)]
            # Alerts are stored with escaped newlines/tabs; unescape for display.
            alert = alert.replace("\\n", "\n").replace("\\t", "\t")
            await query.answer(alert, show_alert=True)
| 30.943662 | 104 | 0.517865 | import os
import ast
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from script import Script
from database.filters_mdb import del_all, find_filter
from database.connections_mdb import(
all_connections,
active_connection,
if_active,
delete_connection,
make_active,
make_inactive
)
@trojanz.on_callback_query()
async def cb_handler(client, query):
if query.data == "start_data":
await query.answer()
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Command Help", callback_data="help_data")
]
]
)
await query.message.edit_text(
Script.START_MSG.format(query.from_user.mention),
reply_markup=keyboard,
disable_web_page_preview=True
)
return
elif query.data == "help_data":
await query.answer()
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Creator", url="https://t.me/Curio_VolvEX"),
InlineKeyboardButton("About Me", callback_data="about_data")
],
[
InlineKeyboardButton("Channel", url="https://t.me/MoviePlayr"),
InlineKeyboardButton("Support Group", url="https://t.me/MoviePlayr")
]
]
)
await query.message.edit_text(
Script.HELP_MSG,
reply_markup=keyboard,
disable_web_page_preview=True
)
return
elif query.data == "about_data":
await query.answer()
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"SOURCE CODE", url="https://t.me/ok_bie_1223/4")
],
[
InlineKeyboardButton("BACK", callback_data="help_data"),
InlineKeyboardButton("CLOSE", callback_data="close_data"),
]
]
)
await query.message.edit_text(
Script.ABOUT_MSG,
reply_markup=keyboard,
disable_web_page_preview=True
)
return
elif query.data == "close_data":
await query.message.delete()
elif query.data == "delallconfirm":
userid = query.from_user.id
chat_type = query.message.chat.type
if chat_type == "private":
grpid = await active_connection(str(userid))
if grpid is not None:
grp_id = grpid
try:
chat = await client.get_chat(grpid)
title = chat.title
except:
await query.message.edit_text("Make sure I'm present in your group!!", quote=True)
return
else:
await query.message.edit_text(
"I'm not connected to any groups!\nCheck /connections or connect to any groups",
quote=True
)
return
elif (chat_type == "group") or (chat_type == "supergroup"):
grp_id = query.message.chat.id
title = query.message.chat.title
else:
return
st = await client.get_chat_member(grp_id, userid)
if (st.status == "creator") or (str(userid) in Config.AUTH_USERS):
await del_all(query.message, grp_id, title)
else:
await query.answer("You need to be Group Owner or an Auth User to do that!",show_alert=True)
elif query.data == "delallcancel":
userid = query.from_user.id
chat_type = query.message.chat.type
if chat_type == "private":
await query.message.reply_to_message.delete()
await query.message.delete()
elif (chat_type == "group") or (chat_type == "supergroup"):
grp_id = query.message.chat.id
st = await client.get_chat_member(grp_id, userid)
if (st.status == "creator") or (str(userid) in Config.AUTH_USERS):
await query.message.delete()
try:
await query.message.reply_to_message.delete()
except:
pass
else:
await query.answer("Thats not for you!!",show_alert=True)
elif "groupcb" in query.data:
await query.answer()
group_id = query.data.split(":")[1]
title = query.data.split(":")[2]
act = query.data.split(":")[3]
user_id = query.from_user.id
if act == "":
stat = "CONNECT"
cb = "connectcb"
else:
stat = "DISCONNECT"
cb = "disconnect"
keyboard = InlineKeyboardMarkup([
[InlineKeyboardButton(f"{stat}", callback_data=f"{cb}:{group_id}:{title}"),
InlineKeyboardButton("DELETE", callback_data=f"deletecb:{group_id}")],
[InlineKeyboardButton("BACK", callback_data="backcb")]
])
await query.message.edit_text(
f"Group Name : **{title}**\nGroup ID : `{group_id}`",
reply_markup=keyboard,
parse_mode="md"
)
return
elif "connectcb" in query.data:
await query.answer()
group_id = query.data.split(":")[1]
title = query.data.split(":")[2]
user_id = query.from_user.id
mkact = await make_active(str(user_id), str(group_id))
if mkact:
await query.message.edit_text(
f"Connected to **{title}**",
parse_mode="md"
)
return
else:
await query.message.edit_text(
f"Some error occured!!",
parse_mode="md"
)
return
elif "disconnect" in query.data:
await query.answer()
title = query.data.split(":")[2]
user_id = query.from_user.id
mkinact = await make_inactive(str(user_id))
if mkinact:
await query.message.edit_text(
f"Disconnected from **{title}**",
parse_mode="md"
)
return
else:
await query.message.edit_text(
f"Some error occured!!",
parse_mode="md"
)
return
elif "deletecb" in query.data:
await query.answer()
user_id = query.from_user.id
group_id = query.data.split(":")[1]
delcon = await delete_connection(str(user_id), str(group_id))
if delcon:
await query.message.edit_text(
"Successfully deleted connection"
)
return
else:
await query.message.edit_text(
f"Some error occured!!",
parse_mode="md"
)
return
elif query.data == "backcb":
await query.answer()
userid = query.from_user.id
groupids = await all_connections(str(userid))
if groupids is None:
await query.message.edit_text(
"There are no active connections!! Connect to some groups first.",
)
return
buttons = []
for groupid in groupids:
try:
ttl = await client.get_chat(int(groupid))
title = ttl.title
active = await if_active(str(userid), str(groupid))
if active:
act = " - ACTIVE"
else:
act = ""
buttons.append(
[
InlineKeyboardButton(
text=f"{title}{act}", callback_data=f"groupcb:{groupid}:{title}:{act}"
)
]
)
except:
pass
if buttons:
await query.message.edit_text(
"Your connected group details ;\n\n",
reply_markup=InlineKeyboardMarkup(buttons)
)
elif "alertmessage" in query.data:
grp_id = query.message.chat.id
i = query.data.split(":")[1]
keyword = query.data.split(":")[2]
reply_text, btn, alerts, fileid = await find_filter(grp_id, keyword)
if alerts is not None:
alerts = ast.literal_eval(alerts)
alert = alerts[int(i)]
alert = alert.replace("\\n", "\n").replace("\\t", "\t")
await query.answer(alert,show_alert=True)
| true | true |
1c38d890af8cbf094361f06350486336a75eaec7 | 899 | py | Python | accounts/models.py | DevangS/CoralNet | 7c56d4ec95a771718175bd94c3ef51c4095082e3 | [
"BSD-2-Clause"
] | 4 | 2015-12-23T05:14:35.000Z | 2019-07-09T03:27:10.000Z | accounts/models.py | DevangS/CoralNet | 7c56d4ec95a771718175bd94c3ef51c4095082e3 | [
"BSD-2-Clause"
] | 3 | 2015-04-07T02:45:15.000Z | 2015-07-01T19:25:10.000Z | accounts/models.py | DevangS/CoralNet | 7c56d4ec95a771718175bd94c3ef51c4095082e3 | [
"BSD-2-Clause"
] | 2 | 2016-01-21T17:25:48.000Z | 2019-08-29T18:42:14.000Z | from django.db import models
from django.utils.translation import ugettext_lazy as _
from userena.models import UserenaLanguageBaseProfile
from userena.models import User
class Profile(UserenaLanguageBaseProfile):
    """Per-user profile data layered on top of the userena base profile."""
    # Owning user; reverse accessor is ``user.my_profile``.
    user = models.OneToOneField(User, unique = True, verbose_name = _('user'),related_name = 'my_profile')
    # Despite the field name, the form label is "Name".
    about_me = models.CharField(_('Name'), max_length=45, blank=True)
    # NOTE(review): ``verify_exists`` was deprecated in Django 1.4 and
    # removed in 1.5 -- confirm the target Django version before upgrading.
    website = models.URLField(_('Website'), blank=True, verify_exists=True)
    location = models.CharField(_('Location'), max_length=45, blank=True)
#class Group(models.Model):
# name = models.CharField(max_length=45, blank=True)
# member_permission = choices("View", "Annotate", "None") #Default permissions for GROUP MEMBERS on GROUPS images
# source_permission = choices("View", "Annotate", "None") #Default permissions for EVERYONE ELSE on GROUPS images | 49.944444 | 116 | 0.756396 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from userena.models import UserenaLanguageBaseProfile
from userena.models import User
class Profile(UserenaLanguageBaseProfile):
user = models.OneToOneField(User, unique = True, verbose_name = _('user'),related_name = 'my_profile')
about_me = models.CharField(_('Name'), max_length=45, blank=True)
website = models.URLField(_('Website'), blank=True, verify_exists=True)
location = models.CharField(_('Location'), max_length=45, blank=True)
| true | true |
1c38d961b79a4b3c0fee5af2ed73a4459bd5b507 | 2,988 | py | Python | pyvisdk/client.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/client.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/client.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | '''
Created on Mar 6, 2011
@author: eplaster
'''
from suds import MethodNotFound
import logging
import os.path
import suds
import urllib
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
WSDL_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'wsdl')
class SudsClientFactory(object):
    '''
    Caches one parsed suds client per bundled WSDL.  Parsing a WSDL is
    expensive, so the first call builds the client from the local file in
    WSDL_DIR and every call returns a cheap clone() that shares the
    parsed schema but carries its own options (location, timeout, ...).
    '''
    # Lazily-built prototype clients, shared process-wide.
    _client = None
    _sms_client = None

    @classmethod
    def get_suds_client(cls):
        '''Return a fresh clone of the cached vimService client.'''
        if cls._client is None:
            cls._client = suds.client.Client("file://" + urllib.pathname2url(os.path.join(WSDL_DIR, 'vimService.wsdl')),
                                             cachingpolicy=1, autoblend=True)
        return cls._client.clone()

    @classmethod
    def get_sms_suds_client(cls):
        '''Return a fresh clone of the cached smsService client.'''
        if cls._sms_client is None:
            cls._sms_client = suds.client.Client("file://" + urllib.pathname2url(os.path.join(WSDL_DIR, 'smsService.wsdl')),
                                                 cachingpolicy=1, autoblend=True)
        return cls._sms_client.clone()
class Client(object):
    '''
    The Client class acts as a proxy to the suds.client.Client class, in that it fixes the
    ManagedObjectReference objects and provides a more streamline interface specific to VISDK.

    Attribute access is delegated in order: SOAP service methods first,
    then the underlying suds client, then this object's own attributes
    (see __getattribute__).
    '''
    def __init__(self, server, timeout=90):
        '''
        Point a cloned suds client at ``https://<server>/sdk``.

        :param server: vCenter / ESX host name or address.
        :param timeout: SOAP request timeout in seconds.
        '''
        url = "https://" + server + '/sdk'
        client = SudsClientFactory.get_suds_client()
        client.set_options(faults=True)
        client.set_options(location=url)
        client.set_options(timeout=timeout)

        self.service = client.service
        self.client = client
        self.server = server
    #
    # proxying (special cases)
    #
    def __getattribute__(self, attr):
        # Intercepts *every* attribute access on this object, so lookups
        # on our own attributes must go through super() to avoid recursion.
        service = super(Client, self).__getattribute__("service")

        # 1) try the SOAP service (remote methods and service attributes)
        try:
            _attr = getattr(service, attr)
            if _attr.__class__ == suds.client.Method:
                # we need to do something about getting to the right method here...
                return _attr
            else:
                return _attr
        except (AttributeError, MethodNotFound):
            # 2) fall back to the wrapped suds client object
            try:
                client = super(Client, self).__getattribute__("client")
                _attr = getattr(client, attr)
                return _attr
            except (AttributeError, MethodNotFound):
                # 3) finally, a member of this class itself
                return super(Client, self).__getattribute__(attr)
class SmsClient(Client):
    '''
    Client bound to the vCenter Storage Monitoring Service (SMS)
    endpoint at ``https://<server>/sms/sdk``.
    '''
    def __init__(self, server, timeout=90):
        '''
        :param server: vCenter host name or address.
        :param timeout: SOAP request timeout in seconds.
        '''
        endpoint = "https://" + server + '/sms/sdk'
        suds_client = SudsClientFactory.get_sms_suds_client()
        # Configure the clone for this endpoint in a single call.
        suds_client.set_options(faults=True, location=endpoint, timeout=timeout)

        self.service = suds_client.service
        self.url = endpoint
        self.client = suds_client
        self.server = server
| 32.129032 | 124 | 0.60241 | from suds import MethodNotFound
import logging
import os.path
import suds
import urllib
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
WSDL_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'wsdl')
class SudsClientFactory(object):
_client = None
_sms_client = None
@classmethod
def get_suds_client(cls):
if cls._client is None:
cls._client = suds.client.Client("file://" + urllib.pathname2url(os.path.join(WSDL_DIR, 'vimService.wsdl')),
cachingpolicy=1, autoblend=True)
return cls._client.clone()
@classmethod
def get_sms_suds_client(cls):
if cls._sms_client is None:
cls._sms_client = suds.client.Client("file://" + urllib.pathname2url(os.path.join(WSDL_DIR, 'smsService.wsdl')),
cachingpolicy=1, autoblend=True)
return cls._sms_client.clone()
class Client(object):
def __init__(self, server, timeout=90):
url = "https://" + server + '/sdk'
client = SudsClientFactory.get_suds_client()
client.set_options(faults=True)
client.set_options(location=url)
client.set_options(timeout=timeout)
self.service = client.service
self.url = url
self.client = client
self.server = server
def __getattribute__(self, attr):
service = super(Client, self).__getattribute__("service")
try:
_attr = getattr(service, attr)
if _attr.__class__ == suds.client.Method:
return _attr
else:
return _attr
except (AttributeError, MethodNotFound):
try:
client = super(Client, self).__getattribute__("client")
_attr = getattr(client, attr)
return _attr
except (AttributeError, MethodNotFound):
# if it's a member of this class...
return super(Client, self).__getattribute__(attr)
class SmsClient(Client):
def __init__(self, server, timeout=90):
url = "https://" + server + '/sms/sdk'
client = SudsClientFactory.get_sms_suds_client()
client.set_options(faults=True)
client.set_options(location=url)
client.set_options(timeout=timeout)
self.service = client.service
self.url = url
self.client = client
self.server = server
| true | true |
1c38db8cad4fa2f949d9f9b56d2d2eded4eb5d08 | 1,240 | py | Python | code_samples/main_plotly_widget.py | lcopey/node_editor | 04d56ae4c7f2149e46903d5dd2e46f3906ef69e6 | [
"MIT"
] | 1 | 2021-04-30T11:28:42.000Z | 2021-04-30T11:28:42.000Z | code_samples/main_plotly_widget.py | lcopey/node_editor | 04d56ae4c7f2149e46903d5dd2e46f3906ef69e6 | [
"MIT"
] | null | null | null | code_samples/main_plotly_widget.py | lcopey/node_editor | 04d56ae4c7f2149e46903d5dd2e46f3906ef69e6 | [
"MIT"
] | null | null | null | from PyQt5 import QtCore, QtWidgets, QtWebEngineWidgets
from PyQt5.QtWebEngineWidgets import QWebEngineView
import plotly.express as px
#pip :
# pyqt5 5.15.2
# plotly 4.14.3
# pyqtwebengine 5.15.2
# pip
# pyqt5 : 5.15.2
# plotly : 4.14.3
# pyqtwebengine : 5.15.2
class Widget(QtWidgets.QWidget):
    """Window with a "Plot" button above an embedded web view that
    renders a plotly figure as HTML."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.button = QtWidgets.QPushButton('Plot', self)
        self.browser = QWebEngineView(self)

        # Button on top (centered), browser filling the rest.
        vlayout = QtWidgets.QVBoxLayout(self)
        vlayout.addWidget(self.button, alignment=QtCore.Qt.AlignHCenter)
        vlayout.addWidget(self.browser)

        self.button.clicked.connect(self.show_graph)
        self.resize(1000, 800)

    def show_graph(self):
        """Build a plotly box plot from the sample 'tips' dataset and
        load its HTML into the embedded browser."""
        df = px.data.tips()
        fig = px.box(df, x="day", y="total_bill", color="smoker", points='all')
        fig.update_traces(quartilemethod="exclusive")  # or "inclusive", or "linear" by default
        # 'cdn' keeps the HTML small by loading plotly.js from the network.
        self.browser.setHtml(fig.to_html(include_plotlyjs='cdn'))
if __name__ == "__main__":
    # Manual smoke test: show the window and run the Qt event loop.
    app = QtWidgets.QApplication([])
    print('app done')
    widget = Widget()
    print('Widget initialized')
    widget.show()
    app.exec()
| 28.837209 | 95 | 0.641129 | from PyQt5 import QtCore, QtWidgets, QtWebEngineWidgets
from PyQt5.QtWebEngineWidgets import QWebEngineView
import plotly.express as px
class Widget(QtWidgets.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.button = QtWidgets.QPushButton('Plot', self)
self.browser = QWebEngineView(self)
vlayout = QtWidgets.QVBoxLayout(self)
vlayout.addWidget(self.button, alignment=QtCore.Qt.AlignHCenter)
vlayout.addWidget(self.browser)
self.button.clicked.connect(self.show_graph)
self.resize(1000, 800)
def show_graph(self):
df = px.data.tips()
fig = px.box(df, x="day", y="total_bill", color="smoker", points='all')
fig.update_traces(quartilemethod="exclusive")
self.browser.setHtml(fig.to_html(include_plotlyjs='cdn'))
if __name__ == "__main__":
app = QtWidgets.QApplication([])
print('app done')
widget = Widget()
print('Widget initialized')
widget.show()
app.exec()
| true | true |
1c38dccf149299beb8f21628c857cbba419d3635 | 1,839 | py | Python | mkt/purchase/webpay_tasks.py | chrisdavidmills/zamboni | 09e05bad2570663d25408793289c81324d3e952e | [
"BSD-3-Clause"
] | null | null | null | mkt/purchase/webpay_tasks.py | chrisdavidmills/zamboni | 09e05bad2570663d25408793289c81324d3e952e | [
"BSD-3-Clause"
] | null | null | null | mkt/purchase/webpay_tasks.py | chrisdavidmills/zamboni | 09e05bad2570663d25408793289c81324d3e952e | [
"BSD-3-Clause"
] | 1 | 2021-03-13T00:33:12.000Z | 2021-03-13T00:33:12.000Z | import logging
from celeryutils import task
from jingo.helpers import datetime
from tower import ugettext as _
import amo
from amo.decorators import write
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from amo.utils import get_locale_from_lang, send_html_mail_jinja
from stats.models import Contribution
# Module-level logger for webpay purchase tasks.
log = logging.getLogger('z.purchase.webpay')

# Default retry policy for notification tasks: 5 attempts, 15s apart.
# Not referenced in this chunk; presumably unpacked as task options
# elsewhere -- TODO confirm.
notify_kw = dict(default_retry_delay=15, # seconds
                 max_tries=5)
@task
def send_purchase_receipt(contrib_id, **kw):
    """
    Sends an email to the purchaser of the app.
    """
    contribution = Contribution.objects.get(pk=contrib_id)

    # Render subject and body in the purchaser's own locale.
    with contribution.user.activate_lang():
        addon = contribution.addon
        version = addon.current_version or addon.latest_version
        developer_name = version.developer_name if version else ''
        localized_price = contribution.get_amount_locale(
            get_locale_from_lang(contribution.source_locale))

        # L10n: {0} is the app name.
        subject = _('Receipt for {0}').format(contribution.addon.name)
        context = {
            'app_name': addon.name,
            'developer_name': developer_name,
            'price': localized_price,
            'date': datetime(contribution.created.date()),
            'purchaser_email': contribution.user.email,
            #'purchaser_phone': '',  # TODO: See bug 894614.
            #'purchaser_last_four': '',
            'transaction_id': contribution.uuid,
            'purchases_url': absolutify('/purchases'),
            'support_url': addon.support_url,
            'terms_of_service_url': absolutify(reverse('site.terms')),
        }

        log.info('Sending email about purchase: %s' % contrib_id)
        send_html_mail_jinja(subject, 'purchase/receipt.html',
                             'purchase/receipt.ltxt', context,
                             recipient_list=[contribution.user.email])
| 36.058824 | 73 | 0.656879 | import logging
from celeryutils import task
from jingo.helpers import datetime
from tower import ugettext as _
import amo
from amo.decorators import write
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from amo.utils import get_locale_from_lang, send_html_mail_jinja
from stats.models import Contribution
log = logging.getLogger('z.purchase.webpay')
notify_kw = dict(default_retry_delay=15,
max_tries=5)
@task
def send_purchase_receipt(contrib_id, **kw):
contrib = Contribution.objects.get(pk=contrib_id)
with contrib.user.activate_lang():
addon = contrib.addon
version = addon.current_version or addon.latest_version
subject = _('Receipt for {0}').format(contrib.addon.name)
data = {
'app_name': addon.name,
'developer_name': version.developer_name if version else '',
'price': contrib.get_amount_locale(get_locale_from_lang(
contrib.source_locale)),
'date': datetime(contrib.created.date()),
'purchaser_email': contrib.user.email,
'transaction_id': contrib.uuid,
'purchases_url': absolutify('/purchases'),
'support_url': addon.support_url,
'terms_of_service_url': absolutify(reverse('site.terms')),
}
log.info('Sending email about purchase: %s' % contrib_id)
text_template = 'purchase/receipt.ltxt'
html_template = 'purchase/receipt.html'
send_html_mail_jinja(subject, html_template, text_template, data,
recipient_list=[contrib.user.email])
| true | true |
1c38ddf16aa8af9fd8d109632806264210930a3c | 8,795 | py | Python | readers/caption_graph_reader.py | yekeren/WSSGG | 4d20dadffe7584ac2c7f26419960512380b8d06e | [
"Apache-2.0"
] | 33 | 2021-04-14T01:27:34.000Z | 2022-03-30T03:32:00.000Z | readers/caption_graph_reader.py | yekeren/WSSGG | 4d20dadffe7584ac2c7f26419960512380b8d06e | [
"Apache-2.0"
] | null | null | null | readers/caption_graph_reader.py | yekeren/WSSGG | 4d20dadffe7584ac2c7f26419960512380b8d06e | [
"Apache-2.0"
] | 6 | 2021-04-14T14:43:19.000Z | 2022-01-01T14:36:11.000Z | # Copyright 2020 Keren Ye, University of Pittsburgh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reads from tfrecord files and yields batched tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from protos import reader_pb2
from tf_slim import tfexample_decoder
from graph_nets import utils_tf
from graph_nets.graphs import GraphsTuple
def _parse_single_example(example, options):
"""Parses a single tf.Example proto.
Args:
example: An Example proto.
options: An instance of reader_pb2.Reader.
Returns:
A dictionary indexed by tensor name.
"""
# Initialize `keys_to_features`.
example_fmt = {
'id':
tf.io.FixedLenFeature([], tf.int64, default_value=-1),
# Proposals
'image/n_proposal':
tf.io.FixedLenFeature([], tf.int64, default_value=0),
'image/proposal/bbox/ymin':
tf.io.VarLenFeature(tf.float32),
'image/proposal/bbox/xmin':
tf.io.VarLenFeature(tf.float32),
'image/proposal/bbox/ymax':
tf.io.VarLenFeature(tf.float32),
'image/proposal/bbox/xmax':
tf.io.VarLenFeature(tf.float32),
'image/proposal/feature':
tf.io.VarLenFeature(tf.float32),
# Caption graph.
'caption_graph/caption':
tf.io.VarLenFeature(tf.string),
'caption_graph/n_node':
tf.io.VarLenFeature(tf.int64),
'caption_graph/n_edge':
tf.io.VarLenFeature(tf.int64),
'caption_graph/nodes':
tf.io.VarLenFeature(tf.string),
'caption_graph/edges':
tf.io.VarLenFeature(tf.string),
'caption_graph/senders':
tf.io.VarLenFeature(tf.int64),
'caption_graph/receivers':
tf.io.VarLenFeature(tf.int64),
# Ground-truth.
'scene_graph/n_relation':
tf.io.FixedLenFeature([], tf.int64, default_value=0),
'scene_graph/predicate':
tf.io.VarLenFeature(tf.string),
'scene_graph/subject':
tf.io.VarLenFeature(tf.string),
'scene_graph/subject/bbox/ymin':
tf.io.VarLenFeature(tf.float32),
'scene_graph/subject/bbox/xmin':
tf.io.VarLenFeature(tf.float32),
'scene_graph/subject/bbox/ymax':
tf.io.VarLenFeature(tf.float32),
'scene_graph/subject/bbox/xmax':
tf.io.VarLenFeature(tf.float32),
'scene_graph/object':
tf.io.VarLenFeature(tf.string),
'scene_graph/object/bbox/ymin':
tf.io.VarLenFeature(tf.float32),
'scene_graph/object/bbox/xmin':
tf.io.VarLenFeature(tf.float32),
'scene_graph/object/bbox/ymax':
tf.io.VarLenFeature(tf.float32),
'scene_graph/object/bbox/xmax':
tf.io.VarLenFeature(tf.float32),
}
# Decode proposals.
parsed = tf.parse_single_example(example, example_fmt)
proposals = tfexample_decoder.BoundingBox(
prefix='image/proposal/bbox/').tensors_to_item(parsed)
n_proposal = tf.minimum(parsed['image/n_proposal'], options.max_n_proposal)
proposals = proposals[:options.max_n_proposal, :]
proposal_features = tf.reshape(
tf.sparse_tensor_to_dense(parsed['image/proposal/feature']),
[-1, options.feature_dimensions])[:options.max_n_proposal, :]
# Decode and randomly sample a caption graph.
graphs = GraphsTuple(
globals=None,
n_node=tf.sparse_tensor_to_dense(parsed['caption_graph/n_node'], 0),
n_edge=tf.sparse_tensor_to_dense(parsed['caption_graph/n_edge'], 0),
nodes=tf.sparse_tensor_to_dense(parsed['caption_graph/nodes'], ''),
edges=tf.sparse_tensor_to_dense(parsed['caption_graph/edges'], ''),
senders=tf.sparse_tensor_to_dense(parsed['caption_graph/senders'], 0),
receivers=tf.sparse_tensor_to_dense(parsed['caption_graph/receivers'], 0))
num_graphs = utils_tf.get_num_graphs(graphs)
index = tf.random.uniform([], minval=0, maxval=num_graphs, dtype=tf.int32)
caption = tf.sparse_tensor_to_dense(parsed['caption_graph/caption'])[index]
caption_graph = utils_tf.get_graph(graphs, index)
# Decode the ground-truth.
subject_boxes = tfexample_decoder.BoundingBox(
prefix='scene_graph/subject/bbox/').tensors_to_item(parsed)
object_boxes = tfexample_decoder.BoundingBox(
prefix='scene_graph/object/bbox/').tensors_to_item(parsed)
feature_dict = {
'id':
parsed['id'],
# Proposals.
'image/n_proposal':
n_proposal,
'image/proposal':
proposals,
'image/proposal/feature':
proposal_features,
# Caption graph.
'caption_graph/caption':
caption,
'caption_graph/n_node':
caption_graph.n_node[0],
'caption_graph/n_edge':
caption_graph.n_edge[0],
'caption_graph/nodes':
caption_graph.nodes,
'caption_graph/edges':
caption_graph.edges,
'caption_graph/senders':
caption_graph.senders,
'caption_graph/receivers':
caption_graph.receivers,
# Ground-truth.
'scene_graph/n_relation':
parsed['scene_graph/n_relation'],
'scene_graph/predicate':
tf.sparse_tensor_to_dense(parsed['scene_graph/predicate'], ''),
'scene_graph/subject':
tf.sparse_tensor_to_dense(parsed['scene_graph/subject'], ''),
'scene_graph/subject/box':
subject_boxes,
'scene_graph/object':
tf.sparse_tensor_to_dense(parsed['scene_graph/object'], ''),
'scene_graph/object/box':
object_boxes,
}
for key in feature_dict.keys():
if key != 'id' and feature_dict[key].dtype == tf.int64:
feature_dict[key] = tf.cast(feature_dict[key], tf.int32)
return feature_dict
def _create_dataset(options, is_training, input_pipeline_context=None):
"""Creates dataset object based on options.
Args:
options: An instance of reader_pb2.Reader.
is_training: If true, shuffle the dataset.
input_pipeline_context: A tf.distribute.InputContext instance.
Returns:
A tf.data.Dataset object.
"""
batch_size = options.batch_size
dataset = tf.data.Dataset.list_files(options.input_pattern[:],
shuffle=is_training)
parse_fn = lambda x: _parse_single_example(x, options)
dataset = dataset.interleave(tf.data.TFRecordDataset,
cycle_length=options.interleave_cycle_length)
dataset = dataset.map(map_func=parse_fn,
num_parallel_calls=options.num_parallel_calls)
if is_training:
dataset = dataset.repeat()
dataset = dataset.shuffle(options.shuffle_buffer_size)
padded_shapes = {
'id': [],
'image/n_proposal': [],
'image/proposal': [None, 4],
'image/proposal/feature': [None, options.feature_dimensions],
'caption_graph/caption': [],
'caption_graph/n_node': [],
'caption_graph/n_edge': [],
'caption_graph/nodes': [None],
'caption_graph/edges': [None],
'caption_graph/senders': [None],
'caption_graph/receivers': [None],
'scene_graph/n_relation': [],
'scene_graph/predicate': [None],
'scene_graph/subject': [None],
'scene_graph/subject/box': [None, 4],
'scene_graph/object': [None],
'scene_graph/object/box': [None, 4],
}
dataset = dataset.padded_batch(batch_size,
padded_shapes=padded_shapes,
drop_remainder=True)
dataset = dataset.prefetch(options.prefetch_buffer_size)
return dataset
def get_input_fn(options, is_training):
"""Returns a function that generate input examples.
Args:
options: An instance of reader_pb2.Reader.
is_training: If true, shuffle the dataset.
Returns:
input_fn: a callable that returns a dataset.
"""
if not isinstance(options, reader_pb2.CaptionGraphReader):
raise ValueError(
'options has to be an instance of SceneGraphTextGraphReader.')
def _input_fn(input_pipeline_context=None):
"""Returns a python dictionary.
Returns:
A dataset that can be fed to estimator.
"""
return _create_dataset(options, is_training, input_pipeline_context)
return _input_fn
| 35.18 | 80 | 0.66606 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from protos import reader_pb2
from tf_slim import tfexample_decoder
from graph_nets import utils_tf
from graph_nets.graphs import GraphsTuple
def _parse_single_example(example, options):
example_fmt = {
'id':
tf.io.FixedLenFeature([], tf.int64, default_value=-1),
'image/n_proposal':
tf.io.FixedLenFeature([], tf.int64, default_value=0),
'image/proposal/bbox/ymin':
tf.io.VarLenFeature(tf.float32),
'image/proposal/bbox/xmin':
tf.io.VarLenFeature(tf.float32),
'image/proposal/bbox/ymax':
tf.io.VarLenFeature(tf.float32),
'image/proposal/bbox/xmax':
tf.io.VarLenFeature(tf.float32),
'image/proposal/feature':
tf.io.VarLenFeature(tf.float32),
'caption_graph/caption':
tf.io.VarLenFeature(tf.string),
'caption_graph/n_node':
tf.io.VarLenFeature(tf.int64),
'caption_graph/n_edge':
tf.io.VarLenFeature(tf.int64),
'caption_graph/nodes':
tf.io.VarLenFeature(tf.string),
'caption_graph/edges':
tf.io.VarLenFeature(tf.string),
'caption_graph/senders':
tf.io.VarLenFeature(tf.int64),
'caption_graph/receivers':
tf.io.VarLenFeature(tf.int64),
'scene_graph/n_relation':
tf.io.FixedLenFeature([], tf.int64, default_value=0),
'scene_graph/predicate':
tf.io.VarLenFeature(tf.string),
'scene_graph/subject':
tf.io.VarLenFeature(tf.string),
'scene_graph/subject/bbox/ymin':
tf.io.VarLenFeature(tf.float32),
'scene_graph/subject/bbox/xmin':
tf.io.VarLenFeature(tf.float32),
'scene_graph/subject/bbox/ymax':
tf.io.VarLenFeature(tf.float32),
'scene_graph/subject/bbox/xmax':
tf.io.VarLenFeature(tf.float32),
'scene_graph/object':
tf.io.VarLenFeature(tf.string),
'scene_graph/object/bbox/ymin':
tf.io.VarLenFeature(tf.float32),
'scene_graph/object/bbox/xmin':
tf.io.VarLenFeature(tf.float32),
'scene_graph/object/bbox/ymax':
tf.io.VarLenFeature(tf.float32),
'scene_graph/object/bbox/xmax':
tf.io.VarLenFeature(tf.float32),
}
parsed = tf.parse_single_example(example, example_fmt)
proposals = tfexample_decoder.BoundingBox(
prefix='image/proposal/bbox/').tensors_to_item(parsed)
n_proposal = tf.minimum(parsed['image/n_proposal'], options.max_n_proposal)
proposals = proposals[:options.max_n_proposal, :]
proposal_features = tf.reshape(
tf.sparse_tensor_to_dense(parsed['image/proposal/feature']),
[-1, options.feature_dimensions])[:options.max_n_proposal, :]
graphs = GraphsTuple(
globals=None,
n_node=tf.sparse_tensor_to_dense(parsed['caption_graph/n_node'], 0),
n_edge=tf.sparse_tensor_to_dense(parsed['caption_graph/n_edge'], 0),
nodes=tf.sparse_tensor_to_dense(parsed['caption_graph/nodes'], ''),
edges=tf.sparse_tensor_to_dense(parsed['caption_graph/edges'], ''),
senders=tf.sparse_tensor_to_dense(parsed['caption_graph/senders'], 0),
receivers=tf.sparse_tensor_to_dense(parsed['caption_graph/receivers'], 0))
num_graphs = utils_tf.get_num_graphs(graphs)
index = tf.random.uniform([], minval=0, maxval=num_graphs, dtype=tf.int32)
caption = tf.sparse_tensor_to_dense(parsed['caption_graph/caption'])[index]
caption_graph = utils_tf.get_graph(graphs, index)
subject_boxes = tfexample_decoder.BoundingBox(
prefix='scene_graph/subject/bbox/').tensors_to_item(parsed)
object_boxes = tfexample_decoder.BoundingBox(
prefix='scene_graph/object/bbox/').tensors_to_item(parsed)
feature_dict = {
'id':
parsed['id'],
'image/n_proposal':
n_proposal,
'image/proposal':
proposals,
'image/proposal/feature':
proposal_features,
'caption_graph/caption':
caption,
'caption_graph/n_node':
caption_graph.n_node[0],
'caption_graph/n_edge':
caption_graph.n_edge[0],
'caption_graph/nodes':
caption_graph.nodes,
'caption_graph/edges':
caption_graph.edges,
'caption_graph/senders':
caption_graph.senders,
'caption_graph/receivers':
caption_graph.receivers,
'scene_graph/n_relation':
parsed['scene_graph/n_relation'],
'scene_graph/predicate':
tf.sparse_tensor_to_dense(parsed['scene_graph/predicate'], ''),
'scene_graph/subject':
tf.sparse_tensor_to_dense(parsed['scene_graph/subject'], ''),
'scene_graph/subject/box':
subject_boxes,
'scene_graph/object':
tf.sparse_tensor_to_dense(parsed['scene_graph/object'], ''),
'scene_graph/object/box':
object_boxes,
}
for key in feature_dict.keys():
if key != 'id' and feature_dict[key].dtype == tf.int64:
feature_dict[key] = tf.cast(feature_dict[key], tf.int32)
return feature_dict
def _create_dataset(options, is_training, input_pipeline_context=None):
batch_size = options.batch_size
dataset = tf.data.Dataset.list_files(options.input_pattern[:],
shuffle=is_training)
parse_fn = lambda x: _parse_single_example(x, options)
dataset = dataset.interleave(tf.data.TFRecordDataset,
cycle_length=options.interleave_cycle_length)
dataset = dataset.map(map_func=parse_fn,
num_parallel_calls=options.num_parallel_calls)
if is_training:
dataset = dataset.repeat()
dataset = dataset.shuffle(options.shuffle_buffer_size)
padded_shapes = {
'id': [],
'image/n_proposal': [],
'image/proposal': [None, 4],
'image/proposal/feature': [None, options.feature_dimensions],
'caption_graph/caption': [],
'caption_graph/n_node': [],
'caption_graph/n_edge': [],
'caption_graph/nodes': [None],
'caption_graph/edges': [None],
'caption_graph/senders': [None],
'caption_graph/receivers': [None],
'scene_graph/n_relation': [],
'scene_graph/predicate': [None],
'scene_graph/subject': [None],
'scene_graph/subject/box': [None, 4],
'scene_graph/object': [None],
'scene_graph/object/box': [None, 4],
}
dataset = dataset.padded_batch(batch_size,
padded_shapes=padded_shapes,
drop_remainder=True)
dataset = dataset.prefetch(options.prefetch_buffer_size)
return dataset
def get_input_fn(options, is_training):
if not isinstance(options, reader_pb2.CaptionGraphReader):
raise ValueError(
'options has to be an instance of SceneGraphTextGraphReader.')
def _input_fn(input_pipeline_context=None):
return _create_dataset(options, is_training, input_pipeline_context)
return _input_fn
| true | true |
1c38de4ecdbdf4b3e8c0e08cbd427c35f5aa2c18 | 208 | py | Python | directory_app/config.py | mdprewitt/sigmund_droid | aa33df7285f2ab468016c08c98505bc2f8436734 | [
"MIT"
] | null | null | null | directory_app/config.py | mdprewitt/sigmund_droid | aa33df7285f2ab468016c08c98505bc2f8436734 | [
"MIT"
] | null | null | null | directory_app/config.py | mdprewitt/sigmund_droid | aa33df7285f2ab468016c08c98505bc2f8436734 | [
"MIT"
] | 1 | 2017-05-31T16:34:36.000Z | 2017-05-31T16:34:36.000Z | import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'directory.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
| 34.666667 | 78 | 0.764423 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'directory.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
| true | true |
1c38df0326d8b746f40fd4e79b8d48711c60488c | 15,455 | py | Python | oscar/lib/python2.7/site-packages/dns/rdata.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/dns/rdata.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/dns/rdata.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunk: At most this many octets that will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunk: int"""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
"""Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
"""
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
"""Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
"""
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')
def _escapify(qstring):
"""Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
"""
if isinstance(qstring, text_type):
qstring = qstring.encode()
if not isinstance(qstring, bytearray):
qstring = bytearray(qstring)
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + chr(c)
elif c >= 0x20 and c < 0x7F:
text += chr(c)
else:
text += '\\%03d' % c
return text
def _truncate_bitmap(what):
"""Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
@param what: a string of octets representing a bitmap.
@type what: string
@rtype: string
"""
for i in xrange(len(what) - 1, -1, -1):
if what[i] != 0:
return what[0: i + 1]
return what[0:1]
class Rdata(object):
"""Base class for all DNS rdata types.
"""
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
"""Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
"""
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
"""DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
@rtype: int
"""
return dns.rdatatype.NONE
def extended_rdatatype(self):
"""Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
"""
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
"""Convert an rdata to text format.
@rtype: string
"""
raise NotImplementedError
def to_wire(self, file, compress=None, origin=None):
"""Convert an rdata to wire format.
@rtype: string
"""
raise NotImplementedError
def to_digestable(self, origin=None):
"""Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form."""
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
"""Check that the current contents of the rdata's fields are
valid. If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
"""
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
"""Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
"""
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if our == their:
return 0
if our > their:
return 1
return -1
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def __hash__(self):
return hash(self.to_digestable(dns.name.root))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
def choose_relativity(self, origin=None, relativize=True):
"""Convert any domain names in the rdata to the specified
relativization.
"""
pass
class GenericRdata(Rdata):
"""Generate Rdata Class
This class is used for rdata types for which we have no better
implementation. It implements the DNS "unknown RRs" scheme.
"""
__slots__ = ['data']
def __init__(self, rdclass, rdtype, data):
super(GenericRdata, self).__init__(rdclass, rdtype)
self.data = data
def to_text(self, origin=None, relativize=True, **kw):
return r'\# %d ' % len(self.data) + _hexify(self.data)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
token = tok.get()
if not token.is_identifier() or token.value != '\#':
raise dns.exception.SyntaxError(
r'generic rdata does not start with \#')
length = tok.get_int()
chunks = []
while 1:
token = tok.get()
if token.is_eol_or_eof():
break
chunks.append(token.value.encode())
hex = b''.join(chunks)
data = binascii.unhexlify(hex)
if len(data) != length:
raise dns.exception.SyntaxError(
'generic rdata hex data has wrong length')
return cls(rdclass, rdtype, data)
def to_wire(self, file, compress=None, origin=None):
file.write(self.data)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
return cls(rdclass, rdtype, wire[current: current + rdlen])
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
def import_module(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
mod = _rdata_modules.get((rdclass, rdtype))
rdclass_text = dns.rdataclass.to_text(rdclass)
rdtype_text = dns.rdatatype.to_text(rdtype)
rdtype_text = rdtype_text.replace('-', '_')
if not mod:
mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
if not mod:
try:
mod = import_module('.'.join([_module_prefix,
rdclass_text, rdtype_text]))
_rdata_modules[(rdclass, rdtype)] = mod
except ImportError:
try:
mod = import_module('.'.join([_module_prefix,
'ANY', rdtype_text]))
_rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
except ImportError:
mod = None
if mod:
cls = getattr(mod, rdtype_text)
else:
cls = GenericRdata
return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_text() class method is called
with the parameters to this function.
If I{tok} is a string, then a tokenizer is created and the string
is used as its input.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer or input text
@type tok: dns.tokenizer.Tokenizer or string
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: Should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance"""
if isinstance(tok, string_types):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if cls != GenericRdata:
# peek at first token
token = tok.get()
tok.unget(token)
if token.is_identifier() and \
token.value == r'\#':
#
# Known type using the generic syntax. Extract the
# wire form from the generic syntax, and then run
# from_wire on it.
#
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_wire() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance"""
wire = dns.wiredata.maybe_wrap(wire)
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
| 33.671024 | 79 | 0.604465 |
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')
def _escapify(qstring):
if isinstance(qstring, text_type):
qstring = qstring.encode()
if not isinstance(qstring, bytearray):
qstring = bytearray(qstring)
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + chr(c)
elif c >= 0x20 and c < 0x7F:
text += chr(c)
else:
text += '\\%03d' % c
return text
def _truncate_bitmap(what):
for i in xrange(len(what) - 1, -1, -1):
if what[i] != 0:
return what[0: i + 1]
return what[0:1]
class Rdata(object):
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
return dns.rdatatype.NONE
def extended_rdatatype(self):
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
raise NotImplementedError
def to_wire(self, file, compress=None, origin=None):
raise NotImplementedError
def to_digestable(self, origin=None):
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if our == their:
return 0
if our > their:
return 1
return -1
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def __hash__(self):
        """Hash on the digestable wire form, consistent with __eq__."""
        return hash(self.to_digestable(dns.name.root))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Build an rdata from tokenized text; subclasses must implement."""
        raise NotImplementedError
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        """Build an rdata from wire format; subclasses must implement."""
        raise NotImplementedError
def choose_relativity(self, origin=None, relativize=True):
        """No-op in the base class; subclasses containing names may override
        to (de)relativize them against *origin*."""
        pass
class GenericRdata(Rdata):

    """Generic rdata carried verbatim as bytes, used when no type-specific
    implementation exists.  Text form follows the RFC 3597 generic syntax:
    ``\\# <length> <hex data>``.
    """

    __slots__ = ['data']

    def __init__(self, rdclass, rdtype, data):
        super(GenericRdata, self).__init__(rdclass, rdtype)
        self.data = data

    def to_text(self, origin=None, relativize=True, **kw):
        # RFC 3597 generic representation.
        return r'\# %d ' % len(self.data) + _hexify(self.data)

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Parse the generic ``\\# <len> <hex>`` syntax into an instance.

        Raises dns.exception.SyntaxError when the leading ``\\#`` marker is
        missing or the hex payload length disagrees with the declared length.
        """
        token = tok.get()
        # r'\#' fixes the invalid '\#' escape sequence (a DeprecationWarning
        # on modern Pythons); the runtime string is unchanged.
        if not token.is_identifier() or token.value != r'\#':
            raise dns.exception.SyntaxError(
                r'generic rdata does not start with \#')
        length = tok.get_int()
        chunks = []
        while True:
            token = tok.get()
            if token.is_eol_or_eof():
                break
            chunks.append(token.value.encode())
        hex = b''.join(chunks)
        data = binascii.unhexlify(hex)
        if len(data) != length:
            raise dns.exception.SyntaxError(
                'generic rdata hex data has wrong length')
        return cls(rdclass, rdtype, data)

    def to_wire(self, file, compress=None, origin=None):
        # Wire form is just the raw bytes.
        file.write(self.data)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        return cls(rdclass, rdtype, wire[current: current + rdlen])
_rdata_modules = {}  # (rdclass, rdtype) -> implementation module cache
_module_prefix = 'dns.rdtypes'  # package holding per-type rdata modules
def get_rdata_class(rdclass, rdtype):
    """Return the implementation class for (rdclass, rdtype).

    A class-specific module (dns.rdtypes.<class>.<type>) is tried first,
    then the class-independent module (dns.rdtypes.ANY.<type>); successful
    imports are memoized in _rdata_modules.  Falls back to GenericRdata
    when no module provides the type.
    """
    def import_module(name):
        # __import__ returns the top-level package; walk down to the leaf.
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod

    mod = _rdata_modules.get((rdclass, rdtype))
    rdclass_text = dns.rdataclass.to_text(rdclass)
    rdtype_text = dns.rdatatype.to_text(rdtype)
    rdtype_text = rdtype_text.replace('-', '_')
    if not mod:
        # Consistency fix: look up with dns.rdataclass.ANY, the same key
        # used when storing below (the old code read dns.rdatatype.ANY;
        # both constants are 255, so behavior is unchanged).
        mod = _rdata_modules.get((dns.rdataclass.ANY, rdtype))
        if not mod:
            try:
                mod = import_module('.'.join([_module_prefix,
                                              rdclass_text, rdtype_text]))
                _rdata_modules[(rdclass, rdtype)] = mod
            except ImportError:
                try:
                    mod = import_module('.'.join([_module_prefix,
                                                  'ANY', rdtype_text]))
                    _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
                except ImportError:
                    mod = None
    if mod:
        cls = getattr(mod, rdtype_text)
    else:
        cls = GenericRdata
    return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
    """Build an rdata object from text.

    *tok* may be a plain string or a dns.tokenizer.Tokenizer.  Known types
    written in the RFC 3597 generic syntax are converted via their wire
    form so the type-specific class is still used.
    """
    if isinstance(tok, string_types):
        tok = dns.tokenizer.Tokenizer(tok)
    cls = get_rdata_class(rdclass, rdtype)
    if cls != GenericRdata:
        # peek at first token
        token = tok.get()
        tok.unget(token)
        if token.is_identifier() and \
                token.value == r'\#':
            #
            # Known type using the generic syntax. Extract the
            # wire form from the generic syntax, and then run
            # from_wire on it.
            #
            rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
                                           relativize)
            return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
                             origin)
    return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
    """Build an rdata object from *rdlen* bytes of wire format at *current*."""
    wrapped = dns.wiredata.maybe_wrap(wire)
    rdata_cls = get_rdata_class(rdclass, rdtype)
    return rdata_cls.from_wire(rdclass, rdtype, wrapped, current, rdlen,
                               origin)
| true | true |
1c38df18a2327e94efde55196d9aed1303aa21ac | 51 | py | Python | src/medias/__init__.py | jhernandez18p/backend-lmp | 853e274f37fe8b3c6f811a4ccd8f33bf020015d2 | [
"MIT"
] | 14 | 2019-10-03T19:37:30.000Z | 2019-10-16T02:12:32.000Z | src/medias/__init__.py | jhernandez18p/backend-lmp | 853e274f37fe8b3c6f811a4ccd8f33bf020015d2 | [
"MIT"
] | 6 | 2020-02-11T23:20:46.000Z | 2022-03-11T23:32:23.000Z | src/medias/__init__.py | jhernandez18p/backend-lmp | 853e274f37fe8b3c6f811a4ccd8f33bf020015d2 | [
"MIT"
] | null | null | null | default_app_config = 'src.medias.apps.MediasConfig' | 51 | 51 | 0.843137 | default_app_config = 'src.medias.apps.MediasConfig' | true | true |
1c38dfcb0c4a00f686e8e705afe804167a437737 | 7,944 | py | Python | jarvis.py | vineelsai26/J.A.R.V.I.S | 5866f73c0186687c1f0ca17b6e0fdd04d627265f | [
"MIT"
] | 37 | 2020-10-04T13:34:16.000Z | 2021-12-15T10:44:15.000Z | jarvis.py | vineelsai26/J.A.R.V.I.S | 5866f73c0186687c1f0ca17b6e0fdd04d627265f | [
"MIT"
] | 13 | 2020-10-04T12:22:48.000Z | 2020-11-01T14:03:26.000Z | jarvis.py | vineelsai26/J.A.R.V.I.S | 5866f73c0186687c1f0ca17b6e0fdd04d627265f | [
"MIT"
] | 29 | 2020-10-04T14:02:14.000Z | 2021-10-09T08:02:07.000Z | import pyttsx3
import wikipedia
import speech_recognition as sr
import webbrowser
import datetime
import os
import sys
import smtplib
import psutil
import pyjokes
import pyautogui
from news import speak_news, getNewsUrl
from diction import translate
from loc import weather
from youtube import youtube
from quote import tell_quote
import psutil
import pyjokes
from sys import platform
import os
import getpass
engine = pyttsx3.init()  # module-wide text-to-speech engine
voices = engine.getProperty('voices')  # available system voices
engine.setProperty('voice', voices[0].id)  # default to the first voice
# print(voices[0].id)
def speak(audio):
    """Speak *audio* aloud via the module-level pyttsx3 engine (blocks
    until playback finishes)."""
    engine.say(audio)
    engine.runAndWait()
def screenshot():
    """Capture the full screen with pyautogui and save it as a PNG.

    NOTE(review): the save path is a literal placeholder string -- it must
    be replaced with a real directory before this works.
    """
    img = pyautogui.screenshot()
    img.save('path of folder you want to save/screenshot.png')
def cpu():
    """Speak the current CPU utilization percentage and battery charge.

    NOTE(review): psutil.sensors_battery() can return None on machines
    without a battery -- confirm before relying on battery.percent.
    """
    usage = str(psutil.cpu_percent())
    speak("CPU is at"+usage)
    battery = psutil.sensors_battery()
    speak("battery is at")
    speak(battery.percent)
def joke():
    """Speak the first five jokes from pyjokes.

    Fetches the joke list once instead of calling pyjokes.get_jokes() on
    every loop iteration as the original did (the returned list is the
    same each call, so behavior is unchanged); also tolerates a list
    shorter than five entries instead of raising IndexError.
    """
    jokes = pyjokes.get_jokes()
    for joke_text in jokes[:5]:
        speak(joke_text)
def takeCommand():
    """Listen on the default microphone and return the recognized text
    (Google speech API, language 'en-in'), or the string 'None' when
    recognition fails."""
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print('Listening...')
        r.pause_threshold = 1
        r.energy_threshold = 494
        r.adjust_for_ambient_noise(source, duration=1.5)
        audio = r.listen(source)
    try:
        print('Recognizing..')
        query = r.recognize_google(audio, language='en-in')
        print(f'User said: {query}\n')
    except Exception as e:
        # print(e)
        print('Say that again please...')
        return 'None'
    return query
def wishMe():
    """Greet the user according to the current local hour, report the
    weather, and announce readiness."""
    hour = datetime.datetime.now().hour
    if hour < 12:
        greeting = "Good Morning SIR"
    elif hour < 18:
        greeting = "Good Afternoon SIR"
    else:
        greeting = 'Good Evening SIR'
    speak(greeting)
    weather()
    speak('I am JARVIS. Please tell me how can I help you SIR?')
def sendEmail(to, content):
    """Send *content* to address *to* via Gmail SMTP over STARTTLS.

    NOTE(review): 'email'/'password' are placeholders -- real credentials
    must be supplied (preferably from the environment, not source code).
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.login('email', 'password')
    server.sendmail('email', to, content)
    server.close()
# NOTE(review): duplicate definition -- an identical cpu() already exists
# above; this redefinition shadows it with the same behavior.
def cpu():
    """Speak the current CPU utilization percentage and battery charge."""
    usage = str(psutil.cpu_percent())
    speak("CPU is at"+usage)
    battery = psutil.sensors_battery()
    speak("battery is at")
    speak(battery.percent)
# NOTE(review): duplicate definition -- an identical joke() already exists
# above; this redefinition shadows it with the same behavior.
def joke():
    """Speak the first five jokes returned by pyjokes."""
    for i in range(5):
        speak(pyjokes.get_jokes()[i])
# NOTE(review): duplicate definition -- an identical screenshot() already
# exists above; this redefinition shadows it with the same behavior.
def screenshot():
    """Capture the full screen and save it to a (placeholder) path."""
    img = pyautogui.screenshot()
    img.save('path of folder you want to save/screenshot.png')
if __name__ == '__main__':
    # Main voice-assistant loop: greet once, then dispatch each recognized
    # command through keyword matching.
    wishMe()
    while True:
        query = takeCommand().lower()

        if 'wikipedia' in query:
            speak('Searching Wikipedia....')
            query = query.replace('wikipedia', '')
            results = wikipedia.summary(query, sentences=2)
            speak('According to Wikipedia')
            print(results)
            speak(results)
        elif 'youtube downloader' in query:
            exec(open('youtube_downloader.py').read())
        elif 'voice' in query:
            if 'female' in query:
                engine.setProperty('voice', voices[1].id)
            else:
                engine.setProperty('voice', voices[0].id)
            speak("Hello Sir, I have switched my voice. How is it?")

        # NOTE(review): the two bare `if` statements below start new chains,
        # so a 'voice' query also reaches the second 'voice' handler further
        # down, which uses the opposite voice indices -- confirm which
        # mapping is intended before consolidating.
        if 'inspirational quote' in query:
            tell_quote()

        if 'jarvis are you there' in query:
            speak("Yes Sir, at your service")
        elif 'open youtube' in query:
            webbrowser.open_new_tab('https://youtube.com')
        elif 'cpu' in query:
            cpu()
        elif 'joke' in query:
            joke()
        elif 'screenshot' in query:
            speak("taking screenshot")
            screenshot()
        elif 'open google' in query:
            webbrowser.open_new_tab('https://google.com')
        elif 'open stackoverflow' in query:
            webbrowser.open_new_tab('https://stackoverflow.com')
        elif 'play music' in query:
            os.startfile("D:\\RoiNa.mp3")
        elif 'search youtube' in query:
            speak('What you want to search on Youtube?')
            youtube(takeCommand())
        elif 'the time' in query:
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f'Sir, the time is {strTime}')
        elif 'search' in query:
            speak('What do you want to search for?')
            search = takeCommand()
            url = 'https://google.com/search?q=' + search
            webbrowser.open_new_tab(
                url)
            speak('Here is What I found for' + search)
        elif 'location' in query:
            speak('What is the location?')
            location = takeCommand()
            url = 'https://google.nl/maps/place/' + location + '/&'
            webbrowser.open_new_tab(url)
            speak('Here is the location ' + location)
        elif 'your master' in query:
            # Bug fix: `platform == "win32" or "darwin"` was always true
            # because the non-empty string "darwin" is truthy by itself.
            if platform in ("win32", "darwin"):
                speak('Gaurav is my master. He created me couple of days ago')
            elif platform in ("linux", "linux2"):
                name = getpass.getuser()
                # Bug fix: speak() takes one argument; the original
                # two-argument call raised TypeError at runtime.
                speak(name + ' is my master. He is running me right now')
        elif 'your name' in query:
            speak('My name is JARVIS')
        elif 'stands for' in query:
            speak('J.A.R.V.I.S stands for JUST A RATHER VERY INTELLIGENT SYSTEM')
        elif 'open code' in query:
            if platform == "win32":
                os.startfile(
                    "C:\\Users\\gs935\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe")
            # Bug fix: trailing `or "darwin"` made this branch always true.
            elif platform in ("linux", "linux2", "darwin"):
                os.system('code .')
        elif 'shutdown' in query:
            if platform == "win32":
                os.system('shutdown /p /f')
            # Bug fix: trailing `or "darwin"` made this branch always true.
            elif platform in ("linux", "linux2", "darwin"):
                os.system('poweroff')
        # Removed three unreachable duplicate branches ('cpu', 'joke',
        # 'screenshot'): identical conditions appear earlier in this same
        # elif chain, so these branches could never execute.
        elif 'github' in query:
            webbrowser.open_new_tab(
                'https://github.com/gauravsingh9356')
        elif 'remember that' in query:
            speak("what should i remember sir")
            rememberMessage = takeCommand()
            speak("you said me to remember"+rememberMessage)
            remember = open('data.txt', 'w')
            remember.write(rememberMessage)
            remember.close()
        elif 'do you remember anything' in query:
            remember = open('data.txt', 'r')
            speak("you said me to remember that" + remember.read())
        elif 'sleep' in query:
            sys.exit()
        elif 'dictionary' in query:
            speak('What you want to search in your intelligent dictionary?')
            translate(takeCommand())
        elif 'news' in query:
            speak('Ofcourse sir..')
            speak_news()
            # NOTE(review): "while article" looks like a typo for "whole
            # article" in this spoken prompt.
            speak('Would you like to read the while article?')
            test = takeCommand()
            if 'yes' in test:
                speak('Ok Sir, Opening browser...')
                webbrowser.open(getNewsUrl())
                speak('You can now read the full news from this website.')
            else:
                speak('No Problem Sir')
        elif 'voice' in query:
            if 'female' in query:
                engine.setProperty('voice', voices[0].id)
            else:
                engine.setProperty('voice', voices[1].id)
            speak("Hello sir, I have switched my voice. How do you like it?")
        elif 'email to gaurav' in query:
            try:
                speak('What should I say?')
                content = takeCommand()
                to = 'email'
                sendEmail(to, content)
                speak('Email has been sent!')
            except Exception as e:
                speak('Sorry sir, but I am not able to send an email at the moment.')
| 28.6787 | 94 | 0.563444 | import pyttsx3
import wikipedia
import speech_recognition as sr
import webbrowser
import datetime
import os
import sys
import smtplib
import psutil
import pyjokes
import pyautogui
from news import speak_news, getNewsUrl
from diction import translate
from loc import weather
from youtube import youtube
from quote import tell_quote
import psutil
import pyjokes
from sys import platform
import os
import getpass
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def screenshot():
img = pyautogui.screenshot()
img.save('path of folder you want to save/screenshot.png')
def cpu():
usage = str(psutil.cpu_percent())
speak("CPU is at"+usage)
battery = psutil.sensors_battery()
speak("battery is at")
speak(battery.percent)
def joke():
for i in range(5):
speak(pyjokes.get_jokes()[i])
def takeCommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print('Listening...')
r.pause_threshold = 1
r.energy_threshold = 494
r.adjust_for_ambient_noise(source, duration=1.5)
audio = r.listen(source)
try:
print('Recognizing..')
query = r.recognize_google(audio, language='en-in')
print(f'User said: {query}\n')
except Exception as e:
print('Say that again please...')
return 'None'
return query
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour < 12:
speak("Good Morning SIR")
elif hour >= 12 and hour < 18:
speak("Good Afternoon SIR")
else:
speak('Good Evening SIR')
weather()
speak('I am JARVIS. Please tell me how can I help you SIR?')
def sendEmail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('email', 'password')
server.sendmail('email', to, content)
server.close()
def cpu():
usage = str(psutil.cpu_percent())
speak("CPU is at"+usage)
battery = psutil.sensors_battery()
speak("battery is at")
speak(battery.percent)
def joke():
for i in range(5):
speak(pyjokes.get_jokes()[i])
def screenshot():
img = pyautogui.screenshot()
img.save('path of folder you want to save/screenshot.png')
if __name__ == '__main__':
wishMe()
while True:
query = takeCommand().lower()
if 'wikipedia' in query:
speak('Searching Wikipedia....')
query = query.replace('wikipedia', '')
results = wikipedia.summary(query, sentences=2)
speak('According to Wikipedia')
print(results)
speak(results)
elif 'youtube downloader' in query:
exec(open('youtube_downloader.py').read())
elif 'voice' in query:
if 'female' in query:
engine.setProperty('voice', voices[1].id)
else:
engine.setProperty('voice', voices[0].id)
speak("Hello Sir, I have switched my voice. How is it?")
if 'inspirational quote' in query:
tell_quote()
if 'jarvis are you there' in query:
speak("Yes Sir, at your service")
elif 'open youtube' in query:
webbrowser.open_new_tab('https://youtube.com')
elif 'cpu' in query:
cpu()
elif 'joke' in query:
joke()
elif 'screenshot' in query:
speak("taking screenshot")
screenshot()
elif 'open google' in query:
webbrowser.open_new_tab('https://google.com')
elif 'open stackoverflow' in query:
webbrowser.open_new_tab('https://stackoverflow.com')
elif 'play music' in query:
os.startfile("D:\\RoiNa.mp3")
elif 'search youtube' in query:
speak('What you want to search on Youtube?')
youtube(takeCommand())
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f'Sir, the time is {strTime}')
elif 'search' in query:
speak('What do you want to search for?')
search = takeCommand()
url = 'https://google.com/search?q=' + search
webbrowser.open_new_tab(
url)
speak('Here is What I found for' + search)
elif 'location' in query:
speak('What is the location?')
location = takeCommand()
url = 'https://google.nl/maps/place/' + location + '/&'
webbrowser.open_new_tab(url)
speak('Here is the location ' + location)
elif 'your master' in query:
if platform == "win32" or "darwin":
speak('Gaurav is my master. He created me couple of days ago')
elif platform == "linux" or platform == "linux2":
name = getpass.getuser()
speak(name, 'is my master. He is running me right now')
elif 'your name' in query:
speak('My name is JARVIS')
elif 'stands for' in query:
speak('J.A.R.V.I.S stands for JUST A RATHER VERY INTELLIGENT SYSTEM')
elif 'open code' in query:
if platform == "win32":
os.startfile(
"C:\\Users\\gs935\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe")
elif platform == "linux" or platform == "linux2" or "darwin":
os.system('code .')
elif 'shutdown' in query:
if platform == "win32":
os.system('shutdown /p /f')
elif platform == "linux" or platform == "linux2" or "darwin":
os.system('poweroff')
elif 'cpu' in query:
cpu()
elif 'joke' in query:
joke()
elif 'screenshot' in query:
speak("taking screenshot")
screenshot()
elif 'github' in query:
webbrowser.open_new_tab(
'https://github.com/gauravsingh9356')
elif 'remember that' in query:
speak("what should i remember sir")
rememberMessage = takeCommand()
speak("you said me to remember"+rememberMessage)
remember = open('data.txt', 'w')
remember.write(rememberMessage)
remember.close()
elif 'do you remember anything' in query:
remember = open('data.txt', 'r')
speak("you said me to remember that" + remember.read())
elif 'sleep' in query:
sys.exit()
elif 'dictionary' in query:
speak('What you want to search in your intelligent dictionary?')
translate(takeCommand())
elif 'news' in query:
speak('Ofcourse sir..')
speak_news()
speak('Would you like to read the while article?')
test = takeCommand()
if 'yes' in test:
speak('Ok Sir, Opening browser...')
webbrowser.open(getNewsUrl())
speak('You can now read the full news from this website.')
else:
speak('No Problem Sir')
elif 'voice' in query:
if 'female' in query:
engine.setProperty('voice', voices[0].id)
else:
engine.setProperty('voice', voices[1].id)
speak("Hello sir, I have switched my voice. How do you like it?")
elif 'email to gaurav' in query:
try:
speak('What should I say?')
content = takeCommand()
to = 'email'
sendEmail(to, content)
speak('Email has been sent!')
except Exception as e:
speak('Sorry sir, but I am not able to send an email at the moment.')
| true | true |
1c38dfd1af8fff86c7708e78abee25fcf5601814 | 1,431 | py | Python | blog/tests.py | Psemp/artsetforme_public | 240bb8ef22c0589f168b24c0ee5ed8e9030fe94a | [
"MIT"
] | null | null | null | blog/tests.py | Psemp/artsetforme_public | 240bb8ef22c0589f168b24c0ee5ed8e9030fe94a | [
"MIT"
] | 1 | 2021-10-08T22:20:09.000Z | 2021-10-08T22:20:09.000Z | blog/tests.py | Psemp/artsetforme_public | 240bb8ef22c0589f168b24c0ee5ed8e9030fe94a | [
"MIT"
] | null | null | null | from django.test import TestCase
from backoffice.models import Topic
from django.utils import timezone
from django.urls import reverse
from .models import Blogpost
class BlogTest(TestCase):
    """Integration test: the blog listing view returns all blog posts."""

    def setUp(self):
        """Create three topics and two blog posts (the second without
        content) as shared fixtures."""
        self.category_1_info = {
            'name': 'topic1',
        }
        Topic.objects.create(**self.category_1_info)

        self.category_2_info = {
            'name': 'topic2',
        }
        Topic.objects.create(**self.category_2_info)

        self.category_3_info = {
            'name': 'topic3',
        }
        Topic.objects.create(**self.category_3_info)

        self.blogpost1_data = {
            'category': Topic.objects.get(name='topic1'),
            'title': "bp1",
            'content': 'some content',
            'date_posted': timezone.now()
        }
        Blogpost.objects.create(**self.blogpost1_data)

        self.blogpost2_data = {
            'category': Topic.objects.get(name='topic2'),
            'title': "bp2",
            'date_posted': timezone.now()
        }
        Blogpost.objects.create(**self.blogpost2_data)

    def test_blogpost_test(self):
        """Both posts appear in the 'blog:actualites' listing context."""
        response = self.client.get(reverse('blog:actualites'))
        # Removed a leftover debug print of response.context['object_list'].
        self.assertIn(Blogpost.objects.get(title="bp1"), response.context['object_list'])
        self.assertIn(Blogpost.objects.get(title="bp2"), response.context['object_list'])
| 30.446809 | 89 | 0.596785 | from django.test import TestCase
from backoffice.models import Topic
from django.utils import timezone
from django.urls import reverse
from .models import Blogpost
class BlogTest(TestCase):
def setUp(self):
self.category_1_info = {
'name': 'topic1',
}
Topic.objects.create(**self.category_1_info)
self.category_2_info = {
'name': 'topic2',
}
Topic.objects.create(**self.category_2_info)
self.category_3_info = {
'name': 'topic3',
}
Topic.objects.create(**self.category_3_info)
self.blogpost1_data = {
'category': Topic.objects.get(name='topic1'),
'title': "bp1",
'content': 'some content',
'date_posted': timezone.now()
}
Blogpost.objects.create(**self.blogpost1_data)
self.blogpost2_data = {
'category': Topic.objects.get(name='topic2'),
'title': "bp2",
'date_posted': timezone.now()
}
Blogpost.objects.create(**self.blogpost2_data)
def test_blogpost_test(self):
response = self.client.get(reverse('blog:actualites'))
print(response.context['object_list'])
self.assertIn(Blogpost.objects.get(title="bp1"), response.context['object_list'])
self.assertIn(Blogpost.objects.get(title="bp2"), response.context['object_list'])
| true | true |
1c38e17812cf212a45e39667811982b47fb89a65 | 1,473 | py | Python | corehq/apps/commtrack/management/commands/check_multiple_parentage.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | 1 | 2015-02-10T23:26:39.000Z | 2015-02-10T23:26:39.000Z | corehq/apps/commtrack/management/commands/check_multiple_parentage.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/commtrack/management/commands/check_multiple_parentage.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq.apps.locations.models import Location
import csv
class Command(BaseCommand):
    """Report CommTrack location types that allow multiple parent types.

    Writes parentage_results.csv with one row per (domain, location type)
    whose allowed_parents has more than one entry, including how many
    locations of that type currently exist.
    """

    def handle(self, *args, **options):
        with open('parentage_results.csv', 'wb+') as csvfile:
            csv_writer = csv.writer(
                csvfile,
                delimiter=',',
                quotechar='|',
                quoting=csv.QUOTE_MINIMAL
            )

            csv_writer.writerow([
                'id',
                'name',
                'is_test',
                'location_type',
                'number_of_offending_locations',
            ])

            domains = Domain.get_all()

            for d in domains:
                if d.commtrack_enabled:
                    for loc_type in d.commtrack_settings.location_types:
                        if len(loc_type.allowed_parents) > 1:
                            # Count lazily instead of materializing the
                            # whole result list (was len(list(...))).
                            count = sum(1 for _ in Location.filter_by_type(
                                d.name,
                                loc_type.name,
                            ))

                            csv_writer.writerow([
                                d._id,
                                d.name,
                                d.is_test,
                                loc_type.name,
                                count
                            ])
| 32.733333 | 72 | 0.401901 | from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq.apps.locations.models import Location
import csv
class Command(BaseCommand):
def handle(self, *args, **options):
with open('parentage_results.csv', 'wb+') as csvfile:
csv_writer = csv.writer(
csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL
)
csv_writer.writerow([
'id',
'name',
'is_test',
'location_type',
'number_of_offending_locations',
])
domains = Domain.get_all()
for d in domains:
if d.commtrack_enabled:
for loc_type in d.commtrack_settings.location_types:
if len(loc_type.allowed_parents) > 1:
count = len(list(
Location.filter_by_type(
d.name,
loc_type.name,
)
))
csv_writer.writerow([
d._id,
d.name,
d.is_test,
loc_type.name,
count
])
| true | true |
1c38e19e93cf4d8e629f1a1cad228bd6e23f1bd9 | 11,832 | py | Python | 051_East_Text_Detection/text_detection_video_openvino.py | IgiArdiyanto/PINTO_model_zoo | 9247b56a7dff37f28a8a7822a7ef4dd9adf7234d | [
"MIT"
] | 1,529 | 2019-12-11T13:36:23.000Z | 2022-03-31T18:38:27.000Z | 051_East_Text_Detection/text_detection_video_openvino.py | IgiArdiyanto/PINTO_model_zoo | 9247b56a7dff37f28a8a7822a7ef4dd9adf7234d | [
"MIT"
] | 200 | 2020-01-06T09:24:42.000Z | 2022-03-31T17:29:08.000Z | 051_East_Text_Detection/text_detection_video_openvino.py | IgiArdiyanto/PINTO_model_zoo | 9247b56a7dff37f28a8a7822a7ef4dd9adf7234d | [
"MIT"
] | 288 | 2020-02-21T14:56:02.000Z | 2022-03-30T03:00:35.000Z | from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
import os
try:
from armv7l.openvino.inference_engine import IENetwork, IECore
except:
from openvino.inference_engine import IENetwork, IECore
fpsstr = ""  # overlay text showing the measured playback FPS
framecount = 0  # frames accumulated since the FPS string was last refreshed
time1 = 0  # running sum of instantaneous FPS over those frames
def rotated_Rectangle(img, rotatedRect, color, thickness=1, lineType=cv2.LINE_8, shift=0):
    """Return the four corner points of a rotated rectangle as an int ndarray.

    *rotatedRect* is ((centerX, centerY), (width, height), angle-in-radians).
    The drawing-style parameters (img, color, thickness, lineType, shift)
    are accepted for cv2-like call compatibility but unused here -- the
    caller draws with cv2.polylines.
    """
    (cx, cy), (w, h), theta = rotatedRect
    half_w = w / 2
    half_h = h / 2
    # Axis-aligned corners (truncated to int, matching original behavior).
    corners = [
        (int(cx + half_w), int(cy + half_h)),
        (int(cx + half_w), int(cy - half_h)),
        (int(cx - half_w), int(cy - half_h)),
        (int(cx - half_w), int(cy + half_h)),
    ]
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Homogeneous rotation about the rectangle center (cx, cy).
    rot = np.array([
        [cos_t, -sin_t, cx - cx * cos_t + cy * sin_t],
        [sin_t, cos_t, cy - cx * sin_t - cy * cos_t],
        [0, 0, 1],
    ])
    rotated = [np.dot(rot, np.array([[px], [py], [1]])) for (px, py) in corners]
    points = np.array([(int(p[0][0]), int(p[1][0])) for p in rotated])
    return points
def non_max_suppression(boxes, probs=None, angles=None, overlapThresh=0.3):
    """Greedy (Malisiewicz-style) non-maximum suppression.

    Parameters
    ----------
    boxes : ndarray of shape (N, 4) with (startX, startY, endX, endY) rows
    probs : optional ndarray of N confidences; when given, the highest
        confidence box is picked first
    angles : optional ndarray of N rotation angles carried along with boxes
    overlapThresh : boxes overlapping a picked box by more than this
        fraction of their own area are suppressed

    Returns
    -------
    (picked_boxes, picked_angles).  picked_angles is None when *angles*
    is None -- the original crashed here with ``angles[pick]`` on None.
    """
    # Nothing to do for an empty input.
    if len(boxes) == 0:
        return [], []

    # Work in floats so the overlap division below is exact.
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")

    pick = []
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    # +1 treats coordinates as inclusive pixel indices.
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Rank by confidence when available, else by the bottom y-coordinate.
    idxs = np.argsort(probs if probs is not None else y2)

    while len(idxs) > 0:
        # Pick the highest-ranked remaining box.
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)

        # Intersection of the picked box with every box still in play.
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)

        # Overlap as a fraction of each candidate box's own area.
        overlap = (w * h) / area[idxs[:last]]

        # Drop the picked box and everything overlapping it too much.
        idxs = np.delete(idxs, np.concatenate(
            ([last], np.where(overlap > overlapThresh)[0])))

    picked_boxes = boxes[pick].astype("int")
    if angles is None:
        # Bug fix: previously evaluated angles[pick] unconditionally,
        # raising TypeError when the default angles=None was used.
        return picked_boxes, None
    return picked_boxes, angles[pick]
def decode_predictions(scores, geometry1, geometry2, min_confidence=None):
    """Decode raw EAST network outputs into candidate text boxes.

    Parameters
    ----------
    scores : ndarray (1, 1, rows, cols) of text/no-text probabilities
    geometry1 : ndarray (1, 4, rows, cols) of distances from each cell
        to the four edges of its candidate box
    geometry2 : ndarray (1, 1, rows, cols) of box rotation angles (radians)
    min_confidence : score threshold; when None (the default) falls back
        to the CLI value args["min_confidence"], preserving the original
        behavior while allowing use without the module-level globals

    Returns
    -------
    (rects, confidences, angles) where each rect is an axis-aligned
    (startX, startY, endX, endY) tuple in input-image pixel coordinates.
    """
    if min_confidence is None:
        # Backward-compatible default: read the parsed CLI argument.
        min_confidence = args["min_confidence"]
    (numRows, numCols) = scores.shape[2:4]
    rects = []
    confidences = []
    angles = []
    for y in range(0, numRows):
        # Per-row views of the score map and the four edge-distance maps.
        scoresData = scores[0, 0, y]
        xData0 = geometry1[0, 0, y]
        xData1 = geometry1[0, 1, y]
        xData2 = geometry1[0, 2, y]
        xData3 = geometry1[0, 3, y]
        anglesData = geometry2[0, 0, y]
        for x in range(0, numCols):
            # Skip cells below the confidence threshold.
            if scoresData[x] < min_confidence:
                continue
            # The output maps are 4x smaller than the network input.
            (offsetX, offsetY) = (x * 4.0, y * 4.0)
            angle = anglesData[x]
            cos = np.cos(angle)
            sin = np.sin(angle)
            # Box size from the opposing edge distances.
            h = xData0[x] + xData2[x]
            w = xData1[x] + xData3[x]
            # Rotate the edge distances to locate the box's far corner,
            # then derive the near corner from the width/height.
            endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
            endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
            startX = int(endX - w)
            startY = int(endY - h)
            rects.append((startX, startY, endX, endY))
            confidences.append(scoresData[x])
            angles.append(angle)
    return (rects, confidences, angles)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-east", "--east", type=str, default="openvino/256x256/FP16/frozen_east_text_detection.xml", help="path to input EAST text detector")
ap.add_argument("-v", "--video", type=str, help="path to optinal input video file")
ap.add_argument("-c", "--min-confidence", type=float, default=0.5, help="minimum probability required to inspect a region")
ap.add_argument("-w", "--width", type=int, default=256, help="resized image width (should be multiple of 32)")
ap.add_argument("-e", "--height", type=int, default=256, help="resized image height (should be multiple of 32)")
ap.add_argument("-cw", "--camera_width", type=int, default=640, help='USB Camera resolution (width). (Default=640)')
ap.add_argument("-ch", "--camera_height", type=int, default=480, help='USB Camera resolution (height). (Default=480)')
ap.add_argument('--device', type=str, default='CPU', help='Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. \
Sample will look for a suitable plugin for device specified (CPU by default)')
args = vars(ap.parse_args())
# initialize the original frame dimensions, new frame dimensions,
# and ratio between the dimensions
(W, H) = (None, None)
(newW, newH) = (args["width"], args["height"])
(rW, rH) = (None, None)
mean = np.array([123.68, 116.779, 103.939][::-1], dtype="float32")
# define the two output layer names for the EAST detector model that
# we are interested -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text
# load the pre-trained EAST text detector
print("[INFO] loading EAST text detector...")
model_xml = args["east"]
# OpenVINO IR weights live next to the .xml topology with a .bin suffix.
model_bin = os.path.splitext(model_xml)[0] + ".bin"
ie = IECore()
net = ie.read_network(model_xml, model_bin)
input_info = net.input_info
input_blob = next(iter(input_info))
exec_net = ie.load_network(network=net, device_name=args["device"])
# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])
# start the FPS throughput estimator
fps = FPS().start()
# loop over frames from the video stream
while True:
    t1 = time.perf_counter()
    # grab the current frame, then handle if we are using a
    # VideoStream or VideoCapture object
    frame = vs.read()
    # cv2.VideoCapture.read() returns (ret, frame); VideoStream returns frame.
    frame = frame[1] if args.get("video", False) else frame
    # check to see if we have reached the end of the stream
    if frame is None:
        break
    # resize the frame, maintaining the aspect ratio
    frame = imutils.resize(frame, width=args["camera_width"])
    orig = frame.copy()
    # if our frame dimensions are None, we still need to compute the
    # ratio of old frame dimensions to new frame dimensions
    if W is None or H is None:
        (H, W) = frame.shape[:2]
        rW = W / float(newW)
        rH = H / float(newH)
    # resize the frame, this time ignoring aspect ratio
    frame = cv2.resize(frame, (newW, newH))
    # construct a blob from the frame and then perform a forward pass
    # of the model to obtain the two output layer sets
    frame = frame.astype(np.float32)
    frame -= mean
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # NCHW layout expected by the OpenVINO network input.
    frame = np.expand_dims(frame, axis=0)
    frame = np.transpose(frame, [0, 3, 1, 2])
    predictions = exec_net.infer(inputs={input_blob: frame})
    scores = predictions['feature_fusion/Conv_7/Sigmoid']
    geometry1 = predictions['feature_fusion/mul_6']
    geometry2 = predictions['feature_fusion/sub/Fused_Add_']
    # decode the predictions, then apply non-maxima suppression to
    # suppress weak, overlapping bounding boxes
    (rects, confidences, angles) = decode_predictions(scores, geometry1, geometry2)
    boxes, angles = non_max_suppression(np.array(rects), probs=confidences, angles=np.array(angles))
    # loop over the bounding boxes
    for ((startX, startY, endX, endY), angle) in zip(boxes, angles):
        # scale the bounding box coordinates based on the respective ratios
        startX = int(startX * rW)
        startY = int(startY * rH)
        endX = int(endX * rW)
        endY = int(endY * rH)
        # draw the bounding box on the frame
        width = abs(endX - startX)
        height = abs(endY - startY)
        centerX = int(startX + width / 2)
        centerY = int(startY + height / 2)
        rotatedRect = ((centerX, centerY), ((endX - startX), (endY - startY)), -angle)
        points = rotated_Rectangle(orig, rotatedRect, color=(0, 255, 0), thickness=2)
        cv2.polylines(orig, [points], isClosed=True, color=(0, 255, 0), thickness=2, lineType=cv2.LINE_8, shift=0)
    cv2.putText(orig, fpsstr, (args["camera_width"]-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
    # update the FPS counter
    fps.update()
    # show the output frame
    cv2.imshow("Text Detection", orig)
    if cv2.waitKey(1)&0xFF == ord('q'):
        break
    # FPS calculation
    # Refresh the on-screen FPS string every 10 frames with the average of
    # the accumulated per-frame instantaneous FPS values.
    framecount += 1
    if framecount >= 10:
        fpsstr = "(Playback) {:.1f} FPS".format(time1/10)
        framecount = 0
        time1 = 0
    t2 = time.perf_counter()
    elapsedTime = t2-t1
    time1 += 1/elapsedTime
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# if we are using a webcam, release the pointer
if not args.get("video", False):
    vs.stop()
# otherwise, release the file pointer
else:
    vs.release()
cv2.destroyAllWindows() | 38.045016 | 149 | 0.634466 | from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
import os
try:
from armv7l.openvino.inference_engine import IENetwork, IECore
except:
from openvino.inference_engine import IENetwork, IECore
fpsstr = ""
framecount = 0
time1 = 0
def rotated_Rectangle(img, rotatedRect, color, thickness=1, lineType=cv2.LINE_8, shift=0):
(x, y), (width, height), angle = rotatedRect
pt1_1 = (int(x + width / 2), int(y + height / 2))
pt2_1 = (int(x + width / 2), int(y - height / 2))
pt3_1 = (int(x - width / 2), int(y - height / 2))
pt4_1 = (int(x - width / 2), int(y + height / 2))
t = np.array([[np.cos(angle), -np.sin(angle), x-x*np.cos(angle)+y*np.sin(angle)],
[np.sin(angle), np.cos(angle), y-x*np.sin(angle)-y*np.cos(angle)],
[0, 0, 1]])
tmp_pt1_1 = np.array([[pt1_1[0]], [pt1_1[1]], [1]])
tmp_pt1_2 = np.dot(t, tmp_pt1_1)
pt1_2 = (int(tmp_pt1_2[0][0]), int(tmp_pt1_2[1][0]))
tmp_pt2_1 = np.array([[pt2_1[0]], [pt2_1[1]], [1]])
tmp_pt2_2 = np.dot(t, tmp_pt2_1)
pt2_2 = (int(tmp_pt2_2[0][0]), int(tmp_pt2_2[1][0]))
tmp_pt3_1 = np.array([[pt3_1[0]], [pt3_1[1]], [1]])
tmp_pt3_2 = np.dot(t, tmp_pt3_1)
pt3_2 = (int(tmp_pt3_2[0][0]), int(tmp_pt3_2[1][0]))
tmp_pt4_1 = np.array([[pt4_1[0]], [pt4_1[1]], [1]])
tmp_pt4_2 = np.dot(t, tmp_pt4_1)
pt4_2 = (int(tmp_pt4_2[0][0]), int(tmp_pt4_2[1][0]))
points = np.array([pt1_2, pt2_2, pt3_2, pt4_2])
return points
def non_max_suppression(boxes, probs=None, angles=None, overlapThresh=0.3):
if len(boxes) == 0:
return [], []
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
# compute the area of the bounding boxes and grab the indexes to sort
# (in the case that no probabilities are provided, simply sort on the bottom-left y-coordinate)
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = y2
# if probabilities are provided, sort on them instead
if probs is not None:
idxs = probs
# sort the indexes
idxs = np.argsort(idxs)
# keep looping while some indexes still remain in the indexes list
while len(idxs) > 0:
# grab the last index in the indexes list and add the index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of the bounding box and the smallest (x, y) coordinates for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list that have overlap greater than the provided overlap threshold
idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))
# return only the bounding boxes that were picked
return boxes[pick].astype("int"), angles[pick]
def decode_predictions(scores, geometry1, geometry2):
# grab the number of rows and columns from the scores volume, then
# initialize our set of bounding box rectangles and corresponding
# confidence scores
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
angles = []
# loop over the number of rows
for y in range(0, numRows):
# extract the scores (probabilities), followed by the
# geometrical data used to derive potential bounding box
# coordinates that surround text
scoresData = scores[0, 0, y]
xData0 = geometry1[0, 0, y]
xData1 = geometry1[0, 1, y]
xData2 = geometry1[0, 2, y]
xData3 = geometry1[0, 3, y]
anglesData = geometry2[0, 0, y]
# loop over the number of columns
for x in range(0, numCols):
# if our score does not have sufficient probability,
# ignore it
if scoresData[x] < args["min_confidence"]:
continue
# compute the offset factor as our resulting feature
# maps will be 4x smaller than the input image
(offsetX, offsetY) = (x * 4.0, y * 4.0)
# extract the rotation angle for the prediction and
# then compute the sin and cosine
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
# use the geometry volume to derive the width and height
# of the bounding box
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
# compute both the starting and ending (x, y)-coordinates
# for the text prediction bounding box
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
# add the bounding box coordinates and probability score
# to our respective lists
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
angles.append(angle)
# return a tuple of the bounding boxes and associated confidences
return (rects, confidences, angles)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-east", "--east", type=str, default="openvino/256x256/FP16/frozen_east_text_detection.xml", help="path to input EAST text detector")
ap.add_argument("-v", "--video", type=str, help="path to optinal input video file")
ap.add_argument("-c", "--min-confidence", type=float, default=0.5, help="minimum probability required to inspect a region")
ap.add_argument("-w", "--width", type=int, default=256, help="resized image width (should be multiple of 32)")
ap.add_argument("-e", "--height", type=int, default=256, help="resized image height (should be multiple of 32)")
ap.add_argument("-cw", "--camera_width", type=int, default=640, help='USB Camera resolution (width). (Default=640)')
ap.add_argument("-ch", "--camera_height", type=int, default=480, help='USB Camera resolution (height). (Default=480)')
ap.add_argument('--device', type=str, default='CPU', help='Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. \
Sample will look for a suitable plugin for device specified (CPU by default)')
args = vars(ap.parse_args())
# initialize the original frame dimensions, new frame dimensions,
# and ratio between the dimensions
(W, H) = (None, None)
(newW, newH) = (args["width"], args["height"])
(rW, rH) = (None, None)
mean = np.array([123.68, 116.779, 103.939][::-1], dtype="float32")
# define the two output layer names for the EAST detector model that
# we are interested -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text
# load the pre-trained EAST text detector
print("[INFO] loading EAST text detector...")
model_xml = args["east"]
model_bin = os.path.splitext(model_xml)[0] + ".bin"
ie = IECore()
net = ie.read_network(model_xml, model_bin)
input_info = net.input_info
input_blob = next(iter(input_info))
exec_net = ie.load_network(network=net, device_name=args["device"])
# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(1.0)
# otherwise, grab a reference to the video file
else:
vs = cv2.VideoCapture(args["video"])
# start the FPS throughput estimator
fps = FPS().start()
# loop over frames from the video stream
while True:
t1 = time.perf_counter()
# grab the current frame, then handle if we are using a
# VideoStream or VideoCapture object
frame = vs.read()
frame = frame[1] if args.get("video", False) else frame
# check to see if we have reached the end of the stream
if frame is None:
break
# resize the frame, maintaining the aspect ratio
frame = imutils.resize(frame, width=args["camera_width"])
orig = frame.copy()
# if our frame dimensions are None, we still need to compute the
# ratio of old frame dimensions to new frame dimensions
if W is None or H is None:
(H, W) = frame.shape[:2]
rW = W / float(newW)
rH = H / float(newH)
# resize the frame, this time ignoring aspect ratio
frame = cv2.resize(frame, (newW, newH))
# construct a blob from the frame and then perform a forward pass
# of the model to obtain the two output layer sets
frame = frame.astype(np.float32)
frame -= mean
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = np.expand_dims(frame, axis=0)
frame = np.transpose(frame, [0, 3, 1, 2])
predictions = exec_net.infer(inputs={input_blob: frame})
scores = predictions['feature_fusion/Conv_7/Sigmoid']
geometry1 = predictions['feature_fusion/mul_6']
geometry2 = predictions['feature_fusion/sub/Fused_Add_']
# decode the predictions, then apply non-maxima suppression to
# suppress weak, overlapping bounding boxes
(rects, confidences, angles) = decode_predictions(scores, geometry1, geometry2)
boxes, angles = non_max_suppression(np.array(rects), probs=confidences, angles=np.array(angles))
# loop over the bounding boxes
for ((startX, startY, endX, endY), angle) in zip(boxes, angles):
# scale the bounding box coordinates based on the respective ratios
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
# draw the bounding box on the frame
width = abs(endX - startX)
height = abs(endY - startY)
centerX = int(startX + width / 2)
centerY = int(startY + height / 2)
rotatedRect = ((centerX, centerY), ((endX - startX), (endY - startY)), -angle)
points = rotated_Rectangle(orig, rotatedRect, color=(0, 255, 0), thickness=2)
cv2.polylines(orig, [points], isClosed=True, color=(0, 255, 0), thickness=2, lineType=cv2.LINE_8, shift=0)
cv2.putText(orig, fpsstr, (args["camera_width"]-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
# update the FPS counter
fps.update()
# show the output frame
cv2.imshow("Text Detection", orig)
if cv2.waitKey(1)&0xFF == ord('q'):
break
# FPS calculation
framecount += 1
if framecount >= 10:
fpsstr = "(Playback) {:.1f} FPS".format(time1/10)
framecount = 0
time1 = 0
t2 = time.perf_counter()
elapsedTime = t2-t1
time1 += 1/elapsedTime
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# if we are using a webcam, release the pointer
if not args.get("video", False):
vs.stop()
# otherwise, release the file pointer
else:
vs.release()
# close all windows
cv2.destroyAllWindows() | true | true |
1c38e2c7aefebec42c213ec73e22b0e2e0d49554 | 1,135 | py | Python | python-stdlib/ssl/setup.py | mkomon/micropython-lib | 25ebe4a261e7b1c7c8471bceef2fd0e12837cdd2 | [
"PSF-2.0"
] | 1,556 | 2015-01-18T01:10:21.000Z | 2022-03-31T23:27:33.000Z | python-stdlib/ssl/setup.py | Li-Lian1069/micropython-lib | 1dfca5ad343b2841965df6c4e59f92d6d94a24bd | [
"PSF-2.0"
] | 414 | 2015-01-01T09:01:22.000Z | 2022-03-31T15:08:24.000Z | python-stdlib/ssl/setup.py | Li-Lian1069/micropython-lib | 1dfca5ad343b2841965df6c4e59f92d6d94a24bd | [
"PSF-2.0"
] | 859 | 2015-02-05T13:23:00.000Z | 2022-03-28T02:28:16.000Z | import sys
# Remove current dir from sys.path, otherwise setuptools will peek up our
# module instead of system's.
sys.path.pop(0)
from setuptools import setup
sys.path.append("..")
import sdist_upip
setup(
name="micropython-ssl",
version="0.1",
description="Dummy ssl module for MicroPython",
long_description="This is a dummy implementation of a module for MicroPython standard library.\nIt contains zero or very little functionality, and primarily intended to\navoid import errors (using idea that even if an application imports a\nmodule, it may be not using it onevery code path, so may work at least\npartially). It is expected that more complete implementation of the module\nwill be provided later. Please help with the development if you are\ninterested in this module.",
url="https://github.com/micropython/micropython-lib",
author="micropython-lib Developers",
author_email="micro-python@googlegroups.com",
maintainer="micropython-lib Developers",
maintainer_email="micro-python@googlegroups.com",
license="MIT",
cmdclass={"sdist": sdist_upip.sdist},
py_modules=["ssl"],
)
| 45.4 | 490 | 0.757709 | import sys
sys.path.pop(0)
from setuptools import setup
sys.path.append("..")
import sdist_upip
setup(
name="micropython-ssl",
version="0.1",
description="Dummy ssl module for MicroPython",
long_description="This is a dummy implementation of a module for MicroPython standard library.\nIt contains zero or very little functionality, and primarily intended to\navoid import errors (using idea that even if an application imports a\nmodule, it may be not using it onevery code path, so may work at least\npartially). It is expected that more complete implementation of the module\nwill be provided later. Please help with the development if you are\ninterested in this module.",
url="https://github.com/micropython/micropython-lib",
author="micropython-lib Developers",
author_email="micro-python@googlegroups.com",
maintainer="micropython-lib Developers",
maintainer_email="micro-python@googlegroups.com",
license="MIT",
cmdclass={"sdist": sdist_upip.sdist},
py_modules=["ssl"],
)
| true | true |
1c38e33ae64f835b0c8934e37fcf8d2d6100be58 | 2,677 | py | Python | scripts/import_csr_eeg.py | yhc29/eegdb | 3c81921a549018aa0d669c77cd400640c2ca1c72 | [
"MIT"
] | null | null | null | scripts/import_csr_eeg.py | yhc29/eegdb | 3c81921a549018aa0d669c77cd400640c2ca1c72 | [
"MIT"
] | null | null | null | scripts/import_csr_eeg.py | yhc29/eegdb | 3c81921a549018aa0d669c77cd400640c2ca1c72 | [
"MIT"
] | null | null | null | import sys
sys.path.insert(0, '..')
import os
import random
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from multiprocessing.pool import ThreadPool
import csv
from Utils.timer import Timer
from Eegdb.data_file import DataFile
from Eegdb.eegdb import Eegdb
import config.db_config_ibm as config_file
def test_data_import(eegdb,data_folder):
data_file_dict = {}
for sessionid in os.listdir(data_folder):
session_folder = os.path.join(data_folder, sessionid)
if os.path.isdir(session_folder):
subjectid = sessionid[:-2]
if subjectid[0]!="I":
continue
for filename in os.listdir(session_folder):
file_ext = filename.split(".")[-1]
fileid = filename.split("."+file_ext)[0]
if file_ext == "edf":
try:
data_file_dict[fileid][2] = session_folder + "/" + filename
except:
data_file_dict[fileid] = [subjectid, sessionid, session_folder + "/" + filename, None]
if file_ext == "txt":
try:
data_file_dict[fileid][3] = session_folder + "/" + filename
except:
data_file_dict[fileid] = [subjectid, sessionid, None, session_folder + "/" + filename]
# for fileid,file_info in data_file_dict.items():
# if not file_info[2] or not file_info[3]:
# print(file_info)
total_file_count = len(data_file_dict.keys())
print(total_file_count, "files found!")
imported_file_count = 0
_sample_rate_set = set([])
exclude_subjectid_set = set([])
for fileid,file_info in data_file_dict.items():
subjectid,sessionid,filepath,annotation_filepath = file_info
if subjectid in exclude_subjectid_set:
continue
if not filepath:
print("No edf found for",file_info)
if not annotation_filepath:
print("No edf found for",file_info)
# eegdb.import_csr_eeg_file(subjectid,sessionid,filepath,max_segment_length=None,annotation_filepath=annotation_filepath,max_sample_rate=500)
_code,_tmp_sr_set = eegdb.import_csr_eeg_file_v2(subjectid,sessionid,filepath,segment_duration=None,annotation_filepath=annotation_filepath,max_sample_rate=299)
if _code == -3:
exclude_subjectid_set.add(subjectid)
if _code == 1:
imported_file_count += 1
if _tmp_sr_set:
_sample_rate_set.update(_tmp_sr_set)
print(_sample_rate_set)
print(imported_file_count,"/",total_file_count, "imported.")
if __name__ == '__main__':
my_timer = Timer()
eegdb = Eegdb(config_file.mongo_url,config_file.eegdb_name,config_file.output_folder,config_file.data_folder)
test_data_import(eegdb,config_file.data_folder)
print(my_timer.stop()) | 35.223684 | 164 | 0.714606 | import sys
sys.path.insert(0, '..')
import os
import random
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from multiprocessing.pool import ThreadPool
import csv
from Utils.timer import Timer
from Eegdb.data_file import DataFile
from Eegdb.eegdb import Eegdb
import config.db_config_ibm as config_file
def test_data_import(eegdb,data_folder):
data_file_dict = {}
for sessionid in os.listdir(data_folder):
session_folder = os.path.join(data_folder, sessionid)
if os.path.isdir(session_folder):
subjectid = sessionid[:-2]
if subjectid[0]!="I":
continue
for filename in os.listdir(session_folder):
file_ext = filename.split(".")[-1]
fileid = filename.split("."+file_ext)[0]
if file_ext == "edf":
try:
data_file_dict[fileid][2] = session_folder + "/" + filename
except:
data_file_dict[fileid] = [subjectid, sessionid, session_folder + "/" + filename, None]
if file_ext == "txt":
try:
data_file_dict[fileid][3] = session_folder + "/" + filename
except:
data_file_dict[fileid] = [subjectid, sessionid, None, session_folder + "/" + filename]
total_file_count = len(data_file_dict.keys())
print(total_file_count, "files found!")
imported_file_count = 0
_sample_rate_set = set([])
exclude_subjectid_set = set([])
for fileid,file_info in data_file_dict.items():
subjectid,sessionid,filepath,annotation_filepath = file_info
if subjectid in exclude_subjectid_set:
continue
if not filepath:
print("No edf found for",file_info)
if not annotation_filepath:
print("No edf found for",file_info)
_code,_tmp_sr_set = eegdb.import_csr_eeg_file_v2(subjectid,sessionid,filepath,segment_duration=None,annotation_filepath=annotation_filepath,max_sample_rate=299)
if _code == -3:
exclude_subjectid_set.add(subjectid)
if _code == 1:
imported_file_count += 1
if _tmp_sr_set:
_sample_rate_set.update(_tmp_sr_set)
print(_sample_rate_set)
print(imported_file_count,"/",total_file_count, "imported.")
if __name__ == '__main__':
my_timer = Timer()
eegdb = Eegdb(config_file.mongo_url,config_file.eegdb_name,config_file.output_folder,config_file.data_folder)
test_data_import(eegdb,config_file.data_folder)
print(my_timer.stop()) | true | true |
1c38e3923e3af9aa9fd3756c54ad05ccca71ff75 | 855 | py | Python | cmsc-135/assignment-4/stadium_seating.py | joeypsmith/mc-projects | 7082988f46036c336b37de25747cec69593156e3 | [
"MIT"
] | null | null | null | cmsc-135/assignment-4/stadium_seating.py | joeypsmith/mc-projects | 7082988f46036c336b37de25747cec69593156e3 | [
"MIT"
] | null | null | null | cmsc-135/assignment-4/stadium_seating.py | joeypsmith/mc-projects | 7082988f46036c336b37de25747cec69593156e3 | [
"MIT"
] | null | null | null | #CMSC-135 Assignment 4: Stadium Seating
#Programmer: Joseph Smith
#Date: 3/2/2021
#Cost of each seat
A_COST = 20
B_COST = 15
C_COST = 10
#Number of each seat
aSeats = int(input("Enter count of A seats: "))
bSeats = int(input("Enter count of B seats: "))
cSeats = int(input("Enter count of C seats: "))
#Calculate income
aIncome = float(aSeats * A_COST)
bIncome = float(bSeats * B_COST)
cIncome = float(cSeats * C_COST)
def ShowIncome(a, b, c):
#Print income
print("Income from class A seats: $" + format(a, '.2f'))
print("Income from class B seats: $" + format(b, '.2f'))
print("Income from class C seats: $" + format(c, '.2f'))
#Calculate and print total income
totalIncome = aIncome + bIncome + cIncome
print("Total income: $" + format(totalIncome, '.2f'))
ShowIncome(aIncome, bIncome, cIncome)
input("Press enter")
| 24.428571 | 60 | 0.670175 |
A_COST = 20
B_COST = 15
C_COST = 10
aSeats = int(input("Enter count of A seats: "))
bSeats = int(input("Enter count of B seats: "))
cSeats = int(input("Enter count of C seats: "))
aIncome = float(aSeats * A_COST)
bIncome = float(bSeats * B_COST)
cIncome = float(cSeats * C_COST)
def ShowIncome(a, b, c):
print("Income from class A seats: $" + format(a, '.2f'))
print("Income from class B seats: $" + format(b, '.2f'))
print("Income from class C seats: $" + format(c, '.2f'))
totalIncome = aIncome + bIncome + cIncome
print("Total income: $" + format(totalIncome, '.2f'))
ShowIncome(aIncome, bIncome, cIncome)
input("Press enter")
| true | true |
1c38e393375be2e446618bdba6d55688060891ab | 661 | py | Python | examples/natel.py | romilly/reggie-dsl | cefe02acebcc3147bd976f91473a1e0e85ed7dac | [
"MIT"
] | 1 | 2018-11-22T04:29:14.000Z | 2018-11-22T04:29:14.000Z | examples/natel.py | romilly/reggie-dsl | cefe02acebcc3147bd976f91473a1e0e85ed7dac | [
"MIT"
] | 1 | 2018-11-22T04:41:13.000Z | 2018-11-22T04:41:13.000Z | examples/natel.py | romilly/reggie-dsl | cefe02acebcc3147bd976f91473a1e0e85ed7dac | [
"MIT"
] | null | null | null | from reggie.core import *
d3 = multiple(digit, 3)
d4 = d3 + digit
international = name(optional(escape('+1')),'i')
area = optional(osp + lp + name(d3,'area') + rp)
local = osp + name(d3,'exchange') + dash + name(d4,'number')
number = international + area + local
def convert(text, area_default='123'):
matched = match_line(number, text)
if matched is None:
return None
default(matched, 'i','+1')
default(matched, 'area', area_default)
return '{i} {area} {exchange} {number}'.format(**matched)
if __name__ == '__main__':
print(convert('(123) 345-2192'))
print(convert('345-2192'))
print(convert('+1 (123) 345-2192'))
| 27.541667 | 61 | 0.635401 | from reggie.core import *
d3 = multiple(digit, 3)
d4 = d3 + digit
international = name(optional(escape('+1')),'i')
area = optional(osp + lp + name(d3,'area') + rp)
local = osp + name(d3,'exchange') + dash + name(d4,'number')
number = international + area + local
def convert(text, area_default='123'):
matched = match_line(number, text)
if matched is None:
return None
default(matched, 'i','+1')
default(matched, 'area', area_default)
return '{i} {area} {exchange} {number}'.format(**matched)
if __name__ == '__main__':
print(convert('(123) 345-2192'))
print(convert('345-2192'))
print(convert('+1 (123) 345-2192'))
| true | true |
1c38e54775ed5406e58b4f38865c4df2653ca4a2 | 678 | py | Python | engine/client.py | aryaman0098/Record-Fetcher | 93090e4eff40b7854a28cb603eb0c0eab2fc6e47 | [
"MIT"
] | null | null | null | engine/client.py | aryaman0098/Record-Fetcher | 93090e4eff40b7854a28cb603eb0c0eab2fc6e47 | [
"MIT"
] | null | null | null | engine/client.py | aryaman0098/Record-Fetcher | 93090e4eff40b7854a28cb603eb0c0eab2fc6e47 | [
"MIT"
] | null | null | null | import sys
from socket import *
from protocol import *
name=sys.argv[1]
lang=sys.argv[2]
email=sys.argv[3]
phone=sys.argv[4]
academic=sys.argv[5]
other=sys.argv[6]
auth=sys.argv[7]
d={'name':name,'lang':lang,'email':email,'phone':phone,'academic':academic,'other':other,'auth':auth}
for key in ['email','phone','academic','other']:
if(d[key]=='0'):
d.pop(key)
content={}
for i in d:
if(i!='auth' and i!='lang'):
content[i]=d[i]
serverName = '127.0.0.1'
serverPort = 12000
clientSocket = socket(AF_INET, SOCK_DGRAM)
request=Message(clientSocket,serverName,serverPort)
request.write_client(content,d['auth'],d['lang'])
request.read_client()
clientSocket.close() | 19.941176 | 101 | 0.696165 | import sys
from socket import *
from protocol import *
name=sys.argv[1]
lang=sys.argv[2]
email=sys.argv[3]
phone=sys.argv[4]
academic=sys.argv[5]
other=sys.argv[6]
auth=sys.argv[7]
d={'name':name,'lang':lang,'email':email,'phone':phone,'academic':academic,'other':other,'auth':auth}
for key in ['email','phone','academic','other']:
if(d[key]=='0'):
d.pop(key)
content={}
for i in d:
if(i!='auth' and i!='lang'):
content[i]=d[i]
serverName = '127.0.0.1'
serverPort = 12000
clientSocket = socket(AF_INET, SOCK_DGRAM)
request=Message(clientSocket,serverName,serverPort)
request.write_client(content,d['auth'],d['lang'])
request.read_client()
clientSocket.close() | true | true |
1c38e5c18e7249d59ec2dcd4084f1605730cbcc5 | 5,298 | py | Python | kubernetes/client/models/v1_rolling_update_daemon_set.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 1 | 2019-10-07T13:54:36.000Z | 2019-10-07T13:54:36.000Z | kubernetes/client/models/v1_rolling_update_daemon_set.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 8 | 2020-10-28T01:18:36.000Z | 2021-06-11T01:06:15.000Z | kubernetes/client/models/v1_rolling_update_daemon_set.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 1 | 2021-03-16T16:05:33.000Z | 2021-03-16T16:05:33.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1RollingUpdateDaemonSet(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'max_unavailable': 'object'
}
attribute_map = {
'max_unavailable': 'maxUnavailable'
}
def __init__(self, max_unavailable=None, local_vars_configuration=None): # noqa: E501
"""V1RollingUpdateDaemonSet - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._max_unavailable = None
self.discriminator = None
if max_unavailable is not None:
self.max_unavailable = max_unavailable
@property
def max_unavailable(self):
"""Gets the max_unavailable of this V1RollingUpdateDaemonSet. # noqa: E501
The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update. # noqa: E501
:return: The max_unavailable of this V1RollingUpdateDaemonSet. # noqa: E501
:rtype: object
"""
return self._max_unavailable
@max_unavailable.setter
def max_unavailable(self, max_unavailable):
"""Sets the max_unavailable of this V1RollingUpdateDaemonSet.
The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update. # noqa: E501
:param max_unavailable: The max_unavailable of this V1RollingUpdateDaemonSet. # noqa: E501
:type: object
"""
self._max_unavailable = max_unavailable
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1RollingUpdateDaemonSet):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1RollingUpdateDaemonSet):
return True
return self.to_dict() != other.to_dict()
| 43.073171 | 852 | 0.662325 |
import pprint
import re
import six
from kubernetes.client.configuration import Configuration
class V1RollingUpdateDaemonSet(object):
openapi_types = {
'max_unavailable': 'object'
}
attribute_map = {
'max_unavailable': 'maxUnavailable'
}
def __init__(self, max_unavailable=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._max_unavailable = None
self.discriminator = None
if max_unavailable is not None:
self.max_unavailable = max_unavailable
@property
def max_unavailable(self):
return self._max_unavailable
@max_unavailable.setter
def max_unavailable(self, max_unavailable):
self._max_unavailable = max_unavailable
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1RollingUpdateDaemonSet):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, V1RollingUpdateDaemonSet):
return True
return self.to_dict() != other.to_dict()
| true | true |
1c38e71cf958ba8ee81d66f247f0a559745b91bb | 181 | py | Python | bin/linkEntropy.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | [
"Apache-2.0"
] | 6 | 2017-07-18T15:28:33.000Z | 2020-03-03T14:45:45.000Z | bin/linkEntropy.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | [
"Apache-2.0"
] | null | null | null | bin/linkEntropy.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | [
"Apache-2.0"
] | 3 | 2017-09-09T00:36:48.000Z | 2020-03-03T14:45:49.000Z | #!/usr/bin/env python3
"""Command-line wrapper for stats.cli_linkEntropy."""
import loadPath # Adds the project path.
import linkograph.stats
linkograph.stats.cli_linkEntropy()
| 20.111111 | 53 | 0.773481 |
import loadPath
import linkograph.stats
linkograph.stats.cli_linkEntropy()
| true | true |
1c38e7231d5c77c8bf1ca323c8fc43dd65c74f84 | 3,594 | py | Python | lib/wrapper/faster_rcnn_wrapper.py | nikolaevra/tf-faster-rcnn | 4a5a5f9cfd4dc6548ee9cf63f1122eadbc06ea39 | [
"MIT"
] | null | null | null | lib/wrapper/faster_rcnn_wrapper.py | nikolaevra/tf-faster-rcnn | 4a5a5f9cfd4dc6548ee9cf63f1122eadbc06ea39 | [
"MIT"
] | null | null | null | lib/wrapper/faster_rcnn_wrapper.py | nikolaevra/tf-faster-rcnn | 4a5a5f9cfd4dc6548ee9cf63f1122eadbc06ea39 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import os
import tensorflow as tf
from model.config import cfg
from model.test import im_detect
from nets.resnet_v1 import resnetv1
from nets.vgg16 import vgg16
from utils.timer import Timer
# The 21 PASCAL VOC class labels; index 0 is the background class.
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')
# Checkpoint filename for each supported feature-extraction backbone.
NETS = {
    'vgg16': ('vgg16_faster_rcnn_iter_70000.ckpt',),
    'res101': ('res101_faster_rcnn_iter_110000.ckpt',)
}
# Checkpoint sub-directory name for each supported training dataset.
DATASETS = {
    'pascal_voc': ('voc_2007_trainval',),
    'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)
}
class DetectorWrapper:
    """Thin wrapper around a pre-trained tf-faster-rcnn detection network.

    Restores a Faster R-CNN checkpoint (VGG16 or ResNet-101 backbone) into a
    TensorFlow session at construction time and exposes :meth:`detect` to run
    object detection on demo images.
    """

    def __init__(self, extraction_net='res101', dataset='pascal_voc_0712', num_classes=21,
                 tag='default', anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
        """Build the network graph and restore the pre-trained weights.

        :param extraction_net: feature-extraction backbone, 'vgg16' or 'res101'.
        :param dataset: key into DATASETS selecting the checkpoint directory.
        :param num_classes: number of object classes (including background).
        :param tag: architecture tag forwarded to ``create_architecture``.
        :param anchor_scales: RPN anchor scales.  The default is now an
            immutable tuple (the former list default was shared across calls);
            it is converted back to a list when handed to the network.
        :param anchor_ratios: RPN anchor aspect ratios.
        :raises IOError: if the checkpoint meta file cannot be found.
        :raises NotImplementedError: for an unknown ``extraction_net``.
        """
        cfg.TEST.HAS_RPN = True  # the test pipeline uses RPN-generated proposals

        # Resolve the checkpoint path for the requested backbone/dataset pair.
        self.extraction_net = extraction_net
        self.dataset = dataset
        self.tfmodel = os.path.join(
            'output',
            extraction_net,
            DATASETS[dataset][0],
            'default',
            NETS[extraction_net][0]
        )

        if not os.path.isfile(self.tfmodel + '.meta'):
            raise IOError('{:s} not found.\n'.format(self.tfmodel + '.meta'))

        # Allow falling back to CPU when no GPU is available, and grow GPU
        # memory on demand instead of reserving it all up front.
        self.tfconfig = tf.ConfigProto(allow_soft_placement=True)
        self.tfconfig.gpu_options.allow_growth = True

        # init tf session
        self.sess = tf.Session(config=self.tfconfig)

        # Instantiate the requested backbone network.
        if extraction_net == 'vgg16':
            self.net = vgg16()
        elif extraction_net == 'res101':
            self.net = resnetv1(num_layers=101)
        else:
            raise NotImplementedError(
                'Unsupported extraction_net: {!r}'.format(extraction_net))

        self.net.create_architecture(
            "TEST",
            num_classes=num_classes,
            tag=tag,
            # list(...) preserves the exact type the network previously
            # received when the default was a list literal.
            anchor_scales=list(anchor_scales),
            anchor_ratios=anchor_ratios
        )

        # Saver is an easy interface to save/load models and their weights
        # based on a checkpoint number; here we only restore.
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.tfmodel)
        print('Loaded network {:s}'.format(self.tfmodel))

    def detect(self, images):
        """Detect objects in a list of demo images.

        :param images: iterable of image filenames located under
            ``cfg.DATA_DIR/demo``.
        :return: dict mapping each filename to a dict with keys
            ``scores``, ``boxes`` and ``detection_time``.
        :raises IOError: if an image file cannot be read.
        """
        detections = {}
        for image in images:
            print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
            # Load the demo image from the configured data directory.
            im_file = os.path.join(cfg.DATA_DIR, 'demo', image)
            im = cv2.imread(im_file)
            # cv2.imread returns None on failure; fail loudly here instead of
            # crashing obscurely inside im_detect.
            if im is None:
                raise IOError('Could not read image: {:s}'.format(im_file))

            timer = Timer()
            timer.tic()
            # Run the forward pass and time it.
            scores, boxes = im_detect(self.sess, self.net, im)
            timer.toc()

            total_t = timer.total_time
            print('Detection took {:.3f}s for {:d} proposals'.format(total_t, boxes.shape[0]))

            detections[image] = {
                "scores": scores,
                "boxes": boxes,
                "detection_time": total_t
            }
        return detections
| 30.717949 | 94 | 0.584864 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import os
import tensorflow as tf
from model.config import cfg
from model.test import im_detect
from nets.resnet_v1 import resnetv1
from nets.vgg16 import vgg16
from utils.timer import Timer
# The 21 PASCAL VOC class labels; index 0 is the background class.
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')
# Checkpoint filename for each supported feature-extraction backbone.
NETS = {
    'vgg16': ('vgg16_faster_rcnn_iter_70000.ckpt',),
    'res101': ('res101_faster_rcnn_iter_110000.ckpt',)
}
# Checkpoint sub-directory name for each supported training dataset.
DATASETS = {
    'pascal_voc': ('voc_2007_trainval',),
    'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)
}
class DetectorWrapper:
    """Thin wrapper around a pre-trained tf-faster-rcnn detection network.

    Restores a Faster R-CNN checkpoint (VGG16 or ResNet-101 backbone) into a
    TensorFlow session at construction time and exposes :meth:`detect` to run
    object detection on demo images.
    """
    def __init__(self, extraction_net='res101', dataset='pascal_voc_0712', num_classes=21,
                 tag='default', anchor_scales=[8, 16, 32], anchor_ratios=(0.5, 1, 2)):
        """Build the network graph and restore the pre-trained weights.

        :param extraction_net: feature-extraction backbone, 'vgg16' or 'res101'.
        :param dataset: key into DATASETS selecting the checkpoint directory.
        :param num_classes: number of object classes (including background).
        :param tag: architecture tag forwarded to ``create_architecture``.
        :param anchor_scales: RPN anchor scales.
        :param anchor_ratios: RPN anchor aspect ratios.
        :raises IOError: if the checkpoint meta file cannot be found.
        :raises NotImplementedError: for an unknown ``extraction_net``.
        """
        cfg.TEST.HAS_RPN = True  # the test pipeline uses RPN-generated proposals
        # Resolve the checkpoint path for the requested backbone/dataset pair.
        self.extraction_net = extraction_net
        self.dataset = dataset
        self.tfmodel = os.path.join(
            'output',
            extraction_net,
            DATASETS[dataset][0],
            'default',
            NETS[extraction_net][0]
        )
        if not os.path.isfile(self.tfmodel + '.meta'):
            raise IOError('{:s} not found.\n'.format(self.tfmodel + '.meta'))
        # Allow falling back to CPU when no GPU is available, and grow GPU
        # memory on demand instead of reserving it all up front.
        self.tfconfig = tf.ConfigProto(allow_soft_placement=True)
        self.tfconfig.gpu_options.allow_growth = True
        # Initialize the TensorFlow session.
        self.sess = tf.Session(config=self.tfconfig)
        self.net = None
        # Instantiate the requested backbone network.
        if extraction_net == 'vgg16':
            self.net = vgg16()
        elif extraction_net == 'res101':
            self.net = resnetv1(num_layers=101)
        else:
            raise NotImplementedError
        self.net.create_architecture(
            "TEST",
            num_classes=num_classes,
            tag=tag,
            anchor_scales=anchor_scales,
            anchor_ratios=anchor_ratios
        )
        # Restore the pre-trained weights from the checkpoint.
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.tfmodel)
        print('Loaded network {:s}'.format(self.tfmodel))
    def detect(self, images):
        """Detect objects in a list of demo images.

        :param images: iterable of image filenames located under
            ``cfg.DATA_DIR/demo``.
        :return: dict mapping each filename to a dict with keys
            ``scores``, ``boxes`` and ``detection_time``.
        """
        detections = {}
        for image in images:
            print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
            # Load the demo image from the configured data directory.
            # NOTE(review): cv2.imread returns None for unreadable files and
            # this is not checked here — im_detect would then fail downstream.
            im_file = os.path.join(cfg.DATA_DIR, 'demo', image)
            im = cv2.imread(im_file)
            # Time the forward pass.
            timer = Timer()
            timer.tic()
            scores, boxes = im_detect(self.sess, self.net, im)
            timer.toc()
            total_t = timer.total_time
            print('Detection took {:.3f}s for {:d} proposals'.format(total_t, boxes.shape[0]))
            detections[image] = {
                "scores": scores,
                "boxes": boxes,
                "detection_time": total_t
            }
        return detections
| true | true |
1c38e83de610ddff4d22683d51874fa737b54bfb | 65,385 | py | Python | plotly/graph_objs/streamtube/_colorbar.py | paulamool/plotly.py | 6121ac1f324e247e4e4b2964d65d7393377777c0 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/graph_objs/streamtube/_colorbar.py | paulamool/plotly.py | 6121ac1f324e247e4e4b2964d65d7393377777c0 | [
"MIT"
] | 1 | 2020-12-15T16:56:11.000Z | 2020-12-15T16:56:11.000Z | plotly/graph_objs/streamtube/_colorbar.py | skeptycal/plotly.py | 2e5bf6e2f7c213295c405ece3e859f4d3f8030d1 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class ColorBar(BaseTraceHierarchyType):
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['bgcolor']
@bgcolor.setter
def bgcolor(self, val):
self['bgcolor'] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['bordercolor']
@bordercolor.setter
def bordercolor(self, val):
self['bordercolor'] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['borderwidth']
@borderwidth.setter
def borderwidth(self, val):
self['borderwidth'] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self['dtick']
@dtick.setter
def dtick(self, val):
self['dtick'] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self['exponentformat']
@exponentformat.setter
def exponentformat(self, val):
self['exponentformat'] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['len']
@len.setter
def len(self, val):
self['len'] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self['lenmode']
@lenmode.setter
def lenmode(self, val):
self['lenmode'] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self['nticks']
@nticks.setter
def nticks(self, val):
self['nticks'] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['outlinecolor']
@outlinecolor.setter
def outlinecolor(self, val):
self['outlinecolor'] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['outlinewidth']
@outlinewidth.setter
def outlinewidth(self, val):
self['outlinewidth'] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['separatethousands']
@separatethousands.setter
def separatethousands(self, val):
self['separatethousands'] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self['showexponent']
@showexponent.setter
def showexponent(self, val):
self['showexponent'] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showticklabels']
@showticklabels.setter
def showticklabels(self, val):
self['showticklabels'] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self['showtickprefix']
@showtickprefix.setter
def showtickprefix(self, val):
self['showtickprefix'] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self['showticksuffix']
@showticksuffix.setter
def showticksuffix(self, val):
self['showticksuffix'] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['thickness']
@thickness.setter
def thickness(self, val):
self['thickness'] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self['thicknessmode']
@thicknessmode.setter
def thicknessmode(self, val):
self['thicknessmode'] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self['tick0']
@tick0.setter
def tick0(self, val):
self['tick0'] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self['tickangle']
@tickangle.setter
def tickangle(self, val):
self['tickangle'] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['tickcolor']
@tickcolor.setter
def tickcolor(self, val):
self['tickcolor'] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of plotly.graph_objs.streamtube.colorbar.Tickfont
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.streamtube.colorbar.Tickfont
"""
return self['tickfont']
@tickfont.setter
def tickfont(self, val):
self['tickfont'] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-format/blob/master/READM
E.md#locale_format And for dates see:
https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We add one item to
d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['tickformat']
@tickformat.setter
def tickformat(self, val):
self['tickformat'] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.streamtube.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.streamtube.colorbar.Tickformatstop]
"""
return self['tickformatstops']
@tickformatstops.setter
def tickformatstops(self, val):
self['tickformatstops'] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['ticklen']
@ticklen.setter
def ticklen(self, val):
self['ticklen'] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self['tickmode']
@tickmode.setter
def tickmode(self, val):
self['tickmode'] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['tickprefix']
@tickprefix.setter
def tickprefix(self, val):
self['tickprefix'] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If **, this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self['ticks']
@ticks.setter
def ticks(self, val):
self['ticks'] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['ticksuffix']
@ticksuffix.setter
def ticksuffix(self, val):
self['ticksuffix'] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['ticktext']
@ticktext.setter
def ticktext(self, val):
self['ticktext'] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on plot.ly for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['ticktextsrc']
@ticktextsrc.setter
def ticktextsrc(self, val):
self['ticktextsrc'] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['tickvals']
@tickvals.setter
def tickvals(self, val):
self['tickvals'] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on plot.ly for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['tickvalssrc']
@tickvalssrc.setter
def tickvalssrc(self, val):
self['tickvalssrc'] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['tickwidth']
@tickwidth.setter
def tickwidth(self, val):
self['tickwidth'] = val
# title
# -----
@property
def title(self):
"""
Sets the title of the color bar.
The 'title' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['title']
@title.setter
def title(self, val):
self['title'] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Sets this color bar's title font.
The 'titlefont' property is an instance of Titlefont
that may be specified as:
- An instance of plotly.graph_objs.streamtube.colorbar.Titlefont
- A dict of string/value properties that will be passed
to the Titlefont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.streamtube.colorbar.Titlefont
"""
return self['titlefont']
@titlefont.setter
def titlefont(self, val):
self['titlefont'] = val
# titleside
# ---------
@property
def titleside(self):
"""
Determines the location of the colorbar title with respect to
the color bar.
The 'titleside' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self['titleside']
@titleside.setter
def titleside(self, val):
self['titleside'] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self['x']
@x.setter
def x(self, val):
self['x'] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self['xanchor']
@xanchor.setter
def xanchor(self, val):
self['xanchor'] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['xpad']
@xpad.setter
def xpad(self, val):
self['xpad'] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self['y']
@y.setter
def y(self, val):
self['y'] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self['yanchor']
@yanchor.setter
def yanchor(self, val):
self['yanchor'] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['ypad']
@ypad.setter
def ypad(self, val):
self['ypad'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'streamtube'
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NOTE(review): a single literal block of per-property help text;
        # presumably consumed by the plotly base classes when assembling
        # docstrings/help output — confirm against plotly.basedatatypes.
        """Plain-text description of every ColorBar property."""
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see: https://github.com/d3/d3-form
            at/blob/master/README.md#locale_format And for dates
            see: https://github.com/d3/d3-time-
            format/blob/master/README.md#locale_format We add one
            item to d3's date formatter: "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            plotly.graph_objs.streamtube.colorbar.Tickformatstop
            instance or dict with compatible properties
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If **, this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on plot.ly for ticktext .
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on plot.ly for tickvals .
        tickwidth
            Sets the tick width (in px).
        title
            Sets the title of the color bar.
        titlefont
            Sets this color bar's title font.
        titleside
            Determines the location of the colorbar title with
            respect to the color bar.
        x
            Sets the x position of the color bar (in plot
            fraction).
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar.
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        y
            Sets the y position of the color bar (in plot
            fraction).
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar.
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        """
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.streamtube.ColorBar
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see: https://github.com/d3/d3-form
at/blob/master/README.md#locale_format And for dates
see: https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We add one
item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
plotly.graph_objs.streamtube.colorbar.Tickformatstop
instance or dict with compatible properties
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If **, this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on plot.ly for ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for tickvals .
tickwidth
Sets the tick width (in px).
title
Sets the title of the color bar.
titlefont
Sets this color bar's title font.
titleside
Determines the location of the colorbar title with
respect to the color bar.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__('colorbar')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.streamtube.ColorBar
constructor must be a dict or
an instance of plotly.graph_objs.streamtube.ColorBar"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.streamtube import (colorbar as v_colorbar)
# Initialize validators
# ---------------------
self._validators['bgcolor'] = v_colorbar.BgcolorValidator()
self._validators['bordercolor'] = v_colorbar.BordercolorValidator()
self._validators['borderwidth'] = v_colorbar.BorderwidthValidator()
self._validators['dtick'] = v_colorbar.DtickValidator()
self._validators['exponentformat'
] = v_colorbar.ExponentformatValidator()
self._validators['len'] = v_colorbar.LenValidator()
self._validators['lenmode'] = v_colorbar.LenmodeValidator()
self._validators['nticks'] = v_colorbar.NticksValidator()
self._validators['outlinecolor'] = v_colorbar.OutlinecolorValidator()
self._validators['outlinewidth'] = v_colorbar.OutlinewidthValidator()
self._validators['separatethousands'
] = v_colorbar.SeparatethousandsValidator()
self._validators['showexponent'] = v_colorbar.ShowexponentValidator()
self._validators['showticklabels'
] = v_colorbar.ShowticklabelsValidator()
self._validators['showtickprefix'
] = v_colorbar.ShowtickprefixValidator()
self._validators['showticksuffix'
] = v_colorbar.ShowticksuffixValidator()
self._validators['thickness'] = v_colorbar.ThicknessValidator()
self._validators['thicknessmode'] = v_colorbar.ThicknessmodeValidator()
self._validators['tick0'] = v_colorbar.Tick0Validator()
self._validators['tickangle'] = v_colorbar.TickangleValidator()
self._validators['tickcolor'] = v_colorbar.TickcolorValidator()
self._validators['tickfont'] = v_colorbar.TickfontValidator()
self._validators['tickformat'] = v_colorbar.TickformatValidator()
self._validators['tickformatstops'
] = v_colorbar.TickformatstopsValidator()
self._validators['ticklen'] = v_colorbar.TicklenValidator()
self._validators['tickmode'] = v_colorbar.TickmodeValidator()
self._validators['tickprefix'] = v_colorbar.TickprefixValidator()
self._validators['ticks'] = v_colorbar.TicksValidator()
self._validators['ticksuffix'] = v_colorbar.TicksuffixValidator()
self._validators['ticktext'] = v_colorbar.TicktextValidator()
self._validators['ticktextsrc'] = v_colorbar.TicktextsrcValidator()
self._validators['tickvals'] = v_colorbar.TickvalsValidator()
self._validators['tickvalssrc'] = v_colorbar.TickvalssrcValidator()
self._validators['tickwidth'] = v_colorbar.TickwidthValidator()
self._validators['title'] = v_colorbar.TitleValidator()
self._validators['titlefont'] = v_colorbar.TitlefontValidator()
self._validators['titleside'] = v_colorbar.TitlesideValidator()
self._validators['x'] = v_colorbar.XValidator()
self._validators['xanchor'] = v_colorbar.XanchorValidator()
self._validators['xpad'] = v_colorbar.XpadValidator()
self._validators['y'] = v_colorbar.YValidator()
self._validators['yanchor'] = v_colorbar.YanchorValidator()
self._validators['ypad'] = v_colorbar.YpadValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('bgcolor', None)
self['bgcolor'] = bgcolor if bgcolor is not None else _v
_v = arg.pop('bordercolor', None)
self['bordercolor'] = bordercolor if bordercolor is not None else _v
_v = arg.pop('borderwidth', None)
self['borderwidth'] = borderwidth if borderwidth is not None else _v
_v = arg.pop('dtick', None)
self['dtick'] = dtick if dtick is not None else _v
_v = arg.pop('exponentformat', None)
self['exponentformat'
] = exponentformat if exponentformat is not None else _v
_v = arg.pop('len', None)
self['len'] = len if len is not None else _v
_v = arg.pop('lenmode', None)
self['lenmode'] = lenmode if lenmode is not None else _v
_v = arg.pop('nticks', None)
self['nticks'] = nticks if nticks is not None else _v
_v = arg.pop('outlinecolor', None)
self['outlinecolor'] = outlinecolor if outlinecolor is not None else _v
_v = arg.pop('outlinewidth', None)
self['outlinewidth'] = outlinewidth if outlinewidth is not None else _v
_v = arg.pop('separatethousands', None)
self['separatethousands'
] = separatethousands if separatethousands is not None else _v
_v = arg.pop('showexponent', None)
self['showexponent'] = showexponent if showexponent is not None else _v
_v = arg.pop('showticklabels', None)
self['showticklabels'
] = showticklabels if showticklabels is not None else _v
_v = arg.pop('showtickprefix', None)
self['showtickprefix'
] = showtickprefix if showtickprefix is not None else _v
_v = arg.pop('showticksuffix', None)
self['showticksuffix'
] = showticksuffix if showticksuffix is not None else _v
_v = arg.pop('thickness', None)
self['thickness'] = thickness if thickness is not None else _v
_v = arg.pop('thicknessmode', None)
self['thicknessmode'
] = thicknessmode if thicknessmode is not None else _v
_v = arg.pop('tick0', None)
self['tick0'] = tick0 if tick0 is not None else _v
_v = arg.pop('tickangle', None)
self['tickangle'] = tickangle if tickangle is not None else _v
_v = arg.pop('tickcolor', None)
self['tickcolor'] = tickcolor if tickcolor is not None else _v
_v = arg.pop('tickfont', None)
self['tickfont'] = tickfont if tickfont is not None else _v
_v = arg.pop('tickformat', None)
self['tickformat'] = tickformat if tickformat is not None else _v
_v = arg.pop('tickformatstops', None)
self['tickformatstops'
] = tickformatstops if tickformatstops is not None else _v
_v = arg.pop('ticklen', None)
self['ticklen'] = ticklen if ticklen is not None else _v
_v = arg.pop('tickmode', None)
self['tickmode'] = tickmode if tickmode is not None else _v
_v = arg.pop('tickprefix', None)
self['tickprefix'] = tickprefix if tickprefix is not None else _v
_v = arg.pop('ticks', None)
self['ticks'] = ticks if ticks is not None else _v
_v = arg.pop('ticksuffix', None)
self['ticksuffix'] = ticksuffix if ticksuffix is not None else _v
_v = arg.pop('ticktext', None)
self['ticktext'] = ticktext if ticktext is not None else _v
_v = arg.pop('ticktextsrc', None)
self['ticktextsrc'] = ticktextsrc if ticktextsrc is not None else _v
_v = arg.pop('tickvals', None)
self['tickvals'] = tickvals if tickvals is not None else _v
_v = arg.pop('tickvalssrc', None)
self['tickvalssrc'] = tickvalssrc if tickvalssrc is not None else _v
_v = arg.pop('tickwidth', None)
self['tickwidth'] = tickwidth if tickwidth is not None else _v
_v = arg.pop('title', None)
self['title'] = title if title is not None else _v
_v = arg.pop('titlefont', None)
self['titlefont'] = titlefont if titlefont is not None else _v
_v = arg.pop('titleside', None)
self['titleside'] = titleside if titleside is not None else _v
_v = arg.pop('x', None)
self['x'] = x if x is not None else _v
_v = arg.pop('xanchor', None)
self['xanchor'] = xanchor if xanchor is not None else _v
_v = arg.pop('xpad', None)
self['xpad'] = xpad if xpad is not None else _v
_v = arg.pop('y', None)
self['y'] = y if y is not None else _v
_v = arg.pop('yanchor', None)
self['yanchor'] = yanchor if yanchor is not None else _v
_v = arg.pop('ypad', None)
self['ypad'] = ypad if ypad is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 36.815878 | 96 | 0.566323 | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class ColorBar(BaseTraceHierarchyType):
@property
def bgcolor(self):
return self['bgcolor']
@bgcolor.setter
def bgcolor(self, val):
self['bgcolor'] = val
@property
def bordercolor(self):
return self['bordercolor']
@bordercolor.setter
def bordercolor(self, val):
self['bordercolor'] = val
@property
def borderwidth(self):
return self['borderwidth']
@borderwidth.setter
def borderwidth(self, val):
self['borderwidth'] = val
@property
def dtick(self):
return self['dtick']
@dtick.setter
def dtick(self, val):
self['dtick'] = val
@property
def exponentformat(self):
return self['exponentformat']
@exponentformat.setter
def exponentformat(self, val):
self['exponentformat'] = val
@property
def len(self):
return self['len']
@len.setter
def len(self, val):
self['len'] = val
@property
def lenmode(self):
return self['lenmode']
@lenmode.setter
def lenmode(self, val):
self['lenmode'] = val
@property
def nticks(self):
return self['nticks']
@nticks.setter
def nticks(self, val):
self['nticks'] = val
@property
def outlinecolor(self):
return self['outlinecolor']
@outlinecolor.setter
def outlinecolor(self, val):
self['outlinecolor'] = val
@property
def outlinewidth(self):
return self['outlinewidth']
@outlinewidth.setter
def outlinewidth(self, val):
self['outlinewidth'] = val
@property
def separatethousands(self):
return self['separatethousands']
@separatethousands.setter
def separatethousands(self, val):
self['separatethousands'] = val
@property
def showexponent(self):
return self['showexponent']
@showexponent.setter
def showexponent(self, val):
self['showexponent'] = val
@property
def showticklabels(self):
return self['showticklabels']
@showticklabels.setter
def showticklabels(self, val):
self['showticklabels'] = val
@property
def showtickprefix(self):
return self['showtickprefix']
@showtickprefix.setter
def showtickprefix(self, val):
self['showtickprefix'] = val
@property
def showticksuffix(self):
return self['showticksuffix']
@showticksuffix.setter
def showticksuffix(self, val):
self['showticksuffix'] = val
@property
def thickness(self):
return self['thickness']
@thickness.setter
def thickness(self, val):
self['thickness'] = val
@property
def thicknessmode(self):
return self['thicknessmode']
@thicknessmode.setter
def thicknessmode(self, val):
self['thicknessmode'] = val
@property
def tick0(self):
return self['tick0']
@tick0.setter
def tick0(self, val):
self['tick0'] = val
@property
def tickangle(self):
return self['tickangle']
@tickangle.setter
def tickangle(self, val):
self['tickangle'] = val
@property
def tickcolor(self):
return self['tickcolor']
@tickcolor.setter
def tickcolor(self, val):
self['tickcolor'] = val
@property
def tickfont(self):
return self['tickfont']
@tickfont.setter
def tickfont(self, val):
self['tickfont'] = val
@property
def tickformat(self):
return self['tickformat']
@tickformat.setter
def tickformat(self, val):
self['tickformat'] = val
@property
def tickformatstops(self):
return self['tickformatstops']
@tickformatstops.setter
def tickformatstops(self, val):
self['tickformatstops'] = val
@property
def ticklen(self):
return self['ticklen']
@ticklen.setter
def ticklen(self, val):
self['ticklen'] = val
@property
def tickmode(self):
return self['tickmode']
@tickmode.setter
def tickmode(self, val):
self['tickmode'] = val
@property
def tickprefix(self):
return self['tickprefix']
@tickprefix.setter
def tickprefix(self, val):
self['tickprefix'] = val
@property
def ticks(self):
return self['ticks']
@ticks.setter
def ticks(self, val):
self['ticks'] = val
@property
def ticksuffix(self):
return self['ticksuffix']
@ticksuffix.setter
def ticksuffix(self, val):
self['ticksuffix'] = val
@property
def ticktext(self):
return self['ticktext']
@ticktext.setter
def ticktext(self, val):
self['ticktext'] = val
@property
def ticktextsrc(self):
return self['ticktextsrc']
@ticktextsrc.setter
def ticktextsrc(self, val):
self['ticktextsrc'] = val
@property
def tickvals(self):
return self['tickvals']
@tickvals.setter
def tickvals(self, val):
self['tickvals'] = val
@property
def tickvalssrc(self):
return self['tickvalssrc']
@tickvalssrc.setter
def tickvalssrc(self, val):
self['tickvalssrc'] = val
@property
def tickwidth(self):
return self['tickwidth']
@tickwidth.setter
def tickwidth(self, val):
self['tickwidth'] = val
@property
def title(self):
return self['title']
@title.setter
def title(self, val):
self['title'] = val
@property
def titlefont(self):
return self['titlefont']
@titlefont.setter
def titlefont(self, val):
self['titlefont'] = val
@property
def titleside(self):
return self['titleside']
@titleside.setter
def titleside(self, val):
self['titleside'] = val
@property
def x(self):
return self['x']
@x.setter
def x(self, val):
self['x'] = val
@property
def xanchor(self):
return self['xanchor']
@xanchor.setter
def xanchor(self, val):
self['xanchor'] = val
@property
def xpad(self):
return self['xpad']
@xpad.setter
def xpad(self, val):
self['xpad'] = val
@property
def y(self):
return self['y']
@y.setter
def y(self, val):
self['y'] = val
@property
def yanchor(self):
return self['yanchor']
@yanchor.setter
def yanchor(self, val):
self['yanchor'] = val
@property
def ypad(self):
return self['ypad']
@ypad.setter
def ypad(self, val):
self['ypad'] = val
@property
def _parent_path_str(self):
return 'streamtube'
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see: https://github.com/d3/d3-form
at/blob/master/README.md#locale_format And for dates
see: https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We add one
item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
plotly.graph_objs.streamtube.colorbar.Tickformatstop
instance or dict with compatible properties
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If **, this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on plot.ly for ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for tickvals .
tickwidth
Sets the tick width (in px).
title
Sets the title of the color bar.
titlefont
Sets this color bar's title font.
titleside
Determines the location of the colorbar title with
respect to the color bar.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
super(ColorBar, self).__init__('colorbar')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.streamtube.ColorBar
constructor must be a dict or
an instance of plotly.graph_objs.streamtube.ColorBar"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.streamtube import (colorbar as v_colorbar)
# Initialize validators
# ---------------------
self._validators['bgcolor'] = v_colorbar.BgcolorValidator()
self._validators['bordercolor'] = v_colorbar.BordercolorValidator()
self._validators['borderwidth'] = v_colorbar.BorderwidthValidator()
self._validators['dtick'] = v_colorbar.DtickValidator()
self._validators['exponentformat'
] = v_colorbar.ExponentformatValidator()
self._validators['len'] = v_colorbar.LenValidator()
self._validators['lenmode'] = v_colorbar.LenmodeValidator()
self._validators['nticks'] = v_colorbar.NticksValidator()
self._validators['outlinecolor'] = v_colorbar.OutlinecolorValidator()
self._validators['outlinewidth'] = v_colorbar.OutlinewidthValidator()
self._validators['separatethousands'
] = v_colorbar.SeparatethousandsValidator()
self._validators['showexponent'] = v_colorbar.ShowexponentValidator()
self._validators['showticklabels'
] = v_colorbar.ShowticklabelsValidator()
self._validators['showtickprefix'
] = v_colorbar.ShowtickprefixValidator()
self._validators['showticksuffix'
] = v_colorbar.ShowticksuffixValidator()
self._validators['thickness'] = v_colorbar.ThicknessValidator()
self._validators['thicknessmode'] = v_colorbar.ThicknessmodeValidator()
self._validators['tick0'] = v_colorbar.Tick0Validator()
self._validators['tickangle'] = v_colorbar.TickangleValidator()
self._validators['tickcolor'] = v_colorbar.TickcolorValidator()
self._validators['tickfont'] = v_colorbar.TickfontValidator()
self._validators['tickformat'] = v_colorbar.TickformatValidator()
self._validators['tickformatstops'
] = v_colorbar.TickformatstopsValidator()
self._validators['ticklen'] = v_colorbar.TicklenValidator()
self._validators['tickmode'] = v_colorbar.TickmodeValidator()
self._validators['tickprefix'] = v_colorbar.TickprefixValidator()
self._validators['ticks'] = v_colorbar.TicksValidator()
self._validators['ticksuffix'] = v_colorbar.TicksuffixValidator()
self._validators['ticktext'] = v_colorbar.TicktextValidator()
self._validators['ticktextsrc'] = v_colorbar.TicktextsrcValidator()
self._validators['tickvals'] = v_colorbar.TickvalsValidator()
self._validators['tickvalssrc'] = v_colorbar.TickvalssrcValidator()
self._validators['tickwidth'] = v_colorbar.TickwidthValidator()
self._validators['title'] = v_colorbar.TitleValidator()
self._validators['titlefont'] = v_colorbar.TitlefontValidator()
self._validators['titleside'] = v_colorbar.TitlesideValidator()
self._validators['x'] = v_colorbar.XValidator()
self._validators['xanchor'] = v_colorbar.XanchorValidator()
self._validators['xpad'] = v_colorbar.XpadValidator()
self._validators['y'] = v_colorbar.YValidator()
self._validators['yanchor'] = v_colorbar.YanchorValidator()
self._validators['ypad'] = v_colorbar.YpadValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('bgcolor', None)
self['bgcolor'] = bgcolor if bgcolor is not None else _v
_v = arg.pop('bordercolor', None)
self['bordercolor'] = bordercolor if bordercolor is not None else _v
_v = arg.pop('borderwidth', None)
self['borderwidth'] = borderwidth if borderwidth is not None else _v
_v = arg.pop('dtick', None)
self['dtick'] = dtick if dtick is not None else _v
_v = arg.pop('exponentformat', None)
self['exponentformat'
] = exponentformat if exponentformat is not None else _v
_v = arg.pop('len', None)
self['len'] = len if len is not None else _v
_v = arg.pop('lenmode', None)
self['lenmode'] = lenmode if lenmode is not None else _v
_v = arg.pop('nticks', None)
self['nticks'] = nticks if nticks is not None else _v
_v = arg.pop('outlinecolor', None)
self['outlinecolor'] = outlinecolor if outlinecolor is not None else _v
_v = arg.pop('outlinewidth', None)
self['outlinewidth'] = outlinewidth if outlinewidth is not None else _v
_v = arg.pop('separatethousands', None)
self['separatethousands'
] = separatethousands if separatethousands is not None else _v
_v = arg.pop('showexponent', None)
self['showexponent'] = showexponent if showexponent is not None else _v
_v = arg.pop('showticklabels', None)
self['showticklabels'
] = showticklabels if showticklabels is not None else _v
_v = arg.pop('showtickprefix', None)
self['showtickprefix'
] = showtickprefix if showtickprefix is not None else _v
_v = arg.pop('showticksuffix', None)
self['showticksuffix'
] = showticksuffix if showticksuffix is not None else _v
_v = arg.pop('thickness', None)
self['thickness'] = thickness if thickness is not None else _v
_v = arg.pop('thicknessmode', None)
self['thicknessmode'
] = thicknessmode if thicknessmode is not None else _v
_v = arg.pop('tick0', None)
self['tick0'] = tick0 if tick0 is not None else _v
_v = arg.pop('tickangle', None)
self['tickangle'] = tickangle if tickangle is not None else _v
_v = arg.pop('tickcolor', None)
self['tickcolor'] = tickcolor if tickcolor is not None else _v
_v = arg.pop('tickfont', None)
self['tickfont'] = tickfont if tickfont is not None else _v
_v = arg.pop('tickformat', None)
self['tickformat'] = tickformat if tickformat is not None else _v
_v = arg.pop('tickformatstops', None)
self['tickformatstops'
] = tickformatstops if tickformatstops is not None else _v
_v = arg.pop('ticklen', None)
self['ticklen'] = ticklen if ticklen is not None else _v
_v = arg.pop('tickmode', None)
self['tickmode'] = tickmode if tickmode is not None else _v
_v = arg.pop('tickprefix', None)
self['tickprefix'] = tickprefix if tickprefix is not None else _v
_v = arg.pop('ticks', None)
self['ticks'] = ticks if ticks is not None else _v
_v = arg.pop('ticksuffix', None)
self['ticksuffix'] = ticksuffix if ticksuffix is not None else _v
_v = arg.pop('ticktext', None)
self['ticktext'] = ticktext if ticktext is not None else _v
_v = arg.pop('ticktextsrc', None)
self['ticktextsrc'] = ticktextsrc if ticktextsrc is not None else _v
_v = arg.pop('tickvals', None)
self['tickvals'] = tickvals if tickvals is not None else _v
_v = arg.pop('tickvalssrc', None)
self['tickvalssrc'] = tickvalssrc if tickvalssrc is not None else _v
_v = arg.pop('tickwidth', None)
self['tickwidth'] = tickwidth if tickwidth is not None else _v
_v = arg.pop('title', None)
self['title'] = title if title is not None else _v
_v = arg.pop('titlefont', None)
self['titlefont'] = titlefont if titlefont is not None else _v
_v = arg.pop('titleside', None)
self['titleside'] = titleside if titleside is not None else _v
_v = arg.pop('x', None)
self['x'] = x if x is not None else _v
_v = arg.pop('xanchor', None)
self['xanchor'] = xanchor if xanchor is not None else _v
_v = arg.pop('xpad', None)
self['xpad'] = xpad if xpad is not None else _v
_v = arg.pop('y', None)
self['y'] = y if y is not None else _v
_v = arg.pop('yanchor', None)
self['yanchor'] = yanchor if yanchor is not None else _v
_v = arg.pop('ypad', None)
self['ypad'] = ypad if ypad is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| true | true |
1c38e8ca4141892217c904caf65e694fc01e78c1 | 1,325 | py | Python | premailer/cache.py | rdhyee/premailer | bdbb8c263cba5fd5d59bc1690e11a93b15cb9b74 | [
"BSD-3-Clause"
] | 692 | 2015-01-02T19:33:21.000Z | 2022-03-16T09:48:11.000Z | premailer/cache.py | rdhyee/premailer | bdbb8c263cba5fd5d59bc1690e11a93b15cb9b74 | [
"BSD-3-Clause"
] | 161 | 2015-01-01T21:16:21.000Z | 2022-03-20T22:03:58.000Z | premailer/cache.py | rdhyee/premailer | bdbb8c263cba5fd5d59bc1690e11a93b15cb9b74 | [
"BSD-3-Clause"
] | 110 | 2015-01-29T20:37:13.000Z | 2022-03-31T01:08:31.000Z | import functools
import os
import threading
import cachetools
# Available cache options.
CACHE_IMPLEMENTATIONS = {
"LFU": cachetools.LFUCache,
"LRU": cachetools.LRUCache,
"TTL": cachetools.TTLCache,
}
# Time to live (seconds) for entries in TTL cache. Defaults to 1 hour.
TTL_CACHE_TIMEOUT = 1 * 60 * 60
# Maximum no. of items to be saved in cache.
DEFAULT_CACHE_MAXSIZE = 128
# Lock to prevent multiple threads from accessing the cache at same time.
cache_access_lock = threading.RLock()
cache_type = os.environ.get("PREMAILER_CACHE", "LFU")
if cache_type not in CACHE_IMPLEMENTATIONS:
raise ValueError(
"Unsupported cache implementation. Available options: %s"
% "/".join(CACHE_IMPLEMENTATIONS.keys())
)
cache_init_options = {
"maxsize": int(os.environ.get("PREMAILER_CACHE_MAXSIZE", DEFAULT_CACHE_MAXSIZE))
}
if cache_type == "TTL":
cache_init_options["ttl"] = int(
os.environ.get("PREMAILER_CACHE_TTL", TTL_CACHE_TIMEOUT)
)
cache = CACHE_IMPLEMENTATIONS[cache_type](**cache_init_options)
def function_cache(**options):
def decorator(func):
@cachetools.cached(cache, lock=cache_access_lock)
@functools.wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
return inner
return decorator
| 25.480769 | 84 | 0.70717 | import functools
import os
import threading
import cachetools
CACHE_IMPLEMENTATIONS = {
"LFU": cachetools.LFUCache,
"LRU": cachetools.LRUCache,
"TTL": cachetools.TTLCache,
}
TTL_CACHE_TIMEOUT = 1 * 60 * 60
DEFAULT_CACHE_MAXSIZE = 128
cache_access_lock = threading.RLock()
cache_type = os.environ.get("PREMAILER_CACHE", "LFU")
if cache_type not in CACHE_IMPLEMENTATIONS:
raise ValueError(
"Unsupported cache implementation. Available options: %s"
% "/".join(CACHE_IMPLEMENTATIONS.keys())
)
cache_init_options = {
"maxsize": int(os.environ.get("PREMAILER_CACHE_MAXSIZE", DEFAULT_CACHE_MAXSIZE))
}
if cache_type == "TTL":
cache_init_options["ttl"] = int(
os.environ.get("PREMAILER_CACHE_TTL", TTL_CACHE_TIMEOUT)
)
cache = CACHE_IMPLEMENTATIONS[cache_type](**cache_init_options)
def function_cache(**options):
def decorator(func):
@cachetools.cached(cache, lock=cache_access_lock)
@functools.wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
return inner
return decorator
| true | true |
1c38e915f9c1112c51ad32183021cebfb0c7561d | 1,395 | py | Python | firmware/xu4Mqtt/skyCamReaderWithSave.py | mi3nts/centralHub | 92aab9510fff4331b7363cd1272b5779ccf167a1 | [
"CC-BY-3.0"
] | null | null | null | firmware/xu4Mqtt/skyCamReaderWithSave.py | mi3nts/centralHub | 92aab9510fff4331b7363cd1272b5779ccf167a1 | [
"CC-BY-3.0"
] | 6 | 2020-01-10T20:58:20.000Z | 2020-01-10T21:46:54.000Z | firmware/xu4Mqtt/skyCamReaderWithSave.py | mi3nts/centralHub | 92aab9510fff4331b7363cd1272b5779ccf167a1 | [
"CC-BY-3.0"
] | 4 | 2020-04-22T22:59:34.000Z | 2021-04-29T17:33:34.000Z | from datetime import timezone
import time
import os
import datetime
import numpy as np
import pickle
from skimage import io, color
import cv2
from mintsXU4 import mintsSkyCamReader as mSCR
from mintsXU4 import mintsSensorReader as mSR
from mintsXU4 import mintsDefinitions as mD
dataFolder = mD.dataFolder
def main():
sensorName = "SKYCAM003"
dateTimeNow = datetime.datetime.now()
subFolder = mSR.getWritePathSnaps(sensorName,dateTimeNow)
onboardCapture = True
try:
start = time.time()
currentImage,imagePath = mSCR.getSnapShotXU4(subFolder)
modelName = 'naiveBayesModel.sav'
oneDImage, imageShape = mSCR.generateFeatures(currentImage,imagePath)
print("Loading Classifier")
loadedModel = pickle.load(open(modelName, 'rb'))
print("Done Loading")
predictionBinary,prediction = mSCR.getPredictionMatrix(loadedModel,oneDImage)
print("Writing Resulting Images ...")
binaryImage = mSCR.writeBinaryImageXU4(predictionBinary,imageShape,imagePath,onboardCapture)
sensorDictionary = mSCR.getResultsXU4002(currentImage,binaryImage,predictionBinary,prediction,imagePath,dateTimeNow)
mSR.sensorFinisher(dateTimeNow,sensorName,sensorDictionary)
mSCR.timeTaken("Preiction time is ",start)
except:
print("TRY AGAIN")
if __name__ == "__main__":
main()
| 29.0625 | 125 | 0.731183 | from datetime import timezone
import time
import os
import datetime
import numpy as np
import pickle
from skimage import io, color
import cv2
from mintsXU4 import mintsSkyCamReader as mSCR
from mintsXU4 import mintsSensorReader as mSR
from mintsXU4 import mintsDefinitions as mD
dataFolder = mD.dataFolder
def main():
sensorName = "SKYCAM003"
dateTimeNow = datetime.datetime.now()
subFolder = mSR.getWritePathSnaps(sensorName,dateTimeNow)
onboardCapture = True
try:
start = time.time()
currentImage,imagePath = mSCR.getSnapShotXU4(subFolder)
modelName = 'naiveBayesModel.sav'
oneDImage, imageShape = mSCR.generateFeatures(currentImage,imagePath)
print("Loading Classifier")
loadedModel = pickle.load(open(modelName, 'rb'))
print("Done Loading")
predictionBinary,prediction = mSCR.getPredictionMatrix(loadedModel,oneDImage)
print("Writing Resulting Images ...")
binaryImage = mSCR.writeBinaryImageXU4(predictionBinary,imageShape,imagePath,onboardCapture)
sensorDictionary = mSCR.getResultsXU4002(currentImage,binaryImage,predictionBinary,prediction,imagePath,dateTimeNow)
mSR.sensorFinisher(dateTimeNow,sensorName,sensorDictionary)
mSCR.timeTaken("Preiction time is ",start)
except:
print("TRY AGAIN")
if __name__ == "__main__":
main()
| true | true |
1c38e94d2cddaf16dd7ce6167cdfa3d68a7f55d7 | 1,101 | py | Python | GANS/MicroGan_2/sample/Discriminator.py | cmoimoro/gym-micropolis-ga | d105da7c624670083fc1d6e3acf265511d9e2df6 | [
"MIT"
] | null | null | null | GANS/MicroGan_2/sample/Discriminator.py | cmoimoro/gym-micropolis-ga | d105da7c624670083fc1d6e3acf265511d9e2df6 | [
"MIT"
] | null | null | null | GANS/MicroGan_2/sample/Discriminator.py | cmoimoro/gym-micropolis-ga | d105da7c624670083fc1d6e3acf265511d9e2df6 | [
"MIT"
] | null | null | null | from settings import *
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, ngpu):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input) | 36.7 | 61 | 0.504087 | from settings import *
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, ngpu):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input) | true | true |
1c38eab8d70edab703890f91450fbf31b1951120 | 3,035 | py | Python | bot/main.py | xXebicXx/amog-bot | 1e924ca51a67d2dee6d149686fcee2d1da9bec81 | [
"MIT"
] | null | null | null | bot/main.py | xXebicXx/amog-bot | 1e924ca51a67d2dee6d149686fcee2d1da9bec81 | [
"MIT"
] | null | null | null | bot/main.py | xXebicXx/amog-bot | 1e924ca51a67d2dee6d149686fcee2d1da9bec81 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from asyncio import sleep
import os
import random
from discord.utils import get
thingtowatch = random.randint(1,5)
if thingtowatch == 1:
movie = "Among Us: The Movie"
elif thingtowatch == 2:
movie = "pornography"
elif thingtowatch == 3:
movie = "Uncle Dane"
elif thingtowatch == 4:
movie = "Emo TikTok's"
elif thingtowatch == 5:
movie = "Sesame Street"
randomresponse = ["Man just quit.", "This is why no one likes you.", "That's a bit cringe, innit bruv?"]
insultresponses = ["That's a bit rude ngl."]
bot = commands.Bot(command_prefix="~")
TOKEN = os.environ.get("TOKEN")
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=movie))
print("Amogus")
@bot.event
async def on_message(message):
await bot.process_commands(message)
randomthing = random.randint(1,3)
if "moron" in message.content:
if randomthing == 1:
await message.reply(randomresponse[0])
elif randomthing == 2:
await message.reply(randomresponse[1])
elif randomthing == 3:
await message.reply(randomresponse[2])
elif "stupid" in message.content:
if randomthing == 1:
await message.reply(randomresponse[0])
elif randomthing == 2:
await message.reply(randomresponse[1])
elif randomthing == 3:
await message.reply(randomresponse[2])
elif "idiot" in message.content:
if randomthing == 1:
await message.reply(randomresponse[0])
elif randomthing == 2:
await message.reply(randomresponse[1])
elif randomthing == 3:
await message.reply(randomresponse[2])
elif "amogus" in message.content:
if randomthing == 1:
await message.reply(randomresponse[0])
elif randomthing == 2:
await message.reply(randomresponse[1])
elif randomthing == 3:
await message.reply(randomresponse[2])
@bot.command()
async def sus(ctx):
await ctx.send('PLEASE STOP, GET OUT OF MY HEAD!')
@bot.command()
async def killself(ctx):
await ctx.reply("OwO \*kills you and notices your bulgy wulgy\*")
@bot.command()
async def sussypercent(ctx):
sussyperc = random.randint(1,100)
if ctx.author.id == 625336385129414677:
await ctx.reply(ctx.author.name + " is infinite sussy.")
else:
await ctx.reply(ctx.author.name + " is " + str(sussyperc) + "% sussy.")
@bot.command()
async def say(ctx, *, text):
message = ctx.message
await message.delete()
channel = bot.get_channel(917759123968372770)
await channel.send(f"{text}")
@bot.command()
async def coinflip(ctx):
coinflipperooni = random.randint(1,2)
if coinflipperooni == 1:
await ctx.send("Heads! Coinflipper wins!")
with open('amogus.gif', 'rb') as fp:
await ctx.send(file=discord.File(fp, 'Win.gif'))
else:
await ctx.send("Tails, coinflipper loses!")
with open('lose.gif', 'rb') as fp:
await ctx.send(file=discord.File(fp, 'YouLose.gif'))
def run():
bot.run(TOKEN)
if __name__ == "__main__":
run()
| 27.098214 | 104 | 0.683031 | import discord
from discord.ext import commands
from asyncio import sleep
import os
import random
from discord.utils import get
thingtowatch = random.randint(1,5)
if thingtowatch == 1:
movie = "Among Us: The Movie"
elif thingtowatch == 2:
movie = "pornography"
elif thingtowatch == 3:
movie = "Uncle Dane"
elif thingtowatch == 4:
movie = "Emo TikTok's"
elif thingtowatch == 5:
movie = "Sesame Street"
randomresponse = ["Man just quit.", "This is why no one likes you.", "That's a bit cringe, innit bruv?"]
insultresponses = ["That's a bit rude ngl."]
bot = commands.Bot(command_prefix="~")
TOKEN = os.environ.get("TOKEN")
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=movie))
print("Amogus")
@bot.event
async def on_message(message):
await bot.process_commands(message)
randomthing = random.randint(1,3)
if "moron" in message.content:
if randomthing == 1:
await message.reply(randomresponse[0])
elif randomthing == 2:
await message.reply(randomresponse[1])
elif randomthing == 3:
await message.reply(randomresponse[2])
elif "stupid" in message.content:
if randomthing == 1:
await message.reply(randomresponse[0])
elif randomthing == 2:
await message.reply(randomresponse[1])
elif randomthing == 3:
await message.reply(randomresponse[2])
elif "idiot" in message.content:
if randomthing == 1:
await message.reply(randomresponse[0])
elif randomthing == 2:
await message.reply(randomresponse[1])
elif randomthing == 3:
await message.reply(randomresponse[2])
elif "amogus" in message.content:
if randomthing == 1:
await message.reply(randomresponse[0])
elif randomthing == 2:
await message.reply(randomresponse[1])
elif randomthing == 3:
await message.reply(randomresponse[2])
@bot.command()
async def sus(ctx):
await ctx.send('PLEASE STOP, GET OUT OF MY HEAD!')
@bot.command()
async def killself(ctx):
await ctx.reply("OwO \*kills you and notices your bulgy wulgy\*")
@bot.command()
async def sussypercent(ctx):
sussyperc = random.randint(1,100)
if ctx.author.id == 625336385129414677:
await ctx.reply(ctx.author.name + " is infinite sussy.")
else:
await ctx.reply(ctx.author.name + " is " + str(sussyperc) + "% sussy.")
@bot.command()
async def say(ctx, *, text):
message = ctx.message
await message.delete()
channel = bot.get_channel(917759123968372770)
await channel.send(f"{text}")
@bot.command()
async def coinflip(ctx):
coinflipperooni = random.randint(1,2)
if coinflipperooni == 1:
await ctx.send("Heads! Coinflipper wins!")
with open('amogus.gif', 'rb') as fp:
await ctx.send(file=discord.File(fp, 'Win.gif'))
else:
await ctx.send("Tails, coinflipper loses!")
with open('lose.gif', 'rb') as fp:
await ctx.send(file=discord.File(fp, 'YouLose.gif'))
def run():
bot.run(TOKEN)
if __name__ == "__main__":
run()
| true | true |
1c38eb6f905bfc3975686a29e979a88645590ddc | 6,967 | py | Python | playbooks/robusta_playbooks/daemonsets.py | pavangudiwada/robusta | cc1cb8a2e198f404e275a3947cf64e9f700f56f4 | [
"MIT"
] | 273 | 2021-12-28T20:48:48.000Z | 2022-03-31T16:03:13.000Z | playbooks/robusta_playbooks/daemonsets.py | pavangudiwada/robusta | cc1cb8a2e198f404e275a3947cf64e9f700f56f4 | [
"MIT"
] | 103 | 2022-01-10T11:45:47.000Z | 2022-03-31T16:31:11.000Z | playbooks/robusta_playbooks/daemonsets.py | pavangudiwada/robusta | cc1cb8a2e198f404e275a3947cf64e9f700f56f4 | [
"MIT"
] | 35 | 2021-12-30T15:30:14.000Z | 2022-03-28T11:43:57.000Z | import logging
from robusta.api import *
@action
def daemonset_fix_config(event: ExecutionBaseEvent):
finding = Finding(
title="Proposed fix",
source=FindingSource.CALLBACK.value,
aggregation_key="daemonset_fix_config",
)
finding.add_enrichment(
[
MarkdownBlock(
textwrap.dedent(
"""\
Add the following to your daemonset pod-template:
```
tolerations:
- effect: NoSchedule
key: ToBeDeletedByClusterAutoscaler
operator: Exists
```"""
)
)
]
)
finding.add_enrichment(
[
MarkdownBlock(
"This will tell Kubernetes that it is OK if daemonsets keep running while a node shuts down. "
"This is desirable for daemonsets like elasticsearch which should keep gathering logs while the "
"node shuts down."
""
)
]
)
event.add_finding(finding)
@action
def daemonset_silence_false_alarm(event: ExecutionBaseEvent):
finding = Finding(
title="Silence the alert",
source=FindingSource.CALLBACK,
aggregation_key="daemonset_silence_false_alarm",
)
finding.add_enrichment(
[
MarkdownBlock(
textwrap.dedent(
"""\
Add the following to your `active_playbooks.yaml`:
```
- name: "alerts_integration"
action_params:
alerts_config:
(...)
- alert_name: "KubernetesDaemonsetMisscheduled"
(...)
silencers:
- name: "DaemonsetMisscheduledSmartSilencer"
```"""
)
)
]
)
finding.add_enrichment(
[
MarkdownBlock(
"This will silence the KubernetesDaemonsetMisscheduled alert when the known false alarm occurs but not under "
"other conditions."
""
)
]
)
event.add_finding(finding)
@action
def daemonset_status_enricher(event: DaemonSetEvent):
"""
Enrich the finding with daemon set stats.
Includes recommendations for the identified cause.
"""
ds = event.get_daemonset()
if not ds:
logging.error(
f"cannot run DaemonsetEnricher on event with no daemonset: {event}"
)
return
event.add_enrichment(
[
MarkdownBlock(f"*Daemonset Stats for {ds.metadata.name}*"),
KubernetesFieldsBlock(
ds,
[
"status.desiredNumberScheduled",
"status.currentNumberScheduled",
"status.numberAvailable",
"status.numberMisscheduled",
],
),
MarkdownBlock(
"_Daemonset lifecycle: pods start out as desired, then get scheduled, and then become available. "
"If Kubernetes then decides a pod shouldn't be running on a node after all, it becomes "
"misscheduled._"
),
]
)
# checks if the issue described here: https://blog.florentdelannoy.com/blog/2020/kube-daemonset-misscheduled/
# we check for it in the simplest way possible to avoid re-implementing k8s' scheduling logic for taints ourselves
def check_for_known_mismatch_false_alarm(ds: DaemonSet) -> bool:
# if the daemonset was configured with an appropriate toleration, this false alarm isn't possible
if does_daemonset_have_toleration(ds, "ToBeDeletedByClusterAutoscaler"):
logging.info(
f"daemonset is configured properly, so we don't have the known mismatch false alarm"
)
return False
nodes_by_name = {n.metadata.name: n for n in NodeList.listNode().obj.items}
ds_pods = RobustaPod.find_pods_with_direct_owner(
ds.metadata.namespace, ds.metadata.uid
)
# look for at least one node where the false alarm is present
for pod in ds_pods:
if pod.spec.nodeName not in nodes_by_name:
# we probably have a node that was created between the time we fetched the nodes and the time we fetched
# the pods
logging.warning(f"we have a pod not running on a known node. pod={pod}")
continue
relevant_node: Node = nodes_by_name[pod.spec.nodeName]
if does_node_have_taint(relevant_node, "ToBeDeletedByClusterAutoscaler"):
logging.info(
f"we found a cluster being deleted by the autoscaler - we have the known mismatch false alert"
)
return True
return False
@action
def daemonset_misscheduled_smart_silencer(alert: PrometheusKubernetesAlert):
"""
Silence daemonset misscheduled alert finding if it's a known false alarm.
checks if the issue described here: https://blog.florentdelannoy.com/blog/2020/kube-daemonset-misscheduled/
"""
if not alert.daemonset:
return
alert.stop_processing = check_for_known_mismatch_false_alarm(alert.daemonset)
@action
def daemonset_misscheduled_analysis_enricher(event: DaemonSetEvent):
"""
Enrich the alert finding with analysis and possible causes for the misscheduling, if the cause is identified.
<https://blog.florentdelannoy.com/blog/2020/kube-daemonset-misscheduled/|Learn more>
"""
ds = event.get_daemonset()
if not ds:
logging.error(
f"cannot run DaemonsetMisscheduledAnalysis on event with no daemonset: {event}"
)
return
if not check_for_known_mismatch_false_alarm(ds):
return
event.add_enrichment(
[
MarkdownBlock(
"*Alert Cause*\n This specific firing of the alert is a *known false alarm* which occurs when the "
"cluster autoscaler removes nodes running daemonsets which didn't explicitly request to remain running "
"during node-shutdown."
),
MarkdownBlock(
textwrap.dedent(
f"""\
(<https://blog.florentdelannoy.com/blog/2020/kube-daemonset-misscheduled/|Learn more>).
*Remediation*
Would you like to:
1. Fix the daemonset configuration to avoid the false alarm
2. Use Robusta to silence the false alarm while passing through real alerts.
Choose an option below to learn more."""
)
),
CallbackBlock(
choices={
"Fix the Configuration": CallbackChoice(
action=daemonset_fix_config
),
"Silence the false alarm": CallbackChoice(
action=daemonset_silence_false_alarm
),
},
),
]
)
| 32.70892 | 126 | 0.591072 | import logging
from robusta.api import *
@action
def daemonset_fix_config(event: ExecutionBaseEvent):
finding = Finding(
title="Proposed fix",
source=FindingSource.CALLBACK.value,
aggregation_key="daemonset_fix_config",
)
finding.add_enrichment(
[
MarkdownBlock(
textwrap.dedent(
"""\
Add the following to your daemonset pod-template:
```
tolerations:
- effect: NoSchedule
key: ToBeDeletedByClusterAutoscaler
operator: Exists
```"""
)
)
]
)
finding.add_enrichment(
[
MarkdownBlock(
"This will tell Kubernetes that it is OK if daemonsets keep running while a node shuts down. "
"This is desirable for daemonsets like elasticsearch which should keep gathering logs while the "
"node shuts down."
""
)
]
)
event.add_finding(finding)
@action
def daemonset_silence_false_alarm(event: ExecutionBaseEvent):
finding = Finding(
title="Silence the alert",
source=FindingSource.CALLBACK,
aggregation_key="daemonset_silence_false_alarm",
)
finding.add_enrichment(
[
MarkdownBlock(
textwrap.dedent(
"""\
Add the following to your `active_playbooks.yaml`:
```
- name: "alerts_integration"
action_params:
alerts_config:
(...)
- alert_name: "KubernetesDaemonsetMisscheduled"
(...)
silencers:
- name: "DaemonsetMisscheduledSmartSilencer"
```"""
)
)
]
)
finding.add_enrichment(
[
MarkdownBlock(
"This will silence the KubernetesDaemonsetMisscheduled alert when the known false alarm occurs but not under "
"other conditions."
""
)
]
)
event.add_finding(finding)
@action
def daemonset_status_enricher(event: DaemonSetEvent):
ds = event.get_daemonset()
if not ds:
logging.error(
f"cannot run DaemonsetEnricher on event with no daemonset: {event}"
)
return
event.add_enrichment(
[
MarkdownBlock(f"*Daemonset Stats for {ds.metadata.name}*"),
KubernetesFieldsBlock(
ds,
[
"status.desiredNumberScheduled",
"status.currentNumberScheduled",
"status.numberAvailable",
"status.numberMisscheduled",
],
),
MarkdownBlock(
"_Daemonset lifecycle: pods start out as desired, then get scheduled, and then become available. "
"If Kubernetes then decides a pod shouldn't be running on a node after all, it becomes "
"misscheduled._"
),
]
)
# checks if the issue described here: https://blog.florentdelannoy.com/blog/2020/kube-daemonset-misscheduled/
# we check for it in the simplest way possible to avoid re-implementing k8s' scheduling logic for taints ourselves
def check_for_known_mismatch_false_alarm(ds: DaemonSet) -> bool:
if does_daemonset_have_toleration(ds, "ToBeDeletedByClusterAutoscaler"):
logging.info(
f"daemonset is configured properly, so we don't have the known mismatch false alarm"
)
return False
nodes_by_name = {n.metadata.name: n for n in NodeList.listNode().obj.items}
ds_pods = RobustaPod.find_pods_with_direct_owner(
ds.metadata.namespace, ds.metadata.uid
)
for pod in ds_pods:
if pod.spec.nodeName not in nodes_by_name:
logging.warning(f"we have a pod not running on a known node. pod={pod}")
continue
relevant_node: Node = nodes_by_name[pod.spec.nodeName]
if does_node_have_taint(relevant_node, "ToBeDeletedByClusterAutoscaler"):
logging.info(
f"we found a cluster being deleted by the autoscaler - we have the known mismatch false alert"
)
return True
return False
@action
def daemonset_misscheduled_smart_silencer(alert: PrometheusKubernetesAlert):
if not alert.daemonset:
return
alert.stop_processing = check_for_known_mismatch_false_alarm(alert.daemonset)
@action
def daemonset_misscheduled_analysis_enricher(event: DaemonSetEvent):
ds = event.get_daemonset()
if not ds:
logging.error(
f"cannot run DaemonsetMisscheduledAnalysis on event with no daemonset: {event}"
)
return
if not check_for_known_mismatch_false_alarm(ds):
return
event.add_enrichment(
[
MarkdownBlock(
"*Alert Cause*\n This specific firing of the alert is a *known false alarm* which occurs when the "
"cluster autoscaler removes nodes running daemonsets which didn't explicitly request to remain running "
"during node-shutdown."
),
MarkdownBlock(
textwrap.dedent(
f"""\
(<https://blog.florentdelannoy.com/blog/2020/kube-daemonset-misscheduled/|Learn more>).
*Remediation*
Would you like to:
1. Fix the daemonset configuration to avoid the false alarm
2. Use Robusta to silence the false alarm while passing through real alerts.
Choose an option below to learn more."""
)
),
CallbackBlock(
choices={
"Fix the Configuration": CallbackChoice(
action=daemonset_fix_config
),
"Silence the false alarm": CallbackChoice(
action=daemonset_silence_false_alarm
),
},
),
]
)
| true | true |
1c38ec052b5ddea8487aba11a06c53ea793be859 | 14,396 | py | Python | src/cloudant/view.py | ssshah5/clone-python-cloudant | 1939f6ffe1b36fd87e94e984fe300e4e109d2171 | [
"Apache-2.0"
] | 1 | 2017-01-22T21:42:20.000Z | 2017-01-22T21:42:20.000Z | venv/lib/python3.5/site-packages/cloudant/view.py | remkohdev/python-flask-d3js-newssentiment | 006e1083b6a307fda55b176eadb810aa08bd8b63 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.5/site-packages/cloudant/view.py | remkohdev/python-flask-d3js-newssentiment | 006e1083b6a307fda55b176eadb810aa08bd8b63 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2015 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
API module for interacting with a view in a design document.
"""
import contextlib
import posixpath
from ._2to3 import STRTYPE
from ._common_util import codify, get_docs
from .result import Result
from .error import CloudantArgumentError, CloudantException
class View(dict):
"""
Encapsulates a view as a dictionary based object, exposing the map and
reduce functions as attributes and supporting query/data access through
the view. A View object is instantiated with a reference to a
DesignDocument and is typically used as part of the
:class:`~cloudant.design_document.DesignDocument` view management API.
A View object provides a key accessible, sliceable, and iterable default
result collection that can be used to query the view data through the
``result`` attribute.
For example:
.. code-block:: python
# Access result collection through individual keys
view.result[100]
view.result['foo']
# Access result collection through index slicing:
view.result[100: 200]
view.result[: 200]
view.result[100: ]
view.result[: ]
# Access result collection through key slicing:
view.result['bar': 'foo']
view.result['bar': ]
view.result[: 'foo']
# Iterate over the result collection:
for doc in view.result:
print doc
The default result collection provides basic functionality,
which can be customized with other arguments using the
:func:`~cloudant.view.View.custom_result` context manager.
For example:
.. code-block:: python
# Including documents as part of a custom result
with view.custom_result(include_docs=True) as rslt:
rslt[100: 200] # slice by result
rslt[['2013', '10']: ['2013', '11']] # slice by startkey/endkey
# Iteration
for doc in rslt:
print doc
# Iteration over a view within startkey/endkey range:
with view.custom_result(startkey='2013', endkey='2014') as rslt:
for doc in rslt:
print doc
Note: A view must exist as part of a design document remotely in order to
access result content as depicted in the above examples.
:param DesignDocument ddoc: DesignDocument instance used in part to
identify the view.
:param str view_name: Name used in part to identify the view.
:param str map_func: Optional Javascript map function.
:param str reduce_func: Optional Javascript reduce function.
"""
def __init__(
self,
ddoc,
view_name,
map_func=None,
reduce_func=None,
**kwargs
):
super(View, self).__init__()
self.design_doc = ddoc
self._r_session = self.design_doc.r_session
self.view_name = view_name
if map_func is not None:
self['map'] = codify(map_func)
if reduce_func is not None:
self['reduce'] = codify(reduce_func)
self.update(kwargs)
self.result = Result(self)
@property
def map(self):
"""
Provides an map property accessor and setter.
For example:
.. code-block:: python
# Set the View map property
view.map = 'function (doc) {\\n emit(doc._id, 1);\\n}'
print view.map
:param str js_func: Javascript function.
:returns: Codified map function
"""
return self.get('map')
@map.setter
def map(self, js_func):
"""
Provides a map property setter.
"""
self['map'] = codify(js_func)
@property
def reduce(self):
"""
Provides an reduce property accessor and setter.
For example:
.. code-block:: python
# Set the View reduce property
view.reduce = '_count'
# Get and print the View reduce property
print view.reduce
:param str js_func: Javascript function.
:returns: Codified reduce function
"""
return self.get('reduce')
@reduce.setter
def reduce(self, js_func):
"""
Provides a reduce property setter.
"""
self['reduce'] = codify(js_func)
@property
def url(self):
"""
Constructs and returns the View URL.
:returns: View URL
"""
return posixpath.join(
self.design_doc.document_url,
'_view',
self.view_name
)
def __call__(self, **kwargs):
"""
Makes the View object callable and retrieves the raw JSON content
from the remote database based on the View definition on the server,
using the kwargs provided as query parameters.
For example:
.. code-block:: python
# Construct a View
view = View(ddoc, 'view001')
# Assuming that 'view001' exists as part of the
# design document ddoc in the remote database...
# Use view as a callable
for row in view(include_docs=True, limit=100, skip=100)['rows']:
# Process view data (in JSON format).
Note: Rather than using the View callable directly, if you wish to
retrieve view results in raw JSON format use ``raw_result=True`` with
the provided database API of
:func:`~cloudant.database.CouchDatabase.get_view_result` instead.
:param bool descending: Return documents in descending key order.
:param endkey: Stop returning records at this specified key.
:param str endkey_docid: Stop returning records when the specified
document id is reached.
:param bool group: Using the reduce function, group the results to a
group or single row.
:param group_level: Only applicable if the view uses complex keys: keys
that are JSON arrays. Groups reduce results for the specified number
of array fields.
:param bool include_docs: Include the full content of the documents.
:param bool inclusive_end: Include rows with the specified endkey.
:param str key: Return only documents that match the specified key.
:param list keys: Return only documents that match the specified keys.
:param int limit: Limit the number of returned documents to the
specified count.
:param bool reduce: True to use the reduce function, false otherwise.
:param int skip: Skip this number of rows from the start.
:param str stale: Allow the results from a stale view to be used. This
makes the request return immediately, even if the view has not been
completely built yet. If this parameter is not given, a response is
returned only after the view has been built.
:param startkey: Return records starting with the specified key.
:param str startkey_docid: Return records starting with the specified
document ID.
:returns: View result data in JSON format
"""
resp = get_docs(self._r_session,
self.url,
self.design_doc.encoder,
**kwargs)
return resp.json()
@contextlib.contextmanager
def custom_result(self, **options):
"""
Customizes the :class:`~cloudant.result.Result` behavior and provides
a convenient context manager for the Result. Result customizations
can be made by providing extra options to the result call using this
context manager. Depending on how you are accessing, slicing or
iterating through your result collection certain query parameters are
not permitted. See :class:`~cloudant.result.Result` for additional
details.
For example:
.. code-block:: python
with view.custom_result(include_docs=True, reduce=False) as rslt:
data = rslt[100: 200]
:param bool descending: Return documents in descending key order.
:param endkey: Stop returning records at this specified key.
Not valid when used with :class:`~cloudant.result.Result` key
access and key slicing.
:param str endkey_docid: Stop returning records when the specified
document id is reached.
:param bool group: Using the reduce function, group the results to a
group or single row.
:param group_level: Only applicable if the view uses complex keys: keys
that are JSON arrays. Groups reduce results for the specified number
of array fields.
:param bool include_docs: Include the full content of the documents.
:param bool inclusive_end: Include rows with the specified endkey.
:param key: Return only documents that match the specified key.
Not valid when used with :class:`~cloudant.result.Result` key
access and key slicing.
:param list keys: Return only documents that match the specified keys.
Not valid when used with :class:`~cloudant.result.Result` key
access and key slicing.
:param int limit: Limit the number of returned documents to the
specified count. Not valid when used with
:class:`~cloudant.result.Result` iteration.
:param int page_size: Sets the page size for result iteration.
:param bool reduce: True to use the reduce function, false otherwise.
:param int skip: Skip this number of rows from the start.
Not valid when used with :class:`~cloudant.result.Result` iteration.
:param str stale: Allow the results from a stale view to be used. This
makes the request return immediately, even if the view has not been
completely built yet. If this parameter is not given, a response is
returned only after the view has been built.
:param startkey: Return records starting with the specified key.
Not valid when used with :class:`~cloudant.result.Result` key
access and key slicing.
:param str startkey_docid: Return records starting with the specified
document ID.
:returns: View result data wrapped in a Result instance
"""
rslt = Result(self, **options)
yield rslt
del rslt
class QueryIndexView(View):
"""
A view that defines a JSON query index in a design document.
If you wish to manage a view that represents a JSON query index it
is strongly recommended that
:func:`~cloudant.database.CloudantDatabase.create_query_index`
and :func:`~cloudant.database.CloudantDatabase.delete_query_index` are used.
"""
def __init__(self, ddoc, view_name, map_fields, reduce_func, **kwargs):
if not isinstance(map_fields, dict):
raise CloudantArgumentError('The map property must be a dictionary')
if not isinstance(reduce_func, STRTYPE):
raise CloudantArgumentError('The reduce property must be a string.')
super(QueryIndexView, self).__init__(
ddoc,
view_name,
map_fields,
reduce_func,
**kwargs
)
self['map'] = map_fields
self['reduce'] = reduce_func
self.result = None
@property
def map(self):
"""
Provides a map property accessor and setter.
:param dict map_func: A dictionary of fields defining the index.
:returns: Fields defining the index
"""
return self.get('map')
@map.setter
def map(self, map_func):
"""
Provides a map property setter.
"""
if isinstance(map_func, dict):
self['map'] = map_func
else:
raise CloudantArgumentError('The map property must be a dictionary')
@property
def reduce(self):
"""
Provides a reduce property accessor and setter.
:param str reduce_func: A string representation of the reduce function
used in part to define the index.
:returns: Reduce function as a string
"""
return self.get('reduce')
@reduce.setter
def reduce(self, reduce_func):
"""
Provides a reduce property setter.
"""
if isinstance(reduce_func, STRTYPE):
self['reduce'] = reduce_func
else:
raise CloudantArgumentError('The reduce property must be a string')
def __call__(self, **kwargs):
"""
QueryIndexView objects are not callable. If you wish to execute a query
using a query index, use
:func:`~cloudant.database.CloudantDatabase.get_query_result` instead.
"""
raise CloudantException(
'A QueryIndexView is not callable. If you wish to execute a query '
'use the database \'get_query_result\' convenience method.'
)
def custom_result(self, **options):
"""
This method overrides the View base class
:func:`~cloudant.view.View.custom_result` method with the sole purpose of
disabling it. Since QueryIndexView objects are not callable, there is
no reason to wrap their output in a Result. If you wish to execute a
query using a query index, use
:func:`~cloudant.database.CloudantDatabase.get_query_result` instead.
"""
raise CloudantException(
'Cannot create a custom result context manager using a '
'QueryIndexView. If you wish to execute a query use the '
'database \'get_query_result\' convenience method instead.'
)
| 37.295337 | 81 | 0.633996 |
import contextlib
import posixpath
from ._2to3 import STRTYPE
from ._common_util import codify, get_docs
from .result import Result
from .error import CloudantArgumentError, CloudantException
class View(dict):
def __init__(
self,
ddoc,
view_name,
map_func=None,
reduce_func=None,
**kwargs
):
super(View, self).__init__()
self.design_doc = ddoc
self._r_session = self.design_doc.r_session
self.view_name = view_name
if map_func is not None:
self['map'] = codify(map_func)
if reduce_func is not None:
self['reduce'] = codify(reduce_func)
self.update(kwargs)
self.result = Result(self)
@property
def map(self):
return self.get('map')
@map.setter
def map(self, js_func):
self['map'] = codify(js_func)
@property
def reduce(self):
return self.get('reduce')
@reduce.setter
def reduce(self, js_func):
self['reduce'] = codify(js_func)
@property
def url(self):
return posixpath.join(
self.design_doc.document_url,
'_view',
self.view_name
)
def __call__(self, **kwargs):
resp = get_docs(self._r_session,
self.url,
self.design_doc.encoder,
**kwargs)
return resp.json()
@contextlib.contextmanager
def custom_result(self, **options):
rslt = Result(self, **options)
yield rslt
del rslt
class QueryIndexView(View):
def __init__(self, ddoc, view_name, map_fields, reduce_func, **kwargs):
if not isinstance(map_fields, dict):
raise CloudantArgumentError('The map property must be a dictionary')
if not isinstance(reduce_func, STRTYPE):
raise CloudantArgumentError('The reduce property must be a string.')
super(QueryIndexView, self).__init__(
ddoc,
view_name,
map_fields,
reduce_func,
**kwargs
)
self['map'] = map_fields
self['reduce'] = reduce_func
self.result = None
@property
def map(self):
return self.get('map')
@map.setter
def map(self, map_func):
if isinstance(map_func, dict):
self['map'] = map_func
else:
raise CloudantArgumentError('The map property must be a dictionary')
@property
def reduce(self):
return self.get('reduce')
@reduce.setter
def reduce(self, reduce_func):
if isinstance(reduce_func, STRTYPE):
self['reduce'] = reduce_func
else:
raise CloudantArgumentError('The reduce property must be a string')
def __call__(self, **kwargs):
raise CloudantException(
'A QueryIndexView is not callable. If you wish to execute a query '
'use the database \'get_query_result\' convenience method.'
)
def custom_result(self, **options):
raise CloudantException(
'Cannot create a custom result context manager using a '
'QueryIndexView. If you wish to execute a query use the '
'database \'get_query_result\' convenience method instead.'
)
| true | true |
1c38ec0726481b170f31d8e8aa55c5f50b3a5b24 | 418 | py | Python | scico/test/osver.py | lanl/scico | 976c9e5833f8f67eed2eaa43460d89fb09bb9f78 | [
"BSD-3-Clause"
] | 18 | 2021-09-21T18:55:11.000Z | 2022-03-21T20:13:05.000Z | scico/test/osver.py | lanl/scico | 976c9e5833f8f67eed2eaa43460d89fb09bb9f78 | [
"BSD-3-Clause"
] | 218 | 2021-09-21T21:45:08.000Z | 2022-03-30T18:45:27.000Z | scico/test/osver.py | lanl/scico | 976c9e5833f8f67eed2eaa43460d89fb09bb9f78 | [
"BSD-3-Clause"
] | 2 | 2021-09-23T22:44:47.000Z | 2021-12-18T16:01:43.000Z | import platform
from packaging.version import parse
def osx_ver_geq_than(verstr):
"""Determine relative platform OSX version.
Determine whether platform has OSX version that is as recent as or
more recent than verstr. Returns ``False`` if the OS is not OSX.
"""
if platform.system() != "Darwin":
return False
osxver = platform.mac_ver()[0]
return parse(osxver) >= parse(verstr)
| 26.125 | 70 | 0.69378 | import platform
from packaging.version import parse
def osx_ver_geq_than(verstr):
if platform.system() != "Darwin":
return False
osxver = platform.mac_ver()[0]
return parse(osxver) >= parse(verstr)
| true | true |
1c38ec844d9bc6690a842eedede54bb33740e663 | 1,682 | py | Python | doks/rst/code.py | rec/doks | 8fb1c2779bb40cbae9427c3be357231543e89d54 | [
"MIT"
] | 1 | 2020-10-31T03:25:23.000Z | 2020-10-31T03:25:23.000Z | doks/rst/code.py | rec/doks | 8fb1c2779bb40cbae9427c3be357231543e89d54 | [
"MIT"
] | 15 | 2020-05-29T11:50:13.000Z | 2020-12-08T11:07:45.000Z | doks/rst/code.py | rec/doks | 8fb1c2779bb40cbae9427c3be357231543e89d54 | [
"MIT"
] | null | null | null | from pathlib import Path
import configparser
import inspect
import os
MSG = '`{msg} <https://{host}/{user}/{project}/{sep}/{file}#L{begin}-L{end}>`_'
SEPARATORS = {
'github.com': 'blob/master',
'gitlab.com': '-/blob/master',
}
SECTIONS = '-=~_+*#`\':<>^"'
_GIT_SUFFIX = '.git'
_SSH_PREFIX = 'git@'
_HTTPS_PREFIX = 'https://'
def code(value):
while True:
v = value
value = getattr(v, '__wrapped__', v)
if v is value:
break
file = os.path.relpath(inspect.getfile(value), '.')
lines, begin = inspect.getsourcelines(value)
end = begin + len(lines)
msg = f'{file}, {begin}-{end}'
remote = _remote()
if remote:
host, user, project = remote
sep = SEPARATORS[host]
msg = MSG.format(**locals())
return f'({msg})'
def _remote():
git_file = Path('.git/config')
if not git_file.exists():
return
cfg = configparser.ConfigParser()
cfg.read(git_file)
remote_urls = (v['url'] for k, v in cfg.items() if k.startswith('remote '))
suffix = _GIT_SUFFIX
prefixes = _SSH_PREFIX, _HTTPS_PREFIX
for remote in remote_urls:
if remote.endswith(suffix) and remote.count(':') == 1:
prefix = next((p for p in prefixes if remote.startswith(p)), None)
if prefix:
remote = remote[len(prefix) : -len(suffix)]
parts = remote.replace(':', '/').split('/')
if len(parts) == 3:
if parts[0] in SEPARATORS:
return parts
else:
continue
raise ValueError('Do not understand remote %s' % remote)
| 25.484848 | 79 | 0.550535 | from pathlib import Path
import configparser
import inspect
import os
MSG = '`{msg} <https://{host}/{user}/{project}/{sep}/{file}#L{begin}-L{end}>`_'
SEPARATORS = {
'github.com': 'blob/master',
'gitlab.com': '-/blob/master',
}
SECTIONS = '-=~_+*#`\':<>^"'
_GIT_SUFFIX = '.git'
_SSH_PREFIX = 'git@'
_HTTPS_PREFIX = 'https://'
def code(value):
while True:
v = value
value = getattr(v, '__wrapped__', v)
if v is value:
break
file = os.path.relpath(inspect.getfile(value), '.')
lines, begin = inspect.getsourcelines(value)
end = begin + len(lines)
msg = f'{file}, {begin}-{end}'
remote = _remote()
if remote:
host, user, project = remote
sep = SEPARATORS[host]
msg = MSG.format(**locals())
return f'({msg})'
def _remote():
git_file = Path('.git/config')
if not git_file.exists():
return
cfg = configparser.ConfigParser()
cfg.read(git_file)
remote_urls = (v['url'] for k, v in cfg.items() if k.startswith('remote '))
suffix = _GIT_SUFFIX
prefixes = _SSH_PREFIX, _HTTPS_PREFIX
for remote in remote_urls:
if remote.endswith(suffix) and remote.count(':') == 1:
prefix = next((p for p in prefixes if remote.startswith(p)), None)
if prefix:
remote = remote[len(prefix) : -len(suffix)]
parts = remote.replace(':', '/').split('/')
if len(parts) == 3:
if parts[0] in SEPARATORS:
return parts
else:
continue
raise ValueError('Do not understand remote %s' % remote)
| true | true |
1c38f095c0a6cd3ff807c4e382e8d14fc253b7e3 | 258 | py | Python | 1038.py | gabriel1lima/Questoes---URI---Python | 4e88d76cf7ea68baf0464071bc4f72ced7d746cd | [
"MIT"
] | 1 | 2020-10-01T14:22:48.000Z | 2020-10-01T14:22:48.000Z | 1038.py | gabriel1lima/Questoes---URI---Python | 4e88d76cf7ea68baf0464071bc4f72ced7d746cd | [
"MIT"
] | null | null | null | 1038.py | gabriel1lima/Questoes---URI---Python | 4e88d76cf7ea68baf0464071bc4f72ced7d746cd | [
"MIT"
] | 7 | 2020-10-01T13:03:22.000Z | 2020-10-02T16:10:25.000Z | a, b = input().split()
a = int(a)
b = int(b)
if(a == 1):
qtd = b * 4.00
elif(a == 2):
qtd = b * 4.50
elif(a == 3):
qtd = b * 5.00
elif(a == 4):
qtd = b * 2.00
elif(a == 5):
qtd = b * 1.50
print("Total: R$ {0:.2f}".format(qtd)) | 9.923077 | 38 | 0.418605 | a, b = input().split()
a = int(a)
b = int(b)
if(a == 1):
qtd = b * 4.00
elif(a == 2):
qtd = b * 4.50
elif(a == 3):
qtd = b * 5.00
elif(a == 4):
qtd = b * 2.00
elif(a == 5):
qtd = b * 1.50
print("Total: R$ {0:.2f}".format(qtd)) | true | true |
1c38f0c0b4f51f9242a1fdcf3df4f3244bb65964 | 431 | py | Python | client/upload.py | Shadey/3dsMouse | c485182d29da706f4b51ade3c63a5e7710bef8fc | [
"Unlicense"
] | null | null | null | client/upload.py | Shadey/3dsMouse | c485182d29da706f4b51ade3c63a5e7710bef8fc | [
"Unlicense"
] | null | null | null | client/upload.py | Shadey/3dsMouse | c485182d29da706f4b51ade3c63a5e7710bef8fc | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
from ftplib import FTP
from glob import glob as files
import os
host = "192.168.0.18"
port = 5000
rootname = os.getcwd().split("/")[-1]
f = FTP()
f.connect(host,port=port)
try:
f.cwd("3ds/"+rootname)
except:
f.mkd("3ds/"+rootname)
for match in files(rootname+"*"):
print("Sending",match)
with open(match,'rb') as file:
f.storbinary('STOR ' + match,file)
print("Files sent over")
f.close()
| 21.55 | 42 | 0.647332 |
from ftplib import FTP
from glob import glob as files
import os
host = "192.168.0.18"
port = 5000
rootname = os.getcwd().split("/")[-1]
f = FTP()
f.connect(host,port=port)
try:
f.cwd("3ds/"+rootname)
except:
f.mkd("3ds/"+rootname)
for match in files(rootname+"*"):
print("Sending",match)
with open(match,'rb') as file:
f.storbinary('STOR ' + match,file)
print("Files sent over")
f.close()
| true | true |
1c38f22e6c7d5e5f1997fb0e2b0250ba88cd41b7 | 7,494 | py | Python | recbole/model/general_recommender/recvae.py | Ahren09/RecBole | b3921818dfbc1b81f9eda8d5e9f05bc9d9114089 | [
"MIT"
] | 1,773 | 2020-11-04T01:22:11.000Z | 2022-03-31T08:05:41.000Z | recbole/model/general_recommender/recvae.py | chenyushuo/RecBole | f04084b8d2cffcb79eb9e4b21325f8f6c75c638e | [
"MIT"
] | 378 | 2020-11-05T02:42:27.000Z | 2022-03-31T22:57:04.000Z | recbole/model/general_recommender/recvae.py | chenyushuo/RecBole | f04084b8d2cffcb79eb9e4b21325f8f6c75c638e | [
"MIT"
] | 354 | 2020-11-04T01:37:09.000Z | 2022-03-31T10:39:32.000Z | # -*- coding: utf-8 -*-
# @Time : 2021/2/28
# @Author : Lanling Xu
# @Email : xulanling_sherry@163.com
r"""
RecVAE
################################################
Reference:
Shenbin, Ilya, et al. "RecVAE: A new variational autoencoder for Top-N recommendations with implicit feedback." In WSDM 2020.
Reference code:
https://github.com/ilya-shenbin/RecVAE
"""
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from recbole.model.abstract_recommender import GeneralRecommender
from recbole.model.init import xavier_normal_initialization
from recbole.utils import InputType
def swish(x):
r"""Swish activation function:
.. math::
\text{Swish}(x) = \frac{x}{1 + \exp(-x)}
"""
return x.mul(torch.sigmoid(x))
def log_norm_pdf(x, mu, logvar):
return -0.5 * (logvar + np.log(2 * np.pi) + (x - mu).pow(2) / logvar.exp())
class CompositePrior(nn.Module):
def __init__(self, hidden_dim, latent_dim, input_dim, mixture_weights):
super(CompositePrior, self).__init__()
self.mixture_weights = mixture_weights
self.mu_prior = nn.Parameter(torch.Tensor(1, latent_dim), requires_grad=False)
self.mu_prior.data.fill_(0)
self.logvar_prior = nn.Parameter(torch.Tensor(1, latent_dim), requires_grad=False)
self.logvar_prior.data.fill_(0)
self.logvar_uniform_prior = nn.Parameter(torch.Tensor(1, latent_dim), requires_grad=False)
self.logvar_uniform_prior.data.fill_(10)
self.encoder_old = Encoder(hidden_dim, latent_dim, input_dim)
self.encoder_old.requires_grad_(False)
def forward(self, x, z):
post_mu, post_logvar = self.encoder_old(x, 0)
stnd_prior = log_norm_pdf(z, self.mu_prior, self.logvar_prior)
post_prior = log_norm_pdf(z, post_mu, post_logvar)
unif_prior = log_norm_pdf(z, self.mu_prior, self.logvar_uniform_prior)
gaussians = [stnd_prior, post_prior, unif_prior]
gaussians = [g.add(np.log(w)) for g, w in zip(gaussians, self.mixture_weights)]
density_per_gaussian = torch.stack(gaussians, dim=-1)
return torch.logsumexp(density_per_gaussian, dim=-1)
class Encoder(nn.Module):
    """Densely-residual five-layer MLP producing the mean and log-variance
    of the approximate posterior q(z|x).
    """
    def __init__(self, hidden_dim, latent_dim, input_dim, eps=1e-1):
        super(Encoder, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.ln1 = nn.LayerNorm(hidden_dim, eps=eps)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.ln2 = nn.LayerNorm(hidden_dim, eps=eps)
        self.fc3 = nn.Linear(hidden_dim, hidden_dim)
        self.ln3 = nn.LayerNorm(hidden_dim, eps=eps)
        self.fc4 = nn.Linear(hidden_dim, hidden_dim)
        self.ln4 = nn.LayerNorm(hidden_dim, eps=eps)
        self.fc5 = nn.Linear(hidden_dim, hidden_dim)
        self.ln5 = nn.LayerNorm(hidden_dim, eps=eps)
        # Output heads for the Gaussian posterior parameters.
        self.fc_mu = nn.Linear(hidden_dim, latent_dim)
        self.fc_logvar = nn.Linear(hidden_dim, latent_dim)
    def forward(self, x, dropout_prob):
        """Return ``(mu, logvar)``; dropout is applied after normalization."""
        x = F.normalize(x)
        x = F.dropout(x, dropout_prob, training=self.training)
        # Every layer adds the sum of all previous hidden states to its
        # linear output before the swish nonlinearity, then applies LayerNorm
        # (dense residual connections). Addition order matches the original:
        # fc_k(h_{k-1}) + h_1 + h_2 + ... + h_{k-1}.
        hidden_states = []
        for fc, ln in ((self.fc1, self.ln1), (self.fc2, self.ln2),
                       (self.fc3, self.ln3), (self.fc4, self.ln4),
                       (self.fc5, self.ln5)):
            layer_input = hidden_states[-1] if hidden_states else x
            pre_activation = fc(layer_input)
            for previous in hidden_states:
                pre_activation = pre_activation + previous
            hidden_states.append(ln(swish(pre_activation)))
        last = hidden_states[-1]
        return self.fc_mu(last), self.fc_logvar(last)
class RecVAE(GeneralRecommender):
    r"""RecVAE: a variational autoencoder model for top-N recommendation
    with implicit feedback, implemented following the original authors' code.
    It pairs a residual ``Encoder`` with a linear decoder and a
    ``CompositePrior`` whose frozen old-encoder copy is refreshed via
    :meth:`update_prior`.
    """
    input_type = InputType.PAIRWISE
    def __init__(self, config, dataset):
        super(RecVAE, self).__init__(config, dataset)
        # Hyperparameters from the config.
        self.hidden_dim = config["hidden_dimension"]
        self.latent_dim = config['latent_dimension']
        self.dropout_prob = config['dropout_prob']
        # beta: fixed KL weight, used only when gamma is falsy (see calculate_loss).
        self.beta = config['beta']
        self.mixture_weights = config['mixture_weights']
        # gamma: when set, scales the KL term per user by its interaction count.
        self.gamma = config['gamma']
        # Per-user interacted item ids and values (from dataset.history_item_matrix()),
        # moved to the model device once.
        self.history_item_id, self.history_item_value, _ = dataset.history_item_matrix()
        self.history_item_id = self.history_item_id.to(self.device)
        self.history_item_value = self.history_item_value.to(self.device)
        self.encoder = Encoder(self.hidden_dim, self.latent_dim, self.n_items)
        self.prior = CompositePrior(self.hidden_dim, self.latent_dim, self.n_items, self.mixture_weights)
        self.decoder = nn.Linear(self.latent_dim, self.n_items)
        # parameters initialization
        self.apply(xavier_normal_initialization)
    def get_rating_matrix(self, user):
        r"""Get a batch of user's feature with the user's id and history interaction matrix.
        Args:
            user (torch.LongTensor): The input tensor that contains user's id, shape: [batch_size, ]
        Returns:
            torch.FloatTensor: The user's feature of a batch of user, shape: [batch_size, n_items]
        """
        # Following lines construct tensor of shape [B,n_items] using the tensor of shape [B,H]
        col_indices = self.history_item_id[user].flatten()
        row_indices = torch.arange(user.shape[0]).to(self.device) \
            .repeat_interleave(self.history_item_id.shape[1], dim=0)
        rating_matrix = torch.zeros(1).to(self.device).repeat(user.shape[0], self.n_items)
        rating_matrix.index_put_((row_indices, col_indices), self.history_item_value[user].flatten())
        return rating_matrix
    def reparameterize(self, mu, logvar):
        # Reparameterization trick: sample while training, return the mean
        # deterministically at eval time. Note the noise uses std=0.01
        # (as in the reference implementation), not unit noise.
        if self.training:
            std = torch.exp(0.5 * logvar)
            epsilon = torch.zeros_like(std).normal_(mean=0, std=0.01)
            return mu + epsilon * std
        else:
            return mu
    def forward(self, rating_matrix, dropout_prob):
        # Encode -> sample z -> decode per-item scores.
        mu, logvar = self.encoder(rating_matrix, dropout_prob=dropout_prob)
        z = self.reparameterize(mu, logvar)
        x_pred = self.decoder(z)
        return x_pred, mu, logvar, z
    def calculate_loss(self, interaction, encoder_flag):
        # Negative ELBO; dropout is applied only when encoder_flag is set.
        user = interaction[self.USER_ID]
        rating_matrix = self.get_rating_matrix(user)
        if encoder_flag:
            dropout_prob = self.dropout_prob
        else:
            dropout_prob = 0
        x_pred, mu, logvar, z = self.forward(rating_matrix, dropout_prob)
        if self.gamma:
            # Per-user KL weight proportional to the number of interactions.
            norm = rating_matrix.sum(dim=-1)
            kl_weight = self.gamma * norm
        else:
            kl_weight = self.beta
        # Multinomial log-likelihood over observed items.
        mll = (F.log_softmax(x_pred, dim=-1) * rating_matrix).sum(dim=-1).mean()
        # KL(q(z|x) || composite prior), weighted.
        kld = (log_norm_pdf(z, mu, logvar) - self.prior(rating_matrix, z)).sum(dim=-1).mul(kl_weight).mean()
        negative_elbo = -(mll - kld)
        return negative_elbo
    def predict(self, interaction):
        # Score only the (user, item) pairs present in the interaction.
        user = interaction[self.USER_ID]
        item = interaction[self.ITEM_ID]
        rating_matrix = self.get_rating_matrix(user)
        scores, _, _, _ = self.forward(rating_matrix, self.dropout_prob)
        return scores[[torch.arange(len(item)).to(self.device), item]]
    def full_sort_predict(self, interaction):
        # Score every item for each user in the batch, flattened row-major.
        user = interaction[self.USER_ID]
        rating_matrix = self.get_rating_matrix(user)
        scores, _, _, _ = self.forward(rating_matrix, self.dropout_prob)
        return scores.view(-1)
    def update_prior(self):
        # Snapshot current encoder weights into the prior's frozen old encoder.
        self.prior.encoder_old.load_state_dict(deepcopy(self.encoder.state_dict()))
| 36.202899 | 129 | 0.659995 |
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from recbole.model.abstract_recommender import GeneralRecommender
from recbole.model.init import xavier_normal_initialization
from recbole.utils import InputType
def swish(x):
    """Swish activation: element-wise x * sigmoid(x)."""
    return torch.sigmoid(x).mul(x)
def log_norm_pdf(x, mu, logvar):
    """Element-wise log-pdf of a Gaussian with mean ``mu`` and log-variance ``logvar`` at ``x``."""
    return -0.5 * (logvar + np.log(2 * np.pi) + (x - mu).pow(2) / logvar.exp())
class CompositePrior(nn.Module):
    """Composite prior over the latent space: a mixture of a standard normal,
    the posterior of a frozen "old" encoder, and a very wide Gaussian
    (log-variance 10), combined with fixed ``mixture_weights``.
    """
    def __init__(self, hidden_dim, latent_dim, input_dim, mixture_weights):
        super(CompositePrior, self).__init__()
        self.mixture_weights = mixture_weights
        # Fixed, non-trainable component parameters (zero mean, zero log-variance).
        self.mu_prior = nn.Parameter(torch.Tensor(1, latent_dim), requires_grad=False)
        self.mu_prior.data.fill_(0)
        self.logvar_prior = nn.Parameter(torch.Tensor(1, latent_dim), requires_grad=False)
        self.logvar_prior.data.fill_(0)
        # Wide component: log-variance 10 gives a nearly flat density.
        self.logvar_uniform_prior = nn.Parameter(torch.Tensor(1, latent_dim), requires_grad=False)
        self.logvar_uniform_prior.data.fill_(10)
        # Frozen copy of the encoder; its weights are loaded externally
        # (see RecVAE.update_prior).
        self.encoder_old = Encoder(hidden_dim, latent_dim, input_dim)
        self.encoder_old.requires_grad_(False)
    def forward(self, x, z):
        """Return the log-density of ``z`` under the weighted mixture, given input ``x``."""
        post_mu, post_logvar = self.encoder_old(x, 0)
        stnd_prior = log_norm_pdf(z, self.mu_prior, self.logvar_prior)
        post_prior = log_norm_pdf(z, post_mu, post_logvar)
        unif_prior = log_norm_pdf(z, self.mu_prior, self.logvar_uniform_prior)
        gaussians = [stnd_prior, post_prior, unif_prior]
        # Weight each component, then combine with a numerically stable logsumexp.
        gaussians = [g.add(np.log(w)) for g, w in zip(gaussians, self.mixture_weights)]
        density_per_gaussian = torch.stack(gaussians, dim=-1)
        return torch.logsumexp(density_per_gaussian, dim=-1)
class Encoder(nn.Module):
    """Densely-residual five-layer MLP encoder producing the mean and
    log-variance of the approximate posterior q(z|x).
    """
    def __init__(self, hidden_dim, latent_dim, input_dim, eps=1e-1):
        super(Encoder, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.ln1 = nn.LayerNorm(hidden_dim, eps=eps)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.ln2 = nn.LayerNorm(hidden_dim, eps=eps)
        self.fc3 = nn.Linear(hidden_dim, hidden_dim)
        self.ln3 = nn.LayerNorm(hidden_dim, eps=eps)
        self.fc4 = nn.Linear(hidden_dim, hidden_dim)
        self.ln4 = nn.LayerNorm(hidden_dim, eps=eps)
        self.fc5 = nn.Linear(hidden_dim, hidden_dim)
        self.ln5 = nn.LayerNorm(hidden_dim, eps=eps)
        # Output heads for the Gaussian posterior parameters.
        self.fc_mu = nn.Linear(hidden_dim, latent_dim)
        self.fc_logvar = nn.Linear(hidden_dim, latent_dim)
    def forward(self, x, dropout_prob):
        """Return ``(mu, logvar)`` for input ``x``; dropout is applied after normalization."""
        x = F.normalize(x)
        x = F.dropout(x, dropout_prob, training=self.training)
        # Each layer adds the sum of all previous hidden states to its linear
        # output before the swish nonlinearity, then applies LayerNorm
        # (dense residual connections).
        h1 = self.ln1(swish(self.fc1(x)))
        h2 = self.ln2(swish(self.fc2(h1) + h1))
        h3 = self.ln3(swish(self.fc3(h2) + h1 + h2))
        h4 = self.ln4(swish(self.fc4(h3) + h1 + h2 + h3))
        h5 = self.ln5(swish(self.fc5(h4) + h1 + h2 + h3 + h4))
        return self.fc_mu(h5), self.fc_logvar(h5)
class RecVAE(GeneralRecommender):
    r"""RecVAE: a variational autoencoder model for top-N recommendation with
    implicit feedback, implemented following the original authors' code.
    """
    input_type = InputType.PAIRWISE
    def __init__(self, config, dataset):
        super(RecVAE, self).__init__(config, dataset)
        # Hyperparameters from the config.
        self.hidden_dim = config["hidden_dimension"]
        self.latent_dim = config['latent_dimension']
        self.dropout_prob = config['dropout_prob']
        self.beta = config['beta']
        self.mixture_weights = config['mixture_weights']
        self.gamma = config['gamma']
        # Per-user interacted item ids/values, moved to the model device once.
        self.history_item_id, self.history_item_value, _ = dataset.history_item_matrix()
        self.history_item_id = self.history_item_id.to(self.device)
        self.history_item_value = self.history_item_value.to(self.device)
        self.encoder = Encoder(self.hidden_dim, self.latent_dim, self.n_items)
        self.prior = CompositePrior(self.hidden_dim, self.latent_dim, self.n_items, self.mixture_weights)
        self.decoder = nn.Linear(self.latent_dim, self.n_items)
        # Parameter initialization.
        self.apply(xavier_normal_initialization)
    def get_rating_matrix(self, user):
        """Build the dense [batch_size, n_items] implicit-feedback matrix for ``user``."""
        batch_size = user.shape[0]
        history_width = self.history_item_id.shape[1]
        # Scatter the per-user history values into a dense zero matrix.
        col_indices = self.history_item_id[user].flatten()
        row_indices = torch.arange(batch_size).to(self.device).repeat_interleave(history_width, dim=0)
        rating_matrix = torch.zeros(1).to(self.device).repeat(batch_size, self.n_items)
        rating_matrix.index_put_((row_indices, col_indices), self.history_item_value[user].flatten())
        return rating_matrix
    def reparameterize(self, mu, logvar):
        """Sample z with the reparameterization trick in training; return mu at eval time."""
        if not self.training:
            return mu
        std = torch.exp(0.5 * logvar)
        noise = torch.zeros_like(std).normal_(mean=0, std=0.01)
        return mu + noise * std
    def forward(self, rating_matrix, dropout_prob):
        """Encode, sample a latent vector and decode per-item scores."""
        mu, logvar = self.encoder(rating_matrix, dropout_prob=dropout_prob)
        z = self.reparameterize(mu, logvar)
        return self.decoder(z), mu, logvar, z
    def calculate_loss(self, interaction, encoder_flag):
        """Return the negative ELBO; dropout is active only when ``encoder_flag`` is set."""
        user = interaction[self.USER_ID]
        rating_matrix = self.get_rating_matrix(user)
        dropout_prob = self.dropout_prob if encoder_flag else 0
        x_pred, mu, logvar, z = self.forward(rating_matrix, dropout_prob)
        if self.gamma:
            # Per-user KL weight proportional to the interaction count.
            kl_weight = self.gamma * rating_matrix.sum(dim=-1)
        else:
            kl_weight = self.beta
        mll = (F.log_softmax(x_pred, dim=-1) * rating_matrix).sum(dim=-1).mean()
        kld = (log_norm_pdf(z, mu, logvar) - self.prior(rating_matrix, z)).sum(dim=-1).mul(kl_weight).mean()
        return -(mll - kld)
    def predict(self, interaction):
        """Score the specific (user, item) pairs in ``interaction``."""
        user = interaction[self.USER_ID]
        item = interaction[self.ITEM_ID]
        scores, _, _, _ = self.forward(self.get_rating_matrix(user), self.dropout_prob)
        return scores[[torch.arange(len(item)).to(self.device), item]]
    def full_sort_predict(self, interaction):
        """Score every item for each user in the batch, flattened row-major."""
        user = interaction[self.USER_ID]
        scores, _, _, _ = self.forward(self.get_rating_matrix(user), self.dropout_prob)
        return scores.view(-1)
    def update_prior(self):
        """Copy the current encoder weights into the prior's frozen old encoder."""
        self.prior.encoder_old.load_state_dict(deepcopy(self.encoder.state_dict()))
| true | true |
1c38f295aae119c477441d6ff368dfdc82f6fb32 | 1,560 | py | Python | setup.py | mardiros/creds | d442a05e4213d7ed335b13b5873cca834d0be4c3 | [
"BSD-3-Clause"
] | null | null | null | setup.py | mardiros/creds | d442a05e4213d7ed335b13b5873cca834d0be4c3 | [
"BSD-3-Clause"
] | null | null | null | setup.py | mardiros/creds | d442a05e4213d7ed335b13b5873cca834d0be4c3 | [
"BSD-3-Clause"
import os
import sys

from setuptools import setup, find_packages

# The package relies on asyncio-era features, so require Python 3.3+.
py_version = sys.version_info[:2]
if py_version < (3, 3):
    # BUG FIX: the message previously said "websockets"; this is the
    # 'creds' package.
    raise Exception("creds requires Python >= 3.3.")

here = os.path.abspath(os.path.dirname(__file__))

NAME = 'creds'

# The long description shown on PyPI is the README plus the changelog.
with open(os.path.join(here, 'README.rst')) as readme:
    README = readme.read()
with open(os.path.join(here, 'CHANGES.rst')) as changes:
    CHANGES = changes.read()

# Runtime dependencies.
requires = [
    'pyramid',
    'gunicorn',
    'aiohttp',
    'pyramid_jinja2',
    'asyncio_redis',
    'pyramid-kvs',
    'psycopg2',
    'simplejson',
    'pyramid_yards',
    'pyramid_asyncio',
    'cryptacular',
    ]

setup(name=NAME,
      version='0.0',
      description='A Credentials API',
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
          "Programming Language :: Python",
          "Programming Language :: Python :: 3.3",
          "Programming Language :: Python :: 3.4",
          "Framework :: Pyramid",
          "Topic :: Internet :: WWW/HTTP",
          "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
          ],
      author='Guillaume Gauvrit',
      author_email='guillaume@gauvr.it',
      url='',
      keywords='web wsgi bfg pylons pyramid',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      # BUG FIX: ''.format('{}.tests'.format(NAME)) always evaluated to the
      # empty string (format args to '' are ignored), so the test suite was
      # never registered. Format the template itself instead.
      test_suite='{}.tests'.format(NAME),
      install_requires=requires,
      entry_points="""\
      [console_scripts]
      {name} = {name}.__main__:main
      [paste.app_factory]
      main = {name}:main
      """.format(name=NAME),
      )
import os
import sys

from setuptools import setup, find_packages

# The package relies on asyncio-era features, so require Python 3.3+.
py_version = sys.version_info[:2]
if py_version < (3, 3):
    # BUG FIX: the message previously said "websockets"; this is the
    # 'creds' package.
    raise Exception("creds requires Python >= 3.3.")

here = os.path.abspath(os.path.dirname(__file__))

NAME = 'creds'

# The long description shown on PyPI is the README plus the changelog.
with open(os.path.join(here, 'README.rst')) as readme:
    README = readme.read()
with open(os.path.join(here, 'CHANGES.rst')) as changes:
    CHANGES = changes.read()

# Runtime dependencies.
requires = [
    'pyramid',
    'gunicorn',
    'aiohttp',
    'pyramid_jinja2',
    'asyncio_redis',
    'pyramid-kvs',
    'psycopg2',
    'simplejson',
    'pyramid_yards',
    'pyramid_asyncio',
    'cryptacular',
    ]

setup(name=NAME,
      version='0.0',
      description='A Credentials API',
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
          "Programming Language :: Python",
          "Programming Language :: Python :: 3.3",
          "Programming Language :: Python :: 3.4",
          "Framework :: Pyramid",
          "Topic :: Internet :: WWW/HTTP",
          "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
          ],
      author='Guillaume Gauvrit',
      author_email='guillaume@gauvr.it',
      url='',
      keywords='web wsgi bfg pylons pyramid',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      # BUG FIX: ''.format('{}.tests'.format(NAME)) always evaluated to the
      # empty string (format args to '' are ignored), so the test suite was
      # never registered. Format the template itself instead.
      test_suite='{}.tests'.format(NAME),
      install_requires=requires,
      entry_points="""\
      [console_scripts]
      {name} = {name}.__main__:main
      [paste.app_factory]
      main = {name}:main
      """.format(name=NAME),
      )
1c38f463f37b5e3bc7741adb703b39a8e662b295 | 26,825 | py | Python | tools/pg_bloat_check.py | payals/pgtreats | 4aef2730f831886b2d48a39d9f5e8d9cee07178f | [
"PostgreSQL"
] | 86 | 2015-02-18T17:53:23.000Z | 2022-03-31T05:22:18.000Z | tools/pg_bloat_check.py | payals/pgtreats | 4aef2730f831886b2d48a39d9f5e8d9cee07178f | [
"PostgreSQL"
] | 2 | 2016-03-29T14:26:43.000Z | 2017-07-25T19:58:24.000Z | tools/pg_bloat_check.py | payals/pgtreats | 4aef2730f831886b2d48a39d9f5e8d9cee07178f | [
"PostgreSQL"
] | 39 | 2015-01-26T13:10:25.000Z | 2021-11-20T15:11:01.000Z | #!/usr/bin/env python
# Script is maintained at https://github.com/keithf4/pg_bloat_check
import argparse, csv, json, psycopg2, sys
from psycopg2 import extras
# Script version reported by --version.
version = "2.1.1"
# Command-line interface definition. Parsed once at module import into the
# module-global "args", which the functions below read directly.
parser = argparse.ArgumentParser(description="Provide a bloat report for PostgreSQL tables and/or indexes. This script uses the pgstattuple contrib module which must be installed first. Note that the query to check for bloat can be extremely expensive on very large databases or those with many tables. The script stores the bloat stats in a table so they can be queried again as needed without having to re-run the entire scan. The table contains a timestamp columns to show when it was obtained.")
# General scan/report options.
args_general = parser.add_argument_group(title="General options")
args_general.add_argument('-c','--connection', default="host=", help="""Connection string for use by psycopg. Defaults to "host=" (local socket).""")
args_general.add_argument('-e', '--exclude_object_file', help="""Full path to file containing a list of objects to exclude from the report (tables and/or indexes). Each line is a CSV entry in the format: objectname,bytes_wasted,percent_wasted. All objects must be schema qualified. bytes_wasted & percent_wasted are additional filter values on top of -s, -p, and -z to exclude the given object unless these values are also exceeded. Set either of these values to zero (or leave them off entirely) to exclude the object no matter what its bloat level. Comments are allowed if the line is prepended with "#". See the README.md for clearer examples of how to use this for more fine grained filtering.""")
args_general.add_argument('-f', '--format', default="simple", choices=["simple", "json", "jsonpretty", "dict"], help="Output formats. Simple is a plaintext version suitable for any output (ex: console, pipe to email). Json provides standardized json output which may be useful if taking input into something that needs a more structured format. Json also provides more details about dead tuples, empty space & free space. jsonpretty outputs in a more human readable format. Dict is the same as json but in the form of a python dictionary. Default is simple.")
args_general.add_argument('-m', '--mode', choices=["tables", "indexes", "both"], default="both", help="""Provide bloat reports for tables, indexes or both. Index bloat is always distinct from table bloat and reported as separate entries in the report. Default is "both". NOTE: GIN indexes are not supported at this time and will be skipped.""")
args_general.add_argument('-n', '--schema', help="Comma separated list of schema to include in report. All other schemas will be ignored.")
args_general.add_argument('-N', '--exclude_schema', help="Comma separated list of schemas to exclude.")
args_general.add_argument('--noscan', action="store_true", help="Set this option to have the script just read from the bloat statistics table without doing a scan of any tables again.")
args_general.add_argument('-p', '--min_wasted_percentage', type=float, default=0.1, help="Minimum percentage of wasted space an object must have to be included in the report. Default and minimum value is 0.1 (DO NOT include percent sign in given value).")
args_general.add_argument('-q', '--quick', action="store_true", help="Use the pgstattuple_approx() function instead of pgstattuple() for a quicker, but possibly less accurate bloat report. Only works for tables. Sets the 'approximate' column in the bloat statistics table to True. Note this only works in PostgreSQL 9.5+.")
args_general.add_argument('--quiet', action="store_true", help="Insert the data into the bloat stastics table without providing any console output.")
args_general.add_argument('-r', '--commit_rate', type=int, default=5, help="Sets how many tables are scanned before commiting inserts into the bloat statistics table. Helps avoid long running transactions when scanning large tables. Default is 5. Set to 0 to avoid committing until all tables are scanned. NOTE: The bloat table is truncated on every run unless --noscan is set.")
args_general.add_argument('-s', '--min_size', type=int, default=1, help="Minimum size in bytes of object to scan (table or index). Default and minimum value is 1.")
args_general.add_argument('-t', '--tablename', help="Scan for bloat only on the given table. Must be schema qualified. This always gets both table and index bloat and overrides all other filter options so you always get the bloat statistics for the table no matter what they are.")
args_general.add_argument('--version', action="store_true", help="Print version of this script.")
args_general.add_argument('-z', '--min_wasted_size', type=int, default=1, help="Minimum size of wasted space in bytes. Default and minimum is 1.")
args_general.add_argument('--debug', action="store_true", help="Output additional debugging information. Overrides quiet option.")
# Setup/one-time options (stats table creation and schema placement).
args_setup = parser.add_argument_group(title="Setup")
args_setup.add_argument('--pgstattuple_schema', help="If pgstattuple is not installed in the default search path, use this option to designate the schema where it is installed.")
args_setup.add_argument('--bloat_schema', help="Set the schema that the bloat report table is in if it's not in the default search path. Note this option can also be set when running --create_stats_table to set which schema you want the table created.")
args_setup.add_argument('--create_stats_table', action="store_true", help="Create the required tables that the bloat report uses (bloat_stats + two child tables). Places table in default search path unless --bloat_schema is set.")
args = parser.parse_args()
def check_pgstattuple(conn):
    """Verify the pgstattuple extension is installed and return its version string.

    Exits with status 2 when the extension is missing, or when
    --pgstattuple_schema was given but does not match the schema the
    extension is actually installed in.
    """
    sql = "SELECT e.extversion, n.nspname FROM pg_catalog.pg_extension e JOIN pg_catalog.pg_namespace n ON e.extnamespace = n.oid WHERE extname = 'pgstattuple'"
    cur = conn.cursor()
    cur.execute(sql)
    # Row is (extversion, schema name) or None when not installed.
    pgstattuple_info = cur.fetchone()
    if pgstattuple_info == None:
        print("pgstattuple extension not found. Please ensure it is installed in the database this script is connecting to.")
        sys.exit(2)
    if args.pgstattuple_schema != None:
        if args.pgstattuple_schema != pgstattuple_info[1]:
            print("pgstattuple not found in the schema given by --pgstattuple_schema option: " + args.pgstattuple_schema + ". Found instead in: " + pgstattuple_info[1]+".")
            sys.exit(2)
    return pgstattuple_info[0]
def create_conn():
    """Open and return a psycopg2 connection using the --connection string."""
    conn = psycopg2.connect(args.connection)
    return conn
def close_conn(conn):
    """Close the given database connection."""
    conn.close()
def create_list(list_type, list_items):
    """Build a filter list from the CLI options.

    ``csv`` mode splits a comma-separated string into a plain list.
    ``file`` mode reads an exclusion file (objectname,max_wasted,max_perc per
    line) and returns one dict per non-comment row, with missing numeric
    fields defaulting to 0.
    """
    result = []
    if list_type == "csv":
        result = list_items.split(',')
    elif list_type == "file":
        with open(list_items, 'r') as infile:
            reader = csv.DictReader(infile, fieldnames=['objectname', 'max_wasted', 'max_perc'])
            for row in reader:
                # Lines starting with "#" are comments.
                if row['objectname'].startswith('#'):
                    continue
                row['objectname'] = row['objectname'].strip()
                row['max_wasted'] = int(row['max_wasted']) if row['max_wasted'] != None else 0
                row['max_perc'] = float(row['max_perc']) if row['max_perc'] != None else 0
                result.append(row)
    return result
def create_stats_table(conn):
    """(Re)create the bloat statistics tables.

    Creates the parent ``bloat_stats`` plus its two inheriting children
    ``bloat_tables`` and ``bloat_indexes``. Any existing tables are dropped
    first (CASCADE), so previously collected statistics are lost. Tables are
    created in args.bloat_schema when given, otherwise in the default search
    path.
    """
    # Schema-qualify the three table names when --bloat_schema was given.
    if args.bloat_schema != None:
        prefix = args.bloat_schema + "."
    else:
        prefix = ""
    parent_sql = prefix + "bloat_stats"
    tables_sql = prefix + "bloat_tables"
    indexes_sql = prefix + "bloat_indexes"

    cur = conn.cursor()

    def run(sql):
        # Helper for the repeated execute-with-optional-debug pattern.
        # BUG FIX: the old code passed these parameterless statements through
        # cur.mogrify() for debug output; mogrify() returns bytes on
        # Python 3, so they printed as b'...'. Print the SQL string directly.
        if args.debug:
            print("sql: " + sql)
        cur.execute(sql)

    run("DROP TABLE IF EXISTS " + parent_sql + " CASCADE")
    run("CREATE TABLE " + parent_sql + """ (schemaname text NOT NULL
            , objectname text NOT NULL
            , objecttype text NOT NULL
            , size_bytes bigint
            , live_tuple_count bigint
            , live_tuple_percent float8
            , dead_tuple_count bigint
            , dead_tuple_size_bytes bigint
            , dead_tuple_percent float8
            , free_space_bytes bigint
            , free_percent float8
            , stats_timestamp timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP
            , approximate boolean NOT NULL DEFAULT false)""")
    # The children inherit the parent so querying bloat_stats returns both.
    run("CREATE TABLE " + tables_sql + " (LIKE " + parent_sql + " INCLUDING ALL) INHERITS (" + parent_sql + ")")
    run("CREATE TABLE " + indexes_sql + " (LIKE " + parent_sql + " INCLUDING ALL) INHERITS (" + parent_sql + ")")
    run("COMMENT ON TABLE " + parent_sql + " IS 'Table providing raw data for table & index bloat'")
    run("COMMENT ON TABLE " + tables_sql + " IS 'Table providing raw data for table bloat'")
    run("COMMENT ON TABLE " + indexes_sql + " IS 'Table providing raw data for index bloat'")
    conn.commit()
    cur.close()
def get_bloat(conn, exclude_schema_list, include_schema_list, exclude_object_list):
    """Scan tables/indexes with pgstattuple and populate the bloat statistics tables.

    Args:
        conn: open psycopg2 connection.
        exclude_schema_list: tuple of schema names to skip (ignored when
            include_schema_list is non-empty).
        include_schema_list: tuple of schema names to restrict the scan to.
        exclude_object_list: list of dicts parsed from --exclude_object_file
            with keys objectname/max_wasted/max_perc.

    The relevant bloat_tables/bloat_indexes table(s) are truncated first,
    then one row per qualifying object is inserted, committing every
    args.commit_rate objects.
    """
    def _render(cur, sql, params=None):
        # BUG FIX: psycopg2's mogrify() returns bytes on Python 3; the old
        # code concatenated it directly with str in debug prints, raising
        # TypeError under --debug. Decode before returning.
        rendered = cur.mogrify(sql, params) if params is not None else cur.mogrify(sql)
        if isinstance(rendered, bytes):
            rendered = rendered.decode()
        return rendered

    pg_version = get_pg_version(conn)
    commit_counter = 0
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    # Candidate tables/materialized views (temporary relations excluded).
    sql_tables = """ SELECT c.oid, c.relkind, c.relname, n.nspname, 'false' as indisprimary
                FROM pg_catalog.pg_class c
                JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
                WHERE relkind IN ('r', 'm')
                AND c.relpersistence <> 't' """
    # Candidate indexes; GIN indexes are not supported by this report.
    sql_indexes = """ SELECT c.oid, c.relkind, c.relname, n.nspname, i.indisprimary
                    FROM pg_catalog.pg_class c
                    JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
                    JOIN pg_catalog.pg_index i ON c.oid = i.indexrelid
                    JOIN pg_catalog.pg_am a ON c.relam = a.oid
                    WHERE c.relkind = 'i'
                    AND a.amname <> 'gin' """
    # pg_index.indislive only exists in PostgreSQL 9.3+.
    # BUG FIX: the old check "major >= 9 and minor >= 3" wrongly excluded
    # versions such as 10.0-10.2 and 11.1 where the second component is < 3.
    major = int(pg_version[0])
    if major > 9 or (major == 9 and int(pg_version[1]) >= 3):
        sql_indexes += " AND indislive = 'true' "

    if args.tablename != None:
        # -t overrides all filters: always get this table plus its indexes.
        sql_tables += " AND n.nspname||'.'||c.relname = %s "
        sql_indexes += " AND i.indrelid::regclass = %s::regclass "
        sql_class = sql_tables + """
                    UNION
                    """ + sql_indexes
        if args.debug:
            print("sql_class: " + _render(cur, sql_class, [args.tablename, args.tablename]))
        cur.execute(sql_class, [args.tablename, args.tablename])
    else:
        # IN clauses work with python tuples; lists were converted by the caller.
        filter_list = None
        if include_schema_list:
            sql_tables += " AND n.nspname IN %s"
            sql_indexes += " AND n.nspname IN %s"
            filter_list = include_schema_list
        elif exclude_schema_list:
            sql_tables += " AND n.nspname NOT IN %s"
            sql_indexes += " AND n.nspname NOT IN %s"
            filter_list = exclude_schema_list
        if args.mode == 'tables':
            sql_class = sql_tables
        elif args.mode == 'indexes':
            sql_class = sql_indexes
        else:  # argparse restricts --mode to tables/indexes/both
            sql_class = sql_tables + """
                    UNION
                    """ + sql_indexes
        if args.mode == "both":
            if args.debug:
                print("sql_class: " + _render(cur, sql_class, (filter_list, filter_list)))
            cur.execute(sql_class, (filter_list, filter_list))
        else:
            if args.debug:
                print("sql_class: " + _render(cur, sql_class, (filter_list,)))
            cur.execute(sql_class, (filter_list,))
    object_list = cur.fetchall()

    # Start each scan from a clean slate for the object type(s) requested.
    sql = "TRUNCATE "
    if args.bloat_schema:
        sql += args.bloat_schema + "."
    if args.mode == "tables" or args.mode == "both":
        cur.execute(sql + "bloat_tables")
    if args.mode == "indexes" or args.mode == "both":
        cur.execute(sql + "bloat_indexes")
    conn.commit()

    approximate = bool(args.quick)

    for o in object_list:
        if args.debug:
            print(o)

        if exclude_object_list and args.tablename == None:
            # Skip entirely when listed in the exclude file with both
            # thresholds at zero (unconditional exclusion).
            match_found = False
            for e in exclude_object_list:
                if (e['objectname'] == o['nspname'] + "." + o['relname']) and (e['max_wasted'] == 0) and (e['max_perc'] == 0):
                    match_found = True
            if match_found:
                continue

        # The object may have been dropped since the initial list was built.
        sql = """ SELECT count(*) FROM pg_catalog.pg_class c
                JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
                WHERE n.nspname = %s
                AND c.relname = %s """
        cur.execute(sql, [o['nspname'], o['relname']])
        result = cur.fetchone()[0]
        if args.debug:
            print("Checking for table existance before scanning: " + str(result))
        if result == 0:
            continue

        # Build the pgstattuple / pgstattuple_approx call; the approx
        # variant's columns are aliased to the exact-function names so the
        # INSERT below can treat both uniformly.
        if args.quick:
            sql = "SELECT table_len, approx_tuple_count AS tuple_count, approx_tuple_len AS tuple_len, approx_tuple_percent AS tuple_percent, dead_tuple_count, "
            sql += "dead_tuple_len, dead_tuple_percent, approx_free_space AS free_space, approx_free_percent AS free_percent FROM "
        else:
            sql = "SELECT table_len, tuple_count, tuple_len, tuple_percent, dead_tuple_count, dead_tuple_len, dead_tuple_percent, free_space, free_percent FROM "
        if args.pgstattuple_schema != None:
            sql += " \"" + args.pgstattuple_schema + "\"."
        if args.quick:
            sql += "pgstattuple_approx(%s::regclass) "
            if args.tablename == None:
                sql += " WHERE table_len > %s"
                sql += " AND ( (dead_tuple_len + approx_free_space) > %s OR (dead_tuple_percent + approx_free_percent) > %s )"
        else:
            sql += "pgstattuple(%s::regclass) "
            if args.tablename == None:
                sql += " WHERE table_len > %s"
                sql += " AND ( (dead_tuple_len + free_space) > %s OR (dead_tuple_percent + free_percent) > %s )"

        if args.tablename == None:
            params = [o['oid'], args.min_size, args.min_wasted_size, args.min_wasted_percentage]
        else:
            # -t gets stats unconditionally, no threshold filters.
            params = [o['oid']]
        if args.debug:
            print("sql: " + _render(cur, sql, params))
        cur.execute(sql, params)
        stats = cur.fetchall()
        if args.debug:
            print(stats)

        if stats:  # empty result set means the object fell below the thresholds
            if exclude_object_list and args.tablename == None:
                # Per-object thresholds from the exclude file: keep the row
                # only when wasted space/percent exceed them.
                wasted_space = stats[0]['dead_tuple_len'] + stats[0]['free_space']
                wasted_perc = stats[0]['dead_tuple_percent'] + stats[0]['free_percent']
                for e in exclude_object_list:
                    if (e['objectname'] == o['nspname'] + "." + o['relname']):
                        if ( (e['max_wasted'] < wasted_space) or (e['max_perc'] < wasted_perc) ):
                            match_found = False
                        else:
                            match_found = True
                if match_found:
                    continue

            sql = "INSERT INTO "
            if args.bloat_schema != None:
                sql += args.bloat_schema + "."
            if o['relkind'] == "r" or o['relkind'] == "m":
                sql += "bloat_tables"
                objecttype = "table" if o['relkind'] == "r" else "materialized_view"
            elif o['relkind'] == "i":
                sql += "bloat_indexes"
                objecttype = "index_pk" if o['indisprimary'] == True else "index"
            sql += """ (schemaname
                    , objectname
                    , objecttype
                    , size_bytes
                    , live_tuple_count
                    , live_tuple_percent
                    , dead_tuple_count
                    , dead_tuple_size_bytes
                    , dead_tuple_percent
                    , free_space_bytes
                    , free_percent
                    , approximate)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """
            values = [o['nspname']
                      , o['relname']
                      , objecttype
                      , stats[0]['table_len']
                      , stats[0]['tuple_count']
                      , stats[0]['tuple_percent']
                      , stats[0]['dead_tuple_count']
                      , stats[0]['dead_tuple_len']
                      , stats[0]['dead_tuple_percent']
                      , stats[0]['free_space']
                      , stats[0]['free_percent']
                      , approximate
                      ]
            if args.debug:
                print("insert sql: " + _render(cur, sql, values))
            cur.execute(sql, values)

            commit_counter += 1
            # Commit in batches to avoid one long-running transaction.
            if args.commit_rate > 0 and (commit_counter % args.commit_rate == 0):
                if args.debug:
                    print("Batch committed. Object scanned count: " + str(commit_counter))
                conn.commit()

    conn.commit()
    cur.close()
## end get_bloat()
def get_pg_version(conn):
    """Return the server version string split on dots, e.g. ['9', '6', '3']."""
    cur = conn.cursor()
    cur.execute("SELECT current_setting('server_version')")
    version_string = cur.fetchone()[0]
    return version_string.split(".")
def print_report(result_list):
    """Print the report: one line per entry for the simple format, otherwise
    dump the json/dict result as-is.
    """
    if args.format == "simple":
        for r in result_list:
            print(r)
    else:
        print(result_list)
def print_version():
    """Print this script's version string."""
    print("Version: " + version)
if __name__ == "__main__":
    if args.version:
        print_version()
        sys.exit(1)

    if args.schema != None and args.exclude_schema != None:
        print("--schema and --exclude_schema are exclusive options and cannot be set together")
        sys.exit(2)

    conn = create_conn()
    # Validate pgstattuple availability and the --quick prerequisites.
    pgstattuple_version = float(check_pgstattuple(conn))
    if args.quick:
        if pgstattuple_version < 1.3:
            print("--quick option requires pgstattuple version 1.3 or greater (PostgreSQL 9.5)")
            sys.exit(2)
        if (args.mode == "indexes" or args.mode == "both"):
            print("--quick option can only be used with --mode=tables")
            sys.exit(2)

    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    if args.create_stats_table:
        create_stats_table(conn)
        close_conn(conn)
        sys.exit(1)

    # The stats table must exist before any scan or report can run.
    sql = "SELECT tablename FROM pg_catalog.pg_tables WHERE tablename = %s"
    if args.bloat_schema != None:
        sql += " AND schemaname = %s"
        cur.execute(sql, ['bloat_stats', args.bloat_schema])
    else:
        cur.execute(sql, ['bloat_stats'])
    table_exists = cur.fetchone()
    if table_exists == None:
        print("Required statistics table does not exist. Please run --create_stats_table first before running a bloat scan.")
        sys.exit(2)

    # Build schema/object filter lists; TOAST tables are always excluded.
    if args.exclude_schema != None:
        exclude_schema_list = create_list('csv', args.exclude_schema)
    else:
        exclude_schema_list = []
    exclude_schema_list.append('pg_toast')

    if args.schema != None:
        include_schema_list = create_list('csv', args.schema)
    else:
        include_schema_list = []

    if args.exclude_object_file != None:
        exclude_object_list = create_list('file', args.exclude_object_file)
    else:
        exclude_object_list = []

    if args.noscan == False:
        get_bloat(conn, tuple(exclude_schema_list), tuple(include_schema_list), exclude_object_list)

    # Final commit to ensure the transaction that inserted stats data closes.
    conn.commit()

    counter = 1
    result_list = []
    if args.quiet == False or args.debug == True:
        simple_cols = "schemaname, objectname, objecttype, (dead_tuple_percent + free_percent) AS total_waste_percent, pg_size_pretty(dead_tuple_size_bytes + free_space_bytes) AS total_wasted_size"
        dict_cols = "schemaname, objectname, objecttype, size_bytes, live_tuple_count, live_tuple_percent, dead_tuple_count, dead_tuple_size_bytes, dead_tuple_percent, free_space_bytes, free_percent, approximate"
        if args.format == "simple":
            sql = "SELECT " + simple_cols + " FROM "
        elif args.format == "dict" or args.format == "json" or args.format == "jsonpretty":
            sql = "SELECT " + dict_cols + " FROM "
        else:
            # FIX: added the missing comma after 'dict' in the message.
            print("Unsupported --format given. Use 'simple', 'dict', 'json', or 'jsonpretty'.")
            sys.exit(2)
        if args.bloat_schema != None:
            sql += args.bloat_schema + "."
        # Parent bloat_stats covers both children when mode is "both".
        if args.mode == "tables":
            sql += "bloat_tables"
        elif args.mode == "indexes":
            sql += "bloat_indexes"
        else:
            sql += "bloat_stats"
        sql += " ORDER BY (dead_tuple_size_bytes + free_space_bytes) DESC"
        cur.execute(sql)
        result = cur.fetchall()
        for r in result:
            if args.format == "simple":
                # Right-justify the waste figures with dot leaders to column 100.
                justify_space = 100 - len(str(counter) + ". " + r['schemaname'] + "." + r['objectname'] + "(" + str(r['total_waste_percent']) + "%)" + r['total_wasted_size'] + " wasted")
                result_list.append(str(counter) + ". " + r['schemaname'] + "." + r['objectname'] + "."*justify_space + "(" + str(r['total_waste_percent']) + "%) " + r['total_wasted_size'] + " wasted")
                counter += 1
            elif args.format == "dict" or args.format == "json" or args.format == "jsonpretty":
                result_dict = dict([('schemaname', r['schemaname'])
                    , ('objectname', r['objectname'])
                    , ('objecttype', r['objecttype'])
                    , ('size_bytes', int(r['size_bytes']))
                    , ('live_tuple_count', int(r['live_tuple_count']))
                    , ('live_tuple_percent', str(r['live_tuple_percent'])+"%" )
                    , ('dead_tuple_count', int(r['dead_tuple_count']))
                    , ('dead_tuple_size_bytes', int(r['dead_tuple_size_bytes']))
                    , ('dead_tuple_percent', str(r['dead_tuple_percent'])+"%" )
                    , ('free_space_bytes', int(r['free_space_bytes']))
                    # BUG FIX: this previously reported dead_tuple_percent
                    # under the free_percent key.
                    , ('free_percent', str(r['free_percent'])+"%" )
                    , ('approximate', r['approximate'])
                    ])
                result_list.append(result_dict)
        if args.format == "json":
            result_list = json.dumps(result_list)
        elif args.format == "jsonpretty":
            result_list = json.dumps(result_list, indent=4, separators=(',',': '))

        if len(result_list) >= 1:
            print_report(result_list)
        else:
            if args.quiet == False:
                print("No bloat found for given parameters")

    close_conn(conn)
"""
LICENSE AND COPYRIGHT
---------------------
pg_bloat_check.py is released under the PostgreSQL License, a liberal Open Source license, similar to the BSD or MIT licenses.
Copyright (c) 2016 Keith Fiske
Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE AUTHOR HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
| 52.087379 | 701 | 0.586692 |
import argparse, csv, json, psycopg2, sys
from psycopg2 import extras
version = "2.1.1"
parser = argparse.ArgumentParser(description="Provide a bloat report for PostgreSQL tables and/or indexes. This script uses the pgstattuple contrib module which must be installed first. Note that the query to check for bloat can be extremely expensive on very large databases or those with many tables. The script stores the bloat stats in a table so they can be queried again as needed without having to re-run the entire scan. The table contains a timestamp columns to show when it was obtained.")
args_general = parser.add_argument_group(title="General options")
args_general.add_argument('-c','--connection', default="host=", help="""Connection string for use by psycopg. Defaults to "host=" (local socket).""")
args_general.add_argument('-e', '--exclude_object_file', help="""Full path to file containing a list of objects to exclude from the report (tables and/or indexes). Each line is a CSV entry in the format: objectname,bytes_wasted,percent_wasted. All objects must be schema qualified. bytes_wasted & percent_wasted are additional filter values on top of -s, -p, and -z to exclude the given object unless these values are also exceeded. Set either of these values to zero (or leave them off entirely) to exclude the object no matter what its bloat level. Comments are allowed if the line is prepended with "#". See the README.md for clearer examples of how to use this for more fine grained filtering.""")
args_general.add_argument('-f', '--format', default="simple", choices=["simple", "json", "jsonpretty", "dict"], help="Output formats. Simple is a plaintext version suitable for any output (ex: console, pipe to email). Json provides standardized json output which may be useful if taking input into something that needs a more structured format. Json also provides more details about dead tuples, empty space & free space. jsonpretty outputs in a more human readable format. Dict is the same as json but in the form of a python dictionary. Default is simple.")
args_general.add_argument('-m', '--mode', choices=["tables", "indexes", "both"], default="both", help="""Provide bloat reports for tables, indexes or both. Index bloat is always distinct from table bloat and reported as separate entries in the report. Default is "both". NOTE: GIN indexes are not supported at this time and will be skipped.""")
args_general.add_argument('-n', '--schema', help="Comma separated list of schema to include in report. All other schemas will be ignored.")
args_general.add_argument('-N', '--exclude_schema', help="Comma separated list of schemas to exclude.")
args_general.add_argument('--noscan', action="store_true", help="Set this option to have the script just read from the bloat statistics table without doing a scan of any tables again.")
args_general.add_argument('-p', '--min_wasted_percentage', type=float, default=0.1, help="Minimum percentage of wasted space an object must have to be included in the report. Default and minimum value is 0.1 (DO NOT include percent sign in given value).")
args_general.add_argument('-q', '--quick', action="store_true", help="Use the pgstattuple_approx() function instead of pgstattuple() for a quicker, but possibly less accurate bloat report. Only works for tables. Sets the 'approximate' column in the bloat statistics table to True. Note this only works in PostgreSQL 9.5+.")
args_general.add_argument('--quiet', action="store_true", help="Insert the data into the bloat stastics table without providing any console output.")
args_general.add_argument('-r', '--commit_rate', type=int, default=5, help="Sets how many tables are scanned before commiting inserts into the bloat statistics table. Helps avoid long running transactions when scanning large tables. Default is 5. Set to 0 to avoid committing until all tables are scanned. NOTE: The bloat table is truncated on every run unless --noscan is set.")
args_general.add_argument('-s', '--min_size', type=int, default=1, help="Minimum size in bytes of object to scan (table or index). Default and minimum value is 1.")
args_general.add_argument('-t', '--tablename', help="Scan for bloat only on the given table. Must be schema qualified. This always gets both table and index bloat and overrides all other filter options so you always get the bloat statistics for the table no matter what they are.")
args_general.add_argument('--version', action="store_true", help="Print version of this script.")
args_general.add_argument('-z', '--min_wasted_size', type=int, default=1, help="Minimum size of wasted space in bytes. Default and minimum is 1.")
args_general.add_argument('--debug', action="store_true", help="Output additional debugging information. Overrides quiet option.")
args_setup = parser.add_argument_group(title="Setup")
args_setup.add_argument('--pgstattuple_schema', help="If pgstattuple is not installed in the default search path, use this option to designate the schema where it is installed.")
args_setup.add_argument('--bloat_schema', help="Set the schema that the bloat report table is in if it's not in the default search path. Note this option can also be set when running --create_stats_table to set which schema you want the table created.")
args_setup.add_argument('--create_stats_table', action="store_true", help="Create the required tables that the bloat report uses (bloat_stats + two child tables). Places table in default search path unless --bloat_schema is set.")
args = parser.parse_args()
def check_pgstattuple(conn):
sql = "SELECT e.extversion, n.nspname FROM pg_catalog.pg_extension e JOIN pg_catalog.pg_namespace n ON e.extnamespace = n.oid WHERE extname = 'pgstattuple'"
cur = conn.cursor()
cur.execute(sql)
pgstattuple_info = cur.fetchone()
if pgstattuple_info == None:
print("pgstattuple extension not found. Please ensure it is installed in the database this script is connecting to.")
sys.exit(2)
if args.pgstattuple_schema != None:
if args.pgstattuple_schema != pgstattuple_info[1]:
print("pgstattuple not found in the schema given by --pgstattuple_schema option: " + args.pgstattuple_schema + ". Found instead in: " + pgstattuple_info[1]+".")
sys.exit(2)
return pgstattuple_info[0]
def create_conn():
conn = psycopg2.connect(args.connection)
return conn
def close_conn(conn):
conn.close()
def create_list(list_type, list_items):
split_list = []
if list_type == "csv":
split_list = list_items.split(',')
elif list_type == "file":
with open(list_items, 'r') as csvfile:
objectreader = csv.DictReader(csvfile, fieldnames=['objectname', 'max_wasted', 'max_perc'])
for o in objectreader:
if not o['objectname'].startswith('
o['objectname'] = o['objectname'].strip()
if o['max_wasted'] != None:
o['max_wasted'] = int(o['max_wasted'])
else:
o['max_wasted'] = 0
if o['max_perc'] != None:
o['max_perc'] = float(o['max_perc'])
else:
o['max_perc'] = 0
split_list.append(o)
return split_list
def create_stats_table(conn):
if args.bloat_schema != None:
parent_sql = args.bloat_schema + "." + "bloat_stats"
tables_sql = args.bloat_schema + "." + "bloat_tables"
indexes_sql = args.bloat_schema + "." + "bloat_indexes"
else:
parent_sql = "bloat_stats"
tables_sql = "bloat_tables"
indexes_sql = "bloat_indexes"
drop_sql = "DROP TABLE IF EXISTS " + parent_sql + " CASCADE"
sql = "CREATE TABLE " + parent_sql + """ (schemaname text NOT NULL
, objectname text NOT NULL
, objecttype text NOT NULL
, size_bytes bigint
, live_tuple_count bigint
, live_tuple_percent float8
, dead_tuple_count bigint
, dead_tuple_size_bytes bigint
, dead_tuple_percent float8
, free_space_bytes bigint
, free_percent float8
, stats_timestamp timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP
, approximate boolean NOT NULL DEFAULT false)"""
cur = conn.cursor()
if args.debug:
print(cur.mogrify("drop_sql: " + drop_sql))
cur.execute(drop_sql)
if args.debug:
print(cur.mogrify("sql: " + sql))
cur.execute(sql)
sql = "CREATE TABLE " + tables_sql + " (LIKE " + parent_sql + " INCLUDING ALL) INHERITS (" + parent_sql + ")"
if args.debug:
print(cur.mogrify("sql: " + sql))
cur.execute(sql)
sql = "CREATE TABLE " + indexes_sql + " (LIKE " + parent_sql + " INCLUDING ALL) INHERITS (" + parent_sql + ")"
if args.debug:
print(cur.mogrify("sql: " + sql))
cur.execute(sql)
sql = "COMMENT ON TABLE " + parent_sql + " IS 'Table providing raw data for table & index bloat'"
if args.debug:
print(cur.mogrify("sql: " + sql))
cur.execute(sql)
sql = "COMMENT ON TABLE " + tables_sql + " IS 'Table providing raw data for table bloat'"
if args.debug:
print(cur.mogrify("sql: " + sql))
cur.execute(sql)
sql = "COMMENT ON TABLE " + indexes_sql + " IS 'Table providing raw data for index bloat'"
if args.debug:
print(cur.mogrify("sql: " + sql))
cur.execute(sql)
conn.commit()
cur.close()
def get_bloat(conn, exclude_schema_list, include_schema_list, exclude_object_list):
pg_version = get_pg_version(conn)
sql = ""
commit_counter = 0
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
sql_tables = """ SELECT c.oid, c.relkind, c.relname, n.nspname, 'false' as indisprimary
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
WHERE relkind IN ('r', 'm')
AND c.relpersistence <> 't' """
sql_indexes = """ SELECT c.oid, c.relkind, c.relname, n.nspname, i.indisprimary
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
JOIN pg_catalog.pg_index i ON c.oid = i.indexrelid
JOIN pg_catalog.pg_am a ON c.relam = a.oid
WHERE c.relkind = 'i'
AND a.amname <> 'gin' """
if int(pg_version[0]) >= 9 and int(pg_version[1]) >= 3:
sql_indexes += " AND indislive = 'true' "
if args.tablename != None:
sql_tables += " AND n.nspname||'.'||c.relname = %s "
sql_indexes += " AND i.indrelid::regclass = %s::regclass "
sql_class = sql_tables + """
UNION
""" + sql_indexes
if args.debug:
print("sql_class: " + cur.mogrify(sql_class, [args.tablename, args.tablename] ) )
cur.execute(sql_class, [args.tablename, args.tablename] )
else:
# IN clauses work with python tuples. lists were converted by get_bloat() call
if include_schema_list:
sql_tables += " AND n.nspname IN %s"
sql_indexes += " AND n.nspname IN %s"
filter_list = include_schema_list
elif exclude_schema_list:
sql_tables += " AND n.nspname NOT IN %s"
sql_indexes += " AND n.nspname NOT IN %s"
filter_list = exclude_schema_list
if args.mode == 'tables':
sql_class = sql_tables
elif args.mode == 'indexes':
sql_class = sql_indexes
elif args.mode == "both":
sql_class = sql_tables + """
UNION
""" + sql_indexes
if args.mode == "both":
if args.debug:
print("sql_class: " + cur.mogrify(sql_class, (filter_list,filter_list) ))
cur.execute(sql_class, (filter_list,filter_list))
elif args.mode == "tables" or args.mode == "indexes":
if args.debug:
print("sql_class: " + cur.mogrify(sql_class, (filter_list,) ))
cur.execute(sql_class, (filter_list,) )
else:
cur.execute(sql)
object_list = cur.fetchall()
sql = "TRUNCATE "
if args.bloat_schema:
sql += args.bloat_schema + "."
if args.mode == "tables" or args.mode == "both":
sql_table = sql + "bloat_tables"
cur.execute(sql_table)
if args.mode == "indexes" or args.mode == "both":
sql_index = sql + "bloat_indexes"
cur.execute(sql_index)
conn.commit()
if args.quick:
approximate = True
else:
approximate = False
for o in object_list:
if args.debug:
print(o)
if exclude_object_list and args.tablename == None:
# completely skip object being scanned if it's in the excluded file list with max values equal to zero
match_found = False
for e in exclude_object_list:
if (e['objectname'] == o['nspname'] + "." + o['relname']) and (e['max_wasted'] == 0) and (e['max_perc'] == 0):
match_found = True
if match_found:
continue
sql = """ SELECT count(*) FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = %s
AND c.relname = %s """
cur.execute(sql, [o['nspname'], o['relname']])
result = cur.fetchone()[0]
if args.debug:
print("Checking for table existance before scanning: " + str(result))
if result == 0:
continue
if args.quick:
sql = "SELECT table_len, approx_tuple_count AS tuple_count, approx_tuple_len AS tuple_len, approx_tuple_percent AS tuple_percent, dead_tuple_count, "
sql += "dead_tuple_len, dead_tuple_percent, approx_free_space AS free_space, approx_free_percent AS free_percent FROM "
else:
sql = "SELECT table_len, tuple_count, tuple_len, tuple_percent, dead_tuple_count, dead_tuple_len, dead_tuple_percent, free_space, free_percent FROM "
if args.pgstattuple_schema != None:
sql += " \"" + args.pgstattuple_schema + "\"."
if args.quick:
sql += "pgstattuple_approx(%s::regclass) "
if args.tablename == None:
sql += " WHERE table_len > %s"
sql += " AND ( (dead_tuple_len + approx_free_space) > %s OR (dead_tuple_percent + approx_free_percent) > %s )"
else:
sql += "pgstattuple(%s::regclass) "
if args.tablename == None:
sql += " WHERE table_len > %s"
sql += " AND ( (dead_tuple_len + free_space) > %s OR (dead_tuple_percent + free_percent) > %s )"
if args.tablename == None:
if args.debug:
print("sql: " + cur.mogrify(sql, [o['oid'], args.min_size, args.min_wasted_size, args.min_wasted_percentage]))
cur.execute(sql, [o['oid'], args.min_size, args.min_wasted_size, args.min_wasted_percentage])
else:
if args.debug:
print("sql: " + cur.mogrify(sql, [o['oid']]))
cur.execute(sql, [o['oid']])
stats = cur.fetchall()
if args.debug:
print(stats)
if stats:
if exclude_object_list and args.tablename == None:
wasted_space = stats[0]['dead_tuple_len'] + stats[0]['free_space']
wasted_perc = stats[0]['dead_tuple_percent'] + stats[0]['free_percent']
for e in exclude_object_list:
if (e['objectname'] == o['nspname'] + "." + o['relname']):
if ( (e['max_wasted'] < wasted_space ) or (e['max_perc'] < wasted_perc ) ):
match_found = False
else:
match_found = True
if match_found:
continue
sql = "INSERT INTO "
if args.bloat_schema != None:
sql += args.bloat_schema + "."
if o['relkind'] == "r" or o['relkind'] == "m":
sql+= "bloat_tables"
if o['relkind'] == "r":
objecttype = "table"
else:
objecttype = "materialized_view"
elif o['relkind'] == "i":
sql+= "bloat_indexes"
if o['indisprimary'] == True:
objecttype = "index_pk"
else:
objecttype = "index"
sql += """ (schemaname
, objectname
, objecttype
, size_bytes
, live_tuple_count
, live_tuple_percent
, dead_tuple_count
, dead_tuple_size_bytes
, dead_tuple_percent
, free_space_bytes
, free_percent
, approximate)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """
if args.debug:
print("insert sql: " + cur.mogrify(sql, [ o['nspname']
, o['relname']
, objecttype
, stats[0]['table_len']
, stats[0]['tuple_count']
, stats[0]['tuple_percent']
, stats[0]['dead_tuple_count']
, stats[0]['dead_tuple_len']
, stats[0]['dead_tuple_percent']
, stats[0]['free_space']
, stats[0]['free_percent']
, approximate
]))
cur.execute(sql, [ o['nspname']
, o['relname']
, objecttype
, stats[0]['table_len']
, stats[0]['tuple_count']
, stats[0]['tuple_percent']
, stats[0]['dead_tuple_count']
, stats[0]['dead_tuple_len']
, stats[0]['dead_tuple_percent']
, stats[0]['free_space']
, stats[0]['free_percent']
, approximate
])
commit_counter += 1
if args.commit_rate > 0 and (commit_counter % args.commit_rate == 0):
if args.debug:
print("Batch committed. Object scanned count: " + str(commit_counter))
conn.commit()
conn.commit()
cur.close()
sql = "SELECT current_setting('server_version')"
cur = conn.cursor()
cur.execute(sql)
pg_version = cur.fetchone()[0].split(".")
return pg_version
def print_report(result_list):
if args.format == "simple":
for r in result_list:
print(r)
else:
print(result_list)
def print_version():
print("Version: " + version)
if __name__ == "__main__":
if args.version:
print_version()
sys.exit(1)
if args.schema != None and args.exclude_schema != None:
print("--schema and --exclude_schema are exclusive options and cannot be set together")
sys.exit(2)
conn = create_conn()
pgstattuple_version = float(check_pgstattuple(conn))
if args.quick:
if pgstattuple_version < 1.3:
print("--quick option requires pgstattuple version 1.3 or greater (PostgreSQL 9.5)")
sys.exit(2)
if (args.mode == "indexes" or args.mode == "both"):
print("--quick option can only be used with --mode=tables")
sys.exit(2)
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if args.create_stats_table:
create_stats_table(conn)
close_conn(conn)
sys.exit(1)
sql = "SELECT tablename FROM pg_catalog.pg_tables WHERE tablename = %s"
if args.bloat_schema != None:
sql += " AND schemaname = %s"
cur.execute(sql, ['bloat_stats', args.bloat_schema])
else:
cur.execute(sql, ['bloat_stats'])
table_exists = cur.fetchone()
if table_exists == None:
print("Required statistics table does not exist. Please run --create_stats_table first before running a bloat scan.")
sys.exit(2)
if args.exclude_schema != None:
exclude_schema_list = create_list('csv', args.exclude_schema)
else:
exclude_schema_list = []
exclude_schema_list.append('pg_toast')
if args.schema != None:
include_schema_list = create_list('csv', args.schema)
else:
include_schema_list = []
if args.exclude_object_file != None:
exclude_object_list = create_list('file', args.exclude_object_file)
else:
exclude_object_list = []
if args.noscan == False:
get_bloat(conn, tuple(exclude_schema_list), tuple(include_schema_list), exclude_object_list)
conn.commit()
counter = 1
result_list = []
if args.quiet == False or args.debug == True:
simple_cols = "schemaname, objectname, objecttype, (dead_tuple_percent + free_percent) AS total_waste_percent, pg_size_pretty(dead_tuple_size_bytes + free_space_bytes) AS total_wasted_size"
dict_cols = "schemaname, objectname, objecttype, size_bytes, live_tuple_count, live_tuple_percent, dead_tuple_count, dead_tuple_size_bytes, dead_tuple_percent, free_space_bytes, free_percent, approximate"
if args.format == "simple":
sql = "SELECT " + simple_cols + " FROM "
elif args.format == "dict" or args.format=="json" or args.format=="jsonpretty":
sql = "SELECT " + dict_cols + " FROM "
else:
print("Unsupported --format given. Use 'simple', 'dict' 'json', or 'jsonpretty'.")
sys.exit(2)
if args.bloat_schema != None:
sql += args.bloat_schema + "."
if args.mode == "tables":
sql += "bloat_tables"
elif args.mode == "indexes":
sql += "bloat_indexes"
else:
sql += "bloat_stats"
sql += " ORDER BY (dead_tuple_size_bytes + free_space_bytes) DESC"
cur.execute(sql)
result = cur.fetchall()
for r in result:
if args.format == "simple":
justify_space = 100 - len(str(counter) + ". " + r['schemaname'] + "." + r['objectname'] + "(" + str(r['total_waste_percent']) + "%)" + r['total_wasted_size'] + " wasted")
result_list.append(str(counter) + ". " + r['schemaname'] + "." + r['objectname'] + "."*justify_space + "(" + str(r['total_waste_percent']) + "%) " + r['total_wasted_size'] + " wasted")
counter += 1
elif args.format == "dict" or args.format == "json" or args.format == "jsonpretty":
result_dict = dict([('schemaname', r['schemaname'])
, ('objectname', r['objectname'])
, ('objecttype', r['objecttype'])
, ('size_bytes', int(r['size_bytes']))
, ('live_tuple_count', int(r['live_tuple_count']))
, ('live_tuple_percent', str(r['live_tuple_percent'])+"%" )
, ('dead_tuple_count', int(r['dead_tuple_count']))
, ('dead_tuple_size_bytes', int(r['dead_tuple_size_bytes']))
, ('dead_tuple_percent', str(r['dead_tuple_percent'])+"%" )
, ('free_space_bytes', int(r['free_space_bytes']))
, ('free_percent', str(r['dead_tuple_percent'])+"%" )
, ('approximate', r['approximate'])
])
result_list.append(result_dict)
if args.format == "json":
result_list = json.dumps(result_list)
elif args.format == "jsonpretty":
result_list = json.dumps(result_list, indent=4, separators=(',',': '))
if len(result_list) >= 1:
print_report(result_list)
else:
if args.quiet == False:
print("No bloat found for given parameters")
close_conn(conn)
| true | true |
1c38f4d7f27db7ee57691713ddffe76cb71264a9 | 6,279 | py | Python | Bots/Python/Skills/CodeFirst/WaterfallSkillBot/dialogs/activity_router_dialog.py | gabog/BotFramework-FunctionalTests | 9ebd811539648bf7d53f6f92af42ff8ef60344a1 | [
"MIT"
] | 1 | 2021-07-06T14:29:26.000Z | 2021-07-06T14:29:26.000Z | Bots/Python/Skills/CodeFirst/WaterfallSkillBot/dialogs/activity_router_dialog.py | gabog/BotFramework-FunctionalTests | 9ebd811539648bf7d53f6f92af42ff8ef60344a1 | [
"MIT"
] | 1 | 2021-04-23T14:54:34.000Z | 2021-04-23T19:56:22.000Z | Bots/Python/Skills/CodeFirst/WaterfallSkillBot/dialogs/activity_router_dialog.py | GiulianoDolceTalianoSantoro/BotFramework-FunctionalTests | f21bded918cf9e51a296fcb0b87ad6495c59bea8 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import json
from typing import Dict
from datetime import datetime
from botbuilder.core import MessageFactory, ConversationState
from botbuilder.dialogs import (
WaterfallDialog,
WaterfallStepContext,
DialogTurnResult,
DialogTurnStatus,
ComponentDialog,
)
from botbuilder.dialogs.skills import (
SkillDialogOptions,
SkillDialog,
BeginSkillDialogOptions,
)
from botbuilder.schema import Activity, ActivityTypes, InputHints
from botbuilder.integration.aiohttp.skills import SkillHttpClient
from config import DefaultConfig
from skill_conversation_id_factory import SkillConversationIdFactory
from dialogs.cards import CardDialog
from dialogs.delete import DeleteDialog
from dialogs.proactive import WaitForProactiveDialog
from dialogs.message_with_attachment import MessageWithAttachmentDialog
from dialogs.auth import AuthDialog
from dialogs.sso import SsoSkillDialog
from dialogs.file_upload import FileUploadDialog
from dialogs.update import UpdateDialog
ECHO_SKILL = "EchoSkill"
class ActivityRouterDialog(ComponentDialog):
def __init__(
self,
configuration: DefaultConfig,
conversation_state: ConversationState,
conversation_id_factory: SkillConversationIdFactory,
skill_client: SkillHttpClient,
continuation_parameters_store: Dict,
):
super().__init__(ActivityRouterDialog.__name__)
self.add_dialog(CardDialog(configuration))
self.add_dialog(MessageWithAttachmentDialog(configuration))
self.add_dialog(
WaitForProactiveDialog(configuration, continuation_parameters_store)
)
self.add_dialog(AuthDialog(configuration))
self.add_dialog(SsoSkillDialog(configuration))
self.add_dialog(FileUploadDialog())
self.add_dialog(DeleteDialog())
self.add_dialog(UpdateDialog())
self.add_dialog(
self.create_echo_skill_dialog(
configuration, conversation_state, conversation_id_factory, skill_client
)
)
self.add_dialog(
WaterfallDialog(WaterfallDialog.__name__, [self.process_activity])
)
self.initial_dialog_id = WaterfallDialog.__name__
def create_echo_skill_dialog(
self,
configuration: DefaultConfig,
conversation_state: ConversationState,
conversation_id_factory: SkillConversationIdFactory,
skill_client: SkillHttpClient,
) -> SkillDialog:
if configuration.SKILL_HOST_ENDPOINT is None:
raise Exception("SkillHostEndpoint is not in configuration")
if configuration.ECHO_SKILL_INFO is None:
raise Exception("EchoSkillInfo is not set in configuration")
options = SkillDialogOptions(
bot_id=configuration.APP_ID,
conversation_id_factory=conversation_id_factory,
skill_client=skill_client,
skill_host_endpoint=configuration.SKILL_HOST_ENDPOINT,
conversation_state=conversation_state,
skill=configuration.ECHO_SKILL_INFO,
)
return SkillDialog(options, ECHO_SKILL)
async def process_activity(self, step_context: WaterfallStepContext):
# A skill can send trace activities, if needed.
await step_context.context.send_activity(
Activity(
type=ActivityTypes.trace,
timestamp=datetime.utcnow(),
name="ActivityRouterDialog.process_activity()",
label=f"Got ActivityType: {step_context.context.activity.type}",
)
)
if step_context.context.activity.type == ActivityTypes.event:
return await self.on_event_activity(step_context)
# We didn't get an activity type we can handle.
await step_context.context.send_activity(
activity_or_text=f'Unrecognized ActivityType: "{step_context.context.activity.type}".',
input_hint=InputHints.ignoring_input,
)
return DialogTurnResult(DialogTurnStatus.Complete)
async def on_event_activity(self, step_context: WaterfallStepContext):
activity = step_context.context.activity
await step_context.context.send_activity(
Activity(
type=ActivityTypes.trace,
timestamp=datetime.utcnow(),
name="ActivityRouterDialog.on_event_activity()",
label=f"Name: {activity.name}. Value: {json.dumps(activity.value)}",
)
)
if activity.name == "Cards":
return await step_context.begin_dialog(CardDialog.__name__)
if activity.name == "Proactive":
return await step_context.begin_dialog(WaitForProactiveDialog.__name__)
if activity.name == "MessageWithAttachment":
return await step_context.begin_dialog(MessageWithAttachmentDialog.__name__)
if activity.name == "Auth":
return await step_context.begin_dialog(AuthDialog.__name__)
if activity.name == "Sso":
return await step_context.begin_dialog(SsoSkillDialog.__name__)
if activity.name == "FileUpload":
return await step_context.begin_dialog(FileUploadDialog.__name__)
if activity.name == "Echo":
# Start the EchoSkillBot
message_activity = MessageFactory.text("I'm the echo skill bot")
message_activity.delivery_mode = activity.delivery_mode
dialog = await self.find_dialog(ECHO_SKILL)
return await step_context.begin_dialog(
dialog.id, BeginSkillDialogOptions(activity=message_activity)
)
if activity.name == "Delete":
return await step_context.begin_dialog(DeleteDialog.__name__)
if activity.name == "Update":
return await step_context.begin_dialog(UpdateDialog.__name__)
# We didn't get an event name we can handle.
await step_context.context.send_activity(
activity_or_text=f'Unrecognized EventName: "{step_context.context.activity.name}".',
input_hint=InputHints.ignoring_input,
)
return DialogTurnResult(DialogTurnStatus.Complete)
| 37.598802 | 99 | 0.697723 |
import json
from typing import Dict
from datetime import datetime
from botbuilder.core import MessageFactory, ConversationState
from botbuilder.dialogs import (
WaterfallDialog,
WaterfallStepContext,
DialogTurnResult,
DialogTurnStatus,
ComponentDialog,
)
from botbuilder.dialogs.skills import (
SkillDialogOptions,
SkillDialog,
BeginSkillDialogOptions,
)
from botbuilder.schema import Activity, ActivityTypes, InputHints
from botbuilder.integration.aiohttp.skills import SkillHttpClient
from config import DefaultConfig
from skill_conversation_id_factory import SkillConversationIdFactory
from dialogs.cards import CardDialog
from dialogs.delete import DeleteDialog
from dialogs.proactive import WaitForProactiveDialog
from dialogs.message_with_attachment import MessageWithAttachmentDialog
from dialogs.auth import AuthDialog
from dialogs.sso import SsoSkillDialog
from dialogs.file_upload import FileUploadDialog
from dialogs.update import UpdateDialog
ECHO_SKILL = "EchoSkill"
class ActivityRouterDialog(ComponentDialog):
def __init__(
self,
configuration: DefaultConfig,
conversation_state: ConversationState,
conversation_id_factory: SkillConversationIdFactory,
skill_client: SkillHttpClient,
continuation_parameters_store: Dict,
):
super().__init__(ActivityRouterDialog.__name__)
self.add_dialog(CardDialog(configuration))
self.add_dialog(MessageWithAttachmentDialog(configuration))
self.add_dialog(
WaitForProactiveDialog(configuration, continuation_parameters_store)
)
self.add_dialog(AuthDialog(configuration))
self.add_dialog(SsoSkillDialog(configuration))
self.add_dialog(FileUploadDialog())
self.add_dialog(DeleteDialog())
self.add_dialog(UpdateDialog())
self.add_dialog(
self.create_echo_skill_dialog(
configuration, conversation_state, conversation_id_factory, skill_client
)
)
self.add_dialog(
WaterfallDialog(WaterfallDialog.__name__, [self.process_activity])
)
self.initial_dialog_id = WaterfallDialog.__name__
def create_echo_skill_dialog(
self,
configuration: DefaultConfig,
conversation_state: ConversationState,
conversation_id_factory: SkillConversationIdFactory,
skill_client: SkillHttpClient,
) -> SkillDialog:
if configuration.SKILL_HOST_ENDPOINT is None:
raise Exception("SkillHostEndpoint is not in configuration")
if configuration.ECHO_SKILL_INFO is None:
raise Exception("EchoSkillInfo is not set in configuration")
options = SkillDialogOptions(
bot_id=configuration.APP_ID,
conversation_id_factory=conversation_id_factory,
skill_client=skill_client,
skill_host_endpoint=configuration.SKILL_HOST_ENDPOINT,
conversation_state=conversation_state,
skill=configuration.ECHO_SKILL_INFO,
)
return SkillDialog(options, ECHO_SKILL)
async def process_activity(self, step_context: WaterfallStepContext):
await step_context.context.send_activity(
Activity(
type=ActivityTypes.trace,
timestamp=datetime.utcnow(),
name="ActivityRouterDialog.process_activity()",
label=f"Got ActivityType: {step_context.context.activity.type}",
)
)
if step_context.context.activity.type == ActivityTypes.event:
return await self.on_event_activity(step_context)
await step_context.context.send_activity(
activity_or_text=f'Unrecognized ActivityType: "{step_context.context.activity.type}".',
input_hint=InputHints.ignoring_input,
)
return DialogTurnResult(DialogTurnStatus.Complete)
async def on_event_activity(self, step_context: WaterfallStepContext):
activity = step_context.context.activity
await step_context.context.send_activity(
Activity(
type=ActivityTypes.trace,
timestamp=datetime.utcnow(),
name="ActivityRouterDialog.on_event_activity()",
label=f"Name: {activity.name}. Value: {json.dumps(activity.value)}",
)
)
if activity.name == "Cards":
return await step_context.begin_dialog(CardDialog.__name__)
if activity.name == "Proactive":
return await step_context.begin_dialog(WaitForProactiveDialog.__name__)
if activity.name == "MessageWithAttachment":
return await step_context.begin_dialog(MessageWithAttachmentDialog.__name__)
if activity.name == "Auth":
return await step_context.begin_dialog(AuthDialog.__name__)
if activity.name == "Sso":
return await step_context.begin_dialog(SsoSkillDialog.__name__)
if activity.name == "FileUpload":
return await step_context.begin_dialog(FileUploadDialog.__name__)
if activity.name == "Echo":
# Start the EchoSkillBot
message_activity = MessageFactory.text("I'm the echo skill bot")
message_activity.delivery_mode = activity.delivery_mode
dialog = await self.find_dialog(ECHO_SKILL)
return await step_context.begin_dialog(
dialog.id, BeginSkillDialogOptions(activity=message_activity)
)
if activity.name == "Delete":
return await step_context.begin_dialog(DeleteDialog.__name__)
if activity.name == "Update":
return await step_context.begin_dialog(UpdateDialog.__name__)
await step_context.context.send_activity(
activity_or_text=f'Unrecognized EventName: "{step_context.context.activity.name}".',
input_hint=InputHints.ignoring_input,
)
return DialogTurnResult(DialogTurnStatus.Complete)
| true | true |
1c38f55a2049f9d2130eac35f1bf089857d791f8 | 144 | py | Python | fondo_api/templatetags/env_var.py | Fonmon/Fondo-API | 0c78eaab259df18219c01fceb67bd1b6ff8ec941 | [
"MIT"
] | null | null | null | fondo_api/templatetags/env_var.py | Fonmon/Fondo-API | 0c78eaab259df18219c01fceb67bd1b6ff8ec941 | [
"MIT"
] | 48 | 2018-01-13T14:52:52.000Z | 2022-03-13T17:41:42.000Z | fondo_api/templatetags/env_var.py | Fonmon/Fondo-API | 0c78eaab259df18219c01fceb67bd1b6ff8ec941 | [
"MIT"
] | null | null | null | import os
from django import template
register = template.Library()
@register.simple_tag
def host():
return os.environ.get('HOST_URL_APP') | 18 | 41 | 0.763889 | import os
from django import template
register = template.Library()
@register.simple_tag
def host():
return os.environ.get('HOST_URL_APP') | true | true |
1c38f55ce8ecf3a5a54085f7c982ef92d79f5983 | 87 | py | Python | tutors/apps.py | JCorn64/QT-Study-App | f7c02878f0cf15e99d0a07c1a9d1b3cda745e77e | [
"PostgreSQL",
"Unlicense"
] | 7 | 2021-01-17T23:10:15.000Z | 2021-02-01T21:35:36.000Z | main/tutors/apps.py | DiveshTheReal/studentutor | 0d3ef57887bde4dd2ee40d68015598f9c8052ffd | [
"MIT"
] | 7 | 2021-01-17T15:10:47.000Z | 2022-03-12T00:53:49.000Z | tutors/apps.py | JCorn64/QT-Study-App | f7c02878f0cf15e99d0a07c1a9d1b3cda745e77e | [
"PostgreSQL",
"Unlicense"
] | 3 | 2021-01-18T09:36:16.000Z | 2021-01-20T16:29:40.000Z | from django.apps import AppConfig
class TutorsConfig(AppConfig):
    """Django application configuration for the `tutors` app."""
    # App label/module name used by Django's app registry.
    name = 'tutors'
| 14.5 | 33 | 0.747126 | from django.apps import AppConfig
class TutorsConfig(AppConfig):
name = 'tutors'
| true | true |
1c38f596a128f5b5e06f64318ca590e1a20a4d12 | 4,934 | py | Python | pydataproc/clusters.py | oli-hall/py-dataproc | c931f01731e451accb63b2f37d9b9f36d73b36b5 | [
"MIT"
] | 1 | 2017-11-03T11:11:25.000Z | 2017-11-03T11:11:25.000Z | pydataproc/clusters.py | oli-hall/py-dataproc | c931f01731e451accb63b2f37d9b9f36d73b36b5 | [
"MIT"
] | null | null | null | pydataproc/clusters.py | oli-hall/py-dataproc | c931f01731e451accb63b2f37d9b9f36d73b36b5 | [
"MIT"
] | null | null | null | import time
from googleapiclient.errors import HttpError
from pydataproc.cluster import Cluster
from pydataproc.logger import log
from pydataproc.errors import ClusterAlreadyExistsException
class Clusters(object):
    """Collection-level operations (list/create) on Google Cloud DataProc clusters.

    Thin wrapper around the DataProc REST client. ``dataproc`` is the facade
    object exposing the authenticated ``client`` plus the ``project``,
    ``region`` and ``zone`` to operate in.
    """

    def __init__(self, dataproc):
        # The facade is mandatory; everything below reads client/project/region from it.
        assert dataproc
        self.dataproc = dataproc

    def list(self, minimal=True):
        """
        Queries the DataProc API, returning a dict of all currently active clusters,
        keyed by cluster name.

        If 'minimal' is specified, each cluster's current state will be returned,
        otherwise the full cluster configuration will be returned.

        :param minimal: returns only the cluster state if set to True.
        :return: dict of cluster name -> cluster information
        """
        result = self.dataproc.client.projects().regions().clusters().list(
            projectId=self.dataproc.project,
            region=self.dataproc.region).execute()

        # 'clusters' is absent from the response when the project has none.
        clusters = result.get('clusters', [])
        if minimal:
            return {c['clusterName']: c['status']['state'] for c in clusters}
        return {c['clusterName']: c for c in clusters}

    # TODO add support for preemptible workers
    def create(self, cluster_name, num_masters=1, num_workers=2,
               master_type='n1-standard-1', worker_type='n1-standard-1',
               master_disk_gb=50, worker_disk_gb=50, init_scripts=None, block=True):
        """Creates a DataProc cluster with the provided settings, returning a
        Cluster handle. It can wait for cluster creation if desired.

        If block is set to True, the method will block until the cluster reaches either
        a RUNNING or an ERROR state. If the cluster errors, an Exception will be raised.

        :param cluster_name: the name of the cluster
        :param num_masters: the number of master instances to use (default: 1)
        :param num_workers: the number of worker instances to use (default: 2)
        :param master_disk_gb: the size of the boot disk on each master (default: 50GB)
        :param worker_disk_gb: the size of the boot disk on each worker (default: 50GB)
        :param master_type: the type of instance to use for each master (default: n1-standard-1)
        :param worker_type: the type of instance to use for each worker (default: n1-standard-1)
        :param init_scripts: locations of initialisation scripts (default: None, i.e. none)
        :param block: whether to block until the cluster is RUNNING (default: True)
        :return: Cluster object
        :raises ClusterAlreadyExistsException: if a cluster with this name exists (HTTP 409)
        :raises Exception: if block=True and the cluster ends up in the ERROR state
        """
        log.info("Creating cluster '{}'".format(cluster_name))

        # NOTE: was `init_scripts=[]` — a mutable default argument shared
        # across calls; None is the safe equivalent (both are falsy below).
        zone_uri = 'https://www.googleapis.com/compute/v1/projects/{}/zones/{}'.format(
            self.dataproc.project, self.dataproc.zone)

        cluster_data = {
            'projectId': self.dataproc.project,
            'clusterName': cluster_name,
            'config': {
                'gceClusterConfig': {
                    'zoneUri': zone_uri
                },
                'workerConfig': {
                    'numInstances': num_workers,
                    'machineTypeUri': worker_type,
                    'diskConfig': {
                        'bootDiskSizeGb': worker_disk_gb
                    }
                },
                'masterConfig': {
                    'numInstances': num_masters,
                    'machineTypeUri': master_type,
                    'diskConfig': {
                        'bootDiskSizeGb': master_disk_gb
                    }
                }
            }
        }

        if init_scripts:
            cluster_data['config']['initializationActions'] = [
                {'executableFile': init_script} for init_script in init_scripts
            ]

        log.debug('Cluster settings: {}'.format(cluster_data))

        try:
            result = self.dataproc.client.projects().regions().clusters().create(
                projectId=self.dataproc.project,
                region=self.dataproc.region,
                body=cluster_data
            ).execute()
        except HttpError as e:
            # 409 Conflict means a cluster with this name already exists.
            if e.resp['status'] == '409':
                raise ClusterAlreadyExistsException("Cluster '{}' already exists".format(cluster_name))
            raise e

        log.debug("Create call for cluster '{}' returned: {}".format(cluster_name, result))

        cluster = Cluster(self.dataproc, cluster_name)
        if not block:
            return cluster

        # Poll until the cluster settles in a terminal-enough state.
        status = cluster.status()
        log.info("Waiting for cluster to be ready...")
        while status not in ('RUNNING', 'ERROR'):
            time.sleep(5)
            status = cluster.status()

        if status == 'ERROR':
            cluster_info = cluster.info()
            status_detail = cluster_info['status'].get('detail', '')
            raise Exception("Cluster encountered an error: {}".format(status_detail))

        log.info("Cluster '{}' is ready.".format(cluster_name))
        return cluster
| 39.472 | 103 | 0.597892 | import time
from googleapiclient.errors import HttpError
from pydataproc.cluster import Cluster
from pydataproc.logger import log
from pydataproc.errors import ClusterAlreadyExistsException
class Clusters(object):
def __init__(self, dataproc):
assert dataproc
self.dataproc = dataproc
def list(self, minimal=True):
result = self.dataproc.client.projects().regions().clusters().list(
projectId=self.dataproc.project,
region=self.dataproc.region).execute()
if minimal:
return {c['clusterName']: c['status']['state'] for c in result.get('clusters', [])}
return {c['clusterName']: c for c in result.get('clusters', [])}
def create(self, cluster_name, num_masters=1, num_workers=2,
master_type='n1-standard-1', worker_type='n1-standard-1',
master_disk_gb=50, worker_disk_gb=50, init_scripts=[], block=True):
log.info("Creating cluster '{}'".format(cluster_name))
zone_uri = 'https://www.googleapis.com/compute/v1/projects/{}/zones/{}'.format(
self.dataproc.project, self.dataproc.zone)
cluster_data = {
'projectId': self.dataproc.project,
'clusterName': cluster_name,
'config': {
'gceClusterConfig': {
'zoneUri': zone_uri
},
'workerConfig': {
'numInstances': num_workers,
'machineTypeUri': worker_type,
'diskConfig': {
'bootDiskSizeGb': worker_disk_gb
}
},
'masterConfig': {
'numInstances': num_masters,
'machineTypeUri': master_type,
'diskConfig': {
'bootDiskSizeGb': master_disk_gb
}
}
}
}
if init_scripts:
cluster_data['config']['initializationActions'] = [
{'executableFile': init_script} for init_script in init_scripts
]
log.debug('Cluster settings: {}'.format(cluster_data))
try:
result = self.dataproc.client.projects().regions().clusters().create(
projectId=self.dataproc.project,
region=self.dataproc.region,
body=cluster_data
).execute()
except HttpError as e:
if e.resp['status'] == '409':
raise ClusterAlreadyExistsException("Cluster '{}' already exists".format(cluster_name))
raise e
log.debug("Create call for cluster '{}' returned: {}".format(cluster_name, result))
cluster = Cluster(self.dataproc, cluster_name)
if not block:
return cluster
status = cluster.status()
log.info("Waiting for cluster to be ready...")
while not status in ['RUNNING', 'ERROR']:
time.sleep(5)
status = cluster.status()
if status == 'ERROR':
cluster_info = cluster.info()
status_detail = cluster_info['status'].get('detail', '')
raise Exception("Cluster encountered an error: {}".format(status_detail))
log.info("Cluster '{}' is ready.".format(cluster_name))
return cluster
| true | true |
1c38f6066920b707f5006a22c318e9355d2e4728 | 3,591 | py | Python | code/setup.py | amarallab/waldo | e38d23d9474a0bcb7a94e685545edb0115b12af4 | [
"MIT"
] | null | null | null | code/setup.py | amarallab/waldo | e38d23d9474a0bcb7a94e685545edb0115b12af4 | [
"MIT"
] | null | null | null | code/setup.py | amarallab/waldo | e38d23d9474a0bcb7a94e685545edb0115b12af4 | [
"MIT"
] | null | null | null | import os
import glob
from distutils.core import setup
import sys
import py2exe
import matplotlib
import FileDialog
import multiworm
HERE = os.path.dirname(os.path.abspath(__file__))
def find_data_files(source, target, patterns):
    """Collect files under `source` matching glob `patterns`.

    Returns a sorted list of (target_directory, [source_file, ...]) pairs in
    the format expected by distutils' ``data_files`` option, preserving the
    relative layout of `source` under `target`. Glob magic is only allowed in
    the patterns themselves, never in `source` or `target`.
    """
    if glob.has_magic(source) or glob.has_magic(target):
        raise ValueError("Magic not allowed in source, target")

    grouped = {}
    for pattern in patterns:
        for match in glob.glob(os.path.join(source, pattern)):
            if not os.path.isfile(match):
                continue  # skip directories that happen to match
            dest = os.path.join(target, os.path.relpath(match, source))
            grouped.setdefault(os.path.dirname(dest), []).append(match)
    return sorted(grouped.items())
# Runtime data files to bundle into the frozen (py2exe) distribution.
data_files = []
# matplotlib's mpl-data (fonts, styles) required at runtime.
data_files.extend(matplotlib.get_py2exe_datafiles())
# Graphviz binaries + config so graph rendering works from the exe.
data_files.extend(find_data_files('C:\\Program Files (x86)\\Graphviz2.38\\bin', '', ['config6', '*.dll', '*.exe']))
# skimage IO plugin descriptors and brewer2mpl colour-map data
# (hard-coded Windows paths — this build script is Windows-only).
data_files.extend(find_data_files('c:\\Python27\\Lib\\site-packages\\skimage\io\_plugins', 'skimage\\io\\_plugins', ['*.ini']))
data_files.extend(find_data_files('c:\\Python27\\lib\\site-packages\\brewer2mpl\data', 'brewer2mpl\\data', ['*.json', '*.txt']))
import numpy as np
def numpy_dll_paths_fix():
    """Add every numpy directory containing a .dll to sys.path.

    Works around py2exe failing to locate numpy's bundled DLLs on Windows.
    """
    paths = set()
    np_path = np.__path__[0]
    for dirpath, _, filenames in os.walk(np_path):
        for item in filenames:
            if item.endswith('.dll'):
                paths.add(dirpath)
                break

    # BUG FIX: the original did `sys.path.append(*list(paths))`, which raises
    # TypeError whenever `paths` has zero or more than one entry (append takes
    # exactly one argument). extend() handles any number of paths.
    sys.path.extend(paths)

numpy_dll_paths_fix()
# py2exe build of the GUI entry point. `skip_archive` keeps modules as loose
# files instead of a zip; the dll_excludes list drops GTK/MSVC runtime DLLs
# that py2exe would otherwise try (and fail) to bundle.
setup(windows=[os.path.join(HERE, 'guiwaldo.py')],
      data_files=data_files,
      options = {"py2exe": {"skip_archive": True,
                            "packages": ["matplotlib", "pytz", "skimage"],
                            "includes": ["sip",
                                         "graphviz",
                                         "skimage.*",
                                         "skimage.io.*",
                                         "PIL",
                                         "skimage.io._plugins.*",
                                         "scipy.sparse.csgraph._validation",
                                         "scipy.special._ufuncs_cxx",
                                         ],
                                         # "tcl"],
                            #"bundle_files": 1,
                            "dll_excludes": ["MSVCP90.dll",
                                             "libgdk-win32-2.0-0.dll",
                                             "libgobject-2.0-0.dll",
                                             "libgdk_pixbuf-2.0-0.dll",
                                             "libgtk-win32-2.0-0.dll",
                                             "libglib-2.0-0.dll",
                                             "libcairo-2.dll",
                                             "libpango-1.0-0.dll",
                                             "libpangowin32-1.0-0.dll",
                                             "libpangocairo-1.0-0.dll",
                                             "libglade-2.0-0.dll",
                                             "libgmodule-2.0-0.dll",
                                             "libgthread-2.0-0.dll",
                                             #"QtGui4.dll",
                                             #"QtCore.dll",
                                             #"QtCore4.dll"
                                             ]
                            }
                 })
| 44.8875 | 129 | 0.416319 | import os
import glob
from distutils.core import setup
import sys
import py2exe
import matplotlib
import FileDialog
import multiworm
HERE = os.path.dirname(os.path.abspath(__file__))
def find_data_files(source, target, patterns):
if glob.has_magic(source) or glob.has_magic(target):
raise ValueError("Magic not allowed in source, target")
ret = {}
for pattern in patterns:
pattern = os.path.join(source, pattern)
for filename in glob.glob(pattern):
if os.path.isfile(filename):
targetpath = os.path.join(target, os.path.relpath(filename, source))
path = os.path.dirname(targetpath)
ret.setdefault(path, []).append(filename)
return sorted(ret.items())
data_files = []
data_files.extend(matplotlib.get_py2exe_datafiles())
data_files.extend(find_data_files('C:\\Program Files (x86)\\Graphviz2.38\\bin', '', ['config6', '*.dll', '*.exe']))
data_files.extend(find_data_files('c:\\Python27\\Lib\\site-packages\\skimage\io\_plugins', 'skimage\\io\\_plugins', ['*.ini']))
data_files.extend(find_data_files('c:\\Python27\\lib\\site-packages\\brewer2mpl\data', 'brewer2mpl\\data', ['*.json', '*.txt']))
import numpy as np
def numpy_dll_paths_fix():
paths = set()
np_path = np.__path__[0]
for dirpath, _, filenames in os.walk(np_path):
for item in filenames:
if item.endswith('.dll'):
paths.add(dirpath)
break
sys.path.append(*list(paths))
numpy_dll_paths_fix()
setup(windows=[os.path.join(HERE, 'guiwaldo.py')],
data_files=data_files,
options = {"py2exe": {"skip_archive": True,
"packages": ["matplotlib", "pytz", "skimage"],
"includes": ["sip",
"graphviz",
"skimage.*",
"skimage.io.*",
"PIL",
"skimage.io._plugins.*",
"scipy.sparse.csgraph._validation",
"scipy.special._ufuncs_cxx",
],
"dll_excludes": ["MSVCP90.dll",
"libgdk-win32-2.0-0.dll",
"libgobject-2.0-0.dll",
"libgdk_pixbuf-2.0-0.dll",
"libgtk-win32-2.0-0.dll",
"libglib-2.0-0.dll",
"libcairo-2.dll",
"libpango-1.0-0.dll",
"libpangowin32-1.0-0.dll",
"libpangocairo-1.0-0.dll",
"libglade-2.0-0.dll",
"libgmodule-2.0-0.dll",
"libgthread-2.0-0.dll",
]
}
})
| true | true |
1c38f7d107393081787ef9e7d849ab1e3d091575 | 18,433 | py | Python | .history/implementations/pixelda/pixelda_try_20190106200556.py | Napkin-DL/PyTorch-GAN | 4668fb434a74a4e4771631944e4abfb0ec1c8795 | [
"MIT"
] | null | null | null | .history/implementations/pixelda/pixelda_try_20190106200556.py | Napkin-DL/PyTorch-GAN | 4668fb434a74a4e4771631944e4abfb0ec1c8795 | [
"MIT"
] | null | null | null | .history/implementations/pixelda/pixelda_try_20190106200556.py | Napkin-DL/PyTorch-GAN | 4668fb434a74a4e4771631944e4abfb0ec1c8795 | [
"MIT"
] | null | null | null | import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
# Output directory for sample image grids written during training.
os.makedirs('images', exist_ok=True)

# Command-line hyper-parameters for the PixelDA-style experiment.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')
opt = parser.parse_args()
print(opt)

# Calculate output of image discriminator (PatchGAN): four stride-2 convs
# shrink the input by 2**4, so validity maps are (1, S, S) with S = img_size/16.
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)

cuda = True if torch.cuda.is_available() else False
print("cuda : {}".format(cuda))
def weights_init_normal(m):
    """DCGAN-style weight initialisation, for use with ``module.apply(...)``.

    Conv layers get weights ~ N(0, 0.02); BatchNorm layers get weights
    ~ N(1, 0.02) and zero biases. Other module types are left untouched.
    (The original also printed every module's class name — leftover debug
    output, removed here.)
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class encode_ResidualBlock1(nn.Module):
    """First encoder stage: strided conv downsampling (32 -> 64 channels by default).

    forward() returns both the untouched input (used by the decoder as a
    skip connection) and the encoded feature map.
    """

    def __init__(self, in_features=32, out_features=64, kernel_size=3, stride=2, padding=1):
        super(encode_ResidualBlock1, self).__init__()
        layers = [
            nn.Conv2d(in_channels=in_features, out_channels=out_features,
                      kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_features),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        """Return (input, encoded) so the caller can keep the skip tensor."""
        return x, self.block(x)
class encode_ResidualBlock2(nn.Module):
    """Second encoder stage: strided conv halving spatial size (64 -> 128 channels by default)."""

    def __init__(self, in_features=64, out_features=128, kernel_size=3, stride=2, padding=1):
        super(encode_ResidualBlock2, self).__init__()
        conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                         kernel_size=kernel_size, stride=stride, padding=padding)
        self.block = nn.Sequential(conv,
                                   nn.BatchNorm2d(out_features),
                                   nn.LeakyReLU(0.2, inplace=True))

    def forward(self, x):
        """Return the downsampled, normalised feature map."""
        return self.block(x)
class encode_ResidualBlock3(nn.Module):
    """Third encoder stage: strided conv halving spatial size (128 -> 256 channels by default)."""

    def __init__(self, in_features=128, out_features=256, kernel_size=3, stride=2, padding=1):
        super(encode_ResidualBlock3, self).__init__()
        stage = [
            nn.Conv2d(in_channels=in_features, out_channels=out_features,
                      kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_features),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        self.block = nn.Sequential(*stage)

    def forward(self, x):
        """Return the deepest encoded feature map."""
        encoded = self.block(x)
        return encoded
class decode_ResidualBlock1(nn.Module):
    """First decoder stage: transposed-conv upsampling (256 -> 128 channels by default).

    With stride 2 and no padding the transposed conv yields an output one
    pixel too large per spatial axis for exact doubling, so forward() trims
    the last row and column.
    """

    def __init__(self, in_features=256, out_features=128, kernel_size=3, stride=2, padding=0):
        super(decode_ResidualBlock1, self).__init__()
        self.block = nn.Sequential(
            nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features,
                               kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_features),
            nn.LeakyReLU(0.2, inplace=True),
        )

    def forward(self, encode_x):
        """Upsample, then crop off the trailing row/column."""
        upsampled = self.block(encode_x)
        return upsampled[:, :, :-1, :-1]
class decode_ResidualBlock2(nn.Module):
    """Second decoder stage: transposed-conv upsampling (128 -> 64 channels by default).

    Like decode_ResidualBlock1, the unpadded stride-2 transposed conv
    overshoots exact doubling by one pixel per axis; forward() crops it.
    """

    def __init__(self, in_features=128, out_features=64, kernel_size=3, stride=2, padding=0):
        super(decode_ResidualBlock2, self).__init__()
        upconv = nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features,
                                    kernel_size=kernel_size, stride=stride, padding=padding)
        self.block = nn.Sequential(upconv,
                                   nn.BatchNorm2d(out_features),
                                   nn.LeakyReLU(0.2, inplace=True))

    def forward(self, encode_x):
        """Upsample and drop the extra trailing row/column."""
        grown = self.block(encode_x)
        return grown[:, :, :-1, :-1]
class decode_ResidualBlock3(nn.Module):
    """Final decoder stage: transposed-conv upsampling (64 -> 32 channels by default)."""

    def __init__(self, in_features=64, out_features=32, kernel_size=3, stride=2, padding=1):
        super(decode_ResidualBlock3, self).__init__()
        self.block = nn.Sequential(
            nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features,
                               kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_features),
            nn.LeakyReLU(0.2, inplace=True)
        )

    def forward(self, encode_x):
        # BUG FIX: the original called self.decode_block(encode_x), but the
        # Sequential is stored as self.block — that raised AttributeError on
        # the first forward pass.
        return self.block(encode_x)
class source_encode_Generator(nn.Module):
    """Encoder half of the generator for source-domain images.

    The latent vector z is projected to image shape, concatenated with the
    input image along the channel axis, and passed through a conv stem plus
    three strided encoder blocks. All intermediate activations are returned
    so the decoder can build skip connections.
    """

    def __init__(self):
        super(source_encode_Generator, self).__init__()

        # Project the noise vector to a full image-sized tensor.
        self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)

        self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))

        self.encode_resblocks1 = encode_ResidualBlock1()
        self.encode_resblocks2 = encode_ResidualBlock2()
        self.encode_resblocks3 = encode_ResidualBlock3()

    def forward(self, img, z):
        """Return (stem_features, enc1, enc2, enc3) for the decoder."""
        noise_img = self.fc(z).view(*img.shape)
        stem = self.l1(torch.cat((img, noise_img), 1))
        skip, enc1 = self.encode_resblocks1(stem)
        enc2 = self.encode_resblocks2(enc1)
        enc3 = self.encode_resblocks3(enc2)
        return skip, enc1, enc2, enc3
class target_encode_Generator(nn.Module):
    """Encoder half of the generator for target-domain images.

    Structurally identical to source_encode_Generator (separate weights):
    noise is projected to image shape, concatenated with the image on the
    channel axis, then run through a conv stem and three strided encoder
    blocks. Returns the stem output plus each stage's activations for use
    as skip connections by the decoder.
    """
    def __init__(self):
        super(target_encode_Generator, self).__init__()

        # Projects the latent vector z to a channels*img_size**2 tensor so it
        # can be reshaped to image dimensions and concatenated with the input.
        self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)

        self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))

        self.encode_resblocks1 = encode_ResidualBlock1()
        self.encode_resblocks2 = encode_ResidualBlock2()
        self.encode_resblocks3 = encode_ResidualBlock3()

    def forward(self, img, z):
        """Return (stem_features, enc1, enc2, enc3)."""
        gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)
        encode_x = self.l1(gen_input)
        x, encode_out1 = self.encode_resblocks1(encode_x)
        encode_out2 = self.encode_resblocks2(encode_out1)
        encode_out3 = self.encode_resblocks3(encode_out2)
        return x, encode_out1, encode_out2, encode_out3
class decode_Generator(nn.Module):
    """Decoder half of the generator: rebuilds an image from encoder features.

    NOTE(review): this looks like work-in-progress code (file is *_try):
    - decode_resblocks2 expects 128 input channels, but it is fed
      torch.cat([decode_out1, encode_out2], dim=1) which appears to yield
      256 channels (128 + 128) — likely a runtime shape error; perhaps
      addition was intended, as in the next line. Confirm before relying on it.
    - torch.cat([decode_out2+encode_out1], dim=1) concatenates a single
      tensor, i.e. it is a no-op wrapper around the addition.
    - The print() calls are debug output left in the forward pass.
    """
    def __init__(self):
        super(decode_Generator, self).__init__()

        # Three upsampling blocks followed by a conv+Tanh head producing
        # an image-shaped residual in [-1, 1].
        self.decode_resblocks1 = decode_ResidualBlock1()
        self.decode_resblocks2 = decode_ResidualBlock2()
        self.decode_resblocks3 = decode_ResidualBlock3()
        self.l2 = nn.Sequential(nn.Conv2d(32, opt.channels, 3, 1, 1), nn.Tanh())

    def forward(self, x, encode_out1, encode_out2, encode_out3):
        # Debug prints — leftover instrumentation.
        print(x.size(),encode_out1.size(), encode_out2.size(), encode_out3.size() )
        decode_out1 = self.decode_resblocks1(encode_out3)
        print(decode_out1.size())
        # NOTE(review): channel-count mismatch suspected here (see class docstring).
        decode_out2 = self.decode_resblocks2(torch.cat([decode_out1,encode_out2], dim=1))
        # Single-tensor cat: effectively just decode_out2 + encode_out1.
        decode_out3 = self.decode_resblocks3(torch.cat([decode_out2+encode_out1], dim=1))
        # F.sigmoid is deprecated in modern torch in favour of torch.sigmoid.
        decode_x = F.sigmoid(decode_out3)
        decode_x = decode_x[:, :, :-1, :-1]
        # Residual connection onto the stem features, then project to image space.
        out = x + decode_x
        img_ = self.l2(out)
        return img_
class encode_Discriminator(nn.Module):
def __init__(self):
super(encode_Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
"""Discriminator block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(512, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512),
nn.Conv2d(512, 1, 3, 1, 1)
)
def forward(self, encode_x):
validity = self.model(encode_x)
return validity
class Discriminator(nn.Module):
    """PatchGAN image discriminator: maps an image to a grid of validity scores."""

    def __init__(self):
        super(Discriminator, self).__init__()

        def stage(in_features, out_features, normalization=True):
            # One downsampling stage: strided conv + LeakyReLU, optionally
            # followed by instance normalisation.
            modules = [nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
                       nn.LeakyReLU(0.2, inplace=True)]
            if normalization:
                modules.append(nn.InstanceNorm2d(out_features))
            return modules

        # Four stride-2 stages (no norm on the first), then a 1-channel head.
        self.model = nn.Sequential(
            *stage(opt.channels, 64, normalization=False),
            *stage(64, 128),
            *stage(128, 256),
            *stage(256, 512),
            nn.Conv2d(512, 1, 3, 1, 1)
        )

    def forward(self, img):
        """Score each receptive-field patch of `img` as real/fake."""
        return self.model(img)
class Classifier(nn.Module):
    """Task classifier: conv feature extractor + linear head over n_classes.

    NOTE(review): the head ends in nn.Softmax(), yet the training code pairs
    this model with torch.nn.CrossEntropyLoss, which expects raw logits and
    applies log-softmax internally — so the loss sees doubly-squashed
    probabilities. argmax-based accuracy is unaffected, but confirm whether
    the Softmax is intentional.
    """
    def __init__(self):
        super(Classifier, self).__init__()

        def block(in_features, out_features, normalization=True):
            """One downsampling stage: strided conv + LeakyReLU (+ InstanceNorm)."""
            layers = [  nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
                        nn.LeakyReLU(0.2, inplace=True) ]
            if normalization:
                layers.append(nn.InstanceNorm2d(out_features))
            return layers

        # Four stride-2 stages shrink spatial size by 2**4.
        self.model = nn.Sequential(
            *block(opt.channels, 64, normalization=False),
            *block(64, 128),
            *block(128, 256),
            *block(256, 512)
        )

        # Spatial size after the conv stack; flattened into the linear head.
        input_size = opt.img_size // 2**4
        self.output_layer = nn.Sequential(
            nn.Linear(512*input_size**2, opt.n_classes),
            nn.Softmax()
        )

    def forward(self, img):
        """Return class probabilities for a batch of images."""
        feature_repr = self.model(img)
        feature_repr = feature_repr.view(feature_repr.size(0), -1)
        label = self.output_layer(feature_repr)
        return label
# --- Losses ---
# MSE for both adversarial objectives (LSGAN-style); cross-entropy for the
# classification task. NOTE(review): the Classifier already ends in Softmax,
# so CrossEntropyLoss receives probabilities rather than logits — confirm.
adversarial_loss = torch.nn.MSELoss()
encode_adversarial_loss = torch.nn.MSELoss()
task_loss = torch.nn.CrossEntropyLoss()

# Loss weights: adversarial term at full weight, task term scaled down.
lambda_adv = 1
lambda_task = 0.1

# Initialize generator halves, discriminators and task classifier.
target_encode_generator = target_encode_Generator()
source_encode_generator = source_encode_Generator()
decode_generator = decode_Generator()
encode_discriminator = encode_Discriminator()
discriminator = Discriminator()
classifier = Classifier()

if cuda:
    target_encode_generator.cuda()
    source_encode_generator.cuda()
    decode_generator.cuda()
    encode_discriminator.cuda()
    discriminator.cuda()
    classifier.cuda()
    adversarial_loss.cuda()
    encode_adversarial_loss.cuda()
    task_loss.cuda()

# DCGAN-style weight initialisation on every submodule.
target_encode_generator.apply(weights_init_normal)
source_encode_generator.apply(weights_init_normal)
decode_generator.apply(weights_init_normal)
encode_discriminator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)

# Data: domain A = MNIST (grayscale, expanded to 3ch later),
#       domain B = MNIST-M (colored MNIST variant).
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
    datasets.MNIST('../../data/mnist', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.Resize(opt.img_size),
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ])),
    batch_size=opt.batch_size, shuffle=True)

os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
    MNISTM('../../data/mnistm', train=True, download=True,
           transform=transforms.Compose([
               transforms.Resize(opt.img_size),
               transforms.ToTensor(),
               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
           ])),
    batch_size=opt.batch_size, shuffle=True)

# Optimizers: G updates both encoders, the decoder and the classifier
# jointly; D updates both discriminators jointly.
optimizer_G = torch.optim.Adam( itertools.chain(target_encode_generator.parameters(),
                                                source_encode_generator.parameters(),
                                                decode_generator.parameters(),
                                                classifier.parameters()),
                                lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))

FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor

# ----------
#  Training
# ----------

# Rolling windows of the last 100 accuracy measurements.
task_performance = []
target_performance = []

for epoch in range(opt.n_epochs):
    for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):

        batch_size = imgs_A.size(0)

        # Adversarial ground truths (per-patch), for both image-space and
        # feature-space discriminators.
        valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
        fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
        encode_valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
        encode_fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)

        # Configure input; MNIST is single-channel, so expand to 3 channels.
        imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
        labels_A = Variable(labels_A.type(LongTensor))
        imgs_B = Variable(imgs_B.type(FloatTensor))

        # -----------------
        #  Train Generator
        # -----------------

        optimizer_G.zero_grad()

        # Sample noise
        z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))

        # Translate source images to the target style via encode+decode.
        imgs_A_x, sencode_1, sencode_2, encode_fake_B = source_encode_generator(imgs_A, z)
        decode_fake_B = decode_generator(imgs_A_x, sencode_1, sencode_2, encode_fake_B)

        # Perform task on translated source image
        label_pred = classifier(decode_fake_B)

        # Task loss on both the translated and the raw source image.
        task_loss_ = (task_loss(label_pred, labels_A) + \
                     task_loss(classifier(imgs_A), labels_A)) / 2

        # Generator loss: fool the image discriminator, (weakly) fool the
        # feature-space discriminator, and keep the classifier accurate.
        g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \
                    0.1 * encode_adversarial_loss(encode_discriminator(encode_fake_B), encode_valid) + \
                    lambda_task * task_loss_

        g_loss.backward()
        optimizer_G.step()

        # ---------------------
        #  Train Discriminator
        # ---------------------

        optimizer_D.zero_grad()

        # "Real" samples come from the target-domain encoder/decoder path.
        imgs_B_x, tencode_1, tencode_2, encode_real_B = target_encode_generator(imgs_B, z)
        decode_real_B = decode_generator(imgs_B_x, tencode_1, tencode_2, encode_real_B)

        # Measure discriminator's ability to classify real from generated
        # samples, in both feature space and image space.
        encode_real_loss = encode_adversarial_loss(encode_discriminator(encode_real_B), encode_valid)
        encode_fake_loss = encode_adversarial_loss(encode_discriminator(encode_fake_B.detach()), encode_fake)
        decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)
        decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)

        encode_d_loss = (encode_real_loss + encode_fake_loss) / 2
        decode_d_loss = (decode_real_loss + decode_fake_loss) / 2

        d_loss = encode_d_loss + decode_d_loss

        d_loss.backward()
        optimizer_D.step()

        # ---------------------------------------
        #  Evaluate Performance on target domain
        # ---------------------------------------

        # Evaluate performance on translated Domain A (rolling 100-batch window).
        acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
        task_performance.append(acc)
        if len(task_performance) > 100:
            task_performance.pop(0)

        # Evaluate performance on Domain B (the actual transfer target).
        pred_B = classifier(imgs_B)
        target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
        target_performance.append(target_acc)
        if len(target_performance) > 100:
            target_performance.pop(0)

        print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
                                                            (epoch, opt.n_epochs,
                                                            i, len(dataloader_A),
                                                            d_loss.item(), g_loss.item(),
                                                            100*acc, 100*np.mean(task_performance),
                                                            100*target_acc, 100*np.mean(target_performance)))

        batches_done = len(dataloader_A) * epoch + i
        if batches_done % opt.sample_interval == 0:
            # Save a grid of (source, translated, target) rows for inspection.
            sample = torch.cat((imgs_A.data[:5], decode_fake_B.data[:5], imgs_B.data[:5]), -2)
            save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs('images', exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')
opt = parser.parse_args()
print(opt)
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)
cuda = True if torch.cuda.is_available() else False
print("cuda : {}".format(cuda))
def weights_init_normal(m):
classname = m.__class__.__name__
print("classname : {}".format(classname))
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class encode_ResidualBlock1(nn.Module):
def __init__(self, in_features=32, out_features=64, kernel_size=3, stride=2, padding=1):
super(encode_ResidualBlock1, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, x):
encode_x = self.block(x)
return x, encode_x
class encode_ResidualBlock2(nn.Module):
def __init__(self, in_features=64, out_features=128, kernel_size=3, stride=2, padding=1):
super(encode_ResidualBlock2, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, x):
encode_x = self.block(x)
return encode_x
class encode_ResidualBlock3(nn.Module):
def __init__(self, in_features=128, out_features=256, kernel_size=3, stride=2, padding=1):
super(encode_ResidualBlock3, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, x):
encode_x = self.block(x)
return encode_x
class decode_ResidualBlock1(nn.Module):
def __init__(self, in_features=256, out_features=128, kernel_size=3, stride=2, padding=0):
super(decode_ResidualBlock1, self).__init__()
self.block = nn.Sequential(
nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, encode_x):
decode_x = self.block(encode_x)
decode_x = decode_x[:,:,:-1,:-1]
return decode_x
class decode_ResidualBlock2(nn.Module):
def __init__(self, in_features=128, out_features=64, kernel_size=3, stride=2, padding=0):
super(decode_ResidualBlock2, self).__init__()
self.block = nn.Sequential(
nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, encode_x):
decode_x = self.block(encode_x)
decode_x = decode_x[:,:,:-1,:-1]
return decode_x
class decode_ResidualBlock3(nn.Module):
def __init__(self, in_features=64, out_features=32, kernel_size=3, stride=2, padding=1):
super(decode_ResidualBlock3, self).__init__()
self.block = nn.Sequential(
nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, encode_x):
decode_x = self.decode_block(encode_x)
return decode_x
class source_encode_Generator(nn.Module):
def __init__(self):
super(source_encode_Generator, self).__init__()
self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))
self.encode_resblocks1 = encode_ResidualBlock1()
self.encode_resblocks2 = encode_ResidualBlock2()
self.encode_resblocks3 = encode_ResidualBlock3()
def forward(self, img, z):
gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)
encode_x = self.l1(gen_input)
x, encode_out1 = self.encode_resblocks1(encode_x)
encode_out2 = self.encode_resblocks2(encode_out1)
encode_out3 = self.encode_resblocks3(encode_out2)
return x, encode_out1, encode_out2, encode_out3
class target_encode_Generator(nn.Module):
def __init__(self):
super(target_encode_Generator, self).__init__()
self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))
self.encode_resblocks1 = encode_ResidualBlock1()
self.encode_resblocks2 = encode_ResidualBlock2()
self.encode_resblocks3 = encode_ResidualBlock3()
def forward(self, img, z):
gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)
encode_x = self.l1(gen_input)
x, encode_out1 = self.encode_resblocks1(encode_x)
encode_out2 = self.encode_resblocks2(encode_out1)
encode_out3 = self.encode_resblocks3(encode_out2)
return x, encode_out1, encode_out2, encode_out3
class decode_Generator(nn.Module):
def __init__(self):
super(decode_Generator, self).__init__()
self.decode_resblocks1 = decode_ResidualBlock1()
self.decode_resblocks2 = decode_ResidualBlock2()
self.decode_resblocks3 = decode_ResidualBlock3()
self.l2 = nn.Sequential(nn.Conv2d(32, opt.channels, 3, 1, 1), nn.Tanh())
def forward(self, x, encode_out1, encode_out2, encode_out3):
print(x.size(),encode_out1.size(), encode_out2.size(), encode_out3.size() )
decode_out1 = self.decode_resblocks1(encode_out3)
print(decode_out1.size())
decode_out2 = self.decode_resblocks2(torch.cat([decode_out1,encode_out2], dim=1))
decode_out3 = self.decode_resblocks3(torch.cat([decode_out2+encode_out1], dim=1))
decode_x = F.sigmoid(decode_out3)
decode_x = decode_x[:, :, :-1, :-1]
out = x + decode_x
img_ = self.l2(out)
return img_
class encode_Discriminator(nn.Module):
def __init__(self):
super(encode_Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(512, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512),
nn.Conv2d(512, 1, 3, 1, 1)
)
def forward(self, encode_x):
validity = self.model(encode_x)
return validity
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512),
nn.Conv2d(512, 1, 3, 1, 1)
)
def forward(self, img):
validity = self.model(img)
return validity
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
def block(in_features, out_features, normalization=True):
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512)
)
input_size = opt.img_size // 2**4
self.output_layer = nn.Sequential(
nn.Linear(512*input_size**2, opt.n_classes),
nn.Softmax()
)
def forward(self, img):
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
adversarial_loss = torch.nn.MSELoss()
encode_adversarial_loss = torch.nn.MSELoss()
task_loss = torch.nn.CrossEntropyLoss()
lambda_adv = 1
lambda_task = 0.1
target_encode_generator = target_encode_Generator()
source_encode_generator = source_encode_Generator()
decode_generator = decode_Generator()
encode_discriminator = encode_Discriminator()
discriminator = Discriminator()
classifier = Classifier()
if cuda:
target_encode_generator.cuda()
source_encode_generator.cuda()
decode_generator.cuda()
encode_discriminator.cuda()
discriminator.cuda()
classifier.cuda()
adversarial_loss.cuda()
encode_adversarial_loss.cuda()
task_loss.cuda()
target_encode_generator.apply(weights_init_normal)
source_encode_generator.apply(weights_init_normal)
decode_generator.apply(weights_init_normal)
encode_discriminator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
datasets.MNIST('../../data/mnist', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
MNISTM('../../data/mnistm', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
optimizer_G = torch.optim.Adam( itertools.chain(target_encode_generator.parameters(),
source_encode_generator.parameters(),
decode_generator.parameters(),
classifier.parameters()),
lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
task_performance = []
target_performance = []
for epoch in range(opt.n_epochs):
for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):
batch_size = imgs_A.size(0)
valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
encode_valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
encode_fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
labels_A = Variable(labels_A.type(LongTensor))
imgs_B = Variable(imgs_B.type(FloatTensor))
optimizer_G.zero_grad()
z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))
imgs_A_x, sencode_1, sencode_2, encode_fake_B = source_encode_generator(imgs_A, z)
decode_fake_B = decode_generator(imgs_A_x, sencode_1, sencode_2, encode_fake_B)
label_pred = classifier(decode_fake_B)
task_loss_ = (task_loss(label_pred, labels_A) + \
task_loss(classifier(imgs_A), labels_A)) / 2
g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \
0.1 * encode_adversarial_loss(encode_discriminator(encode_fake_B), encode_valid) + \
lambda_task * task_loss_
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
imgs_B_x, tencode_1, tencode_2, encode_real_B = target_encode_generator(imgs_B, z)
decode_real_B = decode_generator(imgs_B_x, tencode_1, tencode_2, encode_real_B)
# Measure discriminator's ability to classify real from generated samples
encode_real_loss = encode_adversarial_loss(encode_discriminator(encode_real_B), encode_valid)
encode_fake_loss = encode_adversarial_loss(encode_discriminator(encode_fake_B.detach()), encode_fake)
decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)
decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)
encode_d_loss = (encode_real_loss + encode_fake_loss) / 2
decode_d_loss = (decode_real_loss + decode_fake_loss) / 2
d_loss = encode_d_loss + decode_d_loss
d_loss.backward()
optimizer_D.step()
acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
task_performance.append(acc)
if len(task_performance) > 100:
task_performance.pop(0)
pred_B = classifier(imgs_B)
target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
target_performance.append(target_acc)
if len(target_performance) > 100:
target_performance.pop(0)
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
(epoch, opt.n_epochs,
i, len(dataloader_A),
d_loss.item(), g_loss.item(),
100*acc, 100*np.mean(task_performance),
100*target_acc, 100*np.mean(target_performance)))
batches_done = len(dataloader_A) * epoch + i
if batches_done % opt.sample_interval == 0:
sample = torch.cat((imgs_A.data[:5], decode_fake_B.data[:5], imgs_B.data[:5]), -2)
save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
| true | true |
1c38f90544ad7f9caa50911d9ab401d1222696e5 | 748 | py | Python | aqi_data_crawler/aqi/pipelines.py | firest547/firest547.github.io | aef008faef4d7a37a5696cd2c52e84d48773d097 | [
"MIT"
] | null | null | null | aqi_data_crawler/aqi/pipelines.py | firest547/firest547.github.io | aef008faef4d7a37a5696cd2c52e84d48773d097 | [
"MIT"
] | 1 | 2020-12-08T21:26:28.000Z | 2020-12-08T21:26:28.000Z | aqi_data_crawler/aqi/pipelines.py | firest547/firest547.github.io | aef008faef4d7a37a5696cd2c52e84d48773d097 | [
"MIT"
] | 2 | 2020-12-08T21:10:15.000Z | 2021-01-27T01:26:20.000Z | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to t he ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.files import FilesPipeline
import os
import pathlib
import re
import subprocess
class AQIPipeline(FilesPipeline):
def process_item(self, item, spider):
url = item["file_urls"][-1]
file_name = re.split(r'Data\-Donnees\/', url)[-1]
path = pathlib.Path(file_name).parent
path = os.path.join('download', path)
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
subprocess.call(['wget', '-nH', item["file_urls"][-1],
f'-P{path}'])
| 27.703704 | 66 | 0.640374 |
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.files import FilesPipeline
import os
import pathlib
import re
import subprocess
class AQIPipeline(FilesPipeline):
def process_item(self, item, spider):
url = item["file_urls"][-1]
file_name = re.split(r'Data\-Donnees\/', url)[-1]
path = pathlib.Path(file_name).parent
path = os.path.join('download', path)
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
subprocess.call(['wget', '-nH', item["file_urls"][-1],
f'-P{path}'])
| true | true |
1c38f9b6834b5e89da2e65143cfac40c9a8f8555 | 525 | py | Python | Recursion2/Conclusion/largest_rectangle.py | mamoudmatook/Leetcode | 59fb1612ee648a9b99ff7cc779ada5656c01ecd2 | [
"MIT"
] | null | null | null | Recursion2/Conclusion/largest_rectangle.py | mamoudmatook/Leetcode | 59fb1612ee648a9b99ff7cc779ada5656c01ecd2 | [
"MIT"
] | null | null | null | Recursion2/Conclusion/largest_rectangle.py | mamoudmatook/Leetcode | 59fb1612ee648a9b99ff7cc779ada5656c01ecd2 | [
"MIT"
] | null | null | null | class Solution:
def largestRectangleArea(self, heights: List[int]) -> int:
def rec(start, end):
if start > end:
return 0
min_idx = start
for i in range(start, end + 1):
if heights[min_idx] > heights[i]:
min_idx = i
return max(
heights[min_idx] * (end - start + 1),
rec(start, min_idx - 1),
rec(min_idx + 1, end),
)
return rec(0, len(heights) -1) | 35 | 62 | 0.447619 | class Solution:
def largestRectangleArea(self, heights: List[int]) -> int:
def rec(start, end):
if start > end:
return 0
min_idx = start
for i in range(start, end + 1):
if heights[min_idx] > heights[i]:
min_idx = i
return max(
heights[min_idx] * (end - start + 1),
rec(start, min_idx - 1),
rec(min_idx + 1, end),
)
return rec(0, len(heights) -1) | true | true |
1c38fa1649a2f3ffec8b11017b75e247c0ba37f2 | 11,051 | py | Python | app/gameimpl/x01_match.py | jtessier-cit/soft8023-darts | 51fb480b4c21a0cf49ffe1ef6e8e30855aee744e | [
"MIT"
] | null | null | null | app/gameimpl/x01_match.py | jtessier-cit/soft8023-darts | 51fb480b4c21a0cf49ffe1ef6e8e30855aee744e | [
"MIT"
] | null | null | null | app/gameimpl/x01_match.py | jtessier-cit/soft8023-darts | 51fb480b4c21a0cf49ffe1ef6e8e30855aee744e | [
"MIT"
] | null | null | null | import json
import pika as pika
from service.match_service import MatchVisitTemplate
from service.match_service import MatchManager
# Lab 03 add MatchStatus
from datatype.enums import DartMultiplier, MatchStatus
# CHECKOUTS = {
# 170: "T20 T20 Bull",
# 167: "T20 T19 Bull",
# 164: "T20 T18 Bull",
# 161: "T20 T17 Bull",
# 160: "T20 T20 D20",
#
# 136: "T20 T20 D8",
#
# 36: "D18"
# }
CHECKOUTS = {
2: "D1",
3: "1 D1",
4: "D2",
5: "3 D1",
6: "D3",
7: "5 D1",
8: "D4",
9: "5 D2",
10: "D5",
11: "3 D4",
12: "D6",
13: "3 D5",
14: "D7",
15: "3 D6",
16: "D8",
17: "3 D7",
18: "D9",
19: "3 D8",
20: "D10",
21: "1 D10",
22: "D11",
23: "5 D9",
24: "D12",
25: "3 D11",
26: "D13",
27: "3 D12",
28: "D14",
29: "1 D14",
30: "D15",
31: "3 D14",
32: "D16",
33: "3 D15",
34: "D17",
35: "7 D14",
36: "D18",
37: "5 D16",
38: "D19",
39: "1 D19",
40: "D20",
41: "3 D19",
42: "4 D19",
43: "5 D19",
44: "6 D19",
45: "7 D19",
46: "8 D19",
47: "9 D19",
48: "10 D19",
49: "11 D19",
50: "12 D19",
51: "13 D19",
52: "12 D20",
53: "15 D19",
54: "14 D20",
55: "15 D20",
56: "16 D20",
57: "17 D20",
58: "18 D20",
59: "19 D20",
60: "20 D20",
61: "3 20 D19",
62: "16 8 D19",
63: "14 9 D20",
64: "9 15 D20",
65: "7 18 D20",
66: "6 20 D20",
67: "13 16 D19",
68: "11 19 D19",
69: "10 19 D20",
70: "10 20 D20",
71: "14 19 D19",
72: "18 16 D19",
73: "17 18 D19",
74: "16 18 D20",
75: "15 20 D20",
76: "20 16 D20",
77: "17 20 D20",
78: "18 20 D20",
79: "19 20 D20",
80: "20 20 D20",
81: "D19 3 D20",
82: "D19 4 D20",
83: "5 D20 D19",
84: "6 D20 D19",
85: "7 D19 D20",
86: "D20 8 D19",
87: "9 D19 D20",
88: "D20 10 D19",
89: "11 D19 D20",
90: "12 D19 D20",
91: "13 D19 D20",
92: "D19 14 D20",
93: "15 D19 D20",
94: "16 D20 D19",
95: "17 D20 D19",
96: "D20 18 D19",
97: "19 D20 D19",
98: "D20 20 D19",
99: "19 D20 D20",
100: "20 D20 D20",
101: "6 T19 D19",
102: "D20 D12 D19",
103: "5 T20 D19",
104: "9 T19 D19",
105: "T20 5 D20",
106: "D14 D19 D20",
107: "T19 12 D19",
108: "D19 D20 D15",
109: "T20 9 D20",
110: "T19 13 D20",
111: "16 T19 D19",
112: "D19 D20 D17",
113: "T19 18 D19",
114: "T19 19 D19",
115: "T19 18 D20",
116: "16 T20 D20",
117: "19 T20 D19",
118: "18 T20 D20",
119: "19 T20 D20",
120: "T20 20 D20",
121: "T19 D19 D13",
122: "D20 T20 D11",
123: "T19 D19 D14",
124: "D20 T20 D12",
125: "D15 T19 D19",
126: "T20 D19 D14",
127: "D15 T19 D20",
128: "D15 T20 D19",
129: "D17 T19 D19",
130: "T20 D15 D20",
131: "T19 D17 D20",
132: "T20 D17 D19",
133: "D18 T19 D20",
134: "D18 T20 D19",
135: "T19 D20 D19",
136: "D18 T20 D20",
137: "T19 D20 D20",
138: "T20 D19 D20",
139: "T19 T14 D20",
140: "T20 D20 D20",
141: "T19 T20 D12",
142: "T20 T14 D20",
143: "T15 T20 D19",
144: "T20 T18 D15",
145: "T15 T20 D20",
146: "T16 T20 D19",
147: "T20 T19 D15",
148: "T20 T16 D20",
149: "T20 T19 D16",
150: "T20 T18 D18",
151: "T20 T19 D17",
152: "T20 T18 D19",
153: "T20 T19 D18",
154: "T20 T18 D20",
155: "T20 T19 D19",
156: "T20 T20 D18",
157: "T20 T19 D20",
158: "T20 T20 D19",
159: "No checkout",
160: "T20 T20 D20",
161: "T20 T17 Bull",
162: "No checkout",
163: "No checkout",
164: "T20 T18 Bull",
165: "No checkout",
166: "No checkout",
167: "T20 T19 Bull",
168: "No checkout",
169: "No checkout",
170: "T20 T20 Bull"
}
# STARTING_TOTAL = 501
class X01Match(MatchManager, MatchVisitTemplate):
# def __init__(self, starting_total=501):
def __init__(self, starting_total=501):
super().__init__()
# self._starting_total = starting_total
self._starting_total = starting_total
self.scores = [] # list of scores remaining parallel to players
self.averages = [] # single-dart average (x 3 for 3-dart average)
self.first9 = [] # average for first 9 darts
# This has the potential to be buggy if the match is set first and players registered after
def post_init(self):
for i in range(0, len(self.match.players)):
self.scores.append(self._starting_total) # Might want to parameterize the starting total
self.first9.append(None)
self.averages.append(None)
# match is in progress after initializing
self.match.status = MatchStatus.IN_PROGRESS
def validate_visit(self, player_index, visit):
# if the last player is the same as the current player, visit isn't valid (out of turn)
if self.match.last_player_index is player_index: # Note: this won't work properly for 3 players...
return False, "Player " + str(player_index + 1) + " is not in the correct sequence. Visit ignored."
# if the match status is not active, visit isn't valid (inactive game)
if self.match.status != MatchStatus.IN_PROGRESS:
return False, "Game has ended."
# print(str(self.match.last_Player_index) + "-" + str(player_index))
# advance the last player index - player's turn will proceed
self.match.last_player_index = player_index
return True, None
def check_winning_condition(self, player_index, visit):
"""returns 1, 2 or 3 for when a dart closes the game / leg (i.e. finishing double) or 0 if not closed out
:param player_index: position of player details in various lists
:param visit: a list of 3 Darts (each containing multiplier and segment)
:return: 0, 1, 2 or 3
"""
i = 0
# loop over the darts in the visit
for dart in visit.darts:
i = i + 1
# if the dart is a double and the score for the dart would make the score 0
if dart.multiplier == DartMultiplier.DOUBLE and self.scores[player_index] - dart.get_score() == 0:
# game, shot!
self.scores[player_index] = 0 # set the player's score to 0
self.match.status = MatchStatus.FINISHED # game is no longer active
return i # return the dart number
else:
print("deducting for " + str(player_index))
self.scores[player_index] -= dart.get_score() # reduce the player's score
return 0 # return 0 - game isn't done
def record_statistics(self, player_index, visit, result):
"""Store stats both for in-memory immediate use and on disk for later recall
:return:
"""
if result != 0:
# result was 1 2 or 3 (which dart ended the game)
# so this removes the remaining darts from the visit
visit.remove_trailing_darts(result) # a double finished the game, so ignore any subsequent darts
# adds the visit to the player's visits
self.match.visits[player_index].append(visit)
# Calculate first 9 if, and only if, this is the 3rd visit
if len(self.match.visits[player_index]) == 3:
# subtract the remaining score from starting score and / 3 to get average?
# check logic, why / 3?
self.first9[player_index] = (self._starting_total - self.scores[player_index]) / 3
# Calculate single-dart average taking account of a double being hit with dart 1 or 2 when checking out
# player threw 3 darts per visit unless on the winning one
num_darts_thrown = (len(self.match.visits[player_index]) - 1) * 3
num_darts_thrown += 3 if result == 0 else result # add 3 or whatever the result was (# darts thrown)
# if winning visit we can complete the stats for the visit/match
if result != 0:
self.match.winning_num_darts = num_darts_thrown
self.match.winning_player_index = player_index
# set averages for player
self.averages[player_index] = (self._starting_total - self.scores[player_index]) / num_darts_thrown
# send a message using RabbitMQ - note this is too implementation specific and should be abstracted, i.e. move
# rabbitmq specific code to a separate service layer class
# Let's do something simple - store the darts so a lifetime 3-dart average can be calculated; this is something
# of a lower priority than the ongoing match, so can be backgrounded / temporally-decoupled somewhat with a
# message queue (handle load better).
# We will need to serialize the darts list - JSON is very suitable for this
username = self.match.players[player_index]
match_type = "X01"
darts = []
for dart in visit.darts:
darts.append([dart.multiplier, dart.segment])
message = [username, match_type, darts]
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) # we could parameterize the host
channel = connection.channel()
channel.queue_declare(queue='player-stats')
channel.basic_publish(exchange='',
routing_key='player-stats',
body=json.dumps(message))
connection.close()
def format_summary(self, player_index, visit):
# Include suggested checkout if remaining score can be checked out in 3 darts
summary = "Last visit was by " + self.match.players[player_index] + " with " + visit.to_string() + "\n"
# if the game was won, can add winning portion to summary
if self.match.winning_player_index != -1:
summary += self.match.players[self.match.winning_player_index] + " wins in "\
+ str(self.match.winning_num_darts) + " darts\n"
# start at 0 for stats[i]
i = 0
for player in self.match.players:
summary = summary + player + ": " + str(self.scores[i]) + " Remaining"
if self.scores[i] in CHECKOUTS.keys():
summary += " (" + CHECKOUTS.get(self.scores[i]) + ")"
if self.first9[i]:
summary += "\n - [First 9 Avg: " + '{0:.2f}'.format(self.first9[i]) + "] "
if self.averages[i]:
summary += "\n - [3-dart Avg: " + '{0:.2f}'.format(self.averages[i] * 3) + "] "
i = i + 1
summary += "\n"
return summary
class X01MatchBuilder:
"""
This could be extended to include dynamic key-value pair parameters (see object_factory.py),
or make it a singleton, etc.
"""
def __init__(self):
pass
# def __call__(self, starting_total):
def __call__(self, **kwargs):
# return X01Match(starting_total)
return X01Match(**kwargs) | 32.218659 | 119 | 0.56936 | import json
import pika as pika
from service.match_service import MatchVisitTemplate
from service.match_service import MatchManager
from datatype.enums import DartMultiplier, MatchStatus
CHECKOUTS = {
2: "D1",
3: "1 D1",
4: "D2",
5: "3 D1",
6: "D3",
7: "5 D1",
8: "D4",
9: "5 D2",
10: "D5",
11: "3 D4",
12: "D6",
13: "3 D5",
14: "D7",
15: "3 D6",
16: "D8",
17: "3 D7",
18: "D9",
19: "3 D8",
20: "D10",
21: "1 D10",
22: "D11",
23: "5 D9",
24: "D12",
25: "3 D11",
26: "D13",
27: "3 D12",
28: "D14",
29: "1 D14",
30: "D15",
31: "3 D14",
32: "D16",
33: "3 D15",
34: "D17",
35: "7 D14",
36: "D18",
37: "5 D16",
38: "D19",
39: "1 D19",
40: "D20",
41: "3 D19",
42: "4 D19",
43: "5 D19",
44: "6 D19",
45: "7 D19",
46: "8 D19",
47: "9 D19",
48: "10 D19",
49: "11 D19",
50: "12 D19",
51: "13 D19",
52: "12 D20",
53: "15 D19",
54: "14 D20",
55: "15 D20",
56: "16 D20",
57: "17 D20",
58: "18 D20",
59: "19 D20",
60: "20 D20",
61: "3 20 D19",
62: "16 8 D19",
63: "14 9 D20",
64: "9 15 D20",
65: "7 18 D20",
66: "6 20 D20",
67: "13 16 D19",
68: "11 19 D19",
69: "10 19 D20",
70: "10 20 D20",
71: "14 19 D19",
72: "18 16 D19",
73: "17 18 D19",
74: "16 18 D20",
75: "15 20 D20",
76: "20 16 D20",
77: "17 20 D20",
78: "18 20 D20",
79: "19 20 D20",
80: "20 20 D20",
81: "D19 3 D20",
82: "D19 4 D20",
83: "5 D20 D19",
84: "6 D20 D19",
85: "7 D19 D20",
86: "D20 8 D19",
87: "9 D19 D20",
88: "D20 10 D19",
89: "11 D19 D20",
90: "12 D19 D20",
91: "13 D19 D20",
92: "D19 14 D20",
93: "15 D19 D20",
94: "16 D20 D19",
95: "17 D20 D19",
96: "D20 18 D19",
97: "19 D20 D19",
98: "D20 20 D19",
99: "19 D20 D20",
100: "20 D20 D20",
101: "6 T19 D19",
102: "D20 D12 D19",
103: "5 T20 D19",
104: "9 T19 D19",
105: "T20 5 D20",
106: "D14 D19 D20",
107: "T19 12 D19",
108: "D19 D20 D15",
109: "T20 9 D20",
110: "T19 13 D20",
111: "16 T19 D19",
112: "D19 D20 D17",
113: "T19 18 D19",
114: "T19 19 D19",
115: "T19 18 D20",
116: "16 T20 D20",
117: "19 T20 D19",
118: "18 T20 D20",
119: "19 T20 D20",
120: "T20 20 D20",
121: "T19 D19 D13",
122: "D20 T20 D11",
123: "T19 D19 D14",
124: "D20 T20 D12",
125: "D15 T19 D19",
126: "T20 D19 D14",
127: "D15 T19 D20",
128: "D15 T20 D19",
129: "D17 T19 D19",
130: "T20 D15 D20",
131: "T19 D17 D20",
132: "T20 D17 D19",
133: "D18 T19 D20",
134: "D18 T20 D19",
135: "T19 D20 D19",
136: "D18 T20 D20",
137: "T19 D20 D20",
138: "T20 D19 D20",
139: "T19 T14 D20",
140: "T20 D20 D20",
141: "T19 T20 D12",
142: "T20 T14 D20",
143: "T15 T20 D19",
144: "T20 T18 D15",
145: "T15 T20 D20",
146: "T16 T20 D19",
147: "T20 T19 D15",
148: "T20 T16 D20",
149: "T20 T19 D16",
150: "T20 T18 D18",
151: "T20 T19 D17",
152: "T20 T18 D19",
153: "T20 T19 D18",
154: "T20 T18 D20",
155: "T20 T19 D19",
156: "T20 T20 D18",
157: "T20 T19 D20",
158: "T20 T20 D19",
159: "No checkout",
160: "T20 T20 D20",
161: "T20 T17 Bull",
162: "No checkout",
163: "No checkout",
164: "T20 T18 Bull",
165: "No checkout",
166: "No checkout",
167: "T20 T19 Bull",
168: "No checkout",
169: "No checkout",
170: "T20 T20 Bull"
}
class X01Match(MatchManager, MatchVisitTemplate):
def __init__(self, starting_total=501):
super().__init__()
self._starting_total = starting_total
self.scores = []
self.averages = []
self.first9 = []
def post_init(self):
for i in range(0, len(self.match.players)):
self.scores.append(self._starting_total)
self.first9.append(None)
self.averages.append(None)
self.match.status = MatchStatus.IN_PROGRESS
def validate_visit(self, player_index, visit):
if self.match.last_player_index is player_index: # Note: this won't work properly for 3 players...
return False, "Player " + str(player_index + 1) + " is not in the correct sequence. Visit ignored."
if self.match.status != MatchStatus.IN_PROGRESS:
return False, "Game has ended."
# print(str(self.match.last_Player_index) + "-" + str(player_index))
# advance the last player index - player's turn will proceed
self.match.last_player_index = player_index
return True, None
def check_winning_condition(self, player_index, visit):
i = 0
for dart in visit.darts:
i = i + 1
if dart.multiplier == DartMultiplier.DOUBLE and self.scores[player_index] - dart.get_score() == 0:
self.scores[player_index] = 0
self.match.status = MatchStatus.FINISHED # game is no longer active
return i # return the dart number
else:
print("deducting for " + str(player_index))
self.scores[player_index] -= dart.get_score() # reduce the player's score
return 0
def record_statistics(self, player_index, visit, result):
if result != 0:
# result was 1 2 or 3 (which dart ended the game)
# so this removes the remaining darts from the visit
visit.remove_trailing_darts(result) # a double finished the game, so ignore any subsequent darts
# adds the visit to the player's visits
self.match.visits[player_index].append(visit)
if len(self.match.visits[player_index]) == 3:
self.first9[player_index] = (self._starting_total - self.scores[player_index]) / 3
num_darts_thrown = (len(self.match.visits[player_index]) - 1) * 3
num_darts_thrown += 3 if result == 0 else result if result != 0:
self.match.winning_num_darts = num_darts_thrown
self.match.winning_player_index = player_index
self.averages[player_index] = (self._starting_total - self.scores[player_index]) / num_darts_thrown
# of a lower priority than the ongoing match, so can be backgrounded / temporally-decoupled somewhat with a
# message queue (handle load better).
# We will need to serialize the darts list - JSON is very suitable for this
username = self.match.players[player_index]
match_type = "X01"
darts = []
for dart in visit.darts:
darts.append([dart.multiplier, dart.segment])
message = [username, match_type, darts]
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) # we could parameterize the host
channel = connection.channel()
channel.queue_declare(queue='player-stats')
channel.basic_publish(exchange='',
routing_key='player-stats',
body=json.dumps(message))
connection.close()
def format_summary(self, player_index, visit):
# Include suggested checkout if remaining score can be checked out in 3 darts
summary = "Last visit was by " + self.match.players[player_index] + " with " + visit.to_string() + "\n"
# if the game was won, can add winning portion to summary
if self.match.winning_player_index != -1:
summary += self.match.players[self.match.winning_player_index] + " wins in "\
+ str(self.match.winning_num_darts) + " darts\n"
# start at 0 for stats[i]
i = 0
for player in self.match.players:
summary = summary + player + ": " + str(self.scores[i]) + " Remaining"
if self.scores[i] in CHECKOUTS.keys():
summary += " (" + CHECKOUTS.get(self.scores[i]) + ")"
if self.first9[i]:
summary += "\n - [First 9 Avg: " + '{0:.2f}'.format(self.first9[i]) + "] "
if self.averages[i]:
summary += "\n - [3-dart Avg: " + '{0:.2f}'.format(self.averages[i] * 3) + "] "
i = i + 1
summary += "\n"
return summary
class X01MatchBuilder:
def __init__(self):
pass
# def __call__(self, starting_total):
def __call__(self, **kwargs):
# return X01Match(starting_total)
return X01Match(**kwargs) | true | true |
1c38fb26366ef519e4f9e2c34379dc99e196f4c0 | 5,752 | py | Python | spond/experimental/openimage/test/test_cooccurrence.py | rekcahpassyla/spond | 8282a4f40bd145243f0e37b8542f0f30b16b96f9 | [
"Apache-2.0"
] | 1 | 2021-02-12T19:08:25.000Z | 2021-02-12T19:08:25.000Z | spond/experimental/openimage/test/test_cooccurrence.py | rekcahpassyla/spond | 8282a4f40bd145243f0e37b8542f0f30b16b96f9 | [
"Apache-2.0"
] | 8 | 2020-09-29T12:36:14.000Z | 2021-02-18T16:25:03.000Z | spond/experimental/openimage/test/test_cooccurrence.py | rekcahpassyla/spond | 8282a4f40bd145243f0e37b8542f0f30b16b96f9 | [
"Apache-2.0"
] | 5 | 2020-09-28T06:55:30.000Z | 2021-01-06T14:21:16.000Z | import os
import unittest
import torch
from spond.experimental.openimage import readfile
class TestProcessing(unittest.TestCase):
def setUp(self):
self.rootdir = "."
# image metadata file, contains image labels
self.imgfn = "test-image-labels.csv"
# labels metadata file, contains labels to descriptions
self.labelsfn = "test-labels.csv"
# annotations file, contains what labels map to what images
self.datafn = "test-annotations.csv"
self.imgdict = readfile.readimgs(self.imgfn, self.rootdir)
self.labelsdict, self.namesdict = readfile.readlabels(self.labelsfn, self.rootdir)
os.environ['TESTING'] = "TESTING"
def lookup(self, image, label):
# given the label strings, return the indexes in the dictionaries
return (
self.imgdict[image],
self.labelsdict[label]
)
def test_process_images(self):
imgdict = self.imgdict
# there are only 3 images in this file
self.assertEquals(len(imgdict), 3)
self.assertEquals(min(imgdict.values()), 0)
self.assertEquals(max(imgdict.values()), len(imgdict) - 1)
def test_process_labels(self):
labelsdict, names = self.labelsdict, self.namesdict
# there are only 9 images in this file
self.assertEquals(len(labelsdict), 9)
self.assertEquals(min(labelsdict.values()), 0)
self.assertEquals(max(labelsdict.values()), len(labelsdict) - 1)
self.assertEquals(len(names), 9)
def test_cooccurrence_matrix_use_confidence(self):
imgdict = readfile.readimgs(self.imgfn, self.rootdir)
labelsdict = readfile.readimgs(self.labelsfn, self.rootdir)
for parallel in (False, True):
coo = readfile.generate_cooccurrence(
self.datafn, labelsdict, imgdict, rootdir=self.rootdir,
use_confidence=True, parallel=parallel
)
# 064jy_j and 05r655 co-occur twice:
# once each in images 497919baa5f92e69 and 0899cae1f10e5f9f
i, j = self.labelsdict["/m/064jy_j"], self.labelsdict["/m/05r655"]
self.assertEquals(coo[(i, j)], coo[(j, i)])
self.assertEquals(coo[(i, j)], 2)
# 064kdv_ and 0271t do not co-occur
i, j = self.labelsdict["/m/064kdv_"], self.labelsdict["/m/0271t"]
self.assertEquals(coo[(i, j)], 0)
# We requested to use_confidence, and
# 0643t and 02smb6 occur in 0899cae1f10e5f9f but with confidence 0
# so they should present but with co-occurrence score of 0,
# with every other label in 0899cae1f10e5f9f
zeroconf = ('/m/0643t', '/m/02smb6')
# all these other items are present only once in 0899cae1f10e5f9f
present = ['/m/0271t',
'/m/0118n_9r',
'/m/04dr76w',
'/m/020p1v']
for label in present:
for other in present:
if label == other:
continue
# each pair should have a score of 1.
i, j = self.labelsdict[label], self.labelsdict[other]
self.assertEquals(coo[(i, j)], coo[(j, i)])
self.assertEquals(coo[(i, j)], 1)
for other in zeroconf:
i, j = self.labelsdict[label], self.labelsdict[other]
self.assertEquals(coo[(i, j)], coo[(j, i)])
self.assertEquals(coo[(i, j)], 0)
def test_cooccurrence_matrix_without_confidence(self):
imgdict = readfile.readimgs(self.imgfn, self.rootdir)
labelsdict = readfile.readimgs(self.labelsfn, self.rootdir)
for parallel in (False, True):
coo = readfile.generate_cooccurrence(
self.datafn, labelsdict, imgdict, rootdir=self.rootdir,
use_confidence=False, parallel=parallel
)
# 064jy_j and 05r655 co-occur twice:
# once each in images 497919baa5f92e69 and 0899cae1f10e5f9f
i, j = self.labelsdict["/m/064jy_j"], self.labelsdict["/m/05r655"]
self.assertEquals(coo[(i, j)], coo[(j, i)])
self.assertEquals(coo[(i, j)], 2)
# 064kdv_ and 0271t do not co-occur
i, j = self.labelsdict["/m/064kdv_"], self.labelsdict["/m/0271t"]
self.assertEquals(coo[(i, j)], 0)
present = ['/m/0271t',
'/m/0118n_9r',
'/m/04dr76w',
'/m/0643t',
'/m/02smb6',
'/m/020p1v']
for label in present:
for other in present:
if label == other:
continue
# each pair should have a score of 1.
i, j = self.labelsdict[label], self.labelsdict[other]
self.assertEquals(coo[(i, j)], coo[(j, i)])
self.assertEquals(coo[(i, j)], 1)
def test_parallel(self):
imgdict = readfile.readimgs(self.imgfn, self.rootdir)
labelsdict = readfile.readimgs(self.labelsfn, self.rootdir)
coo = readfile.generate_cooccurrence(
self.datafn, labelsdict, imgdict, rootdir=self.rootdir,
use_confidence=False, parallel=False
).coalesce()
coo_parallel = readfile.generate_cooccurrence(
self.datafn, labelsdict, imgdict, rootdir=self.rootdir,
use_confidence=False, parallel=True
).coalesce()
torch.allclose(coo.indices(), coo_parallel.indices())
torch.allclose(coo.values(), coo_parallel.values())
| 44.246154 | 90 | 0.573713 | import os
import unittest
import torch
from spond.experimental.openimage import readfile
class TestProcessing(unittest.TestCase):
def setUp(self):
self.rootdir = "."
self.imgfn = "test-image-labels.csv"
self.labelsfn = "test-labels.csv"
self.datafn = "test-annotations.csv"
self.imgdict = readfile.readimgs(self.imgfn, self.rootdir)
self.labelsdict, self.namesdict = readfile.readlabels(self.labelsfn, self.rootdir)
os.environ['TESTING'] = "TESTING"
def lookup(self, image, label):
return (
self.imgdict[image],
self.labelsdict[label]
)
def test_process_images(self):
imgdict = self.imgdict
self.assertEquals(len(imgdict), 3)
self.assertEquals(min(imgdict.values()), 0)
self.assertEquals(max(imgdict.values()), len(imgdict) - 1)
def test_process_labels(self):
labelsdict, names = self.labelsdict, self.namesdict
self.assertEquals(len(labelsdict), 9)
self.assertEquals(min(labelsdict.values()), 0)
self.assertEquals(max(labelsdict.values()), len(labelsdict) - 1)
self.assertEquals(len(names), 9)
def test_cooccurrence_matrix_use_confidence(self):
imgdict = readfile.readimgs(self.imgfn, self.rootdir)
labelsdict = readfile.readimgs(self.labelsfn, self.rootdir)
for parallel in (False, True):
coo = readfile.generate_cooccurrence(
self.datafn, labelsdict, imgdict, rootdir=self.rootdir,
use_confidence=True, parallel=parallel
)
i, j = self.labelsdict["/m/064jy_j"], self.labelsdict["/m/05r655"]
self.assertEquals(coo[(i, j)], coo[(j, i)])
self.assertEquals(coo[(i, j)], 2)
i, j = self.labelsdict["/m/064kdv_"], self.labelsdict["/m/0271t"]
self.assertEquals(coo[(i, j)], 0)
zeroconf = ('/m/0643t', '/m/02smb6')
present = ['/m/0271t',
'/m/0118n_9r',
'/m/04dr76w',
'/m/020p1v']
for label in present:
for other in present:
if label == other:
continue
i, j = self.labelsdict[label], self.labelsdict[other]
self.assertEquals(coo[(i, j)], coo[(j, i)])
self.assertEquals(coo[(i, j)], 1)
for other in zeroconf:
i, j = self.labelsdict[label], self.labelsdict[other]
self.assertEquals(coo[(i, j)], coo[(j, i)])
self.assertEquals(coo[(i, j)], 0)
def test_cooccurrence_matrix_without_confidence(self):
imgdict = readfile.readimgs(self.imgfn, self.rootdir)
labelsdict = readfile.readimgs(self.labelsfn, self.rootdir)
for parallel in (False, True):
coo = readfile.generate_cooccurrence(
self.datafn, labelsdict, imgdict, rootdir=self.rootdir,
use_confidence=False, parallel=parallel
)
i, j = self.labelsdict["/m/064jy_j"], self.labelsdict["/m/05r655"]
self.assertEquals(coo[(i, j)], coo[(j, i)])
self.assertEquals(coo[(i, j)], 2)
i, j = self.labelsdict["/m/064kdv_"], self.labelsdict["/m/0271t"]
self.assertEquals(coo[(i, j)], 0)
present = ['/m/0271t',
'/m/0118n_9r',
'/m/04dr76w',
'/m/0643t',
'/m/02smb6',
'/m/020p1v']
for label in present:
for other in present:
if label == other:
continue
i, j = self.labelsdict[label], self.labelsdict[other]
self.assertEquals(coo[(i, j)], coo[(j, i)])
self.assertEquals(coo[(i, j)], 1)
def test_parallel(self):
imgdict = readfile.readimgs(self.imgfn, self.rootdir)
labelsdict = readfile.readimgs(self.labelsfn, self.rootdir)
coo = readfile.generate_cooccurrence(
self.datafn, labelsdict, imgdict, rootdir=self.rootdir,
use_confidence=False, parallel=False
).coalesce()
coo_parallel = readfile.generate_cooccurrence(
self.datafn, labelsdict, imgdict, rootdir=self.rootdir,
use_confidence=False, parallel=True
).coalesce()
torch.allclose(coo.indices(), coo_parallel.indices())
torch.allclose(coo.values(), coo_parallel.values())
| true | true |
1c38fbc4fcfc4b3815961329dd7b42ba0e0af30a | 3,892 | py | Python | python3/koans/about_iteration.py | Buraisx/python_koans | de12cdfcf6bfb53b1d4d05dc5e567c53963607fe | [
"MIT"
] | null | null | null | python3/koans/about_iteration.py | Buraisx/python_koans | de12cdfcf6bfb53b1d4d05dc5e567c53963607fe | [
"MIT"
] | null | null | null | python3/koans/about_iteration.py | Buraisx/python_koans | de12cdfcf6bfb53b1d4d05dc5e567c53963607fe | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutIteration(Koan):
def test_iterators_are_a_type(self):
it = iter(range(1,6))
total = 0
for num in it:
total += num
self.assertEqual(15 , total)
def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual('alpha', next(stages))
next(stages)
self.assertEqual('gamma', next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegex(err_msg, 'Ran out of iterations')
# ------------------------------------------------------------------
def add_ten(self, item):
return item + 10
def test_map_transforms_elements_of_a_list(self):
seq = [1, 2, 3]
mapped_seq = list()
mapping = map(self.add_ten, seq)
self.assertNotEqual(list, mapping.__class__)
self.assertEqual(map, mapping.__class__)
# In Python 3 built in iterator funcs return iterable view objects
# instead of lists
for item in mapping:
mapped_seq.append(item)
self.assertEqual([11,12,13], mapped_seq)
# Note, iterator methods actually return objects of iter type in
# python 3. In python 2 map() would give you a list.
def test_filter_selects_certain_items_from_a_list(self):
def is_even(item):
return (item % 2) == 0
seq = [1, 2, 3, 4, 5, 6]
even_numbers = list()
for item in filter(is_even, seq):
even_numbers.append(item)
self.assertEqual([2,4,6], even_numbers)
def test_just_return_first_item_found(self):
def is_big_name(item):
return len(item) > 4
names = ["Jim", "Bill", "Clarence", "Doug", "Eli"]
name = None
iterator = filter(is_big_name, names)
try:
name = next(iterator)
except StopIteration:
msg = 'Ran out of big names'
self.assertEqual(__, name)
# ------------------------------------------------------------------
def add(self,accum,item):
return accum + item
def multiply(self,accum,item):
return accum * item
def test_reduce_will_blow_your_mind(self):
import functools
# As of Python 3 reduce() has been demoted from a builtin function
# to the functools module.
result = functools.reduce(self.add, [2, 3, 4])
self.assertEqual(__, result.__class__)
# Reduce() syntax is same as Python 2
self.assertEqual(__, result)
result2 = functools.reduce(self.multiply, [2, 3, 4], 1)
self.assertEqual(__, result2)
# Extra Credit:
# Describe in your own words what reduce does.
# ------------------------------------------------------------------
def test_use_pass_for_iterations_with_no_body(self):
for num in range(1,5):
pass
self.assertEqual(__, num)
# ------------------------------------------------------------------
def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self):
# Ranges are an iterable sequence
result = map(self.add_ten, range(1,4))
self.assertEqual(__, list(result))
try:
file = open("example_file.txt")
try:
def make_upcase(line):
return line.strip().upper()
upcase_lines = map(make_upcase, file.readlines())
self.assertEqual(__, list(upcase_lines))
finally:
# Arg, this is ugly.
# We will figure out how to fix this later.
file.close()
except IOError:
# should never happen
self.fail()
| 28.202899 | 77 | 0.536485 |
from runner.koan import *
class AboutIteration(Koan):
def test_iterators_are_a_type(self):
it = iter(range(1,6))
total = 0
for num in it:
total += num
self.assertEqual(15 , total)
def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual('alpha', next(stages))
next(stages)
self.assertEqual('gamma', next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegex(err_msg, 'Ran out of iterations')
def add_ten(self, item):
return item + 10
def test_map_transforms_elements_of_a_list(self):
seq = [1, 2, 3]
mapped_seq = list()
mapping = map(self.add_ten, seq)
self.assertNotEqual(list, mapping.__class__)
self.assertEqual(map, mapping.__class__)
for item in mapping:
mapped_seq.append(item)
self.assertEqual([11,12,13], mapped_seq)
def test_filter_selects_certain_items_from_a_list(self):
def is_even(item):
return (item % 2) == 0
seq = [1, 2, 3, 4, 5, 6]
even_numbers = list()
for item in filter(is_even, seq):
even_numbers.append(item)
self.assertEqual([2,4,6], even_numbers)
def test_just_return_first_item_found(self):
def is_big_name(item):
return len(item) > 4
names = ["Jim", "Bill", "Clarence", "Doug", "Eli"]
name = None
iterator = filter(is_big_name, names)
try:
name = next(iterator)
except StopIteration:
msg = 'Ran out of big names'
self.assertEqual(__, name)
def add(self,accum,item):
return accum + item
def multiply(self,accum,item):
return accum * item
def test_reduce_will_blow_your_mind(self):
import functools
result = functools.reduce(self.add, [2, 3, 4])
self.assertEqual(__, result.__class__)
self.assertEqual(__, result)
result2 = functools.reduce(self.multiply, [2, 3, 4], 1)
self.assertEqual(__, result2)
def test_use_pass_for_iterations_with_no_body(self):
for num in range(1,5):
pass
self.assertEqual(__, num)
def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self):
result = map(self.add_ten, range(1,4))
self.assertEqual(__, list(result))
try:
file = open("example_file.txt")
try:
def make_upcase(line):
return line.strip().upper()
upcase_lines = map(make_upcase, file.readlines())
self.assertEqual(__, list(upcase_lines))
finally:
file.close()
except IOError:
self.fail()
| true | true |
1c38fc8f7c0c371003b969b2014fe5096eb2f9fd | 333 | py | Python | normflow/simple_flow_model.py | mbaddar1/normalizing-flows | d1409464a65234354b29ed9ea0ede2d12100440c | [
"MIT"
] | null | null | null | normflow/simple_flow_model.py | mbaddar1/normalizing-flows | d1409464a65234354b29ed9ea0ede2d12100440c | [
"MIT"
] | null | null | null | normflow/simple_flow_model.py | mbaddar1/normalizing-flows | d1409464a65234354b29ed9ea0ede2d12100440c | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch
class SimpleFlowModel(nn.Module):
def __init__(self, flows):
super().__init__()
self.flows = nn.ModuleList(flows)
def forward(self, z):
ld = 0.
for flow in self.flows:
z, ld_ = flow(z)
ld += ld_
return z, ld
| 20.8125 | 42 | 0.522523 | import torch.nn as nn
import torch
class SimpleFlowModel(nn.Module):
def __init__(self, flows):
super().__init__()
self.flows = nn.ModuleList(flows)
def forward(self, z):
ld = 0.
for flow in self.flows:
z, ld_ = flow(z)
ld += ld_
return z, ld
| true | true |
1c38fdbc199692b0e9f4a403726624c0ee8ac914 | 476 | py | Python | apps/wallet/migrations/0009_auto_20200707_1324.py | ecoo-app/ecoo-backend | ffe54abcd2e8c1a18ef2fa992c45a10f8232a4a0 | [
"MIT"
] | 1 | 2021-03-31T18:25:44.000Z | 2021-03-31T18:25:44.000Z | apps/wallet/migrations/0009_auto_20200707_1324.py | ecoo-app/ecoo-backend | ffe54abcd2e8c1a18ef2fa992c45a10f8232a4a0 | [
"MIT"
] | null | null | null | apps/wallet/migrations/0009_auto_20200707_1324.py | ecoo-app/ecoo-backend | ffe54abcd2e8c1a18ef2fa992c45a10f8232a4a0 | [
"MIT"
] | 1 | 2021-01-14T09:27:42.000Z | 2021-01-14T09:27:42.000Z | # Generated by Django 2.2.13 on 2020-07-07 13:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("wallet", "0008_auto_20200625_1546"),
]
operations = [
migrations.AlterField(
model_name="wallet",
name="state",
field=models.IntegerField(
choices=[(0, "Unverified"), (1, "Pending"), (2, "Verified")], default=0
),
),
]
| 22.666667 | 87 | 0.554622 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("wallet", "0008_auto_20200625_1546"),
]
operations = [
migrations.AlterField(
model_name="wallet",
name="state",
field=models.IntegerField(
choices=[(0, "Unverified"), (1, "Pending"), (2, "Verified")], default=0
),
),
]
| true | true |
1c38fe8d107ec09d3d5e7b582b79068ac3df3f61 | 18,926 | py | Python | aiodropbox/aiodropbox/utils/__init__.py | bossjones/sandbox | 9ae6a01576d42ff2616f38bad0df2c6d73afe91f | [
"MIT"
] | 4 | 2022-02-23T11:03:32.000Z | 2022-03-07T20:01:42.000Z | aiodropbox/aiodropbox/utils/__init__.py | bossjones/sandbox | 9ae6a01576d42ff2616f38bad0df2c6d73afe91f | [
"MIT"
] | null | null | null | aiodropbox/aiodropbox/utils/__init__.py | bossjones/sandbox | 9ae6a01576d42ff2616f38bad0df2c6d73afe91f | [
"MIT"
] | 1 | 2022-02-23T11:03:32.000Z | 2022-02-23T11:03:32.000Z | # NOTE: Via Red https://github.com/Cog-Creators/Red-DiscordBot/tree/V3/develop/redbot
from __future__ import annotations
import asyncio
from asyncio import Semaphore, as_completed
from asyncio.futures import isfuture
from inspect import isawaitable as _isawaitable, signature as _signature
from itertools import chain
import json
import logging
from pathlib import Path
from typing import (
Any,
AsyncIterable,
AsyncIterator,
Awaitable,
Callable,
Coroutine,
Generator,
Iterable,
Iterator,
List,
Optional,
Tuple,
TypeVar,
Union,
)
from aiodropbox import constants
from aiodropbox.dbx_logger import get_logger # noqa: E402
from aiodropbox.utils.config import Config
# from discord.utils import maybe_coroutine
# __all__ = (
# "bounded_gather",
# "bounded_gather_iter",
# "deduplicate_iterables",
# "AsyncIter",
# "get_end_user_data_statement",
# "get_end_user_data_statement_or_raise",
# )
# log = logging.getLogger("aiodropbox.utils")
LOGGER = get_logger("aiodropbox.utils", provider="Utils", level=logging.DEBUG)
_T = TypeVar("_T")
_S = TypeVar("_S")
async def maybe_coroutine(f, *args, **kwargs):
value = f(*args, **kwargs)
if _isawaitable(value):
return await value
else:
return value
# Benchmarked to be the fastest method.
def deduplicate_iterables(*iterables):
"""
Returns a list of all unique items in ``iterables``, in the order they
were first encountered.
"""
# dict insertion order is guaranteed to be preserved in 3.6+
return list(dict.fromkeys(chain.from_iterable(iterables)))
# https://github.com/PyCQA/pylint/issues/2717
class AsyncFilter(
AsyncIterator[_T], Awaitable[List[_T]]
): # pylint: disable=duplicate-bases
"""Class returned by `async_filter`. See that function for details.
We don't recommend instantiating this class directly.
"""
def __init__(
self,
func: Callable[[_T], Union[bool, Awaitable[bool]]],
iterable: Union[AsyncIterable[_T], Iterable[_T]],
) -> None:
self.__func: Callable[[_T], Union[bool, Awaitable[bool]]] = func
self.__iterable: Union[AsyncIterable[_T], Iterable[_T]] = iterable
# We assign the generator strategy based on the arguments' types
if isinstance(iterable, AsyncIterable):
if asyncio.iscoroutinefunction(func):
self.__generator_instance = self.__async_generator_async_pred()
else:
self.__generator_instance = self.__async_generator_sync_pred()
elif asyncio.iscoroutinefunction(func):
self.__generator_instance = self.__sync_generator_async_pred()
else:
raise TypeError(
"Must be either an async predicate, an async iterable, or both."
)
async def __sync_generator_async_pred(self) -> AsyncIterator[_T]:
for item in self.__iterable:
if await self.__func(item):
yield item
async def __async_generator_sync_pred(self) -> AsyncIterator[_T]:
async for item in self.__iterable:
if self.__func(item):
yield item
async def __async_generator_async_pred(self) -> AsyncIterator[_T]:
async for item in self.__iterable:
if await self.__func(item):
yield item
async def __flatten(self) -> List[_T]:
return [item async for item in self]
def __aiter__(self):
return self
def __await__(self):
# Simply return the generator filled into a list
return self.__flatten().__await__()
async def __anext__(self) -> Awaitable[_T]:
# This will use the generator strategy set in __init__
return self.__generator_instance.__anext__()
def async_filter(
func: Callable[[_T], Union[bool, Awaitable[bool]]],
iterable: Union[AsyncIterable[_T], Iterable[_T]],
) -> AsyncFilter[_T]:
"""Filter an (optionally async) iterable with an (optionally async) predicate.
At least one of the arguments must be async.
Parameters
----------
func : Callable[[T], Union[bool, Awaitable[bool]]]
A function or coroutine function which takes one item of ``iterable``
as an argument, and returns ``True`` or ``False``.
iterable : Union[AsyncIterable[_T], Iterable[_T]]
An iterable or async iterable which is to be filtered.
Raises
------
TypeError
If neither of the arguments are async.
Returns
-------
AsyncFilter[T]
An object which can either be awaited to yield a list of the filtered
items, or can also act as an async iterator to yield items one by one.
"""
return AsyncFilter(func, iterable)
async def async_enumerate(
async_iterable: AsyncIterable[_T], start: int = 0
) -> AsyncIterator[Tuple[int, _T]]:
"""Async iterable version of `enumerate`.
Parameters
----------
async_iterable : AsyncIterable[T]
The iterable to enumerate.
start : int
The index to start from. Defaults to 0.
Returns
-------
AsyncIterator[Tuple[int, T]]
An async iterator of tuples in the form of ``(index, item)``.
"""
async for item in async_iterable:
yield start, item
start += 1
async def _sem_wrapper(sem, task):
async with sem:
return await task
def bounded_gather_iter(
*coros_or_futures, limit: int = 4, semaphore: Optional[Semaphore] = None
) -> Iterator[Awaitable[Any]]:
"""
An iterator that returns tasks as they are ready, but limits the
number of tasks running at a time.
Parameters
----------
*coros_or_futures
The awaitables to run in a bounded concurrent fashion.
limit : Optional[`int`]
The maximum number of concurrent tasks. Used when no ``semaphore``
is passed.
semaphore : Optional[:class:`asyncio.Semaphore`]
The semaphore to use for bounding tasks. If `None`, create one
using ``loop`` and ``limit``.
Raises
------
TypeError
When invalid parameters are passed
"""
loop = asyncio.get_running_loop()
if semaphore is None:
if not isinstance(limit, int) or limit <= 0:
raise TypeError("limit must be an int > 0")
semaphore = Semaphore(limit)
pending = []
for cof in coros_or_futures:
if isfuture(cof) and cof._loop is not loop:
raise ValueError("futures are tied to different event loops")
cof = _sem_wrapper(semaphore, cof)
pending.append(cof)
return as_completed(pending)
def bounded_gather(
*coros_or_futures,
return_exceptions: bool = False,
limit: int = 4,
semaphore: Optional[Semaphore] = None,
) -> Awaitable[List[Any]]:
"""
A semaphore-bounded wrapper to :meth:`asyncio.gather`.
Parameters
----------
*coros_or_futures
The awaitables to run in a bounded concurrent fashion.
return_exceptions : bool
If true, gather exceptions in the result list instead of raising.
limit : Optional[`int`]
The maximum number of concurrent tasks. Used when no ``semaphore``
is passed.
semaphore : Optional[:class:`asyncio.Semaphore`]
The semaphore to use for bounding tasks. If `None`, create one
using ``loop`` and ``limit``.
Raises
------
TypeError
When invalid parameters are passed
"""
loop = asyncio.get_running_loop()
if semaphore is None:
if not isinstance(limit, int) or limit <= 0:
raise TypeError("limit must be an int > 0")
semaphore = Semaphore(limit)
tasks = (_sem_wrapper(semaphore, task) for task in coros_or_futures)
return asyncio.gather(*tasks, return_exceptions=return_exceptions)
class AsyncIter(
AsyncIterator[_T], Awaitable[List[_T]]
): # pylint: disable=duplicate-bases
"""Asynchronous iterator yielding items from ``iterable``
that sleeps for ``delay`` seconds every ``steps`` items.
Parameters
----------
iterable: Iterable
The iterable to make async.
delay: Union[float, int]
The amount of time in seconds to sleep.
steps: int
The number of iterations between sleeps.
Raises
------
ValueError
When ``steps`` is lower than 1.
Examples
--------
>>> from redbot.core.utils import AsyncIter
>>> async for value in AsyncIter(range(3)):
... print(value)
0
1
2
"""
def __init__(
self, iterable: Iterable[_T], delay: Union[float, int] = 0, steps: int = 1
) -> None:
if steps < 1:
raise ValueError("Steps must be higher than or equals to 1")
self._delay = delay
self._iterator = iter(iterable)
self._i = 0
self._steps = steps
self._map = None
def __aiter__(self) -> AsyncIter[_T]:
return self
async def __anext__(self) -> _T:
try:
item = next(self._iterator)
except StopIteration:
raise StopAsyncIteration
if self._i == self._steps:
self._i = 0
await asyncio.sleep(self._delay)
self._i += 1
return await maybe_coroutine(self._map, item) if self._map is not None else item
def __await__(self) -> Generator[Any, None, List[_T]]:
"""Returns a list of the iterable.
Examples
--------
>>> from redbot.core.utils import AsyncIter
>>> iterator = AsyncIter(range(5))
>>> await iterator
[0, 1, 2, 3, 4]
"""
return self.flatten().__await__()
async def next(self, default: Any = ...) -> _T:
"""Returns a next entry of the iterable.
Parameters
----------
default: Optional[Any]
The value to return if the iterator is exhausted.
Raises
------
StopAsyncIteration
When ``default`` is not specified and the iterator has been exhausted.
Examples
--------
>>> from redbot.core.utils import AsyncIter
>>> iterator = AsyncIter(range(5))
>>> await iterator.next()
0
>>> await iterator.next()
1
"""
try:
value = await self.__anext__()
except StopAsyncIteration:
if default is ...:
raise
value = default
return value
async def flatten(self) -> List[_T]:
"""Returns a list of the iterable.
Examples
--------
>>> from redbot.core.utils import AsyncIter
>>> iterator = AsyncIter(range(5))
>>> await iterator.flatten()
[0, 1, 2, 3, 4]
"""
return [item async for item in self]
def filter(
self, function: Callable[[_T], Union[bool, Awaitable[bool]]]
) -> AsyncFilter[_T]:
"""Filter the iterable with an (optionally async) predicate.
Parameters
----------
function: Callable[[T], Union[bool, Awaitable[bool]]]
A function or coroutine function which takes one item of ``iterable``
as an argument, and returns ``True`` or ``False``.
Returns
-------
AsyncFilter[T]
An object which can either be awaited to yield a list of the filtered
items, or can also act as an async iterator to yield items one by one.
Examples
--------
>>> from redbot.core.utils import AsyncIter
>>> def predicate(value):
... return value <= 5
>>> iterator = AsyncIter([1, 10, 5, 100])
>>> async for i in iterator.filter(predicate):
... print(i)
1
5
>>> from redbot.core.utils import AsyncIter
>>> def predicate(value):
... return value <= 5
>>> iterator = AsyncIter([1, 10, 5, 100])
>>> await iterator.filter(predicate)
[1, 5]
"""
return async_filter(function, self)
def enumerate(self, start: int = 0) -> AsyncIterator[Tuple[int, _T]]:
"""Async iterable version of `enumerate`.
Parameters
----------
start: int
The index to start from. Defaults to 0.
Returns
-------
AsyncIterator[Tuple[int, T]]
An async iterator of tuples in the form of ``(index, item)``.
Examples
--------
>>> from redbot.core.utils import AsyncIter
>>> iterator = AsyncIter(['one', 'two', 'three'])
>>> async for i in iterator.enumerate(start=10):
... print(i)
(10, 'one')
(11, 'two')
(12, 'three')
"""
return async_enumerate(self, start)
async def without_duplicates(self) -> AsyncIterator[_T]:
"""
Iterates while omitting duplicated entries.
Examples
--------
>>> from redbot.core.utils import AsyncIter
>>> iterator = AsyncIter([1,2,3,3,4,4,5])
>>> async for i in iterator.without_duplicates():
... print(i)
1
2
3
4
5
"""
_temp = set()
async for item in self:
if item not in _temp:
yield item
_temp.add(item)
del _temp
async def find(
self,
predicate: Callable[[_T], Union[bool, Awaitable[bool]]],
default: Optional[Any] = None,
) -> AsyncIterator[_T]:
"""Calls ``predicate`` over items in iterable and return first value to match.
Parameters
----------
predicate: Union[Callable, Coroutine]
A function that returns a boolean-like result. The predicate provided can be a coroutine.
default: Optional[Any]
The value to return if there are no matches.
Raises
------
TypeError
When ``predicate`` is not a callable.
Examples
--------
>>> from redbot.core.utils import AsyncIter
>>> await AsyncIter(range(3)).find(lambda x: x == 1)
1
"""
while True:
try:
elem = await self.__anext__()
except StopAsyncIteration:
return default
ret = await maybe_coroutine(predicate, elem)
if ret:
return elem
def map(self, func: Callable[[_T], Union[_S, Awaitable[_S]]]) -> AsyncIter[_S]:
"""Set the mapping callable for this instance of `AsyncIter`.
.. important::
This should be called after AsyncIter initialization and before any other of its methods.
Parameters
----------
func: Union[Callable, Coroutine]
The function to map values to. The function provided can be a coroutine.
Raises
------
TypeError
When ``func`` is not a callable.
Examples
--------
>>> from redbot.core.utils import AsyncIter
>>> async for value in AsyncIter(range(3)).map(bool):
... print(value)
False
True
True
"""
if not callable(func):
raise TypeError("Mapping must be a callable.")
self._map = func
return self
def get_end_user_data_statement(file: Union[Path, str]) -> Optional[str]:
"""
This function attempts to get the ``end_user_data_statement`` key from cog's ``info.json``.
This will log the reason if ``None`` is returned.
Parameters
----------
file: Union[pathlib.Path, str]
The ``__file__`` variable for the cog's ``__init__.py`` file.
Returns
-------
Optional[str]
The end user data statement found in the info.json
or ``None`` if there was an issue finding one.
Examples
--------
>>> # In cog's `__init__.py`
>>> from redbot.core.utils import get_end_user_data_statement
>>> __red_end_user_data_statement__ = get_end_user_data_statement(__file__)
>>> def setup(bot):
... ...
"""
try:
file = Path(file).parent.absolute()
info_json = file / "info.json"
statement = get_end_user_data_statement_or_raise(info_json)
except FileNotFoundError:
LOGGER.critical("'%s' does not exist.", str(info_json))
except KeyError:
LOGGER.critical(
"'%s' is missing an entry for 'end_user_data_statement'", str(info_json)
)
except json.JSONDecodeError as exc:
LOGGER.critical("'%s' is not a valid JSON file.", str(info_json), exc_info=exc)
except UnicodeError as exc:
LOGGER.critical("'%s' has a bad encoding.", str(info_json), exc_info=exc)
except Exception as exc:
LOGGER.critical(
"There was an error when trying to load the end user data statement from '%s'.",
str(info_json),
exc_info=exc,
)
else:
return statement
return None
def get_end_user_data_statement_or_raise(file: Union[Path, str]) -> str:
"""
This function attempts to get the ``end_user_data_statement`` key from cog's ``info.json``.
Parameters
----------
file: Union[pathlib.Path, str]
The ``__file__`` variable for the cog's ``__init__.py`` file.
Returns
-------
str
The end user data statement found in the info.json.
Raises
------
FileNotFoundError
When ``info.json`` does not exist.
KeyError
When ``info.json`` does not have the ``end_user_data_statement`` key.
json.JSONDecodeError
When ``info.json`` can't be decoded with ``json.load()``
UnicodeError
When ``info.json`` can't be decoded due to bad encoding.
Exception
Any other exception raised from ``pathlib`` and ``json`` modules
when attempting to parse the ``info.json`` for the ``end_user_data_statement`` key.
"""
file = Path(file).parent.absolute()
info_json = file / "info.json"
with info_json.open(encoding="utf-8") as fp:
return json.load(fp)["end_user_data_statement"]
# SOURCE: https://github.com/makupi/cookiecutter-discord.py-postgres/blob/133702ceb8682ec3927530ac35ad28d47a42802e/%7B%7Bcookiecutter.bot_slug%7D%7D/bot/utils/__init__.py
config = Config()
# SOURCE: https://github.com/makupi/cookiecutter-discord.py-postgres/blob/master/%7B%7Bcookiecutter.bot_slug%7D%7D/bot/utils/__init__.py
def get_guild_prefix(_bot, guild_id):
LOGGER.info(
f"get_guild_prefix(_bot, guild_id) - > get_guild_prefix({_bot}, {guild_id})"
)
prefix = config.prefix
guild_data = _bot.guild_data.get(guild_id, None)
if guild_data is not None:
_prefix = guild_data.get("prefix")
if _prefix is not None:
prefix = _prefix
LOGGER.info(
f"inside get_guild_prefix(_bot, guild_id) - > get_guild_prefix({_bot}, {guild_id})"
)
return prefix
| 29.251932 | 170 | 0.601765 |
from __future__ import annotations
import asyncio
from asyncio import Semaphore, as_completed
from asyncio.futures import isfuture
from inspect import isawaitable as _isawaitable, signature as _signature
from itertools import chain
import json
import logging
from pathlib import Path
from typing import (
Any,
AsyncIterable,
AsyncIterator,
Awaitable,
Callable,
Coroutine,
Generator,
Iterable,
Iterator,
List,
Optional,
Tuple,
TypeVar,
Union,
)
from aiodropbox import constants
from aiodropbox.dbx_logger import get_logger
from aiodropbox.utils.config import Config
LOGGER = get_logger("aiodropbox.utils", provider="Utils", level=logging.DEBUG)
_T = TypeVar("_T")
_S = TypeVar("_S")
async def maybe_coroutine(f, *args, **kwargs):
value = f(*args, **kwargs)
if _isawaitable(value):
return await value
else:
return value
def deduplicate_iterables(*iterables):
return list(dict.fromkeys(chain.from_iterable(iterables)))
class AsyncFilter(
    AsyncIterator[_T], Awaitable[List[_T]]
):
    """Asynchronous analogue of :func:`filter`.

    Accepts any combination of sync/async predicate and sync/async iterable,
    as long as at least one side is asynchronous.  Instances can either be
    iterated with ``async for`` or awaited directly to collect the surviving
    items into a list.
    """

    def __init__(
        self,
        func: Callable[[_T], Union[bool, Awaitable[bool]]],
        iterable: Union[AsyncIterable[_T], Iterable[_T]],
    ) -> None:
        self.__func: Callable[[_T], Union[bool, Awaitable[bool]]] = func
        self.__iterable: Union[AsyncIterable[_T], Iterable[_T]] = iterable
        # Pick the generator strategy matching the sync/async combination.
        if isinstance(iterable, AsyncIterable):
            if asyncio.iscoroutinefunction(func):
                self.__generator_instance = self.__async_generator_async_pred()
            else:
                self.__generator_instance = self.__async_generator_sync_pred()
        elif asyncio.iscoroutinefunction(func):
            self.__generator_instance = self.__sync_generator_async_pred()
        else:
            # Both sides synchronous: the built-in filter() already covers it.
            raise TypeError(
                "Must be either an async predicate, an async iterable, or both."
            )

    async def __sync_generator_async_pred(self) -> AsyncIterator[_T]:
        for item in self.__iterable:
            if await self.__func(item):
                yield item

    async def __async_generator_sync_pred(self) -> AsyncIterator[_T]:
        async for item in self.__iterable:
            if self.__func(item):
                yield item

    async def __async_generator_async_pred(self) -> AsyncIterator[_T]:
        async for item in self.__iterable:
            if await self.__func(item):
                yield item

    async def __flatten(self) -> List[_T]:
        return [item async for item in self]

    def __aiter__(self):
        return self

    def __await__(self):
        # Simply return the generator filled into a list
        return self.__flatten().__await__()

    async def __anext__(self) -> _T:
        # This will use the generator strategy set in __init__.
        # BUG FIX: the inner generator's __anext__() must be awaited here;
        # returning it unawaited made ``async for`` (and __flatten) yield
        # coroutine objects instead of the filtered items.
        return await self.__generator_instance.__anext__()
def async_filter(
    func: Callable[[_T], Union[bool, Awaitable[bool]]],
    iterable: Union[AsyncIterable[_T], Iterable[_T]],
) -> AsyncFilter[_T]:
    """Filter an (async) iterable with an (optionally async) predicate.

    Thin convenience wrapper; see :class:`AsyncFilter` for the accepted
    sync/async combinations.
    """
    filtered = AsyncFilter(func, iterable)
    return filtered
async def async_enumerate(
    async_iterable: AsyncIterable[_T], start: int = 0
) -> AsyncIterator[Tuple[int, _T]]:
    """Async counterpart of :func:`enumerate`.

    Yields ``(index, item)`` pairs from *async_iterable*, counting from
    *start*.
    """
    index = start
    async for element in async_iterable:
        yield index, element
        index += 1
async def _sem_wrapper(sem, task):
async with sem:
return await task
def bounded_gather_iter(
    *coros_or_futures, limit: int = 4, semaphore: Optional[Semaphore] = None
) -> Iterator[Awaitable[Any]]:
    """Like :func:`asyncio.as_completed`, but with bounded concurrency.

    At most ``limit`` of the given awaitables run at once (or as many as
    *semaphore* permits when one is supplied explicitly).  Must be called
    from within a running event loop.
    """
    current_loop = asyncio.get_running_loop()
    if semaphore is None:
        if not isinstance(limit, int) or limit <= 0:
            raise TypeError("limit must be an int > 0")
        semaphore = Semaphore(limit)
    wrapped = []
    for awaitable in coros_or_futures:
        # Futures bound to another loop would dead-lock; reject them early.
        if isfuture(awaitable) and awaitable._loop is not current_loop:
            raise ValueError("futures are tied to different event loops")
        wrapped.append(_sem_wrapper(semaphore, awaitable))
    return as_completed(wrapped)
def bounded_gather(
    *coros_or_futures,
    return_exceptions: bool = False,
    limit: int = 4,
    semaphore: Optional[Semaphore] = None,
) -> Awaitable[List[Any]]:
    """Like :func:`asyncio.gather`, but at most ``limit`` awaitables run
    concurrently (or as many as *semaphore* permits when one is given).
    """
    # Fail fast when called outside a running event loop.
    asyncio.get_running_loop()
    if semaphore is None:
        if not isinstance(limit, int) or limit <= 0:
            raise TypeError("limit must be an int > 0")
        semaphore = Semaphore(limit)
    guarded = [_sem_wrapper(semaphore, task) for task in coros_or_futures]
    return asyncio.gather(*guarded, return_exceptions=return_exceptions)
class AsyncIter(
    AsyncIterator[_T], Awaitable[List[_T]]
):  # pylint: disable=duplicate-bases
    """Asynchronous iterator adapter over a regular iterable.

    Yields the items of *iterable*, optionally sleeping for *delay* seconds
    after every *steps* items so long loops cooperatively yield control to
    the event loop.  Instances may also be awaited directly to collect all
    remaining items into a list, and support chained map / filter /
    enumerate / find operations.
    """
    def __init__(
        self, iterable: Iterable[_T], delay: Union[float, int] = 0, steps: int = 1
    ) -> None:
        if steps < 1:
            raise ValueError("Steps must be higher than or equals to 1")
        self._delay = delay  # seconds slept after every `steps` items
        self._iterator = iter(iterable)
        self._i = 0  # items yielded since the last sleep
        self._steps = steps
        self._map = None  # optional (sync or async) per-item mapping
    def __aiter__(self) -> AsyncIter[_T]:
        return self
    async def __anext__(self) -> _T:
        try:
            item = next(self._iterator)
        except StopIteration:
            # Translate sync exhaustion into the async-iteration protocol.
            raise StopAsyncIteration
        # Sleep once the step counter wraps, *before* producing the item.
        if self._i == self._steps:
            self._i = 0
            await asyncio.sleep(self._delay)
        self._i += 1
        return await maybe_coroutine(self._map, item) if self._map is not None else item
    def __await__(self) -> Generator[Any, None, List[_T]]:
        # Awaiting the iterator itself flattens it into a list.
        return self.flatten().__await__()
    async def next(self, default: Any = ...) -> _T:
        """Return the next item, or *default* (when given) on exhaustion.

        Re-raises StopAsyncIteration when exhausted and no default was
        supplied; the Ellipsis sentinel keeps ``None`` usable as a default.
        """
        try:
            value = await self.__anext__()
        except StopAsyncIteration:
            if default is ...:
                raise
            value = default
        return value
    async def flatten(self) -> List[_T]:
        """Consume all remaining items into a list."""
        return [item async for item in self]
    def filter(
        self, function: Callable[[_T], Union[bool, Awaitable[bool]]]
    ) -> AsyncFilter[_T]:
        """Wrap this iterator in an :class:`AsyncFilter` using *function*."""
        return async_filter(function, self)
    def enumerate(self, start: int = 0) -> AsyncIterator[Tuple[int, _T]]:
        """Yield ``(index, item)`` pairs, counting from *start*."""
        return async_enumerate(self, start)
    async def without_duplicates(self) -> AsyncIterator[_T]:
        """Yield items, skipping any already seen (items must be hashable)."""
        _temp = set()
        async for item in self:
            if item not in _temp:
                yield item
                _temp.add(item)
        del _temp
    async def find(
        self,
        predicate: Callable[[_T], Union[bool, Awaitable[bool]]],
        default: Optional[Any] = None,
    ) -> AsyncIterator[_T]:
        """Return the first item satisfying *predicate*, else *default*.

        *predicate* may be sync or async.  NOTE(review): the return
        annotation looks wrong — this coroutine resolves to a single item
        (or *default*), not an async iterator.
        """
        while True:
            try:
                elem = await self.__anext__()
            except StopAsyncIteration:
                return default
            ret = await maybe_coroutine(predicate, elem)
            if ret:
                return elem
    def map(self, func: Callable[[_T], Union[_S, Awaitable[_S]]]) -> AsyncIter[_S]:
        """Apply *func* (sync or async) to every subsequently yielded item."""
        if not callable(func):
            raise TypeError("Mapping must be a callable.")
        self._map = func
        return self
def get_end_user_data_statement(file: Union[Path, str]) -> Optional[str]:
    """Best-effort load of ``end_user_data_statement`` from a sibling info.json.

    *file* is typically a module's ``__file__``; the statement is read from
    the ``info.json`` located next to it.  On any failure the problem is
    logged and ``None`` is returned instead of raising, making this safe to
    call during module loading.
    """
    # Resolve the path up front so every handler below can reference it.
    # Previously this was computed inside the try-block, so a failure on
    # that first line left ``info_json`` unbound and the handlers raised
    # NameError instead of logging.
    info_json = Path(file).parent.absolute() / "info.json"
    try:
        statement = get_end_user_data_statement_or_raise(info_json)
    except FileNotFoundError:
        LOGGER.critical("'%s' does not exist.", str(info_json))
    except KeyError:
        LOGGER.critical(
            "'%s' is missing an entry for 'end_user_data_statement'", str(info_json)
        )
    except json.JSONDecodeError as exc:
        LOGGER.critical("'%s' is not a valid JSON file.", str(info_json), exc_info=exc)
    except UnicodeError as exc:
        LOGGER.critical("'%s' has a bad encoding.", str(info_json), exc_info=exc)
    except Exception as exc:
        LOGGER.critical(
            "There was an error when trying to load the end user data statement from '%s'.",
            str(info_json),
            exc_info=exc,
        )
    else:
        return statement
    return None
def get_end_user_data_statement_or_raise(file: Union[Path, str]) -> str:
    """Read ``end_user_data_statement`` from the info.json next to *file*.

    Unlike :func:`get_end_user_data_statement`, any I/O, JSON, or
    missing-key error propagates to the caller.
    """
    info_json = Path(file).parent.absolute() / "info.json"
    with info_json.open(encoding="utf-8") as handle:
        metadata = json.load(handle)
    return metadata["end_user_data_statement"]
# SOURCE: https://github.com/makupi/cookiecutter-discord.py-postgres/blob/133702ceb8682ec3927530ac35ad28d47a42802e/%7B%7Bcookiecutter.bot_slug%7D%7D/bot/utils/__init__.py
config = Config()
# SOURCE: https://github.com/makupi/cookiecutter-discord.py-postgres/blob/master/%7B%7Bcookiecutter.bot_slug%7D%7D/bot/utils/__init__.py
def get_guild_prefix(_bot, guild_id):
    """Return the command prefix for *guild_id*.

    A per-guild ``"prefix"`` override stored in ``_bot.guild_data`` wins;
    otherwise the global ``config.prefix`` default is returned.
    """
    LOGGER.info(
        f"get_guild_prefix(_bot, guild_id) - > get_guild_prefix({_bot}, {guild_id})"
    )
    guild_data = _bot.guild_data.get(guild_id, None)
    override = guild_data.get("prefix") if guild_data is not None else None
    prefix = config.prefix if override is None else override
    LOGGER.info(
        f"inside get_guild_prefix(_bot, guild_id) - > get_guild_prefix({_bot}, {guild_id})"
    )
    return prefix
| true | true |
1c38fff85fd13310d28ad936b660726bf37a0a86 | 1,510 | py | Python | docs/source/conf.py | Dlubal-Software/RFEM_Python_Client | 9e29c598dadf380d49677c463931f0be659ccc40 | [
"MIT"
] | 16 | 2021-10-13T21:00:11.000Z | 2022-03-21T11:12:09.000Z | docs/source/conf.py | Dlubal-Software/RFEM_Python_Client | 9e29c598dadf380d49677c463931f0be659ccc40 | [
"MIT"
] | 49 | 2021-10-19T13:18:51.000Z | 2022-03-30T08:20:17.000Z | docs/source/conf.py | Dlubal-Software/RFEM_Python_Client | 9e29c598dadf380d49677c463931f0be659ccc40 | [
"MIT"
] | 7 | 2021-10-13T06:06:24.000Z | 2022-03-29T17:48:39.000Z | import os
import sys

# Make the RFEM package importable for autodoc.  os.path.join keeps the
# separator portable: the previous literal '..\RFEM' relied on a backslash
# and only resolved correctly on Windows.
sys.path.insert(0, os.path.abspath(os.path.join('..', 'RFEM')))

# -- Project information -----------------------------------------------------
project = 'RFEM/RSTAB Webservices'
copyright = '2022, Dlubal Software'
author = 'Dlubal Software'

# The full version, including alpha/beta/rc tags
release = '1.0.2'

# Sphinx Extensions
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx_autodoc_typehints']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_rtd_theme'
# Single merged options dict: the file previously assigned
# ``html_theme_options`` twice, so the navigation settings below were
# silently discarded by the second assignment.
html_theme_options = {
    'collapse_navigation': False,
    'sticky_navigation': True,
    'navigation_depth': 20,
    'includehidden': False,
    'titles_only': False,
    'logo_only': True,
    'display_version': False,
}
html_logo = "pics/logo.png"

# Add any paths that contain custom static files
html_static_path = ['_static']

# Stub out modules that are unavailable (or undesirable to import) at doc
# build time so autodoc can still import the package.  unittest.mock is the
# standard-library home of the former third-party ``mock`` package.
from unittest import mock

MOCK_MODULES = ['RFEM.initModel', 'RFEM.enums', 'math']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()

# -- autodoc behaviour -------------------------------------------------------
autodoc_dumb_docstring = True
autodoc_preserve_defaults = True
autodoc_process_signature = True
autodoc_typehints = "none"
autoclass_content = 'both' | 25.59322 | 86 | 0.691391 | import os
import sys
# NOTE(review): '..\RFEM' uses a Windows-only backslash separator;
# os.path.join('..', 'RFEM') would be portable across platforms.
sys.path.insert(0, os.path.abspath('..\RFEM'))
project = 'RFEM/RSTAB Webservices'
copyright = '2022, Dlubal Software'
author = 'Dlubal Software'
release = '1.0.2'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx_autodoc_typehints']
templates_path = ['_templates']
exclude_patterns = []
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
    'collapse_navigation': False,
    'sticky_navigation': True,
    'navigation_depth': 20,
    'includehidden': False,
    'titles_only': False
}
html_logo = "pics/logo.png"
# NOTE(review): this second assignment replaces the dict above, so the
# navigation options are silently discarded; the two dicts should be merged.
html_theme_options = {
    'logo_only': True,
    'display_version': False,
}
html_static_path = ['_static']
# Stub out modules so autodoc can import the package without them.
import mock
MOCK_MODULES = ['RFEM.initModel', 'RFEM.enums', 'math']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()
# autodoc behaviour switches
autodoc_dumb_docstring = True
autodoc_preserve_defaults = True
autodoc_process_signature = True
autodoc_typehints = "none"
autoclass_content = 'both' | true | true |
1c39007f27f05eeab87f9f4f74dadb32c2d358d9 | 2,433 | py | Python | runtime/translation/models/gnmt_large/gpus=4/stage1.py | vibhatha/pipedream | af6b811f5d01a68e9eb91065e5242fc1a075f279 | [
"MIT"
] | null | null | null | runtime/translation/models/gnmt_large/gpus=4/stage1.py | vibhatha/pipedream | af6b811f5d01a68e9eb91065e5242fc1a075f279 | [
"MIT"
] | null | null | null | runtime/translation/models/gnmt_large/gpus=4/stage1.py | vibhatha/pipedream | af6b811f5d01a68e9eb91065e5242fc1a075f279 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from seq2seq.models.decoder import RecurrentAttention
class Stage1(torch.nn.Module):
    """Pipeline stage 1 of a partitioned GNMT model (PipeDream-generated).

    Holds three residual LSTM blocks plus the first decoder-side layer
    (embedding, recurrent attention, and one LSTM over the concatenated
    attention output and context).
    """

    def __init__(self):
        super(Stage1, self).__init__()
        # Target-side embedding: vocab 32320, dim 1024; index 0 is padding.
        self.layer10 = torch.nn.Embedding(32320, 1024, padding_idx=0)
        self.layer12 = torch.nn.Dropout(p=0.2)
        self.layer13 = torch.nn.LSTM(1024, 1024)
        self.layer16 = torch.nn.Dropout(p=0.2)
        self.layer17 = torch.nn.LSTM(1024, 1024)
        self.layer20 = torch.nn.Dropout(p=0.2)
        self.layer21 = torch.nn.LSTM(1024, 1024)
        self.layer24 = RecurrentAttention(1024, 1024, 1024)
        self.layer27 = torch.nn.Dropout(p=0.2)
        # Input 2048 = attended output (1024) concatenated with context (1024).
        self.layer29 = torch.nn.LSTM(2048, 1024)

    def forward(self, input1, input2, input0, input3):
        """Run the stage.

        Args:
            input1: tensor summed with input0 as a residual — presumably the
                skip connection from the previous pipeline stage (TODO confirm).
            input2: fourth argument to RecurrentAttention — presumably the
                context/sequence lengths (TODO confirm against seq2seq).
            input0: main activation stream from the previous stage.
            input3: token ids fed to the embedding layer.

        Returns:
            Tuple ``(out25, out30)``: the attention layer's third output and
            the first decoder LSTM's output sequence.
        """
        # Clone inputs so the arithmetic below never aliases the caller's
        # tensors (presumably pipeline communication buffers — TODO confirm).
        out0 = input0.clone()
        out1 = input1.clone()
        out2 = input2.clone()
        out3 = input3.clone()
        # Initial hidden states for attention / decoder LSTM (None -> zeros).
        # Dead locals out5-out7 from the generator were removed.
        out8 = None
        out9 = None
        out10 = self.layer10(out3)
        # Residual block 1: dropout -> LSTM -> skip connection.
        out0 = out0 + out1
        out12 = self.layer12(out0)
        out13 = self.layer13(out12)
        out14 = out13[0]
        out14 = out14 + out0
        # Residual block 2.
        out16 = self.layer16(out14)
        out17 = self.layer17(out16)
        out18 = out17[0]
        out18 = out18 + out14
        # Residual block 3.
        out20 = self.layer20(out18)
        out21 = self.layer21(out20)
        out22 = out21[0]
        out22 = out22 + out18
        # Attention, then the first decoder LSTM on [attended ++ context]
        # concatenated along dim 2 (the feature dimension).
        out24 = self.layer24(out10, out9, out22, out2)
        out25 = out24[2]
        out26 = out24[0]
        out27 = self.layer27(out26)
        out28 = torch.cat([out27, out25], 2)
        out29 = self.layer29(out28, out8)
        out30 = out29[0]
        return (out25, out30)

    def _initialize_weights(self):
        """Standard initializers; kept for parity with other generated stages.

        The Conv2d/BatchNorm2d branches never match the layers above but are
        part of the common generated template.
        """
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.BatchNorm2d):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.Linear):
                torch.nn.init.normal_(m.weight, 0, 0.01)
                torch.nn.init.constant_(m.bias, 0)
| 36.863636 | 93 | 0.560625 |
import torch
from seq2seq.models.decoder import RecurrentAttention
class Stage1(torch.nn.Module):
    """Pipeline stage 1 of a partitioned GNMT model (PipeDream-generated).

    Comment-stripped duplicate copy: three residual LSTM blocks plus the
    first decoder layer (embedding, recurrent attention, one LSTM).
    """
    def __init__(self):
        super(Stage1, self).__init__()
        # Target-side embedding: vocab 32320, dim 1024; index 0 is padding.
        self.layer10 = torch.nn.Embedding(32320, 1024, padding_idx=0)
        self.layer12 = torch.nn.Dropout(p=0.2)
        self.layer13 = torch.nn.LSTM(1024, 1024)
        self.layer16 = torch.nn.Dropout(p=0.2)
        self.layer17 = torch.nn.LSTM(1024, 1024)
        self.layer20 = torch.nn.Dropout(p=0.2)
        self.layer21 = torch.nn.LSTM(1024, 1024)
        self.layer24 = RecurrentAttention(1024, 1024, 1024)
        self.layer27 = torch.nn.Dropout(p=0.2)
        self.layer29 = torch.nn.LSTM(2048, 1024)
    def forward(self, input1, input2, input0, input3):
        # Clone inputs so the arithmetic below never aliases the caller's
        # tensors (presumably pipeline communication buffers — TODO confirm).
        out0 = input0.clone()
        out1 = input1.clone()
        out2 = input2.clone()
        out3 = input3.clone()
        # NOTE(review): out5-out7 are never used below (generator artifacts).
        out5 = None
        out6 = None
        out7 = None
        out8 = None
        out9 = None
        out10 = self.layer10(out3)
        # Three residual blocks: dropout -> LSTM -> skip connection.
        out0 = out0 + out1
        out12 = self.layer12(out0)
        out13 = self.layer13(out12)
        out14 = out13[0]
        out14 = out14 + out0
        out16 = self.layer16(out14)
        out17 = self.layer17(out16)
        out18 = out17[0]
        out18 = out18 + out14
        out20 = self.layer20(out18)
        out21 = self.layer21(out20)
        out22 = out21[0]
        out22 = out22 + out18
        # Attention, then the first decoder LSTM over the concatenation of
        # the attended output and the attention context (feature dim 2).
        out24 = self.layer24(out10, out9, out22, out2)
        out25 = out24[2]
        out26 = out24[0]
        out27 = self.layer27(out26)
        out28 = torch.cat([out27, out25], 2)
        out29 = self.layer29(out28, out8)
        out30 = out29[0]
        return (out25, out30)
    def _initialize_weights(self):
        # Standard initializers; Conv2d/BatchNorm2d branches never match the
        # layers above but are part of the common generated template.
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.BatchNorm2d):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.Linear):
                torch.nn.init.normal_(m.weight, 0, 0.01)
                torch.nn.init.constant_(m.bias, 0)
| true | true |
1c390119a9c7cccdc60788e81648e86dbe5b1433 | 3,592 | py | Python | lib/detect/utils/json_utils_test.py | nhattruongpham/ALPR_SSD_CTPN_LPRNet | b20bf84f1ecc56252da37c25d08a0b7d7e10477c | [
"MIT"
] | null | null | null | lib/detect/utils/json_utils_test.py | nhattruongpham/ALPR_SSD_CTPN_LPRNet | b20bf84f1ecc56252da37c25d08a0b7d7e10477c | [
"MIT"
] | null | null | null | lib/detect/utils/json_utils_test.py | nhattruongpham/ALPR_SSD_CTPN_LPRNet | b20bf84f1ecc56252da37c25d08a0b7d7e10477c | [
"MIT"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.image.understanding.object_detection.utils.json_utils."""
import os
import tensorflow as tf
from retail_demo.object_detection.utils import json_utils
class JsonUtilsTest(tf.test.TestCase):
  """Unit tests for json_utils float-precision serialization helpers.

  Covers Dump (file-based), Dumps (string-based) and the PrettyParams
  keyword bundle, including NaN handling via ``allow_nan``.
  """

  def _DumpAndRead(self, value, **dump_kwargs):
    """Serialize *value* with json_utils.Dump to a temp file; return the text.

    Factors out the write/read boilerplate previously repeated verbatim in
    every file-based test below.
    """
    output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
    with tf.gfile.GFile(output_path, 'w') as f:
      json_utils.Dump(value, f, **dump_kwargs)
    with tf.gfile.GFile(output_path, 'r') as f:
      return f.read()

  def testDumpReasonablePrecision(self):
    self.assertEqual(self._DumpAndRead(1.0, float_digits=2), '1.00')

  def testDumpPassExtraParams(self):
    # Extra json.dump kwargs (indent) must be forwarded unchanged.
    self.assertEqual(
        self._DumpAndRead([1.0], float_digits=2, indent=3), '[\n 1.00\n]')

  def testDumpZeroPrecision(self):
    self.assertEqual(self._DumpAndRead(1.0, float_digits=0, indent=3), '1')

  def testDumpUnspecifiedPrecision(self):
    # Without float_digits the float serializes with full precision.
    self.assertEqual(self._DumpAndRead(1.012345), '1.012345')

  def testDumpsReasonablePrecision(self):
    self.assertEqual(json_utils.Dumps(1.0, float_digits=2), '1.00')

  def testDumpsPassExtraParams(self):
    self.assertEqual(json_utils.Dumps([1.0], float_digits=2, indent=3),
                     '[\n 1.00\n]')

  def testDumpsZeroPrecision(self):
    self.assertEqual(json_utils.Dumps(1.0, float_digits=0), '1')

  def testDumpsUnspecifiedPrecision(self):
    self.assertEqual(json_utils.Dumps(1.012345), '1.012345')

  def testPrettyParams(self):
    s = json_utils.Dumps({'v': 1.012345, 'n': 2}, **json_utils.PrettyParams())
    self.assertEqual(s, '{\n "n": 2,\n "v": 1.0123\n}')

  def testPrettyParamsExtraParamsInside(self):
    # PrettyParams forwards allow_nan through to json.dumps.
    s = json_utils.Dumps(
        {'v': 1.012345,
         'n': float('nan')}, **json_utils.PrettyParams(allow_nan=True))
    self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
    with self.assertRaises(ValueError):
      json_utils.Dumps(
          {'v': 1.012345,
           'n': float('nan')}, **json_utils.PrettyParams(allow_nan=False))

  def testPrettyParamsExtraParamsOutside(self):
    s = json_utils.Dumps(
        {'v': 1.012345,
         'n': float('nan')}, allow_nan=True, **json_utils.PrettyParams())
    self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
    with self.assertRaises(ValueError):
      json_utils.Dumps(
          {'v': 1.012345,
           'n': float('nan')}, allow_nan=False, **json_utils.PrettyParams())
if __name__ == '__main__':
tf.test.main()
| 36.653061 | 80 | 0.657016 |
import os
import tensorflow as tf
from retail_demo.object_detection.utils import json_utils
class JsonUtilsTest(tf.test.TestCase):
  """Tests for json_utils.Dump/Dumps float-precision formatting.

  Comment-stripped duplicate copy of the test class above.
  """
  def testDumpReasonablePrecision(self):
    # Dump to a temp file, then read back the exact serialized text.
    output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
    with tf.gfile.GFile(output_path, 'w') as f:
      json_utils.Dump(1.0, f, float_digits=2)
    with tf.gfile.GFile(output_path, 'r') as f:
      self.assertEqual(f.read(), '1.00')
  def testDumpPassExtraParams(self):
    # Extra json.dump kwargs (indent) must be forwarded unchanged.
    output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
    with tf.gfile.GFile(output_path, 'w') as f:
      json_utils.Dump([1.0], f, float_digits=2, indent=3)
    with tf.gfile.GFile(output_path, 'r') as f:
      self.assertEqual(f.read(), '[\n 1.00\n]')
  def testDumpZeroPrecision(self):
    output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
    with tf.gfile.GFile(output_path, 'w') as f:
      json_utils.Dump(1.0, f, float_digits=0, indent=3)
    with tf.gfile.GFile(output_path, 'r') as f:
      self.assertEqual(f.read(), '1')
  def testDumpUnspecifiedPrecision(self):
    # Without float_digits the float serializes with full precision.
    output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
    with tf.gfile.GFile(output_path, 'w') as f:
      json_utils.Dump(1.012345, f)
    with tf.gfile.GFile(output_path, 'r') as f:
      self.assertEqual(f.read(), '1.012345')
  def testDumpsReasonablePrecision(self):
    s = json_utils.Dumps(1.0, float_digits=2)
    self.assertEqual(s, '1.00')
  def testDumpsPassExtraParams(self):
    s = json_utils.Dumps([1.0], float_digits=2, indent=3)
    self.assertEqual(s, '[\n 1.00\n]')
  def testDumpsZeroPrecision(self):
    s = json_utils.Dumps(1.0, float_digits=0)
    self.assertEqual(s, '1')
  def testDumpsUnspecifiedPrecision(self):
    s = json_utils.Dumps(1.012345)
    self.assertEqual(s, '1.012345')
  def testPrettyParams(self):
    s = json_utils.Dumps({'v': 1.012345, 'n': 2}, **json_utils.PrettyParams())
    self.assertEqual(s, '{\n "n": 2,\n "v": 1.0123\n}')
  def testPrettyParamsExtraParamsInside(self):
    # PrettyParams forwards allow_nan through to json.dumps.
    s = json_utils.Dumps(
        {'v': 1.012345,
         'n': float('nan')}, **json_utils.PrettyParams(allow_nan=True))
    self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
    with self.assertRaises(ValueError):
      s = json_utils.Dumps(
          {'v': 1.012345,
           'n': float('nan')}, **json_utils.PrettyParams(allow_nan=False))
  def testPrettyParamsExtraParamsOutside(self):
    s = json_utils.Dumps(
        {'v': 1.012345,
         'n': float('nan')}, allow_nan=True, **json_utils.PrettyParams())
    self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
    with self.assertRaises(ValueError):
      s = json_utils.Dumps(
          {'v': 1.012345,
           'n': float('nan')}, allow_nan=False, **json_utils.PrettyParams())
if __name__ == '__main__':
tf.test.main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.