max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
tests/text_data_test.py
|
lalitaalaalitah/doc_curation
| 0
|
6626451
|
import os
from doc_curation import text_data
def test_get_subunit_list():
unit_info_file = os.path.join(os.path.dirname(text_data.__file__), "vedaH/vAjasaneyi/shatapatha.json")
assert text_data.get_subunit_list(json_file=unit_info_file, unit_path_list=[]) == range(1, 15)
assert text_data.get_subunit_list(json_file=unit_info_file, unit_path_list=[2]) == range(1, 7)
def test_get_subunit_path_list():
unit_info_file = os.path.join(os.path.dirname(text_data.__file__), "vedaH/taittirIya/bhAShya/bhaTTa-bhAskara/saMhitA.json")
assert text_data.get_subunit_path_list(json_file=unit_info_file, unit_path_list=[]) == [[1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [1, 8], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [4, 6], [4, 7], [5, 1], [5, 2], [5, 3], [5, 4], [5, 5], [5, 6], [5, 7], [6, 1], [6, 2], [6, 3], [6, 4], [6, 5], [6, 6], [7, 1], [7, 2], [7, 3], [7, 4], [7, 5]]
|
import os
from doc_curation import text_data
def test_get_subunit_list():
unit_info_file = os.path.join(os.path.dirname(text_data.__file__), "vedaH/vAjasaneyi/shatapatha.json")
assert text_data.get_subunit_list(json_file=unit_info_file, unit_path_list=[]) == range(1, 15)
assert text_data.get_subunit_list(json_file=unit_info_file, unit_path_list=[2]) == range(1, 7)
def test_get_subunit_path_list():
unit_info_file = os.path.join(os.path.dirname(text_data.__file__), "vedaH/taittirIya/bhAShya/bhaTTa-bhAskara/saMhitA.json")
assert text_data.get_subunit_path_list(json_file=unit_info_file, unit_path_list=[]) == [[1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [1, 8], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [4, 6], [4, 7], [5, 1], [5, 2], [5, 3], [5, 4], [5, 5], [5, 6], [5, 7], [6, 1], [6, 2], [6, 3], [6, 4], [6, 5], [6, 6], [7, 1], [7, 2], [7, 3], [7, 4], [7, 5]]
|
none
| 1
| 2.416558
| 2
|
|
holoviews/plotting/bokeh/annotation.py
|
TheoMathurin/holoviews
| 304
|
6626452
|
<filename>holoviews/plotting/bokeh/annotation.py
from collections import defaultdict
try:
from html import escape
except:
from cgi import escape
import param
import numpy as np
from bokeh.models import BoxAnnotation, Span, Arrow, Slope
from panel.models import HTML
try:
from bokeh.models.arrow_heads import TeeHead, NormalHead
arrow_start = {'<->': NormalHead, '<|-|>': NormalHead}
arrow_end = {'->': NormalHead, '-[': TeeHead, '-|>': NormalHead,
'-': None}
except:
from bokeh.models.arrow_heads import OpenHead, NormalHead
arrow_start = {'<->': NormalHead, '<|-|>': NormalHead}
arrow_end = {'->': NormalHead, '-[': OpenHead, '-|>': NormalHead,
'-': None}
from bokeh.transform import dodge
from ...core.util import datetime_types, dimension_sanitizer
from ...element import HLine, VLine, VSpan
from ..plot import GenericElementPlot
from .element import AnnotationPlot, ElementPlot, CompositeElementPlot, ColorbarPlot
from .selection import BokehOverlaySelectionDisplay
from .styles import base_properties, fill_properties, line_properties, text_properties
from .plot import BokehPlot
from .util import date_to_integer
class TextPlot(ElementPlot, AnnotationPlot):
style_opts = text_properties+['color', 'angle', 'visible']
_plot_methods = dict(single='text', batched='text')
selection_display = None
def get_data(self, element, ranges, style):
mapping = dict(x='x', y='y', text='text')
if self.static_source:
return dict(x=[], y=[], text=[]), mapping, style
if self.invert_axes:
data = dict(x=[element.y], y=[element.x])
else:
data = dict(x=[element.x], y=[element.y])
self._categorize_data(data, ('x', 'y'), element.dimensions())
data['text'] = [element.text]
if 'text_align' not in style:
style['text_align'] = element.halign
baseline = 'middle' if element.valign == 'center' else element.valign
if 'text_baseline' not in style:
style['text_baseline'] = baseline
if 'text_font_size' not in style:
style['text_font_size'] = '%dPt' % element.fontsize
if 'color' in style:
style['text_color'] = style.pop('color')
style['angle'] = np.deg2rad(style.get('angle', element.rotation))
return (data, mapping, style)
def get_batched_data(self, element, ranges=None):
data = defaultdict(list)
zorders = self._updated_zorders(element)
for (key, el), zorder in zip(element.data.items(), zorders):
style = self.lookup_options(element.last, 'style')
style = style.max_cycles(len(self.ordering))[zorder]
eldata, elmapping, style = self.get_data(el, ranges, style)
for k, eld in eldata.items():
data[k].extend(eld)
return data, elmapping, style
def get_extents(self, element, ranges=None, range_type='combined'):
return None, None, None, None
class LabelsPlot(ColorbarPlot, AnnotationPlot):
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
xoffset = param.Number(default=None, doc="""
Amount of offset to apply to labels along x-axis.""")
yoffset = param.Number(default=None, doc="""
Amount of offset to apply to labels along x-axis.""")
# Deprecated options
color_index = param.ClassSelector(default=None, class_=(str, int),
allow_None=True, doc="""
Deprecated in favor of color style mapping, e.g. `color=dim('color')`""")
selection_display = BokehOverlaySelectionDisplay()
style_opts = base_properties + text_properties + ['cmap', 'angle']
_nonvectorized_styles = base_properties + ['cmap']
_plot_methods = dict(single='text', batched='text')
_batched_style_opts = text_properties
def get_data(self, element, ranges, style):
style = self.style[self.cyclic_index]
if 'angle' in style and isinstance(style['angle'], (int, float)):
style['angle'] = np.deg2rad(style.get('angle', 0))
dims = element.dimensions()
coords = (1, 0) if self.invert_axes else (0, 1)
xdim, ydim, tdim = (dimension_sanitizer(dims[i].name) for i in coords+(2,))
mapping = dict(x=xdim, y=ydim, text=tdim)
data = {d: element.dimension_values(d) for d in (xdim, ydim)}
if self.xoffset is not None:
mapping['x'] = dodge(xdim, self.xoffset)
if self.yoffset is not None:
mapping['y'] = dodge(ydim, self.yoffset)
data[tdim] = [dims[2].pprint_value(v) for v in element.dimension_values(2)]
self._categorize_data(data, (xdim, ydim), element.dimensions())
cdim = element.get_dimension(self.color_index)
if cdim is None:
return data, mapping, style
cdata, cmapping = self._get_color_data(element, ranges, style, name='text_color')
if dims[2] is cdim and cdata:
# If color dim is same as text dim, rename color column
data['text_color'] = cdata[tdim]
mapping['text_color'] = dict(cmapping['text_color'], field='text_color')
else:
data.update(cdata)
mapping.update(cmapping)
return data, mapping, style
class LineAnnotationPlot(ElementPlot, AnnotationPlot):
style_opts = line_properties + ['level', 'visible']
apply_ranges = param.Boolean(default=False, doc="""
Whether to include the annotation in axis range calculations.""")
_allow_implicit_categories = False
_plot_methods = dict(single='Span')
selection_display = None
def get_data(self, element, ranges, style):
data, mapping = {}, {}
dim = 'width' if isinstance(element, HLine) else 'height'
if self.invert_axes:
dim = 'width' if dim == 'height' else 'height'
mapping['dimension'] = dim
loc = element.data
if isinstance(loc, datetime_types):
loc = date_to_integer(loc)
mapping['location'] = loc
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
box = Span(level=properties.get('level', 'glyph'), **mapping)
plot.renderers.append(box)
return None, box
def get_extents(self, element, ranges=None, range_type='combined'):
loc = element.data
if isinstance(element, VLine):
dim = 'x'
elif isinstance(element, HLine):
dim = 'y'
if self.invert_axes:
dim = 'x' if dim == 'y' else 'x'
ranges[dim]['soft'] = loc, loc
return super().get_extents(element, ranges, range_type)
class BoxAnnotationPlot(ElementPlot, AnnotationPlot):
apply_ranges = param.Boolean(default=False, doc="""
Whether to include the annotation in axis range calculations.""")
style_opts = line_properties + fill_properties + ['level', 'visible']
_allow_implicit_categories = False
_plot_methods = dict(single='BoxAnnotation')
selection_display = None
def get_data(self, element, ranges, style):
data, mapping = {}, {}
kwd_dim1 = 'left' if isinstance(element, VSpan) else 'bottom'
kwd_dim2 = 'right' if isinstance(element, VSpan) else 'top'
if self.invert_axes:
kwd_dim1 = 'bottom' if kwd_dim1 == 'left' else 'left'
kwd_dim2 = 'top' if kwd_dim2 == 'right' else 'right'
locs = element.data
if isinstance(locs, datetime_types):
locs = [date_to_integer(loc) for loc in locs]
mapping[kwd_dim1] = locs[0]
mapping[kwd_dim2] = locs[1]
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
box = BoxAnnotation(level=properties.get('level', 'glyph'), **mapping)
plot.renderers.append(box)
return None, box
class SlopePlot(ElementPlot, AnnotationPlot):
style_opts = line_properties + ['level']
_plot_methods = dict(single='Slope')
selection_display = None
def get_data(self, element, ranges, style):
data, mapping = {}, {}
gradient, intercept = element.data
if self.invert_axes:
if gradient == 0:
gradient = np.inf, np.inf
else:
gradient, intercept = 1/gradient, -(intercept/gradient)
mapping['gradient'] = gradient
mapping['y_intercept'] = intercept
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
slope = Slope(level=properties.get('level', 'glyph'), **mapping)
plot.add_layout(slope)
return None, slope
def get_extents(self, element, ranges=None, range_type='combined'):
return None, None, None, None
class SplinePlot(ElementPlot, AnnotationPlot):
"""
Draw the supplied Spline annotation (see Spline docstring).
Does not support matplotlib Path codes.
"""
style_opts = line_properties + ['visible']
_plot_methods = dict(single='bezier')
selection_display = None
def get_data(self, element, ranges, style):
if self.invert_axes:
data_attrs = ['y0', 'x0', 'cy0', 'cx0', 'cy1', 'cx1', 'y1', 'x1']
else:
data_attrs = ['x0', 'y0', 'cx0', 'cy0', 'cx1', 'cy1', 'x1', 'y1']
verts = np.array(element.data[0])
inds = np.where(np.array(element.data[1])==1)[0]
data = {da: [] for da in data_attrs}
skipped = False
for vs in np.split(verts, inds[1:]):
if len(vs) != 4:
skipped = len(vs) > 1
continue
for x, y, xl, yl in zip(vs[:, 0], vs[:, 1], data_attrs[::2], data_attrs[1::2]):
data[xl].append(x)
data[yl].append(y)
if skipped:
self.param.warning(
'Bokeh SplinePlot only support cubic splines, unsupported '
'splines were skipped during plotting.')
data = {da: data[da] for da in data_attrs}
return (data, dict(zip(data_attrs, data_attrs)), style)
class ArrowPlot(CompositeElementPlot, AnnotationPlot):
style_opts = (['arrow_%s' % p for p in line_properties+fill_properties+['size']] +
text_properties)
_style_groups = {'arrow': 'arrow', 'text': 'text'}
_draw_order = ['arrow_1', 'text_1']
selection_display = None
def get_data(self, element, ranges, style):
plot = self.state
label_mapping = dict(x='x', y='y', text='text')
arrow_mapping = dict(x_start='x_start', x_end='x_end',
y_start='y_start', y_end='y_end')
# Compute arrow
x1, y1 = element.x, element.y
axrange = plot.x_range if self.invert_axes else plot.y_range
span = (axrange.end - axrange.start) / 6.
if element.direction == '^':
x2, y2 = x1, y1-span
label_mapping['text_baseline'] = 'top'
elif element.direction == '<':
x2, y2 = x1+span, y1
label_mapping['text_align'] = 'left'
label_mapping['text_baseline'] = 'middle'
elif element.direction == '>':
x2, y2 = x1-span, y1
label_mapping['text_align'] = 'right'
label_mapping['text_baseline'] = 'middle'
else:
x2, y2 = x1, y1+span
label_mapping['text_baseline'] = 'bottom'
arrow_data = {'x_end': [x1], 'y_end': [y1],
'x_start': [x2], 'y_start': [y2]}
# Define arrowhead
arrow_mapping['arrow_start'] = arrow_start.get(element.arrowstyle, None)
arrow_mapping['arrow_end'] = arrow_end.get(element.arrowstyle, NormalHead)
# Compute label
if self.invert_axes:
label_data = dict(x=[y2], y=[x2])
else:
label_data = dict(x=[x2], y=[y2])
label_data['text'] = [element.text]
return ({'text_1': label_data, 'arrow_1': arrow_data},
{'arrow_1': arrow_mapping, 'text_1': label_mapping}, style)
def _init_glyph(self, plot, mapping, properties, key):
"""
Returns a Bokeh glyph object.
"""
properties = {k: v for k, v in properties.items() if 'legend' not in k}
if key == 'arrow_1':
source = properties.pop('source')
arrow_end = mapping.pop('arrow_end')
arrow_start = mapping.pop('arrow_start')
for p in ('alpha', 'color'):
v = properties.pop(p, None)
for t in ('line', 'fill'):
if v is None:
continue
key = '_'.join([t, p])
if key not in properties:
properties[key] = v
start = arrow_start(**properties) if arrow_start else None
end = arrow_end(**properties) if arrow_end else None
line_props = {p: v for p, v in properties.items() if p.startswith('line_')}
renderer = Arrow(start=start, end=end, source=source,
**dict(line_props, **mapping))
glyph = renderer
else:
properties = {p if p == 'source' else 'text_'+p: v
for p, v in properties.items()}
renderer, glyph = super()._init_glyph(
plot, mapping, properties, key)
plot.renderers.append(renderer)
return renderer, glyph
def get_extents(self, element, ranges=None, range_type='combined'):
return None, None, None, None
class DivPlot(BokehPlot, GenericElementPlot, AnnotationPlot):
height = param.Number(default=300)
width = param.Number(default=300)
sizing_mode = param.ObjectSelector(default=None, objects=[
'fixed', 'stretch_width', 'stretch_height', 'stretch_both',
'scale_width', 'scale_height', 'scale_both', None], doc="""
How the component should size itself.
* "fixed" :
Component is not responsive. It will retain its original
width and height regardless of any subsequent browser window
resize events.
* "stretch_width"
Component will responsively resize to stretch to the
available width, without maintaining any aspect ratio. The
height of the component depends on the type of the component
and may be fixed or fit to component's contents.
* "stretch_height"
Component will responsively resize to stretch to the
available height, without maintaining any aspect ratio. The
width of the component depends on the type of the component
and may be fixed or fit to component's contents.
* "stretch_both"
Component is completely responsive, independently in width
and height, and will occupy all the available horizontal and
vertical space, even if this changes the aspect ratio of the
component.
* "scale_width"
Component will responsively resize to stretch to the
available width, while maintaining the original or provided
aspect ratio.
* "scale_height"
Component will responsively resize to stretch to the
available height, while maintaining the original or provided
aspect ratio.
* "scale_both"
Component will responsively resize to both the available
width and height, while maintaining the original or provided
aspect ratio.
""")
finalize_hooks = param.HookList(default=[], doc="""
Deprecated; use hooks options instead.""")
hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing a plot. The
hook is passed the plot object and the displayed element, and
other plotting handles can be accessed via plot.handles.""")
_stream_data = False
selection_display = None
def __init__(self, element, plot=None, **params):
super().__init__(element, **params)
self.callbacks = []
self.handles = {} if plot is None else self.handles['plot']
self.static = len(self.hmap) == 1 and len(self.keys) == len(self.hmap)
def get_data(self, element, ranges, style):
return element.data, {}, style
def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
element = self.hmap.last
key = self.keys[-1]
self.current_frame = element
self.current_key = key
data, _, _ = self.get_data(element, ranges, {})
div = HTML(text=escape(data), width=self.width, height=self.height,
sizing_mode=self.sizing_mode)
self.handles['plot'] = div
self._execute_hooks(element)
self.drawn = True
return div
def update_frame(self, key, ranges=None, plot=None):
"""
Updates an existing plot with data corresponding
to the key.
"""
element = self._get_frame(key)
text, _, _ = self.get_data(element, ranges, {})
self.state.update(text=text, sizing_mode=self.sizing_mode)
|
<filename>holoviews/plotting/bokeh/annotation.py
from collections import defaultdict
try:
from html import escape
except:
from cgi import escape
import param
import numpy as np
from bokeh.models import BoxAnnotation, Span, Arrow, Slope
from panel.models import HTML
try:
from bokeh.models.arrow_heads import TeeHead, NormalHead
arrow_start = {'<->': NormalHead, '<|-|>': NormalHead}
arrow_end = {'->': NormalHead, '-[': TeeHead, '-|>': NormalHead,
'-': None}
except:
from bokeh.models.arrow_heads import OpenHead, NormalHead
arrow_start = {'<->': NormalHead, '<|-|>': NormalHead}
arrow_end = {'->': NormalHead, '-[': OpenHead, '-|>': NormalHead,
'-': None}
from bokeh.transform import dodge
from ...core.util import datetime_types, dimension_sanitizer
from ...element import HLine, VLine, VSpan
from ..plot import GenericElementPlot
from .element import AnnotationPlot, ElementPlot, CompositeElementPlot, ColorbarPlot
from .selection import BokehOverlaySelectionDisplay
from .styles import base_properties, fill_properties, line_properties, text_properties
from .plot import BokehPlot
from .util import date_to_integer
class TextPlot(ElementPlot, AnnotationPlot):
style_opts = text_properties+['color', 'angle', 'visible']
_plot_methods = dict(single='text', batched='text')
selection_display = None
def get_data(self, element, ranges, style):
mapping = dict(x='x', y='y', text='text')
if self.static_source:
return dict(x=[], y=[], text=[]), mapping, style
if self.invert_axes:
data = dict(x=[element.y], y=[element.x])
else:
data = dict(x=[element.x], y=[element.y])
self._categorize_data(data, ('x', 'y'), element.dimensions())
data['text'] = [element.text]
if 'text_align' not in style:
style['text_align'] = element.halign
baseline = 'middle' if element.valign == 'center' else element.valign
if 'text_baseline' not in style:
style['text_baseline'] = baseline
if 'text_font_size' not in style:
style['text_font_size'] = '%dPt' % element.fontsize
if 'color' in style:
style['text_color'] = style.pop('color')
style['angle'] = np.deg2rad(style.get('angle', element.rotation))
return (data, mapping, style)
def get_batched_data(self, element, ranges=None):
data = defaultdict(list)
zorders = self._updated_zorders(element)
for (key, el), zorder in zip(element.data.items(), zorders):
style = self.lookup_options(element.last, 'style')
style = style.max_cycles(len(self.ordering))[zorder]
eldata, elmapping, style = self.get_data(el, ranges, style)
for k, eld in eldata.items():
data[k].extend(eld)
return data, elmapping, style
def get_extents(self, element, ranges=None, range_type='combined'):
return None, None, None, None
class LabelsPlot(ColorbarPlot, AnnotationPlot):
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
xoffset = param.Number(default=None, doc="""
Amount of offset to apply to labels along x-axis.""")
yoffset = param.Number(default=None, doc="""
Amount of offset to apply to labels along x-axis.""")
# Deprecated options
color_index = param.ClassSelector(default=None, class_=(str, int),
allow_None=True, doc="""
Deprecated in favor of color style mapping, e.g. `color=dim('color')`""")
selection_display = BokehOverlaySelectionDisplay()
style_opts = base_properties + text_properties + ['cmap', 'angle']
_nonvectorized_styles = base_properties + ['cmap']
_plot_methods = dict(single='text', batched='text')
_batched_style_opts = text_properties
def get_data(self, element, ranges, style):
style = self.style[self.cyclic_index]
if 'angle' in style and isinstance(style['angle'], (int, float)):
style['angle'] = np.deg2rad(style.get('angle', 0))
dims = element.dimensions()
coords = (1, 0) if self.invert_axes else (0, 1)
xdim, ydim, tdim = (dimension_sanitizer(dims[i].name) for i in coords+(2,))
mapping = dict(x=xdim, y=ydim, text=tdim)
data = {d: element.dimension_values(d) for d in (xdim, ydim)}
if self.xoffset is not None:
mapping['x'] = dodge(xdim, self.xoffset)
if self.yoffset is not None:
mapping['y'] = dodge(ydim, self.yoffset)
data[tdim] = [dims[2].pprint_value(v) for v in element.dimension_values(2)]
self._categorize_data(data, (xdim, ydim), element.dimensions())
cdim = element.get_dimension(self.color_index)
if cdim is None:
return data, mapping, style
cdata, cmapping = self._get_color_data(element, ranges, style, name='text_color')
if dims[2] is cdim and cdata:
# If color dim is same as text dim, rename color column
data['text_color'] = cdata[tdim]
mapping['text_color'] = dict(cmapping['text_color'], field='text_color')
else:
data.update(cdata)
mapping.update(cmapping)
return data, mapping, style
class LineAnnotationPlot(ElementPlot, AnnotationPlot):
style_opts = line_properties + ['level', 'visible']
apply_ranges = param.Boolean(default=False, doc="""
Whether to include the annotation in axis range calculations.""")
_allow_implicit_categories = False
_plot_methods = dict(single='Span')
selection_display = None
def get_data(self, element, ranges, style):
data, mapping = {}, {}
dim = 'width' if isinstance(element, HLine) else 'height'
if self.invert_axes:
dim = 'width' if dim == 'height' else 'height'
mapping['dimension'] = dim
loc = element.data
if isinstance(loc, datetime_types):
loc = date_to_integer(loc)
mapping['location'] = loc
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
box = Span(level=properties.get('level', 'glyph'), **mapping)
plot.renderers.append(box)
return None, box
def get_extents(self, element, ranges=None, range_type='combined'):
loc = element.data
if isinstance(element, VLine):
dim = 'x'
elif isinstance(element, HLine):
dim = 'y'
if self.invert_axes:
dim = 'x' if dim == 'y' else 'x'
ranges[dim]['soft'] = loc, loc
return super().get_extents(element, ranges, range_type)
class BoxAnnotationPlot(ElementPlot, AnnotationPlot):
apply_ranges = param.Boolean(default=False, doc="""
Whether to include the annotation in axis range calculations.""")
style_opts = line_properties + fill_properties + ['level', 'visible']
_allow_implicit_categories = False
_plot_methods = dict(single='BoxAnnotation')
selection_display = None
def get_data(self, element, ranges, style):
data, mapping = {}, {}
kwd_dim1 = 'left' if isinstance(element, VSpan) else 'bottom'
kwd_dim2 = 'right' if isinstance(element, VSpan) else 'top'
if self.invert_axes:
kwd_dim1 = 'bottom' if kwd_dim1 == 'left' else 'left'
kwd_dim2 = 'top' if kwd_dim2 == 'right' else 'right'
locs = element.data
if isinstance(locs, datetime_types):
locs = [date_to_integer(loc) for loc in locs]
mapping[kwd_dim1] = locs[0]
mapping[kwd_dim2] = locs[1]
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
box = BoxAnnotation(level=properties.get('level', 'glyph'), **mapping)
plot.renderers.append(box)
return None, box
class SlopePlot(ElementPlot, AnnotationPlot):
style_opts = line_properties + ['level']
_plot_methods = dict(single='Slope')
selection_display = None
def get_data(self, element, ranges, style):
data, mapping = {}, {}
gradient, intercept = element.data
if self.invert_axes:
if gradient == 0:
gradient = np.inf, np.inf
else:
gradient, intercept = 1/gradient, -(intercept/gradient)
mapping['gradient'] = gradient
mapping['y_intercept'] = intercept
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
slope = Slope(level=properties.get('level', 'glyph'), **mapping)
plot.add_layout(slope)
return None, slope
def get_extents(self, element, ranges=None, range_type='combined'):
return None, None, None, None
class SplinePlot(ElementPlot, AnnotationPlot):
"""
Draw the supplied Spline annotation (see Spline docstring).
Does not support matplotlib Path codes.
"""
style_opts = line_properties + ['visible']
_plot_methods = dict(single='bezier')
selection_display = None
def get_data(self, element, ranges, style):
if self.invert_axes:
data_attrs = ['y0', 'x0', 'cy0', 'cx0', 'cy1', 'cx1', 'y1', 'x1']
else:
data_attrs = ['x0', 'y0', 'cx0', 'cy0', 'cx1', 'cy1', 'x1', 'y1']
verts = np.array(element.data[0])
inds = np.where(np.array(element.data[1])==1)[0]
data = {da: [] for da in data_attrs}
skipped = False
for vs in np.split(verts, inds[1:]):
if len(vs) != 4:
skipped = len(vs) > 1
continue
for x, y, xl, yl in zip(vs[:, 0], vs[:, 1], data_attrs[::2], data_attrs[1::2]):
data[xl].append(x)
data[yl].append(y)
if skipped:
self.param.warning(
'Bokeh SplinePlot only support cubic splines, unsupported '
'splines were skipped during plotting.')
data = {da: data[da] for da in data_attrs}
return (data, dict(zip(data_attrs, data_attrs)), style)
class ArrowPlot(CompositeElementPlot, AnnotationPlot):
style_opts = (['arrow_%s' % p for p in line_properties+fill_properties+['size']] +
text_properties)
_style_groups = {'arrow': 'arrow', 'text': 'text'}
_draw_order = ['arrow_1', 'text_1']
selection_display = None
def get_data(self, element, ranges, style):
plot = self.state
label_mapping = dict(x='x', y='y', text='text')
arrow_mapping = dict(x_start='x_start', x_end='x_end',
y_start='y_start', y_end='y_end')
# Compute arrow
x1, y1 = element.x, element.y
axrange = plot.x_range if self.invert_axes else plot.y_range
span = (axrange.end - axrange.start) / 6.
if element.direction == '^':
x2, y2 = x1, y1-span
label_mapping['text_baseline'] = 'top'
elif element.direction == '<':
x2, y2 = x1+span, y1
label_mapping['text_align'] = 'left'
label_mapping['text_baseline'] = 'middle'
elif element.direction == '>':
x2, y2 = x1-span, y1
label_mapping['text_align'] = 'right'
label_mapping['text_baseline'] = 'middle'
else:
x2, y2 = x1, y1+span
label_mapping['text_baseline'] = 'bottom'
arrow_data = {'x_end': [x1], 'y_end': [y1],
'x_start': [x2], 'y_start': [y2]}
# Define arrowhead
arrow_mapping['arrow_start'] = arrow_start.get(element.arrowstyle, None)
arrow_mapping['arrow_end'] = arrow_end.get(element.arrowstyle, NormalHead)
# Compute label
if self.invert_axes:
label_data = dict(x=[y2], y=[x2])
else:
label_data = dict(x=[x2], y=[y2])
label_data['text'] = [element.text]
return ({'text_1': label_data, 'arrow_1': arrow_data},
{'arrow_1': arrow_mapping, 'text_1': label_mapping}, style)
def _init_glyph(self, plot, mapping, properties, key):
"""
Returns a Bokeh glyph object.
"""
properties = {k: v for k, v in properties.items() if 'legend' not in k}
if key == 'arrow_1':
source = properties.pop('source')
arrow_end = mapping.pop('arrow_end')
arrow_start = mapping.pop('arrow_start')
for p in ('alpha', 'color'):
v = properties.pop(p, None)
for t in ('line', 'fill'):
if v is None:
continue
key = '_'.join([t, p])
if key not in properties:
properties[key] = v
start = arrow_start(**properties) if arrow_start else None
end = arrow_end(**properties) if arrow_end else None
line_props = {p: v for p, v in properties.items() if p.startswith('line_')}
renderer = Arrow(start=start, end=end, source=source,
**dict(line_props, **mapping))
glyph = renderer
else:
properties = {p if p == 'source' else 'text_'+p: v
for p, v in properties.items()}
renderer, glyph = super()._init_glyph(
plot, mapping, properties, key)
plot.renderers.append(renderer)
return renderer, glyph
def get_extents(self, element, ranges=None, range_type='combined'):
return None, None, None, None
class DivPlot(BokehPlot, GenericElementPlot, AnnotationPlot):
height = param.Number(default=300)
width = param.Number(default=300)
sizing_mode = param.ObjectSelector(default=None, objects=[
'fixed', 'stretch_width', 'stretch_height', 'stretch_both',
'scale_width', 'scale_height', 'scale_both', None], doc="""
How the component should size itself.
* "fixed" :
Component is not responsive. It will retain its original
width and height regardless of any subsequent browser window
resize events.
* "stretch_width"
Component will responsively resize to stretch to the
available width, without maintaining any aspect ratio. The
height of the component depends on the type of the component
and may be fixed or fit to component's contents.
* "stretch_height"
Component will responsively resize to stretch to the
available height, without maintaining any aspect ratio. The
width of the component depends on the type of the component
and may be fixed or fit to component's contents.
* "stretch_both"
Component is completely responsive, independently in width
and height, and will occupy all the available horizontal and
vertical space, even if this changes the aspect ratio of the
component.
* "scale_width"
Component will responsively resize to stretch to the
available width, while maintaining the original or provided
aspect ratio.
* "scale_height"
Component will responsively resize to stretch to the
available height, while maintaining the original or provided
aspect ratio.
* "scale_both"
Component will responsively resize to both the available
width and height, while maintaining the original or provided
aspect ratio.
""")
finalize_hooks = param.HookList(default=[], doc="""
Deprecated; use hooks options instead.""")
hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing a plot. The
hook is passed the plot object and the displayed element, and
other plotting handles can be accessed via plot.handles.""")
_stream_data = False
selection_display = None
def __init__(self, element, plot=None, **params):
super().__init__(element, **params)
self.callbacks = []
self.handles = {} if plot is None else self.handles['plot']
self.static = len(self.hmap) == 1 and len(self.keys) == len(self.hmap)
def get_data(self, element, ranges, style):
return element.data, {}, style
def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
element = self.hmap.last
key = self.keys[-1]
self.current_frame = element
self.current_key = key
data, _, _ = self.get_data(element, ranges, {})
div = HTML(text=escape(data), width=self.width, height=self.height,
sizing_mode=self.sizing_mode)
self.handles['plot'] = div
self._execute_hooks(element)
self.drawn = True
return div
def update_frame(self, key, ranges=None, plot=None):
"""
Updates an existing plot with data corresponding
to the key.
"""
element = self._get_frame(key)
text, _, _ = self.get_data(element, ranges, {})
self.state.update(text=text, sizing_mode=self.sizing_mode)
|
en
| 0.799873
|
Whether to show legend for the plot. Amount of offset to apply to labels along x-axis. Amount of offset to apply to labels along x-axis. # Deprecated options Deprecated in favor of color style mapping, e.g. `color=dim('color')` # If color dim is same as text dim, rename color column Whether to include the annotation in axis range calculations. Returns a Bokeh glyph object. Whether to include the annotation in axis range calculations. Returns a Bokeh glyph object. Returns a Bokeh glyph object. Draw the supplied Spline annotation (see Spline docstring). Does not support matplotlib Path codes. # Compute arrow # Define arrowhead # Compute label Returns a Bokeh glyph object. How the component should size itself. * "fixed" : Component is not responsive. It will retain its original width and height regardless of any subsequent browser window resize events. * "stretch_width" Component will responsively resize to stretch to the available width, without maintaining any aspect ratio. The height of the component depends on the type of the component and may be fixed or fit to component's contents. * "stretch_height" Component will responsively resize to stretch to the available height, without maintaining any aspect ratio. The width of the component depends on the type of the component and may be fixed or fit to component's contents. * "stretch_both" Component is completely responsive, independently in width and height, and will occupy all the available horizontal and vertical space, even if this changes the aspect ratio of the component. * "scale_width" Component will responsively resize to stretch to the available width, while maintaining the original or provided aspect ratio. * "scale_height" Component will responsively resize to stretch to the available height, while maintaining the original or provided aspect ratio. * "scale_both" Component will responsively resize to both the available width and height, while maintaining the original or provided aspect ratio. 
Deprecated; use hooks options instead. Optional list of hooks called when finalizing a plot. The hook is passed the plot object and the displayed element, and other plotting handles can be accessed via plot.handles. Initializes a new plot object with the last available frame. # Get element key and ranges for frame Updates an existing plot with data corresponding to the key.
| 2.238845
| 2
|
src/tensorseason/utils.py
|
canerturkmen/tensorseason
| 0
|
6626453
|
<gh_stars>0
import warnings
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tensorly as tl
from gluonts.dataset.util import to_pandas
from gluonts.dataset.repository.datasets import get_dataset, dataset_recipes
from scipy import interpolate
from scipy.stats import linregress
from scipy.fftpack import rfft, irfft, dct, idct
from tensorly.decomposition import parafac, tucker
# introduce some utility functions
def repeat(ts: np.ndarray, times: int) -> np.ndarray:
    """Return *ts* concatenated with itself *times* times (a flat tiling)."""
    assert ts.ndim == 1
    # times <= 1 yields a plain copy, matching the original loop's behaviour.
    if times <= 1:
        return np.array(ts)
    return np.concatenate([ts] * times)
def fold(X: np.ndarray, n_p: int):
    """Fold the first mode of *X* into tubes of length *n_p*."""
    # Row-major reshape: the leading axis is split into (-1, n_p).
    target_shape = (-1, n_p) + tuple(X.shape[1:])
    return np.reshape(X, target_shape)
def multifold(X: np.ndarray, n_ps: List[int]):
    """Apply `fold` once per entry of *n_ps*, left to right."""
    if not n_ps:
        return X
    return multifold(fold(X, n_ps[0]), n_ps[1:])
def rmse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Root-mean-square error between targets and predictions, ignoring NaNs."""
    squared_diff = np.square(y_true - y_pred)
    return np.sqrt(np.nanmean(squared_diff))
def mad(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Mean absolute deviation between targets and predictions, ignoring NaNs.

    Note: the previous implementation called ``.sum()`` on the scalar
    returned by ``np.nanmean`` — a no-op that has been removed.
    """
    return np.nanmean(np.abs(y_true - y_pred))
def dct_dft_errors(data, max_params, param_sweep_scale="linear", no_params=1000, error_fn=rmse):
    """Reconstruction errors for DCT and DFT over a sweep of coefficient counts.

    Returns (dct_errors, dft_errors, param_sweep), one error per sweep entry.
    """
    param_sweep = get_param_sweep(max_params, param_sweep_scale, no_params)
    dct_errors = []
    dft_errors = []
    for n_coeffs in param_sweep:
        dct_errors.append(error_fn(data, dct_reconstruct(data, n_coeffs)))
        dft_errors.append(error_fn(data, dft_reconstruct(data, n_coeffs)))
    return dct_errors, dft_errors, param_sweep
def dct_reconstruct(data: np.ndarray, n: int):
    """Reconstruct *data* from its *n* largest-magnitude DCT coefficients."""
    coeffs = dct(data)
    # Keep only the n coefficients with the largest absolute value.
    keep = np.argsort(np.abs(coeffs))[-n:]
    filtered = np.zeros_like(coeffs)
    filtered[keep] = coeffs[keep]
    # DCT-III inverts DCT-II up to a factor of 2 * len(coeffs).
    return dct(filtered, type=3) / (2 * len(coeffs))
def dft_reconstruct(data: np.ndarray, n: int):
    """Reconstruct *data* from its *n* largest-magnitude real-DFT coefficients."""
    coeffs = rfft(data)  # real-input DFT (not a DCT, despite the symmetry with dct_reconstruct)
    keep = np.argsort(np.abs(coeffs))[-n:]
    filtered = np.zeros_like(coeffs)
    filtered[keep] = coeffs[keep]
    return irfft(filtered)
def tensor_errors(data, folds, ranks, decomposition_type="parafac", error_fn=rmse):
    """Error / parameter-count pairs for tensor decompositions over a rank sweep.

    Parameters
    ----------
    data : array to fold and approximate
    folds : fold sizes forwarded to `tensor_reconstruction`
    ranks : iterable of decomposition ranks to try
    decomposition_type : "parafac" or "tucker"
    error_fn : metric taking (data, approximation)

    Returns (errors, parameter_counts), one entry per rank.
    """
    tensor_results = []
    for rank in ranks:
        # Progress logging every third rank; replaces the obfuscated
        # `_ = print(rank) if ... else None` conditional expression.
        if rank % 3 == 0:
            print(rank)
        data_approx, npars = tensor_reconstruction(
            data, folds, rank, decomposition_type=decomposition_type
        )
        tensor_results.append([error_fn(data, data_approx), npars])
    ten_errors, ten_params = zip(*tensor_results)
    return ten_errors, ten_params
# melih utility functions
def tensor_errors_test(data, test_data, folds, ranks, decomposition_type="parafac"):
    """Train/test RMSEs and parameter counts for tensor decompositions per rank.

    Returns (train_errors, test_errors, parameter_counts), one entry per rank.
    """
    tensor_results = []
    for rank in ranks:
        # Progress logging every other rank; replaces the obfuscated
        # `_ = print(rank) if ... else None` conditional expression.
        if (rank + 1) % 2 == 0:
            print(rank)
        data_approx, npars = tensor_reconstruction(
            data, folds, rank, decomposition_type=decomposition_type
        )
        # Training RMSE is computed against the same data that was folded.
        rmse_train = rmse(data, data_approx)
        tensor_results.append([rmse_train, rmse(test_data, data_approx), npars])
    ten_errors, test_errors, ten_params = zip(*tensor_results)
    return ten_errors, test_errors, ten_params
def get_param_sweep(max_params, param_sweep_scale, no_params):
    """Return an integer parameter sweep from 1 to *max_params*.

    param_sweep_scale:
        "linear" -- evenly spaced values (floored; may contain duplicates)
        "log"    -- log-spaced, floored and deduplicated values

    Raises
    ------
    ValueError
        If *param_sweep_scale* is not a supported scale (previously a bare
        ``Exception``; ``ValueError`` lets callers handle bad config narrowly).
    """
    if param_sweep_scale == "linear":
        return np.floor(np.linspace(1, max_params, no_params)).astype(int)
    if param_sweep_scale == "log":
        return np.unique(
            np.floor(np.logspace(0, np.log10(max_params), no_params))
        ).astype(int)
    raise ValueError(f"Param sweep scale not defined: {param_sweep_scale!r}")
def dct_dft_errors_test(data, test_data, max_params, param_sweep_scale, no_params):
    """Train and test RMSEs for DCT/DFT reconstructions over a parameter sweep.

    Returns (dct_train, dct_test, dft_train, dft_test, param_sweep).
    """
    dct_errors, dft_errors, param_sweep = dct_dft_errors(
        data=data,
        max_params=max_params,
        param_sweep_scale=param_sweep_scale,
        no_params=no_params,
    )
    dct_test_errors = []
    dft_test_errors = []
    for n_coeffs in param_sweep:
        dct_test_errors.append(rmse(test_data, dct_reconstruct(data, n_coeffs)))
        dft_test_errors.append(rmse(test_data, dft_reconstruct(data, n_coeffs)))
    return dct_errors, dct_test_errors, dft_errors, dft_test_errors, param_sweep
def plot_comparison(dct_errors, dft_errors, ten_params, ten_errors, param_sweep, folds, td_params=None, td_errors=None):
    """Plot RMSE-vs-parameter-count curves for DCT, DFT, CP and (optionally) Tucker.

    The x-axis is logarithmic; a vertical line marks the season length
    (the product of the fold sizes).
    """
    f, ax = plt.subplots(figsize=(8, 6))
    ax.plot(param_sweep, dct_errors, 'b.-', label="DCT")
    ax.plot(param_sweep, dft_errors, 'g.-', label="DFT")
    ax.plot(ten_params, ten_errors, 'r.-', label="CP")
    if td_params is not None:
        ax.plot(td_params, td_errors, 'm.-', label="Tucker")
    # Raw string avoids invalid-escape warnings in the LaTeX label; np.prod
    # replaces np.product, which is deprecated and removed in NumPy 2.0
    # (np.prod is already used elsewhere in this file).
    ax.axvline(np.prod(folds), color='grey', linestyle='--', label=r'$\dim \, \mathbf{s}$')
    ax.set(xlabel="# Parameters (logarithmic)", ylabel="RMSE")
    ax.legend()
    ax.semilogx()
def get_plot_data(idx, train_datas, test_datas, freq, plot=True):
    """Concatenate train and test series *idx*, re-index at *freq*, optionally plot."""
    train_series = to_pandas(train_datas[idx])
    test_series = to_pandas(test_datas[idx])
    data = pd.concat((train_series, test_series))
    # Replace the stitched index with one contiguous range at the given freq.
    data.index = pd.date_range(start=data.index[0], freq=freq, periods=len(data))
    if plot:
        data.plot()
    return data
def get_gluonts_dataset(dataset_name):
    """Load a GluonTS dataset and return its train/test entries with summaries.

    Prints value counts of series lengths and frequencies, then returns
    (train_entries, test_entries, lengths, frequencies).
    """
    dataset = get_dataset(dataset_name, regenerate=False)
    train_datas = list(iter(dataset.train))
    test_datas = list(iter(dataset.test))
    lens = [len(entry["target"]) for entry in train_datas]
    freqs = [entry["start"].freqstr for entry in train_datas]
    print(pd.Series(lens).value_counts())
    print(pd.Series(freqs).value_counts())
    del dataset
    return train_datas, test_datas, lens, freqs
def trend_cycle_decompose(df: pd.Series, w: int, df_train=None):
    """Split *df* into a trend-cycle (rolling mean of width *w*) and a residual.

    NaNs are linearly interpolated first; the leading rolling-window NaNs
    are back-filled. Returns (trend_cycle, residual).
    """
    # isinstance replaces the fragile `type(x) == T` comparisons.
    assert isinstance(df, pd.Series)
    assert isinstance(w, int)
    assert w > 1
    dfi = df.interpolate("linear")
    # .bfill() replaces fillna(method="bfill"), deprecated in pandas 2.x.
    trend_cycle = dfi.rolling(w).mean().bfill()
    residual = dfi - trend_cycle
    return trend_cycle, residual
def naive_seasonal_decompose(df: pd.Series, w: int, df_train=None):
    """Estimate a period-*w* seasonal profile by averaging full periods of *df*.

    Returns (seasonal_effect, deseasonalized); the profile is tiled over
    the whole index of *df*.
    """
    values = np.array(df)
    n_full = (len(values) // w) * w  # drop the trailing partial period
    seasonal_profile = multifold(values[:n_full], [w]).mean(0)
    tiled = repeat(seasonal_profile, len(values) // w + 1)[:len(values)]
    seas_effect = pd.Series(tiled, index=df.index)
    return seas_effect, df - seas_effect
def analyze_and_plot(df, period: int, plot=True):
    """Decompose *df* and report a seasonal R^2; optionally plot the parts.

    The trend-cycle is removed with a window of ``2 * period``, then a naive
    seasonal profile of period ``7 * period`` is fit to the detrended series.
    R^2 here is the seasonal sum of squares over the detrended sum of squares.
    """
    tc, res_tc = trend_cycle_decompose(df, period * 2)
    seas, res_seas = naive_seasonal_decompose(res_tc, period * 7)
    # Fraction of detrended variation explained by the seasonal component.
    r2 = np.square(seas).sum() / np.square(res_tc).sum()
    if plot:
        f, axes = plt.subplots(3, figsize=(8, 5), sharex=True)
        for ax_, title, obj in zip(
            axes,
            ["Trend-cycle", "Seasonal", "Residual"],
            [tc, seas, res_seas]
        ):
            ax_.plot(obj)
            ax_.set(title=title)
        f.suptitle(f"R^2: {r2: .2f}")
        plt.show()
    return r2
# DEPRECATED
def tc_decompose(df, w, df_train=None):
    """Estimate a trend-cycle component of *df* with a centered moving average.

    DEPRECATED in favour of `trend_cycle_decompose`.

    For an even window the classical double (2xw) moving average is computed;
    for an odd window a single centered moving average. When the window
    exceeds 10% of the series length, a first-order linear fit is used
    instead, optionally anchored on *df_train* (assumed contiguous with *df*).

    :param df: series to decompose
    :param w: moving-average window width (> 1)
    :param df_train: optional training series for the linear-fit branch
    :return: series of the same length holding the trend-cycle estimate
    """
    assert type(df) == pd.core.series.Series
    assert type(w) == int
    assert w > 1
    if w / len(df) > .10:
        # NOTE(review): the message mentions null values, but the condition
        # only compares window width to series length — confirm intent.
        print("Too many null values, using linear first order polynomial for detrending.")
        ma = df.copy()
        if df_train is None:
            m = linregress(range(len(df.values)), df.values)
            ma[:] = m.intercept + m.slope * np.arange(len(df.values))
        else:
            print("Using training data for linear regression, assuming continuity.")
            # Extrapolate the training-data fit forward over df's index.
            m = linregress(range(len(df_train.values)), df_train.values)
            ma[:] = m.intercept + m.slope * (len(df_train) + np.arange(len(df.values)))
    else:
        if w % 2 == 0:
            # Even window: w-wide moving average built from cumulative sums ...
            lower_cumsum = df.cumsum().shift((w // 2))
            lower_cumsum.iloc[w // 2 - 1] = 0.
            ma_w = (df.cumsum().shift(-(w // 2)) - lower_cumsum) / w
            # ... then a 2-point average to re-center it (the 2xw MA).
            lower_cumsum = ma_w.cumsum().shift(2)
            lower_cumsum.iloc[w // 2] = 0.
            ma = (ma_w.cumsum() - lower_cumsum) / 2
        elif w % 2 == 1:
            # Odd window: a single centered moving average via cumulative sums.
            lower_cumsum = df.cumsum().shift((w // 2 + 1))
            lower_cumsum.iloc[w // 2] = 0.
            ma = (df.cumsum().shift(-(w // 2)) - lower_cumsum) / w
        # Fill the NaN edges by linear extrapolation of the interior values.
        f = interpolate.interp1d(ma.reset_index(drop=True).dropna().index, ma.dropna().values, fill_value='extrapolate')
        ma[:] = f(range(len(ma)))
    return ma
def remove_ma(data, w, df_train=None):
    """Detrend *data* by subtracting its `tc_decompose` moving average."""
    trend = tc_decompose(data, w, df_train=df_train)
    return data - trend
def plot_tc_decomposition(data, ma_folds, df_train=None):
    """Plot the series, its `tc_decompose` moving average, and the residual."""
    with warnings.catch_warnings():
        # Silence warnings raised during the decomposition itself.
        warnings.simplefilter("ignore")
        moving_avg = tc_decompose(data, int(np.prod(ma_folds)), df_train=df_train)
    fig, axes = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(10, 4))
    data.plot(ax=axes[0])
    moving_avg.plot(ax=axes[1])
    (data - moving_avg).plot(ax=axes[2])
def extend_test_data(data, test_data, freq):
    """Re-index *test_data* onto a range spanning as long as *data*, padding with NaNs."""
    span = data.index[-1] - data.index[0]
    full_index = pd.date_range(test_data.index[0], test_data.index[0] + span, freq=freq)
    extended = pd.Series(index=full_index)
    extended.loc[test_data.index] = test_data.values
    print("You have extended your test data using null values at a frequency of: {}".format(freq))
    return extended
# tensor reconstruction
def tensor_reconstruction(data: np.ndarray, folds: List[int], rank: int, decomposition_type: object = "parafac"):
    """Fold *data* into a tensor, decompose it, and return the reconstruction.

    Returns (reconstruction_ravelled, n_parameters). The first (trivial)
    mode is excluded from the parameter count; for Tucker its rank is
    fixed to 1.

    Raises
    ------
    ValueError
        If *decomposition_type* is not "parafac" or "tucker". (Previously
        the function fell through and silently returned None.)
    """
    tensor = multifold(data, folds)
    if decomposition_type == "parafac":
        fac = parafac(tensor, rank=rank, n_iter_max=10000, tol=1.0e-15, linesearch=True)
        n_params = np.sum([f.size for f in fac[1]][1:])
        return tl.cp_to_tensor(fac).ravel(), n_params
    if decomposition_type == "tucker":
        # A scalar rank is broadcast to one rank per mode; isinstance
        # replaces the fragile `type(x) == T` comparisons.
        # NOTE(review): broadcasting uses len(data.shape) as before — confirm
        # this was not meant to be len(tensor.shape).
        if isinstance(rank, (int, float)):
            rank = [int(rank) for _ in range(len(data.shape))]
        ranks = np.minimum(tensor.shape, rank)
        ranks[0] = 1  # trivial first mode
        core, factors = tucker(tensor, ranks=ranks, n_iter_max=10000, tol=1.0e-15)
        n_params = np.sum(
            [ranks[i] * tensor.shape[i] for i in range(1, len(tensor.shape))]
        ) + np.prod(ranks[1:])
        return tl.tucker_to_tensor((core, factors)).ravel(), n_params
    raise ValueError(f"Unknown decomposition_type: {decomposition_type!r}")
def idct(w: np.ndarray, extr: int) -> np.ndarray:
    """
    Inverse DCT (DCT-III) with periodic extrapolation.

    NOTE: this deliberately shadows ``scipy.fftpack.idct`` imported above;
    unlike SciPy's, it appends *extr* extrapolated time steps.

    :param w: DCT-II coefficients of the series to invert
    :param extr: number of time steps to extrapolate (0 <= extr <= len(w));
        the first *extr* reconstructed values are repeated at the end
    :return: array of length ``len(w) + extr``
    """
    N = len(w)
    y = np.zeros(N + extr)
    # Vectorized DCT-III synthesis:
    # y[k] = w[0] + 2 * sum_{j>=1} w[j] * cos(pi * (2k+1) * j / (2N))
    k = np.arange(N)
    angles = np.pi * (2 * k[:, None] + 1) * np.arange(1, N)[None, :] / (2 * N)
    y[:N] = w[0] + 2 * (np.cos(angles) @ w[1:])
    # Periodic extrapolation; the extr == 0 guard fixes a bug where the old
    # `y[-extr:] = y[:extr]` selected the whole array (y[-0:] == y[0:]) and
    # raised a broadcast error.
    if extr > 0:
        y[-extr:] = y[:extr]
    return y / N / 2
|
import warnings
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tensorly as tl
from gluonts.dataset.util import to_pandas
from gluonts.dataset.repository.datasets import get_dataset, dataset_recipes
from scipy import interpolate
from scipy.stats import linregress
from scipy.fftpack import rfft, irfft, dct, idct
from tensorly.decomposition import parafac, tucker
# introduce some utility functions
def repeat(ts: np.ndarray, times: int) -> np.ndarray:
assert ts.ndim == 1
out = np.array(ts)
for _ in range(times - 1):
out = np.r_[out, ts]
return out
def fold(X: np.ndarray, n_p: int):
"""fold first mode into n_p tubes"""
newshape = [-1, n_p] + list(X.shape[1:])
return np.reshape(X, newshape)
def multifold(X: np.ndarray, n_ps: List[int]):
for n_p in n_ps:
X = fold(X, n_p)
return X
def rmse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
return np.sqrt(
np.nanmean(np.square(y_true - y_pred))
)
def mad(y_true: np.ndarray, y_pred: np.ndarray) -> float:
return np.nanmean(np.abs(y_true - y_pred)).sum()
def dct_dft_errors(data, max_params, param_sweep_scale="linear", no_params=1000, error_fn=rmse):
# RMSEs with DCT
param_sweep = get_param_sweep(max_params, param_sweep_scale, no_params)
dct_errors = [
error_fn(data, dct_reconstruct(data, p))
for p in param_sweep
]
# RMSEs with DFT
dft_errors = [
error_fn(data, dft_reconstruct(data, p))
for p in param_sweep
]
return dct_errors, dft_errors, param_sweep
def dct_reconstruct(data: np.ndarray, n: int):
z = dct(data) # take the DCT
# get the frequencies with most magnitude
top_n = np.argsort(np.abs(z))[-n:]
mask = np.zeros(len(z), dtype=bool)
mask[top_n] = True
# zero out the other frequencies
z_masked = np.array(z)
z_masked[~mask] = 0
# reconstruct
return dct(z_masked, type=3) / len(z) / 2
def dft_reconstruct(data: np.ndarray, n: int):
z = rfft(data) # take the DCT
# get the frequencies with most magnitude
top_n = np.argsort(np.abs(z))[-n:]
mask = np.zeros(len(z), dtype=bool)
mask[top_n] = True
# zero out the other frequencies
z_masked = np.array(z)
z_masked[~mask] = 0
# reconstruct
return irfft(z_masked)
def tensor_errors(data, folds, ranks, decomposition_type="parafac", error_fn=rmse):
# with tensors
tensor_results = []
for rank in ranks:
_ = print(rank) if rank % 3 == 0 else None
data_approx, npars = tensor_reconstruction(data, folds, rank, decomposition_type=decomposition_type)
tensor_results.append(
[error_fn(data, data_approx), npars]
)
ten_errors, ten_params = zip(*tensor_results)
return ten_errors, ten_params
# melih utility functions
def tensor_errors_test(data, test_data, folds, ranks, decomposition_type="parafac"):
# with tensors
tensor_results = []
for rank in ranks:
_ = print(rank) if (rank + 1) % 2 == 0 else None
data_approx, npars = tensor_reconstruction(data, folds, rank, decomposition_type=decomposition_type)
# calculate the training RMSE (we will change data approx below)
rmse_train = rmse(data, data_approx)
# take means of the tensor in the trivial direction
# mean_trivial_direction = data_approx.mean(0)[np.newaxis, ...]
# broadcast the mean to each slice in the trivial direction
# for i in range(data_approx.shape[0]):
# data_approx[i, ...] = mean_trivial_direction
tensor_results.append(
[rmse_train, rmse(test_data, data_approx), npars]
)
ten_errors, test_errors, ten_params = zip(*tensor_results)
return ten_errors, test_errors, ten_params
def get_param_sweep(max_params, param_sweep_scale, no_params):
if param_sweep_scale == "linear":
return np.floor(np.linspace(1, max_params, no_params)).astype(int)
elif param_sweep_scale == "log":
return np.unique(np.floor(np.logspace(0, np.log10(max_params), no_params))).astype(int)
else:
raise Exception("Param sweep scale not defined")
def dct_dft_errors_test(data, test_data, max_params, param_sweep_scale, no_params):
dct_errors, dft_errors, param_sweep = dct_dft_errors(data=data, max_params=max_params,
param_sweep_scale=param_sweep_scale, no_params=no_params)
dct_test_errors = [
rmse(test_data, dct_reconstruct(data, p))
for p in param_sweep
]
dft_test_errors = [
rmse(test_data, dft_reconstruct(data, p))
for p in param_sweep
]
return dct_errors, dct_test_errors, dft_errors, dft_test_errors, param_sweep
def plot_comparison(dct_errors, dft_errors, ten_params, ten_errors, param_sweep, folds, td_params=None, td_errors=None):
f, ax = plt.subplots(figsize=(8, 6))
ax.plot(param_sweep, dct_errors, 'b.-', label="DCT")
ax.plot(param_sweep, dft_errors, 'g.-', label="DFT")
ax.plot(ten_params, ten_errors, 'r.-', label="CP")
if td_params is not None:
ax.plot(td_params, td_errors, 'm.-', label="Tucker")
ax.axvline(np.product(folds), color='grey', linestyle='--', label='$\dim \, \mathbf{s}$')
ax.set(xlabel="# Parameters (logarithmic)", ylabel="RMSE")
ax.legend()
ax.semilogx();
def get_plot_data(idx, train_datas, test_datas, freq, plot=True):
data = pd.concat((to_pandas(train_datas[idx]), to_pandas(test_datas[idx])))
data.index = pd.date_range(start=data.index[0], freq=freq, periods=len(data))
if plot:
data.plot();
return data
def get_gluonts_dataset(dataset_name):
dataset = get_dataset(dataset_name, regenerate=False)
train_datas = list(iter(dataset.train))
test_datas = list(iter(dataset.test))
lens = [len(d["target"]) for d in train_datas]
freqs = [d["start"].freqstr for d in train_datas]
print(pd.Series(lens).value_counts())
print(pd.Series(freqs).value_counts())
del dataset
return train_datas, test_datas, lens, freqs
def trend_cycle_decompose(df: pd.Series, w: int, df_train=None):
assert type(df) == pd.core.series.Series
assert type(w) == int
assert w > 1
dfi = df.interpolate("linear")
trend_cycle = dfi.rolling(w).mean().fillna(method="bfill")
residual = dfi - trend_cycle
return trend_cycle, residual
def naive_seasonal_decompose(df: pd.Series, w: int, df_train=None):
a = np.array(df)
new_len_a = (len(a) // w) * w
seasonal = multifold(a[:new_len_a], [w]).mean(0)
seas_effect = pd.Series(
repeat(seasonal, len(a) // w + 1)[:len(a)], index=df.index
)
return seas_effect, df - seas_effect
def analyze_and_plot(df, period: int, plot=True):
tc, res_tc = trend_cycle_decompose(df, period * 2)
seas, res_seas = naive_seasonal_decompose(res_tc, period * 7)
r2 = np.square(seas).sum() / np.square(res_tc).sum()
if plot:
f, axes = plt.subplots(3, figsize=(8, 5), sharex=True)
for ax_, title, obj in zip(
axes,
["Trend-cycle", "Seasonal", "Residual"],
[tc, seas, res_seas]
):
ax_.plot(obj)
ax_.set(title=title)
f.suptitle(f"R^2: {r2: .2f}")
plt.show()
return r2
# DEPRECATED
def tc_decompose(df, w, df_train=None):
assert type(df) == pd.core.series.Series
assert type(w) == int
assert w > 1
if w / len(df) > .10:
print("Too many null values, using linear first order polynomial for detrending.")
ma = df.copy()
if df_train is None:
m = linregress(range(len(df.values)), df.values)
ma[:] = m.intercept + m.slope * np.arange(len(df.values))
else:
print("Using training data for linear regression, assuming continuity.")
m = linregress(range(len(df_train.values)), df_train.values)
ma[:] = m.intercept + m.slope * (len(df_train) + np.arange(len(df.values)))
else:
if w % 2 == 0:
lower_cumsum = df.cumsum().shift((w // 2))
lower_cumsum.iloc[w // 2 - 1] = 0.
ma_w = (df.cumsum().shift(-(w // 2)) - lower_cumsum) / w
lower_cumsum = ma_w.cumsum().shift(2)
lower_cumsum.iloc[w // 2] = 0.
ma = (ma_w.cumsum() - lower_cumsum) / 2
elif w % 2 == 1:
lower_cumsum = df.cumsum().shift((w // 2 + 1))
lower_cumsum.iloc[w // 2] = 0.
ma = (df.cumsum().shift(-(w // 2)) - lower_cumsum) / w
f = interpolate.interp1d(ma.reset_index(drop=True).dropna().index, ma.dropna().values, fill_value='extrapolate')
ma[:] = f(range(len(ma)))
return ma
def remove_ma(data, w, df_train=None):
return data - tc_decompose(data, w, df_train=df_train)
def plot_tc_decomposition(data, ma_folds, df_train=None):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ma = tc_decompose(data, int(np.prod(ma_folds)), df_train=df_train)
fig, axes = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(10, 4))
data.plot(ax=axes[0])
ma.plot(ax=axes[1])
(data - ma).plot(ax=axes[2])
def extend_test_data(data, test_data, freq):
temp = pd.Series(
index=pd.date_range(test_data.index[0], test_data.index[0] + (data.index[-1] - data.index[0]), freq=freq))
temp.loc[test_data.index] = test_data.values
print("You have extended your test data using null values at a frequency of: {}".format(freq))
return temp
# tensor reconstruction
def tensor_reconstruction(data: np.ndarray, folds: List[int], rank: int, decomposition_type: object = "parafac"):
tensor = multifold(data, folds)
if decomposition_type == "parafac":
fac = parafac(tensor, rank=rank, n_iter_max=10000, tol=1.0e-15, linesearch=True)
return tl.cp_to_tensor(fac).ravel(), np.sum([f.size for f in fac[1]][1:])
elif decomposition_type == "tucker":
if (type(rank) == int) or (type(rank) == float):
rank = int(rank)
rank = [rank for i in range(len(data.shape))]
ranks = np.minimum(tensor.shape, rank)
ranks[0] = 1
core, factors = tucker(tensor, ranks=ranks, n_iter_max=10000, tol=1.0e-15)
return tl.tucker_to_tensor((core, factors)).ravel(), np.sum(
[ranks[i] * tensor.shape[i] for i in range(1, len(tensor.shape))]) + np.prod(ranks[1:])
def idct(w: np.ndarray, extr: int) -> np.ndarray:
"""
Inverse DCT with extrapolation.
:param w: series to apply IDCT (DCT-III)
:param extr: number of time steps to extrapolate
:return:
"""
N = len(w)
y = np.zeros(N + extr)
for k in range(N):
y[k] = w[0] + 2 * np.dot(
w[1:], np.cos(np.pi * (2 * k + 1) * np.arange(1, N) / (2 * N))
)
y[-extr:] = y[:extr]
return y / N / 2
|
en
| 0.812562
|
# introduce some utility functions fold first mode into n_p tubes # RMSEs with DCT # RMSEs with DFT # take the DCT # get the frequencies with most magnitude # zero out the other frequencies # reconstruct # take the DCT # get the frequencies with most magnitude # zero out the other frequencies # reconstruct # with tensors # melih utility functions # with tensors # calculate the training RMSE (we will change data approx below) # take means of the tensor in the trivial direction # mean_trivial_direction = data_approx.mean(0)[np.newaxis, ...] # broadcast the mean to each slice in the trivial direction # for i in range(data_approx.shape[0]): # data_approx[i, ...] = mean_trivial_direction # DEPRECATED # tensor reconstruction Inverse DCT with extrapolation. :param w: series to apply IDCT (DCT-III) :param extr: number of time steps to extrapolate :return:
| 2.276521
| 2
|
concurrency/test_charfinder.py
|
pythonfluente/example-code
| 7
|
6626454
|
<gh_stars>1-10
import pytest
from charfinder import UnicodeNameIndex, tokenize, sample_chars
from unicodedata import name
@pytest.fixture
def sample_index():
    # Small index built from the module-level sample characters;
    # function-scoped, so it is rebuilt for every test.
    return UnicodeNameIndex(sample_chars)
@pytest.fixture(scope="module")
def full_index():
    # Full Unicode name index; expensive to build, so shared across the module.
    return UnicodeNameIndex()
def test_tokenize():
    """Tokenizing uppercases and splits on spaces and hyphens; empty input yields no tokens."""
    cases = [
        ('', []),
        ('a b', ['A', 'B']),
        ('a-b', ['A', 'B']),
        ('abc', ['ABC']),
        ('café', ['CAFÉ']),
    ]
    for text, expected in cases:
        assert list(tokenize(text)) == expected
def test_index():
    # Builds its own index instead of using the `sample_index` fixture,
    # exercising construction directly.
    sample_index = UnicodeNameIndex(sample_chars)
    assert len(sample_index) == 9
def test_find_word_no_match(sample_index):
    # A word absent from every character name yields no codes.
    res = list(sample_index.find_codes('qwertyuiop'))
    assert len(res) == 0
def test_find_word_1_match(sample_index):
    # Lookup is case-insensitive; exactly one sample name contains 'currency'.
    res = [(code, name(chr(code)))
           for code in sample_index.find_codes('currency')]
    assert res == [(8352, 'EURO-CURRENCY SIGN')]
def test_find_word_2_matches(sample_index):
    res = [(code, name(chr(code)))
           for code in sample_index.find_codes('Euro')]
    assert res == [(8352, 'EURO-CURRENCY SIGN'),
                   (8364, 'EURO SIGN')]
def test_find_2_words_no_matches(sample_index):
    # Multi-word queries require every word to match the same name.
    res = list(sample_index.find_codes('Euro letter'))
    assert len(res) == 0
def test_find_2_words_no_matches_because_one_not_found(sample_index):
    res = list(sample_index.find_codes('letter qwertyuiop'))
    assert len(res) == 0
def test_find_2_words_1_match(sample_index):
    # Word order in the query does not matter ('sign dollar' vs 'DOLLAR SIGN').
    res = list(sample_index.find_codes('sign dollar'))
    assert len(res) == 1
def test_find_2_words_2_matches(sample_index):
    res = list(sample_index.find_codes('latin letter'))
    assert len(res) == 2
def test_find_codes_many_matches_full(full_index):
    # Against the full Unicode database, 'letter' matches thousands of names;
    # only a lower bound is asserted so the test survives Unicode updates.
    res = list(full_index.find_codes('letter'))
    assert len(res) > 7000
def test_find_1_word_1_match_full(full_index):
    res = [(code, name(chr(code)))
           for code in full_index.find_codes('registered')]
    assert res == [(174, 'REGISTERED SIGN')]
def test_find_1_word_2_matches_full(full_index):
    res = list(full_index.find_codes('rook'))
    assert len(res) == 2
def test_find_3_words_no_matches_full(full_index):
    res = list(full_index.find_codes('no such character'))
    assert len(res) == 0
|
import pytest
from charfinder import UnicodeNameIndex, tokenize, sample_chars
from unicodedata import name
@pytest.fixture
def sample_index():
return UnicodeNameIndex(sample_chars)
@pytest.fixture(scope="module")
def full_index():
return UnicodeNameIndex()
def test_tokenize():
assert list(tokenize('')) == []
assert list(tokenize('a b')) == ['A', 'B']
assert list(tokenize('a-b')) == ['A', 'B']
assert list(tokenize('abc')) == ['ABC']
assert list(tokenize('café')) == ['CAFÉ']
def test_index():
sample_index = UnicodeNameIndex(sample_chars)
assert len(sample_index) == 9
def test_find_word_no_match(sample_index):
res = list(sample_index.find_codes('qwertyuiop'))
assert len(res) == 0
def test_find_word_1_match(sample_index):
res = [(code, name(chr(code)))
for code in sample_index.find_codes('currency')]
assert res == [(8352, 'EURO-CURRENCY SIGN')]
def test_find_word_2_matches(sample_index):
res = [(code, name(chr(code)))
for code in sample_index.find_codes('Euro')]
assert res == [(8352, 'EURO-CURRENCY SIGN'),
(8364, 'EURO SIGN')]
def test_find_2_words_no_matches(sample_index):
res = list(sample_index.find_codes('Euro letter'))
assert len(res) == 0
def test_find_2_words_no_matches_because_one_not_found(sample_index):
res = list(sample_index.find_codes('letter qwertyuiop'))
assert len(res) == 0
def test_find_2_words_1_match(sample_index):
res = list(sample_index.find_codes('sign dollar'))
assert len(res) == 1
def test_find_2_words_2_matches(sample_index):
res = list(sample_index.find_codes('latin letter'))
assert len(res) == 2
def test_find_codes_many_matches_full(full_index):
res = list(full_index.find_codes('letter'))
assert len(res) > 7000
def test_find_1_word_1_match_full(full_index):
res = [(code, name(chr(code)))
for code in full_index.find_codes('registered')]
assert res == [(174, 'REGISTERED SIGN')]
def test_find_1_word_2_matches_full(full_index):
res = list(full_index.find_codes('rook'))
assert len(res) == 2
def test_find_3_words_no_matches_full(full_index):
res = list(full_index.find_codes('no such character'))
assert len(res) == 0
|
none
| 1
| 2.521135
| 3
|
|
scripts/train.0527.py
|
cocoaaa/ReprLearn
| 0
|
6626455
|
"""train.py
Train a single configuration of a model specified on a specified data
Required CLI args
-----------------
--model_name: "vae", "beta_vae", "iwae", "bivae"
--data_name: "mnist", "multi_rotated_mnist", "multi_mono_mnist",
"maptiles", "multi_maptiles"
--latent_dim: int, eg. 10
Optional CLI args: (partial)
----------------------------
--hidden_dims: eg. --hidden_dims 32 64 128 256 (which is default)
Note
----
Each model (specified by --model_name) and datamodule (specified by --data_name)
expects a different set of arguments. For example, `bivae` models allow the following
arguments:
Required:
--n_styles
--adversary_dim
--adv_loss_weight
To run: (at the root of the project, ie. /data/hayley-old/Tenanbaum2000
nohup python train.py --model_name="vae" --data_name="mnist" --latent_dim=10
nohup python train.py --model_name="iwae" --data_name="mnist" --latent_dim=10
# Train BetaVAE on MNIST
nohup python train.py --model_name="beta_vae" \
--enc_type "conv" --dec_type "conv" \
--latent_dim=10 --hidden_dims 32 64 128 256 \
--kld_weight=1.0 --use_beta_scheduler \
--data_name="mnist" --data_root='/data/hayley-old/Tenanbaum2000/data' \
-lr 3e-4 -bs 32 \
--gpu_id=1 --max_epochs=200 --terminate_on_nan=True \
--log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-01-30/" &
# Train BiVAE on Multi Monochrome MNIST
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 --adv_dim 32 32 -lr 1e-3 --adv_weight 15.0 \
--data_name="multi_mono_mnist" --colors red green blue --n_styles=3 \
--gpu_id=1
# Train BiVAE on Multi Rotated MNIST
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 \
--data_name="multi_rotated_mnist" --angles -45 0 45 --n_styles=3 \
--gpu_id=1
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 64 64 --adv_dim 32 \
--data_name="multi_rotated_mnist" --angles 0 15 30 45 60 --n_styles=5 \
--gpu_id=3 --max_epochs=400 --terminate_on_nan=True \
-lr 3e-3 --adv_weight 2000.0 \
--log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-05-23/" &
## Specify which indices to use among the MNIST -- comparable to DIVA's experiments
## change 0 to anything in between 0, ..., 9
nohup python train.py --model_name="bivae" \
--latent_dim=128 --hidden_dims 32 64 64 64 --adv_dim 32 32 32 \
--data_name="multi_rotated_mnist" --angles -45 0 45 --n_styles=3 \
--selected_inds_fp='/data/hayley-old/Tenanbaum2000/data/Rotated-MNIST/supervised_inds_0.npy' \
--gpu_id=1
# Train BiVAE on Multi Maptiles MNIST
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 --adv_weight 15.0 \
--data_name="multi_maptiles" \
--cities la paris \
--styles CartoVoyagerNoLabels StamenTonerBackground --n_styles=3 \
--zooms 14 \
--gpu_id=2 --max_epochs=400 --terminate_on_nan=True \
-lr 3e-4 -bs 32 \
--log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-01-23/" &
# Train BiVAE on Multi OSMnxRoads
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 --adv_weight 1.0 \
--data_root="/data/hayley-old/osmnx_data/images" \
--data_name="osmnx_roads" \
--cities paris \
--bgcolors "k" "r" "g" "b" "y" --n_styles=5 \
--zooms 14 \
--gpu_id=2 --max_epochs=300 --terminate_on_nan=True \
-lr 3e-4 -bs 32 \
--log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-05-18/" &
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 --adv_weight 1.0 \
--data_root="/data/hayley-old/osmnx_data/images" \
--data_name="osmnx_roads" \
--cities 'la' 'charlotte' 'vegas' 'boston' 'paris' \
'amsterdam' 'shanghai' 'seoul' 'chicago' 'manhattan' \
'berlin' 'montreal' 'rome' \
--bgcolors "k" "r" "g" "b" "y" --n_styles=5 \
--zooms 14 \
--gpu_id=2 --max_epochs=300 --terminate_on_nan=True \
-lr 3e-4 -bs 32 \
--log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-05-18/" &
"""
import os,sys
from datetime import datetime
import time
from argparse import ArgumentParser, Namespace
from collections import OrderedDict
from pathlib import Path
from typing import List, Set, Dict, Tuple, Optional, Iterable, Mapping, Union, Callable, TypeVar
import warnings
from pprint import pprint
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
# callbacks
from src.callbacks.recon_logger import ReconLogger
from src.callbacks.hist_logger import HistogramLogger
from pytorch_lightning.callbacks import LearningRateMonitor
from src.callbacks.beta_scheduler import BetaScheduler
# src helpers
from src.utils.misc import info
from src.models.model_wrapper import ModelWrapper
from src.utils.misc import info, n_iter_per_epoch
# utils for instantiating a selected datamodule and a selected model
from utils import get_model_class, get_dm_class
from utils import instantiate_model, instantiate_dm
from utils import add_base_arguments
def train(args: Union[Dict, Namespace]):
    """Train one model configuration on one datamodule configuration.

    Builds the datamodule and model from the parsed CLI arguments, wires up
    TensorBoard logging and callbacks, runs ``trainer.fit``, and finally logs
    the best checkpoint score together with the experiment's hyperparameters.

    Args:
        args: parsed CLI arguments (argparse ``Namespace`` or dict-like)
            carrying model, datamodule, and ``pl.Trainer`` settings.
    """
    # --- Build datamodule and model from the CLI arguments ---
    datamodule = instantiate_dm(args)
    datamodule.setup('fit')
    model = instantiate_model(args)

    # --- TensorBoard logger: one subdirectory per <model>_<datamodule> pair ---
    exp_name = f'{model.name}_{datamodule.name}'
    print('Exp name: ', exp_name)
    board_logger = pl_loggers.TensorBoardLogger(
        save_dir=args.log_root,
        name=exp_name,
        default_hp_metric=False,
    )
    run_dir = Path(board_logger.log_dir)
    print("Log Dir: ", run_dir)
    if not run_dir.exists():
        run_dir.mkdir(parents=True)
        print("Created: ", run_dir)

    # --- Callbacks: always monitor the LR; optionally schedule the KLD beta ---
    cb_list = [LearningRateMonitor(logging_interval='epoch')]
    if args.use_beta_scheduler:
        # The total number of optimization steps drives the beta-annealing cycle.
        total_iters = n_iter_per_epoch(datamodule.train_dataloader()) * args.max_epochs
        cb_list.append(
            BetaScheduler(total_iters,
                          start=args.beta_start,
                          stop=args.beta_stop,
                          n_cycle=args.beta_n_cycle,
                          ratio=args.beta_ratio,
                          log_tag=args.beta_log_tag))

    # These settings take precedence over whatever arrived via the CLI.
    overrides = dict(
        gpus=1,                       # single-GPU training
        progress_bar_refresh_rate=0,  # silence the progress bar
        terminate_on_nan=True,
        check_val_every_n_epoch=10,
        logger=board_logger,
        callbacks=cb_list,
    )
    trainer = pl.Trainer.from_argparse_args(args, **overrides)

    # Log the model's computational graph to TensorBoard.
    board_logger.log_graph(ModelWrapper(model))

    # ------------------------------------------------------------------------
    # Run the experiment
    # ------------------------------------------------------------------------
    t0 = time.time()
    print(f"{exp_name} started...")
    print(f"Logging to {Path(board_logger.log_dir).absolute()}")
    trainer.fit(model, datamodule)
    print(f"Finished at ep {trainer.current_epoch, trainer.batch_idx}")

    # ------------------------------------------------------------------------
    # Log the best score and this experiment's hyperparameters
    # ------------------------------------------------------------------------
    hp = model.hparams.copy()
    hp.update(datamodule.hparams)
    hp['use_beta_scheduler'] = args.use_beta_scheduler
    best = trainer.checkpoint_callback.best_model_score.item()
    metric_dict = {'hparam/best_score': best}  # todo: define a metric and use it here
    trainer.logger.log_hyperparams(hp, metric_dict)
    print("Logged hparams and metrics...")
    print("\t hparams: ")
    pprint(hp)
    print("=====")
    print("\t metrics: ", metric_dict)
    print(f"Training Done: took {time.time() - t0}")
    # ------------------------------------------------------------------------
    # TODO Evaluation ideas:
    # 1. Reconstructions: x -> encoder -> theta_z -> sample N codes -> decoder
    # 2. Embedding: encode a mini-batch -> rsample -> project to 2D -> visualize
    # 3. Latent traversal: vary one latent dimension at a time (linearly or
    #    spherically), keep the rest fixed, and decode to inspect the landscape
    # 4. Marginal log-likelihood of the train/val/test datasets
    # ------------------------------------------------------------------------
    # print("Evaluations...")
    # model.eval()
if __name__ == '__main__':
    # ------------------------------------------------------------------------
    # Stage 1: parse only the base arguments (model_name, data_name, ...) so
    # we know which model/datamodule classes contribute extra CLI options.
    # ------------------------------------------------------------------------
    cli_parser = ArgumentParser()
    cli_parser = add_base_arguments(cli_parser)
    cli_args, _unknown = cli_parser.parse_known_args()
    print("Base CLI args: ")
    pprint(cli_args)

    # ------------------------------------------------------------------------
    # Stage 2: extend the parser with model-, datamodule-, trainer-, and
    # callback-specific options, then parse the full command line for real.
    # ------------------------------------------------------------------------
    model_cls = get_model_class(cli_args.model_name)
    dm_cls = get_dm_class(cli_args.data_name)
    cli_parser = model_cls.add_model_specific_args(cli_parser)
    cli_parser = dm_cls.add_model_specific_args(cli_parser)
    cli_parser = pl.Trainer.add_argparse_args(cli_parser)
    cli_parser.add_argument("--gpu_id", type=str, required=True, help="ID of GPU to use")
    # Callback switch args
    cli_parser = BetaScheduler.add_argparse_args(cli_parser)
    cli_args = cli_parser.parse_args()
    print("Final args: ")
    pprint(cli_args)

    # Restrict CUDA to the requested device before any torch CUDA init,
    # using PCI-bus ordering so the numbering matches nvidia-smi.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = cli_args.gpu_id

    # Run the full training workflow (init model/dm/trainer, fit, log hparams).
    train(cli_args)
|
"""train.py
Train a single configuration of a model specified on a specified data
Required CLI args
-----------------
--model_name: "vae", "beta_vae", "iwae", "bivae"
--data_name: "mnist", "multi_rotated_mnist", "multi_mono_mnist",
"maptiles", "multi_maptiles"
--latent_dim: int, eg. 10
Optional CLI args: (partial)
----------------------------
--hidden_dims: eg. --hidden_dims 32 64 128 256 (which is default)
Note
----
Each model (specified by --model_name) and datamodule (specified by --data_name)
expects a different set of arguments. For example, `bivae` models allow the following
arguments:
Required:
--n_styles
--adversary_dim
--adv_loss_weight
To run: (at the root of the project, ie. /data/hayley-old/Tenanbaum2000)
nohup python train.py --model_name="vae" --data_name="mnist" --latent_dim=10
nohup python train.py --model_name="iwae" --data_name="mnist" --latent_dim=10
# Train BetaVAE on MNIST
nohup python train.py --model_name="beta_vae" \
--enc_type "conv" --dec_type "conv" \
--latent_dim=10 --hidden_dims 32 64 128 256 \
--kld_weight=1.0 --use_beta_scheduler \
--data_name="mnist" --data_root='/data/hayley-old/Tenanbaum2000/data' \
-lr 3e-4 -bs 32 \
--gpu_id=1 --max_epochs=200 --terminate_on_nan=True \
--log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-01-30/" &
# Train BiVAE on Multi Monochrome MNIST
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 --adv_dim 32 32 -lr 1e-3 --adv_weight 15.0 \
--data_name="multi_mono_mnist" --colors red green blue --n_styles=3 \
--gpu_id=1
# Train BiVAE on Multi Rotated MNIST
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 \
--data_name="multi_rotated_mnist" --angles -45 0 45 --n_styles=3 \
--gpu_id=1
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 64 64 --adv_dim 32 \
--data_name="multi_rotated_mnist" --angles 0 15 30 45 60 --n_styles=5 \
--gpu_id=3 --max_epochs=400 --terminate_on_nan=True \
-lr 3e-3 --adv_weight 2000.0 \
--log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-05-23/" &
## Specify which indices to use among the MNIST -- comparable to DIVA's experiments
## change 0 to any digit in 0,...,9
nohup python train.py --model_name="bivae" \
--latent_dim=128 --hidden_dims 32 64 64 64 --adv_dim 32 32 32 \
--data_name="multi_rotated_mnist" --angles -45 0 45 --n_styles=3 \
--selected_inds_fp='/data/hayley-old/Tenanbaum2000/data/Rotated-MNIST/supervised_inds_0.npy' \
--gpu_id=1
# Train BiVAE on Multi Maptiles MNIST
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 --adv_weight 15.0 \
--data_name="multi_maptiles" \
--cities la paris \
--styles CartoVoyagerNoLabels StamenTonerBackground --n_styles=3 \
--zooms 14 \
--gpu_id=2 --max_epochs=400 --terminate_on_nan=True \
-lr 3e-4 -bs 32 \
--log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-01-23/" &
# Train BiVAE on Multi OSMnxRoads
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 --adv_weight 1.0 \
--data_root="/data/hayley-old/osmnx_data/images" \
--data_name="osmnx_roads" \
--cities paris \
--bgcolors "k" "r" "g" "b" "y" --n_styles=5 \
--zooms 14 \
--gpu_id=2 --max_epochs=300 --terminate_on_nan=True \
-lr 3e-4 -bs 32 \
--log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-05-18/" &
nohup python train.py --model_name="bivae" \
--latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 --adv_weight 1.0 \
--data_root="/data/hayley-old/osmnx_data/images" \
--data_name="osmnx_roads" \
--cities 'la' 'charlotte' 'vegas' 'boston' 'paris' \
'amsterdam' 'shanghai' 'seoul' 'chicago' 'manhattan' \
'berlin' 'montreal' 'rome' \
--bgcolors "k" "r" "g" "b" "y" --n_styles=5 \
--zooms 14 \
--gpu_id=2 --max_epochs=300 --terminate_on_nan=True \
-lr 3e-4 -bs 32 \
--log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-05-18/" &
"""
import os,sys
from datetime import datetime
import time
from argparse import ArgumentParser, Namespace
from collections import OrderedDict
from pathlib import Path
from typing import List, Set, Dict, Tuple, Optional, Iterable, Mapping, Union, Callable, TypeVar
import warnings
from pprint import pprint
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
# callbacks
from src.callbacks.recon_logger import ReconLogger
from src.callbacks.hist_logger import HistogramLogger
from pytorch_lightning.callbacks import LearningRateMonitor
from src.callbacks.beta_scheduler import BetaScheduler
# src helpers
from src.utils.misc import info
from src.models.model_wrapper import ModelWrapper
from src.utils.misc import info, n_iter_per_epoch
# utils for instantiating a selected datamodule and a selected model
from utils import get_model_class, get_dm_class
from utils import instantiate_model, instantiate_dm
from utils import add_base_arguments
def train(args: Union[Dict, Namespace]):
    """Train one model configuration on one datamodule configuration.

    Builds the datamodule and model from the parsed CLI arguments, wires up
    TensorBoard logging and callbacks, runs ``trainer.fit``, and finally logs
    the best checkpoint score together with the experiment's hyperparameters.

    Args:
        args: parsed CLI arguments (argparse ``Namespace`` or dict-like)
            carrying model, datamodule, and ``pl.Trainer`` settings.
    """
    # --- Build datamodule and model from the CLI arguments ---
    datamodule = instantiate_dm(args)
    datamodule.setup('fit')
    model = instantiate_model(args)

    # --- TensorBoard logger: one subdirectory per <model>_<datamodule> pair ---
    exp_name = f'{model.name}_{datamodule.name}'
    print('Exp name: ', exp_name)
    board_logger = pl_loggers.TensorBoardLogger(
        save_dir=args.log_root,
        name=exp_name,
        default_hp_metric=False,
    )
    run_dir = Path(board_logger.log_dir)
    print("Log Dir: ", run_dir)
    if not run_dir.exists():
        run_dir.mkdir(parents=True)
        print("Created: ", run_dir)

    # --- Callbacks: always monitor the LR; optionally schedule the KLD beta ---
    cb_list = [LearningRateMonitor(logging_interval='epoch')]
    if args.use_beta_scheduler:
        # The total number of optimization steps drives the beta-annealing cycle.
        total_iters = n_iter_per_epoch(datamodule.train_dataloader()) * args.max_epochs
        cb_list.append(
            BetaScheduler(total_iters,
                          start=args.beta_start,
                          stop=args.beta_stop,
                          n_cycle=args.beta_n_cycle,
                          ratio=args.beta_ratio,
                          log_tag=args.beta_log_tag))

    # These settings take precedence over whatever arrived via the CLI.
    overrides = dict(
        gpus=1,                       # single-GPU training
        progress_bar_refresh_rate=0,  # silence the progress bar
        terminate_on_nan=True,
        check_val_every_n_epoch=10,
        logger=board_logger,
        callbacks=cb_list,
    )
    trainer = pl.Trainer.from_argparse_args(args, **overrides)

    # Log the model's computational graph to TensorBoard.
    board_logger.log_graph(ModelWrapper(model))

    # ------------------------------------------------------------------------
    # Run the experiment
    # ------------------------------------------------------------------------
    t0 = time.time()
    print(f"{exp_name} started...")
    print(f"Logging to {Path(board_logger.log_dir).absolute()}")
    trainer.fit(model, datamodule)
    print(f"Finished at ep {trainer.current_epoch, trainer.batch_idx}")

    # ------------------------------------------------------------------------
    # Log the best score and this experiment's hyperparameters
    # ------------------------------------------------------------------------
    hp = model.hparams.copy()
    hp.update(datamodule.hparams)
    hp['use_beta_scheduler'] = args.use_beta_scheduler
    best = trainer.checkpoint_callback.best_model_score.item()
    metric_dict = {'hparam/best_score': best}  # todo: define a metric and use it here
    trainer.logger.log_hyperparams(hp, metric_dict)
    print("Logged hparams and metrics...")
    print("\t hparams: ")
    pprint(hp)
    print("=====")
    print("\t metrics: ", metric_dict)
    print(f"Training Done: took {time.time() - t0}")
    # ------------------------------------------------------------------------
    # TODO Evaluation ideas:
    # 1. Reconstructions: x -> encoder -> theta_z -> sample N codes -> decoder
    # 2. Embedding: encode a mini-batch -> rsample -> project to 2D -> visualize
    # 3. Latent traversal: vary one latent dimension at a time (linearly or
    #    spherically), keep the rest fixed, and decode to inspect the landscape
    # 4. Marginal log-likelihood of the train/val/test datasets
    # ------------------------------------------------------------------------
    # print("Evaluations...")
    # model.eval()
if __name__ == '__main__':
    # ------------------------------------------------------------------------
    # Stage 1: parse only the base arguments (model_name, data_name, ...) so
    # we know which model/datamodule classes contribute extra CLI options.
    # ------------------------------------------------------------------------
    cli_parser = ArgumentParser()
    cli_parser = add_base_arguments(cli_parser)
    cli_args, _unknown = cli_parser.parse_known_args()
    print("Base CLI args: ")
    pprint(cli_args)

    # ------------------------------------------------------------------------
    # Stage 2: extend the parser with model-, datamodule-, trainer-, and
    # callback-specific options, then parse the full command line for real.
    # ------------------------------------------------------------------------
    model_cls = get_model_class(cli_args.model_name)
    dm_cls = get_dm_class(cli_args.data_name)
    cli_parser = model_cls.add_model_specific_args(cli_parser)
    cli_parser = dm_cls.add_model_specific_args(cli_parser)
    cli_parser = pl.Trainer.add_argparse_args(cli_parser)
    cli_parser.add_argument("--gpu_id", type=str, required=True, help="ID of GPU to use")
    # Callback switch args
    cli_parser = BetaScheduler.add_argparse_args(cli_parser)
    cli_args = cli_parser.parse_args()
    print("Final args: ")
    pprint(cli_args)

    # Restrict CUDA to the requested device before any torch CUDA init,
    # using PCI-bus ordering so the numbering matches nvidia-smi.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = cli_args.gpu_id

    # Run the full training workflow (init model/dm/trainer, fit, log hparams).
    train(cli_args)
|
en
| 0.163929
|
train.py Train a single configuration of a model specified on a specified data Required CLI args ----------------- --model_name: "vae", "beta_vae", "iwae", "bivae" --data_name: "mnist", "multi_rotated_mnist", "multi_mono_mnist", "maptiles", "multi_maptiles" --latent_dim: int, eg. 10 Optional CLI args: (partial) ---------------------------- --hidden_dims: eg. --hidden_dims 32 64 128 256 (which is default) Note ---- Each model (specified by --model_name) and datamodule (specified by --data_name) expects a different set of arguments. For example, `bivae` models allow the following arguments: Required: --n_styles --adversary_dim --adv_loss_weight To run: (at the root of the project, ie. /data/hayley-old/Tenanbaum2000 nohup python train.py --model_name="vae" --data_name="mnist" --latent_dim=10 nohup python train.py --model_name="iwae" --data_name="mnist" --latent_dim=10 # Train BetaVAE on MNIST nohup python train.py --model_name="beta_vae" \ --enc_type "conv" --dec_type "conv" \ --latent_dim=10 --hidden_dims 32 64 128 256 \ --kld_weight=1.0 --use_beta_scheduler \ --data_name="mnist" --data_root='/data/hayley-old/Tenanbaum2000/data' \ -lr 3e-4 -bs 32 \ --gpu_id=1 --max_epochs=200 --terminate_on_nan=True \ --log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-01-30/" & # Train BiVAE on Multi Monochrome MNIST nohup python train.py --model_name="bivae" \ --latent_dim=10 --hidden_dims 32 64 --adv_dim 32 32 -lr 1e-3 --adv_weight 15.0 \ --data_name="multi_mono_mnist" --colors red green blue --n_styles=3 \ --gpu_id=1 # Train BiVAE on Multi Rotated MNIST nohup python train.py --model_name="bivae" \ --latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 \ --data_name="multi_rotated_mnist" --angles -45 0 45 --n_styles=3 \ --gpu_id=1 nohup python train.py --model_name="bivae" \ --latent_dim=10 --hidden_dims 32 64 64 64 --adv_dim 32 \ --data_name="multi_rotated_mnist" --angles 0 15 30 45 60 --n_styles=5 \ --gpu_id=3 --max_epochs=400 --terminate_on_nan=True \ -lr 
3e-3 --adv_weight 2000.0 \ --log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-05-23/" & ## Specify which indices to use among the MNIST -- comparable to DIVA's experiments ## change 0 to anything inbtw 0,...,9 nohup python train.py --model_name="bivae" \ --latent_dim=128 --hidden_dims 32 64 64 64 --adv_dim 32 32 32 \ --data_name="multi_rotated_mnist" --angles -45 0 45 --n_styles=3 \ --selected_inds_fp='/data/hayley-old/Tenanbaum2000/data/Rotated-MNIST/supervised_inds_0.npy' \ --gpu_id=1 # Train BiVAE on Multi Maptiles MNIST nohup python train.py --model_name="bivae" \ --latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 --adv_weight 15.0 \ --data_name="multi_maptiles" \ --cities la paris \ --styles CartoVoyagerNoLabels StamenTonerBackground --n_styles=3 \ --zooms 14 \ --gpu_id=2 --max_epochs=400 --terminate_on_nan=True \ -lr 3e-4 -bs 32 \ --log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-01-23/" & # Train BiVAE on Multi OSMnxRoads nohup python train.py --model_name="bivae" \ --latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 --adv_weight 1.0 \ --data_root="/data/hayley-old/osmnx_data/images" \ --data_name="osmnx_roads" \ --cities paris \ --bgcolors "k" "r" "g" "b" "y" --n_styles=5 \ --zooms 14 \ --gpu_id=2 --max_epochs=300 --terminate_on_nan=True \ -lr 3e-4 -bs 32 \ --log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-05-18/" & nohup python train.py --model_name="bivae" \ --latent_dim=10 --hidden_dims 32 64 128 256 --adv_dim 32 32 32 --adv_weight 1.0 \ --data_root="/data/hayley-old/osmnx_data/images" \ --data_name="osmnx_roads" \ --cities 'la' 'charlotte' 'vegas' 'boston' 'paris' \ 'amsterdam' 'shanghai' 'seoul' 'chicago' 'manhattan' \ 'berlin' 'montreal' 'rome' \ --bgcolors "k" "r" "g" "b" "y" --n_styles=5 \ --zooms 14 \ --gpu_id=2 --max_epochs=300 --terminate_on_nan=True \ -lr 3e-4 -bs 32 \ --log_root="/data/hayley-old/Tenanbaum2000/lightning_logs/2021-05-18/" & # callbacks # src helpers # utils for 
instatiating a selected datamodule and a selected model # Init. datamodule and model # Specify logger # breakpoint() # Specify callbacks # HistogramLogger(hist_epoch_interval=args.hist_epoch_interval), # ReconLogger(recon_epoch_interval=args.recon_epoch_interval), # EarlyStopping('val_loss', patience=10), #use a single gpu # don't print out progress bar # Init. trainer # Log model's computational graph # tb_logger.experiment.add_graph(model_wrapper, model.) # ------------------------------------------------------------------------ # Run the experiment # ------------------------------------------------------------------------ # ------------------------------------------------------------------------ # Log the best score and current experiment's hyperparameters # ------------------------------------------------------------------------ # todo: define a metric and use it here # ------------------------------------------------------------------------ # Evaluation # 1. Reconstructions: # x --> model.encoder(x) --> theta_z # --> sample N latent codes from the Pr(z; theta_z) # --> model.decoder(z) for each sampled z's # 2. Embedding: # a mini-batch input -> mu_z, logvar_z # -> rsample # -> project to 2D -> visualize # 3. Inspect the topology/landscape of the learned latent space # Latent traversal: Pick a dimension of the latent space. # - Keep all other dimensions' values constant. # - Vary the chosen dimenion's values (eg. linearly, spherically) # - and decode the latent codes. Show the outputs of the decoder. # 4. 
Marginal Loglikelihood of train/val/test dataset # ------------------------------------------------------------------------ # print("Evaluations...") # model.eval() # ------------------------------------------------------------------------ # Add general arguments for this CLI script for training/testing # ------------------------------------------------------------------------ # ------------------------------------------------------------------------ # Add model/datamodule/trainer specific args # ------------------------------------------------------------------------ # Callback switch args # parser.add_argument("--hist_epoch_interval", type=int, default=10, help="Epoch interval to plot histogram of q's parameter") # parser.add_argument("--recon_epoch_interval", type=int, default=10, help="Epoch interval to plot reconstructions of train and val samples") # breakpoint() # ------------------------------------------------------------------------ # Run the training workflow # -- Select Visible GPU # -- Initialize model, datamodule, trainer using the parsed arg dict. # -- Specify callbacks # -- Init. trainer # -- Run the experiment # -- Log the best score and current experiment's hyperparameters # -- TODO: Add Evaluation # ------------------------------------------------------------------------ # Select Visible GPU # Start experiment
| 2.384711
| 2
|
temp_prox/main_slide.py
|
sanweiliti/HMP
| 92
|
6626456
|
<filename>temp_prox/main_slide.py
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems and the Max Planck Institute for Biological
# Cybernetics. All rights reserved.
#
# Contact: <EMAIL>
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
import os
rootPath = '../'
sys.path.append(rootPath)
import os.path as osp
import time
import yaml
import scipy.io as sio
import open3d as o3d
import torch
import smplx
from temp_prox.misc_utils import JointMapper
from temp_prox.cmd_parser import parse_config
from temp_prox.data_parser_slide import *
from temp_prox.fit_temp_loadprox_slide import fit_temp_loadprox_slide
from temp_prox.camera import create_camera
from temp_prox.prior import create_prior
from models.AE import AE as AE_infill
from models.AE_sep import Enc
torch.backends.cudnn.enabled = False
def main(**args):
gpu_id = args.get('gpu_id')
torch.cuda.set_device(gpu_id)
print('gpu id:', gpu_id)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
################ read/creat paths ########################
data_folder = args.get('recording_dir')
recording_name = osp.basename(args.get('recording_dir')) # e.x. 'N3OpenArea_00157_01'
scene_name = recording_name.split("_")[0] # e.x. 'N3OpenArea'
base_dir = os.path.abspath(osp.join(args.get('recording_dir'), os.pardir, os.pardir)) # '/mnt/hdd/PROX'
keyp_dir = osp.join(base_dir, 'keypoints')
keyp_folder = osp.join(keyp_dir, recording_name)
cam2world_dir = osp.join(base_dir, 'cam2world')
scene_dir = osp.join(base_dir, 'scenes')
calib_dir = osp.join(base_dir, 'calibration')
sdf_dir = osp.join(base_dir, 'scenes_sdf')
body_segments_dir = '../body_segments'
marker_mask_dir = osp.join('../mask_markers', recording_name)
prox_params_dir = osp.join(base_dir, 'PROXD', recording_name)
# prox_params_dir = osp.join(base_dir, 'PROXD_filled', recording_name)
if args.get('use_motion_infill'):
prox_params_dir = osp.join('../fit_results_S2/', recording_name) # TODO: to set
output_folder = args.get('output_folder')
output_folder = osp.expandvars(output_folder)
output_folder = osp.join(output_folder, recording_name)
if not osp.exists(output_folder):
os.makedirs(output_folder)
# Store the arguments for the current experiment
conf_fn = osp.join(output_folder, 'conf.yaml')
with open(conf_fn, 'w') as conf_file:
yaml.dump(args, conf_file)
# remove 'output_folder' from args list
args.pop('output_folder')
result_folder = args.pop('result_folder', 'results')
result_folder = osp.join(output_folder, result_folder)
if not osp.exists(result_folder):
os.makedirs(result_folder)
mesh_folder = args.pop('mesh_folder', 'meshes')
mesh_folder = osp.join(output_folder, mesh_folder)
if not osp.exists(mesh_folder):
os.makedirs(mesh_folder)
out_img_folder = osp.join(output_folder, 'images')
if not osp.exists(out_img_folder):
os.makedirs(out_img_folder)
out_rendering_dir = os.path.join(output_folder, 'renderings')
if not osp.exists(out_rendering_dir):
os.mkdir(out_rendering_dir)
tensorboard_log_dir = os.path.join(output_folder, 'tensorboard_log')
if not osp.exists(tensorboard_log_dir):
os.mkdir(tensorboard_log_dir)
input_gender = args.pop('gender', 'neutral') # male
dtype = torch.float32
################################## load motion prior model #############################
if args.get('use_motion_smooth_prior'):
motion_smooth_model = Enc(downsample=False, z_channel=64).to(device)
weights = torch.load(args.get('AE_Enc_path'), map_location=lambda storage, loc: storage)
motion_smooth_model.load_state_dict(weights)
motion_smooth_model.eval()
for param in motion_smooth_model.parameters():
param.requires_grad = False
else:
motion_smooth_model = None
################################### load motion infilling model ###########################
if args.get('use_motion_infill_prior'):
motion_infill_model = AE_infill(downsample=True, in_channel=4, kernel=args.get('conv_kernel')).to(device)
infill_pretrain_weights = torch.load(args.get('AE_infill_path'), map_location=lambda storage, loc: storage)
motion_infill_model.load_state_dict(infill_pretrain_weights)
else:
motion_infill_model = None
infill_pretrain_weights = None
####################### create data loader / joint mapper / joint weights ########################
img_folder = args.pop('img_folder', 'Color')
dataset_obj = OpenPose(img_folder=img_folder, data_folder=data_folder, keyp_folder=keyp_folder, calib_dir=calib_dir,
prox_params_dir=prox_params_dir,
output_params_dir=output_folder,
marker_mask_dir=marker_mask_dir, **args)
data_loader = torch.utils.data.DataLoader(dataset=dataset_obj,
batch_size=args.get('batch_size'),
shuffle=False,
num_workers=0, drop_last=True)
# map smplx joints to openpose, 118=25body+21hand*2*51face
joint_mapper = JointMapper(dataset_obj.get_model2data())
# A weight for each joint of the model, 1 for each joint, 0 for joint 1,9,12
joint_weights = dataset_obj.get_joint_weights().to(device=device, dtype=dtype) # tensor, [118]
joint_weights.unsqueeze_(dim=0) # [1, 118]
####################### init smplx model ########################
start = time.time()
model_params = dict(model_path=args.get('model_folder'),
joint_mapper=joint_mapper,
create_global_orient=True,
create_body_pose=not args.get('use_vposer'),
create_betas=True,
create_left_hand_pose=True,
create_right_hand_pose=True,
create_expression=True,
create_jaw_pose=True,
create_leye_pose=True,
create_reye_pose=True,
create_transl=True,
dtype=dtype,
**args)
male_model = smplx.create(gender='male', **model_params)
# SMPL-H has no gender-neutral model
if args.get('model_type') != 'smplh':
neutral_model = smplx.create(gender='neutral', **model_params)
female_model = smplx.create(gender='female', **model_params)
####################### create camera object ########################
camera_center = None \
if args.get('camera_center_x') is None or args.get('camera_center_y') is None \
else torch.tensor([args.get('camera_center_x'), args.get('camera_center_y')], dtype=dtype).view(-1, 2) # tensor, [1,2]
camera = create_camera(focal_length_x=args.get('focal_length_x'),
focal_length_y=args.get('focal_length_y'),
center= camera_center,
batch_size=args.get('batch_size'),
dtype=dtype)
if hasattr(camera, 'rotation'):
camera.rotation.requires_grad = False
####################### creat prior type ########################
use_hands = args.get('use_hands', True) # True
use_face = args.get('use_face', True) # True
body_pose_prior = create_prior(
prior_type=args.get('body_prior_type'),
dtype=dtype,
**args)
jaw_prior, expr_prior = None, None
if use_face:
jaw_prior = create_prior(
prior_type=args.get('jaw_prior_type'),
dtype=dtype,
**args)
expr_prior = create_prior(
prior_type=args.get('expr_prior_type', 'l2'),
dtype=dtype, **args)
left_hand_prior, right_hand_prior = None, None
if use_hands:
lhand_args = args.copy()
lhand_args['num_gaussians'] = args.get('num_pca_comps') # 12
left_hand_prior = create_prior(
prior_type=args.get('left_hand_prior_type'),
dtype=dtype,
use_left_hand=True,
**lhand_args)
rhand_args = args.copy()
rhand_args['num_gaussians'] = args.get('num_pca_comps')
right_hand_prior = create_prior(
prior_type=args.get('right_hand_prior_type'),
dtype=dtype,
use_right_hand=True,
**rhand_args)
shape_prior = create_prior(
prior_type=args.get('shape_prior_type', 'l2'),
dtype=dtype, **args)
angle_prior = create_prior(prior_type='angle', dtype=dtype)
##################### objects to cuda #######################
camera = camera.to(device=device)
female_model = female_model.to(device=device)
male_model = male_model.to(device=device)
if args.get('model_type') != 'smplh':
neutral_model = neutral_model.to(device=device)
body_pose_prior = body_pose_prior.to(device=device)
angle_prior = angle_prior.to(device=device)
shape_prior = shape_prior.to(device=device)
if use_face:
expr_prior = expr_prior.to(device=device)
jaw_prior = jaw_prior.to(device=device)
if use_hands:
left_hand_prior = left_hand_prior.to(device=device)
right_hand_prior = right_hand_prior.to(device=device)
######################### start fitting ########################
for idx, data in enumerate(data_loader):
input_dict, prox_params_dict = data[0], data[1] # ex. prox_params_dict[transl]: tensor, [bs, 3]
for param_name in prox_params_dict:
prox_params_dict[param_name] = prox_params_dict[param_name].to(device)
##################### read input img/keypoint/scan/... ###############
img = input_dict['img'].to(device) # tensor, [bs, 1080, 1920, 3]
fn = input_dict['fn'] # list, ['s001_frame_00001__00.00.00.033', ...]
keypoints = input_dict['keypoints'].to(device) # [bs, num_person, 118, 3]
marker_mask = input_dict['marker_mask'].to(device) # [bs, 67]
init_trans = input_dict['init_trans'].to(device).view(-1,3) # [bs, 3]
scan_point_num = input_dict['scan_point_num'] # [bs], valid number of scan pts from depth img
scan = input_dict['scan'] # [bs, 20000, 3], pad 0 for number_pts < 20000
# todo: do not load depth info if you don't use depth in optimization terms
# if args.get('batch_size') > 1:
# scan = None
print('Processing: {} to {}'.format(input_dict['img_path'][0], input_dict['img_path'][-1])) # 'points'/'colors': [num_valid_pts, 3]
sys.stdout.flush()
# TODO: won't work for multiple persons
person_id = 0
####################### set save paths #########################
curr_result_fn_list = []
curr_mesh_fn_list = []
curr_rendering_fn_list = []
out_img_fn_list = []
# path to save logs
start_frame = idx * args.get('batch_size') + 1
end_frame = start_frame + args.get('batch_size') - 1
cur_log_folder = osp.join(tensorboard_log_dir, 'frame{}_to_frame{}'.format(start_frame, end_frame))
if not osp.exists(cur_log_folder):
os.makedirs(cur_log_folder)
for i in range(len(fn)):
# path to save images
out_img_fn = osp.join(out_img_folder, fn[i] + '.png')
out_img_fn_list.append(out_img_fn)
# path to save rendered imgs
curr_rendering_fn = osp.join(out_rendering_dir, fn[i] + '.png')
curr_rendering_fn_list.append(curr_rendering_fn)
# path to save optimized smplx params
curr_result_folder = osp.join(result_folder, fn[i])
if not osp.exists(curr_result_folder):
os.makedirs(curr_result_folder)
curr_result_fn = osp.join(curr_result_folder, '{:03d}.pkl'.format(person_id))
curr_result_fn_list.append(curr_result_fn)
# path to save optimized mesh
curr_mesh_folder = osp.join(mesh_folder, fn[i])
if not osp.exists(curr_mesh_folder):
os.makedirs(curr_mesh_folder)
curr_mesh_fn = osp.join(curr_mesh_folder, '{:03d}.ply'.format(person_id))
curr_mesh_fn_list.append(curr_mesh_fn)
gender = input_gender # male
if gender == 'neutral':
body_model = neutral_model
elif gender == 'female':
body_model = female_model
elif gender == 'male':
body_model = male_model
########################## fitting #########################
if idx == 0:
first_batch_flag = 1 # if it's the 1st motion clip
else:
first_batch_flag = 0
fit_temp_loadprox_slide(img=img,
keypoints=keypoints[:, person_id,],
marker_mask=marker_mask,
init_trans=init_trans,
scan_point_num=scan_point_num,
scan=scan,
cam2world_dir=cam2world_dir,
scene_dir=scene_dir,
sdf_dir=sdf_dir,
body_segments_dir=body_segments_dir,
scene_name=scene_name,
body_model=body_model,
camera=camera,
joint_weights=joint_weights,
dtype=dtype,
output_folder=output_folder,
out_img_fn_list=out_img_fn_list,
result_fn_list=curr_result_fn_list,
mesh_fn_list=curr_mesh_fn_list,
log_folder=cur_log_folder,
rendering_fn_list=curr_rendering_fn_list,
shape_prior=shape_prior,
expr_prior=expr_prior,
body_pose_prior=body_pose_prior,
left_hand_prior=left_hand_prior,
right_hand_prior=right_hand_prior,
jaw_prior=jaw_prior,
angle_prior=angle_prior,
prox_params_dict=prox_params_dict,
motion_smooth_model=motion_smooth_model,
motion_infill_model=motion_infill_model,
infill_pretrain_weights=infill_pretrain_weights,
device=device,
first_batch_flag=first_batch_flag,
**args)
elapsed = time.time() - start
time_msg = time.strftime('%H hours, %M minutes, %S seconds',
time.gmtime(elapsed))
print('Processing the data took: {}'.format(time_msg))
sys.stdout.flush()
if __name__ == "__main__":
    # Script entry point: parse the command-line / config-file options and
    # run the fitting pipeline with the resulting flat argument dict.
    args = parse_config()
    main(**args)
|
<filename>temp_prox/main_slide.py
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems and the Max Planck Institute for Biological
# Cybernetics. All rights reserved.
#
# Contact: <EMAIL>
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
import os
rootPath = '../'
sys.path.append(rootPath)
import os.path as osp
import time
import yaml
import scipy.io as sio
import open3d as o3d
import torch
import smplx
from temp_prox.misc_utils import JointMapper
from temp_prox.cmd_parser import parse_config
from temp_prox.data_parser_slide import *
from temp_prox.fit_temp_loadprox_slide import fit_temp_loadprox_slide
from temp_prox.camera import create_camera
from temp_prox.prior import create_prior
from models.AE import AE as AE_infill
from models.AE_sep import Enc
torch.backends.cudnn.enabled = False
def main(**args):
    """Run the temporal SMPL-X fitting pipeline on one PROX recording.

    Builds body models, camera, pose/shape/hand/face priors and (optionally)
    motion-smoothness / motion-infilling networks, then iterates over the
    recording in sliding-window batches, optimizing each batch with
    ``fit_temp_loadprox_slide``. Per-frame results (pkl params, ply meshes,
    renderings, tensorboard logs) are written under ``args['output_folder']``.

    :param args: flat config dict produced by ``parse_config()``; keys include
        'recording_dir', 'output_folder', 'batch_size', 'gpu_id', 'gender',
        'model_folder', 'use_hands', 'use_face', prior types, etc.
    """
    gpu_id = args.get('gpu_id')
    torch.cuda.set_device(gpu_id)
    print('gpu id:', gpu_id)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    ################ read/create paths ########################
    data_folder = args.get('recording_dir')
    recording_name = osp.basename(args.get('recording_dir'))  # e.g. 'N3OpenArea_00157_01'
    scene_name = recording_name.split("_")[0]  # e.g. 'N3OpenArea'
    # Dataset root is two levels above the recording dir, e.g. '/mnt/hdd/PROX'
    base_dir = os.path.abspath(osp.join(args.get('recording_dir'), os.pardir, os.pardir))
    keyp_dir = osp.join(base_dir, 'keypoints')
    keyp_folder = osp.join(keyp_dir, recording_name)
    cam2world_dir = osp.join(base_dir, 'cam2world')
    scene_dir = osp.join(base_dir, 'scenes')
    calib_dir = osp.join(base_dir, 'calibration')
    sdf_dir = osp.join(base_dir, 'scenes_sdf')
    body_segments_dir = '../body_segments'
    marker_mask_dir = osp.join('../mask_markers', recording_name)
    prox_params_dir = osp.join(base_dir, 'PROXD', recording_name)
    # prox_params_dir = osp.join(base_dir, 'PROXD_filled', recording_name)
    if args.get('use_motion_infill'):
        prox_params_dir = osp.join('../fit_results_S2/', recording_name)  # TODO: to set
    output_folder = args.get('output_folder')
    output_folder = osp.expandvars(output_folder)
    output_folder = osp.join(output_folder, recording_name)
    if not osp.exists(output_folder):
        os.makedirs(output_folder)
    # Store the arguments for the current experiment
    conf_fn = osp.join(output_folder, 'conf.yaml')
    with open(conf_fn, 'w') as conf_file:
        yaml.dump(args, conf_file)
    # remove 'output_folder' from args list (it is passed explicitly below)
    args.pop('output_folder')
    result_folder = args.pop('result_folder', 'results')
    result_folder = osp.join(output_folder, result_folder)
    if not osp.exists(result_folder):
        os.makedirs(result_folder)
    mesh_folder = args.pop('mesh_folder', 'meshes')
    mesh_folder = osp.join(output_folder, mesh_folder)
    if not osp.exists(mesh_folder):
        os.makedirs(mesh_folder)
    out_img_folder = osp.join(output_folder, 'images')
    if not osp.exists(out_img_folder):
        os.makedirs(out_img_folder)
    out_rendering_dir = os.path.join(output_folder, 'renderings')
    if not osp.exists(out_rendering_dir):
        os.mkdir(out_rendering_dir)
    tensorboard_log_dir = os.path.join(output_folder, 'tensorboard_log')
    if not osp.exists(tensorboard_log_dir):
        os.mkdir(tensorboard_log_dir)
    input_gender = args.pop('gender', 'neutral')  # e.g. 'male'
    dtype = torch.float32
    ################################## load motion prior model #############################
    if args.get('use_motion_smooth_prior'):
        motion_smooth_model = Enc(downsample=False, z_channel=64).to(device)
        weights = torch.load(args.get('AE_Enc_path'), map_location=lambda storage, loc: storage)
        motion_smooth_model.load_state_dict(weights)
        # Frozen: used only as a prior, never trained here
        motion_smooth_model.eval()
        for param in motion_smooth_model.parameters():
            param.requires_grad = False
    else:
        motion_smooth_model = None
    ################################### load motion infilling model ###########################
    if args.get('use_motion_infill_prior'):
        motion_infill_model = AE_infill(downsample=True, in_channel=4, kernel=args.get('conv_kernel')).to(device)
        infill_pretrain_weights = torch.load(args.get('AE_infill_path'), map_location=lambda storage, loc: storage)
        motion_infill_model.load_state_dict(infill_pretrain_weights)
    else:
        motion_infill_model = None
        infill_pretrain_weights = None
    ####################### create data loader / joint mapper / joint weights ########################
    img_folder = args.pop('img_folder', 'Color')
    dataset_obj = OpenPose(img_folder=img_folder, data_folder=data_folder, keyp_folder=keyp_folder, calib_dir=calib_dir,
                           prox_params_dir=prox_params_dir,
                           output_params_dir=output_folder,
                           marker_mask_dir=marker_mask_dir, **args)
    # drop_last=True keeps every batch the same size for the sliding window
    data_loader = torch.utils.data.DataLoader(dataset=dataset_obj,
                                              batch_size=args.get('batch_size'),
                                              shuffle=False,
                                              num_workers=0, drop_last=True)
    # map smplx joints to openpose, 118 = 25 body + 21 hand * 2 + 51 face
    joint_mapper = JointMapper(dataset_obj.get_model2data())
    # A weight for each joint of the model, 1 for each joint, 0 for joint 1,9,12
    joint_weights = dataset_obj.get_joint_weights().to(device=device, dtype=dtype)  # tensor, [118]
    joint_weights.unsqueeze_(dim=0)  # [1, 118]
    ####################### init smplx model ########################
    start = time.time()
    model_params = dict(model_path=args.get('model_folder'),
                        joint_mapper=joint_mapper,
                        create_global_orient=True,
                        create_body_pose=not args.get('use_vposer'),
                        create_betas=True,
                        create_left_hand_pose=True,
                        create_right_hand_pose=True,
                        create_expression=True,
                        create_jaw_pose=True,
                        create_leye_pose=True,
                        create_reye_pose=True,
                        create_transl=True,
                        dtype=dtype,
                        **args)
    male_model = smplx.create(gender='male', **model_params)
    # SMPL-H has no gender-neutral model
    if args.get('model_type') != 'smplh':
        neutral_model = smplx.create(gender='neutral', **model_params)
    female_model = smplx.create(gender='female', **model_params)
    ####################### create camera object ########################
    camera_center = None \
        if args.get('camera_center_x') is None or args.get('camera_center_y') is None \
        else torch.tensor([args.get('camera_center_x'), args.get('camera_center_y')], dtype=dtype).view(-1, 2)  # tensor, [1,2]
    camera = create_camera(focal_length_x=args.get('focal_length_x'),
                           focal_length_y=args.get('focal_length_y'),
                           center= camera_center,
                           batch_size=args.get('batch_size'),
                           dtype=dtype)
    # Camera extrinsic rotation is fixed; only body params are optimized
    if hasattr(camera, 'rotation'):
        camera.rotation.requires_grad = False
    ####################### create prior types ########################
    use_hands = args.get('use_hands', True)  # True
    use_face = args.get('use_face', True)  # True
    body_pose_prior = create_prior(
        prior_type=args.get('body_prior_type'),
        dtype=dtype,
        **args)
    jaw_prior, expr_prior = None, None
    if use_face:
        jaw_prior = create_prior(
            prior_type=args.get('jaw_prior_type'),
            dtype=dtype,
            **args)
        expr_prior = create_prior(
            prior_type=args.get('expr_prior_type', 'l2'),
            dtype=dtype, **args)
    left_hand_prior, right_hand_prior = None, None
    if use_hands:
        lhand_args = args.copy()
        lhand_args['num_gaussians'] = args.get('num_pca_comps')  # e.g. 12
        left_hand_prior = create_prior(
            prior_type=args.get('left_hand_prior_type'),
            dtype=dtype,
            use_left_hand=True,
            **lhand_args)
        rhand_args = args.copy()
        rhand_args['num_gaussians'] = args.get('num_pca_comps')
        right_hand_prior = create_prior(
            prior_type=args.get('right_hand_prior_type'),
            dtype=dtype,
            use_right_hand=True,
            **rhand_args)
    shape_prior = create_prior(
        prior_type=args.get('shape_prior_type', 'l2'),
        dtype=dtype, **args)
    angle_prior = create_prior(prior_type='angle', dtype=dtype)
    ##################### objects to cuda #######################
    camera = camera.to(device=device)
    female_model = female_model.to(device=device)
    male_model = male_model.to(device=device)
    if args.get('model_type') != 'smplh':
        neutral_model = neutral_model.to(device=device)
    body_pose_prior = body_pose_prior.to(device=device)
    angle_prior = angle_prior.to(device=device)
    shape_prior = shape_prior.to(device=device)
    if use_face:
        expr_prior = expr_prior.to(device=device)
        jaw_prior = jaw_prior.to(device=device)
    if use_hands:
        left_hand_prior = left_hand_prior.to(device=device)
        right_hand_prior = right_hand_prior.to(device=device)
    ######################### start fitting ########################
    for idx, data in enumerate(data_loader):
        input_dict, prox_params_dict = data[0], data[1]  # ex. prox_params_dict['transl']: tensor, [bs, 3]
        for param_name in prox_params_dict:
            prox_params_dict[param_name] = prox_params_dict[param_name].to(device)
        ##################### read input img/keypoint/scan/... ###############
        img = input_dict['img'].to(device)  # tensor, [bs, 1080, 1920, 3]
        fn = input_dict['fn']  # list, ['s001_frame_00001__00.00.00.033', ...]
        keypoints = input_dict['keypoints'].to(device)  # [bs, num_person, 118, 3]
        marker_mask = input_dict['marker_mask'].to(device)  # [bs, 67]
        init_trans = input_dict['init_trans'].to(device).view(-1,3)  # [bs, 3]
        scan_point_num = input_dict['scan_point_num']  # [bs], valid number of scan pts from depth img
        scan = input_dict['scan']  # [bs, 20000, 3], pad 0 for number_pts < 20000
        # todo: do not load depth info if you don't use depth in optimization terms
        # if args.get('batch_size') > 1:
        #     scan = None
        print('Processing: {} to {}'.format(input_dict['img_path'][0], input_dict['img_path'][-1]))
        sys.stdout.flush()
        # TODO: won't work for multiple persons
        person_id = 0
        ####################### set save paths #########################
        curr_result_fn_list = []
        curr_mesh_fn_list = []
        curr_rendering_fn_list = []
        out_img_fn_list = []
        # path to save logs; frames are 1-indexed per batch window
        start_frame = idx * args.get('batch_size') + 1
        end_frame = start_frame + args.get('batch_size') - 1
        cur_log_folder = osp.join(tensorboard_log_dir, 'frame{}_to_frame{}'.format(start_frame, end_frame))
        if not osp.exists(cur_log_folder):
            os.makedirs(cur_log_folder)
        for i in range(len(fn)):
            # path to save images
            out_img_fn = osp.join(out_img_folder, fn[i] + '.png')
            out_img_fn_list.append(out_img_fn)
            # path to save rendered imgs
            curr_rendering_fn = osp.join(out_rendering_dir, fn[i] + '.png')
            curr_rendering_fn_list.append(curr_rendering_fn)
            # path to save optimized smplx params
            curr_result_folder = osp.join(result_folder, fn[i])
            if not osp.exists(curr_result_folder):
                os.makedirs(curr_result_folder)
            curr_result_fn = osp.join(curr_result_folder, '{:03d}.pkl'.format(person_id))
            curr_result_fn_list.append(curr_result_fn)
            # path to save optimized mesh
            curr_mesh_folder = osp.join(mesh_folder, fn[i])
            if not osp.exists(curr_mesh_folder):
                os.makedirs(curr_mesh_folder)
            curr_mesh_fn = osp.join(curr_mesh_folder, '{:03d}.ply'.format(person_id))
            curr_mesh_fn_list.append(curr_mesh_fn)
        # NOTE(review): if gender is anything other than neutral/female/male,
        # body_model stays unbound from a previous iteration (or raises on the
        # first one) — gender values are assumed validated by parse_config.
        gender = input_gender  # e.g. 'male'
        if gender == 'neutral':
            body_model = neutral_model
        elif gender == 'female':
            body_model = female_model
        elif gender == 'male':
            body_model = male_model
        ########################## fitting #########################
        if idx == 0:
            first_batch_flag = 1  # if it's the 1st motion clip
        else:
            first_batch_flag = 0
        fit_temp_loadprox_slide(img=img,
                                keypoints=keypoints[:, person_id,],
                                marker_mask=marker_mask,
                                init_trans=init_trans,
                                scan_point_num=scan_point_num,
                                scan=scan,
                                cam2world_dir=cam2world_dir,
                                scene_dir=scene_dir,
                                sdf_dir=sdf_dir,
                                body_segments_dir=body_segments_dir,
                                scene_name=scene_name,
                                body_model=body_model,
                                camera=camera,
                                joint_weights=joint_weights,
                                dtype=dtype,
                                output_folder=output_folder,
                                out_img_fn_list=out_img_fn_list,
                                result_fn_list=curr_result_fn_list,
                                mesh_fn_list=curr_mesh_fn_list,
                                log_folder=cur_log_folder,
                                rendering_fn_list=curr_rendering_fn_list,
                                shape_prior=shape_prior,
                                expr_prior=expr_prior,
                                body_pose_prior=body_pose_prior,
                                left_hand_prior=left_hand_prior,
                                right_hand_prior=right_hand_prior,
                                jaw_prior=jaw_prior,
                                angle_prior=angle_prior,
                                prox_params_dict=prox_params_dict,
                                motion_smooth_model=motion_smooth_model,
                                motion_infill_model=motion_infill_model,
                                infill_pretrain_weights=infill_pretrain_weights,
                                device=device,
                                first_batch_flag=first_batch_flag,
                                **args)
    elapsed = time.time() - start
    time_msg = time.strftime('%H hours, %M minutes, %S seconds',
                             time.gmtime(elapsed))
    print('Processing the data took: {}'.format(time_msg))
    sys.stdout.flush()
if __name__ == "__main__":
    # Entry point: build the flat config dict from CLI/config file, then fit.
    args = parse_config()
    main(**args)
|
en
| 0.482054
|
# -*- coding: utf-8 -*- # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # You can only use this computer program if you have closed # a license agreement with MPG or you get the right to use the computer # program from someone who is authorized to grant you that right. # Any use of the computer program without a valid license is prohibited and # liable to prosecution. # # Copyright©2019 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems and the Max Planck Institute for Biological # Cybernetics. All rights reserved. # # Contact: <EMAIL> ################ read/creat paths ######################## # e.x. 'N3OpenArea_00157_01' # e.x. 'N3OpenArea' # '/mnt/hdd/PROX' # prox_params_dir = osp.join(base_dir, 'PROXD_filled', recording_name) # TODO: to set # Store the arguments for the current experiment # remove 'output_folder' from args list # male ################################## load motion prior model ############################# ################################### load motion infilling model ########################### ####################### create data loader / joint mapper / joint weights ######################## # map smplx joints to openpose, 118=25body+21hand*2*51face # A weight for each joint of the model, 1 for each joint, 0 for joint 1,9,12 # tensor, [118] # [1, 118] ####################### init smplx model ######################## # SMPL-H has no gender-neutral model ####################### create camera object ######################## # tensor, [1,2] ####################### creat prior type ######################## # True # True # 12 ##################### objects to cuda ####################### ######################### start fitting ######################## # ex. prox_params_dict[transl]: tensor, [bs, 3] ##################### read input img/keypoint/scan/... 
############### # tensor, [bs, 1080, 1920, 3] # list, ['s001_frame_00001__00.00.00.033', ...] # [bs, num_person, 118, 3] # [bs, 67] # [bs, 3] # [bs], valid number of scan pts from depth img # [bs, 20000, 3], pad 0 for number_pts < 20000 # todo: do not load depth info if you don't use depth in optimization terms # if args.get('batch_size') > 1: # scan = None # 'points'/'colors': [num_valid_pts, 3] # TODO: won't work for multiple persons ####################### set save paths ######################### # path to save logs # path to save images # path to save rendered imgs # path to save optimized smplx params # path to save optimized mesh # male ########################## fitting ######################### # if it's the 1st motion clip
| 1.97214
| 2
|
.cloud-build/utils/util.py
|
nayaknishant/vertex-ai-samples
| 0
|
6626457
|
<reponame>nayaknishant/vertex-ai-samples
import os
import subprocess
import tarfile
import uuid
from datetime import datetime
from typing import Optional
from google.auth import credentials as auth_credentials
from google.cloud import storage
from google.cloud.aiplatform import utils
def download_file(bucket_name: str, blob_name: str, destination_file: str) -> str:
    """Copy a remote GCS object to a local path using the ``gsutil`` CLI.

    Args:
        bucket_name: GCS bucket name (without the ``gs://`` prefix).
        blob_name: Object path inside the bucket.
        destination_file: Local filesystem path to copy the object to.

    Returns:
        The local destination path, for convenient chaining.

    Raises:
        subprocess.CalledProcessError: If the ``gsutil cp`` command fails.
    """
    # An f-string is clearer than the original "".join / "/".join chain.
    remote_file_path = f"gs://{bucket_name}/{blob_name}"
    subprocess.check_output(
        ["gsutil", "cp", remote_file_path, destination_file], encoding="UTF-8"
    )
    return destination_file
def upload_file(
    local_file_path: str,
    remote_file_path: str,
) -> str:
    """Copy a local file to a GCS destination using the ``gsutil`` CLI.

    Args:
        local_file_path: Path of the file on the local filesystem.
        remote_file_path: Destination ``gs://`` path.

    Returns:
        The remote GCS path, for convenient chaining.
    """
    gsutil_cmd = ["gsutil", "cp", local_file_path, remote_file_path]
    subprocess.check_output(gsutil_cmd, encoding="UTF-8")
    return remote_file_path
def archive_code_and_upload(staging_bucket: str):
    """Archive all git-tracked files in the current directory and upload to GCS.

    Creates a uniquely named ``.tar.gz`` containing every file listed by
    ``git ls-tree -r HEAD``, uploads it under ``<staging_bucket>/code_archives/``
    and returns the remote path.

    Args:
        staging_bucket: GCS staging location (e.g. ``gs://my-bucket``).

    Returns:
        The ``gs://`` path of the uploaded archive.

    Raises:
        subprocess.CalledProcessError: If git or gsutil fails.
    """
    # Archive all source in current directory (git-tracked files only)
    unique_id = uuid.uuid4()
    source_archived_file = f"source_archived_{unique_id}.tar.gz"
    git_files = subprocess.check_output(
        ["git", "ls-tree", "-r", "HEAD", "--name-only"], encoding="UTF-8"
    ).split("\n")
    with tarfile.open(source_archived_file, "w:gz") as tar:
        for file in git_files:
            # Skip the empty trailing entry from split("\n") and files that
            # are tracked but absent from the working tree.
            if file and os.path.exists(file):
                tar.add(file)
    # Upload archive to GCS bucket
    source_archived_file_gcs = upload_file(
        local_file_path=source_archived_file,
        remote_file_path="/".join(
            [staging_bucket, "code_archives", source_archived_file]
        ),
    )
    print(f"Uploaded source code archive to {source_archived_file_gcs}")
    return source_archived_file_gcs
|
import os
import subprocess
import tarfile
import uuid
from datetime import datetime
from typing import Optional
from google.auth import credentials as auth_credentials
from google.cloud import storage
from google.cloud.aiplatform import utils
def download_file(bucket_name: str, blob_name: str, destination_file: str) -> str:
"""Copies a remote GCS file to a local path"""
remote_file_path = "".join(["gs://", "/".join([bucket_name, blob_name])])
subprocess.check_output(
["gsutil", "cp", remote_file_path, destination_file], encoding="UTF-8"
)
return destination_file
def upload_file(
local_file_path: str,
remote_file_path: str,
) -> str:
"""Copies a local file to a GCS path"""
subprocess.check_output(
["gsutil", "cp", local_file_path, remote_file_path], encoding="UTF-8"
)
return remote_file_path
def archive_code_and_upload(staging_bucket: str):
# Archive all source in current directory
unique_id = uuid.uuid4()
source_archived_file = f"source_archived_{unique_id}.tar.gz"
git_files = subprocess.check_output(
["git", "ls-tree", "-r", "HEAD", "--name-only"], encoding="UTF-8"
).split("\n")
with tarfile.open(source_archived_file, "w:gz") as tar:
for file in git_files:
if len(file) > 0 and os.path.exists(file):
tar.add(file)
# Upload archive to GCS bucket
source_archived_file_gcs = upload_file(
local_file_path=f"{source_archived_file}",
remote_file_path="/".join(
[staging_bucket, "code_archives", source_archived_file]
),
)
print(f"Uploaded source code archive to {source_archived_file_gcs}")
return source_archived_file_gcs
|
en
| 0.755565
|
Copies a remote GCS file to a local path Copies a local file to a GCS path # Archive all source in current directory # Upload archive to GCS bucket
| 2.430555
| 2
|
venv/Scripts/django-admin.py
|
DavidWz0403/Challange
| 0
|
6626458
|
<filename>venv/Scripts/django-admin.py
#!c:\users\davidwz\desktop\challange\venv\scripts\python.exe
# Virtualenv-generated console entry point for Django's command-line utility.
# It simply forwards sys.argv to Django's command dispatcher.
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
|
<filename>venv/Scripts/django-admin.py
#!c:\users\davidwz\desktop\challange\venv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
en
| 0.285852
|
#!c:\users\davidwz\desktop\challange\venv\scripts\python.exe
| 1.105738
| 1
|
losses/utils.py
|
bunthet01/srl_zoo
| 3
|
6626459
|
try:
    # absolute import
    from pipeline import NO_PAIRS_ERROR
    from utils import printRed
except:
    # relative import
    from ..pipeline import NO_PAIRS_ERROR
    from ..utils import printRed

import sys

import numpy as np
import torch as th
def overSampling(batch_size, m_list, pairs, function_on_pairs, actions, rewards):
    """
    Look for minibatches missing pairs of observations with the similar/dissimilar rewards (see params)
    Sample for each of those minibatches an observation from another batch that satisfies the
    similarity/dissimilarity with the 1rst observation.
    return the new pairs & the modified minibatch list
    :param batch_size: (int)
    :param m_list: (list) mini-batch list, mutated in place
    :param pairs: similar / dissimilar pairs, mutated in place
    :param function_on_pairs: (function) findDissimilar applied to pairs
    :param actions: (np.ndarray)
    :param rewards: (np.ndarray)
    :return: (list, list) pairs, mini-batch list modified
    """
    # Human-readable label for the progress message below
    if function_on_pairs.__name__ == "findDissimilar":
        pair_name = 'dissimilar pairs'
    else:
        pair_name = 'Unknown pairs'
    counter = 0
    for minibatch_id, d in enumerate(pairs):
        #print("what is it ?: ", minibatch_id, d)
        do = True
        if len(d) == 0:
            counter += 1
        # Only enter the search when this minibatch has no pairs at all;
        # `do` stops the search after the first sample has been copied over.
        while do and len(d) == 0:
            # for every minibatch & obs of a mini-batch list
            for m_id, minibatch in enumerate(m_list):
                for i in range(batch_size):
                    # Look for matching samples j in other minibatches m_id
                    for j in function_on_pairs(i, m_list[minibatch_id], minibatch, actions, rewards):
                        # Copy samples - done once (guarded by `do`)
                        if (j != i) & (minibatch_id != m_id) and do:
                            m_list[minibatch_id][j] = minibatch[j]
                            pairs[minibatch_id] = np.array([[i, j]])
                            do = False
    print('Dealt with {} minibatches - {}'.format(counter, pair_name))
    return pairs, m_list
def findDissimilar(index, minibatch1, minibatch2, actions, rewards):
    """
    check which samples should be dissimilar
    because they lead to different rewards after the same actions
    :param index: (int)
    :param minibatch1: (np.ndarray)
    :param minibatch2: (np.ndarray)
    :param actions: (np.ndarray)
    :param rewards: (np.ndarray)
    :return: (np.ndarray) indices into minibatch2
    """
    # Same action as the reference sample ...
    same_action = actions[minibatch2] == actions[minibatch1[index]]
    # ... but a different reward at the next timestep
    different_next_reward = rewards[minibatch2 + 1] != rewards[minibatch1[index] + 1]
    return np.where(same_action * different_next_reward)[0]
def findSameActions(index, minibatch, actions):
    """
    Get observations indices where the same action was performed
    as in a reference observation
    :param index: (int)
    :param minibatch: (np.ndarray)
    :param actions: (np.ndarray)
    :return: (np.ndarray)
    """
    reference_action = actions[minibatch[index]]
    return np.where(actions[minibatch] == reference_action)[0]
def findPriorsPairs(batch_size, minibatchlist, actions, rewards, n_actions, n_pairs_per_action):
    """
    Build the dissimilar-pair and same-action-pair lists used by the priors.

    Exits the process with NO_PAIRS_ERROR if any minibatch ends up with no
    usable pairs (requires the module-level ``import sys``).
    :param batch_size: (int)
    :param minibatchlist: ([[int]])
    :param actions: (np.ndarray)
    :param rewards: (np.ndarray)
    :param n_actions: (int)
    :param n_pairs_per_action: ([int]) mutated in place with per-action counts
    :return: ([np.ndarray], [np.ndarray]) dissimilar pairs, same-action pairs
    """
    # One array of (i, j) index pairs per minibatch; j > i avoids duplicates
    dissimilar_pairs = [
        np.array(
            [[i, j] for i in range(batch_size) for j in findDissimilar(i, minibatch, minibatch, actions, rewards) if
             j > i],
            dtype='int64') for minibatch in minibatchlist]
    # sampling relevant pairs to have at least a pair of dissimilar obs in every minibatches
    dissimilar_pairs, minibatchlist = overSampling(batch_size, minibatchlist, dissimilar_pairs,
                                                   findDissimilar, actions, rewards)
    # same_actions: list of arrays, each containing one pair of observation ids
    same_actions_pairs = [
        np.array([[i, j] for i in range(batch_size) for j in findSameActions(i, minibatch, actions) if j > i],
                 dtype='int64') for minibatch in minibatchlist]
    for pair, minibatch in zip(same_actions_pairs, minibatchlist):
        for i in range(n_actions):
            n_pairs_per_action[i] += np.sum(actions[minibatch[pair[:, 0]]] == i)
    # Stats about pairs
    print("Number of pairs per action:")
    print(n_pairs_per_action)
    print("Pairs of {} unique actions".format(np.sum(n_pairs_per_action > 0)))
    # Abort early if any minibatch would give the priors nothing to work with
    for item in same_actions_pairs + dissimilar_pairs:
        if len(item) == 0:
            msg = "No same actions or dissimilar pairs found for at least one minibatch (currently is {})\n".format(
                batch_size)
            msg += "=> Consider increasing the batch_size or changing the seed"
            printRed(msg)
            sys.exit(NO_PAIRS_ERROR)
    return dissimilar_pairs, same_actions_pairs
# From https://github.com/pytorch/pytorch/pull/4411
def correlationMatrix(mat, eps=1e-8):
    """
    Returns Correlation matrix for mat. It is the equivalent of numpy np.corrcoef
    :param mat: (th.Tensor) Shape: (N, D)
    :param eps: (float) Small value to avoid division by zero.
    :return: (th.Tensor) The correlation matrix Shape: (N, N)
    """
    assert mat.dim() == 2, "Input must be a 2D matrix."
    n_samples = mat.size(1)
    # Center each row around its own mean (broadcast subtraction)
    centered = mat - mat.mean(dim=1, keepdim=True)
    # Unbiased covariance between rows
    cov = centered.mm(centered.t()) / (n_samples - 1)
    # Normalize by the per-row standard deviations; eps guards zero variance
    inv_std = th.rsqrt(th.diag(cov) + eps)
    corr = cov * inv_std.view(1, -1) * inv_std.view(-1, 1)
    # Numerical noise can push values slightly outside [-1, 1]
    return corr.clamp(-1.0, 1.0)
|
try:
# absolute import
from pipeline import NO_PAIRS_ERROR
from utils import printRed
except:
# relative import
from ..pipeline import NO_PAIRS_ERROR
from ..utils import printRed
import torch as th
import numpy as np
def overSampling(batch_size, m_list, pairs, function_on_pairs, actions, rewards):
"""
Look for minibatches missing pairs of observations with the similar/dissimilar rewards (see params)
Sample for each of those minibatches an observation from another batch that satisfies the
similarity/dissimilarity with the 1rst observation.
return the new pairs & the modified minibatch list
:param batch_size: (int)
:param m_list: (list) mini-batch list
:param pairs: similar / dissimilar pairs
:param function_on_pairs: (function) findDissimilar applied to pairs
:param actions: (np.ndarray)
:param rewards: (np.ndarray)
:return: (list, list) pairs, mini-batch list modified
"""
# For a each minibatch_id
if function_on_pairs.__name__ == "findDissimilar":
pair_name = 'dissimilar pairs'
else:
pair_name = 'Unknown pairs'
counter = 0
for minibatch_id, d in enumerate(pairs):
#print("what is it ?: ", minibatch_id, d)
do = True
if len(d) == 0:
counter += 1
# Do if it contains no similar pairs of samples
while do and len(d) == 0:
# for every minibatch & obs of a mini-batch list
for m_id, minibatch in enumerate(m_list):
for i in range(batch_size):
# Look for similar samples j in other minibatches m_id
for j in function_on_pairs(i, m_list[minibatch_id], minibatch, actions, rewards):
# Copy samples - done once
if (j != i) & (minibatch_id != m_id) and do:
m_list[minibatch_id][j] = minibatch[j]
pairs[minibatch_id] = np.array([[i, j]])
do = False
print('Dealt with {} minibatches - {}'.format(counter, pair_name))
return pairs, m_list
def findDissimilar(index, minibatch1, minibatch2, actions, rewards):
"""
check which samples should be dissimilar
because they lead to different rewards after the same actions
:param index: (int)
:param minibatch1: (np.ndarray)
:param minibatch2: (np.ndarray)
:param actions: (np.ndarray)
:param rewards: (np.ndarray)
:return: (dict, np.ndarray)
"""
return np.where((actions[minibatch2] == actions[minibatch1[index]]) *
(rewards[minibatch2 + 1] != rewards[minibatch1[index] + 1]))[0]
def findSameActions(index, minibatch, actions):
"""
Get observations indices where the same action was performed
as in a reference observation
:param index: (int)
:param minibatch: (np.ndarray)
:param actions: (np.ndarray)
:return: (np.ndarray)
"""
return np.where(actions[minibatch] == actions[minibatch[index]])[0]
def findPriorsPairs(batch_size, minibatchlist, actions, rewards, n_actions, n_pairs_per_action):
"""
:param batch_size: (int)
:param minibatchlist: ([[int]])
:param actions: (np.ndarray)
:param rewards: (np.ndarray)
:param n_actions: (int)
:param n_pairs_per_action: ([int])
:return: ([np.ndarray], [np.ndarray])
"""
dissimilar_pairs = [
np.array(
[[i, j] for i in range(batch_size) for j in findDissimilar(i, minibatch, minibatch, actions, rewards) if
j > i],
dtype='int64') for minibatch in minibatchlist]
# sampling relevant pairs to have at least a pair of dissimilar obs in every minibatches
dissimilar_pairs, minibatchlist = overSampling(batch_size, minibatchlist, dissimilar_pairs,
findDissimilar, actions, rewards)
# same_actions: list of arrays, each containing one pair of observation ids
same_actions_pairs = [
np.array([[i, j] for i in range(batch_size) for j in findSameActions(i, minibatch, actions) if j > i],
dtype='int64') for minibatch in minibatchlist]
for pair, minibatch in zip(same_actions_pairs, minibatchlist):
for i in range(n_actions):
n_pairs_per_action[i] += np.sum(actions[minibatch[pair[:, 0]]] == i)
# Stats about pairs
print("Number of pairs per action:")
print(n_pairs_per_action)
print("Pairs of {} unique actions".format(np.sum(n_pairs_per_action > 0)))
for item in same_actions_pairs + dissimilar_pairs:
if len(item) == 0:
msg = "No same actions or dissimilar pairs found for at least one minibatch (currently is {})\n".format(
batch_size)
msg += "=> Consider increasing the batch_size or changing the seed"
printRed(msg)
sys.exit(NO_PAIRS_ERROR)
return dissimilar_pairs, same_actions_pairs
# From https://github.com/pytorch/pytorch/pull/4411
def correlationMatrix(mat, eps=1e-8):
    """
    Compute the Pearson correlation matrix of ``mat``, the equivalent of
    numpy's ``np.corrcoef``.

    :param mat: (th.Tensor) Shape: (N, D)
    :param eps: (float) Small value to avoid division by zero.
    :return: (th.Tensor) The correlation matrix Shape: (N, N)
    """
    assert mat.dim() == 2, "Input must be a 2D matrix."
    # Center every row around its mean.
    row_means = mat.mean(1).repeat(mat.size(1)).view(mat.size(1), -1).t()
    centered = mat - row_means
    # Sample covariance estimate (divide by D - 1).
    covariance = centered.mm(centered.t()).div(centered.size(1) - 1)
    # Reciprocal standard deviations, guarded against zero variance by eps.
    inv_stddev = th.rsqrt(th.diag(covariance) + eps)
    correlation = covariance.mul(inv_stddev.expand_as(covariance))
    correlation.mul_(inv_stddev.expand_as(covariance).t())
    # Numerical noise can push values slightly outside [-1, 1].
    return correlation.clamp(-1.0, 1.0)
|
en
| 0.695197
|
# absolute import # relative import Look for minibatches missing pairs of observations with the similar/dissimilar rewards (see params) Sample for each of those minibatches an observation from another batch that satisfies the similarity/dissimilarity with the 1rst observation. return the new pairs & the modified minibatch list :param batch_size: (int) :param m_list: (list) mini-batch list :param pairs: similar / dissimilar pairs :param function_on_pairs: (function) findDissimilar applied to pairs :param actions: (np.ndarray) :param rewards: (np.ndarray) :return: (list, list) pairs, mini-batch list modified # For a each minibatch_id #print("what is it ?: ", minibatch_id, d) # Do if it contains no similar pairs of samples # for every minibatch & obs of a mini-batch list # Look for similar samples j in other minibatches m_id # Copy samples - done once check which samples should be dissimilar because they lead to different rewards after the same actions :param index: (int) :param minibatch1: (np.ndarray) :param minibatch2: (np.ndarray) :param actions: (np.ndarray) :param rewards: (np.ndarray) :return: (dict, np.ndarray) Get observations indices where the same action was performed as in a reference observation :param index: (int) :param minibatch: (np.ndarray) :param actions: (np.ndarray) :return: (np.ndarray) :param batch_size: (int) :param minibatchlist: ([[int]]) :param actions: (np.ndarray) :param rewards: (np.ndarray) :param n_actions: (int) :param n_pairs_per_action: ([int]) :return: ([np.ndarray], [np.ndarray]) # sampling relevant pairs to have at least a pair of dissimilar obs in every minibatches # same_actions: list of arrays, each containing one pair of observation ids # Stats about pairs # From https://github.com/pytorch/pytorch/pull/4411 Returns Correlation matrix for mat. It is the equivalent of numpy np.corrcoef :param mat: (th.Tensor) Shape: (N, D) :param esp: (float) Small value to avoid division by zero. 
:return: (th.Tensor) The correlation matrix Shape: (N, N)
| 2.231509
| 2
|
logic/list_operations.py
|
royhershkovitz/Sudoku-Solver
| 0
|
6626460
|
def diff(lst1: list, lst2: list):
    """Return the elements of ``lst1`` that do not appear in ``lst2``."""
    return [item for item in lst1 if item not in lst2]
def intersection(lst1: list, *args):
    """Return the elements of ``lst1`` present in every list of ``args``."""
    result = lst1
    for other in args:
        result = [item for item in result if item in other]
    return result
def intersection_list_organs(lst: list):
    """
    Intersect all sub-lists contained in ``lst``.

    :param lst: non-empty list of lists
    :return: list of elements common to every sub-list (order taken from the
        first sub-list)
    :raises IndexError: if ``lst`` is empty
    """
    # Index instead of pop(0): the original popped the first element, which
    # mutated the caller's list as a hidden side effect.
    out = lst[0]
    for sub in lst[1:]:
        out = intersection(out, sub)
    return out
|
def diff(lst1: list, lst2: list):
return [value for value in lst1 if not value in lst2]
def intersection(lst1: list, *args):
lst1 = lst1
for lst2 in args:
lst1 = [value for value in lst1 if value in lst2]
return lst1
def intersection_list_organs(lst: list):
out = lst.pop(0)
for sub in lst:
out = intersection(out, sub)
return out
|
none
| 1
| 3.696896
| 4
|
|
setup.py
|
sigopt/sigopt_sklearn
| 58
|
6626461
|
<gh_stars>10-100
from setuptools import setup
from sigopt_sklearn.version import VERSION
# Keep this in sync with `requirements.txt` and the conda install process in `.travis.yml`!
install_requires = [
    'joblib>=0.9.4',
    'numpy>=1.9',
    'scikit-learn>=0.19,<0.21',
    'sigopt>=2.6.0',
]
# Package metadata; the version string comes from sigopt_sklearn/version.py.
setup(
  name='sigopt_sklearn',
  version=VERSION,
  description='SigOpt + scikit-learn Integrations',
  author='SigOpt',
  author_email='<EMAIL>',
  url='https://sigopt.com/',
  packages=['sigopt_sklearn'],
  install_requires=install_requires,
  # xgboost is only required for the ensemble helpers.
  extras_require={
    'ensemble': ['xgboost>=0.4a30,<0.90'],
  },
  classifiers=[
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Topic :: Software Development :: Libraries :: Python Modules",
  ]
)
|
from setuptools import setup
from sigopt_sklearn.version import VERSION
# Keep this in sync with `requirements.txt` and the conda install process in `.travis.yml`!
install_requires = [
'joblib>=0.9.4',
'numpy>=1.9',
'scikit-learn>=0.19,<0.21',
'sigopt>=2.6.0',
]
setup(
name='sigopt_sklearn',
version=VERSION,
description='SigOpt + scikit-learn Integrations',
author='SigOpt',
author_email='<EMAIL>',
url='https://sigopt.com/',
packages=['sigopt_sklearn'],
install_requires=install_requires,
extras_require={
'ensemble': ['xgboost>=0.4a30,<0.90'],
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
]
)
|
en
| 0.806751
|
# Keep this in sync with `requirements.txt` and the conda install process in `.travis.yml`!
| 1.23337
| 1
|
scripts/mix.py
|
jfajkowski/asr-utils
| 0
|
6626462
|
#!/usr/bin/python3
import argparse
import logging
import random
def parse_args():
    """Build and evaluate the command-line interface.

    Exactly one of the size options (-b/-c/-l/-w) is required; -f may be
    repeated to list corpora together with their mixing ratios.
    """
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    parser.add_argument('-f', '--file', dest='files', action='append', nargs=2, metavar=('CORPUS', 'RATIO'), default=[])
    # The four mutually exclusive size options share the same shape.
    for short_flag, long_flag in (('-b', '--bytes'), ('-c', '--chars'),
                                  ('-l', '--lines'), ('-w', '--words')):
        group.add_argument(short_flag, long_flag, dest=long_flag.lstrip('-'), type=int)
    return parser.parse_args()
def normalize(files):
    """Scale the ratio of every (corpus, ratio) pair so the ratios sum to 1."""
    total = sum(float(ratio) for _, ratio in files)
    return [(corpus, float(ratio) / total) for corpus, ratio in files]
def mix(files, max_count, count_method):
    """
    Select roughly ``max_count`` units (bytes/chars/lines/words) in total,
    split across the corpora according to their normalized ratios.

    :param files: list of (corpus_path, ratio) pairs, ratios summing to 1
    :param max_count: (int) total number of units to select
    :param count_method: one of the count_* functions, measures one line
    :return: list of (corpus_path, selected_count) pairs
    """
    count = 0
    results = []
    logging.info('Selecting ~{} {}'.format(max_count, count_method_unit(count_method)))
    for corpus, ratio in files:
        # Each corpus contributes its proportional share of the total budget.
        max_corpus_count = int(max_count * ratio)
        corpus_count = select_from_corpus(corpus, max_corpus_count, count_method)
        count += corpus_count
        results.append((corpus, corpus_count))
    logging.info('Selected ~{} {}'.format(count, count_method_unit(count_method)))
    return results
def select_from_corpus(corpus, max_corpus_count, count_method):
    """
    Print randomly selected lines from ``corpus`` until roughly
    ``max_corpus_count`` units have been emitted, re-reading the file as
    many times as needed.

    :param corpus: (str) path of the corpus file
    :param max_corpus_count: (int) target number of units for this corpus
    :param count_method: one of the count_* functions
    :return: (int) number of units actually selected
    """
    iteration = 0
    corpus_count = 0
    logging.info('Selecting ~{} {} from {}'.format(max_corpus_count, count_method_unit(count_method), corpus))
    while corpus_count < max_corpus_count:
        with open(corpus, encoding='UTF-8') as c_in:
            # Pull a random sample covering the remaining budget.
            lines, iteration_count = random_lines(c_in, max_corpus_count - corpus_count, count_method)
            for line in lines:
                print(line.rstrip('\n'))
        iteration += 1
        corpus_count += iteration_count
    logging.info(
        'Selected {} {} from {} in {} iteration(s)'.format(corpus_count, count_method_unit(count_method),
                                                           corpus, iteration))
    return corpus_count
def random_lines(file, max_count, count_method):
    """
    Reservoir-style selection of lines from ``file`` totalling about
    ``max_count`` units.

    Lines are appended until the unit budget is reached; after that, each
    further line may replace a random earlier pick, adjusting the running
    unit count for the swap.

    :param file: open text file, iterated line by line
    :param max_count: (int) unit budget for this pass
    :param count_method: one of the count_* functions
    :return: (list, int) the selected lines and their total unit count
    """
    count = 0
    selected = []
    for i, line in enumerate(file):
        if count < max_count:
            selected.append(line)
            count += count_method(line)
        else:
            # NOTE(review): this resembles reservoir sampling, but the swap
            # probability depends on how many lines fit the budget, so the
            # selection is not uniformly distributed — presumably acceptable
            # for corpus mixing; confirm if uniformity matters.
            m = random.randint(0, i)
            if m < len(selected):
                count -= count_method(selected[m])
                selected[m] = line
                count += count_method(selected[m])
    return selected, count
def count_bytes(line):
    """Size of the line in UTF-8 bytes."""
    return len(line.encode('UTF-8'))
def count_chars(line):
    """Number of characters in the line."""
    return len(line)
def count_lines(line):
    """1 for a non-empty line, 0 otherwise."""
    return 0 if not line else 1
def count_words(line):
    """Number of space-separated tokens in the line."""
    return len(line.split(' '))
def count_method_unit(count_method):
    """Human-readable unit name derived from a count_* function's name."""
    name = count_method.__name__
    return name.replace('count_', '')
def main():
    """Parse arguments, mix the corpora to stdout and report the ratios."""
    logging.basicConfig(format='[%(asctime)s][%(levelname)s] %(name)s: %(message)s', level=logging.INFO)
    args = parse_args()
    files = normalize(args.files)
    logging.info('Desired ratio: {}'.format(','.join([str(f) for f in files])))
    # Exactly one of these size options is set (mutually exclusive, required).
    if args.bytes:
        files = mix(files, args.bytes, count_bytes)
    elif args.chars:
        files = mix(files, args.chars, count_chars)
    elif args.lines:
        files = mix(files, args.lines, count_lines)
    elif args.words:
        files = mix(files, args.words, count_words)
    # Report how close the achieved split is to the requested one.
    files = normalize(files)
    logging.info('Achieved ratio: {}'.format(','.join([str(f) for f in files])))
if __name__ == '__main__':
    main()
|
#!/usr/bin/python3
import argparse
import logging
import random
def parse_args():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('-f', '--file', dest='files', action='append', nargs=2, metavar=('CORPUS', 'RATIO'), default=[])
group.add_argument('-b', '--bytes', dest='bytes', type=int)
group.add_argument('-c', '--chars', dest='chars', type=int)
group.add_argument('-l', '--lines', dest='lines', type=int)
group.add_argument('-w', '--words', dest='words', type=int)
return parser.parse_args()
def normalize(files):
normalized = []
ratio_sum = sum([float(i[1]) for i in files])
for corpus, ratio in files:
normalized.append((corpus, float(ratio) / ratio_sum))
return normalized
def mix(files, max_count, count_method):
count = 0
results = []
logging.info('Selecting ~{} {}'.format(max_count, count_method_unit(count_method)))
for corpus, ratio in files:
max_corpus_count = int(max_count * ratio)
corpus_count = select_from_corpus(corpus, max_corpus_count, count_method)
count += corpus_count
results.append((corpus, corpus_count))
logging.info('Selected ~{} {}'.format(count, count_method_unit(count_method)))
return results
def select_from_corpus(corpus, max_corpus_count, count_method):
iteration = 0
corpus_count = 0
logging.info('Selecting ~{} {} from {}'.format(max_corpus_count, count_method_unit(count_method), corpus))
while corpus_count < max_corpus_count:
with open(corpus, encoding='UTF-8') as c_in:
lines, iteration_count = random_lines(c_in, max_corpus_count - corpus_count, count_method)
for line in lines:
print(line.rstrip('\n'))
iteration += 1
corpus_count += iteration_count
logging.info(
'Selected {} {} from {} in {} iteration(s)'.format(corpus_count, count_method_unit(count_method),
corpus, iteration))
return corpus_count
def random_lines(file, max_count, count_method):
count = 0
selected = []
for i, line in enumerate(file):
if count < max_count:
selected.append(line)
count += count_method(line)
else:
m = random.randint(0, i)
if m < len(selected):
count -= count_method(selected[m])
selected[m] = line
count += count_method(selected[m])
return selected, count
def count_bytes(line):
return len(line.encode('UTF-8'))
def count_chars(line):
return len(line)
def count_lines(line):
return 1 if line else 0
def count_words(line):
return len(line.split(' '))
def count_method_unit(count_method):
return count_method.__name__.replace('count_', '')
def main():
logging.basicConfig(format='[%(asctime)s][%(levelname)s] %(name)s: %(message)s', level=logging.INFO)
args = parse_args()
files = normalize(args.files)
logging.info('Desired ratio: {}'.format(','.join([str(f) for f in files])))
if args.bytes:
files = mix(files, args.bytes, count_bytes)
elif args.chars:
files = mix(files, args.chars, count_chars)
elif args.lines:
files = mix(files, args.lines, count_lines)
elif args.words:
files = mix(files, args.words, count_words)
files = normalize(files)
logging.info('Achieved ratio: {}'.format(','.join([str(f) for f in files])))
if __name__ == '__main__':
main()
|
fr
| 0.386793
|
#!/usr/bin/python3
| 2.667698
| 3
|
common/src/stack/switch/pylib/switch/x1052.py
|
knutsonchris/stacki
| 0
|
6626463
|
<filename>common/src/stack/switch/pylib/switch/x1052.py<gh_stars>0
# Copyright (c) 2006 - 2017 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
from . import Switch, SwitchException
import os
import time
import pexpect
class SwitchDellX1052(Switch):
    """
    Class for interfacing with a Dell x1052 switch.

    Drives the switch CLI over an interactive SSH session (pexpect) and
    moves configuration files between switch and server over TFTP.
    """
    def supported(*cls):
        # (vendor, model) pairs this driver can manage
        return [
            ('Dell', 'x1052'),
        ]
    def connect(self):
        """Connect to the switch"""
        try:
            self.child = pexpect.spawn('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -tt ' +
                self.switch_ip_address)
            self._expect('User Name:', 10)
            self.child.sendline(self.username)
            self._expect('Password:')
            self.child.sendline(self.password)
        except:
            # NOTE(review): bare except also hides KeyboardInterrupt —
            # consider narrowing to pexpect exceptions.
            raise SwitchException("Couldn't connect to the switch")
    def disconnect(self):
        """Leave any pager prompt, exit the CLI and terminate the SSH child."""
        # q will exit out of an existing scrollable more/less type of prompt
        # Probably not necessary, but a bit safer
        # if there isn't an exit status
        # close the connection
        if not self.child.exitstatus:
            self.child.sendline("\nq\n")
            # exit should cleanly exit the ssh
            self.child.sendline("\nexit\n")
            # Just give it a few seconds to exit gracefully before terminate.
            time.sleep(3)
        self.child.terminate()
    def _expect(self, look_for, custom_timeout=15):
        """Wait for ``look_for`` in the session output; raise SwitchException
        (with buffered context) on timeout or a closed connection."""
        try:
            self.child.expect(look_for, timeout=custom_timeout)
        except pexpect.exceptions.TIMEOUT:
            # print "Giving SSH time to close gracefully...",
            for _ in range(9, -1, -1):
                if not self.child.isalive():
                    break
                time.sleep(1)
            debug_info = str(str(self.child.before) + str(self.child.buffer) + str(self.child.after))
            self.__exit__()
            raise SwitchException(self.switch_ip_address + " expected output '" + look_for +
                        "' from SSH connection timed out after " +
                        str(custom_timeout) + " seconds.\nBuffer: " + debug_info)
        except pexpect.exceptions.EOF:
            self.__exit__()
            raise SwitchException("SSH connection to " + self.switch_ip_address + " not available.")
    def get_mac_address_table(self):
        """Download the mac address table to /tmp/<switchname>_mac_address_table"""
        time.sleep(1)
        command = 'show mac address-table'
        self.child.expect('console#', timeout=60)
        with open('/tmp/%s_mac_address_table' % self.switchname, 'wb') as macout:
            # Mirror everything pexpect reads into the capture file.
            self.child.logfile = macout
            self.child.sendline(command)
            time.sleep(1)
            self.send_spacebar(4)
            self.child.expect('console#', timeout=60)
        self.child.logfile = None
    def parse_mac_address_table(self):
        """Parse the mac address table and return list of connected macs"""
        _hosts = []
        with open('/tmp/%s_mac_address_table' % self.switchname, 'r') as f:
            for line in f.readlines():
                if 'dynamic' in line:
                    # appends line to list
                    # map just splits out the port
                    # from the interface
                    _hosts.append(list(
                        map(lambda x: x.split('/')[-1],
                        line.split())
                        ))
        # sort by the third column (the port number)
        return sorted(_hosts, key=lambda x: x[2])
    def get_interface_status_table(self):
        """Download the interface status table to /tmp/<switchname>_interface_status_table"""
        time.sleep(1)
        command = 'show interface status'
        self.child.expect('console#', timeout=60)
        with open('/tmp/%s_interface_status_table' % self.switchname, 'wb') as macout:
            self.child.logfile = macout
            self.child.sendline(command)
            time.sleep(1)
            self.send_spacebar(4)
            self.child.expect('console#', timeout=60)
        self.child.logfile = None
    def parse_interface_status_table(self):
        """Parse the interface status and return list of port information"""
        _hosts = []
        with open('/tmp/%s_interface_status_table' % self.switchname, 'r') as f:
            for line in f.readlines():
                if 'gi1/0/' in line:
                    # appends line to list
                    # map just splits out the port
                    # from the interface
                    _hosts.append(list(
                        map(lambda x: x.split('/')[-1],
                        line.split())
                        ))
        return _hosts
    def send_spacebar(self, times=1):
        """Send Spacebar; Used to read more of the output"""
        command = "\x20"
        for i in range(times):
            self.child.send(command)
            time.sleep(1)
    def download(self):
        """Download the running-config from the switch to the server"""
        self.child.expect('console#', timeout=60)
        #
        # tftp requires the destination file to already exist and to be writable by all
        #
        filename = os.path.join(self.tftpdir, self.current_config)
        f = open(filename, 'w')
        f.close()
        os.chmod(filename, mode=0o777)
        cmd = "copy running-config tftp://%s/%s" % (self.stacki_server_ip,
            self.current_config)
        self.child.sendline(cmd)
        self._expect('The copy operation was completed successfully')
    def upload(self):
        """Copy the new config from the server (via TFTP) into the switch's
        running-config."""
        self.child.expect('console#', timeout=60)
        cmd = "copy tftp://%s/%s temp" % (self.stacki_server_ip, self.new_config)
        self.child.sendline(cmd)
        time.sleep(2)
        self.child.sendline('Y') # A quick Y will fix the overwrite prompt if it exists.
        self._expect('The copy operation was completed successfully')
        #
        # we remove all VLANs (2-4094) which is time consuming, so up the timeout to 30
        #
        self.child.sendline("copy temp running-config")
        self._expect('The copy operation was completed successfully', custom_timeout=30)
    def apply_configuration(self):
        """Apply running-config to startup-config"""
        try:
            self.child.expect('console#')
            self.child.sendline('write')
            self.child.expect('Overwrite file .startup-config.*\?')
            self.child.sendline('Y')
            self._expect('The copy operation was completed successfully')
        except:
            # NOTE(review): bare except — narrows poorly; any failure maps to
            # the same SwitchException.
            raise SwitchException('Could not apply configuration to startup-config')
    def set_tftp_ip(self, ip):
        # IP of the stacki server hosting the TFTP share
        self.stacki_server_ip = ip
|
<filename>common/src/stack/switch/pylib/switch/x1052.py<gh_stars>0
# Copyright (c) 2006 - 2017 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
from . import Switch, SwitchException
import os
import time
import pexpect
class SwitchDellX1052(Switch):
"""
Class for interfacing with a Dell x1052 switch.
"""
def supported(*cls):
return [
('Dell', 'x1052'),
]
def connect(self):
"""Connect to the switch"""
try:
self.child = pexpect.spawn('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -tt ' +
self.switch_ip_address)
self._expect('User Name:', 10)
self.child.sendline(self.username)
self._expect('Password:')
self.child.sendline(self.password)
except:
raise SwitchException("Couldn't connect to the switch")
def disconnect(self):
# q will exit out of an existing scrollable more/less type of prompt
# Probably not necessary, but a bit safer
# if there isn't an exit status
# close the connection
if not self.child.exitstatus:
self.child.sendline("\nq\n")
# exit should cleanly exit the ssh
self.child.sendline("\nexit\n")
# Just give it a few seconds to exit gracefully before terminate.
time.sleep(3)
self.child.terminate()
def _expect(self, look_for, custom_timeout=15):
try:
self.child.expect(look_for, timeout=custom_timeout)
except pexpect.exceptions.TIMEOUT:
# print "Giving SSH time to close gracefully...",
for _ in range(9, -1, -1):
if not self.child.isalive():
break
time.sleep(1)
debug_info = str(str(self.child.before) + str(self.child.buffer) + str(self.child.after))
self.__exit__()
raise SwitchException(self.switch_ip_address + " expected output '" + look_for +
"' from SSH connection timed out after " +
str(custom_timeout) + " seconds.\nBuffer: " + debug_info)
except pexpect.exceptions.EOF:
self.__exit__()
raise SwitchException("SSH connection to " + self.switch_ip_address + " not available.")
def get_mac_address_table(self):
"""Download the mac address table"""
time.sleep(1)
command = 'show mac address-table'
self.child.expect('console#', timeout=60)
with open('/tmp/%s_mac_address_table' % self.switchname, 'wb') as macout:
self.child.logfile = macout
self.child.sendline(command)
time.sleep(1)
self.send_spacebar(4)
self.child.expect('console#', timeout=60)
self.child.logfile = None
def parse_mac_address_table(self):
"""Parse the mac address table and return list of connected macs"""
_hosts = []
with open('/tmp/%s_mac_address_table' % self.switchname, 'r') as f:
for line in f.readlines():
if 'dynamic' in line:
# appends line to list
# map just splits out the port
# from the interface
_hosts.append(list(
map(lambda x: x.split('/')[-1],
line.split())
))
return sorted(_hosts, key=lambda x: x[2])
def get_interface_status_table(self):
"""Download the interface status table"""
time.sleep(1)
command = 'show interface status'
self.child.expect('console#', timeout=60)
with open('/tmp/%s_interface_status_table' % self.switchname, 'wb') as macout:
self.child.logfile = macout
self.child.sendline(command)
time.sleep(1)
self.send_spacebar(4)
self.child.expect('console#', timeout=60)
self.child.logfile = None
def parse_interface_status_table(self):
"""Parse the interface status and return list of port information"""
_hosts = []
with open('/tmp/%s_interface_status_table' % self.switchname, 'r') as f:
for line in f.readlines():
if 'gi1/0/' in line:
# appends line to list
# map just splits out the port
# from the interface
_hosts.append(list(
map(lambda x: x.split('/')[-1],
line.split())
))
return _hosts
def send_spacebar(self, times=1):
"""Send Spacebar; Used to read more of the output"""
command = "\x20"
for i in range(times):
self.child.send(command)
time.sleep(1)
def download(self):
"""Download the running-config from the switch to the server"""
self.child.expect('console#', timeout=60)
#
# tftp requires the destination file to already exist and to be writable by all
#
filename = os.path.join(self.tftpdir, self.current_config)
f = open(filename, 'w')
f.close()
os.chmod(filename, mode=0o777)
cmd = "copy running-config tftp://%s/%s" % (self.stacki_server_ip,
self.current_config)
self.child.sendline(cmd)
self._expect('The copy operation was completed successfully')
def upload(self):
"""Upload the file from the switch to the server"""
self.child.expect('console#', timeout=60)
cmd = "copy tftp://%s/%s temp" % (self.stacki_server_ip, self.new_config)
self.child.sendline(cmd)
time.sleep(2)
self.child.sendline('Y') # A quick Y will fix the overwrite prompt if it exists.
self._expect('The copy operation was completed successfully')
#
# we remove all VLANs (2-4094) which is time consuming, so up the timeout to 30
#
self.child.sendline("copy temp running-config")
self._expect('The copy operation was completed successfully', custom_timeout=30)
def apply_configuration(self):
"""Apply running-config to startup-config"""
try:
self.child.expect('console#')
self.child.sendline('write')
self.child.expect('Overwrite file .startup-config.*\?')
self.child.sendline('Y')
self._expect('The copy operation was completed successfully')
except:
raise SwitchException('Could not apply configuration to startup-config')
def set_tftp_ip(self, ip):
self.stacki_server_ip = ip
|
en
| 0.736133
|
# Copyright (c) 2006 - 2017 Teradata # All rights reserved. Stacki(r) v5.x stacki.com # https://github.com/Teradata/stacki/blob/master/LICENSE.txt # @copyright@ Class for interfacing with a Dell x1052 switch. Connect to the switch # q will exit out of an existing scrollable more/less type of prompt # Probably not necessary, but a bit safer # if there isn't an exit status # close the connection # exit should cleanly exit the ssh # Just give it a few seconds to exit gracefully before terminate. # print "Giving SSH time to close gracefully...", Download the mac address table #', timeout=60) #', timeout=60) Parse the mac address table and return list of connected macs # appends line to list # map just splits out the port # from the interface Download the interface status table #', timeout=60) #', timeout=60) Parse the interface status and return list of port information # appends line to list # map just splits out the port # from the interface Send Spacebar; Used to read more of the output Download the running-config from the switch to the server #', timeout=60) # # tftp requires the destination file to already exist and to be writable by all # Upload the file from the switch to the server #', timeout=60) # A quick Y will fix the overwrite prompt if it exists. # # we remove all VLANs (2-4094) which is time consuming, so up the timeout to 30 # Apply running-config to startup-config #')
| 2.227643
| 2
|
mysql/pythob_mysql6.py
|
MiracleWong/PythonBasic
| 0
|
6626464
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Delete operation against the database.
import MySQLdb
# Open the database connection.
conn = MySQLdb.connect("localhost", "root", "<PASSWORD>", "crm")
# Obtain an operation cursor with the cursor() method.
cursor = conn.cursor()
# SQL delete statement.
sql = "DELETE FROM EMPLOYEE WHERE AGE = '%d'" % (21)
#sql = "DELETE FROM EMPLOYEE WHERE AGE > '%d'" % (21)
try:
    # Execute the SQL statement.
    cursor.execute(sql)
    # Commit the change to the database.
    conn.commit()
except Exception:
    # Roll back on any error. (The original bare "except:" would also have
    # swallowed KeyboardInterrupt/SystemExit.)
    conn.rollback()
# Close the database connection.
conn.close()
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# 对数据库的删除操作
import MySQLdb
# 打开数据库连接
conn = MySQLdb.connect("localhost", "root", "<PASSWORD>", "crm")
# 使用cursor()方法获取操作游标
cursor = conn.cursor()
# SQL 更新语句
sql = "DELETE FROM EMPLOYEE WHERE AGE = '%d'" % (21)
#sql = "DELETE FROM EMPLOYEE WHERE AGE > '%d'" % (21)
try:
# 执行SQL语句
cursor.execute(sql)
# 提交到数据库执行
conn.commit()
except:
# 发生错误时进行回滚
conn.rollback()
# 关闭数据库连接
conn.close()
|
zh
| 0.7843
|
#!/usr/bin/python # -*- coding: UTF-8 -*- # 对数据库的删除操作 # 打开数据库连接 # 使用cursor()方法获取操作游标 # SQL 更新语句 #sql = "DELETE FROM EMPLOYEE WHERE AGE > '%d'" % (21) # 执行SQL语句 # 提交到数据库执行 # 发生错误时进行回滚 # 关闭数据库连接
| 3.412014
| 3
|
setup.py
|
safetydave/open-spacial-may
| 0
|
6626465
|
from setuptools import setup, find_packages
# Package metadata for the ospacial solution; sources live under src/.
setup(name='ospacial',
      version='0.1',
      description='Solution to Open-Spacial Shokunin Challenge',
      url='https://github.com/safetydave/open-spacial',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      python_requires='>=3.6.0',
      packages=find_packages("src"),
      package_dir={"": "src"},
      install_requires=['matplotlib', 'networkx', 'numpy', 'pandas', 'scipy'],
      zip_safe=False)
|
from setuptools import setup, find_packages
setup(name='ospacial',
version='0.1',
description='Solution to Open-Spacial Shokunin Challenge',
url='https://github.com/safetydave/open-spacial',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
python_requires='>=3.6.0',
packages=find_packages("src"),
package_dir={"": "src"},
install_requires=['matplotlib', 'networkx', 'numpy', 'pandas', 'scipy'],
zip_safe=False)
|
none
| 1
| 1.180388
| 1
|
|
svhn 64x64/mainmodel1_6layers_copy.py
|
vrishabh22/OCR-Minor-Project
| 0
|
6626466
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import time
import os
from datetime import timedelta
# Silence TensorFlow's INFO-level C++ logs.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
plt.rcParams['figure.figsize'] = (16.0, 4.0)
# Load the preprocessed multi-digit grayscale SVHN dataset.
h5f = h5py.File('data/SVHN_multi_grey.h5','r')
X_train = h5f['train_dataset'][:]
y_train = h5f['train_labels'][:]
X_val = h5f['valid_dataset'][:]
y_val = h5f['valid_labels'][:]
X_test = h5f['test_dataset'][:]
y_test = h5f['test_labels'][:]
h5f.close()
print('Training set', X_train.shape, y_train.shape)
print('Validation set', X_val.shape, y_val.shape)
print('Test set', X_test.shape, y_test.shape)
_,img_height, img_width, num_channels = X_train.shape
# num_digits: digit slots per image; num_labels: distinct label values.
# Assumes y_train contains every class at least once — TODO confirm.
num_digits, num_labels = y_train.shape[1], len(np.unique(y_train))
# Fold the validation set into the training set and reshuffle.
X_train = np.concatenate([X_train, X_val])
y_train = np.concatenate([y_train, y_val])
print('Training set', X_train.shape, y_train.shape)
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
def subtract_mean(a):
    """Center every sample of ``a`` around zero, in place, and return ``a``."""
    for sample_idx in range(len(a)):
        a[sample_idx] -= a[sample_idx].mean()
    return a
# Zero-center each image in place. Note X_val was already merged into
# X_train above, so normalizing it separately matters only if reused later.
X_train = subtract_mean(X_train)
X_test = subtract_mean(X_test)
X_val = subtract_mean(X_val)
def plot_images(images, nrows, ncols, cls_true, cls_pred=None):
    """Show a random grid of images with their true (and predicted) numbers.

    :param images: (np.ndarray) image batch, indexed as (N, H, W, channel)
    :param nrows: (int) grid rows
    :param ncols: (int) grid columns
    :param cls_true: true digit sequences per image (10 marks "no digit")
    :param cls_pred: optional predicted digit sequences
    """
    fig, axes = plt.subplots(nrows, ncols, figsize=(16, 2*nrows))
    # Sample random images (with replacement) to fill the grid.
    rs = np.random.choice(images.shape[0], nrows*ncols)
    for i, ax in zip(rs, axes.flat):
        # Drop the padding class (10) when rendering the number.
        true_number = ''.join(str(x) for x in cls_true[i] if x != 10)
        if cls_pred is None:
            title = "True: {0}".format(true_number)
        else:
            pred_number = ''.join(str(x) for x in cls_pred[i] if x != 10)
            title = "True: {0}, Pred: {1}".format(true_number, pred_number)
        ax.imshow(images[i,:,:,0], cmap='binary')
        ax.set_title(title)
        ax.set_xticks([]); ax.set_yticks([])
# Visual sanity-check of the data pipeline before training.
plot_images(X_train, 2, 8, y_train)
plt.show()
def init_conv_weights(shape, name):
    """Create a conv-filter variable with Xavier (conv2d) initialization."""
    return tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer_conv2d())
def init_fc_weights(shape, name):
    """Create a fully-connected weight variable with Xavier initialization."""
    return tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer())
def init_biases(shape):
    """Create a bias variable initialized to zero."""
    return tf.Variable(tf.constant(0.0, shape=shape))
#--------------------------------------MAIN MODEL----------------------------------
def conv_layer(input_tensor,
               filter_size,
               in_channels,
               num_filters,
               layer_name,
               pooling,pooling_stride):
    """
    Conv2D (SAME padding) + batch norm + ReLU, with optional 2x2 max-pooling.

    :param input_tensor: NHWC input tensor
    :param filter_size: (int) square kernel size
    :param in_channels: (int) input channel count
    :param num_filters: (int) output channel count
    :param layer_name: (str) variable scope / summary name
    :param pooling: (bool) stride-2 pooling (halves spatial size) when True
    :param pooling_stride: (bool) stride-1 pooling (keeps size); takes priority
    :return: the layer's activation tensor
    """
    with tf.variable_scope(layer_name) as scope:
        shape = [filter_size, filter_size, in_channels, num_filters]
        weights = init_conv_weights(shape, layer_name)
        biases = init_biases([num_filters])
        tf.summary.histogram(layer_name + '/weights_6layers', weights)
        activations = tf.nn.conv2d(input_tensor, weights, [1, 1, 1, 1], 'SAME') + biases
        activations = tf.layers.batch_normalization(activations)
        activations = tf.nn.relu(activations)
        # pooling_stride wins over pooling when both flags are set.
        if pooling_stride:
            activations = tf.nn.max_pool(activations, [1, 2, 2, 1], [1, 1, 1, 1], 'SAME')
        elif pooling:
            activations = tf.nn.max_pool(activations, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
        return activations
def flatten_tensor(input_tensor):
    """Flatten a (batch, H, W, C) tensor to (batch, H*W*C).

    :return: (flat tensor, per-sample element count)
    """
    input_tensor_shape = input_tensor.get_shape()
    num_activations = input_tensor_shape[1:4].num_elements()
    input_tensor_flat = tf.reshape(input_tensor, [-1, num_activations])
    return input_tensor_flat, num_activations
def fc_layer(input_tensor,
             input_dim,
             output_dim,
             layer_name,
             relu=False):
    """
    Fully-connected layer (xW + b) with optional ReLU.

    :param input_tensor: 2-D input tensor, shape (batch, input_dim)
    :param input_dim: (int) input feature count
    :param output_dim: (int) output feature count
    :param layer_name: (str) variable scope / summary name
    :param relu: (bool) apply ReLU when True (omit for logits layers)
    :return: the layer's activation tensor
    """
    with tf.variable_scope(layer_name):
        weights = init_fc_weights([input_dim, output_dim], layer_name + '/weights_6layers')
        biases = init_biases([output_dim])
        tf.summary.histogram(layer_name + '/weights_6layers', weights)
        activations = tf.matmul(input_tensor, weights) + biases
        if relu:
            activations = tf.nn.relu(activations)
        return activations
# Kernel sizes / filter counts for the four conv stages (two layers each).
filter_size1 = filter_size2 = 5
num_filters1 = num_filters2 = 32
filter_size3 = filter_size4 = 5
num_filters3 = num_filters4 = 64
filter_size5 = filter_size6 = 5
num_filters5 = num_filters6 = 128
filter_size7 = filter_size8 = 5
num_filters7 = num_filters8 = 256
# Width of the two fully-connected layers.
fc1_size = fc2_size= 256
# Placeholders: grayscale images and their per-digit label sequences.
with tf.variable_scope("input"):
    x = tf.placeholder(tf.float32, shape=(None, img_height, img_width, num_channels), name='x')
    y_ = tf.placeholder(tf.int64, shape=[None, num_digits], name='y_')
with tf.variable_scope("dropout"):
    # Keep-probability shared by every dropout layer below.
    p_keep_3 = tf.placeholder(tf.float32)
    tf.summary.scalar('input_keep_probability', p_keep_3)
# Eight conv blocks, alternating stride-1 and stride-2 pooling, each
# followed by dropout.
conv_1 = conv_layer(x, filter_size1, num_channels, num_filters1, "conv1", pooling=True,pooling_stride=False)
drop_block1 = tf.nn.dropout(conv_1, p_keep_3)
conv_2 = conv_layer(drop_block1, filter_size2, num_filters1, num_filters2, "conv2", pooling=False,pooling_stride=True)
drop_block2 = tf.nn.dropout(conv_2, p_keep_3)
conv_3 = conv_layer(drop_block2, filter_size3, num_filters2, num_filters3, "conv3", pooling=True,pooling_stride=False)
drop_block3 = tf.nn.dropout(conv_3, p_keep_3)
conv_4 = conv_layer(drop_block3, filter_size4, num_filters3, num_filters4, "conv4", pooling=False,pooling_stride=True)
drop_block4 = tf.nn.dropout(conv_4, p_keep_3)
conv_5 = conv_layer(drop_block4, filter_size5, num_filters4, num_filters5, "conv5", pooling=True,pooling_stride=False)
drop_block5 = tf.nn.dropout(conv_5, p_keep_3)
conv_6 = conv_layer(drop_block5, filter_size6, num_filters5, num_filters6, "conv6", pooling=False,pooling_stride=True)
drop_block6 = tf.nn.dropout(conv_6, p_keep_3)
conv_7 = conv_layer(drop_block6, filter_size7, num_filters6, num_filters7, "conv7", pooling=True,pooling_stride=False)
drop_block7 = tf.nn.dropout(conv_7, p_keep_3)
conv_8 = conv_layer(drop_block7, filter_size8, num_filters7, num_filters8, "conv8", pooling=False,pooling_stride=True)
# Flatten and run two fully-connected layers.
flat_tensor, num_activations = flatten_tensor(tf.nn.dropout(conv_8, p_keep_3))
fc_1 = fc_layer(flat_tensor, num_activations, fc1_size, 'fc1', relu=True)
fc_2 = fc_layer(fc_1, fc1_size, fc2_size, 'fc2', relu=True)
# One softmax head per digit position (up to five digits).
logits_1 = fc_layer(fc_2, fc2_size, num_labels, 'softmax1')
logits_2 = fc_layer(fc_2, fc2_size, num_labels, 'softmax2')
logits_3 = fc_layer(fc_2, fc2_size, num_labels, 'softmax3')
logits_4 = fc_layer(fc_2, fc2_size, num_labels, 'softmax4')
logits_5 = fc_layer(fc_2, fc2_size, num_labels, 'softmax5')
y_pred = tf.stack([logits_1, logits_2, logits_3, logits_4, logits_5])
# Argmax over the class axis, transposed to (batch, digit).
y_pred_cls = tf.transpose(tf.argmax(y_pred, dimension=2))
with tf.variable_scope("loss"):
    # Per-digit cross-entropy, summed over the five positions.
    loss1 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_1, labels=y_[:, 0]))
    loss2 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_2, labels=y_[:, 1]))
    loss3 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_3, labels=y_[:, 2]))
    loss4 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_4, labels=y_[:, 3]))
    loss5 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_5, labels=y_[:, 4]))
    loss = loss1 + loss2 + loss3 + loss4 + loss5
    tf.summary.scalar('loss', loss)
with tf.variable_scope('optimizer'):
    global_step = tf.Variable(0, trainable=False)
    # Halve the learning rate every 7500 steps.
    learning_rate = tf.train.exponential_decay(1e-3, global_step, 7500, 0.5, staircase=True)
    tf.summary.scalar('learning_rate', learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.variable_scope('accuracy'):
    # A sample counts as correct only if every digit matches (reduce_min).
    correct_prediction = tf.reduce_min(tf.cast(tf.equal(y_pred_cls, y_), tf.float32), 1)
    accuracy = tf.reduce_mean(correct_prediction) * 100.0
    tf.summary.scalar('accuracy', accuracy)
session = tf.Session()
saver = tf.train.Saver()
save_path = os.path.join('checkpoints_6layers_copy/', 'svhn_multi_v5_6layers')
# Resume from the latest checkpoint if one exists, else start fresh.
try:
    print("Restoring last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir='checkpoints_6layers_copy')
    print(last_chk_path)
    saver.restore(session, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
except:
    # NOTE(review): bare except — any restore failure (not only a missing
    # checkpoint) silently triggers re-initialization.
    print("Failed to restore checkpoint - initializing variables")
    session.run(tf.global_variables_initializer())
# TensorBoard summary plumbing.
LOG_DIR = 'logs_6layers_copy/'
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(LOG_DIR,graph=tf.get_default_graph())
batch_size = 128
# Dropout keep-probability used during training.
d3 = 0.8
def feed_dict(step=0):
    """Build the training feed dict for one minibatch.

    The batch window cycles through the (pre-shuffled) training set; `step`
    selects which window, wrapping before the final partial batch so every
    slice has exactly `batch_size` samples.
    """
    start = (step * batch_size) % (y_train.shape[0] - batch_size)
    stop = start + batch_size
    batch_images = X_train[start:stop]
    batch_labels = y_train[start:stop]
    return {x: batch_images, y_: batch_labels, p_keep_3: d3}
def evaluate_batch(test, batch_size):
    """Compute accuracy (percent) over the test or validation split.

    Args:
        test: if True evaluate on (X_test, y_test), otherwise (X_val, y_val).
        batch_size: number of images fed per session.run call.

    Returns:
        Accuracy over the whole split, weighted by actual batch size.
    """
    images, labels = (X_test, y_test) if test else (X_val, y_val)
    n_images = labels.shape[0]
    # Ceiling division. The old `n_images // batch_size + 1` produced an
    # extra, EMPTY batch whenever n_images was an exact multiple of
    # batch_size (mean over zero elements -> NaN), and the plain per-batch
    # average over-weighted the final partial batch.
    n_batches = -(-n_images // batch_size)
    weighted_correct = 0.0
    for i in range(n_batches):
        offset = i * batch_size
        xs = images[offset:offset + batch_size]
        ys = labels[offset:offset + batch_size]
        batch_acc = session.run(accuracy, {x: xs, y_: ys, p_keep_3: 1.})
        weighted_correct += batch_acc * ys.shape[0]
    return weighted_correct / n_images
def optimize(num_iterations, display_step):
    """Run `num_iterations` training steps, then evaluate, report and save.

    Logs a merged summary every step; prints minibatch accuracy every
    `display_step` global steps and on the final iteration.
    """
    start_time = time.time()
    for step in range(num_iterations):
        summary, i, _ = session.run([merged, global_step, optimizer], feed_dict(step))
        train_writer.add_summary(summary, i)
        if (i % display_step == 0) or (step == num_iterations - 1):
            # NOTE: a second forward pass over the same minibatch AFTER the
            # weight update — not the accuracy of the train step itself.
            batch_acc = session.run(accuracy, feed_dict=feed_dict(step))
            print("Minibatch accuracy at step %d: %.4f" % (i, batch_acc))
    run_time = time.time() - start_time
    print("\nTime usage: " + str(timedelta(seconds=int(round(run_time)))))
    test_acc = evaluate_batch(test=True, batch_size=512)
    print("Test accuracy: %.4f" % test_acc)
    # Saves under save_path with the global step appended to the filename.
    saver.save(session, save_path=save_path, global_step=global_step)
    print('Model saved in file: {}'.format(save_path))
# NOTE(review): num_iter (steps per one epoch) is computed but never used;
# optimize() below runs a fixed 600 iterations instead.
num_iter=int(X_train.shape[0]/batch_size)+1
optimize(num_iterations=600, display_step=200)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import time
import os
from datetime import timedelta
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
plt.rcParams['figure.figsize'] = (16.0, 4.0)
# Load the preprocessed multi-digit SVHN dataset (grayscale images plus a
# 5-slot per-image digit-label vector) from HDF5.
h5f = h5py.File('data/SVHN_multi_grey.h5','r')
X_train = h5f['train_dataset'][:]
y_train = h5f['train_labels'][:]
X_val = h5f['valid_dataset'][:]
y_val = h5f['valid_labels'][:]
X_test = h5f['test_dataset'][:]
y_test = h5f['test_labels'][:]
h5f.close()
print('Training set', X_train.shape, y_train.shape)
print('Validation set', X_val.shape, y_val.shape)
print('Test set', X_test.shape, y_test.shape)
# Image geometry and label layout are taken from the loaded arrays.
_,img_height, img_width, num_channels = X_train.shape
num_digits, num_labels = y_train.shape[1], len(np.unique(y_train))
# NOTE(review): the validation split is merged into the training set here,
# yet X_val/y_val remain available below — any later "validation" metric is
# computed on data the model was trained on; confirm this is intended.
X_train = np.concatenate([X_train, X_val])
y_train = np.concatenate([y_train, y_val])
print('Training set', X_train.shape, y_train.shape)
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
def subtract_mean(a):
    """Zero-center every sample of `a` in place (per-sample mean removal).

    Mutates `a` and returns it as well, for call-site convenience.
    """
    # Iterating a numpy array yields views, so the in-place subtraction
    # below writes straight through to `a`.
    for sample in a:
        sample -= sample.mean()
    return a
# Per-image mean subtraction (in place) for every split; done after the
# train/val merge so all splits see the same normalization.
X_train = subtract_mean(X_train)
X_test = subtract_mean(X_test)
X_val = subtract_mean(X_val)
def plot_images(images, nrows, ncols, cls_true, cls_pred=None):
    """Show a grid of nrows x ncols randomly chosen images with their labels.

    Args:
        images: array of shape (N, H, W, C); channel 0 is displayed.
        nrows, ncols: grid dimensions.
        cls_true: per-image sequences of digit class ids; the value 10 is
            treated as "no digit" and dropped from the rendered label.
        cls_pred: optional predictions, rendered next to the ground truth.
    """
    fig, axes = plt.subplots(nrows, ncols, figsize=(16, 2*nrows))
    # Sampling WITH replacement — duplicate images are possible.
    rs = np.random.choice(images.shape[0], nrows*ncols)
    for i, ax in zip(rs, axes.flat):
        true_number = ''.join(str(x) for x in cls_true[i] if x != 10)
        if cls_pred is None:
            title = "True: {0}".format(true_number)
        else:
            pred_number = ''.join(str(x) for x in cls_pred[i] if x != 10)
            title = "True: {0}, Pred: {1}".format(true_number, pred_number)
        ax.imshow(images[i,:,:,0], cmap='binary')
        ax.set_title(title)
        ax.set_xticks([]); ax.set_yticks([])
# Sanity-check visualization of a few (already normalized) training samples.
plot_images(X_train, 2, 8, y_train)
plt.show()
def init_conv_weights(shape, name):
    """Create a conv-kernel variable with Xavier (Glorot) initialization."""
    return tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer_conv2d())
def init_fc_weights(shape, name):
    """Create a fully-connected weight matrix with Xavier initialization."""
    return tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer())
def init_biases(shape):
    """Create a zero-initialized bias variable (plain tf.Variable, unscoped)."""
    return tf.Variable(tf.constant(0.0, shape=shape))
#--------------------------------------MAIN MODEL----------------------------------
def conv_layer(input_tensor,
               filter_size,
               in_channels,
               num_filters,
               layer_name,
               pooling,pooling_stride):
    """One conv -> batch-norm -> ReLU block with optional max-pooling.

    Args:
        input_tensor: NHWC activations from the previous layer.
        filter_size: spatial size of the (square) kernel.
        in_channels: channel count of `input_tensor`.
        num_filters: number of output feature maps.
        layer_name: variable-scope name (also used as the weight name).
        pooling: if True (and pooling_stride is False), 2x2 max-pool, stride 2.
        pooling_stride: if True, 2x2 max-pool with stride 1 (takes priority;
            keeps the spatial size).
    """
    with tf.variable_scope(layer_name) as scope:
        shape = [filter_size, filter_size, in_channels, num_filters]
        weights = init_conv_weights(shape, layer_name)
        biases = init_biases([num_filters])
        tf.summary.histogram(layer_name + '/weights_6layers', weights)
        # Stride-1 convolution with 'SAME' padding.
        activations = tf.nn.conv2d(input_tensor, weights, [1, 1, 1, 1], 'SAME') + biases
        # NOTE(review): batch_normalization is called with its default
        # training=False and without an UPDATE_OPS control dependency, so
        # moving statistics are never updated during training — confirm
        # this is intended.
        activations = tf.layers.batch_normalization(activations)
        activations = tf.nn.relu(activations)
        if pooling_stride:
            # Stride-1 pool: local-max smoothing without downsampling.
            activations = tf.nn.max_pool(activations, [1, 2, 2, 1], [1, 1, 1, 1], 'SAME')
        elif pooling:
            # Standard 2x2/stride-2 pool: halves the spatial dimensions.
            activations = tf.nn.max_pool(activations, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
        return activations
def flatten_tensor(input_tensor):
    """Reshape NHWC activations to (batch, features); also return the count.

    The feature count H*W*C is computed from the STATIC shape, so the
    spatial dimensions must be known at graph-construction time.
    """
    input_tensor_shape = input_tensor.get_shape()
    num_activations = input_tensor_shape[1:4].num_elements()
    input_tensor_flat = tf.reshape(input_tensor, [-1, num_activations])
    return input_tensor_flat, num_activations
def fc_layer(input_tensor,
             input_dim,
             output_dim,
             layer_name,
             relu=False):
    """Fully connected layer: xW + b, with optional ReLU.

    Used both for the shared hidden layers (relu=True) and for the five
    per-digit heads, which return raw logits (relu=False).
    """
    with tf.variable_scope(layer_name):
        weights = init_fc_weights([input_dim, output_dim], layer_name + '/weights_6layers')
        biases = init_biases([output_dim])
        tf.summary.histogram(layer_name + '/weights_6layers', weights)
        activations = tf.matmul(input_tensor, weights) + biases
        if relu:
            activations = tf.nn.relu(activations)
        return activations
# Kernel sizes and filter counts per conv stage; the filter count doubles
# every two layers (32 -> 64 -> 128 -> 256), all with 5x5 kernels.
filter_size1 = filter_size2 = 5
num_filters1 = num_filters2 = 32
filter_size3 = filter_size4 = 5
num_filters3 = num_filters4 = 64
filter_size5 = filter_size6 = 5
num_filters5 = num_filters6 = 128
filter_size7 = filter_size8 = 5
num_filters7 = num_filters8 = 256
# Width of the two shared fully connected layers before the softmax heads.
fc1_size = fc2_size= 256
with tf.variable_scope("input"):
    # x: grayscale images (NHWC); y_: per-image vector of num_digits class ids.
    x = tf.placeholder(tf.float32, shape=(None, img_height, img_width, num_channels), name='x')
    y_ = tf.placeholder(tf.int64, shape=[None, num_digits], name='y_')
with tf.variable_scope("dropout"):
    # Single keep-probability shared by every dropout layer in the network.
    p_keep_3 = tf.placeholder(tf.float32)
    tf.summary.scalar('input_keep_probability', p_keep_3)
# Eight conv blocks with dropout in between; pooling alternates between a
# downsampling (stride-2) pool and a non-downsampling (stride-1) pool.
conv_1 = conv_layer(x, filter_size1, num_channels, num_filters1, "conv1", pooling=True,pooling_stride=False)
drop_block1 = tf.nn.dropout(conv_1, p_keep_3)
conv_2 = conv_layer(drop_block1, filter_size2, num_filters1, num_filters2, "conv2", pooling=False,pooling_stride=True)
drop_block2 = tf.nn.dropout(conv_2, p_keep_3)
conv_3 = conv_layer(drop_block2, filter_size3, num_filters2, num_filters3, "conv3", pooling=True,pooling_stride=False)
drop_block3 = tf.nn.dropout(conv_3, p_keep_3)
conv_4 = conv_layer(drop_block3, filter_size4, num_filters3, num_filters4, "conv4", pooling=False,pooling_stride=True)
drop_block4 = tf.nn.dropout(conv_4, p_keep_3)
conv_5 = conv_layer(drop_block4, filter_size5, num_filters4, num_filters5, "conv5", pooling=True,pooling_stride=False)
drop_block5 = tf.nn.dropout(conv_5, p_keep_3)
conv_6 = conv_layer(drop_block5, filter_size6, num_filters5, num_filters6, "conv6", pooling=False,pooling_stride=True)
drop_block6 = tf.nn.dropout(conv_6, p_keep_3)
conv_7 = conv_layer(drop_block6, filter_size7, num_filters6, num_filters7, "conv7", pooling=True,pooling_stride=False)
drop_block7 = tf.nn.dropout(conv_7, p_keep_3)
conv_8 = conv_layer(drop_block7, filter_size8, num_filters7, num_filters8, "conv8", pooling=False,pooling_stride=True)
flat_tensor, num_activations = flatten_tensor(tf.nn.dropout(conv_8, p_keep_3))
# Two shared FC layers feeding five independent heads (one per digit slot).
fc_1 = fc_layer(flat_tensor, num_activations, fc1_size, 'fc1', relu=True)
fc_2 = fc_layer(fc_1, fc1_size, fc2_size, 'fc2', relu=True)
logits_1 = fc_layer(fc_2, fc2_size, num_labels, 'softmax1')
logits_2 = fc_layer(fc_2, fc2_size, num_labels, 'softmax2')
logits_3 = fc_layer(fc_2, fc2_size, num_labels, 'softmax3')
logits_4 = fc_layer(fc_2, fc2_size, num_labels, 'softmax4')
logits_5 = fc_layer(fc_2, fc2_size, num_labels, 'softmax5')
# Stack the five per-digit classifier heads: shape (5, batch, num_labels).
y_pred = tf.stack([logits_1, logits_2, logits_3, logits_4, logits_5])
# argmax over the label axis, then transpose to (batch, 5) predicted digits.
# NOTE: `dimension=` is the deprecated TF1 alias of `axis=`.
y_pred_cls = tf.transpose(tf.argmax(y_pred, dimension=2))
with tf.variable_scope("loss"):
    # One sparse softmax cross-entropy term per digit position; y_[:, k] holds
    # the integer class id of position k (10 apparently encodes "no digit",
    # see the `x != 10` filter in plot_images).
    loss1 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_1, labels=y_[:, 0]))
    loss2 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_2, labels=y_[:, 1]))
    loss3 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_3, labels=y_[:, 2]))
    loss4 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_4, labels=y_[:, 3]))
    loss5 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_5, labels=y_[:, 4]))
    # Total loss: unweighted sum over the five digit positions.
    loss = loss1 + loss2 + loss3 + loss4 + loss5
    tf.summary.scalar('loss', loss)
with tf.variable_scope('optimizer'):
    global_step = tf.Variable(0, trainable=False)
    # Halve the learning rate every 7500 global steps, starting from 1e-3.
    learning_rate = tf.train.exponential_decay(1e-3, global_step, 7500, 0.5, staircase=True)
    tf.summary.scalar('learning_rate', learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.variable_scope('accuracy'):
    # A sample counts as correct only when ALL five positions match:
    # reduce_min over the per-position equality mask is 1.0 iff every
    # position is equal.
    correct_prediction = tf.reduce_min(tf.cast(tf.equal(y_pred_cls, y_), tf.float32), 1)
    accuracy = tf.reduce_mean(correct_prediction) * 100.0
    tf.summary.scalar('accuracy', accuracy)
session = tf.Session()
saver = tf.train.Saver()
# Checkpoint prefix; tf appends the global step to the saved filenames.
save_path = os.path.join('checkpoints_6layers_copy/', 'svhn_multi_v5_6layers')
# Resume from the most recent checkpoint when one exists; otherwise fall back
# to freshly initialized variables.
try:
    print("Restoring last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir='checkpoints_6layers_copy')
    print(last_chk_path)
    saver.restore(session, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
except Exception:
    # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit;
    # catching Exception keeps the best-effort restore without hiding process
    # signals. (latest_checkpoint returns None when no checkpoint exists, so
    # restore raises and we land here on a clean first run.)
    print("Failed to restore checkpoint - initializing variables")
    session.run(tf.global_variables_initializer())
# TensorBoard output: merge all summary ops and log the graph + scalars.
LOG_DIR = 'logs_6layers_copy/'
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(LOG_DIR,graph=tf.get_default_graph())
batch_size = 128
# Dropout keep-probability used during training (1.0 is fed at evaluation).
d3 = 0.8
def feed_dict(step=0):
    """Build the training feed dict for one minibatch.

    The batch window cycles through the (pre-shuffled) training set; `step`
    selects which window, wrapping before the final partial batch so every
    slice has exactly `batch_size` samples.
    """
    start = (step * batch_size) % (y_train.shape[0] - batch_size)
    stop = start + batch_size
    batch_images = X_train[start:stop]
    batch_labels = y_train[start:stop]
    return {x: batch_images, y_: batch_labels, p_keep_3: d3}
def evaluate_batch(test, batch_size):
    """Compute accuracy (percent) over the test or validation split.

    Args:
        test: if True evaluate on (X_test, y_test), otherwise (X_val, y_val).
        batch_size: number of images fed per session.run call.

    Returns:
        Accuracy over the whole split, weighted by actual batch size.
    """
    images, labels = (X_test, y_test) if test else (X_val, y_val)
    n_images = labels.shape[0]
    # Ceiling division. The old `n_images // batch_size + 1` produced an
    # extra, EMPTY batch whenever n_images was an exact multiple of
    # batch_size (mean over zero elements -> NaN), and the plain per-batch
    # average over-weighted the final partial batch.
    n_batches = -(-n_images // batch_size)
    weighted_correct = 0.0
    for i in range(n_batches):
        offset = i * batch_size
        xs = images[offset:offset + batch_size]
        ys = labels[offset:offset + batch_size]
        batch_acc = session.run(accuracy, {x: xs, y_: ys, p_keep_3: 1.})
        weighted_correct += batch_acc * ys.shape[0]
    return weighted_correct / n_images
def optimize(num_iterations, display_step):
    """Run `num_iterations` training steps, then evaluate, report and save.

    Logs a merged summary every step; prints minibatch accuracy every
    `display_step` global steps and on the final iteration.
    """
    start_time = time.time()
    for step in range(num_iterations):
        summary, i, _ = session.run([merged, global_step, optimizer], feed_dict(step))
        train_writer.add_summary(summary, i)
        if (i % display_step == 0) or (step == num_iterations - 1):
            # NOTE: a second forward pass over the same minibatch AFTER the
            # weight update — not the accuracy of the train step itself.
            batch_acc = session.run(accuracy, feed_dict=feed_dict(step))
            print("Minibatch accuracy at step %d: %.4f" % (i, batch_acc))
    run_time = time.time() - start_time
    print("\nTime usage: " + str(timedelta(seconds=int(round(run_time)))))
    test_acc = evaluate_batch(test=True, batch_size=512)
    print("Test accuracy: %.4f" % test_acc)
    # Saves under save_path with the global step appended to the filename.
    saver.save(session, save_path=save_path, global_step=global_step)
    print('Model saved in file: {}'.format(save_path))
# NOTE(review): num_iter (steps per one epoch) is computed but never used;
# optimize() below runs a fixed 600 iterations instead.
num_iter=int(X_train.shape[0]/batch_size)+1
optimize(num_iterations=600, display_step=200)
|
en
| 0.284126
|
#--------------------------------------MAIN MODEL---------------------------------- # optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss, global_step=global_step) # for i in range(10): # print(i)
| 2.202191
| 2
|
release/stubs.min/System/Diagnostics/__init___parts/EventLogEntry.py
|
htlcnn/ironpython-stubs
| 182
|
6626467
|
# Auto-generated IronPython stub for System.Diagnostics.EventLogEntry.
# Method bodies are placeholders (`pass`); actual behavior is supplied by the
# underlying .NET runtime. Docstrings mirror the .NET API reference.
class EventLogEntry(Component,IComponent,IDisposable,ISerializable):
 """ Encapsulates a single record in the event log. This class cannot be inherited. """
 def Dispose(self):
  """
  Dispose(self: Component,disposing: bool)
  Releases the unmanaged resources used by the System.ComponentModel.Component and optionally
  releases the managed resources.
  disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
  """
  pass
 def Equals(self,*__args):
  """
  Equals(self: EventLogEntry,otherEntry: EventLogEntry) -> bool
  Performs a comparison between two event log entries.
  otherEntry: The System.Diagnostics.EventLogEntry to compare.
  Returns: true if the System.Diagnostics.EventLogEntry objects are identical; otherwise,false.
  """
  pass
 def GetService(self,*args):
  """
  GetService(self: Component,service: Type) -> object
  Returns an object that represents a service provided by the System.ComponentModel.Component or
  by its System.ComponentModel.Container.
  service: A service provided by the System.ComponentModel.Component.
  Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or
  null if the System.ComponentModel.Component does not provide the specified service.
  """
  pass
 def MemberwiseClone(self,*args):
  """
  MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
  Creates a shallow copy of the current System.MarshalByRefObject object.
  cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
  object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
  false is usually appropriate. true to copy the current System.MarshalByRefObject object's
  identity to its clone,which will cause remoting client calls to be routed to the remote server
  object.
  Returns: A shallow copy of the current System.MarshalByRefObject object.
  MemberwiseClone(self: object) -> object
  Creates a shallow copy of the current System.Object.
  Returns: A shallow copy of the current System.Object.
  """
  pass
 def __enter__(self,*args):
  """
  __enter__(self: IDisposable) -> object
  Provides the implementation of __enter__ for objects which implement IDisposable.
  """
  pass
 def __eq__(self,*args):
  """ x.__eq__(y) <==> x==y """
  pass
 def __exit__(self,*args):
  """
  __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
  Provides the implementation of __exit__ for objects which implement IDisposable.
  """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __reduce_ex__(self,*args):
  pass
 def __str__(self,*args):
  pass
 # ---- .NET properties. The property() setters/deleters below are stub
 # ---- no-ops; the real properties are read-only on the .NET side. ----
 CanRaiseEvents=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets a value indicating whether the component can raise an event.
 """
 Category=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the text associated with the System.Diagnostics.EventLogEntry.CategoryNumber property for this entry.
 Get: Category(self: EventLogEntry) -> str
 """
 CategoryNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the category number of the event log entry.
 Get: CategoryNumber(self: EventLogEntry) -> Int16
 """
 Data=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the binary data associated with the entry.
 Get: Data(self: EventLogEntry) -> Array[Byte]
 """
 DesignMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
 """
 EntryType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the event type of this entry.
 Get: EntryType(self: EventLogEntry) -> EventLogEntryType
 """
 EventID=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the application-specific event identifier for the current event entry.
 Get: EventID(self: EventLogEntry) -> int
 """
 Events=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the list of event handlers that are attached to this System.ComponentModel.Component.
 """
 Index=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the index of this entry in the event log.
 Get: Index(self: EventLogEntry) -> int
 """
 InstanceId=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the resource identifier that designates the message text of the event entry.
 Get: InstanceId(self: EventLogEntry) -> Int64
 """
 MachineName=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the name of the computer on which this entry was generated.
 Get: MachineName(self: EventLogEntry) -> str
 """
 Message=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the localized message associated with this event entry.
 Get: Message(self: EventLogEntry) -> str
 """
 ReplacementStrings=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the replacement strings associated with the event log entry.
 Get: ReplacementStrings(self: EventLogEntry) -> Array[str]
 """
 Source=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the name of the application that generated this event.
 Get: Source(self: EventLogEntry) -> str
 """
 TimeGenerated=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the local time at which this event was generated.
 Get: TimeGenerated(self: EventLogEntry) -> DateTime
 """
 TimeWritten=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the local time at which this event was written to the log.
 Get: TimeWritten(self: EventLogEntry) -> DateTime
 """
 UserName=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the name of the user who is responsible for this event.
 Get: UserName(self: EventLogEntry) -> str
 """
|
# Auto-generated IronPython stub for System.Diagnostics.EventLogEntry.
# Method bodies are placeholders (`pass`); actual behavior is supplied by the
# underlying .NET runtime. Docstrings mirror the .NET API reference.
class EventLogEntry(Component,IComponent,IDisposable,ISerializable):
 """ Encapsulates a single record in the event log. This class cannot be inherited. """
 def Dispose(self):
  """
  Dispose(self: Component,disposing: bool)
  Releases the unmanaged resources used by the System.ComponentModel.Component and optionally
  releases the managed resources.
  disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
  """
  pass
 def Equals(self,*__args):
  """
  Equals(self: EventLogEntry,otherEntry: EventLogEntry) -> bool
  Performs a comparison between two event log entries.
  otherEntry: The System.Diagnostics.EventLogEntry to compare.
  Returns: true if the System.Diagnostics.EventLogEntry objects are identical; otherwise,false.
  """
  pass
 def GetService(self,*args):
  """
  GetService(self: Component,service: Type) -> object
  Returns an object that represents a service provided by the System.ComponentModel.Component or
  by its System.ComponentModel.Container.
  service: A service provided by the System.ComponentModel.Component.
  Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or
  null if the System.ComponentModel.Component does not provide the specified service.
  """
  pass
 def MemberwiseClone(self,*args):
  """
  MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
  Creates a shallow copy of the current System.MarshalByRefObject object.
  cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
  object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
  false is usually appropriate. true to copy the current System.MarshalByRefObject object's
  identity to its clone,which will cause remoting client calls to be routed to the remote server
  object.
  Returns: A shallow copy of the current System.MarshalByRefObject object.
  MemberwiseClone(self: object) -> object
  Creates a shallow copy of the current System.Object.
  Returns: A shallow copy of the current System.Object.
  """
  pass
 def __enter__(self,*args):
  """
  __enter__(self: IDisposable) -> object
  Provides the implementation of __enter__ for objects which implement IDisposable.
  """
  pass
 def __eq__(self,*args):
  """ x.__eq__(y) <==> x==y """
  pass
 def __exit__(self,*args):
  """
  __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
  Provides the implementation of __exit__ for objects which implement IDisposable.
  """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __reduce_ex__(self,*args):
  pass
 def __str__(self,*args):
  pass
 # ---- .NET properties. The property() setters/deleters below are stub
 # ---- no-ops; the real properties are read-only on the .NET side. ----
 CanRaiseEvents=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets a value indicating whether the component can raise an event.
 """
 Category=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the text associated with the System.Diagnostics.EventLogEntry.CategoryNumber property for this entry.
 Get: Category(self: EventLogEntry) -> str
 """
 CategoryNumber=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the category number of the event log entry.
 Get: CategoryNumber(self: EventLogEntry) -> Int16
 """
 Data=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the binary data associated with the entry.
 Get: Data(self: EventLogEntry) -> Array[Byte]
 """
 DesignMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
 """
 EntryType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the event type of this entry.
 Get: EntryType(self: EventLogEntry) -> EventLogEntryType
 """
 EventID=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the application-specific event identifier for the current event entry.
 Get: EventID(self: EventLogEntry) -> int
 """
 Events=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the list of event handlers that are attached to this System.ComponentModel.Component.
 """
 Index=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the index of this entry in the event log.
 Get: Index(self: EventLogEntry) -> int
 """
 InstanceId=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the resource identifier that designates the message text of the event entry.
 Get: InstanceId(self: EventLogEntry) -> Int64
 """
 MachineName=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the name of the computer on which this entry was generated.
 Get: MachineName(self: EventLogEntry) -> str
 """
 Message=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the localized message associated with this event entry.
 Get: Message(self: EventLogEntry) -> str
 """
 ReplacementStrings=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the replacement strings associated with the event log entry.
 Get: ReplacementStrings(self: EventLogEntry) -> Array[str]
 """
 Source=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the name of the application that generated this event.
 Get: Source(self: EventLogEntry) -> str
 """
 TimeGenerated=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the local time at which this event was generated.
 Get: TimeGenerated(self: EventLogEntry) -> DateTime
 """
 TimeWritten=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the local time at which this event was written to the log.
 Get: TimeWritten(self: EventLogEntry) -> DateTime
 """
 UserName=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the name of the user who is responsible for this event.
 Get: UserName(self: EventLogEntry) -> str
 """
|
en
| 0.764724
|
Encapsulates a single record in the event log. This class cannot be inherited. Dispose(self: Component,disposing: bool)
Releases the unmanaged resources used by the System.ComponentModel.Component and optionally
releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources. Equals(self: EventLogEntry,otherEntry: EventLogEntry) -> bool
Performs a comparison between two event log entries.
otherEntry: The System.Diagnostics.EventLogEntry to compare.
Returns: true if the System.Diagnostics.EventLogEntry objects are identical; otherwise,false. GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the System.ComponentModel.Component or
by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or
null if the System.ComponentModel.Component does not provide the specified service. MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
false is usually appropriate. true to copy the current System.MarshalByRefObject object's
identity to its clone,which will cause remoting client calls to be routed to the remote server
object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object. __enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable. x.__eq__(y) <==> x==y __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable. x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature Gets a value indicating whether the component can raise an event. Gets the text associated with the System.Diagnostics.EventLogEntry.CategoryNumber property for this entry.
Get: Category(self: EventLogEntry) -> str Gets the category number of the event log entry.
Get: CategoryNumber(self: EventLogEntry) -> Int16 Gets the binary data associated with the entry.
Get: Data(self: EventLogEntry) -> Array[Byte] Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode. Gets the event type of this entry.
Get: EntryType(self: EventLogEntry) -> EventLogEntryType Gets the application-specific event identifier for the current event entry.
Get: EventID(self: EventLogEntry) -> int Gets the list of event handlers that are attached to this System.ComponentModel.Component. Gets the index of this entry in the event log.
Get: Index(self: EventLogEntry) -> int Gets the resource identifier that designates the message text of the event entry.
Get: InstanceId(self: EventLogEntry) -> Int64 Gets the name of the computer on which this entry was generated.
Get: MachineName(self: EventLogEntry) -> str Gets the localized message associated with this event entry.
Get: Message(self: EventLogEntry) -> str Gets the replacement strings associated with the event log entry.
Get: ReplacementStrings(self: EventLogEntry) -> Array[str] Gets the name of the application that generated this event.
Get: Source(self: EventLogEntry) -> str Gets the local time at which this event was generated.
Get: TimeGenerated(self: EventLogEntry) -> DateTime Gets the local time at which this event was written to the log.
Get: TimeWritten(self: EventLogEntry) -> DateTime Gets the name of the user who is responsible for this event.
Get: UserName(self: EventLogEntry) -> str
| 2.116007
| 2
|
setup.py
|
datalexum/UNIX-time-from-NTP
| 1
|
6626468
|
<gh_stars>1-10
from setuptools import setup, find_packages
from io import open
from os import path
import pathlib

# Resolve all paths relative to this setup.py so builds work from any CWD.
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
with open(path.join(HERE, 'requirements.txt'), encoding='utf-8') as f:
    all_reqs = f.read().split('\n')
# Plain (non-VCS) requirement lines, minus comments and option lines.
install_requires = [x.strip() for x in all_reqs if ('git+' not in x) and (
    not x.startswith('#')) and (not x.startswith('-'))]
# BUG FIX: dependency_links must hold the VCS requirement lines; the original
# filter `if 'git+' not in x` was inverted and collected a copy of the plain
# requirements (on which the .replace('git+', '') was a no-op) instead.
dependency_links = [x.strip().replace('git+', '') for x in all_reqs
                    if 'git+' in x]
setup(
    name='UtfN',
    description='UNIX time from NTP or short UtfN is a simple CLI tool to set the time from an NTP-Server.',
    version='1.0.1',
    packages=find_packages(),
    install_requires=install_requires,
    python_requires='>=3',
    entry_points='''
        [console_scripts]
        utfn=utfn.__main__:main
    ''',
    author="<NAME>",
    # FIX: setuptools expects `keywords` (plural); the original `keyword=`
    # argument was silently ignored by setup().
    keywords="time, date, unix, setup, linux, rtc, battery",
    long_description=README,
    long_description_content_type="text/markdown",
    license='MIT',
    url='https://github.com/datalexum/UNIX-time-from-NTP/',
    download_url='https://codeload.github.com/datalexum/UNIX-time-from-NTP/tar.gz/refs/tags/1.0.1',
    dependency_links=dependency_links,
    author_email='<EMAIL>',
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
    ]
)
|
from setuptools import setup, find_packages
from io import open
from os import path
import pathlib

# Resolve all paths relative to this setup.py so builds work from any CWD.
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
with open(path.join(HERE, 'requirements.txt'), encoding='utf-8') as f:
    all_reqs = f.read().split('\n')
# Plain (non-VCS) requirement lines, minus comments and option lines.
install_requires = [x.strip() for x in all_reqs if ('git+' not in x) and (
    not x.startswith('#')) and (not x.startswith('-'))]
# BUG FIX: dependency_links must hold the VCS requirement lines; the original
# filter `if 'git+' not in x` was inverted and collected a copy of the plain
# requirements (on which the .replace('git+', '') was a no-op) instead.
dependency_links = [x.strip().replace('git+', '') for x in all_reqs
                    if 'git+' in x]
setup(
    name='UtfN',
    description='UNIX time from NTP or short UtfN is a simple CLI tool to set the time from an NTP-Server.',
    version='1.0.1',
    packages=find_packages(),
    install_requires=install_requires,
    python_requires='>=3',
    entry_points='''
        [console_scripts]
        utfn=utfn.__main__:main
    ''',
    author="<NAME>",
    # FIX: setuptools expects `keywords` (plural); the original `keyword=`
    # argument was silently ignored by setup().
    keywords="time, date, unix, setup, linux, rtc, battery",
    long_description=README,
    long_description_content_type="text/markdown",
    license='MIT',
    url='https://github.com/datalexum/UNIX-time-from-NTP/',
    download_url='https://codeload.github.com/datalexum/UNIX-time-from-NTP/tar.gz/refs/tags/1.0.1',
    dependency_links=dependency_links,
    author_email='<EMAIL>',
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
    ]
)
|
en
| 0.220645
|
[console_scripts] utfn=utfn.__main__:main
| 1.678683
| 2
|
src/fedex_python/examples/unitary_schemas/generate_schemas_modules.py
|
h4ck3rm1k3/stepcode
| 1
|
6626469
|
""" This script runs fedex_python over each EXPRESS schema in the test/unitary_schemas folder"""
import glob
import os
import subprocess

unitary_schemas_path = '../../../../test/unitary_schemas'
fedex_python_path = '../../../../cmake-build/bin/fedex_python'

# Collect every *.exp schema and invoke the generator once per file.
unitary_schemas = glob.glob(os.path.join(unitary_schemas_path, '*.exp'))
for unitary_schema in unitary_schemas:
    subprocess.call([fedex_python_path, unitary_schema])
|
__doc__= ''' This script runs fedex_python over each EXPRESS schema in the test/unitary_schemas folder'''
# Paths are relative to this script's location inside the build tree.
unitary_schemas_path = '../../../../test/unitary_schemas'
fedex_python_path = '../../../../cmake-build/bin/fedex_python'
import subprocess
import glob
import os
# One fedex_python invocation per *.exp schema found in the test folder.
unitary_schemas = glob.glob(os.path.join(unitary_schemas_path,'*.exp'))
for unitary_schema in unitary_schemas:
    subprocess.call([fedex_python_path,unitary_schema])
|
en
| 0.702431
|
This script runs fedex_python over each EXPRESS schema in the test/unitary_schemas folder
| 1.831335
| 2
|
iniParser/iniparser.py
|
MDGSF/PythonPractice
| 1
|
6626470
|
<reponame>MDGSF/PythonPractice<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Author: <NAME>
Date: 2019-04-22
Module IniParser implements an *.ini parser.
"""
DefaultSectionName = 'default'
class Node:
def __init__(self, key, value):
self.key = key
self.value = value
class Section:
def __init__(self, name=DefaultSectionName):
self.name = name
self.nodeslist = []
def __getitem__(self, item):
"""
Get value with item(key).
"""
if type(item) != str:
raise TypeError
for node in self.nodeslist:
if node.key == item:
return node.value
return ""
def __setitem__(self, key, value):
"""
If key already exists, update key=value.
If key not exists, set new key=value.
"""
if type(key) != str or type(value) != str:
raise TypeError
for node in self.nodeslist:
if node.key == key:
node.value = value
break
else:
self.nodeslist.append(Node(key, value))
def hasKey(self, key):
"""
Check key whether is exists in this section.
"""
for node in self.nodeslist:
if node.key == key:
return True
return False
def remove(self, key):
"""
Remove key in this section.
"""
for i, node in enumerate(self.nodeslist):
if node.key == key:
del self.nodeslist[i]
return
class IniParser:
"""
IniParser is an *.ini file parser.
"""
def __init__(self, defaultIsVisibleWhenSave=False):
self.defaultIsVisibleWhenSave = defaultIsVisibleWhenSave
self.allsections = []
def read(self, filename):
"""
Wrapper for LoadFromFile.
"""
return self.LoadFromFile(filename)
def LoadFromFile(self, filename):
"""
LoadFromFile load data from filename file.
"""
f = open(filename, 'r')
contents = f.read()
self.LoadFromData(contents)
f.close()
def LoadFromData(self, data):
"""
Load data from memory data.
"""
curSectionName = DefaultSectionName
self.addsection(curSectionName)
for line in data.splitlines():
line = line.strip()
if len(line) == 0:
continue
if line.startswith('[') and line.endswith(']'):
curSectionName = line[1:len(line) - 1]
self.addsection(curSectionName)
elif line.startswith('#'):
continue
else:
linearray = line.split(sep='=', maxsplit=1)
if len(linearray) != 2:
continue
key = linearray[0].strip()
value = linearray[1].strip()
self.__setSectionKeyValue(curSectionName, key, value)
def write(self, filename):
"""
Wrapper for SaveToFile.
"""
return self.SaveToFile(filename)
def SaveToFile(self, filename):
"""
Save data to filename file.
"""
f = open(filename, 'w')
for section in self.allsections:
if self.defaultIsVisibleWhenSave:
f.write("[" + section.name + "]\n")
else:
if section.name != DefaultSectionName:
f.write("[" + section.name + "]\n")
for node in section.nodeslist:
f.write(node.key + " = " + node.value + "\n")
f.write('\n')
f.close()
def remove(self, sectionname, key=None):
"""
remove sectionname section if key is None.
remove section's key if key is not None.
"""
if key is None:
self.__removesection(sectionname)
else:
section = self.__getSection(sectionname)
if section:
section.remove(key)
def sections(self):
"""
return all section's name.
"""
if self.defaultIsVisibleWhenSave:
return [section.name for section in self.allsections]
else:
return [section.name for section in self.allsections if
section.name != DefaultSectionName]
def hassection(self, sectionname):
"""
hassection check whether sectionname is exists.
"""
for section in self.allsections:
if section.name == sectionname:
return True
return False
def addsection(self, sectionname):
"""
Add one section with sectionname.
"""
if self.hassection(sectionname):
return
self.allsections.append(Section(sectionname))
def items(self, sectionname):
"""
return all items in sectionname.
"""
for section in self.allsections:
if section.name != sectionname:
continue
return [(node.key, node.value) for node in section.nodeslist]
return []
def keys(self, sectionname):
"""
return all keys in sectionname section.
"""
for section in self.allsections:
if section.name != sectionname:
continue
return [node.key for node in section.nodeslist]
return []
def hasKey(self, sectionname, key):
"""
judge whether key is in sectionname section.
"""
section = self.__getSection(sectionname)
if section is None:
return False
return section.hasKey(key)
def getd(self, key, sectionname=DefaultSectionName):
"""
Get value with key in sectionname with DefaultSectionName.
"""
return self.get(sectionname, key)
def setd(self, key, value, sectionname=DefaultSectionName):
"""
Set key=value in sectionname section.
"""
return self.set(sectionname, key, value)
def get(self, sectionname, key):
"""
Get value with key in sectionname section.
If sectionname or key is not exists, return empty string "".
"""
if type(key) != str or type(sectionname) != str:
raise TypeError
if len(key) == 0 or len(sectionname) == 0:
return ""
section = self.__getSection(sectionname)
if section is None:
return ""
return section[key]
def getint(self, sectionname, key):
"""
Get int value with key in sectionname section.
"""
return int(self.get(sectionname, key))
def set(self, sectionname, key, value):
"""
Set key=value in sectionname section.
"""
if type(key) != str or type(sectionname) != str:
raise TypeError
if len(key) == 0 or len(sectionname) == 0:
return ""
self.__setSectionKeyValue(sectionname, key, value)
def __removesection(self, sectionname):
for i, section in enumerate(self.allsections):
if section.name == sectionname:
del self.allsections[i]
return
def __setSectionKeyValue(self, sectionname, key, value):
section = self.__getSection(sectionname)
if section is None:
raise KeyError("invalid sectionname")
section[key] = value
def __getSection(self, sectionname):
for section in self.allsections:
if section.name == sectionname:
return section
return None
def __str__(self):
result = ""
for section in self.allsections:
if self.defaultIsVisibleWhenSave:
result += "[" + section.name + "]\n"
else:
if section.name != DefaultSectionName:
result += "[" + section.name + "]\n"
for node in section.nodeslist:
result += node.key + " = " + node.value + "\n"
result += "\n"
return result
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Author: <NAME>
Date: 2019-04-22
Module IniParser implements an *.ini parser.
"""
DefaultSectionName = 'default'
class Node:
def __init__(self, key, value):
self.key = key
self.value = value
class Section:
def __init__(self, name=DefaultSectionName):
self.name = name
self.nodeslist = []
def __getitem__(self, item):
"""
Get value with item(key).
"""
if type(item) != str:
raise TypeError
for node in self.nodeslist:
if node.key == item:
return node.value
return ""
def __setitem__(self, key, value):
"""
If key already exists, update key=value.
If key not exists, set new key=value.
"""
if type(key) != str or type(value) != str:
raise TypeError
for node in self.nodeslist:
if node.key == key:
node.value = value
break
else:
self.nodeslist.append(Node(key, value))
def hasKey(self, key):
"""
Check key whether is exists in this section.
"""
for node in self.nodeslist:
if node.key == key:
return True
return False
def remove(self, key):
"""
Remove key in this section.
"""
for i, node in enumerate(self.nodeslist):
if node.key == key:
del self.nodeslist[i]
return
class IniParser:
"""
IniParser is an *.ini file parser.
"""
def __init__(self, defaultIsVisibleWhenSave=False):
self.defaultIsVisibleWhenSave = defaultIsVisibleWhenSave
self.allsections = []
def read(self, filename):
"""
Wrapper for LoadFromFile.
"""
return self.LoadFromFile(filename)
def LoadFromFile(self, filename):
"""
LoadFromFile load data from filename file.
"""
f = open(filename, 'r')
contents = f.read()
self.LoadFromData(contents)
f.close()
def LoadFromData(self, data):
"""
Load data from memory data.
"""
curSectionName = DefaultSectionName
self.addsection(curSectionName)
for line in data.splitlines():
line = line.strip()
if len(line) == 0:
continue
if line.startswith('[') and line.endswith(']'):
curSectionName = line[1:len(line) - 1]
self.addsection(curSectionName)
elif line.startswith('#'):
continue
else:
linearray = line.split(sep='=', maxsplit=1)
if len(linearray) != 2:
continue
key = linearray[0].strip()
value = linearray[1].strip()
self.__setSectionKeyValue(curSectionName, key, value)
def write(self, filename):
"""
Wrapper for SaveToFile.
"""
return self.SaveToFile(filename)
def SaveToFile(self, filename):
"""
Save data to filename file.
"""
f = open(filename, 'w')
for section in self.allsections:
if self.defaultIsVisibleWhenSave:
f.write("[" + section.name + "]\n")
else:
if section.name != DefaultSectionName:
f.write("[" + section.name + "]\n")
for node in section.nodeslist:
f.write(node.key + " = " + node.value + "\n")
f.write('\n')
f.close()
def remove(self, sectionname, key=None):
"""
remove sectionname section if key is None.
remove section's key if key is not None.
"""
if key is None:
self.__removesection(sectionname)
else:
section = self.__getSection(sectionname)
if section:
section.remove(key)
def sections(self):
"""
return all section's name.
"""
if self.defaultIsVisibleWhenSave:
return [section.name for section in self.allsections]
else:
return [section.name for section in self.allsections if
section.name != DefaultSectionName]
def hassection(self, sectionname):
"""
hassection check whether sectionname is exists.
"""
for section in self.allsections:
if section.name == sectionname:
return True
return False
def addsection(self, sectionname):
"""
Add one section with sectionname.
"""
if self.hassection(sectionname):
return
self.allsections.append(Section(sectionname))
def items(self, sectionname):
"""
return all items in sectionname.
"""
for section in self.allsections:
if section.name != sectionname:
continue
return [(node.key, node.value) for node in section.nodeslist]
return []
def keys(self, sectionname):
"""
return all keys in sectionname section.
"""
for section in self.allsections:
if section.name != sectionname:
continue
return [node.key for node in section.nodeslist]
return []
def hasKey(self, sectionname, key):
"""
judge whether key is in sectionname section.
"""
section = self.__getSection(sectionname)
if section is None:
return False
return section.hasKey(key)
def getd(self, key, sectionname=DefaultSectionName):
"""
Get value with key in sectionname with DefaultSectionName.
"""
return self.get(sectionname, key)
def setd(self, key, value, sectionname=DefaultSectionName):
"""
Set key=value in sectionname section.
"""
return self.set(sectionname, key, value)
def get(self, sectionname, key):
"""
Get value with key in sectionname section.
If sectionname or key is not exists, return empty string "".
"""
if type(key) != str or type(sectionname) != str:
raise TypeError
if len(key) == 0 or len(sectionname) == 0:
return ""
section = self.__getSection(sectionname)
if section is None:
return ""
return section[key]
def getint(self, sectionname, key):
"""
Get int value with key in sectionname section.
"""
return int(self.get(sectionname, key))
def set(self, sectionname, key, value):
"""
Set key=value in sectionname section.
"""
if type(key) != str or type(sectionname) != str:
raise TypeError
if len(key) == 0 or len(sectionname) == 0:
return ""
self.__setSectionKeyValue(sectionname, key, value)
def __removesection(self, sectionname):
for i, section in enumerate(self.allsections):
if section.name == sectionname:
del self.allsections[i]
return
def __setSectionKeyValue(self, sectionname, key, value):
section = self.__getSection(sectionname)
if section is None:
raise KeyError("invalid sectionname")
section[key] = value
def __getSection(self, sectionname):
for section in self.allsections:
if section.name == sectionname:
return section
return None
def __str__(self):
result = ""
for section in self.allsections:
if self.defaultIsVisibleWhenSave:
result += "[" + section.name + "]\n"
else:
if section.name != DefaultSectionName:
result += "[" + section.name + "]\n"
for node in section.nodeslist:
result += node.key + " = " + node.value + "\n"
result += "\n"
return result
|
en
| 0.581209
|
#!/usr/bin/env python # -*- coding: UTF-8 -*- Author: <NAME> Date: 2019-04-22 Module IniParser implements an *.ini parser. Get value with item(key). If key already exists, update key=value. If key not exists, set new key=value. Check key whether is exists in this section. Remove key in this section. IniParser is an *.ini file parser. Wrapper for LoadFromFile. LoadFromFile load data from filename file. Load data from memory data. Wrapper for SaveToFile. Save data to filename file. remove sectionname section if key is None. remove section's key if key is not None. return all section's name. hassection check whether sectionname is exists. Add one section with sectionname. return all items in sectionname. return all keys in sectionname section. judge whether key is in sectionname section. Get value with key in sectionname with DefaultSectionName. Set key=value in sectionname section. Get value with key in sectionname section. If sectionname or key is not exists, return empty string "". Get int value with key in sectionname section. Set key=value in sectionname section.
| 2.876364
| 3
|
Pygame/Game/study/hellouglyworld.py
|
danghohuuphuc/Code_Python
| 0
|
6626471
|
<gh_stars>0
import pygame, sys
from pygame.locals import *
pygame.init()
screen = pygame.display.set_mode((800, 600))
iconpikachu = pygame.image.load("pikachu.png")
pygame.display.set_icon(iconpikachu)
pygame.display.set_caption("Hello Ugly World")
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLUE = (0, 0, 128)
fontObj = pygame.font.Font('FreeSansBold.ttf', 32)
textSurfaceObj = fontObj.render("hello world!", True, GREEN, BLUE)
textRectObj = textSurfaceObj.get_rect()
textRectObj.center = (200, 150)
# the main game loop
while True:
screen.fill(WHITE)
screen.blit(textSurfaceObj, textRectObj)
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
pygame.display.update()
|
import pygame, sys
from pygame.locals import *
pygame.init()
screen = pygame.display.set_mode((800, 600))
iconpikachu = pygame.image.load("pikachu.png")
pygame.display.set_icon(iconpikachu)
pygame.display.set_caption("Hello Ugly World")
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLUE = (0, 0, 128)
fontObj = pygame.font.Font('FreeSansBold.ttf', 32)
textSurfaceObj = fontObj.render("hello world!", True, GREEN, BLUE)
textRectObj = textSurfaceObj.get_rect()
textRectObj.center = (200, 150)
# the main game loop
while True:
screen.fill(WHITE)
screen.blit(textSurfaceObj, textRectObj)
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
pygame.display.update()
|
en
| 0.833005
|
# the main game loop
| 3.26503
| 3
|
layer.py
|
unionsetde/ToyNN
| 0
|
6626472
|
<reponame>unionsetde/ToyNN
import numpy
import pickle
class fully_connected_layer:
def __init__(self, num_of_input, num_of_output, w, b):
self.input_size = num_of_input
self.output_size = num_of_output
self.weights = numpy.random.randn(self.output_size, self.input_size)
self.bias = numpy.zeros(self.output_size)
if (w.shape == self.weights.shape and b.shape == self.bias.shape):
self.weights = w
self.bias = b
else:
print "Initializing weights for fully_connected_layer..."
self.weights = self.weights*numpy.sqrt(2./(self.input_size+1))
self.is_valid_input = False
self.input_shape = None
def save_layer(self, file_path):
pickle.dump(self, open(file_path, 'w'))
def load_layer(self, file_path):
print "Loading fully_connected_layer weights and bias..."
old_one = pickle.load(open(file_path, 'r'))
self.input_size = old_one.input_size
self.output_size = old_one.output_size
self.weights = old_one.weights
self.bias = old_one.bias
print "Loading completed..."
self.is_valid_input = False
self.input_shape = None
def check_input_validity(self, input_array):
if (input_array.flatten().shape[0] == self.weights.shape[1]):
self.is_valid_input = True
else:
self.is_valid_input = False
print "** Warning: Input dimensions do not match fully_connected_layer **"
print "Input shape:",input_array.shape
print "Layer's input_size",self.input_size
return self.is_valid_input
def propagate_forward(self, input_array):
self.input_shape = input_array.shape
reshaped_input = input_array.reshape(1, self.input_size)
output = numpy.inner(reshaped_input, self.weights)
output += self.bias
return output
def update_weights(self, delta_weights, delta_bias):
self.weights = self.weights+delta_weights
self.bias = self.bias+delta_bias
def propagate_backward(self, error_array, input_array):
reshaped_error = error_array.reshape(1, self.output_size)
gradient_bias = reshaped_error.flatten()
gradient_weights = numpy.outer(input_array, reshaped_error).T
new_error = numpy.inner(self.weights.T, reshaped_error)
new_error = new_error.reshape(self.input_shape)
return (new_error, gradient_weights, gradient_bias)
def regularize_weights(self, l1=0.1, l2=0.01):
self.weights = self.weights-l1*numpy.fabs(self.weights)-l2*numpy.square(self.weights)
class convolutional_layer:
def __init__(self, input_depth, num_filter, field_size, stride, zero_pad, w, b):
self.d = input_depth
self.n = num_filter
self.f = field_size
self.s = stride
self.p = zero_pad
self.weights = numpy.random.randn(self.n, self.d, self.f, self.f)
self.bias = numpy.zeros((self.n))
if (w.shape == self.weights.shape and b.shape == self.bias.shape):
self.weights = w
self.bias = b
else:
print "Initializing weights for convolutional_layer..."
self.weights = self.weights*numpy.sqrt(2./(self.d*self.f*self.f+1))
self.is_valid_input = False
self.padded_input = None
self.activation_derivative_mask = None
def save_layer(self, file_path):
pickle.dump(self, open(file_path, 'w'))
def load_layer(self, file_path):
print "Loading convolutional_layer weights and bias..."
old_one = pickle.load(open(file_path, 'r'))
self.d = old_one.d
self.n = old_one.n
self.f = old_one.f
self.s = old_one.s
self.p = old_one.p
self.weights = old_one.weights
self.bias = old_one.bias
print "Loading completed..."
self.is_valid_input = False
self.padded_input = None
self.activation_derivative_mask = None
def check_input_validity(self, input_array):
input_shape = input_array.shape
input_h = input_shape[1]
input_w = input_shape[2]
if (0 == (input_w+input_h-2*self.f+4*self.p)%self.s):
self.is_valid_input = True
else:
self.is_valid_input = False
print "** Warning: Input dimensions do not match convolutional_layer **"
print "Input shape:",input_shape
print "Layer's parameters:"
print "input_depth:", self.d
print "num_filter:",self.n
print "field_size:",self.f
print "stride:",self.s
print "zero_pad:",self.p
return self.is_valid_input
def propagate_forward(self, input_array, ReLU_alpha=0.1):
input_shape = input_array.shape
padded_input_h = input_shape[1]+2*self.p
padded_input_w = input_shape[2]+2*self.p
padded_input_h_end = padded_input_h-self.p
padded_input_w_end = padded_input_w-self.p
if (self.p > 0):
self.padded_input = numpy.zeros((input_shape[0], padded_input_h, padded_input_w))
self.padded_input[:,self.p:padded_input_h_end,self.p:padded_input_w_end] = input_array
else:
self.padded_input = input_array
output_h = ((input_shape[1]-self.f+2*self.p)/self.s)+1
output_w = ((input_shape[2]-self.f+2*self.p)/self.s)+1
output_d = self.n
output = numpy.zeros((output_d, output_h, output_w))
self.activation_derivative_mask = numpy.ones((output_d, output_h, output_w))
for filter_cnt in range(0, self.n):
output[filter_cnt] += self.bias[filter_cnt]*numpy.ones((output_h, output_w))
for depth_cnt in range(0, self.d):
# silding-window 2D convolution
for h_cnt in range(0, output_h):
h_begin = h_cnt*self.s
h_end = h_begin+self.f
for w_cnt in range(0, output_w):
w_begin = w_cnt*self.s
w_end = w_begin+self.f
buff_w = self.weights[filter_cnt][depth_cnt]
buff_x = self.padded_input[depth_cnt][h_begin:h_end,w_begin:w_end]
output[filter_cnt][h_cnt][w_cnt] += numpy.sum((buff_w*buff_x))
# leaky ReLU activation
if output[filter_cnt][h_cnt][w_cnt] < 0:
output[filter_cnt][h_cnt][w_cnt] *= ReLU_alpha
self.activation_derivative_mask[filter_cnt][h_cnt][w_cnt] = ReLU_alpha
return output
def update_weights(self, delta_weights, delta_bias):
self.weights = self.weights+delta_weights
self.bias = self.bias+delta_bias
def propagate_backward(self, error_array):
masked_error = self.activation_derivative_mask*error_array
gradient_weights = numpy.zeros(self.weights.shape)
gradient_bias = numpy.zeros(self.bias.shape)
new_error = numpy.zeros(self.padded_input.shape)
error_h = error_array.shape[1]
error_w = error_array.shape[2]
for filter_cnt in range(0, self.n):
gradient_bias[filter_cnt] = (numpy.sum(masked_error[filter_cnt]))/(error_h*error_w)
for depth_cnt in range(0, self.d):
for h_cnt in range(0, error_h):
h_begin = h_cnt*self.s
h_end = h_begin+self.f
for w_cnt in range(0, error_w):
w_begin = w_cnt*self.s
w_end = w_begin+self.f
buff_e = masked_error[filter_cnt][h_cnt][w_cnt] # scalar value
buff_y = self.padded_input[depth_cnt][h_begin:h_end,w_begin:w_end]
gradient_weights[filter_cnt][depth_cnt] += (buff_e*buff_y)
buff_w = self.weights[filter_cnt][depth_cnt]
new_error[depth_cnt][h_begin:h_end,w_begin:w_end] += (buff_e*buff_w)
new_error_h_end = new_error.shape[1]-self.p
new_error_w_end = new_error.shape[2]-self.p
new_error = new_error[:,self.p:new_error_h_end,self.p:new_error_w_end]
return (new_error, gradient_weights, gradient_bias)
def regularize_weights(self, l1=0.1, l2=0.01):
self.weights = self.weights-l1*numpy.fabs(self.weights)-l2*numpy.square(self.weights)
class max_pooling_layer:
def __init__(self, input_depth, field_size, stride):
self.d = input_depth
self.f = field_size
self.s = stride
self.is_valid_input = False
self.input_shape = None
self.activation_positions = None
def check_input_validity(self, input_array):
input_shape = input_array.shape
input_h = input_shape[1]
input_w = input_shape[2]
if (0 == (input_w+input_h-2*self.f)%self.s):
self.is_valid_input = True
else:
self.is_valid_input = False
print "** Warning: Input dimensions do not match max_pooling_layer **"
print "Input shape:",input_shape
print "Layer's parameters:"
print "input_depth:", self.d
print "field_size:",self.f
print "stride:",self.s
return self.is_valid_input
def propagate_forward(self, input_array):
self.input_shape = input_array.shape
output_h = ((self.input_shape[1]-self.f)/self.s)+1
output_w = ((self.input_shape[2]-self.f)/self.s)+1
output_d = self.d
output = numpy.zeros((output_d, output_h, output_w))
self.activation_positions = numpy.zeros((output_d, output_h, output_w))
for depth_cnt in range(0, self.d):
for h_cnt in range(0, output_h):
h_begin = h_cnt*self.s
h_end = h_begin+self.f
for w_cnt in range(0, output_w):
w_begin = w_cnt*self.s
w_end = w_begin+self.f
buff_x = input_array[depth_cnt][h_begin:h_end,w_begin:w_end]
output[depth_cnt][h_cnt][w_cnt] = numpy.max(buff_x)
# record the activation position of the sub-input
self.activation_positions[depth_cnt][h_cnt][w_cnt] = numpy.argmax(buff_x)
return output
def propagate_backward(self, error_array):
new_error = numpy.zeros(self.input_shape)
error_h = error_array.shape[1]
error_w = error_array.shape[2]
for depth_cnt in range(0, self.d):
for h_cnt in range(0, error_h):
h_begin = h_cnt*self.s
h_end = h_begin+self.f
for w_cnt in range(0, error_w):
w_begin = w_cnt*self.s
w_end = w_begin+self.f
buff_e = numpy.zeros((self.f*self.f))
buff_pos = self.activation_positions[depth_cnt][h_cnt][w_cnt]
buff_e[numpy.int32(buff_pos)] = error_array[depth_cnt][h_cnt][w_cnt]
buff_e = buff_e.reshape(self.f, self.f)
new_error[depth_cnt][h_begin:h_end,w_begin:w_end] += buff_e
return new_error
class average_pooling_layer:
def __init(self):
self.input_shape = None
def propagate_forward(self, input_array):
self.input_shape = input_array.shape
output = numpy.average(input_array, axis=2)
output = numpy.average(output, axis=1)
return output # output is 1-D array
def propagate_backward(self, error_array):
input_h = self.input_shape[1]
input_w = self.input_shape[2]
new_error = numpy.ones(self.input_shape)*(1./(input_h*input_w))
reshaped_error = error_array.reshape(self.input_shape[0], 1, 1)
new_error = reshaped_error*new_error
return new_error
class softmax_layer:
def __init__(self):
self.input_shape = None
self.activation_derivative_mask = None
def propagate_forward(self, input_array):
self.input_shape = input_array.shape
reshaped_input = input_array.flatten()
reshaped_input = reshaped_input-numpy.max(reshaped_input)
reshaped_input = numpy.exp(reshaped_input)
output = reshaped_input/numpy.sum(reshaped_input)
self.activation_derivative_mask = output*(1-output)
return output
def propagate_backward(self, error_array):
new_error = self.activation_derivative_mask*error_array.flatten()
new_error = new_error.reshape(self.input_shape)
return new_error
|
import numpy
import pickle
class fully_connected_layer:
def __init__(self, num_of_input, num_of_output, w, b):
self.input_size = num_of_input
self.output_size = num_of_output
self.weights = numpy.random.randn(self.output_size, self.input_size)
self.bias = numpy.zeros(self.output_size)
if (w.shape == self.weights.shape and b.shape == self.bias.shape):
self.weights = w
self.bias = b
else:
print "Initializing weights for fully_connected_layer..."
self.weights = self.weights*numpy.sqrt(2./(self.input_size+1))
self.is_valid_input = False
self.input_shape = None
def save_layer(self, file_path):
pickle.dump(self, open(file_path, 'w'))
def load_layer(self, file_path):
print "Loading fully_connected_layer weights and bias..."
old_one = pickle.load(open(file_path, 'r'))
self.input_size = old_one.input_size
self.output_size = old_one.output_size
self.weights = old_one.weights
self.bias = old_one.bias
print "Loading completed..."
self.is_valid_input = False
self.input_shape = None
def check_input_validity(self, input_array):
if (input_array.flatten().shape[0] == self.weights.shape[1]):
self.is_valid_input = True
else:
self.is_valid_input = False
print "** Warning: Input dimensions do not match fully_connected_layer **"
print "Input shape:",input_array.shape
print "Layer's input_size",self.input_size
return self.is_valid_input
def propagate_forward(self, input_array):
self.input_shape = input_array.shape
reshaped_input = input_array.reshape(1, self.input_size)
output = numpy.inner(reshaped_input, self.weights)
output += self.bias
return output
def update_weights(self, delta_weights, delta_bias):
self.weights = self.weights+delta_weights
self.bias = self.bias+delta_bias
def propagate_backward(self, error_array, input_array):
reshaped_error = error_array.reshape(1, self.output_size)
gradient_bias = reshaped_error.flatten()
gradient_weights = numpy.outer(input_array, reshaped_error).T
new_error = numpy.inner(self.weights.T, reshaped_error)
new_error = new_error.reshape(self.input_shape)
return (new_error, gradient_weights, gradient_bias)
def regularize_weights(self, l1=0.1, l2=0.01):
self.weights = self.weights-l1*numpy.fabs(self.weights)-l2*numpy.square(self.weights)
class convolutional_layer:
def __init__(self, input_depth, num_filter, field_size, stride, zero_pad, w, b):
self.d = input_depth
self.n = num_filter
self.f = field_size
self.s = stride
self.p = zero_pad
self.weights = numpy.random.randn(self.n, self.d, self.f, self.f)
self.bias = numpy.zeros((self.n))
if (w.shape == self.weights.shape and b.shape == self.bias.shape):
self.weights = w
self.bias = b
else:
print "Initializing weights for convolutional_layer..."
self.weights = self.weights*numpy.sqrt(2./(self.d*self.f*self.f+1))
self.is_valid_input = False
self.padded_input = None
self.activation_derivative_mask = None
def save_layer(self, file_path):
pickle.dump(self, open(file_path, 'w'))
def load_layer(self, file_path):
print "Loading convolutional_layer weights and bias..."
old_one = pickle.load(open(file_path, 'r'))
self.d = old_one.d
self.n = old_one.n
self.f = old_one.f
self.s = old_one.s
self.p = old_one.p
self.weights = old_one.weights
self.bias = old_one.bias
print "Loading completed..."
self.is_valid_input = False
self.padded_input = None
self.activation_derivative_mask = None
def check_input_validity(self, input_array):
input_shape = input_array.shape
input_h = input_shape[1]
input_w = input_shape[2]
if (0 == (input_w+input_h-2*self.f+4*self.p)%self.s):
self.is_valid_input = True
else:
self.is_valid_input = False
print "** Warning: Input dimensions do not match convolutional_layer **"
print "Input shape:",input_shape
print "Layer's parameters:"
print "input_depth:", self.d
print "num_filter:",self.n
print "field_size:",self.f
print "stride:",self.s
print "zero_pad:",self.p
return self.is_valid_input
def propagate_forward(self, input_array, ReLU_alpha=0.1):
input_shape = input_array.shape
padded_input_h = input_shape[1]+2*self.p
padded_input_w = input_shape[2]+2*self.p
padded_input_h_end = padded_input_h-self.p
padded_input_w_end = padded_input_w-self.p
if (self.p > 0):
self.padded_input = numpy.zeros((input_shape[0], padded_input_h, padded_input_w))
self.padded_input[:,self.p:padded_input_h_end,self.p:padded_input_w_end] = input_array
else:
self.padded_input = input_array
output_h = ((input_shape[1]-self.f+2*self.p)/self.s)+1
output_w = ((input_shape[2]-self.f+2*self.p)/self.s)+1
output_d = self.n
output = numpy.zeros((output_d, output_h, output_w))
self.activation_derivative_mask = numpy.ones((output_d, output_h, output_w))
for filter_cnt in range(0, self.n):
output[filter_cnt] += self.bias[filter_cnt]*numpy.ones((output_h, output_w))
for depth_cnt in range(0, self.d):
# silding-window 2D convolution
for h_cnt in range(0, output_h):
h_begin = h_cnt*self.s
h_end = h_begin+self.f
for w_cnt in range(0, output_w):
w_begin = w_cnt*self.s
w_end = w_begin+self.f
buff_w = self.weights[filter_cnt][depth_cnt]
buff_x = self.padded_input[depth_cnt][h_begin:h_end,w_begin:w_end]
output[filter_cnt][h_cnt][w_cnt] += numpy.sum((buff_w*buff_x))
# leaky ReLU activation
if output[filter_cnt][h_cnt][w_cnt] < 0:
output[filter_cnt][h_cnt][w_cnt] *= ReLU_alpha
self.activation_derivative_mask[filter_cnt][h_cnt][w_cnt] = ReLU_alpha
return output
def update_weights(self, delta_weights, delta_bias):
self.weights = self.weights+delta_weights
self.bias = self.bias+delta_bias
def propagate_backward(self, error_array):
masked_error = self.activation_derivative_mask*error_array
gradient_weights = numpy.zeros(self.weights.shape)
gradient_bias = numpy.zeros(self.bias.shape)
new_error = numpy.zeros(self.padded_input.shape)
error_h = error_array.shape[1]
error_w = error_array.shape[2]
for filter_cnt in range(0, self.n):
gradient_bias[filter_cnt] = (numpy.sum(masked_error[filter_cnt]))/(error_h*error_w)
for depth_cnt in range(0, self.d):
for h_cnt in range(0, error_h):
h_begin = h_cnt*self.s
h_end = h_begin+self.f
for w_cnt in range(0, error_w):
w_begin = w_cnt*self.s
w_end = w_begin+self.f
buff_e = masked_error[filter_cnt][h_cnt][w_cnt] # scalar value
buff_y = self.padded_input[depth_cnt][h_begin:h_end,w_begin:w_end]
gradient_weights[filter_cnt][depth_cnt] += (buff_e*buff_y)
buff_w = self.weights[filter_cnt][depth_cnt]
new_error[depth_cnt][h_begin:h_end,w_begin:w_end] += (buff_e*buff_w)
new_error_h_end = new_error.shape[1]-self.p
new_error_w_end = new_error.shape[2]-self.p
new_error = new_error[:,self.p:new_error_h_end,self.p:new_error_w_end]
return (new_error, gradient_weights, gradient_bias)
def regularize_weights(self, l1=0.1, l2=0.01):
self.weights = self.weights-l1*numpy.fabs(self.weights)-l2*numpy.square(self.weights)
class max_pooling_layer:
def __init__(self, input_depth, field_size, stride):
self.d = input_depth
self.f = field_size
self.s = stride
self.is_valid_input = False
self.input_shape = None
self.activation_positions = None
def check_input_validity(self, input_array):
input_shape = input_array.shape
input_h = input_shape[1]
input_w = input_shape[2]
if (0 == (input_w+input_h-2*self.f)%self.s):
self.is_valid_input = True
else:
self.is_valid_input = False
print "** Warning: Input dimensions do not match max_pooling_layer **"
print "Input shape:",input_shape
print "Layer's parameters:"
print "input_depth:", self.d
print "field_size:",self.f
print "stride:",self.s
return self.is_valid_input
def propagate_forward(self, input_array):
self.input_shape = input_array.shape
output_h = ((self.input_shape[1]-self.f)/self.s)+1
output_w = ((self.input_shape[2]-self.f)/self.s)+1
output_d = self.d
output = numpy.zeros((output_d, output_h, output_w))
self.activation_positions = numpy.zeros((output_d, output_h, output_w))
for depth_cnt in range(0, self.d):
for h_cnt in range(0, output_h):
h_begin = h_cnt*self.s
h_end = h_begin+self.f
for w_cnt in range(0, output_w):
w_begin = w_cnt*self.s
w_end = w_begin+self.f
buff_x = input_array[depth_cnt][h_begin:h_end,w_begin:w_end]
output[depth_cnt][h_cnt][w_cnt] = numpy.max(buff_x)
# record the activation position of the sub-input
self.activation_positions[depth_cnt][h_cnt][w_cnt] = numpy.argmax(buff_x)
return output
def propagate_backward(self, error_array):
new_error = numpy.zeros(self.input_shape)
error_h = error_array.shape[1]
error_w = error_array.shape[2]
for depth_cnt in range(0, self.d):
for h_cnt in range(0, error_h):
h_begin = h_cnt*self.s
h_end = h_begin+self.f
for w_cnt in range(0, error_w):
w_begin = w_cnt*self.s
w_end = w_begin+self.f
buff_e = numpy.zeros((self.f*self.f))
buff_pos = self.activation_positions[depth_cnt][h_cnt][w_cnt]
buff_e[numpy.int32(buff_pos)] = error_array[depth_cnt][h_cnt][w_cnt]
buff_e = buff_e.reshape(self.f, self.f)
new_error[depth_cnt][h_begin:h_end,w_begin:w_end] += buff_e
return new_error
class average_pooling_layer:
    """Global average pooling: collapse each depth slice to its mean.

    Bug fix: the constructor was misspelled ``__init`` and therefore
    never ran; it is now a proper ``__init__``.  Behavior is otherwise
    unchanged because ``input_shape`` is (re)assigned on every forward
    pass anyway.
    """

    def __init__(self):
        self.input_shape = None  # remembered for the backward pass

    def propagate_forward(self, input_array):
        """Return a 1-D array with the mean of each depth slice."""
        self.input_shape = input_array.shape
        output = numpy.average(input_array, axis=2)
        output = numpy.average(output, axis=1)
        return output  # output is 1-D array

    def propagate_backward(self, error_array):
        """Spread each slice's gradient uniformly over its h*w inputs."""
        input_h = self.input_shape[1]
        input_w = self.input_shape[2]
        # every input contributed 1/(h*w) to the average
        new_error = numpy.ones(self.input_shape) * (1. / (input_h * input_w))
        reshaped_error = error_array.reshape(self.input_shape[0], 1, 1)
        new_error = reshaped_error * new_error
        return new_error
class softmax_layer:
    """Numerically stable softmax over all elements of the input.

    The forward pass flattens the input, so the layer accepts any
    shape; the backward pass restores the original shape.
    """

    def __init__(self):
        self.input_shape = None
        # element-wise derivative y*(1-y), cached by the forward pass
        self.activation_derivative_mask = None

    def propagate_forward(self, input_array):
        """Return the softmax of the flattened input as a 1-D array."""
        self.input_shape = input_array.shape
        # subtracting the maximum before exponentiating avoids overflow
        shifted = input_array.flatten() - numpy.max(input_array)
        exps = numpy.exp(shifted)
        probabilities = exps / numpy.sum(exps)
        self.activation_derivative_mask = probabilities * (1 - probabilities)
        return probabilities

    def propagate_backward(self, error_array):
        """Scale the incoming error by the cached diagonal derivative."""
        scaled = error_array.flatten() * self.activation_derivative_mask
        return scaled.reshape(self.input_shape)
|
en
| 0.673012
|
# silding-window 2D convolution # leaky ReLU activation # scalar value # record the activation position of the sub-input # output is 1-D array
| 2.813833
| 3
|
src/sos/monitor.py
|
BoPeng/sos
| 0
|
6626473
|
<filename>src/sos/monitor.py
#!/usr/bin/env python3
#
# Copyright (c) <NAME> and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import os
import stat
import threading
import time
import psutil
from .tasks import TaskFile
from .utils import env, expand_time, format_HHMMSS
class TaskMonitor(threading.Thread):
    """Daemon thread that watches one task's status and resource usage.

    Appends heartbeat/resource lines to ``~/.sos/tasks/<task_id>.pulse``
    and aborts the task when it exceeds the configured walltime, memory,
    or process limits.  ``max_walltime``, ``max_mem`` and ``max_procs``
    arrive as (first, second) limit pairs; the effective limit is the
    smaller of the two defined values.
    """

    def __init__(
        self,
        task_id,
        monitor_interval,
        resource_monitor_interval,
        max_walltime=None,
        max_mem=None,
        max_procs=None,
        sos_dict=None,
    ):
        threading.Thread.__init__(self)
        self.task_id = task_id
        self.pid = os.getpid()
        self.monitor_interval = monitor_interval
        # full resource sample every N heartbeats, at least every beat
        self.resource_monitor_interval = max(
            resource_monitor_interval // monitor_interval, 1)
        self.daemon = True
        if max_walltime is None:
            self.max_walltime = None
        else:
            mwall = expand_time(max_walltime[0]) if max_walltime[0] else None
            wall = expand_time(max_walltime[1]) if max_walltime[1] else None
            if mwall is not None and wall is not None:
                # bug fix: was min(wall, wall), which ignored the first limit
                self.max_walltime = min(mwall, wall)
            elif mwall is not None:
                self.max_walltime = mwall
            else:
                self.max_walltime = wall
        #
        if max_mem is None:
            self.max_mem = None
        elif max_mem[0] is not None and max_mem[1] is not None:
            self.max_mem = min(max_mem[0], max_mem[1])
        elif max_mem[0] is not None:
            self.max_mem = max_mem[0]
        else:
            self.max_mem = max_mem[1]
        #
        if max_procs is None:
            self.max_procs = None
        elif max_procs[0] is not None and max_procs[1] is not None:
            self.max_procs = min(max_procs[0], max_procs[1])
        elif max_procs[0] is not None:
            self.max_procs = max_procs[0]
        else:
            self.max_procs = max_procs[1]
        self.pulse_file = os.path.join(
            os.path.expanduser("~"), ".sos", "tasks", task_id + ".pulse")
        # remove previous status file, which could be readonly if the job
        # was killed
        if os.path.isfile(self.pulse_file):
            if not os.access(self.pulse_file, os.W_OK):
                os.chmod(self.pulse_file, stat.S_IREAD | stat.S_IWRITE)
            os.remove(self.pulse_file)
        # bug fix: avoid the shared mutable default argument `sos_dict={}`
        self.sos_dict = {} if sos_dict is None else sos_dict
        with open(self.pulse_file, "a") as pd:
            pd.write(
                "#time\tproc_cpu\tproc_mem\tchildren\tchildren_cpu\tchildren_mem\n"
            )

    def _check(self):
        """Return (cpu%, rss, n_children, children_cpu%, children_rss)."""
        current_process = psutil.Process(self.pid)
        par_cpu = current_process.cpu_percent()
        par_mem = current_process.memory_info()[0]
        ch_cpu = 0
        ch_mem = 0
        children = current_process.children(recursive=True)
        n_children = len(children)
        for child in children:
            ch_cpu += child.cpu_percent()
            ch_mem += child.memory_info()[0]
        return par_cpu, par_mem, n_children, ch_cpu, ch_mem

    def _exceed_resource(self, msg):
        """Record *msg* in the .soserr file, mark the task aborted, kill it."""
        err_file = os.path.join(
            os.path.expanduser("~"), ".sos", "tasks", self.task_id + ".soserr")
        with open(err_file, "a") as err:
            err.write(msg + "\n")
        tf = TaskFile(self.task_id)
        tf.add_outputs()
        tf.status = "aborted"
        env.logger.warning(f"{self.task_id} ``aborted``: {msg}")
        # kill the task
        p = psutil.Process(self.pid)
        p.kill()

    def run(self):
        """Monitor loop: heartbeat, periodic resource sample, limit checks."""
        counter = 0
        start_time = time.time()
        while True:
            try:
                tf = TaskFile(self.task_id)
                if not tf.exists():
                    env.logger.warning(f"Task {self.task_id} ``removed``")
                    # the job should be removed
                    p = psutil.Process(self.pid)
                    p.kill()
                sts = tf.status
                if sts in ("completed", "failed"):
                    break
                if sts == "aborted" or not os.path.isfile(self.pulse_file):
                    env.logger.warning(f"Task {self.task_id} ``aborted``")
                    # the job should be killed
                    p = psutil.Process(self.pid)
                    p.kill()
                    if sts != "aborted":
                        # mark the task file
                        tf.status = "aborted"
                # most of the time we only touch the heartbeat ...
                if counter % self.resource_monitor_interval:
                    os.utime(self.pulse_file, None)
                else:
                    # ... but periodically take a full resource sample
                    cpu, mem, nch, ch_cpu, ch_mem = self._check()
                    if ("peak_cpu" not in self.sos_dict or
                            self.sos_dict["peak_cpu"] < cpu + ch_cpu):
                        self.sos_dict["peak_cpu"] = cpu + ch_cpu
                    if ("peak_mem" not in self.sos_dict or
                            self.sos_dict["peak_mem"] < mem + ch_mem):
                        self.sos_dict["peak_mem"] = mem + ch_mem
                    with open(self.pulse_file, "a") as pd:
                        pd.write(
                            f"{time.time()}\t{cpu:.2f}\t{mem}\t{nch}\t{ch_cpu}\t{ch_mem}\n"
                        )
                    if self.max_procs is not None and cpu + ch_cpu > self.max_procs:
                        self._exceed_resource(
                            f"Task {self.task_id} exits because of excessive use of procs (used {cpu + ch_cpu}, limit {self.max_procs})"
                        )
                    if self.max_mem is not None and mem + ch_mem > self.max_mem:
                        self._exceed_resource(
                            f"Task {self.task_id} exits because of excessive use of max_mem (used {mem + ch_mem}, limit {self.max_mem})"
                        )
                # walltime can be checked more frequently and does not have
                # to wait for the resource sampling interval
                elapsed = time.time() - start_time
                if self.max_walltime is not None and elapsed > self.max_walltime:
                    self._exceed_resource(
                        f"Task {self.task_id} exits because of excessive run time (used {format_HHMMSS(int(elapsed))}, limit {format_HHMMSS(self.max_walltime)})"
                    )
                time.sleep(self.monitor_interval)
                counter += 1
            except Exception as e:
                # the monitored process died (e.g. psutil.NoSuchProcess);
                # exit the monitor thread quietly
                env.logger.debug(
                    f"Monitor of {self.task_id} failed with message {e}")
                break
class WorkflowMonitor(threading.Thread):
    """Daemon thread that records resource usage of a running workflow.

    Appends heartbeat/resource lines to
    ``~/.sos/workflows/<workflow_id>.pulse`` and kills the workflow when
    it exceeds the configured walltime, memory, or process limits.
    """

    def __init__(
        self,
        workflow_id,
        monitor_interval,
        resource_monitor_interval,
        max_walltime=None,
        max_mem=None,
        max_procs=None,
        sos_dict=None,
    ):
        threading.Thread.__init__(self)
        self.workflow_id = workflow_id
        self.pid = os.getpid()
        self.monitor_interval = monitor_interval
        # full resource sample every N heartbeats, at least every beat
        self.resource_monitor_interval = max(
            resource_monitor_interval // monitor_interval, 1)
        self.daemon = True
        self.max_walltime = max_walltime
        if self.max_walltime is not None:
            self.max_walltime = expand_time(self.max_walltime)
        self.max_mem = max_mem
        self.max_procs = max_procs
        self.pulse_file = os.path.join(
            os.path.expanduser("~"), ".sos", "workflows",
            workflow_id + ".pulse")
        # remove previous status file, which could be readonly if the job
        # was killed
        if os.path.isfile(self.pulse_file):
            if not os.stat(self.pulse_file).st_mode & stat.S_IWUSR:
                os.chmod(self.pulse_file, stat.S_IREAD | stat.S_IWRITE)
            os.remove(self.pulse_file)
        # bug fix: avoid the shared mutable default argument `sos_dict={}`
        self.sos_dict = {} if sos_dict is None else sos_dict
        with open(self.pulse_file, "a") as pd:
            pd.write(
                "#time\tproc_cpu\tproc_mem\tchildren\tchildren_cpu\tchildren_mem\n"
            )

    def _check(self):
        """Return (cpu%, rss, n_children, children_cpu%, children_rss)."""
        current_process = psutil.Process(self.pid)
        par_cpu = current_process.cpu_percent()
        par_mem = current_process.memory_info()[0]
        ch_cpu = 0
        ch_mem = 0
        children = current_process.children(recursive=True)
        n_children = len(children)
        for child in children:
            ch_cpu += child.cpu_percent()
            ch_mem += child.memory_info()[0]
        return par_cpu, par_mem, n_children, ch_cpu, ch_mem

    def _exceed_resource(self, msg):
        """Record *msg* in the .soserr file and kill the workflow."""
        err_file = os.path.join(
            os.path.expanduser("~"), ".sos", "workflows",
            self.workflow_id + ".soserr")
        with open(err_file, "a") as err:
            err.write(msg + "\n")
        env.logger.warning(f"{self.workflow_id} ``aborted``: {msg}")
        # kill the workflow
        p = psutil.Process(self.pid)
        p.kill()

    def write(self, msg):
        """Append a timestamped comment line to the pulse file."""
        with open(self.pulse_file, "a") as pd:
            pd.write(f"#{time.time()}\t{msg}\n")

    def run(self):
        """Monitor loop: heartbeat, periodic resource sample, limit checks."""
        counter = 0
        start_time = time.time()
        while True:
            try:
                # a missing or readonly pulse file signals an abort request
                if not os.path.isfile(self.pulse_file) or not os.stat(
                        self.pulse_file).st_mode & stat.S_IWUSR:
                    env.logger.warning(
                        f"Workflow {self.workflow_id} ``aborted``")
                    # the job should be killed
                    p = psutil.Process(self.pid)
                    p.kill()
                # most of the time we only touch the heartbeat ...
                if counter % self.resource_monitor_interval:
                    os.utime(self.pulse_file, None)
                else:
                    # ... but periodically take a full resource sample
                    cpu, mem, nch, ch_cpu, ch_mem = self._check()
                    if ("peak_cpu" not in self.sos_dict or
                            self.sos_dict["peak_cpu"] < cpu + ch_cpu):
                        self.sos_dict["peak_cpu"] = cpu + ch_cpu
                    if ("peak_mem" not in self.sos_dict or
                            self.sos_dict["peak_mem"] < mem + ch_mem):
                        self.sos_dict["peak_mem"] = mem + ch_mem
                    with open(self.pulse_file, "a") as pd:
                        pd.write(
                            f"{time.time()}\t{cpu:.2f}\t{mem}\t{nch}\t{ch_cpu}\t{ch_mem}\n"
                        )
                    if self.max_procs is not None and cpu + ch_cpu > self.max_procs:
                        self._exceed_resource(
                            f"Workflow {self.workflow_id} exits because of excessive use of procs (used {cpu + ch_cpu}, limit {self.max_procs})"
                        )
                    if self.max_mem is not None and mem + ch_mem > self.max_mem:
                        self._exceed_resource(
                            f"Workflow {self.workflow_id} exits because of excessive use of max_mem (used {mem + ch_mem}, limit {self.max_mem})"
                        )
                # walltime can be checked more frequently and does not have
                # to wait for the resource sampling interval
                elapsed = time.time() - start_time
                if self.max_walltime is not None and elapsed > self.max_walltime:
                    self._exceed_resource(
                        f"Workflow {self.workflow_id} exits because of excessive run time (used {format_HHMMSS(int(elapsed))}, limit {format_HHMMSS(self.max_walltime)})"
                    )
                time.sleep(self.monitor_interval)
                counter += 1
            except Exception as e:
                # the monitored process died (e.g. psutil.NoSuchProcess);
                # exit the monitor thread quietly
                env.logger.debug(
                    f"Monitor of {self.workflow_id} failed with message {e}")
                break
def summarizeExecution(task_id, pulses, status="Unknown"):
    """Summarize a task's pulse log into a human-readable report.

    Parameters
    ----------
    task_id : str
        Identifier shown in the report.
    pulses : str
        Content of the ``.pulse`` file: comment lines start with '#',
        data lines are "time cpu mem nproc child_cpu child_mem".
    status : str
        Task status to display.

    Returns
    -------
    str
        Multi-line report with peak/average CPU and memory usage.
    """
    peak_cpu = 0
    accu_cpu = 0
    peak_mem = 0
    accu_mem = 0
    peak_nch = 0
    start_time = None
    end_time = None
    count = 0
    for line in pulses.splitlines():
        if line.startswith("#"):
            continue
        try:
            t, c, m, nch, cc, cm = line.split()
        except Exception as e:
            env.logger.warning(
                f'Unrecognized resource line "{line.strip()}": {e}')
            # bug fix: skip malformed lines instead of falling through
            # with unbound (first line) or stale (later lines) values
            continue
        if start_time is None:
            start_time = float(t)
        end_time = float(t)
        accu_cpu += float(c) + float(cc)
        accu_mem += float(m) + float(cm)
        count += 1
        if float(c) + float(cc) > peak_cpu:
            peak_cpu = float(c) + float(cc)
        if float(m) + float(cm) > peak_mem:
            peak_mem = float(m) + float(cm)
        if int(nch) > peak_nch:
            peak_nch = int(nch)
    try:
        second_elapsed = end_time - start_time
    except Exception:
        # no valid data line at all
        second_elapsed = 0
    result = [
        ("status", status),
        ("task", task_id),
        ("nproc", str(peak_nch)),
        ("start", time.strftime("%Y-%m-%d %H:%M:%S",
                                time.localtime(start_time))),
        ("end", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(end_time))),
        (
            "duration",
            ("" if second_elapsed < 86400 else
             f'{int(second_elapsed/86400)} day{"s" if second_elapsed > 172800 else ""} '
            ) + time.strftime("%H:%M:%S", time.gmtime(second_elapsed)),
        ),
        ("cpu_peak", f"{peak_cpu:.1f}"),
        ("cpu_avg", f"{0 if count == 0 else accu_cpu/count:.1f}"),
        ("mem_peak", f"{peak_mem/1024/1024:.1f}Mb"),
        ("mem_avg", f"{0 if count == 0 else accu_mem/1024/1024/count:.1f}Mb"),
    ]
    return "\n".join(f"{x:20s} {y}" for x, y in result)
|
<filename>src/sos/monitor.py
#!/usr/bin/env python3
#
# Copyright (c) <NAME> and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import os
import stat
import threading
import time
import psutil
from .tasks import TaskFile
from .utils import env, expand_time, format_HHMMSS
class TaskMonitor(threading.Thread):
    """Daemon thread that watches one task's status and resource usage.

    Appends heartbeat/resource lines to ``~/.sos/tasks/<task_id>.pulse``
    and aborts the task when it exceeds the configured walltime, memory,
    or process limits.  ``max_walltime``, ``max_mem`` and ``max_procs``
    arrive as (first, second) limit pairs; the effective limit is the
    smaller of the two defined values.
    """

    def __init__(
        self,
        task_id,
        monitor_interval,
        resource_monitor_interval,
        max_walltime=None,
        max_mem=None,
        max_procs=None,
        sos_dict=None,
    ):
        threading.Thread.__init__(self)
        self.task_id = task_id
        self.pid = os.getpid()
        self.monitor_interval = monitor_interval
        # full resource sample every N heartbeats, at least every beat
        self.resource_monitor_interval = max(
            resource_monitor_interval // monitor_interval, 1)
        self.daemon = True
        if max_walltime is None:
            self.max_walltime = None
        else:
            mwall = expand_time(max_walltime[0]) if max_walltime[0] else None
            wall = expand_time(max_walltime[1]) if max_walltime[1] else None
            if mwall is not None and wall is not None:
                # bug fix: was min(wall, wall), which ignored the first limit
                self.max_walltime = min(mwall, wall)
            elif mwall is not None:
                self.max_walltime = mwall
            else:
                self.max_walltime = wall
        #
        if max_mem is None:
            self.max_mem = None
        elif max_mem[0] is not None and max_mem[1] is not None:
            self.max_mem = min(max_mem[0], max_mem[1])
        elif max_mem[0] is not None:
            self.max_mem = max_mem[0]
        else:
            self.max_mem = max_mem[1]
        #
        if max_procs is None:
            self.max_procs = None
        elif max_procs[0] is not None and max_procs[1] is not None:
            self.max_procs = min(max_procs[0], max_procs[1])
        elif max_procs[0] is not None:
            self.max_procs = max_procs[0]
        else:
            self.max_procs = max_procs[1]
        self.pulse_file = os.path.join(
            os.path.expanduser("~"), ".sos", "tasks", task_id + ".pulse")
        # remove previous status file, which could be readonly if the job
        # was killed
        if os.path.isfile(self.pulse_file):
            if not os.access(self.pulse_file, os.W_OK):
                os.chmod(self.pulse_file, stat.S_IREAD | stat.S_IWRITE)
            os.remove(self.pulse_file)
        # bug fix: avoid the shared mutable default argument `sos_dict={}`
        self.sos_dict = {} if sos_dict is None else sos_dict
        with open(self.pulse_file, "a") as pd:
            pd.write(
                "#time\tproc_cpu\tproc_mem\tchildren\tchildren_cpu\tchildren_mem\n"
            )

    def _check(self):
        """Return (cpu%, rss, n_children, children_cpu%, children_rss)."""
        current_process = psutil.Process(self.pid)
        par_cpu = current_process.cpu_percent()
        par_mem = current_process.memory_info()[0]
        ch_cpu = 0
        ch_mem = 0
        children = current_process.children(recursive=True)
        n_children = len(children)
        for child in children:
            ch_cpu += child.cpu_percent()
            ch_mem += child.memory_info()[0]
        return par_cpu, par_mem, n_children, ch_cpu, ch_mem

    def _exceed_resource(self, msg):
        """Record *msg* in the .soserr file, mark the task aborted, kill it."""
        err_file = os.path.join(
            os.path.expanduser("~"), ".sos", "tasks", self.task_id + ".soserr")
        with open(err_file, "a") as err:
            err.write(msg + "\n")
        tf = TaskFile(self.task_id)
        tf.add_outputs()
        tf.status = "aborted"
        env.logger.warning(f"{self.task_id} ``aborted``: {msg}")
        # kill the task
        p = psutil.Process(self.pid)
        p.kill()

    def run(self):
        """Monitor loop: heartbeat, periodic resource sample, limit checks."""
        counter = 0
        start_time = time.time()
        while True:
            try:
                tf = TaskFile(self.task_id)
                if not tf.exists():
                    env.logger.warning(f"Task {self.task_id} ``removed``")
                    # the job should be removed
                    p = psutil.Process(self.pid)
                    p.kill()
                sts = tf.status
                if sts in ("completed", "failed"):
                    break
                if sts == "aborted" or not os.path.isfile(self.pulse_file):
                    env.logger.warning(f"Task {self.task_id} ``aborted``")
                    # the job should be killed
                    p = psutil.Process(self.pid)
                    p.kill()
                    if sts != "aborted":
                        # mark the task file
                        tf.status = "aborted"
                # most of the time we only touch the heartbeat ...
                if counter % self.resource_monitor_interval:
                    os.utime(self.pulse_file, None)
                else:
                    # ... but periodically take a full resource sample
                    cpu, mem, nch, ch_cpu, ch_mem = self._check()
                    if ("peak_cpu" not in self.sos_dict or
                            self.sos_dict["peak_cpu"] < cpu + ch_cpu):
                        self.sos_dict["peak_cpu"] = cpu + ch_cpu
                    if ("peak_mem" not in self.sos_dict or
                            self.sos_dict["peak_mem"] < mem + ch_mem):
                        self.sos_dict["peak_mem"] = mem + ch_mem
                    with open(self.pulse_file, "a") as pd:
                        pd.write(
                            f"{time.time()}\t{cpu:.2f}\t{mem}\t{nch}\t{ch_cpu}\t{ch_mem}\n"
                        )
                    if self.max_procs is not None and cpu + ch_cpu > self.max_procs:
                        self._exceed_resource(
                            f"Task {self.task_id} exits because of excessive use of procs (used {cpu + ch_cpu}, limit {self.max_procs})"
                        )
                    if self.max_mem is not None and mem + ch_mem > self.max_mem:
                        self._exceed_resource(
                            f"Task {self.task_id} exits because of excessive use of max_mem (used {mem + ch_mem}, limit {self.max_mem})"
                        )
                # walltime can be checked more frequently and does not have
                # to wait for the resource sampling interval
                elapsed = time.time() - start_time
                if self.max_walltime is not None and elapsed > self.max_walltime:
                    self._exceed_resource(
                        f"Task {self.task_id} exits because of excessive run time (used {format_HHMMSS(int(elapsed))}, limit {format_HHMMSS(self.max_walltime)})"
                    )
                time.sleep(self.monitor_interval)
                counter += 1
            except Exception as e:
                # the monitored process died (e.g. psutil.NoSuchProcess);
                # exit the monitor thread quietly
                env.logger.debug(
                    f"Monitor of {self.task_id} failed with message {e}")
                break
class WorkflowMonitor(threading.Thread):
    """Daemon thread that records resource usage of a running workflow.

    Appends heartbeat/resource lines to
    ``~/.sos/workflows/<workflow_id>.pulse`` and kills the workflow when
    it exceeds the configured walltime, memory, or process limits.
    """

    def __init__(
        self,
        workflow_id,
        monitor_interval,
        resource_monitor_interval,
        max_walltime=None,
        max_mem=None,
        max_procs=None,
        sos_dict=None,
    ):
        threading.Thread.__init__(self)
        self.workflow_id = workflow_id
        self.pid = os.getpid()
        self.monitor_interval = monitor_interval
        # full resource sample every N heartbeats, at least every beat
        self.resource_monitor_interval = max(
            resource_monitor_interval // monitor_interval, 1)
        self.daemon = True
        self.max_walltime = max_walltime
        if self.max_walltime is not None:
            self.max_walltime = expand_time(self.max_walltime)
        self.max_mem = max_mem
        self.max_procs = max_procs
        self.pulse_file = os.path.join(
            os.path.expanduser("~"), ".sos", "workflows",
            workflow_id + ".pulse")
        # remove previous status file, which could be readonly if the job
        # was killed
        if os.path.isfile(self.pulse_file):
            if not os.stat(self.pulse_file).st_mode & stat.S_IWUSR:
                os.chmod(self.pulse_file, stat.S_IREAD | stat.S_IWRITE)
            os.remove(self.pulse_file)
        # bug fix: avoid the shared mutable default argument `sos_dict={}`
        self.sos_dict = {} if sos_dict is None else sos_dict
        with open(self.pulse_file, "a") as pd:
            pd.write(
                "#time\tproc_cpu\tproc_mem\tchildren\tchildren_cpu\tchildren_mem\n"
            )

    def _check(self):
        """Return (cpu%, rss, n_children, children_cpu%, children_rss)."""
        current_process = psutil.Process(self.pid)
        par_cpu = current_process.cpu_percent()
        par_mem = current_process.memory_info()[0]
        ch_cpu = 0
        ch_mem = 0
        children = current_process.children(recursive=True)
        n_children = len(children)
        for child in children:
            ch_cpu += child.cpu_percent()
            ch_mem += child.memory_info()[0]
        return par_cpu, par_mem, n_children, ch_cpu, ch_mem

    def _exceed_resource(self, msg):
        """Record *msg* in the .soserr file and kill the workflow."""
        err_file = os.path.join(
            os.path.expanduser("~"), ".sos", "workflows",
            self.workflow_id + ".soserr")
        with open(err_file, "a") as err:
            err.write(msg + "\n")
        env.logger.warning(f"{self.workflow_id} ``aborted``: {msg}")
        # kill the workflow
        p = psutil.Process(self.pid)
        p.kill()

    def write(self, msg):
        """Append a timestamped comment line to the pulse file."""
        with open(self.pulse_file, "a") as pd:
            pd.write(f"#{time.time()}\t{msg}\n")

    def run(self):
        """Monitor loop: heartbeat, periodic resource sample, limit checks."""
        counter = 0
        start_time = time.time()
        while True:
            try:
                # a missing or readonly pulse file signals an abort request
                if not os.path.isfile(self.pulse_file) or not os.stat(
                        self.pulse_file).st_mode & stat.S_IWUSR:
                    env.logger.warning(
                        f"Workflow {self.workflow_id} ``aborted``")
                    # the job should be killed
                    p = psutil.Process(self.pid)
                    p.kill()
                # most of the time we only touch the heartbeat ...
                if counter % self.resource_monitor_interval:
                    os.utime(self.pulse_file, None)
                else:
                    # ... but periodically take a full resource sample
                    cpu, mem, nch, ch_cpu, ch_mem = self._check()
                    if ("peak_cpu" not in self.sos_dict or
                            self.sos_dict["peak_cpu"] < cpu + ch_cpu):
                        self.sos_dict["peak_cpu"] = cpu + ch_cpu
                    if ("peak_mem" not in self.sos_dict or
                            self.sos_dict["peak_mem"] < mem + ch_mem):
                        self.sos_dict["peak_mem"] = mem + ch_mem
                    with open(self.pulse_file, "a") as pd:
                        pd.write(
                            f"{time.time()}\t{cpu:.2f}\t{mem}\t{nch}\t{ch_cpu}\t{ch_mem}\n"
                        )
                    if self.max_procs is not None and cpu + ch_cpu > self.max_procs:
                        self._exceed_resource(
                            f"Workflow {self.workflow_id} exits because of excessive use of procs (used {cpu + ch_cpu}, limit {self.max_procs})"
                        )
                    if self.max_mem is not None and mem + ch_mem > self.max_mem:
                        self._exceed_resource(
                            f"Workflow {self.workflow_id} exits because of excessive use of max_mem (used {mem + ch_mem}, limit {self.max_mem})"
                        )
                # walltime can be checked more frequently and does not have
                # to wait for the resource sampling interval
                elapsed = time.time() - start_time
                if self.max_walltime is not None and elapsed > self.max_walltime:
                    self._exceed_resource(
                        f"Workflow {self.workflow_id} exits because of excessive run time (used {format_HHMMSS(int(elapsed))}, limit {format_HHMMSS(self.max_walltime)})"
                    )
                time.sleep(self.monitor_interval)
                counter += 1
            except Exception as e:
                # the monitored process died (e.g. psutil.NoSuchProcess);
                # exit the monitor thread quietly
                env.logger.debug(
                    f"Monitor of {self.workflow_id} failed with message {e}")
                break
def summarizeExecution(task_id, pulses, status="Unknown"):
    """Summarize a task's pulse log into a human-readable report.

    Parameters
    ----------
    task_id : str
        Identifier shown in the report.
    pulses : str
        Content of the ``.pulse`` file: comment lines start with '#',
        data lines are "time cpu mem nproc child_cpu child_mem".
    status : str
        Task status to display.

    Returns
    -------
    str
        Multi-line report with peak/average CPU and memory usage.
    """
    peak_cpu = 0
    accu_cpu = 0
    peak_mem = 0
    accu_mem = 0
    peak_nch = 0
    start_time = None
    end_time = None
    count = 0
    for line in pulses.splitlines():
        if line.startswith("#"):
            continue
        try:
            t, c, m, nch, cc, cm = line.split()
        except Exception as e:
            env.logger.warning(
                f'Unrecognized resource line "{line.strip()}": {e}')
            # bug fix: skip malformed lines instead of falling through
            # with unbound (first line) or stale (later lines) values
            continue
        if start_time is None:
            start_time = float(t)
        end_time = float(t)
        accu_cpu += float(c) + float(cc)
        accu_mem += float(m) + float(cm)
        count += 1
        if float(c) + float(cc) > peak_cpu:
            peak_cpu = float(c) + float(cc)
        if float(m) + float(cm) > peak_mem:
            peak_mem = float(m) + float(cm)
        if int(nch) > peak_nch:
            peak_nch = int(nch)
    try:
        second_elapsed = end_time - start_time
    except Exception:
        # no valid data line at all
        second_elapsed = 0
    result = [
        ("status", status),
        ("task", task_id),
        ("nproc", str(peak_nch)),
        ("start", time.strftime("%Y-%m-%d %H:%M:%S",
                                time.localtime(start_time))),
        ("end", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(end_time))),
        (
            "duration",
            ("" if second_elapsed < 86400 else
             f'{int(second_elapsed/86400)} day{"s" if second_elapsed > 172800 else ""} '
            ) + time.strftime("%H:%M:%S", time.gmtime(second_elapsed)),
        ),
        ("cpu_peak", f"{peak_cpu:.1f}"),
        ("cpu_avg", f"{0 if count == 0 else accu_cpu/count:.1f}"),
        ("mem_peak", f"{peak_mem/1024/1024:.1f}Mb"),
        ("mem_avg", f"{0 if count == 0 else accu_mem/1024/1024/count:.1f}Mb"),
    ]
    return "\n".join(f"{x:20s} {y}" for x, y in result)
|
en
| 0.853951
|
#!/usr/bin/env python3 # # Copyright (c) <NAME> and the University of Texas MD Anderson Cancer Center # Distributed under the terms of the 3-clause BSD License. # # # remove previous status file, which could be readonly if the job is killed # kill the task # the job should be removed # the job should be killed # mark the task file # most of the time we only update # walltime can be checked more frequently and does not have to wait for resource option # if the process died, exit the thread # the warning message is usually: # WARNING: psutil.NoSuchProcess no process found with pid XXXXX # env.logger.warning(str(e)) # remove previous status file, which could be readonly if the job is killed # kill the workflow # the job should be killed # most of the time we only update # walltime can be checked more frequently and does not have to wait for resource option # if the process died, exit the thread # the warning message is usually: # WARNING: psutil.NoSuchProcess no process found with pid XXXXX # env.logger.warning(str(e))
| 2.205157
| 2
|
Sketches/RJL/bittorrent/BitTorrent/launchmany-curses.py
|
sparkslabs/kamaelia_orig
| 12
|
6626474
|
<filename>Sketches/RJL/bittorrent/BitTorrent/launchmany-curses.py
#!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by <NAME>
from __future__ import division
from BitTorrent.platform import install_translation
install_translation()
DOWNLOAD_SCROLL_RATE = 1
import sys, os
from threading import Event
from time import time, localtime, strftime
from BitTorrent.obsoletepythonsupport import *
from BitTorrent.launchmanycore import LaunchMany
from BitTorrent.defaultargs import get_defaults
from BitTorrent.parseargs import parseargs, printHelp
from BitTorrent import configfile
from BitTorrent import version
from BitTorrent import BTFailure
try:
curses = import_curses()
import curses.panel
from curses.wrapper import wrapper as curses_wrapper
from signal import signal, SIGWINCH
except:
print _("Textmode GUI initialization failed, cannot proceed.")
print
print _("This download interface requires the standard Python module "
"\"curses\", which is unfortunately not available for the native "
"Windows port of Python. It is however available for the Cygwin "
"port of Python, running on all Win32 systems (www.cygwin.com).")
print
print _("You may still use \"btdownloadheadless.py\" to download.")
sys.exit(1)
exceptions = []
def fmttime(n):
    """Format *n* seconds as a localized ETA string, or None if n <= 0."""
    if n <= 0:
        return None
    total = int(n)
    minutes, seconds = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    # an absurdly large ETA means there is no real rate estimate yet
    if hours > 1000000:
        return _("connecting to peers")
    return _("ETA in %d:%02d:%02d") % (hours, minutes, seconds)
def fmtsize(n):
    """Render byte count *n* as a short human-readable size string.

    NOTE(review): Python 2 only (`long`); with `from __future__ import
    division` the final `/= 1024` produces a float, which the '%.1f'
    format expects.
    """
    n = long(n)
    unit = [' B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    i = 0
    if (n > 999):
        i = 1
        # shift by whole 1024 units until the value fits in three digits
        while i + 1 < len(unit) and (n >> 10) >= 999:
            i += 1
            n >>= 10
        n /= 1024
    if i > 0:
        # fractional digit only once we are past plain bytes
        size = '%.1f' % n + '%s' % unit[i]
    else:
        size = '%.0f' % n + '%s' % unit[i]
    return size
def ljust(s, size):
    """Clip *s* to *size* characters, then pad the right edge with spaces."""
    clipped = s[:size]
    return clipped + ' ' * (size - len(clipped))
def rjust(s, size):
    """Clip *s* to *size* characters, then pad the left edge with spaces."""
    clipped = s[:size]
    return ' ' * (size - len(clipped)) + clipped
class CursesDisplayer(object):
    """Full-screen curses UI for launchmany: torrent table, totals, log.

    Window layout (top to bottom): header row, scrolling torrent list
    (``mainwin``), totals row, status/message area.  The window tree is
    rebuilt on terminal resize (SIGWINCH) and on Ctrl-L.
    """

    def __init__(self, scrwin):
        self.messages = []          # rolling log shown in the status area
        self.scroll_pos = 0         # current scroll offset, in 1/3 rows
        self.scroll_time = 0        # last time the list auto-scrolled
        self.scrwin = scrwin
        signal(SIGWINCH, self.winch_handler)
        self.changeflag = Event()   # set while a resize is in progress
        self._remake_window()
        curses.use_default_colors()

    def winch_handler(self, signum, stackframe):
        """SIGWINCH handler: rebuild all windows for the new terminal size."""
        self.changeflag.set()
        curses.endwin()
        self.scrwin.refresh()
        self.scrwin = curses.newwin(0, 0, 0, 0)
        self._remake_window()
        self._display_messages()

    def _remake_window(self):
        """(Re)create every sub-window, panel, border and column header."""
        self.scrh, self.scrw = self.scrwin.getmaxyx()
        self.scrpan = curses.panel.new_panel(self.scrwin)
        self.mainwinh = (2*self.scrh)//3
        self.mainwinw = self.scrw - 4  # - 2 (bars) - 2 (spaces)
        self.mainwiny = 2              # + 1 (bar) + 1 (titles)
        self.mainwinx = 2              # + 1 (bar) + 1 (space)
        # + 1 to all windows so we can write at mainwinw
        self.mainwin = curses.newwin(self.mainwinh, self.mainwinw+1,
                                     self.mainwiny, self.mainwinx)
        self.mainpan = curses.panel.new_panel(self.mainwin)
        self.mainwin.scrollok(0)
        self.mainwin.nodelay(1)   # getch() must not block the update loop
        self.headerwin = curses.newwin(1, self.mainwinw+1,
                                       1, self.mainwinx)
        self.headerpan = curses.panel.new_panel(self.headerwin)
        self.headerwin.scrollok(0)
        self.totalwin = curses.newwin(1, self.mainwinw+1,
                                      self.mainwinh+1, self.mainwinx)
        self.totalpan = curses.panel.new_panel(self.totalwin)
        self.totalwin.scrollok(0)
        self.statuswinh = self.scrh-4-self.mainwinh
        self.statuswin = curses.newwin(self.statuswinh, self.mainwinw+1,
                                       self.mainwinh+3, self.mainwinx)
        self.statuspan = curses.panel.new_panel(self.statuswin)
        self.statuswin.scrollok(0)
        try:
            # border() can fail on tiny terminals; the UI still works
            self.scrwin.border(ord('|'),ord('|'),ord('-'),ord('-'),ord(' '),ord(' '),ord(' '),ord(' '))
        except:
            pass
        # right-aligned numeric column headers
        rcols = (_("Size"),_("Download"),_("Upload"))
        rwids = (8, 10, 10)
        rwid = sum(rwids)
        start = self.mainwinw - rwid
        self.headerwin.addnstr(0, 2, '#', start, curses.A_BOLD)
        self.headerwin.addnstr(0, 4, _("Filename"), start, curses.A_BOLD)
        for s,w in zip(rcols, rwids):
            st = start + max(w - len(s), 0)
            self.headerwin.addnstr(0, st, s[:w], len(s[:w]), curses.A_BOLD)
            start += w
        self.totalwin.addnstr(0, self.mainwinw - 27, _("Totals:"), 7, curses.A_BOLD)
        self._display_messages()
        curses.panel.update_panels()
        curses.doupdate()
        self.changeflag.clear()

    def _display_line(self, s, bold = False):
        """Write one line into mainwin; return True once the window is full."""
        if self.disp_end:
            return True
        line = self.disp_line
        self.disp_line += 1
        if line < 0:
            # negative lines are the partially scrolled-off top entry
            return False
        if bold:
            self.mainwin.addnstr(line, 0, s, self.mainwinw, curses.A_BOLD)
        else:
            self.mainwin.addnstr(line, 0, s, self.mainwinw)
        if self.disp_line >= self.mainwinh:
            self.disp_end = True
        return self.disp_end

    def _display_data(self, data):
        """Render the torrent table, auto-scrolling when it does not fit.

        Each torrent occupies three display lines; *data* rows are the
        14-field tuples produced by LaunchMany.
        """
        if 3*len(data) <= self.mainwinh:
            self.scroll_pos = 0
            self.scrolling = False
        elif self.scroll_time + DOWNLOAD_SCROLL_RATE < time():
            self.scroll_time = time()
            self.scroll_pos += 1
            self.scrolling = True
            if self.scroll_pos >= 3*len(data)+2:
                self.scroll_pos = 0
        i = self.scroll_pos//3
        self.disp_line = (3*i)-self.scroll_pos
        self.disp_end = False
        while not self.disp_end:
            ii = i % len(data)
            if i and not ii:
                # wrapped around the list: separate with blank lines
                if not self.scrolling:
                    break
                self._display_line('')
                if self._display_line(''):
                    break
            ( name, status, progress, peers, seeds, seedsmsg, dist,
              uprate, dnrate, upamt, dnamt, size, t, msg ) = data[ii]
            t = fmttime(t)
            if t:
                status = t
            name = ljust(name,self.mainwinw-32)
            size = rjust(fmtsize(size),8)
            uprate = rjust('%s/s' % fmtsize(uprate),10)
            dnrate = rjust('%s/s' % fmtsize(dnrate),10)
            line = "%3d %s%s%s%s" % (ii+1, name, size, dnrate, uprate)
            self._display_line(line, True)
            if peers + seeds:
                datastr = _(" (%s) %s - %s peers %s seeds %s dist copies - %s dn %s up") % (
                    progress, status, peers, seeds, dist,
                    fmtsize(dnamt), fmtsize(upamt) )
            else:
                datastr = ' '+status+' ('+progress+')'
            self._display_line(datastr)
            self._display_line(' '+ljust(msg,self.mainwinw-4))
            i += 1

    def display(self, data):
        """Redraw the whole screen; return True when the user pressed q/Q."""
        if self.changeflag.isSet():
            # a resize is being handled; skip this refresh
            return
        inchar = self.mainwin.getch()
        if inchar == 12: # ^L
            self._remake_window()
        self.mainwin.erase()
        if data:
            self._display_data(data)
        else:
            self.mainwin.addnstr( 1, self.mainwinw//2-5,
                                  _("no torrents"), 12, curses.A_BOLD )
        totalup = 0
        totaldn = 0
        for ( name, status, progress, peers, seeds, seedsmsg, dist,
              uprate, dnrate, upamt, dnamt, size, t, msg ) in data:
            totalup += uprate
            totaldn += dnrate
        totalup = '%s/s' % fmtsize(totalup)
        totaldn = '%s/s' % fmtsize(totaldn)
        self.totalwin.erase()
        self.totalwin.addnstr(0, self.mainwinw-27, _("Totals:"), 7, curses.A_BOLD)
        self.totalwin.addnstr(0, self.mainwinw-20 + (10-len(totaldn)),
                              totaldn, 10, curses.A_BOLD)
        self.totalwin.addnstr(0, self.mainwinw-10 + (10-len(totalup)),
                              totalup, 10, curses.A_BOLD)
        curses.panel.update_panels()
        curses.doupdate()
        return inchar in (ord('q'),ord('Q'))

    def message(self, s):
        """Append a timestamped line to the status log and redraw it."""
        self.messages.append(strftime('%x %X - ',localtime(time()))+s)
        self._display_messages()

    def _display_messages(self):
        """Show the most recent messages that fit in the status window."""
        self.statuswin.erase()
        winpos = 0
        for s in self.messages[-self.statuswinh:]:
            self.statuswin.addnstr(winpos, 0, s, self.mainwinw)
            winpos += 1
        curses.panel.update_panels()
        curses.doupdate()

    def exception(self, s):
        """Record a traceback for printing after curses shuts down."""
        exceptions.append(s)
        self.message(_("SYSTEM ERROR - EXCEPTION GENERATED"))
def LaunchManyWrapper(scrwin, config):
    """Entry point handed to curses_wrapper: build the UI, run LaunchMany."""
    displayer = CursesDisplayer(scrwin)
    LaunchMany(config, displayer, 'launchmany-curses')
if __name__ == '__main__':
    # NOTE(review): Python 2 only syntax below ("except BTFailure, e",
    # print statements); this script cannot run unmodified on Python 3.
    uiname = 'launchmany-curses'
    defaults = get_defaults(uiname)
    try:
        if len(sys.argv) < 2:
            # no torrent directory given: show usage and bail out
            printHelp(uiname, defaults)
            sys.exit(1)
        config, args = configfile.parse_configuration_and_args(defaults,
                                      uiname, sys.argv[1:], 0, 1)
        if args:
            config['torrent_dir'] = args[0]
            if not os.path.isdir(config['torrent_dir']):
                raise BTFailure(_("Warning: ")+args[0]+_(" is not a directory"))
    except BTFailure, e:
        print _("error: ") + str(e) + _("\nrun with no args for parameter explanations")
        sys.exit(1)
    # curses_wrapper restores the terminal even when LaunchMany raises
    curses_wrapper(LaunchManyWrapper, config)
    if exceptions:
        # surface the first exception captured by the displayer
        print _("\nEXCEPTION:")
        print exceptions[0]
|
<filename>Sketches/RJL/bittorrent/BitTorrent/launchmany-curses.py
#!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by <NAME>
from __future__ import division
from BitTorrent.platform import install_translation
install_translation()
DOWNLOAD_SCROLL_RATE = 1
import sys, os
from threading import Event
from time import time, localtime, strftime
from BitTorrent.obsoletepythonsupport import *
from BitTorrent.launchmanycore import LaunchMany
from BitTorrent.defaultargs import get_defaults
from BitTorrent.parseargs import parseargs, printHelp
from BitTorrent import configfile
from BitTorrent import version
from BitTorrent import BTFailure
try:
curses = import_curses()
import curses.panel
from curses.wrapper import wrapper as curses_wrapper
from signal import signal, SIGWINCH
except:
print _("Textmode GUI initialization failed, cannot proceed.")
print
print _("This download interface requires the standard Python module "
"\"curses\", which is unfortunately not available for the native "
"Windows port of Python. It is however available for the Cygwin "
"port of Python, running on all Win32 systems (www.cygwin.com).")
print
print _("You may still use \"btdownloadheadless.py\" to download.")
sys.exit(1)
exceptions = []
def fmttime(n):
if n <= 0:
return None
n = int(n)
m, s = divmod(n, 60)
h, m = divmod(m, 60)
if h > 1000000:
return _("connecting to peers")
return _("ETA in %d:%02d:%02d") % (h, m, s)
def fmtsize(n):
n = long(n)
unit = [' B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
i = 0
if (n > 999):
i = 1
while i + 1 < len(unit) and (n >> 10) >= 999:
i += 1
n >>= 10
n /= 1024
if i > 0:
size = '%.1f' % n + '%s' % unit[i]
else:
size = '%.0f' % n + '%s' % unit[i]
return size
def ljust(s, size):
s = s[:size]
return s + (' '*(size-len(s)))
def rjust(s, size):
s = s[:size]
return (' '*(size-len(s)))+s
class CursesDisplayer(object):
def __init__(self, scrwin):
self.messages = []
self.scroll_pos = 0
self.scroll_time = 0
self.scrwin = scrwin
signal(SIGWINCH, self.winch_handler)
self.changeflag = Event()
self._remake_window()
curses.use_default_colors()
def winch_handler(self, signum, stackframe):
self.changeflag.set()
curses.endwin()
self.scrwin.refresh()
self.scrwin = curses.newwin(0, 0, 0, 0)
self._remake_window()
self._display_messages()
def _remake_window(self):
self.scrh, self.scrw = self.scrwin.getmaxyx()
self.scrpan = curses.panel.new_panel(self.scrwin)
self.mainwinh = (2*self.scrh)//3
self.mainwinw = self.scrw - 4 # - 2 (bars) - 2 (spaces)
self.mainwiny = 2 # + 1 (bar) + 1 (titles)
self.mainwinx = 2 # + 1 (bar) + 1 (space)
# + 1 to all windows so we can write at mainwinw
self.mainwin = curses.newwin(self.mainwinh, self.mainwinw+1,
self.mainwiny, self.mainwinx)
self.mainpan = curses.panel.new_panel(self.mainwin)
self.mainwin.scrollok(0)
self.mainwin.nodelay(1)
self.headerwin = curses.newwin(1, self.mainwinw+1,
1, self.mainwinx)
self.headerpan = curses.panel.new_panel(self.headerwin)
self.headerwin.scrollok(0)
self.totalwin = curses.newwin(1, self.mainwinw+1,
self.mainwinh+1, self.mainwinx)
self.totalpan = curses.panel.new_panel(self.totalwin)
self.totalwin.scrollok(0)
self.statuswinh = self.scrh-4-self.mainwinh
self.statuswin = curses.newwin(self.statuswinh, self.mainwinw+1,
self.mainwinh+3, self.mainwinx)
self.statuspan = curses.panel.new_panel(self.statuswin)
self.statuswin.scrollok(0)
try:
self.scrwin.border(ord('|'),ord('|'),ord('-'),ord('-'),ord(' '),ord(' '),ord(' '),ord(' '))
except:
pass
rcols = (_("Size"),_("Download"),_("Upload"))
rwids = (8, 10, 10)
rwid = sum(rwids)
start = self.mainwinw - rwid
self.headerwin.addnstr(0, 2, '#', start, curses.A_BOLD)
self.headerwin.addnstr(0, 4, _("Filename"), start, curses.A_BOLD)
for s,w in zip(rcols, rwids):
st = start + max(w - len(s), 0)
self.headerwin.addnstr(0, st, s[:w], len(s[:w]), curses.A_BOLD)
start += w
self.totalwin.addnstr(0, self.mainwinw - 27, _("Totals:"), 7, curses.A_BOLD)
self._display_messages()
curses.panel.update_panels()
curses.doupdate()
self.changeflag.clear()
def _display_line(self, s, bold = False):
if self.disp_end:
return True
line = self.disp_line
self.disp_line += 1
if line < 0:
return False
if bold:
self.mainwin.addnstr(line, 0, s, self.mainwinw, curses.A_BOLD)
else:
self.mainwin.addnstr(line, 0, s, self.mainwinw)
if self.disp_line >= self.mainwinh:
self.disp_end = True
return self.disp_end
def _display_data(self, data):
if 3*len(data) <= self.mainwinh:
self.scroll_pos = 0
self.scrolling = False
elif self.scroll_time + DOWNLOAD_SCROLL_RATE < time():
self.scroll_time = time()
self.scroll_pos += 1
self.scrolling = True
if self.scroll_pos >= 3*len(data)+2:
self.scroll_pos = 0
i = self.scroll_pos//3
self.disp_line = (3*i)-self.scroll_pos
self.disp_end = False
while not self.disp_end:
ii = i % len(data)
if i and not ii:
if not self.scrolling:
break
self._display_line('')
if self._display_line(''):
break
( name, status, progress, peers, seeds, seedsmsg, dist,
uprate, dnrate, upamt, dnamt, size, t, msg ) = data[ii]
t = fmttime(t)
if t:
status = t
name = ljust(name,self.mainwinw-32)
size = rjust(fmtsize(size),8)
uprate = rjust('%s/s' % fmtsize(uprate),10)
dnrate = rjust('%s/s' % fmtsize(dnrate),10)
line = "%3d %s%s%s%s" % (ii+1, name, size, dnrate, uprate)
self._display_line(line, True)
if peers + seeds:
datastr = _(" (%s) %s - %s peers %s seeds %s dist copies - %s dn %s up") % (
progress, status, peers, seeds, dist,
fmtsize(dnamt), fmtsize(upamt) )
else:
datastr = ' '+status+' ('+progress+')'
self._display_line(datastr)
self._display_line(' '+ljust(msg,self.mainwinw-4))
i += 1
def display(self, data):
if self.changeflag.isSet():
return
inchar = self.mainwin.getch()
if inchar == 12: # ^L
self._remake_window()
self.mainwin.erase()
if data:
self._display_data(data)
else:
self.mainwin.addnstr( 1, self.mainwinw//2-5,
_("no torrents"), 12, curses.A_BOLD )
totalup = 0
totaldn = 0
for ( name, status, progress, peers, seeds, seedsmsg, dist,
uprate, dnrate, upamt, dnamt, size, t, msg ) in data:
totalup += uprate
totaldn += dnrate
totalup = '%s/s' % fmtsize(totalup)
totaldn = '%s/s' % fmtsize(totaldn)
self.totalwin.erase()
self.totalwin.addnstr(0, self.mainwinw-27, _("Totals:"), 7, curses.A_BOLD)
self.totalwin.addnstr(0, self.mainwinw-20 + (10-len(totaldn)),
totaldn, 10, curses.A_BOLD)
self.totalwin.addnstr(0, self.mainwinw-10 + (10-len(totalup)),
totalup, 10, curses.A_BOLD)
curses.panel.update_panels()
curses.doupdate()
return inchar in (ord('q'),ord('Q'))
def message(self, s):
self.messages.append(strftime('%x %X - ',localtime(time()))+s)
self._display_messages()
def _display_messages(self):
self.statuswin.erase()
winpos = 0
for s in self.messages[-self.statuswinh:]:
self.statuswin.addnstr(winpos, 0, s, self.mainwinw)
winpos += 1
curses.panel.update_panels()
curses.doupdate()
def exception(self, s):
exceptions.append(s)
self.message(_("SYSTEM ERROR - EXCEPTION GENERATED"))
def LaunchManyWrapper(scrwin, config):
LaunchMany(config, CursesDisplayer(scrwin), 'launchmany-curses')
if __name__ == '__main__':
uiname = 'launchmany-curses'
defaults = get_defaults(uiname)
try:
if len(sys.argv) < 2:
printHelp(uiname, defaults)
sys.exit(1)
config, args = configfile.parse_configuration_and_args(defaults,
uiname, sys.argv[1:], 0, 1)
if args:
config['torrent_dir'] = args[0]
if not os.path.isdir(config['torrent_dir']):
raise BTFailure(_("Warning: ")+args[0]+_(" is not a directory"))
except BTFailure, e:
print _("error: ") + str(e) + _("\nrun with no args for parameter explanations")
sys.exit(1)
curses_wrapper(LaunchManyWrapper, config)
if exceptions:
print _("\nEXCEPTION:")
print exceptions[0]
|
en
| 0.783175
|
#!/usr/bin/env python # The contents of this file are subject to the BitTorrent Open Source License # Version 1.1 (the License). You may not copy or use this file, in either # source code or executable form, except in compliance with the License. You # may obtain a copy of the License at http://www.bittorrent.com/license/. # # Software distributed under the License is distributed on an AS IS basis, # WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License # for the specific language governing rights and limitations under the # License. # Written by <NAME> # - 2 (bars) - 2 (spaces) # + 1 (bar) + 1 (titles) # + 1 (bar) + 1 (space) # + 1 to all windows so we can write at mainwinw # ^L
| 2.140018
| 2
|
models/__init__.py
|
EmanuelNk/semantic-segmentation
| 0
|
6626475
|
<filename>models/__init__.py<gh_stars>0
from .segformer import SegFormer
from .custom_cnn import CustomCNN
from .custom_vit import CustomVIT
from .ddrnet import DDRNet
__all__ = {
'customcnn': CustomCNN,
'customvit': CustomVIT,
'segformer': SegFormer,
'ddrnet': DDRNet,
}
def get_model(model_name: str, variant: str, num_classes: int):
assert model_name in __all__.keys(), f"Only {list(__all__.keys())} models are supported."
return __all__[model_name](variant, num_classes)
|
<filename>models/__init__.py<gh_stars>0
from .segformer import SegFormer
from .custom_cnn import CustomCNN
from .custom_vit import CustomVIT
from .ddrnet import DDRNet
__all__ = {
'customcnn': CustomCNN,
'customvit': CustomVIT,
'segformer': SegFormer,
'ddrnet': DDRNet,
}
def get_model(model_name: str, variant: str, num_classes: int):
assert model_name in __all__.keys(), f"Only {list(__all__.keys())} models are supported."
return __all__[model_name](variant, num_classes)
|
none
| 1
| 2.178869
| 2
|
|
pynfact/__main__.py
|
jacorbal/pynfact
| 0
|
6626476
|
<filename>pynfact/__main__.py<gh_stars>0
#!/usr/bin/env python3
# vim: set ft=python fileencoding=utf-8 tw=72:
r"""
A blog-oriented static web content generator.
* Input formats: Markdown and reStructuredText
* Output format: HTML5.
:author: \<NAME>
:email: <EMAIL>
:copyright: © 2012-2020, <NAME>
:license: MIT
"""
from pynfact.main import main
if __name__ == '__main__':
main()
|
<filename>pynfact/__main__.py<gh_stars>0
#!/usr/bin/env python3
# vim: set ft=python fileencoding=utf-8 tw=72:
r"""
A blog-oriented static web content generator.
* Input formats: Markdown and reStructuredText
* Output format: HTML5.
:author: \<NAME>
:email: <EMAIL>
:copyright: © 2012-2020, <NAME>
:license: MIT
"""
from pynfact.main import main
if __name__ == '__main__':
main()
|
en
| 0.338002
|
#!/usr/bin/env python3 # vim: set ft=python fileencoding=utf-8 tw=72: A blog-oriented static web content generator. * Input formats: Markdown and reStructuredText * Output format: HTML5. :author: \<NAME> :email: <EMAIL> :copyright: © 2012-2020, <NAME> :license: MIT
| 1.337388
| 1
|
nuitka/nodes/FrameNodes.py
|
em3ndez/Nuitka
| 1
|
6626477
|
# Copyright 2020, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frame nodes.
The frame attaches name and other frame properties to a scope, where it is
optional. For use in tracebacks, their created frame objects, potentially
cached are essential.
Otherwise, they are similar to statement sequences, so they inherit from
them.
"""
from nuitka.PythonVersions import python_version
from .CodeObjectSpecs import CodeObjectSpec
from .FutureSpecs import fromFlags
from .StatementNodes import StatementsSequence
def checkFrameStatements(value):
"""Check that frames statements list value proper.
Must not be None, must not contain None, and of course only statements
sequences, or statements, may be empty.
"""
assert value is not None
assert None not in value
for statement in value:
assert statement.isStatement() or statement.isStatementsFrame(), statement
return tuple(value)
class StatementsFrameBase(StatementsSequence):
checkers = {"statements": checkFrameStatements}
__slots__ = ("guard_mode", "code_object", "needs_frame_exception_preserve")
def __init__(self, statements, guard_mode, code_object, source_ref):
StatementsSequence.__init__(self, statements=statements, source_ref=source_ref)
# TODO: Why not have multiple classes for this.
self.guard_mode = guard_mode
self.code_object = code_object
self.needs_frame_exception_preserve = False
def isStatementsFrame(self):
return True
def getDetails(self):
result = {"code_object": self.code_object}
result.update(StatementsSequence.getDetails(self))
return result
def getDetailsForDisplay(self):
result = StatementsSequence.getDetails(self)
result.update()
result.update(self.code_object.getDetails())
return result
@classmethod
def fromXML(cls, provider, source_ref, **args):
code_object_args = {}
other_args = {}
for key, value in args.items():
if key.startswith("co_"):
code_object_args[key] = value
elif key == "code_flags":
code_object_args["future_spec"] = fromFlags(args["code_flags"])
else:
other_args[key] = value
code_object = CodeObjectSpec(**code_object_args)
return cls(code_object=code_object, source_ref=source_ref, **other_args)
def getGuardMode(self):
return self.guard_mode
def needsExceptionFramePreservation(self):
if python_version < 300:
return self.guard_mode != "generator"
else:
return True
def getVarNames(self):
return self.code_object.getVarNames()
def updateLocalNames(self):
"""For use during variable closure phase. Finalize attributes."""
provider = self.getParentVariableProvider()
if not provider.isCompiledPythonModule():
self.code_object.updateLocalNames(
[variable.getName() for variable in provider.getLocalVariables()]
)
entry_point = provider.getEntryPoint()
is_optimized = (
not entry_point.isCompiledPythonModule()
and not entry_point.isExpressionClassBody()
and not entry_point.isUnoptimized()
)
self.code_object.setFlagIsOptimizedValue(is_optimized)
new_locals = not provider.isCompiledPythonModule() and (
python_version < 340
or (not provider.isExpressionClassBody() and not provider.isUnoptimized())
)
self.code_object.setFlagNewLocalsValue(new_locals)
if (
provider.isExpressionGeneratorObjectBody()
or provider.isExpressionCoroutineObjectBody()
or provider.isExpressionAsyncgenObjectBody()
):
closure_provider = provider.getParentVariableProvider()
else:
closure_provider = provider
has_closure = (
closure_provider.isExpressionFunctionBody()
and closure_provider.getClosureVariables() != ()
and not closure_provider.isExpressionClassBody()
)
self.code_object.setFlagHasClosureValue(has_closure)
def markAsFrameExceptionPreserving(self):
self.needs_frame_exception_preserve = True
def needsFrameExceptionPreserving(self):
return self.needs_frame_exception_preserve
def getCodeObject(self):
return self.code_object
def computeStatementsSequence(self, trace_collection):
# The extraction of parts of the frame that can be moved before or after
# the frame scope, takes it toll to complexity, pylint: disable=too-many-branches
new_statements = []
statements = self.getStatements()
for count, statement in enumerate(statements):
# May be frames embedded.
if statement.isStatementsFrame():
new_statement = statement.computeStatementsSequence(
trace_collection=trace_collection
)
else:
new_statement = trace_collection.onStatement(statement=statement)
if new_statement is not None:
if (
new_statement.isStatementsSequence()
and not new_statement.isStatementsFrame()
):
new_statements.extend(new_statement.getStatements())
else:
new_statements.append(new_statement)
if (
statement is not statements[-1]
and new_statement.isStatementAborting()
):
trace_collection.signalChange(
"new_statements",
statements[count + 1].getSourceReference(),
"Removed dead statements.",
)
break
if not new_statements:
trace_collection.signalChange(
"new_statements",
self.source_ref,
"Removed empty frame object of '%s'."
% self.code_object.getCodeObjectName(),
)
return None
# If our statements changed just now, they are not immediately usable,
# so do this in two steps. Next time we can reduce the frame scope just
# as well.
if statements != tuple(new_statements):
self.setStatements(new_statements)
return self
# Determine statements inside the frame, that need not be in a frame,
# because they wouldn't raise an exception.
outside_pre = []
while new_statements and not new_statements[0].needsFrame():
outside_pre.append(new_statements[0])
del new_statements[0]
outside_post = []
while new_statements and not new_statements[-1].needsFrame():
outside_post.insert(0, new_statements[-1])
del new_statements[-1]
if outside_pre or outside_post:
from .NodeMakingHelpers import (
makeStatementsSequenceReplacementNode,
)
if new_statements:
self.setStatements(new_statements)
return makeStatementsSequenceReplacementNode(
statements=outside_pre + [self] + outside_post, node=self
)
else:
trace_collection.signalChange(
"new_statements",
self.source_ref,
"Removed useless frame object of '%s'."
% self.code_object.getCodeObjectName(),
)
return makeStatementsSequenceReplacementNode(
statements=outside_pre + outside_post, node=self
)
else:
if statements != new_statements:
self.setStatements(new_statements)
return self
class StatementsFrameModule(StatementsFrameBase):
kind = "STATEMENTS_FRAME_MODULE"
def __init__(self, statements, code_object, source_ref):
StatementsFrameBase.__init__(
self,
statements=statements,
code_object=code_object,
guard_mode="once",
source_ref=source_ref,
)
@staticmethod
def hasStructureMember():
return False
class StatementsFrameFunction(StatementsFrameBase):
kind = "STATEMENTS_FRAME_FUNCTION"
def __init__(self, statements, code_object, source_ref):
StatementsFrameBase.__init__(
self,
statements=statements,
code_object=code_object,
guard_mode="full",
source_ref=source_ref,
)
@staticmethod
def hasStructureMember():
return False
class StatementsFrameGenerator(StatementsFrameBase):
kind = "STATEMENTS_FRAME_GENERATOR"
def __init__(self, statements, code_object, source_ref):
StatementsFrameBase.__init__(
self,
statements=statements,
code_object=code_object,
guard_mode="generator",
source_ref=source_ref,
)
@staticmethod
def hasStructureMember():
return True
class StatementsFrameCoroutine(StatementsFrameBase):
kind = "STATEMENTS_FRAME_COROUTINE"
def __init__(self, statements, code_object, source_ref):
StatementsFrameBase.__init__(
self,
statements=statements,
code_object=code_object,
guard_mode="generator",
source_ref=source_ref,
)
@staticmethod
def hasStructureMember():
return True
class StatementsFrameAsyncgen(StatementsFrameBase):
kind = "STATEMENTS_FRAME_ASYNCGEN"
def __init__(self, statements, code_object, source_ref):
StatementsFrameBase.__init__(
self,
statements=statements,
code_object=code_object,
guard_mode="generator",
source_ref=source_ref,
)
@staticmethod
def hasStructureMember():
return True
|
# Copyright 2020, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frame nodes.
The frame attaches name and other frame properties to a scope, where it is
optional. For use in tracebacks, their created frame objects, potentially
cached are essential.
Otherwise, they are similar to statement sequences, so they inherit from
them.
"""
from nuitka.PythonVersions import python_version
from .CodeObjectSpecs import CodeObjectSpec
from .FutureSpecs import fromFlags
from .StatementNodes import StatementsSequence
def checkFrameStatements(value):
"""Check that frames statements list value proper.
Must not be None, must not contain None, and of course only statements
sequences, or statements, may be empty.
"""
assert value is not None
assert None not in value
for statement in value:
assert statement.isStatement() or statement.isStatementsFrame(), statement
return tuple(value)
class StatementsFrameBase(StatementsSequence):
checkers = {"statements": checkFrameStatements}
__slots__ = ("guard_mode", "code_object", "needs_frame_exception_preserve")
def __init__(self, statements, guard_mode, code_object, source_ref):
StatementsSequence.__init__(self, statements=statements, source_ref=source_ref)
# TODO: Why not have multiple classes for this.
self.guard_mode = guard_mode
self.code_object = code_object
self.needs_frame_exception_preserve = False
def isStatementsFrame(self):
return True
def getDetails(self):
result = {"code_object": self.code_object}
result.update(StatementsSequence.getDetails(self))
return result
def getDetailsForDisplay(self):
result = StatementsSequence.getDetails(self)
result.update()
result.update(self.code_object.getDetails())
return result
@classmethod
def fromXML(cls, provider, source_ref, **args):
code_object_args = {}
other_args = {}
for key, value in args.items():
if key.startswith("co_"):
code_object_args[key] = value
elif key == "code_flags":
code_object_args["future_spec"] = fromFlags(args["code_flags"])
else:
other_args[key] = value
code_object = CodeObjectSpec(**code_object_args)
return cls(code_object=code_object, source_ref=source_ref, **other_args)
def getGuardMode(self):
return self.guard_mode
def needsExceptionFramePreservation(self):
if python_version < 300:
return self.guard_mode != "generator"
else:
return True
def getVarNames(self):
return self.code_object.getVarNames()
def updateLocalNames(self):
"""For use during variable closure phase. Finalize attributes."""
provider = self.getParentVariableProvider()
if not provider.isCompiledPythonModule():
self.code_object.updateLocalNames(
[variable.getName() for variable in provider.getLocalVariables()]
)
entry_point = provider.getEntryPoint()
is_optimized = (
not entry_point.isCompiledPythonModule()
and not entry_point.isExpressionClassBody()
and not entry_point.isUnoptimized()
)
self.code_object.setFlagIsOptimizedValue(is_optimized)
new_locals = not provider.isCompiledPythonModule() and (
python_version < 340
or (not provider.isExpressionClassBody() and not provider.isUnoptimized())
)
self.code_object.setFlagNewLocalsValue(new_locals)
if (
provider.isExpressionGeneratorObjectBody()
or provider.isExpressionCoroutineObjectBody()
or provider.isExpressionAsyncgenObjectBody()
):
closure_provider = provider.getParentVariableProvider()
else:
closure_provider = provider
has_closure = (
closure_provider.isExpressionFunctionBody()
and closure_provider.getClosureVariables() != ()
and not closure_provider.isExpressionClassBody()
)
self.code_object.setFlagHasClosureValue(has_closure)
def markAsFrameExceptionPreserving(self):
self.needs_frame_exception_preserve = True
def needsFrameExceptionPreserving(self):
return self.needs_frame_exception_preserve
def getCodeObject(self):
return self.code_object
def computeStatementsSequence(self, trace_collection):
# The extraction of parts of the frame that can be moved before or after
# the frame scope, takes it toll to complexity, pylint: disable=too-many-branches
new_statements = []
statements = self.getStatements()
for count, statement in enumerate(statements):
# May be frames embedded.
if statement.isStatementsFrame():
new_statement = statement.computeStatementsSequence(
trace_collection=trace_collection
)
else:
new_statement = trace_collection.onStatement(statement=statement)
if new_statement is not None:
if (
new_statement.isStatementsSequence()
and not new_statement.isStatementsFrame()
):
new_statements.extend(new_statement.getStatements())
else:
new_statements.append(new_statement)
if (
statement is not statements[-1]
and new_statement.isStatementAborting()
):
trace_collection.signalChange(
"new_statements",
statements[count + 1].getSourceReference(),
"Removed dead statements.",
)
break
if not new_statements:
trace_collection.signalChange(
"new_statements",
self.source_ref,
"Removed empty frame object of '%s'."
% self.code_object.getCodeObjectName(),
)
return None
# If our statements changed just now, they are not immediately usable,
# so do this in two steps. Next time we can reduce the frame scope just
# as well.
if statements != tuple(new_statements):
self.setStatements(new_statements)
return self
# Determine statements inside the frame, that need not be in a frame,
# because they wouldn't raise an exception.
outside_pre = []
while new_statements and not new_statements[0].needsFrame():
outside_pre.append(new_statements[0])
del new_statements[0]
outside_post = []
while new_statements and not new_statements[-1].needsFrame():
outside_post.insert(0, new_statements[-1])
del new_statements[-1]
if outside_pre or outside_post:
from .NodeMakingHelpers import (
makeStatementsSequenceReplacementNode,
)
if new_statements:
self.setStatements(new_statements)
return makeStatementsSequenceReplacementNode(
statements=outside_pre + [self] + outside_post, node=self
)
else:
trace_collection.signalChange(
"new_statements",
self.source_ref,
"Removed useless frame object of '%s'."
% self.code_object.getCodeObjectName(),
)
return makeStatementsSequenceReplacementNode(
statements=outside_pre + outside_post, node=self
)
else:
if statements != new_statements:
self.setStatements(new_statements)
return self
class StatementsFrameModule(StatementsFrameBase):
kind = "STATEMENTS_FRAME_MODULE"
def __init__(self, statements, code_object, source_ref):
StatementsFrameBase.__init__(
self,
statements=statements,
code_object=code_object,
guard_mode="once",
source_ref=source_ref,
)
@staticmethod
def hasStructureMember():
return False
class StatementsFrameFunction(StatementsFrameBase):
kind = "STATEMENTS_FRAME_FUNCTION"
def __init__(self, statements, code_object, source_ref):
StatementsFrameBase.__init__(
self,
statements=statements,
code_object=code_object,
guard_mode="full",
source_ref=source_ref,
)
@staticmethod
def hasStructureMember():
return False
class StatementsFrameGenerator(StatementsFrameBase):
kind = "STATEMENTS_FRAME_GENERATOR"
def __init__(self, statements, code_object, source_ref):
StatementsFrameBase.__init__(
self,
statements=statements,
code_object=code_object,
guard_mode="generator",
source_ref=source_ref,
)
@staticmethod
def hasStructureMember():
return True
class StatementsFrameCoroutine(StatementsFrameBase):
kind = "STATEMENTS_FRAME_COROUTINE"
def __init__(self, statements, code_object, source_ref):
StatementsFrameBase.__init__(
self,
statements=statements,
code_object=code_object,
guard_mode="generator",
source_ref=source_ref,
)
@staticmethod
def hasStructureMember():
return True
class StatementsFrameAsyncgen(StatementsFrameBase):
kind = "STATEMENTS_FRAME_ASYNCGEN"
def __init__(self, statements, code_object, source_ref):
StatementsFrameBase.__init__(
self,
statements=statements,
code_object=code_object,
guard_mode="generator",
source_ref=source_ref,
)
@staticmethod
def hasStructureMember():
return True
|
en
| 0.907439
|
# Copyright 2020, <NAME>, mailto:<EMAIL> # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Frame nodes. The frame attaches name and other frame properties to a scope, where it is optional. For use in tracebacks, their created frame objects, potentially cached are essential. Otherwise, they are similar to statement sequences, so they inherit from them. Check that frames statements list value proper. Must not be None, must not contain None, and of course only statements sequences, or statements, may be empty. # TODO: Why not have multiple classes for this. For use during variable closure phase. Finalize attributes. # The extraction of parts of the frame that can be moved before or after # the frame scope, takes it toll to complexity, pylint: disable=too-many-branches # May be frames embedded. # If our statements changed just now, they are not immediately usable, # so do this in two steps. Next time we can reduce the frame scope just # as well. # Determine statements inside the frame, that need not be in a frame, # because they wouldn't raise an exception.
| 2.036582
| 2
|
FaceClassify/nets/ResNets.py
|
CharlesPikachu/CharlesFace
| 13
|
6626478
|
# Author:
# Charlse
# Function:
# ResNet(in torchvision):
# resnet18, resnet34, resnet50, resnet101, resnet152.
import torch
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
# Parameters Explain:
# is_fc:
# True: use fully connected layer to get feature vector.
# False: don't use fully connected layer to get feature vector.
# is_AvgPool:
# True: use AvgPool.
# False: without AvgPool.
# resnet:
# choose ResNet structure.
# embeddings_num:
# feature vector(128 or 256 is suggested.)
# num_classes:
# not None: TripletLoss + ClassifierLoss.
# None: TripletLoss.
# img_size:
# the size of input image.
# This is a complex code express, however, I think its extensibility is better.
class ResNet(nn.Module):
    """Embedding network built on torchvision ResNet backbones.

    Depending on ``is_fc`` / ``is_AvgPool`` the backbone head is replaced by
    an FC layer, a 3x3 conv, an average pool, or a combination, and the
    output of ``forward`` is an L2-normalised feature vector of size
    ``embeddings_num``.  When ``num_classes`` is given an extra classifier
    head is attached (TripletLoss + ClassifierLoss training); otherwise only
    TripletLoss applies.
    """
    def __init__(self, num_classes=None, embeddings_num=128, resnet='resnet34', pretrained=False, is_fc=True, img_size=224, is_AvgPool=True, is_softmax=False):
        """
        :param num_classes: class count for the optional classifier head
        :param embeddings_num: size of the feature vector (128/256 suggested)
        :param resnet: backbone name: resnet18/34/50/101/152
        :param pretrained: load ImageNet-pretrained backbone weights
        :param is_fc: use a fully connected layer to produce the embedding
        :param img_size: side length of the (square) input image
        :param is_AvgPool: apply average pooling before the embedding
        :param is_softmax: make forward_classifier return log-softmax
        """
        super(ResNet, self).__init__()
        if num_classes:
            assert isinstance(num_classes, int)
            assert isinstance(embeddings_num, int)
            assert isinstance(img_size, int)
        self.is_fc = is_fc
        self.embeddings_num = embeddings_num
        self.num_classes = num_classes
        self.img_size = img_size
        self.is_AvgPool = is_AvgPool
        self.pretrained = pretrained
        self.resnet = resnet
        self.is_softmax = is_softmax
        # Fix: torch.zeros(None, ...) raises a TypeError, so the original
        # crashed whenever num_classes was left at its default None.  Centers
        # are only needed by get_center_loss (classifier training).
        self.centers = torch.zeros(num_classes, embeddings_num).type(torch.FloatTensor) if num_classes else None
        # All torchvision ResNets downsample the input by a factor of 32.
        self.stride = 32
        if resnet not in ('resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'):
            print('[Error]:<ResNet in ResNets.py> ResNet structure unsupported...')
            exit(-1)
        constructor = getattr(models, resnet)
        # layer4 output channels: 512 for basic-block nets, 2048 for bottleneck nets.
        channels = 512 if resnet in ('resnet18', 'resnet34') else 2048
        feat_size = math.ceil(img_size / self.stride)  # spatial size after layer4
        if is_fc and is_AvgPool:
            self.model = constructor(pretrained=pretrained)
            self.model.avgpool = nn.AvgPool2d(kernel_size=feat_size, stride=1, padding=0, ceil_mode=False, count_include_pad=True)
            if num_classes:
                self.model.classifier = nn.Linear(embeddings_num, num_classes)
        elif is_fc and (not is_AvgPool):
            temp_model = constructor(pretrained=pretrained)
            temp_model.fc = nn.Linear(channels * feat_size * feat_size, embeddings_num)
            self.model = self._unroll(temp_model, temp_model.fc)
        elif (not is_fc) and (not is_AvgPool):
            # This head combination was never wired up; the original code
            # also aborted here (everything after the exit was unreachable
            # and has been removed).
            print('[INFO]:is_fc=False and is_AvgPool=False unsupported now...')
            exit(-1)
        else:  # (not is_fc) and is_AvgPool
            temp_model = constructor(pretrained=pretrained)
            temp_model.conv2 = nn.Conv2d(channels, embeddings_num, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
            temp_model.avgpool = nn.AvgPool2d(kernel_size=feat_size, stride=1, padding=0, ceil_mode=False, count_include_pad=True)
            self.model = self._unroll(temp_model, temp_model.conv2, extra=temp_model.avgpool)
    def _unroll(self, temp_model, head, extra=None):
        """Unroll a torchvision ResNet into an nn.ModuleList:
        stem + layer1..4 + head [+ extra] [+ classifier]."""
        modules = nn.ModuleList()
        for m in (temp_model.conv1, temp_model.bn1, temp_model.relu, temp_model.maxpool,
                  temp_model.layer1, temp_model.layer2, temp_model.layer3, temp_model.layer4,
                  head):
            modules.append(m)
        if extra is not None:
            modules.append(extra)
        if self.num_classes:
            temp_model.classifier = nn.Linear(self.embeddings_num, self.num_classes)
            modules.append(temp_model.classifier)
        return modules
    def l2_norm(self, input_):
        """L2-normalise each row of ``input_`` (adds 1e-10 for stability)."""
        input_size = input_.size()
        temp = torch.pow(input_, 2)
        normp = torch.sum(temp, 1).add_(1e-10)
        norm = torch.sqrt(normp)
        _output = torch.div(input_, norm.view(-1, 1).expand_as(input_))
        return _output.view(input_size)
    def forward(self, x):
        """Compute the L2-normalised embedding for a batch ``x``.

        The embedding is also cached on ``self.features`` for
        forward_classifier / get_center_loss.
        """
        if self.is_fc and self.is_AvgPool:
            x = self.model(x)
        elif self.is_fc and (not self.is_AvgPool):
            # Run the backbone, flatten, then the embedding FC layer; the
            # optional classifier (last module) is skipped here.
            tail = 2 if self.num_classes else 1
            for m in list(self.model)[:-tail]:
                x = m(x)
            x = x.view(x.size(0), -1)
            x = self.model[-tail](x)
        else:
            # Conv-embedding variants: run everything except the optional
            # classifier, then flatten.
            mods = list(self.model)[:-1] if self.num_classes else list(self.model)
            for m in mods:
                x = m(x)
            x = x.view(x.size(0), -1)
        self.features = self.l2_norm(x)
        return self.features
    def forward_classifier(self, x):
        """Run forward() and then the classifier head (requires num_classes)."""
        if self.num_classes:
            x = self.forward(x)
            if self.is_fc and self.is_AvgPool:
                x = self.model.classifier(x)
            elif self.is_fc and (not self.is_AvgPool):
                x = self.model[-1](x)
            # No classifier is applied for the conv-embedding variants
            # (matches the original behaviour: those branches were pass).
        else:
            print('[Error]:<ResNet in ResNets.py> argument (num_classes) should be assigned...')
            exit(-1)
        return x if not self.is_softmax else F.log_softmax(x, dim=1)
    def get_center_loss(self, target, alpha):
        """Compute the center loss for the cached ``self.features`` and
        update the running class centers in place.

        :param target: LongTensor of class labels, shape (batch,)
        :param alpha: learning rate for the center updates
        :return: (center_loss, centers)
        """
        batch_size = target.size(0)
        features_dim = self.features.size(1)
        target_expand = target.view(batch_size, 1).expand(batch_size, features_dim)
        centers_var = Variable(self.centers)
        if torch.cuda.is_available():
            centers_batch = centers_var.gather(0, target_expand).cuda()
        else:
            centers_batch = centers_var.gather(0, target_expand)
        criterion = nn.MSELoss()
        center_loss = criterion(self.features, centers_batch)
        diff = centers_batch - self.features
        # Scale each sample's update by how often its class appears in the batch.
        unique_label, unique_reverse, unique_count = np.unique(target.cpu().data.numpy(), return_inverse=True, return_counts=True)
        appear_times = torch.from_numpy(unique_count).gather(0, torch.from_numpy(unique_reverse))
        appear_times_expand = appear_times.view(-1, 1).expand(batch_size, features_dim).type(torch.FloatTensor)
        diff_cpu = alpha * (diff.cpu().data / appear_times_expand.add(1e-6))
        for i in range(batch_size):
            self.centers[target.data[i]] -= diff_cpu[i].type(self.centers.type())
        return center_loss, self.centers
    def load_weights(self, checkpoint=None, with_linear=False, is_center=False):
        """Load weights from a checkpoint dict.

        :param checkpoint: dict with 'state_dict' (and optionally 'centers')
        :param with_linear: also load the classifier head when its class
            count matches; otherwise classifier weights are skipped
        :param is_center: also restore the center-loss centers
        """
        if checkpoint is None:
            return
        state_dict = checkpoint['state_dict']
        if with_linear:
            # Fix: the original compared against the undefined global
            # ``num_classes`` instead of ``self.num_classes``.
            if list(state_dict.values())[-1].size(0) == self.num_classes:
                self.load_state_dict(state_dict)
            else:
                own_state = self.state_dict()
                for name, param in state_dict.items():
                    if "classifier" not in name:
                        # Fix: ``Parameter`` was never imported; use nn.Parameter.
                        if isinstance(param, nn.Parameter):
                            param = param.data
                        own_state[name].copy_(param)
        else:
            own_state = self.state_dict()
            for name, param in state_dict.items():
                if ("classifier" not in name) and ("fc" not in name):
                    if isinstance(param, nn.Parameter):
                        param = param.data
                    own_state[name].copy_(param)
        if is_center:
            self.centers = checkpoint['centers']
    def weights_init(self, m):
        """Placeholder for custom weight initialisation (intentionally a no-op)."""
        pass
|
# Author:
# Charlse
# Function:
# ResNet(in torchvision):
# resnet18, resnet34, resnet50, resnet101, resnet152.
import torch
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
# Parameters Explain:
# is_fc:
# True: use fully connected layer to get feature vector.
# False: don't use fully connected layer to get feature vector.
# is_AvgPool:
# True: use AvgPool.
# False: without AvgPool.
# resnet:
# choose ResNet structure.
# embeddings_num:
# feature vector(128 or 256 is suggested.)
# num_classes:
# not None: TripletLoss + ClassifierLoss.
# None: TripletLoss.
# img_size:
# the size of input image.
# This is a complex code express, however, I think its extensibility is better.
class ResNet(nn.Module):
    """Embedding network built on torchvision ResNet backbones.

    Depending on ``is_fc`` / ``is_AvgPool`` the backbone head is replaced by
    an FC layer, a 3x3 conv, an average pool, or a combination, and the
    output of ``forward`` is an L2-normalised feature vector of size
    ``embeddings_num``.  When ``num_classes`` is given an extra classifier
    head is attached (TripletLoss + ClassifierLoss training); otherwise only
    TripletLoss applies.
    """
    def __init__(self, num_classes=None, embeddings_num=128, resnet='resnet34', pretrained=False, is_fc=True, img_size=224, is_AvgPool=True, is_softmax=False):
        """
        :param num_classes: class count for the optional classifier head
        :param embeddings_num: size of the feature vector (128/256 suggested)
        :param resnet: backbone name: resnet18/34/50/101/152
        :param pretrained: load ImageNet-pretrained backbone weights
        :param is_fc: use a fully connected layer to produce the embedding
        :param img_size: side length of the (square) input image
        :param is_AvgPool: apply average pooling before the embedding
        :param is_softmax: make forward_classifier return log-softmax
        """
        super(ResNet, self).__init__()
        if num_classes:
            assert isinstance(num_classes, int)
            assert isinstance(embeddings_num, int)
            assert isinstance(img_size, int)
        self.is_fc = is_fc
        self.embeddings_num = embeddings_num
        self.num_classes = num_classes
        self.img_size = img_size
        self.is_AvgPool = is_AvgPool
        self.pretrained = pretrained
        self.resnet = resnet
        self.is_softmax = is_softmax
        # Fix: torch.zeros(None, ...) raises a TypeError, so the original
        # crashed whenever num_classes was left at its default None.  Centers
        # are only needed by get_center_loss (classifier training).
        self.centers = torch.zeros(num_classes, embeddings_num).type(torch.FloatTensor) if num_classes else None
        # All torchvision ResNets downsample the input by a factor of 32.
        self.stride = 32
        if resnet not in ('resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'):
            print('[Error]:<ResNet in ResNets.py> ResNet structure unsupported...')
            exit(-1)
        constructor = getattr(models, resnet)
        # layer4 output channels: 512 for basic-block nets, 2048 for bottleneck nets.
        channels = 512 if resnet in ('resnet18', 'resnet34') else 2048
        feat_size = math.ceil(img_size / self.stride)  # spatial size after layer4
        if is_fc and is_AvgPool:
            self.model = constructor(pretrained=pretrained)
            self.model.avgpool = nn.AvgPool2d(kernel_size=feat_size, stride=1, padding=0, ceil_mode=False, count_include_pad=True)
            if num_classes:
                self.model.classifier = nn.Linear(embeddings_num, num_classes)
        elif is_fc and (not is_AvgPool):
            temp_model = constructor(pretrained=pretrained)
            temp_model.fc = nn.Linear(channels * feat_size * feat_size, embeddings_num)
            self.model = self._unroll(temp_model, temp_model.fc)
        elif (not is_fc) and (not is_AvgPool):
            # This head combination was never wired up; the original code
            # also aborted here (everything after the exit was unreachable
            # and has been removed).
            print('[INFO]:is_fc=False and is_AvgPool=False unsupported now...')
            exit(-1)
        else:  # (not is_fc) and is_AvgPool
            temp_model = constructor(pretrained=pretrained)
            temp_model.conv2 = nn.Conv2d(channels, embeddings_num, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
            temp_model.avgpool = nn.AvgPool2d(kernel_size=feat_size, stride=1, padding=0, ceil_mode=False, count_include_pad=True)
            self.model = self._unroll(temp_model, temp_model.conv2, extra=temp_model.avgpool)
    def _unroll(self, temp_model, head, extra=None):
        """Unroll a torchvision ResNet into an nn.ModuleList:
        stem + layer1..4 + head [+ extra] [+ classifier]."""
        modules = nn.ModuleList()
        for m in (temp_model.conv1, temp_model.bn1, temp_model.relu, temp_model.maxpool,
                  temp_model.layer1, temp_model.layer2, temp_model.layer3, temp_model.layer4,
                  head):
            modules.append(m)
        if extra is not None:
            modules.append(extra)
        if self.num_classes:
            temp_model.classifier = nn.Linear(self.embeddings_num, self.num_classes)
            modules.append(temp_model.classifier)
        return modules
    def l2_norm(self, input_):
        """L2-normalise each row of ``input_`` (adds 1e-10 for stability)."""
        input_size = input_.size()
        temp = torch.pow(input_, 2)
        normp = torch.sum(temp, 1).add_(1e-10)
        norm = torch.sqrt(normp)
        _output = torch.div(input_, norm.view(-1, 1).expand_as(input_))
        return _output.view(input_size)
    def forward(self, x):
        """Compute the L2-normalised embedding for a batch ``x``.

        The embedding is also cached on ``self.features`` for
        forward_classifier / get_center_loss.
        """
        if self.is_fc and self.is_AvgPool:
            x = self.model(x)
        elif self.is_fc and (not self.is_AvgPool):
            # Run the backbone, flatten, then the embedding FC layer; the
            # optional classifier (last module) is skipped here.
            tail = 2 if self.num_classes else 1
            for m in list(self.model)[:-tail]:
                x = m(x)
            x = x.view(x.size(0), -1)
            x = self.model[-tail](x)
        else:
            # Conv-embedding variants: run everything except the optional
            # classifier, then flatten.
            mods = list(self.model)[:-1] if self.num_classes else list(self.model)
            for m in mods:
                x = m(x)
            x = x.view(x.size(0), -1)
        self.features = self.l2_norm(x)
        return self.features
    def forward_classifier(self, x):
        """Run forward() and then the classifier head (requires num_classes)."""
        if self.num_classes:
            x = self.forward(x)
            if self.is_fc and self.is_AvgPool:
                x = self.model.classifier(x)
            elif self.is_fc and (not self.is_AvgPool):
                x = self.model[-1](x)
            # No classifier is applied for the conv-embedding variants
            # (matches the original behaviour: those branches were pass).
        else:
            print('[Error]:<ResNet in ResNets.py> argument (num_classes) should be assigned...')
            exit(-1)
        return x if not self.is_softmax else F.log_softmax(x, dim=1)
    def get_center_loss(self, target, alpha):
        """Compute the center loss for the cached ``self.features`` and
        update the running class centers in place.

        :param target: LongTensor of class labels, shape (batch,)
        :param alpha: learning rate for the center updates
        :return: (center_loss, centers)
        """
        batch_size = target.size(0)
        features_dim = self.features.size(1)
        target_expand = target.view(batch_size, 1).expand(batch_size, features_dim)
        centers_var = Variable(self.centers)
        if torch.cuda.is_available():
            centers_batch = centers_var.gather(0, target_expand).cuda()
        else:
            centers_batch = centers_var.gather(0, target_expand)
        criterion = nn.MSELoss()
        center_loss = criterion(self.features, centers_batch)
        diff = centers_batch - self.features
        # Scale each sample's update by how often its class appears in the batch.
        unique_label, unique_reverse, unique_count = np.unique(target.cpu().data.numpy(), return_inverse=True, return_counts=True)
        appear_times = torch.from_numpy(unique_count).gather(0, torch.from_numpy(unique_reverse))
        appear_times_expand = appear_times.view(-1, 1).expand(batch_size, features_dim).type(torch.FloatTensor)
        diff_cpu = alpha * (diff.cpu().data / appear_times_expand.add(1e-6))
        for i in range(batch_size):
            self.centers[target.data[i]] -= diff_cpu[i].type(self.centers.type())
        return center_loss, self.centers
    def load_weights(self, checkpoint=None, with_linear=False, is_center=False):
        """Load weights from a checkpoint dict.

        :param checkpoint: dict with 'state_dict' (and optionally 'centers')
        :param with_linear: also load the classifier head when its class
            count matches; otherwise classifier weights are skipped
        :param is_center: also restore the center-loss centers
        """
        if checkpoint is None:
            return
        state_dict = checkpoint['state_dict']
        if with_linear:
            # Fix: the original compared against the undefined global
            # ``num_classes`` instead of ``self.num_classes``.
            if list(state_dict.values())[-1].size(0) == self.num_classes:
                self.load_state_dict(state_dict)
            else:
                own_state = self.state_dict()
                for name, param in state_dict.items():
                    if "classifier" not in name:
                        # Fix: ``Parameter`` was never imported; use nn.Parameter.
                        if isinstance(param, nn.Parameter):
                            param = param.data
                        own_state[name].copy_(param)
        else:
            own_state = self.state_dict()
            for name, param in state_dict.items():
                if ("classifier" not in name) and ("fc" not in name):
                    if isinstance(param, nn.Parameter):
                        param = param.data
                    own_state[name].copy_(param)
        if is_center:
            self.centers = checkpoint['centers']
    def weights_init(self, m):
        """Placeholder for custom weight initialisation (intentionally a no-op)."""
        pass
|
en
| 0.821441
|
# Author: # Charlse # Function: # ResNet(in torchvision): # resnet18, resnet34, resnet50, resnet101, resnet152. # Parameters Explain: # is_fc: # True: use fully connected layer to get feature vector. # False: don't use fully connected layer to get feature vector. # is_AvgPool: # True: use AvgPool. # False: without AvgPool. # resnet: # choose ResNet structure. # embeddings_num: # feature vector(128 or 256 is suggested.) # num_classes: # not None: TripletLoss + ClassifierLoss. # None: TripletLoss. # img_size: # the size of input image. # This is a complex code express, however, I think its extensibility is better. # strides of all models is 32.
| 2.648444
| 3
|
filip/ocb/orion.py
|
N5GEH/n5geh.tools.FiLiP
| 1
|
6626479
|
<reponame>N5GEH/n5geh.tools.FiLiP
import json
import requests
from filip.utils import request_utils as requtils
from filip.testing import test
import math
import logging
logger = logging.getLogger('ocb')
# ToDo Query params
# Class is only implemented for backward compatibility
class Attribute:
    """Represents a single NGSI entity attribute (name, value, type).

    Kept only for backward compatibility with older callers.
    """

    def __init__(self, name, value, attr_type):
        self.name = name
        self.value = value
        self.type = attr_type

    def get_json(self):
        """Return the attribute as an NGSI v2 value/type mapping."""
        return {'value': self.value, 'type': str(self.type)}
class Entity:
    """Wraps an NGSI v2 entity dictionary (id, type and attributes)."""

    def __init__(self, entity_dict: dict):
        """
        :param entity_dict: dictionary describing the entity, e.g.
            {"id": "Sensor002",
             "type": "temperature_Sensor",
             "Temperature": {"value": 17, "type": "Number"},
             "Status": {"value": "Ok", "type": "Text"}}
        """
        self.id = entity_dict["id"]
        self.type = entity_dict["type"]
        self.entity_dict = entity_dict
        self._PROTECTED = ['id', 'type']

    def __repr__(self):
        """Return the object representation (id, type and attributes)."""
        etype = self.entity_dict["type"]
        attrs = self.get_attributes_key_values()
        return '"entity_id": "{}", "type": "{}", "attributes": "{}" '.format(self.id, etype, attrs)

    def get_json(self):
        """Serialise the entity dictionary to a JSON string for posting."""
        return json.dumps(self.entity_dict)

    def add_attribute(self, attr_dict: dict):
        """Merge further attributes into the entity, e.g.
        {"Temperature": {"value": 17, "type": "Number"}}."""
        self.entity_dict.update(attr_dict)

    def delete_attribute(self, attr_name: str):
        """Remove the named attribute from the entity (KeyError if absent)."""
        del self.entity_dict[attr_name]

    def get_attributes(self):
        """Return the list of attribute names (id/type excluded)."""
        return [key for key in self.entity_dict if key not in self._PROTECTED]

    def get_attributes_key_values(self):
        """Return a mapping attribute-name -> value/type payload
        (id/type excluded)."""
        return {k: v for k, v in self.entity_dict.items() if k not in self._PROTECTED}
class Relationship:
    """
    Class implements the concept of FIWARE Entity Relationships
    (a parent/object entity referenced from a child/subject entity).
    """

    def __init__(self, ref_object: Entity, subject: Entity, predicate: str = None):
        """
        :param ref_object: the parent / object of the relationship
        :param subject: the child / subject of the relationship
        :param predicate: currently not supported -> describes the
            relationship between object and subject
        """
        self.object = ref_object
        self.subject = subject
        self.predicate = predicate
        self.add_ref()

    def add_ref(self):
        """Attach the generated "ref<Type>" attribute to the subject entity."""
        self.subject.add_attribute(json.loads(self.get_ref()))

    def get_ref(self):
        """Build the NGSI "ref<Type>" attribute for the subject and return
        it as a JSON string."""
        ref_key = "ref" + str(self.object.type)
        return json.dumps({ref_key: {"type": "Relationship",
                                     "value": self.object.id}})

    def get_json(self):
        """Return a JSON string describing the subject together with its
        ref attribute, ready to be pushed to Orion."""
        base = {"id": self.subject.id, "type": self.subject.type}
        return json.dumps({**base, **json.loads(self.get_ref())})
class FiwareService:
    """
    Define entity service paths which are supported by the Orion Context
    Broker to support hierarchical scopes:
    https://fiware-orion.readthedocs.io/en/master/user/service_path/index.html
    """

    def __init__(self, name: str, path: str):
        self.name = name
        self.path = path

    def update(self, name: str, path: str):
        """Overwrites the fiware_service and service path of config.json"""
        self.name, self.path = name, path

    def get_header(self) -> object:
        """Return the fiware-service / fiware-servicepath request headers."""
        return {"fiware-service": self.name,
                "fiware-servicepath": self.path}

    def __repr__(self):
        return f'"fiware-service": "{self.name}", "fiware-servicepath": "{self.path}"'
class Orion:
"""
Implementation of Orion Context Broker functionalities, such as creating
entities and subscriptions; retrieving, updating and deleting data.
Further documentation:
https://fiware-orion.readthedocs.io/en/master/
"""
    def __init__(self, config, session=None):
        """
        Initialise the Orion client from a filip configuration object.

        :param config: configuration object exposing ``fiware`` and ``orion``
            mappings (service, service_path, host, port, url)
        :param session: optional ``requests.Session`` to reuse; a new one is
            created when omitted
        """
        self.session = session or requests.Session()
        self.fiware_service = FiwareService(name=config.fiware.get('service'),
                                            path=config.fiware.get(
                                                'service_path'))
        # host/port/url default to None when absent from the config.
        self.host = config.orion.get("host", None)
        self.port = config.orion.get("port", None)
        self.url = config.orion.get("url", None)
    def set_service(self, fiware_service):
        """Overwrite the active service name and path with the values taken
        from the given FiwareService instance."""
        self.fiware_service.update(fiware_service.name, fiware_service.path)
    def get_service(self):
        """Return the currently active FiwareService instance."""
        return self.fiware_service
def get_header(self, additional_headers: dict = None):
"""combine fiware_service header (if set) and additional headers"""
if self.fiware_service == None:
return additional_headers
elif additional_headers == None:
return self.fiware_service.get_header()
else:
headers = {**self.fiware_service.get_header(), **additional_headers}
return headers
    def log_switch(self, level, response):
        """
        Log ``response`` at the requested level.

        :param level: "INFO", "ERROR" or "WARNING"; anything else falls
            back to INFO
        :param response: the message to log
        :return: None
        """
        # The dict lookup selects the logging function and calls it right
        # away; all of these return None, so ``switch_dict`` is always None.
        # NOTE(review): this uses the root ``logging`` module functions, not
        # the module-level ``logger`` — presumably unintentional; confirm
        # before changing, as it alters which handlers receive the record.
        switch_dict = {"INFO": logging.info,
                       "ERROR": logging.error,
                       "WARNING": logging.warning
                       }.get(level, logging.info)(msg=response)
    def test_connection(self):
        """
        Function utilises the test.test_connection() function to check the
        availability of a given url and service (probes the broker's
        ``/version`` endpoint).

        :return: Boolean, True if the service is reachable, False if not.
        """
        boolean = test.test_connection(client=self.session,
                                       url=self.url+'/version',
                                       service_name=__name__)
        return boolean
def post_entity(self, entity: Entity, force_update: bool = True):
"""
Function registers an Object with the Orion Context Broker, if it allready exists it can be automatically updated
if the overwrite bool is True
First a post request with the entity is tried, if the response code is 422 the entity is
uncrossable, as it already exists there are two options, either overwrite it, if the attribute have changed (e.g. at least one new/
new values) (update = True) or leave it the way it is (update=False)
:param entity: An entity object
:param update: If the response.status_code is 422, whether the old entity should be updated or not
:return:
"""
url = self.url + '/v2/entities'
headers = self.get_header(requtils.HEADER_CONTENT_JSON)
data = entity.get_json()
response = self.session.post(url, headers=headers, data=data)
ok, retstr = requtils.response_ok(response)
if not ok:
if (response.status_code == 422) & (force_update is True):
url += f"{entity.id}/attrs"
response = self.session.post(url, headers=headers, data=data)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, response)
    def post_json(self, json=None, entity=None, params=None):
        """
        Register a JSON payload (or an entity's JSON) with the Orion Context
        Broker under /v2/entities.

        NOTE(review): the parameter name ``json`` shadows the module-level
        ``json`` import inside this method; renaming it would break keyword
        callers, so it is kept as-is.

        :param json: a pre-serialised JSON payload (takes precedence)
        :param entity: an Orion entity, from which the json_data can be
            obtained when ``json`` is None
        :param params: optional value for the ``options`` query parameter
        :return: None; failures are only logged
        """
        headers = self.get_header(requtils.HEADER_CONTENT_JSON)
        if json is not None:
            json_data = json
        elif (json is None) and (entity is not None):
            json_data = entity.get_json()
        else:
            # Neither payload given: log and fall through with an empty body.
            logger.error(f"Please provide a valid data format.")
            json_data = ""
        if params is None:
            url = self.url + '/v2/entities'
            response = self.session.post(url, headers=headers, data=json_data)
        else:
            url = self.url + "/v2/entities" + "?options=" + params
            response = self.session.post(url, headers=headers, data=json_data)
        ok, retstr = requtils.response_ok(response)
        if not ok:
            level, retstr = requtils.logging_switch(response)
            self.log_switch(level, retstr)
    def post_json_key_value(self, json_data=None, params="keyValues"):
        """
        Post a payload to /v2/entities using the given ``options`` query
        parameter (defaults to the keyValues simplified representation).

        :param json_data: pre-serialised JSON payload to send
        :param params: value for the ``options`` query parameter
        :return: None; failures are only logged
        """
        headers = self.get_header(requtils.HEADER_CONTENT_JSON)
        url = self.url + "/v2/entities" + "?options=" + params
        response = self.session.post(url, headers=headers, data=json_data)
        ok, retstr = requtils.response_ok(response)
        if not ok:
            level, retstr = requtils.logging_switch(response)
            self.log_switch(level, retstr)
    def post_relationship(self, json_data=None):
        """
        Function can be used to post a one to many or one to one relationship.

        :param json_data: Relationship Data obtained from the Relationship
            class, e.g.:
            {"id": "urn:ngsi-ld:Shelf:unit001", "type": "Shelf",
             "refStore": {"type": "Relationship", "value": "urn:ngsi-ld:Store:001"}}
            Can be a one to one or a one to many relationship
        :return: None; failures are only logged
        """
        url = self.url + '/v2/op/update'
        headers = self.get_header(requtils.HEADER_CONTENT_JSON)
        # Action type append required,
        # Will overwrite existing entities if they exist whereas
        # the entities attribute holds an array of entities we wish to update.
        payload = {"actionType": "APPEND",
                   "entities": [json.loads(json_data)]}
        data = json.dumps(payload)
        response = self.session.post(url=url, data=data, headers=headers)
        ok, retstr = requtils.response_ok(response)
        if not ok:
            level, retstr = requtils.logging_switch(response)
            self.log_switch(level, retstr)
def get_subjects(self, object_entity_name: str, object_entity_type: str, subject_type=None):
"""
Function gets the JSON for child / subject entities for a parent / object entity.
:param object_entity_name: The parent / object entity name
:param object_entity_type: The type of the parent / object entity
:param subject_type: optional parameter, if added only those child / subject entities are returned that match the type
:return: JSON containing the child / subject information
"""
url = self.url + '/v2/entities/?q=ref' + object_entity_type + '==' + object_entity_name + '&options=count'
if subject_type is not None:
url = url + '&attrs=type&type=' + subject_type
headers = self.get_header()
response = self.session.get(url=url, headers=headers, )
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
else:
return response.text
def get_objects(self, subject_entity_name: str, subject_entity_type: str, object_type=None):
"""
Function returns a List of all objects associated to a subject. If object type is not None,
only those are returned, that match the object type.
:param subject_entity_name: The child / subject entity name
:param subject_entity_type: The type of the child / subject entity
:param object_type:
:return: List containing all associated objects
"""
url = self.url + '/v2/entities/' + subject_entity_name + '/?type=' + subject_entity_type + '&options=keyValues'
if object_type is not None:
url = url + '&attrs=ref' + object_type
headers = self.get_header()
response = self.session.get(url=url, headers=headers)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
else:
return response.text
    def get_associated(self, name: str, entity_type: str, associated_type=None):
        """
        Function returns all associated data for a given entity name and type
        :param name: name of the entity
        :param entity_type: type of the entity
        :param associated_type: if only associated data of one type should be returned, this parameter has to be the type
        :return: A dictionary, containing the data of the entity, a key "subjects" and "objects" that contain each a list
                with the reflective data
        """
        data_dict = {}
        # Both helpers return the raw JSON text on success and None on an
        # HTTP error (the error is logged inside the helpers).
        associated_objects = self.get_objects(subject_entity_name=name, subject_entity_type=entity_type,
                                              object_type=associated_type)
        associated_subjects = self.get_subjects(object_entity_name=name, object_entity_type=entity_type,
                                                subject_type=associated_type)
        if associated_subjects is not None:
            data_dict["subjects"] = json.loads(associated_subjects)
        if associated_objects is not None:
            object_json = json.loads(associated_objects)
            data_dict["objects"] = []
            # The broker may answer with either a list of entities or a
            # single entity object; handle both shapes.
            if isinstance(object_json, list):
                for associated_object in object_json:
                    entity_name = associated_object["id"]
                    object_data = json.loads(self.get_entity(entity_name=entity_name))
                    data_dict["objects"].append(object_data)
            else:
                entity_name = object_json["id"]
                object_data = json.loads(self.get_entity(entity_name=entity_name))
                data_dict["objects"].append(object_data)
        # NOTE(review): get_entity returns None on HTTP errors, in which
        # case json.loads(None) raises TypeError — confirm upstream handling.
        entity_dict = json.loads(self.get_entity(entity_name=name))
        whole_dict = {**entity_dict, **data_dict}
        return whole_dict
def get_entity(self, entity_name, entity_params=None):
url = self.url + '/v2/entities/' + entity_name
headers = self.get_header()
if entity_params is None:
response = self.session.get(url, headers=headers)
else:
response = self.session.get(url, headers=headers,
params=entity_params)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
else:
return response.text
    def get_all_entities(self, parameter=None, parameter_value=None, limit=100):
        """
        Retrieve all entities, optionally filtered by a single query
        parameter.

        :param parameter: name of a query parameter; must be given together
            with parameter_value
        :param parameter_value: value for the query parameter
        :param limit: page size threshold; above it pagination is used
        :return: JSON text of the response for small result sets, a list of
            parsed dicts when pagination kicks in, or None on error.
            NOTE(review): the mixed return types (str vs list) look
            accidental — confirm callers handle both.
        """
        url = self.url + '/v2/entities?options=count'
        headers = self.get_header()
        if parameter is None and parameter_value is None:
            response = self.session.get(url, headers=headers)
            # "Fiware-Total-Count" is present because of options=count
            sub_count = float(response.headers["Fiware-Total-Count"])
            if sub_count >= limit:
                response = self.get_pagination(url=url, headers=headers,
                                               limit=limit, count=sub_count)
                return response
        elif parameter is not None and parameter_value is not None:
            parameters = {'{}'.format(parameter): '{}'.format(parameter_value)}
            response = self.session.get(url, headers=headers, params=parameters)
            sub_count = float(response.headers["Fiware-Total-Count"])
            if sub_count >= limit:
                response = self.get_pagination(url=url, headers=headers,
                                               limit=limit, count=sub_count, params=parameters)
                return response
        else:
            # exactly one of the two filter arguments was supplied
            logger.error("Getting all entities: both function parameters have to be 'not null'")
            return None
        ok, retstr = requtils.response_ok(response)
        if not ok:
            level, retstr = requtils.logging_switch(response)
            self.log_switch(level, retstr)
        else:
            return response.text
def get_entities_list(self, limit=100) -> list:
url = self.url + '/v2/entities?options=count'
header = self.get_header(requtils.HEADER_ACCEPT_JSON)
response = self.session.get(url, headers=header)
sub_count = float(response.headers["Fiware-Total-Count"])
if sub_count >= limit:
response = self.get_pagination(url=url, headers=header,
limit=limit, count=sub_count)
return response
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
return None
else:
json_object = json.loads(response.text)
entities = []
for key in json_object:
entities.append(key["id"])
return entities
def get_entity_keyValues(self, entity_name):
parameter = {'{}'.format('options'): '{}'.format('keyValues')}
return self.get_entity(entity_name, parameter)
def get_entity_attribute_json(self, entity_name, attr_name):
url = self.url + '/v2/entities/' + entity_name + '/attrs/' + attr_name
response = self.session.get(url, headers=self.get_header())
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
else:
return response.text
def get_entity_attribute_value(self, entity_name, attr_name):
url = self.url + '/v2/entities/' + entity_name + '/attrs/' \
+ attr_name + '/value'
response = self.session.get(url, headers=self.get_header())
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
else:
return response.text
def get_entity_attribute_list(self, entity_name, attr_name_list):
"""
Function returns all types and values for a list of attributes of an entity,
given in attr_name_list
:param entity_name: Entity_name - Name of the entity to obtain the values from
:param attr_name_list: List of attributes - e.g. ["Temperature"]
:return: List, containin all attribute dictionaries e.g.: [{"value":33,"type":"Float"}]
"""
attributes = ','.join(attr_name_list)
parameters = {'{}'.format('options'): '{}'.format('values'),
'{}'.format('attrs'): attributes}
return self.get_entity(entity_name, parameters)
def update_entity(self, entity):
url = self.url + '/v2/entities/' + entity.id + '/attrs'
payload = entity.get_attributes_key_values()
headers = self.get_header(requtils.HEADER_CONTENT_JSON)
data = json.dumps(payload)
response = self.session.patch(url, headers=headers, data=data)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
def update_attribute(self, entity_name, attr_name, attr_value):
url = self.url + '/v2/entities/' + entity_name + '/attrs/' \
+ attr_name + '/value'
headers = self.get_header(requtils.HEADER_CONTENT_PLAIN)
data = json.dumps(attr_value)
response = self.session.put(url, headers=headers, data=data)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
def add_attribute(self, entity: Entity = None, entity_name: str = None, attr_dict: dict = None):
# POST /v2/entities/{id}/attrs?options=append
"""
This function adds attributes to the Entity in the Context Broker. This can be done in two ways,
either by first adding the attribute to the Entity object or by directly sending it from a dict/JSON
The Function first compares it with existing attributes, and only adds (so updates) the ones not previoulsy existing
:param entity: The updated Entity Instance
:param entity_name: The Entity name which should be updated
:param attribute_dict: A JSON/Dict containing the attributes
:return: -
"""
if isinstance(entity, Entity):
attributes = entity.get_attributes()
entity_name = entity.id
else:
attributes = attr_dict
entity_name = entity_name
existing_attributes = self.get_attributes(entity_name)
new_attributes = {k: v for (k, v) in attributes.items() if k not in existing_attributes}
url = self.url + '/v2/entities/' + entity_name + '/attrs?options=append'
headers = self.get_header(requtils.HEADER_CONTENT_JSON)
data = json.dumps(new_attributes)
response = self.session.post(url, data=data, headers=headers)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
def get_attributes(self, entity_name: str):
"""
For a given entity this function returns all attribute names
:param entity_name: the name of the entity
:return: attributes - list of attributes
"""
entity_json = json.loads(self.get_entity(entity_name))
attributes = [k for k in entity_json.keys() if k not in ["id", "type"]]
return attributes
def remove_attributes(self, entity_name):
url = self.url + '/v2/entities/' + entity_name + '/attrs'
response = self.session.put(url)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, response)
    def create_subscription(self, subscription_body, check_duplicate: bool = True):
        """
        Create a new subscription in the Context Broker.

        :param subscription_body: JSON string with the subscription payload
        :param check_duplicate: whether to look for a similar existing
            subscription before posting
        :return: the id of the created subscription, or None on error
        """
        url = self.url + '/v2/subscriptions'
        headers = self.get_header(requtils.HEADER_CONTENT_JSON)
        if check_duplicate is True:
            exists = self.check_duplicate_subscription(subscription_body)
            if exists is True:
                # NOTE(review): a duplicate is only logged; the subscription
                # is still posted below — confirm this is intended.
                logger.info(f"A similar subscription already exists.")
        response = self.session.post(url, headers=headers, data=subscription_body)
        if response.headers is None:
            return
        ok, retstr = requtils.response_ok(response)
        if not ok:
            level, retstr = requtils.logging_switch(response)
            self.log_switch(level, retstr)
            return None
        else:
            # the broker returns the new subscription id in the Location
            # header, e.g. /v2/subscriptions/<id>
            location = response.headers.get('Location')
            addr_parts = location.split('/')
            subscription_id = addr_parts.pop()
            return subscription_id
def get_subscription_list(self, limit=100):
url = self.url + '/v2/subscriptions?options=count'
response = self.session.get(url, headers=self.get_header())
sub_count = float(response.headers["Fiware-Total-Count"])
if sub_count >= limit:
response = self.get_pagination(url=url, headers=self.get_header(),
limit=limit, count=sub_count)
return response
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
return None
json_object = json.loads(response.text)
subscriptions = []
for key in json_object:
subscriptions.append(key["id"])
return subscriptions
def get_subscription(self, subscription_id: str):
url = self.url + '/v2/subscriptions/' + subscription_id
response = self.session.get(url, headers=self.get_header())
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
def delete_subscription(self, subscription_id: str):
url = self.url + '/v2/subscriptions/' + subscription_id
response = self.session.delete(url, headers=self.get_header())
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
def get_pagination(self, url: str, headers: dict,
count: float, limit: int = 20, params=None):
"""
NGSIv2 implements a pagination mechanism in order to help clients to retrieve large sets of resources.
This mechanism works for all listing operations in the API (e.g. GET /v2/entities, GET /v2/subscriptions, POST /v2/op/query, etc.).
This function helps getting datasets that are larger than the limit for the different GET operations.
:param url: Information about the url, obtained from the orginal function e.g. : http://localhost:1026/v2/subscriptions?limit=20&options=count
:param headers: The headers from the original function, e.g: {'fiware-service': 'crio', 'fiware-servicepath': '/measurements'}
:param count: Number of total elements, obtained by adding "&options=count" to the url,
included in the response headers
:param limit: Limit, obtained from the oringal function, default is 20
:return: A list, containing all objects in a dictionary
"""
all_data = []
# due to a math, one of the both numbers has to be a float,
# otherwise the value is rounded down not up
no_intervals = int(math.ceil(count / limit))
for i in range(0, no_intervals):
offset = str(i * limit)
if i == 0:
url = url
else:
url = url + '&offset=' + offset
if params == (not None):
response = self.session.get(url=url, headers=headers, params=params)
else:
response = self.session.get(url=url, headers=headers)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
else:
for resp_dict in json.loads(response.text):
all_data.append(resp_dict)
return all_data
def check_duplicate_subscription(self, subscription_body, limit: int = 20):
"""
Function compares the subject of the subscription body, on whether a subscription
already exists for a device / entity.
:param subscription_body: the body of the new subscripton
:param limit: pagination parameter, to set the number of subscriptions bodies the get request should grab
:return: exists, boolean -> True, if such a subscription allready exists
"""
exists = False
subscription_subject = json.loads(subscription_body)["subject"]
# Exact keys depend on subscription body
try:
subscription_url = json.loads(subscription_body)["notification"]["httpCustom"]["url"]
except KeyError:
subscription_url = json.loads(subscription_body)["notification"]["http"]["url"]
# If the number of subscriptions is larger then the limit, paginations methods have to be used
url = self.url + '/v2/subscriptions?limit=' + str(limit) + '&options=count'
response = self.session.get(url, headers=self.get_header())
sub_count = float(response.headers["Fiware-Total-Count"])
response = json.loads(response.text)
if sub_count >= limit:
response = self.get_pagination(url=url, headers=self.get_header(),
limit=limit, count=sub_count)
response = json.loads(response)
for existing_subscription in response:
# check whether the exact same subscriptions already exists
if existing_subscription["subject"] == subscription_subject:
exists = True
break
try:
existing_url = existing_subscription["notification"]["http"]["url"]
except KeyError:
existing_url = existing_subscription["notification"]["httpCustom"]["url"]
# check whether both subscriptions notify to the same path
if existing_url != subscription_url:
continue
else:
# iterate over all entities included in the subscription object
for entity in subscription_subject["entities"]:
if 'type' in entity.keys():
subscription_type = entity['type']
else:
subscription_type = entity['typePattern']
if 'id' in entity.keys():
subscription_id = entity['id']
else:
subscription_id = entity["idPattern"]
# iterate over all entities included in the exisiting subscriptions
for existing_entity in existing_subscription["subject"]["entities"]:
if "type" in entity.keys():
type_existing = entity["type"]
else:
type_existing = entity["typePattern"]
if "id" in entity.keys():
id_existing = entity["id"]
else:
id_existing = entity["idPattern"]
# as the ID field is non optional, it has to match
# check whether the type match
# if the type field is empty, they match all types
if (type_existing == subscription_type) or\
('*' in subscription_type) or \
('*' in type_existing)\
or (type_existing == "") or (
subscription_type == ""):
# check if on of the subscriptions is a pattern, or if they both refer to the same id
# Get the attrs first, to avoid code duplication
# last thing to compare is the attributes
# Assumption -> position is the same as the entities list
# i == j
i = subscription_subject["entities"].index(entity)
j = existing_subscription["subject"]["entities"].index(existing_entity)
try:
subscription_attrs = subscription_subject["condition"]["attrs"][i]
except (KeyError, IndexError):
subscription_attrs = []
try:
existing_attrs = existing_subscription["subject"]["condition"]["attrs"][j]
except (KeyError, IndexError):
existing_attrs = []
if (".*" in subscription_id) or ('.*' in id_existing) or (subscription_id == id_existing):
# Attributes have to match, or the have to be an empty array
if (subscription_attrs == existing_attrs) or (subscription_attrs == []) or (existing_attrs == []):
exists = True
# if they do not match completely or subscribe to all ids they have to match up to a certain position
elif ("*" in subscription_id) or ('*' in id_existing):
regex_existing = id_existing.find('*')
regex_subscription = subscription_id.find('*')
# slice the strings to compare
if (id_existing[:regex_existing] in subscription_id) or (subscription_id[:regex_subscription] in id_existing) or \
(id_existing[regex_existing:] in subscription_id) or (subscription_id[regex_subscription:] in id_existing):
if (subscription_attrs == existing_attrs) or (subscription_attrs == []) or (existing_attrs == []):
exists = True
else:
continue
else:
continue
else:
continue
else:
continue
else:
continue
return exists
def delete_all_subscriptions(self):
subscriptions = self.get_subscription_list()
for sub_id in subscriptions:
self.delete_subscription(sub_id)
def post_cmd_v1(self, entity_id: str, entity_type: str,
cmd_name: str, cmd_value: str):
url = self.url + '/v1/updateContext'
payload = {"updateAction": "UPDATE",
"contextElements": [
{"id": entity_id,
"type": entity_type,
"isPattern": "false",
"attributes": [
{"name": cmd_name,
"type": "command",
"value": cmd_value
}]
}]
}
headers = self.get_header(requtils.HEADER_CONTENT_JSON)
data = json.dumps(payload)
response = self.session.post(url, headers=headers, data=data)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
def delete(self, entity_id: str):
url = self.url + '/v2/entities/' + entity_id
response = self.session.delete(url, headers=self.get_header())
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
def delete_all_entities(self):
entities = self.get_entities_list()
for entity_id in entities:
self.delete(entity_id)
# --- duplicated module copy below (stray '|' separator artifact removed) ---
import json
import requests
from filip.utils import request_utils as requtils
from filip.testing import test
import math
import logging
logger = logging.getLogger('ocb')
# ToDo Query params
# Class is only implemented for backward compatibility
class Attribute:
    """A single entity attribute: a name plus a typed value."""
    def __init__(self, name, value, attr_type):
        self.name = name          # attribute name
        self.value = value        # attribute value
        self.type = attr_type     # NGSI attribute type, e.g. "Number"
    def get_json(self):
        """Return the NGSI representation of the attribute (value + type)."""
        return {'value': self.value, 'type': str(self.type)}
class Entity:
    """Thin wrapper around an NGSI entity dictionary."""
    def __init__(self, entity_dict: dict):
        """
        :param entity_dict: dictionary describing the entity, e.g.:
            {"id": "Sensor002",
             "type": "temperature_Sensor",
             "Temperature": {"value": 17, "type": "Number"},
             "Status": {"value": "Ok", "type": "Text"}}
        """
        self.id = entity_dict["id"]
        self.type = entity_dict["type"]
        self.entity_dict = entity_dict
        # keys that are metadata, not attributes
        self._PROTECTED = ['id', 'type']
    def __repr__(self):
        """Return a short string representation of the entity."""
        attrs = self.get_attributes_key_values()
        etype = self.entity_dict["type"]
        return f'"entity_id": "{self.id}", "type": "{etype}", "attributes": "{attrs}" '
    def get_json(self):
        """Serialize the entity dictionary to a JSON string."""
        return json.dumps(self.entity_dict)
    def add_attribute(self, attr_dict: dict):
        """
        Merge additional attributes into the entity.

        :param attr_dict: dictionary describing attributes, e.g.
            {"Temperature": {"value": 17, "type": "Number"}}
        """
        self.entity_dict.update(attr_dict)
    def delete_attribute(self, attr_name: str):
        """
        Remove an attribute from the entity.

        :param attr_name: name of the attribute to remove
        """
        del self.entity_dict[attr_name]
    def get_attributes(self):
        """Return the names of all non-protected attributes."""
        return [name for name in self.entity_dict if name not in self._PROTECTED]
    def get_attributes_key_values(self):
        """Return a dict of all non-protected attributes with their values."""
        return {name: val for name, val in self.entity_dict.items()
                if name not in self._PROTECTED}
class Relationship:
    """Models a FIWARE entity relationship between an object and a subject."""
    def __init__(self, ref_object: Entity, subject: Entity, predicate: str = None):
        """
        :param ref_object: the parent / object of the relationship
        :param subject: the child / subject of the relationship
        :param predicate: currently unsupported; would describe the relation
            between object and subject
        """
        self.object = ref_object
        self.subject = subject
        self.predicate = predicate
        self.add_ref()
    def add_ref(self):
        """Attach the reference attribute to the subject entity."""
        self.subject.add_attribute(json.loads(self.get_ref()))
    def get_ref(self):
        """
        Build the NGSI "ref<Type>" attribute pointing at the object entity.

        :return: JSON string of the reference attribute
        """
        ref_key = "ref" + str(self.object.type)
        return json.dumps({ref_key: {"type": "Relationship",
                                     "value": self.object.id}})
    def get_json(self):
        """
        Return the JSON describing the relationship, ready to be pushed
        to Orion.
        """
        base = {"id": self.subject.id, "type": self.subject.type}
        return json.dumps({**base, **json.loads(self.get_ref())})
class FiwareService:
    """
    Entity service paths supported by the Orion Context Broker for
    hierarchical scopes:
    https://fiware-orion.readthedocs.io/en/master/user/service_path/index.html
    """
    def __init__(self, name: str, path: str):
        self.name = name    # fiware-service header value
        self.path = path    # fiware-servicepath header value
    def update(self, name: str, path: str):
        """Overwrites the fiware_service and service path of config.json"""
        self.name = name
        self.path = path
    def get_header(self) -> object:
        """Return the fiware-service / fiware-servicepath header dict."""
        return {"fiware-service": self.name,
                "fiware-servicepath": self.path}
    def __repr__(self):
        return f'"fiware-service": "{self.name}", "fiware-servicepath": "{self.path}"'
class Orion:
"""
Implementation of Orion Context Broker functionalities, such as creating
entities and subscriptions; retrieving, updating and deleting data.
Further documentation:
https://fiware-orion.readthedocs.io/en/master/
"""
    def __init__(self, config, session=None):
        """
        :param config: configuration object exposing ``fiware`` and ``orion``
            dict-like sections (service name / service path; host, port, url)
        :param session: optional requests.Session; a new one is created
            when omitted
        """
        self.session = session or requests.Session()
        self.fiware_service = FiwareService(name=config.fiware.get('service'),
                                            path=config.fiware.get(
                                                'service_path'))
        # host/port/url may be absent from the config; they default to None
        self.host = config.orion.get("host", None)
        self.port = config.orion.get("port", None)
        self.url = config.orion.get("url", None)
def set_service(self, fiware_service):
"""Overwrites the fiware_service and service path of config.json"""
self.fiware_service.update(fiware_service.name, fiware_service.path)
    def get_service(self):
        """Return the currently configured FiwareService instance."""
        return self.fiware_service
def get_header(self, additional_headers: dict = None):
"""combine fiware_service header (if set) and additional headers"""
if self.fiware_service == None:
return additional_headers
elif additional_headers == None:
return self.fiware_service.get_header()
else:
headers = {**self.fiware_service.get_header(), **additional_headers}
return headers
def log_switch(self, level, response):
"""
Function returns the required log_level with the repsonse
:param level: The logging level that should be returned
:param response: The message for the logger
:return:
"""
switch_dict = {"INFO": logging.info,
"ERROR": logging.error,
"WARNING": logging.warning
}.get(level, logging.info)(msg=response)
    def test_connection(self) -> bool:
        """
        Function utilises the test.test_connection() function to check the availability of a given url and service.
        :return: Boolean, True if the service is reachable, False if not.
        """
        # Delegates to the project-level helper and probes <url>/version
        # under this module's name.
        boolean = test.test_connection(client=self.session,
                                       url=self.url+'/version',
                                       service_name=__name__)
        return boolean
def post_entity(self, entity: Entity, force_update: bool = True):
"""
Function registers an Object with the Orion Context Broker, if it allready exists it can be automatically updated
if the overwrite bool is True
First a post request with the entity is tried, if the response code is 422 the entity is
uncrossable, as it already exists there are two options, either overwrite it, if the attribute have changed (e.g. at least one new/
new values) (update = True) or leave it the way it is (update=False)
:param entity: An entity object
:param update: If the response.status_code is 422, whether the old entity should be updated or not
:return:
"""
url = self.url + '/v2/entities'
headers = self.get_header(requtils.HEADER_CONTENT_JSON)
data = entity.get_json()
response = self.session.post(url, headers=headers, data=data)
ok, retstr = requtils.response_ok(response)
if not ok:
if (response.status_code == 422) & (force_update is True):
url += f"{entity.id}/attrs"
response = self.session.post(url, headers=headers, data=data)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, response)
    def post_json(self, json=None, entity=None, params=None):
        """
        Post a JSON payload (or an Entity's JSON) to /v2/entities.

        :param json: the JSON payload to post.
            NOTE(review): this parameter shadows the stdlib ``json`` module
            inside this method — harmless here since the module is not used
            in this body, but worth renaming.
        :param entity: an Orion Entity from which the payload is obtained
            when ``json`` is not given
        :param params: optional value for the "options" query parameter
        :return: None; failures are logged via log_switch
        """
        headers = self.get_header(requtils.HEADER_CONTENT_JSON)
        if json is not None:
            json_data = json
        elif (json is None) and (entity is not None):
            json_data = entity.get_json()
        else:
            # neither a payload nor an entity was supplied
            logger.error(f"Please provide a valid data format.")
            json_data = ""
        if params is None:
            url = self.url + '/v2/entities'
            response = self.session.post(url, headers=headers, data=json_data)
        else:
            url = self.url + "/v2/entities" + "?options=" + params
            response = self.session.post(url, headers=headers, data=json_data)
        ok, retstr = requtils.response_ok(response)
        if not ok:
            level, retstr = requtils.logging_switch(response)
            self.log_switch(level, retstr)
def post_json_key_value(self, json_data=None, params="keyValues"):
"""
:param json_data:
:param params:
:return:
"""
headers = self.get_header(requtils.HEADER_CONTENT_JSON)
url = self.url + "/v2/entities" + "?options=" + params
response = self.session.post(url, headers=headers, data=json_data)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
def post_relationship(self, json_data=None):
"""
Function can be used to post a one to many or one to one relationship.
:param json_data: Relationship Data obtained from the Relationship class. e.g. :
{"id": "urn:ngsi-ld:Shelf:unit001", "type": "Shelf",
"refStore": {"type": "Relationship", "value": "urn:ngsi-ld:Store:001"}}
Can be a one to one or a one to many relationship
"""
url = self.url + '/v2/op/update'
headers = self.get_header(requtils.HEADER_CONTENT_JSON)
# Action type append required,
# Will overwrite existing entities if they exist whereas
# the entities attribute holds an array of entities we wish to update.
payload = {"actionType": "APPEND",
"entities": [json.loads(json_data)]}
data = json.dumps(payload)
response = self.session.post(url=url, data=data, headers=headers)
ok, retstr = requtils.response_ok(response)
if not ok:
level, retstr = requtils.logging_switch(response)
self.log_switch(level, retstr)
    def get_subjects(self, object_entity_name: str, object_entity_type: str, subject_type=None):
        """
        Function gets the JSON for child / subject entities for a parent / object entity.
        :param object_entity_name: The parent / object entity name
        :param object_entity_type: The type of the parent / object entity
        :param subject_type: optional parameter, if added only those child / subject entities are returned that match the type
        :return: JSON containing the child / subject information on success,
            otherwise None (the error is logged)
        """
        # query by the "ref<Type>" back-reference attribute
        url = self.url + '/v2/entities/?q=ref' + object_entity_type + '==' + object_entity_name + '&options=count'
        if subject_type is not None:
            url = url + '&attrs=type&type=' + subject_type
        headers = self.get_header()
        response = self.session.get(url=url, headers=headers, )
        ok, retstr = requtils.response_ok(response)
        if not ok:
            level, retstr = requtils.logging_switch(response)
            self.log_switch(level, retstr)
        else:
            return response.text
    def get_objects(self, subject_entity_name: str, subject_entity_type: str, object_type=None):
        """
        Function returns a List of all objects associated to a subject. If object type is not None,
        only those are returned, that match the object type.
        :param subject_entity_name: The child / subject entity name
        :param subject_entity_type: The type of the child / subject entity
        :param object_type: optional type filter for the "ref<type>" attribute
        :return: JSON text containing the associated objects on success,
            otherwise None (the error is logged)
        """
        url = self.url + '/v2/entities/' + subject_entity_name + '/?type=' + subject_entity_type + '&options=keyValues'
        if object_type is not None:
            url = url + '&attrs=ref' + object_type
        headers = self.get_header()
        response = self.session.get(url=url, headers=headers)
        ok, retstr = requtils.response_ok(response)
        if not ok:
            level, retstr = requtils.logging_switch(response)
            self.log_switch(level, retstr)
        else:
            return response.text
    def get_associated(self, name: str, entity_type: str, associated_type=None):
        """
        Function returns all associated data for a given entity name and type
        :param name: name of the entity
        :param entity_type: type of the entity
        :param associated_type: if only associated data of one type should be returned, this parameter has to be the type
        :return: A dictionary, containing the data of the entity, a key "subjects" and "objects" that contain each a list
                with the reflective data
        """
        data_dict = {}
        # Both helpers return the raw JSON text on success and None on an
        # HTTP error (the error is logged inside the helpers).
        associated_objects = self.get_objects(subject_entity_name=name, subject_entity_type=entity_type,
                                              object_type=associated_type)
        associated_subjects = self.get_subjects(object_entity_name=name, object_entity_type=entity_type,
                                                subject_type=associated_type)
        if associated_subjects is not None:
            data_dict["subjects"] = json.loads(associated_subjects)
        if associated_objects is not None:
            object_json = json.loads(associated_objects)
            data_dict["objects"] = []
            # The broker may answer with either a list of entities or a
            # single entity object; handle both shapes.
            if isinstance(object_json, list):
                for associated_object in object_json:
                    entity_name = associated_object["id"]
                    object_data = json.loads(self.get_entity(entity_name=entity_name))
                    data_dict["objects"].append(object_data)
            else:
                entity_name = object_json["id"]
                object_data = json.loads(self.get_entity(entity_name=entity_name))
                data_dict["objects"].append(object_data)
        # NOTE(review): get_entity returns None on HTTP errors, in which
        # case json.loads(None) raises TypeError — confirm upstream handling.
        entity_dict = json.loads(self.get_entity(entity_name=name))
        whole_dict = {**entity_dict, **data_dict}
        return whole_dict
    def get_entity(self, entity_name, entity_params=None):
        """
        Retrieve a single entity from the Context Broker.

        :param entity_name: id of the entity to fetch
        :param entity_params: optional dict of query parameters
        :return: response body (JSON text) on success, otherwise None
            (the error is logged)
        """
        url = self.url + '/v2/entities/' + entity_name
        headers = self.get_header()
        if entity_params is None:
            response = self.session.get(url, headers=headers)
        else:
            response = self.session.get(url, headers=headers,
                                        params=entity_params)
        ok, retstr = requtils.response_ok(response)
        if not ok:
            level, retstr = requtils.logging_switch(response)
            self.log_switch(level, retstr)
        else:
            return response.text
def get_all_entities(self, parameter=None, parameter_value=None, limit=100):
    """
    Retrieve all entities from the context broker, optionally filtered by a
    single query parameter (e.g. 'type' or a 'q' expression). Both filter
    arguments must be given together or not at all.
    :param parameter: name of the query parameter, or None for no filter
    :param parameter_value: value of the query parameter, or None for no filter
    :param limit: page size; when more entities exist, pagination is used
    :return: response text, a list of entity dicts (paginated case),
             or None on error / inconsistent arguments
    """
    url = self.url + '/v2/entities?options=count'
    headers = self.get_header()
    params = None
    if parameter is None and parameter_value is None:
        response = self.session.get(url, headers=headers)
    elif parameter is not None and parameter_value is not None:
        params = {parameter: parameter_value}
        response = self.session.get(url, headers=headers, params=params)
    else:
        # exactly one of the two filter arguments was supplied
        logger.error("Getting all entities: both function parameters have to be 'not null'")
        return None
    sub_count = float(response.headers["Fiware-Total-Count"])
    if sub_count >= limit:
        # result set larger than one page -> collect all pages
        return self.get_pagination(url=url, headers=headers,
                                   limit=limit, count=sub_count, params=params)
    ok, retstr = requtils.response_ok(response)
    if not ok:
        # BUGFIX: this error handling was unreachable before, because every
        # branch returned the raw Response object without checking status
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
        return None
    return response.text
def get_entities_list(self, limit=100) -> list:
    """
    Return the ids of all entities known to the context broker.
    :param limit: page size used when the total count exceeds one page
    :return: list of entity id strings, or None on error
    """
    url = self.url + '/v2/entities?options=count'
    header = self.get_header(requtils.HEADER_ACCEPT_JSON)
    response = self.session.get(url, headers=header)
    sub_count = float(response.headers["Fiware-Total-Count"])
    if sub_count >= limit:
        # BUGFIX: the paginated branch used to return full entity dicts,
        # while the branch below returns plain ids; callers such as
        # delete_all_entities() rely on getting ids in both cases
        entity_dicts = self.get_pagination(url=url, headers=header,
                                           limit=limit, count=sub_count)
        return [entity["id"] for entity in entity_dicts]
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
        return None
    json_object = json.loads(response.text)
    return [entity["id"] for entity in json_object]
def get_entity_keyValues(self, entity_name):
    """
    Fetch an entity in the simplified keyValues representation.
    :param entity_name: id of the entity
    :return: response text of the entity in keyValues format
    """
    return self.get_entity(entity_name, {'options': 'keyValues'})
def get_entity_attribute_json(self, entity_name, attr_name):
    """
    Fetch a single attribute (type and value) of an entity as JSON text.
    :param entity_name: id of the entity
    :param attr_name: name of the attribute
    :return: response text on success, otherwise None
    """
    url = '{}/v2/entities/{}/attrs/{}'.format(self.url, entity_name, attr_name)
    response = self.session.get(url, headers=self.get_header())
    ok, retstr = requtils.response_ok(response)
    if ok:
        return response.text
    level, retstr = requtils.logging_switch(response)
    self.log_switch(level, retstr)
def get_entity_attribute_value(self, entity_name, attr_name):
    """
    Fetch only the raw value of a single entity attribute.
    :param entity_name: id of the entity
    :param attr_name: name of the attribute
    :return: response text on success, otherwise None
    """
    url = '{}/v2/entities/{}/attrs/{}/value'.format(self.url, entity_name, attr_name)
    response = self.session.get(url, headers=self.get_header())
    ok, retstr = requtils.response_ok(response)
    if ok:
        return response.text
    level, retstr = requtils.logging_switch(response)
    self.log_switch(level, retstr)
def get_entity_attribute_list(self, entity_name, attr_name_list):
    """
    Return the plain values for a list of attributes of an entity, e.g. for
    attr_name_list ["Temperature"].
    :param entity_name: name of the entity to read the values from
    :param attr_name_list: list of attribute names
    :return: response text with the requested attribute values
    """
    query = {'options': 'values',
             'attrs': ','.join(attr_name_list)}
    return self.get_entity(entity_name, query)
def update_entity(self, entity):
    """
    Patch the attributes of an existing entity with the key/value data of
    the given entity object.
    :param entity: entity object providing id and key/value attributes
    """
    url = self.url + '/v2/entities/' + entity.id + '/attrs'
    body = json.dumps(entity.get_attributes_key_values())
    response = self.session.patch(url,
                                  headers=self.get_header(requtils.HEADER_CONTENT_JSON),
                                  data=body)
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
def update_attribute(self, entity_name, attr_name, attr_value):
    """
    Overwrite the value of a single attribute of an entity.
    :param entity_name: id of the entity
    :param attr_name: name of the attribute to update
    :param attr_value: new value; JSON-serialised before sending
    """
    url = '{}/v2/entities/{}/attrs/{}/value'.format(self.url, entity_name, attr_name)
    headers = self.get_header(requtils.HEADER_CONTENT_PLAIN)
    response = self.session.put(url, headers=headers, data=json.dumps(attr_value))
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
def add_attribute(self, entity: Entity = None, entity_name: str = None, attr_dict: dict = None):
    # POST /v2/entities/{id}/attrs?options=append
    """
    Append new attributes to an entity in the context broker. The attributes
    can come either from an Entity object or directly from a dict/JSON.
    Attributes the entity already has are filtered out first, so only
    genuinely new ones are sent (existing values are left untouched).
    :param entity: updated Entity instance (takes precedence if given)
    :param entity_name: name of the entity that should be updated
    :param attr_dict: dict/JSON holding the attributes
    :return: -
    """
    if isinstance(entity, Entity):
        candidate_attrs = entity.get_attributes()
        entity_name = entity.id
    else:
        candidate_attrs = attr_dict
    known = self.get_attributes(entity_name)
    fresh = {name: value for name, value in candidate_attrs.items()
             if name not in known}
    url = self.url + '/v2/entities/' + entity_name + '/attrs?options=append'
    response = self.session.post(url,
                                 data=json.dumps(fresh),
                                 headers=self.get_header(requtils.HEADER_CONTENT_JSON))
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
def get_attributes(self, entity_name: str):
    """
    Return the names of all attributes of an entity ('id' and 'type'
    are metadata, not attributes, and are excluded).
    :param entity_name: the name of the entity
    :return: list of attribute names
    """
    entity_json = json.loads(self.get_entity(entity_name))
    return [key for key in entity_json if key not in ("id", "type")]
def remove_attributes(self, entity_name):
    """
    Remove all attributes of an entity by PUT-ing an empty attribute set.
    :param entity_name: id of the entity whose attributes are removed
    """
    url = self.url + '/v2/entities/' + entity_name + '/attrs'
    # NOTE(review): unlike the other requests, no service headers are sent
    # here -- confirm this is intended for multi-tenant setups
    response = self.session.put(url)
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        # BUGFIX: previously the raw Response object was passed to the
        # logger instead of the formatted message string
        self.log_switch(level, retstr)
def create_subscription(self, subscription_body, check_duplicate: bool = True):
    """
    Register a subscription at the context broker.
    :param subscription_body: JSON string with the subscription payload
    :param check_duplicate: if True, existing subscriptions are compared
        against the new one first
    :return: the id of the created subscription (parsed from the Location
        header), or None on error
    """
    url = self.url + '/v2/subscriptions'
    headers = self.get_header(requtils.HEADER_CONTENT_JSON)
    if check_duplicate is True:
        exists = self.check_duplicate_subscription(subscription_body)
        if exists is True:
            # NOTE(review): a detected duplicate is only logged -- the
            # subscription is still created below; confirm this is intended
            logger.info(f"A similar subscription already exists.")
    response = self.session.post(url, headers=headers, data=subscription_body)
    if response.headers is None:
        return
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
        return None
    else:
        # the broker returns the new resource path in the Location header,
        # e.g. /v2/subscriptions/<id>; the last path segment is the id
        location = response.headers.get('Location')
        addr_parts = location.split('/')
        subscription_id = addr_parts.pop()
        return subscription_id
def get_subscription_list(self, limit=100):
    """
    Return the ids of all subscriptions registered at the context broker.
    :param limit: page size used when the total count exceeds one page
    :return: list of subscription id strings, or None on error
    """
    url = self.url + '/v2/subscriptions?options=count'
    response = self.session.get(url, headers=self.get_header())
    sub_count = float(response.headers["Fiware-Total-Count"])
    if sub_count >= limit:
        # BUGFIX: the paginated branch used to return full subscription
        # dicts, while the branch below returns plain ids; callers such as
        # delete_all_subscriptions() need ids in both cases
        sub_dicts = self.get_pagination(url=url, headers=self.get_header(),
                                        limit=limit, count=sub_count)
        return [sub["id"] for sub in sub_dicts]
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
        return None
    json_object = json.loads(response.text)
    return [sub["id"] for sub in json_object]
def get_subscription(self, subscription_id: str):
    """
    Retrieve a single subscription by its id.
    :param subscription_id: id of the subscription
    :return: response text on success, otherwise None
    """
    url = self.url + '/v2/subscriptions/' + subscription_id
    response = self.session.get(url, headers=self.get_header())
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
    else:
        # BUGFIX: the fetched subscription was previously discarded -- the
        # method never returned anything on success
        return response.text
def delete_subscription(self, subscription_id: str):
    """
    Delete a subscription from the context broker.
    :param subscription_id: id of the subscription to remove
    """
    response = self.session.delete(self.url + '/v2/subscriptions/' + subscription_id,
                                   headers=self.get_header())
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
def get_pagination(self, url: str, headers: dict,
                   count: float, limit: int = 20, params=None):
    """
    NGSIv2 implements a pagination mechanism to help clients retrieve large
    result sets. This works for all listing operations of the API (e.g.
    GET /v2/entities, GET /v2/subscriptions, POST /v2/op/query, ...) and is
    used here to fetch datasets larger than the per-request limit.
    :param url: request url of the original operation, e.g.
        http://localhost:1026/v2/subscriptions?limit=20&options=count
    :param headers: headers of the original request, e.g.
        {'fiware-service': 'crio', 'fiware-servicepath': '/measurements'}
    :param count: total number of elements, taken from the
        "Fiware-Total-Count" response header
    :param limit: page size of the original request, default 20
    :param params: optional query parameters of the original request
    :return: list containing all result objects as dictionaries
    """
    all_data = []
    # count is a float so the division below rounds up, not down
    no_intervals = int(math.ceil(count / limit))
    for i in range(0, no_intervals):
        if i == 0:
            page_url = url
        else:
            # BUGFIX: build each page url from the base url; previously the
            # offsets accumulated across iterations
            # ('...&offset=20&offset=40&...')
            page_url = url + '&offset=' + str(i * limit)
        if params is not None:
            # BUGFIX: was 'params == (not None)', which compares params
            # against True and therefore never sent the query parameters
            response = self.session.get(url=page_url, headers=headers, params=params)
        else:
            response = self.session.get(url=page_url, headers=headers)
        ok, retstr = requtils.response_ok(response)
        if not ok:
            level, retstr = requtils.logging_switch(response)
            self.log_switch(level, retstr)
        else:
            for resp_dict in json.loads(response.text):
                all_data.append(resp_dict)
    return all_data
def check_duplicate_subscription(self, subscription_body, limit: int = 20):
    """
    Compare the subject of the subscription body against all existing
    subscriptions to decide whether an equivalent subscription for the
    device / entity already exists.
    :param subscription_body: JSON string with the new subscription payload
    :param limit: pagination parameter; page size of the subscription query
    :return: exists, boolean -> True if such a subscription already exists
    """
    exists = False
    subscription_subject = json.loads(subscription_body)["subject"]
    # Exact keys depend on the subscription body
    try:
        subscription_url = json.loads(subscription_body)["notification"]["httpCustom"]["url"]
    except KeyError:
        subscription_url = json.loads(subscription_body)["notification"]["http"]["url"]
    # If the number of subscriptions is larger than the limit, pagination
    # methods have to be used
    url = self.url + '/v2/subscriptions?limit=' + str(limit) + '&options=count'
    response = self.session.get(url, headers=self.get_header())
    sub_count = float(response.headers["Fiware-Total-Count"])
    response = json.loads(response.text)
    if sub_count >= limit:
        # BUGFIX: get_pagination() already returns a parsed list, so it
        # must not be passed through json.loads() a second time
        response = self.get_pagination(url=url, headers=self.get_header(),
                                       limit=limit, count=sub_count)
    for existing_subscription in response:
        # check whether the exact same subscription already exists
        if existing_subscription["subject"] == subscription_subject:
            exists = True
            break
        try:
            existing_url = existing_subscription["notification"]["http"]["url"]
        except KeyError:
            existing_url = existing_subscription["notification"]["httpCustom"]["url"]
        # only subscriptions notifying to the same endpoint can be duplicates
        if existing_url != subscription_url:
            continue
        # iterate over all entities included in the new subscription
        for entity in subscription_subject["entities"]:
            if 'type' in entity.keys():
                subscription_type = entity['type']
            else:
                subscription_type = entity['typePattern']
            if 'id' in entity.keys():
                subscription_id = entity['id']
            else:
                subscription_id = entity["idPattern"]
            # iterate over all entities included in the existing subscription
            for existing_entity in existing_subscription["subject"]["entities"]:
                # BUGFIX: the fields of the *existing* entity were read from
                # the new entity ('entity') before, so the comparison always
                # compared the new subscription against itself
                if "type" in existing_entity.keys():
                    type_existing = existing_entity["type"]
                else:
                    type_existing = existing_entity["typePattern"]
                if "id" in existing_entity.keys():
                    id_existing = existing_entity["id"]
                else:
                    id_existing = existing_entity["idPattern"]
                # the id field is non-optional, so it has to match;
                # an empty or wildcard type matches all types
                if (type_existing == subscription_type) or \
                        ('*' in subscription_type) or \
                        ('*' in type_existing) or \
                        (type_existing == "") or (subscription_type == ""):
                    # get the attrs first to avoid code duplication;
                    # assumption: the condition attrs are positioned like the
                    # entities list (i == j) -- TODO confirm for all payloads
                    i = subscription_subject["entities"].index(entity)
                    j = existing_subscription["subject"]["entities"].index(existing_entity)
                    try:
                        subscription_attrs = subscription_subject["condition"]["attrs"][i]
                    except (KeyError, IndexError):
                        subscription_attrs = []
                    try:
                        existing_attrs = existing_subscription["subject"]["condition"]["attrs"][j]
                    except (KeyError, IndexError):
                        existing_attrs = []
                    # check if one of the subscriptions is a pattern, or if
                    # both refer to the same id
                    if (".*" in subscription_id) or ('.*' in id_existing) or (subscription_id == id_existing):
                        # attributes have to match, or one side has none
                        if (subscription_attrs == existing_attrs) or (subscription_attrs == []) or (existing_attrs == []):
                            exists = True
                    # if the ids do not match exactly they can still overlap
                    # via a wildcard pattern up to a certain position
                    elif ("*" in subscription_id) or ('*' in id_existing):
                        regex_existing = id_existing.find('*')
                        regex_subscription = subscription_id.find('*')
                        # slice the strings to compare the literal parts
                        if (id_existing[:regex_existing] in subscription_id) or \
                                (subscription_id[:regex_subscription] in id_existing) or \
                                (id_existing[regex_existing:] in subscription_id) or \
                                (subscription_id[regex_subscription:] in id_existing):
                            if (subscription_attrs == existing_attrs) or (subscription_attrs == []) or (existing_attrs == []):
                                exists = True
    return exists
def delete_all_subscriptions(self):
    """Delete every subscription currently registered at the broker."""
    for subscription_id in self.get_subscription_list():
        self.delete_subscription(subscription_id)
def post_cmd_v1(self, entity_id: str, entity_type: str,
                cmd_name: str, cmd_value: str):
    """
    Send a command to a device via the NGSIv1 updateContext endpoint.
    :param entity_id: id of the target entity
    :param entity_type: type of the target entity
    :param cmd_name: name of the command attribute
    :param cmd_value: value to send with the command
    """
    url = self.url + '/v1/updateContext'
    command = {"name": cmd_name,
               "type": "command",
               "value": cmd_value}
    payload = {"updateAction": "UPDATE",
               "contextElements": [
                   {"id": entity_id,
                    "type": entity_type,
                    "isPattern": "false",
                    "attributes": [command]}]
               }
    headers = self.get_header(requtils.HEADER_CONTENT_JSON)
    response = self.session.post(url, headers=headers, data=json.dumps(payload))
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
def delete(self, entity_id: str):
    """
    Delete a single entity from the context broker.
    :param entity_id: id of the entity to delete
    """
    response = self.session.delete(self.url + '/v2/entities/' + entity_id,
                                   headers=self.get_header())
    ok, retstr = requtils.response_ok(response)
    if not ok:
        level, retstr = requtils.logging_switch(response)
        self.log_switch(level, retstr)
def delete_all_entities(self):
    """Delete every entity currently stored in the context broker."""
    for entity_id in self.get_entities_list():
        self.delete(entity_id)
|
en
| 0.742337
|
# ToDo Query params # Class is only implemented for backward compatibility Describes the attribute of an entity. :param entity_dict: A dictionarry describing the entity Needed Structure: { "id" : "Sensor002", "type": "temperature_Sensor", "Temperature" : { "value" : 17, "type" : "Number" }, "Status" : {"value": "Ok", "type": "Text" } } returns the object-representation Function returns the Entity to be posted as a JSON :return: the Entity Json Function adds another Attribute to an existing Entity. :param attr_dict: A dictionary describing an Attribute "Temperature" : { "value" : 17, "type" : "Number" }, :return: updated entity dict Function deletes an attribute from an existing Entity :param attr_name: the name of the attribute to delete :return: updated entity_dict Function returns list of attribute names. Function returns all attributes, their types and values of an entity :return: Class implements the concept of FIWARE Entity Relationships. :param ref_object: The parent / object of the relationship :param subject: The child / subject of the relationship :param predicate: currently not supported -> describes the relationship between object and subject Function updates the subject Attribute with the relationship attribute :return: Function creates the NGSI Ref schema in a ref_dict, needed for the subject :return: ref_dict Function returns a JSON to describe the Relationship, which then can be pushed to orion :return: whole_dict Define entity service paths which are supported by the Orion Context Broker to support hierarchical scopes: https://fiware-orion.readthedocs.io/en/master/user/service_path/index.html Overwrites the fiware_service and service path of config.json Implementation of Orion Context Broker functionalities, such as creating entities and subscriptions; retrieving, updating and deleting data. 
Further documentation: https://fiware-orion.readthedocs.io/en/master/ :param config: :param version_2: if param version_2 is True, the standard used url is the v2, else v1 Overwrites the fiware_service and service path of config.json combine fiware_service header (if set) and additional headers Function returns the required log_level with the repsonse :param level: The logging level that should be returned :param response: The message for the logger :return: Function utilises the test.test_connection() function to check the availability of a given url and service. :return: Boolean, True if the service is reachable, False if not. Function registers an Object with the Orion Context Broker, if it allready exists it can be automatically updated if the overwrite bool is True First a post request with the entity is tried, if the response code is 422 the entity is uncrossable, as it already exists there are two options, either overwrite it, if the attribute have changed (e.g. at least one new/ new values) (update = True) or leave it the way it is (update=False) :param entity: An entity object :param update: If the response.status_code is 422, whether the old entity should be updated or not :return: Function registers a JSON with the Orion Context Broker. :param json: A JSON (dictionary) :param entity: An Orion entity, from which the json_data can be obatained. :param params: :return: :param json_data: :param params: :return: Function can be used to post a one to many or one to one relationship. :param json_data: Relationship Data obtained from the Relationship class. e.g. : {"id": "urn:ngsi-ld:Shelf:unit001", "type": "Shelf", "refStore": {"type": "Relationship", "value": "urn:ngsi-ld:Store:001"}} Can be a one to one or a one to many relationship # Action type append required, # Will overwrite existing entities if they exist whereas # the entities attribute holds an array of entities we wish to update. 
Function gets the JSON for child / subject entities for a parent / object entity. :param object_entity_name: The parent / object entity name :param object_entity_type: The type of the parent / object entity :param subject_type: optional parameter, if added only those child / subject entities are returned that match the type :return: JSON containing the child / subject information Function returns a List of all objects associated to a subject. If object type is not None, only those are returned, that match the object type. :param subject_entity_name: The child / subject entity name :param subject_entity_type: The type of the child / subject entity :param object_type: :return: List containing all associated objects Function returns all associated data for a given entity name and type :param name: name of the entity :param entity_type: type of the entity :param associated_type: if only associated data of one type should be returned, this parameter has to be the type :return: A dictionary, containing the data of the entity, a key "subjects" and "objects" that contain each a list with the reflective data Function returns all types and values for a list of attributes of an entity, given in attr_name_list :param entity_name: Entity_name - Name of the entity to obtain the values from :param attr_name_list: List of attributes - e.g. ["Temperature"] :return: List, containin all attribute dictionaries e.g.: [{"value":33,"type":"Float"}] # POST /v2/entities/{id}/attrs?options=append This function adds attributes to the Entity in the Context Broker. 
This can be done in two ways, either by first adding the attribute to the Entity object or by directly sending it from a dict/JSON The Function first compares it with existing attributes, and only adds (so updates) the ones not previoulsy existing :param entity: The updated Entity Instance :param entity_name: The Entity name which should be updated :param attribute_dict: A JSON/Dict containing the attributes :return: - For a given entity this function returns all attribute names :param entity_name: the name of the entity :return: attributes - list of attributes NGSIv2 implements a pagination mechanism in order to help clients to retrieve large sets of resources. This mechanism works for all listing operations in the API (e.g. GET /v2/entities, GET /v2/subscriptions, POST /v2/op/query, etc.). This function helps getting datasets that are larger than the limit for the different GET operations. :param url: Information about the url, obtained from the orginal function e.g. : http://localhost:1026/v2/subscriptions?limit=20&options=count :param headers: The headers from the original function, e.g: {'fiware-service': 'crio', 'fiware-servicepath': '/measurements'} :param count: Number of total elements, obtained by adding "&options=count" to the url, included in the response headers :param limit: Limit, obtained from the oringal function, default is 20 :return: A list, containing all objects in a dictionary # due to a math, one of the both numbers has to be a float, # otherwise the value is rounded down not up Function compares the subject of the subscription body, on whether a subscription already exists for a device / entity. 
:param subscription_body: the body of the new subscripton :param limit: pagination parameter, to set the number of subscriptions bodies the get request should grab :return: exists, boolean -> True, if such a subscription allready exists # Exact keys depend on subscription body # If the number of subscriptions is larger then the limit, paginations methods have to be used # check whether the exact same subscriptions already exists # check whether both subscriptions notify to the same path # iterate over all entities included in the subscription object # iterate over all entities included in the exisiting subscriptions # as the ID field is non optional, it has to match # check whether the type match # if the type field is empty, they match all types # check if on of the subscriptions is a pattern, or if they both refer to the same id # Get the attrs first, to avoid code duplication # last thing to compare is the attributes # Assumption -> position is the same as the entities list # i == j # Attributes have to match, or the have to be an empty array # if they do not match completely or subscribe to all ids they have to match up to a certain position # slice the strings to compare
| 3.098836
| 3
|
auth0/v3/test/authentication/test_passwordless.py
|
santiagoroman/auth0-python
| 0
|
6626480
|
import unittest
import mock
from ...authentication.passwordless import Passwordless
class TestPasswordless(unittest.TestCase):
    """Unit tests for the Passwordless authentication endpoints.

    Every test patches Passwordless.post and inspects the url and body the
    client would send to the tenant 'my.domain.com'.
    """

    _START_URL = 'https://my.domain.com/passwordless/start'
    _LOGIN_URL = 'https://my.domain.com/oauth/ro'

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_email(self, mock_post):
        # an email flow posts to /passwordless/start on the 'email' connection
        client = Passwordless('my.domain.com')
        client.email(client_id='cid',
                     email='<EMAIL>',
                     send='snd')
        call_args, call_kwargs = mock_post.call_args
        expected_body = {
            'client_id': 'cid',
            'email': '<EMAIL>',
            'send': 'snd',
            'connection': 'email',
        }
        self.assertEqual(call_args[0], self._START_URL)
        self.assertEqual(call_kwargs['data'], expected_body)

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_email_with_auth_params(self, mock_post):
        # auth_params are forwarded under the 'authParams' key
        client = Passwordless('my.domain.com')
        client.email(client_id='cid',
                     email='<EMAIL>',
                     send='snd',
                     auth_params={'a': 'b'})
        call_args, call_kwargs = mock_post.call_args
        expected_body = {
            'client_id': 'cid',
            'email': '<EMAIL>',
            'send': 'snd',
            'authParams': {'a': 'b'},
            'connection': 'email',
        }
        self.assertEqual(call_args[0], self._START_URL)
        self.assertEqual(call_kwargs['data'], expected_body)

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_email_with_client_secret(self, mock_post):
        # a confidential client also sends its client_secret
        client = Passwordless('my.domain.com')
        client.email(client_id='cid',
                     client_secret='csecret',
                     email='<EMAIL>',
                     send='snd')
        call_args, call_kwargs = mock_post.call_args
        expected_body = {
            'client_id': 'cid',
            'client_secret': 'csecret',
            'email': 'a<EMAIL>',
            'send': 'snd',
            'connection': 'email',
        }
        self.assertEqual(call_args[0], self._START_URL)
        self.assertEqual(call_kwargs['data'], expected_body)

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_sms(self, mock_post):
        # an sms flow posts to /passwordless/start on the 'sms' connection
        client = Passwordless('my.domain.com')
        client.sms(client_id='cid', phone_number='123456')
        call_args, call_kwargs = mock_post.call_args
        expected_body = {
            'client_id': 'cid',
            'phone_number': '123456',
            'connection': 'sms',
        }
        self.assertEqual(call_args[0], self._START_URL)
        self.assertEqual(call_kwargs['data'], expected_body)

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_sms_with_client_secret(self, mock_post):
        # a confidential client also sends its client_secret
        client = Passwordless('my.domain.com')
        client.sms(client_id='cid', client_secret='csecret', phone_number='123456')
        call_args, call_kwargs = mock_post.call_args
        expected_body = {
            'client_id': 'cid',
            'client_secret': 'csecret',
            'phone_number': '123456',
            'connection': 'sms',
        }
        self.assertEqual(call_args[0], self._START_URL)
        self.assertEqual(call_kwargs['data'], expected_body)

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_sms_login(self, mock_post):
        # logging in with the sms code uses the resource-owner endpoint
        client = Passwordless('my.domain.com')
        client.sms_login(client_id='cid', phone_number='123456', code='abcd')
        call_args, call_kwargs = mock_post.call_args
        expected_body = {
            'client_id': 'cid',
            'connection': 'sms',
            'grant_type': 'password',
            'username': '123456',
            'password': '<PASSWORD>',
            'scope': 'openid',
        }
        self.assertEqual(call_args[0], self._LOGIN_URL)
        self.assertEqual(call_kwargs['data'], expected_body)

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_sms_login_with_scope(self, mock_post):
        # an explicit scope replaces the default 'openid'
        client = Passwordless('my.domain.com')
        client.sms_login(client_id='cid', phone_number='123456',
                         code='abcd', scope='openid profile')
        call_args, call_kwargs = mock_post.call_args
        expected_body = {
            'client_id': 'cid',
            'connection': 'sms',
            'grant_type': 'password',
            'username': '123456',
            'password': '<PASSWORD>',
            'scope': 'openid profile',
        }
        self.assertEqual(call_args[0], self._LOGIN_URL)
        self.assertEqual(call_kwargs['data'], expected_body)
|
import unittest
import mock
from ...authentication.passwordless import Passwordless
class TestPasswordless(unittest.TestCase):
    """Unit tests for the Passwordless authentication client.

    Each test patches Passwordless.post and inspects the url and body that
    the client would send to the tenant 'my.domain.com'.
    """

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_email(self, mock_post):
        # email flow posts to /passwordless/start on the 'email' connection
        p = Passwordless('my.domain.com')
        p.email(client_id='cid',
                email='<EMAIL>',
                send='snd')
        args, kwargs = mock_post.call_args
        self.assertEqual(args[0], 'https://my.domain.com/passwordless/start')
        self.assertEqual(kwargs['data'], {
            'client_id': 'cid',
            'email': '<EMAIL>',
            'send': 'snd',
            'connection': 'email',
        })

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_email_with_auth_params(self, mock_post):
        # auth_params are forwarded under the 'authParams' key
        p = Passwordless('my.domain.com')
        p.email(client_id='cid',
                email='<EMAIL>',
                send='snd',
                auth_params={'a': 'b'})
        args, kwargs = mock_post.call_args
        self.assertEqual(args[0], 'https://my.domain.com/passwordless/start')
        self.assertEqual(kwargs['data'], {
            'client_id': 'cid',
            'email': '<EMAIL>',
            'send': 'snd',
            'authParams': {'a': 'b'},
            'connection': 'email',
        })

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_email_with_client_secret(self, mock_post):
        # a confidential client also sends its client_secret
        p = Passwordless('my.domain.com')
        p.email(client_id='cid',
                client_secret='csecret',
                email='<EMAIL>',
                send='snd')
        args, kwargs = mock_post.call_args
        self.assertEqual(args[0], 'https://my.domain.com/passwordless/start')
        self.assertEqual(kwargs['data'], {
            'client_id': 'cid',
            'client_secret': 'csecret',
            'email': 'a<EMAIL>',
            'send': 'snd',
            'connection': 'email',
        })

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_sms(self, mock_post):
        # sms flow posts to /passwordless/start on the 'sms' connection
        p = Passwordless('my.domain.com')
        p.sms(client_id='cid', phone_number='123456')
        args, kwargs = mock_post.call_args
        self.assertEqual(args[0], 'https://my.domain.com/passwordless/start')
        self.assertEqual(kwargs['data'], {
            'client_id': 'cid',
            'phone_number': '123456',
            'connection': 'sms',
        })

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_sms_with_client_secret(self, mock_post):
        # a confidential client also sends its client_secret
        p = Passwordless('my.domain.com')
        p.sms(client_id='cid', client_secret='csecret', phone_number='123456')
        args, kwargs = mock_post.call_args
        self.assertEqual(args[0], 'https://my.domain.com/passwordless/start')
        self.assertEqual(kwargs['data'], {
            'client_id': 'cid',
            'client_secret': 'csecret',
            'phone_number': '123456',
            'connection': 'sms',
        })

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_sms_login(self, mock_post):
        # logging in with the sms code uses the resource-owner endpoint
        p = Passwordless('my.domain.com')
        p.sms_login(client_id='cid', phone_number='123456', code='abcd')
        args, kwargs = mock_post.call_args
        self.assertEqual(args[0], 'https://my.domain.com/oauth/ro')
        self.assertEqual(kwargs['data'], {
            'client_id': 'cid',
            'connection': 'sms',
            'grant_type': 'password',
            'username': '123456',
            'password': '<PASSWORD>',
            'scope': 'openid',
        })

    @mock.patch('auth0.v3.authentication.passwordless.Passwordless.post')
    def test_sms_login_with_scope(self, mock_post):
        # an explicit scope replaces the default 'openid'
        p = Passwordless('my.domain.com')
        p.sms_login(client_id='cid', phone_number='123456',
                    code='abcd', scope='openid profile')
        args, kwargs = mock_post.call_args
        self.assertEqual(args[0], 'https://my.domain.com/oauth/ro')
        self.assertEqual(kwargs['data'], {
            'client_id': 'cid',
            'connection': 'sms',
            'grant_type': 'password',
            'username': '123456',
            'password': '<PASSWORD>',
            'scope': 'openid profile',
        })
none
| 1
| 2.760054
| 3
|
|
setup.py
|
shiquanwang/numba
| 1
|
6626481
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import sys
import shutil
import subprocess
from fnmatch import fnmatchcase
from distutils.util import convert_path
# Do not EVER use setuptools, it makes cythonization fail
# Distribute fixes that
from distutils.core import setup, Extension
import numpy
# import numba
import gen_type_conversion
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension as CythonExtension
# numba has never supported interpreters older than Python 2.6
if sys.version_info[:2] < (2, 6):
    raise Exception('numba requires Python 2.6 or greater.')
import versioneer
#------------------------------------------------------------------------
# Setup constants and arguments
#------------------------------------------------------------------------
# versioneer derives the package version from git tags / the parent
# directory name and stores it in numba/_version.py
versioneer.versionfile_source = 'numba/_version.py'
versioneer.versionfile_build = 'numba/_version.py'
versioneer.tag_prefix = ''
versioneer.parentdir_prefix = 'numba-'
# distutils command classes: versioneer's commands plus Cython's build_ext
cmdclass = versioneer.get_cmdclass()
cmdclass['build_ext'] = build_ext
setup_args = {
    # NOTE(review): the file handle from open() is never closed -- harmless
    # in a short-lived setup script, but worth confirming
    'long_description': open('README.md').read(),
}
# directory layout used below: the numba sources and the bundled
# pyextensibletype dependency (copied into the package at build time)
numba_root = os.path.dirname(os.path.abspath(__file__))
deps_root = os.path.join(numba_root, 'deps')
pyext_root = os.path.join(deps_root, 'pyextensibletype')
pyext_dst = os.path.join(numba_root, "numba", "pyextensibletype")
def get_include():
    """Use numba.get_include() instead (make numba importable without
    building it first)
    """
    # header directory shipped inside the numba source tree
    return os.path.join(numba_root, "numba", "include")

# resolved once so the extension definitions below can reference it
numba_include_dir = get_include()
#------------------------------------------------------------------------
# Package finding
#------------------------------------------------------------------------
def find_packages(where='.', exclude=()):
    """
    Breadth-first search for python packages below *where*.

    A directory counts as a package when its name contains no dot and it
    holds an ``__init__.py`` file. On Python 3 the ``*py2only*`` pattern is
    excluded in addition to the caller-supplied *exclude* patterns and the
    always-excluded ``ez_setup`` / ``distribute_setup`` helpers.
    :return: list of dotted package names
    """
    found = []
    pending = [(convert_path(where), '')]
    while pending:
        directory, prefix = pending.pop(0)
        for entry in os.listdir(directory):
            full_path = os.path.join(directory, entry)
            is_package = ('.' not in entry
                          and os.path.isdir(full_path)
                          and os.path.isfile(os.path.join(full_path, '__init__.py')))
            if is_package:
                found.append(prefix + entry)
                pending.append((full_path, prefix + entry + '.'))
    patterns = list(exclude)
    if sys.version_info[0] == 3:
        patterns.append('*py2only*')
    for pattern in patterns + ['ez_setup', 'distribute_setup']:
        found = [name for name in found if not fnmatchcase(name, pattern)]
    return found
#------------------------------------------------------------------------
# 2to3
#------------------------------------------------------------------------
def run_2to3():
    """Configure the build to translate the Python 2 sources with 2to3.

    Installs a ``build_py`` command that applies a fixed list of lib2to3
    fixers at build time.  Mutates the module-level ``cmdclass`` dict as a
    side effect; only called when running under Python 3.
    """
    import lib2to3.refactor
    from distutils.command.build_py import build_py_2to3 as build_py
    print("Installing 2to3 fixers")
    # need to convert sources to Py3 on installation
    fixes = 'dict imports imports2 unicode ' \
            'xrange itertools itertools_imports long types'.split()
    # Expand short fixer names to their fully-qualified lib2to3 module names.
    fixes = ['lib2to3.fixes.fix_' + fix
             for fix in fixes]
    build_py.fixer_names = fixes
    cmdclass["build_py"] = build_py
# cmdclass["build"] = build_py
# Distribute options
# setup_args["use_2to3"] = True
#------------------------------------------------------------------------
# pyextensibletype
#------------------------------------------------------------------------
def cleanup_pyextensibletype():
    """Delete any previously vendored ``numba/pyextensibletype`` tree."""
    stale_copy = pyext_dst
    if os.path.exists(stale_copy):
        shutil.rmtree(stale_copy)
def register_pyextensibletype():
    """Vendor the bundled pyextensibletype sources into the numba package.

    Creates ``__init__.py`` markers so ``deps`` and ``deps/pyextensibletype``
    become importable, copies the sources to ``numba/pyextensibletype``, and
    returns the list of C extensions declared by its setup configuration.
    """
    # Touch package markers so `deps.pyextensibletype` can be imported below.
    with open(os.path.join(deps_root, '__init__.py'), 'w'):
        pass
    with open(os.path.join(pyext_root, '__init__.py'), 'w'):
        pass
    shutil.copytree(pyext_root, pyext_dst)
    from deps.pyextensibletype import setupconfig
    # Extensions are namespaced under the "numba.pyextensibletype" package.
    exts = setupconfig.get_extensions(pyext_dst, "numba.pyextensibletype")
    return exts
#------------------------------------------------------------------------
# Generate code for build
#------------------------------------------------------------------------
# Commands that need the generated C sources and the vendored
# pyextensibletype extensions.
build = set(sys.argv) & set(['build', 'build_ext', 'install',
                             'bdist_wininst'])
cleanup_pyextensibletype()
if build:
    gen_type_conversion.run()  # (re)generate the C type-conversion sources
    # TODO: Finish and release pyextensibletype
    extensibletype_extensions = register_pyextensibletype()
else:
    extensibletype_extensions = []
extensibletype_include = "numba/pyextensibletype/include"
if sys.version_info[0] >= 3:
    run_2to3()
#------------------------------------------------------------------------
# setup
#------------------------------------------------------------------------
# Main distutils entry point: metadata, package data, and the C/Cython
# extension modules (including the vendored pyextensibletype extensions
# computed above).
setup(
    name="numba",
    version=versioneer.get_version(),
    author="<NAME>.",
    author_email="<EMAIL>",
    url="http://numba.github.com",
    license="BSD",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        # "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        # "Programming Language :: Python :: 3.2",
        "Topic :: Utilities",
    ],
    description="compiling Python code using LLVM",
    packages=find_packages(exclude=('*deps*',)),
    entry_points = {
        'console_scripts': [
            'pycc = numba.pycc:main',
        ]
    },
    # Non-Python files that must ship inside the installed packages.
    package_data={
        '': ['*.md'],
        'numba.minivect': ['include/*'],
        'numba.asdl.common': ['*.asdl'],
        'numba.asdl.py2_7': ['*.asdl'],
        'numba.asdl.py3_2': ['*.asdl'],
        'numba.asdl.py3_3': ['*.asdl'],
        'numba.external.utilities': ['*.c', '*.h'],
        'numba': ['*.c', '*.h', 'include/*', '*.pxd'],
        'numba.vectorize': ['*.h'],
    },
    ext_modules=extensibletype_extensions + [
        Extension(
            name="numba.vectorize._internal",
            sources=["numba/vectorize/_internal.c",
                     "numba/vectorize/_ufunc.c",
                     "numba/vectorize/_gufunc.c"],
            include_dirs=[numpy.get_include(), "numba/minivect/include/"],
            depends=["numba/vectorize/_internal.h",
                     "numba/minivect/include/miniutils.h"]),
        Extension(
            name="numba.external.utilities.utilities",
            sources=["numba/external/utilities/utilities.c"],
            include_dirs=[numba_include_dir, extensibletype_include],
            depends=["numba/external/utilities/type_conversion.c",
                     "numba/external/utilities/virtuallookup.c",
                     "numba/external/utilities/generated_conversions.c",
                     "numba/external/utilities/generated_conversions.h"]),
        CythonExtension(
            name="numba.pyconsts",
            sources=["numba/pyconsts.pyx"],
            depends=["numba/_pyconsts.pxd"],
            include_dirs=[numba_include_dir]),
        CythonExtension(
            name="numba.exttypes.extension_types",
            sources=["numba/exttypes/extension_types.pyx"],
            cython_gdb=True),
        CythonExtension(
            name="numba.numbawrapper",
            sources=["numba/numbawrapper.pyx", "numba/numbafunction.c"],
            depends=["numba/numbafunction.h"],
            include_dirs=[numba_include_dir,
                          numpy.get_include()],
            cython_gdb=True),
    ],
    cmdclass=cmdclass,
    **setup_args
)
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import sys
import shutil
import subprocess
from fnmatch import fnmatchcase
from distutils.util import convert_path
# Do not EVER use setuptools, it makes cythonization fail
# Distribute fixes that
from distutils.core import setup, Extension
import numpy
# import numba
import gen_type_conversion
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension as CythonExtension
if sys.version_info[:2] < (2, 6):
raise Exception('numba requires Python 2.6 or greater.')
import versioneer
#------------------------------------------------------------------------
# Setup constants and arguments
#------------------------------------------------------------------------
versioneer.versionfile_source = 'numba/_version.py'
versioneer.versionfile_build = 'numba/_version.py'
versioneer.tag_prefix = ''
versioneer.parentdir_prefix = 'numba-'
cmdclass = versioneer.get_cmdclass()
cmdclass['build_ext'] = build_ext
setup_args = {
'long_description': open('README.md').read(),
}
numba_root = os.path.dirname(os.path.abspath(__file__))
deps_root = os.path.join(numba_root, 'deps')
pyext_root = os.path.join(deps_root, 'pyextensibletype')
pyext_dst = os.path.join(numba_root, "numba", "pyextensibletype")
def get_include():
"""Use numba.get_include() instead (make numba importable without
building it first)
"""
return os.path.join(numba_root, "numba", "include")
numba_include_dir = get_include()
#------------------------------------------------------------------------
# Package finding
#------------------------------------------------------------------------
def find_packages(where='.', exclude=()):
out = []
stack=[(convert_path(where), '')]
while stack:
where, prefix = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where,name)
if ('.' not in name and os.path.isdir(fn) and
os.path.isfile(os.path.join(fn, '__init__.py'))
):
out.append(prefix+name)
stack.append((fn, prefix+name+'.'))
if sys.version_info[0] == 3:
exclude = exclude + ('*py2only*', )
for pat in list(exclude) + ['ez_setup', 'distribute_setup']:
out = [item for item in out if not fnmatchcase(item, pat)]
return out
#------------------------------------------------------------------------
# 2to3
#------------------------------------------------------------------------
def run_2to3():
import lib2to3.refactor
from distutils.command.build_py import build_py_2to3 as build_py
print("Installing 2to3 fixers")
# need to convert sources to Py3 on installation
fixes = 'dict imports imports2 unicode ' \
'xrange itertools itertools_imports long types'.split()
fixes = ['lib2to3.fixes.fix_' + fix
for fix in fixes]
build_py.fixer_names = fixes
cmdclass["build_py"] = build_py
# cmdclass["build"] = build_py
# Distribute options
# setup_args["use_2to3"] = True
#------------------------------------------------------------------------
# pyextensibletype
#------------------------------------------------------------------------
def cleanup_pyextensibletype():
if os.path.exists(pyext_dst):
shutil.rmtree(pyext_dst)
def register_pyextensibletype():
with open(os.path.join(deps_root, '__init__.py'), 'w'):
pass
with open(os.path.join(pyext_root, '__init__.py'), 'w'):
pass
shutil.copytree(pyext_root, pyext_dst)
from deps.pyextensibletype import setupconfig
exts = setupconfig.get_extensions(pyext_dst, "numba.pyextensibletype")
return exts
#------------------------------------------------------------------------
# Generate code for build
#------------------------------------------------------------------------
build = set(sys.argv) & set(['build', 'build_ext', 'install',
'bdist_wininst'])
cleanup_pyextensibletype()
if build:
gen_type_conversion.run()
# TODO: Finish and release pyextensibletype
extensibletype_extensions = register_pyextensibletype()
else:
extensibletype_extensions = []
extensibletype_include = "numba/pyextensibletype/include"
if sys.version_info[0] >= 3:
run_2to3()
#------------------------------------------------------------------------
# setup
#------------------------------------------------------------------------
setup(
name="numba",
version=versioneer.get_version(),
author="<NAME>.",
author_email="<EMAIL>",
url="http://numba.github.com",
license="BSD",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
# "Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
# "Programming Language :: Python :: 3.2",
"Topic :: Utilities",
],
description="compiling Python code using LLVM",
packages=find_packages(exclude=('*deps*',)),
entry_points = {
'console_scripts': [
'pycc = numba.pycc:main',
]
},
package_data={
'': ['*.md'],
'numba.minivect': ['include/*'],
'numba.asdl.common': ['*.asdl'],
'numba.asdl.py2_7': ['*.asdl'],
'numba.asdl.py3_2': ['*.asdl'],
'numba.asdl.py3_3': ['*.asdl'],
'numba.external.utilities': ['*.c', '*.h'],
'numba': ['*.c', '*.h', 'include/*', '*.pxd'],
'numba.vectorize': ['*.h'],
},
ext_modules=extensibletype_extensions + [
Extension(
name="numba.vectorize._internal",
sources=["numba/vectorize/_internal.c",
"numba/vectorize/_ufunc.c",
"numba/vectorize/_gufunc.c"],
include_dirs=[numpy.get_include(), "numba/minivect/include/"],
depends=["numba/vectorize/_internal.h",
"numba/minivect/include/miniutils.h"]),
Extension(
name="numba.external.utilities.utilities",
sources=["numba/external/utilities/utilities.c"],
include_dirs=[numba_include_dir, extensibletype_include],
depends=["numba/external/utilities/type_conversion.c",
"numba/external/utilities/virtuallookup.c",
"numba/external/utilities/generated_conversions.c",
"numba/external/utilities/generated_conversions.h"]),
CythonExtension(
name="numba.pyconsts",
sources=["numba/pyconsts.pyx"],
depends=["numba/_pyconsts.pxd"],
include_dirs=[numba_include_dir]),
CythonExtension(
name="numba.exttypes.extension_types",
sources=["numba/exttypes/extension_types.pyx"],
cython_gdb=True),
CythonExtension(
name="numba.numbawrapper",
sources=["numba/numbawrapper.pyx", "numba/numbafunction.c"],
depends=["numba/numbafunction.h"],
include_dirs=[numba_include_dir,
numpy.get_include()],
cython_gdb=True),
],
cmdclass=cmdclass,
**setup_args
)
|
en
| 0.178306
|
# -*- coding: utf-8 -*- # Do not EVER use setuptools, it makes cythonization fail # Distribute fixes that # import numba #------------------------------------------------------------------------ # Setup constants and arguments #------------------------------------------------------------------------ Use numba.get_include() instead (make numba importable without building it first) #------------------------------------------------------------------------ # Package finding #------------------------------------------------------------------------ #------------------------------------------------------------------------ # 2to3 #------------------------------------------------------------------------ # need to convert sources to Py3 on installation # cmdclass["build"] = build_py # Distribute options # setup_args["use_2to3"] = True #------------------------------------------------------------------------ # pyextensibletype #------------------------------------------------------------------------ #------------------------------------------------------------------------ # Generate code for build #------------------------------------------------------------------------ # TODO: Finish and release pyextensibletype #------------------------------------------------------------------------ # setup #------------------------------------------------------------------------ # "Programming Language :: Python :: 2.6", # "Programming Language :: Python :: 3.2",
| 1.83153
| 2
|
torch_geometric/loader/utils.py
|
lightaime/pytorch_geometric
| 1
|
6626482
|
<reponame>lightaime/pytorch_geometric<filename>torch_geometric/loader/utils.py
import copy
import math
from typing import Dict, Optional, Tuple, Union
import torch
from torch import Tensor
from torch_sparse import SparseTensor
from torch_geometric.data import Data, HeteroData
from torch_geometric.data.storage import EdgeStorage, NodeStorage
from torch_geometric.typing import EdgeType, OptTensor
def index_select(value: Tensor, index: Tensor, dim: int = 0) -> Tensor:
    """Select the entries of `value` at `index` along dimension `dim`.

    When running inside a DataLoader worker process, the result is
    allocated in shared memory so the parent process can read it without
    an extra copy.
    """
    out: Optional[Tensor] = None
    if torch.utils.data.get_worker_info() is not None:
        new_size = list(value.size())
        new_size[dim] = index.numel()
        shared_storage = value.storage()._new_shared(math.prod(new_size))
        out = value.new(shared_storage).view(new_size)
    return torch.index_select(value, dim, index, out=out)
def edge_type_to_str(edge_type: Union[EdgeType, str]) -> str:
    """Collapse an edge-type triplet into a single string key."""
    # The C++ sampling kernels cannot hash tuple keys, so a
    # (src, rel, dst) triplet becomes "src__rel__dst"; plain strings
    # pass through unchanged.
    if isinstance(edge_type, str):
        return edge_type
    return '__'.join(edge_type)
def to_csc(
    data: Union[Data, EdgeStorage],
    device: Optional[torch.device] = None,
    share_memory: bool = False,
    is_sorted: bool = False,
) -> Tuple[Tensor, Tensor, OptTensor]:
    """Convert graph data into the CSC format used for neighbor sampling.

    Returns ``(colptr, row, perm)``, where ``perm`` is the permutation that
    was applied to sort the edges by column.  ``perm`` is ``None`` when no
    permutation was needed (``SparseTensor`` input or ``is_sorted=True``).
    """
    perm: Optional[Tensor] = None

    if hasattr(data, 'adj_t'):
        # `adj_t` stores the transposed adjacency, so its CSR layout is
        # exactly the CSC layout of the original graph.
        colptr, row, _ = data.adj_t.csr()
    elif hasattr(data, 'edge_index'):
        (row, col) = data.edge_index
        if not is_sorted:
            size = data.size()
            # Column-major sort key `col * num_rows + row`: orders edges by
            # column first, then row; `perm` maps sorted -> original edges.
            perm = (col * size[0]).add_(row).argsort()
            row = row[perm]
            col = col[perm]
        # BUGFIX: previously `col[perm]` and `size[1]` were used here, which
        # raised NameError (`size` unbound) and indexed with `perm=None`
        # whenever `is_sorted=True`.
        colptr = torch.ops.torch_sparse.ind2ptr(col, data.size()[1])
    else:
        raise AttributeError("Data object does not contain attributes "
                             "'adj_t' or 'edge_index'")

    colptr = colptr.to(device)
    row = row.to(device)
    perm = perm.to(device) if perm is not None else None

    if not colptr.is_cuda and share_memory:
        # Put the index tensors into shared memory so DataLoader workers
        # can read them without a per-process copy.
        colptr.share_memory_()
        row.share_memory_()
        if perm is not None:
            perm.share_memory_()

    return colptr, row, perm
def to_hetero_csc(
    data: HeteroData,
    device: Optional[torch.device] = None,
    share_memory: bool = False,
    is_sorted: bool = False,
) -> Tuple[Dict[str, Tensor], Dict[str, Tensor], Dict[str, OptTensor]]:
    """Convert a heterogeneous graph into per-edge-type CSC format.

    Returns dictionaries of `colptr`, `row`, and edge permutations, keyed
    by the string form of each edge type (C++ samplers cannot hash tuple
    keys, so triplets are flattened via `edge_type_to_str`).
    """
    colptr_dict: Dict[str, Tensor] = {}
    row_dict: Dict[str, Tensor] = {}
    perm_dict: Dict[str, OptTensor] = {}

    for store in data.edge_stores:
        name = edge_type_to_str(store._key)
        colptr, row, perm = to_csc(store, device, share_memory, is_sorted)
        colptr_dict[name] = colptr
        row_dict[name] = row
        perm_dict[name] = perm

    return colptr_dict, row_dict, perm_dict
def filter_node_store_(store: NodeStorage, out_store: NodeStorage,
                       index: Tensor) -> NodeStorage:
    # Filters a node storage object to only hold the nodes in `index`:
    # Copies `num_nodes` and every node-level attribute of `store`,
    # restricted to `index`, into `out_store` (mutates `out_store` in
    # place).  NOTE(review): returns the *input* store, not `out_store` —
    # callers rely on the in-place mutation.
    for key, value in store.items():
        if key == 'num_nodes':
            out_store.num_nodes = index.numel()
        elif store.is_node_attr(key):
            index = index.to(value.device)
            # `__cat_dim__` is the dimension along which this attribute is
            # stacked across nodes; select the sampled nodes along it.
            dim = store._parent().__cat_dim__(key, value, store)
            out_store[key] = index_select(value, index, dim=dim)
    return store
def filter_edge_store_(store: EdgeStorage, out_store: EdgeStorage, row: Tensor,
                       col: Tensor, index: Tensor,
                       perm: OptTensor = None) -> EdgeStorage:
    # Filters a edge storage object to only hold the edges in `index`,
    # which represents the new graph as denoted by `(row, col)`:
    # Mutates `out_store` in place; NOTE(review): returns the *input*
    # `store`, callers rely on the in-place update of `out_store`.
    for key, value in store.items():
        if key == 'edge_index':
            edge_index = torch.stack([row, col], dim=0)
            out_store.edge_index = edge_index.to(value.device)
        elif key == 'adj_t':
            # NOTE: We expect `(row, col)` to be sorted by `col` (CSC layout).
            row = row.to(value.device())
            col = col.to(value.device())
            edge_attr = value.storage.value()
            if edge_attr is not None:
                index = index.to(edge_attr.device)
                edge_attr = edge_attr[index]
            # `adj_t` stores the transposed adjacency, hence reversed sizes.
            sparse_sizes = out_store.size()[::-1]
            # TODO Currently, we set `is_sorted=False`, see:
            # https://github.com/pyg-team/pytorch_geometric/issues/4346
            out_store.adj_t = SparseTensor(row=col, col=row, value=edge_attr,
                                           sparse_sizes=sparse_sizes,
                                           is_sorted=False, trust_data=True)
        elif store.is_edge_attr(key):
            dim = store._parent().__cat_dim__(key, value, store)
            if perm is None:
                index = index.to(value.device)
                out_store[key] = index_select(value, index, dim=dim)
            else:
                # Edges were permuted during `to_csc`; map sampled edge ids
                # back to the original ordering before selecting attributes.
                perm = perm.to(value.device)
                index = index.to(value.device)
                out_store[key] = index_select(value, perm[index], dim=dim)
    return store
def filter_data(data: Data, node: Tensor, row: Tensor, col: Tensor,
                edge: Tensor, perm: OptTensor = None) -> Data:
    """Restrict `data` to the nodes in `node` and the edges in `edge`.

    Returns a shallow copy whose storage has been rewritten in place by
    the `filter_*_store_` helpers; `(row, col)` define the new edge index
    and `perm` maps sampled edge ids back to the original edge ordering.
    """
    filtered = copy.copy(data)
    filter_node_store_(data._store, filtered._store, node)
    filter_edge_store_(data._store, filtered._store, row, col, edge, perm)
    return filtered
def filter_hetero_data(
    data: HeteroData,
    node_dict: Dict[str, Tensor],
    row_dict: Dict[str, Tensor],
    col_dict: Dict[str, Tensor],
    edge_dict: Dict[str, Tensor],
    perm_dict: Dict[str, OptTensor],
) -> HeteroData:
    """Restrict a heterogeneous graph to the sampled nodes and edges.

    Applies `filter_node_store_` / `filter_edge_store_` per node and edge
    type on a shallow copy of `data`; edge-type dictionaries are keyed by
    the string form of the edge-type triplet.
    """
    filtered = copy.copy(data)

    for node_type in data.node_types:
        filter_node_store_(data[node_type], filtered[node_type],
                           node_dict[node_type])

    for edge_type in data.edge_types:
        key = edge_type_to_str(edge_type)
        filter_edge_store_(data[edge_type], filtered[edge_type],
                           row_dict[key], col_dict[key],
                           edge_dict[key], perm_dict[key])

    return filtered
|
import copy
import math
from typing import Dict, Optional, Tuple, Union
import torch
from torch import Tensor
from torch_sparse import SparseTensor
from torch_geometric.data import Data, HeteroData
from torch_geometric.data.storage import EdgeStorage, NodeStorage
from torch_geometric.typing import EdgeType, OptTensor
def index_select(value: Tensor, index: Tensor, dim: int = 0) -> Tensor:
out: Optional[Tensor] = None
if torch.utils.data.get_worker_info() is not None:
# If we are in a background process, we write directly into a shared
# memory tensor to avoid an extra copy:
size = list(value.size())
size[dim] = index.numel()
numel = math.prod(size)
storage = value.storage()._new_shared(numel)
out = value.new(storage).view(size)
return torch.index_select(value, dim, index, out=out)
def edge_type_to_str(edge_type: Union[EdgeType, str]) -> str:
# Since C++ cannot take dictionaries with tuples as key as input, edge type
# triplets need to be converted into single strings.
return edge_type if isinstance(edge_type, str) else '__'.join(edge_type)
def to_csc(
data: Union[Data, EdgeStorage],
device: Optional[torch.device] = None,
share_memory: bool = False,
is_sorted: bool = False,
) -> Tuple[Tensor, Tensor, OptTensor]:
# Convert the graph data into a suitable format for sampling (CSC format).
# Returns the `colptr` and `row` indices of the graph, as well as an
# `perm` vector that denotes the permutation of edges.
# Since no permutation of edges is applied when using `SparseTensor`,
# `perm` can be of type `None`.
perm: Optional[Tensor] = None
if hasattr(data, 'adj_t'):
colptr, row, _ = data.adj_t.csr()
elif hasattr(data, 'edge_index'):
(row, col) = data.edge_index
if not is_sorted:
size = data.size()
perm = (col * size[0]).add_(row).argsort()
row = row[perm]
colptr = torch.ops.torch_sparse.ind2ptr(col[perm], size[1])
else:
raise AttributeError("Data object does not contain attributes "
"'adj_t' or 'edge_index'")
colptr = colptr.to(device)
row = row.to(device)
perm = perm.to(device) if perm is not None else None
if not colptr.is_cuda and share_memory:
colptr.share_memory_()
row.share_memory_()
if perm is not None:
perm.share_memory_()
return colptr, row, perm
def to_hetero_csc(
data: HeteroData,
device: Optional[torch.device] = None,
share_memory: bool = False,
is_sorted: bool = False,
) -> Tuple[Dict[str, Tensor], Dict[str, Tensor], Dict[str, OptTensor]]:
# Convert the heterogeneous graph data into a suitable format for sampling
# (CSC format).
# Returns dictionaries holding `colptr` and `row` indices as well as edge
# permutations for each edge type, respectively.
# Since C++ cannot take dictionaries with tuples as key as input, edge type
# triplets are converted into single strings.
colptr_dict, row_dict, perm_dict = {}, {}, {}
for store in data.edge_stores:
key = edge_type_to_str(store._key)
out = to_csc(store, device, share_memory, is_sorted)
colptr_dict[key], row_dict[key], perm_dict[key] = out
return colptr_dict, row_dict, perm_dict
def filter_node_store_(store: NodeStorage, out_store: NodeStorage,
index: Tensor) -> NodeStorage:
# Filters a node storage object to only hold the nodes in `index`:
for key, value in store.items():
if key == 'num_nodes':
out_store.num_nodes = index.numel()
elif store.is_node_attr(key):
index = index.to(value.device)
dim = store._parent().__cat_dim__(key, value, store)
out_store[key] = index_select(value, index, dim=dim)
return store
def filter_edge_store_(store: EdgeStorage, out_store: EdgeStorage, row: Tensor,
col: Tensor, index: Tensor,
perm: OptTensor = None) -> EdgeStorage:
# Filters a edge storage object to only hold the edges in `index`,
# which represents the new graph as denoted by `(row, col)`:
for key, value in store.items():
if key == 'edge_index':
edge_index = torch.stack([row, col], dim=0)
out_store.edge_index = edge_index.to(value.device)
elif key == 'adj_t':
# NOTE: We expect `(row, col)` to be sorted by `col` (CSC layout).
row = row.to(value.device())
col = col.to(value.device())
edge_attr = value.storage.value()
if edge_attr is not None:
index = index.to(edge_attr.device)
edge_attr = edge_attr[index]
sparse_sizes = out_store.size()[::-1]
# TODO Currently, we set `is_sorted=False`, see:
# https://github.com/pyg-team/pytorch_geometric/issues/4346
out_store.adj_t = SparseTensor(row=col, col=row, value=edge_attr,
sparse_sizes=sparse_sizes,
is_sorted=False, trust_data=True)
elif store.is_edge_attr(key):
dim = store._parent().__cat_dim__(key, value, store)
if perm is None:
index = index.to(value.device)
out_store[key] = index_select(value, index, dim=dim)
else:
perm = perm.to(value.device)
index = index.to(value.device)
out_store[key] = index_select(value, perm[index], dim=dim)
return store
def filter_data(data: Data, node: Tensor, row: Tensor, col: Tensor,
edge: Tensor, perm: OptTensor = None) -> Data:
# Filters a data object to only hold nodes in `node` and edges in `edge`:
out = copy.copy(data)
filter_node_store_(data._store, out._store, node)
filter_edge_store_(data._store, out._store, row, col, edge, perm)
return out
def filter_hetero_data(
data: HeteroData,
node_dict: Dict[str, Tensor],
row_dict: Dict[str, Tensor],
col_dict: Dict[str, Tensor],
edge_dict: Dict[str, Tensor],
perm_dict: Dict[str, OptTensor],
) -> HeteroData:
# Filters a heterogeneous data object to only hold nodes in `node` and
# edges in `edge` for each node and edge type, respectively:
out = copy.copy(data)
for node_type in data.node_types:
filter_node_store_(data[node_type], out[node_type],
node_dict[node_type])
for edge_type in data.edge_types:
edge_type_str = edge_type_to_str(edge_type)
filter_edge_store_(data[edge_type], out[edge_type],
row_dict[edge_type_str], col_dict[edge_type_str],
edge_dict[edge_type_str], perm_dict[edge_type_str])
return out
|
en
| 0.857121
|
# If we are in a background process, we write directly into a shared # memory tensor to avoid an extra copy: # Since C++ cannot take dictionaries with tuples as key as input, edge type # triplets need to be converted into single strings. # Convert the graph data into a suitable format for sampling (CSC format). # Returns the `colptr` and `row` indices of the graph, as well as an # `perm` vector that denotes the permutation of edges. # Since no permutation of edges is applied when using `SparseTensor`, # `perm` can be of type `None`. # Convert the heterogeneous graph data into a suitable format for sampling # (CSC format). # Returns dictionaries holding `colptr` and `row` indices as well as edge # permutations for each edge type, respectively. # Since C++ cannot take dictionaries with tuples as key as input, edge type # triplets are converted into single strings. # Filters a node storage object to only hold the nodes in `index`: # Filters a edge storage object to only hold the edges in `index`, # which represents the new graph as denoted by `(row, col)`: # NOTE: We expect `(row, col)` to be sorted by `col` (CSC layout). # TODO Currently, we set `is_sorted=False`, see: # https://github.com/pyg-team/pytorch_geometric/issues/4346 # Filters a data object to only hold nodes in `node` and edges in `edge`: # Filters a heterogeneous data object to only hold nodes in `node` and # edges in `edge` for each node and edge type, respectively:
| 2.194793
| 2
|
tests/http_client_test.py
|
dnephin/swagger-py
| 0
|
6626483
|
# -*- coding: utf-8 -*-
import base64
import unittest
import httpretty
import mock
import pytest
import requests
from swaggerpy.http_client import (
SynchronousHttpClient,
SynchronousEventual,
)
class SynchronousClientTestCase(unittest.TestCase):
    """Integration-style tests for SynchronousHttpClient, with the HTTP
    layer stubbed out by httpretty."""

    def _default_params(self):
        # Baseline request parameters shared by every test below.
        return {
            'method': 'GET',
            'url': 'http://swagger.py/client-test',
            'headers': {},
        }
    @httpretty.activate
    def test_simple_get(self):
        """A GET with query params hits the stub and returns its body."""
        httpretty.register_uri(
            httpretty.GET, "http://swagger.py/client-test",
            body='expected')
        client = SynchronousHttpClient()
        params = self._default_params()
        params['params'] = {'foo': 'bar'}
        resp = client.start_request(params).wait()
        self.assertEqual(200, resp.status_code)
        self.assertEqual('expected', resp.text)
        self.assertEqual({'foo': ['bar']},
                         httpretty.last_request().querystring)
    @httpretty.activate
    def test_unicode_to_utf8_encode_params(self):
        """Unicode query values survive the round trip unmangled."""
        httpretty.register_uri(
            httpretty.GET, "http://swagger.py/client-test",
            body='expected')
        client = SynchronousHttpClient()
        params = self._default_params()
        params['params'] = {'foo': u'酒場'}
        resp = client.start_request(params).wait()
        self.assertEqual(200, resp.status_code)
        self.assertEqual('expected', resp.text)
        self.assertEqual({'foo': [u'酒場']},
                         httpretty.last_request().querystring)
    @httpretty.activate
    def test_real_post(self):
        """POST form data is urlencoded and sent in the request body."""
        httpretty.register_uri(
            httpretty.POST, "http://swagger.py/client-test",
            body='expected', content_type='text/json')
        client = SynchronousHttpClient()
        params = self._default_params()
        params['data'] = {'foo': 'bar'}
        params['method'] = 'POST'
        resp = client.start_request(params).wait()
        self.assertEqual(200, resp.status_code)
        self.assertEqual('expected', resp.text)
        self.assertEqual('application/x-www-form-urlencoded',
                         httpretty.last_request().headers['content-type'])
        self.assertEqual("foo=bar",
                         httpretty.last_request().body)
    @httpretty.activate
    def test_basic_auth(self):
        """Basic auth registered for a host adds an Authorization header."""
        httpretty.register_uri(
            httpretty.GET, "http://swagger.py/client-test",
            body='expected')
        client = SynchronousHttpClient()
        client.set_basic_auth("swagger.py", 'unit', 'peekaboo')
        params = self._default_params()
        params['params'] = {'foo': 'bar'}
        resp = client.start_request(params).wait()
        self.assertEqual(200, resp.status_code)
        self.assertEqual('expected', resp.text)
        self.assertEqual({'foo': ['bar']},
                         httpretty.last_request().querystring)
        # NOTE(review): b64encode of a str is Python 2 only; under Python 3
        # this needs bytes input — confirm before porting.
        self.assertEqual('Basic %s' % base64.b64encode("unit:peekaboo"),
                         httpretty.last_request().headers.get('Authorization'))
    @httpretty.activate
    def test_api_key(self):
        """An API key registered for a host is appended as a query param."""
        httpretty.register_uri(
            httpretty.GET, "http://swagger.py/client-test",
            body='expected')
        client = SynchronousHttpClient()
        client.set_api_key("swagger.py", 'abc123', param_name='test')
        params = self._default_params()
        params['params'] = {'foo': 'bar'}
        resp = client.start_request(params).wait()
        self.assertEqual(200, resp.status_code)
        self.assertEqual('expected', resp.text)
        self.assertEqual({'foo': ['bar'], 'test': ['abc123']},
                         httpretty.last_request().querystring)
    @httpretty.activate
    def test_auth_leak(self):
        """Credentials registered for one host must not leak to another."""
        httpretty.register_uri(
            httpretty.GET, "http://hackerz.py",
            body='expected')
        client = SynchronousHttpClient()
        client.set_basic_auth("swagger.py", 'unit', 'peekaboo')
        params = self._default_params()
        params['params'] = {'foo': 'bar'}
        params['url'] = 'http://hackerz.py'
        resp = client.start_request(params).wait()
        self.assertEqual(200, resp.status_code)
        self.assertEqual('expected', resp.text)
        self.assertEqual({'foo': ['bar']},
                         httpretty.last_request().querystring)
        self.assertTrue(
            httpretty.last_request().headers.get('Authorization') is None)
@pytest.fixture
def mock_session():
    # Autospec'd requests.Session: records prepare_request()/send() calls.
    return mock.create_autospec(requests.Session)
@pytest.fixture
def mock_request():
    # Autospec'd requests.Request carrying minimal GET attributes.
    return mock.create_autospec(
        requests.Request,
        method='GET',
        url='http://example.com',
        params={})
class TestSynchronousEventual(object):
    """Unit tests for SynchronousEventual with a mocked session/request."""

    def test_wait(self, mock_session, mock_request):
        # wait() should prepare the request and send it with the timeout,
        # returning whatever the session's send() returns.
        timeout = 20
        sync_eventual = SynchronousEventual(mock_session, mock_request)
        assert sync_eventual.wait(timeout) == mock_session.send.return_value
        mock_session.send.assert_called_once_with(
            mock_session.prepare_request.return_value,
            timeout=timeout)
    def test_cancel(self, mock_session, mock_request):
        sync_eventual = SynchronousEventual(mock_session, mock_request)
        # no-op cancel; test only that it supports the interface
        sync_eventual.cancel()
|
# -*- coding: utf-8 -*-
import base64
import unittest
import httpretty
import mock
import pytest
import requests
from swaggerpy.http_client import (
SynchronousHttpClient,
SynchronousEventual,
)
class SynchronousClientTestCase(unittest.TestCase):
def _default_params(self):
return {
'method': 'GET',
'url': 'http://swagger.py/client-test',
'headers': {},
}
@httpretty.activate
def test_simple_get(self):
httpretty.register_uri(
httpretty.GET, "http://swagger.py/client-test",
body='expected')
client = SynchronousHttpClient()
params = self._default_params()
params['params'] = {'foo': 'bar'}
resp = client.start_request(params).wait()
self.assertEqual(200, resp.status_code)
self.assertEqual('expected', resp.text)
self.assertEqual({'foo': ['bar']},
httpretty.last_request().querystring)
@httpretty.activate
def test_unicode_to_utf8_encode_params(self):
httpretty.register_uri(
httpretty.GET, "http://swagger.py/client-test",
body='expected')
client = SynchronousHttpClient()
params = self._default_params()
params['params'] = {'foo': u'酒場'}
resp = client.start_request(params).wait()
self.assertEqual(200, resp.status_code)
self.assertEqual('expected', resp.text)
self.assertEqual({'foo': [u'酒場']},
httpretty.last_request().querystring)
@httpretty.activate
def test_real_post(self):
httpretty.register_uri(
httpretty.POST, "http://swagger.py/client-test",
body='expected', content_type='text/json')
client = SynchronousHttpClient()
params = self._default_params()
params['data'] = {'foo': 'bar'}
params['method'] = 'POST'
resp = client.start_request(params).wait()
self.assertEqual(200, resp.status_code)
self.assertEqual('expected', resp.text)
self.assertEqual('application/x-www-form-urlencoded',
httpretty.last_request().headers['content-type'])
self.assertEqual("foo=bar",
httpretty.last_request().body)
@httpretty.activate
def test_basic_auth(self):
httpretty.register_uri(
httpretty.GET, "http://swagger.py/client-test",
body='expected')
client = SynchronousHttpClient()
client.set_basic_auth("swagger.py", 'unit', 'peekaboo')
params = self._default_params()
params['params'] = {'foo': 'bar'}
resp = client.start_request(params).wait()
self.assertEqual(200, resp.status_code)
self.assertEqual('expected', resp.text)
self.assertEqual({'foo': ['bar']},
httpretty.last_request().querystring)
self.assertEqual('Basic %s' % base64.b64encode("unit:peekaboo"),
httpretty.last_request().headers.get('Authorization'))
@httpretty.activate
def test_api_key(self):
    """An API key registered for a host is merged into the query string."""
    httpretty.register_uri(
        httpretty.GET, "http://swagger.py/client-test",
        body='expected')
    client = SynchronousHttpClient()
    client.set_api_key("swagger.py", 'abc123', param_name='test')
    params = self._default_params()
    params['params'] = {'foo': 'bar'}
    resp = client.start_request(params).wait()
    self.assertEqual(200, resp.status_code)
    self.assertEqual('expected', resp.text)
    # The caller's params and the injected key parameter both appear.
    self.assertEqual({'foo': ['bar'], 'test': ['abc123']},
                     httpretty.last_request().querystring)
@httpretty.activate
def test_auth_leak(self):
    """Credentials for one host must NOT leak to requests to another host."""
    httpretty.register_uri(
        httpretty.GET, "http://hackerz.py",
        body='expected')
    client = SynchronousHttpClient()
    # Auth is registered for swagger.py, but the request goes to hackerz.py.
    client.set_basic_auth("swagger.py", 'unit', 'peekaboo')
    params = self._default_params()
    params['params'] = {'foo': 'bar'}
    params['url'] = 'http://hackerz.py'
    resp = client.start_request(params).wait()
    self.assertEqual(200, resp.status_code)
    self.assertEqual('expected', resp.text)
    self.assertEqual({'foo': ['bar']},
                     httpretty.last_request().querystring)
    # No Authorization header may be sent to the foreign host.
    self.assertTrue(
        httpretty.last_request().headers.get('Authorization') is None)
@pytest.fixture
def mock_session():
    """Autospec'd ``requests.Session`` double for SynchronousEventual tests."""
    session = mock.create_autospec(requests.Session)
    return session
@pytest.fixture
def mock_request():
    """Autospec'd GET ``requests.Request`` aimed at example.com, no params."""
    spec_attrs = {
        'method': 'GET',
        'url': 'http://example.com',
        'params': {},
    }
    return mock.create_autospec(requests.Request, **spec_attrs)
class TestSynchronousEventual(object):
    """Unit tests for SynchronousEventual driven by autospec'd doubles."""

    def test_wait(self, mock_session, mock_request):
        # wait() must prepare the request on the session and send the
        # prepared request with the caller's timeout, returning the response.
        timeout = 20
        sync_eventual = SynchronousEventual(mock_session, mock_request)
        assert sync_eventual.wait(timeout) == mock_session.send.return_value
        mock_session.send.assert_called_once_with(
            mock_session.prepare_request.return_value,
            timeout=timeout)

    def test_cancel(self, mock_session, mock_request):
        sync_eventual = SynchronousEventual(mock_session, mock_request)
        # no-op cancel, test that it supports the interface
        sync_eventual.cancel()
|
en
| 0.893052
|
# -*- coding: utf-8 -*- # no-op cancel, test that is supports the interface
| 2.263396
| 2
|
modules/emoji_utils.py
|
Ghost-Proxy/Voluspa
| 0
|
6626484
|
from emoji import emojize
def ri_alphabet(n):
    """Yield the first *n* Regional Indicator symbols, starting at 🇦.

    *n* is clamped into the range 1..26 (the Latin alphabet).
    """
    count = max(1, min(n, 26))
    base = ord('\U0001f1e6')  # Regional Indicator Symbol Letter A
    for offset in range(count):
        yield chr(base + offset)
def ri_at_index(i):
    """Return the Regional Indicator symbol for alphabet index *i* (0 = A).

    *i* is clamped into the range 0..25.
    """
    clamped = min(max(i, 0), 25)
    return chr(ord('\U0001f1e6') + clamped)
def index_of_ri(ri):
    """Inverse of ri_at_index: alphabet index (0 = A) of a Regional Indicator."""
    # 0x1F1E6 is Regional Indicator Symbol Letter A.
    return ord(ri) - 0x1F1E6
def normalize(name):
    """Wrap a bare emoji alias in colons (``smile`` -> ``:smile:``) if known.

    Names that already start or end with ``:`` — or are empty — are returned
    unchanged.  A bare name is wrapped only when the emoji library actually
    recognizes it, i.e. emojize() translates the colon-wrapped form.
    """
    # Guard against empty input: the original indexed name[0] / name[-1]
    # and raised IndexError on ''.
    if name and name[0] != ':' and name[-1] != ':':
        wrapped = f':{name}:'
        # emojize returns its input unchanged for unknown aliases.
        if emojize(wrapped, use_aliases=True) != wrapped:
            name = wrapped
    return name
|
from emoji import emojize
def ri_alphabet(n):
if n < 1:
n = 1
elif n > 26:
n = 26
current_emoji = '\U0001f1e6' # Regional Indicator A
i = 0
while i < n:
yield current_emoji
current_emoji = chr(ord(current_emoji) + 1)
i += 1
def ri_at_index(i):
if i < 0:
i = 0
elif i > 25:
i = 25
a = '\U0001f1e6'
return chr(ord(a) + i)
def index_of_ri(ri):
return ord(ri) - ord('\U0001f1e6')
def normalize(name):
if name[0] != ':' and name[-1] != ':':
temp = emojize(f':{name}:', use_aliases=True)
if temp != f':{name}:':
name = f':{name}:'
return name
|
en
| 0.482499
|
# Regional Indicator A
| 3.417673
| 3
|
training/coach.py
|
maltetoelle/pixel2style2pixel
| 0
|
6626485
|
import os
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from utils import common, train_utils
from criteria import id_loss, w_norm, moco_loss
from configs import data_configs
from datasets.images_dataset import ImagesDataset
from criteria.lpips.lpips import LPIPS
from models.psp import pSp
from training.ranger import Ranger
class Coach:
    """Training driver for pSp: wires up the network, losses, data loaders,
    optimizer, tensorboard/wandb logging and checkpointing."""

    def __init__(self, opts):
        self.opts = opts
        self.global_step = 0
        self.device = 'cuda:0'  # TODO: Allow multiple GPU? currently using CUDA_VISIBLE_DEVICES
        self.opts.device = self.device

        if self.opts.use_wandb:
            from utils.wandb_utils import WBLogger
            self.wb_logger = WBLogger(self.opts)

        # Initialize network
        self.net = pSp(self.opts).to(self.device)
        if hasattr(self.opts, "start_checkpoint_path") and self.opts.start_checkpoint_path is not None:
            self.net.load_state_dict(torch.load(self.opts.start_checkpoint_path)["state_dict"])

        # Estimate latent_avg via dense sampling if latent_avg is not available
        if not hasattr(self.net, "latent_avg") or self.net.latent_avg is None:
            self.net.latent_avg = self.net.decoder.mean_latent(int(1e5))[0].detach()

        # Initialize losses.  The ID and MoCo similarity losses are mutually
        # exclusive — both measure identity preservation.
        if self.opts.id_lambda > 0 and self.opts.moco_lambda > 0:
            raise ValueError('Both ID and MoCo loss have lambdas > 0! Please select only one to have non-zero lambda!')
        self.mse_loss = nn.MSELoss().to(self.device).eval()
        if self.opts.lpips_lambda > 0:
            self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()
        if self.opts.id_lambda > 0:
            self.id_loss = id_loss.IDLoss().to(self.device).eval()
        if self.opts.w_norm_lambda > 0:
            self.w_norm_loss = w_norm.WNormLoss(start_from_latent_avg=self.opts.start_from_latent_avg)
        if self.opts.moco_lambda > 0:
            self.moco_loss = moco_loss.MocoLoss().to(self.device).eval()

        # Initialize optimizer
        self.optimizer = self.configure_optimizers()

        # Initialize datasets and loaders
        self.train_dataset, self.test_dataset = self.configure_datasets()
        self.train_dataloader = DataLoader(self.train_dataset,
                                           batch_size=self.opts.batch_size,
                                           shuffle=True,
                                           num_workers=int(self.opts.workers),
                                           drop_last=True)
        self.test_dataloader = DataLoader(self.test_dataset,
                                          batch_size=self.opts.test_batch_size,
                                          shuffle=False,
                                          num_workers=int(self.opts.test_workers),
                                          drop_last=True)

        # Initialize tensorboard logger
        log_dir = os.path.join(opts.exp_dir, 'logs')
        os.makedirs(log_dir, exist_ok=True)
        self.logger = SummaryWriter(log_dir=log_dir)

        # Initialize checkpoint dir
        self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        self.best_val_loss = None
        if self.opts.save_interval is None:
            self.opts.save_interval = self.opts.max_steps

    def train(self):
        """Run the optimization loop until ``opts.max_steps``, logging,
        validating and checkpointing at the configured intervals."""
        self.net.train()
        while self.global_step < self.opts.max_steps:
            for batch_idx, batch in enumerate(self.train_dataloader):
                self.optimizer.zero_grad()
                x, y = batch
                x, y = x.to(self.device).float(), y.to(self.device).float()
                y_hat, latent = self.net.forward(x, return_latents=True)
                loss, loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent)
                loss.backward()
                self.optimizer.step()

                # Logging related; log images densely during the first 1000 steps.
                if self.global_step % self.opts.image_interval == 0 or (self.global_step < 1000 and self.global_step % 25 == 0):
                    self.parse_and_log_images(id_logs, x, y, y_hat, title='images/train/faces')
                if self.global_step % self.opts.board_interval == 0:
                    self.print_metrics(loss_dict, prefix='train')
                    self.log_metrics(loss_dict, prefix='train')

                # Log images of first batch to wandb
                if self.opts.use_wandb and batch_idx == 0:
                    self.wb_logger.log_images_to_wandb(x, y, y_hat, id_logs, prefix="train", step=self.global_step, opts=self.opts)

                # Validation related
                val_loss_dict = None
                if self.global_step % self.opts.val_interval == 0 or self.global_step == self.opts.max_steps:
                    val_loss_dict = self.validate()
                    if val_loss_dict and (self.best_val_loss is None or val_loss_dict['loss'] < self.best_val_loss):
                        self.best_val_loss = val_loss_dict['loss']
                        self.checkpoint_me(val_loss_dict, is_best=True)

                if self.global_step % self.opts.save_interval == 0 or self.global_step == self.opts.max_steps:
                    if val_loss_dict is not None:
                        self.checkpoint_me(val_loss_dict, is_best=False)
                    else:
                        self.checkpoint_me(loss_dict, is_best=False)

                if self.global_step == self.opts.max_steps:
                    print('OMG, finished training!')
                    break

                self.global_step += 1

    def validate(self):
        """Evaluate on the test set.

        Returns the aggregated loss dict, or None on the step-0 sanity pass
        (only a handful of batches are run, so the metrics would be noise).
        """
        self.net.eval()
        agg_loss_dict = []
        for batch_idx, batch in enumerate(self.test_dataloader):
            x, y = batch
            with torch.no_grad():
                x, y = x.to(self.device).float(), y.to(self.device).float()
                y_hat, latent = self.net.forward(x, return_latents=True)
                loss, cur_loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent)
            agg_loss_dict.append(cur_loss_dict)

            # Logging related
            self.parse_and_log_images(id_logs, x, y, y_hat,
                                      title='images/test/faces',
                                      subscript='{:04d}'.format(batch_idx))

            # Log images of first batch to wandb
            if self.opts.use_wandb and batch_idx == 0:
                self.wb_logger.log_images_to_wandb(x, y, y_hat, id_logs, prefix="test", step=self.global_step, opts=self.opts)

            # For first step just do sanity test on small amount of data
            if self.global_step == 0 and batch_idx >= 4:
                self.net.train()
                return None  # Do not log, inaccurate in first batch

        loss_dict = train_utils.aggregate_loss_dict(agg_loss_dict)
        self.log_metrics(loss_dict, prefix='test')
        self.print_metrics(loss_dict, prefix='test')

        self.net.train()
        return loss_dict

    def checkpoint_me(self, loss_dict, is_best):
        """Save a checkpoint ('best_model.pt' or 'iteration_<step>.pt') and
        append a line to the timestamp log."""
        save_name = 'best_model.pt' if is_best else f'iteration_{self.global_step}.pt'
        save_dict = self.__get_save_dict()
        checkpoint_path = os.path.join(self.checkpoint_dir, save_name)
        torch.save(save_dict, checkpoint_path)
        with open(os.path.join(self.checkpoint_dir, 'timestamp.txt'), 'a') as f:
            if is_best:
                f.write(f'**Best**: Step - {self.global_step}, Loss - {self.best_val_loss} \n{loss_dict}\n')
                if self.opts.use_wandb:
                    self.wb_logger.log_best_model()
            else:
                f.write(f'Step - {self.global_step}, \n{loss_dict}\n')

    def configure_optimizers(self):
        """Build the optimizer over the encoder (and optionally decoder) params."""
        params = list(self.net.encoder.parameters())
        if self.opts.train_decoder:
            params += list(self.net.decoder.parameters())
        if self.opts.optim_name == 'adam':
            optimizer = torch.optim.Adam(params, lr=self.opts.learning_rate)
        else:
            optimizer = Ranger(params, lr=self.opts.learning_rate)
        return optimizer

    def configure_datasets(self):
        """Build (train, test) ImagesDataset pairs from the dataset config.

        Raises:
            Exception: if ``opts.dataset_type`` is not a configured dataset.
        """
        if self.opts.dataset_type not in data_configs.DATASETS.keys():
            # BUG FIX: the exception was constructed but never raised, so an
            # invalid dataset_type fell through to a KeyError (or worse) below.
            raise Exception(f'{self.opts.dataset_type} is not a valid dataset_type')
        print(f'Loading dataset for {self.opts.dataset_type}')
        dataset_args = data_configs.DATASETS[self.opts.dataset_type]
        transforms_dict = dataset_args['transforms'](self.opts).get_transforms()
        train_dataset = ImagesDataset(source_root=dataset_args['train_source_root'],
                                      target_root=dataset_args['train_target_root'],
                                      source_transform=transforms_dict['transform_source'],
                                      target_transform=transforms_dict['transform_gt_train'],
                                      opts=self.opts)
        test_dataset = ImagesDataset(source_root=dataset_args['test_source_root'],
                                     target_root=dataset_args['test_target_root'],
                                     source_transform=transforms_dict['transform_source'],
                                     target_transform=transforms_dict['transform_test'],
                                     opts=self.opts)
        if self.opts.use_wandb:
            self.wb_logger.log_dataset_wandb(train_dataset, dataset_name="Train")
            self.wb_logger.log_dataset_wandb(test_dataset, dataset_name="Test")
        print(f"Number of training samples: {len(train_dataset)}")
        print(f"Number of test samples: {len(test_dataset)}")
        return train_dataset, test_dataset

    def calc_loss(self, x, y, y_hat, latent):
        """Compute the weighted training loss.

        Returns (total_loss_tensor, per-term float dict, optional id logs).
        """
        loss_dict = {}
        loss = 0.0
        id_logs = None
        if self.opts.id_lambda > 0:
            loss_id, sim_improvement, id_logs = self.id_loss(y_hat, y, x)
            loss_dict['loss_id'] = float(loss_id)
            loss_dict['id_improve'] = float(sim_improvement)
            # Accumulate like the other terms (loss starts at 0.0, and the
            # mutually-exclusive MoCo branch cannot have run first).
            loss += loss_id * self.opts.id_lambda
        if self.opts.l2_lambda > 0:
            loss_l2 = F.mse_loss(y_hat, y)
            loss_dict['loss_l2'] = float(loss_l2)
            loss += loss_l2 * self.opts.l2_lambda
        if self.opts.lpips_lambda > 0:
            loss_lpips = self.lpips_loss(y_hat, y)
            loss_dict['loss_lpips'] = float(loss_lpips)
            loss += loss_lpips * self.opts.lpips_lambda
        if self.opts.lpips_lambda_crop > 0:
            # Inner crop — presumably the face region for aligned 256px
            # face images; TODO confirm for other datasets.
            loss_lpips_crop = self.lpips_loss(y_hat[:, :, 35:223, 32:220], y[:, :, 35:223, 32:220])
            loss_dict['loss_lpips_crop'] = float(loss_lpips_crop)
            loss += loss_lpips_crop * self.opts.lpips_lambda_crop
        if self.opts.l2_lambda_crop > 0:
            loss_l2_crop = F.mse_loss(y_hat[:, :, 35:223, 32:220], y[:, :, 35:223, 32:220])
            loss_dict['loss_l2_crop'] = float(loss_l2_crop)
            loss += loss_l2_crop * self.opts.l2_lambda_crop
        if self.opts.w_norm_lambda > 0:
            loss_w_norm = self.w_norm_loss(latent, self.net.latent_avg)
            loss_dict['loss_w_norm'] = float(loss_w_norm)
            loss += loss_w_norm * self.opts.w_norm_lambda
        if self.opts.moco_lambda > 0:
            loss_moco, sim_improvement, id_logs = self.moco_loss(y_hat, y, x)
            loss_dict['loss_moco'] = float(loss_moco)
            loss_dict['id_improve'] = float(sim_improvement)
            loss += loss_moco * self.opts.moco_lambda
        loss_dict['loss'] = float(loss)
        return loss, loss_dict, id_logs

    def log_metrics(self, metrics_dict, prefix):
        """Write scalar metrics to tensorboard (and wandb if enabled)."""
        for key, value in metrics_dict.items():
            self.logger.add_scalar(f'{prefix}/{key}', value, self.global_step)
        if self.opts.use_wandb:
            self.wb_logger.log(prefix, metrics_dict, self.global_step)

    def print_metrics(self, metrics_dict, prefix):
        """Print metrics to stdout."""
        print(f'Metrics for {prefix}, step {self.global_step}')
        for key, value in metrics_dict.items():
            print(f'\t{key} = ', value)

    def parse_and_log_images(self, id_logs, x, y, y_hat, title, subscript=None, display_count=2):
        """Collect input/target/output triplets (plus any id logs) for the
        first ``display_count`` samples and save a comparison figure."""
        im_data = []
        for i in range(display_count):
            cur_im_data = {
                'input_face': common.log_input_image(x[i], self.opts),
                'target_face': common.tensor2im(y[i]),
                'output_face': common.tensor2im(y_hat[i]),
            }
            if id_logs is not None:
                for key in id_logs[i]:
                    cur_im_data[key] = id_logs[i][key]
            im_data.append(cur_im_data)
        self.log_images(title, im_data=im_data, subscript=subscript)

    def log_images(self, name, im_data, subscript=None, log_latest=False):
        """Render the faces figure and save it under the tensorboard log dir."""
        fig = common.vis_faces(im_data)
        step = self.global_step
        if log_latest:
            step = 0  # overwrite a single "latest" image instead of one per step
        if subscript:
            path = os.path.join(self.logger.log_dir, name, f'{subscript}_{step:04d}.jpg')
        else:
            path = os.path.join(self.logger.log_dir, name, f'{step:04d}.jpg')
        os.makedirs(os.path.dirname(path), exist_ok=True)
        fig.savefig(path)
        plt.close(fig)

    def __get_save_dict(self):
        """Build the checkpoint payload: weights, opts and (optionally) latent_avg."""
        save_dict = {
            'state_dict': self.net.state_dict(),
            'opts': vars(self.opts)
        }
        # save the latent avg in state_dict for inference if truncation of w was used during training
        if self.opts.start_from_latent_avg:
            save_dict['latent_avg'] = self.net.latent_avg
        return save_dict
|
import os
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from utils import common, train_utils
from criteria import id_loss, w_norm, moco_loss
from configs import data_configs
from datasets.images_dataset import ImagesDataset
from criteria.lpips.lpips import LPIPS
from models.psp import pSp
from training.ranger import Ranger
class Coach:
def __init__(self, opts):
self.opts = opts
self.global_step = 0
self.device = 'cuda:0' # TODO: Allow multiple GPU? currently using CUDA_VISIBLE_DEVICES
self.opts.device = self.device
if self.opts.use_wandb:
from utils.wandb_utils import WBLogger
self.wb_logger = WBLogger(self.opts)
# Initialize network
self.net = pSp(self.opts).to(self.device)
if hasattr(self.opts, "start_checkpoint_path") and self.opts.start_checkpoint_path is not None:
self.net.load_state_dict(torch.load(self.opts.start_checkpoint_path)["state_dict"])
# Estimate latent_avg via dense sampling if latent_avg is not available
if not hasattr(self.net, "latent_avg") or self.net.latent_avg is None:
self.net.latent_avg = self.net.decoder.mean_latent(int(1e5))[0].detach()
# Initialize loss
if self.opts.id_lambda > 0 and self.opts.moco_lambda > 0:
raise ValueError('Both ID and MoCo loss have lambdas > 0! Please select only one to have non-zero lambda!')
self.mse_loss = nn.MSELoss().to(self.device).eval()
if self.opts.lpips_lambda > 0:
self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()
if self.opts.id_lambda > 0:
self.id_loss = id_loss.IDLoss().to(self.device).eval()
if self.opts.w_norm_lambda > 0:
self.w_norm_loss = w_norm.WNormLoss(start_from_latent_avg=self.opts.start_from_latent_avg)
if self.opts.moco_lambda > 0:
self.moco_loss = moco_loss.MocoLoss().to(self.device).eval()
# Initialize optimizer
self.optimizer = self.configure_optimizers()
# Initialize dataset
self.train_dataset, self.test_dataset = self.configure_datasets()
self.train_dataloader = DataLoader(self.train_dataset,
batch_size=self.opts.batch_size,
shuffle=True,
num_workers=int(self.opts.workers),
drop_last=True)
self.test_dataloader = DataLoader(self.test_dataset,
batch_size=self.opts.test_batch_size,
shuffle=False,
num_workers=int(self.opts.test_workers),
drop_last=True)
# Initialize logger
log_dir = os.path.join(opts.exp_dir, 'logs')
os.makedirs(log_dir, exist_ok=True)
self.logger = SummaryWriter(log_dir=log_dir)
# Initialize checkpoint dir
self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
os.makedirs(self.checkpoint_dir, exist_ok=True)
self.best_val_loss = None
if self.opts.save_interval is None:
self.opts.save_interval = self.opts.max_steps
def train(self):
self.net.train()
while self.global_step < self.opts.max_steps:
for batch_idx, batch in enumerate(self.train_dataloader):
self.optimizer.zero_grad()
x, y = batch
x, y = x.to(self.device).float(), y.to(self.device).float()
y_hat, latent = self.net.forward(x, return_latents=True)
loss, loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent)
loss.backward()
self.optimizer.step()
# Logging related
if self.global_step % self.opts.image_interval == 0 or (self.global_step < 1000 and self.global_step % 25 == 0):
self.parse_and_log_images(id_logs, x, y, y_hat, title='images/train/faces')
if self.global_step % self.opts.board_interval == 0:
self.print_metrics(loss_dict, prefix='train')
self.log_metrics(loss_dict, prefix='train')
# Log images of first batch to wandb
if self.opts.use_wandb and batch_idx == 0:
self.wb_logger.log_images_to_wandb(x, y, y_hat, id_logs, prefix="train", step=self.global_step, opts=self.opts)
# Validation related
val_loss_dict = None
if self.global_step % self.opts.val_interval == 0 or self.global_step == self.opts.max_steps:
val_loss_dict = self.validate()
if val_loss_dict and (self.best_val_loss is None or val_loss_dict['loss'] < self.best_val_loss):
self.best_val_loss = val_loss_dict['loss']
self.checkpoint_me(val_loss_dict, is_best=True)
if self.global_step % self.opts.save_interval == 0 or self.global_step == self.opts.max_steps:
if val_loss_dict is not None:
self.checkpoint_me(val_loss_dict, is_best=False)
else:
self.checkpoint_me(loss_dict, is_best=False)
if self.global_step == self.opts.max_steps:
print('OMG, finished training!')
break
self.global_step += 1
def validate(self):
self.net.eval()
agg_loss_dict = []
for batch_idx, batch in enumerate(self.test_dataloader):
x, y = batch
with torch.no_grad():
x, y = x.to(self.device).float(), y.to(self.device).float()
y_hat, latent = self.net.forward(x, return_latents=True)
loss, cur_loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent)
agg_loss_dict.append(cur_loss_dict)
# Logging related
self.parse_and_log_images(id_logs, x, y, y_hat,
title='images/test/faces',
subscript='{:04d}'.format(batch_idx))
# Log images of first batch to wandb
if self.opts.use_wandb and batch_idx == 0:
self.wb_logger.log_images_to_wandb(x, y, y_hat, id_logs, prefix="test", step=self.global_step, opts=self.opts)
# For first step just do sanity test on small amount of data
if self.global_step == 0 and batch_idx >= 4:
self.net.train()
return None # Do not log, inaccurate in first batch
loss_dict = train_utils.aggregate_loss_dict(agg_loss_dict)
self.log_metrics(loss_dict, prefix='test')
self.print_metrics(loss_dict, prefix='test')
self.net.train()
return loss_dict
def checkpoint_me(self, loss_dict, is_best):
save_name = 'best_model.pt' if is_best else f'iteration_{self.global_step}.pt'
save_dict = self.__get_save_dict()
checkpoint_path = os.path.join(self.checkpoint_dir, save_name)
torch.save(save_dict, checkpoint_path)
with open(os.path.join(self.checkpoint_dir, 'timestamp.txt'), 'a') as f:
if is_best:
f.write(f'**Best**: Step - {self.global_step}, Loss - {self.best_val_loss} \n{loss_dict}\n')
if self.opts.use_wandb:
self.wb_logger.log_best_model()
else:
f.write(f'Step - {self.global_step}, \n{loss_dict}\n')
def configure_optimizers(self):
params = list(self.net.encoder.parameters())
if self.opts.train_decoder:
params += list(self.net.decoder.parameters())
if self.opts.optim_name == 'adam':
optimizer = torch.optim.Adam(params, lr=self.opts.learning_rate)
else:
optimizer = Ranger(params, lr=self.opts.learning_rate)
return optimizer
def configure_datasets(self):
if self.opts.dataset_type not in data_configs.DATASETS.keys():
Exception(f'{self.opts.dataset_type} is not a valid dataset_type')
print(f'Loading dataset for {self.opts.dataset_type}')
# dataset_args = data_configs.DATASETS[self.opts.dataset_type]
# train_dataset = torch.load(dataset_args["train_source_root"])
# test_dataset = torch.load(dataset_args)
dataset_args = data_configs.DATASETS[self.opts.dataset_type]
transforms_dict = dataset_args['transforms'](self.opts).get_transforms()
train_dataset = ImagesDataset(source_root=dataset_args['train_source_root'],
target_root=dataset_args['train_target_root'],
source_transform=transforms_dict['transform_source'],
target_transform=transforms_dict['transform_gt_train'],
opts=self.opts)
test_dataset = ImagesDataset(source_root=dataset_args['test_source_root'],
target_root=dataset_args['test_target_root'],
source_transform=transforms_dict['transform_source'],
target_transform=transforms_dict['transform_test'],
opts=self.opts)
if self.opts.use_wandb:
self.wb_logger.log_dataset_wandb(train_dataset, dataset_name="Train")
self.wb_logger.log_dataset_wandb(test_dataset, dataset_name="Test")
print(f"Number of training samples: {len(train_dataset)}")
print(f"Number of test samples: {len(test_dataset)}")
return train_dataset, test_dataset
def calc_loss(self, x, y, y_hat, latent):
loss_dict = {}
loss = 0.0
id_logs = None
if self.opts.id_lambda > 0:
loss_id, sim_improvement, id_logs = self.id_loss(y_hat, y, x)
loss_dict['loss_id'] = float(loss_id)
loss_dict['id_improve'] = float(sim_improvement)
loss = loss_id * self.opts.id_lambda
if self.opts.l2_lambda > 0:
loss_l2 = F.mse_loss(y_hat, y)
loss_dict['loss_l2'] = float(loss_l2)
loss += loss_l2 * self.opts.l2_lambda
if self.opts.lpips_lambda > 0:
loss_lpips = self.lpips_loss(y_hat, y)
loss_dict['loss_lpips'] = float(loss_lpips)
loss += loss_lpips * self.opts.lpips_lambda
if self.opts.lpips_lambda_crop > 0:
loss_lpips_crop = self.lpips_loss(y_hat[:, :, 35:223, 32:220], y[:, :, 35:223, 32:220])
loss_dict['loss_lpips_crop'] = float(loss_lpips_crop)
loss += loss_lpips_crop * self.opts.lpips_lambda_crop
if self.opts.l2_lambda_crop > 0:
loss_l2_crop = F.mse_loss(y_hat[:, :, 35:223, 32:220], y[:, :, 35:223, 32:220])
loss_dict['loss_l2_crop'] = float(loss_l2_crop)
loss += loss_l2_crop * self.opts.l2_lambda_crop
if self.opts.w_norm_lambda > 0:
loss_w_norm = self.w_norm_loss(latent, self.net.latent_avg)
loss_dict['loss_w_norm'] = float(loss_w_norm)
loss += loss_w_norm * self.opts.w_norm_lambda
if self.opts.moco_lambda > 0:
loss_moco, sim_improvement, id_logs = self.moco_loss(y_hat, y, x)
loss_dict['loss_moco'] = float(loss_moco)
loss_dict['id_improve'] = float(sim_improvement)
loss += loss_moco * self.opts.moco_lambda
loss_dict['loss'] = float(loss)
return loss, loss_dict, id_logs
def log_metrics(self, metrics_dict, prefix):
for key, value in metrics_dict.items():
self.logger.add_scalar(f'{prefix}/{key}', value, self.global_step)
if self.opts.use_wandb:
self.wb_logger.log(prefix, metrics_dict, self.global_step)
def print_metrics(self, metrics_dict, prefix):
print(f'Metrics for {prefix}, step {self.global_step}')
for key, value in metrics_dict.items():
print(f'\t{key} = ', value)
def parse_and_log_images(self, id_logs, x, y, y_hat, title, subscript=None, display_count=2):
im_data = []
for i in range(display_count):
cur_im_data = {
'input_face': common.log_input_image(x[i], self.opts),
'target_face': common.tensor2im(y[i]),
'output_face': common.tensor2im(y_hat[i]),
}
if id_logs is not None:
for key in id_logs[i]:
cur_im_data[key] = id_logs[i][key]
im_data.append(cur_im_data)
self.log_images(title, im_data=im_data, subscript=subscript)
def log_images(self, name, im_data, subscript=None, log_latest=False):
fig = common.vis_faces(im_data)
step = self.global_step
if log_latest:
step = 0
if subscript:
path = os.path.join(self.logger.log_dir, name, f'{subscript}_{step:04d}.jpg')
else:
path = os.path.join(self.logger.log_dir, name, f'{step:04d}.jpg')
os.makedirs(os.path.dirname(path), exist_ok=True)
fig.savefig(path)
plt.close(fig)
def __get_save_dict(self):
save_dict = {
'state_dict': self.net.state_dict(),
'opts': vars(self.opts)
}
# save the latent avg in state_dict for inference if truncation of w was used during training
if self.opts.start_from_latent_avg:
save_dict['latent_avg'] = self.net.latent_avg
return save_dict
|
en
| 0.66334
|
# TODO: Allow multiple GPU? currently using CUDA_VISIBLE_DEVICES # Initialize network # Estimate latent_avg via dense sampling if latent_avg is not available # Initialize loss # Initialize optimizer # Initialize dataset # Initialize logger # Initialize checkpoint dir # Logging related # Log images of first batch to wandb # Validation related # Logging related # Log images of first batch to wandb # For first step just do sanity test on small amount of data # Do not log, inaccurate in first batch # dataset_args = data_configs.DATASETS[self.opts.dataset_type] # train_dataset = torch.load(dataset_args["train_source_root"]) # test_dataset = torch.load(dataset_args) # save the latent avg in state_dict for inference if truncation of w was used during training
| 1.889022
| 2
|
dbops_venv/lib/python3.5/site-packages/alembic/config.py
|
fractal520/dbops
| 15
|
6626486
|
from argparse import ArgumentParser
from .compat import SafeConfigParser
import inspect
import os
import sys
from . import command, util, package_dir, compat
class Config(object):
    """Represent an Alembic configuration.

    Within an ``env.py`` script, this is available
    via the :attr:`.EnvironmentContext.config` attribute,
    which in turn is available at ``alembic.context``::

        from alembic import context
        some_param = context.config.get_main_option("my option")

    When invoking Alembic programmatically, a new
    :class:`.Config` can be created by passing
    the name of an .ini file to the constructor::

        from alembic.config import Config
        alembic_cfg = Config("/path/to/yourapp/alembic.ini")

    With a :class:`.Config` object, you can then
    run Alembic commands programmatically using the directives
    in :mod:`alembic.command`.

    The :class:`.Config` object can also be constructed without
    a filename.  Values can be set programmatically, and
    new sections will be created as needed::

        from alembic.config import Config
        alembic_cfg = Config()
        alembic_cfg.set_main_option("script_location", "myapp:migrations")
        alembic_cfg.set_main_option("url", "postgresql://foo/bar")
        alembic_cfg.set_section_option("mysection", "foo", "bar")

    :param file_: name of the .ini file to open.
    :param ini_section: name of the main Alembic section within the
     .ini file
    :param output_buffer: optional file-like input buffer which
     will be passed to the :class:`.MigrationContext` - used to redirect
     the output of "offline generation" when using Alembic programmatically.
    :param stdout: buffer where the "print" output of commands will be sent.
     Defaults to ``sys.stdout``.

     .. versionadded:: 0.4

    """
    def __init__(self, file_=None, ini_section='alembic', output_buffer=None,
                 stdout=sys.stdout, cmd_opts=None):
        """Construct a new :class:`.Config`

        """
        self.config_file_name = file_
        self.config_ini_section = ini_section
        self.output_buffer = output_buffer
        self.stdout = stdout
        self.cmd_opts = cmd_opts

    cmd_opts = None
    """The command-line options passed to the ``alembic`` script.

    Within an ``env.py`` script this can be accessed via the
    :attr:`.EnvironmentContext.config` attribute.

    .. versionadded:: 0.6.0

    .. seealso::

        :meth:`.EnvironmentContext.get_x_argument`

    """

    config_file_name = None
    """Filesystem path to the .ini file in use."""

    config_ini_section = None
    """Name of the config file section to read basic configuration
    from.  Defaults to ``alembic``, that is the ``[alembic]`` section
    of the .ini file.  This value is modified using the ``-n/--name``
    option to the Alembic runner.

    """

    def print_stdout(self, text, *arg):
        """Render a message to standard out."""

        util.write_outstream(
            self.stdout,
            (compat.text_type(text) % arg),
            "\n"
        )

    @util.memoized_property
    def file_config(self):
        """Return the underlying :class:`ConfigParser` object.

        Direct access to the .ini file is available here,
        though the :meth:`.Config.get_section` and
        :meth:`.Config.get_main_option`
        methods provide a possibly simpler interface.

        """
        if self.config_file_name:
            here = os.path.abspath(os.path.dirname(self.config_file_name))
        else:
            here = ""
        # Expose the config file's directory as %(here)s for interpolation.
        file_config = SafeConfigParser({'here': here})
        if self.config_file_name:
            file_config.read([self.config_file_name])
        else:
            file_config.add_section(self.config_ini_section)
        return file_config

    def get_template_directory(self):
        """Return the directory where Alembic setup templates are found.

        This method is used by the alembic ``init`` and ``list_templates``
        commands.

        """
        return os.path.join(package_dir, 'templates')

    def get_section(self, name):
        """Return all the configuration options from a given .ini file section
        as a dictionary.

        """
        return dict(self.file_config.items(name))

    def set_main_option(self, name, value):
        """Set an option programmatically within the 'main' section.

        This overrides whatever was in the .ini file.

        """
        self.file_config.set(self.config_ini_section, name, value)

    def remove_main_option(self, name):
        # Remove an option from the 'main' section.
        self.file_config.remove_option(self.config_ini_section, name)

    def set_section_option(self, section, name, value):
        """Set an option programmatically within the given section.

        The section is created if it doesn't exist already.
        The value here will override whatever was in the .ini
        file.

        """
        if not self.file_config.has_section(section):
            self.file_config.add_section(section)
        self.file_config.set(section, name, value)

    def get_section_option(self, section, name, default=None):
        """Return an option from the given section of the .ini file.

        """
        if not self.file_config.has_section(section):
            raise util.CommandError("No config file %r found, or file has no "
                                    "'[%s]' section" %
                                    (self.config_file_name, section))
        if self.file_config.has_option(section, name):
            return self.file_config.get(section, name)
        else:
            return default

    def get_main_option(self, name, default=None):
        """Return an option from the 'main' section of the .ini file.

        This defaults to being a key from the ``[alembic]``
        section, unless the ``-n/--name`` flag were used to
        indicate a different section.

        """
        return self.get_section_option(self.config_ini_section, name, default)
class CommandLine(object):
    """Build the ``alembic`` argparse CLI by introspecting the public
    functions of :mod:`alembic.command`, then dispatch parsed options to them."""

    def __init__(self, prog=None):
        self._generate_args(prog)

    def _generate_args(self, prog):
        """Construct the ArgumentParser with one subcommand per command function."""

        def add_options(parser, positional, kwargs):
            # Attach the optional flags each command declares via keyword
            # arguments, then its positional arguments.
            if 'template' in kwargs:
                parser.add_argument("-t", "--template",
                                    default='generic',
                                    type=str,
                                    help="Setup template for use with 'init'")
            if 'message' in kwargs:
                parser.add_argument("-m", "--message",
                                    type=str,
                                    help="Message string to use with 'revision'")
            if 'sql' in kwargs:
                parser.add_argument("--sql",
                                    action="store_true",
                                    help="Don't emit SQL to database - dump to "
                                    "standard output/file instead")
            if 'tag' in kwargs:
                parser.add_argument("--tag",
                                    type=str,
                                    help="Arbitrary 'tag' name - can be used by "
                                    "custom env.py scripts.")
            if 'autogenerate' in kwargs:
                parser.add_argument("--autogenerate",
                                    action="store_true",
                                    help="Populate revision script with candidate "
                                    "migration operations, based on comparison "
                                    "of database to model.")
            # "current" command
            if 'head_only' in kwargs:
                parser.add_argument("--head-only",
                                    action="store_true",
                                    help="Only show current version and "
                                    "whether or not this is the head revision.")
            if 'rev_range' in kwargs:
                parser.add_argument("-r", "--rev-range",
                                    action="store",
                                    help="Specify a revision range; "
                                    "format is [start]:[end]")

            positional_help = {
                'directory': "location of scripts directory",
                'revision': "revision identifier"
            }
            for arg in positional:
                # BUG FIX: this previously called subparser.add_argument(),
                # reaching through the closure into the enclosing loop's
                # variable instead of using the `parser` parameter it was
                # handed.  It only worked because add_options is always
                # called with the current subparser; use the parameter so
                # the helper is correct for any parser.
                parser.add_argument(arg, help=positional_help.get(arg))

        parser = ArgumentParser(prog=prog)
        parser.add_argument("-c", "--config",
                            type=str,
                            default="alembic.ini",
                            help="Alternate config file")
        parser.add_argument("-n", "--name",
                            type=str,
                            default="alembic",
                            help="Name of section in .ini file to "
                            "use for Alembic config")
        parser.add_argument("-x", action="append",
                            help="Additional arguments consumed by "
                            "custom env.py scripts, e.g. -x "
                            "setting1=somesetting -x setting2=somesetting")
        subparsers = parser.add_subparsers()

        # One subcommand per public function defined in alembic.command;
        # keyword arguments (those with defaults) become flags, the rest
        # become positional arguments.
        for fn in [getattr(command, n) for n in dir(command)]:
            if inspect.isfunction(fn) and \
                    fn.__name__[0] != '_' and \
                    fn.__module__ == 'alembic.command':

                spec = inspect.getargspec(fn)
                if spec[3]:
                    positional = spec[0][1:-len(spec[3])]
                    kwarg = spec[0][-len(spec[3]):]
                else:
                    positional = spec[0][1:]
                    kwarg = []

                subparser = subparsers.add_parser(
                    fn.__name__,
                    help=fn.__doc__)
                add_options(subparser, positional, kwarg)
                subparser.set_defaults(cmd=(fn, positional, kwarg))
        self.parser = parser

    def run_cmd(self, config, options):
        """Invoke the selected command function, reporting CommandError nicely."""
        fn, positional, kwarg = options.cmd

        try:
            fn(config,
               *[getattr(options, k) for k in positional],
               **dict((k, getattr(options, k)) for k in kwarg)
               )
        except util.CommandError as e:
            util.err(str(e))

    def main(self, argv=None):
        """Parse argv and dispatch; errors out if no subcommand was given."""
        options = self.parser.parse_args(argv)
        if not hasattr(options, "cmd"):
            # see http://bugs.python.org/issue9253, argparse
            # behavior changed incompatibly in py3.3
            self.parser.error("too few arguments")
        else:
            cfg = Config(file_=options.config,
                         ini_section=options.name, cmd_opts=options)
            self.run_cmd(cfg, options)
def main(argv=None, prog=None, **kwargs):
"""The console runner function for Alembic."""
CommandLine(prog=prog).main(argv=argv)
if __name__ == '__main__':
main()
|
from argparse import ArgumentParser
from .compat import SafeConfigParser
import inspect
import os
import sys
from . import command, util, package_dir, compat
class Config(object):
    """Represent an Alembic configuration.

    Within an ``env.py`` script, this is available
    via the :attr:`.EnvironmentContext.config` attribute,
    which in turn is available at ``alembic.context``::

        from alembic import context
        some_param = context.config.get_main_option("my option")

    When invoking Alembic programmatically, a new
    :class:`.Config` can be created by passing
    the name of an .ini file to the constructor::

        from alembic.config import Config
        alembic_cfg = Config("/path/to/yourapp/alembic.ini")

    With a :class:`.Config` object, you can then
    run Alembic commands programmatically using the directives
    in :mod:`alembic.command`.

    The :class:`.Config` object can also be constructed without
    a filename.  Values can be set programmatically, and
    new sections will be created as needed::

        from alembic.config import Config
        alembic_cfg = Config()
        alembic_cfg.set_main_option("script_location", "myapp:migrations")
        alembic_cfg.set_main_option("url", "postgresql://foo/bar")
        alembic_cfg.set_section_option("mysection", "foo", "bar")

    :param file_: name of the .ini file to open.
    :param ini_section: name of the main Alembic section within the
     .ini file
    :param output_buffer: optional file-like input buffer which
     will be passed to the :class:`.MigrationContext` - used to redirect
     the output of "offline generation" when using Alembic programmatically.
    :param stdout: buffer where the "print" output of commands will be sent.
     Defaults to ``sys.stdout``.

    .. versionadded:: 0.4
    """

    def __init__(self, file_=None, ini_section='alembic', output_buffer=None,
                 stdout=sys.stdout, cmd_opts=None):
        """Construct a new :class:`.Config`."""
        self.config_file_name = file_
        self.config_ini_section = ini_section
        self.output_buffer = output_buffer
        self.stdout = stdout
        self.cmd_opts = cmd_opts

    cmd_opts = None
    """The command-line options passed to the ``alembic`` script.

    Within an ``env.py`` script this can be accessed via the
    :attr:`.EnvironmentContext.config` attribute.

    .. versionadded:: 0.6.0

    .. seealso::

        :meth:`.EnvironmentContext.get_x_argument`
    """

    config_file_name = None
    """Filesystem path to the .ini file in use."""

    config_ini_section = None
    """Name of the config file section to read basic configuration
    from.  Defaults to ``alembic``, that is the ``[alembic]`` section
    of the .ini file.  This value is modified using the ``-n/--name``
    option to the Alembic runner.
    """

    def print_stdout(self, text, *arg):
        """Render a message to standard out."""
        util.write_outstream(
            self.stdout,
            (compat.text_type(text) % arg),
            "\n"
        )

    @util.memoized_property
    def file_config(self):
        """Return the underlying :class:`ConfigParser` object.

        Direct access to the .ini file is available here,
        though the :meth:`.Config.get_section` and
        :meth:`.Config.get_main_option`
        methods provide a possibly simpler interface.
        """
        if self.config_file_name:
            # 'here' lets .ini values interpolate %(here)s as the
            # directory containing the config file.
            here = os.path.abspath(os.path.dirname(self.config_file_name))
        else:
            here = ""
        file_config = SafeConfigParser({'here': here})
        if self.config_file_name:
            file_config.read([self.config_file_name])
        else:
            # No file given: synthesize an empty main section so option
            # setters/getters work without special-casing.
            file_config.add_section(self.config_ini_section)
        return file_config

    def get_template_directory(self):
        """Return the directory where Alembic setup templates are found.

        This method is used by the alembic ``init`` and ``list_templates``
        commands.
        """
        return os.path.join(package_dir, 'templates')

    def get_section(self, name):
        """Return all the configuration options from a given .ini file section
        as a dictionary.
        """
        return dict(self.file_config.items(name))

    def set_main_option(self, name, value):
        """Set an option programmatically within the 'main' section.

        This overrides whatever was in the .ini file.
        """
        self.file_config.set(self.config_ini_section, name, value)

    def remove_main_option(self, name):
        """Remove an option from the 'main' section of the configuration."""
        self.file_config.remove_option(self.config_ini_section, name)

    def set_section_option(self, section, name, value):
        """Set an option programmatically within the given section.

        The section is created if it doesn't exist already.
        The value here will override whatever was in the .ini
        file.
        """
        if not self.file_config.has_section(section):
            self.file_config.add_section(section)
        self.file_config.set(section, name, value)

    def get_section_option(self, section, name, default=None):
        """Return an option from the given section of the .ini file,
        or *default* if the option is absent.
        """
        if not self.file_config.has_section(section):
            raise util.CommandError("No config file %r found, or file has no "
                                    "'[%s]' section" %
                                    (self.config_file_name, section))
        if self.file_config.has_option(section, name):
            return self.file_config.get(section, name)
        else:
            return default

    def get_main_option(self, name, default=None):
        """Return an option from the 'main' section of the .ini file.

        This defaults to being a key from the ``[alembic]``
        section, unless the ``-n/--name`` flag were used to
        indicate a different section.
        """
        return self.get_section_option(self.config_ini_section, name, default)
class CommandLine(object):
    """Build and run the ``alembic`` console command-line interface.

    Each public function in :mod:`alembic.command` becomes an argparse
    subcommand; the function's positional parameters map to positional
    CLI arguments and its defaulted parameters map to options.
    """

    def __init__(self, prog=None):
        self._generate_args(prog)

    def _generate_args(self, prog):
        def add_options(parser, positional, kwargs):
            # Translate the keyword arguments accepted by a command
            # function into the corresponding optional CLI arguments.
            if 'template' in kwargs:
                parser.add_argument("-t", "--template",
                                    default='generic',
                                    type=str,
                                    help="Setup template for use with 'init'")
            if 'message' in kwargs:
                parser.add_argument("-m", "--message",
                                    type=str,
                                    help="Message string to use with 'revision'")
            if 'sql' in kwargs:
                parser.add_argument("--sql",
                                    action="store_true",
                                    help="Don't emit SQL to database - dump to "
                                    "standard output/file instead")
            if 'tag' in kwargs:
                parser.add_argument("--tag",
                                    type=str,
                                    help="Arbitrary 'tag' name - can be used by "
                                    "custom env.py scripts.")
            if 'autogenerate' in kwargs:
                parser.add_argument("--autogenerate",
                                    action="store_true",
                                    help="Populate revision script with candidate "
                                    "migration operations, based on comparison "
                                    "of database to model.")
            # "current" command
            if 'head_only' in kwargs:
                parser.add_argument("--head-only",
                                    action="store_true",
                                    help="Only show current version and "
                                    "whether or not this is the head revision.")
            if 'rev_range' in kwargs:
                parser.add_argument("-r", "--rev-range",
                                    action="store",
                                    help="Specify a revision range; "
                                    "format is [start]:[end]")
            positional_help = {
                'directory': "location of scripts directory",
                'revision': "revision identifier"
            }
            for arg in positional:
                # Use the parser passed in, rather than relying on the
                # enclosing loop's ``subparser`` variable via closure.
                parser.add_argument(arg, help=positional_help.get(arg))

        parser = ArgumentParser(prog=prog)
        parser.add_argument("-c", "--config",
                            type=str,
                            default="alembic.ini",
                            help="Alternate config file")
        parser.add_argument("-n", "--name",
                            type=str,
                            default="alembic",
                            help="Name of section in .ini file to "
                            "use for Alembic config")
        parser.add_argument("-x", action="append",
                            help="Additional arguments consumed by "
                            "custom env.py scripts, e.g. -x "
                            "setting1=somesetting -x setting2=somesetting")
        subparsers = parser.add_subparsers()

        # inspect.getargspec() was deprecated and removed in Python 3.11;
        # prefer getfullargspec() when available.  Both return a sequence
        # whose element [0] is the argument names and element [3] is the
        # defaults tuple, so the slicing below works with either.
        getargspec = getattr(inspect, 'getfullargspec', None)
        if getargspec is None:
            getargspec = inspect.getargspec

        for fn in [getattr(command, n) for n in dir(command)]:
            if inspect.isfunction(fn) and \
                    fn.__name__[0] != '_' and \
                    fn.__module__ == 'alembic.command':
                spec = getargspec(fn)
                if spec[3]:
                    # Parameters with defaults become keyword options;
                    # the rest (minus the leading config arg) are positional.
                    positional = spec[0][1:-len(spec[3])]
                    kwarg = spec[0][-len(spec[3]):]
                else:
                    positional = spec[0][1:]
                    kwarg = []
                subparser = subparsers.add_parser(
                    fn.__name__,
                    help=fn.__doc__)
                add_options(subparser, positional, kwarg)
                subparser.set_defaults(cmd=(fn, positional, kwarg))
        self.parser = parser

    def run_cmd(self, config, options):
        """Invoke the command function selected by *options*.

        :class:`.util.CommandError` is reported as a console error
        message rather than an unhandled traceback.
        """
        fn, positional, kwarg = options.cmd
        try:
            fn(config,
               *[getattr(options, k) for k in positional],
               **dict((k, getattr(options, k)) for k in kwarg)
               )
        except util.CommandError as e:
            util.err(str(e))

    def main(self, argv=None):
        """Parse *argv* (defaults to ``sys.argv``) and run the command."""
        options = self.parser.parse_args(argv)
        if not hasattr(options, "cmd"):
            # see http://bugs.python.org/issue9253, argparse
            # behavior changed incompatibly in py3.3
            self.parser.error("too few arguments")
        else:
            cfg = Config(file_=options.config,
                         ini_section=options.name, cmd_opts=options)
            self.run_cmd(cfg, options)
def main(argv=None, prog=None, **kwargs):
    """The console runner function for Alembic."""
    cli = CommandLine(prog=prog)
    cli.main(argv=argv)


if __name__ == '__main__':
    main()
|
en
| 0.659824
|
Represent an Alembic configuration. Within an ``env.py`` script, this is available via the :attr:`.EnvironmentContext.config` attribute, which in turn is available at ``alembic.context``:: from alembic import context some_param = context.config.get_main_option("my option") When invoking Alembic programatically, a new :class:`.Config` can be created by passing the name of an .ini file to the constructor:: from alembic.config import Config alembic_cfg = Config("/path/to/yourapp/alembic.ini") With a :class:`.Config` object, you can then run Alembic commands programmatically using the directives in :mod:`alembic.command`. The :class:`.Config` object can also be constructed without a filename. Values can be set programmatically, and new sections will be created as needed:: from alembic.config import Config alembic_cfg = Config() alembic_cfg.set_main_option("script_location", "myapp:migrations") alembic_cfg.set_main_option("url", "postgresql://foo/bar") alembic_cfg.set_section_option("mysection", "foo", "bar") :param file_: name of the .ini file to open. :param ini_section: name of the main Alembic section within the .ini file :param output_buffer: optional file-like input buffer which will be passed to the :class:`.MigrationContext` - used to redirect the output of "offline generation" when using Alembic programmatically. :param stdout: buffer where the "print" output of commands will be sent. Defaults to ``sys.stdout``. ..versionadded:: 0.4 Construct a new :class:`.Config` The command-line options passed to the ``alembic`` script. Within an ``env.py`` script this can be accessed via the :attr:`.EnvironmentContext.config` attribute. .. versionadded:: 0.6.0 .. seealso:: :meth:`.EnvironmentContext.get_x_argument` Filesystem path to the .ini file in use. Name of the config file section to read basic configuration from. Defaults to ``alembic``, that is the ``[alembic]`` section of the .ini file. This value is modified using the ``-n/--name`` option to the Alembic runnier. 
Render a message to standard out. Return the underlying :class:`ConfigParser` object. Direct access to the .ini file is available here, though the :meth:`.Config.get_section` and :meth:`.Config.get_main_option` methods provide a possibly simpler interface. Return the directory where Alembic setup templates are found. This method is used by the alembic ``init`` and ``list_templates`` commands. Return all the configuration options from a given .ini file section as a dictionary. Set an option programmatically within the 'main' section. This overrides whatever was in the .ini file. Set an option programmatically within the given section. The section is created if it doesn't exist already. The value here will override whatever was in the .ini file. Return an option from the given section of the .ini file. Return an option from the 'main' section of the .ini file. This defaults to being a key from the ``[alembic]`` section, unless the ``-n/--name`` flag were used to indicate a different section. # "current" command # see http://bugs.python.org/issue9253, argparse # behavior changed incompatibly in py3.3 The console runner function for Alembic.
| 2.858855
| 3
|
djangoProject1/polls/views/details_note.py
|
Pavel-Petkov03/djangoFirstProject
| 1
|
6626487
|
<reponame>Pavel-Petkov03/djangoFirstProject
from django.shortcuts import render
from django.views import View
from djangoProject1.polls.models import Note
class DetailsView(View):
    """Class-based view rendering the detail page for a single Note."""

    def get(self, req, pk):
        # ``pk`` comes from the URLconf; fetch the matching note and
        # hand it to the detail template under the "note" key.
        note = Note.objects.get(id=pk)
        context = {"note": note}
        return render(req, "note-details.html", context)
|
from django.shortcuts import render
from django.views import View
from djangoProject1.polls.models import Note
class DetailsView(View):
    """Class-based view that displays a single Note.

    NOTE(review): ``Note.objects.get`` raises ``Note.DoesNotExist`` for an
    unknown ``pk``; confirm this is handled upstream (or use
    ``get_object_or_404``) to avoid a 500 response.
    """

    def get(self, req, pk):
        # ``pk`` is supplied by the URLconf.
        current_note = Note.objects.get(id=pk)
        return render(req, "note-details.html", {"note": current_note})
|
none
| 1
| 1.850647
| 2
|
|
moe/tests/bandit/bandit_test_case.py
|
mikepsinn/MOE
| 0
|
6626488
|
<filename>moe/tests/bandit/bandit_test_case.py
# -*- coding: utf-8 -*-
"""Base test case class for bandit tests; includes different historical infos (different sampled arms)."""
import testify as T
from moe.bandit.data_containers import BernoulliArm, HistoricalData, SampleArm
class BanditTestCase(T.TestCase):
    """Base test case for the bandit library.

    This sets up arms for test cases and includes an integration test case for
    verifying that default values do not throw an error.
    """
    bandit_class = None  # Define in a subclass

    """Set up arms for test cases."""
    # Bernoulli-arm fixtures: win/loss/total pull counts only.
    one_arm_test_case = HistoricalData(sample_arms={"arm1": BernoulliArm(win=0, loss=0, total=0)})
    two_unsampled_arms_test_case = HistoricalData(sample_arms={"arm1": BernoulliArm(win=0, loss=0, total=0), "arm2": BernoulliArm(win=0, loss=0, total=0)})
    two_arms_test_case = HistoricalData(sample_arms={"arm1": BernoulliArm(win=1, loss=0, total=1), "arm2": BernoulliArm(win=0, loss=0, total=0)})
    # SampleArm fixtures: integer and float payoffs, tied winners, and
    # explicit per-arm variance.
    three_arms_test_case = HistoricalData(sample_arms={"arm1": SampleArm(win=2, loss=1, total=3), "arm2": SampleArm(win=1, loss=1, total=2), "arm3": SampleArm(win=0, loss=0, total=0)})
    three_arms_float_payoffs_test_case = HistoricalData(sample_arms={"arm1": SampleArm(win=2.2, loss=1.1, total=3), "arm2": SampleArm(win=2.1, loss=1.1, total=3), "arm3": SampleArm(win=0, loss=0, total=0)})
    three_arms_two_winners_test_case = HistoricalData(sample_arms={"arm1": SampleArm(win=2, loss=1, total=3), "arm2": SampleArm(win=2, loss=1, total=3), "arm3": SampleArm(win=0, loss=0, total=0)})
    three_arms_two_winners_no_unsampled_arm_test_case = HistoricalData(sample_arms={"arm1": SampleArm(win=2, loss=1, total=3), "arm2": SampleArm(win=2, loss=1, total=3), "arm3": SampleArm(win=0, loss=1, total=1)})
    three_arms_with_variance_no_unsampled_arm_test_case = HistoricalData(sample_arms={"arm1": SampleArm(win=2, loss=1, total=500, variance=0.1), "arm2": SampleArm(win=2, loss=1, total=500, variance=0.01), "arm3": SampleArm(win=2, loss=1, total=500, variance=0.001)})
    bernoulli_historical_infos_to_test = [
        one_arm_test_case,
        two_unsampled_arms_test_case,
        two_arms_test_case,
    ]
    historical_infos_to_test = [
        three_arms_test_case,
        three_arms_float_payoffs_test_case,
        three_arms_two_winners_test_case,
        three_arms_two_winners_no_unsampled_arm_test_case,
        three_arms_with_variance_no_unsampled_arm_test_case,
    ]
    # Integration tests run against every fixture, Bernoulli ones included.
    historical_infos_to_test.extend(bernoulli_historical_infos_to_test)

    def _test_init_default(self):
        """Verify that default values do not throw an error. This is purely an integration test."""
        for historical_info in self.historical_infos_to_test:
            bandit = self.bandit_class(historical_info=historical_info)
            bandit.choose_arm(bandit.allocate_arms())

    def _test_one_arm(self, bandit):
        """Check that the one-arm case always returns the given arm as the winning arm and the allocation is 1.0."""
        # NOTE(review): the ``bandit`` argument is immediately rebound below,
        # so the passed-in value is ignored -- confirm callers expect this.
        bandit = self.bandit_class(self.one_arm_test_case)
        arms_to_allocations = bandit.allocate_arms()
        T.assert_dicts_equal(arms_to_allocations, {"arm1": 1.0})
        T.assert_equal(bandit.choose_arm(arms_to_allocations), "arm1")
|
<filename>moe/tests/bandit/bandit_test_case.py
# -*- coding: utf-8 -*-
"""Base test case class for bandit tests; includes different historical infos (different sampled arms)."""
import testify as T
from moe.bandit.data_containers import BernoulliArm, HistoricalData, SampleArm
class BanditTestCase(T.TestCase):
"""Base test case for the bandit library.
This sets up arms for test cases and includes an integration test case for
verifying that default values do not throw an error.
"""
bandit_class = None # Define in a subclass
"""Set up arms for test cases."""
one_arm_test_case = HistoricalData(sample_arms={"arm1": BernoulliArm(win=0, loss=0, total=0)})
two_unsampled_arms_test_case = HistoricalData(sample_arms={"arm1": BernoulliArm(win=0, loss=0, total=0), "arm2": BernoulliArm(win=0, loss=0, total=0)})
two_arms_test_case = HistoricalData(sample_arms={"arm1": BernoulliArm(win=1, loss=0, total=1), "arm2": BernoulliArm(win=0, loss=0, total=0)})
three_arms_test_case = HistoricalData(sample_arms={"arm1": SampleArm(win=2, loss=1, total=3), "arm2": SampleArm(win=1, loss=1, total=2), "arm3": SampleArm(win=0, loss=0, total=0)})
three_arms_float_payoffs_test_case = HistoricalData(sample_arms={"arm1": SampleArm(win=2.2, loss=1.1, total=3), "arm2": SampleArm(win=2.1, loss=1.1, total=3), "arm3": SampleArm(win=0, loss=0, total=0)})
three_arms_two_winners_test_case = HistoricalData(sample_arms={"arm1": SampleArm(win=2, loss=1, total=3), "arm2": SampleArm(win=2, loss=1, total=3), "arm3": SampleArm(win=0, loss=0, total=0)})
three_arms_two_winners_no_unsampled_arm_test_case = HistoricalData(sample_arms={"arm1": SampleArm(win=2, loss=1, total=3), "arm2": SampleArm(win=2, loss=1, total=3), "arm3": SampleArm(win=0, loss=1, total=1)})
three_arms_with_variance_no_unsampled_arm_test_case = HistoricalData(sample_arms={"arm1": SampleArm(win=2, loss=1, total=500, variance=0.1), "arm2": SampleArm(win=2, loss=1, total=500, variance=0.01), "arm3": SampleArm(win=2, loss=1, total=500, variance=0.001)})
bernoulli_historical_infos_to_test = [
one_arm_test_case,
two_unsampled_arms_test_case,
two_arms_test_case,
]
historical_infos_to_test = [
three_arms_test_case,
three_arms_float_payoffs_test_case,
three_arms_two_winners_test_case,
three_arms_two_winners_no_unsampled_arm_test_case,
three_arms_with_variance_no_unsampled_arm_test_case,
]
historical_infos_to_test.extend(bernoulli_historical_infos_to_test)
def _test_init_default(self):
"""Verify that default values do not throw and error. This is purely an integration test."""
for historical_info in self.historical_infos_to_test:
bandit = self.bandit_class(historical_info=historical_info)
bandit.choose_arm(bandit.allocate_arms())
def _test_one_arm(self, bandit):
"""Check that the one-arm case always returns the given arm as the winning arm and the allocation is 1.0."""
bandit = self.bandit_class(self.one_arm_test_case)
arms_to_allocations = bandit.allocate_arms()
T.assert_dicts_equal(arms_to_allocations, {"arm1": 1.0})
T.assert_equal(bandit.choose_arm(arms_to_allocations), "arm1")
|
en
| 0.826482
|
# -*- coding: utf-8 -*- Base test case class for bandit tests; includes different historical infos (different sampled arms). Base test case for the bandit library. This sets up arms for test cases and includes an integration test case for verifying that default values do not throw an error. # Define in a subclass Set up arms for test cases. Verify that default values do not throw an error. This is purely an integration test. Check that the one-arm case always returns the given arm as the winning arm and the allocation is 1.0.
| 2.741557
| 3
|
NDS/model/_model.py
|
lincis/ObsDataRest
| 0
|
6626489
|
<filename>NDS/model/_model.py
from werkzeug.security import generate_password_hash, check_password_hash
from ..database import db
class UserModel(db.Model):
    """Application user with a salted, hashed password."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key = True)
    username = db.Column(db.String(80), unique = True, nullable = False)
    # Holds the hash produced by ``generate_password_hash`` -- never the
    # plain-text password.
    password = db.Column(db.String(120), nullable = False)

    def __repr__(self):
        return '<User %r>' % self.username

    def set_password(self, password):
        """Hash *password* and store the hash on this instance."""
        self.password = generate_password_hash(password)

    def check_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password, password)

    @classmethod
    def find_by_username(cls, username):
        """Return the first user with *username*, or None if absent."""
        return cls.query.filter_by(username = username).first()
class DataTypesModel(db.Model):
    """Catalogue entry describing a type of data.

    ``id`` is a caller-supplied short string key; ``description`` and
    ``units`` are optional free-form text.
    """
    __tablename__ = 'datatypes'
    id = db.Column(db.String(40), primary_key = True, nullable = False)
    name = db.Column(db.String(255), nullable = False)
    description = db.Column(db.Text())
    units = db.Column(db.String(255))

    def __repr__(self):
        return '<Data type %r (%r)>' % (self.name, self.id)
class DataSourcesModel(db.Model):
    """Catalogue entry describing a source that provides data.

    ``id`` is a caller-supplied short string key; ``description`` is
    optional free-form text.
    """
    __tablename__ = 'datasources'
    id = db.Column(db.String(40), primary_key = True, nullable = False)
    name = db.Column(db.String(255), nullable = False)
    description = db.Column(db.Text())

    def __repr__(self):
        return '<Data source %r (%r)>' % (self.name, self.id)
class DataModel(db.Model):
    """A single observed value.

    The primary key is composite (data type, data source, creation
    timestamp), so a given source can record at most one value per data
    type per instant.
    """
    __tablename__ = 'data'
    data_type_id = db.Column(db.String(40), db.ForeignKey('datatypes.id'), primary_key = True, nullable = False)
    data_source_id = db.Column(db.String(40), db.ForeignKey('datasources.id'), primary_key = True, nullable = False)
    entity_created = db.Column(db.DateTime(), primary_key = True, nullable = False)
    value = db.Column(db.Numeric())

    def __repr__(self):
        return '<Data entry %r>' % self.value
|
<filename>NDS/model/_model.py
from werkzeug.security import generate_password_hash, check_password_hash
from ..database import db
class UserModel(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(80), unique = True, nullable = False)
password = db.Column(db.String(120), nullable = False)
def __repr__(self):
return '<User %r>' % self.username
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
@classmethod
def find_by_username(cls, username):
return cls.query.filter_by(username = username).first()
class DataTypesModel(db.Model):
__tablename__ = 'datatypes'
id = db.Column(db.String(40), primary_key = True, nullable = False)
name = db.Column(db.String(255), nullable = False)
description = db.Column(db.Text())
units = db.Column(db.String(255))
def __repr__(self):
return '<Data type %r (%r)>' % (self.name, self.id)
class DataSourcesModel(db.Model):
__tablename__ = 'datasources'
id = db.Column(db.String(40), primary_key = True, nullable = False)
name = db.Column(db.String(255), nullable = False)
description = db.Column(db.Text())
def __repr__(self):
return '<Data source %r (%r)>' % (self.name, self.id)
class DataModel(db.Model):
__tablename__ = 'data'
data_type_id = db.Column(db.String(40), db.ForeignKey('datatypes.id'), primary_key = True, nullable = False)
data_source_id = db.Column(db.String(40), db.ForeignKey('datasources.id'), primary_key = True, nullable = False)
entity_created = db.Column(db.DateTime(), primary_key = True, nullable = False)
value = db.Column(db.Numeric())
def __repr__(self):
return '<Data entry %r>' % self.value
|
none
| 1
| 2.638135
| 3
|
|
lambda/prediction-lambda/handler/technical_indicators.py
|
CheranMahalingam/Forex_Sentiment_Analysis
| 6
|
6626490
|
<filename>lambda/prediction-lambda/handler/technical_indicators.py
"""
Module generates technical indicator data such as exponential moving averages,
and accumulation/distribution lines to help LSTM model generate accurate predictions
"""
import boto3
from boto3.dynamodb.conditions import Key
import datetime
# LSTM models only supported for these currency pairs for now
SYMBOLS = ["EURUSD", "GBPUSD", "USDJPY", "AUDCAD"]
def generate_technical_indicators(new_ohlc_data):
    """
    Controller function for calculating new technical indicator data.

    Args:
        new_ohlc_data: Dictionary in DynamoDB attribute-value format
            containing latest ohlc data keyed by currency-pair symbol

    Returns:
        Dictionary containing 10 day ema, 50 day ema, accumulation/distribution
        and closing price for each currency pair
    """
    # Note: the previously-bound 'Date'/'Timestamp' locals were unused and
    # have been removed.
    indicator_data = {}
    for symbol in SYMBOLS:
        # DynamoDB wraps each number as a string under the 'N' tag,
        # inside a map ('M') for the symbol.
        symbol_data = new_ohlc_data[symbol]
        open_price = float(symbol_data['M']['Open']['N'])
        high_price = float(symbol_data['M']['High']['N'])
        low_price = float(symbol_data['M']['Low']['N'])
        close_price = float(symbol_data['M']['Close']['N'])
        trade_volume = float(symbol_data['M']['Volume']['N'])
        indicator_data[symbol + 'AccumulationDistribution'] = calculate_accumulation_distribution(
            open_price,
            high_price,
            low_price,
            close_price,
            trade_volume
        )
        # Seed each EMA from the most recently stored value so the new
        # EMA continues the existing series rather than restarting it.
        previous_ema_10 = get_previous_ema(close_price, symbol, 10)
        previous_ema_50 = get_previous_ema(close_price, symbol, 50)
        indicator_data[symbol + 'Ema10'] = calculate_ema(close_price, 10, previous_ema_10)
        indicator_data[symbol + 'Ema50'] = calculate_ema(close_price, 50, previous_ema_50)
        indicator_data[symbol] = close_price
    return indicator_data
def calculate_accumulation_distribution(open, high, low, close, volume):
    """
    Compute the change in the accumulation/distribution line.

    A/D = ((Close - Low) - (High - Close)) / (High - Low) * Volume

    Args:
        open: Float exchange rate at the start of the interval (unused in
            the formula, kept for a uniform call signature)
        high: Float highest exchange rate during the interval
        low: Float lowest exchange rate during the interval
        close: Float exchange rate at the end of the interval
        volume: Float number of trades during the interval

    Returns:
        Float change in accumulation/distribution; 0 for a flat interval
    """
    price_range = high - low
    if price_range == 0:
        # A flat interval (high == low) would divide by zero; report no
        # accumulation/distribution change instead.
        return 0
    money_flow_multiplier = ((close - low) - (high - close)) / price_range
    return money_flow_multiplier * volume
def calculate_ema(close, periods, previous_ema):
    """
    Compute the next exponential moving average value.

    EMA = price * k + previous_ema * (1 - k), where k = 2 / (periods + 1)
    is the weighting multiplier.

    Args:
        close: Float exchange rate at the end of the interval
        periods: Integer number of days in the EMA period (commonly 12 or 26)
        previous_ema: Float most recently calculated EMA

    Returns:
        Float new EMA value
    """
    weight = 2 / (periods + 1)
    return close * weight + previous_ema * (1 - weight)
def get_previous_ema(close, symbol, interval):
    """
    Searches DynamoDB for the last calculated EMA to avoid recalculating
    EMAs for multiple periods.

    Args:
        close: Float representing the exchange rate at the end of an interval
        symbol: String representing the name of the currency pair (e.g. EURUSD)
        interval: Integer representing the number of periods used in the EMA calculation

    Returns:
        Float representing the previous EMA, or ``close`` itself when no
        prior row exists for today's date
    """
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('TechnicalAnalysisTable')
    # Many different EMAs were calculated based on the number of
    # periods they considered (10 day EMA and 50 day EMAs are stored)
    column_name = symbol + "Ema" + str(interval)
    # NOTE(review): datetime.now() is server-local time; confirm the
    # table's 'Date' partition key is written in the same timezone.
    current_date = datetime.datetime.now()
    formatted_date = current_date.strftime("%Y-%m-%d")
    # Searches for data from current date
    # Searches timestamps in reverse to get data quickly
    # Uses query instead of scan to speed up operation
    response = table.query(
        KeyConditionExpression=Key('Date').eq(formatted_date),
        ScanIndexForward=False,
        Limit=1
    )
    # If no data exists just use current closing price as EMA
    if response['Count'] == 0:
        return close
    else:
        return float(response['Items'][0][column_name])
|
<filename>lambda/prediction-lambda/handler/technical_indicators.py
"""
Module generates technical indicator data such as exponential moving averages,
and accumulation/distribution lines to help LSTM model generate accurate predictions
"""
import boto3
from boto3.dynamodb.conditions import Key
import datetime
# LSTM models only supported for these currency pairs for now
SYMBOLS = ["EURUSD", "GBPUSD", "USDJPY", "AUDCAD"]
def generate_technical_indicators(new_ohlc_data):
"""
Controller function for calculating new economic indicator data.
Args:
new_ohlc_data: Dictionary from DynamoDB containing latest ohlc data and
timestamp
Returns:
Dictionary containing 10 day ema, 50 day ema, accumulation/distribution
and closing price for each currency pair
"""
date = new_ohlc_data['Date']['S']
timestamp = new_ohlc_data['Timestamp']['S']
indicator_data = {}
for symbol in SYMBOLS:
symbolData = new_ohlc_data[symbol]
open_price = float(symbolData['M']['Open']['N'])
high_price = float(symbolData['M']['High']['N'])
low_price = float(symbolData['M']['Low']['N'])
close_price = float(symbolData['M']['Close']['N'])
trade_volume = float(symbolData['M']['Volume']['N'])
indicator_data[symbol + 'AccumulationDistribution'] = calculate_accumulation_distribution(
open_price,
high_price,
low_price,
close_price,
trade_volume
)
previous_ema_10 = get_previous_ema(close_price, symbol, 10)
previous_ema_50 = get_previous_ema(close_price, symbol, 50)
indicator_data[symbol + 'Ema10'] = calculate_ema(close_price, 10, previous_ema_10)
indicator_data[symbol + 'Ema50'] = calculate_ema(close_price, 50, previous_ema_50)
indicator_data[symbol] = close_price
return indicator_data
def calculate_accumulation_distribution(open, high, low, close, volume):
"""
Calculates changes in accumulation/distribution line.
A/D = ((Close - Low) - (High - Close))/(High - Low)
Args:
open: Float representing exchange rate at the beginning of an interval
high: Float representing the highest exchange rate during the interval
low: Float respresenting the lowest exchange rate during the interval
close: Float representing the exchange rate at the end of an interval
volume: Float representing the number of trades during the interval
Returns:
Float representing the change in accumulation/distribution
"""
if high == low:
# Prevent x/0 undefined error
return 0
return ((2*close - low - high)/(high - low))*volume
def calculate_ema(close, periods, previous_ema):
"""
Calculates the exponential moving average.
EMA = Price(t)*weighting_multipler + previous_ema*(1-weighting_multiplier)
*weighting_multiplier is given by 2/(periods + 1)
Args:
close: Float representing the exchange rate at the end of an interval
periods: Integer representing the number of days in the EMA period (commonly 12 or 26)
previous_ema: Float representing the last calculated EMA
Returns:
Float representing the new EMA
"""
return close*(2/(periods + 1)) + previous_ema*(1-(2/(periods + 1)))
def get_previous_ema(close, symbol, interval):
"""
Searches DynamoDB for the last calculated EMA to avoid recalculating
EMAs for multiple periods.
Args:
close: Float representing the exchange rate at the end of an interval
symbol: String representing the name of the currency pair (e.g. EURUSD)
interval: Integer representing the number of periods used in the EMA calculation
Returns:
Float representing the previous EMA
"""
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('TechnicalAnalysisTable')
# Many different EMAs were calculated based on the number of
# periods they considered (10 day EMA and 50 day EMAs are stored)
column_name = symbol + "Ema" + str(interval)
current_date = datetime.datetime.now()
formatted_date = current_date.strftime("%Y-%m-%d")
# Searches for data from current date
# Searches timestamps in reverse to get data quickly
# Uses query instead of scan to speed up operation
response = table.query(
KeyConditionExpression=Key('Date').eq(formatted_date),
ScanIndexForward=False,
Limit=1
)
# If no data exists just use current closing price as EMA
if response['Count'] == 0:
return close
else:
return float(response['Items'][0][column_name])
|
en
| 0.833028
|
Module generates technical indicator data such as exponential moving averages, and accumulation/distribution lines to help LSTM model generate accurate predictions # LSTM models only supported for these currency pairs for now Controller function for calculating new economic indicator data. Args: new_ohlc_data: Dictionary from DynamoDB containing latest ohlc data and timestamp Returns: Dictionary containing 10 day ema, 50 day ema, accumulation/distribution and closing price for each currency pair Calculates changes in accumulation/distribution line. A/D = ((Close - Low) - (High - Close))/(High - Low) Args: open: Float representing exchange rate at the beginning of an interval high: Float representing the highest exchange rate during the interval low: Float respresenting the lowest exchange rate during the interval close: Float representing the exchange rate at the end of an interval volume: Float representing the number of trades during the interval Returns: Float representing the change in accumulation/distribution # Prevent x/0 undefined error Calculates the exponential moving average. EMA = Price(t)*weighting_multipler + previous_ema*(1-weighting_multiplier) *weighting_multiplier is given by 2/(periods + 1) Args: close: Float representing the exchange rate at the end of an interval periods: Integer representing the number of days in the EMA period (commonly 12 or 26) previous_ema: Float representing the last calculated EMA Returns: Float representing the new EMA Searches DynamoDB for the last calculated EMA to avoid recalculating EMAs for multiple periods. Args: close: Float representing the exchange rate at the end of an interval symbol: String representing the name of the currency pair (e.g. 
EURUSD) interval: Integer representing the number of periods used in the EMA calculation Returns: Float representing the previous EMA # Many different EMAs were calculated based on the number of # periods they considered (10 day EMA and 50 day EMAs are stored) # Searches for data from current date # Searches timestamps in reverse to get data quickly # Uses query instead of scan to speed up operation # If no data exists just use current closing price as EMA
| 2.231987
| 2
|
star_13.py
|
onesk/aoc2017
| 0
|
6626491
|
# NOTE(review): this inline sample is immediately shadowed by the import
# below; kept only to document the input format ("depth: range" per line).
data = """
0: 3
1: 2
4: 4
6: 4
"""
from star_13_input import data

# Parse "depth: range" lines into (depth, range) pairs.
fw = []
for line in data.strip().split('\n'):
    d_s, r_s = line.split(': ')
    fw.append((int(d_s), int(r_s)))

# Dense lookup table: ranges[depth] is the scanner range at that depth,
# or None where no scanner exists.
max_d = max(fw)[0]
ranges = [None] * (max_d + 1)
for d, r in fw:
    ranges[d] = r
# read before coding next time
def wraparound(c, r):
    """Return the scanner position at time ``c`` for a layer of range ``r``.

    The scanner sweeps 0..r-1 and back, i.e. a triangle wave with period
    2*(r-1).  Assumes r >= 2 (r == 1 would divide by zero).
    """
    period = 2 * r - 2
    phase = c % period
    if phase < r:
        return phase
    return period - phase
# Part 1: severity of entering at time 0 -- sum of depth*range over every
# layer whose scanner is at position 0 exactly when we reach it.
sev = 0
for c in range(max_d + 1):
    if ranges[c] is not None:
        if wraparound(c, ranges[c]) == 0:
            sev += c * ranges[c]
print(sev)  # was a Python-2 print statement; now a py3 call

# Part 2: smallest delay after which no layer ever catches us.
safe = 0
while True:
    caught = False
    for c in range(max_d + 1):
        if ranges[c] is not None and wraparound(c + safe, ranges[c]) == 0:
            caught = True
            break
    if not caught:
        print(safe)
        break
    safe += 1
|
data = """
0: 3
1: 2
4: 4
6: 4
"""
from star_13_input import data
fw = []
for line in data.strip().split('\n'):
d_s, r_s = line.split(': ')
d, r = int(d_s), int(r_s)
fw.append((d, r))
max_d = max(fw)[0]
ranges = [None] * (max_d + 1)
for d, r in fw:
ranges[d] = r
# read before coding next time
def wraparound(c, r):
mc = c % (2*r-2)
return mc if mc < r else 2*(r-1)-mc
sev = 0
for c in range(max_d+1):
if ranges[c] is not None:
cp = wraparound(c, ranges[c])
if cp == 0:
sev += c*ranges[c]
print sev
safe = 0
while True:
caught = False
for c in range(max_d+1):
if ranges[c] is not None:
cp = wraparound(c+safe, ranges[c])
if cp == 0:
caught = True
break
if not caught:
print safe
break
safe += 1
|
en
| 0.817887
|
0: 3 1: 2 4: 4 6: 4 # read before coding next time
| 2.63571
| 3
|
examples/klvdata_test.py
|
felipeservicos/klvdata
| 0
|
6626492
|
#!/usr/bin/env python
"""Read a raw KLV stream from stdin and dump each packet's structure."""
import sys
import klvdata

raw = sys.stdin.buffer.read()
for packet in klvdata.StreamParser(raw):
    packet.structure()
|
#!/usr/bin/env python
import sys, klvdata;
for packet in klvdata.StreamParser(sys.stdin.buffer.read()): packet.structure()
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.245632
| 2
|
grr/server/grr_response_server/data_stores/mysql_advanced_data_store_benchmark_test.py
|
4ndygu/grr
| 0
|
6626493
|
<filename>grr/server/grr_response_server/data_stores/mysql_advanced_data_store_benchmark_test.py
#!/usr/bin/env python
"""Benchmark tests for the MySQL advanced data store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from grr_response_core.lib import flags
from grr_response_server import data_store_test
from grr_response_server.data_stores import mysql_advanced_data_store_test
from grr.test_lib import test_lib


class MysqlAdvancedDataStoreBenchmarks(
    mysql_advanced_data_store_test.MysqlAdvancedTestMixin,
    data_store_test.DataStoreBenchmarks):
  """Benchmark the mysql data store abstraction."""


class MysqlAdvancedDataStoreCSVBenchmarks(
    mysql_advanced_data_store_test.MysqlAdvancedTestMixin,
    data_store_test.DataStoreCSVBenchmarks):
  """Benchmark the mysql data store abstraction (CSV output variant)."""


def main(args):
  """Entry point: delegate to the GRR test runner."""
  test_lib.main(args)


if __name__ == "__main__":
  flags.StartMain(main)
|
<filename>grr/server/grr_response_server/data_stores/mysql_advanced_data_store_benchmark_test.py
#!/usr/bin/env python
"""Benchmark tests for MySQL advanced data store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import flags
from grr_response_server import data_store_test
from grr_response_server.data_stores import mysql_advanced_data_store_test
from grr.test_lib import test_lib
class MysqlAdvancedDataStoreBenchmarks(
mysql_advanced_data_store_test.MysqlAdvancedTestMixin,
data_store_test.DataStoreBenchmarks):
"""Benchmark the mysql data store abstraction."""
class MysqlAdvancedDataStoreCSVBenchmarks(
mysql_advanced_data_store_test.MysqlAdvancedTestMixin,
data_store_test.DataStoreCSVBenchmarks):
"""Benchmark the mysql data store abstraction."""
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
en
| 0.435183
|
#!/usr/bin/env python Benchmark tests for MySQL advanced data store. Benchmark the mysql data store abstraction. Benchmark the mysql data store abstraction.
| 1.899203
| 2
|
layoutx/widgets/spinbox.py
|
8or5q/LayoutX
| 61
|
6626494
|
<filename>layoutx/widgets/spinbox.py
from .widget import Widget
from tkinter import ttk, StringVar
class SpinBox(Widget):
    """A ttk.Spinbox widget two-way bound to a "value" property."""

    def __init__(self, master, **kwargs):
        self._textv = StringVar()
        super().__init__(
            tk=ttk.Spinbox(master, textvariable=self._textv), **kwargs
        )
        # Property -> widget: remember the setter so edits can be pushed back.
        self._setter = self.connect_to_prop("value", self.on_changed_value)
        # Widget -> property: every write to the variable flows to the setter.
        self._trace = self._textv.trace_add(
            "write", lambda *_: self._setter(self._textv.get())
        )

    def on_changed_value(self, value):
        """Push a property change into the text variable (falsy values are ignored)."""
        if value:
            self._textv.set(value)

    def on_disposed(self):
        """Detach the variable trace and drop the setter reference."""
        self._textv.trace_remove("write", self._trace)
        self._setter = None
|
<filename>layoutx/widgets/spinbox.py
from .widget import Widget
from tkinter import ttk, StringVar
class SpinBox(Widget):
def __init__(self, master, **kwargs):
self._textv = StringVar()
super().__init__(
tk=ttk.Spinbox(master, textvariable=self._textv), **kwargs
)
self._setter = self.connect_to_prop("value", self.on_changed_value)
self._trace = self._textv.trace_add("write",
lambda *_: self._setter(self._textv.get())
)
def on_changed_value(self, value):
if value:
self._textv.set(value)
def on_disposed(self):
self._textv.trace_remove("write", self._trace)
self._setter = None
|
none
| 1
| 2.820243
| 3
|
|
sksurgerysurfacematch/algorithms/reconstructor_with_rectified_images.py
|
UCL/scikit-surgerysurfacematch
| 1
|
6626495
|
<filename>sksurgerysurfacematch/algorithms/reconstructor_with_rectified_images.py
# -*- coding: utf-8 -*-
""" Base class for surface reconstruction on already rectified images. """
import numpy as np
import cv2
import sksurgerysurfacematch.interfaces.stereo_reconstructor as sr
class StereoReconstructorWithRectifiedImages(sr.StereoReconstructor):
    """
    Base class for those stereo reconstruction methods that work specifically
    from rectified images. This class handles rectification and the necessary
    coordinate transformations.

    Note: The client calls the reconstruct() method, which requires
    undistorted images that are NOT already rectified. It is THIS class that
    does the rectification for you, and calls through to the
    _compute_disparity() method that derived classes must implement.

    The constructor creates member variables, so this class is stateful:
    call reconstruct() once, then call extract() multiple times with
    different masks to pull out different subsets of data.

    :param lower_disparity_multiplier: min=median - (this * std), default = 2.0.
    :param upper_disparity_multiplier: max=median + (this * std), default = 2.0.
    :param alpha: OpenCV stereoRectify alpha parameter, default = 0.
    """
    def __init__(self,
                 lower_disparity_multiplier: float = 2.0,
                 upper_disparity_multiplier: float = 2.0,
                 alpha: float = 0
                 ):
        super().__init__()
        # Populated by reconstruct(); all None until it has been called.
        self.disparity = None
        self.points = None
        self.rgb_image = None
        self.r_1 = None
        self.lower_disparity_multiplier = lower_disparity_multiplier
        self.upper_disparity_multiplier = upper_disparity_multiplier
        self.left_rectified = None
        self.right_rectified = None
        self.left_mask = None
        self.alpha = alpha

    # pylint:disable=too-many-arguments
    def reconstruct(self,
                    left_image: np.ndarray,
                    left_camera_matrix: np.ndarray,
                    right_image: np.ndarray,
                    right_camera_matrix: np.ndarray,
                    left_to_right_rmat: np.ndarray,
                    left_to_right_tvec: np.ndarray,
                    left_mask: np.ndarray = None,
                    ):
        """
        Implementation of stereo surface reconstruction that takes
        undistorted images, rectifies them, asks derived classes
        to compute a disparity map on the rectified images, and
        then sorts out extracting points and their colours.

        Camera parameters are those obtained from OpenCV.

        :param left_image: undistorted left image, BGR
        :param left_camera_matrix: [3x3] camera matrix
        :param right_image: undistorted right image, BGR
        :param right_camera_matrix: [3x3] camera matrix
        :param left_to_right_rmat: [3x3] rotation matrix
        :param left_to_right_tvec: [3x1] translation vector
        :param left_mask: mask image, single channel, same size as left_image
        :return: [Nx6] point cloud where the 6 columns
            are x, y, z in left camera space, followed by r, g, b colours.
        """
        # pylint:disable=too-many-locals
        (width, height) = (left_image.shape[1], left_image.shape[0])
        # Distortion coefficients are None: inputs are already undistorted.
        self.r_1, r_2, p_1, p_2, q_mat, _, _ = \
            cv2.stereoRectify(left_camera_matrix,
                              None,
                              right_camera_matrix,
                              None,
                              (width, height),
                              left_to_right_rmat,
                              left_to_right_tvec,
                              alpha=self.alpha
                              )
        undistort_rectify_map_l_x, undistort_rectify_map_l_y = \
            cv2.initUndistortRectifyMap(left_camera_matrix,
                                        None,
                                        self.r_1, p_1,
                                        (width, height), cv2.CV_32FC1)
        undistort_rectify_map_r_x, undistort_rectify_map_r_y = \
            cv2.initUndistortRectifyMap(right_camera_matrix,
                                        None,
                                        r_2, p_2,
                                        (width, height), cv2.CV_32FC1)
        self.left_rectified = \
            cv2.remap(left_image, undistort_rectify_map_l_x,
                      undistort_rectify_map_l_y, cv2.INTER_LINEAR)
        self.right_rectified = \
            cv2.remap(right_image, undistort_rectify_map_r_x,
                      undistort_rectify_map_r_y, cv2.INTER_LINEAR)
        # Need to remap the mask if we have one (nearest-neighbour keeps the
        # mask values un-interpolated).
        self.left_mask = left_mask
        if left_mask is not None:
            self.left_mask = \
                cv2.remap(left_mask, undistort_rectify_map_l_x,
                          undistort_rectify_map_l_y, cv2.INTER_NEAREST)
        self.disparity = self._compute_disparity(self.left_rectified,
                                                 self.right_rectified)
        self.points = cv2.reprojectImageTo3D(self.disparity, q_mat)
        self.rgb_image = cv2.cvtColor(self.left_rectified, cv2.COLOR_BGR2RGB)
        # Calls method below to extract data.
        # NOTE(review): this passes the original (unrectified) left_mask, not
        # the remapped self.left_mask computed above -- confirm which one
        # extract() is meant to receive.
        return self.extract(left_mask)

    def extract(self, left_mask: np.ndarray):
        """
        Extracts the actual point cloud. This is a separate method,
        so that you can reconstruct once using reconstruct(), and then
        call this extract method with multiple masks, without incurring
        the cost of multiple calls to the reconstruction algorithm, which
        may be expensive.

        :param left_mask: mask image, single channel, same size as left_image
        :return: [Nx6] point cloud where the 6 columns
            are x, y, z in left camera space, followed by r, g, b colours.
        """
        # Keep only disparities within the configured number of standard
        # deviations of the median, to suppress gross outliers.
        median_disp = np.median(self.disparity)
        std_dev_disp = np.std(self.disparity)
        lower_bound = (median_disp - (self.lower_disparity_multiplier
                                      * std_dev_disp))
        upper_bound = (median_disp + (self.upper_disparity_multiplier
                                      * std_dev_disp))
        mask = np.logical_and(self.disparity > lower_bound,
                              self.disparity < upper_bound)
        # NOTE(review): debug print in library code -- consider a logger.
        print("Disparity, min=" + str(self.disparity.min())
              + ", max=" + str(self.disparity.max())
              + ", med=" + str(np.median(self.disparity))
              + ", std=" + str(np.std(self.disparity))
              + ", lower=" + str(lower_bound)
              + ", upper=" + str(upper_bound)
              )
        if left_mask is not None:
            # Accept non-boolean masks (e.g. uint8 0/255) by thresholding.
            if left_mask.dtype != bool:
                left_mask = left_mask > 0
            mask = np.logical_and(mask, left_mask)
        out_points = self.points[mask]
        out_colors = self.rgb_image[mask]
        non_zero = np.count_nonzero(mask)
        result = np.zeros((non_zero, 6))
        result[:, 0:3] = out_points
        result[:, 3:6] = out_colors
        # Convert from first (left) camera rectified to left camera unrectified
        result[:, 0:3] = np.transpose(
            np.matmul(np.linalg.inv(self.r_1), np.transpose(result[:, 0:3])))
        return result

    def _compute_disparity(self, left_rectified_image, right_rectified_image):
        """
        Derived classes implement this to compute a disparity map from
        pre-rectified images. But clients still call the reconstruct() method.

        The returned disparity map must be equivalent to what OpenCV
        returns from other stereo reconstructors like the SGBM reconstructor:
        an image, the same size as the left and right rectified images,
        of type float32, where each pixel value represents left-to-right
        disparity.

        :param left_rectified_image: undistorted, rectified image, BGR
        :param right_rectified_image: undistorted, rectified image, BGR
        :return: disparity map
        """
        raise NotImplementedError("Derived classes should implement this.")
|
<filename>sksurgerysurfacematch/algorithms/reconstructor_with_rectified_images.py
# -*- coding: utf-8 -*-
""" Base class for surface reconstruction on already rectified images. """
import numpy as np
import cv2
import sksurgerysurfacematch.interfaces.stereo_reconstructor as sr
class StereoReconstructorWithRectifiedImages(sr.StereoReconstructor):
"""
Base class for those stereo reconstruction methods that work specifically
from rectified images. This class handles rectification and
the necessary coordinate transformations. Note: The client calls
the reconstruct() method which requires undistorted images,
which are NOT already rectified. It's THIS class that does the
rectification for you, and calls through to the _compute_disparity()
method that derived classes must implement.
Constructor creates some member variables, so this class
becomes statefull. You call reconstruct() once, and then
you can call extract multiple times with different masks
to pull out different subsets of data.
:param lower_disparity_multiplier: min=median - (this * std), default = 2.0.
:param upper_disparity_multiplier: max=median + (this * std), default = 2.0.
:param apha: OpenCV alpha parameter, default = 0.
"""
def __init__(self,
lower_disparity_multiplier: float = 2.0,
upper_disparity_multiplier: float = 2.0,
alpha: float = 0
):
super().__init__()
self.disparity = None
self.points = None
self.rgb_image = None
self.r_1 = None
self.lower_disparity_multiplier = lower_disparity_multiplier
self.upper_disparity_multiplier = upper_disparity_multiplier
self.left_rectified = None
self.right_rectified = None
self.left_mask = None
self.alpha = alpha
# pylint:disable=too-many-arguments
def reconstruct(self,
left_image: np.ndarray,
left_camera_matrix: np.ndarray,
right_image: np.ndarray,
right_camera_matrix: np.ndarray,
left_to_right_rmat: np.ndarray,
left_to_right_tvec: np.ndarray,
left_mask: np.ndarray = None,
):
"""
Implementation of stereo surface reconstruction that takes
undistorted images, rectifies them, asks derived classes
to compute a disparity map on the rectified images, and
then sorts out extracting points and their colours.
Camera parameters are those obtained from OpenCV.
:param left_image: undistorted left image, BGR
:param left_camera_matrix: [3x3] camera matrix
:param right_image: undistorted right image, BGR
:param right_camera_matrix: [3x3] camera matrix
:param left_to_right_rmat: [3x3] rotation matrix
:param left_to_right_tvec: [3x1] translation vector
:param left_mask: mask image, single channel, same size as left_image
:return: [Nx6] point cloud where the 6 columns
are x, y, z in left camera space, followed by r, g, b colours.
"""
# pylint:disable=too-many-locals
(width, height) = (left_image.shape[1], left_image.shape[0])
self.r_1, r_2, p_1, p_2, q_mat, _, _ = \
cv2.stereoRectify(left_camera_matrix,
None,
right_camera_matrix,
None,
(width, height),
left_to_right_rmat,
left_to_right_tvec,
alpha=self.alpha
)
undistort_rectify_map_l_x, undistort_rectify_map_l_y = \
cv2.initUndistortRectifyMap(left_camera_matrix,
None,
self.r_1, p_1,
(width, height), cv2.CV_32FC1)
undistort_rectify_map_r_x, undistort_rectify_map_r_y = \
cv2.initUndistortRectifyMap(right_camera_matrix,
None,
r_2, p_2,
(width, height), cv2.CV_32FC1)
self.left_rectified = \
cv2.remap(left_image, undistort_rectify_map_l_x,
undistort_rectify_map_l_y, cv2.INTER_LINEAR)
self.right_rectified = \
cv2.remap(right_image, undistort_rectify_map_r_x,
undistort_rectify_map_r_y, cv2.INTER_LINEAR)
# Need to remap the mask if we have one
self.left_mask = left_mask
if left_mask is not None:
self.left_mask = \
cv2.remap(left_mask, undistort_rectify_map_l_x,
undistort_rectify_map_l_y, cv2.INTER_NEAREST)
self.disparity = self._compute_disparity(self.left_rectified,
self.right_rectified)
self.points = cv2.reprojectImageTo3D(self.disparity, q_mat)
self.rgb_image = cv2.cvtColor(self.left_rectified, cv2.COLOR_BGR2RGB)
# Calls method below to extract data.
return self.extract(left_mask)
def extract(self, left_mask: np.ndarray):
"""
Extracts the actual point cloud. This is a separate method,
so that you can reconstruct once using reconstruct(), and then
call this extract method with multiple masks, without incurring
the cost of multiple calls to the reconstruction algorithm, which
may be expensive.
:param left_mask: mask image, single channel, same size as left_image
:return: [Nx6] point cloud where the 6 columns
are x, y, z in left camera space, followed by r, g, b colours.
"""
median_disp = np.median(self.disparity)
std_dev_disp = np.std(self.disparity)
lower_bound = (median_disp - (self.lower_disparity_multiplier
* std_dev_disp))
upper_bound = (median_disp + (self.upper_disparity_multiplier
* std_dev_disp))
mask = np.logical_and(self.disparity > lower_bound,
self.disparity < upper_bound)
print("Disparity, min=" + str(self.disparity.min())
+ ", max=" + str(self.disparity.max())
+ ", med=" + str(np.median(self.disparity))
+ ", std=" + str(np.std(self.disparity))
+ ", lower=" + str(lower_bound)
+ ", upper=" + str(upper_bound)
)
if left_mask is not None:
if left_mask.dtype != bool:
left_mask = left_mask > 0
mask = np.logical_and(mask, left_mask)
out_points = self.points[mask]
out_colors = self.rgb_image[mask]
non_zero = np.count_nonzero(mask)
result = np.zeros((non_zero, 6))
result[:, 0:3] = out_points
result[:, 3:6] = out_colors
# Convert from first (left) camera rectified to left camera unrectified
result[:, 0:3] = np.transpose(
np.matmul(np.linalg.inv(self.r_1), np.transpose(result[:, 0:3])))
return result
def _compute_disparity(self, left_rectified_image, right_rectified_image):
"""
Derived classes implement this to compute a disparity map from
pre-rectified images. But clients still call the reconstruct() method.
The returned disparity map, must be equivalent to what OpenCV
returns from other stereo reconstructors like the SGBM reconstructor.
That is an image, same size as left and right rectified images,
of type float32, where each pixel value represents left-to-right
disparity.
:param left_rectified_image: undistorted, rectified image, BGR
:param right_rectified_image: undistorted, rectified image, BGR
:return: disparity map
"""
raise NotImplementedError("Derived classes should implement this.")
|
en
| 0.791563
|
# -*- coding: utf-8 -*- Base class for surface reconstruction on already rectified images. Base class for those stereo reconstruction methods that work specifically from rectified images. This class handles rectification and the necessary coordinate transformations. Note: The client calls the reconstruct() method which requires undistorted images, which are NOT already rectified. It's THIS class that does the rectification for you, and calls through to the _compute_disparity() method that derived classes must implement. Constructor creates some member variables, so this class becomes statefull. You call reconstruct() once, and then you can call extract multiple times with different masks to pull out different subsets of data. :param lower_disparity_multiplier: min=median - (this * std), default = 2.0. :param upper_disparity_multiplier: max=median + (this * std), default = 2.0. :param apha: OpenCV alpha parameter, default = 0. # pylint:disable=too-many-arguments Implementation of stereo surface reconstruction that takes undistorted images, rectifies them, asks derived classes to compute a disparity map on the rectified images, and then sorts out extracting points and their colours. Camera parameters are those obtained from OpenCV. :param left_image: undistorted left image, BGR :param left_camera_matrix: [3x3] camera matrix :param right_image: undistorted right image, BGR :param right_camera_matrix: [3x3] camera matrix :param left_to_right_rmat: [3x3] rotation matrix :param left_to_right_tvec: [3x1] translation vector :param left_mask: mask image, single channel, same size as left_image :return: [Nx6] point cloud where the 6 columns are x, y, z in left camera space, followed by r, g, b colours. # pylint:disable=too-many-locals # Need to remap the mask if we have one # Calls method below to extract data. Extracts the actual point cloud. 
This is a separate method, so that you can reconstruct once using reconstruct(), and then call this extract method with multiple masks, without incurring the cost of multiple calls to the reconstruction algorithm, which may be expensive. :param left_mask: mask image, single channel, same size as left_image :return: [Nx6] point cloud where the 6 columns are x, y, z in left camera space, followed by r, g, b colours. # Convert from first (left) camera rectified to left camera unrectified Derived classes implement this to compute a disparity map from pre-rectified images. But clients still call the reconstruct() method. The returned disparity map, must be equivalent to what OpenCV returns from other stereo reconstructors like the SGBM reconstructor. That is an image, same size as left and right rectified images, of type float32, where each pixel value represents left-to-right disparity. :param left_rectified_image: undistorted, rectified image, BGR :param right_rectified_image: undistorted, rectified image, BGR :return: disparity map
| 2.825157
| 3
|
sirena_client/base/messaging/response.py
|
utair-digital/sirena-client
| 11
|
6626496
|
from struct import unpack
from Crypto.Cipher import DES, PKCS1_v1_5
from Crypto.PublicKey import RSA
from .base import ResponseABC, Header
from ..types import AsymEncryptionHandShake
class Response(ResponseABC):
    """
    Unencrypted response.
    """
class ResponseEncryptedSym(ResponseABC):
    """
    Response encrypted with a symmetric (DES) key.
    """
    def __init__(
            self,
            header: Header,
            key: DES,
            **kwargs  # noqa
    ):
        super().__init__(
            header,
            method_name=kwargs.get('method_name')
        )
        # DES cipher object used to decrypt the response body.
        self.key: DES = key

    def decrypt(self, body: bytes) -> str:
        """Decrypt the raw body with the symmetric key."""
        plaintext = self.key.decrypt(body)
        return plaintext

    def parse(self, body):
        """Handle the response body: decrypt, let the base class decompress, strip padding."""
        body = self.decrypt(body)
        decompressed = super(ResponseEncryptedSym, self).parse(body)
        self.payload = self.un_pad(decompressed)

    @classmethod
    def un_pad(cls, message_bytes: bytes) -> bytes:
        """Strip PKCS-style padding from the message.

        NOTE(review): a full block of padding (pad value == 8, the DES
        block size) is not stripped by the `0 < one < 8` check --
        confirm whether that is intentional.
        """
        one = message_bytes[-1]
        if 0 < one < 8:
            return message_bytes[:-one]
        return message_bytes
class ResponseEncryptedAsym(ResponseABC):
    """
    Response encrypted with an asymmetric (RSA) key pair.
    """
    def __init__(
            self,
            header: Header,
            private_key: RSA.RsaKey,
            public_key: RSA.RsaKey,
            **kwargs,  # noqa
    ):
        self.private_key = private_key
        self.public_key = public_key
        super().__init__(header, method_name=kwargs.get('method_name'))

    def parse(self, body):
        """Handle the body of the key-exchange (handshake) response."""
        # The length of the encrypted message, in network byte order, sits
        # in the first 4 bytes of the message.
        message_size_bytes = 4
        (len_encrypted,) = unpack('!i', body[:message_size_bytes])
        # Skip the 4 length bytes, then take exactly the encrypted message.
        message = bytes(body[message_size_bytes:len_encrypted + message_size_bytes])
        try:
            key: bytes = PKCS1_v1_5.new(self.private_key).decrypt(message, b'')
            # We expect to receive the symmetric key here; if anything goes
            # wrong, fail loudly (the except branch below).
            # A fake XML document is fabricated because this is the only
            # method that answers with bare data and no XML structure.
            self.payload = f"""<?xml version="1.0" encoding="UTF-8"?>
<sirena>
<answer>
<{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}>
{key}
</{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}>
</answer>
</sirena>
"""
        except Exception as e:  # noqa
            self.payload = f"""<?xml version="1.0" encoding="UTF-8"?>
<sirena>
<answer>
<{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}>
<error code="-42">Unable to decrypt AsymEncryption body, reason: {str(e)}</error>
</{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}>
</answer>
</sirena>
"""
|
from struct import unpack
from Crypto.Cipher import DES, PKCS1_v1_5
from Crypto.PublicKey import RSA
from .base import ResponseABC, Header
from ..types import AsymEncryptionHandShake
class Response(ResponseABC):
"""
Не зашифрованный ответ
"""
class ResponseEncryptedSym(ResponseABC):
"""
Ответ зашифрованный симмитричным ключем
"""
def __init__(
self,
header: Header,
key: DES,
**kwargs # noqa
):
super().__init__(
header,
method_name=kwargs.get('method_name')
)
self.key: DES = key
def decrypt(self, body: bytes) -> str:
plaintext = self.key.decrypt(body)
return plaintext
def parse(self, body):
"""Обработчик тела ответа"""
body = self.decrypt(body)
decompressed = super(ResponseEncryptedSym, self).parse(body)
self.payload = self.un_pad(decompressed)
@classmethod
def un_pad(cls, message_bytes: bytes) -> bytes:
"""Распаковать сообщение по стандартам PKCS padding"""
one = message_bytes[-1]
if 0 < one < 8:
return message_bytes[:-one]
return message_bytes
class ResponseEncryptedAsym(ResponseABC):
"""
Ответ зашифрованный ассимитричным ключем
"""
def __init__(
self,
header: Header,
private_key: RSA.RsaKey,
public_key: RSA.RsaKey,
**kwargs, # noqa
):
self.private_key = private_key
self.public_key = public_key
super().__init__(header, method_name=kwargs.get('method_name'))
def parse(self, body):
"""Обработчик тела ответа на запрос"""
# Длина зашифрованного сообщения в сетевом формате находится в первых 4 байтах сообщения
message_size_bytes = 4
(len_encrypted,) = unpack('!i', body[:message_size_bytes])
# минус 4 байта в начале + длинна шифрованного сообщения
message = bytes(body[message_size_bytes:len_encrypted + message_size_bytes])
try:
key: bytes = PKCS1_v1_5.new(self.private_key).decrypt(message, b'')
# Ожидаем получить тут открытый ключ, если что-то пойдет не так - падаем сильно и громко
# Подкидываем фейковый XML, так как это единственный метод, который отвечает просто данными без структуры
self.payload = f"""<?xml version="1.0" encoding="UTF-8"?>
<sirena>
<answer>
<{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}>
{key}
</{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}>
</answer>
</sirena>
"""
except Exception as e: # noqa
self.payload = f"""<?xml version="1.0" encoding="UTF-8"?>
<sirena>
<answer>
<{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}>
<error code="-42">Unable to decrypt AsymEncryption body, reason: {str(e)}</error>
</{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}>
</answer>
</sirena>
"""
|
ru
| 0.955131
|
Не зашифрованный ответ Ответ зашифрованный симмитричным ключем # noqa Обработчик тела ответа Распаковать сообщение по стандартам PKCS padding Ответ зашифрованный ассимитричным ключем # noqa Обработчик тела ответа на запрос # Длина зашифрованного сообщения в сетевом формате находится в первых 4 байтах сообщения # минус 4 байта в начале + длинна шифрованного сообщения # Ожидаем получить тут открытый ключ, если что-то пойдет не так - падаем сильно и громко # Подкидываем фейковый XML, так как это единственный метод, который отвечает просто данными без структуры <?xml version="1.0" encoding="UTF-8"?> <sirena> <answer> <{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}> {key} </{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}> </answer> </sirena> # noqa <?xml version="1.0" encoding="UTF-8"?> <sirena> <answer> <{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}> <error code="-42">Unable to decrypt AsymEncryption body, reason: {str(e)}</error> </{AsymEncryptionHandShake.ASYM_HAND_SHAKE.value}> </answer> </sirena>
| 2.621664
| 3
|
manage.py
|
Temeez/uncertain-dashboard
| 0
|
6626497
|
<filename>manage.py
#!/usr/bin/env python
"""Management entry point: dev server, interactive shell, and DB commands."""
import os

# NOTE: the ``flask.ext.*`` import aliases were removed in Flask 1.0;
# import the extension package directly instead.
from flask_script import Manager, Shell, Server
from flask_migrate import Migrate, MigrateCommand

from uncertaind import app
from uncertaind.models import db

migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('runserver', Server())
manager.add_command('shell', Shell())
manager.add_command('db', MigrateCommand)


@manager.command
def db_create():
    """Create all database tables defined on the SQLAlchemy metadata."""
    db.create_all()


if __name__ == "__main__":
    # Some managed features apparently need root (see warning text).
    if os.geteuid() != 0:
        print('WARNING: Not run as root! Some features will not work without root access!')
    manager.run()
|
<filename>manage.py
#!/usr/bin/env python
import os
from flask.ext.script import Manager, Shell, Server
from flask_migrate import Migrate, MigrateCommand
from uncertaind import app
from uncertaind.models import db
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('runserver', Server())
manager.add_command('shell', Shell())
manager.add_command('db', MigrateCommand)
@manager.command
def db_create():
db.create_all()
if __name__ == "__main__":
if os.geteuid() != 0:
print('WARNING: Not run as root! Some features will not work without root access!')
manager.run()
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.219069
| 2
|
python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py
|
zmxdream/Paddle
| 2
|
6626498
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import PassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
class TestMapMatmulToMulPass(PassAutoScanTest):
    """
    Tests map_matmul_to_mul_pass, which rewrites the pattern

        x_var     y_var(persistable)
            \       /
             matmul

    into a single ``mul`` op when y is a persistable 2-D weight.
    """

    def sample_predictor_configs(self, program_config):
        """Yield (config, expected op list after the pass, (atol, rtol))."""
        # cpu
        config = self.create_inference_config(use_gpu=False)
        yield config, ["mul", ], (1e-5, 1e-5)
        # for gpu
        config = self.create_inference_config(use_gpu=True)
        yield config, ["mul", ], (1e-5, 1e-5)
        # TRT
        # config = self.create_trt_inference_config()
        # config.enable_tensorrt_engine(
        #     max_batch_size=10,
        #     workspace_size=10240,
        #     min_subgraph_size=0,
        #     precision_mode=paddle_infer.PrecisionType.Float32,
        #     use_static=False,
        #     use_calib_mode=False)
        # yield config, ["mul", ], (1e-5, 1e-5)

    def add_ignore_pass_case(self):
        # Here we put some skip rules to avoid known bugs
        def teller1(program_config, predictor_config):
            """Return True for configurations that should be skipped."""
            if predictor_config.use_gpu():
                # On 3080, the results of MatMul and Mul are different
                return True
            if predictor_config.tensorrt_engine_enabled():
                # On 3080, the results of MatMul and Mul are different
                return True
            x_shape = list(program_config.inputs["matmul_x"].shape)
            if len(x_shape) > 5:
                return True
            return False

        self.add_ignore_check_case(
            teller1, IgnoreReasons.PASS_ACCURACY_ERROR,
            "The pass error on TRT while shape of mul_x > 5.")

    def sample_program_config(self, draw):
        """Draw (hypothesis) a random single-matmul program for the pass."""
        # 1. Generate shape and attr of matmul
        x_shape = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=8), min_size=2, max_size=5))
        y_shape = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=8), min_size=2, max_size=2))
        # The weight's first dim must match x's last dim for matmul.
        y_shape[0] = x_shape[-1]
        alpha = 1.0
        transpose_X = False
        transpose_Y = False
        matmul_op = OpConfig(
            "matmul",
            inputs={"X": ["matmul_x"],
                    "Y": ["matmul_y"]},
            outputs={"Out": ["matmul_out"]},
            alpha=alpha,
            transpose_X=transpose_X,
            transpose_Y=transpose_Y,
            fused_reshape_X=[],
            fused_reshape_Y=[],
            fused_transpose_X=[],
            fused_transpose_Y=[],
            fused_reshape_Out=[],
            fused_transpose_Out=[], )
        ops = [matmul_op, ]
        weights = {"matmul_y": TensorConfig(shape=y_shape), }
        inputs = {"matmul_x": TensorConfig(shape=x_shape), }
        program_config = ProgramConfig(
            ops=ops,
            weights=weights,
            inputs=inputs,
            outputs=ops[-1].outputs["Out"], )
        return program_config

    def test(self):
        self.run_and_statis(
            quant=False,
            max_examples=100,
            passes=["map_matmul_to_mul_pass"],
            max_duration=180)
if __name__ == "__main__":
    unittest.main()
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import PassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
class TestMapMatmulToMulPass(PassAutoScanTest):
    r"""Auto-scan test for ``map_matmul_to_mul_pass``.

        x_var    y_var(persistable)
           \       /
            matmul

    The pass rewrites a ``matmul`` whose Y input is a persistable 2-D
    weight into a ``mul`` op; outputs before and after the pass must agree
    within the tolerances yielded below.
    """

    def sample_predictor_configs(self, program_config):
        # Yield (config, expected_ops_after_pass, (atol, rtol)) triples.
        # cpu
        config = self.create_inference_config(use_gpu=False)
        yield config, ["mul", ], (1e-5, 1e-5)

        # for gpu
        config = self.create_inference_config(use_gpu=True)
        yield config, ["mul", ], (1e-5, 1e-5)

        # TRT
        # config = self.create_trt_inference_config()
        # config.enable_tensorrt_engine(
        #     max_batch_size=10,
        #     workspace_size=10240,
        #     min_subgraph_size=0,
        #     precision_mode=paddle_infer.PrecisionType.Float32,
        #     use_static=False,
        #     use_calib_mode=False)
        # yield config, ["mul", ], (1e-5, 1e-5)

    def add_ignore_pass_case(self):
        # Here we put some skip rules to avoid known bugs
        def teller1(program_config, predictor_config):
            # Return True for configurations whose accuracy check should
            # be skipped (known numerical differences, unsupported ranks).
            if predictor_config.use_gpu():
                # On 3080, the results of MatMul and Mul are different
                return True
            if predictor_config.tensorrt_engine_enabled():
                # On 3080, the results of MatMul and Mul are different
                return True
            x_shape = list(program_config.inputs["matmul_x"].shape)
            if len(x_shape) > 5:
                return True
            return False

        self.add_ignore_check_case(
            teller1, IgnoreReasons.PASS_ACCURACY_ERROR,
            "The pass error on TRT while shape of mul_x > 5.")

    def sample_program_config(self, draw):
        # 1. Generate shape and attr of matmul
        # X: rank 2-5 with dims in [1, 8]; Y: a 2-D weight whose first dim
        # is forced to match X's last dim so the matmul is well-formed.
        x_shape = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=8), min_size=2, max_size=5))
        y_shape = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=8), min_size=2, max_size=2))
        y_shape[0] = x_shape[-1]
        # Only the plain matmul form (no alpha scaling, no transposes, no
        # fused reshape/transpose attributes) is eligible for the pass.
        alpha = 1.0
        transpose_X = False
        transpose_Y = False
        matmul_op = OpConfig(
            "matmul",
            inputs={"X": ["matmul_x"],
                    "Y": ["matmul_y"]},
            outputs={"Out": ["matmul_out"]},
            alpha=alpha,
            transpose_X=transpose_X,
            transpose_Y=transpose_Y,
            fused_reshape_X=[],
            fused_reshape_Y=[],
            fused_transpose_X=[],
            fused_transpose_Y=[],
            fused_reshape_Out=[],
            fused_transpose_Out=[], )
        ops = [matmul_op, ]
        # Y is declared as a persistable weight; X is a runtime-fed input.
        weights = {"matmul_y": TensorConfig(shape=y_shape), }
        inputs = {"matmul_x": TensorConfig(shape=x_shape), }
        program_config = ProgramConfig(
            ops=ops,
            weights=weights,
            inputs=inputs,
            outputs=ops[-1].outputs["Out"], )
        return program_config

    def test(self):
        # Run up to 100 hypothesis-generated programs (3 minute budget)
        # and check the statistics for map_matmul_to_mul_pass.
        self.run_and_statis(
            quant=False,
            max_examples=100,
            passes=["map_matmul_to_mul_pass"],
            max_duration=180)
if __name__ == "__main__":
unittest.main()
|
en
| 0.755761
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. x_var y_var(persistable) \ / matmul # cpu # for gpu # TRT # config = self.create_trt_inference_config() # config.enable_tensorrt_engine( # max_batch_size=10, # workspace_size=10240, # min_subgraph_size=0, # precision_mode=paddle_infer.PrecisionType.Float32, # use_static=False, # use_calib_mode=False) # yield config, ["mul", ], (1e-5, 1e-5) # Here we put some skip rules to avoid known bugs # On 3080, the results of MatMul and Mul are different # On 3080, the results of MatMul and Mul are different # 1. Generate shape and attr of matmul
| 1.847797
| 2
|
examples/datatables/project/buildui.py
|
DevStar0804/karen-django-react
| 2,536
|
6626499
|
import pandas as pd
import re
import numpy as np
from collections import OrderedDict
from pyxley.charts.datatables import DataTable
from pyxley import UILayout, register_layouts
import json
def get_data(filename="./project/static/data.json"):
    """Load the demo dataset and derive randomized salary bounds.

    Reads a JSON file of records into a DataFrame, drops incomplete rows,
    parses the formatted salary strings (e.g. "$320,800") into floats, and
    adds ``salary_lower``/``salary_upper`` columns (roughly +/-15% with
    gaussian noise) for the confidence-band chart.

    Args:
        filename: Path to the JSON data file.

    Returns:
        pandas.DataFrame with a float ``salary`` column plus
        ``salary_lower`` and ``salary_upper``.
    """
    # Use a context manager so the file handle is closed promptly; the
    # original left the handle open until garbage collection.
    with open(filename, "r") as fp:
        df = pd.DataFrame(json.load(fp))
    df = df.dropna()
    # Strip currency symbols / thousands separators and cast as float.
    # Raw string avoids the invalid "\d" escape warning in modern Python.
    df["salary"] = df["salary"].apply(lambda x: float(re.sub(r"[^\d.]", "", x)))
    # make some random bounds
    _lower = (1. - (0.03 * np.random.randn(df.shape[0]) + 0.15))
    _upper = (1. + (0.03 * np.random.randn(df.shape[0]) + 0.15))
    df = df.assign(
        salary_upper=_upper * df.salary,
        salary_lower=_lower * df.salary,
    )
    return df
def create_datatable(df, tablename="mytable"):
    """Build the pyxley DataTable for the demo salary data.

    Args:
        df: DataFrame produced by ``get_data`` (must contain the columns
            referenced below).
        tablename: Identifier used both as the table name and as the
            ``/api/<tablename>/`` route segment.

    Returns:
        A configured pyxley ``DataTable``.
    """
    # Column order and labels; salary_lower additionally carries the
    # confidence-band metadata so the front end can draw the salary range.
    cols = OrderedDict([
        ("position", {"label": "Position"}),
        ("office", {"label": "Office"}),
        ("start_date", {"label": "Start Date"}),
        ("salary_lower", {
            "label": "Salary Range",
            "confidence": {
                "lower": "salary_lower",
                "upper": "salary_upper"
            }
        })
    ])
    # JS snippets run by DataTables after init and on each redraw: both
    # re-render the confidence-interval SVGs for the visible page of
    # column 3 (the salary-range column).
    addfunc = (
        """
        confidence_interval(this.api().column(3,
        {{"page":"current"}}).data(), "{tablename}");
        """.format(tablename=tablename))
    drawfunc = (
        """
        confidence_interval(this.api().column(3,
        {{"page":"current"}}).data(), "{tablename}");
        """.format(tablename=tablename))
    # Column 3 renders an empty SVG placeholder that the JS above fills in.
    _table = DataTable(tablename, "/api/{}/".format(tablename), df,
        columns=cols, paging=True, pageLength=9, scrollX=True,
        columnDefs=[{
            "render": """<svg width="156" height="20"><g></g></svg>""",
            "orderable": False,
            "targets": 3
        }],
        sDom='<"top">rt<"bottom"lp><"clear">', deferRender=True,
        initComplete=addfunc, drawCallback=drawfunc)
    return _table
def make_table_layout(filename):
    """Create the pyxley UI layout containing the demo datatable.

    Args:
        filename: Path to the JSON data file consumed by ``get_data``.

    Returns:
        A ``UILayout`` wrapping a single DataTable chart.
    """
    df = get_data(filename)
    ui = UILayout("SimpleChart")
    ui.add_chart(create_datatable(df))
    return ui
def get_layouts(mod, filename):
    """Build the datatable layout and register it (and its routes) on *mod*.

    Args:
        mod: Object the API routes and layouts are attached to.
            NOTE(review): presumably a Flask app/blueprint per pyxley's
            conventions — confirm against the caller.
        filename: Path to the JSON data file.
    """
    dt_ui = make_table_layout(filename)
    # Attach the /api/<tablename>/ endpoints to the module.
    dt_ui.assign_routes(mod)
    dt_props = dt_ui.build_props()
    _layouts = {
        "datatable": {"layout": [dt_props], "title": "Datatables"}
    }
    register_layouts(_layouts, mod)
|
import pandas as pd
import re
import numpy as np
from collections import OrderedDict
from pyxley.charts.datatables import DataTable
from pyxley import UILayout, register_layouts
import json
def get_data(filename="./project/static/data.json"):
    """Load the demo dataset and derive randomized salary bounds.

    Reads a JSON file of records into a DataFrame, drops incomplete rows,
    parses the formatted salary strings (e.g. "$320,800") into floats, and
    adds ``salary_lower``/``salary_upper`` columns (roughly +/-15% with
    gaussian noise) for the confidence-band chart.

    Args:
        filename: Path to the JSON data file.

    Returns:
        pandas.DataFrame with a float ``salary`` column plus
        ``salary_lower`` and ``salary_upper``.
    """
    # Use a context manager so the file handle is closed promptly; the
    # original left the handle open until garbage collection.
    with open(filename, "r") as fp:
        df = pd.DataFrame(json.load(fp))
    df = df.dropna()
    # Strip currency symbols / thousands separators and cast as float.
    # Raw string avoids the invalid "\d" escape warning in modern Python.
    df["salary"] = df["salary"].apply(lambda x: float(re.sub(r"[^\d.]", "", x)))
    # make some random bounds
    _lower = (1. - (0.03 * np.random.randn(df.shape[0]) + 0.15))
    _upper = (1. + (0.03 * np.random.randn(df.shape[0]) + 0.15))
    df = df.assign(
        salary_upper=_upper * df.salary,
        salary_lower=_lower * df.salary,
    )
    return df
def create_datatable(df, tablename="mytable"):
cols = OrderedDict([
("position", {"label": "Position"}),
("office", {"label": "Office"}),
("start_date", {"label": "Start Date"}),
("salary_lower", {
"label": "Salary Range",
"confidence": {
"lower": "salary_lower",
"upper": "salary_upper"
}
})
])
addfunc = (
"""
confidence_interval(this.api().column(3,
{{"page":"current"}}).data(), "{tablename}");
""".format(tablename=tablename))
drawfunc = (
"""
confidence_interval(this.api().column(3,
{{"page":"current"}}).data(), "{tablename}");
""".format(tablename=tablename))
_table = DataTable(tablename, "/api/{}/".format(tablename), df,
columns=cols, paging=True, pageLength=9, scrollX=True,
columnDefs=[{
"render": """<svg width="156" height="20"><g></g></svg>""",
"orderable": False,
"targets": 3
}],
sDom='<"top">rt<"bottom"lp><"clear">', deferRender=True,
initComplete=addfunc, drawCallback=drawfunc)
return _table
def make_table_layout(filename):
df = get_data(filename)
ui = UILayout("SimpleChart")
ui.add_chart(create_datatable(df))
return ui
def get_layouts(mod, filename):
dt_ui = make_table_layout(filename)
dt_ui.assign_routes(mod)
dt_props = dt_ui.build_props()
_layouts = {
"datatable": {"layout": [dt_props], "title": "Datatables"}
}
register_layouts(_layouts, mod)
|
en
| 0.320163
|
# fix the salary column and cast as float # make some random bounds confidence_interval(this.api().column(3, {{"page":"current"}}).data(), "{tablename}"); confidence_interval(this.api().column(3, {{"page":"current"}}).data(), "{tablename}"); <svg width="156" height="20"><g></g></svg>
| 2.952867
| 3
|
python/az/aro/azext_aro/_rbac.py
|
amanohar/ARO-RP
| 0
|
6626500
|
<gh_stars>0
# Copyright (c) Microsoft Corporation.
# Licensed under the Apache License 2.0.
import os
import uuid
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.profiles import get_sdk
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import resource_id
from msrestazure.tools import parse_resource_id
CONTRIBUTOR = '<PASSWORD>'
DEVELOPMENT_CONTRIBUTOR = '<PASSWORD>'
def _gen_uuid():
return uuid.uuid4()
def assign_contributor_to_vnet(cli_ctx, vnet, object_id):
    """Grant the Contributor role on a vnet to the given service principal.

    Idempotent: if an equivalent role assignment already exists at the
    vnet scope, nothing new is created.

    Args:
        cli_ctx: Azure CLI context used to construct SDK clients.
        vnet: Resource ID of the virtual network (the assignment scope).
        object_id: Object ID of the service principal being granted.
    """
    auth_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION)
    # Resolve the models class matching the installed SDK profile.
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    # Role definition IDs are subscription-scoped; use the development
    # contributor role when running in RP development mode.
    role_definition_id = resource_id(
        subscription=get_subscription_id(cli_ctx),
        namespace='Microsoft.Authorization',
        type='roleDefinitions',
        name=DEVELOPMENT_CONTRIBUTOR if rp_mode_development() else CONTRIBUTOR,
    )
    # Skip creation if the principal already holds this role on the vnet.
    if has_assignment(auth_client.role_assignments.list_for_scope(vnet), role_definition_id, object_id):
        return
    # generate random uuid for role assignment
    role_uuid = _gen_uuid()
    auth_client.role_assignments.create(vnet, role_uuid, RoleAssignmentCreateParameters(
        role_definition_id=role_definition_id,
        principal_id=object_id,
        principal_type='ServicePrincipal',
    ))
def assign_contributor_to_routetable(cli_ctx, master_subnet, worker_subnet, object_id):
    """Grant the Contributor role on the route tables attached to the
    master and worker subnets to the given service principal.

    Subnets without a route table are skipped, a route table shared by
    both subnets is processed only once (collected into a set), and
    existing assignments are not duplicated.

    Args:
        cli_ctx: Azure CLI context used to construct SDK clients.
        master_subnet: Resource ID of the master subnet.
        worker_subnet: Resource ID of the worker subnet.
        object_id: Object ID of the service principal being granted.
    """
    auth_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION)
    network_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK)
    # Resolve the models class matching the installed SDK profile.
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    # Role definition IDs are subscription-scoped; use the development
    # contributor role when running in RP development mode.
    role_definition_id = resource_id(
        subscription=get_subscription_id(cli_ctx),
        namespace='Microsoft.Authorization',
        type='roleDefinitions',
        name=DEVELOPMENT_CONTRIBUTOR if rp_mode_development() else CONTRIBUTOR,
    )
    # Collect the distinct route tables referenced by either subnet.
    route_tables = set()
    for sn in [master_subnet, worker_subnet]:
        sid = parse_resource_id(sn)
        subnet = network_client.subnets.get(resource_group_name=sid['resource_group'],
                                            virtual_network_name=sid['name'],
                                            subnet_name=sid['resource_name'])
        if subnet.route_table is not None:
            route_tables.add(subnet.route_table.id)
    for rt in route_tables:
        # Skip route tables where the assignment already exists.
        if has_assignment(auth_client.role_assignments.list_for_scope(rt),
                          role_definition_id, object_id):
            continue
        role_uuid = _gen_uuid()
        auth_client.role_assignments.create(rt, role_uuid, RoleAssignmentCreateParameters(
            role_definition_id=role_definition_id,
            principal_id=object_id,
            principal_type='ServicePrincipal',
        ))
def has_assignment(assignments, role_definition_id, object_id):
    """Return True if any assignment matches the given role definition
    and principal, comparing both IDs case-insensitively."""
    wanted_role = role_definition_id.lower()
    wanted_principal = object_id.lower()
    return any(
        assignment.role_definition_id.lower() == wanted_role
        and assignment.principal_id.lower() == wanted_principal
        for assignment in assignments
    )
def rp_mode_development():
    """Return True when the RP_MODE environment variable is set to
    'development' (case-insensitive); False when unset or different."""
    mode = os.environ.get('RP_MODE', '')
    return mode.lower() == 'development'
|
# Copyright (c) Microsoft Corporation.
# Licensed under the Apache License 2.0.
import os
import uuid
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.profiles import get_sdk
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import resource_id
from msrestazure.tools import parse_resource_id
CONTRIBUTOR = '<PASSWORD>'
DEVELOPMENT_CONTRIBUTOR = '<PASSWORD>'
def _gen_uuid():
return uuid.uuid4()
def assign_contributor_to_vnet(cli_ctx, vnet, object_id):
auth_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION)
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
role_definition_id = resource_id(
subscription=get_subscription_id(cli_ctx),
namespace='Microsoft.Authorization',
type='roleDefinitions',
name=DEVELOPMENT_CONTRIBUTOR if rp_mode_development() else CONTRIBUTOR,
)
if has_assignment(auth_client.role_assignments.list_for_scope(vnet), role_definition_id, object_id):
return
# generate random uuid for role assignment
role_uuid = _gen_uuid()
auth_client.role_assignments.create(vnet, role_uuid, RoleAssignmentCreateParameters(
role_definition_id=role_definition_id,
principal_id=object_id,
principal_type='ServicePrincipal',
))
def assign_contributor_to_routetable(cli_ctx, master_subnet, worker_subnet, object_id):
auth_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION)
network_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK)
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
role_definition_id = resource_id(
subscription=get_subscription_id(cli_ctx),
namespace='Microsoft.Authorization',
type='roleDefinitions',
name=DEVELOPMENT_CONTRIBUTOR if rp_mode_development() else CONTRIBUTOR,
)
route_tables = set()
for sn in [master_subnet, worker_subnet]:
sid = parse_resource_id(sn)
subnet = network_client.subnets.get(resource_group_name=sid['resource_group'],
virtual_network_name=sid['name'],
subnet_name=sid['resource_name'])
if subnet.route_table is not None:
route_tables.add(subnet.route_table.id)
for rt in route_tables:
if has_assignment(auth_client.role_assignments.list_for_scope(rt),
role_definition_id, object_id):
continue
role_uuid = _gen_uuid()
auth_client.role_assignments.create(rt, role_uuid, RoleAssignmentCreateParameters(
role_definition_id=role_definition_id,
principal_id=object_id,
principal_type='ServicePrincipal',
))
def has_assignment(assignments, role_definition_id, object_id):
    """Return True if any assignment matches the given role definition
    and principal, comparing both IDs case-insensitively."""
    wanted_role = role_definition_id.lower()
    wanted_principal = object_id.lower()
    return any(
        assignment.role_definition_id.lower() == wanted_role
        and assignment.principal_id.lower() == wanted_principal
        for assignment in assignments
    )
def rp_mode_development():
    """Return True when the RP_MODE environment variable is set to
    'development' (case-insensitive); False when unset or different."""
    mode = os.environ.get('RP_MODE', '')
    return mode.lower() == 'development'
|
en
| 0.801546
|
# Copyright (c) Microsoft Corporation. # Licensed under the Apache License 2.0. # generate random uuid for role assignment
| 1.863942
| 2
|
src/first_month/.idea/task_1_3_3.py
|
NareTorosyan/Python_Introduction_to_Data_Science
| 0
|
6626501
|
#1 Write a Python function which returns factorial value of given number n.
def fact(n):
    """Return n! for a non-negative integer n.

    The original base case only handled n == 1, so fact(0) (or any
    n <= 0) recursed forever; treat 0! and 1! as 1 and reject negatives.
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if n <= 1:
        return 1
    return n * fact(n - 1)
#2 Write a Python function which returns the n-th element of the fibonacci series.
def fib(n):
    """Return the n-th element of the Fibonacci series (1-indexed).

    fib(1) == 0 and fib(2) == 1, matching the original recursive
    definition, but computed iteratively in O(n) time instead of the
    original's exponential double recursion (which also recursed forever
    for n < 1).
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a
def main():
    """Demo entry point: print the 4th Fibonacci element, then 19!."""
    for value in (fib(4), fact(19)):
        print(value)
|
#1 Write a Python function which returns factorial value of given number n.
def fact(n):
    """Return n! for a non-negative integer n.

    The original base case only handled n == 1, so fact(0) (or any
    n <= 0) recursed forever; treat 0! and 1! as 1 and reject negatives.
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if n <= 1:
        return 1
    return n * fact(n - 1)
#2 Write a Python function which returns the n-th element of the fibonacci series.
def fib(n):
    """Return the n-th element of the Fibonacci series (1-indexed).

    fib(1) == 0 and fib(2) == 1, matching the original recursive
    definition, but computed iteratively in O(n) time instead of the
    original's exponential double recursion (which also recursed forever
    for n < 1).
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a
def main():
    """Demo entry point: print the 4th Fibonacci element, then 19!."""
    for value in (fib(4), fact(19)):
        print(value)
|
en
| 0.612906
|
#1 Write a Python function which returns factorial value of given number n. #2 Write a Python function which returns the n-th element of the fibonacci series.
| 4.266487
| 4
|
timekeeper/tools/lockwatcher.py
|
antonopa/timekeeper
| 1
|
6626502
|
<reponame>antonopa/timekeeper
#!/usr/bin/env python3
"""
Basic dbus listener to monitor locking/unlocking of screen. Unlocking tries to insert a new day
(only first insertion, aka start of day, will succeed) and locking update the end of day time
(last lock is assumed to be the time we leave the office).
"""
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GLib
from os import path
from timekeeper.worktimedb import WorkTimeDB
SQLITE_FILE = path.join(path.expanduser('~'), '.timekeeper.sqlite')
def handle_lock(lock_state):
    """ ActiveChanged signal from org.gnome.ScreenSaver is emitted whenever
    a user lock or unlocks their system. We use this to update our DB.
    Since INSERT only inserts a day if it doesn't exist only the first
    unlock (LockState False) in a day will insert an entry.
    Every lock (LockState True) updates the DB with the current time and
    as a result last lock marks the end of the work day.

    Args:
        lock_state: True when the screen was just locked, False when it
            was just unlocked.
    """
    with WorkTimeDB(SQLITE_FILE) as worktime:
        if lock_state:
            # System locked: record "now" as the (latest) end of today.
            worktime.update_end(day='now', end='now')
        else:
            # System unlocked: insert today's row (no-op after the first).
            worktime.insert_day('now')
# Map of DBus signal name -> the service/interface to listen on and the
# callback to invoke. NOTE(review): the handle_lock docstring mentions
# org.gnome.ScreenSaver but this subscribes to org.freedesktop.ScreenSaver —
# confirm which service the target desktop actually emits.
SIGNALS = {
    'ActiveChanged':
        {'service':'org.freedesktop.ScreenSaver', 'iface':'org.freedesktop.ScreenSaver', 'method':handle_lock}
}
def attach_to_signal(name, properties):
    """ Attach method to a DBus signal.

    Args:
        name: DBus signal name (e.g. 'ActiveChanged').
        properties: Dict with 'service', 'iface' and 'method' keys, as in
            the SIGNALS table.
    """
    # Subscribe properties['method'] on the session bus, filtered by the
    # given interface and bus (service) name.
    bus = dbus.SessionBus()
    bus.add_signal_receiver(
        properties['method'], signal_name=name,
        dbus_interface=properties['iface'], bus_name=properties['service'])
def main():
    """ main entry """
    # Integrate dbus signal dispatch with the GLib main loop, register all
    # configured signal handlers, then block forever servicing events.
    DBusGMainLoop(set_as_default=True)
    loop = GLib.MainLoop()
    for signal, properties in SIGNALS.items():
        attach_to_signal(signal, properties)
    loop.run()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
"""
Basic dbus listener to monitor locking/unlocking of screen. Unlocking tries to insert a new day
(only first insertion, aka start of day, will succeed) and locking update the end of day time
(last lock is assumed to be the time we leave the office).
"""
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GLib
from os import path
from timekeeper.worktimedb import WorkTimeDB
SQLITE_FILE = path.join(path.expanduser('~'), '.timekeeper.sqlite')
def handle_lock(lock_state):
""" ActiveChanged signal from org.gnome.ScreenSaver is emitted whenever
a user lock or unlocks their system. We use this to update our DB.
Since INSERT only inserts a day if it doesn't exist only the first
unlock (LockState False) in a day will insert an entry.
Every lock (LockState True) updates the DB with the current time and
as a result last lock marks the end of the work day.
"""
with WorkTimeDB(SQLITE_FILE) as worktime:
if lock_state:
# System locked
worktime.update_end(day='now', end='now')
else:
# System unlocked
worktime.insert_day('now')
SIGNALS = {
'ActiveChanged':
{'service':'org.freedesktop.ScreenSaver', 'iface':'org.freedesktop.ScreenSaver', 'method':handle_lock}
}
def attach_to_signal(name, properties):
""" Attach method to a DBus signal """
bus = dbus.SessionBus()
bus.add_signal_receiver(
properties['method'], signal_name=name,
dbus_interface=properties['iface'], bus_name=properties['service'])
def main():
""" main entry """
DBusGMainLoop(set_as_default=True)
loop = GLib.MainLoop()
for signal, properties in SIGNALS.items():
attach_to_signal(signal, properties)
loop.run()
if __name__ == "__main__":
main()
|
en
| 0.874086
|
#!/usr/bin/env python3 Basic dbus listener to monitor locking/unlocking of screen. Unlocking tries to insert a new day (only first insertion, aka start of day, will succeed) and locking update the end of day time (last lock is assumed to be the time we leave the office). ActiveChanged signal from org.gnome.ScreenSaver is emitted whenever a user lock or unlocks their system. We use this to update our DB. Since INSERT only inserts a day if it doesn't exist only the first unlock (LockState False) in a day will insert an entry. Every lock (LockState True) updates the DB with the current time and as a result last lock marks the end of the work day. # System locked # System unlocked Attach method to a DBus signal main entry
| 2.995117
| 3
|
src/data/iter_parquet/iter_parquet.py
|
grantmwilliams/Useful-Snippets
| 0
|
6626503
|
"""
This program takes a S3 URi a parquet file returns a lazy iterator of tuples to the values
I have 3 implementations shown below. Benchmarking shows them to be similar in speed with 2 & 3 tending to be fastest.
"""
import s3fs
import pyarrow as pa
import pyarrow.parquet as pq
from itertools import chain
from typing import Tuple, Any
def iter_parquet(s3_uri: str, columns=None, batch_size=1_000) -> "Iterator[Tuple[Any, ...]]":
    """Lazily yield the rows of a parquet file on S3 as tuples.

    Args:
        s3_uri: S3 URI of the parquet file.
        columns: Optional list of column names to read (None reads all).
        batch_size: Number of rows per pyarrow RecordBatch.

    Yields:
        One tuple of python values per row.

    Note: the original annotated the return type as ``None`` even though
    this is a generator, and rebound the ``columns`` parameter inside the
    loop; both are fixed here without changing behavior.
    """
    # create file system for file interface objects from S3
    fs = s3fs.S3FileSystem()
    # open a file interface object
    with fs.open(s3_uri) as fp:
        # convert the python file object into a ParquetFile object for iterating
        parquet_file = pq.ParquetFile(fp)
        # an iterator of pyarrow.RecordBatch
        record_batches = parquet_file.iter_batches(batch_size=batch_size, columns=columns)
        for batch in record_batches:
            # Distinct local name (the original shadowed `columns` here).
            pycols = [col.to_pylist() for col in batch.columns]
            # convert from columnar to row format
            for row in zip(*pycols):
                yield row
def iter_parquet2(s3_uri: str, columns = None, batch_size=1_000) -> "Iterator[Tuple[Any, ...]]":
    """Lazily yield the rows of a parquet file on S3 as tuples.

    itertools.chain-based variant: flattens the stream of RecordBatches
    into rows of pyarrow scalars, then converts each value with .as_py().
    """
    def _stream_from_record(record_batches: "Iterable[pa.RecordBatch]"):
        # zip(*batch) pairs values across a batch's columns (one row at a
        # time); chain.from_iterable flattens across batches.
        return chain.from_iterable(map(lambda batch: zip(*batch), record_batches))
    # create file system for file interface objects from S3
    fs = s3fs.S3FileSystem()
    # open a file interface object
    with fs.open(s3_uri) as fp:
        # convert the python file object into a ParquetFile object for iterating
        parquet_file = pq.ParquetFile(fp)
        # an iterator of pyarrow.RecordBatch
        record_batches = parquet_file.iter_batches(batch_size=batch_size, columns=columns)
        arrow_iter = _stream_from_record(record_batches)
        # Each row is a tuple of pyarrow scalars; as_py() unboxes them.
        yield from (tuple(value.as_py() for value in row) for row in arrow_iter)
def iter_parquet3(s3_uri: str, columns = None, batch_size=1_000) -> "Iterator[Tuple[Any, ...]]":
    """Lazily yield the rows of a parquet file on S3 as tuples.

    Fully itertools/map-based variant: converts each batch's columns to
    python lists, zips them into rows, and chains across batches.
    """
    # create file system for file interface objects from S3
    fs = s3fs.S3FileSystem()
    # open a file interface object
    with fs.open(s3_uri) as fp:
        # convert the python file object into a ParquetFile object for iterating
        parquet_file = pq.ParquetFile(fp)
        # an iterator of pyarrow.RecordBatch
        record_batches = parquet_file.iter_batches(batch_size=batch_size, columns=columns)
        # convert from columnar format of pyarrow arrays to a row format of python objects (yields tuples)
        yield from chain.from_iterable(zip(*map(lambda col: col.to_pylist(), batch.columns)) for batch in record_batches)
|
"""
This program takes a S3 URi a parquet file returns a lazy iterator of tuples to the values
I have 3 implementations shown below. Benchmarking shows them to be similar in speed with 2 & 3 tending to be fastest.
"""
import s3fs
import pyarrow as pa
import pyarrow.parquet as pq
from itertools import chain
from typing import Tuple, Any
def iter_parquet(s3_uri: str, columns = None, batch_size=1_000) -> None:
# create file system for file interface objects from S3
fs = s3fs.S3FileSystem()
# open a file interface object
with fs.open(s3_uri) as fp:
# convert the python file object into a ParquetFile object for iterating
parquet_file = pq.ParquetFile(fp)
# an iterator of pyarrow.RecordBatch
record_batches = parquet_file.iter_batches(batch_size=batch_size, columns=columns)
for batch in record_batches:
columns = batch.columns
pycols = []
for col in columns:
pycols.append(col.to_pylist())
# convert from columnar to row format
for row in zip(*pycols):
yield row
def iter_parquet2(s3_uri: str, columns = None, batch_size=1_000) -> None:
def _stream_from_record(record_batches: pa.RecordBatch):
return chain.from_iterable(map(lambda batch: zip(*batch), record_batches))
# create file system for file interface objects from S3
fs = s3fs.S3FileSystem()
# open a file interface object
with fs.open(s3_uri) as fp:
# convert the python file object into a ParquetFile object for iterating
parquet_file = pq.ParquetFile(fp)
# an iterator of pyarrow.RecordBatch
record_batches = parquet_file.iter_batches(batch_size=batch_size, columns=columns)
arrow_iter = _stream_from_record(record_batches)
yield from (tuple(value.as_py() for value in row) for row in arrow_iter)
def iter_parquet3(s3_uri: str, columns = None, batch_size=1_000) -> Tuple[Any]:
# create file system for file interface objects from S3
fs = s3fs.S3FileSystem()
# open a file interface object
with fs.open(s3_uri) as fp:
# convert the python file object into a ParquetFile object for iterating
parquet_file = pq.ParquetFile(fp)
# an iterator of pyarrow.RecordBatch
record_batches = parquet_file.iter_batches(batch_size=batch_size, columns=columns)
# convert from columnar format of pyarrow arrays to a row format of python objects (yields tuples)
yield from chain.from_iterable(zip(*map(lambda col: col.to_pylist(), batch.columns)) for batch in record_batches)
|
en
| 0.718635
|
This program takes a S3 URi a parquet file returns a lazy iterator of tuples to the values I have 3 implementations shown below. Benchmarking shows them to be similar in speed with 2 & 3 tending to be fastest. # create file system for file interface objects from S3 # open a file interface object # convert the python file object into a ParquetFile object for iterating # an iterator of pyarrow.RecordBatch # convert from columnar to row format # create file system for file interface objects from S3 # open a file interface object # convert the python file object into a ParquetFile object for iterating # an iterator of pyarrow.RecordBatch # create file system for file interface objects from S3 # open a file interface object # convert the python file object into a ParquetFile object for iterating # an iterator of pyarrow.RecordBatch # convert from columnar format of pyarrow arrays to a row format of python objects (yields tuples)
| 2.97929
| 3
|
integration_tests/test-packages/python/pythonspecific2/setup.py
|
franklinen/doppel-cli
| 5
|
6626504
|
from setuptools import find_packages
from setuptools import setup

# Minimal packaging stub used by the integration tests: only a name,
# version, and auto-discovered packages are needed.
setup(
    name="pythonspecific2",
    version="0.0.1",
    packages=find_packages(),
)
|
from setuptools import find_packages
from setuptools import setup
setup(
name="pythonspecific2",
version="0.0.1",
packages=find_packages(),
)
|
none
| 1
| 1.209932
| 1
|
|
venv/Lib/site-packages/tensorflow_core/_api/v2/math/__init__.py
|
TEDxVienna/continuum
| 0
|
6626505
|
<filename>venv/Lib/site-packages/tensorflow_core/_api/v2/math/__init__.py
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Math Operations.
Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.
Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
TensorFlow provides a variety of math functions including:
* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)
See: `tf.linalg` for matrix and tensor functions.
<a id=Segmentation></a>
## About Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
# ==> [[0 0 0 0]
# [5 6 7 8]]
```
The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices use the equivalent `unsorted_segment_` function.
These functions take an additional argument `num_segments` so that the output
tensor can be efficiently allocated.
``` python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6, 8, 10, 12],
# [-1, -2, -3, -4]]
```
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.ops.check_ops import is_non_decreasing
from tensorflow.python.ops.check_ops import is_strictly_increasing
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.gen_array_ops import invert_permutation
from tensorflow.python.ops.gen_math_ops import acos
from tensorflow.python.ops.gen_math_ops import acosh
from tensorflow.python.ops.gen_math_ops import add
from tensorflow.python.ops.gen_math_ops import asin
from tensorflow.python.ops.gen_math_ops import asinh
from tensorflow.python.ops.gen_math_ops import atan
from tensorflow.python.ops.gen_math_ops import atan2
from tensorflow.python.ops.gen_math_ops import atanh
from tensorflow.python.ops.gen_math_ops import bessel_i0e
from tensorflow.python.ops.gen_math_ops import bessel_i1e
from tensorflow.python.ops.gen_math_ops import betainc
from tensorflow.python.ops.gen_math_ops import ceil
from tensorflow.python.ops.gen_math_ops import cos
from tensorflow.python.ops.gen_math_ops import cosh
from tensorflow.python.ops.gen_math_ops import digamma
from tensorflow.python.ops.gen_math_ops import erf
from tensorflow.python.ops.gen_math_ops import erfc
from tensorflow.python.ops.gen_math_ops import exp
from tensorflow.python.ops.gen_math_ops import expm1
from tensorflow.python.ops.gen_math_ops import floor
from tensorflow.python.ops.gen_math_ops import floor_mod as floormod
from tensorflow.python.ops.gen_math_ops import floor_mod as mod
from tensorflow.python.ops.gen_math_ops import greater
from tensorflow.python.ops.gen_math_ops import greater_equal
from tensorflow.python.ops.gen_math_ops import igamma
from tensorflow.python.ops.gen_math_ops import igammac
from tensorflow.python.ops.gen_math_ops import is_finite
from tensorflow.python.ops.gen_math_ops import is_inf
from tensorflow.python.ops.gen_math_ops import is_nan
from tensorflow.python.ops.gen_math_ops import less
from tensorflow.python.ops.gen_math_ops import less_equal
from tensorflow.python.ops.gen_math_ops import lgamma
from tensorflow.python.ops.gen_math_ops import log
from tensorflow.python.ops.gen_math_ops import log1p
from tensorflow.python.ops.gen_math_ops import logical_and
from tensorflow.python.ops.gen_math_ops import logical_not
from tensorflow.python.ops.gen_math_ops import logical_or
from tensorflow.python.ops.gen_math_ops import maximum
from tensorflow.python.ops.gen_math_ops import minimum
from tensorflow.python.ops.gen_math_ops import neg as negative
from tensorflow.python.ops.gen_math_ops import next_after as nextafter
from tensorflow.python.ops.gen_math_ops import polygamma
from tensorflow.python.ops.gen_math_ops import reciprocal
from tensorflow.python.ops.gen_math_ops import rint
from tensorflow.python.ops.gen_math_ops import rsqrt
from tensorflow.python.ops.gen_math_ops import segment_max
from tensorflow.python.ops.gen_math_ops import segment_mean
from tensorflow.python.ops.gen_math_ops import segment_min
from tensorflow.python.ops.gen_math_ops import segment_prod
from tensorflow.python.ops.gen_math_ops import segment_sum
from tensorflow.python.ops.gen_math_ops import sign
from tensorflow.python.ops.gen_math_ops import sin
from tensorflow.python.ops.gen_math_ops import sinh
from tensorflow.python.ops.gen_math_ops import sqrt
from tensorflow.python.ops.gen_math_ops import square
from tensorflow.python.ops.gen_math_ops import squared_difference
from tensorflow.python.ops.gen_math_ops import tan
from tensorflow.python.ops.gen_math_ops import tanh
from tensorflow.python.ops.gen_math_ops import unsorted_segment_max
from tensorflow.python.ops.gen_math_ops import unsorted_segment_min
from tensorflow.python.ops.gen_math_ops import unsorted_segment_prod
from tensorflow.python.ops.gen_math_ops import unsorted_segment_sum
from tensorflow.python.ops.gen_math_ops import xdivy
from tensorflow.python.ops.gen_math_ops import xlogy
from tensorflow.python.ops.gen_math_ops import zeta
from tensorflow.python.ops.gen_nn_ops import softplus
from tensorflow.python.ops.gen_nn_ops import softsign
from tensorflow.python.ops.math_ops import abs
from tensorflow.python.ops.math_ops import accumulate_n
from tensorflow.python.ops.math_ops import add_n
from tensorflow.python.ops.math_ops import angle
from tensorflow.python.ops.math_ops import argmax_v2 as argmax
from tensorflow.python.ops.math_ops import argmin_v2 as argmin
from tensorflow.python.ops.math_ops import bincount
from tensorflow.python.ops.math_ops import conj
from tensorflow.python.ops.math_ops import count_nonzero_v2 as count_nonzero
from tensorflow.python.ops.math_ops import cumprod
from tensorflow.python.ops.math_ops import cumsum
from tensorflow.python.ops.math_ops import cumulative_logsumexp
from tensorflow.python.ops.math_ops import div_no_nan as divide_no_nan
from tensorflow.python.ops.math_ops import divide
from tensorflow.python.ops.math_ops import equal
from tensorflow.python.ops.math_ops import floordiv
from tensorflow.python.ops.math_ops import imag
from tensorflow.python.ops.math_ops import log_sigmoid
from tensorflow.python.ops.math_ops import logical_xor
from tensorflow.python.ops.math_ops import multiply
from tensorflow.python.ops.math_ops import multiply_no_nan
from tensorflow.python.ops.math_ops import not_equal
from tensorflow.python.ops.math_ops import polyval
from tensorflow.python.ops.math_ops import pow
from tensorflow.python.ops.math_ops import real
from tensorflow.python.ops.math_ops import reciprocal_no_nan
from tensorflow.python.ops.math_ops import reduce_all
from tensorflow.python.ops.math_ops import reduce_any
from tensorflow.python.ops.math_ops import reduce_euclidean_norm
from tensorflow.python.ops.math_ops import reduce_logsumexp
from tensorflow.python.ops.math_ops import reduce_max
from tensorflow.python.ops.math_ops import reduce_mean
from tensorflow.python.ops.math_ops import reduce_min
from tensorflow.python.ops.math_ops import reduce_prod
from tensorflow.python.ops.math_ops import reduce_std
from tensorflow.python.ops.math_ops import reduce_sum
from tensorflow.python.ops.math_ops import reduce_variance
from tensorflow.python.ops.math_ops import round
from tensorflow.python.ops.math_ops import scalar_mul_v2 as scalar_mul
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import subtract
from tensorflow.python.ops.math_ops import truediv
from tensorflow.python.ops.math_ops import unsorted_segment_mean
from tensorflow.python.ops.math_ops import unsorted_segment_sqrt_n
from tensorflow.python.ops.nn_impl import l2_normalize_v2 as l2_normalize
from tensorflow.python.ops.nn_impl import zero_fraction
from tensorflow.python.ops.nn_ops import in_top_k_v2 as in_top_k
from tensorflow.python.ops.nn_ops import log_softmax_v2 as log_softmax
from tensorflow.python.ops.nn_ops import softmax_v2 as softmax
from tensorflow.python.ops.nn_ops import top_k
from tensorflow.python.ops.special_math_ops import bessel_i0
from tensorflow.python.ops.special_math_ops import bessel_i1
from tensorflow.python.ops.special_math_ops import lbeta
del _print_function
|
<filename>venv/Lib/site-packages/tensorflow_core/_api/v2/math/__init__.py
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Math Operations.
Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.
Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
TensorFlow provides a variety of math functions including:
* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)
See: `tf.linalg` for matrix and tensor functions.
<a id=Segmentation></a>
## About Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
# ==> [[0 0 0 0]
# [5 6 7 8]]
```
The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices use the equivalent `unsorted_segment_` function.
These functions take an additional argument `num_segments` so that the output
tensor can be efficiently allocated.
``` python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6, 8, 10, 12],
# [-1, -2, -3, -4]]
```
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.ops.check_ops import is_non_decreasing
from tensorflow.python.ops.check_ops import is_strictly_increasing
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.gen_array_ops import invert_permutation
from tensorflow.python.ops.gen_math_ops import acos
from tensorflow.python.ops.gen_math_ops import acosh
from tensorflow.python.ops.gen_math_ops import add
from tensorflow.python.ops.gen_math_ops import asin
from tensorflow.python.ops.gen_math_ops import asinh
from tensorflow.python.ops.gen_math_ops import atan
from tensorflow.python.ops.gen_math_ops import atan2
from tensorflow.python.ops.gen_math_ops import atanh
from tensorflow.python.ops.gen_math_ops import bessel_i0e
from tensorflow.python.ops.gen_math_ops import bessel_i1e
from tensorflow.python.ops.gen_math_ops import betainc
from tensorflow.python.ops.gen_math_ops import ceil
from tensorflow.python.ops.gen_math_ops import cos
from tensorflow.python.ops.gen_math_ops import cosh
from tensorflow.python.ops.gen_math_ops import digamma
from tensorflow.python.ops.gen_math_ops import erf
from tensorflow.python.ops.gen_math_ops import erfc
from tensorflow.python.ops.gen_math_ops import exp
from tensorflow.python.ops.gen_math_ops import expm1
from tensorflow.python.ops.gen_math_ops import floor
from tensorflow.python.ops.gen_math_ops import floor_mod as floormod
from tensorflow.python.ops.gen_math_ops import floor_mod as mod
from tensorflow.python.ops.gen_math_ops import greater
from tensorflow.python.ops.gen_math_ops import greater_equal
from tensorflow.python.ops.gen_math_ops import igamma
from tensorflow.python.ops.gen_math_ops import igammac
from tensorflow.python.ops.gen_math_ops import is_finite
from tensorflow.python.ops.gen_math_ops import is_inf
from tensorflow.python.ops.gen_math_ops import is_nan
from tensorflow.python.ops.gen_math_ops import less
from tensorflow.python.ops.gen_math_ops import less_equal
from tensorflow.python.ops.gen_math_ops import lgamma
from tensorflow.python.ops.gen_math_ops import log
from tensorflow.python.ops.gen_math_ops import log1p
from tensorflow.python.ops.gen_math_ops import logical_and
from tensorflow.python.ops.gen_math_ops import logical_not
from tensorflow.python.ops.gen_math_ops import logical_or
from tensorflow.python.ops.gen_math_ops import maximum
from tensorflow.python.ops.gen_math_ops import minimum
from tensorflow.python.ops.gen_math_ops import neg as negative
from tensorflow.python.ops.gen_math_ops import next_after as nextafter
from tensorflow.python.ops.gen_math_ops import polygamma
from tensorflow.python.ops.gen_math_ops import reciprocal
from tensorflow.python.ops.gen_math_ops import rint
from tensorflow.python.ops.gen_math_ops import rsqrt
from tensorflow.python.ops.gen_math_ops import segment_max
from tensorflow.python.ops.gen_math_ops import segment_mean
from tensorflow.python.ops.gen_math_ops import segment_min
from tensorflow.python.ops.gen_math_ops import segment_prod
from tensorflow.python.ops.gen_math_ops import segment_sum
from tensorflow.python.ops.gen_math_ops import sign
from tensorflow.python.ops.gen_math_ops import sin
from tensorflow.python.ops.gen_math_ops import sinh
from tensorflow.python.ops.gen_math_ops import sqrt
from tensorflow.python.ops.gen_math_ops import square
from tensorflow.python.ops.gen_math_ops import squared_difference
from tensorflow.python.ops.gen_math_ops import tan
from tensorflow.python.ops.gen_math_ops import tanh
from tensorflow.python.ops.gen_math_ops import unsorted_segment_max
from tensorflow.python.ops.gen_math_ops import unsorted_segment_min
from tensorflow.python.ops.gen_math_ops import unsorted_segment_prod
from tensorflow.python.ops.gen_math_ops import unsorted_segment_sum
from tensorflow.python.ops.gen_math_ops import xdivy
from tensorflow.python.ops.gen_math_ops import xlogy
from tensorflow.python.ops.gen_math_ops import zeta
from tensorflow.python.ops.gen_nn_ops import softplus
from tensorflow.python.ops.gen_nn_ops import softsign
from tensorflow.python.ops.math_ops import abs
from tensorflow.python.ops.math_ops import accumulate_n
from tensorflow.python.ops.math_ops import add_n
from tensorflow.python.ops.math_ops import angle
from tensorflow.python.ops.math_ops import argmax_v2 as argmax
from tensorflow.python.ops.math_ops import argmin_v2 as argmin
from tensorflow.python.ops.math_ops import bincount
from tensorflow.python.ops.math_ops import conj
from tensorflow.python.ops.math_ops import count_nonzero_v2 as count_nonzero
from tensorflow.python.ops.math_ops import cumprod
from tensorflow.python.ops.math_ops import cumsum
from tensorflow.python.ops.math_ops import cumulative_logsumexp
from tensorflow.python.ops.math_ops import div_no_nan as divide_no_nan
from tensorflow.python.ops.math_ops import divide
from tensorflow.python.ops.math_ops import equal
from tensorflow.python.ops.math_ops import floordiv
from tensorflow.python.ops.math_ops import imag
from tensorflow.python.ops.math_ops import log_sigmoid
from tensorflow.python.ops.math_ops import logical_xor
from tensorflow.python.ops.math_ops import multiply
from tensorflow.python.ops.math_ops import multiply_no_nan
from tensorflow.python.ops.math_ops import not_equal
from tensorflow.python.ops.math_ops import polyval
from tensorflow.python.ops.math_ops import pow
from tensorflow.python.ops.math_ops import real
from tensorflow.python.ops.math_ops import reciprocal_no_nan
from tensorflow.python.ops.math_ops import reduce_all
from tensorflow.python.ops.math_ops import reduce_any
from tensorflow.python.ops.math_ops import reduce_euclidean_norm
from tensorflow.python.ops.math_ops import reduce_logsumexp
from tensorflow.python.ops.math_ops import reduce_max
from tensorflow.python.ops.math_ops import reduce_mean
from tensorflow.python.ops.math_ops import reduce_min
from tensorflow.python.ops.math_ops import reduce_prod
from tensorflow.python.ops.math_ops import reduce_std
from tensorflow.python.ops.math_ops import reduce_sum
from tensorflow.python.ops.math_ops import reduce_variance
from tensorflow.python.ops.math_ops import round
from tensorflow.python.ops.math_ops import scalar_mul_v2 as scalar_mul
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import subtract
from tensorflow.python.ops.math_ops import truediv
from tensorflow.python.ops.math_ops import unsorted_segment_mean
from tensorflow.python.ops.math_ops import unsorted_segment_sqrt_n
from tensorflow.python.ops.nn_impl import l2_normalize_v2 as l2_normalize
from tensorflow.python.ops.nn_impl import zero_fraction
from tensorflow.python.ops.nn_ops import in_top_k_v2 as in_top_k
from tensorflow.python.ops.nn_ops import log_softmax_v2 as log_softmax
from tensorflow.python.ops.nn_ops import softmax_v2 as softmax
from tensorflow.python.ops.nn_ops import top_k
from tensorflow.python.ops.special_math_ops import bessel_i0
from tensorflow.python.ops.special_math_ops import bessel_i1
from tensorflow.python.ops.special_math_ops import lbeta
del _print_function
|
en
| 0.708127
|
# This file is MACHINE GENERATED! Do not edit. # Generated by: tensorflow/python/tools/api/generator/create_python_api.py script. Math Operations. Note: Functions taking `Tensor` arguments can also take anything accepted by `tf.convert_to_tensor`. Note: Elementwise binary operations in TensorFlow follow [numpy-style broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). TensorFlow provides a variety of math functions including: * Basic arithmetic operators and trigonometric functions. * Special math functions (like: `tf.math.igamma` and `tf.math.zeta`) * Complex number functions (like: `tf.math.imag` and `tf.math.angle`) * Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`) * Segment functions (like: `tf.math.segment_sum`) See: `tf.linalg` for matrix and tensor functions. <a id=Segmentation></a> ## About Segmentation TensorFlow provides several operations that you can use to perform common math computations on tensor segments. Here a segmentation is a partitioning of a tensor along the first dimension, i.e. it defines a mapping from the first dimension onto `segment_ids`. The `segment_ids` tensor should be the size of the first dimension, `d0`, with consecutive IDs in the range `0` to `k`, where `k<d0`. In particular, a segmentation of a matrix tensor is a mapping of rows to segments. For example: ```python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) tf.math.segment_sum(c, tf.constant([0, 0, 1])) # ==> [[0 0 0 0] # [5 6 7 8]] ``` The standard `segment_*` functions assert that the segment indices are sorted. If you have unsorted indices use the equivalent `unsorted_segment_` function. Thses functions take an additional argument `num_segments` so that the output tensor can be efficiently allocated. ``` python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) # ==> [[ 6, 8, 10, 12], # [-1, -2, -3, -4]] ```
| 3.108411
| 3
|
tests/test_keybuilders.py
|
z4r/django-stopwatch
| 3
|
6626506
|
<gh_stars>1-10
from django.core.urlresolvers import ResolverMatch
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from stopwatch import errors
from stopwatch.keybuilders import RestApiKeyBuilder
def func():
    """No-op view stub; used as the resolved callable inside ResolverMatch."""
    pass
class RestApiKeyBuilderTestCase(TestCase):
    """Exercises RestApiKeyBuilder.build() on resolved and unresolved requests."""

    def setUp(self):
        """Build a resolved GET request, a default response, and the key builder."""
        request = HttpRequest()
        request.method = 'GET'
        # Simulate URL resolution: `func` resolved under the 'namespace' namespace.
        request.resolver_match = ResolverMatch(func, (), {}, namespaces=['namespace'])
        self.request = request
        self.response = HttpResponse()
        self.keybuilder = RestApiKeyBuilder()

    def test_build(self):
        """A resolved request yields <namespace>.<module>.<view>.<method>.<status>."""
        self.assertEqual(
            self.keybuilder.build(self.request, self.response),
            'namespace.tests.test_keybuilders.func.GET.200',
        )

    def test_build_unresolved(self):
        """Without resolver_match the builder cannot name the view and must raise."""
        delattr(self.request, 'resolver_match')
        with self.assertRaises(errors.KeyBuilderException):
            self.keybuilder.build(self.request, self.response)
|
from django.core.urlresolvers import ResolverMatch
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from stopwatch import errors
from stopwatch.keybuilders import RestApiKeyBuilder
def func():
    """No-op view stub; used as the resolved callable inside ResolverMatch."""
    pass
class RestApiKeyBuilderTestCase(TestCase):
    """Tests for RestApiKeyBuilder.build()."""
    def setUp(self):
        # Builder under test plus a minimal resolved GET request/response pair.
        self.keybuilder = RestApiKeyBuilder()
        self.request = HttpRequest()
        self.request.method = 'GET'
        # Simulate URL resolution: `func` resolved under the 'namespace' namespace.
        self.request.resolver_match = ResolverMatch(func, (), {}, namespaces=['namespace'])
        self.response = HttpResponse()
    def test_build(self):
        # Key layout: <namespace>.<module path>.<view name>.<HTTP method>.<status code>.
        key = self.keybuilder.build(self.request, self.response)
        self.assertEqual(key, 'namespace.tests.test_keybuilders.func.GET.200')
    def test_build_unresolved(self):
        # Without resolver_match the builder cannot name the view and must raise.
        delattr(self.request, 'resolver_match')
        self.assertRaises(errors.KeyBuilderException, self.keybuilder.build, self.request, self.response)
|
none
| 1
| 2.205068
| 2
|
|
GAScore/testbench/am_rx.py
|
sharm294/shoal
| 1
|
6626507
|
<reponame>sharm294/shoal<gh_stars>1-10
import os
from sonar.testbench import Testbench, Module, TestVector, Thread
from sonar.interfaces import AXIS
from sonar_strToInt import strToInt
# Testbench for the am_rx (active-message receive) hardware module: drives AM
# packets into the DUT's network stream and checks the forwarded xpams_rx
# stream plus the s2mm datamover command/data/status streams.
am_rx = Testbench.default('am_rx')
filepath = os.path.join(os.path.dirname(__file__), 'build/am_rx/')
# Device under test: 20ns clock, active-low reset, one scalar output port.
dut = Module.default("DUT")
dut.add_clock_port('ap_clk', '20ns')
dut.add_reset_port('ap_rst_n')
dut.add_port('release_V', 'output')
# Incoming network stream (testbench produces; DUT side is 'slave').
axis_net = AXIS('axis_net', 'slave', 'ap_clk', c_struct='axis_word',
                c_stream='uaxis_l')
axis_net.port.init_channels('tkeep', 64, True)
dut.add_interface(axis_net)
# Headers/handler args forwarded by the DUT to the xpams core.
axis_xpams_rx = AXIS('axis_xpams_rx', 'master', 'ap_clk', c_struct='axis_word',
                     c_stream='uaxis_l')
axis_xpams_rx.port.init_channels('tkeep', 64, True)
dut.add_interface(axis_xpams_rx)
# Command stream to the AXI datamover (s2mm = stream-to-memory-mapped).
axis_s2mmCommand = AXIS('axis_s2mmCommand_V_data_V', 'master', 'ap_clk', c_struct='axis_word',
                        c_stream='uaxis_o')
axis_s2mmCommand.port.init_channels('min', 64, True)
axis_s2mmCommand.port.add_channel('TREADY', 'tready')
dut.add_interface(axis_s2mmCommand)
# Payload data stream to the datamover.
axis_s2mm = AXIS('axis_s2mm', 'master', 'ap_clk', c_struct='axis_word',
                 c_stream='uaxis_l')
axis_s2mm.port.init_channels('tkeep', 64, True)
dut.add_interface(axis_s2mm)
# Status stream back from the datamover (the testbench fakes completions).
axis_s2mmStatus = AXIS('axis_s2mmStatus', 'slave', 'ap_clk',
                       c_struct='axis_word_mm2sStatus', c_stream='uaxis_l')
axis_s2mmStatus.port.init_channels('default', 64, True)
dut.add_interface(axis_s2mmStatus)
am_rx.add_module(dut)
################################################################################
# Test Vectors
################################################################################
# Initialization thread (added to each test vector to reset everything)
initT = Thread()
initT.init_signals()
initT.wait_negedge('ap_clk')
initT.add_delay('40ns')
initT.set_signal('ap_rst_n', 1)  # deassert the active-low reset
# Hold all downstream tready signals high so the DUT can always drain.
initT.set_signal('axis_xpams_rx_tready', 1)
initT.set_signal('axis_s2mmCommand_V_data_V_tready', 1)
initT.set_signal('axis_s2mm_tready', 1)
#-------------------------------------------------------------------------------
# Short Message A
#
# Short message with token and payload; everything is forwarded to xpams_rx.
#-------------------------------------------------------------------------------
short_message_A = TestVector()
short_message_A.add_thread(initT)
smA_t1 = short_message_A.add_thread()
smA_t1.add_delay('300ns')
smA_t1.init_timer()
axis_net.writes(smA_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,48,0xE,1,2}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0,0xE,1,2}"), "callTB": 1},
    {"tdata": strToInt("{AMToken,0x0}"), "callTB": 1},
    {"tdata": 0xDEADBEEF},
    {"tdata": 0x1234, "tlast": 1, "callTB": 2}
])
smA_t2 = short_message_A.add_thread()
# Expect the identical words echoed on the xpams_rx stream.
axis_xpams_rx.reads(smA_t2, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,48,0xE,1,2}")},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0,0xE,1,2}")},
    {"tdata": strToInt("{AMToken,0x0}")},
    {"tdata": 0xDEADBEEF},
    {"tdata": 0x1234, "tlast": 1}
])
smA_t2.print_elapsed_time("Short_Message_A")
smA_t2.end_vector()
#-------------------------------------------------------------------------------
# Short Message B
#
# Header-and-token-only short message (no payload words).
#-------------------------------------------------------------------------------
short_message_B = TestVector()
short_message_B.add_thread(initT)
smB_t1 = short_message_B.add_thread()
smB_t1.add_delay('300ns')
smB_t1.init_timer()
axis_net.writes(smB_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,24,0xE,1,0}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0,0xE,1,0}"), "callTB": 1},
    {"tdata": strToInt("{AMToken,0x1}"), "callTB": 2}
])
smB_t2 = short_message_B.add_thread()
axis_xpams_rx.reads(smB_t2, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,24,0xE,1,0}")},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0,0xE,1,0}")},
    {"tdata": strToInt("{AMToken,0x1}")}
])
smB_t2.print_elapsed_time("short_message_B")
smB_t2.end_vector()
#-------------------------------------------------------------------------------
# Medium Message A
#
# Medium message with a single payload word forwarded to xpams_rx.
#-------------------------------------------------------------------------------
medium_message_A = TestVector()
medium_message_A.add_thread(initT)
mmA_t1 = medium_message_A.add_thread()
mmA_t1.add_delay('300ns')
mmA_t1.init_timer()
axis_net.writes(mmA_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,36,0xE,2,0}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,8,0xE,2,0}"), "callTB": 1},
    {"tdata": strToInt("{AMToken,0x2}"), "callTB": 1},
    {"tdata": 0xDEADBEEF, "tlast": 1, "callTB": 2}
])
# NOTE(review): mmA_t1 was already registered by add_thread() above;
# re-adding it here may duplicate the thread -- confirm intended.
medium_message_A.add_thread(mmA_t1)
mmA_t2 = medium_message_A.add_thread()
axis_xpams_rx.reads(mmA_t2, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,36,0xE,2,0}")},
    {"tdata": strToInt("{AMHeader,0x01,0x02,8,0xE,2,0}")},
    {"tdata": strToInt("{AMToken,0x2}")},
    {"tdata": 0xDEADBEEF, "tlast": 1}
])
mmA_t2.print_elapsed_time("Medium_Message_A")
mmA_t2.end_vector()
# NOTE(review): same double-registration pattern as mmA_t1 above.
medium_message_A.add_thread(mmA_t2)
#-------------------------------------------------------------------------------
# Medium Message B
#
# Large medium message (0x800 bytes of payload, six handler args).
#-------------------------------------------------------------------------------
medium_message_B = TestVector()
medium_message_B.add_thread(initT)
mmB_t1 = medium_message_B.add_thread()
mmB_t1.add_delay('300ns')
mmB_t1.init_timer()
axis_net.writes(mmB_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x818,0xE,2,6}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0x800,0xE,2,6}"), "callTB": 1},
    {"tdata": strToInt("{AMToken,0x3}"), "callTB": 1}
])
# Six handler-arg words followed by 256 payload words.
for i in range(5):
    axis_net.write(mmB_t1, 0x98765432)
axis_net.write(mmB_t1, 0x98765432, callTB=1)
for i in range(255):
    axis_net.write(mmB_t1, 0xDEADBEEF)
axis_net.write(mmB_t1, 0xFEEDDEED, tlast=1, callTB=2)
mmB_t2 = medium_message_B.add_thread()
axis_xpams_rx.reads(mmB_t2, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x818,0xE,2,6}")},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0x800,0xE,2,6}")},
    {"tdata": strToInt("{AMToken,0x3}")}
])
for i in range(6):
    axis_xpams_rx.read(mmB_t2, 0x98765432)
for i in range(255):
    axis_xpams_rx.read(mmB_t2, 0xDEADBEEF)
axis_xpams_rx.read(mmB_t2, 0xFEEDDEED, tlast=1)
mmB_t2.print_elapsed_time("medium_message_B")
mmB_t2.end_vector()
#-------------------------------------------------------------------------------
# Long Message A
#
# Long message: payload goes to memory via the s2mm datamover.
#-------------------------------------------------------------------------------
long_message_A = TestVector()
long_message_A.add_thread(initT)
lmA_t1 = long_message_A.add_thread()
lmA_t1.add_delay('300ns')
lmA_t1.init_timer()
axis_net.writes(lmA_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x818,0xE,4,0}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0x800,0xE,4,0}"), "callTB": 1},
    {"tdata": strToInt("{AMToken,0x4}"), "callTB": 1},
    {"tdata": "0xAABBCCD8", "callTB": 1} # address
])
for i in range(255):
    axis_net.write(lmA_t1, 0xDEADBEEF)
axis_net.write(lmA_t1, 0xDEADBEEF, tlast=1, callTB=2)
lmA_t2 = long_message_A.add_thread()
# Header/token are forwarded to xpams; payload words go out on s2mm.
# USE_ABS_PAYLOAD axis_xpams_rx.read(lmA_t2, strToInt("{AMHeader,0x01,0x02,0x818,0xE,4,0}"))
axis_xpams_rx.read(lmA_t2, strToInt("{AMHeader,0x01,0x02,0x800,0xE,4,0}"))
axis_xpams_rx.read(lmA_t2, strToInt("{AMToken,0x4}"))
for i in range(255):
    axis_s2mm.read(lmA_t2, 0xDEADBEEF)
axis_s2mm.read(lmA_t2, 0xDEADBEEF, tlast=1)
lmA_t3 = long_message_A.add_thread()
# One datamover command: 0x800 bytes to destination address 0xAABBCCD8.
axis_s2mmCommand.read(lmA_t3, strToInt("{dataMoverCommand,0x800,1,0,1,0,0xAABBCCD8,0}"))
lmA_t4 = long_message_A.add_thread()
# After the final payload beat, fake a successful datamover status (0x80).
lmA_t4.wait_level('axis_s2mm_tlast == $value', value=1)
lmA_t4.add_delay('50ns')
axis_s2mmStatus.write(lmA_t4, 0x80, callTB=1)
lmA_t4.print_elapsed_time("Long_Message_A")
lmA_t4.end_vector()
#-------------------------------------------------------------------------------
# Long Message B
#
# Long strided message from 0xAA to 0xCC with 0xC words of payload
# across two vectors. It calls handler 0xD with two handler args. Each
# stride is 0x100 words, and it writes 4 words each time
#-------------------------------------------------------------------------------
long_message_B = TestVector()
long_message_B.add_thread(initT)
lmB_t1 = long_message_B.add_thread()
lmB_t1.add_delay('300ns')
lmB_t1.init_timer()
axis_net.writes(lmB_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x88,0xD,0x5,2}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0x60,0xD,0x5,2}"), "callTB": 1},
    {"tdata": strToInt("{AMLongStride,0x100,32,3,0x5}"), "callTB": 1},
    {"tdata": 0, "callTB": 1}, # initial address
    {"tdata": 0xAABBCCDD}, # handler arg 0
    {"tdata": 0xDDCCBBAA, "callTB": 1}, # handler arg 1
])
# Three strides of 4 words each (3x 0xBEEFDEAD + 1x 0xDEADBEEF per stride).
for i in range(2):
    for j in range(3):
        axis_net.write(lmB_t1, 0xBEEFDEAD)
    axis_net.write(lmB_t1, 0xDEADBEEF)
for j in range(3):
    axis_net.write(lmB_t1, 0xBEEFDEAD)
axis_net.write(lmB_t1, 0xDEADBEEF, tlast=1, callTB=1)
lmB_t2 = long_message_B.add_thread()
# One datamover command per stride, stepping the address by 0x100 bytes.
axis_s2mmCommand.read(lmB_t2, strToInt("{dataMoverCommand,0x20,1,0,1,0,0,0}"))
axis_s2mmCommand.read(lmB_t2, strToInt("{dataMoverCommand,0x20,1,0,1,0,0x100,0}"))
axis_s2mmCommand.read(lmB_t2, strToInt("{dataMoverCommand,0x20,1,0,1,0,0x200,0}"))
lmB_t3 = long_message_B.add_thread()
# USE_ABS_PAYLOAD axis_xpams_rx.read(lmB_t3, strToInt("{AMHeader,0x01,0x02,0x88,0xD,0x5,2}"))
axis_xpams_rx.read(lmB_t3, strToInt("{AMHeader,0x01,0x02,0x60,0xD,0x5,2}"))
axis_xpams_rx.read(lmB_t3, strToInt("{AMLongStride,0x100,32,3,0x5}"))
axis_xpams_rx.read(lmB_t3, 0xAABBCCDD) # handler arg 0
axis_xpams_rx.read(lmB_t3, 0xDDCCBBAA) # handler arg 1
# Payload emerges on s2mm in three tlast-delimited bursts (one per stride).
for i in range(3):
    for j in range(3):
        axis_s2mm.read(lmB_t3, 0xBEEFDEAD)
    axis_s2mm.read(lmB_t3, 0xDEADBEEF, tlast=1)
lmB_t3.add_delay('50ns')
# One fake datamover completion per stride command.
axis_s2mmStatus.write(lmB_t3, 0x80)
axis_s2mmStatus.write(lmB_t3, 0x80)
axis_s2mmStatus.write(lmB_t3, 0x80, callTB=1)
lmB_t3.print_elapsed_time("long_message_B")
lmB_t3.end_vector()
#-------------------------------------------------------------------------------
# Long Message C
#
# Long vector message from 0xAA to 0xCC with 0x10C words of payload
# across two vectors. It calls handler 0xF with zero handler args. First
# vector has 0xC words to address 0 and second has 0x100 words to
# address 0xFF00
#-------------------------------------------------------------------------------
long_message_C = TestVector()
long_message_C.add_thread(initT)
lmC_t1 = long_message_C.add_thread()
lmC_t1.add_delay('300ns')
lmC_t1.init_timer()
axis_net.writes(lmC_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,type:0x6,src:0x01,dst:0x02,payload:0x888,handler:0xF,args:0}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,type:0x6,src:0x01,dst:0x02,payload:0x860,handler:0xF,args:0}"), "callTB": 1},
    {"tdata": strToInt("{AMLongVector,dst:2,dstSize:0x60,token:0x6}")},
    {"tdata": 0}, # destination 0
    {"tdata": 0x800}, # size 1
    {"tdata": 0xFF00, "callTB": 1}, # destination 1
])
# First vector: 12 words; second vector: 256 words.
for i in range(3):
    for j in range(3):
        axis_net.write(lmC_t1, 0xBEEFDEAD)
    axis_net.write(lmC_t1, 0xDEADBEEF)
for i in range(255):
    axis_net.write(lmC_t1, 0xFFAAFFAA)
axis_net.write(lmC_t1, 0xDDDDDDDD, tlast=1, callTB=2)
lmC_t2 = long_message_C.add_thread()
# One datamover command per destination vector.
axis_s2mmCommand.read(lmC_t2, strToInt("{dataMoverCommand,0x60,1,0,1,0,0,0}"))
axis_s2mmCommand.read(lmC_t2, strToInt("{dataMoverCommand,0x800,1,0,1,0,0xFF00,0}"))
lmC_t3 = long_message_C.add_thread()
axis_xpams_rx.reads(lmC_t3, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x888,0xF,0x6,0}")},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0x860,0xF,0x6,0}")},
    {"tdata": strToInt("{AMLongVector,2,0x60,0x6}")}
])
# NOTE(review): the s2mm payload reads below target lmC_t2 (the command
# thread), not lmC_t3 as the surrounding code suggests -- confirm intended.
for i in range(2):
    for j in range(3):
        axis_s2mm.read(lmC_t2, 0xBEEFDEAD)
    axis_s2mm.read(lmC_t2, 0xDEADBEEF)
for j in range(3):
    axis_s2mm.read(lmC_t2, 0xBEEFDEAD)
axis_s2mm.read(lmC_t2, 0xDEADBEEF, tlast=1)
for i in range(255):
    axis_s2mm.read(lmC_t2, 0xFFAAFFAA)
axis_s2mm.read(lmC_t2, 0xDDDDDDDD, tlast=1)
lmC_t4 = long_message_C.add_thread()
# Wait for both tlast bursts (rise, fall, rise) before faking completions.
lmC_t4.wait_level('axis_s2mm_tlast == $value', value=1)
lmC_t4.wait_level('axis_s2mm_tlast == $value', value=0)
lmC_t4.wait_level('axis_s2mm_tlast == $value', value=1)
lmC_t4.add_delay('50ns')
axis_s2mmStatus.write(lmC_t4, 0x80)
axis_s2mmStatus.write(lmC_t4, 0x80, callTB=1)
lmC_t4.print_elapsed_time("long_message_C")
lmC_t4.end_vector()
# Register all vectors with the testbench in execution order.
am_rx.add_test_vector(short_message_A)
am_rx.add_test_vector(short_message_B)
am_rx.add_test_vector(medium_message_A)
am_rx.add_test_vector(medium_message_B)
am_rx.add_test_vector(long_message_A)
am_rx.add_test_vector(long_message_B)
am_rx.add_test_vector(long_message_C)
# original
# Short_Message_A: 0.370 us
# Test vector 0 complete
# short_message_B: 0.180 us
# Test vector 1 complete
# Medium_Message_A: 0.280 us
# Test vector 2 complete
# Medium_Message_A: 0.280 us
# Test vector 2 complete
# medium_message_B: 21.100 us
# Test vector 3 complete
# Long_Message_A: 20.890 us
# Test vector 4 complete
# long_message_B: 1.550 us
# Test vector 5 complete
# long_message_C: 21.870 us
# Test vector 6 complete
# copy statics, pipeline
# Short_Message_A: 0.310 us
# Test vector 0 complete
# short_message_B: 0.140 us
# Test vector 1 complete
# Medium_Message_A: 0.240 us
# Test vector 2 complete
# Medium_Message_A: 0.240 us
# Test vector 2 complete
# medium_message_B: 21.040 us
# Test vector 3 complete
# Long_Message_A: 20.830 us
# Test vector 4 complete
# long_message_B: 1.510 us
# Test vector 5 complete
# long_message_C: 21.850 us
# Test vector 6 complete
# Final: II = 2
# Short_Message_A: 0.190 us
# Test vector 0 complete
# short_message_B: 0.100 us
# Test vector 1 complete
# Medium_Message_A: 0.120 us
# Test vector 2 complete
# Medium_Message_A: 0.120 us
# Test vector 2 complete
# medium_message_B: 10.560 us
# Test vector 3 complete
# Long_Message_A: 10.410 us
# Test vector 4 complete
# long_message_B: 0.810 us
# Test vector 5 complete
# long_message_C: 10.930 us
# Test vector 6 complete
# Emit the generated testbench (all test vectors) into build/am_rx/.
am_rx.generateTB(filepath, 'all')
|
# Sonar testbench generator for the "am_rx" (active-message receive) DUT.
# This section declares the DUT's clock/reset/ports and its AXI-Stream
# interfaces; the test vectors below drive and check those interfaces.
import os
from sonar.testbench import Testbench, Module, TestVector, Thread
from sonar.interfaces import AXIS
from sonar_strToInt import strToInt

am_rx = Testbench.default('am_rx')
filepath = os.path.join(os.path.dirname(__file__), 'build/am_rx/')

dut = Module.default("DUT")
dut.add_clock_port('ap_clk', '20ns')  # 20 ns period clock
dut.add_reset_port('ap_rst_n')        # active-low reset (driven high in initT)
dut.add_port('release_V', 'output')

# Incoming network stream (testbench drives, DUT consumes).
axis_net = AXIS('axis_net', 'slave', 'ap_clk', c_struct='axis_word',
                c_stream='uaxis_l')
axis_net.port.init_channels('tkeep', 64, True)
dut.add_interface(axis_net)

# Stream toward the message handlers (DUT drives, testbench checks).
axis_xpams_rx = AXIS('axis_xpams_rx', 'master', 'ap_clk', c_struct='axis_word',
                     c_stream='uaxis_l')
axis_xpams_rx.port.init_channels('tkeep', 64, True)
dut.add_interface(axis_xpams_rx)

# DataMover S2MM command stream; 'min' channel set plus an explicit TREADY.
axis_s2mmCommand = AXIS('axis_s2mmCommand_V_data_V', 'master', 'ap_clk', c_struct='axis_word',
                        c_stream='uaxis_o')
axis_s2mmCommand.port.init_channels('min', 64, True)
axis_s2mmCommand.port.add_channel('TREADY', 'tready')
dut.add_interface(axis_s2mmCommand)

# S2MM payload stream written to memory by the DataMover.
axis_s2mm = AXIS('axis_s2mm', 'master', 'ap_clk', c_struct='axis_word',
                 c_stream='uaxis_l')
axis_s2mm.port.init_channels('tkeep', 64, True)
dut.add_interface(axis_s2mm)

# DataMover status stream fed back into the DUT by the testbench.
axis_s2mmStatus = AXIS('axis_s2mmStatus', 'slave', 'ap_clk',
                       c_struct='axis_word_mm2sStatus', c_stream='uaxis_l')
axis_s2mmStatus.port.init_channels('default', 64, True)
dut.add_interface(axis_s2mmStatus)

am_rx.add_module(dut)
################################################################################
# Test Vectors
################################################################################
# Initialization thread (added to each test vector to reset everything)
initT = Thread()
initT.init_signals()
initT.wait_negedge('ap_clk')
initT.add_delay('40ns')
initT.set_signal('ap_rst_n', 1)
# Downstream consumers are always ready in these tests (no backpressure).
initT.set_signal('axis_xpams_rx_tready', 1)
initT.set_signal('axis_s2mmCommand_V_data_V_tready', 1)
initT.set_signal('axis_s2mm_tready', 1)
#-------------------------------------------------------------------------------
# Short Message A
#
#-------------------------------------------------------------------------------
# Short message: header + token + two payload words; the whole packet is
# expected back unchanged on the handler stream (axis_xpams_rx).
short_message_A = TestVector()
short_message_A.add_thread(initT)

smA_t1 = short_message_A.add_thread()
smA_t1.add_delay('300ns')
smA_t1.init_timer()  # elapsed time is printed by the read-side thread
axis_net.writes(smA_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,48,0xE,1,2}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0,0xE,1,2}"), "callTB": 1},
    {"tdata": strToInt("{AMToken,0x0}"), "callTB": 1},
    {"tdata": 0xDEADBEEF},
    {"tdata": 0x1234, "tlast": 1, "callTB": 2}
])

smA_t2 = short_message_A.add_thread()
axis_xpams_rx.reads(smA_t2, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,48,0xE,1,2}")},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0,0xE,1,2}")},
    {"tdata": strToInt("{AMToken,0x0}")},
    {"tdata": 0xDEADBEEF},
    {"tdata": 0x1234, "tlast": 1}
])
smA_t2.print_elapsed_time("Short_Message_A")
smA_t2.end_vector()
#-------------------------------------------------------------------------------
# Short Message B
#
#-------------------------------------------------------------------------------
# Shortest case: header + token only, no payload words.
short_message_B = TestVector()
short_message_B.add_thread(initT)

smB_t1 = short_message_B.add_thread()
smB_t1.add_delay('300ns')
smB_t1.init_timer()
axis_net.writes(smB_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,24,0xE,1,0}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0,0xE,1,0}"), "callTB": 1},
    {"tdata": strToInt("{AMToken,0x1}"), "callTB": 2}
])

smB_t2 = short_message_B.add_thread()
axis_xpams_rx.reads(smB_t2, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,24,0xE,1,0}")},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0,0xE,1,0}")},
    {"tdata": strToInt("{AMToken,0x1}")}
])
smB_t2.print_elapsed_time("short_message_B")
smB_t2.end_vector()
#-------------------------------------------------------------------------------
# Medium Message A
#
#-------------------------------------------------------------------------------
# Medium message: header + token + one payload word, forwarded verbatim to
# the handler stream.
medium_message_A = TestVector()
medium_message_A.add_thread(initT)

mmA_t1 = medium_message_A.add_thread()
mmA_t1.add_delay('300ns')
mmA_t1.init_timer()
axis_net.writes(mmA_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,36,0xE,2,0}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,8,0xE,2,0}"), "callTB": 1},
    {"tdata": strToInt("{AMToken,0x2}"), "callTB": 1},
    {"tdata": 0xDEADBEEF, "tlast": 1, "callTB": 2}
])

# BUG FIX: mmA_t1 and mmA_t2 were each registered with the vector twice --
# add_thread() already registers the thread it creates, and the script then
# also called medium_message_A.add_thread(mmA_t1) / add_thread(mmA_t2).
# No other vector in this file double-registers its threads, and the
# captured simulation log below prints "Medium_Message_A ... Test vector 2
# complete" twice in every run, consistent with duplicated threads.  The
# redundant add_thread calls are removed.
mmA_t2 = medium_message_A.add_thread()
axis_xpams_rx.reads(mmA_t2, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,36,0xE,2,0}")},
    {"tdata": strToInt("{AMHeader,0x01,0x02,8,0xE,2,0}")},
    {"tdata": strToInt("{AMToken,0x2}")},
    {"tdata": 0xDEADBEEF, "tlast": 1}
])
mmA_t2.print_elapsed_time("Medium_Message_A")
mmA_t2.end_vector()
#-------------------------------------------------------------------------------
# Medium Message B
#
#-------------------------------------------------------------------------------
# Large medium message: 0x800 bytes of payload plus 6 handler args, all of
# which must be streamed through to axis_xpams_rx unchanged.
medium_message_B = TestVector()
medium_message_B.add_thread(initT)

mmB_t1 = medium_message_B.add_thread()
mmB_t1.add_delay('300ns')
mmB_t1.init_timer()
axis_net.writes(mmB_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x818,0xE,2,6}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0x800,0xE,2,6}"), "callTB": 1},
    {"tdata": strToInt("{AMToken,0x3}"), "callTB": 1}
])
# 6 handler args total: 5 here plus one carrying the callTB checkpoint.
for i in range(5):
    axis_net.write(mmB_t1, 0x98765432)
axis_net.write(mmB_t1, 0x98765432, callTB=1)
# 256-word payload; final word asserts tlast.
for i in range(255):
    axis_net.write(mmB_t1, 0xDEADBEEF)
axis_net.write(mmB_t1, 0xFEEDDEED, tlast=1, callTB=2)

mmB_t2 = medium_message_B.add_thread()
axis_xpams_rx.reads(mmB_t2, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x818,0xE,2,6}")},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0x800,0xE,2,6}")},
    {"tdata": strToInt("{AMToken,0x3}")}
])
for i in range(6):
    axis_xpams_rx.read(mmB_t2, 0x98765432)
for i in range(255):
    axis_xpams_rx.read(mmB_t2, 0xDEADBEEF)
axis_xpams_rx.read(mmB_t2, 0xFEEDDEED, tlast=1)
mmB_t2.print_elapsed_time("medium_message_B")
mmB_t2.end_vector()
#-------------------------------------------------------------------------------
# Long Message A
#
#-------------------------------------------------------------------------------
# Long message: payload is written to memory through the DataMover (S2MM
# command + data streams) instead of going to the handler stream.
long_message_A = TestVector()
long_message_A.add_thread(initT)

lmA_t1 = long_message_A.add_thread()
lmA_t1.add_delay('300ns')
lmA_t1.init_timer()
axis_net.writes(lmA_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x818,0xE,4,0}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0x800,0xE,4,0}"), "callTB": 1},
    {"tdata": strToInt("{AMToken,0x4}"), "callTB": 1},
    {"tdata": "0xAABBCCD8", "callTB": 1} # address
])
for i in range(255):
    axis_net.write(lmA_t1, 0xDEADBEEF)
axis_net.write(lmA_t1, 0xDEADBEEF, tlast=1, callTB=2)

# Header/token still go to the handler stream; payload appears on axis_s2mm.
lmA_t2 = long_message_A.add_thread()
# USE_ABS_PAYLOAD axis_xpams_rx.read(lmA_t2, strToInt("{AMHeader,0x01,0x02,0x818,0xE,4,0}"))
axis_xpams_rx.read(lmA_t2, strToInt("{AMHeader,0x01,0x02,0x800,0xE,4,0}"))
axis_xpams_rx.read(lmA_t2, strToInt("{AMToken,0x4}"))
for i in range(255):
    axis_s2mm.read(lmA_t2, 0xDEADBEEF)
axis_s2mm.read(lmA_t2, 0xDEADBEEF, tlast=1)

# One DataMover command for the whole 0x800-byte burst at the given address.
lmA_t3 = long_message_A.add_thread()
axis_s2mmCommand.read(lmA_t3, strToInt("{dataMoverCommand,0x800,1,0,1,0,0xAABBCCD8,0}"))

# Feed the DataMover OK status (0x80) back once the burst finishes.
lmA_t4 = long_message_A.add_thread()
lmA_t4.wait_level('axis_s2mm_tlast == $value', value=1)
lmA_t4.add_delay('50ns')
axis_s2mmStatus.write(lmA_t4, 0x80, callTB=1)
lmA_t4.print_elapsed_time("Long_Message_A")
lmA_t4.end_vector()
#-------------------------------------------------------------------------------
# Long Message B
#
# Long strided message from 0xAA to 0xCC with 0xC words of payload
# across two vectors. It calls handler 0xD with two handler args. Each
# stride is 0x100 words, and it write 4 words each time
#-------------------------------------------------------------------------------
long_message_B = TestVector()
long_message_B.add_thread(initT)

lmB_t1 = long_message_B.add_thread()
lmB_t1.add_delay('300ns')
lmB_t1.init_timer()
axis_net.writes(lmB_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x88,0xD,0x5,2}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0x60,0xD,0x5,2}"), "callTB": 1},
    {"tdata": strToInt("{AMLongStride,0x100,32,3,0x5}"), "callTB": 1},
    {"tdata": 0, "callTB": 1}, # initial address
    {"tdata": 0xAABBCCDD}, # handler arg 0
    {"tdata": 0xDDCCBBAA, "callTB": 1}, # handler arg 1
])
# Three strided bursts of 4 words each (3 x 0xBEEFDEAD + 1 x 0xDEADBEEF);
# the very last word of the message carries tlast.
for i in range(2):
    for j in range(3):
        axis_net.write(lmB_t1, 0xBEEFDEAD)
    axis_net.write(lmB_t1, 0xDEADBEEF)
for j in range(3):
    axis_net.write(lmB_t1, 0xBEEFDEAD)
axis_net.write(lmB_t1, 0xDEADBEEF, tlast=1, callTB=1)

# One 0x20-byte DataMover command per stride, addresses 0x100 apart.
lmB_t2 = long_message_B.add_thread()
axis_s2mmCommand.read(lmB_t2, strToInt("{dataMoverCommand,0x20,1,0,1,0,0,0}"))
axis_s2mmCommand.read(lmB_t2, strToInt("{dataMoverCommand,0x20,1,0,1,0,0x100,0}"))
axis_s2mmCommand.read(lmB_t2, strToInt("{dataMoverCommand,0x20,1,0,1,0,0x200,0}"))

lmB_t3 = long_message_B.add_thread()
# USE_ABS_PAYLOAD axis_xpams_rx.read(lmB_t3, strToInt("{AMHeader,0x01,0x02,0x88,0xD,0x5,2}"))
axis_xpams_rx.read(lmB_t3, strToInt("{AMHeader,0x01,0x02,0x60,0xD,0x5,2}"))
axis_xpams_rx.read(lmB_t3, strToInt("{AMLongStride,0x100,32,3,0x5}"))
axis_xpams_rx.read(lmB_t3, 0xAABBCCDD) # handler arg 0
axis_xpams_rx.read(lmB_t3, 0xDDCCBBAA) # handler arg 1
# Each stride is emitted on axis_s2mm as its own burst terminated by tlast.
for i in range(3):
    for j in range(3):
        axis_s2mm.read(lmB_t3, 0xBEEFDEAD)
    axis_s2mm.read(lmB_t3, 0xDEADBEEF, tlast=1)
lmB_t3.add_delay('50ns')
# One OK status per completed stride burst.
axis_s2mmStatus.write(lmB_t3, 0x80)
axis_s2mmStatus.write(lmB_t3, 0x80)
axis_s2mmStatus.write(lmB_t3, 0x80, callTB=1)
lmB_t3.print_elapsed_time("long_message_B")
lmB_t3.end_vector()
#-------------------------------------------------------------------------------
# Long Message C
#
# Long vector message from 0xAA to 0xCC with 0x10C words of payload
# across two vectors. It calls handler 0xF with zero handler args. First
# vector has 0xC words to address 0 and second has has 0x100 words to
# address 0xFF00
#-------------------------------------------------------------------------------
long_message_C = TestVector()
long_message_C.add_thread(initT)

lmC_t1 = long_message_C.add_thread()
lmC_t1.add_delay('300ns')
lmC_t1.init_timer()
axis_net.writes(lmC_t1, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,type:0x6,src:0x01,dst:0x02,payload:0x888,handler:0xF,args:0}"), "callTB": 1},
    {"tdata": strToInt("{AMHeader,type:0x6,src:0x01,dst:0x02,payload:0x860,handler:0xF,args:0}"), "callTB": 1},
    {"tdata": strToInt("{AMLongVector,dst:2,dstSize:0x60,token:0x6}")},
    {"tdata": 0}, # destination 0
    {"tdata": 0x800}, # size 1
    {"tdata": 0xFF00, "callTB": 1}, # destination 1
])
# First vector: 12 words (3 groups of 4); second vector: 256 words.
for i in range(3):
    for j in range(3):
        axis_net.write(lmC_t1, 0xBEEFDEAD)
    axis_net.write(lmC_t1, 0xDEADBEEF)
for i in range(255):
    axis_net.write(lmC_t1, 0xFFAAFFAA)
axis_net.write(lmC_t1, 0xDDDDDDDD, tlast=1, callTB=2)

# One DataMover command per destination vector.
lmC_t2 = long_message_C.add_thread()
axis_s2mmCommand.read(lmC_t2, strToInt("{dataMoverCommand,0x60,1,0,1,0,0,0}"))
axis_s2mmCommand.read(lmC_t2, strToInt("{dataMoverCommand,0x800,1,0,1,0,0xFF00,0}"))

lmC_t3 = long_message_C.add_thread()
axis_xpams_rx.reads(lmC_t3, [
    # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x888,0xF,0x6,0}")},
    {"tdata": strToInt("{AMHeader,0x01,0x02,0x860,0xF,0x6,0}")},
    {"tdata": strToInt("{AMLongVector,2,0x60,0x6}")}
])
# NOTE(review): these s2mm payload reads are attached to lmC_t2 (the command
# thread) rather than lmC_t3, unlike Long Message B where payload reads sit
# with the handler-stream reads -- presumably to sequence them after the
# commands; confirm this ordering is intentional.
for i in range(2):
    for j in range(3):
        axis_s2mm.read(lmC_t2, 0xBEEFDEAD)
    axis_s2mm.read(lmC_t2, 0xDEADBEEF)
for j in range(3):
    axis_s2mm.read(lmC_t2, 0xBEEFDEAD)
axis_s2mm.read(lmC_t2, 0xDEADBEEF, tlast=1)
for i in range(255):
    axis_s2mm.read(lmC_t2, 0xFFAAFFAA)
axis_s2mm.read(lmC_t2, 0xDDDDDDDD, tlast=1)

# Wait for both bursts (tlast pulses twice), then return two OK statuses.
lmC_t4 = long_message_C.add_thread()
lmC_t4.wait_level('axis_s2mm_tlast == $value', value=1)
lmC_t4.wait_level('axis_s2mm_tlast == $value', value=0)
lmC_t4.wait_level('axis_s2mm_tlast == $value', value=1)
lmC_t4.add_delay('50ns')
axis_s2mmStatus.write(lmC_t4, 0x80)
axis_s2mmStatus.write(lmC_t4, 0x80, callTB=1)
lmC_t4.print_elapsed_time("long_message_C")
lmC_t4.end_vector()
# Register all vectors in execution order and emit the testbench files.
am_rx.add_test_vector(short_message_A)
am_rx.add_test_vector(short_message_B)
am_rx.add_test_vector(medium_message_A)
am_rx.add_test_vector(medium_message_B)
am_rx.add_test_vector(long_message_A)
am_rx.add_test_vector(long_message_B)
am_rx.add_test_vector(long_message_C)
# Captured simulation timings across three optimization stages of the DUT.
# NOTE(review): "Medium_Message_A ... Test vector 2 complete" appears twice
# in every run -- consistent with the Medium Message A threads having been
# registered twice; verify against the vector definition above.
# original
# Short_Message_A: 0.370 us
# Test vector 0 complete
# short_message_B: 0.180 us
# Test vector 1 complete
# Medium_Message_A: 0.280 us
# Test vector 2 complete
# Medium_Message_A: 0.280 us
# Test vector 2 complete
# medium_message_B: 21.100 us
# Test vector 3 complete
# Long_Message_A: 20.890 us
# Test vector 4 complete
# long_message_B: 1.550 us
# Test vector 5 complete
# long_message_C: 21.870 us
# Test vector 6 complete
# copy statics, pipeline
# Short_Message_A: 0.310 us
# Test vector 0 complete
# short_message_B: 0.140 us
# Test vector 1 complete
# Medium_Message_A: 0.240 us
# Test vector 2 complete
# Medium_Message_A: 0.240 us
# Test vector 2 complete
# medium_message_B: 21.040 us
# Test vector 3 complete
# Long_Message_A: 20.830 us
# Test vector 4 complete
# long_message_B: 1.510 us
# Test vector 5 complete
# long_message_C: 21.850 us
# Test vector 6 complete
# Final: II = 2
# Short_Message_A: 0.190 us
# Test vector 0 complete
# short_message_B: 0.100 us
# Test vector 1 complete
# Medium_Message_A: 0.120 us
# Test vector 2 complete
# Medium_Message_A: 0.120 us
# Test vector 2 complete
# medium_message_B: 10.560 us
# Test vector 3 complete
# Long_Message_A: 10.410 us
# Test vector 4 complete
# long_message_B: 0.810 us
# Test vector 5 complete
# long_message_C: 10.930 us
# Test vector 6 complete
am_rx.generateTB(filepath, 'all')
|
en
| 0.451921
|
################################################################################ # Test Vectors ################################################################################ # Initialization thread (added to each test vector to reset everything) #------------------------------------------------------------------------------- # Short Message A # #------------------------------------------------------------------------------- # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,48,0xE,1,2}"), "callTB": 1}, # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,48,0xE,1,2}")}, #------------------------------------------------------------------------------- # Short Message B # #------------------------------------------------------------------------------- # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,24,0xE,1,0}"), "callTB": 1}, # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,24,0xE,1,0}")}, #------------------------------------------------------------------------------- # Medium Message A # #------------------------------------------------------------------------------- # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,36,0xE,2,0}"), "callTB": 1}, # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,36,0xE,2,0}")}, #------------------------------------------------------------------------------- # Medium Message B # #------------------------------------------------------------------------------- # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x818,0xE,2,6}"), "callTB": 1}, # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x818,0xE,2,6}")}, #------------------------------------------------------------------------------- # Long Message A # #------------------------------------------------------------------------------- # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x818,0xE,4,0}"), "callTB": 1}, # address # USE_ABS_PAYLOAD axis_xpams_rx.read(lmA_t2, 
strToInt("{AMHeader,0x01,0x02,0x818,0xE,4,0}")) #------------------------------------------------------------------------------- # Long Message B # # Long strided message from 0xAA to 0xCC with 0xC words of payload # across two vectors. It calls handler 0xD with two handler args. Each # stride is 0x100 words, and it write 4 words each time #------------------------------------------------------------------------------- # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x88,0xD,0x5,2}"), "callTB": 1}, # initial address # handler arg 0 # handler arg 1 # USE_ABS_PAYLOAD axis_xpams_rx.read(lmB_t3, strToInt("{AMHeader,0x01,0x02,0x88,0xD,0x5,2}")) # handler arg 0 # handler arg 1 #------------------------------------------------------------------------------- # Long Message C # # Long vector message from 0xAA to 0xCC with 0x10C words of payload # across two vectors. It calls handler 0xF with zero handler args. First # vector has 0xC words to address 0 and second has has 0x100 words to # address 0xFF00 #------------------------------------------------------------------------------- # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,type:0x6,src:0x01,dst:0x02,payload:0x888,handler:0xF,args:0}"), "callTB": 1}, # destination 0 # size 1 # destination 1 # USE_ABS_PAYLOAD {"tdata": strToInt("{AMHeader,0x01,0x02,0x888,0xF,0x6,0}")}, # original # Short_Message_A: 0.370 us # Test vector 0 complete # short_message_B: 0.180 us # Test vector 1 complete # Medium_Message_A: 0.280 us # Test vector 2 complete # Medium_Message_A: 0.280 us # Test vector 2 complete # medium_message_B: 21.100 us # Test vector 3 complete # Long_Message_A: 20.890 us # Test vector 4 complete # long_message_B: 1.550 us # Test vector 5 complete # long_message_C: 21.870 us # Test vector 6 complete # copy statics, pipeline # Short_Message_A: 0.310 us # Test vector 0 complete # short_message_B: 0.140 us # Test vector 1 complete # Medium_Message_A: 0.240 us # Test vector 2 complete # Medium_Message_A: 0.240 us # 
Test vector 2 complete # medium_message_B: 21.040 us # Test vector 3 complete # Long_Message_A: 20.830 us # Test vector 4 complete # long_message_B: 1.510 us # Test vector 5 complete # long_message_C: 21.850 us # Test vector 6 complete # Final: II = 2 # Short_Message_A: 0.190 us # Test vector 0 complete # short_message_B: 0.100 us # Test vector 1 complete # Medium_Message_A: 0.120 us # Test vector 2 complete # Medium_Message_A: 0.120 us # Test vector 2 complete # medium_message_B: 10.560 us # Test vector 3 complete # Long_Message_A: 10.410 us # Test vector 4 complete # long_message_B: 0.810 us # Test vector 5 complete # long_message_C: 10.930 us # Test vector 6 complete
| 2.038277
| 2
|
gcloud/tests/utils/cmdb/test_business_host_topo.py
|
ZhuoZhuoCrayon/bk-sops
| 1
|
6626508
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from mock import MagicMock, patch
from django.test import TestCase
from gcloud.utils.cmdb import get_business_host_topo
class GetBusinessHostTopoTestCase(TestCase):
    """Tests for gcloud.utils.cmdb.get_business_host_topo.

    get_client_by_user is patched so the function's call to batch_request
    can be intercepted; each test checks both the transformed return value
    and the exact request parameters sent to CMDB.
    """

    def setUp(self):
        # The patched client exposes cc.list_biz_hosts_topo as a plain
        # string so the tests can assert it is forwarded verbatim as the
        # first argument of batch_request.
        mock_client = MagicMock()
        mock_client.cc.list_biz_hosts_topo = "list_biz_hosts_topo"
        self.get_client_by_user_patcher = patch(
            "gcloud.utils.cmdb.get_client_by_user", MagicMock(return_value=mock_client)
        )
        self.get_client_by_user_patcher.start()
        # Opaque token arguments: the function under test only passes these
        # through, so their concrete values are irrelevant.
        self.username = "user_token"
        self.bk_biz_id = "biz_id_token"
        self.supplier_account = "supplier_account_token"
        self.host_fields = "host_fields_token"
        self.ip_list = "ip_list_token"
        # Raw CMDB response fixture: each entry is a host plus its topo
        # (a list of sets, each containing its modules).
        self.list_biz_hosts_topo_return = [
            {
                "host": {
                    "bk_cloud_id": 0,
                    "bk_host_id": 1,
                    "bk_host_innerip": "127.0.0.1",
                    "bk_mac": "",
                    "bk_os_type": None,
                },
                "topo": [
                    {"bk_set_id": 11, "bk_set_name": "set1", "module": [{"bk_module_id": 56, "bk_module_name": "m1"}]}
                ],
            },
            {
                "host": {
                    "bk_cloud_id": 0,
                    "bk_host_id": 3,
                    "bk_host_innerip": "127.0.0.3",
                    "bk_mac": "",
                    "bk_os_type": None,
                },
                "topo": [
                    {
                        "bk_set_id": 10,
                        "bk_set_name": "空闲机池",
                        "module": [
                            {"bk_module_id": 54, "bk_module_name": "空闲机"},
                            {"bk_module_id": 55, "bk_module_name": "空闲机1"},
                        ],
                    },
                    {"bk_set_id": 11, "bk_set_name": "set1", "module": [{"bk_module_id": 56, "bk_module_name": "m1"}]},
                ],
            },
        ]
        # Expected transformation: the nested "topo" list is flattened into
        # separate "set" and "module" lists on each host entry.
        self.get_business_host_topo_expect_return = [
            {
                "host": {
                    "bk_cloud_id": 0,
                    "bk_host_id": 1,
                    "bk_host_innerip": "127.0.0.1",
                    "bk_mac": "",
                    "bk_os_type": None,
                },
                "set": [{"bk_set_id": 11, "bk_set_name": "set1"}],
                "module": [{"bk_module_id": 56, "bk_module_name": "m1"}],
            },
            {
                "host": {
                    "bk_cloud_id": 0,
                    "bk_host_id": 3,
                    "bk_host_innerip": "127.0.0.3",
                    "bk_mac": "",
                    "bk_os_type": None,
                },
                "set": [{"bk_set_id": 10, "bk_set_name": "空闲机池"}, {"bk_set_id": 11, "bk_set_name": "set1"}],
                "module": [
                    {"bk_module_id": 54, "bk_module_name": "空闲机"},
                    {"bk_module_id": 55, "bk_module_name": "空闲机1"},
                    {"bk_module_id": 56, "bk_module_name": "m1"},
                ],
            },
        ]

    def tearDown(self):
        # Undo the module-level patch so other test cases see the real client.
        self.get_client_by_user_patcher.stop()

    def test__list_biz_hosts_topo_return_empty(self):
        """An empty CMDB response yields an empty result; params are forwarded."""
        mock_batch_request = MagicMock(return_value=[])
        with patch("gcloud.utils.cmdb.batch_request", mock_batch_request):
            hosts_topo = get_business_host_topo(self.username, self.bk_biz_id, self.supplier_account, self.host_fields)
        self.assertEqual(hosts_topo, [])
        mock_batch_request.assert_called_once_with(
            "list_biz_hosts_topo",
            {"bk_biz_id": self.bk_biz_id, "bk_supplier_account": self.supplier_account, "fields": self.host_fields},
        )

    def test__get_with_ip_list(self):
        """Passing ip_list adds a host_property_filter on bk_host_innerip."""
        mock_batch_request = MagicMock(return_value=self.list_biz_hosts_topo_return)
        with patch("gcloud.utils.cmdb.batch_request", mock_batch_request):
            hosts_topo = get_business_host_topo(
                self.username, self.bk_biz_id, self.supplier_account, self.host_fields, self.ip_list
            )
        self.assertEqual(hosts_topo, self.get_business_host_topo_expect_return)
        mock_batch_request.assert_called_once_with(
            "list_biz_hosts_topo",
            {
                "bk_biz_id": self.bk_biz_id,
                "bk_supplier_account": self.supplier_account,
                "fields": self.host_fields,
                "host_property_filter": {
                    "condition": "AND",
                    "rules": [{"field": "bk_host_innerip", "operator": "in", "value": self.ip_list}],
                },
            },
        )

    def test__get_without_ip_list(self):
        """Without ip_list no host_property_filter is sent."""
        mock_batch_request = MagicMock(return_value=self.list_biz_hosts_topo_return)
        with patch("gcloud.utils.cmdb.batch_request", mock_batch_request):
            hosts_topo = get_business_host_topo(self.username, self.bk_biz_id, self.supplier_account, self.host_fields)
        self.assertEqual(hosts_topo, self.get_business_host_topo_expect_return)
        mock_batch_request.assert_called_once_with(
            "list_biz_hosts_topo",
            {"bk_biz_id": self.bk_biz_id, "bk_supplier_account": self.supplier_account, "fields": self.host_fields},
        )
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from mock import MagicMock, patch
from django.test import TestCase
from gcloud.utils.cmdb import get_business_host_topo
class GetBusinessHostTopoTestCase(TestCase):
def setUp(self):
mock_client = MagicMock()
mock_client.cc.list_biz_hosts_topo = "list_biz_hosts_topo"
self.get_client_by_user_patcher = patch(
"gcloud.utils.cmdb.get_client_by_user", MagicMock(return_value=mock_client)
)
self.get_client_by_user_patcher.start()
self.username = "user_token"
self.bk_biz_id = "biz_id_token"
self.supplier_account = "supplier_account_token"
self.host_fields = "host_fields_token"
self.ip_list = "ip_list_token"
self.list_biz_hosts_topo_return = [
{
"host": {
"bk_cloud_id": 0,
"bk_host_id": 1,
"bk_host_innerip": "127.0.0.1",
"bk_mac": "",
"bk_os_type": None,
},
"topo": [
{"bk_set_id": 11, "bk_set_name": "set1", "module": [{"bk_module_id": 56, "bk_module_name": "m1"}]}
],
},
{
"host": {
"bk_cloud_id": 0,
"bk_host_id": 3,
"bk_host_innerip": "127.0.0.3",
"bk_mac": "",
"bk_os_type": None,
},
"topo": [
{
"bk_set_id": 10,
"bk_set_name": "空闲机池",
"module": [
{"bk_module_id": 54, "bk_module_name": "空闲机"},
{"bk_module_id": 55, "bk_module_name": "空闲机1"},
],
},
{"bk_set_id": 11, "bk_set_name": "set1", "module": [{"bk_module_id": 56, "bk_module_name": "m1"}]},
],
},
]
self.get_business_host_topo_expect_return = [
{
"host": {
"bk_cloud_id": 0,
"bk_host_id": 1,
"bk_host_innerip": "127.0.0.1",
"bk_mac": "",
"bk_os_type": None,
},
"set": [{"bk_set_id": 11, "bk_set_name": "set1"}],
"module": [{"bk_module_id": 56, "bk_module_name": "m1"}],
},
{
"host": {
"bk_cloud_id": 0,
"bk_host_id": 3,
"bk_host_innerip": "127.0.0.3",
"bk_mac": "",
"bk_os_type": None,
},
"set": [{"bk_set_id": 10, "bk_set_name": "空闲机池"}, {"bk_set_id": 11, "bk_set_name": "set1"}],
"module": [
{"bk_module_id": 54, "bk_module_name": "空闲机"},
{"bk_module_id": 55, "bk_module_name": "空闲机1"},
{"bk_module_id": 56, "bk_module_name": "m1"},
],
},
]
def tearDown(self):
self.get_client_by_user_patcher.stop()
def test__list_biz_hosts_topo_return_empty(self):
mock_batch_request = MagicMock(return_value=[])
with patch("gcloud.utils.cmdb.batch_request", mock_batch_request):
hosts_topo = get_business_host_topo(self.username, self.bk_biz_id, self.supplier_account, self.host_fields)
self.assertEqual(hosts_topo, [])
mock_batch_request.assert_called_once_with(
"list_biz_hosts_topo",
{"bk_biz_id": self.bk_biz_id, "bk_supplier_account": self.supplier_account, "fields": self.host_fields},
)
def test__get_with_ip_list(self):
mock_batch_request = MagicMock(return_value=self.list_biz_hosts_topo_return)
with patch("gcloud.utils.cmdb.batch_request", mock_batch_request):
hosts_topo = get_business_host_topo(
self.username, self.bk_biz_id, self.supplier_account, self.host_fields, self.ip_list
)
self.assertEqual(hosts_topo, self.get_business_host_topo_expect_return)
mock_batch_request.assert_called_once_with(
"list_biz_hosts_topo",
{
"bk_biz_id": self.bk_biz_id,
"bk_supplier_account": self.supplier_account,
"fields": self.host_fields,
"host_property_filter": {
"condition": "AND",
"rules": [{"field": "bk_host_innerip", "operator": "in", "value": self.ip_list}],
},
},
)
def test__get_without_ip_list(self):
mock_batch_request = MagicMock(return_value=self.list_biz_hosts_topo_return)
with patch("gcloud.utils.cmdb.batch_request", mock_batch_request):
hosts_topo = get_business_host_topo(self.username, self.bk_biz_id, self.supplier_account, self.host_fields)
self.assertEqual(hosts_topo, self.get_business_host_topo_expect_return)
mock_batch_request.assert_called_once_with(
"list_biz_hosts_topo",
{"bk_biz_id": self.bk_biz_id, "bk_supplier_account": self.supplier_account, "fields": self.host_fields},
)
|
en
| 0.864615
|
# -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| 1.912605
| 2
|
tools/mo/openvino/tools/mo/front/tf/next_iteration_ext.py
|
ryanloney/openvino-1
| 1,127
|
6626509
|
<gh_stars>1000+
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.graph.graph import Node
class NextIterationExtractor(FrontExtractorOp):
    """Front extractor for TensorFlow NextIteration ops.

    Marks the node as part of a cycle and assigns elementwise
    (copy-the-input-shape) shape inference.
    """
    op = "NextIteration"
    enabled = True

    @classmethod
    def extract(cls, node: Node):
        node['is_cyclic'] = True          # node participates in a TF loop
        node['infer'] = copy_shape_infer  # output shape == input shape
        return cls.enabled
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.graph.graph import Node
class NextIterationExtractor(FrontExtractorOp):
    """Extractor for the TF NextIteration op: flags the node as cyclic and
    installs copy-through shape inference."""
    op = "NextIteration"
    enabled = True

    @classmethod
    def extract(cls, node: Node):
        # The two attribute assignments are independent; set the inference
        # function first, then the cyclic flag.
        node['infer'] = copy_shape_infer
        node['is_cyclic'] = True
        return cls.enabled
|
de
| 0.250642
|
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0
| 1.784384
| 2
|
parse/tests.py
|
danrobinson/PingBox
| 0
|
6626510
|
import unittest
from parse_rest.connection import register, ParseBatcher, SessionToken
from parse_rest.user import User
from parse_rest.datatypes import Object, Function
"""
# Prod
register(
"8fhsO5d7WTt6c7ffpVrPpHTVvuAi6vArrciyt8cK",
"tBnfG7a0P38w0ka3jWVTRkqcDxMIOdUXxNv8sZFp",
master_key="<KEY>",
)
"""
# Test
def register_app(**kw):
    """Register this process against the test Parse application.

    The credential strings are redacted placeholders in this published
    copy; extra keyword args are forwarded to parse_rest's register().
    """
    register(
        "<KEY>",
        "<KEY>",
        #master_key="<KEY>",
        **kw
    )
class Task(Object):
    """Parse-backed Task object; its schema is defined server-side."""
    pass
class Ping(Object):
    """Parse-backed Ping object; its schema is defined server-side."""
    pass
# Fixture accounts as (username, password, email) triples; the email
# values are redacted placeholders in this published copy.
sample_users = [
    ("baracky", "usanumber1", "<EMAIL>"),
    ("somethingelse", "usanumber4", "<EMAIL>"),
    ('deepthroat', 'lolol', '<EMAIL>'),
]
def signup_sample_users():
    """Create a Parse account for every entry in sample_users."""
    for name, secret, address in sample_users:
        User.signup(name, secret, email=address)
def delete_sample_users():
    """Best-effort cleanup: log in as each sample user and delete it.

    Accounts that cannot be logged into (never created, already deleted,
    or wrong credentials) are skipped silently.
    """
    for username, password, email in sample_users:
        try:
            u = User.login(username, password)
            u.delete()
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception so the cleanup stays
            # best-effort without masking interpreter-level signals.
            pass
class TestPingBox(unittest.TestCase):
@classmethod
def setUpClass(cls):
delete_sample_users()
signup_sample_users()
User.signup('redsox55', 'secure<PASSWORD>', email='<EMAIL>')
ParseBatcher().batch_delete(Task.Query.all())
@classmethod
def tearDownClass(cls):
delete_sample_users()
u1 = User.login('redsox55', 'secure123')
with SessionToken(u1.sessionToken):
ParseBatcher().batch_delete(Task.Query.all())
u1.delete()
def setUp(self):
self.user = User.Query.get(username='redsox55')
def tearDown(self):
u = User.login('redsox55', 'secure123')
with SessionToken(u.sessionToken):
ParseBatcher().batch_delete(Task.Query.all())
ParseBatcher().batch_delete(Ping.Query.all())
def test_create_task(self):
assignTask = Function("assignTask")
assignee = User.Query.get(username='deepthroat')
u = User.login('redsox55', 'secure123')
with SessionToken(u.sessionToken):
title = 'w45h45r4g4h'
response = assignTask(
title=title,
description="See title",
watchers=[user[2] for user in sample_users],
email=None,
assignee=assignee.email,
)
self.assertIn('task', response['result'])
tasks = Task.Query.all()
self.assertEqual(len(tasks), 1)
t = tasks[0]
self.assertEqual(t.title, title)
self.assertEqual(t.creator.objectId, u.objectId)
self.assertEqual(t.score, 0)
self.assertEqual(len(t.watchers), len(sample_users))
self.assertEqual(t.assignee.objectId, assignee.objectId)
self.assertTrue(all(w["className"] == '_User' for w in t.watchers))
def test_create_ping(self):
assignTask = Function("assignTask")
ping = Function("ping")
u = User.login('redsox55', 'secure123')
assignee = User.Query.get(username='deepthroat')
with SessionToken(u.sessionToken):
title = 'serha34g444'
response = assignTask(
title=title,
description="Send a ping to this task",
watchers=[user[2] for user in sample_users],
score=2,
assignee=assignee.email,
)
self.assertIn('task', response['result'])
with SessionToken(u.sessionToken):
task = Task.Query.get(title=title)
response = ping(taskId=task.objectId)
task = Task.Query.get(title=title)
self.assertIn('task', response['result'])
self.assertIn('ping', response['result'])
self.assertEqual(task.score, 1)
def test_today_pings(self):
    """todayPings returns only the calling user's pings from today."""
    assignTask = Function("assignTask")
    ping = Function("ping")
    u1 = User.login('redsox55', 'secure<PASSWORD>')
    assignee = User.Query.get(username='deepthroat')
    with SessionToken(u1.sessionToken):
        title = 'serha34g444'
        response = assignTask(
            title=title,
            description="Send a ping to this task",
            watchers=[u[2] for u in sample_users],
            score=2,
            assignee=assignee.email,
        )
        self.assertIn('task', response['result'])
    todayPings = Function('todayPings')
    with SessionToken(u1.sessionToken):
        task = Task.Query.get(title=title)
        ping(taskId=task.objectId)
        todayPingsU1 = todayPings()
        # Python 2 print statement (whole file is py2-only).
        print "TODAY PINGS U1: %s" % todayPingsU1
        self.assertEqual(len(todayPingsU1['result']), 1)
    # A second user's pings are counted independently of u1's.
    u2 = User.login('baracky', 'usanumber1')
    with SessionToken(u2.sessionToken):
        ping(taskId=task.objectId)
        ping(taskId=task.objectId)
        todayPingsU2 = todayPings()
        print "TODAY PINGS U2: %s" % todayPingsU2
        self.assertEqual(len(todayPingsU2['result']), 2)
"""
def test_tasks_by_user(self):
pass
def test_tasks_by_watcher(self):
pass
def test_tasks_by_creator(self):
pass
def test_initial_score_is_zero(self):
pass
def test_one_user_pings_another(self):
pass
def test_score_increments(self):
pass
"""
if __name__ == '__main__':
    # Register against the test Parse application before running the suite.
    register_app()
    unittest.main()
|
import unittest
from parse_rest.connection import register, ParseBatcher, SessionToken
from parse_rest.user import User
from parse_rest.datatypes import Object, Function
"""
# Prod
register(
"8fhsO5d7WTt6c7ffpVrPpHTVvuAi6vArrciyt8cK",
"tBnfG7a0P38w0ka3jWVTRkqcDxMIOdUXxNv8sZFp",
master_key="<KEY>",
)
"""
# Test
def register_app(**kw):
    """Register this process with the *test* Parse application.

    Extra keyword arguments (e.g. ``master_key=...``) are forwarded to
    ``parse_rest.connection.register``.
    """
    register(
        "<KEY>",
        "<KEY>",
        #master_key="<KEY>",
        **kw
    )
class Task(Object):
    # Parse Object subclass mapping to the server-side "Task" class.
    pass
class Ping(Object):
    # Parse Object subclass mapping to the server-side "Ping" class.
    pass
# (username, password, email) triples used to populate the test app.
sample_users = [
    ("baracky", "usanumber1", "<EMAIL>"),
    ("somethingelse", "usanumber4", "<EMAIL>"),
    ('deepthroat', 'lolol', '<EMAIL>'),
]
def signup_sample_users():
    """Create a Parse account for each entry in ``sample_users``."""
    for username, pw, email in sample_users:
        User.signup(username, pw, email=email)
def delete_sample_users():
    """Best-effort removal of every account listed in ``sample_users``.

    Accounts that do not exist, or whose password no longer matches, are
    silently skipped, so the function is safe to call from both set-up
    and tear-down paths.
    """
    for username, password, email in sample_users:
        try:
            u = User.login(username, password)
            u.delete()
        except Exception:
            # Was a bare ``except:``, which would also swallow SystemExit
            # and KeyboardInterrupt; catch Exception so interpreter-level
            # signals still propagate while failed logins remain ignored.
            pass
class TestPingBox(unittest.TestCase):
    """Integration tests for the assignTask/ping/todayPings Parse Cloud
    functions.

    NOTE(review): these tests hit a live Parse backend and use Python 2
    print statements; they cannot run offline or under Python 3.
    """
    @classmethod
    def setUpClass(cls):
        # Start from a clean slate: recreate the sample accounts and the
        # primary test user, then drop any leftover tasks.
        delete_sample_users()
        signup_sample_users()
        User.signup('redsox55', 'secure<PASSWORD>', email='<EMAIL>')
        ParseBatcher().batch_delete(Task.Query.all())
    @classmethod
    def tearDownClass(cls):
        """Remove every account/object the suite created."""
        delete_sample_users()
        u1 = User.login('redsox55', 'secure123')
        with SessionToken(u1.sessionToken):
            ParseBatcher().batch_delete(Task.Query.all())
        u1.delete()
    def setUp(self):
        # Fetch the primary test user fresh before each test.
        self.user = User.Query.get(username='redsox55')
    def tearDown(self):
        # Drop all Task and Ping objects created during the test.
        u = User.login('redsox55', 'secure123')
        with SessionToken(u.sessionToken):
            ParseBatcher().batch_delete(Task.Query.all())
            ParseBatcher().batch_delete(Ping.Query.all())
    def test_create_task(self):
        """assignTask creates exactly one well-formed Task."""
        assignTask = Function("assignTask")
        assignee = User.Query.get(username='deepthroat')
        u = User.login('redsox55', 'secure123')
        with SessionToken(u.sessionToken):
            title = 'w45h45r4g4h'
            response = assignTask(
                title=title,
                description="See title",
                # Watchers are passed as email addresses (third tuple field).
                watchers=[user[2] for user in sample_users],
                email=None,
                assignee=assignee.email,
            )
            self.assertIn('task', response['result'])
            tasks = Task.Query.all()
            self.assertEqual(len(tasks), 1)
            t = tasks[0]
            self.assertEqual(t.title, title)
            self.assertEqual(t.creator.objectId, u.objectId)
            self.assertEqual(t.score, 0)
            self.assertEqual(len(t.watchers), len(sample_users))
            self.assertEqual(t.assignee.objectId, assignee.objectId)
            # Watchers should be stored as pointers to _User objects.
            self.assertTrue(all(w["className"] == '_User' for w in t.watchers))
    def test_create_ping(self):
        """Pinging a task creates a Ping and updates the task's score."""
        assignTask = Function("assignTask")
        ping = Function("ping")
        u = User.login('redsox55', 'secure123')
        assignee = User.Query.get(username='deepthroat')
        with SessionToken(u.sessionToken):
            title = 'serha34g444'
            response = assignTask(
                title=title,
                description="Send a ping to this task",
                watchers=[user[2] for user in sample_users],
                score=2,
                assignee=assignee.email,
            )
            self.assertIn('task', response['result'])
        with SessionToken(u.sessionToken):
            task = Task.Query.get(title=title)
            response = ping(taskId=task.objectId)
            # Re-fetch to observe the server-side score change.
            task = Task.Query.get(title=title)
            self.assertIn('task', response['result'])
            self.assertIn('ping', response['result'])
            # NOTE(review): score starts at 2 yet 1 is expected after a
            # ping — confirm the cloud function's scoring rule.
            self.assertEqual(task.score, 1)
    def test_today_pings(self):
        """todayPings returns only the calling user's pings from today."""
        assignTask = Function("assignTask")
        ping = Function("ping")
        u1 = User.login('redsox55', 'secure<PASSWORD>')
        assignee = User.Query.get(username='deepthroat')
        with SessionToken(u1.sessionToken):
            title = 'serha34g444'
            response = assignTask(
                title=title,
                description="Send a ping to this task",
                watchers=[u[2] for u in sample_users],
                score=2,
                assignee=assignee.email,
            )
            self.assertIn('task', response['result'])
        todayPings = Function('todayPings')
        with SessionToken(u1.sessionToken):
            task = Task.Query.get(title=title)
            ping(taskId=task.objectId)
            todayPingsU1 = todayPings()
            # Python 2 print statement.
            print "TODAY PINGS U1: %s" % todayPingsU1
            self.assertEqual(len(todayPingsU1['result']), 1)
        # A second user's pings are counted independently of u1's.
        u2 = User.login('baracky', 'usanumber1')
        with SessionToken(u2.sessionToken):
            ping(taskId=task.objectId)
            ping(taskId=task.objectId)
            todayPingsU2 = todayPings()
            print "TODAY PINGS U2: %s" % todayPingsU2
            self.assertEqual(len(todayPingsU2['result']), 2)
    # Placeholder tests kept disabled as a class-level string literal.
    """
    def test_tasks_by_user(self):
        pass
    def test_tasks_by_watcher(self):
        pass
    def test_tasks_by_creator(self):
        pass
    def test_initial_score_is_zero(self):
        pass
    def test_one_user_pings_another(self):
        pass
    def test_score_increments(self):
        pass
    """
if __name__ == '__main__':
    # Register against the test Parse application before running the suite.
    register_app()
    unittest.main()
|
en
| 0.453104
|
# Prod register( "8fhsO5d7WTt6c7ffpVrPpHTVvuAi6vArrciyt8cK", "tBnfG7a0P38w0ka3jWVTRkqcDxMIOdUXxNv8sZFp", master_key="<KEY>", ) # Test #master_key="<KEY>", def test_tasks_by_user(self): pass def test_tasks_by_watcher(self): pass def test_tasks_by_creator(self): pass def test_initial_score_is_zero(self): pass def test_one_user_pings_another(self): pass def test_score_increments(self): pass
| 2.412788
| 2
|
sentiment.py
|
chauhanjatin10/sentiment_analysis
| 0
|
6626511
|
<filename>sentiment.py<gh_stars>0
from nltk import tokenize
from nltk import pos_tag, word_tokenize, ne_chunk
from nltk.tree import Tree
from nltk.sentiment.vader import SentimentIntensityAnalyzer
class NER:
    """Extract named entities from a text using NLTK's default chunker."""

    def __init__(self, text):
        # BUG FIX: ``text`` was referenced but never accepted as a
        # parameter, so instantiation raised NameError (or silently bound
        # an unrelated global). Accept it explicitly, mirroring Sentiment.
        self.text = text

    def extract_named_entities(self):
        """Return a list of named-entity strings found in ``self.text``."""
        named_entities = []
        for chunk in ne_chunk(pos_tag(word_tokenize(self.text))):
            # ne_chunk wraps recognized entities in Tree nodes; plain
            # tokens are left as (word, POS-tag) tuples.
            if isinstance(chunk, Tree):
                named_entities.append(' '.join(c[0] for c in chunk))
        return named_entities
class Sentiment:
    """Run VADER sentiment analysis over a pre-split text."""

    def __init__(self, text):
        # ``text`` looks like an iterable of paragraphs, each an iterable
        # of lines — TODO confirm against the caller.
        self.text = text
        self.analyser = SentimentIntensityAnalyzer()

    def sentiment_polarity(self):
        """Write per-line VADER polarity scores to ``sentiment.txt``."""
        with open('sentiment.txt', 'w') as f:
            for paragraph in self.text:
                # The last element of each paragraph is skipped —
                # presumably a trailing marker; verify.
                for line in paragraph[:-1]:
                    f.write(line + ' -> analysis - ')
                    ss = self.analyser.polarity_scores(line)
                    for k in sorted(ss):
                        f.write('{0}: {1} , '.format(k, ss[k]))
                    f.write('\n')
                f.write('\n')
|
<filename>sentiment.py<gh_stars>0
from nltk import tokenize
from nltk import pos_tag, word_tokenize, ne_chunk
from nltk.tree import Tree
from nltk.sentiment.vader import SentimentIntensityAnalyzer
class NER:
    """Extract named entities from a text using NLTK's default chunker."""

    def __init__(self, text):
        # BUG FIX: ``text`` was referenced but never accepted as a
        # parameter, so instantiation raised NameError (or silently bound
        # an unrelated global). Accept it explicitly, mirroring Sentiment.
        self.text = text

    def extract_named_entities(self):
        """Return a list of named-entity strings found in ``self.text``."""
        named_entities = []
        for chunk in ne_chunk(pos_tag(word_tokenize(self.text))):
            # ne_chunk wraps recognized entities in Tree nodes; plain
            # tokens are left as (word, POS-tag) tuples.
            if isinstance(chunk, Tree):
                named_entities.append(' '.join(c[0] for c in chunk))
        return named_entities
class Sentiment:
    """Run VADER sentiment analysis over a pre-split text."""

    def __init__(self, text):
        # ``text`` looks like an iterable of paragraphs, each an iterable
        # of lines — TODO confirm against the caller.
        self.text = text
        self.analyser = SentimentIntensityAnalyzer()

    def sentiment_polarity(self):
        """Write per-line VADER polarity scores to ``sentiment.txt``."""
        with open('sentiment.txt', 'w') as f:
            for paragraph in self.text:
                # The last element of each paragraph is skipped —
                # presumably a trailing marker; verify.
                for line in paragraph[:-1]:
                    f.write(line + ' -> analysis - ')
                    ss = self.analyser.polarity_scores(line)
                    for k in sorted(ss):
                        f.write('{0}: {1} , '.format(k, ss[k]))
                    f.write('\n')
                f.write('\n')
|
none
| 1
| 2.897044
| 3
|
|
server/manage.py
|
kdknive/buku-aska
| 0
|
6626512
|
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from src.app import create_app, db

# Build the Flask app for the environment named by FLASK_ENV.
env_name = os.getenv('FLASK_ENV')
app = create_app(env_name)

# Wire up Alembic migrations, exposed as ``python manage.py db <cmd>``.
migrate = Migrate(app=app, db=db)
manager = Manager(app=app)
manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
|
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from src.app import create_app, db

# Build the Flask app for the environment named by FLASK_ENV.
env_name = os.getenv('FLASK_ENV')
app = create_app(env_name)

# Wire up Alembic migrations, exposed as ``python manage.py db <cmd>``.
migrate = Migrate(app=app, db=db)
manager = Manager(app=app)
manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
|
none
| 1
| 1.918087
| 2
|
|
tango_user/views.py
|
tBaxter/Tango
| 17
|
6626513
|
import datetime
import json
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic import ListView
from django.views.generic.edit import UpdateView
from .filters import ProfileFilter
from .forms import PublicProfileForm, ProfileSettingsForm
UserModel = get_user_model()
past_year = datetime.datetime.now() - datetime.timedelta(days=365)
class MemberList(ListView):
    """
    Renders either default user list (paginated) or search results.
    To-do: split search to separate view, make pagination work better.
    """
    # NOTE(review): ``past_year`` is computed once at import time, so this
    # activity cutoff goes stale in a long-running process — confirm intended.
    queryset = UserModel.objects.filter(is_active=1, last_login__gte=past_year).order_by('display_name').values()
    template_name = "users/user_list.html"
    paginate_by = 100

    def get_context_data(self, **kwargs):
        """Attach a ProfileFilter (bound to GET params when searching)."""
        context = super(MemberList, self).get_context_data(**kwargs)
        # Renamed local from ``filter`` to avoid shadowing the builtin.
        if 'display_name' in self.request.GET or 'state' in self.request.GET:
            profile_filter = ProfileFilter(self.request.GET, queryset=self.queryset)
        else:
            profile_filter = ProfileFilter()
        context['filter'] = profile_filter
        return context
member_index = MemberList.as_view()
class EditProfile(UpdateView):
    """Lets the logged-in user edit their own public profile."""
    model = UserModel
    template_name = "users/user_edit_form.html"
    form_class = PublicProfileForm

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(EditProfile, self).dispatch(*args, **kwargs)

    def get_object(self, queryset=None):
        # Always edit the requesting user, never an arbitrary pk.
        return self.request.user

    def get_context_data(self, **kwargs):
        context = super(EditProfile, self).get_context_data(**kwargs)
        # Secondary (settings) form rendered on the same page.
        context['settings_form'] = ProfileSettingsForm(instance=self.get_object())
        return context
edit_profile = EditProfile.as_view()
class EditProfileSettings(EditProfile):
    """Variant of EditProfile where the settings form is the primary form."""
    form_class = ProfileSettingsForm

    def get_context_data(self, **kwargs):
        context = super(EditProfileSettings, self).get_context_data(**kwargs)
        # Swap which form is "primary" relative to EditProfile.
        context['settings_form'] = self.form_class
        context['form'] = PublicProfileForm()
        return context

    def form_valid(self, form, *args, **kwargs):
        messages.success(self.request, "Your settings have been updated.")
        theme = form.cleaned_data.get('theme')
        if theme:
            # NOTE(review): mutating request.COOKIES does not persist a
            # cookie to the client; a Set-Cookie on the *response* would
            # be needed — confirm intended behavior.
            self.request.COOKIES['theme'] = theme
        # NOTE(review): super(EditProfile, ...) skips EditProfile in the
        # MRO (harmless today since EditProfile defines no form_valid,
        # but likely meant to be EditProfileSettings).
        return super(EditProfile, self).form_valid(form, *args, **kwargs)
edit_settings = EditProfileSettings.as_view()
def view_profile(request, slug='', pk=''):
    """Return the detail view for a single user.

    Looks the user up by primary key when ``pk`` is given, otherwise by
    username (``slug``). AJAX requests receive a JSON summary instead of
    the rendered template.
    """
    if pk:
        user = get_object_or_404(UserModel, id=pk)
    else:
        user = get_object_or_404(UserModel, username=slug)
    if request.is_ajax():
        # Guard against a null city: ``None + ", ..."`` raised TypeError.
        location = user.city or ''
        if user.state:
            location += ", {}".format(user.state)
        xhr_dict = {
            'name': user.display_name,
            'username': user.username,
            'joined': user.date_joined.strftime('%m/%d/%Y'),
            'location': location,
            'website': user.homepage or '',
            'profile_url': user.get_absolute_url(),
            'contact_url': reverse('contact_member', args=(user.username,)),
        }
        # BUG FIX: HttpResponse takes ``content_type``; the old ``mimetype``
        # keyword was removed in Django 1.7 and raised TypeError here
        # (this file imports django.urls, i.e. Django >= 2.0).
        return HttpResponse(json.dumps(xhr_dict),
                            content_type='application/javascript')
    return render(request, "users/user_detail.html", {'user': user})
|
import datetime
import json
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic import ListView
from django.views.generic.edit import UpdateView
from .filters import ProfileFilter
from .forms import PublicProfileForm, ProfileSettingsForm
UserModel = get_user_model()
# Activity cutoff used by MemberList; computed once at import time.
past_year = datetime.datetime.now() - datetime.timedelta(days=365)
class MemberList(ListView):
    """
    Renders either default user list (paginated) or search results.
    To-do: split search to separate view, make pagination work better.
    """
    # NOTE(review): ``past_year`` is computed once at import time, so this
    # activity cutoff goes stale in a long-running process — confirm intended.
    queryset = UserModel.objects.filter(is_active=1, last_login__gte=past_year).order_by('display_name').values()
    template_name = "users/user_list.html"
    paginate_by = 100

    def get_context_data(self, **kwargs):
        """Attach a ProfileFilter (bound to GET params when searching)."""
        context = super(MemberList, self).get_context_data(**kwargs)
        # Renamed local from ``filter`` to avoid shadowing the builtin.
        if 'display_name' in self.request.GET or 'state' in self.request.GET:
            profile_filter = ProfileFilter(self.request.GET, queryset=self.queryset)
        else:
            profile_filter = ProfileFilter()
        context['filter'] = profile_filter
        return context
member_index = MemberList.as_view()
class EditProfile(UpdateView):
    """Lets the logged-in user edit their own public profile."""
    model = UserModel
    template_name = "users/user_edit_form.html"
    form_class = PublicProfileForm

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(EditProfile, self).dispatch(*args, **kwargs)

    def get_object(self, queryset=None):
        # Always edit the requesting user, never an arbitrary pk.
        return self.request.user

    def get_context_data(self, **kwargs):
        context = super(EditProfile, self).get_context_data(**kwargs)
        # Secondary (settings) form rendered on the same page.
        context['settings_form'] = ProfileSettingsForm(instance=self.get_object())
        return context
edit_profile = EditProfile.as_view()
class EditProfileSettings(EditProfile):
    """Variant of EditProfile where the settings form is the primary form."""
    form_class = ProfileSettingsForm

    def get_context_data(self, **kwargs):
        context = super(EditProfileSettings, self).get_context_data(**kwargs)
        # Swap which form is "primary" relative to EditProfile.
        context['settings_form'] = self.form_class
        context['form'] = PublicProfileForm()
        return context

    def form_valid(self, form, *args, **kwargs):
        messages.success(self.request, "Your settings have been updated.")
        theme = form.cleaned_data.get('theme')
        if theme:
            # NOTE(review): mutating request.COOKIES does not persist a
            # cookie to the client; a Set-Cookie on the *response* would
            # be needed — confirm intended behavior.
            self.request.COOKIES['theme'] = theme
        # NOTE(review): super(EditProfile, ...) skips EditProfile in the
        # MRO (harmless today since EditProfile defines no form_valid,
        # but likely meant to be EditProfileSettings).
        return super(EditProfile, self).form_valid(form, *args, **kwargs)
edit_settings = EditProfileSettings.as_view()
def view_profile(request, slug='', pk=''):
    """Return the detail view for a single user.

    Looks the user up by primary key when ``pk`` is given, otherwise by
    username (``slug``). AJAX requests receive a JSON summary instead of
    the rendered template.
    """
    if pk:
        user = get_object_or_404(UserModel, id=pk)
    else:
        user = get_object_or_404(UserModel, username=slug)
    if request.is_ajax():
        # Guard against a null city: ``None + ", ..."`` raised TypeError.
        location = user.city or ''
        if user.state:
            location += ", {}".format(user.state)
        xhr_dict = {
            'name': user.display_name,
            'username': user.username,
            'joined': user.date_joined.strftime('%m/%d/%Y'),
            'location': location,
            'website': user.homepage or '',
            'profile_url': user.get_absolute_url(),
            'contact_url': reverse('contact_member', args=(user.username,)),
        }
        # BUG FIX: HttpResponse takes ``content_type``; the old ``mimetype``
        # keyword was removed in Django 1.7 and raised TypeError here
        # (this file imports django.urls, i.e. Django >= 2.0).
        return HttpResponse(json.dumps(xhr_dict),
                            content_type='application/javascript')
    return render(request, "users/user_detail.html", {'user': user})
|
en
| 0.744296
|
Renders either default user list (paginated) or search results.
To-do: split search to separate view, make pagination work better. Returns detail view for a single user
| 2.1923
| 2
|
setup.py
|
DefaltSimon/SSKJpy
| 0
|
6626514
|
# coding=utf-8
# Prefer setuptools (supports extras_require); fall back to distutils
# on minimal installations.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# with open("README.md") as rdm:
#     long_desc = rdm.read()
setup(
    name='sskjpy',
    author="DefaltSimon",
    version='0.2.1',
    license='MIT',
    packages=["sskjpy"],
    description="A Slovenian dictionary parser written in python",
    # long_description=long_desc,
    install_requires=[
        "beautifulsoup4 >= 4.4.1",
    ],
    extras_require={
        # Optional HTTP support: pip install sskjpy[requests]
        "requests": ["requests >= 2.9.1"],
    }
)
|
# coding=utf-8
# Prefer setuptools (supports extras_require); fall back to distutils
# on minimal installations.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# with open("README.md") as rdm:
#     long_desc = rdm.read()
setup(
    name='sskjpy',
    author="DefaltSimon",
    version='0.2.1',
    license='MIT',
    packages=["sskjpy"],
    description="A Slovenian dictionary parser written in python",
    # long_description=long_desc,
    install_requires=[
        "beautifulsoup4 >= 4.4.1",
    ],
    extras_require={
        # Optional HTTP support: pip install sskjpy[requests]
        "requests": ["requests >= 2.9.1"],
    }
)
|
en
| 0.558152
|
# coding=utf-8 # with open("README.md") as rdm: # long_desc = rdm.read() # long_description=long_desc,
| 1.220868
| 1
|
src/app.py
|
zayscue/christmas-list
| 0
|
6626515
|
<gh_stars>0
import uuid
import boto3
import logging
from datetime import datetime
import awsgi
from flask import (
Flask,
request,
jsonify,
)
from inputs.create_list_inputs import CreateListInputs
logging.getLogger().setLevel(logging.INFO)
db = boto3.resource("dynamodb")
app = Flask(__name__)
TABLE_NAME = "ChristmasLists"
@app.route("/")
def index():
return jsonify(status=200, message="OK")
@app.route("/api/christmas-list", methods=["POST"])
def create_list():
inputs = CreateListInputs(request)
if not inputs.validate():
app.logger.error("Invalid Create Christmas List Request")
return jsonify(errors=inputs.errors), 400
data = request.get_json()
item = {
"name": data["name"],
"id": str(uuid.uuid4()),
"created_at": str(datetime.utcnow()),
}
table = db.Table(TABLE_NAME)
table.put_item(Item=item)
app.logger.info("Created New Christmas List")
return jsonify(item), 201
@app.route("/api/christmas-list/<id>", methods=["GET"])
def get_list(id):
table = db.Table(TABLE_NAME)
get_item_response = table.get_item(Key={"id": id})
if "Item" not in get_item_response:
app.logger.error("Christmas List Not Found")
return "", 404
return jsonify(get_item_response["Item"])
def lambda_handler(event, context):
    """AWS Lambda entry point: bridge API Gateway events into the WSGI app."""
    return awsgi.response(app, event, context, base64_content_types={"image/png"})
|
import uuid
import boto3
import logging
from datetime import datetime
import awsgi
from flask import (
Flask,
request,
jsonify,
)
from inputs.create_list_inputs import CreateListInputs
logging.getLogger().setLevel(logging.INFO)
db = boto3.resource("dynamodb")
app = Flask(__name__)
TABLE_NAME = "ChristmasLists"
@app.route("/")
def index():
return jsonify(status=200, message="OK")
@app.route("/api/christmas-list", methods=["POST"])
def create_list():
inputs = CreateListInputs(request)
if not inputs.validate():
app.logger.error("Invalid Create Christmas List Request")
return jsonify(errors=inputs.errors), 400
data = request.get_json()
item = {
"name": data["name"],
"id": str(uuid.uuid4()),
"created_at": str(datetime.utcnow()),
}
table = db.Table(TABLE_NAME)
table.put_item(Item=item)
app.logger.info("Created New Christmas List")
return jsonify(item), 201
@app.route("/api/christmas-list/<id>", methods=["GET"])
def get_list(id):
table = db.Table(TABLE_NAME)
get_item_response = table.get_item(Key={"id": id})
if "Item" not in get_item_response:
app.logger.error("Christmas List Not Found")
return "", 404
return jsonify(get_item_response["Item"])
def lambda_handler(event, context):
    """AWS Lambda entry point: bridge API Gateway events into the WSGI app."""
    return awsgi.response(app, event, context, base64_content_types={"image/png"})
|
none
| 1
| 2.234045
| 2
|
|
examples/mujoco/train_ppo_gym.py
|
xinyuewang1/chainerrl
| 0
|
6626516
|
<filename>examples/mujoco/train_ppo_gym.py
"""An example of training PPO against OpenAI Gym Envs.
This script is an example of training a PPO agent against OpenAI Gym envs.
Both discrete and continuous action spaces are supported.
To solve CartPole-v0, run:
python train_ppo_gym.py --env CartPole-v0
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import chainer
from chainer import functions as F
import gym
import chainerrl
from chainerrl.agents import a3c
from chainerrl.agents import PPO
from chainerrl import experiments
from chainerrl import links
from chainerrl import misc
from chainerrl.optimizers.nonbias_weight_decay import NonbiasWeightDecay
from chainerrl import policies
class A3CFFSoftmax(chainer.ChainList, a3c.A3CModel):
    """An example of A3C feedforward softmax policy."""

    def __init__(self, ndim_obs, n_actions, hidden_sizes=(200, 200)):
        # Policy head: MLP followed by a softmax over discrete actions.
        self.pi = policies.SoftmaxPolicy(
            model=links.MLP(ndim_obs, n_actions, hidden_sizes))
        # Value head: scalar state-value estimate.
        self.v = links.MLP(ndim_obs, 1, hidden_sizes=hidden_sizes)
        # ChainList registers both heads as child links.
        super().__init__(self.pi, self.v)

    def pi_and_v(self, state):
        """Return (policy distribution, state value) for ``state``."""
        return self.pi(state), self.v(state)
class A3CFFMellowmax(chainer.ChainList, a3c.A3CModel):
    """An example of A3C feedforward mellowmax policy."""

    def __init__(self, ndim_obs, n_actions, hidden_sizes=(200, 200)):
        # Policy head: MLP with a mellowmax action distribution.
        self.pi = policies.MellowmaxPolicy(
            model=links.MLP(ndim_obs, n_actions, hidden_sizes))
        # Value head: scalar state-value estimate.
        self.v = links.MLP(ndim_obs, 1, hidden_sizes=hidden_sizes)
        # ChainList registers both heads as child links.
        super().__init__(self.pi, self.v)

    def pi_and_v(self, state):
        """Return (policy distribution, state value) for ``state``."""
        return self.pi(state), self.v(state)
class A3CFFGaussian(chainer.Chain, a3c.A3CModel):
    """An example of A3C feedforward Gaussian policy."""

    def __init__(self, obs_size, action_space,
                 n_hidden_layers=2, n_hidden_channels=64,
                 bound_mean=None):
        # ``bound_mean`` must be passed explicitly; True squashes the
        # policy mean into the action bounds.
        assert bound_mean in [False, True]
        super().__init__()
        hidden_sizes = (n_hidden_channels,) * n_hidden_layers
        with self.init_scope():
            # Gaussian policy with a state-independent diagonal covariance.
            self.pi = policies.FCGaussianPolicyWithStateIndependentCovariance(
                obs_size, action_space.low.size,
                n_hidden_layers, n_hidden_channels,
                var_type='diagonal', nonlinearity=F.tanh,
                bound_mean=bound_mean,
                min_action=action_space.low, max_action=action_space.high,
                mean_wscale=1e-2)
            # Value head: scalar state-value estimate.
            self.v = links.MLP(obs_size, 1, hidden_sizes=hidden_sizes)

    def pi_and_v(self, state):
        """Return (policy distribution, state value) for ``state``."""
        return self.pi(state), self.v(state)
def main():
import logging
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--env', type=str, default='Hopper-v2')
parser.add_argument('--arch', type=str, default='FFGaussian',
choices=('FFSoftmax', 'FFMellowmax',
'FFGaussian'))
parser.add_argument('--bound-mean', action='store_true')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 32)')
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--steps', type=int, default=10 ** 6)
parser.add_argument('--eval-interval', type=int, default=10000)
parser.add_argument('--eval-n-runs', type=int, default=10)
parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
parser.add_argument('--standardize-advantages', action='store_true')
parser.add_argument('--render', action='store_true', default=False)
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--weight-decay', type=float, default=0.0)
parser.add_argument('--demo', action='store_true', default=False)
parser.add_argument('--load', type=str, default='')
parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
parser.add_argument('--monitor', action='store_true')
parser.add_argument('--update-interval', type=int, default=2048)
parser.add_argument('--batchsize', type=int, default=64)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--entropy-coef', type=float, default=0.0)
args = parser.parse_args()
logging.basicConfig(level=args.logger_level)
# Set a random seed used in ChainerRL
misc.set_random_seed(args.seed, gpus=(args.gpu,))
args.outdir = experiments.prepare_output_dir(args, args.outdir)
def make_env(test):
env = gym.make(args.env)
# Use different random seeds for train and test envs
env_seed = 2 ** 32 - 1 - args.seed if test else args.seed
env.seed(env_seed)
# Cast observations to float32 because our model uses float32
env = chainerrl.wrappers.CastObservationToFloat32(env)
if args.monitor:
env = chainerrl.wrappers.Monitor(env, args.outdir)
if not test:
# Scale rewards (and thus returns) to a reasonable range so that
# training is easier
env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)
if args.render:
env = chainerrl.wrappers.Render(env)
return env
sample_env = gym.make(args.env)
timestep_limit = sample_env.spec.tags.get(
'wrapper_config.TimeLimit.max_episode_steps')
obs_space = sample_env.observation_space
action_space = sample_env.action_space
# Normalize observations based on their empirical mean and variance
obs_normalizer = chainerrl.links.EmpiricalNormalization(
obs_space.low.size, clip_threshold=5)
# Switch policy types accordingly to action space types
if args.arch == 'FFSoftmax':
model = A3CFFSoftmax(obs_space.low.size, action_space.n)
elif args.arch == 'FFMellowmax':
model = A3CFFMellowmax(obs_space.low.size, action_space.n)
elif args.arch == 'FFGaussian':
model = A3CFFGaussian(obs_space.low.size, action_space,
bound_mean=args.bound_mean)
opt = chainer.optimizers.Adam(alpha=args.lr, eps=1e-5)
opt.setup(model)
if args.weight_decay > 0:
opt.add_hook(NonbiasWeightDecay(args.weight_decay))
agent = PPO(model, opt,
obs_normalizer=obs_normalizer,
gpu=args.gpu,
update_interval=args.update_interval,
minibatch_size=args.batchsize, epochs=args.epochs,
clip_eps_vf=None, entropy_coef=args.entropy_coef,
standardize_advantages=args.standardize_advantages,
)
if args.load:
agent.load(args.load)
if args.demo:
env = make_env(True)
eval_stats = experiments.eval_performance(
env=env,
agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
max_episode_len=timestep_limit)
print('n_runs: {} mean: {} median: {} stdev {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
eval_stats['stdev']))
else:
# Linearly decay the learning rate to zero
def lr_setter(env, agent, value):
agent.optimizer.alpha = value
lr_decay_hook = experiments.LinearInterpolationHook(
args.steps, args.lr, 0, lr_setter)
# Linearly decay the clipping parameter to zero
def clip_eps_setter(env, agent, value):
agent.clip_eps = max(value, 1e-8)
clip_eps_decay_hook = experiments.LinearInterpolationHook(
args.steps, 0.2, 0, clip_eps_setter)
experiments.train_agent_with_evaluation(
agent=agent,
env=make_env(False),
eval_env=make_env(True),
outdir=args.outdir,
steps=args.steps,
eval_n_steps=None,
eval_n_episodes=args.eval_n_runs,
eval_interval=args.eval_interval,
train_max_episode_len=timestep_limit,
save_best_so_far_agent=False,
step_hooks=[
lr_decay_hook,
clip_eps_decay_hook,
],
)
if __name__ == '__main__':
main()
|
<filename>examples/mujoco/train_ppo_gym.py
"""An example of training PPO against OpenAI Gym Envs.
This script is an example of training a PPO agent against OpenAI Gym envs.
Both discrete and continuous action spaces are supported.
To solve CartPole-v0, run:
python train_ppo_gym.py --env CartPole-v0
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import chainer
from chainer import functions as F
import gym
import chainerrl
from chainerrl.agents import a3c
from chainerrl.agents import PPO
from chainerrl import experiments
from chainerrl import links
from chainerrl import misc
from chainerrl.optimizers.nonbias_weight_decay import NonbiasWeightDecay
from chainerrl import policies
class A3CFFSoftmax(chainer.ChainList, a3c.A3CModel):
    """An example of A3C feedforward softmax policy."""

    def __init__(self, ndim_obs, n_actions, hidden_sizes=(200, 200)):
        # Policy head: MLP followed by a softmax over discrete actions.
        self.pi = policies.SoftmaxPolicy(
            model=links.MLP(ndim_obs, n_actions, hidden_sizes))
        # Value head: scalar state-value estimate.
        self.v = links.MLP(ndim_obs, 1, hidden_sizes=hidden_sizes)
        # ChainList registers both heads as child links.
        super().__init__(self.pi, self.v)

    def pi_and_v(self, state):
        """Return (policy distribution, state value) for ``state``."""
        return self.pi(state), self.v(state)
class A3CFFMellowmax(chainer.ChainList, a3c.A3CModel):
    """An example of A3C feedforward mellowmax policy."""

    def __init__(self, ndim_obs, n_actions, hidden_sizes=(200, 200)):
        # Policy head: MLP with a mellowmax action distribution.
        self.pi = policies.MellowmaxPolicy(
            model=links.MLP(ndim_obs, n_actions, hidden_sizes))
        # Value head: scalar state-value estimate.
        self.v = links.MLP(ndim_obs, 1, hidden_sizes=hidden_sizes)
        # ChainList registers both heads as child links.
        super().__init__(self.pi, self.v)

    def pi_and_v(self, state):
        """Return (policy distribution, state value) for ``state``."""
        return self.pi(state), self.v(state)
class A3CFFGaussian(chainer.Chain, a3c.A3CModel):
    """An example of A3C feedforward Gaussian policy."""

    def __init__(self, obs_size, action_space,
                 n_hidden_layers=2, n_hidden_channels=64,
                 bound_mean=None):
        # ``bound_mean`` must be passed explicitly; True squashes the
        # policy mean into the action bounds.
        assert bound_mean in [False, True]
        super().__init__()
        hidden_sizes = (n_hidden_channels,) * n_hidden_layers
        with self.init_scope():
            # Gaussian policy with a state-independent diagonal covariance.
            self.pi = policies.FCGaussianPolicyWithStateIndependentCovariance(
                obs_size, action_space.low.size,
                n_hidden_layers, n_hidden_channels,
                var_type='diagonal', nonlinearity=F.tanh,
                bound_mean=bound_mean,
                min_action=action_space.low, max_action=action_space.high,
                mean_wscale=1e-2)
            # Value head: scalar state-value estimate.
            self.v = links.MLP(obs_size, 1, hidden_sizes=hidden_sizes)

    def pi_and_v(self, state):
        """Return (policy distribution, state value) for ``state``."""
        return self.pi(state), self.v(state)
def main():
import logging
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--env', type=str, default='Hopper-v2')
parser.add_argument('--arch', type=str, default='FFGaussian',
choices=('FFSoftmax', 'FFMellowmax',
'FFGaussian'))
parser.add_argument('--bound-mean', action='store_true')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 32)')
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--steps', type=int, default=10 ** 6)
parser.add_argument('--eval-interval', type=int, default=10000)
parser.add_argument('--eval-n-runs', type=int, default=10)
parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
parser.add_argument('--standardize-advantages', action='store_true')
parser.add_argument('--render', action='store_true', default=False)
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--weight-decay', type=float, default=0.0)
parser.add_argument('--demo', action='store_true', default=False)
parser.add_argument('--load', type=str, default='')
parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
parser.add_argument('--monitor', action='store_true')
parser.add_argument('--update-interval', type=int, default=2048)
parser.add_argument('--batchsize', type=int, default=64)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--entropy-coef', type=float, default=0.0)
args = parser.parse_args()
logging.basicConfig(level=args.logger_level)
# Set a random seed used in ChainerRL
misc.set_random_seed(args.seed, gpus=(args.gpu,))
args.outdir = experiments.prepare_output_dir(args, args.outdir)
def make_env(test):
env = gym.make(args.env)
# Use different random seeds for train and test envs
env_seed = 2 ** 32 - 1 - args.seed if test else args.seed
env.seed(env_seed)
# Cast observations to float32 because our model uses float32
env = chainerrl.wrappers.CastObservationToFloat32(env)
if args.monitor:
env = chainerrl.wrappers.Monitor(env, args.outdir)
if not test:
# Scale rewards (and thus returns) to a reasonable range so that
# training is easier
env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)
if args.render:
env = chainerrl.wrappers.Render(env)
return env
sample_env = gym.make(args.env)
timestep_limit = sample_env.spec.tags.get(
'wrapper_config.TimeLimit.max_episode_steps')
obs_space = sample_env.observation_space
action_space = sample_env.action_space
# Normalize observations based on their empirical mean and variance
obs_normalizer = chainerrl.links.EmpiricalNormalization(
obs_space.low.size, clip_threshold=5)
# Switch policy types accordingly to action space types
if args.arch == 'FFSoftmax':
model = A3CFFSoftmax(obs_space.low.size, action_space.n)
elif args.arch == 'FFMellowmax':
model = A3CFFMellowmax(obs_space.low.size, action_space.n)
elif args.arch == 'FFGaussian':
model = A3CFFGaussian(obs_space.low.size, action_space,
bound_mean=args.bound_mean)
opt = chainer.optimizers.Adam(alpha=args.lr, eps=1e-5)
opt.setup(model)
if args.weight_decay > 0:
opt.add_hook(NonbiasWeightDecay(args.weight_decay))
agent = PPO(model, opt,
obs_normalizer=obs_normalizer,
gpu=args.gpu,
update_interval=args.update_interval,
minibatch_size=args.batchsize, epochs=args.epochs,
clip_eps_vf=None, entropy_coef=args.entropy_coef,
standardize_advantages=args.standardize_advantages,
)
if args.load:
agent.load(args.load)
if args.demo:
env = make_env(True)
eval_stats = experiments.eval_performance(
env=env,
agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
max_episode_len=timestep_limit)
print('n_runs: {} mean: {} median: {} stdev {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
eval_stats['stdev']))
else:
# Linearly decay the learning rate to zero
def lr_setter(env, agent, value):
agent.optimizer.alpha = value
lr_decay_hook = experiments.LinearInterpolationHook(
args.steps, args.lr, 0, lr_setter)
# Linearly decay the clipping parameter to zero
def clip_eps_setter(env, agent, value):
agent.clip_eps = max(value, 1e-8)
clip_eps_decay_hook = experiments.LinearInterpolationHook(
args.steps, 0.2, 0, clip_eps_setter)
experiments.train_agent_with_evaluation(
agent=agent,
env=make_env(False),
eval_env=make_env(True),
outdir=args.outdir,
steps=args.steps,
eval_n_steps=None,
eval_n_episodes=args.eval_n_runs,
eval_interval=args.eval_interval,
train_max_episode_len=timestep_limit,
save_best_so_far_agent=False,
step_hooks=[
lr_decay_hook,
clip_eps_decay_hook,
],
)
# Script entry point: run the PPO training / demo driver defined above.
if __name__ == '__main__':
    main()
|
en
| 0.828952
|
An example of training PPO against OpenAI Gym Envs. This script is an example of training a PPO agent against OpenAI Gym envs. Both discrete and continuous action spaces are supported. To solve CartPole-v0, run: python train_ppo_gym.py --env CartPole-v0 # NOQA # NOQA An example of A3C feedforward softmax policy. An example of A3C feedforward mellowmax policy. An example of A3C feedforward Gaussian policy. # Set a random seed used in ChainerRL # Use different random seeds for train and test envs # Cast observations to float32 because our model uses float32 # Scale rewards (and thus returns) to a reasonable range so that # training is easier # Normalize observations based on their empirical mean and variance # Switch policy types accordingly to action space types # Linearly decay the learning rate to zero # Linearly decay the clipping parameter to zero
| 2.35332
| 2
|
plugins/dcmp/settings.py
|
qmgeng/docker-airflow
| 269
|
6626517
|
# encoding: utf-8
"""Settings for the dcmp (DAG creation manager) Airflow plugin.

All values are read from the Airflow configuration at import time.  Options
in the optional ``[dag_creation_manager]`` section fall back to defaults when
absent -- hence the broad try/excepts around ``configuration.get`` calls.
"""
import os
import socket
from airflow import configuration
# Task types the plugin knows how to create.
TASK_TYPES = ["bash", "hql", "python", "short_circuit", "partition_sensor", "time_sensor", "timedelta_sensor"]
AUTHENTICATE = configuration.getboolean('webserver', 'AUTHENTICATE')
BASE_URL = configuration.get('webserver', 'BASE_URL')
try:
    # Presumably a d3-style line interpolation mode for the UI charts
    # (default "basis") -- TODO confirm against the frontend code.
    DAG_CREATION_MANAGER_LINE_INTERPOLATE = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_LINE_INTERPOLATE')
except Exception as e:
    DAG_CREATION_MANAGER_LINE_INTERPOLATE = "basis"
HOSTNAME = socket.gethostname()
AIRFLOW_DAGS_FOLDER = configuration.get('core', 'DAGS_FOLDER')
DAG_CREATION_MANAGER_DEPLOYED_DAGS_FOLDER = os.path.join(AIRFLOW_DAGS_FOLDER, "deployedDags")
# Queue/pool mapping, configured as "key1:queue1|pool1,key2:queue2|pool2,...".
# NOTE: no fallback here -- a missing or malformed option raises at import.
DAG_CREATION_MANAGER_QUEUE_POOL_STR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_QUEUE_POOL')
DAG_CREATION_MANAGER_QUEUE_POOL = []
for queue_pool_str in DAG_CREATION_MANAGER_QUEUE_POOL_STR.split(","):
    key, queue_pool = queue_pool_str.split(":")
    queue, pool = queue_pool.split("|")
    DAG_CREATION_MANAGER_QUEUE_POOL.append((key, (queue, pool)))
DAG_CREATION_MANAGER_QUEUE_POOL_DICT = dict(DAG_CREATION_MANAGER_QUEUE_POOL)
# DAG categories, comma-separated; "default" is always first and duplicates
# are dropped while preserving configured order.
DAG_CREATION_MANAGER_CATEGORY_STR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_CATEGORY')
DAG_CREATION_MANAGER_CATEGORYS = ["default"]
for category in DAG_CREATION_MANAGER_CATEGORY_STR.split(","):
    if category not in DAG_CREATION_MANAGER_CATEGORYS:
        DAG_CREATION_MANAGER_CATEGORYS.append(category)
# Task categories with display colors, configured as "key:color,...";
# the reserved "default" key is skipped.
DAG_CREATION_MANAGER_TASK_CATEGORY_STR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_TASK_CATEGORY')
DAG_CREATION_MANAGER_TASK_CATEGORYS = []
for task_category in DAG_CREATION_MANAGER_TASK_CATEGORY_STR.split(","):
    key, color = task_category.split(":")
    if key != "default":
        DAG_CREATION_MANAGER_TASK_CATEGORYS.append((key, color))
# Mapping from queue-pool keys to MR queues, "key:mr_queue,..."
# (presumably MapReduce queues -- verify against consumers of this dict).
DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE_STR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE')
DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE = [queue_pool_mr_str.split(":") for queue_pool_mr_str in DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE_STR.split(",")]
DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE_DICT = dict(DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE)
# Default notification e-mail addresses (comma-separated, blanks dropped).
DAG_CREATION_MANAGER_DEFAULT_EMAIL_STR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_DEFAULT_EMAIL')
DAG_CREATION_MANAGER_DEFAULT_EMAILS = [email.strip() for email in DAG_CREATION_MANAGER_DEFAULT_EMAIL_STR.split(",") if email.strip()]
try:
    # Whether new/changed DAGs require approval before deployment.
    DAG_CREATION_MANAGER_NEED_APPROVER = configuration.getboolean('dag_creation_manager', 'DAG_CREATION_MANAGER_NEED_APPROVER')
except Exception as e:
    DAG_CREATION_MANAGER_NEED_APPROVER = False
try:
    # Whether an author may approve their own DAG.
    DAG_CREATION_MANAGER_CAN_APPROVE_SELF = configuration.getboolean('dag_creation_manager', 'DAG_CREATION_MANAGER_CAN_APPROVE_SELF')
except Exception as e:
    DAG_CREATION_MANAGER_CAN_APPROVE_SELF = True
try:
    DAG_CREATION_MANAGER_DAG_TEMPLATES_DIR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_DAG_TEMPLATES_DIR')
except Exception as e:
    # Fall back to the dag_templates directory shipped with the plugin.
    DAG_CREATION_MANAGER_DAG_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "dag_templates")
|
# encoding: utf-8
"""Settings for the dcmp (DAG creation manager) Airflow plugin.

All values are read from the Airflow configuration at import time.  Options
in the optional ``[dag_creation_manager]`` section fall back to defaults when
absent -- hence the broad try/excepts around ``configuration.get`` calls.
"""
import os
import socket
from airflow import configuration
# Task types the plugin knows how to create.
TASK_TYPES = ["bash", "hql", "python", "short_circuit", "partition_sensor", "time_sensor", "timedelta_sensor"]
AUTHENTICATE = configuration.getboolean('webserver', 'AUTHENTICATE')
BASE_URL = configuration.get('webserver', 'BASE_URL')
try:
    # Presumably a d3-style line interpolation mode for the UI charts
    # (default "basis") -- TODO confirm against the frontend code.
    DAG_CREATION_MANAGER_LINE_INTERPOLATE = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_LINE_INTERPOLATE')
except Exception as e:
    DAG_CREATION_MANAGER_LINE_INTERPOLATE = "basis"
HOSTNAME = socket.gethostname()
AIRFLOW_DAGS_FOLDER = configuration.get('core', 'DAGS_FOLDER')
DAG_CREATION_MANAGER_DEPLOYED_DAGS_FOLDER = os.path.join(AIRFLOW_DAGS_FOLDER, "deployedDags")
# Queue/pool mapping, configured as "key1:queue1|pool1,key2:queue2|pool2,...".
# NOTE: no fallback here -- a missing or malformed option raises at import.
DAG_CREATION_MANAGER_QUEUE_POOL_STR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_QUEUE_POOL')
DAG_CREATION_MANAGER_QUEUE_POOL = []
for queue_pool_str in DAG_CREATION_MANAGER_QUEUE_POOL_STR.split(","):
    key, queue_pool = queue_pool_str.split(":")
    queue, pool = queue_pool.split("|")
    DAG_CREATION_MANAGER_QUEUE_POOL.append((key, (queue, pool)))
DAG_CREATION_MANAGER_QUEUE_POOL_DICT = dict(DAG_CREATION_MANAGER_QUEUE_POOL)
# DAG categories, comma-separated; "default" is always first and duplicates
# are dropped while preserving configured order.
DAG_CREATION_MANAGER_CATEGORY_STR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_CATEGORY')
DAG_CREATION_MANAGER_CATEGORYS = ["default"]
for category in DAG_CREATION_MANAGER_CATEGORY_STR.split(","):
    if category not in DAG_CREATION_MANAGER_CATEGORYS:
        DAG_CREATION_MANAGER_CATEGORYS.append(category)
# Task categories with display colors, configured as "key:color,...";
# the reserved "default" key is skipped.
DAG_CREATION_MANAGER_TASK_CATEGORY_STR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_TASK_CATEGORY')
DAG_CREATION_MANAGER_TASK_CATEGORYS = []
for task_category in DAG_CREATION_MANAGER_TASK_CATEGORY_STR.split(","):
    key, color = task_category.split(":")
    if key != "default":
        DAG_CREATION_MANAGER_TASK_CATEGORYS.append((key, color))
# Mapping from queue-pool keys to MR queues, "key:mr_queue,..."
# (presumably MapReduce queues -- verify against consumers of this dict).
DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE_STR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE')
DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE = [queue_pool_mr_str.split(":") for queue_pool_mr_str in DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE_STR.split(",")]
DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE_DICT = dict(DAG_CREATION_MANAGER_QUEUE_POOL_MR_QUEUE)
# Default notification e-mail addresses (comma-separated, blanks dropped).
DAG_CREATION_MANAGER_DEFAULT_EMAIL_STR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_DEFAULT_EMAIL')
DAG_CREATION_MANAGER_DEFAULT_EMAILS = [email.strip() for email in DAG_CREATION_MANAGER_DEFAULT_EMAIL_STR.split(",") if email.strip()]
try:
    # Whether new/changed DAGs require approval before deployment.
    DAG_CREATION_MANAGER_NEED_APPROVER = configuration.getboolean('dag_creation_manager', 'DAG_CREATION_MANAGER_NEED_APPROVER')
except Exception as e:
    DAG_CREATION_MANAGER_NEED_APPROVER = False
try:
    # Whether an author may approve their own DAG.
    DAG_CREATION_MANAGER_CAN_APPROVE_SELF = configuration.getboolean('dag_creation_manager', 'DAG_CREATION_MANAGER_CAN_APPROVE_SELF')
except Exception as e:
    DAG_CREATION_MANAGER_CAN_APPROVE_SELF = True
try:
    DAG_CREATION_MANAGER_DAG_TEMPLATES_DIR = configuration.get('dag_creation_manager', 'DAG_CREATION_MANAGER_DAG_TEMPLATES_DIR')
except Exception as e:
    # Fall back to the dag_templates directory shipped with the plugin.
    DAG_CREATION_MANAGER_DAG_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "dag_templates")
|
en
| 0.83829
|
# encoding: utf-8
| 1.800622
| 2
|
keras_model/model.py
|
anthonyhu/domain-adapt
| 1
|
6626518
|
<gh_stars>1-10
import os
import random
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Input, Lambda, Conv2DTranspose, AveragePooling2D, Flatten, Dense, UpSampling2D
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard
from glob import glob
from .layers import conv_block, res_block, sample, instance_norm2d
from .losses import reconst_loss, kl_div_loss, compute_vgg_loss
from .dataset import load_day_and_night, create_dataset, load_batch
from .utils import convert_to_uint8, preprocess_vgg, write_log
lr_rate = 1e-4
lambda_0 = 1 # GAN coefficient
lambda_1 = 10 # Reconstruction coefficient
lambda_2 = 0.01 # KL divergence coefficient
lambda_3 = 0 # perceptual loss
def build_encoder(input_size, norm='in', activation='relu', name=''):
    """Build a VAE encoder that maps an image to (z_mean, z).

    Parameters
    ----------
    input_size: tuple(int, int, int)
        Height, width and channel count of the input image.
    norm: str
        Normalization type forwarded to the conv/res blocks.
    activation: str
        Activation forwarded to the conv/res blocks.
    name: str
        Keras model name.

    Returns
    -------
    encoder: tf.keras.model.Model
        input: image; outputs: [z_mean, z] where z is a reparameterized
        sample drawn from z_mean via the ``sample`` lambda.
    """
    image = Input(input_size)
    # Stem: wide 7x7 conv, then two strided 3x3 convs that halve the
    # spatial resolution while doubling the channel count.
    feat = conv_block(image, 64, 7, 1, norm, activation)
    for width in (128, 256):
        feat = conv_block(feat, width, 3, 2, norm, activation)
    # Four residual blocks at 256 channels; the final one is flagged as the
    # last block so it can adjust its tail (see res_block).
    for _ in range(3):
        feat = res_block(feat, 256, 3, 1, norm, activation)
    z_mean = res_block(feat, 256, 3, 1, norm, activation, last_block=True)
    z = Lambda(sample)(z_mean)
    return Model(inputs=image, outputs=[z_mean, z], name=name)
def build_decoder(latent_size, norm='in', activation='relu', name=''):
    """Build a VAE decoder that maps a latent tensor back to an image.

    Parameters
    ----------
    latent_size: tuple(int, int, int)
        Shape of the latent tensor produced by the matching encoder.
    norm: str
        Normalization type forwarded to the conv/res blocks.
    activation: str
        Activation forwarded to the conv/res blocks.
    name: str
        Keras model name.

    Returns
    -------
    decoder: tf.keras.model.Model
        input: latent tensor; output: reconstructed image in [-1, 1]
        (final conv uses a tanh activation).
    """
    z_in = Input(latent_size)
    feat = z_in
    # Mirror of the encoder: four residual blocks at 256 channels...
    for _ in range(4):
        feat = res_block(feat, 256, 3, 1, norm, activation)
    # ...then two upsample + conv stages back to full resolution.
    for width in (128, 64):
        feat = UpSampling2D(2)(feat)
        feat = conv_block(feat, width, 5, 1, norm, activation)
    image = conv_block(feat, 3, 7, 1, norm='none', activation='tanh', bias=True)
    return Model(inputs=z_in, outputs=image, name=name)
# TODO: make discriminator multi-scale
def build_discriminator(input_size, norm='none', activation='lrelu', name=''):
    """Build a patch-style convolutional discriminator.

    Parameters
    ----------
    input_size: tuple(int, int, int)
        Height, width and channel count of the input image.
    norm: str
        Normalization type forwarded to the conv blocks.
    activation: str
        Activation forwarded to the conv blocks.
    name: str
        Keras model name.

    Returns
    -------
    discriminator: tf.keras.model.Model
        input: image; outputs: flattened per-location soft labels.
    """
    image = Input(input_size)
    feat = image
    # Four strided 3x3 convs, doubling channels each time (64 -> 512).
    for width in (64, 128, 256, 512):
        feat = conv_block(feat, width, 3, 2, norm, activation)
    # 1x1 conv to a single channel, then flatten to one score per location.
    logits = conv_block(feat, 1, 1, 1, norm='none', activation='none')
    scores = Flatten()(logits)
    discriminator = Model(inputs=image, outputs=scores, name=name)
    discriminator.summary()
    return discriminator
def build_vgg16(image_size, name):
    """Build a frozen VGG16 feature extractor with instance-normalized output.

    Pipeline: VGG preprocessing -> ImageNet VGG16 (no top) -> instance norm.
    Input: image in [-1, 1].
    """
    shape = (*image_size, 3)
    backbone = tf.keras.applications.vgg16.VGG16(include_top=False, weights='imagenet',
                                                 input_tensor=None, input_shape=shape)
    image = tf.keras.layers.Input(shape)
    preprocessed = tf.keras.layers.Lambda(preprocess_vgg)(image)
    normalized = tf.keras.layers.Lambda(instance_norm2d)(backbone(preprocessed))
    extractor = tf.keras.models.Model(image, normalized, name=name)
    # Feature extractor only -- never trained.
    extractor.trainable = False
    return extractor
def build_controller(input_size, name):
    """Build a small convolutional regressor producing one scalar per image."""
    image = Input(input_size)
    feat = image
    # Four strided convs shrinking channels 256 -> 32 while downsampling.
    for width in (256, 128, 64, 32):
        feat = conv_block(feat, width, 3, 2)
    feat = AveragePooling2D(pool_size=(3, 5))(feat)
    feat = Flatten()(feat)
    hidden = Dense(16)(feat)
    score = Dense(1)(hidden)
    controller = Model(inputs=image, outputs=score, name=name)
    controller.summary()
    return controller
class CycleVAE():
    """ UNIT network (Unsupervised Image-to-Image Translation)"""
    def __init__(self, save_folder, image_size=(512, 512), checkpoint_path=''):
        # save_folder: directory for checkpoints, figures and TensorBoard logs.
        # image_size: (height, width) of images fed to the networks.
        # checkpoint_path: explicit .h5 weights file; if empty, the first
        # *.h5 found in save_folder (arbitrary glob order) is loaded instead.
        self.save_folder = save_folder
        self.image_size = image_size
        self.checkpoint_path = checkpoint_path
        self._build_model()
        if self.checkpoint_path:
            print('Loading weights from checkpoint {}'.format(os.path.basename(self.checkpoint_path)))
            self.cycle_vae.load_weights(self.checkpoint_path)
        else:
            checkpoint = glob(os.path.join(self.save_folder, '*.h5'))
            if checkpoint:
                # NOTE(review): glob order is unspecified; if several
                # checkpoints exist an arbitrary one is loaded.
                print('Loading weights from checkpoint {}'.format(os.path.basename(checkpoint[0])))
                self.cycle_vae.load_weights(checkpoint[0])
        # Validation file lists are populated lazily by train()/predict().
        self.X_day_val, self.X_night_val = None, None
    def _build_model(self):
        """
        Creates
        -------
        self.cycle_vae: keras Model
            full model
        """
        K.clear_session()
        optimizer = Adam(lr=lr_rate, beta_1=0.5, beta_2=0.999)
        # Source distribution
        self.E_source = build_encoder((*self.image_size, 3), name='E_source')
        latent_size = self.E_source.outputs[1].get_shape()[1:]
        self.G_source = build_decoder(latent_size, name='G_source')
        # Target distribution
        self.E_targ = build_encoder((*self.image_size, 3), name='E_targ')
        latent_size = self.E_targ.outputs[1].get_shape()[1:]
        self.G_targ = build_decoder(latent_size, name='G_targ')
        # VGG16 feature extractor
        #self.vgg16 = build_vgg16(self.image_size, 'vgg16')
        #self.vgg16.summary()
        # NOTE(review): self.vgg16 is never built (lines above commented
        # out); vgg_loss below would raise AttributeError if re-enabled.
        self.x_source = self.E_source.input
        self.x_targ = self.E_targ.input
        # Source reconstruction
        self.source_reconst = self.G_source(self.E_source(self.x_source)[1])
        # Target reconstruction
        self.targ_reconst = self.G_targ(self.E_targ(self.x_targ)[1])
        # Translations
        self.translation_to_source = self.G_source(self.E_targ(self.x_targ)[1])
        self.translation_to_targ = self.G_targ(self.E_source(self.x_source)[1])
        # Cycle reconst: source -> targ -> source
        self.cycle1_reconst = self.G_source(self.E_targ(self.translation_to_targ)[1])
        # Cycle reconst: targ -> source -> targ
        self.cycle2_reconst = self.G_targ(self.E_source(self.translation_to_source)[1])
        # GANs
        # Build and compile discriminators
        self.D_source = build_discriminator((*self.image_size, 3), name='D_source')
        self.D_targ = build_discriminator((*self.image_size, 3), name='D_targ')
        self.D_source.compile(optimizer=optimizer, loss='mse', loss_weights=[lambda_0], metrics=['binary_accuracy'])
        self.D_targ.compile(optimizer=optimizer, loss='mse', loss_weights=[lambda_0], metrics=['binary_accuracy'])
        # set discriminator weights to False
        # (freezing after the D_* compiles: the discriminators stay trainable
        # in their own compiled models, frozen only inside cycle_vae below)
        self.D_source.trainable = False
        self.D_targ.trainable = False
        valid_source = self.D_source(self.translation_to_source)
        valid_targ = self.D_targ(self.translation_to_targ)
        inputs = [self.x_source, self.x_targ]
        outputs = [valid_source, valid_targ]
        self.cycle_vae = Model(inputs=inputs, outputs=outputs, name='cycle_vae')
        # The loss terms below close over the symbolic tensors built above;
        # y_true/y_pred are ignored everywhere except in gan_loss.
        def in_domain_reconst_loss(y_true, y_pred):
            r_source_loss = lambda_1 * reconst_loss(self.x_source, self.source_reconst)
            r_targ_loss = lambda_1 * reconst_loss(self.x_targ, self.targ_reconst)
            return r_source_loss + r_targ_loss
        def in_domain_kl_loss(y_true, y_pred):
            kl_source_loss = lambda_2 * kl_div_loss(self.E_source.outputs[0])
            kl_targ_loss = lambda_2 * kl_div_loss(self.E_targ.outputs[0])
            return kl_source_loss + kl_targ_loss
        def cyclic_loss(y_true, y_pred):
            cyclic_1_loss = lambda_1 * reconst_loss(self.x_source, self.cycle1_reconst)
            cyclic_2_loss = lambda_1 * reconst_loss(self.x_targ, self.cycle2_reconst)
            return cyclic_1_loss + cyclic_2_loss
        def kl_cyclic_loss(y_true, y_pred):
            kl_cyclic_1_loss = lambda_2 * kl_div_loss(self.E_source(self.translation_to_source)[0])
            kl_cyclic_2_loss = lambda_2 * kl_div_loss(self.E_targ(self.translation_to_targ)[0])
            return kl_cyclic_1_loss + kl_cyclic_2_loss
        def gan_loss(y_true, y_pred):
            # Least-squares GAN objective against the frozen discriminators.
            gan_loss1 = lambda_0 * K.mean(K.square(y_true - self.D_source(self.translation_to_source)))
            gan_loss2 = lambda_0 * K.mean(K.square(y_true - self.D_targ(self.translation_to_targ)))
            return gan_loss1 + gan_loss2
        def vgg_loss(y_true, y_pred):
            # Perceptual loss; disabled (lambda_3 == 0, self.vgg16 not built).
            vgg_loss1 = lambda_3 * compute_vgg_loss(self.vgg16(self.x_source), self.vgg16(self.translation_to_targ))
            vgg_loss2 = lambda_3 * compute_vgg_loss(self.vgg16(self.x_targ), self.vgg16(self.translation_to_source))
            return vgg_loss1 + vgg_loss2
        # Loss function
        def cycle_vae_loss(y_true, y_pred):
            """
            Returns
            -------
            cycle_vae_loss: tf.Tensor
                L2 distance + KL divergence
            """
            # In-domain reconst loss
            # In-domain KL loss
            # Cyclic loss
            # Cyclic KL loss
            # GAN loss
            # Perceptual loss
            total_loss = (in_domain_reconst_loss(y_true, y_pred)
                          + in_domain_kl_loss(y_true, y_pred)
                          + cyclic_loss(y_true, y_pred)
                          + kl_cyclic_loss(y_true, y_pred)
                          + gan_loss(y_true, y_pred)
                          #+ vgg_loss(y_true, y_pred)
                          )
            return total_loss
        def dummy_loss(y_true, y_pred):
            # Placeholder loss for the second output: all generator terms are
            # already folded into cycle_vae_loss on the first output.
            return K.zeros(1)
        self.cycle_vae.compile(optimizer, loss=[cycle_vae_loss, dummy_loss],
                               metrics=[in_domain_reconst_loss, in_domain_kl_loss, cyclic_loss,
                                        kl_cyclic_loss, gan_loss, dummy_loss])#, vgg_loss])
        self.cycle_vae.summary()
    def train(self, epochs=10, batch_size=1, print_interval=50):
        """Alternate discriminator and generator updates over the day/night
        data; save weights whenever the epoch-end validation loss improves."""
        # Tensorboard callback
        train_callback = TensorBoard(os.path.join(self.save_folder, 'train'))
        train_callback.set_model(self.cycle_vae)
        val_callback = TensorBoard(os.path.join(self.save_folder, 'val'))
        val_callback.set_model(self.cycle_vae)
        metrics_names = ['discri_loss_source', 'discri_accuracy_source', 'discri_loss_targ', 'discri_accuracy_targ',
                         'g_total_loss', 'g_reconst', 'g_kl', 'g_cyclic_reconst', 'g_cyclic_kl', 'g_gan', 'g_vgg']
        # Dataset
        X_day_train, X_night_train = load_day_and_night('train')
        self.X_day_val, self.X_night_val = load_day_and_night('val')
        n_train = len(X_day_train)
        n_val = len(self.X_day_val)
        print('Training examples: {}'.format(n_train))
        print('Validation examples: {}'.format(n_val))
        # Label arrays matching the discriminator's flattened output size.
        discri_dim = self.D_source.output.get_shape()[1]
        valid = np.ones((batch_size, discri_dim))
        fake = np.zeros((batch_size, discri_dim))
        val_loss_min = float('inf')
        iter_nb = 0
        for e in range(epochs):
            n_iter = n_train // batch_size
            indices = np.random.permutation(n_train)
            for i in range(n_iter):
                # NOTE(review): sampling from the full permutation each step
                # (rather than slicing it) means images can repeat within an
                # epoch; the commented line below was the slicing variant.
                idx = np.random.choice(indices, size=batch_size, replace=False)
                #indices[i*batch_size : (i+1)*batch_size]
                source_batch = load_batch(X_day_train[idx], self.image_size)
                targ_batch = load_batch(X_night_train[idx], self.image_size)
                # Discriminator
                d_loss_source, d_loss_targ = self.discri_train_on_batch(source_batch, targ_batch, valid, fake)
                # Generator
                g_loss = self.cycle_vae.train_on_batch([source_batch, targ_batch], [valid, valid])
                if i%print_interval == 0:
                    print('Iteration {:4d}: Training loss:'.format(i))
                    logs = self.get_logs(d_loss_source, d_loss_targ, g_loss)
                    print('Discri source loss: {:.2f}, accuracy: {:.1f}%'.format(logs[0], logs[1]))
                    print('Discri targ loss: {:.2f}, accuracy: {:.1f}%'.format(logs[2], logs[3]))
                    print('Gen loss: {:.2f}'.format(logs[4]))
                    print('Reconst: {:.2f}, kl: {:.2f}, cyclic_reconst: {:.2f}, cyclic_kl: {:.2f},gan: {:.2f}, vgg: {:.2f}'\
                          .format(*logs[5:]))
                    write_log(train_callback, metrics_names, logs, iter_nb)
                if i%100 == 0:
                    # Dump qualitative translation figures every 100 steps.
                    self.predict(e + 1, iter=iter_nb, save_fig=True)
                    print('Figure saved.')
                iter_nb += 1
            # Calculate validation loss
            # NOTE(review): this loop calls train_on_batch, so it updates the
            # weights on validation data; test_on_batch is likely intended.
            val_loss = 0.0
            n_iter_val = n_val // batch_size
            indices = np.arange(n_val)
            logs = np.zeros(len(metrics_names))
            for i in range(n_iter_val):
                idx = indices[i * batch_size: (i + 1) * batch_size]
                source_batch = load_batch(self.X_day_val[idx], self.image_size)
                targ_batch = load_batch(self.X_night_val[idx], self.image_size)
                # Discriminator
                d_loss_source, d_loss_targ = self.discri_train_on_batch(source_batch, targ_batch, valid, fake)
                # Generator
                g_loss = self.cycle_vae.train_on_batch([source_batch, targ_batch], [valid, valid])
                logs += self.get_logs(d_loss_source, d_loss_targ, g_loss)
                val_loss += g_loss[0]
            val_loss /= n_iter_val
            logs /= n_iter_val
            print('\n Epoch {} - Validation loss: {:.2f}'.format(e + 1, val_loss))
            print('Discri source loss: {:.2f}, accuracy: {:.1f}%'.format(logs[0], logs[1]))
            print('Discri targ loss: {:.2f}, accuracy: {:.1f}%'.format(logs[2], logs[3]))
            print('Gen loss: {:.2f}'.format(logs[4]))
            print('Reconst: {:.2f}, kl: {:.2f}, cyclic_reconst: {:.2f}, cyclic_kl: {:.2f},gan: {:.2f}, vgg: {:.2f}' \
                  .format(*logs[5:]))
            write_log(val_callback, metrics_names, logs, iter_nb)
            if val_loss < val_loss_min:
                val_loss_min = val_loss
                print('Saving model\n')
                weights_filename = os.path.join(self.save_folder, 'cycle_vae.epoch{:02d}-val_loss{:.2f}.h5'.format(e+1, val_loss))
                self.cycle_vae.save_weights(weights_filename)
    def predict(self, epoch=0, save_fig=True, iter=0, n_examples=3):
        """Render translation + cycle-reconstruction figures for a few random
        validation images of each domain (day -> night, then night -> day).

        NOTE(review): the ``iter`` parameter shadows the ``iter`` builtin;
        it is only used to name the output files.
        """
        if self.X_day_val is None or self.X_night_val is None:
            self.X_day_val, self.X_night_val = load_day_and_night('val')
        # Inference uses z_mean (output [0]) rather than a sample.
        f_source_to_targ = K.function([self.x_source, K.learning_phase()], [self.G_targ(self.E_source(self.x_source)[0])])
        # TODO: might need to use the mus instead of the zs
        f_cycle1_reconst = K.function([self.x_source, K.learning_phase()], [self.cycle1_reconst])
        for i, filename in enumerate(np.random.choice(self.X_day_val, n_examples)):
            img = load_batch([filename], self.image_size)
            # Second function argument 0 = test-time learning phase.
            x_targ = f_source_to_targ([img, 0])[0]
            x_cycle1_reconst = f_cycle1_reconst([img, 0])[0]
            plt.figure(figsize=(15, 5))
            plt.subplot(131)
            plt.imshow(convert_to_uint8(img)[0])
            plt.title('Original')
            plt.subplot(132)
            plt.imshow(convert_to_uint8(x_targ[0]))
            plt.title('Translated image')
            plt.subplot(133)
            plt.imshow(convert_to_uint8(x_cycle1_reconst[0]))
            plt.title('Cycle reconstruction')
            fig_filename = os.path.join(self.save_folder, 'epoch{:02d}-iter{:04d}-day-example{}.png'.format(epoch, iter, i))
            if save_fig:
                plt.savefig(fig_filename)
                plt.close()
            else:
                plt.show()
        f_targ_to_source = K.function([self.x_targ, K.learning_phase()], [self.G_source(self.E_targ(self.x_targ)[0])])
        # TODO: might need to use the mus instead of the zs
        f_cycle2_reconst = K.function([self.x_targ, K.learning_phase()], [self.cycle2_reconst])
        for i, filename in enumerate(np.random.choice(self.X_night_val, n_examples)):
            img = load_batch([filename], self.image_size)
            x_source = f_targ_to_source([img, 0])[0]
            x_cycle2_reconst = f_cycle2_reconst([img, 0])[0]
            plt.figure(figsize=(15, 5))
            plt.subplot(131)
            plt.imshow(convert_to_uint8(img)[0])
            plt.title('Original')
            plt.subplot(132)
            plt.imshow(convert_to_uint8(x_source[0]))
            plt.title('Translated image')
            plt.subplot(133)
            plt.imshow(convert_to_uint8(x_cycle2_reconst[0]))
            plt.title('Cycle reconstruction')
            fig_filename = os.path.join(self.save_folder, 'epoch{:02d}-iter{:04d}-night-example{}.png'.format(epoch, iter, i))
            if save_fig:
                plt.savefig(fig_filename)
                plt.close()
            else:
                plt.show()
    def discri_train_on_batch(self, source_batch, targ_batch, valid, fake):
        """Train both discriminators on one batch of real and translated
        images; return the averaged (real + fake) loss/metric pairs."""
        # Generate translations with predict() so no generator weights move.
        gen_translated_to_source = self.G_source.predict(self.E_targ.predict(targ_batch)[1])
        gen_translated_to_targ = self.G_targ.predict(self.E_source.predict(source_batch)[1])
        d_loss_source_real = self.D_source.train_on_batch(source_batch, valid)
        d_loss_source_fake = self.D_source.train_on_batch(gen_translated_to_source, fake)
        d_loss_source = 0.5 * np.add(d_loss_source_real, d_loss_source_fake)
        d_loss_targ_real = self.D_targ.train_on_batch(targ_batch, valid)
        d_loss_targ_fake = self.D_targ.train_on_batch(gen_translated_to_targ, fake)
        d_loss_targ = 0.5 * np.add(d_loss_targ_real, d_loss_targ_fake)
        return d_loss_source, d_loss_targ
    def get_logs(self, d_loss_source, d_loss_targ, g_loss):
        """Flatten discriminator and generator results into the array matching
        ``metrics_names`` in train() (accuracies converted to percent)."""
        logs = np.array([d_loss_source[0], 100 * d_loss_source[1], d_loss_targ[0], 100 * d_loss_targ[1],
                         g_loss[0], g_loss[3], g_loss[4], g_loss[5], g_loss[6], g_loss[7], g_loss[8]])
        return logs
def vae_model(image_size=(180, 320)):
    """ Compiles a VAE model (architecture from https://arxiv.org/pdf/1703.00848.pdf)
    base vae from https://github.com/keras-team/keras/blob/master/examples/variational_autoencoder.py
    """
    K.clear_session()
    # Encoder / decoder pair; the decoder consumes the encoder's latent shape.
    encoder = build_encoder((*image_size, 3), name='encoder')
    latent_size = encoder.outputs[1].get_shape()[1:]
    decoder = build_decoder(latent_size, name='decoder')
    x_in = encoder.input
    # Decode the sampled latent (output [1] of the encoder).
    x_out = decoder(encoder(x_in)[1])
    vae = Model(inputs=x_in, outputs=x_out, name='vae')

    def vae_loss(y_true, y_pred):
        """Weighted L2 reconstruction plus KL divergence on z_mean.

        Closes over the symbolic graph tensors; y_true/y_pred are unused.
        """
        reconstruction = lambda_1 * reconst_loss(x_in, x_out)
        divergence = lambda_2 * kl_div_loss(encoder.outputs[0])
        return reconstruction + divergence

    vae.compile(Adam(lr=lr_rate, beta_1=0.5, beta_2=0.999), loss=vae_loss)
    vae.summary()
    return vae
|
import os
import random
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Input, Lambda, Conv2DTranspose, AveragePooling2D, Flatten, Dense, UpSampling2D
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard
from glob import glob
from .layers import conv_block, res_block, sample, instance_norm2d
from .losses import reconst_loss, kl_div_loss, compute_vgg_loss
from .dataset import load_day_and_night, create_dataset, load_batch
from .utils import convert_to_uint8, preprocess_vgg, write_log
lr_rate = 1e-4
lambda_0 = 1 # GAN coefficient
lambda_1 = 10 # Reconstruction coefficient
lambda_2 = 0.01 # KL divergence coefficient
lambda_3 = 0 # perceptual loss
def build_encoder(input_size, norm='in', activation='relu', name=''):
    """Build a VAE encoder that maps an image to (z_mean, z).

    Parameters
    ----------
    input_size: tuple(int, int, int)
        Height, width and channel count of the input image.
    norm: str
        Normalization type forwarded to the conv/res blocks.
    activation: str
        Activation forwarded to the conv/res blocks.
    name: str
        Keras model name.

    Returns
    -------
    encoder: tf.keras.model.Model
        input: image; outputs: [z_mean, z] where z is a reparameterized
        sample drawn from z_mean via the ``sample`` lambda.
    """
    image = Input(input_size)
    # Stem: wide 7x7 conv, then two strided 3x3 convs that halve the
    # spatial resolution while doubling the channel count.
    feat = conv_block(image, 64, 7, 1, norm, activation)
    for width in (128, 256):
        feat = conv_block(feat, width, 3, 2, norm, activation)
    # Four residual blocks at 256 channels; the final one is flagged as the
    # last block so it can adjust its tail (see res_block).
    for _ in range(3):
        feat = res_block(feat, 256, 3, 1, norm, activation)
    z_mean = res_block(feat, 256, 3, 1, norm, activation, last_block=True)
    z = Lambda(sample)(z_mean)
    return Model(inputs=image, outputs=[z_mean, z], name=name)
def build_decoder(latent_size, norm='in', activation='relu', name=''):
    """Build a VAE decoder that maps a latent tensor back to an image.

    Parameters
    ----------
    latent_size: tuple(int, int, int)
        Shape of the latent tensor produced by the matching encoder.
    norm: str
        Normalization type forwarded to the conv/res blocks.
    activation: str
        Activation forwarded to the conv/res blocks.
    name: str
        Keras model name.

    Returns
    -------
    decoder: tf.keras.model.Model
        input: latent tensor; output: reconstructed image in [-1, 1]
        (final conv uses a tanh activation).
    """
    z_in = Input(latent_size)
    feat = z_in
    # Mirror of the encoder: four residual blocks at 256 channels...
    for _ in range(4):
        feat = res_block(feat, 256, 3, 1, norm, activation)
    # ...then two upsample + conv stages back to full resolution.
    for width in (128, 64):
        feat = UpSampling2D(2)(feat)
        feat = conv_block(feat, width, 5, 1, norm, activation)
    image = conv_block(feat, 3, 7, 1, norm='none', activation='tanh', bias=True)
    return Model(inputs=z_in, outputs=image, name=name)
# TODO: make discriminator multi-scale
def build_discriminator(input_size, norm='none', activation='lrelu', name=''):
    """Build a patch-style convolutional discriminator.

    Parameters
    ----------
    input_size: tuple(int, int, int)
        Height, width and channel count of the input image.
    norm: str
        Normalization type forwarded to the conv blocks.
    activation: str
        Activation forwarded to the conv blocks.
    name: str
        Keras model name.

    Returns
    -------
    discriminator: tf.keras.model.Model
        input: image; outputs: flattened per-location soft labels.
    """
    image = Input(input_size)
    feat = image
    # Four strided 3x3 convs, doubling channels each time (64 -> 512).
    for width in (64, 128, 256, 512):
        feat = conv_block(feat, width, 3, 2, norm, activation)
    # 1x1 conv to a single channel, then flatten to one score per location.
    logits = conv_block(feat, 1, 1, 1, norm='none', activation='none')
    scores = Flatten()(logits)
    discriminator = Model(inputs=image, outputs=scores, name=name)
    discriminator.summary()
    return discriminator
def build_vgg16(image_size, name):
    """Build a frozen VGG16 feature extractor with instance-normalized output.

    Pipeline: VGG preprocessing -> ImageNet VGG16 (no top) -> instance norm.
    Input: image in [-1, 1].
    """
    shape = (*image_size, 3)
    backbone = tf.keras.applications.vgg16.VGG16(include_top=False, weights='imagenet',
                                                 input_tensor=None, input_shape=shape)
    image = tf.keras.layers.Input(shape)
    preprocessed = tf.keras.layers.Lambda(preprocess_vgg)(image)
    normalized = tf.keras.layers.Lambda(instance_norm2d)(backbone(preprocessed))
    extractor = tf.keras.models.Model(image, normalized, name=name)
    # Feature extractor only -- never trained.
    extractor.trainable = False
    return extractor
def build_controller(input_size, name):
    """Build a small convolutional regressor producing one scalar per image."""
    image = Input(input_size)
    feat = image
    # Four strided convs shrinking channels 256 -> 32 while downsampling.
    for width in (256, 128, 64, 32):
        feat = conv_block(feat, width, 3, 2)
    feat = AveragePooling2D(pool_size=(3, 5))(feat)
    feat = Flatten()(feat)
    hidden = Dense(16)(feat)
    score = Dense(1)(hidden)
    controller = Model(inputs=image, outputs=score, name=name)
    controller.summary()
    return controller
class CycleVAE():
""" UNIT network (Unsupervised Image-to-Image Translation)"""
def __init__(self, save_folder, image_size=(512, 512), checkpoint_path=''):
self.save_folder = save_folder
self.image_size = image_size
self.checkpoint_path = checkpoint_path
self._build_model()
if self.checkpoint_path:
print('Loading weights from checkpoint {}'.format(os.path.basename(self.checkpoint_path)))
self.cycle_vae.load_weights(self.checkpoint_path)
else:
checkpoint = glob(os.path.join(self.save_folder, '*.h5'))
if checkpoint:
print('Loading weights from checkpoint {}'.format(os.path.basename(checkpoint[0])))
self.cycle_vae.load_weights(checkpoint[0])
self.X_day_val, self.X_night_val = None, None
def _build_model(self):
"""
Creates
-------
self.cycle_vae: keras Model
full model
"""
K.clear_session()
optimizer = Adam(lr=lr_rate, beta_1=0.5, beta_2=0.999)
# Source distribution
self.E_source = build_encoder((*self.image_size, 3), name='E_source')
latent_size = self.E_source.outputs[1].get_shape()[1:]
self.G_source = build_decoder(latent_size, name='G_source')
# Target distribution
self.E_targ = build_encoder((*self.image_size, 3), name='E_targ')
latent_size = self.E_targ.outputs[1].get_shape()[1:]
self.G_targ = build_decoder(latent_size, name='G_targ')
# VGG16 feature extractor
#self.vgg16 = build_vgg16(self.image_size, 'vgg16')
#self.vgg16.summary()
self.x_source = self.E_source.input
self.x_targ = self.E_targ.input
# Source reconstruction
self.source_reconst = self.G_source(self.E_source(self.x_source)[1])
# Target reconstruction
self.targ_reconst = self.G_targ(self.E_targ(self.x_targ)[1])
# Translations
self.translation_to_source = self.G_source(self.E_targ(self.x_targ)[1])
self.translation_to_targ = self.G_targ(self.E_source(self.x_source)[1])
# Cycle reconst: source -> targ -> source
self.cycle1_reconst = self.G_source(self.E_targ(self.translation_to_targ)[1])
# Cycle reconst: targ -> source -> targ
self.cycle2_reconst = self.G_targ(self.E_source(self.translation_to_source)[1])
# GANs
# Build and compile discriminators
self.D_source = build_discriminator((*self.image_size, 3), name='D_source')
self.D_targ = build_discriminator((*self.image_size, 3), name='D_targ')
self.D_source.compile(optimizer=optimizer, loss='mse', loss_weights=[lambda_0], metrics=['binary_accuracy'])
self.D_targ.compile(optimizer=optimizer, loss='mse', loss_weights=[lambda_0], metrics=['binary_accuracy'])
# set discriminator weights to False
self.D_source.trainable = False
self.D_targ.trainable = False
valid_source = self.D_source(self.translation_to_source)
valid_targ = self.D_targ(self.translation_to_targ)
inputs = [self.x_source, self.x_targ]
outputs = [valid_source, valid_targ]
self.cycle_vae = Model(inputs=inputs, outputs=outputs, name='cycle_vae')
def in_domain_reconst_loss(y_true, y_pred):
r_source_loss = lambda_1 * reconst_loss(self.x_source, self.source_reconst)
r_targ_loss = lambda_1 * reconst_loss(self.x_targ, self.targ_reconst)
return r_source_loss + r_targ_loss
def in_domain_kl_loss(y_true, y_pred):
kl_source_loss = lambda_2 * kl_div_loss(self.E_source.outputs[0])
kl_targ_loss = lambda_2 * kl_div_loss(self.E_targ.outputs[0])
return kl_source_loss + kl_targ_loss
def cyclic_loss(y_true, y_pred):
cyclic_1_loss = lambda_1 * reconst_loss(self.x_source, self.cycle1_reconst)
cyclic_2_loss = lambda_1 * reconst_loss(self.x_targ, self.cycle2_reconst)
return cyclic_1_loss + cyclic_2_loss
def kl_cyclic_loss(y_true, y_pred):
kl_cyclic_1_loss = lambda_2 * kl_div_loss(self.E_source(self.translation_to_source)[0])
kl_cyclic_2_loss = lambda_2 * kl_div_loss(self.E_targ(self.translation_to_targ)[0])
return kl_cyclic_1_loss + kl_cyclic_2_loss
def gan_loss(y_true, y_pred):
gan_loss1 = lambda_0 * K.mean(K.square(y_true - self.D_source(self.translation_to_source)))
gan_loss2 = lambda_0 * K.mean(K.square(y_true - self.D_targ(self.translation_to_targ)))
return gan_loss1 + gan_loss2
def vgg_loss(y_true, y_pred):
vgg_loss1 = lambda_3 * compute_vgg_loss(self.vgg16(self.x_source), self.vgg16(self.translation_to_targ))
vgg_loss2 = lambda_3 * compute_vgg_loss(self.vgg16(self.x_targ), self.vgg16(self.translation_to_source))
return vgg_loss1 + vgg_loss2
# Loss function
def cycle_vae_loss(y_true, y_pred):
"""
Returns
-------
cycle_vae_loss: tf.Tensor
L2 distance + KL divergence
"""
# In-domain reconst loss
# In-domain KL loss
# Cyclic loss
# Cyclic KL loss
# GAN loss
# Perceptual loss
total_loss = (in_domain_reconst_loss(y_true, y_pred)
+ in_domain_kl_loss(y_true, y_pred)
+ cyclic_loss(y_true, y_pred)
+ kl_cyclic_loss(y_true, y_pred)
+ gan_loss(y_true, y_pred)
#+ vgg_loss(y_true, y_pred)
)
return total_loss
def dummy_loss(y_true, y_pred):
return K.zeros(1)
self.cycle_vae.compile(optimizer, loss=[cycle_vae_loss, dummy_loss],
metrics=[in_domain_reconst_loss, in_domain_kl_loss, cyclic_loss,
kl_cyclic_loss, gan_loss, dummy_loss])#, vgg_loss])
self.cycle_vae.summary()
def train(self, epochs=10, batch_size=1, print_interval=50):
    """Alternating adversarial training loop for the UNIT cycle-VAE.

    Each iteration first updates both discriminators on a real/fake pair
    (via discri_train_on_batch), then updates the generator stack
    (self.cycle_vae) on the same batch. After every epoch one full pass
    over the validation split is made and the weights are saved whenever
    the generator validation loss improves.

    Parameters
    ----------
    epochs: int
        Number of passes over the training set.
    batch_size: int
        Mini-batch size for both discriminator and generator updates.
    print_interval: int
        Print training metrics every `print_interval` iterations.
    """
    # Tensorboard callback
    train_callback = TensorBoard(os.path.join(self.save_folder, 'train'))
    train_callback.set_model(self.cycle_vae)
    val_callback = TensorBoard(os.path.join(self.save_folder, 'val'))
    val_callback.set_model(self.cycle_vae)
    metrics_names = ['discri_loss_source', 'discri_accuracy_source', 'discri_loss_targ', 'discri_accuracy_targ',
                     'g_total_loss', 'g_reconst', 'g_kl', 'g_cyclic_reconst', 'g_cyclic_kl', 'g_gan', 'g_vgg']
    # Dataset
    X_day_train, X_night_train = load_day_and_night('train')
    self.X_day_val, self.X_night_val = load_day_and_night('val')
    n_train = len(X_day_train)
    n_val = len(self.X_day_val)
    print('Training examples: {}'.format(n_train))
    print('Validation examples: {}'.format(n_val))
    # Soft-label targets shaped like the discriminator output.
    discri_dim = self.D_source.output.get_shape()[1]
    valid = np.ones((batch_size, discri_dim))
    fake = np.zeros((batch_size, discri_dim))
    val_loss_min = float('inf')
    iter_nb = 0
    for e in range(epochs):
        n_iter = n_train // batch_size
        indices = np.random.permutation(n_train)
        for i in range(n_iter):
            # NOTE(review): batches are re-sampled from the full index set
            # each iteration, so an "epoch" does not necessarily visit
            # every training example exactly once.
            idx = np.random.choice(indices, size=batch_size, replace=False)
            #indices[i*batch_size : (i+1)*batch_size]
            source_batch = load_batch(X_day_train[idx], self.image_size)
            targ_batch = load_batch(X_night_train[idx], self.image_size)
            # Discriminator
            d_loss_source, d_loss_targ = self.discri_train_on_batch(source_batch, targ_batch, valid, fake)
            # Generator
            g_loss = self.cycle_vae.train_on_batch([source_batch, targ_batch], [valid, valid])
            if i%print_interval == 0:
                print('Iteration {:4d}: Training loss:'.format(i))
                logs = self.get_logs(d_loss_source, d_loss_targ, g_loss)
                print('Discri source loss: {:.2f}, accuracy: {:.1f}%'.format(logs[0], logs[1]))
                print('Discri targ loss: {:.2f}, accuracy: {:.1f}%'.format(logs[2], logs[3]))
                print('Gen loss: {:.2f}'.format(logs[4]))
                print('Reconst: {:.2f}, kl: {:.2f}, cyclic_reconst: {:.2f}, cyclic_kl: {:.2f},gan: {:.2f}, vgg: {:.2f}'\
                      .format(*logs[5:]))
                write_log(train_callback, metrics_names, logs, iter_nb)
            if i%100 == 0:
                # Dump qualitative translation examples periodically.
                self.predict(e + 1, iter=iter_nb, save_fig=True)
                print('Figure saved.')
            iter_nb += 1
        # Calculate validation loss
        val_loss = 0.0
        n_iter_val = n_val // batch_size
        indices = np.arange(n_val)
        logs = np.zeros(len(metrics_names))
        for i in range(n_iter_val):
            idx = indices[i * batch_size: (i + 1) * batch_size]
            source_batch = load_batch(self.X_day_val[idx], self.image_size)
            targ_batch = load_batch(self.X_night_val[idx], self.image_size)
            # Discriminator
            # NOTE(review): train_on_batch is used on the validation split
            # too, so validation also updates weights — confirm intended.
            d_loss_source, d_loss_targ = self.discri_train_on_batch(source_batch, targ_batch, valid, fake)
            # Generator
            g_loss = self.cycle_vae.train_on_batch([source_batch, targ_batch], [valid, valid])
            logs += self.get_logs(d_loss_source, d_loss_targ, g_loss)
            val_loss += g_loss[0]
        val_loss /= n_iter_val
        logs /= n_iter_val
        print('\n Epoch {} - Validation loss: {:.2f}'.format(e + 1, val_loss))
        print('Discri source loss: {:.2f}, accuracy: {:.1f}%'.format(logs[0], logs[1]))
        print('Discri targ loss: {:.2f}, accuracy: {:.1f}%'.format(logs[2], logs[3]))
        print('Gen loss: {:.2f}'.format(logs[4]))
        print('Reconst: {:.2f}, kl: {:.2f}, cyclic_reconst: {:.2f}, cyclic_kl: {:.2f},gan: {:.2f}, vgg: {:.2f}' \
              .format(*logs[5:]))
        write_log(val_callback, metrics_names, logs, iter_nb)
        if val_loss < val_loss_min:
            # Checkpoint only on improvement of generator validation loss.
            val_loss_min = val_loss
            print('Saving model\n')
            weights_filename = os.path.join(self.save_folder, 'cycle_vae.epoch{:02d}-val_loss{:.2f}.h5'.format(e+1, val_loss))
            self.cycle_vae.save_weights(weights_filename)
def predict(self, epoch=0, save_fig=True, iter=0, n_examples=3):
    """Translate random validation images in both directions and plot them.

    For `n_examples` day images: show original, day->night translation and
    day->night->day cycle reconstruction; then the symmetric triplet for
    night images. Figures are saved under self.save_folder or shown.

    Parameters
    ----------
    epoch: int
        Epoch number used in the output file name.
    save_fig: bool
        If True save the figure to disk, otherwise call plt.show().
    iter: int
        Iteration number used in the output file name.
        (Name shadows the builtin `iter`; kept for interface stability.)
    n_examples: int
        Number of examples sampled per direction.
    """
    if self.X_day_val is None or self.X_night_val is None:
        self.X_day_val, self.X_night_val = load_day_and_night('val')
    # K.function with learning_phase()=0 runs the graph in inference mode.
    f_source_to_targ = K.function([self.x_source, K.learning_phase()], [self.G_targ(self.E_source(self.x_source)[0])])
    # TODO: might need to use the mus instead of the zs
    f_cycle1_reconst = K.function([self.x_source, K.learning_phase()], [self.cycle1_reconst])
    for i, filename in enumerate(np.random.choice(self.X_day_val, n_examples)):
        img = load_batch([filename], self.image_size)
        x_targ = f_source_to_targ([img, 0])[0]
        x_cycle1_reconst = f_cycle1_reconst([img, 0])[0]
        plt.figure(figsize=(15, 5))
        plt.subplot(131)
        plt.imshow(convert_to_uint8(img)[0])
        plt.title('Original')
        plt.subplot(132)
        plt.imshow(convert_to_uint8(x_targ[0]))
        plt.title('Translated image')
        plt.subplot(133)
        plt.imshow(convert_to_uint8(x_cycle1_reconst[0]))
        plt.title('Cycle reconstruction')
        fig_filename = os.path.join(self.save_folder, 'epoch{:02d}-iter{:04d}-day-example{}.png'.format(epoch, iter, i))
        if save_fig:
            plt.savefig(fig_filename)
            plt.close()
        else:
            plt.show()
    # Same procedure in the opposite direction (night -> day).
    f_targ_to_source = K.function([self.x_targ, K.learning_phase()], [self.G_source(self.E_targ(self.x_targ)[0])])
    # TODO: might need to use the mus instead of the zs
    f_cycle2_reconst = K.function([self.x_targ, K.learning_phase()], [self.cycle2_reconst])
    for i, filename in enumerate(np.random.choice(self.X_night_val, n_examples)):
        img = load_batch([filename], self.image_size)
        x_source = f_targ_to_source([img, 0])[0]
        x_cycle2_reconst = f_cycle2_reconst([img, 0])[0]
        plt.figure(figsize=(15, 5))
        plt.subplot(131)
        plt.imshow(convert_to_uint8(img)[0])
        plt.title('Original')
        plt.subplot(132)
        plt.imshow(convert_to_uint8(x_source[0]))
        plt.title('Translated image')
        plt.subplot(133)
        plt.imshow(convert_to_uint8(x_cycle2_reconst[0]))
        plt.title('Cycle reconstruction')
        fig_filename = os.path.join(self.save_folder, 'epoch{:02d}-iter{:04d}-night-example{}.png'.format(epoch, iter, i))
        if save_fig:
            plt.savefig(fig_filename)
            plt.close()
        else:
            plt.show()
def discri_train_on_batch(self, source_batch, targ_batch, valid, fake):
    """Run one discriminator update per domain on real and fake batches.

    Fake samples are cross-domain translations: each batch is encoded in
    its own domain and decoded by the other domain's generator (using the
    sampled latent at output index 1). Each discriminator takes one
    train_on_batch step on real images and one on fakes; the two
    (loss, metrics) arrays are averaged.

    Returns
    -------
    (source_stats, targ_stats): averaged loss/metric arrays for D_source
    and D_targ respectively.
    """
    # Cross-domain translations used as "fake" samples.
    fake_source = self.G_source.predict(self.E_targ.predict(targ_batch)[1])
    fake_targ = self.G_targ.predict(self.E_source.predict(source_batch)[1])
    # Source-domain discriminator: real step, then fake step.
    real_stats = self.D_source.train_on_batch(source_batch, valid)
    fake_stats = self.D_source.train_on_batch(fake_source, fake)
    source_stats = np.add(real_stats, fake_stats) * 0.5
    # Target-domain discriminator: same two-step update.
    real_stats = self.D_targ.train_on_batch(targ_batch, valid)
    fake_stats = self.D_targ.train_on_batch(fake_targ, fake)
    targ_stats = np.add(real_stats, fake_stats) * 0.5
    return source_stats, targ_stats
def get_logs(self, d_loss_source, d_loss_targ, g_loss):
    """Flatten the two discriminator and the generator metric arrays into
    one log vector matching `metrics_names` in train().

    Accuracies (index 1 of each discriminator array) are converted from
    fractions to percentages; generator entries keep total loss (index 0)
    and the per-term metrics at indices 3..8.
    """
    discri_part = [d_loss_source[0], 100 * d_loss_source[1],
                   d_loss_targ[0], 100 * d_loss_targ[1]]
    gen_part = [g_loss[0]] + [g_loss[k] for k in range(3, 9)]
    return np.array(discri_part + gen_part)
def vae_model(image_size=(180, 320)):
    """ Compiles a VAE model (architecture from https://arxiv.org/pdf/1703.00848.pdf)
    base vae from https://github.com/keras-team/keras/blob/master/examples/variational_autoencoder.py

    Parameters
    ----------
    image_size: tuple(int, int)
        Spatial size (height, width) of the RGB input images.

    Returns
    -------
    vae: keras Model, compiled with the combined reconstruction + KL loss.
    """
    K.clear_session()
    # Encoder
    encoder = build_encoder((*image_size, 3), name='encoder')
    # Output index 1 is assumed to be the sampled latent z, index 0 the
    # mean — matches how the decoder and the KL term below use them.
    latent_size = encoder.outputs[1].get_shape()[1:]
    decoder = build_decoder(latent_size, name='decoder')
    inputs = encoder.input
    outputs = decoder(encoder(inputs)[1])
    vae = Model(inputs=inputs, outputs=outputs, name='vae')
    # Loss function
    def vae_loss(y_true, y_pred):
        """
        TF1-style closure loss: it ignores y_true/y_pred and instead uses
        the graph tensors captured from the enclosing scope.

        Returns
        -------
        vae_loss: tf.Tensor
            L2 distance + KL divergence (weighted by the module-level
            lambda_1 / lambda_2 coefficients)
        """
        r_loss = lambda_1 * reconst_loss(inputs, outputs)
        z_mean = encoder.outputs[0]
        kl_loss = lambda_2 * kl_div_loss(z_mean)
        return r_loss + kl_loss
    optimizer = Adam(lr=lr_rate, beta_1=0.5, beta_2=0.999)
    vae.compile(optimizer, loss=vae_loss)
    vae.summary()
    return vae
|
en
| 0.537007
|
# GAN coefficient # Reconstruction coefficient # KL divergence coefficient # perceptual loss Parameters ---------- input_size: tuple(int, int, int) name: str Returns ------- encoder: tf.keras.model.Model input: image outputs: z_mean and z (a sample) Parameters ---------- latent_size: tuple(int, int, int) name: str Returns ------- decoder: tf.keras.model.Model input: latent tensor output: reconstructed image # TODO: make discriminator multi-scale Parameters ---------- input_size: tuple(int, int, int) name: str Returns ------- discriminator: tf.keras.model.Model input: image outputs: soft labels vgg16 preprocessing -> vgg16 model -> instance norm Input: Image in [-1, 1] UNIT network (Unsupervised Image-to-Image Translation) Creates ------- self.cycle_vae: keras Model full model # Source distribution # Target distribution # VGG16 feature extractor #self.vgg16 = build_vgg16(self.image_size, 'vgg16') #self.vgg16.summary() # Source reconstruction # Target reconstruction # Translations # Cycle reconst: source -> targ -> source # Cycle reconst: targ -> source -> targ # GANs # Build and compile discriminators # set discriminator weights to False # Loss function Returns ------- cycle_vae_loss: tf.Tensor L2 distance + KL divergence # In-domain reconst loss # In-domain KL loss # Cyclic loss # Cyclic KL loss # GAN loss # Perceptual loss #+ vgg_loss(y_true, y_pred) #, vgg_loss]) # Tensorboard callback # Dataset #indices[i*batch_size : (i+1)*batch_size] # Discriminator # Generator # Calculate validation loss # Discriminator # Generator # TODO: might need to use the mus instead of the zs # TODO: might need to use the mus instead of the zs Compiles a VAE model (architecture from https://arxiv.org/pdf/1703.00848.pdf) base vae from https://github.com/keras-team/keras/blob/master/examples/variational_autoencoder.py # Encoder # Loss function Parameters ---------- inputs: tf.Tensor (-1, h, w, 3) outputs: tf.Tensor (-1, h, w, 3) Returns ------- vae_loss: tf.Tensor L2 distance + KL 
divergence
| 2.302594
| 2
|
blogs/xmlload/xmlload.py
|
wietze/bigquery-oreilly-book
| 365
|
6626519
|
<filename>blogs/xmlload/xmlload.py
import argparse
import logging
import apache_beam as beam
def parse_into_dict(xmlfile):
    """Parse an XML file into xmltodict's nested dict representation.

    The file is opened in binary mode so the XML parser honours the
    encoding declared in the document's own ``<?xml ... encoding=...?>``
    prolog, instead of decoding with the platform-default text encoding
    (which can raise UnicodeDecodeError for UTF-8 XML on some systems).

    Parameters
    ----------
    xmlfile: str
        Path to the XML file.

    Returns
    -------
    dict
        Nested dict mirroring the XML structure.
    """
    import xmltodict  # local import keeps the Beam lambda easily picklable
    with open(xmlfile, 'rb') as ifp:
        return xmltodict.parse(ifp.read())
# BigQuery schema for one <Order> record: scalar columns plus a nested
# NULLABLE RECORD for the shipping details. Everything is typed STRING
# because the values arrive as XML text; ShippedDate is an optional
# attribute in the source data.
table_schema = {
    'fields': [
        {'name' : 'CustomerID', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'EmployeeID', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'OrderDate', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'RequiredDate', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'ShipInfo', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [
            {'name' : 'ShipVia', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'Freight', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipName', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipAddress', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipCity', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipRegion', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipPostalCode', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipCountry', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShippedDate', 'type': 'STRING', 'mode': 'NULLABLE'},
        ]},
    ]
}
# The @ symbol is not allowed as a column name in BigQuery
def cleanup(x):
    """Return a copy of an order dict with BigQuery-safe column names.

    xmltodict maps XML attributes to keys prefixed with '@', which is not
    a legal BigQuery column name, so the optional '@ShippedDate' attribute
    is renamed to 'ShippedDate'. The input dict is not mutated.

    Fix: removed the stray debug ``print(y)`` which wrote every record to
    stdout on every pipeline element.
    """
    import copy
    y = copy.deepcopy(x)
    if '@ShippedDate' in x['ShipInfo']:  # optional attribute
        y['ShipInfo']['ShippedDate'] = x['ShipInfo']['@ShippedDate']
        del y['ShipInfo']['@ShippedDate']
    return y
def get_orders(doc):
    """Lazily yield each BigQuery-ready <Order> row of a parsed document."""
    yield from map(cleanup, doc['Root']['Orders']['Order'])
def run(argv=None):
    """Build and run the Beam pipeline that loads orders.xml.

    The pipeline parses the XML file into a dict, emits one cleaned-up
    record per <Order>, and writes the records either to a text file or
    to a BigQuery table depending on the --output argument.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--output',
        required=True,
        help=(
            'Specify text file orders.txt or BigQuery table project:dataset.table '))
    known_args, pipeline_args = parser.parse_known_args(argv)
    with beam.Pipeline(argv=pipeline_args) as p:
        orders = (p
                  | 'files' >> beam.Create(['orders.xml'])
                  | 'parse' >> beam.Map(lambda filename: parse_into_dict(filename))
                  | 'orders' >> beam.FlatMap(lambda doc: get_orders(doc)))
        # Sink chosen by naming convention: *.txt -> text file, anything
        # else is treated as a BigQuery table reference.
        if '.txt' in known_args.output:
            orders | 'totxt' >> beam.io.WriteToText(known_args.output)
        else:
            orders | 'tobq' >> beam.io.WriteToBigQuery(known_args.output,
                                                       schema=table_schema,
                                                       write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND, #WRITE_TRUNCATE
                                                       create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED)
if __name__ == '__main__':
    # INFO-level logging so Beam pipeline progress is visible on the console.
    logging.getLogger().setLevel(logging.INFO)
    run()
|
<filename>blogs/xmlload/xmlload.py
import argparse
import logging
import apache_beam as beam
def parse_into_dict(xmlfile):
    """Parse an XML file into xmltodict's nested dict representation.

    The file is opened in binary mode so the XML parser honours the
    encoding declared in the document's own ``<?xml ... encoding=...?>``
    prolog, instead of decoding with the platform-default text encoding
    (which can raise UnicodeDecodeError for UTF-8 XML on some systems).

    Parameters
    ----------
    xmlfile: str
        Path to the XML file.

    Returns
    -------
    dict
        Nested dict mirroring the XML structure.
    """
    import xmltodict  # local import keeps the Beam lambda easily picklable
    with open(xmlfile, 'rb') as ifp:
        return xmltodict.parse(ifp.read())
# BigQuery schema for one <Order> record: scalar columns plus a nested
# NULLABLE RECORD for the shipping details. Everything is typed STRING
# because the values arrive as XML text; ShippedDate is an optional
# attribute in the source data.
table_schema = {
    'fields': [
        {'name' : 'CustomerID', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'EmployeeID', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'OrderDate', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'RequiredDate', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'ShipInfo', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [
            {'name' : 'ShipVia', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'Freight', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipName', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipAddress', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipCity', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipRegion', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipPostalCode', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipCountry', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShippedDate', 'type': 'STRING', 'mode': 'NULLABLE'},
        ]},
    ]
}
# The @ symbol is not allowed as a column name in BigQuery
def cleanup(x):
    """Return a copy of an order dict with BigQuery-safe column names.

    xmltodict maps XML attributes to keys prefixed with '@', which is not
    a legal BigQuery column name, so the optional '@ShippedDate' attribute
    is renamed to 'ShippedDate'. The input dict is not mutated.

    Fix: removed the stray debug ``print(y)`` which wrote every record to
    stdout on every pipeline element.
    """
    import copy
    y = copy.deepcopy(x)
    if '@ShippedDate' in x['ShipInfo']:  # optional attribute
        y['ShipInfo']['ShippedDate'] = x['ShipInfo']['@ShippedDate']
        del y['ShipInfo']['@ShippedDate']
    return y
def get_orders(doc):
    """Lazily yield each BigQuery-ready <Order> row of a parsed document."""
    yield from map(cleanup, doc['Root']['Orders']['Order'])
def run(argv=None):
    """Build and run the Beam pipeline that loads orders.xml.

    The pipeline parses the XML file into a dict, emits one cleaned-up
    record per <Order>, and writes the records either to a text file or
    to a BigQuery table depending on the --output argument.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--output',
        required=True,
        help=(
            'Specify text file orders.txt or BigQuery table project:dataset.table '))
    known_args, pipeline_args = parser.parse_known_args(argv)
    with beam.Pipeline(argv=pipeline_args) as p:
        orders = (p
                  | 'files' >> beam.Create(['orders.xml'])
                  | 'parse' >> beam.Map(lambda filename: parse_into_dict(filename))
                  | 'orders' >> beam.FlatMap(lambda doc: get_orders(doc)))
        # Sink chosen by naming convention: *.txt -> text file, anything
        # else is treated as a BigQuery table reference.
        if '.txt' in known_args.output:
            orders | 'totxt' >> beam.io.WriteToText(known_args.output)
        else:
            orders | 'tobq' >> beam.io.WriteToBigQuery(known_args.output,
                                                       schema=table_schema,
                                                       write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND, #WRITE_TRUNCATE
                                                       create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED)
if __name__ == '__main__':
    # INFO-level logging so Beam pipeline progress is visible on the console.
    logging.getLogger().setLevel(logging.INFO)
    run()
|
en
| 0.746644
|
# The @ symbol is not allowed as a column name in BigQuery # optional attribute #WRITE_TRUNCATE
| 2.473955
| 2
|
hw/hw_train_and_test.py
|
vihari/CSD
| 41
|
6626520
|
<gh_stars>10-100
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=missing-docstring
import argparse
import os
import sys
import time
import tqdm
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from matplotlib.pyplot import imread
import pickle
import input_data
import lipitk
import nhcd
import rmnist
import pickle
import datasets
# Basic model parameters as external flags.
FLAGS = None  # populated by argparse in the __main__ block
DEF = 0
# Dataset-dependent constants, filled in once --dataset is parsed.
NETWORK, NUM_CLASSES, IMAGE_PIXELS, IMAGE_SIZE = None, None, None, None
# (images, labels, domain-ids) splits, loaded in the __main__ block.
train, in_dev, dev, test = None, None, None, None
# Cap per-process GPU memory so several runs can share one device.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
config = tf.ConfigProto(gpu_options=gpu_options)
config.gpu_options.allow_growth = True
def placeholder_inputs(batch_size=None):
    """Create the graph input placeholders for images, labels, domain ids.

    The batch dimension is left as None so the same graph serves both
    training mini-batches and full-split evaluation; the batch_size
    argument is accepted for interface compatibility but unused.
    """
    image_ph = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))
    label_ph = tf.placeholder(tf.int32, shape=(None))
    domain_ph = tf.placeholder(tf.int32, shape=(None))
    return image_ph, label_ph, domain_ph
def fill_feed_dict(x_ys, images_pl, labels_pl, domain_pl, batch_size=None):
    """Sample a random mini-batch and build the feed_dict for one step.

    Parameters
    ----------
    x_ys: tuple(np.ndarray, np.ndarray, np.ndarray)
        (images, labels, domain ids) for one data split.
    images_pl, labels_pl, domain_pl: tf.placeholder
        Graph placeholders to feed.
    batch_size: int or None
        None -> use FLAGS.batch_size; -1 -> use the whole split.

    Returns
    -------
    dict mapping each placeholder to its sampled batch.
    """
    xs, ys, us = x_ys
    if batch_size is None:
        batch_size = FLAGS.batch_size
    elif batch_size == -1:
        batch_size = len(xs)

    def next_batch(_xs, _ys, _us, bs):
        # Sample without replacement; shrink bs if the split is smaller.
        if len(_xs) < bs:
            bs = len(_xs)
        idxs = np.random.choice(len(_xs), size=bs, replace=False)
        return _xs[idxs], _ys[idxs], _us[idxs]

    # BUG FIX: previously FLAGS.batch_size was passed here, which silently
    # ignored the batch_size argument (including the -1 full-batch mode).
    images_feed, labels_feed, domain_feed = next_batch(xs, ys, us, batch_size)
    # Use the actual sampled length: next_batch may have shrunk the batch.
    images_feed = np.reshape(images_feed, [len(images_feed), IMAGE_PIXELS])
    feed_dict = {
        images_pl: images_feed,
        labels_pl: labels_feed,
        domain_pl: domain_feed,
    }
    return feed_dict
def eprint(*args):
    """Print the space-joined str() of all args to stderr, newline-terminated."""
    print(" ".join(map(str, args)), file=sys.stderr)
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            domain_placeholder,
            data_set):
    """Run one pass over data_set and report micro-averaged precision@1.

    Iterates the split in order in FLAGS.batch_size chunks, feeding each
    chunk through the `eval_correct` op (count of correct predictions).

    Parameters
    ----------
    sess: tf.Session
    eval_correct: tf.Tensor counting correct top-1 predictions per batch.
    images_placeholder, labels_placeholder, domain_placeholder: graph inputs.
    data_set: (images, labels, domain-ids) arrays for one split.

    Returns
    -------
    (precision, true_count): fraction correct over the examples actually
    evaluated, and the raw correct count.
    """
    bs = FLAGS.batch_size
    # And run one epoch of eval.
    true_count = 0  # Counts the number of correct predictions.
    steps_per_epoch = max(1, (len(data_set[0]) // bs))
    # NOTE(review): num_examples is steps*bs, but the final chunk may be
    # shorter (_mx clamp below), so precision can be slightly understated.
    num_examples = steps_per_epoch * bs
    xs, ys, us = data_set
    for step in xrange(steps_per_epoch):
        _mx = min((step+1)*bs, len(data_set[0]))
        idxs = np.arange(step*bs, _mx, dtype=np.int32)
        _xs, _ys, _us = xs[idxs], ys[idxs], us[idxs]
        _xs = np.reshape(_xs, [(_mx - (step*bs)), IMAGE_PIXELS])
        feed_dict = {images_placeholder: _xs,
                     labels_placeholder: _ys,
                     domain_placeholder: _us}
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    precision = float(true_count) / num_examples
    eprint('Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
           (num_examples, true_count, precision))
    return precision, true_count
def do_eval_macro(sess,
                  eval_correct,
                  images_placeholder,
                  labels_placeholder,
                  domain_placeholder,
                  data_set):
    """Macro-averaged evaluation: run do_eval per domain and average.

    Splits data_set by its domain-id column (data_set[-1]) and averages
    the per-domain precision and correct counts.

    Returns
    -------
    (precision, true_count): mean per-domain precision and mean number of
    correct predictions across domains.
    """
    agg_prec, agg_corr = 0, 0
    num = 0
    # do_eval reads FLAGS.batch_size and a small domain needs a smaller
    # batch, but that shrink must not leak into later training/evaluation.
    orig_batch_size = FLAGS.batch_size
    try:
        for dom in np.unique(data_set[-1]):
            idxs = np.where(np.equal(data_set[-1], dom))
            data = (data_set[0][idxs], data_set[1][idxs], data_set[2][idxs])
            # Shrink only for this domain's evaluation (relative to the
            # original size, not to a previously shrunken value).
            FLAGS.batch_size = min(orig_batch_size, len(data[0]))
            prec, corr = do_eval(sess, eval_correct, images_placeholder,
                                 labels_placeholder, domain_placeholder, data)
            agg_prec += prec
            agg_corr += corr
            num += 1
    finally:
        # BUG FIX: previously the shrunken batch size persisted globally,
        # silently reducing the batch size of all subsequent steps.
        FLAGS.batch_size = orig_batch_size
    precision, true_count = agg_prec/num, agg_corr/num
    eprint('Num domains: %d Num correct: %d Precision @ 1: %0.04f' %
           (num, true_count, precision))
    return precision, true_count
def run_simple():
    """Train and evaluate the plain (no-experts) classification baseline.

    Builds a fresh graph with a shared feature extractor and a single
    dense softmax layer, trains with a constant learning rate, and every
    1000 steps checkpoints and evaluates on train/in-domain-dev/dev/test.

    Returns
    -------
    (in_dev_prec, best_dev, best_test): last in-domain dev precision, and
    the dev/test precision at the best-dev step.
    """
    SEED = FLAGS.seed
    np.random.seed(SEED)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        tf.set_random_seed(SEED)
        ph_lr = tf.placeholder(tf.float32, shape=[], name='learning_rate')
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, domain_placeholder = placeholder_inputs()
        final_layer = tf.layers.Dense(NUM_CLASSES, kernel_initializer=tf.random_normal_initializer(0, 0.05))
        # Training graph (is_training=True) and a weight-shared eval graph.
        with tf.variable_scope(''):
            reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=True)
            logits = final_layer(reprs)
        with tf.variable_scope('', reuse=True):
            reprs_for_eval = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=False)
            logits_for_eval = final_layer(reprs_for_eval)
        loss = lipitk.loss(logits, labels_placeholder, num_classes=NUM_CLASSES)
        train_op = lipitk.training(loss, ph_lr)
        eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
        init = tf.global_variables_initializer()
        # Create a saver for writing training checkpoints.
        _vars = tf.all_variables()
        saver = tf.train.Saver(var_list=_vars)
        sess = tf.Session(config=config)
        sess.run(init)
        best_dev, best_test = -1, -1
        best_dev_abs, best_test_abs = -1, -1
        nepochs = FLAGS.nepochs
        num_steps = (nepochs*len(train[0]))//FLAGS.batch_size
        nsteps_per_epoch = len(train[0])/FLAGS.batch_size
        start_lr = FLAGS.learning_rate
        for step in tqdm.tqdm(xrange(num_steps)):
            start_time = time.time()
            feed_dict = fill_feed_dict(train,
                                       images_placeholder,
                                       labels_placeholder,
                                       domain_placeholder)
            np_lr = start_lr  # constant learning rate throughout
            feed_dict[ph_lr] = np_lr
            _, np_loss, np_logits = sess.run([train_op, loss, logits], feed_dict = feed_dict)
            # Checkpoint and evaluate every 1000 steps and at the end.
            if (step + 1) % 1000 == 0 or (step + 1) == num_steps:
                eprint ("Loss: ", np_loss)
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                eprint('Training Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        domain_placeholder,
                        train)
                # Evaluate against the dev set.
                in_dev_prec, in_dev_corr = do_eval(sess,
                                                   eval_correct,
                                                   images_placeholder,
                                                   labels_placeholder,
                                                   domain_placeholder,
                                                   in_dev)
                dev_prec, dev_corr = do_eval(sess,
                                             eval_correct,
                                             images_placeholder,
                                             labels_placeholder,
                                             domain_placeholder,
                                             dev)
                test_prec, test_corr = do_eval(sess,
                                               eval_correct,
                                               images_placeholder,
                                               labels_placeholder,
                                               domain_placeholder,
                                               test)
                # Model selection: keep test accuracy at the best dev step.
                if dev_prec >= best_dev:
                    best_dev, best_dev_abs = dev_prec, dev_corr
                    best_test, best_test_abs = test_prec, test_corr
                print ("test prec for best dev, test acc: %f, test acc: %f" % (best_dev, best_test))
    return in_dev_prec, best_dev, best_test
def run_training():
    """Train and evaluate the CSD (common-specific decomposition) model.

    Builds the bottleneck model with per-domain embeddings; the total loss
    mixes the specialized head loss (loss1) and common head loss (loss2)
    via FLAGS.lmbda, plus FLAGS.alpha * reg_loss. Checkpoints at the
    best-dev step.

    Returns
    -------
    (in_dev_acc, best_acc, best_test): last in-domain dev accuracy and the
    dev/test accuracy at the best-dev step.
    """
    SEED = FLAGS.seed
    np.random.seed(SEED)
    emb_dim = FLAGS.emb_dim
    # Domain ids are assumed to be contiguous 0..max over all splits.
    num_domains = max([np.max(train[2]), np.max(dev[2]), np.max(test[2])]) + 1
    tf.reset_default_graph()
    with tf.Graph().as_default():
        tf.set_random_seed(SEED)
        ph_lr = tf.placeholder(tf.float32, name="learning_rate", shape=[])
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, domain_placeholder = placeholder_inputs(FLAGS.batch_size)
        # Layer norm shared between the train and eval branches.
        LN = tf.keras.layers.LayerNormalization(axis=1)
        with tf.variable_scope(''):
            reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=True)
            reprs = LN(reprs)
            logits1, logits2, reg_loss, common_var, specialized_common_wt, _e = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
        with tf.variable_scope('', reuse=True):
            reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=False)
            reprs = LN(reprs)
            # Eval uses the common (second) head only.
            _, logits_for_eval, _, _, _, _ = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
        loss1 = lipitk.loss(logits1, labels_placeholder, num_classes=NUM_CLASSES)
        loss2 = lipitk.loss(logits2, labels_placeholder, num_classes=NUM_CLASSES)
        # Default combination; overwritten below when lmbda > 0 (so for
        # lmbda == 0 this reduces to the common-head loss alone).
        loss = (FLAGS.lmbda * loss1) + loss2
        if FLAGS.lmbda > 0:
            loss = FLAGS.lmbda*loss1 + (1 - FLAGS.lmbda)*loss2
            loss += FLAGS.alpha*reg_loss
            if FLAGS.lmbda > 1:
                loss /= FLAGS.lmbda
        global_step = tf.contrib.framework.get_or_create_global_step()
        increment_global_step = tf.assign(global_step, global_step + 1)
        train_op = lipitk.training(loss, ph_lr)
        eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
        init = tf.global_variables_initializer()
        # Create a saver for writing training checkpoints.
        _vars = tf.global_variables()
        saver = tf.train.Saver(var_list=_vars, max_to_keep=20)
        sess = tf.Session(config=config)
        sess.run(init)
        best_acc, best_test = -1, -1
        nepochs = FLAGS.nepochs
        nsteps = (nepochs*len(train[0]))//FLAGS.batch_size
        nsteps_per_epoch = len(train[0])/FLAGS.batch_size
        start_lr = FLAGS.learning_rate
        for step in tqdm.tqdm(xrange(nsteps)):
            start_time = time.time()
            feed_dict = fill_feed_dict(train,
                                       images_placeholder,
                                       labels_placeholder,
                                       domain_placeholder)
            np_lr = start_lr  # constant learning rate throughout
            feed_dict[ph_lr] = np_lr
            _, np_loss, _ = sess.run([train_op, loss, increment_global_step], feed_dict = feed_dict)
            # NOTE(review): this second run recomputes the losses on the
            # post-update weights purely for logging.
            all_losses = sess.run([loss1, loss2, reg_loss], feed_dict=feed_dict)
            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == nsteps:
                print ("Loss: ", np_loss)
                print ("Losses: ", all_losses)
                print ("Common wt: ", sess.run(common_var))
                print ("Specialized common wt: ", sess.run(specialized_common_wt))
                print ("Emb matrix: ", sess.run(_e)[:5])
                # Evaluate against the training set.
                eprint('Training Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        domain_placeholder,
                        train)
                # Evaluate against the dev set.
                in_dev_acc, _ = do_eval(sess,
                                        eval_correct,
                                        images_placeholder,
                                        labels_placeholder,
                                        domain_placeholder,
                                        in_dev)
                dev_acc, _ = do_eval(sess,
                                     eval_correct,
                                     images_placeholder,
                                     labels_placeholder,
                                     domain_placeholder,
                                     dev)
                test_acc, _ = do_eval(sess,
                                      eval_correct,
                                      images_placeholder,
                                      labels_placeholder,
                                      domain_placeholder,
                                      test)
                # Model selection: checkpoint only at the best-dev step.
                if dev_acc >= best_acc:
                    best_acc = dev_acc
                    best_test = test_acc
                    checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_file, global_step=step)
                print ("Best in-domain dev acc: %f test: %f" % (best_acc, best_test))
    return in_dev_acc, best_acc, best_test
def run_cg(cgpp=False):
    """Train and evaluate the CrossGrad baseline (or CG++ when cgpp=True).

    Delegates graph construction to lipitk.cg / lipitk.cgpp, which return
    the training loss, eval logits and an optional debug tensor.

    Parameters
    ----------
    cgpp: bool
        If True use lipitk.cgpp instead of lipitk.cg.

    Returns
    -------
    (in_dev_acc, best_acc, best_test): last in-domain dev accuracy and the
    dev/test accuracy at the best-dev step.
    """
    SEED = FLAGS.seed
    np.random.seed(SEED)
    emb_dim = FLAGS.emb_dim
    num_domains = np.max(train[2]) + 1
    tf.reset_default_graph()
    with tf.Graph().as_default():
        tf.set_random_seed(SEED)
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, domain_placeholder = placeholder_inputs(
            FLAGS.batch_size)
        ph_lr = tf.placeholder(tf.float32, name="learning_rate")
        if not cgpp:
            cg_fn = lipitk.cg
        else:
            cg_fn = lipitk.cgpp
        # Training branch and a weight-shared inference branch.
        with tf.variable_scope('', ):
            loss, _, debug_print = cg_fn(images_placeholder, labels_placeholder, domain_placeholder, image_size=IMAGE_SIZE, is_training=True, network=NETWORK, num_classes=NUM_CLASSES, num_domains=num_domains, FLAGS=FLAGS)
        with tf.variable_scope('', reuse=True):
            _, logits_for_eval, __ = cg_fn(images_placeholder, labels_placeholder, domain_placeholder, image_size=IMAGE_SIZE, is_training=False, network=NETWORK, num_classes=NUM_CLASSES, num_domains=num_domains, FLAGS=FLAGS)
        train_op = lipitk.training(loss, ph_lr)
        eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
        init = tf.global_variables_initializer()
        # Create a saver for writing training checkpoints.
        _vars = tf.all_variables()
        saver = tf.train.Saver(var_list=_vars, max_to_keep=20)
        sess = tf.Session(config=config)
        sess.run(init)
        best_acc, best_test = -1, -1
        nepochs = FLAGS.nepochs
        nsteps = (nepochs*len(train[0]))//FLAGS.batch_size
        for step in tqdm.tqdm(xrange(nsteps)):
            start_time = time.time()
            feed_dict = fill_feed_dict(train,
                                       images_placeholder,
                                       labels_placeholder,
                                       domain_placeholder)
            lr = FLAGS.learning_rate  # constant learning rate
            feed_dict[ph_lr] = lr
            _, np_loss = sess.run([train_op, loss], feed_dict = feed_dict)
            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == nsteps:
                print ("Loss: ", np_loss)
                if debug_print is not None:
                    np_dp = sess.run(debug_print, feed_dict=feed_dict)
                    print ("****Debug:****")
                    print (np_dp)
                # Evaluate against the training set.
                eprint('Training Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        domain_placeholder,
                        train)
                # Evaluate against the dev set.
                in_dev_acc, _ = do_eval(sess,
                                        eval_correct,
                                        images_placeholder,
                                        labels_placeholder,
                                        domain_placeholder,
                                        in_dev)
                dev_acc, _ = do_eval(sess,
                                     eval_correct,
                                     images_placeholder,
                                     labels_placeholder,
                                     domain_placeholder,
                                     dev)
                test_acc, _ = do_eval(sess,
                                      eval_correct,
                                      images_placeholder,
                                      labels_placeholder,
                                      domain_placeholder,
                                      test)
                # Model selection: checkpoint only at the best-dev step.
                if dev_acc >= best_acc:
                    best_acc = dev_acc
                    best_test = test_acc
                    checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_file, global_step=step)
        print ("Best in-domain ind dev acc: %f dev acc: %f test: %f" % (in_dev_acc, best_acc, best_test))
    return in_dev_acc, best_acc, best_test
def main(_):
    """Run the selected method for seeds 0..2 and report mean/std accuracy.

    Dispatch: --simple runs the no-experts baseline (run_training with
    lmbda forced to 0), --cg runs CrossGrad, otherwise the full CSD model.

    Fix: removed the dead ``seed = 0`` initialization (immediately
    overwritten by the loop variable) and the commented-out run_simple call.
    """
    in_dev_accs, dev_accs, test_accs = [], [], []
    for seed in range(3):
        FLAGS.seed = seed
        np.random.seed(seed)
        if FLAGS.simple:
            # lmbda=0 disables the specialized-component loss, reducing
            # run_training to the simple baseline.
            FLAGS.lmbda = 0
            in_dev_acc, dev_acc, test_acc = run_training()
        elif FLAGS.cg:
            in_dev_acc, dev_acc, test_acc = run_cg()
        else:
            in_dev_acc, dev_acc, test_acc = run_training()
        in_dev_accs.append(in_dev_acc)
        dev_accs.append(dev_acc)
        test_accs.append(test_acc)
    # Cross-seed summary: mean (std) for each split.
    print("InD Val, Val, test acc: %0.4f (%0.4f), %0.4f (%0.4f), %0.4f (%0.4f)" %
          (np.mean(in_dev_accs), np.std(in_dev_accs),
           np.mean(dev_accs), np.std(dev_accs),
           np.mean(test_accs), np.std(test_accs)))
if __name__ == '__main__':
    # Command-line interface; unknown flags are forwarded to tf.app.run.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataset',
        type=str,
        default='lipitk',
        help='Dataset to evaluate on.'
    )
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=1e-3,
        help='Initial learning rate.'
    )
    parser.add_argument(
        '--seed',
        type=int,
        default=0,
        help='Random seed.'
    )
    parser.add_argument(
        '--num_train',
        type=int,
        default=-1,
        help='Number of training domains'
    )
    parser.add_argument(
        '--max_steps',
        type=int,
        default=15000,
        help='Number of steps to run trainer.'
    )
    parser.add_argument(
        '--batch_size',
        type=int,
        default=128,
        help='Batch size.  Must divide evenly into the dataset sizes.'
    )
    parser.add_argument(
        '--nepochs',
        type=int,
        default=100,
        help='Number of epochs'
    )
    parser.add_argument(
        '--init_ckpt',
        type=str,
        default=None,
        help='Path to initializing checkpoint'
    )
    parser.add_argument(
        '--emb_dim',
        type=int,
        default=2,
        help='Number of basis vectors'
    )
    parser.add_argument(
        '--lmbda',
        type=float,
        default=.5,
        help='Loss specific component weight'
    )
    parser.add_argument(
        '--alpha',
        type=float,
        default=1,
        help='Coeff for reg. loss'
    )
    parser.add_argument(
        '--simple',
        action='store_true',
        help='Trains and evaluates a simple baseline with no experts.'
    )
    parser.add_argument(
        '--cg',
        action='store_true',
        help='Evaluate the Crossgrad baseline.'
    )
    parser.add_argument(
        '--cg_eps',
        type=float,
        default=10,
        help='Step size for perturbations in CG/CG++'
    )
    parser.set_defaults(simple=False)
    FLAGS, unparsed = parser.parse_known_args()
    # Log/checkpoint directory encodes the run configuration.
    if not FLAGS.simple:
        FLAGS.log_dir = "lipitk_log/lipitktuner_nt=%d_fonts_e=%d_seed_%d" % (FLAGS.num_train, FLAGS.emb_dim, FLAGS.seed)
    else:
        FLAGS.log_dir = "lipitk_log/lipitktuner_wnorm_simple_nt=%d_fonts_e=%d_seed_%d" % (FLAGS.num_train, FLAGS.emb_dim, FLAGS.seed)
    # NOTE(review): os.system("mkdir ...") fails silently when the parent
    # is missing or the dir exists — os.makedirs(..., exist_ok=True) would
    # be more robust.
    os.system("mkdir %s" % FLAGS.log_dir)
    # Select the dataset and fill the dataset-dependent module constants.
    if FLAGS.dataset == 'lipitk':
        train, in_dev, dev, test = lipitk.prepare_data(FLAGS.num_train)
        NETWORK = 'lenet'
        NUM_CLASSES = 111
        IMAGE_PIXELS = 1024
        IMAGE_SIZE = 32
    elif FLAGS.dataset == 'nhcd':
        train, in_dev, dev, test = nhcd.prepare_data()
        NETWORK = 'lenet'
        NUM_CLASSES = nhcd.NUM_CLASSES
        IMAGE_PIXELS = nhcd.IMAGE_PIXELS
        IMAGE_SIZE = nhcd.IMAGE_SIZE
    elif FLAGS.dataset == 'english-hnd':
        train, in_dev, dev, test = datasets.load_english_hnd()
        NETWORK = 'lenet'
        NUM_CLASSES = 59
        IMAGE_PIXELS = 1024
        IMAGE_SIZE = 32
    elif FLAGS.dataset == 'english-fnt':
        train, in_dev, dev, test = datasets.load_english_fnt()
        NETWORK = 'lenet'
        NUM_CLASSES = 62
        IMAGE_PIXELS = 1024
        IMAGE_SIZE = 32
    elif FLAGS.dataset == 'rmnist':
        NETWORK = 'lenet'
        NUM_CLASSES = rmnist.NUM_CLASSES
        IMAGE_PIXELS = rmnist.IMAGE_PIXELS
        IMAGE_SIZE = rmnist.IMAGE_SIZE
        train, dev, test = rmnist.prepare_data([0])
        # rmnist has no separate in-domain dev split; reuse dev.
        in_dev = dev
    tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=missing-docstring
import argparse
import os
import sys
import time
import tqdm
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from matplotlib.pyplot import imread
import pickle
import input_data
import lipitk
import nhcd
import rmnist
import pickle
import datasets
# Basic model parameters as external flags.
FLAGS = None  # populated by argparse in the __main__ block below
DEF = 0  # NOTE(review): appears unused in the visible code — confirm before removing
NETWORK, NUM_CLASSES, IMAGE_PIXELS, IMAGE_SIZE = None, None, None, None  # set per-dataset in __main__
train, in_dev, dev, test = None, None, None, None  # (images, labels, domains) splits, set in __main__
# TF1-style session config: cap per-process GPU memory at 20% and grow on demand.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
config = tf.ConfigProto(gpu_options=gpu_options)
config.gpu_options.allow_growth = True
def placeholder_inputs(batch_size=None):
  """Build the three input placeholders (images, labels, domains).

  `batch_size` is accepted for interface compatibility but the placeholders
  are created with a variable leading dimension, so any batch size feeds.
  """
  images_ph = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))
  labels_ph = tf.placeholder(tf.int32, shape=(None))
  domain_ph = tf.placeholder(tf.int32, shape=(None))
  return images_ph, labels_ph, domain_ph
def fill_feed_dict(x_ys, images_pl, labels_pl, domain_pl, batch_size=None):
  """Sample a random minibatch from `x_ys` and build a feed_dict.

  Args:
    x_ys: (images, labels, domains) arrays.
    images_pl, labels_pl, domain_pl: placeholders to feed.
    batch_size: None -> use FLAGS.batch_size; -1 -> use the whole split.

  Returns:
    A feed_dict mapping the three placeholders to the sampled arrays.
  """
  xs, ys, us = x_ys
  if batch_size is None:
    batch_size = FLAGS.batch_size
  elif batch_size == -1:
    batch_size = len(xs)
  def next_batch(_xs, _ys, _us, bs):
    # Sample without replacement; shrink the batch if the split is small.
    if len(_xs) < bs:
      bs = len(_xs)
    idxs = np.random.choice(len(_xs), size=bs, replace=False)
    return _xs[idxs], _ys[idxs], _us[idxs]
  # BUG FIX: the original ignored the computed `batch_size` and always passed
  # FLAGS.batch_size to next_batch and to the reshape, so the batch_size=-1
  # ("full data") path never worked and the reshape could fail whenever fewer
  # than FLAGS.batch_size examples were available.
  images_feed, labels_feed, domain_feed = next_batch(xs, ys, us, batch_size)
  images_feed = np.reshape(images_feed, [len(images_feed), IMAGE_PIXELS])
  feed_dict = {
      images_pl: images_feed,
      labels_pl: labels_feed,
      domain_pl: domain_feed,
  }
  return feed_dict
def eprint(*args):
  """Write the space-joined str() of each argument to stderr, newline-terminated."""
  sys.stderr.write(" ".join(map(str, args)) + "\n")
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            domain_placeholder,
            data_set):
  """Run one evaluation pass over `data_set`; return (precision, true_count).

  `data_set` is an (images, labels, domains) tuple.  Batches are taken
  sequentially (no shuffling); `eval_correct` is expected to yield the
  number of correct predictions in the fed batch.
  """
  bs = FLAGS.batch_size
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  steps_per_epoch = max(1, (len(data_set[0]) // bs))
  # NOTE(review): num_examples assumes full batches; when the split is not a
  # multiple of bs, the precision denominator is slightly off — confirm intended.
  num_examples = steps_per_epoch * bs
  xs, ys, us = data_set
  for step in xrange(steps_per_epoch):
    # The final batch may be short; _mx clamps the slice to the split length.
    _mx = min((step+1)*bs, len(data_set[0]))
    idxs = np.arange(step*bs, _mx, dtype=np.int32)
    _xs, _ys, _us = xs[idxs], ys[idxs], us[idxs]
    _xs = np.reshape(_xs, [(_mx - (step*bs)), IMAGE_PIXELS])
    feed_dict = {images_placeholder: _xs,
                 labels_placeholder: _ys,
                 domain_placeholder: _us}
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  precision = float(true_count) / num_examples
  eprint('Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
         (num_examples, true_count, precision))
  return precision, true_count
def do_eval_macro(sess,
                  eval_correct,
                  images_placeholder,
                  labels_placeholder,
                  domain_placeholder,
                  data_set):
  """Macro-averaged evaluation: average do_eval precision over the domains.

  Splits `data_set` by its domain column (last element) and returns the
  mean (precision, correct_count) over the per-domain evaluations.
  """
  agg_prec, agg_corr = 0, 0
  num = 0
  # BUG FIX: the original shrank FLAGS.batch_size inside the loop and never
  # restored it, so one small domain silently reduced the batch size for
  # every later domain and for all subsequent training/eval.  Save the
  # original value, cap per domain, and restore on exit.
  orig_bs = FLAGS.batch_size
  try:
    for dom in np.unique(data_set[-1]):
      idxs = np.where(np.equal(data_set[-1], dom))
      data = (data_set[0][idxs], data_set[1][idxs], data_set[2][idxs])
      # do_eval reads FLAGS.batch_size; cap it so a tiny domain still yields
      # at least one full batch.
      FLAGS.batch_size = min(orig_bs, len(data[0]))
      prec, corr = do_eval(sess, eval_correct, images_placeholder,
                           labels_placeholder, domain_placeholder, data)
      agg_prec += prec
      agg_corr += corr
      num += 1
  finally:
    FLAGS.batch_size = orig_bs
  precision, true_count = agg_prec/num, agg_corr/num
  eprint('Num domains: %d Num correct: %d Precision @ 1: %0.04f' %
         (num, true_count, precision))
  return precision, true_count
def run_simple():
  """Train/evaluate a plain classifier baseline (single softmax head, no experts).

  Builds a feature extractor via lipitk.get_reprs with one dense output
  layer, trains with a constant learning rate, and periodically evaluates
  on train / in_dev / dev / test.  Returns (in_dev_prec, best_dev, best_test)
  where best_test is the test precision at the best leave-out-dev precision.
  """
  SEED = FLAGS.seed
  np.random.seed(SEED)
  tf.reset_default_graph()
  with tf.Graph().as_default():
    tf.set_random_seed(SEED)
    ph_lr = tf.placeholder(tf.float32, shape=[], name='learning_rate')
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder, domain_placeholder = placeholder_inputs()
    final_layer = tf.layers.Dense(NUM_CLASSES, kernel_initializer=tf.random_normal_initializer(0, 0.05))
    with tf.variable_scope(''):
      # Training-mode graph.
      reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=True)
      logits = final_layer(reprs)
    with tf.variable_scope('', reuse=True):
      # Eval-mode graph sharing the same variables (reuse=True).
      reprs_for_eval = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=False)
      logits_for_eval = final_layer(reprs_for_eval)
    loss = lipitk.loss(logits, labels_placeholder, num_classes=NUM_CLASSES)
    train_op = lipitk.training(loss, ph_lr)
    eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
    init = tf.global_variables_initializer()
    # Create a saver for writing training checkpoints.
    _vars = tf.all_variables()
    saver = tf.train.Saver(var_list=_vars)
    sess = tf.Session(config=config)
    sess.run(init)
    best_dev, best_test = -1, -1
    best_dev_abs, best_test_abs = -1, -1
    nepochs = FLAGS.nepochs
    num_steps = (nepochs*len(train[0]))//FLAGS.batch_size
    nsteps_per_epoch = len(train[0])/FLAGS.batch_size
    start_lr = FLAGS.learning_rate
    for step in tqdm.tqdm(xrange(num_steps)):
      start_time = time.time()
      feed_dict = fill_feed_dict(train,
                                 images_placeholder,
                                 labels_placeholder,
                                 domain_placeholder)
      np_lr = start_lr  # constant learning rate — no decay schedule
      feed_dict[ph_lr] = np_lr
      _, np_loss, np_logits = sess.run([train_op, loss, logits], feed_dict = feed_dict)
      # Periodically checkpoint and evaluate on every split.
      if (step + 1) % 1000 == 0 or (step + 1) == num_steps:
        eprint ("Loss: ", np_loss)
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        eprint('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                domain_placeholder,
                train)
        # Evaluate against the dev set.
        in_dev_prec, in_dev_corr = do_eval(sess,
                                           eval_correct,
                                           images_placeholder,
                                           labels_placeholder,
                                           domain_placeholder,
                                           in_dev)
        dev_prec, dev_corr = do_eval(sess,
                                     eval_correct,
                                     images_placeholder,
                                     labels_placeholder,
                                     domain_placeholder,
                                     dev)
        test_prec, test_corr = do_eval(sess,
                                       eval_correct,
                                       images_placeholder,
                                       labels_placeholder,
                                       domain_placeholder,
                                       test)
        # Model selection on leave-out-dev precision.
        if dev_prec >= best_dev:
          best_dev, best_dev_abs = dev_prec, dev_corr
          best_test, best_test_abs = test_prec, test_corr
    print ("test prec for best dev, test acc: %f, test acc: %f" % (best_dev, best_test))
    return in_dev_prec, best_dev, best_test
def run_training():
  """Train/evaluate the bottleneck mixture-of-experts model (inference_bottleneckv2).

  Combines a shared head (loss1) and a domain-specialized head (loss2) with
  mixing weight FLAGS.lmbda plus FLAGS.alpha-weighted regularization, and
  selects on leave-out-dev accuracy.  Returns (in_dev_acc, best_acc, best_test).
  """
  SEED = FLAGS.seed
  np.random.seed(SEED)
  emb_dim = FLAGS.emb_dim
  # Domains are integer-coded; +1 converts the max id into a count.
  num_domains = max([np.max(train[2]), np.max(dev[2]), np.max(test[2])]) + 1
  tf.reset_default_graph()
  with tf.Graph().as_default():
    tf.set_random_seed(SEED)
    ph_lr = tf.placeholder(tf.float32, name="learning_rate", shape=[])
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder, domain_placeholder = placeholder_inputs(FLAGS.batch_size)
    LN = tf.keras.layers.LayerNormalization(axis=1)
    with tf.variable_scope(''):
      # Training-mode graph.
      reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=True)
      reprs = LN(reprs)
      logits1, logits2, reg_loss, common_var, specialized_common_wt, _e = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
    with tf.variable_scope('', reuse=True):
      # Eval-mode graph sharing variables; only the second logits head is evaluated.
      reprs = lipitk.get_reprs(images_placeholder, image_size=IMAGE_SIZE, network=NETWORK, is_training=False)
      reprs = LN(reprs)
      _, logits_for_eval, _, _, _, _ = lipitk.inference_bottleneckv2(reprs, domain_placeholder, num_domains=num_domains, emb_dim=emb_dim, num_classes=NUM_CLASSES)
    loss1 = lipitk.loss(logits1, labels_placeholder, num_classes=NUM_CLASSES)
    loss2 = lipitk.loss(logits2, labels_placeholder, num_classes=NUM_CLASSES)
    # NOTE(review): the next assignment is overwritten whenever lmbda > 0;
    # it only takes effect for lmbda == 0 (loss2 alone) — confirm intended.
    loss = (FLAGS.lmbda * loss1) + loss2
    if FLAGS.lmbda > 0:
      # Convex combination of the two heads plus the regularizer.
      loss = FLAGS.lmbda*loss1 + (1 - FLAGS.lmbda)*loss2
      loss += FLAGS.alpha*reg_loss
    if FLAGS.lmbda > 1:
      # Renormalize when lmbda exceeds 1 (the combination above is no longer convex).
      loss /= FLAGS.lmbda
    global_step = tf.contrib.framework.get_or_create_global_step()
    increment_global_step = tf.assign(global_step, global_step + 1)
    train_op = lipitk.training(loss, ph_lr)
    eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
    init = tf.global_variables_initializer()
    # Create a saver for writing training checkpoints.
    _vars = tf.global_variables()
    saver = tf.train.Saver(var_list=_vars, max_to_keep=20)
    sess = tf.Session(config=config)
    sess.run(init)
    best_acc, best_test = -1, -1
    nepochs = FLAGS.nepochs
    nsteps = (nepochs*len(train[0]))//FLAGS.batch_size
    nsteps_per_epoch = len(train[0])/FLAGS.batch_size
    start_lr = FLAGS.learning_rate
    for step in tqdm.tqdm(xrange(nsteps)):
      start_time = time.time()
      feed_dict = fill_feed_dict(train,
                                 images_placeholder,
                                 labels_placeholder,
                                 domain_placeholder)
      np_lr = start_lr  # constant learning rate — no decay schedule
      feed_dict[ph_lr] = np_lr
      _, np_loss, _ = sess.run([train_op, loss, increment_global_step], feed_dict = feed_dict)
      # NOTE(review): this extra run re-executes the (training-mode) graph purely
      # for logging and doubles per-step cost — confirm it is worth keeping.
      all_losses = sess.run([loss1, loss2, reg_loss], feed_dict=feed_dict)
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == nsteps:
        print ("Loss: ", np_loss)
        print ("Losses: ", all_losses)
        print ("Common wt: ", sess.run(common_var))
        print ("Specialized common wt: ", sess.run(specialized_common_wt))
        print ("Emb matrix: ", sess.run(_e)[:5])
        # Evaluate against the training set.
        eprint('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                domain_placeholder,
                train)
        # Evaluate against the dev set.
        in_dev_acc, _ = do_eval(sess,
                                eval_correct,
                                images_placeholder,
                                labels_placeholder,
                                domain_placeholder,
                                in_dev)
        dev_acc, _ = do_eval(sess,
                             eval_correct,
                             images_placeholder,
                             labels_placeholder,
                             domain_placeholder,
                             dev)
        test_acc, _ = do_eval(sess,
                              eval_correct,
                              images_placeholder,
                              labels_placeholder,
                              domain_placeholder,
                              test)
        # Checkpoint only when leave-out-dev accuracy improves.
        if dev_acc >= best_acc:
          best_acc = dev_acc
          best_test = test_acc
          checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
          saver.save(sess, checkpoint_file, global_step=step)
    print ("Best in-domain dev acc: %f test: %f" % (best_acc, best_test))
    return in_dev_acc, best_acc, best_test
def run_cg(cgpp=False):
  """Train/evaluate the CrossGrad baseline (or CG++ when cgpp=True).

  Delegates graph construction to lipitk.cg / lipitk.cgpp, trains with a
  constant learning rate, and selects on leave-out-dev accuracy.
  Returns (in_dev_acc, best_acc, best_test).
  """
  SEED = FLAGS.seed
  np.random.seed(SEED)
  emb_dim = FLAGS.emb_dim
  num_domains = np.max(train[2]) + 1
  tf.reset_default_graph()
  with tf.Graph().as_default():
    tf.set_random_seed(SEED)
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder, domain_placeholder = placeholder_inputs(
        FLAGS.batch_size)
    ph_lr = tf.placeholder(tf.float32, name="learning_rate")
    # Choose the CrossGrad variant.
    if not cgpp:
      cg_fn = lipitk.cg
    else:
      cg_fn = lipitk.cgpp
    with tf.variable_scope('', ):
      # Training-mode graph; cg_fn returns (loss, logits, debug tensors).
      loss, _, debug_print = cg_fn(images_placeholder, labels_placeholder, domain_placeholder, image_size=IMAGE_SIZE, is_training=True, network=NETWORK, num_classes=NUM_CLASSES, num_domains=num_domains, FLAGS=FLAGS)
    with tf.variable_scope('', reuse=True):
      # Eval-mode graph sharing the same variables.
      _, logits_for_eval, __ = cg_fn(images_placeholder, labels_placeholder, domain_placeholder, image_size=IMAGE_SIZE, is_training=False, network=NETWORK, num_classes=NUM_CLASSES, num_domains=num_domains, FLAGS=FLAGS)
    train_op = lipitk.training(loss, ph_lr)
    eval_correct = lipitk.evaluation(logits_for_eval, labels_placeholder)
    init = tf.global_variables_initializer()
    # Create a saver for writing training checkpoints.
    _vars = tf.all_variables()
    saver = tf.train.Saver(var_list=_vars, max_to_keep=20)
    sess = tf.Session(config=config)
    sess.run(init)
    best_acc, best_test = -1, -1
    nepochs = FLAGS.nepochs
    nsteps = (nepochs*len(train[0]))//FLAGS.batch_size
    for step in tqdm.tqdm(xrange(nsteps)):
      start_time = time.time()
      feed_dict = fill_feed_dict(train,
                                 images_placeholder,
                                 labels_placeholder,
                                 domain_placeholder)
      lr = FLAGS.learning_rate  # constant learning rate — no decay schedule
      feed_dict[ph_lr] = lr
      _, np_loss = sess.run([train_op, loss], feed_dict = feed_dict)
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == nsteps:
        print ("Loss: ", np_loss)
        if debug_print is not None:
          np_dp = sess.run(debug_print, feed_dict=feed_dict)
          print ("****Debug:****")
          print (np_dp)
        # Evaluate against the training set.
        eprint('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                domain_placeholder,
                train)
        # Evaluate against the dev set.
        in_dev_acc, _ = do_eval(sess,
                                eval_correct,
                                images_placeholder,
                                labels_placeholder,
                                domain_placeholder,
                                in_dev)
        dev_acc, _ = do_eval(sess,
                             eval_correct,
                             images_placeholder,
                             labels_placeholder,
                             domain_placeholder,
                             dev)
        test_acc, _ = do_eval(sess,
                              eval_correct,
                              images_placeholder,
                              labels_placeholder,
                              domain_placeholder,
                              test)
        # Checkpoint only when leave-out-dev accuracy improves.
        if dev_acc >= best_acc:
          best_acc = dev_acc
          best_test = test_acc
          checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
          saver.save(sess, checkpoint_file, global_step=step)
    print ("Best in-domain ind dev acc: %f dev acc: %f test: %f" % (in_dev_acc, best_acc, best_test))
    return in_dev_acc, best_acc, best_test
def main(_):
  """Run the selected experiment for seeds 0..2 and print mean/std accuracies."""
  in_dev_accs, dev_accs, test_accs = [], [], []
  for seed in range(3):
    FLAGS.seed = seed
    np.random.seed(seed)
    if FLAGS.simple:
      # The "simple" baseline is run_training with the mixing weight forced
      # to zero (run_simple is kept around but not called).
      # in_dev_acc, dev_acc, test_acc = run_simple()
      FLAGS.lmbda = 0
      accs = run_training()
    elif FLAGS.cg:
      accs = run_cg()
    else:
      accs = run_training()
    in_dev_acc, dev_acc, test_acc = accs
    in_dev_accs.append(in_dev_acc)
    dev_accs.append(dev_acc)
    test_accs.append(test_acc)
  print ( "InD Val, Val, test acc: %0.4f (%0.4f), %0.4f (%0.4f), %0.4f (%0.4f)" % (
      np.mean(in_dev_accs), np.std(in_dev_accs),
      np.mean(dev_accs), np.std(dev_accs),
      np.mean(test_accs), np.std(test_accs)))
if __name__ == '__main__':
  # Command-line configuration; unknown flags are forwarded to tf.app.run.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--dataset',
      type=str,
      default='lipitk',
      help='Dataset to evaluate on.'
  )
  parser.add_argument(
      '--learning_rate',
      type=float,
      default=1e-3,
      help='Initial learning rate.'
  )
  parser.add_argument(
      '--seed',
      type=int,
      default=0,
      help='Random seed.'
  )
  parser.add_argument(
      '--num_train',
      type=int,
      default=-1,
      help='Number of training domains'
  )
  parser.add_argument(
      '--max_steps',
      type=int,
      default=15000,
      help='Number of steps to run trainer.'
  )
  parser.add_argument(
      '--batch_size',
      type=int,
      default=128,
      help='Batch size.  Must divide evenly into the dataset sizes.'
  )
  parser.add_argument(
      '--nepochs',
      type=int,
      default=100,
      help='Number of epochs'
  )
  parser.add_argument(
      '--init_ckpt',
      type=str,
      default=None,
      help='Path to initializing checkpoint'
  )
  parser.add_argument(
      '--emb_dim',
      type=int,
      default=2,
      help='Number of basis vectors'
  )
  parser.add_argument(
      '--lmbda',
      type=float,
      default=.5,
      help='Loss specific component weight'
  )
  parser.add_argument(
      '--alpha',
      type=float,
      default=1,
      help='Coeff for reg. loss'
  )
  parser.add_argument(
      '--simple',
      action='store_true',
      help='Trains and evaluates a simple baseline with no experts.'
  )
  parser.add_argument(
      '--cg',
      action='store_true',
      help='Evaluate the Crossgrad baseline.'
  )
  parser.add_argument(
      '--cg_eps',
      type=float,
      default=10,
      help='Step size for perturbations in CG/CG++'
  )
  parser.set_defaults(simple=False)
  FLAGS, unparsed = parser.parse_known_args()
  # Log/checkpoint directory name encodes the run configuration.
  if not FLAGS.simple:
    FLAGS.log_dir = "lipitk_log/lipitktuner_nt=%d_fonts_e=%d_seed_%d" % (FLAGS.num_train, FLAGS.emb_dim, FLAGS.seed)
  else:
    FLAGS.log_dir = "lipitk_log/lipitktuner_wnorm_simple_nt=%d_fonts_e=%d_seed_%d" % (FLAGS.num_train, FLAGS.emb_dim, FLAGS.seed)
  # NOTE(review): os.makedirs(FLAGS.log_dir, exist_ok=True) would be more
  # portable than shelling out to mkdir (which also cannot create parents).
  os.system("mkdir %s" % FLAGS.log_dir)
  # Resolve the dataset-specific globals (splits, network, image geometry)
  # that the training functions above read.
  if FLAGS.dataset == 'lipitk':
    train, in_dev, dev, test = lipitk.prepare_data(FLAGS.num_train)
    NETWORK = 'lenet'
    NUM_CLASSES = 111
    IMAGE_PIXELS = 1024
    IMAGE_SIZE = 32
  elif FLAGS.dataset == 'nhcd':
    train, in_dev, dev, test = nhcd.prepare_data()
    NETWORK = 'lenet'
    NUM_CLASSES = nhcd.NUM_CLASSES
    IMAGE_PIXELS = nhcd.IMAGE_PIXELS
    IMAGE_SIZE = nhcd.IMAGE_SIZE
  elif FLAGS.dataset == 'english-hnd':
    train, in_dev, dev, test = datasets.load_english_hnd()
    NETWORK = 'lenet'
    NUM_CLASSES = 59
    IMAGE_PIXELS = 1024
    IMAGE_SIZE = 32
  elif FLAGS.dataset == 'english-fnt':
    train, in_dev, dev, test = datasets.load_english_fnt()
    NETWORK = 'lenet'
    NUM_CLASSES = 62
    IMAGE_PIXELS = 1024
    IMAGE_SIZE = 32
  elif FLAGS.dataset == 'rmnist':
    NETWORK = 'lenet'
    NUM_CLASSES = rmnist.NUM_CLASSES
    IMAGE_PIXELS = rmnist.IMAGE_PIXELS
    IMAGE_SIZE = rmnist.IMAGE_SIZE
    train, dev, test = rmnist.prepare_data([0])
    in_dev = dev  # rmnist has no separate in-domain dev split
  tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
en
| 0.83414
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=missing-docstring # pylint: disable=redefined-builtin # Basic model parameters as external flags. # Create the feed_dict for the placeholders filled with the next # `batch size` examples. # print ("Batch size: ", FLAGS.batch_size) # And run one epoch of eval. # Counts the number of correct predictions. # Generate placeholders for the images and labels. # Create a saver for writing training checkpoints. # Evaluate against the training set. # Evaluate against the dev set. # Generate placeholders for the images and labels. # Create a saver for writing training checkpoints. # Save a checkpoint and evaluate the model periodically. # Evaluate against the training set. # Evaluate against the dev set. # Generate placeholders for the images and labels. # Create a saver for writing training checkpoints. # Save a checkpoint and evaluate the model periodically. # Evaluate against the training set. # Evaluate against the dev set. # in_dev_acc, dev_acc, test_acc = run_simple()
| 1.715874
| 2
|
exercicios/020.py
|
danielcesarcs/Python_aulas_cursoemvideo
| 0
|
6626521
|
<gh_stars>0
"""Ordem dos Sorteados"""
# Uso da importação de bibliotecas
import random
a = input('Aluno 1: ')
b = input('Aluno 2: ')
c = input('Aluno 3: ')
d = input('Aluno 4: ')
lista = [a, b, c, d]
random.shuffle(lista) # Metodo vai embaralhar a lista e reorganizar
print('A ordem de sortedos é: ')
print(lista)
|
"""Ordem dos Sorteados"""
# Uso da importação de bibliotecas
import random
a = input('Aluno 1: ')
b = input('Aluno 2: ')
c = input('Aluno 3: ')
d = input('Aluno 4: ')
lista = [a, b, c, d]
random.shuffle(lista) # Metodo vai embaralhar a lista e reorganizar
print('A ordem de sortedos é: ')
print(lista)
|
pt
| 0.911461
|
Ordem dos Sorteados # Uso da importação de bibliotecas # Metodo vai embaralhar a lista e reorganizar
| 3.857491
| 4
|
torchblocks/processor/utils.py
|
deepframwork/TorchBlocks
| 1
|
6626522
|
import json
import copy
class InputExample:
    """A single training/test example for simple sequence classification.

    Attributes:
        guid: unique identifier for the example.
        texts: list form, e.g. [text_a, text_b].
        label: single label value.
        label_ids: list of labels, e.g. for multi-label or NER tasks.
        Any extra keyword arguments become attributes as well.
    """

    def __init__(self,
                 guid,
                 texts,
                 label=None,
                 label_ids=None,
                 **kwargs):
        self.guid = guid
        self.texts = texts
        self.label = label
        self.label_ids = label_ids
        # Attach any extra keyword arguments as instance attributes.
        for name, attr in kwargs.items():
            setattr(self, name, attr)

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeatures:
    """A single set of features of processor.

    Property names are the same names as the corresponding inputs to a
    model; extra keyword arguments become attributes as well.
    """

    def __init__(self,
                 input_ids=None,
                 attention_mask=None,
                 token_type_ids=None,
                 label=None,
                 label_ids=None,
                 **kwargs):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label = label
        self.label_ids = label_ids
        # Attach any extra keyword arguments as instance attributes.
        for name, attr in kwargs.items():
            setattr(self, name, attr)

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
|
import json
import copy
class InputExample:
    """A single training/test example for simple sequence classification.

    Attributes:
        guid: unique identifier for the example.
        texts: list form, e.g. [text_a, text_b].
        label: single label value.
        label_ids: list of labels, e.g. for multi-label or NER tasks.
        Any extra keyword arguments become attributes as well.
    """

    def __init__(self,
                 guid,
                 texts,
                 label=None,
                 label_ids=None,
                 **kwargs):
        self.guid = guid
        self.texts = texts
        self.label = label
        self.label_ids = label_ids
        # Attach any extra keyword arguments as instance attributes.
        for name, attr in kwargs.items():
            setattr(self, name, attr)

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeatures:
    """A single set of features of processor.

    Property names are the same names as the corresponding inputs to a
    model; extra keyword arguments become attributes as well.
    """

    def __init__(self,
                 input_ids=None,
                 attention_mask=None,
                 token_type_ids=None,
                 label=None,
                 label_ids=None,
                 **kwargs):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label = label
        self.label_ids = label_ids
        # Attach any extra keyword arguments as instance attributes.
        for name, attr in kwargs.items():
            setattr(self, name, attr)

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
|
en
| 0.732219
|
A single training/test example for simple sequence classification.
texts: 列表形式,比如 [text_a,text_b]
label: 标签信息,
label_ids: 标签列表,比如多标签,NER等任务 Serializes this instance to a Python dictionary. Serializes this instance to a JSON string. A single set of features of processor.
Property names are the same names as the corresponding inputs to a model. Serializes this instance to a Python dictionary. Serializes this instance to a JSON string.
| 3.203585
| 3
|
backend/api/battles/serializers.py
|
gabrielaleal/pokebattle
| 1
|
6626523
|
from rest_framework import serializers
from api.pokemon.serializers import PokemonSerializer, PokemonWinnerSerializer
from api.users.serializers import UserSerializer
from battles.models import Battle, BattleTeam
from battles.tasks import run_battle_and_send_result_email
from battles.utils.battle import get_round_winner
from battles.utils.email import send_opponent_battle_invitation_email
from pokemon.helpers import (
are_pokemon_positions_repeated,
pokemon_sum_valid,
repeated_pokemon_in_teams,
sort_pokemon_in_correct_position,
)
from users.models import User
from .fields import BattleUrlDefault
class SelectTeamSerializerMixin(serializers.ModelSerializer):
    """Mixin providing the three write-only position fields and shared team validation."""

    # Each chosen pokemon is assigned a slot 1-3 (write-only: not echoed back).
    pokemon_1_position = serializers.IntegerField(min_value=1, max_value=3, write_only=True)
    pokemon_2_position = serializers.IntegerField(min_value=1, max_value=3, write_only=True)
    pokemon_3_position = serializers.IntegerField(min_value=1, max_value=3, write_only=True)

    def validate(self, data):  # same as the clean method on form
        """Reject duplicate positions and teams whose point sum exceeds 600."""
        if are_pokemon_positions_repeated(data):
            raise serializers.ValidationError("Each Pokemon must have a unique position.")
        is_pokemon_sum_valid = pokemon_sum_valid(
            [data["pokemon_1"], data["pokemon_2"], data["pokemon_3"],]  # noqa
        )
        if not is_pokemon_sum_valid:
            raise serializers.ValidationError(
                "The sum of the Pokemon points can't be greater than 600."
            )
        return data
class BattleTeamSerializer(serializers.ModelSerializer):
    """Read serializer exposing a team's three pokemon with full pokemon details."""

    pokemon_1 = PokemonSerializer()
    pokemon_2 = PokemonSerializer()
    pokemon_3 = PokemonSerializer()

    class Meta:
        model = BattleTeam
        fields = ("pokemon_1", "pokemon_2", "pokemon_3")
class BattleSerializer(serializers.ModelSerializer):
    """Read serializer for a Battle: both teams, participants, winner, and round results."""

    creator_team = serializers.SerializerMethodField()
    opponent_team = serializers.SerializerMethodField()
    matches_winners = serializers.SerializerMethodField()
    creator = UserSerializer()
    opponent = UserSerializer()
    winner = UserSerializer()

    def get_creator_team(self, obj):
        """Return the creator's team data, or {} if the team was not created yet."""
        team = BattleTeam.objects.filter(battle=obj, creator=obj.creator).first()
        # if the creator team doesn't exist yet, just return user data
        if not team:
            return {}
        # if it exists, return the battle team data (which also has the creator information)
        serializer = BattleTeamSerializer(instance=team)
        return serializer.data

    def get_opponent_team(self, obj):
        """Return the opponent's team data, or {} if the team was not created yet."""
        team = BattleTeam.objects.filter(battle=obj, creator=obj.opponent).first()
        if not team:
            return {}
        serializer = BattleTeamSerializer(instance=team)
        return serializer.data

    def get_matches_winners(self, obj):
        """Return per-round winner data (positions 1-3), or {} while the battle is ongoing.

        NOTE(review): the ongoing case returns a dict while the settled case
        returns a list — confirm the frontend handles both shapes.
        """
        if obj.status == "ONGOING":
            return {}
        creator_team = BattleTeam.objects.filter(battle=obj, creator=obj.creator).first()
        opponent_team = BattleTeam.objects.filter(battle=obj, creator=obj.opponent).first()
        creator_team_pokemon = [
            creator_team.pokemon_1,
            creator_team.pokemon_2,
            creator_team.pokemon_3,
        ]
        opponent_team_pokemon = [
            opponent_team.pokemon_1,
            opponent_team.pokemon_2,
            opponent_team.pokemon_3,
        ]
        winners = []
        # Pair up the teams position by position and record each round's winner.
        for creator_pokemon, opponent_pokemon, position in zip(
            creator_team_pokemon, opponent_team_pokemon, [1, 2, 3]
        ):
            winner = get_round_winner(creator_pokemon, opponent_pokemon)
            serializer = PokemonWinnerSerializer(instance=winner)
            data = serializer.data
            data["position"] = position
            winners.append(data)
        return winners

    class Meta:
        model = Battle
        fields = (
            "timestamp",
            "status",
            "creator",
            "creator_team",
            "opponent_team",
            "opponent",
            "winner",
            "matches_winners",
            "id",
        )
class CreateBattleSerializer(SelectTeamSerializerMixin):
    """Creates a Battle plus the creator's BattleTeam and emails the opponent an invite."""

    opponent_id = serializers.PrimaryKeyRelatedField(
        source="battle.opponent", queryset=User.objects.all(),
    )
    # Read-only fields (so I can use them on frontend if I need any of them)
    opponent = UserSerializer(source="battle.opponent", read_only=True)
    creator = UserSerializer(source="battle.creator", read_only=True)
    winner = UserSerializer(source="battle.winner", read_only=True)
    status = serializers.CharField(read_only=True)
    timestamp = serializers.DateTimeField(source="battle.timestamp", read_only=True)

    class Meta:
        model = BattleTeam
        fields = (
            "opponent",
            "opponent_id",
            "pokemon_1",
            "pokemon_1_position",
            "pokemon_2",
            "pokemon_2_position",
            "pokemon_3",
            "pokemon_3_position",
            "timestamp",
            "creator",
            "status",
            "winner",
        )

    def create(self, validated_data):  # same as the form_valid method on the view
        """Create the Battle (creator = request user), then its BattleTeam, then invite."""
        # get self.request
        request = self.context.get("request")
        # create battle first
        battle_data = validated_data.pop("battle")
        battle_data["creator"] = request.user
        battle = Battle.objects.create(**battle_data)
        # Reorder the chosen pokemon into their declared 1-3 positions.
        battle_team_data = sort_pokemon_in_correct_position(validated_data)
        battle_team_data["battle"] = battle
        battle_team_data["creator"] = request.user
        instance = super().create(battle_team_data)
        # Notify the opponent so they can pick their own team.
        send_opponent_battle_invitation_email(battle)
        return instance
class SelectOpponentTeamSerializer(SelectTeamSerializerMixin):
    """Lets the opponent pick their team for an existing battle and triggers the fight."""

    # The battle is resolved from the URL, not from the request body.
    battle = serializers.HiddenField(default=BattleUrlDefault())

    class Meta:
        model = BattleTeam
        fields = (
            "pokemon_1",
            "pokemon_1_position",
            "pokemon_2",
            "pokemon_2_position",
            "pokemon_3",
            "pokemon_3_position",
            "battle",
        )

    def validate(self, data):
        """Forbid picking a pokemon already used by the other team in this battle.

        NOTE(review): this overrides the mixin's validate without calling
        super(), so the unique-position and 600-point checks are skipped
        here — confirm that is intended.
        """
        pokemon = [
            data["pokemon_1"],
            data["pokemon_2"],
            data["pokemon_3"],
        ]
        if repeated_pokemon_in_teams(pokemon, data["battle"]):
            raise serializers.ValidationError(
                "You chose a Pokemon from your opponent's team. Try again."
            )
        return data

    def create(self, validated_data):
        """Create the opponent's BattleTeam and kick off the async battle resolution."""
        # get self.request
        request = self.context.get("request")
        # Reorder the chosen pokemon into their declared 1-3 positions.
        battle_team_data = sort_pokemon_in_correct_position(validated_data)
        battle_team_data["battle"] = validated_data["battle"]
        battle_team_data["creator"] = request.user
        instance = super().create(battle_team_data)
        # Run the battle and email the result asynchronously (Celery task).
        run_battle_and_send_result_email.delay(instance.battle.id)
        return instance
|
from rest_framework import serializers
from api.pokemon.serializers import PokemonSerializer, PokemonWinnerSerializer
from api.users.serializers import UserSerializer
from battles.models import Battle, BattleTeam
from battles.tasks import run_battle_and_send_result_email
from battles.utils.battle import get_round_winner
from battles.utils.email import send_opponent_battle_invitation_email
from pokemon.helpers import (
are_pokemon_positions_repeated,
pokemon_sum_valid,
repeated_pokemon_in_teams,
sort_pokemon_in_correct_position,
)
from users.models import User
from .fields import BattleUrlDefault
class SelectTeamSerializerMixin(serializers.ModelSerializer):
    """Mixin providing the three write-only position fields and shared team validation."""

    # Each chosen pokemon is assigned a slot 1-3 (write-only: not echoed back).
    pokemon_1_position = serializers.IntegerField(min_value=1, max_value=3, write_only=True)
    pokemon_2_position = serializers.IntegerField(min_value=1, max_value=3, write_only=True)
    pokemon_3_position = serializers.IntegerField(min_value=1, max_value=3, write_only=True)

    def validate(self, data):  # same as the clean method on form
        """Reject duplicate positions and teams whose point sum exceeds 600."""
        if are_pokemon_positions_repeated(data):
            raise serializers.ValidationError("Each Pokemon must have a unique position.")
        is_pokemon_sum_valid = pokemon_sum_valid(
            [data["pokemon_1"], data["pokemon_2"], data["pokemon_3"],]  # noqa
        )
        if not is_pokemon_sum_valid:
            raise serializers.ValidationError(
                "The sum of the Pokemon points can't be greater than 600."
            )
        return data
class BattleTeamSerializer(serializers.ModelSerializer):
    """Read serializer exposing a team's three pokemon with full pokemon details."""

    pokemon_1 = PokemonSerializer()
    pokemon_2 = PokemonSerializer()
    pokemon_3 = PokemonSerializer()

    class Meta:
        model = BattleTeam
        fields = ("pokemon_1", "pokemon_2", "pokemon_3")
class BattleSerializer(serializers.ModelSerializer):
    """Read serializer for a Battle: both teams, participants, winner, and round results."""

    creator_team = serializers.SerializerMethodField()
    opponent_team = serializers.SerializerMethodField()
    matches_winners = serializers.SerializerMethodField()
    creator = UserSerializer()
    opponent = UserSerializer()
    winner = UserSerializer()

    def get_creator_team(self, obj):
        """Return the creator's team data, or {} if the team was not created yet."""
        team = BattleTeam.objects.filter(battle=obj, creator=obj.creator).first()
        # if the creator team doesn't exist yet, just return user data
        if not team:
            return {}
        # if it exists, return the battle team data (which also has the creator information)
        serializer = BattleTeamSerializer(instance=team)
        return serializer.data

    def get_opponent_team(self, obj):
        """Return the opponent's team data, or {} if the team was not created yet."""
        team = BattleTeam.objects.filter(battle=obj, creator=obj.opponent).first()
        if not team:
            return {}
        serializer = BattleTeamSerializer(instance=team)
        return serializer.data

    def get_matches_winners(self, obj):
        """Return per-round winner data (positions 1-3), or {} while the battle is ongoing.

        NOTE(review): the ongoing case returns a dict while the settled case
        returns a list — confirm the frontend handles both shapes.
        """
        if obj.status == "ONGOING":
            return {}
        creator_team = BattleTeam.objects.filter(battle=obj, creator=obj.creator).first()
        opponent_team = BattleTeam.objects.filter(battle=obj, creator=obj.opponent).first()
        creator_team_pokemon = [
            creator_team.pokemon_1,
            creator_team.pokemon_2,
            creator_team.pokemon_3,
        ]
        opponent_team_pokemon = [
            opponent_team.pokemon_1,
            opponent_team.pokemon_2,
            opponent_team.pokemon_3,
        ]
        winners = []
        # Pair up the teams position by position and record each round's winner.
        for creator_pokemon, opponent_pokemon, position in zip(
            creator_team_pokemon, opponent_team_pokemon, [1, 2, 3]
        ):
            winner = get_round_winner(creator_pokemon, opponent_pokemon)
            serializer = PokemonWinnerSerializer(instance=winner)
            data = serializer.data
            data["position"] = position
            winners.append(data)
        return winners

    class Meta:
        model = Battle
        fields = (
            "timestamp",
            "status",
            "creator",
            "creator_team",
            "opponent_team",
            "opponent",
            "winner",
            "matches_winners",
            "id",
        )
class CreateBattleSerializer(SelectTeamSerializerMixin):
    """Creates a Battle plus the creator's BattleTeam and emails the opponent an invite."""

    opponent_id = serializers.PrimaryKeyRelatedField(
        source="battle.opponent", queryset=User.objects.all(),
    )
    # Read-only fields (so I can use them on frontend if I need any of them)
    opponent = UserSerializer(source="battle.opponent", read_only=True)
    creator = UserSerializer(source="battle.creator", read_only=True)
    winner = UserSerializer(source="battle.winner", read_only=True)
    status = serializers.CharField(read_only=True)
    timestamp = serializers.DateTimeField(source="battle.timestamp", read_only=True)

    class Meta:
        model = BattleTeam
        fields = (
            "opponent",
            "opponent_id",
            "pokemon_1",
            "pokemon_1_position",
            "pokemon_2",
            "pokemon_2_position",
            "pokemon_3",
            "pokemon_3_position",
            "timestamp",
            "creator",
            "status",
            "winner",
        )

    def create(self, validated_data):  # same as the form_valid method on the view
        """Create the Battle (creator = request user), then its BattleTeam, then invite."""
        # get self.request
        request = self.context.get("request")
        # create battle first
        battle_data = validated_data.pop("battle")
        battle_data["creator"] = request.user
        battle = Battle.objects.create(**battle_data)
        # Reorder the chosen pokemon into their declared 1-3 positions.
        battle_team_data = sort_pokemon_in_correct_position(validated_data)
        battle_team_data["battle"] = battle
        battle_team_data["creator"] = request.user
        instance = super().create(battle_team_data)
        # Notify the opponent so they can pick their own team.
        send_opponent_battle_invitation_email(battle)
        return instance
class SelectOpponentTeamSerializer(SelectTeamSerializerMixin):
    """Let the opponent choose their team for an existing battle.

    The battle is taken from the URL (via BattleUrlDefault), not from the
    request payload. Saving the team triggers asynchronous battle resolution
    and a result email.
    """
    battle = serializers.HiddenField(default=BattleUrlDefault())

    class Meta:
        model = BattleTeam
        fields = (
            "pokemon_1",
            "pokemon_1_position",
            "pokemon_2",
            "pokemon_2_position",
            "pokemon_3",
            "pokemon_3_position",
            "battle",
        )

    def validate(self, data):
        """Reject a team reusing a pokemon already picked by the battle's other team."""
        pokemon = [
            data["pokemon_1"],
            data["pokemon_2"],
            data["pokemon_3"],
        ]
        if repeated_pokemon_in_teams(pokemon, data["battle"]):
            raise serializers.ValidationError(
                "You chose a Pokemon from your opponent's team. Try again."
            )
        return data

    def create(self, validated_data):
        # get self.request
        request = self.context.get("request")
        battle_team_data = sort_pokemon_in_correct_position(validated_data)
        battle_team_data["battle"] = validated_data["battle"]
        battle_team_data["creator"] = request.user
        instance = super().create(battle_team_data)
        # Battle is resolved asynchronously (celery task); result goes by email.
        run_battle_and_send_result_email.delay(instance.battle.id)
        return instance
|
en
| 0.887973
|
# same as the clean method on form # noqa # if the creator team doesn't exist yet, just return user data # if it exists, return the battle team data (which also has the creator information) # Read-only fields (so I can use them on frontend if I need any of them) # same as the form_valid method on the view # get self.request # create battle first # get self.request
| 2.578607
| 3
|
api/rest/models.py
|
dragonsuperf/job-spider
| 0
|
6626524
|
<reponame>dragonsuperf/job-spider<filename>api/rest/models.py
from django.db import models
class Article(models.Model):
    """A scraped job posting.

    The 'ERROR' defaults presumably act as sentinels for fields the spider
    failed to extract -- TODO confirm against the scraper.
    """
    company_name = models.CharField(max_length=32, default='ERROR')
    # career/experience level; '0' default -- semantics unclear from here, verify
    career = models.CharField(max_length=16, default='0')
    duty = models.CharField(max_length=32, default='ERROR')
    location = models.CharField(max_length=64, default='ERROR')
    required_skill = models.CharField(max_length=128, default='ERROR')
    # NOTE(review): looks like a logo URL/path rather than image data -- confirm
    logo = models.CharField(max_length=64, default='ERROR')
    content = models.TextField(default='ERROR')
|
from django.db import models
class Article(models.Model):
company_name = models.CharField(max_length=32, default='ERROR')
career = models.CharField(max_length=16, default='0')
duty = models.CharField(max_length=32, default='ERROR')
location = models.CharField(max_length=64, default='ERROR')
required_skill = models.CharField(max_length=128, default='ERROR')
logo = models.CharField(max_length=64, default='ERROR')
content = models.TextField(default='ERROR')
|
none
| 1
| 2.162157
| 2
|
|
backend/app/apps/hotels/helpers/recommendation.py
|
Hesbon5600/hotel-recommender
| 0
|
6626525
|
<reponame>Hesbon5600/hotel-recommender
import pandas as pd
from datetime import datetime
from sqlalchemy import create_engine
from django.conf import settings
from app import celery_app
from ..models import Hotel
def generate_score():
    """
    Generate the score for each hotel based on the IMDB algorithm

    Returns a DataFrame with columns ['id', 'score'] containing only hotels
    whose star_rating is at or above the 55th percentile.
    """
    df_ = pd.DataFrame(list(Hotel.objects.all().values()))
    # Calculate C: the mean rating over all hotels.
    C = df_['star_rating'].mean()
    # The average rating of a hotel in Nairobi is around 3,2, on a scale of 5.
    # Next, let's calculate the number of votes, m, received by a hotel in the 55th percentile.
    # The pandas library makes this task extremely trivial using the .quantile() method of a pandas Series:
    m = df_['star_rating'].quantile(0.55)
    # we can filter the hotels that qualify for the chart, based on their vote counts:
    # Filter out all qualified hotels into a new DataFrame
    qualified_hotels = df_.copy().loc[df_['star_rating'] >= m]

    def weighted_rating(x, m=m, C=C):
        """
        Function that computes the weighted rating of each hotel
        """
        v = x['total_reviews']
        R = x['star_rating']
        # Calculation based on the formula: (v/(v+m))*R + (m/(m+v))*C
        return (v/(v+m) * R) + (m/(m+v) * C)
    # Define a new feature 'score' and calculate its value with `weighted_rating()`
    qualified_hotels['score'] = qualified_hotels.apply(weighted_rating, axis=1)
    df_to_db = qualified_hotels[['id', 'score']]
    return df_to_db
@celery_app.task(name="generate-recommendation")
def persist_to_db():
    """
    Save each hotel's computed recommendation score to the database.

    Security fix: the original built the UPDATE statement with an f-string,
    interpolating score/id directly into SQL (injection-prone and quote
    fragile). Values are now passed as bound parameters. All rows are also
    written in a single transaction instead of one transaction per row.
    """
    from sqlalchemy import text  # local import; sqlalchemy is already a file dependency

    engine = create_engine(settings.DB_CONN_STRING)
    df_to_db = generate_score()
    update_sql = text("UPDATE hotels_hotel SET score = :score WHERE id = :id")
    with engine.begin() as conn:  # TRANSACTION
        for row in df_to_db.itertuples(index=False):
            conn.execute(update_sql, {"score": row.score, "id": row.id})
|
import pandas as pd
from datetime import datetime
from sqlalchemy import create_engine
from django.conf import settings
from app import celery_app
from ..models import Hotel
def generate_score():
"""
Generate the score for each hotel based on the IMDB algorithm
"""
df_ = pd.DataFrame(list(Hotel.objects.all().values()))
# Calculate C
C = df_['star_rating'].mean()
# The average rating of a hotel in Nairobi is around 3,2, on a scale of 5.
# Next, let's calculate the number of votes, m, received by a hotel in the 55th percentile.
# The pandas library makes this task extremely trivial using the .quantile() method of a pandas Series:
m = df_['star_rating'].quantile(0.55)
# we can filter the hotels that qualify for the chart, based on their vote counts:
# Filter out all qualified movies into a new DataFrame
qualified_hotels = df_.copy().loc[df_['star_rating'] >= m]
def weighted_rating(x, m=m, C=C):
"""
Function that computes the weighted rating of each hotel
"""
v = x['total_reviews']
R = x['star_rating']
# Calculation based on the formula
return (v/(v+m) * R) + (m/(m+v) * C)
# Define a new feature 'score' and calculate its value with `weighted_rating()`
qualified_hotels['score'] = qualified_hotels.apply(weighted_rating, axis=1)
df_to_db = qualified_hotels[['id', 'score']]
return df_to_db
@celery_app.task(name="generate-recommendation")
def persist_to_db():
"""
save the score to the database
"""
engine = create_engine(settings.DB_CONN_STRING)
df_to_db = generate_score()
for i in range(1, len(df_to_db)+1):
entry = df_to_db.iloc[i-1:i]
score = entry['score'].values[0]
id = entry['id'].values[0]
update_sql = f"""UPDATE hotels_hotel SET score = '{score}' WHERE id = '{id}'"""
with engine.begin() as conn: # TRANSACTION
conn.execute(update_sql)
|
en
| 0.90688
|
Generate the score for each hotel based on the IMDB algorithm # Calculate C # The average rating of a hotel in Nairobi is around 3,2, on a scale of 5. # Next, let's calculate the number of votes, m, received by a hotel in the 55th percentile. # The pandas library makes this task extremely trivial using the .quantile() method of a pandas Series: # we can filter the hotels that qualify for the chart, based on their vote counts: # Filter out all qualified movies into a new DataFrame Function that computes the weighted rating of each hotel # Calculation based on the formula # Define a new feature 'score' and calculate its value with `weighted_rating()` save the score to the database UPDATE hotels_hotel SET score = '{score}' WHERE id = '{id}' # TRANSACTION
| 2.988791
| 3
|
mikeio/dfsutil.py
|
murray91/mikeio
| 0
|
6626526
|
from typing import List, Union
import numpy as np
import pandas as pd
from .eum import EUMType, EUMUnit, ItemInfo, TimeAxisType
from .custom_exceptions import ItemsError
from mikecore.DfsFile import DfsDynamicItemInfo
def _valid_item_numbers(
dfsItemInfo: List[DfsDynamicItemInfo],
items: Union[int, List[int], List[str]] = None,
) -> List[int]:
n_items_file = len(dfsItemInfo)
if items is None:
return list(range(n_items_file))
if np.isscalar(items):
items = [items]
for idx, item in enumerate(items):
if isinstance(item, str):
items[idx] = _item_numbers_by_name(dfsItemInfo, [item])[0]
elif isinstance(item, int):
if (item < 0) or (item >= n_items_file):
raise ItemsError(n_items_file)
else:
raise ItemsError(n_items_file)
if len(np.unique(items)) != len(items):
raise ValueError("'items' must be unique")
return items
def _valid_timesteps(dfsFileInfo, time_steps):
    """Normalize a time-step selection to a list of 0-based step indices.

    Accepts None (all steps), a single int, a list of ints (negative values
    count from the end), a "start,end" timestamp string, or a slice of
    timestamps (equidistant calendar axes only).

    Raises IndexError when an index falls outside the file's time axis and
    ValueError for slice selections on non-equidistant files.
    """
    # TODO: naming: time_steps or timesteps?
    n_steps_file = dfsFileInfo.TimeAxis.NumberOfTimeSteps
    if time_steps is None:
        return list(range(n_steps_file))
    if isinstance(time_steps, int):
        time_steps = [time_steps]
    if isinstance(time_steps, str):
        # "start,end" -> timestamp slice; a lone timestamp means start == end.
        parts = time_steps.split(",")
        if len(parts) == 1:
            parts.append(parts[0])  # end=start
        if parts[0] == "":
            time_steps = slice(parts[1])  # stop only
        elif parts[1] == "":
            time_steps = slice(parts[0], None)  # start only
        else:
            time_steps = slice(parts[0], parts[1])
    if isinstance(time_steps, slice):
        if dfsFileInfo.TimeAxis.TimeAxisType != TimeAxisType.EquidistantCalendar:
            # TODO: handle non-equidistant calendar
            raise ValueError(
                "Only equidistant calendar files are supported for this type of time_step argument"
            )
        # Reconstruct the file's time axis, then map the timestamp slice to indices.
        start_time_file = dfsFileInfo.TimeAxis.StartDateTime
        time_step_file = dfsFileInfo.TimeAxis.TimeStep
        freq = pd.tseries.offsets.DateOffset(seconds=time_step_file)
        time = pd.date_range(start_time_file, periods=n_steps_file, freq=freq)
        if time_steps.start is None:
            time_steps_start = time[0]
        else:
            time_steps_start = pd.Timestamp(time_steps.start)
        if time_steps.stop is None:
            time_steps_stop = time[-1]
        else:
            time_steps_stop = pd.Timestamp(time_steps.stop)
        s = time.slice_indexer(time_steps_start, time_steps_stop)
        time_steps = list(range(s.start, s.stop))
    elif isinstance(time_steps[0], int):
        # Translate negative indices to positive ones (count from the end).
        time_steps = np.array(time_steps)
        time_steps[time_steps < 0] = n_steps_file + time_steps[time_steps < 0]
        time_steps = list(time_steps)
    if max(time_steps) > (n_steps_file - 1):
        raise IndexError(f"Timestep cannot be larger than {n_steps_file}")
    if min(time_steps) < 0:
        raise IndexError(f"Timestep cannot be less than {-n_steps_file}")
    return time_steps
def _item_numbers_by_name(dfsItemInfo, item_names):
"""Utility function to find item numbers
Parameters
----------
dfsItemInfo : MIKE dfs ItemInfo object
item_names : list[str]
Names of items to be found
Returns
-------
list[int]
item numbers (0-based)
Raises
------
KeyError
In case item is not found in the dfs file
"""
names = [x.Name for x in dfsItemInfo]
item_lookup = {name: i for i, name in enumerate(names)}
try:
item_numbers = [item_lookup[x] for x in item_names]
except KeyError:
raise KeyError(f"Selected item name not found. Valid names are {names}")
return item_numbers
def _get_item_info(
    dfsItemInfo: List[DfsDynamicItemInfo], item_numbers: List[int] = None
) -> List[ItemInfo]:
    """Read DFS ItemInfo for specific item numbers

    Parameters
    ----------
    dfsItemInfo : List[DfsDynamicItemInfo]
    item_numbers : list[int], optional
        0-based indices to read; defaults to every item.

    Returns
    -------
    list[ItemInfo]
    """
    if item_numbers is None:
        item_numbers = list(range(len(dfsItemInfo)))
    result = []
    for number in item_numbers:
        info = dfsItemInfo[number]
        result.append(
            ItemInfo(
                info.Name,
                EUMType(info.Quantity.Item),
                EUMUnit(info.Quantity.Unit),
                info.ValueType,
            )
        )
    return result
|
from typing import List, Union
import numpy as np
import pandas as pd
from .eum import EUMType, EUMUnit, ItemInfo, TimeAxisType
from .custom_exceptions import ItemsError
from mikecore.DfsFile import DfsDynamicItemInfo
def _valid_item_numbers(
dfsItemInfo: List[DfsDynamicItemInfo],
items: Union[int, List[int], List[str]] = None,
) -> List[int]:
n_items_file = len(dfsItemInfo)
if items is None:
return list(range(n_items_file))
if np.isscalar(items):
items = [items]
for idx, item in enumerate(items):
if isinstance(item, str):
items[idx] = _item_numbers_by_name(dfsItemInfo, [item])[0]
elif isinstance(item, int):
if (item < 0) or (item >= n_items_file):
raise ItemsError(n_items_file)
else:
raise ItemsError(n_items_file)
if len(np.unique(items)) != len(items):
raise ValueError("'items' must be unique")
return items
def _valid_timesteps(dfsFileInfo, time_steps):
# TODO: naming: time_steps or timesteps?
n_steps_file = dfsFileInfo.TimeAxis.NumberOfTimeSteps
if time_steps is None:
return list(range(n_steps_file))
if isinstance(time_steps, int):
time_steps = [time_steps]
if isinstance(time_steps, str):
parts = time_steps.split(",")
if len(parts) == 1:
parts.append(parts[0]) # end=start
if parts[0] == "":
time_steps = slice(parts[1]) # stop only
elif parts[1] == "":
time_steps = slice(parts[0], None) # start only
else:
time_steps = slice(parts[0], parts[1])
if isinstance(time_steps, slice):
if dfsFileInfo.TimeAxis.TimeAxisType != TimeAxisType.EquidistantCalendar:
# TODO: handle non-equidistant calendar
raise ValueError(
"Only equidistant calendar files are supported for this type of time_step argument"
)
start_time_file = dfsFileInfo.TimeAxis.StartDateTime
time_step_file = dfsFileInfo.TimeAxis.TimeStep
freq = pd.tseries.offsets.DateOffset(seconds=time_step_file)
time = pd.date_range(start_time_file, periods=n_steps_file, freq=freq)
if time_steps.start is None:
time_steps_start = time[0]
else:
time_steps_start = pd.Timestamp(time_steps.start)
if time_steps.stop is None:
time_steps_stop = time[-1]
else:
time_steps_stop = pd.Timestamp(time_steps.stop)
s = time.slice_indexer(time_steps_start, time_steps_stop)
time_steps = list(range(s.start, s.stop))
elif isinstance(time_steps[0], int):
time_steps = np.array(time_steps)
time_steps[time_steps < 0] = n_steps_file + time_steps[time_steps < 0]
time_steps = list(time_steps)
if max(time_steps) > (n_steps_file - 1):
raise IndexError(f"Timestep cannot be larger than {n_steps_file}")
if min(time_steps) < 0:
raise IndexError(f"Timestep cannot be less than {-n_steps_file}")
return time_steps
def _item_numbers_by_name(dfsItemInfo, item_names):
"""Utility function to find item numbers
Parameters
----------
dfsItemInfo : MIKE dfs ItemInfo object
item_names : list[str]
Names of items to be found
Returns
-------
list[int]
item numbers (0-based)
Raises
------
KeyError
In case item is not found in the dfs file
"""
names = [x.Name for x in dfsItemInfo]
item_lookup = {name: i for i, name in enumerate(names)}
try:
item_numbers = [item_lookup[x] for x in item_names]
except KeyError:
raise KeyError(f"Selected item name not found. Valid names are {names}")
return item_numbers
def _get_item_info(
dfsItemInfo: List[DfsDynamicItemInfo], item_numbers: List[int] = None
) -> List[ItemInfo]:
"""Read DFS ItemInfo for specific item numbers
Parameters
----------
dfsItemInfo : List[DfsDynamicItemInfo]
item_numbers : list[int], optional
Returns
-------
list[ItemInfo]
"""
if item_numbers is None:
item_numbers = list(range(len(dfsItemInfo)))
items = []
for item in item_numbers:
name = dfsItemInfo[item].Name
eumItem = dfsItemInfo[item].Quantity.Item
eumUnit = dfsItemInfo[item].Quantity.Unit
itemtype = EUMType(eumItem)
unit = EUMUnit(eumUnit)
data_value_type = dfsItemInfo[item].ValueType
item = ItemInfo(name, itemtype, unit, data_value_type)
items.append(item)
return items
|
en
| 0.321073
|
# TODO: naming: time_steps or timesteps? # end=start # stop only # start only # TODO: handle non-equidistant calendar Utility function to find item numbers Parameters ---------- dfsItemInfo : MIKE dfs ItemInfo object item_names : list[str] Names of items to be found Returns ------- list[int] item numbers (0-based) Raises ------ KeyError In case item is not found in the dfs file Read DFS ItemInfo for specific item numbers Parameters ---------- dfsItemInfo : List[DfsDynamicItemInfo] item_numbers : list[int], optional Returns ------- list[ItemInfo]
| 2.503228
| 3
|
cvpods/checkpoint/catalog.py
|
reinforcementdriving/cvpods
| 0
|
6626527
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from cvpods.utils import PathHandler, PathManager
import logging
class ModelCatalog(object):
    """
    Store mappings from names to third-party models.

    Translates symbolic model names ("Caffe2Detectron/COCO/..." and
    "ImageNetPretrained/...") into downloadable fbaipublicfiles URLs.
    """
    S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
    # MSRA models have STRIDE_IN_1X1=True. False otherwise.
    # NOTE: all BN models here have fused BN into an affine layer.
    # As a result, you should only load them to a model with "FrozenBN".
    # Loading them to a model with regular BN or SyncBN is wrong.
    # Even when loaded to FrozenBN, it is still different from affine by an epsilon,
    # which should be negligible for training.
    # NOTE: all models here uses PIXEL_STD=[1,1,1]
    C2_IMAGENET_MODELS = {
        "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
        "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
        "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
        "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
        "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
        "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
        "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
    }
    C2_DETECTRON_PATH_FORMAT = (
        "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl"
    )  # noqa B950
    C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
    C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
    # format: {model_name} -> part of the url
    C2_DETECTRON_MODELS = {
        "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW",  # noqa B950
        "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I",  # noqa B950
        "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7",  # noqa B950
        "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ",  # noqa B950
        "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB",  # noqa B950
        "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC",  # noqa B950
        "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT",  # noqa B950
        "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI",  # noqa B950
        "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q",  # noqa B950
        "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao",  # noqa B950
        "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L",  # noqa B950
        "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179",  # noqa B950
        "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2",  # noqa B950
    }

    @staticmethod
    def get(name):
        """Resolve a catalog name to a URL; raises RuntimeError for unknown prefixes."""
        if name.startswith("Caffe2Detectron/COCO"):
            return ModelCatalog._get_c2_detectron_baseline(name)
        if name.startswith("ImageNetPretrained/"):
            return ModelCatalog._get_c2_imagenet_pretrained(name)
        raise RuntimeError("model not present in the catalog: {}".format(name))

    @staticmethod
    def _get_c2_imagenet_pretrained(name):
        """URL for an ImageNet-pretrained backbone; raises KeyError for unknown names."""
        prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
        name = name[len("ImageNetPretrained/"):]
        name = ModelCatalog.C2_IMAGENET_MODELS[name]
        url = "/".join([prefix, name])
        return url

    @staticmethod
    def _get_c2_detectron_baseline(name):
        """URL for a Caffe2 Detectron COCO baseline; raises KeyError for unknown names."""
        name = name[len("Caffe2Detectron/COCO/"):]
        url = ModelCatalog.C2_DETECTRON_MODELS[name]
        if "keypoint_rcnn" in name:
            dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
        else:
            dataset = ModelCatalog.C2_DATASET_COCO
        if "35998355/rpn_R-50-C4_1x" in name:
            # this one model is somehow different from others ..
            type = "rpn"
        else:
            type = "generalized_rcnn"
        # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
        url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
            prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
        )
        return url
class ModelCatalogHandler(PathHandler):
    """
    Resolve URL like catalog://.
    """
    PREFIX = "catalog://"

    def _get_supported_prefixes(self):
        return [self.PREFIX]

    def _get_local_path(self, path):
        # Translate the catalog entry to its real URL, then let PathManager
        # fetch/cache a local copy.
        logger = logging.getLogger(__name__)
        catalog_path = ModelCatalog.get(path[len(self.PREFIX):])
        logger.info("Catalog entry {} points to {}".format(path, catalog_path))
        return PathManager.get_local_path(catalog_path)

    def _open(self, path, mode="r", **kwargs):
        return PathManager.open(self._get_local_path(path), mode, **kwargs)
class Detectron2Handler(PathHandler):
    """
    Resolve anything that's in Detectron2 model zoo.
    """
    PREFIX = "detectron2://"
    S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"

    def _get_supported_prefixes(self):
        return [self.PREFIX]

    def _get_local_path(self, path):
        # Strip the scheme and delegate to PathManager for download/caching.
        name = path[len(self.PREFIX):]
        return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name)

    def _open(self, path, mode="r", **kwargs):
        return PathManager.open(self._get_local_path(path), mode, **kwargs)
# Make "catalog://" and "detectron2://" URLs resolvable through PathManager.
PathManager.register_handler(ModelCatalogHandler())
PathManager.register_handler(Detectron2Handler())
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from cvpods.utils import PathHandler, PathManager
import logging
class ModelCatalog(object):
"""
Store mappings from names to third-party models.
"""
S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
# MSRA models have STRIDE_IN_1X1=True. False otherwise.
# NOTE: all BN models here have fused BN into an affine layer.
# As a result, you should only load them to a model with "FrozenBN".
# Loading them to a model with regular BN or SyncBN is wrong.
# Even when loaded to FrozenBN, it is still different from affine by an epsilon,
# which should be negligible for training.
# NOTE: all models here uses PIXEL_STD=[1,1,1]
C2_IMAGENET_MODELS = {
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
"FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
"FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
"FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
"FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
}
C2_DETECTRON_PATH_FORMAT = (
"{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl"
) # noqa B950
C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
# format: {model_name} -> part of the url
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950
"35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950
"48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950
"37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950
"35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950
"35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950
"36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog._get_c2_detectron_baseline(name)
if name.startswith("ImageNetPretrained/"):
return ModelCatalog._get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog: {}".format(name))
@staticmethod
def _get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
name = name[len("ImageNetPretrained/"):]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def _get_c2_detectron_baseline(name):
name = name[len("Caffe2Detectron/COCO/"):]
url = ModelCatalog.C2_DETECTRON_MODELS[name]
if "keypoint_rcnn" in name:
dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
else:
dataset = ModelCatalog.C2_DATASET_COCO
if "35998355/rpn_R-50-C4_1x" in name:
# this one model is somehow different from others ..
type = "rpn"
else:
type = "generalized_rcnn"
# Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
)
return url
class ModelCatalogHandler(PathHandler):
"""
Resolve URL like catalog://.
"""
PREFIX = "catalog://"
def _get_supported_prefixes(self):
return [self.PREFIX]
def _get_local_path(self, path):
logger = logging.getLogger(__name__)
catalog_path = ModelCatalog.get(path[len(self.PREFIX):])
logger.info("Catalog entry {} points to {}".format(path, catalog_path))
return PathManager.get_local_path(catalog_path)
def _open(self, path, mode="r", **kwargs):
return PathManager.open(self._get_local_path(path), mode, **kwargs)
class Detectron2Handler(PathHandler):
"""
Resolve anything that's in Detectron2 model zoo.
"""
PREFIX = "detectron2://"
S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
def _get_supported_prefixes(self):
return [self.PREFIX]
def _get_local_path(self, path):
name = path[len(self.PREFIX):]
return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name)
def _open(self, path, mode="r", **kwargs):
return PathManager.open(self._get_local_path(path), mode, **kwargs)
PathManager.register_handler(ModelCatalogHandler())
PathManager.register_handler(Detectron2Handler())
|
en
| 0.842161
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved Store mappings from names to third-party models. # MSRA models have STRIDE_IN_1X1=True. False otherwise. # NOTE: all BN models here have fused BN into an affine layer. # As a result, you should only load them to a model with "FrozenBN". # Loading them to a model with regular BN or SyncBN is wrong. # Even when loaded to FrozenBN, it is still different from affine by an epsilon, # which should be negligible for training. # NOTE: all models here uses PIXEL_STD=[1,1,1] # noqa B950 # format: {model_name} -> part of the url # noqa B950 # noqa B950 # noqa B950 # noqa B950 # noqa B950 # noqa B950 # noqa B950 # noqa B950 # noqa B950 # noqa B950 # noqa B950 # noqa B950 # noqa B950 # this one model is somehow different from others .. # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`. Resolve URL like catalog://. Resolve anything that's in Detectron2 model zoo.
| 1.864107
| 2
|
zerxis_invitations/invitations/apps.py
|
Kirembu/zerxis-invitations
| 0
|
6626528
|
import importlib
from django.apps import AppConfig as BaseAppConfig
from django.utils.translation import gettext_lazy as _
class AppConfig(BaseAppConfig):
    """Django application config for the zerxis_invitations app."""
    name = "zerxis_invitations"
    label = "zerxis_invitations"
    verbose_name = _("Zerxis Invitations")

    def ready(self):
        # Imported for its side effects: registers the app's signal receivers.
        importlib.import_module("zerxis_invitations.invitations.receivers")
|
import importlib
from django.apps import AppConfig as BaseAppConfig
from django.utils.translation import gettext_lazy as _
class AppConfig(BaseAppConfig):
name = "zerxis_invitations"
label = "zerxis_invitations"
verbose_name = _("Zerxis Invitations")
def ready(self):
importlib.import_module("zerxis_invitations.invitations.receivers")
|
none
| 1
| 1.751938
| 2
|
|
soapypower/power.py
|
alexnask/soapy_power
| 111
|
6626529
|
#!/usr/bin/env python3
import sys, time, datetime, math, logging, signal
import numpy
import simplesoapy
from simplespectral import zeros
from soapypower import psd, writer
logger = logging.getLogger(__name__)
_shutdown = False
def _shutdown_handler(sig, frame):
    """Set global _shutdown flag when receiving SIGTERM or SIGINT signals

    Signature matches the signal-handler contract; both arguments are unused.
    The flag is presumably polled elsewhere to exit the sweep loop cleanly.
    """
    global _shutdown
    _shutdown = True
# Register signals with _shutdown_handler
signal.signal(signal.SIGTERM, _shutdown_handler)
signal.signal(signal.SIGINT, _shutdown_handler)
if sys.platform == 'win32':
    # SIGBREAK (Ctrl+Break) exists only on Windows.
    signal.signal(signal.SIGBREAK, _shutdown_handler)
class SoapyPower:
"""SoapySDR spectrum analyzer"""
def __init__(self, soapy_args='', sample_rate=2.00e6, bandwidth=0, corr=0, gain=20.7,
             auto_gain=False, channel=0, antenna='', settings=None,
             force_sample_rate=False, force_bandwidth=False,
             output=sys.stdout, output_format='rtl_power'):
    """Open the SoapySDR device and initialise (empty) measurement state.

    Device/tuning arguments are passed straight through to
    simplesoapy.SoapyDevice; `output` and `output_format` configure where
    and how measurement results are written.
    """
    self.device = simplesoapy.SoapyDevice(
        soapy_args=soapy_args, sample_rate=sample_rate, bandwidth=bandwidth, corr=corr,
        gain=gain, auto_gain=auto_gain, channel=channel, antenna=antenna, settings=settings,
        force_sample_rate=force_sample_rate, force_bandwidth=force_bandwidth
    )
    self._output = output
    self._output_format = output_format
    # Sweep state -- populated later by setup/measurement methods
    # (e.g. create_buffer).
    self._buffer = None
    self._buffer_repeats = None
    self._base_buffer_size = None
    self._max_buffer_size = None
    self._bins = None
    self._repeats = None
    self._tune_delay = None
    self._reset_stream = None
    self._psd = None
    self._writer = None
def nearest_freq(self, freq, bin_size):
    """Return nearest frequency based on bin size"""
    n_bins = round(freq / bin_size)
    return n_bins * bin_size
def nearest_bins(self, bins, even=False, pow2=False):
    """Return nearest number of FFT bins (even or power of two)"""
    if pow2:
        exponent = math.log(bins, 2)
        if exponent % 1 != 0:
            bins = 2**math.ceil(exponent)
            logger.warning('number of FFT bins should be power of two, changing to {}'.format(bins))
    elif even and bins % 2 != 0:
        bins = math.ceil(bins / 2) * 2
        logger.warning('number of FFT bins should be even, changing to {}'.format(bins))
    return bins
def nearest_overlap(self, overlap, bins):
    """Return nearest overlap/crop factor based on number of bins"""
    overlapped_bins = overlap * bins
    if overlapped_bins % 2 == 0:
        return overlap
    overlapped_bins = math.ceil(overlapped_bins / 2) * 2
    adjusted = overlapped_bins / bins
    logger.warning('number of overlapping FFT bins should be even, changing overlap/crop factor to {:.5f}'.format(adjusted))
    return adjusted
def bin_size_to_bins(self, bin_size):
"""Convert bin size [Hz] to number of FFT bins"""
return math.ceil(self.device.sample_rate / bin_size)
def bins_to_bin_size(self, bins):
"""Convert number of FFT bins to bin size [Hz]"""
return self.device.sample_rate / bins
def time_to_repeats(self, bins, integration_time):
"""Convert integration time to number of repeats"""
return math.ceil((self.device.sample_rate * integration_time) / bins)
def repeats_to_time(self, bins, repeats):
"""Convert number of repeats to integration time"""
return (repeats * bins) / self.device.sample_rate
def freq_plan(self, min_freq, max_freq, bins, overlap=0, quiet=False):
    """Return list of center frequencies for frequency hopping

    :param min_freq: lower edge of the scanned span [Hz] (device frequency)
    :param max_freq: upper edge of the scanned span [Hz] (device frequency)
    :param bins: number of FFT bins
    :param overlap: overlap/crop factor (0 to 1) between neighbouring hops
    :param quiet: if True, compute the plan without logging it
    """
    bin_size = self.bins_to_bin_size(bins)
    bins_crop = round((1 - overlap) * bins)
    sample_rate_crop = (1 - overlap) * self.device.sample_rate
    freq_range = max_freq - min_freq
    # Hopping is needed only when the span exceeds one (cropped) device pass
    hopping = freq_range >= sample_rate_crop
    # Hop size is aligned to a whole number of bins
    hop_size = self.nearest_freq(sample_rate_crop, bin_size)
    hops = math.ceil(freq_range / hop_size) if hopping else 1
    # Without hopping, the single hop is simply centered on the requested span
    min_center_freq = min_freq + (hop_size / 2) if hopping else min_freq + (freq_range / 2)
    max_center_freq = min_center_freq + ((hops - 1) * hop_size)
    freq_list = [min_center_freq + (i * hop_size) for i in range(hops)]
    if not quiet:
        logger.info('overlap: {:.5f}'.format(overlap))
        logger.info('bin_size: {:.2f} Hz'.format(bin_size))
        logger.info('bins: {}'.format(bins))
        logger.info('bins (after crop): {}'.format(bins_crop))
        logger.info('sample_rate: {:.3f} MHz'.format(self.device.sample_rate / 1e6))
        logger.info('sample_rate (after crop): {:.3f} MHz'.format(sample_rate_crop / 1e6))
        logger.info('freq_range: {:.3f} MHz'.format(freq_range / 1e6))
        logger.info('hopping: {}'.format('YES' if hopping else 'NO'))
        logger.info('hop_size: {:.3f} MHz'.format(hop_size / 1e6))
        logger.info('hops: {}'.format(hops))
        logger.info('min_center_freq: {:.3f} MHz'.format(min_center_freq / 1e6))
        logger.info('max_center_freq: {:.3f} MHz'.format(max_center_freq / 1e6))
        logger.info('min_freq (after crop): {:.3f} MHz'.format((min_center_freq - (hop_size / 2)) / 1e6))
        logger.info('max_freq (after crop): {:.3f} MHz'.format((max_center_freq + (hop_size / 2)) / 1e6))
        logger.debug('Frequency hops table:')
        logger.debug(' {:8s} {:8s} {:8s}'.format('Min:', 'Center:', 'Max:'))
        for f in freq_list:
            logger.debug(' {:8.3f} MHz {:8.3f} MHz {:8.3f} MHz'.format(
                (f - (self.device.sample_rate / 2)) / 1e6,
                f / 1e6,
                (f + (self.device.sample_rate / 2)) / 1e6,
            ))
    return freq_list
def create_buffer(self, bins, repeats, base_buffer_size, max_buffer_size=0):
    """Create buffer for reading samples

    Returns (buffer_repeats, buffer): how many times the buffer must be
    filled to acquire the requested number of samples, and the
    numpy.complex64 array itself.  max_buffer_size < 0 disables the size
    limit; 0 selects the default limit.
    """
    samples = bins * repeats
    buffer_repeats = 1
    # Round the buffer up to a whole number of base (device) buffers
    buffer_size = math.ceil(samples / base_buffer_size) * base_buffer_size
    if not max_buffer_size:
        # Max buffer size about 100 MB (numpy.complex64 is 8 bytes per sample);
        # use integer division so the limit stays an int
        max_buffer_size = (100 * 1024**2) // 8
    if max_buffer_size > 0:
        max_buffer_size = math.ceil(max_buffer_size / base_buffer_size) * base_buffer_size
        if buffer_size > max_buffer_size:
            logger.warning('Required buffer size ({}) will be shrinked to max_buffer_size ({})!'.format(
                buffer_size, max_buffer_size
            ))
            # Refill the (smaller) buffer several times instead
            buffer_repeats = math.ceil(buffer_size / max_buffer_size)
            buffer_size = max_buffer_size
    logger.info('repeats: {}'.format(repeats))
    logger.info('samples: {} (time: {:.5f} s)'.format(samples, samples / self.device.sample_rate))
    if max_buffer_size > 0:
        logger.info('max_buffer_size (samples): {} (repeats: {:.2f}, time: {:.5f} s)'.format(
            max_buffer_size, max_buffer_size / bins, max_buffer_size / self.device.sample_rate
        ))
    else:
        logger.info('max_buffer_size (samples): UNLIMITED')
    logger.info('buffer_size (samples): {} (repeats: {:.2f}, time: {:.5f} s)'.format(
        buffer_size, buffer_size / bins, buffer_size / self.device.sample_rate
    ))
    logger.info('buffer_repeats: {}'.format(buffer_repeats))
    return (buffer_repeats, zeros(buffer_size, numpy.complex64))
def setup(self, bins, repeats, base_buffer_size=0, max_buffer_size=0, fft_window='hann',
          fft_overlap=0.5, crop_factor=0, log_scale=True, remove_dc=False, detrend=None,
          lnb_lo=0, tune_delay=0, reset_stream=False, max_threads=0, max_queue_size=0):
    """Prepare samples buffer and start streaming samples from device

    Initializes the acquisition state used by psd()/sweep(): the shared
    samples buffer, the PSD computation helper and the output writer.
    A stream that is already running is stopped and restarted.
    """
    # Restart streaming from a clean state if it is already running
    if self.device.is_streaming:
        self.device.stop_stream()
    base_buffer = self.device.start_stream(buffer_size=base_buffer_size)
    self._bins = bins
    self._repeats = repeats
    # The driver may adjust the requested size, so the length of the buffer
    # actually returned by start_stream() is used as the base allocation unit
    self._base_buffer_size = len(base_buffer)
    self._max_buffer_size = max_buffer_size
    self._buffer_repeats, self._buffer = self.create_buffer(
        bins, repeats, self._base_buffer_size, self._max_buffer_size
    )
    self._tune_delay = tune_delay
    self._reset_stream = reset_stream
    # NOTE: `psd` here resolves to the imported soapypower.psd module, not to
    # the psd() method (class attributes are not in scope inside methods)
    self._psd = psd.PSD(bins, self.device.sample_rate, fft_window=fft_window, fft_overlap=fft_overlap,
                        crop_factor=crop_factor, log_scale=log_scale, remove_dc=remove_dc, detrend=detrend,
                        lnb_lo=lnb_lo, max_threads=max_threads, max_queue_size=max_queue_size)
    # Writer class is selected by the output format name (e.g. 'rtl_power')
    self._writer = writer.formats[self._output_format](self._output)
def stop(self):
    """Stop streaming samples from device and delete samples buffer"""
    if not self.device.is_streaming:
        return
    self.device.stop_stream()
    self._writer.close()
    # Drop all acquisition state created by setup()
    for attr in ('_bins', '_repeats', '_base_buffer_size', '_max_buffer_size',
                 '_buffer_repeats', '_buffer', '_tune_delay', '_reset_stream',
                 '_psd', '_writer'):
        setattr(self, attr, None)
def psd(self, freq):
    """Tune to specified center frequency and compute Power Spectral Density

    Returns (psd_future, acq_time_start, acq_time_stop): a future with the
    PSD result plus the UTC timestamps bracketing the last acquisition.
    NOTE(review): inside the class body this method shadows the imported
    `psd` module name; other methods still resolve `psd` to the module at
    call time, so this works but is easy to misread.
    """
    if not self.device.is_streaming:
        raise RuntimeError('Streaming is not initialized, you must run setup() first!')
    # Tune to new frequency in main thread
    logger.debug(' Frequency hop: {:.2f} Hz'.format(freq))
    t_freq = time.time()
    if self.device.freq != freq:
        # Deactivate streaming before tuning
        if self._reset_stream:
            self.device.device.deactivateStream(self.device.stream)
        # Actually tune to new center frequency
        self.device.freq = freq
        # Reactivate streaming after tuning
        if self._reset_stream:
            self.device.device.activateStream(self.device.stream)
        # Delay reading samples after tuning: keep reading (and discarding)
        # samples until the configured settle time has elapsed
        if self._tune_delay:
            t_delay = time.time()
            while True:
                self.device.read_stream()
                t_delay_end = time.time()
                if t_delay_end - t_delay >= self._tune_delay:
                    break
            logger.debug(' Tune delay: {:.3f} s'.format(t_delay_end - t_delay))
    else:
        logger.debug(' Same frequency as before, tuning skipped')
    psd_state = self._psd.set_center_freq(freq)
    t_freq_end = time.time()
    logger.debug(' Tune time: {:.3f} s'.format(t_freq_end - t_freq))
    for repeat in range(self._buffer_repeats):
        logger.debug(' Repeat: {}'.format(repeat + 1))
        # Read samples from SDR in main thread
        t_acq = time.time()
        acq_time_start = datetime.datetime.utcnow()
        self.device.read_stream_into_buffer(self._buffer)
        acq_time_stop = datetime.datetime.utcnow()
        t_acq_end = time.time()
        logger.debug(' Acquisition time: {:.3f} s'.format(t_acq_end - t_acq))
        # Start FFT computation in another thread; copy the buffer so the
        # async FFT sees a stable snapshot while the buffer is refilled
        self._psd.update_async(psd_state, numpy.copy(self._buffer))
        t_final = time.time()
        if _shutdown:
            break
    psd_future = self._psd.result_async(psd_state)
    logger.debug(' Total hop time: {:.3f} s'.format(t_final - t_freq))
    return (psd_future, acq_time_start, acq_time_stop)
def sweep(self, min_freq, max_freq, bins, repeats, runs=0, time_limit=0, overlap=0,
          fft_window='hann', fft_overlap=0.5, crop=False, log_scale=True, remove_dc=False, detrend=None, lnb_lo=0,
          tune_delay=0, reset_stream=False, base_buffer_size=0, max_buffer_size=0, max_threads=0, max_queue_size=0):
    """Sweep spectrum using frequency hopping

    Repeatedly hops over [min_freq, max_freq], computing PSD at each hop and
    writing results asynchronously, until `runs` sweeps are done (0 = endless),
    `time_limit` seconds elapse (0 = no limit) or a shutdown signal arrives.
    """
    self.setup(
        bins, repeats, base_buffer_size, max_buffer_size,
        fft_window=fft_window, fft_overlap=fft_overlap, crop_factor=overlap if crop else 0,
        log_scale=log_scale, remove_dc=remove_dc, detrend=detrend, lnb_lo=lnb_lo, tune_delay=tune_delay,
        reset_stream=reset_stream, max_threads=max_threads, max_queue_size=max_queue_size
    )
    # Fix: t_start is assigned before the try block so the 'Total time' log in
    # the finally path cannot raise NameError if freq_plan() fails
    t_start = time.time()
    try:
        # Hop over device frequencies (LNB local oscillator offset removed)
        freq_list = self.freq_plan(min_freq - lnb_lo, max_freq - lnb_lo, bins, overlap)
        run = 0
        # Fix: initialized so an immediate shutdown (loop never entered)
        # cannot leave write_next_future unbound below
        write_next_future = None
        while not _shutdown and (runs == 0 or run < runs):
            run += 1
            t_run_start = time.time()
            logger.debug('Run: {}'.format(run))
            for freq in freq_list:
                # Tune to new frequency, acquire samples and compute Power Spectral Density
                psd_future, acq_time_start, acq_time_stop = self.psd(freq)
                # Write PSD to stdout (in another thread)
                self._writer.write_async(psd_future, acq_time_start, acq_time_stop,
                                         len(self._buffer) * self._buffer_repeats)
                if _shutdown:
                    break
            # Write end of measurement marker (in another thread)
            write_next_future = self._writer.write_next_async()
            t_run = time.time()
            logger.debug(' Total run time: {:.3f} s'.format(t_run - t_run_start))
            # End measurement if time limit is exceeded
            if time_limit and (time.time() - t_start) >= time_limit:
                logger.info('Time limit of {} s exceeded, completed {} runs'.format(time_limit, run))
                break
        # Wait for last write to be finished
        if write_next_future is not None:
            write_next_future.result()
        # Debug thread pool queues
        # Fix: use the module logger instead of the root logging module,
        # consistent with every other log call in this file
        logger.debug('Number of USB buffer overflow errors: {}'.format(self.device.buffer_overflow_count))
        logger.debug('PSD worker threads: {}'.format(self._psd._executor._max_workers))
        logger.debug('Max. PSD queue size: {} / {}'.format(self._psd._executor.max_queue_size_reached,
                                                           self._psd._executor.max_queue_size))
        logger.debug('Writer worker threads: {}'.format(self._writer._executor._max_workers))
        logger.debug('Max. Writer queue size: {} / {}'.format(self._writer._executor.max_queue_size_reached,
                                                              self._writer._executor.max_queue_size))
    finally:
        # Shutdown SDR
        self.stop()
        t_stop = time.time()
        logger.info('Total time: {:.3f} s'.format(t_stop - t_start))
|
#!/usr/bin/env python3
import sys, time, datetime, math, logging, signal
import numpy
import simplesoapy
from simplespectral import zeros
from soapypower import psd, writer
logger = logging.getLogger(__name__)
_shutdown = False
def _shutdown_handler(sig, frame):
    """Set global _shutdown flag when receiving SIGTERM or SIGINT signals"""
    # Only flag the request; the acquisition loops poll _shutdown and exit
    # cleanly, so pending writes are flushed and the SDR stream is stopped.
    global _shutdown
    _shutdown = True
# Register signals with _shutdown_handler
signal.signal(signal.SIGTERM, _shutdown_handler)
signal.signal(signal.SIGINT, _shutdown_handler)
# SIGBREAK (Ctrl+Break) only exists on Windows
if sys.platform == 'win32':
    signal.signal(signal.SIGBREAK, _shutdown_handler)
class SoapyPower:
    """SoapySDR spectrum analyzer

    Wraps a simplesoapy.SoapyDevice and sweeps a frequency range by hopping
    the tuner, computing Power Spectral Density of the acquired samples and
    writing results with the selected output writer.
    """
    def __init__(self, soapy_args='', sample_rate=2.00e6, bandwidth=0, corr=0, gain=20.7,
                 auto_gain=False, channel=0, antenna='', settings=None,
                 force_sample_rate=False, force_bandwidth=False,
                 output=sys.stdout, output_format='rtl_power'):
        self.device = simplesoapy.SoapyDevice(
            soapy_args=soapy_args, sample_rate=sample_rate, bandwidth=bandwidth, corr=corr,
            gain=gain, auto_gain=auto_gain, channel=channel, antenna=antenna, settings=settings,
            force_sample_rate=force_sample_rate, force_bandwidth=force_bandwidth
        )
        self._output = output                # file-like object results are written to
        self._output_format = output_format  # key into writer.formats
        # Acquisition state below is populated by setup() and cleared by stop()
        self._buffer = None
        self._buffer_repeats = None
        self._base_buffer_size = None
        self._max_buffer_size = None
        self._bins = None
        self._repeats = None
        self._tune_delay = None
        self._reset_stream = None
        self._psd = None
        self._writer = None
    def nearest_freq(self, freq, bin_size):
        """Return nearest frequency based on bin size"""
        return round(freq / bin_size) * bin_size
    def nearest_bins(self, bins, even=False, pow2=False):
        """Return nearest number of FFT bins (even or power of two)"""
        if pow2:
            bins_log2 = math.log(bins, 2)
            if bins_log2 % 1 != 0:
                bins = 2**math.ceil(bins_log2)
                logger.warning('number of FFT bins should be power of two, changing to {}'.format(bins))
        elif even:
            if bins % 2 != 0:
                bins = math.ceil(bins / 2) * 2
                logger.warning('number of FFT bins should be even, changing to {}'.format(bins))
        return bins
    def nearest_overlap(self, overlap, bins):
        """Return nearest overlap/crop factor based on number of bins"""
        bins_overlap = overlap * bins
        if bins_overlap % 2 != 0:
            # Round the overlapping bin count up to an even number
            bins_overlap = math.ceil(bins_overlap / 2) * 2
            overlap = bins_overlap / bins
            logger.warning('number of overlapping FFT bins should be even, '
                           'changing overlap/crop factor to {:.5f}'.format(overlap))
        return overlap
    def bin_size_to_bins(self, bin_size):
        """Convert bin size [Hz] to number of FFT bins"""
        return math.ceil(self.device.sample_rate / bin_size)
    def bins_to_bin_size(self, bins):
        """Convert number of FFT bins to bin size [Hz]"""
        return self.device.sample_rate / bins
    def time_to_repeats(self, bins, integration_time):
        """Convert integration time to number of repeats"""
        return math.ceil((self.device.sample_rate * integration_time) / bins)
    def repeats_to_time(self, bins, repeats):
        """Convert number of repeats to integration time"""
        return (repeats * bins) / self.device.sample_rate
    def freq_plan(self, min_freq, max_freq, bins, overlap=0, quiet=False):
        """Returns list of frequencies for frequency hopping

        :param quiet: if True, compute the plan without logging it
        """
        bin_size = self.bins_to_bin_size(bins)
        bins_crop = round((1 - overlap) * bins)
        sample_rate_crop = (1 - overlap) * self.device.sample_rate
        freq_range = max_freq - min_freq
        # Fix: direct boolean expression instead of `True if ... else False`
        hopping = freq_range >= sample_rate_crop
        hop_size = self.nearest_freq(sample_rate_crop, bin_size)
        hops = math.ceil(freq_range / hop_size) if hopping else 1
        # Without hopping, the single hop is centered on the requested span
        min_center_freq = min_freq + (hop_size / 2) if hopping else min_freq + (freq_range / 2)
        max_center_freq = min_center_freq + ((hops - 1) * hop_size)
        freq_list = [min_center_freq + (i * hop_size) for i in range(hops)]
        if not quiet:
            logger.info('overlap: {:.5f}'.format(overlap))
            logger.info('bin_size: {:.2f} Hz'.format(bin_size))
            logger.info('bins: {}'.format(bins))
            logger.info('bins (after crop): {}'.format(bins_crop))
            logger.info('sample_rate: {:.3f} MHz'.format(self.device.sample_rate / 1e6))
            logger.info('sample_rate (after crop): {:.3f} MHz'.format(sample_rate_crop / 1e6))
            logger.info('freq_range: {:.3f} MHz'.format(freq_range / 1e6))
            logger.info('hopping: {}'.format('YES' if hopping else 'NO'))
            logger.info('hop_size: {:.3f} MHz'.format(hop_size / 1e6))
            logger.info('hops: {}'.format(hops))
            logger.info('min_center_freq: {:.3f} MHz'.format(min_center_freq / 1e6))
            logger.info('max_center_freq: {:.3f} MHz'.format(max_center_freq / 1e6))
            logger.info('min_freq (after crop): {:.3f} MHz'.format((min_center_freq - (hop_size / 2)) / 1e6))
            logger.info('max_freq (after crop): {:.3f} MHz'.format((max_center_freq + (hop_size / 2)) / 1e6))
            logger.debug('Frequency hops table:')
            logger.debug(' {:8s} {:8s} {:8s}'.format('Min:', 'Center:', 'Max:'))
            for f in freq_list:
                logger.debug(' {:8.3f} MHz {:8.3f} MHz {:8.3f} MHz'.format(
                    (f - (self.device.sample_rate / 2)) / 1e6,
                    f / 1e6,
                    (f + (self.device.sample_rate / 2)) / 1e6,
                ))
        return freq_list
    def create_buffer(self, bins, repeats, base_buffer_size, max_buffer_size=0):
        """Create buffer for reading samples

        Returns (buffer_repeats, buffer).  max_buffer_size < 0 disables the
        size limit; 0 selects the default limit.
        """
        samples = bins * repeats
        buffer_repeats = 1
        # Round the buffer up to a whole number of base (device) buffers
        buffer_size = math.ceil(samples / base_buffer_size) * base_buffer_size
        if not max_buffer_size:
            # Max buffer size about 100 MB (numpy.complex64 is 8 bytes per
            # sample); fix: integer division keeps the limit an int
            max_buffer_size = (100 * 1024**2) // 8
        if max_buffer_size > 0:
            max_buffer_size = math.ceil(max_buffer_size / base_buffer_size) * base_buffer_size
            if buffer_size > max_buffer_size:
                logger.warning('Required buffer size ({}) will be shrinked to max_buffer_size ({})!'.format(
                    buffer_size, max_buffer_size
                ))
                buffer_repeats = math.ceil(buffer_size / max_buffer_size)
                buffer_size = max_buffer_size
        logger.info('repeats: {}'.format(repeats))
        logger.info('samples: {} (time: {:.5f} s)'.format(samples, samples / self.device.sample_rate))
        if max_buffer_size > 0:
            logger.info('max_buffer_size (samples): {} (repeats: {:.2f}, time: {:.5f} s)'.format(
                max_buffer_size, max_buffer_size / bins, max_buffer_size / self.device.sample_rate
            ))
        else:
            logger.info('max_buffer_size (samples): UNLIMITED')
        logger.info('buffer_size (samples): {} (repeats: {:.2f}, time: {:.5f} s)'.format(
            buffer_size, buffer_size / bins, buffer_size / self.device.sample_rate
        ))
        logger.info('buffer_repeats: {}'.format(buffer_repeats))
        return (buffer_repeats, zeros(buffer_size, numpy.complex64))
    def setup(self, bins, repeats, base_buffer_size=0, max_buffer_size=0, fft_window='hann',
              fft_overlap=0.5, crop_factor=0, log_scale=True, remove_dc=False, detrend=None,
              lnb_lo=0, tune_delay=0, reset_stream=False, max_threads=0, max_queue_size=0):
        """Prepare samples buffer and start streaming samples from device"""
        # Restart streaming from a clean state if it is already running
        if self.device.is_streaming:
            self.device.stop_stream()
        base_buffer = self.device.start_stream(buffer_size=base_buffer_size)
        self._bins = bins
        self._repeats = repeats
        # The driver may adjust the requested size; use the actual length
        self._base_buffer_size = len(base_buffer)
        self._max_buffer_size = max_buffer_size
        self._buffer_repeats, self._buffer = self.create_buffer(
            bins, repeats, self._base_buffer_size, self._max_buffer_size
        )
        self._tune_delay = tune_delay
        self._reset_stream = reset_stream
        # `psd` resolves to the imported soapypower.psd module here, not to
        # the psd() method below (class attributes are not in method scope)
        self._psd = psd.PSD(bins, self.device.sample_rate, fft_window=fft_window, fft_overlap=fft_overlap,
                            crop_factor=crop_factor, log_scale=log_scale, remove_dc=remove_dc, detrend=detrend,
                            lnb_lo=lnb_lo, max_threads=max_threads, max_queue_size=max_queue_size)
        self._writer = writer.formats[self._output_format](self._output)
    def stop(self):
        """Stop streaming samples from device and delete samples buffer"""
        if not self.device.is_streaming:
            return
        self.device.stop_stream()
        self._writer.close()
        self._bins = None
        self._repeats = None
        self._base_buffer_size = None
        self._max_buffer_size = None
        self._buffer_repeats = None
        self._buffer = None
        self._tune_delay = None
        self._reset_stream = None
        self._psd = None
        self._writer = None
    def psd(self, freq):
        """Tune to specified center frequency and compute Power Spectral Density

        Returns (psd_future, acq_time_start, acq_time_stop).
        """
        if not self.device.is_streaming:
            raise RuntimeError('Streaming is not initialized, you must run setup() first!')
        # Tune to new frequency in main thread
        logger.debug(' Frequency hop: {:.2f} Hz'.format(freq))
        t_freq = time.time()
        if self.device.freq != freq:
            # Deactivate streaming before tuning
            if self._reset_stream:
                self.device.device.deactivateStream(self.device.stream)
            # Actually tune to new center frequency
            self.device.freq = freq
            # Reactivate streaming after tuning
            if self._reset_stream:
                self.device.device.activateStream(self.device.stream)
            # Delay reading samples after tuning: discard samples until the
            # configured settle time has elapsed
            if self._tune_delay:
                t_delay = time.time()
                while True:
                    self.device.read_stream()
                    t_delay_end = time.time()
                    if t_delay_end - t_delay >= self._tune_delay:
                        break
                logger.debug(' Tune delay: {:.3f} s'.format(t_delay_end - t_delay))
        else:
            logger.debug(' Same frequency as before, tuning skipped')
        psd_state = self._psd.set_center_freq(freq)
        t_freq_end = time.time()
        logger.debug(' Tune time: {:.3f} s'.format(t_freq_end - t_freq))
        for repeat in range(self._buffer_repeats):
            logger.debug(' Repeat: {}'.format(repeat + 1))
            # Read samples from SDR in main thread
            t_acq = time.time()
            acq_time_start = datetime.datetime.utcnow()
            self.device.read_stream_into_buffer(self._buffer)
            acq_time_stop = datetime.datetime.utcnow()
            t_acq_end = time.time()
            logger.debug(' Acquisition time: {:.3f} s'.format(t_acq_end - t_acq))
            # Start FFT computation in another thread; copy so the async FFT
            # sees a stable snapshot while the buffer is refilled
            self._psd.update_async(psd_state, numpy.copy(self._buffer))
            t_final = time.time()
            if _shutdown:
                break
        psd_future = self._psd.result_async(psd_state)
        logger.debug(' Total hop time: {:.3f} s'.format(t_final - t_freq))
        return (psd_future, acq_time_start, acq_time_stop)
    def sweep(self, min_freq, max_freq, bins, repeats, runs=0, time_limit=0, overlap=0,
              fft_window='hann', fft_overlap=0.5, crop=False, log_scale=True, remove_dc=False, detrend=None, lnb_lo=0,
              tune_delay=0, reset_stream=False, base_buffer_size=0, max_buffer_size=0, max_threads=0, max_queue_size=0):
        """Sweep spectrum using frequency hopping

        Runs until `runs` sweeps complete (0 = endless), `time_limit` seconds
        elapse (0 = no limit) or a shutdown signal arrives.
        """
        self.setup(
            bins, repeats, base_buffer_size, max_buffer_size,
            fft_window=fft_window, fft_overlap=fft_overlap, crop_factor=overlap if crop else 0,
            log_scale=log_scale, remove_dc=remove_dc, detrend=detrend, lnb_lo=lnb_lo, tune_delay=tune_delay,
            reset_stream=reset_stream, max_threads=max_threads, max_queue_size=max_queue_size
        )
        # Fix: assigned before the try block so the 'Total time' log in the
        # finally path cannot raise NameError if freq_plan() fails
        t_start = time.time()
        try:
            freq_list = self.freq_plan(min_freq - lnb_lo, max_freq - lnb_lo, bins, overlap)
            run = 0
            # Fix: initialized so an immediate shutdown cannot leave it unbound
            write_next_future = None
            while not _shutdown and (runs == 0 or run < runs):
                run += 1
                t_run_start = time.time()
                logger.debug('Run: {}'.format(run))
                for freq in freq_list:
                    # Tune to new frequency, acquire samples and compute Power Spectral Density
                    psd_future, acq_time_start, acq_time_stop = self.psd(freq)
                    # Write PSD to stdout (in another thread)
                    self._writer.write_async(psd_future, acq_time_start, acq_time_stop,
                                             len(self._buffer) * self._buffer_repeats)
                    if _shutdown:
                        break
                # Write end of measurement marker (in another thread)
                write_next_future = self._writer.write_next_async()
                t_run = time.time()
                logger.debug(' Total run time: {:.3f} s'.format(t_run - t_run_start))
                # End measurement if time limit is exceeded
                if time_limit and (time.time() - t_start) >= time_limit:
                    logger.info('Time limit of {} s exceeded, completed {} runs'.format(time_limit, run))
                    break
            # Wait for last write to be finished
            if write_next_future is not None:
                write_next_future.result()
            # Debug thread pool queues
            # Fix: use the module logger, not the root logging module
            logger.debug('Number of USB buffer overflow errors: {}'.format(self.device.buffer_overflow_count))
            logger.debug('PSD worker threads: {}'.format(self._psd._executor._max_workers))
            logger.debug('Max. PSD queue size: {} / {}'.format(self._psd._executor.max_queue_size_reached,
                                                               self._psd._executor.max_queue_size))
            logger.debug('Writer worker threads: {}'.format(self._writer._executor._max_workers))
            logger.debug('Max. Writer queue size: {} / {}'.format(self._writer._executor.max_queue_size_reached,
                                                                  self._writer._executor.max_queue_size))
        finally:
            # Shutdown SDR
            self.stop()
            t_stop = time.time()
            logger.info('Total time: {:.3f} s'.format(t_stop - t_start))
|
en
| 0.848703
|
#!/usr/bin/env python3 Set global _shutdown flag when receiving SIGTERM or SIGINT signals # Register signals with _shutdown_handler SoapySDR spectrum analyzer Return nearest frequency based on bin size Return nearest number of FFT bins (even or power of two) Return nearest overlap/crop factor based on number of bins Convert bin size [Hz] to number of FFT bins Convert number of FFT bins to bin size [Hz] Convert integration time to number of repeats Convert number of repeats to integration time Returns list of frequencies for frequency hopping Create buffer for reading samples # Max buffer size about 100 MB Prepare samples buffer and start streaming samples from device Stop streaming samples from device and delete samples buffer Tune to specified center frequency and compute Power Spectral Density # Tune to new frequency in main thread # Deactivate streaming before tuning # Actually tune to new center frequency # Reactivate straming after tuning # Delay reading samples after tuning # Read samples from SDR in main thread # Start FFT computation in another thread Sweep spectrum using frequency hopping # Tune to new frequency, acquire samples and compute Power Spectral Density # Write PSD to stdout (in another thread) # Write end of measurement marker (in another thread) # End measurement if time limit is exceeded # Wait for last write to be finished # Debug thread pool queues # Shutdown SDR
| 2.407536
| 2
|
myvenv/lib/python3.6/site-packages/nltk/twitter/common.py
|
catb0y/twitter_feeling
| 11
|
6626530
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Twitter client
#
# Copyright (C) 2001-2017 NLTK Project
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Utility functions for the :module:`twitterclient` module which do not require
the `twython` library to have been installed.
"""
from __future__ import print_function
import csv
import gzip
import json
import nltk.compat as compat
HIER_SEPARATOR = "."
def extract_fields(tweet, fields):
"""
Extract field values from a full tweet and return them as a list
:param json tweet: The tweet in JSON format
:param list fields: The fields to be extracted from the tweet
:rtype: list(str)
"""
out = []
for field in fields:
try:
_add_field_to_out(tweet, field, out)
except TypeError:
raise RuntimeError('Fatal error when extracting fields. Cannot find field ', field)
return out
def _add_field_to_out(json, field, out):
if _is_composed_key(field):
key, value = _get_key_value_composed(field)
_add_field_to_out(json[key], value, out)
else:
out += [json[field]]
def _is_composed_key(field):
if HIER_SEPARATOR in field:
return True
return False
def _get_key_value_composed(field):
out = field.split(HIER_SEPARATOR)
# there could be up to 3 levels
key = out[0]
value = HIER_SEPARATOR.join(out[1:])
return key, value
def _get_entity_recursive(json, entity):
if not json:
return None
elif isinstance(json, dict):
for key, value in json.items():
if key == entity:
return value
# 'entities' and 'extended_entities' are wrappers in Twitter json
# structure that contain other Twitter objects. See:
# https://dev.twitter.com/overview/api/entities-in-twitter-objects
if key == 'entities' or key == 'extended_entities':
candidate = _get_entity_recursive(value, entity)
if candidate is not None:
return candidate
return None
elif isinstance(json, list):
for item in json:
candidate = _get_entity_recursive(item, entity)
if candidate is not None:
return candidate
return None
else:
return None
def json2csv(fp, outfile, fields, encoding='utf8', errors='replace',
             gzip_compress=False):
    """
    Extract selected fields from a file of line-separated JSON tweets and
    write to a file in CSV format.

    This utility function allows a file of full tweets to be easily converted
    to a CSV file for easier processing. For example, just TweetIDs or
    just the text content of the Tweets can be extracted.

    Additionally, the function allows combinations of fields of other Twitter
    objects (mainly the users, see below).

    For Twitter entities (e.g. hashtags of a Tweet), and for geolocation, see
    `json2csv_entities`

    :param fp: The file-like object containing full tweets, one JSON-encoded\
    tweet per line
    :param str outfile: The name of the text file where results should be\
    written
    :param list fields: The list of fields to be extracted. Useful examples\
    are 'id_str' for the tweetID and 'text' for the text of the tweet. See\
    <https://dev.twitter.com/overview/api/tweets> for a full list of fields.\
    e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count']\
    Additionally, it allows IDs from other Twitter objects, e. g.,\
    ['id', 'text', 'user.id', 'user.followers_count', 'user.friends_count']
    :param errors: Behaviour for encoding errors, see\
    https://docs.python.org/3/library/codecs.html#codec-base-classes
    :param gzip_compress: if `True`, output files are compressed with gzip
    """
    (writer, outf) = outf_writer_compat(outfile, encoding, errors, gzip_compress)
    # write the list of fields as header
    writer.writerow(fields)
    # process the file line by line; each line holds one complete tweet
    for line in fp:
        tweet = json.loads(line)
        row = extract_fields(tweet, fields)
        writer.writerow(row)
    outf.close()
def outf_writer_compat(outfile, encoding, errors, gzip_compress=False):
    """
    Identify appropriate CSV writer given the Python version
    """
    if not compat.PY3:
        # Python 2: binary mode plus a Unicode-aware writer shim
        outf = gzip.open(outfile, 'wb') if gzip_compress else open(outfile, 'wb')
        return (compat.UnicodeWriter(outf, encoding=encoding, errors=errors), outf)
    # Python 3: text mode with explicit encoding / error handling
    if gzip_compress:
        outf = gzip.open(outfile, 'wt', encoding=encoding, errors=errors)
    else:
        outf = open(outfile, 'w', encoding=encoding, errors=errors)
    return (csv.writer(outf), outf)
def json2csv_entities(tweets_file, outfile, main_fields, entity_type, entity_fields,
                      encoding='utf8', errors='replace', gzip_compress=False):
    """
    Extract selected fields from a file of line-separated JSON tweets and
    write to a file in CSV format.

    This utility function allows a file of full Tweets to be easily converted
    to a CSV file for easier processing of Twitter entities. For example, the
    hashtags or media elements of a tweet can be extracted.

    It returns one line per entity of a Tweet, e.g. if a tweet has two hashtags
    there will be two lines in the output file, one per hashtag

    :param tweets_file: the file-like object containing full Tweets
    :param str outfile: The path of the text file where results should be\
    written
    :param list main_fields: The list of fields to be extracted from the main\
    object, usually the tweet. Useful examples: 'id_str' for the tweetID. See\
    <https://dev.twitter.com/overview/api/tweets> for a full list of fields.
    e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count']
    If `entity_type` is expressed with hierarchy, then it is the list of\
    fields of the object that corresponds to the key of the entity_type,\
    (e.g., for entity_type='user.urls', the fields in the main_fields list\
    belong to the user object; for entity_type='place.bounding_box', the\
    files in the main_field list belong to the place object of the tweet).
    :param list entity_type: The name of the entity: 'hashtags', 'media',\
    'urls' and 'user_mentions' for the tweet object. For a user object,\
    this needs to be expressed with a hierarchy: `'user.urls'`. For the\
    bounding box of the Tweet location, use `'place.bounding_box'`.
    :param list entity_fields: The list of fields to be extracted from the\
    entity. E.g. `['text']` (of the Tweet)
    :param errors: Behaviour for encoding errors, see\
    https://docs.python.org/3/library/codecs.html#codec-base-classes
    :param gzip_compress: if `True`, output files are compressed with gzip
    """
    (writer, outf) = outf_writer_compat(outfile, encoding, errors, gzip_compress)
    header = get_header_field_list(main_fields, entity_type, entity_fields)
    writer.writerow(header)
    for line in tweets_file:
        tweet = json.loads(line)
        if _is_composed_key(entity_type):
            # Hierarchical entity (e.g. 'user.urls'): main fields come from
            # the wrapping object, entities from inside it
            key, value = _get_key_value_composed(entity_type)
            object_json = _get_entity_recursive(tweet, key)
            if not object_json:
                # this can happen in the case of "place"
                continue
            object_fields = extract_fields(object_json, main_fields)
            items = _get_entity_recursive(object_json, value)
            _write_to_file(object_fields, items, entity_fields, writer)
        else:
            # Flat entity (e.g. 'hashtags'): main fields come from the tweet
            tweet_fields = extract_fields(tweet, main_fields)
            items = _get_entity_recursive(tweet, entity_type)
            _write_to_file(tweet_fields, items, entity_fields, writer)
    outf.close()
def get_header_field_list(main_fields, entity_type, entity_fields):
    """Build the CSV header row used by `json2csv_entities` output."""
    if _is_composed_key(entity_type):
        main_entity, sub_entity = _get_key_value_composed(entity_type)
    else:
        main_entity, sub_entity = None, entity_type
    # Main columns are prefixed with the wrapping object name, if any
    if main_entity:
        prefixed_main = [HIER_SEPARATOR.join([main_entity, field]) for field in main_fields]
    else:
        prefixed_main = main_fields
    prefixed_sub = [HIER_SEPARATOR.join([sub_entity, field]) for field in entity_fields]
    return prefixed_main + prefixed_sub
def _write_to_file(object_fields, items, entity_fields, writer):
    """Write one CSV row per entity item (or one row for a dict entity)."""
    if not items:
        # it could be that the entity is just not present for the tweet
        # e.g. tweet hashtag is always present, even as [], however
        # tweet media may not be present
        return
    if isinstance(items, dict):
        # this happens e.g. for "place" of a tweet
        row = object_fields
        # there might be composed keys in the list of required fields
        entity_field_values = [x for x in entity_fields if not _is_composed_key(x)]
        entity_field_composed = [x for x in entity_fields if _is_composed_key(x)]
        for field in entity_field_values:
            value = items[field]
            # list-valued entries contribute one column per element
            if isinstance(value, list):
                row += value
            else:
                row += [value]
        # now check required dictionaries
        for d in entity_field_composed:
            kd, vd = _get_key_value_composed(d)
            json_dict = items[kd]
            if not isinstance(json_dict, dict):
                raise RuntimeError("""Key {0} does not contain a dictionary
                in the json file""".format(kd))
            row += [json_dict[vd]]
        writer.writerow(row)
        return
    # in general it is a list: one output row per entity item
    for item in items:
        row = object_fields + extract_fields(item, entity_fields)
        writer.writerow(row)
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Twitter client
#
# Copyright (C) 2001-2017 NLTK Project
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Utility functions for the :module:`twitterclient` module which do not require
the `twython` library to have been installed.
"""
from __future__ import print_function
import csv
import gzip
import json
import nltk.compat as compat
HIER_SEPARATOR = "."
def extract_fields(tweet, fields):
"""
Extract field values from a full tweet and return them as a list
:param json tweet: The tweet in JSON format
:param list fields: The fields to be extracted from the tweet
:rtype: list(str)
"""
out = []
for field in fields:
try:
_add_field_to_out(tweet, field, out)
except TypeError:
raise RuntimeError('Fatal error when extracting fields. Cannot find field ', field)
return out
def _add_field_to_out(json, field, out):
if _is_composed_key(field):
key, value = _get_key_value_composed(field)
_add_field_to_out(json[key], value, out)
else:
out += [json[field]]
def _is_composed_key(field):
if HIER_SEPARATOR in field:
return True
return False
def _get_key_value_composed(field):
out = field.split(HIER_SEPARATOR)
# there could be up to 3 levels
key = out[0]
value = HIER_SEPARATOR.join(out[1:])
return key, value
def _get_entity_recursive(json, entity):
if not json:
return None
elif isinstance(json, dict):
for key, value in json.items():
if key == entity:
return value
# 'entities' and 'extended_entities' are wrappers in Twitter json
# structure that contain other Twitter objects. See:
# https://dev.twitter.com/overview/api/entities-in-twitter-objects
if key == 'entities' or key == 'extended_entities':
candidate = _get_entity_recursive(value, entity)
if candidate is not None:
return candidate
return None
elif isinstance(json, list):
for item in json:
candidate = _get_entity_recursive(item, entity)
if candidate is not None:
return candidate
return None
else:
return None
def json2csv(fp, outfile, fields, encoding='utf8', errors='replace',
             gzip_compress=False):
    """
    Extract selected fields from a file of line-separated JSON tweets and
    write to a file in CSV format.

    This utility function allows a file of full tweets to be easily converted
    to a CSV file for easier processing. For example, just TweetIDs or
    just the text content of the Tweets can be extracted.

    Additionally, the function allows combinations of fields of other Twitter
    objects (mainly the users, see below).

    For Twitter entities (e.g. hashtags of a Tweet), and for geolocation, see
    `json2csv_entities`

    :param fp: a file-like object containing full tweets, one JSON tweet\
    per line (the docstring previously called this parameter ``infile``)
    :param str outfile: The name of the text file where results should be\
    written
    :param list fields: The list of fields to be extracted. Useful examples\
    are 'id_str' for the tweetID and 'text' for the text of the tweet. See\
    <https://dev.twitter.com/overview/api/tweets> for a full list of fields.\
    e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count']\
    Additionally, it allows IDs from other Twitter objects, e. g.,\
    ['id', 'text', 'user.id', 'user.followers_count', 'user.friends_count']

    :param error: Behaviour for encoding errors, see\
    https://docs.python.org/3/library/codecs.html#codec-base-classes

    :param gzip_compress: if `True`, output files are compressed with gzip
    """
    (writer, outf) = outf_writer_compat(outfile, encoding, errors, gzip_compress)
    # write the list of fields as header
    writer.writerow(fields)
    # process the file: one JSON tweet per input line, one CSV row out
    for line in fp:
        tweet = json.loads(line)
        row = extract_fields(tweet, fields)
        writer.writerow(row)
    outf.close()
def outf_writer_compat(outfile, encoding, errors, gzip_compress=False):
    """
    Identify appropriate CSV writer given the Python version.

    Returns a ``(writer, outf)`` pair; the caller is responsible for
    closing ``outf``.

    :param str outfile: path of the file to write
    :param str encoding: text encoding (applied at open() on Python 3,
        by ``compat.UnicodeWriter`` on Python 2)
    :param errors: encoding error handling scheme
    :param gzip_compress: if `True`, the output file is gzip-compressed
    """
    if compat.PY3:
        # Python 3: open in text mode so csv.writer receives str rows.
        if gzip_compress:
            outf = gzip.open(outfile, 'wt', encoding=encoding, errors=errors)
        else:
            outf = open(outfile, 'w', encoding=encoding, errors=errors)
        writer = csv.writer(outf)
    else:
        # Python 2: open in binary mode; UnicodeWriter does the encoding.
        if gzip_compress:
            outf = gzip.open(outfile, 'wb')
        else:
            outf = open(outfile, 'wb')
        writer = compat.UnicodeWriter(outf, encoding=encoding, errors=errors)
    return (writer, outf)
def json2csv_entities(tweets_file, outfile, main_fields, entity_type, entity_fields,
                      encoding='utf8', errors='replace', gzip_compress=False):
    """
    Extract selected fields from a file of line-separated JSON tweets and
    write to a file in CSV format.

    This utility function allows a file of full Tweets to be easily converted
    to a CSV file for easier processing of Twitter entities. For example, the
    hashtags or media elements of a tweet can be extracted.

    It returns one line per entity of a Tweet, e.g. if a tweet has two hashtags
    there will be two lines in the output file, one per hashtag

    :param tweets_file: the file-like object containing full Tweets

    :param str outfile: The path of the text file where results should be\
    written

    :param list main_fields: The list of fields to be extracted from the main\
    object, usually the tweet. Useful examples: 'id_str' for the tweetID. See\
    <https://dev.twitter.com/overview/api/tweets> for a full list of fields.
    e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count']
    If `entity_type` is expressed with hierarchy, then it is the list of\
    fields of the object that corresponds to the key of the entity_type,\
    (e.g., for entity_type='user.urls', the fields in the main_fields list\
    belong to the user object; for entity_type='place.bounding_box', the\
    fields in the main_field list belong to the place object of the tweet).

    :param list entity_type: The name of the entity: 'hashtags', 'media',\
    'urls' and 'user_mentions' for the tweet object. For a user object,\
    this needs to be expressed with a hierarchy: `'user.urls'`. For the\
    bounding box of the Tweet location, use `'place.bounding_box'`.

    :param list entity_fields: The list of fields to be extracted from the\
    entity. E.g. `['text']` (of the Tweet)

    :param error: Behaviour for encoding errors, see\
    https://docs.python.org/3/library/codecs.html#codec-base-classes

    :param gzip_compress: if `True`, output files are compressed with gzip
    """
    (writer, outf) = outf_writer_compat(outfile, encoding, errors, gzip_compress)
    header = get_header_field_list(main_fields, entity_type, entity_fields)
    writer.writerow(header)
    for line in tweets_file:
        tweet = json.loads(line)
        if _is_composed_key(entity_type):
            # e.g. 'user.urls': the parent object is tweet['user'] and the
            # entity ('urls') is looked up inside that object.
            key, value = _get_key_value_composed(entity_type)
            object_json = _get_entity_recursive(tweet, key)
            if not object_json:
                # this can happen in the case of "place"
                continue
            object_fields = extract_fields(object_json, main_fields)
            items = _get_entity_recursive(object_json, value)
            _write_to_file(object_fields, items, entity_fields, writer)
        else:
            # Plain entity: parent object is the tweet itself.
            tweet_fields = extract_fields(tweet, main_fields)
            items = _get_entity_recursive(tweet, entity_type)
            _write_to_file(tweet_fields, items, entity_fields, writer)
    outf.close()
def get_header_field_list(main_fields, entity_type, entity_fields):
    """Build the CSV header row for `json2csv_entities` output.

    Main-object columns are prefixed with the parent entity name when
    *entity_type* is hierarchical; entity columns are always prefixed
    with the (sub-)entity name.
    """
    if _is_composed_key(entity_type):
        main_entity, sub_entity = _get_key_value_composed(entity_type)
    else:
        main_entity, sub_entity = None, entity_type
    if main_entity:
        prefix_cols = [HIER_SEPARATOR.join([main_entity, f]) for f in main_fields]
    else:
        prefix_cols = main_fields
    entity_cols = [HIER_SEPARATOR.join([sub_entity, f]) for f in entity_fields]
    return prefix_cols + entity_cols
def _write_to_file(object_fields, items, entity_fields, writer):
    """Write one CSV row per entity item (or a single row for a dict entity).

    :param list object_fields: already-extracted values of the parent object
    :param items: the entity payload: a list (e.g. hashtags) or a dict
        (e.g. place)
    :param list entity_fields: fields to pull out of each entity
    :param writer: a csv writer object
    :raises RuntimeError: when a composed entity field does not map to a dict
    """
    if not items:
        # it could be that the entity is just not present for the tweet
        # e.g. tweet hashtag is always present, even as [], however
        # tweet media may not be present
        return
    if isinstance(items, dict):
        # this happens e.g. for "place" of a tweet
        # Copy object_fields: the original code aliased the caller's list
        # and the `row += ...` below mutated it in place.
        row = list(object_fields)
        # there might be composed keys in the list of required fields
        plain_fields = [x for x in entity_fields if not _is_composed_key(x)]
        composed_fields = [x for x in entity_fields if _is_composed_key(x)]
        for field in plain_fields:
            value = items[field]
            if isinstance(value, list):
                row += value
            else:
                row += [value]
        # now check required dictionaries
        for d in composed_fields:
            kd, vd = _get_key_value_composed(d)
            json_dict = items[kd]
            if not isinstance(json_dict, dict):
                raise RuntimeError("""Key {0} does not contain a dictionary
                in the json file""".format(kd))
            row += [json_dict[vd]]
        writer.writerow(row)
        return
    # in general it is a list: one output row per entity element
    for item in items:
        writer.writerow(object_fields + extract_fields(item, entity_fields))
|
en
| 0.732494
|
# -*- coding: utf-8 -*- # Natural Language Toolkit: Twitter client # # Copyright (C) 2001-2017 NLTK Project # Author: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # URL: <http://nltk.org/> # For license information, see LICENSE.TXT Utility functions for the :module:`twitterclient` module which do not require the `twython` library to have been installed. Extract field values from a full tweet and return them as a list :param json tweet: The tweet in JSON format :param list fields: The fields to be extracted from the tweet :rtype: list(str) # there could be up to 3 levels # 'entities' and 'extended_entities' are wrappers in Twitter json # structure that contain other Twitter objects. See: # https://dev.twitter.com/overview/api/entities-in-twitter-objects Extract selected fields from a file of line-separated JSON tweets and write to a file in CSV format. This utility function allows a file of full tweets to be easily converted to a CSV file for easier processing. For example, just TweetIDs or just the text content of the Tweets can be extracted. Additionally, the function allows combinations of fields of other Twitter objects (mainly the users, see below). For Twitter entities (e.g. hashtags of a Tweet), and for geolocation, see `json2csv_entities` :param str infile: The name of the file containing full tweets :param str outfile: The name of the text file where results should be\ written :param list fields: The list of fields to be extracted. Useful examples\ are 'id_str' for the tweetID and 'text' for the text of the tweet. See\ <https://dev.twitter.com/overview/api/tweets> for a full list of fields.\ e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count']\ Additonally, it allows IDs from other Twitter objects, e. 
g.,\ ['id', 'text', 'user.id', 'user.followers_count', 'user.friends_count'] :param error: Behaviour for encoding errors, see\ https://docs.python.org/3/library/codecs.html#codec-base-classes :param gzip_compress: if `True`, output files are compressed with gzip # write the list of fields as header # process the file Identify appropriate CSV writer given the Python version Extract selected fields from a file of line-separated JSON tweets and write to a file in CSV format. This utility function allows a file of full Tweets to be easily converted to a CSV file for easier processing of Twitter entities. For example, the hashtags or media elements of a tweet can be extracted. It returns one line per entity of a Tweet, e.g. if a tweet has two hashtags there will be two lines in the output file, one per hashtag :param tweets_file: the file-like object containing full Tweets :param str outfile: The path of the text file where results should be\ written :param list main_fields: The list of fields to be extracted from the main\ object, usually the tweet. Useful examples: 'id_str' for the tweetID. See\ <https://dev.twitter.com/overview/api/tweets> for a full list of fields. e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count'] If `entity_type` is expressed with hierarchy, then it is the list of\ fields of the object that corresponds to the key of the entity_type,\ (e.g., for entity_type='user.urls', the fields in the main_fields list\ belong to the user object; for entity_type='place.bounding_box', the\ files in the main_field list belong to the place object of the tweet). :param list entity_type: The name of the entity: 'hashtags', 'media',\ 'urls' and 'user_mentions' for the tweet object. For a user object,\ this needs to be expressed with a hierarchy: `'user.urls'`. For the\ bounding box of the Tweet location, use `'place.bounding_box'`. :param list entity_fields: The list of fields to be extracted from the\ entity. E.g. 
`['text']` (of the Tweet) :param error: Behaviour for encoding errors, see\ https://docs.python.org/3/library/codecs.html#codec-base-classes :param gzip_compress: if `True`, ouput files are compressed with gzip # this can happen in the case of "place" # it could be that the entity is just not present for the tweet # e.g. tweet hashtag is always present, even as [], however # tweet media may not be present # this happens e.g. for "place" of a tweet # there might be composed keys in de list of required fields # now check required dictionaries Key {0} does not contain a dictionary in the json file # in general it is a list
| 3.250988
| 3
|
tests/mod_base/base.py
|
fossabot/extendable
| 3
|
6626531
|
from extendable import ExtendableMeta
class Base(metaclass=ExtendableMeta):
    """Extendable base class used by the extension mechanism tests."""

    def test(self) -> str:
        """Return the marker string identifying this (base) implementation."""
        return "base"
|
from extendable import ExtendableMeta
class Base(metaclass=ExtendableMeta):
def test(self) -> str:
return "base"
|
none
| 1
| 2.843421
| 3
|
|
kytos/core/link.py
|
MarvinTorres/kytos
| 0
|
6626532
|
"""Module with all classes related to links.
Links are low level abstractions representing connections between two
interfaces.
"""
import hashlib
import json
import random
from kytos.core.common import GenericEntity
from kytos.core.exceptions import (KytosLinkCreationError,
KytosNoTagAvailableError)
from kytos.core.interface import TAGType
class Link(GenericEntity):
    """Define a link between two Endpoints.

    Links are low level abstractions representing connections between
    two interfaces (kytos.core.interface.Interface).
    """

    def __init__(self, endpoint_a, endpoint_b):
        """Create a Link instance and set its attributes.

        Two kytos.core.interface.Interface are required as parameters.

        Raises:
            KytosLinkCreationError: if any endpoint is None.

        """
        if endpoint_a is None:
            raise KytosLinkCreationError("endpoint_a cannot be None")
        if endpoint_b is None:
            raise KytosLinkCreationError("endpoint_b cannot be None")
        self.endpoint_a = endpoint_a
        self.endpoint_b = endpoint_b
        super().__init__()

    def __hash__(self):
        # Hash on the canonical link id so hashing is consistent with
        # __eq__ (which also compares ids).
        return hash(self.id)

    def is_enabled(self):
        """Override the is_enabled method.

        We consider a link enabled when all the interfaces are enabled.

        Returns:
            boolean: True if both interfaces are enabled, False otherwise.

        """
        return (self._enabled and self.endpoint_a.is_enabled() and
                self.endpoint_b.is_enabled())

    def is_active(self):
        """Override the is_active method.

        We consider a link active whether all the interfaces are active.

        Returns:
            boolean: True if the interfaces are active, otherwise False.

        """
        return (self._active and self.endpoint_a.is_active() and
                self.endpoint_b.is_active())

    def __eq__(self, other):
        """Check if two instances of Link are equal."""
        if not isinstance(other, Link):
            # Fix: comparing against a non-Link used to raise
            # AttributeError (other.id); delegate to the other operand.
            return NotImplemented
        return self.id == other.id

    @property
    def id(self):  # pylint: disable=invalid-name
        """Return id from Link instance.

        The id is a SHA-256 digest of the two (dpid, port) pairs ordered
        canonically, so Link(a, b) and Link(b, a) get the same id.

        Returns:
            string: link id.

        """
        dpid_a = self.endpoint_a.switch.dpid
        port_a = self.endpoint_a.port_number
        dpid_b = self.endpoint_b.switch.dpid
        port_b = self.endpoint_b.port_number
        if dpid_a < dpid_b:
            elements = (dpid_a, port_a, dpid_b, port_b)
        elif dpid_a > dpid_b:
            elements = (dpid_b, port_b, dpid_a, port_a)
        elif port_a < port_b:
            elements = (dpid_a, port_a, dpid_b, port_b)
        else:
            elements = (dpid_b, port_b, dpid_a, port_a)
        str_id = "%s:%s:%s:%s" % elements
        return hashlib.sha256(str_id.encode('utf-8')).hexdigest()

    @property
    def available_tags(self):
        """Return the available tags for the link.

        Based on the endpoint tags.
        """
        return [tag for tag in self.endpoint_a.available_tags if tag in
                self.endpoint_b.available_tags]

    def use_tag(self, tag):
        """Remove a specific tag from available_tags if it is there.

        Deprecated: use only the get_next_available_tag method.
        """
        if self.is_tag_available(tag):
            self.endpoint_a.use_tag(tag)
            self.endpoint_b.use_tag(tag)
            return True
        return False

    def is_tag_available(self, tag):
        """Check if a tag is available on both endpoints."""
        return (self.endpoint_a.is_tag_available(tag) and
                self.endpoint_b.is_tag_available(tag))

    def get_next_available_tag(self):
        """Return the next available tag if exists.

        Raises:
            KytosNoTagAvailableError: when no tag is free on both ends.

        """
        # Copy the available tags because in case of error
        # we will remove and add elements to the available_tags
        available_tags_a = self.endpoint_a.available_tags.copy()
        available_tags_b = self.endpoint_b.available_tags.copy()
        # Shuffle so concurrent allocations do not all contend on the
        # lowest-numbered tags.
        random.shuffle(available_tags_a)
        random.shuffle(available_tags_b)

        for tag in available_tags_a:
            # Tag does not exist in endpoint B. Try another tag.
            if tag not in available_tags_b:
                continue
            # Tag already in use in A. Try another tag.
            if not self.endpoint_a.use_tag(tag):
                continue
            # Tag already in use in B. Mark A's tag as available again.
            if not self.endpoint_b.use_tag(tag):
                self.endpoint_a.make_tag_available(tag)
                continue
            # Tag used successfully by both endpoints. Returning.
            return tag
        raise KytosNoTagAvailableError(self)

    def make_tag_available(self, tag):
        """Add a specific tag in available_tags of both endpoints."""
        if not self.is_tag_available(tag):
            self.endpoint_a.make_tag_available(tag)
            self.endpoint_b.make_tag_available(tag)
            return True
        return False

    def available_vlans(self):
        """Get all available vlans from each interface in the link."""
        vlans_a = self._get_available_vlans(self.endpoint_a)
        vlans_b = self._get_available_vlans(self.endpoint_b)
        return [vlan for vlan in vlans_a if vlan in vlans_b]

    @staticmethod
    def _get_available_vlans(endpoint):
        """Return all VLAN-type tags from an endpoint."""
        tags = endpoint.available_tags
        return [tag for tag in tags if tag.tag_type == TAGType.VLAN]

    def as_dict(self):
        """Return the Link as a dictionary."""
        return {'id': self.id,
                'endpoint_a': self.endpoint_a.as_dict(),
                'endpoint_b': self.endpoint_b.as_dict(),
                'metadata': self.get_metadata_as_dict(),
                'active': self.is_active(),
                'enabled': self.is_enabled()}

    def as_json(self):
        """Return the Link as a JSON string."""
        return json.dumps(self.as_dict())
|
"""Module with all classes related to links.
Links are low level abstractions representing connections between two
interfaces.
"""
import hashlib
import json
import random
from kytos.core.common import GenericEntity
from kytos.core.exceptions import (KytosLinkCreationError,
KytosNoTagAvailableError)
from kytos.core.interface import TAGType
class Link(GenericEntity):
"""Define a link between two Endpoints."""
def __init__(self, endpoint_a, endpoint_b):
"""Create a Link instance and set its attributes.
Two kytos.core.interface.Interface are required as parameters.
"""
if endpoint_a is None:
raise KytosLinkCreationError("endpoint_a cannot be None")
if endpoint_b is None:
raise KytosLinkCreationError("endpoint_b cannot be None")
self.endpoint_a = endpoint_a
self.endpoint_b = endpoint_b
super().__init__()
def __hash__(self):
return hash(self.id)
def is_enabled(self):
"""Override the is_enabled method.
We consider a link enabled when all the interfaces are enabled.
Returns:
boolean: True if both interfaces are enabled, False otherwise.
"""
return (self._enabled and self.endpoint_a.is_enabled() and
self.endpoint_b.is_enabled())
def is_active(self):
"""Override the is_active method.
We consider a link active whether all the interfaces are active.
Returns:
boolean: True if the interfaces are active, othewrise False.
"""
return (self._active and self.endpoint_a.is_active() and
self.endpoint_b.is_active())
def __eq__(self, other):
"""Check if two instances of Link are equal."""
return self.id == other.id
@property
def id(self): # pylint: disable=invalid-name
"""Return id from Link intance.
Returns:
string: link id.
"""
dpid_a = self.endpoint_a.switch.dpid
port_a = self.endpoint_a.port_number
dpid_b = self.endpoint_b.switch.dpid
port_b = self.endpoint_b.port_number
if dpid_a < dpid_b:
elements = (dpid_a, port_a, dpid_b, port_b)
elif dpid_a > dpid_b:
elements = (dpid_b, port_b, dpid_a, port_a)
elif port_a < port_b:
elements = (dpid_a, port_a, dpid_b, port_b)
else:
elements = (dpid_b, port_b, dpid_a, port_a)
str_id = "%s:%s:%s:%s" % elements
return hashlib.sha256(str_id.encode('utf-8')).hexdigest()
@property
def available_tags(self):
"""Return the available tags for the link.
Based on the endpoint tags.
"""
return [tag for tag in self.endpoint_a.available_tags if tag in
self.endpoint_b.available_tags]
def use_tag(self, tag):
"""Remove a specific tag from available_tags if it is there.
Deprecated: use only the get_next_available_tag method.
"""
if self.is_tag_available(tag):
self.endpoint_a.use_tag(tag)
self.endpoint_b.use_tag(tag)
return True
return False
def is_tag_available(self, tag):
"""Check if a tag is available."""
return (self.endpoint_a.is_tag_available(tag) and
self.endpoint_b.is_tag_available(tag))
def get_next_available_tag(self):
"""Return the next available tag if exists."""
# Copy the available tags because in case of error
# we will remove and add elements to the available_tags
available_tags_a = self.endpoint_a.available_tags.copy()
available_tags_b = self.endpoint_b.available_tags.copy()
random.shuffle(available_tags_a)
random.shuffle(available_tags_b)
for tag in available_tags_a:
# Tag does not exist in endpoint B. Try another tag.
if tag not in available_tags_b:
continue
# Tag already in use. Try another tag.
if not self.endpoint_a.use_tag(tag):
continue
# Tag already in use in B. Mark the tag as available again.
if not self.endpoint_b.use_tag(tag):
self.endpoint_a.make_tag_available(tag)
continue
# Tag used successfully by both endpoints. Returning.
return tag
raise KytosNoTagAvailableError(self)
def make_tag_available(self, tag):
"""Add a specific tag in available_tags."""
if not self.is_tag_available(tag):
self.endpoint_a.make_tag_available(tag)
self.endpoint_b.make_tag_available(tag)
return True
return False
def available_vlans(self):
"""Get all available vlans from each interface in the link."""
vlans_a = self._get_available_vlans(self.endpoint_a)
vlans_b = self._get_available_vlans(self.endpoint_b)
return [vlan for vlan in vlans_a if vlan in vlans_b]
@staticmethod
def _get_available_vlans(endpoint):
"""Return all vlans from endpoint."""
tags = endpoint.available_tags
return [tag for tag in tags if tag.tag_type == TAGType.VLAN]
def as_dict(self):
"""Return the Link as a dictionary."""
return {'id': self.id,
'endpoint_a': self.endpoint_a.as_dict(),
'endpoint_b': self.endpoint_b.as_dict(),
'metadata': self.get_metadata_as_dict(),
'active': self.is_active(),
'enabled': self.is_enabled()}
def as_json(self):
"""Return the Link as a JSON string."""
return json.dumps(self.as_dict())
|
en
| 0.769958
|
Module with all classes related to links. Links are low level abstractions representing connections between two interfaces. Define a link between two Endpoints. Create a Link instance and set its attributes. Two kytos.core.interface.Interface are required as parameters. Override the is_enabled method. We consider a link enabled when all the interfaces are enabled. Returns: boolean: True if both interfaces are enabled, False otherwise. Override the is_active method. We consider a link active whether all the interfaces are active. Returns: boolean: True if the interfaces are active, othewrise False. Check if two instances of Link are equal. # pylint: disable=invalid-name Return id from Link intance. Returns: string: link id. Return the available tags for the link. Based on the endpoint tags. Remove a specific tag from available_tags if it is there. Deprecated: use only the get_next_available_tag method. Check if a tag is available. Return the next available tag if exists. # Copy the available tags because in case of error # we will remove and add elements to the available_tags # Tag does not exist in endpoint B. Try another tag. # Tag already in use. Try another tag. # Tag already in use in B. Mark the tag as available again. # Tag used successfully by both endpoints. Returning. Add a specific tag in available_tags. Get all available vlans from each interface in the link. Return all vlans from endpoint. Return the Link as a dictionary. Return the Link as a JSON string.
| 3.043232
| 3
|
rotations/rotations.py
|
MomsFriendlyRobotCompany/rotations
| 1
|
6626533
|
# -*- coding: utf-8 -*-
##############################################
# The MIT License (MIT)
# Copyright (c) 2020 <NAME>
# see LICENSE for full details
##############################################
import numpy as np
# Scalar conversion factors between radians and degrees.
rad2deg = 180/np.pi
deg2rad = np.pi/180
# https://en.wikipedia.org/wiki/Rotation_matrix
def R1(a, degrees=False):
    """Rotation matrix about the x-axis by angle *a*.

    :param a: angle in radians (degrees if ``degrees=True``)
    :param degrees: interpret *a* as degrees when True
    :return: 3x3 numpy array

    Note: the conversion uses ``a = a * deg2rad`` rather than ``a *=``,
    because in-place ``*=`` would mutate a caller-supplied numpy array.
    """
    if degrees:
        a = a * deg2rad
    ca = np.cos(a)
    sa = np.sin(a)
    return np.array(
        [[1,   0,  0],
         [0,  ca, sa],
         [0, -sa, ca]]
    )
def R2(a, degrees=False):
    """Rotation matrix about the y-axis by angle *a*.

    :param a: angle in radians (degrees if ``degrees=True``)
    :param degrees: interpret *a* as degrees when True
    :return: 3x3 numpy array

    Note: uses ``a = a * deg2rad`` (not in-place ``*=``) so that a
    caller-supplied numpy array is never mutated.
    """
    if degrees:
        a = a * deg2rad
    ca = np.cos(a)
    sa = np.sin(a)
    return np.array(
        [[ca, 0, -sa],
         [ 0, 1,   0],
         [sa, 0,  ca]]
    )
def R3(a, degrees=False):
    """Rotation matrix about the z-axis by angle *a*.

    :param a: angle in radians (degrees if ``degrees=True``)
    :param degrees: interpret *a* as degrees when True
    :return: 3x3 numpy array

    Note: uses ``a = a * deg2rad`` (not in-place ``*=``) so that a
    caller-supplied numpy array is never mutated.
    """
    if degrees:
        a = a * deg2rad
    ca = np.cos(a)
    sa = np.sin(a)
    return np.array(
        [[ ca, sa, 0],
         [-sa, ca, 0],
         [  0,  0, 1]]
    )
def R313(a, b, c, degrees=False):
    """Returns a rotation matrix based on: Z(c)*X(b)*Z(a).

    :param a: first Z rotation angle
    :param b: X rotation angle
    :param c: second Z rotation angle
    :param degrees: interpret the angles as degrees when True
    :return: 3x3 numpy array

    Note: angles are converted with ``a = a * deg2rad`` (not in-place
    ``*=``) so caller-supplied numpy arrays are never mutated; dead
    commented-out matrix removed.
    """
    if degrees:
        a = a * deg2rad
        b = b * deg2rad
        c = c * deg2rad
    s3 = np.sin(c); c3 = np.cos(c)
    s2 = np.sin(b); c2 = np.cos(b)
    s1 = np.sin(a); c1 = np.cos(a)
    return np.array(
        [
            [ c1*c3-c2*s1*s3, c3*s1+c1*c2*s3, s2*s3],
            [-c1*s3-c2*c3*s1, c1*c2*c3-s1*s3, c3*s2],
            [          s1*s2,         -c1*s2,    c2]
        ]
    )
def R312(a, b, c, degrees=False):
    """Returns a rotation matrix based on: Y2*X1*Z3.

    :param a: angle for axis 1 (X)
    :param b: angle for axis 2 (Y)
    :param c: angle for axis 3 (Z)
    :param degrees: interpret the angles as degrees when True
    :return: 3x3 numpy array

    Note: angles are converted with ``a = a * deg2rad`` (not in-place
    ``*=``) so caller-supplied numpy arrays are never mutated.
    """
    if degrees:
        a = a * deg2rad
        b = b * deg2rad
        c = c * deg2rad
    s3 = np.sin(c); c3 = np.cos(c)
    s2 = np.sin(b); c2 = np.cos(b)
    s1 = np.sin(a); c1 = np.cos(a)
    return np.array(
        [
            [c1*c3-s1*s2*s3, -c2*s1, c1*s3+c3*s1*s2],
            [c3*s1+c1*s2*s3,  c1*c2, s1*s3-c1*c3*s2],
            [        -c2*s3,     s2,          c2*c3]
        ]
    )
# p_body = X*Y*Z p_inertial
# R321 = lambda a,b,c,v=False: R123(a,b,c,v).T
def R321(a, b, c, degrees=False):
    """Returns a rotation matrix based on: X*Y*Z.

    This is the transpose of ``R123(a, b, c)``.

    :param a: angle for axis 1 (X)
    :param b: angle for axis 2 (Y)
    :param c: angle for axis 3 (Z)
    :param degrees: interpret the angles as degrees when True
    :return: 3x3 numpy array

    Note: angles are converted with ``a = a * deg2rad`` (not in-place
    ``*=``) so caller-supplied numpy arrays are never mutated; dead
    commented-out matrix removed.
    """
    if degrees:
        a = a * deg2rad
        b = b * deg2rad
        c = c * deg2rad
    s3 = np.sin(c); c3 = np.cos(c)
    s2 = np.sin(b); c2 = np.cos(b)
    s1 = np.sin(a); c1 = np.cos(a)
    return np.array(
        [
            [         c1*c2,          c2*s1,   -s2],
            [c1*s2*s3-c3*s1, c1*c3+s1*s2*s3, c2*s3],
            [s1*s3+c1*c3*s2, c3*s1*s2-c1*s3, c2*c3]
        ]
    )
def R123(a, b, c, degrees=False):
    """Returns a rotation matrix based on: Z*Y*X.

    :param a: angle for axis 1 (X)
    :param b: angle for axis 2 (Y)
    :param c: angle for axis 3 (Z)
    :param degrees: interpret the angles as degrees when True
    :return: 3x3 numpy array

    Note: angles are converted with ``a = a * deg2rad`` (not in-place
    ``*=``) so caller-supplied numpy arrays are never mutated.
    """
    if degrees:
        a = a * deg2rad
        b = b * deg2rad
        c = c * deg2rad
    s3 = np.sin(c); c3 = np.cos(c)
    s2 = np.sin(b); c2 = np.cos(b)
    s1 = np.sin(a); c1 = np.cos(a)
    return np.array(
        [
            [c1*c2, c1*s2*s3-c3*s1, s1*s3+c1*c3*s2],
            [c2*s1, c1*c3+s1*s2*s3, c3*s1*s2-c1*s3],
            [  -s2,          c2*s3,          c2*c3]
        ]
    )
|
# -*- coding: utf-8 -*-
##############################################
# The MIT License (MIT)
# Copyright (c) 2020 <NAME>
# see LICENSE for full details
##############################################
import numpy as np
rad2deg = 180/np.pi
deg2rad = np.pi/180
# https://en.wikipedia.org/wiki/Rotation_matrix
def R1(a, degrees=False):
if degrees:
a *= deg2rad
ca = np.cos(a)
sa = np.sin(a)
return np.array(
[[1, 0, 0],
[0, ca, sa],
[0, -sa, ca]]
)
def R2(a, degrees=False):
if degrees:
a *= deg2rad
ca = np.cos(a)
sa = np.sin(a)
return np.array(
[[ ca, 0, -sa],
[ 0, 1, 0],
[ sa, 0, ca]]
)
def R3(a, degrees=False):
if degrees:
a *= deg2rad
ca = np.cos(a)
sa = np.sin(a)
return np.array(
[[ ca, sa, 0],
[-sa, ca, 0],
[ 0, 0, 1]]
)
def R313(a,b,c, degrees=False):
"""Returns a rotation matrix based on: Z(c)*X(b)*Z(a)"""
if degrees:
a *= deg2rad
b *= deg2rad
c *= deg2rad
s3 = np.sin(c); c3 = np.cos(c)
s2 = np.sin(b); c2 = np.cos(b)
s1 = np.sin(a); c1 = np.cos(a)
# return np.array(
# [
# [c1*c3-c2*s1*s3, -c1*s3-c2*c3*s1, s1*s2],
# [c3*s1+c1*c2*s3, c1*c2*c3-s1*s3, -c1*s2],
# [ s2*s3, c3*s2, c2]
# ]
# )
return np.array(
[
[ c1*c3-c2*s1*s3, c3*s1+c1*c2*s3, s2*s3],
[-c1*s3-c2*c3*s1, c1*c2*c3-s1*s3, c3*s2],
[ s1*s2, -c1*s2, c2]
]
)
def R312(a,b,c, degrees=False):
"""Returns a rotation matrix based on: Y2*X1*Z3"""
if degrees:
a *= deg2rad
b *= deg2rad
c *= deg2rad
s3 = np.sin(c); c3 = np.cos(c)
s2 = np.sin(b); c2 = np.cos(b)
s1 = np.sin(a); c1 = np.cos(a)
return np.array(
[
[c1*c3-s1*s2*s3, -c2*s1, c1*s3+c3*s1*s2],
[c3*s1+c1*s2*s3, c1*c2, s1*s3-c1*c3*s2],
[ -c2*s3, s2, c2*c3]
]
)
# p_body = X*Y*Z p_inertial
# R321 = lambda a,b,c,v=False: R123(a,b,c,v).T
def R321(a,b,c, degrees=False):
"""Returns a rotation matrix based on: X*Y*Z"""
if degrees:
a *= deg2rad
b *= deg2rad
c *= deg2rad
s3 = np.sin(c); c3 = np.cos(c)
s2 = np.sin(b); c2 = np.cos(b)
s1 = np.sin(a); c1 = np.cos(a)
# return np.array(
# [
# [c1*c2, c1*s2*s3-c3*s1, s1*s3+c1*c3*s2],
# [c2*s1, c1*c3+s1*s2*s3, c3*s1*s2-c1*s3],
# [ -s2, c2*s3, c2*c3]
# ]
# )
return np.array(
[
[ c1*c2, c2*s1, -s2],
[c1*s2*s3-c3*s1, c1*c3+s1*s2*s3, c2*s3],
[s1*s3+c1*c3*s2, c3*s1*s2-c1*s3, c2*c3]
]
)
def R123(a,b,c, degrees=False):
"""Returns a rotation matrix based on: Z*Y*X"""
if degrees:
a *= deg2rad
b *= deg2rad
c *= deg2rad
s3 = np.sin(c); c3 = np.cos(c)
s2 = np.sin(b); c2 = np.cos(b)
s1 = np.sin(a); c1 = np.cos(a)
return np.array(
[
[c1*c2, c1*s2*s3-c3*s1, s1*s3+c1*c3*s2],
[c2*s1, c1*c3+s1*s2*s3, c3*s1*s2-c1*s3],
[ -s2, c2*s3, c2*c3]
]
)
|
en
| 0.433405
|
# -*- coding: utf-8 -*- ############################################## # The MIT License (MIT) # Copyright (c) 2020 <NAME> # see LICENSE for full details ############################################## # https://en.wikipedia.org/wiki/Rotation_matrix Returns a rotation matrix based on: Z(c)*X(b)*Z(a) # return np.array( # [ # [c1*c3-c2*s1*s3, -c1*s3-c2*c3*s1, s1*s2], # [c3*s1+c1*c2*s3, c1*c2*c3-s1*s3, -c1*s2], # [ s2*s3, c3*s2, c2] # ] # ) Returns a rotation matrix based on: Y2*X1*Z3 # p_body = X*Y*Z p_inertial # R321 = lambda a,b,c,v=False: R123(a,b,c,v).T Returns a rotation matrix based on: X*Y*Z # return np.array( # [ # [c1*c2, c1*s2*s3-c3*s1, s1*s3+c1*c3*s2], # [c2*s1, c1*c3+s1*s2*s3, c3*s1*s2-c1*s3], # [ -s2, c2*s3, c2*c3] # ] # ) Returns a rotation matrix based on: Z*Y*X
| 3.38547
| 3
|
pint_server/tests/unit/conftest.py
|
jeremy-moffitt/public-cloud-info-service
| 0
|
6626534
|
<reponame>jeremy-moffitt/public-cloud-info-service<gh_stars>0
# Copyright (c) 2021 SUSE LLC
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com
import mock
import os
import pytest
import sqlalchemy
# Stub out the environment and the SQLAlchemy engine factory BEFORE the
# app module is imported, so importing it neither reads real configuration
# nor opens a database connection.
os.environ = mock.MagicMock()
sqlalchemy.create_engine = mock.MagicMock()
from pint_server import app
@pytest.fixture(scope='session')
def client():
    """Yield a Flask test client for the pint_server app (one per session)."""
    flask_app = app.app
    # TESTING mode: exceptions propagate instead of being turned into
    # 500 responses, which makes test failures easier to diagnose.
    flask_app.config['TESTING'] = True
    with flask_app.test_client() as client:
        yield client
|
# Copyright (c) 2021 SUSE LLC
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com
import mock
import os
import pytest
import sqlalchemy
os.environ = mock.MagicMock()
sqlalchemy.create_engine = mock.MagicMock()
from pint_server import app
@pytest.fixture(scope='session')
def client():
flask_app = app.app
flask_app.config['TESTING'] = True
with flask_app.test_client() as client:
yield client
|
en
| 0.895506
|
# Copyright (c) 2021 SUSE LLC # # This program is free software; you can redistribute it and/or # modify it under the terms of version 3 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, contact SUSE LLC. # # To contact SUSE about this file by physical or electronic mail, # you may find current contact information at www.suse.com
| 1.90139
| 2
|
setup.py
|
nuchi/tf_pb_without_tf
| 5
|
6626535
|
import setuptools
# Minimal packaging config: ship every package found under the repo root.
# protobuf and grpcio are required at runtime by the generated stubs.
setuptools.setup(
    name='tf_pb',
    packages=setuptools.find_packages('.'),
    install_requires=['protobuf', 'grpcio']
)
|
import setuptools
setuptools.setup(
name='tf_pb',
packages=setuptools.find_packages('.'),
install_requires=['protobuf', 'grpcio']
)
|
none
| 1
| 1.094781
| 1
|
|
runtime/test/specs/V1_2/lstm3_state3_float16.mod.py
|
riscv-android-src/platform-packages-modules-NeuralNetworks
| 0
|
6626536
|
#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# LSTM Test, With Peephole, With Projection, No Clipping
# Build the model graph: a single float16 LSTM step with peephole
# connections and a projection layer; cell and projection clipping are
# both disabled (clip params set to 0 below).
model = Model()
n_batch = 2
n_input = 5
# n_cell and n_output have the same size when there is no projection.
n_cell = 20
n_output = 16

# Input activations, shape {n_batch, n_input}.
input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))

# Input-to-gate weight matrices, each {n_cell, n_input}.
input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))

# Recurrent (previous-output-to-gate) weight matrices, each {n_cell, n_output}.
recurrent_to_input_weights = Input("recurrent_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))

# Peephole (cell-state-to-gate) weight vectors, each {n_cell}.
cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{%d}" %(n_cell))
cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))

# Per-gate bias vectors, each {n_cell}.
input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))

# Projection from cell space ({n_cell}) down to output space ({n_output}).
# The "{0}"-shaped projection bias marks the bias operand as omitted.
projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{%d,%d}" % (n_output, n_cell))
projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")

# Recurrent state carried in from the previous step.
output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))

# Scalar parameters: tanh activation; 0 disables both clipping thresholds.
activation_param = Int32Scalar("activation_param", 4) # Tanh
cell_clip_param = Float16Scalar("cell_clip_param", 0.)
proj_clip_param = Float16Scalar("proj_clip_param", 0.)

# Outputs.  The scratch buffer and the two state outputs are declared as
# IgnoredOutput, so only ``output`` is compared against expected values.
scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, (n_cell * 4)))
output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))

# Wire all operands into the LSTM operation; positional order is significant.
model = model.Operation("LSTM",
    input,
    input_to_input_weights,
    input_to_forget_weights,
    input_to_cell_weights,
    input_to_output_weights,
    recurrent_to_input_weights,
    recurrent_to_forget_weights,
    recurrent_to_cell_weights,
    recurrent_to_output_weights,
    cell_to_input_weights,
    cell_to_forget_weights,
    cell_to_output_weights,
    input_gate_bias,
    forget_gate_bias,
    cell_gate_bias,
    output_gate_bias,
    projection_weights,
    projection_bias,
    output_state_in,
    cell_state_in,
    activation_param,
    cell_clip_param,
    proj_clip_param
).To([scratch_buffer, output_state_out, cell_state_out, output])
input0 = {input_to_input_weights: [
0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
-0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
-0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
-0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
-0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
-0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
-0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
-0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
-0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
-0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
-0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
-0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
-0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
-0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677],
input_to_forget_weights: [
-0.0018401089, -0.004852237, 0.03698424, 0.014181704, 0.028273236,
-0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505,
-0.009899187, 0.01923892, -0.028177269, -0.08535103, -0.14585495,
0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323,
0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421,
-0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887,
-0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791,
0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059,
0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068,
0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905,
0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605,
-0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464,
0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506,
-0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063,
-0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375,
0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553,
0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353,
0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717,
-0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371,
0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496],
input_to_cell_weights: [
-0.04580283, -0.09549462, -0.032418985, -0.06454633,
-0.043528453, 0.043018587, -0.049152344, -0.12418144,
-0.078985475, -0.07596889, 0.019484362, -0.11434962,
-0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
-0.025034338, -0.0028890965, 0.048929527, 0.06235075,
0.10665918, -0.032036792, -0.08505916, -0.10843358,
-0.13002433, -0.036816437, -0.02130134, -0.016518239,
0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
-0.10652836, -0.1037554, -0.13056071, -0.03266643,
-0.033702414, -0.006473424, -0.04611692, 0.014419339,
-0.025174323, 0.0396852, 0.081777506, 0.06157468,
0.10210095, -0.009658194, 0.046511717, 0.03603906,
0.0069369148, 0.015960095, -0.06507666, 0.09551598,
0.053568836, 0.06408714, 0.12835667, -0.008714329,
-0.20211966, -0.12093674, 0.029450472, 0.2849013,
-0.029227901, 0.1164364, -0.08560263, 0.09941786,
-0.036999565, -0.028842626, -0.0033637602, -0.017012902,
-0.09720865, -0.11193351, -0.029155117, -0.017936034,
-0.009768936, -0.04223324, -0.036159635, 0.06505112,
-0.021742892, -0.023377212, -0.07221364, -0.06430552,
0.05453865, 0.091149814, 0.06387331, 0.007518393,
0.055960953, 0.069779344, 0.046411168, 0.10509911,
0.07463894, 0.0075130584, 0.012850982, 0.04555431,
0.056955688, 0.06555285, 0.050801456, -0.009862683,
0.00826772, -0.026555609, -0.0073611983, -0.0014897042],
input_to_output_weights: [
-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
-0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
-0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
-0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
-0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
-0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
-0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
-0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
-0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
-0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
-0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
-0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956],
input_gate_bias: [
0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216,
-0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339,
-0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818,
0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196],
forget_gate_bias: [
0.035185695, -0.042891346, -0.03032477, 0.23027696,
0.11098921, 0.15378423, 0.09263801, 0.09790885,
0.09508917, 0.061199076, 0.07665568, -0.015443159,
-0.03499149, 0.046190713, 0.08895977, 0.10899629,
0.40694186, 0.06030037, 0.012413437, -0.06108739],
cell_gate_bias: [
-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
-0.1483596, -0.10639995, -0.091433935, 0.058573797,
-0.06809782, -0.07889636, -0.043246906, -0.09829136,
-0.4279842, 0.034901652, 0.18797937, 0.0075234566,
0.016178843, 0.1749513, 0.13975595, 0.92058027],
output_gate_bias: [
0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795,
0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895,
0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149,
-0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877],
recurrent_to_input_weights: [
-0.001374326, -0.078856036, 0.10672688, 0.029162422,
-0.11585556, 0.02557986, -0.13446963, -0.035785314,
-0.01244275, 0.025961924, -0.02337298, -0.044228926,
-0.055839065, -0.046598054, -0.010546039, -0.06900766,
0.027239809, 0.022582639, -0.013296484, -0.05459212,
0.08981, -0.045407712, 0.08682226, -0.06867011,
-0.14390695, -0.02916037, 0.000996957, 0.091420636,
0.14283475, -0.07390571, -0.06402044, 0.062524505,
-0.093129106, 0.04860203, -0.08364217, -0.08119002,
0.009352075, 0.22920375, 0.0016303885, 0.11583097,
-0.13732095, 0.012405723, -0.07551853, 0.06343048,
0.12162708, -0.031923793, -0.014335606, 0.01790974,
-0.10650317, -0.0724401, 0.08554849, -0.05727212,
0.06556731, -0.042729504, -0.043227166, 0.011683251,
-0.013082158, -0.029302018, -0.010899579, -0.062036745,
-0.022509435, -0.00964907, -0.01567329, 0.04260106,
-0.07787477, -0.11576462, 0.017356863, 0.048673786,
-0.017577527, -0.05527947, -0.082487635, -0.040137455,
-0.10820036, -0.04666372, 0.022746278, -0.07851417,
0.01068115, 0.032956902, 0.022433773, 0.0026891115,
0.08944216, -0.0685835, 0.010513544, 0.07228705,
0.02032331, -0.059686817, -0.0005566496, -0.086984694,
0.040414046, -0.1380399, 0.094208956, -0.05722982,
0.012092817, -0.04989123, -0.086576, -0.003399834,
-0.04696032, -0.045747425, 0.10091314, 0.048676282,
-0.029037097, 0.031399418, -0.0040285117, 0.047237843,
0.09504992, 0.041799378, -0.049185462, -0.031518843,
-0.10516937, 0.026374253, 0.10058866, -0.0033195973,
-0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
-0.10167381, 0.042500053, -0.01447153, 0.06464186,
-0.017142897, 0.03312627, 0.009205989, 0.024138335,
-0.011337001, 0.035530265, -0.010912711, 0.0706555,
-0.005894094, 0.051841937, -0.1401738, -0.02351249,
0.0365468, 0.07590991, 0.08838724, 0.021681072,
-0.10086113, 0.019608743, -0.06195883, 0.077335775,
0.023646897, -0.095322326, 0.02233014, 0.09756986,
-0.048691444, -0.009579111, 0.07595467, 0.11480546,
-0.09801813, 0.019894179, 0.08502348, 0.004032281,
0.037211012, 0.068537936, -0.048005626, -0.091520436,
-0.028379958, -0.01556313, 0.06554592, -0.045599163,
-0.01672207, -0.020169014, -0.011877351, -0.20212261,
0.010889619, 0.0047078193, 0.038385306, 0.08540671,
-0.017140968, -0.0035865551, 0.016678626, 0.005633034,
0.015963363, 0.00871737, 0.060130805, 0.028611384,
0.10109069, -0.015060172, -0.07894427, 0.06401885,
0.011584063, -0.024466386, 0.0047652307, -0.09041358,
0.030737216, -0.0046374933, 0.14215417, -0.11823516,
0.019899689, 0.006106124, -0.027092824, 0.0786356,
0.05052217, -0.058925, -0.011402121, -0.024987547,
-0.0013661642, -0.06832946, -0.015667673, -0.1083353,
-0.00096863037, -0.06988685, -0.053350925, -0.027275559,
-0.033664223, -0.07978348, -0.025200296, -0.017207067,
-0.058403496, -0.055697463, 0.005798788, 0.12965427,
-0.062582195, 0.0013350133, -0.10482091, 0.0379771,
0.072521195, -0.0029455067, -0.13797039, -0.03628521,
0.013806405, -0.017858358, -0.01008298, -0.07700066,
-0.017081132, 0.019358726, 0.0027079724, 0.004635139,
0.062634714, -0.02338735, -0.039547626, -0.02050681,
0.03385117, -0.083611414, 0.002862572, -0.09421313,
0.058618143, -0.08598433, 0.00972939, 0.023867095,
-0.053934585, -0.023203006, 0.07452513, -0.048767887,
-0.07314807, -0.056307215, -0.10433547, -0.06440842,
0.04328182, 0.04389765, -0.020006588, -0.09076438,
-0.11652589, -0.021705797, 0.03345259, -0.010329105,
-0.025767034, 0.013057034, -0.07316461, -0.10145612,
0.06358255, 0.18531723, 0.07759293, 0.12006465,
0.1305557, 0.058638252, -0.03393652, 0.09622831,
-0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
-0.005644518, 0.06857898, -0.12598175, -0.035084512,
0.03156317, -0.12794146, -0.031963028, 0.04692781,
0.030070418, 0.0071660685, -0.095516115, -0.004643372,
0.040170413, -0.062104587, -0.0037324072, 0.0554317,
0.08184801, -0.019164372, 0.06791302, 0.034257166,
-0.10307039, 0.021943003, 0.046745934, 0.0790918,
-0.0265588, -0.007824208, 0.042546265, -0.00977924,
-0.0002440307, -0.017384544, -0.017990116, 0.12252321,
-0.014512694, -0.08251313, 0.08861942, 0.13589665,
0.026351685, 0.012641483, 0.07466548, 0.044301085,
-0.045414884, -0.051112458, 0.03444247, -0.08502782,
-0.04106223, -0.028126027, 0.028473156, 0.10467447],
recurrent_to_forget_weights: [
-0.057784554, -0.026057621, -0.068447545, -0.022581743,
0.14811787, 0.10826372, 0.09471067, 0.03987225,
-0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
0.08414449, -0.022036452, -0.00066928595, -0.09203576,
0.032950465, -0.10985798, -0.023809856, 0.0021431844,
-0.02196096, -0.00326074, 0.00058621005, -0.074678116,
-0.06193199, 0.055729095, 0.03736828, 0.020123724,
0.061878487, -0.04729229, 0.034919553, -0.07585433,
-0.04421272, -0.044019096, 0.085488975, 0.04058006,
-0.06890133, -0.030951202, -0.024628663, -0.07672815,
0.034293607, 0.08556707, -0.05293577, -0.033561368,
-0.04899627, 0.0241671, 0.015736353, -0.095442444,
-0.029564252, 0.016493602, -0.035026584, 0.022337519,
-0.026871363, 0.004780428, 0.0077918363, -0.03601621,
0.016435321, -0.03263031, -0.09543275, -0.047392778,
0.013454138, 0.028934088, 0.01685226, -0.086110644,
-0.046250615, -0.01847454, 0.047608484, 0.07339695,
0.034546845, -0.04881143, 0.009128804, -0.08802852,
0.03761666, 0.008096139, -0.014454086, 0.014361001,
-0.023502491, -0.0011840804, -0.07607001, 0.001856849,
-0.06509276, -0.006021153, -0.08570962, -0.1451793,
0.060212336, 0.055259194, 0.06974018, 0.049454916,
-0.027794661, -0.08077226, -0.016179763, 0.1169753,
0.17213494, -0.0056326236, -0.053934924, -0.0124349,
-0.11520337, 0.05409887, 0.088759385, 0.0019655675,
0.0042065294, 0.03881498, 0.019844765, 0.041858196,
-0.05695512, 0.047233116, 0.038937137, -0.06542224,
0.014429736, -0.09719407, 0.13908425, -0.05379757,
0.012321099, 0.082840554, -0.029899208, 0.044217527,
0.059855383, 0.07711018, -0.045319796, 0.0948846,
-0.011724666, -0.0033288454, -0.033542685, -0.04764985,
-0.13873616, 0.040668588, 0.034832682, -0.015319203,
-0.018715994, 0.046002675, 0.0599172, -0.043107376,
0.0294216, -0.002314414, -0.022424703, 0.0030315618,
0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
0.12375372, -0.0006038222, 0.029104086, 0.087442465,
0.052958444, 0.07558703, 0.04817258, 0.044462286,
-0.015213451, -0.08783778, -0.0561384, -0.003008196,
0.047060397, -0.002058388, 0.03429439, -0.018839769,
0.024734668, 0.024614193, -0.042046934, 0.09597743,
-0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
-0.02558259, -0.022822596, -0.023273505, -0.02464396,
-0.10991725, -0.006240552, 0.0074488563, 0.024044557,
0.04383914, -0.046476185, 0.028658995, 0.060410924,
0.050786525, 0.009452605, -0.0073054377, -0.024810238,
0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
0.015898481, 0.021362653, -0.030262267, 0.016587038,
-0.011442813, 0.041154444, -0.007631438, -0.03423484,
-0.010977775, 0.036152758, 0.0066366293, 0.11915515,
0.02318443, -0.041350313, 0.021485701, -0.10906167,
-0.028218046, -0.00954771, 0.020531068, -0.11995105,
-0.03672871, 0.024019798, 0.014255957, -0.05221243,
-0.00661567, -0.04630967, 0.033188973, 0.10107534,
-0.014027541, 0.030796422, -0.10270911, -0.035999842,
0.15443139, 0.07684145, 0.036571592, -0.035900835,
-0.0034699554, 0.06209149, 0.015920248, -0.031122351,
-0.03858649, 0.01849943, 0.13872518, 0.01503974,
0.069941424, -0.06948533, -0.0088794185, 0.061282158,
-0.047401894, 0.03100163, -0.041533746, -0.10430945,
0.044574402, -0.01425562, -0.024290353, 0.034563623,
0.05866852, 0.023947537, -0.09445152, 0.035450947,
0.02247216, -0.0042998926, 0.061146557, -0.10250651,
0.020881841, -0.06747029, 0.10062043, -0.0023941975,
0.03532124, -0.016341697, 0.09685456, -0.016764693,
0.051808182, 0.05875331, -0.04536488, 0.001626336,
-0.028892258, -0.01048663, -0.009793449, -0.017093895,
0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
-0.001845119, -0.03551521, 0.0018358806, 0.05763657,
-0.01769146, 0.040995963, 0.02235177, -0.060430344,
0.11475477, -0.023854522, 0.10071741, 0.0686208,
-0.014250481, 0.034261297, 0.047418304, 0.08562733,
-0.030519066, 0.0060542435, 0.014653856, -0.038836084,
0.04096551, 0.032249358, -0.08355519, -0.026823482,
0.056386515, -0.010401743, -0.028396193, 0.08507674,
0.014410365, 0.020995233, 0.17040324, 0.11511526,
0.02459721, 0.0066619175, 0.025853224, -0.023133837,
-0.081302024, 0.017264642, -0.009585969, 0.09491168,
-0.051313367, 0.054532815, -0.014298593, 0.10657464,
0.007076659, 0.10964551, 0.0409152, 0.008275321,
-0.07283536, 0.07937492, 0.04192024, -0.1075027],
recurrent_to_cell_weights: [
-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
0.055647098, -0.05713207, -0.05626563, 0.005559383,
0.03375411, -0.025757805, -0.088049285, 0.06017052,
-0.06570978, 0.007384076, 0.035123326, -0.07920549,
0.053676967, 0.044480428, -0.07663568, 0.0071805613,
0.08089997, 0.05143358, 0.038261272, 0.03339287,
-0.027673481, 0.044746667, 0.028349208, 0.020090483,
-0.019443132, -0.030755889, -0.0040000007, 0.04465846,
-0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
-0.10893326, 0.076739706, -0.08509834, -0.027997585,
0.037871376, 0.01449768, -0.09002357, -0.06111149,
-0.046195522, 0.0422062, -0.005683705, -0.1253618,
-0.012925729, -0.04890792, 0.06985068, 0.037654128,
0.03398274, -0.004781977, 0.007032333, -0.031787455,
0.010868644, -0.031489216, 0.09525667, 0.013939797,
0.0058680447, 0.0167067, 0.02668468, -0.04797466,
-0.048885044, -0.12722108, 0.035304096, 0.06554885,
0.00972396, -0.039238118, -0.05159735, -0.11329045,
0.1613692, -0.03750952, 0.06529313, -0.071974665,
-0.11769596, 0.015524369, -0.0013754242, -0.12446318,
0.02786344, -0.014179351, 0.005264273, 0.14376344,
0.015983658, 0.03406988, -0.06939408, 0.040699873,
0.02111075, 0.09669095, 0.041345075, -0.08316494,
-0.07684199, -0.045768797, 0.032298047, -0.041805092,
0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
-0.024950314, 0.11574242, 0.04508852, -0.04335324,
0.06760663, -0.027437469, 0.07216407, 0.06977076,
-0.05438599, 0.034033038, -0.028602652, 0.05346137,
0.043184172, -0.037189785, 0.10420091, 0.00882477,
-0.054019816, -0.074273005, -0.030617684, -0.0028467078,
0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
0.04361412, -0.007001822, 0.09631092, -0.06702025,
-0.042049985, -0.035070654, -0.04103342, -0.10273396,
0.0544271, 0.037184782, -0.13150354, -0.0058036847,
-0.008264958, 0.042035464, 0.05891794, 0.029673764,
0.0063542654, 0.044788733, 0.054816857, 0.062257513,
-0.00093483756, 0.048938446, -0.004952862, -0.007730018,
-0.04043371, -0.017094059, 0.07229206, -0.023670016,
-0.052195564, -0.025616996, -0.01520939, 0.045104615,
-0.007376126, 0.003533447, 0.006570588, 0.056037236,
0.12436656, 0.051817212, 0.028532185, -0.08686856,
0.11868599, 0.07663395, -0.07323171, 0.03463402,
-0.050708205, -0.04458982, -0.11590894, 0.021273347,
0.1251325, -0.15313013, -0.12224372, 0.17228661,
0.023029093, 0.086124025, 0.006445803, -0.03496501,
0.028332196, 0.04449512, -0.042436164, -0.026587414,
-0.006041347, -0.09292539, -0.05678812, 0.03897832,
0.09465633, 0.008115513, -0.02171956, 0.08304309,
0.071401566, 0.019622514, 0.032163795, -0.004167056,
0.02295182, 0.030739572, 0.056506045, 0.004612461,
0.06524936, 0.059999723, 0.046395954, -0.0045512207,
-0.1335546, -0.030136576, 0.11584653, -0.014678886,
0.0020118146, -0.09688814, -0.0790206, 0.039770417,
-0.0329582, 0.07922767, 0.029322514, 0.026405897,
0.04207835, -0.07073373, 0.063781224, 0.0859677,
-0.10925287, -0.07011058, 0.048005477, 0.03438226,
-0.09606514, -0.006669445, -0.043381985, 0.04240257,
-0.06955775, -0.06769346, 0.043903265, -0.026784198,
-0.017840602, 0.024307009, -0.040079936, -0.019946516,
0.045318738, -0.12233574, 0.026170589, 0.0074471775,
0.15978073, 0.10185836, 0.10298046, -0.015476589,
-0.039390966, -0.072174534, 0.0739445, -0.1211869,
-0.0347889, -0.07943156, 0.014809798, -0.12412325,
-0.0030663363, 0.039695457, 0.0647603, -0.08291318,
-0.018529687, -0.004423833, 0.0037507233, 0.084633216,
-0.01514876, -0.056505352, -0.012800942, -0.06994386,
0.012962922, -0.031234352, 0.07029052, 0.016418684,
0.03618972, 0.055686004, -0.08663945, -0.017404709,
-0.054761406, 0.029065743, 0.052404847, 0.020238016,
0.0048197987, -0.0214882, 0.07078733, 0.013016777,
0.06262858, 0.009184685, 0.020785125, -0.043904778,
-0.0270329, -0.03299152, -0.060088247, -0.015162964,
-0.001828936, 0.12642565, -0.056757294, 0.013586685,
0.09232601, -0.035886683, 0.06000002, 0.05229691,
-0.052580316, -0.082029596, -0.010794592, 0.012947712,
-0.036429964, -0.085508935, -0.13127148, -0.017744139,
0.031502828, 0.036232427, -0.031581745, 0.023051167,
-0.05325106, -0.03421577, 0.028793324, -0.034633752,
-0.009881397, -0.043551125, -0.018609839, 0.0019097115,
-0.008799762, 0.056595087, 0.0022273948, 0.055752404],
recurrent_to_output_weights: [
0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415,
-0.0026479573, -0.08196161, -0.054914974, -0.0046604523, -0.029587349,
-0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948,
-0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774,
-0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125,
-0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224,
-0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088,
0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867,
-0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728,
0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607,
-0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928,
-0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462,
0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879,
0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698,
-0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146,
0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345,
0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166,
0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203,
0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743,
0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415,
-0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618,
0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891,
-0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015,
0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109,
0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886,
0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396,
-0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282,
-0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025,
-0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575,
-0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277,
-0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719,
-0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215,
0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483,
0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102,
-0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775,
0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841,
-0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656,
-0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286,
-0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309,
0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545,
0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754,
0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831,
-0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697,
0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453,
-0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222,
-0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989,
-0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827,
-0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949,
0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819,
-0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954,
0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228,
-0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001,
-0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939,
-0.021256343, -0.041093912, -0.06669611, 0.035498552, 0.021757556,
-0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718,
0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893,
0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974,
-0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485,
0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856,
0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853,
-0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019,
0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024,
0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994,
0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621],
cell_to_input_weights: [
0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
-0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
-0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175],
cell_to_forget_weights: [
-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
-0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
-0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355],
cell_to_output_weights: [
0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
-0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
-0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733],
projection_weights: [
-0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832,
0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683,
-0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931,
-0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476,
0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067,
0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787,
0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588,
0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285,
-0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949,
-0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768,
-0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929,
0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504,
0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946,
0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117,
0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253,
0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456,
-0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552,
0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797,
-0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272,
0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165,
-0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922,
-0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548,
0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786,
-0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722,
0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318,
-0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776,
-0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307,
0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969,
-0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593,
-0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515,
-0.03606021, -0.07421458, -0.028082801, -0.2678904, -0.13221288,
0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723,
0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097,
-0.12522776, -0.098633975, -0.10766018, -0.08317623, 0.08594209,
0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268,
0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139,
0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707,
0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871,
0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553,
-0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702,
-0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615,
0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187,
-0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388,
-0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709,
0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263,
0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777,
0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935,
-0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641,
-0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996,
-0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318,
0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437,
-0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079,
0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237,
0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415,
-0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124,
-0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943,
-0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311,
0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013,
-0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364,
-0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543,
-0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102,
0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906,
0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955,
0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656],
projection_bias: [],
}
# Batch0: 4 (input_sequence_size) * 5 (n_input)
input0[input] = [0.867394, 0.291279, 0.013714, 0.482521, 0.626339]
# Batch1: 4 (input_sequence_size) * 5 (n_input)
input0[input].extend(
[0.082922, 0.563329, 0.865614, 0.333232, 0.259916]
)
input0[output_state_in] = [
-0.0213783, 0.0350169, 0.000324787, 0.0276012,
-0.0263374, -0.0371449, 0.0446149, -0.0205474,
0.0103729, -0.0576349, -0.0150052, -0.0292043,
0.0376827, 0.0136115, 0.0243435, 0.0354492,
-0.0204549, 0.0450315, -0.00117379, 0.0167673,
-0.0375007, -0.0238314, 0.038784, -0.0174034,
0.0131743, -0.0506589, -0.00484469, -0.0240239,
0.0325789, 0.00790064, 0.0220157, 0.0333314,
]
input0[cell_state_in] = [
-0.126572, -0.121882, 0.121569, 0.0489971,
-0.240177, -0.124685, -0.122565, 0.0162748,
0.0317536, -0.0270355, 0.0418199, -0.179755,
-0.327279, -0.0342741, 0.133831, -0.0238279,
0.122148, 0.269115, 0.185989, 0.525976,
-0.167208, -0.109612, 0.0531226, 0.0695387,
-0.248335, -0.134123, -0.108246, 0.00628498,
0.0492984, -0.0264919, 0.0698144, -0.0635602,
-0.295363, -0.0760078, 0.102725, -0.0351708,
0.149804, 0.259131, 0.202573, 0.500664,
]
output0 = {
scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
cell_state_out: [ 0 for x in range(n_batch * n_cell) ],
output_state_out: [ 0 for x in range(n_batch * n_output) ],
}
# Batch0: 4 (input_sequence_size) * 16 (n_output)
output0[output] = [
-0.0189322, 0.0464512, -0.00251373, 0.0225745,
-0.0308346, -0.0317124, 0.0460407, -0.0189395,
0.0149363, -0.0530162, -0.0150767, -0.0340193,
0.0286833, 0.00824207, 0.0264887, 0.0305169]
# Batch1: 4 (input_sequence_size) * 16 (n_output)
output0[output].extend(
[-0.0264787, 0.0387855, -0.000764675, 0.0217599,
-0.037537, -0.0335206, 0.0431679, -0.0211424,
0.010203, -0.062785, -0.00832363, -0.025181,
0.0412031, 0.0118723, 0.0239643, 0.0394009]
)
Example((input0, output0))
|
#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# LSTM Test, With Peephole, With Projection, No Clipping
#
# TENSOR_FLOAT16 variant: builds the model graph for an NNAPI LSTM test
# case with all four gates present (CIFG disabled), peephole
# (cell-to-gate) connections enabled, and a recurrent projection layer.
# Both clip thresholds are 0, which disables clipping.
model = Model()
n_batch = 2
n_input = 5
# NOTE(review): n_cell and n_output match only in the projection-less
# variants of this test; here the projection layer maps the 20-dim cell
# state down to a 16-dim output, so they deliberately differ.
n_cell = 20
n_output = 16
# Network input: {n_batch, n_input}.
input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
# Input-to-gate weights: one {n_cell, n_input} matrix per gate.
input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
# Recurrent weights: one {n_cell, n_output} matrix per gate (the
# recurrent path consumes the projected output, hence n_output columns).
recurrent_to_input_weights = Input("recurrent_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
# Peephole weights: one {n_cell} vector per peephole-connected gate.
cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{%d}" %(n_cell))
cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
# Per-gate bias vectors, each of size n_cell.
input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
# Projection layer: maps the n_cell activation to n_output; the bias
# tensor is declared with shape {0}, i.e. no projection bias is used.
projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{%d,%d}" % (n_output, n_cell))
projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
# Recurrent state fed in from the previous step.
output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
activation_param = Int32Scalar("activation_param", 4) # Tanh
# Clip thresholds of 0 disable cell-state and projection clipping.
cell_clip_param = Float16Scalar("cell_clip_param", 0.)
proj_clip_param = Float16Scalar("proj_clip_param", 0.)
# scratch_buffer and the two *_out state tensors are produced but not
# compared against golden data (IgnoredOutput); only `output` is checked.
scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, (n_cell * 4)))
output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
# Wire the op: operand order must match the LSTM op signature exactly.
model = model.Operation("LSTM",
                        input,
                        input_to_input_weights,
                        input_to_forget_weights,
                        input_to_cell_weights,
                        input_to_output_weights,
                        recurrent_to_input_weights,
                        recurrent_to_forget_weights,
                        recurrent_to_cell_weights,
                        recurrent_to_output_weights,
                        cell_to_input_weights,
                        cell_to_forget_weights,
                        cell_to_output_weights,
                        input_gate_bias,
                        forget_gate_bias,
                        cell_gate_bias,
                        output_gate_bias,
                        projection_weights,
                        projection_bias,
                        output_state_in,
                        cell_state_in,
                        activation_param,
                        cell_clip_param,
                        proj_clip_param
).To([scratch_buffer, output_state_out, cell_state_out, output])
input0 = {input_to_input_weights: [
0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
-0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
-0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
-0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
-0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
-0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
-0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
-0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
-0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
-0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
-0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
-0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
-0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
-0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677],
input_to_forget_weights: [
-0.0018401089, -0.004852237, 0.03698424, 0.014181704, 0.028273236,
-0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505,
-0.009899187, 0.01923892, -0.028177269, -0.08535103, -0.14585495,
0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323,
0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421,
-0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887,
-0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791,
0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059,
0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068,
0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905,
0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605,
-0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464,
0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506,
-0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063,
-0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375,
0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553,
0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353,
0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717,
-0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371,
0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496],
input_to_cell_weights: [
-0.04580283, -0.09549462, -0.032418985, -0.06454633,
-0.043528453, 0.043018587, -0.049152344, -0.12418144,
-0.078985475, -0.07596889, 0.019484362, -0.11434962,
-0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
-0.025034338, -0.0028890965, 0.048929527, 0.06235075,
0.10665918, -0.032036792, -0.08505916, -0.10843358,
-0.13002433, -0.036816437, -0.02130134, -0.016518239,
0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
-0.10652836, -0.1037554, -0.13056071, -0.03266643,
-0.033702414, -0.006473424, -0.04611692, 0.014419339,
-0.025174323, 0.0396852, 0.081777506, 0.06157468,
0.10210095, -0.009658194, 0.046511717, 0.03603906,
0.0069369148, 0.015960095, -0.06507666, 0.09551598,
0.053568836, 0.06408714, 0.12835667, -0.008714329,
-0.20211966, -0.12093674, 0.029450472, 0.2849013,
-0.029227901, 0.1164364, -0.08560263, 0.09941786,
-0.036999565, -0.028842626, -0.0033637602, -0.017012902,
-0.09720865, -0.11193351, -0.029155117, -0.017936034,
-0.009768936, -0.04223324, -0.036159635, 0.06505112,
-0.021742892, -0.023377212, -0.07221364, -0.06430552,
0.05453865, 0.091149814, 0.06387331, 0.007518393,
0.055960953, 0.069779344, 0.046411168, 0.10509911,
0.07463894, 0.0075130584, 0.012850982, 0.04555431,
0.056955688, 0.06555285, 0.050801456, -0.009862683,
0.00826772, -0.026555609, -0.0073611983, -0.0014897042],
input_to_output_weights: [
-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
-0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
-0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
-0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
-0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
-0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
-0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
-0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
-0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
-0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
-0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
-0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956],
input_gate_bias: [
0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216,
-0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339,
-0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818,
0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196],
forget_gate_bias: [
0.035185695, -0.042891346, -0.03032477, 0.23027696,
0.11098921, 0.15378423, 0.09263801, 0.09790885,
0.09508917, 0.061199076, 0.07665568, -0.015443159,
-0.03499149, 0.046190713, 0.08895977, 0.10899629,
0.40694186, 0.06030037, 0.012413437, -0.06108739],
cell_gate_bias: [
-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
-0.1483596, -0.10639995, -0.091433935, 0.058573797,
-0.06809782, -0.07889636, -0.043246906, -0.09829136,
-0.4279842, 0.034901652, 0.18797937, 0.0075234566,
0.016178843, 0.1749513, 0.13975595, 0.92058027],
output_gate_bias: [
0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795,
0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895,
0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149,
-0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877],
recurrent_to_input_weights: [
-0.001374326, -0.078856036, 0.10672688, 0.029162422,
-0.11585556, 0.02557986, -0.13446963, -0.035785314,
-0.01244275, 0.025961924, -0.02337298, -0.044228926,
-0.055839065, -0.046598054, -0.010546039, -0.06900766,
0.027239809, 0.022582639, -0.013296484, -0.05459212,
0.08981, -0.045407712, 0.08682226, -0.06867011,
-0.14390695, -0.02916037, 0.000996957, 0.091420636,
0.14283475, -0.07390571, -0.06402044, 0.062524505,
-0.093129106, 0.04860203, -0.08364217, -0.08119002,
0.009352075, 0.22920375, 0.0016303885, 0.11583097,
-0.13732095, 0.012405723, -0.07551853, 0.06343048,
0.12162708, -0.031923793, -0.014335606, 0.01790974,
-0.10650317, -0.0724401, 0.08554849, -0.05727212,
0.06556731, -0.042729504, -0.043227166, 0.011683251,
-0.013082158, -0.029302018, -0.010899579, -0.062036745,
-0.022509435, -0.00964907, -0.01567329, 0.04260106,
-0.07787477, -0.11576462, 0.017356863, 0.048673786,
-0.017577527, -0.05527947, -0.082487635, -0.040137455,
-0.10820036, -0.04666372, 0.022746278, -0.07851417,
0.01068115, 0.032956902, 0.022433773, 0.0026891115,
0.08944216, -0.0685835, 0.010513544, 0.07228705,
0.02032331, -0.059686817, -0.0005566496, -0.086984694,
0.040414046, -0.1380399, 0.094208956, -0.05722982,
0.012092817, -0.04989123, -0.086576, -0.003399834,
-0.04696032, -0.045747425, 0.10091314, 0.048676282,
-0.029037097, 0.031399418, -0.0040285117, 0.047237843,
0.09504992, 0.041799378, -0.049185462, -0.031518843,
-0.10516937, 0.026374253, 0.10058866, -0.0033195973,
-0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
-0.10167381, 0.042500053, -0.01447153, 0.06464186,
-0.017142897, 0.03312627, 0.009205989, 0.024138335,
-0.011337001, 0.035530265, -0.010912711, 0.0706555,
-0.005894094, 0.051841937, -0.1401738, -0.02351249,
0.0365468, 0.07590991, 0.08838724, 0.021681072,
-0.10086113, 0.019608743, -0.06195883, 0.077335775,
0.023646897, -0.095322326, 0.02233014, 0.09756986,
-0.048691444, -0.009579111, 0.07595467, 0.11480546,
-0.09801813, 0.019894179, 0.08502348, 0.004032281,
0.037211012, 0.068537936, -0.048005626, -0.091520436,
-0.028379958, -0.01556313, 0.06554592, -0.045599163,
-0.01672207, -0.020169014, -0.011877351, -0.20212261,
0.010889619, 0.0047078193, 0.038385306, 0.08540671,
-0.017140968, -0.0035865551, 0.016678626, 0.005633034,
0.015963363, 0.00871737, 0.060130805, 0.028611384,
0.10109069, -0.015060172, -0.07894427, 0.06401885,
0.011584063, -0.024466386, 0.0047652307, -0.09041358,
0.030737216, -0.0046374933, 0.14215417, -0.11823516,
0.019899689, 0.006106124, -0.027092824, 0.0786356,
0.05052217, -0.058925, -0.011402121, -0.024987547,
-0.0013661642, -0.06832946, -0.015667673, -0.1083353,
-0.00096863037, -0.06988685, -0.053350925, -0.027275559,
-0.033664223, -0.07978348, -0.025200296, -0.017207067,
-0.058403496, -0.055697463, 0.005798788, 0.12965427,
-0.062582195, 0.0013350133, -0.10482091, 0.0379771,
0.072521195, -0.0029455067, -0.13797039, -0.03628521,
0.013806405, -0.017858358, -0.01008298, -0.07700066,
-0.017081132, 0.019358726, 0.0027079724, 0.004635139,
0.062634714, -0.02338735, -0.039547626, -0.02050681,
0.03385117, -0.083611414, 0.002862572, -0.09421313,
0.058618143, -0.08598433, 0.00972939, 0.023867095,
-0.053934585, -0.023203006, 0.07452513, -0.048767887,
-0.07314807, -0.056307215, -0.10433547, -0.06440842,
0.04328182, 0.04389765, -0.020006588, -0.09076438,
-0.11652589, -0.021705797, 0.03345259, -0.010329105,
-0.025767034, 0.013057034, -0.07316461, -0.10145612,
0.06358255, 0.18531723, 0.07759293, 0.12006465,
0.1305557, 0.058638252, -0.03393652, 0.09622831,
-0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
-0.005644518, 0.06857898, -0.12598175, -0.035084512,
0.03156317, -0.12794146, -0.031963028, 0.04692781,
0.030070418, 0.0071660685, -0.095516115, -0.004643372,
0.040170413, -0.062104587, -0.0037324072, 0.0554317,
0.08184801, -0.019164372, 0.06791302, 0.034257166,
-0.10307039, 0.021943003, 0.046745934, 0.0790918,
-0.0265588, -0.007824208, 0.042546265, -0.00977924,
-0.0002440307, -0.017384544, -0.017990116, 0.12252321,
-0.014512694, -0.08251313, 0.08861942, 0.13589665,
0.026351685, 0.012641483, 0.07466548, 0.044301085,
-0.045414884, -0.051112458, 0.03444247, -0.08502782,
-0.04106223, -0.028126027, 0.028473156, 0.10467447],
recurrent_to_forget_weights: [
-0.057784554, -0.026057621, -0.068447545, -0.022581743,
0.14811787, 0.10826372, 0.09471067, 0.03987225,
-0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
0.08414449, -0.022036452, -0.00066928595, -0.09203576,
0.032950465, -0.10985798, -0.023809856, 0.0021431844,
-0.02196096, -0.00326074, 0.00058621005, -0.074678116,
-0.06193199, 0.055729095, 0.03736828, 0.020123724,
0.061878487, -0.04729229, 0.034919553, -0.07585433,
-0.04421272, -0.044019096, 0.085488975, 0.04058006,
-0.06890133, -0.030951202, -0.024628663, -0.07672815,
0.034293607, 0.08556707, -0.05293577, -0.033561368,
-0.04899627, 0.0241671, 0.015736353, -0.095442444,
-0.029564252, 0.016493602, -0.035026584, 0.022337519,
-0.026871363, 0.004780428, 0.0077918363, -0.03601621,
0.016435321, -0.03263031, -0.09543275, -0.047392778,
0.013454138, 0.028934088, 0.01685226, -0.086110644,
-0.046250615, -0.01847454, 0.047608484, 0.07339695,
0.034546845, -0.04881143, 0.009128804, -0.08802852,
0.03761666, 0.008096139, -0.014454086, 0.014361001,
-0.023502491, -0.0011840804, -0.07607001, 0.001856849,
-0.06509276, -0.006021153, -0.08570962, -0.1451793,
0.060212336, 0.055259194, 0.06974018, 0.049454916,
-0.027794661, -0.08077226, -0.016179763, 0.1169753,
0.17213494, -0.0056326236, -0.053934924, -0.0124349,
-0.11520337, 0.05409887, 0.088759385, 0.0019655675,
0.0042065294, 0.03881498, 0.019844765, 0.041858196,
-0.05695512, 0.047233116, 0.038937137, -0.06542224,
0.014429736, -0.09719407, 0.13908425, -0.05379757,
0.012321099, 0.082840554, -0.029899208, 0.044217527,
0.059855383, 0.07711018, -0.045319796, 0.0948846,
-0.011724666, -0.0033288454, -0.033542685, -0.04764985,
-0.13873616, 0.040668588, 0.034832682, -0.015319203,
-0.018715994, 0.046002675, 0.0599172, -0.043107376,
0.0294216, -0.002314414, -0.022424703, 0.0030315618,
0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
0.12375372, -0.0006038222, 0.029104086, 0.087442465,
0.052958444, 0.07558703, 0.04817258, 0.044462286,
-0.015213451, -0.08783778, -0.0561384, -0.003008196,
0.047060397, -0.002058388, 0.03429439, -0.018839769,
0.024734668, 0.024614193, -0.042046934, 0.09597743,
-0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
-0.02558259, -0.022822596, -0.023273505, -0.02464396,
-0.10991725, -0.006240552, 0.0074488563, 0.024044557,
0.04383914, -0.046476185, 0.028658995, 0.060410924,
0.050786525, 0.009452605, -0.0073054377, -0.024810238,
0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
0.015898481, 0.021362653, -0.030262267, 0.016587038,
-0.011442813, 0.041154444, -0.007631438, -0.03423484,
-0.010977775, 0.036152758, 0.0066366293, 0.11915515,
0.02318443, -0.041350313, 0.021485701, -0.10906167,
-0.028218046, -0.00954771, 0.020531068, -0.11995105,
-0.03672871, 0.024019798, 0.014255957, -0.05221243,
-0.00661567, -0.04630967, 0.033188973, 0.10107534,
-0.014027541, 0.030796422, -0.10270911, -0.035999842,
0.15443139, 0.07684145, 0.036571592, -0.035900835,
-0.0034699554, 0.06209149, 0.015920248, -0.031122351,
-0.03858649, 0.01849943, 0.13872518, 0.01503974,
0.069941424, -0.06948533, -0.0088794185, 0.061282158,
-0.047401894, 0.03100163, -0.041533746, -0.10430945,
0.044574402, -0.01425562, -0.024290353, 0.034563623,
0.05866852, 0.023947537, -0.09445152, 0.035450947,
0.02247216, -0.0042998926, 0.061146557, -0.10250651,
0.020881841, -0.06747029, 0.10062043, -0.0023941975,
0.03532124, -0.016341697, 0.09685456, -0.016764693,
0.051808182, 0.05875331, -0.04536488, 0.001626336,
-0.028892258, -0.01048663, -0.009793449, -0.017093895,
0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
-0.001845119, -0.03551521, 0.0018358806, 0.05763657,
-0.01769146, 0.040995963, 0.02235177, -0.060430344,
0.11475477, -0.023854522, 0.10071741, 0.0686208,
-0.014250481, 0.034261297, 0.047418304, 0.08562733,
-0.030519066, 0.0060542435, 0.014653856, -0.038836084,
0.04096551, 0.032249358, -0.08355519, -0.026823482,
0.056386515, -0.010401743, -0.028396193, 0.08507674,
0.014410365, 0.020995233, 0.17040324, 0.11511526,
0.02459721, 0.0066619175, 0.025853224, -0.023133837,
-0.081302024, 0.017264642, -0.009585969, 0.09491168,
-0.051313367, 0.054532815, -0.014298593, 0.10657464,
0.007076659, 0.10964551, 0.0409152, 0.008275321,
-0.07283536, 0.07937492, 0.04192024, -0.1075027],
recurrent_to_cell_weights: [
-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
0.055647098, -0.05713207, -0.05626563, 0.005559383,
0.03375411, -0.025757805, -0.088049285, 0.06017052,
-0.06570978, 0.007384076, 0.035123326, -0.07920549,
0.053676967, 0.044480428, -0.07663568, 0.0071805613,
0.08089997, 0.05143358, 0.038261272, 0.03339287,
-0.027673481, 0.044746667, 0.028349208, 0.020090483,
-0.019443132, -0.030755889, -0.0040000007, 0.04465846,
-0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
-0.10893326, 0.076739706, -0.08509834, -0.027997585,
0.037871376, 0.01449768, -0.09002357, -0.06111149,
-0.046195522, 0.0422062, -0.005683705, -0.1253618,
-0.012925729, -0.04890792, 0.06985068, 0.037654128,
0.03398274, -0.004781977, 0.007032333, -0.031787455,
0.010868644, -0.031489216, 0.09525667, 0.013939797,
0.0058680447, 0.0167067, 0.02668468, -0.04797466,
-0.048885044, -0.12722108, 0.035304096, 0.06554885,
0.00972396, -0.039238118, -0.05159735, -0.11329045,
0.1613692, -0.03750952, 0.06529313, -0.071974665,
-0.11769596, 0.015524369, -0.0013754242, -0.12446318,
0.02786344, -0.014179351, 0.005264273, 0.14376344,
0.015983658, 0.03406988, -0.06939408, 0.040699873,
0.02111075, 0.09669095, 0.041345075, -0.08316494,
-0.07684199, -0.045768797, 0.032298047, -0.041805092,
0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
-0.024950314, 0.11574242, 0.04508852, -0.04335324,
0.06760663, -0.027437469, 0.07216407, 0.06977076,
-0.05438599, 0.034033038, -0.028602652, 0.05346137,
0.043184172, -0.037189785, 0.10420091, 0.00882477,
-0.054019816, -0.074273005, -0.030617684, -0.0028467078,
0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
0.04361412, -0.007001822, 0.09631092, -0.06702025,
-0.042049985, -0.035070654, -0.04103342, -0.10273396,
0.0544271, 0.037184782, -0.13150354, -0.0058036847,
-0.008264958, 0.042035464, 0.05891794, 0.029673764,
0.0063542654, 0.044788733, 0.054816857, 0.062257513,
-0.00093483756, 0.048938446, -0.004952862, -0.007730018,
-0.04043371, -0.017094059, 0.07229206, -0.023670016,
-0.052195564, -0.025616996, -0.01520939, 0.045104615,
-0.007376126, 0.003533447, 0.006570588, 0.056037236,
0.12436656, 0.051817212, 0.028532185, -0.08686856,
0.11868599, 0.07663395, -0.07323171, 0.03463402,
-0.050708205, -0.04458982, -0.11590894, 0.021273347,
0.1251325, -0.15313013, -0.12224372, 0.17228661,
0.023029093, 0.086124025, 0.006445803, -0.03496501,
0.028332196, 0.04449512, -0.042436164, -0.026587414,
-0.006041347, -0.09292539, -0.05678812, 0.03897832,
0.09465633, 0.008115513, -0.02171956, 0.08304309,
0.071401566, 0.019622514, 0.032163795, -0.004167056,
0.02295182, 0.030739572, 0.056506045, 0.004612461,
0.06524936, 0.059999723, 0.046395954, -0.0045512207,
-0.1335546, -0.030136576, 0.11584653, -0.014678886,
0.0020118146, -0.09688814, -0.0790206, 0.039770417,
-0.0329582, 0.07922767, 0.029322514, 0.026405897,
0.04207835, -0.07073373, 0.063781224, 0.0859677,
-0.10925287, -0.07011058, 0.048005477, 0.03438226,
-0.09606514, -0.006669445, -0.043381985, 0.04240257,
-0.06955775, -0.06769346, 0.043903265, -0.026784198,
-0.017840602, 0.024307009, -0.040079936, -0.019946516,
0.045318738, -0.12233574, 0.026170589, 0.0074471775,
0.15978073, 0.10185836, 0.10298046, -0.015476589,
-0.039390966, -0.072174534, 0.0739445, -0.1211869,
-0.0347889, -0.07943156, 0.014809798, -0.12412325,
-0.0030663363, 0.039695457, 0.0647603, -0.08291318,
-0.018529687, -0.004423833, 0.0037507233, 0.084633216,
-0.01514876, -0.056505352, -0.012800942, -0.06994386,
0.012962922, -0.031234352, 0.07029052, 0.016418684,
0.03618972, 0.055686004, -0.08663945, -0.017404709,
-0.054761406, 0.029065743, 0.052404847, 0.020238016,
0.0048197987, -0.0214882, 0.07078733, 0.013016777,
0.06262858, 0.009184685, 0.020785125, -0.043904778,
-0.0270329, -0.03299152, -0.060088247, -0.015162964,
-0.001828936, 0.12642565, -0.056757294, 0.013586685,
0.09232601, -0.035886683, 0.06000002, 0.05229691,
-0.052580316, -0.082029596, -0.010794592, 0.012947712,
-0.036429964, -0.085508935, -0.13127148, -0.017744139,
0.031502828, 0.036232427, -0.031581745, 0.023051167,
-0.05325106, -0.03421577, 0.028793324, -0.034633752,
-0.009881397, -0.043551125, -0.018609839, 0.0019097115,
-0.008799762, 0.056595087, 0.0022273948, 0.055752404],
recurrent_to_output_weights: [
0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415,
-0.0026479573, -0.08196161, -0.054914974, -0.0046604523, -0.029587349,
-0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948,
-0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774,
-0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125,
-0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224,
-0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088,
0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867,
-0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728,
0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607,
-0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928,
-0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462,
0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879,
0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698,
-0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146,
0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345,
0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166,
0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203,
0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743,
0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415,
-0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618,
0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891,
-0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015,
0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109,
0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886,
0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396,
-0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282,
-0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025,
-0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575,
-0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277,
-0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719,
-0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215,
0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483,
0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102,
-0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775,
0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841,
-0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656,
-0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286,
-0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309,
0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545,
0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754,
0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831,
-0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697,
0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453,
-0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222,
-0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989,
-0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827,
-0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949,
0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819,
-0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954,
0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228,
-0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001,
-0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939,
-0.021256343, -0.041093912, -0.06669611, 0.035498552, 0.021757556,
-0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718,
0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893,
0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974,
-0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485,
0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856,
0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853,
-0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019,
0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024,
0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994,
0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621],
cell_to_input_weights: [
0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
-0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
-0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175],
cell_to_forget_weights: [
-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
-0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
-0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355],
cell_to_output_weights: [
0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
-0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
-0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733],
projection_weights: [
-0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832,
0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683,
-0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931,
-0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476,
0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067,
0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787,
0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588,
0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285,
-0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949,
-0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768,
-0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929,
0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504,
0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946,
0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117,
0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253,
0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456,
-0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552,
0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797,
-0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272,
0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165,
-0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922,
-0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548,
0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786,
-0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722,
0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318,
-0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776,
-0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307,
0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969,
-0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593,
-0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515,
-0.03606021, -0.07421458, -0.028082801, -0.2678904, -0.13221288,
0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723,
0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097,
-0.12522776, -0.098633975, -0.10766018, -0.08317623, 0.08594209,
0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268,
0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139,
0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707,
0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871,
0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553,
-0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702,
-0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615,
0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187,
-0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388,
-0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709,
0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263,
0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777,
0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935,
-0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641,
-0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996,
-0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318,
0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437,
-0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079,
0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237,
0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415,
-0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124,
-0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943,
-0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311,
0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013,
-0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364,
-0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543,
-0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102,
0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906,
0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955,
0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656],
projection_bias: [],
}
# Batch0: 4 (input_sequence_size) * 5 (n_input)
input0[input] = [0.867394, 0.291279, 0.013714, 0.482521, 0.626339]
# Batch1: 4 (input_sequence_size) * 5 (n_input)
input0[input].extend(
[0.082922, 0.563329, 0.865614, 0.333232, 0.259916]
)
input0[output_state_in] = [
-0.0213783, 0.0350169, 0.000324787, 0.0276012,
-0.0263374, -0.0371449, 0.0446149, -0.0205474,
0.0103729, -0.0576349, -0.0150052, -0.0292043,
0.0376827, 0.0136115, 0.0243435, 0.0354492,
-0.0204549, 0.0450315, -0.00117379, 0.0167673,
-0.0375007, -0.0238314, 0.038784, -0.0174034,
0.0131743, -0.0506589, -0.00484469, -0.0240239,
0.0325789, 0.00790064, 0.0220157, 0.0333314,
]
input0[cell_state_in] = [
-0.126572, -0.121882, 0.121569, 0.0489971,
-0.240177, -0.124685, -0.122565, 0.0162748,
0.0317536, -0.0270355, 0.0418199, -0.179755,
-0.327279, -0.0342741, 0.133831, -0.0238279,
0.122148, 0.269115, 0.185989, 0.525976,
-0.167208, -0.109612, 0.0531226, 0.0695387,
-0.248335, -0.134123, -0.108246, 0.00628498,
0.0492984, -0.0264919, 0.0698144, -0.0635602,
-0.295363, -0.0760078, 0.102725, -0.0351708,
0.149804, 0.259131, 0.202573, 0.500664,
]
output0 = {
scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
cell_state_out: [ 0 for x in range(n_batch * n_cell) ],
output_state_out: [ 0 for x in range(n_batch * n_output) ],
}
# Batch0: 4 (input_sequence_size) * 16 (n_output)
output0[output] = [
-0.0189322, 0.0464512, -0.00251373, 0.0225745,
-0.0308346, -0.0317124, 0.0460407, -0.0189395,
0.0149363, -0.0530162, -0.0150767, -0.0340193,
0.0286833, 0.00824207, 0.0264887, 0.0305169]
# Batch1: 4 (input_sequence_size) * 16 (n_output)
output0[output].extend(
[-0.0264787, 0.0387855, -0.000764675, 0.0217599,
-0.037537, -0.0335206, 0.0431679, -0.0211424,
0.010203, -0.062785, -0.00832363, -0.025181,
0.0412031, 0.0118723, 0.0239643, 0.0394009]
)
Example((input0, output0))
|
en
| 0.751353
|
# # Copyright (C) 2017 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # LSTM Test, With Peephole, With Projection, No Clipping # n_cell and n_output have the same size when there is no projection. # Tanh # Batch0: 4 (input_sequence_size) * 5 (n_input) # Batch1: 4 (input_sequence_size) * 5 (n_input) # Batch0: 4 (input_sequence_size) * 16 (n_output) # Batch1: 4 (input_sequence_size) * 16 (n_output)
| 1.995155
| 2
|
src/cicadad/util/context.py
|
cicadatesting/cicada-distributed
| 6
|
6626537
|
<filename>src/cicadad/util/context.py
from typing import Any
import base64
import json
def encode_context(context: Any) -> str:
    """Serialize *context* to JSON and return it as a base64 ASCII string.

    Args:
        context (any): Context object. Must be json serializable

    Returns:
        str: Encoded context base64 string
    """
    serialized = json.dumps(context).encode("ascii")
    return base64.b64encode(serialized).decode("ascii")
def decode_context(encoded_context: str) -> Any:
    """Reverse of ``encode_context``: base64-decode, then JSON-deserialize.

    Args:
        encoded_context (str): Base64 context string

    Returns:
        any: Decoded context
    """
    raw_bytes = base64.b64decode(encoded_context.encode("ascii"))
    return json.loads(raw_bytes.decode("ascii"))
|
<filename>src/cicadad/util/context.py
from typing import Any
import base64
import json
def encode_context(context: Any) -> str:
    """Encode a context as a base64 string.

    Args:
        context (any): Context object. Must be json serializable

    Returns:
        str: Encoded context base64 string
    """
    # JSON-serialize first, then base64-encode, so the result is plain ASCII
    # that can travel safely through text-only channels.
    return base64.b64encode(json.dumps(context).encode("ascii")).decode("ascii")
def decode_context(encoded_context: str) -> Any:
    """Decode a base64 string to a JSON-deserialized context.

    Inverse of ``encode_context``.

    Args:
        encoded_context (str): Base64 context string

    Returns:
        any: Decoded context
    """
    decoded_context_bytes = base64.b64decode(
        encoded_context.encode("ascii"),
    )
    return json.loads(decoded_context_bytes.decode("ascii"))
|
en
| 0.413254
|
Encode a context as a base64 string Args: context (any): Context object. Must be json serializable Returns: str: Encoded context base64 string Decode a base64 string to JSON deserialized context Args: encoded_context (str): Base64 context string Returns: any: Decoded context
| 3.005459
| 3
|
obsidion/cogs/fun/fun.py
|
Darkflame72/Minecraft-Discord
| 1
|
6626538
|
"""Fun cog."""
from __future__ import annotations
import logging
from random import choice
from typing import List
from typing import TYPE_CHECKING
import discord
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.context import SlashContext
from discord_slash.utils.manage_commands import create_choice
from discord_slash.utils.manage_commands import create_option
from obsidion.core.i18n import cog_i18n
from obsidion.core.i18n import Translator
if TYPE_CHECKING:
from obsidion.core.bot import Obsidion
log = logging.getLogger(__name__)
_ = Translator("Fun", __file__)
# Glyphs used for the "enchanting table" text conversion. Index-aligned with
# `alphabet` below: minecraft[i] is the glyph for alphabet[i]. Note that some
# entries are TWO characters long ("!¡", " ̇", "||"), which is why the
# `enchant` command peeks two characters at a time when decoding.
minecraft = (
    "ᔑ",
    "ʖ",
    "ᓵ",
    "↸",
    "ᒷ",
    "⎓",
    "⊣",
    "⍑",
    "╎",
    "⋮",
    "ꖌ",
    "ꖎ",
    "ᒲ",
    "リ",
    "𝙹",
    "!¡",
    "ᑑ",
    "∷",
    "ᓭ",
    "ℸ",
    "⚍",
    "⍊",
    "∴",
    " ̇",
    "||",
    "⨅",
)
# Plain letters, index-aligned with `minecraft`.
alphabet = "abcdefghijklmnopqrstuvwxyz"
@cog_i18n(_)
class Fun(commands.Cog):
    """Miscellaneous fun slash commands: build ideas, kill/PVP messages,
    villager speech, enchanting-table text conversion, and rock-paper-shears.
    """

    def __init__(self, bot: Obsidion) -> None:
        """Init."""
        self.bot = bot
        # Load the random-message pools once at cog construction time.
        self.pvp_mes = self.load_from_file("pvp")
        self.kill_mes = self.load_from_file("kill")
        self.build_ideas_mes = self.load_from_file("build_ideas")

    @staticmethod
    def load_from_file(file: str) -> List[str]:
        """Load text from file.

        Args:
            file (str): file name

        Returns:
            List[str]: list of lines, stripped of surrounding whitespace
        """
        with open(f"obsidion/cogs/fun/resources/{file}.txt") as f:
            content = f.readlines()
        return [x.strip() for x in content]

    @cog_ext.cog_slash(name="buildidea", description="Get an idea for a new build.")
    async def buildidea(self, ctx: SlashContext) -> None:
        """Get an idea for a new build."""
        embed = self.bot.build_embed(
            title=_("Build idea"),
            description=choice(self.build_ideas_mes),
        )
        await ctx.send(embed=embed)

    @cog_ext.cog_slash(
        name="kill",
        description="Kill that pesky friend in a fun and stylish way.",
        options=[
            create_option(
                name="username", description="Friend.", option_type=6, required=True
            )
        ],
    )
    async def kill(self, ctx: SlashContext, username: discord.Member) -> None:
        """Send a random kill message targeting the given member."""
        embed = self.bot.build_embed(
            title=_("Kill"),
            # The message templates contain the placeholder word "member".
            description=choice(self.kill_mes).replace("member", username.mention),
        )
        await ctx.send(embed=embed)

    @cog_ext.cog_slash(
        name="pvp",
        description="Duel someone.",
        options=[
            create_option(
                name="player1", description="Player 1.", option_type=6, required=True
            ),
            create_option(
                name="player2", description="Player 2.", option_type=6, required=False
            ),
        ],
    )
    async def pvp(
        self, ctx: SlashContext, player1: discord.Member, player2: discord.Member = None
    ) -> None:
        """Send a random PVP message; player2 defaults to the command author."""
        if not player2:
            player2 = ctx.author
        embed = self.bot.build_embed(
            title=_("PVP"),
            description=choice(self.pvp_mes)
            .replace("member1", player1.mention)
            .replace("member2", player2.mention),
        )
        await ctx.send(embed=embed)

    @cog_ext.cog_slash(
        name="villager",
        description="Hmm hm hmmm Hm hmmm hmm.",
        options=[
            create_option(
                name="speech",
                description="Hmm",
                option_type=3,
                required=True,
            )
        ],
    )
    async def villager(self, ctx: SlashContext, speech: str):
        """Translate *speech* into villager noises ("Hmm HmmHmm ...")."""
        last_was_alpha = False  # Used to detect the start of a word
        last_was_h = False  # Used to prevent 'H's without 'm's
        last_was_lower_m = False  # Used to make "HmmHmm" instead of "HmmMmm"
        sentence = ""
        for char in speech:
            if char.isalpha():  # Alphabetical letter -- Replace with 'Hmm'
                if not last_was_alpha:  # First letter of alphabetical string
                    sentence += "H" if char.isupper() else "h"
                    last_was_h = True
                    last_was_lower_m = False
                else:  # Non-first letter
                    if not char.isupper():
                        sentence += "m"
                        last_was_lower_m = True
                        last_was_h = False
                    else:
                        # Use an 'H' instead to allow CamelCase 'HmmHmm's
                        if last_was_lower_m:
                            sentence += "H"
                            last_was_h = True
                        else:
                            sentence += "M"
                            last_was_h = False
                        last_was_lower_m = False
                last_was_alpha = True  # Remember for next potential 'M'
            else:  # Non-alphabetical letters -- Do not replace
                # Add an m after 'H's without 'm's
                if last_was_h:
                    sentence += "m"
                    last_was_h = False
                # Add non-letter character without changing it
                sentence += char
                last_was_alpha = False
        # If the last character is an H, add a final 'm'
        if last_was_h:
            sentence += "m"
        # Bug fix: the original sent the untranslated input (`speech`) instead
        # of the villager translation built above.
        await ctx.send(sentence)

    @cog_ext.cog_slash(
        name="enchant",
        description="Enchant a message.",
        options=[
            create_option(
                name="msg",
                description="Text to enchant or unenchant.",
                option_type=3,
                required=True,
            )
        ],
    )
    async def enchant(self, ctx, msg: str):
        """Convert text to/from the enchanting-table glyph alphabet."""
        response = ""
        letter_pos = 0
        while letter_pos < len(msg):
            # Some glyphs are two characters wide; prefer a two-char match.
            letter = (
                msg[letter_pos : letter_pos + 2]
                if msg[letter_pos : letter_pos + 2] in minecraft
                else msg[letter_pos]
            )
            letter = letter.lower()
            if letter in alphabet:
                response += minecraft[alphabet.index(letter)]
            elif letter in minecraft:
                response += alphabet[minecraft.index(letter)]
                if len(letter) == 2:
                    # Consumed an extra character for a two-char glyph.
                    letter_pos += 1
            else:
                # Not translatable in either direction; keep as-is.
                response += letter
            letter_pos += 1
        embed = self.bot.build_embed(
            title=_("Enchant"),
            description=response,
        )
        await ctx.send(embed=embed)

    @cog_ext.cog_slash(name="creeper", description="Aw man.")
    async def creeper(self, ctx: SlashContext):
        """Aw man."""
        await ctx.send("Aw man.")

    @cog_ext.cog_slash(
        name="rps",
        description="Play Rock Paper Shears.",
        options=[
            create_option(
                name="choice",
                description="Rock paper or shears",
                option_type=3,
                required=True,
                choices=[
                    create_choice(name="Rock", value="rock"),
                    create_choice(name="Paper", value="paper"),
                    create_choice(name="Shears", value="shears"),
                ],
            )
        ],
    )
    async def rps(self, ctx: SlashContext, user_choice: str):
        """Play Rock Paper Shears"""
        options = ["rock", "paper", "shears"]
        c_choice = choice(options)
        # Each option beats its predecessor in `options` (with wrap-around):
        # paper beats rock, shears beats paper, rock beats shears.
        # Bug fix: the original compared `user_choice` against a *different*
        # element derived from its own index, which is never True, so the bot
        # could never win. The bot wins when the user's pick is the
        # predecessor of the bot's pick.
        if user_choice == options[options.index(c_choice) - 1]:
            msg = _("You chose {user_choice}, I chose {c_choice} I win.").format(
                user_choice=user_choice, c_choice=c_choice
            )
        elif c_choice == user_choice:
            msg = _(
                "You chose {user_choice}, I chose {c_choice} looks like we"
                " have a tie."
            ).format(user_choice=user_choice, c_choice=c_choice)
        else:
            msg = _("You chose {user_choice}, I chose {c_choice} you win.").format(
                user_choice=user_choice, c_choice=c_choice
            )
        embed = self.bot.build_embed(title=_("Rock Paper Shears"), description=msg)
        await ctx.send(embed=embed)
|
"""Fun cog."""
from __future__ import annotations
import logging
from random import choice
from typing import List
from typing import TYPE_CHECKING
import discord
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.context import SlashContext
from discord_slash.utils.manage_commands import create_choice
from discord_slash.utils.manage_commands import create_option
from obsidion.core.i18n import cog_i18n
from obsidion.core.i18n import Translator
if TYPE_CHECKING:
from obsidion.core.bot import Obsidion
log = logging.getLogger(__name__)
_ = Translator("Fun", __file__)
# Glyphs used for the "enchanting table" text conversion. Index-aligned with
# `alphabet` below: minecraft[i] is the glyph for alphabet[i]. Some entries
# are TWO characters long ("!¡", " ̇", "||"); the `enchant` command therefore
# peeks two characters at a time when decoding.
minecraft = (
    "ᔑ",
    "ʖ",
    "ᓵ",
    "↸",
    "ᒷ",
    "⎓",
    "⊣",
    "⍑",
    "╎",
    "⋮",
    "ꖌ",
    "ꖎ",
    "ᒲ",
    "リ",
    "𝙹",
    "!¡",
    "ᑑ",
    "∷",
    "ᓭ",
    "ℸ",
    "⚍",
    "⍊",
    "∴",
    " ̇",
    "||",
    "⨅",
)
# Plain letters, index-aligned with `minecraft`.
alphabet = "abcdefghijklmnopqrstuvwxyz"
@cog_i18n(_)
class Fun(commands.Cog):
    """Miscellaneous fun slash commands: build ideas, kill/PVP messages,
    villager speech, enchanting-table text conversion, and rock-paper-shears.
    """

    def __init__(self, bot: Obsidion) -> None:
        """Init."""
        self.bot = bot
        # Load the random-message pools once at cog construction time.
        self.pvp_mes = self.load_from_file("pvp")
        self.kill_mes = self.load_from_file("kill")
        self.build_ideas_mes = self.load_from_file("build_ideas")

    @staticmethod
    def load_from_file(file: str) -> List[str]:
        """Load text from file.

        Args:
            file (str): file name

        Returns:
            List[str]: list of lines, stripped of surrounding whitespace
        """
        with open(f"obsidion/cogs/fun/resources/{file}.txt") as f:
            content = f.readlines()
        return [x.strip() for x in content]

    @cog_ext.cog_slash(name="buildidea", description="Get an idea for a new build.")
    async def buildidea(self, ctx: SlashContext) -> None:
        """Get an idea for a new build."""
        embed = self.bot.build_embed(
            title=_("Build idea"),
            description=choice(self.build_ideas_mes),
        )
        await ctx.send(embed=embed)

    @cog_ext.cog_slash(
        name="kill",
        description="Kill that pesky friend in a fun and stylish way.",
        options=[
            create_option(
                name="username", description="Friend.", option_type=6, required=True
            )
        ],
    )
    async def kill(self, ctx: SlashContext, username: discord.Member) -> None:
        """Send a random kill message targeting the given member."""
        embed = self.bot.build_embed(
            title=_("Kill"),
            # The message templates contain the placeholder word "member".
            description=choice(self.kill_mes).replace("member", username.mention),
        )
        await ctx.send(embed=embed)

    @cog_ext.cog_slash(
        name="pvp",
        description="Duel someone.",
        options=[
            create_option(
                name="player1", description="Player 1.", option_type=6, required=True
            ),
            create_option(
                name="player2", description="Player 2.", option_type=6, required=False
            ),
        ],
    )
    async def pvp(
        self, ctx: SlashContext, player1: discord.Member, player2: discord.Member = None
    ) -> None:
        """Send a random PVP message; player2 defaults to the command author."""
        if not player2:
            player2 = ctx.author
        embed = self.bot.build_embed(
            title=_("PVP"),
            description=choice(self.pvp_mes)
            .replace("member1", player1.mention)
            .replace("member2", player2.mention),
        )
        await ctx.send(embed=embed)

    @cog_ext.cog_slash(
        name="villager",
        description="Hmm hm hmmm Hm hmmm hmm.",
        options=[
            create_option(
                name="speech",
                description="Hmm",
                option_type=3,
                required=True,
            )
        ],
    )
    async def villager(self, ctx: SlashContext, speech: str):
        """Translate *speech* into villager noises ("Hmm HmmHmm ...")."""
        last_was_alpha = False  # Used to detect the start of a word
        last_was_h = False  # Used to prevent 'H's without 'm's
        last_was_lower_m = False  # Used to make "HmmHmm" instead of "HmmMmm"
        sentence = ""
        for char in speech:
            if char.isalpha():  # Alphabetical letter -- Replace with 'Hmm'
                if not last_was_alpha:  # First letter of alphabetical string
                    sentence += "H" if char.isupper() else "h"
                    last_was_h = True
                    last_was_lower_m = False
                else:  # Non-first letter
                    if not char.isupper():
                        sentence += "m"
                        last_was_lower_m = True
                        last_was_h = False
                    else:
                        # Use an 'H' instead to allow CamelCase 'HmmHmm's
                        if last_was_lower_m:
                            sentence += "H"
                            last_was_h = True
                        else:
                            sentence += "M"
                            last_was_h = False
                        last_was_lower_m = False
                last_was_alpha = True  # Remember for next potential 'M'
            else:  # Non-alphabetical letters -- Do not replace
                # Add an m after 'H's without 'm's
                if last_was_h:
                    sentence += "m"
                    last_was_h = False
                # Add non-letter character without changing it
                sentence += char
                last_was_alpha = False
        # If the last character is an H, add a final 'm'
        if last_was_h:
            sentence += "m"
        # NOTE(review): `sentence` is built above but the untranslated input
        # `speech` is sent — this looks like a bug; confirm the intent.
        await ctx.send(speech)

    @cog_ext.cog_slash(
        name="enchant",
        description="Enchant a message.",
        options=[
            create_option(
                name="msg",
                description="Text to enchant or unenchant.",
                option_type=3,
                required=True,
            )
        ],
    )
    async def enchant(self, ctx, msg: str):
        """Convert text to/from the enchanting-table glyph alphabet."""
        response = ""
        letter_pos = 0
        while letter_pos < len(msg):
            # Some glyphs are two characters wide; prefer a two-char match.
            letter = (
                msg[letter_pos : letter_pos + 2]
                if msg[letter_pos : letter_pos + 2] in minecraft
                else msg[letter_pos]
            )
            letter = letter.lower()
            if letter in alphabet:
                response += minecraft[alphabet.index(letter)]
            elif letter in minecraft:
                response += alphabet[minecraft.index(letter)]
                if len(letter) == 2:
                    # Consumed an extra character for a two-char glyph.
                    letter_pos += 1
            else:
                # Not translatable in either direction; keep as-is.
                response += letter
            letter_pos += 1
        embed = self.bot.build_embed(
            title=_("Enchant"),
            description=response,
        )
        await ctx.send(embed=embed)

    @cog_ext.cog_slash(name="creeper", description="Aw man.")
    async def creeper(self, ctx: SlashContext):
        """Aw man."""
        await ctx.send("Aw man.")

    @cog_ext.cog_slash(
        name="rps",
        description="Play Rock Paper Shears.",
        options=[
            create_option(
                name="choice",
                description="Rock paper or shears",
                option_type=3,
                required=True,
                choices=[
                    create_choice(name="Rock", value="rock"),
                    create_choice(name="Paper", value="paper"),
                    create_choice(name="Shears", value="shears"),
                ],
            )
        ],
    )
    async def rps(self, ctx: SlashContext, user_choice: str):
        """Play Rock Paper Shears"""
        options = ["rock", "paper", "shears"]
        c_choice = choice(options)
        # NOTE(review): this compares `user_choice` to a different element of
        # `options`, which can never be True — the "I win" branch is
        # unreachable. Likely intended to compare against `c_choice`; verify.
        if user_choice == options[options.index(user_choice) - 1]:
            msg = _("You chose {user_choice}, I chose {c_choice} I win.").format(
                user_choice=user_choice, c_choice=c_choice
            )
        elif c_choice == user_choice:
            msg = _(
                "You chose {user_choice}, I chose {c_choice} looks like we"
                " have a tie."
            ).format(user_choice=user_choice, c_choice=c_choice)
        else:
            msg = _("You chose {user_choice}, I chose {c_choice} you win.").format(
                user_choice=user_choice, c_choice=c_choice
            )
        embed = self.bot.build_embed(title=_("Rock Paper Shears"), description=msg)
        await ctx.send(embed=embed)
|
en
| 0.695996
|
Fun cog. Init. Load text from file. Args: file (str): file name Returns: List[str]: list of input Get an idea for a new build. Get an idea for a new build. Get an idea for a new build. # Used to detect the start of a word # Used to prevent 'H's without 'm's # Used to make "HmmHmm" instead of "HmmMmm" # Alphabetical letter -- Replace with 'Hmm' # First letter of alphabetical string # Non-first letter # Use an 'H' instead to allow CamelCase 'HmmHmm's # Remember for next potential 'M' # Non-alphabetical letters -- Do not replace # Add an m after 'H's without 'm's # Add non-letter character without changing it # If the laster character is an H, add a final 'm' Aw man. Play Rock Paper Shears
| 2.430284
| 2
|
bvspca/social/wagtail_hooks.py
|
rds0751/bvspca
| 10
|
6626539
|
from wagtail.contrib.modeladmin.options import (ModelAdmin, modeladmin_register)
from .models import SocialMediaQueue
class NewsSocialMediaQueueModelAdmin(ModelAdmin):
    """Wagtail ModelAdmin exposing the social-media queue under Settings."""

    model = SocialMediaQueue
    # Label and Font Awesome icon shown in the Wagtail admin menu.
    menu_label = 'SM Queue'
    # NOTE(review): trailing space in the icon class name — confirm whether
    # intentional or a typo.
    menu_icon = 'fa-share-square '
    menu_order = 450
    add_to_settings_menu = True
    # Columns shown in the listing view, sorted by priority then date.
    list_display = ('date', 'priority', 'page_link', 'status')
    ordering = ('priority', 'date')
    list_per_page = 20


modeladmin_register(NewsSocialMediaQueueModelAdmin)
|
from wagtail.contrib.modeladmin.options import (ModelAdmin, modeladmin_register)
from .models import SocialMediaQueue
class NewsSocialMediaQueueModelAdmin(ModelAdmin):
    """Wagtail ModelAdmin exposing the social-media queue under Settings."""

    model = SocialMediaQueue
    # Label and Font Awesome icon shown in the Wagtail admin menu.
    menu_label = 'SM Queue'
    # NOTE(review): trailing space in the icon class name — confirm whether
    # intentional or a typo.
    menu_icon = 'fa-share-square '
    menu_order = 450
    add_to_settings_menu = True
    # Columns shown in the listing view, sorted by priority then date.
    list_display = ('date', 'priority', 'page_link', 'status')
    ordering = ('priority', 'date')
    list_per_page = 20


modeladmin_register(NewsSocialMediaQueueModelAdmin)
|
none
| 1
| 1.603775
| 2
|
|
actions/reboot_vm.py
|
cognifloyd/stackstorm-libcloud
| 0
|
6626540
|
<reponame>cognifloyd/stackstorm-libcloud
from lib.actions import SingleVMAction
__all__ = [
'RebootVMAction'
]
class RebootVMAction(SingleVMAction):
    """Reboot a single node through the libcloud compute API."""

    api_type = 'compute'

    def run(self, credentials, vm_id, extra_kwargs=None):
        """Reboot the node identified by *vm_id*.

        :param credentials: Credentials name used to build the driver.
        :param vm_id: Identifier of the node to reboot.
        :param extra_kwargs: Optional extra keyword args for reboot_node.
        :return: True if the reboot succeeded, False otherwise.
        """
        extra_kwargs = extra_kwargs or {}
        driver = self._get_driver_for_credentials(credentials=credentials)
        node = self._get_node_for_id(node_id=vm_id, driver=driver)
        # Lazy %-style logging args: the message is only formatted when the
        # log level is enabled (rendered output is unchanged).
        self.logger.info('Rebooting node: %s', node)
        status = driver.reboot_node(node=node, **extra_kwargs)
        if status is True:
            self.logger.info('Successfully rebooted node "%s"', node)
        else:
            self.logger.error('Failed to reboot node "%s"', node)
        return status
|
from lib.actions import SingleVMAction
__all__ = [
'RebootVMAction'
]
class RebootVMAction(SingleVMAction):
    """Reboot a single node through the libcloud compute API."""

    api_type = 'compute'

    def run(self, credentials, vm_id, extra_kwargs=None):
        """Reboot the node identified by *vm_id*.

        :param credentials: Credentials name used to build the driver.
        :param vm_id: Identifier of the node to reboot.
        :param extra_kwargs: Optional extra keyword args for reboot_node.
        :return: True if the reboot succeeded, False otherwise.
        """
        extra_kwargs = extra_kwargs or {}
        driver = self._get_driver_for_credentials(credentials=credentials)
        node = self._get_node_for_id(node_id=vm_id, driver=driver)
        self.logger.info('Rebooting node: %s' % (node))
        status = driver.reboot_node(node=node, **extra_kwargs)
        if status is True:
            self.logger.info('Successfully rebooted node "%s"' % (node))
        else:
            self.logger.error('Failed to reboot node "%s"' % (node))
        return status
|
none
| 1
| 2.397588
| 2
|
|
test/test_cutpaste.py
|
thymeyk/imdetector
| 3
|
6626541
|
<gh_stars>1-10
import os
import unittest
from imdetector.image import SuspiciousImage
from imdetector.cutpaste import CutPaste
DIR = os.getcwd()
class TestCutPaste(unittest.TestCase):
    """Exercises the CutPaste detector on two bundled test images."""

    def test_cutpaste(self):
        # TODO: re-train a model
        # Expected detector labels for [img1, img2].
        # NOTE(review): label semantics (manipulated vs clean) are defined by
        # CutPaste.detect and not visible here — confirm against its docs.
        expected = [0, 1]
        img1 = SuspiciousImage(
            os.path.join(
                DIR, 'test/image/yrc_1000_505_cp.png'))
        img2 = SuspiciousImage(os.path.join(DIR, 'test/image/yrc_16.png'))
        detector = CutPaste()
        actual = detector.detect([img1, img2])
        print(actual)
        self.assertEqual(expected[0], actual[0])
        self.assertEqual(expected[1], actual[1])


if __name__ == "__main__":
    unittest.main()
|
import os
import unittest
from imdetector.image import SuspiciousImage
from imdetector.cutpaste import CutPaste
DIR = os.getcwd()
class TestCutPaste(unittest.TestCase):
    """Exercises the CutPaste detector on two bundled test images."""

    def test_cutpaste(self):
        # TODO: re-train a model
        # Expected detector labels for [img1, img2].
        # NOTE(review): label semantics (manipulated vs clean) are defined by
        # CutPaste.detect and not visible here — confirm against its docs.
        expected = [0, 1]
        img1 = SuspiciousImage(
            os.path.join(
                DIR, 'test/image/yrc_1000_505_cp.png'))
        img2 = SuspiciousImage(os.path.join(DIR, 'test/image/yrc_16.png'))
        detector = CutPaste()
        actual = detector.detect([img1, img2])
        print(actual)
        self.assertEqual(expected[0], actual[0])
        self.assertEqual(expected[1], actual[1])


if __name__ == "__main__":
    unittest.main()
|
en
| 0.752428
|
# TODO: re-traing a model
| 2.942564
| 3
|
crits/samples/sample.py
|
frbapolkosnik/crits
| 0
|
6626542
|
import json
from mongoengine import Document
from mongoengine import StringField, ListField
from mongoengine import IntField
from django.conf import settings
from crits.samples.migrate import migrate_sample
from crits.core.crits_mongoengine import CritsBaseAttributes
from crits.core.crits_mongoengine import CritsSourceDocument
from crits.core.crits_mongoengine import CritsActionsDocument
from crits.core.crits_mongoengine import json_handler
from crits.core.data_tools import format_file
from crits.core.fields import getFileField
class Sample(CritsBaseAttributes, CritsSourceDocument, CritsActionsDocument,
             Document):
    """Sample object: a binary stored in GridFS plus derived metadata
    (hashes, filetype, size) and CRITs bookkeeping fields."""
    meta = {
        "collection": settings.COL_SAMPLES,
        "crits_type": 'Sample',
        "latest_schema_version": 5,
        "shard_key": ('md5',),
        "schema_doc": {
            # Fixed missing space: the two adjacent literals previously
            # concatenated to "...with thisMD5".
            'filename': 'The name of the last file that was uploaded with this '
                        'MD5',
            'filenames': 'A list of filenames this binary has gone by.',
            'filetype': 'The filetype of the file',
            'mimetype': 'The mimetype of the file',
            'size': 'The size of the file',
            'md5': 'The MD5 of the file',
            'sha1': 'The SHA1 of the file',
            'sha256': 'The SHA256 of the file',
            'ssdeep': 'The ssdeep of the file',
            'impfuzzy': 'The impfuzzy of the executable file',
            'campaign': 'List [] of campaigns using this file',
            'source': 'List [] of sources that provided this file',
            'created': 'ISODate of when this file was uploaded',
            'modified': 'ISODate of when the file metadata was last modified',
            'filedata': 'The ObjectId of the file in GridFS'
        },
        "jtable_opts": {
            'details_url': 'crits.samples.views.detail',
            'details_url_key': 'md5',
            'default_sort': "created DESC",
            'searchurl': 'crits.samples.views.samples_listing',
            'fields': ["filename", "size", "filetype",
                       "created", "modified", "campaign",
                       "source", "md5", "id", "status"],
            'jtopts_fields': ["details",
                              "filename",
                              "size",
                              "filetype",
                              "created",
                              "campaign",
                              "source",
                              "md5",
                              "status",
                              "favorite",
                              "id"],
            'hidden_fields': ["md5"],
            'linked_fields': ["filename", "source", "campaign",
                              "filetype"],
            'details_link': 'details',
            'no_sort': ['details']
        },
    }

    filedata = getFileField(collection_name=settings.COL_SAMPLES)
    filename = StringField(required=True)
    filenames = ListField(StringField())
    filetype = StringField()
    md5 = StringField(required=True)
    mimetype = StringField()
    sha1 = StringField()
    sha256 = StringField()
    size = IntField(default=0)
    ssdeep = StringField()
    impfuzzy = StringField()

    def migrate(self):
        """Migrate this document to the latest schema version."""
        migrate_sample(self)

    def add_file_data(self, file_data):
        """Store raw bytes as the file data and derive metadata from them."""
        self._generate_file_metadata(file_data)
        self.filedata = file_data

    def add_file_obj(self, file_obj):
        """Read a file-like object and store its contents as file data."""
        data = file_obj.read()
        self._generate_file_metadata(data)
        self.filedata = data

    def _generate_file_metadata(self, data):
        """Populate filetype, mimetype, size and hash fields from raw bytes.

        Optional fingerprints (ssdeep, impfuzzy) fall back to None on
        failure; magic-based detection falls back to "Unavailable".
        """
        import pydeep
        import magic
        from hashlib import md5, sha1, sha256
        try:
            import pyimpfuzzy
        except ImportError:
            # impfuzzy support is optional; self.impfuzzy ends up None below.
            pass
        try:
            self.filetype = magic.from_buffer(data)
        # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; magic errors still fall back.
        except Exception:
            self.filetype = "Unavailable"
        try:
            mimetype = magic.from_buffer(data, mime=True)
            if mimetype:
                self.mimetype = mimetype.split(";")[0]
            if not mimetype:
                self.mimetype = "unknown"
        except Exception:
            self.mimetype = "Unavailable"
        self.size = len(data)
        # this is a shard key. you can't modify it once it's set.
        # MongoEngine will still mark the field as modified even if you set it
        # to the same value.
        if not self.md5:
            self.md5 = md5(data).hexdigest()
        self.sha1 = sha1(data).hexdigest()
        self.sha256 = sha256(data).hexdigest()
        try:
            self.ssdeep = pydeep.hash_bytes(data)
        except Exception:
            self.ssdeep = None
        try:
            # NameError (pyimpfuzzy missing) is also caught here, preserving
            # the original best-effort behavior.
            self.impfuzzy = pyimpfuzzy.get_impfuzzy_data(data)
        except Exception:
            self.impfuzzy = None

    def is_pe(self):
        """
        Is this a PE file (starts with the "MZ" magic)?
        """
        ret = self.filedata.grid_id is not None and self.filedata.read(2) == "MZ"
        if self.filedata.grid_id:
            # Rewind so later readers see the file from the start.
            self.filedata.seek(0)
        return ret

    def is_pdf(self):
        """
        Is this a PDF ("%PDF-" within the first 1024 bytes)?
        """
        ret = self.filedata.grid_id is not None and "%PDF-" in self.filedata.read(1024)
        if self.filedata.grid_id:
            # Rewind so later readers see the file from the start.
            self.filedata.seek(0)
        return ret

    def discover_binary(self):
        """
        Queries GridFS for a matching binary to this sample document.
        """
        from crits.core.mongo_tools import mongo_connector
        fm = mongo_connector("%s.files" % self._meta['collection'])
        objectid = fm.find_one({'md5': self.md5}, {'_id': 1})
        if objectid:
            self.filedata.grid_id = objectid['_id']
            self.filedata._mark_as_changed()

    def set_filenames(self, filenames):
        """
        Set the Sample filenames to a specified list.

        :param filenames: The filenames to set.
        :type filenames: list
        """
        if isinstance(filenames, list):
            self.filenames = filenames

    def _json_yaml_convert(self, exclude=None):
        """
        Helper to convert to a dict before converting to JSON.

        :param exclude: list of fields to exclude.
        :type exclude: list
        :returns: json
        """
        # None sentinel instead of a shared mutable default argument.
        exclude = [] if exclude is None else exclude
        d = self.to_dict(exclude)
        if 'filedata' not in exclude:
            (d['filedata'], ext) = format_file(self.filedata.read(), 'base64')
        return json.dumps(d, default=json_handler)
|
import json
from mongoengine import Document
from mongoengine import StringField, ListField
from mongoengine import IntField
from django.conf import settings
from crits.samples.migrate import migrate_sample
from crits.core.crits_mongoengine import CritsBaseAttributes
from crits.core.crits_mongoengine import CritsSourceDocument
from crits.core.crits_mongoengine import CritsActionsDocument
from crits.core.crits_mongoengine import json_handler
from crits.core.data_tools import format_file
from crits.core.fields import getFileField
class Sample(CritsBaseAttributes, CritsSourceDocument, CritsActionsDocument,
Document):
"""Sample object"""
meta = {
"collection": settings.COL_SAMPLES,
"crits_type": 'Sample',
"latest_schema_version": 5,
"shard_key": ('md5',),
"schema_doc": {
'filename': 'The name of the last file that was uploaded with this'\
'MD5',
'filenames': 'A list of filenames this binary has gone by.',
'filetype': 'The filetype of the file',
'mimetype': 'The mimetype of the file',
'size': 'The size of the file',
'md5': 'The MD5 of the file',
'sha1': 'The SHA1 of the file',
'sha256': 'The SHA256 of the file',
'ssdeep': 'The ssdeep of the file',
'impfuzzy': 'The impfuzzy of the executable file',
'campaign': 'List [] of campaigns using this file',
'source': 'List [] of sources that provided this file',
'created': 'ISODate of when this file was uploaded',
'modified': 'ISODate of when the file metadata was last modified',
'filedata': 'The ObjectId of the file in GridFS'
},
"jtable_opts": {
'details_url': 'crits.samples.views.detail',
'details_url_key': 'md5',
'default_sort': "created DESC",
'searchurl': 'crits.samples.views.samples_listing',
'fields': [ "filename", "size", "filetype",
"created", "modified", "campaign",
"source", "md5", "id", "status"],
'jtopts_fields': [ "details",
"filename",
"size",
"filetype",
"created",
"campaign",
"source",
"md5",
"status",
"favorite",
"id"],
'hidden_fields': ["md5"],
'linked_fields': ["filename", "source", "campaign",
"filetype"],
'details_link': 'details',
'no_sort': ['details']
},
}
filedata = getFileField(collection_name=settings.COL_SAMPLES)
filename = StringField(required=True)
filenames = ListField(StringField())
filetype = StringField()
md5 = StringField(required=True)
mimetype = StringField()
sha1 = StringField()
sha256 = StringField()
size = IntField(default=0)
ssdeep = StringField()
impfuzzy = StringField()
def migrate(self):
migrate_sample(self)
def add_file_data(self, file_data):
self._generate_file_metadata(file_data)
self.filedata = file_data
def add_file_obj(self, file_obj):
data = file_obj.read()
self._generate_file_metadata(data)
self.filedata = data
def _generate_file_metadata(self, data):
import pydeep
import magic
from hashlib import md5, sha1, sha256
try:
import pyimpfuzzy
except ImportError:
pass
try:
self.filetype = magic.from_buffer(data)
except:
self.filetype = "Unavailable"
try:
mimetype = magic.from_buffer(data, mime=True)
if mimetype:
self.mimetype = mimetype.split(";")[0]
if not mimetype:
self.mimetype = "unknown"
except:
self.mimetype = "Unavailable"
self.size = len(data)
# this is a shard key. you can't modify it once it's set.
# MongoEngine will still mark the field as modified even if you set it
# to the same value.
if not self.md5:
self.md5 = md5(data).hexdigest()
self.sha1 = sha1(data).hexdigest()
self.sha256 = sha256(data).hexdigest()
try:
self.ssdeep = pydeep.hash_bytes(data)
except:
self.ssdeep = None
try:
self.impfuzzy = pyimpfuzzy.get_impfuzzy_data(data)
except:
self.impfuzzy = None
def is_pe(self):
"""
Is this a PE file.
"""
ret = self.filedata.grid_id != None and self.filedata.read(2) == "MZ"
if self.filedata.grid_id:
self.filedata.seek(0)
return ret
def is_pdf(self):
"""
Is this a PDF.
"""
ret = self.filedata.grid_id != None and "%PDF-" in self.filedata.read(1024)
if self.filedata.grid_id:
self.filedata.seek(0)
return ret
    def discover_binary(self):
        """
        Queries GridFS for a matching binary to this sample document.

        Looks up a GridFS file whose md5 equals this sample's md5 and, if
        found, attaches its ObjectId so self.filedata resolves to it.
        """
        from crits.core.mongo_tools import mongo_connector
        # GridFS stores file metadata in "<collection>.files".
        fm = mongo_connector("%s.files" % self._meta['collection'])
        objectid = fm.find_one({'md5': self.md5}, {'_id': 1})
        if objectid:
            self.filedata.grid_id = objectid['_id']
            # Force MongoEngine to persist the manually-set grid_id.
            self.filedata._mark_as_changed()
def set_filenames(self, filenames):
"""
Set the Sample filenames to a specified list.
:param filenames: The filenames to set.
:type filenames: list
"""
if isinstance(filenames, list):
self.filenames = filenames
    def _json_yaml_convert(self, exclude=[]):
        """
        Helper to convert to a dict before converting to JSON.

        :param exclude: list of fields to exclude.
        :type exclude: list
        :returns: json
        """
        # NOTE(review): mutable default is harmless here because `exclude`
        # is only read, never mutated.
        d = self.to_dict(exclude)
        if 'filedata' not in exclude:
            # Inline the binary as base64 so the export is self-contained.
            (d['filedata'], ext) = format_file(self.filedata.read(), 'base64')
        return json.dumps(d, default=json_handler)
|
en
| 0.823632
|
Sample object # this is a shard key. you can't modify it once it's set. # MongoEngine will still mark the field as modified even if you set it # to the same value. Is this a PE file. Is this a PDF. Queries GridFS for a matching binary to this sample document. Set the Sample filenames to a specified list. :param filenames: The filenames to set. :type filenames: list Helper to convert to a dict before converting to JSON. :param exclude: list of fields to exclude. :type exclude: list :returns: json
| 2.072612
| 2
|
models/semisup_mtl.py
|
zachstarkk/ASM2V
| 0
|
6626543
|
<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from typing import Tuple, List
import yaml
# TSA
def get_tsa_thresh(schedule, global_step, num_train_steps, start, end, device):
    """Training-signal-annealing threshold for the current step.

    Maps training progress (global_step / num_train_steps) through the chosen
    schedule ('linear_schedule', 'exp_schedule' or 'log_schedule') and rescales
    it into [start, end]. Returns a scalar tensor on `device`.
    """
    progress = torch.tensor(float(global_step) / float(num_train_steps))
    if schedule == 'linear_schedule':
        raw = progress
    elif schedule == 'exp_schedule':
        # Slow start, fast finish.
        raw = torch.exp((progress - 1) * 5)
    elif schedule == 'log_schedule':
        # Fast start, slow finish.
        raw = 1 - torch.exp(-progress * 5)
    else:
        raise NotImplementedError('TSA scheduler method %s is not implemented' % schedule)
    return (raw * (end - start) + start).to(device)
class SemiSupMultiMLP(nn.Module):
    '''
    Basic single shared bottom for all tasks across all views, used as a
    benchmark model for semi-supervised multi-task learning.

    Inputs are shaped [batch, num_tasks, num_views, seq_len, sensor_dim];
    forward() returns per-task logits stacked on dim 0.
    '''
    def __init__(self, num_tasks, num_views, seq_len, sensor_dim, hidden_dim, output_dim, init_method, temperature, device, dropout_p=0.2):
        super(SemiSupMultiMLP, self).__init__()
        self.num_views = num_views
        self.seq_len = seq_len
        self.sensor_dim = sensor_dim
        self.temperature = temperature  # Gumbel-softmax temperature for policy sampling
        self.init_method = init_method  # 'all' | 'random' | 'equal'
        self.device = device
        # Shared bottom: collapses the sensor dimension to a scalar per step.
        self.shared_bottom = nn.Linear(sensor_dim, 1)
        # self.specific_tasks_1 = nn.ModuleList([nn.Linear(sensor_dim, 1) for _ in range(num_tasks)])
        # Per-task head collapsing the view dimension.
        self.specific_tasks_2 = nn.ModuleList([nn.Linear(num_views, 1) for _ in range(num_tasks)])
        # Per-task classifier tower over the sequence dimension.
        self.towers = nn.ModuleList([nn.Sequential(
            nn.Linear(seq_len, hidden_dim),
            nn.BatchNorm1d(num_features=hidden_dim),
            nn.ReLU(),
            nn.Dropout(p=dropout_p),
            nn.Linear(hidden_dim, output_dim)
        ) for _ in range(num_tasks)])
        # 3 augmentation options (normal / uniform / log-normal noise; see forward).
        self._init_task_logits_(num_tasks, num_views, 3)
    def _init_task_logits_(self, num_candidates_1, num_candidates_2, num_options):
        """Register `task_logits` [num_tasks, num_views, num_options], the
        learnable logits from which the augmentation policy is sampled."""
        if self.init_method == 'all':
            # Favor the first option; zero the rest.
            task_logits = .8 * torch.ones(num_candidates_1, num_candidates_2, num_options)
            for i in range(1, num_options):
                task_logits[:, :, i] = 0
        elif self.init_method == 'random':
            task_logits = 1e-3 * torch.randn(num_candidates_1, num_candidates_2, num_options)
        elif self.init_method == 'equal':
            task_logits = .5 * torch.ones(num_candidates_1, num_candidates_2, num_options)
        else:
            raise NotImplementedError('Initial Method %s is not implemented' % self.init_method)
        self.register_parameter('task_logits', nn.Parameter(task_logits, requires_grad=True))
    def train_sample_policy(self, temperature, hard_sampling):
        """Sample an augmentation policy from `task_logits` via Gumbel-softmax
        (one-hot when hard_sampling is True)."""
        return F.gumbel_softmax(getattr(self, 'task_logits'), temperature, hard=hard_sampling)
    def forward(self, sup_x, unsup_x=None):
        """Run the supervised batch and, optionally, an augmented unsupervised
        batch through the shared network.

        Returns (sup_logits, unsup_logits) stacked as
        [num_tasks, batch, output_dim]; unsup_logits is None when unsup_x is None.
        """
        # X shape: [batch_size, num_tasks, num_views, seq_len, sensor_dim] from single task
        if unsup_x is None:
            bsz, num_tasks, num_views, seq_len, sensor_dim = sup_x.size()
            x = sup_x.transpose(2, 3) # >> [batch_size, num_tasks, seq_len, num_views, sensor_dim]
            # x = x.reshape(bsz, num_tasks, seq_len, -1)
            x = self.shared_bottom(x).squeeze(-1) # >> [batch_size, num_tasks, seq_len, num_views]
            # output = [self.specific_tasks_1[task_id](x[:, task_id, ...]).squeeze(-1) for task_id in range(num_tasks)] # >> [batch_size, seq_len, num_views] * num_tasks
            output = [self.specific_tasks_2[task_id](x[:, task_id, ...]).squeeze(-1) for task_id in range(num_tasks)] # >> [batch_size, seq_len] * num_tasks
            # x = torch.stack(output, dim=1) # >> [batch_size, num_tasks, seq_len]
            output = [self.towers[task_id](output[task_id]) for task_id in range(num_tasks)] # >> [batch_size, output_dim] * num_tasks
            return torch.stack(output, dim=0), None
        else:
            # Adaptive Data Augmentation for Unsupervised Part
            bsz, num_tasks, num_views, seq_len, sensor_dim = sup_x.size()
            unsup_bsz = unsup_x.size(0)
            policy = self.train_sample_policy(self.temperature, True) # >> [num_tasks, num_views, 3] one-hot noise-type choice
            # NOTE(review): `+=` mutates the caller's unsup_x tensor in place.
            # Each (task, view) slot receives exactly one of the three noise
            # types selected by the sampled one-hot policy.
            unsup_x += torch.empty(unsup_bsz, num_tasks, num_views, seq_len, sensor_dim, device=self.device).normal_(0, 10) * \
                policy[:, :, 0][None, :, :, None, None] + \
                torch.empty(unsup_bsz, num_tasks, num_views, seq_len, sensor_dim, device=self.device).uniform_(0, 10) * \
                policy[:, :, 1][None, :, :, None, None] + \
                torch.empty(unsup_bsz, num_tasks, num_views, seq_len, sensor_dim, device=self.device).log_normal_(0, 3) * \
                policy[:, :, 2][None, :, :, None, None] # >> [unsup_bsz, ...]
            # unsup_x += .1
            # Concatenate so both batches share one pass through the network.
            x = torch.cat([sup_x, unsup_x], dim=0) # >> [bsz+unsup_bsz, ...]
            x = x.transpose(2, 3) # >> [batch_size, num_tasks, seq_len, num_views, sensor_dim]
            # x = x.reshape(bsz, num_tasks, seq_len, -1)
            x = self.shared_bottom(x).squeeze(-1) # >> [batch_size, num_tasks, seq_len, num_views]
            # output = [self.specific_tasks_1[task_id](x[:, task_id, ...]).squeeze(-1) for task_id in range(num_tasks)] # >> [batch_size, seq_len, num_views] * num_tasks
            output = [self.specific_tasks_2[task_id](x[:, task_id, ...]).squeeze(-1) for task_id in range(num_tasks)] # >> [batch_size, seq_len] * num_tasks
            # x = torch.stack(output, dim=1) # >> [batch_size, num_tasks, seq_len]
            output = [self.towers[task_id](output[task_id]) for task_id in range(num_tasks)] # >> [batch_size, output_dim] * num_tasks
            output = torch.stack(output, dim=0)
            # Split back into supervised and unsupervised halves.
            return output[:, :bsz, :], output[:, bsz:, :] # >> [num_tasks, bsz, output_dim], [num_tasks, unsup_bsz, output_dim]
class SemiSupUncertaintyLossWrapper(nn.Module):
    """Implementation of paper: Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry and Semantics

    Wraps a model and combines per-task supervised losses (uncertainty-weighted
    via learnable `eta`) with a UDA-style KL consistency loss on augmented
    unsupervised batches.

    params: num_tasks
    params: model
    return: Wrapped losses of multiple tasks
    """
    def __init__(self, model, num_tasks, sup_criterion, l2_criterion, unsup_criterion, eta, beta, opt, device):
        super(SemiSupUncertaintyLossWrapper, self).__init__()
        self.model = model
        self.num_tasks = num_tasks
        self.sup_criterion = sup_criterion      # expected reduction='none' (per-sample losses)
        self.unsup_criterion = unsup_criterion  # expected KLDivLoss(reduction='none')
        self.l2_criterion = l2_criterion        # currently unused; l2 block below is commented out
        # NOTE(review): nn.Module.training defaults to True, so this branch is
        # always taken at construction; TSA is effectively toggled by config only.
        if self.training:
            self.tsa = opt['train']['is_tsa']
        else:
            self.tsa = False
        self.sample_ratio = opt['train']['adaption_sample_ratio']
        self.adaption_steps = opt['train']['adaption_steps']
        self.tsa_schedule = opt['train']['tsa_schedule']
        self.total_steps = opt['train']['total_steps']
        self.uda_softmax_temp = opt['train']['uda_softmax_temp']
        self.uda_confidence_thresh = opt['train']['uda_confidence_thresh']
        self.uda_coeff = opt['train']['uda_coefficient']
        self.device = device
        # NOTE(review): message is stale -- the check requires num_tasks * 2
        # entries (supervised + unsupervised uncertainty terms per task).
        assert len(eta) == num_tasks * 2, "length of eta should be same as number of tasks"
        # variable change for stability >> using eta = 2log\sigma
        self.init_eta(eta)
        # self.init_beta(beta)
        # self.eta = nn.Parameter(torch.Tensor(eta)).to(device)
    def init_beta(self, beta):
        """Register `beta` as a learnable parameter (currently unused)."""
        self.register_parameter('beta', nn.Parameter(torch.Tensor(beta), requires_grad=True))
    def init_eta(self, eta):
        """Register `eta` (per-task log-variance weights) as learnable."""
        self.register_parameter('eta', nn.Parameter(torch.Tensor(eta), requires_grad=True))
    def forward(self, sup_inputs, targets, unsup_inputs, global_step):
        """Run the model and compute the combined loss.

        Returns (sup_outputs, total_loss, sup_loss, unsup_loss); the last two
        are the total and None, None when unsup_inputs is None.
        """
        # Compute Supervised loss with uncertainty
        # targets arrive as [batch, num_tasks]; make them task-major.
        targets = targets.transpose(0, 1)
        if unsup_inputs is not None:
            unsup_bsz = unsup_inputs.size(0)
            sample_bsz = int(unsup_bsz * self.sample_ratio)
            # Slice the unsupervised batch into (adaption_steps - 1) chunks to
            # be augmented inside the model.
            aug_unsup_inputs = []
            for k in range(1, self.adaption_steps):
                # adaptation_indices = random.sample(range(unsup_bsz), sample_bsz)
                # aug_unsup_indices.append(adaptation_indices)
                aug_unsup_inputs.append(unsup_inputs[sample_bsz*k:sample_bsz*(k+1)])
            aug_unsup_inputs = torch.cat(aug_unsup_inputs, dim=0)
            # print(aug_unsup_inputs.size())
            # adaptation_indices = np.zeros(unsup_bsz, dtype=bool)
            # adaptation_indices[np.arange(unsup_bsz//2) * 2] = True
            # evaluation_indices = torch.tensor(~adaptation_indices)
            # adaptation_indices = torch.from_numpy(adaptation_indices)
            sup_outputs, unsup_outputs = self.model(sup_inputs, aug_unsup_inputs)
            # print(unsup_outputs.size())
        else:
            sup_outputs, unsup_outputs = self.model(sup_inputs, unsup_inputs)
        # Compute Supervised Cross Entropy Loss
        sup_total_loss = 0
        sup_loss_list = [self.sup_criterion(o, y) for o, y in zip(sup_outputs, targets)]
        if self.tsa:
            # Training Signal Annealing: only keep examples whose predicted
            # probability is still below the annealed threshold.
            tsa_thresh = get_tsa_thresh(self.tsa_schedule, global_step, self.total_steps, start=1./sup_outputs.size(-1), end=1, device=self.device)
            for task_id in range(self.num_tasks):
                sup_loss = sup_loss_list[task_id]
                larger_than_threshold = torch.exp(-sup_loss) > tsa_thresh # prob = exp(log_prob), prob > tsa_threshold
                # larger_than_threshold = torch.sum( F.softmax(pred[:sup_size]) * torch.eye(num_labels)[sup_label_ids] , dim=-1) > tsa_threshold
                loss_mask = torch.ones_like(targets[task_id], dtype=torch.float32) * (1 - larger_than_threshold.type(torch.float32))
                # loss_mask = loss_mask.to(self.device)
                sup_loss = torch.sum(sup_loss * loss_mask, dim=-1) / torch.sum(loss_mask, dim=-1)
                # Uncertainty weighting: loss * exp(-eta) + eta (eta = 2 log sigma).
                sup_total_loss += torch.sum(sup_loss * torch.exp(-self.eta[task_id]) + self.eta[task_id])
        else:
            # pass
            for i in range(self.num_tasks):
                sup_total_loss += torch.sum(sup_loss_list[i] * torch.exp(-self.eta[i]) + self.eta[i])
                # sup_total_loss += torch.sum(sup_loss_list[i])
        # print(sup_total_loss)
        # Compute l2 loss between view-specific and merged outputs
        # l2_total_loss = 0
        # view_outputs, merged_outputs = F.log_softmax(view_outputs, dim=-1), F.log_softmax(merged_outputs, dim=-1)
        # l2_loss_list = [self.l2_criterion(o, y) for o, y in zip(view_outputs, merged_outputs)]
        # for i in range(self.num_tasks):
        #     l2_total_loss += torch.sum(l2_loss_list[i])
        # print(l2_total_loss)
        # Compute Unsupervised loss
        # NOTE: unsup_outputs (and sample_bsz) exist only when unsup_inputs
        # was provided above.
        if unsup_outputs is not None:
            unsup_total_loss = 0
            with torch.no_grad():
                # aug
                # softmax temperature controlling
                aug_log_probs_multi = []
                for k in range(self.adaption_steps - 1):
                    uda_softmax_temp = self.uda_softmax_temp if self.uda_softmax_temp > 0 else 1.
                    aug_log_probs = F.log_softmax(unsup_outputs[:, k*sample_bsz:(k+1)*sample_bsz, :] / uda_softmax_temp, dim=-1) # >> [num_tasks, unsup_bsz, output_dim]
                    aug_log_probs_multi.append(aug_log_probs)
                # Original
                # evaluation_indices = random.sample(range(unsup_bsz), sample_bsz)
                # print(evaluation_indices)
                ori_outputs, _, = self.model(unsup_inputs[:sample_bsz])
                ori_probs = F.softmax(ori_outputs, dim=-1) # >> [num_tasks, unsup_bsz, output_dim] # KLdiv target
                # print(ori_probs.size())
                # confidence-based masking
                if self.uda_confidence_thresh != -1:
                    unsup_loss_masks = torch.max(ori_probs, dim=-1)[0] > self.uda_confidence_thresh
                    unsup_loss_masks = unsup_loss_masks.type(torch.float32)
                else:
                    unsup_loss_masks = torch.ones(self.num_tasks, unsup_inputs.size(0), dtype=torch.float32)
                unsup_loss_masks = unsup_loss_masks.to(self.device) # >> [num_tasks, unsup_bsz]
            # KLdiv loss
            """
            nn.KLDivLoss (kl_div)
            input : log_prob (log_softmax)
            target : prob (softmax)
            https://pytorch.org/docs/stable/nn.html
            unsup_loss is divied by number of unsup_loss_mask
            it is different from the google UDA official
            The official unsup_loss is divided by total
            https://github.com/google-research/uda/blob/master/text/uda.py#L175
            """
            for k in range(self.adaption_steps - 1):
                aug_log_probs = aug_log_probs_multi[k]
                # print(aug_log_probs.size())
                # print(ori_probs.size())
                unsup_loss_list = [torch.sum(self.unsup_criterion(aug_log_prob, ori_prob), dim=-1)
                                   for aug_log_prob, ori_prob in zip(aug_log_probs, ori_probs)]
                # unsup_loss = torch.sum(self.unsup_criterion(aug_log_prob, ori_prob), dim=-1)
                unsup_loss_list = [torch.sum(unsup_loss * unsup_loss_mask, dim=-1) / torch.sum(unsup_loss_mask, dim=-1)
                                   for unsup_loss, unsup_loss_mask in zip(unsup_loss_list, unsup_loss_masks)]
                for i in range(self.num_tasks):
                    # unsup_total_loss += torch.sum(unsup_loss_list[i] * torch.exp(-self.eta[self.num_tasks+i]) + self.eta[self.num_tasks+i])
                    unsup_total_loss += torch.sum(unsup_loss_list[i])
            # print(unsup_total_loss)
            # Average the consistency loss over the augmentation rounds.
            final_loss = sup_total_loss + self.uda_coeff * unsup_total_loss / (self.adaption_steps - 1)
            # print(final_loss)
            return sup_outputs, final_loss, sup_total_loss, unsup_total_loss
        # total_loss = torch.Tensor(loss).to(self.device) * torch.exp(-self.eta) + self.eta
        # total_loss = torch.Tensor(loss).to(self.device) / self.num_tasks
        return sup_outputs, sup_total_loss, None, None # omit 1/2
if __name__ == '__main__':
    # Smoke test for SemiSupMultiMLP + SemiSupUncertaintyLossWrapper.
    with open('./yamls/realworld2016_exp.yaml') as f:
        opt = yaml.safe_load(f)
    sup_criterion = torch.nn.CrossEntropyLoss(reduction='none')
    unsup_criterion = torch.nn.KLDivLoss(reduction='none')
    # l2 criterion is currently unused inside the wrapper but required by
    # its constructor signature.
    l2_criterion = torch.nn.MSELoss(reduction='none')
    device = torch.device('cuda:0')
    sensor_dim = opt['num_sensors'] * 3
    model = SemiSupMultiMLP(opt['num_tasks'], opt['num_views'], opt['window_size'],
                            sensor_dim, 100, opt['num_action_types'], opt['train']['init_method'],
                            opt['train']['temperature'], device, .5)
    model = model.to(device)
    # BUG FIX: the wrapper asserts len(eta) == num_tasks * 2 (supervised +
    # unsupervised uncertainty terms); the old list had only num_tasks zeros.
    eta = [0.] * (opt['num_tasks'] * 2)
    beta = [0.] * opt['num_tasks']
    # BUG FIX: the old call omitted l2_criterion and beta and would raise
    # TypeError against the constructor signature.
    wrapper = SemiSupUncertaintyLossWrapper(model, opt['num_tasks'], sup_criterion,
                                            l2_criterion, unsup_criterion, eta, beta, opt, device)
    sup_x = torch.randn(32, 8, 7, 250, 9)
    unsup_x = torch.randn(16, 8, 7, 250, 9)
    # BUG FIX: removed duplicated `targets = targets =` assignment.
    targets = torch.empty(32, 8, dtype=torch.long).random_(7)
    sup_x, targets, unsup_x = sup_x.to(device), targets.to(device), unsup_x.to(device)
    outputs, final_loss, sup_loss, unsup_loss = wrapper(sup_x, targets, unsup_x, 100)
    print(outputs.size())
    print(final_loss, sup_loss, unsup_loss)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from typing import Tuple, List
import yaml
# TSA
def get_tsa_thresh(schedule, global_step, num_train_steps, start, end, device):
    """Training-signal-annealing threshold for the current step.

    Maps training progress (global_step / num_train_steps) through the chosen
    schedule ('linear_schedule', 'exp_schedule' or 'log_schedule') and rescales
    it into [start, end]. Returns a scalar tensor on `device`.
    """
    progress = torch.tensor(float(global_step) / float(num_train_steps))
    if schedule == 'linear_schedule':
        raw = progress
    elif schedule == 'exp_schedule':
        # Slow start, fast finish.
        raw = torch.exp((progress - 1) * 5)
    elif schedule == 'log_schedule':
        # Fast start, slow finish.
        raw = 1 - torch.exp(-progress * 5)
    else:
        raise NotImplementedError('TSA scheduler method %s is not implemented' % schedule)
    return (raw * (end - start) + start).to(device)
class SemiSupMultiMLP(nn.Module):
    '''
    Basic single shared bottom for all tasks across all views, used as a
    benchmark model for semi-supervised multi-task learning.

    Inputs are shaped [batch, num_tasks, num_views, seq_len, sensor_dim];
    forward() returns per-task logits stacked on dim 0.
    '''
    def __init__(self, num_tasks, num_views, seq_len, sensor_dim, hidden_dim, output_dim, init_method, temperature, device, dropout_p=0.2):
        super(SemiSupMultiMLP, self).__init__()
        self.num_views = num_views
        self.seq_len = seq_len
        self.sensor_dim = sensor_dim
        self.temperature = temperature  # Gumbel-softmax temperature for policy sampling
        self.init_method = init_method  # 'all' | 'random' | 'equal'
        self.device = device
        # Shared bottom: collapses the sensor dimension to a scalar per step.
        self.shared_bottom = nn.Linear(sensor_dim, 1)
        # self.specific_tasks_1 = nn.ModuleList([nn.Linear(sensor_dim, 1) for _ in range(num_tasks)])
        # Per-task head collapsing the view dimension.
        self.specific_tasks_2 = nn.ModuleList([nn.Linear(num_views, 1) for _ in range(num_tasks)])
        # Per-task classifier tower over the sequence dimension.
        self.towers = nn.ModuleList([nn.Sequential(
            nn.Linear(seq_len, hidden_dim),
            nn.BatchNorm1d(num_features=hidden_dim),
            nn.ReLU(),
            nn.Dropout(p=dropout_p),
            nn.Linear(hidden_dim, output_dim)
        ) for _ in range(num_tasks)])
        # 3 augmentation options (normal / uniform / log-normal noise; see forward).
        self._init_task_logits_(num_tasks, num_views, 3)
    def _init_task_logits_(self, num_candidates_1, num_candidates_2, num_options):
        """Register `task_logits` [num_tasks, num_views, num_options], the
        learnable logits from which the augmentation policy is sampled."""
        if self.init_method == 'all':
            # Favor the first option; zero the rest.
            task_logits = .8 * torch.ones(num_candidates_1, num_candidates_2, num_options)
            for i in range(1, num_options):
                task_logits[:, :, i] = 0
        elif self.init_method == 'random':
            task_logits = 1e-3 * torch.randn(num_candidates_1, num_candidates_2, num_options)
        elif self.init_method == 'equal':
            task_logits = .5 * torch.ones(num_candidates_1, num_candidates_2, num_options)
        else:
            raise NotImplementedError('Initial Method %s is not implemented' % self.init_method)
        self.register_parameter('task_logits', nn.Parameter(task_logits, requires_grad=True))
    def train_sample_policy(self, temperature, hard_sampling):
        """Sample an augmentation policy from `task_logits` via Gumbel-softmax
        (one-hot when hard_sampling is True)."""
        return F.gumbel_softmax(getattr(self, 'task_logits'), temperature, hard=hard_sampling)
    def forward(self, sup_x, unsup_x=None):
        """Run the supervised batch and, optionally, an augmented unsupervised
        batch through the shared network.

        Returns (sup_logits, unsup_logits) stacked as
        [num_tasks, batch, output_dim]; unsup_logits is None when unsup_x is None.
        """
        # X shape: [batch_size, num_tasks, num_views, seq_len, sensor_dim] from single task
        if unsup_x is None:
            bsz, num_tasks, num_views, seq_len, sensor_dim = sup_x.size()
            x = sup_x.transpose(2, 3) # >> [batch_size, num_tasks, seq_len, num_views, sensor_dim]
            # x = x.reshape(bsz, num_tasks, seq_len, -1)
            x = self.shared_bottom(x).squeeze(-1) # >> [batch_size, num_tasks, seq_len, num_views]
            # output = [self.specific_tasks_1[task_id](x[:, task_id, ...]).squeeze(-1) for task_id in range(num_tasks)] # >> [batch_size, seq_len, num_views] * num_tasks
            output = [self.specific_tasks_2[task_id](x[:, task_id, ...]).squeeze(-1) for task_id in range(num_tasks)] # >> [batch_size, seq_len] * num_tasks
            # x = torch.stack(output, dim=1) # >> [batch_size, num_tasks, seq_len]
            output = [self.towers[task_id](output[task_id]) for task_id in range(num_tasks)] # >> [batch_size, output_dim] * num_tasks
            return torch.stack(output, dim=0), None
        else:
            # Adaptive Data Augmentation for Unsupervised Part
            bsz, num_tasks, num_views, seq_len, sensor_dim = sup_x.size()
            unsup_bsz = unsup_x.size(0)
            policy = self.train_sample_policy(self.temperature, True) # >> [num_tasks, num_views, 3] one-hot noise-type choice
            # NOTE(review): `+=` mutates the caller's unsup_x tensor in place.
            # Each (task, view) slot receives exactly one of the three noise
            # types selected by the sampled one-hot policy.
            unsup_x += torch.empty(unsup_bsz, num_tasks, num_views, seq_len, sensor_dim, device=self.device).normal_(0, 10) * \
                policy[:, :, 0][None, :, :, None, None] + \
                torch.empty(unsup_bsz, num_tasks, num_views, seq_len, sensor_dim, device=self.device).uniform_(0, 10) * \
                policy[:, :, 1][None, :, :, None, None] + \
                torch.empty(unsup_bsz, num_tasks, num_views, seq_len, sensor_dim, device=self.device).log_normal_(0, 3) * \
                policy[:, :, 2][None, :, :, None, None] # >> [unsup_bsz, ...]
            # unsup_x += .1
            # Concatenate so both batches share one pass through the network.
            x = torch.cat([sup_x, unsup_x], dim=0) # >> [bsz+unsup_bsz, ...]
            x = x.transpose(2, 3) # >> [batch_size, num_tasks, seq_len, num_views, sensor_dim]
            # x = x.reshape(bsz, num_tasks, seq_len, -1)
            x = self.shared_bottom(x).squeeze(-1) # >> [batch_size, num_tasks, seq_len, num_views]
            # output = [self.specific_tasks_1[task_id](x[:, task_id, ...]).squeeze(-1) for task_id in range(num_tasks)] # >> [batch_size, seq_len, num_views] * num_tasks
            output = [self.specific_tasks_2[task_id](x[:, task_id, ...]).squeeze(-1) for task_id in range(num_tasks)] # >> [batch_size, seq_len] * num_tasks
            # x = torch.stack(output, dim=1) # >> [batch_size, num_tasks, seq_len]
            output = [self.towers[task_id](output[task_id]) for task_id in range(num_tasks)] # >> [batch_size, output_dim] * num_tasks
            output = torch.stack(output, dim=0)
            # Split back into supervised and unsupervised halves.
            return output[:, :bsz, :], output[:, bsz:, :] # >> [num_tasks, bsz, output_dim], [num_tasks, unsup_bsz, output_dim]
class SemiSupUncertaintyLossWrapper(nn.Module):
    """Implementation of paper: Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry and Semantics

    Wraps a model and combines per-task supervised losses (uncertainty-weighted
    via learnable `eta`) with a UDA-style KL consistency loss on augmented
    unsupervised batches.

    params: num_tasks
    params: model
    return: Wrapped losses of multiple tasks
    """
    def __init__(self, model, num_tasks, sup_criterion, l2_criterion, unsup_criterion, eta, beta, opt, device):
        super(SemiSupUncertaintyLossWrapper, self).__init__()
        self.model = model
        self.num_tasks = num_tasks
        self.sup_criterion = sup_criterion      # expected reduction='none' (per-sample losses)
        self.unsup_criterion = unsup_criterion  # expected KLDivLoss(reduction='none')
        self.l2_criterion = l2_criterion        # currently unused; l2 block below is commented out
        # NOTE(review): nn.Module.training defaults to True, so this branch is
        # always taken at construction; TSA is effectively toggled by config only.
        if self.training:
            self.tsa = opt['train']['is_tsa']
        else:
            self.tsa = False
        self.sample_ratio = opt['train']['adaption_sample_ratio']
        self.adaption_steps = opt['train']['adaption_steps']
        self.tsa_schedule = opt['train']['tsa_schedule']
        self.total_steps = opt['train']['total_steps']
        self.uda_softmax_temp = opt['train']['uda_softmax_temp']
        self.uda_confidence_thresh = opt['train']['uda_confidence_thresh']
        self.uda_coeff = opt['train']['uda_coefficient']
        self.device = device
        # NOTE(review): message is stale -- the check requires num_tasks * 2
        # entries (supervised + unsupervised uncertainty terms per task).
        assert len(eta) == num_tasks * 2, "length of eta should be same as number of tasks"
        # variable change for stability >> using eta = 2log\sigma
        self.init_eta(eta)
        # self.init_beta(beta)
        # self.eta = nn.Parameter(torch.Tensor(eta)).to(device)
    def init_beta(self, beta):
        """Register `beta` as a learnable parameter (currently unused)."""
        self.register_parameter('beta', nn.Parameter(torch.Tensor(beta), requires_grad=True))
    def init_eta(self, eta):
        """Register `eta` (per-task log-variance weights) as learnable."""
        self.register_parameter('eta', nn.Parameter(torch.Tensor(eta), requires_grad=True))
    def forward(self, sup_inputs, targets, unsup_inputs, global_step):
        """Run the model and compute the combined loss.

        Returns (sup_outputs, total_loss, sup_loss, unsup_loss); the last two
        are the total and None, None when unsup_inputs is None.
        """
        # Compute Supervised loss with uncertainty
        # targets arrive as [batch, num_tasks]; make them task-major.
        targets = targets.transpose(0, 1)
        if unsup_inputs is not None:
            unsup_bsz = unsup_inputs.size(0)
            sample_bsz = int(unsup_bsz * self.sample_ratio)
            # Slice the unsupervised batch into (adaption_steps - 1) chunks to
            # be augmented inside the model.
            aug_unsup_inputs = []
            for k in range(1, self.adaption_steps):
                # adaptation_indices = random.sample(range(unsup_bsz), sample_bsz)
                # aug_unsup_indices.append(adaptation_indices)
                aug_unsup_inputs.append(unsup_inputs[sample_bsz*k:sample_bsz*(k+1)])
            aug_unsup_inputs = torch.cat(aug_unsup_inputs, dim=0)
            # print(aug_unsup_inputs.size())
            # adaptation_indices = np.zeros(unsup_bsz, dtype=bool)
            # adaptation_indices[np.arange(unsup_bsz//2) * 2] = True
            # evaluation_indices = torch.tensor(~adaptation_indices)
            # adaptation_indices = torch.from_numpy(adaptation_indices)
            sup_outputs, unsup_outputs = self.model(sup_inputs, aug_unsup_inputs)
            # print(unsup_outputs.size())
        else:
            sup_outputs, unsup_outputs = self.model(sup_inputs, unsup_inputs)
        # Compute Supervised Cross Entropy Loss
        sup_total_loss = 0
        sup_loss_list = [self.sup_criterion(o, y) for o, y in zip(sup_outputs, targets)]
        if self.tsa:
            # Training Signal Annealing: only keep examples whose predicted
            # probability is still below the annealed threshold.
            tsa_thresh = get_tsa_thresh(self.tsa_schedule, global_step, self.total_steps, start=1./sup_outputs.size(-1), end=1, device=self.device)
            for task_id in range(self.num_tasks):
                sup_loss = sup_loss_list[task_id]
                larger_than_threshold = torch.exp(-sup_loss) > tsa_thresh # prob = exp(log_prob), prob > tsa_threshold
                # larger_than_threshold = torch.sum( F.softmax(pred[:sup_size]) * torch.eye(num_labels)[sup_label_ids] , dim=-1) > tsa_threshold
                loss_mask = torch.ones_like(targets[task_id], dtype=torch.float32) * (1 - larger_than_threshold.type(torch.float32))
                # loss_mask = loss_mask.to(self.device)
                sup_loss = torch.sum(sup_loss * loss_mask, dim=-1) / torch.sum(loss_mask, dim=-1)
                # Uncertainty weighting: loss * exp(-eta) + eta (eta = 2 log sigma).
                sup_total_loss += torch.sum(sup_loss * torch.exp(-self.eta[task_id]) + self.eta[task_id])
        else:
            # pass
            for i in range(self.num_tasks):
                sup_total_loss += torch.sum(sup_loss_list[i] * torch.exp(-self.eta[i]) + self.eta[i])
                # sup_total_loss += torch.sum(sup_loss_list[i])
        # print(sup_total_loss)
        # Compute l2 loss between view-specific and merged outputs
        # l2_total_loss = 0
        # view_outputs, merged_outputs = F.log_softmax(view_outputs, dim=-1), F.log_softmax(merged_outputs, dim=-1)
        # l2_loss_list = [self.l2_criterion(o, y) for o, y in zip(view_outputs, merged_outputs)]
        # for i in range(self.num_tasks):
        #     l2_total_loss += torch.sum(l2_loss_list[i])
        # print(l2_total_loss)
        # Compute Unsupervised loss
        # NOTE: unsup_outputs (and sample_bsz) exist only when unsup_inputs
        # was provided above.
        if unsup_outputs is not None:
            unsup_total_loss = 0
            with torch.no_grad():
                # aug
                # softmax temperature controlling
                aug_log_probs_multi = []
                for k in range(self.adaption_steps - 1):
                    uda_softmax_temp = self.uda_softmax_temp if self.uda_softmax_temp > 0 else 1.
                    aug_log_probs = F.log_softmax(unsup_outputs[:, k*sample_bsz:(k+1)*sample_bsz, :] / uda_softmax_temp, dim=-1) # >> [num_tasks, unsup_bsz, output_dim]
                    aug_log_probs_multi.append(aug_log_probs)
                # Original
                # evaluation_indices = random.sample(range(unsup_bsz), sample_bsz)
                # print(evaluation_indices)
                ori_outputs, _, = self.model(unsup_inputs[:sample_bsz])
                ori_probs = F.softmax(ori_outputs, dim=-1) # >> [num_tasks, unsup_bsz, output_dim] # KLdiv target
                # print(ori_probs.size())
                # confidence-based masking
                if self.uda_confidence_thresh != -1:
                    unsup_loss_masks = torch.max(ori_probs, dim=-1)[0] > self.uda_confidence_thresh
                    unsup_loss_masks = unsup_loss_masks.type(torch.float32)
                else:
                    unsup_loss_masks = torch.ones(self.num_tasks, unsup_inputs.size(0), dtype=torch.float32)
                unsup_loss_masks = unsup_loss_masks.to(self.device) # >> [num_tasks, unsup_bsz]
            # KLdiv loss
            """
            nn.KLDivLoss (kl_div)
            input : log_prob (log_softmax)
            target : prob (softmax)
            https://pytorch.org/docs/stable/nn.html
            unsup_loss is divied by number of unsup_loss_mask
            it is different from the google UDA official
            The official unsup_loss is divided by total
            https://github.com/google-research/uda/blob/master/text/uda.py#L175
            """
            for k in range(self.adaption_steps - 1):
                aug_log_probs = aug_log_probs_multi[k]
                # print(aug_log_probs.size())
                # print(ori_probs.size())
                unsup_loss_list = [torch.sum(self.unsup_criterion(aug_log_prob, ori_prob), dim=-1)
                                   for aug_log_prob, ori_prob in zip(aug_log_probs, ori_probs)]
                # unsup_loss = torch.sum(self.unsup_criterion(aug_log_prob, ori_prob), dim=-1)
                unsup_loss_list = [torch.sum(unsup_loss * unsup_loss_mask, dim=-1) / torch.sum(unsup_loss_mask, dim=-1)
                                   for unsup_loss, unsup_loss_mask in zip(unsup_loss_list, unsup_loss_masks)]
                for i in range(self.num_tasks):
                    # unsup_total_loss += torch.sum(unsup_loss_list[i] * torch.exp(-self.eta[self.num_tasks+i]) + self.eta[self.num_tasks+i])
                    unsup_total_loss += torch.sum(unsup_loss_list[i])
            # print(unsup_total_loss)
            # Average the consistency loss over the augmentation rounds.
            final_loss = sup_total_loss + self.uda_coeff * unsup_total_loss / (self.adaption_steps - 1)
            # print(final_loss)
            return sup_outputs, final_loss, sup_total_loss, unsup_total_loss
        # total_loss = torch.Tensor(loss).to(self.device) * torch.exp(-self.eta) + self.eta
        # total_loss = torch.Tensor(loss).to(self.device) / self.num_tasks
        return sup_outputs, sup_total_loss, None, None # omit 1/2
if __name__ == '__main__':
    # Smoke test for SemiSupMultiMLP + SemiSupUncertaintyLossWrapper.
    with open('./yamls/realworld2016_exp.yaml') as f:
        opt = yaml.safe_load(f)
    sup_criterion = torch.nn.CrossEntropyLoss(reduction='none')
    unsup_criterion = torch.nn.KLDivLoss(reduction='none')
    # l2 criterion is currently unused inside the wrapper but required by
    # its constructor signature.
    l2_criterion = torch.nn.MSELoss(reduction='none')
    device = torch.device('cuda:0')
    sensor_dim = opt['num_sensors'] * 3
    model = SemiSupMultiMLP(opt['num_tasks'], opt['num_views'], opt['window_size'],
                            sensor_dim, 100, opt['num_action_types'], opt['train']['init_method'],
                            opt['train']['temperature'], device, .5)
    model = model.to(device)
    # BUG FIX: the wrapper asserts len(eta) == num_tasks * 2 (supervised +
    # unsupervised uncertainty terms); the old list had only num_tasks zeros.
    eta = [0.] * (opt['num_tasks'] * 2)
    beta = [0.] * opt['num_tasks']
    # BUG FIX: the old call omitted l2_criterion and beta and would raise
    # TypeError against the constructor signature.
    wrapper = SemiSupUncertaintyLossWrapper(model, opt['num_tasks'], sup_criterion,
                                            l2_criterion, unsup_criterion, eta, beta, opt, device)
    sup_x = torch.randn(32, 8, 7, 250, 9)
    unsup_x = torch.randn(16, 8, 7, 250, 9)
    # BUG FIX: removed duplicated `targets = targets =` assignment.
    targets = torch.empty(32, 8, dtype=torch.long).random_(7)
    sup_x, targets, unsup_x = sup_x.to(device), targets.to(device), unsup_x.to(device)
    outputs, final_loss, sup_loss, unsup_loss = wrapper(sup_x, targets, unsup_x, 100)
    print(outputs.size())
    print(final_loss, sup_loss, unsup_loss)
|
en
| 0.513844
|
# TSA Baic one single shared bottom for all tasks across all views for benchmark evaluation for semi-supervised learning # self.specific_tasks_1 = nn.ModuleList([nn.Linear(sensor_dim, 1) for _ in range(num_tasks)]) # X shape: [batch_size, num_tasks, num_views, seq_len, sensor_dim] from single task # >> [batch_size, num_tasks, seq_len, num_views, sensor_dim] # x = x.reshape(bsz, num_tasks, seq_len, -1) # >> [batch_size, num_tasks, seq_len, num_views] # output = [self.specific_tasks_1[task_id](x[:, task_id, ...]).squeeze(-1) for task_id in range(num_tasks)] # >> [batch_size, seq_len, num_views] * num_tasks # >> [batch_size, seq_len] * num_tasks # x = torch.stack(output, dim=1) # >> [batch_size, num_tasks, seq_len] # >> [batch_size, output_dim] * num_tasks # Adaptive Data Augmentation for Unsupervised Part # >> [num_tasks, 2] # >> [unsup_bsz, ...] # unsup_x += .1 # >> [bsz+unsup_bsz, ...] # >> [batch_size, num_tasks, seq_len, num_views, sensor_dim] # x = x.reshape(bsz, num_tasks, seq_len, -1) # >> [batch_size, num_tasks, seq_len, num_views] # output = [self.specific_tasks_1[task_id](x[:, task_id, ...]).squeeze(-1) for task_id in range(num_tasks)] # >> [batch_size, seq_len, num_views] * num_tasks # >> [batch_size, seq_len] * num_tasks # x = torch.stack(output, dim=1) # >> [batch_size, num_tasks, seq_len] # >> [batch_size, output_dim] * num_tasks # >> [num_tasks, bsz, output_dim], [num_tasks, unsup_bsz, output_dim] Implementation of paper: Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry and Semantics params: num_tasks params: model return: Wrapped losses of multiple tasks # variable change for stability >> using eta = 2log\sigma # self.init_beta(beta) # self.eta = nn.Parameter(torch.Tensor(eta)).to(device) # Compute Supervised loss with uncertainty # adaptation_indices = random.sample(range(unsup_bsz), sample_bsz) # aug_unsup_indices.append(adaptation_indices) # print(aug_unsup_inputs.size()) # adaptation_indices = np.zeros(unsup_bsz, 
dtype=bool) # adaptation_indices[np.arange(unsup_bsz//2) * 2] = True # evaluation_indices = torch.tensor(~adaptation_indices) # adaptation_indices = torch.from_numpy(adaptation_indices) # print(unsup_outputs.size()) # Compute Supervised Cross Entropy Loss # prob = exp(log_prob), prob > tsa_threshold # larger_than_threshold = torch.sum( F.softmax(pred[:sup_size]) * torch.eye(num_labels)[sup_label_ids] , dim=-1) > tsa_threshold # loss_mask = loss_mask.to(self.device) # pass # sup_total_loss += torch.sum(sup_loss_list[i]) # print(sup_total_loss) # Compute l2 loss between view-specific and merged outputs # l2_total_loss = 0 # view_outputs, merged_outputs = F.log_softmax(view_outputs, dim=-1), F.log_softmax(merged_outputs, dim=-1) # l2_loss_list = [self.l2_criterion(o, y) for o, y in zip(view_outputs, merged_outputs)] # for i in range(self.num_tasks): # l2_total_loss += torch.sum(l2_loss_list[i]) # print(l2_total_loss) # Compute Unsupervised loss # aug # softmax temperature controlling # >> [num_tasks, unsup_bsz, output_dim] # Original # evaluation_indices = random.sample(range(unsup_bsz), sample_bsz) # print(evaluation_indices) # >> [num_tasks, unsup_bsz, output_dim] # KLdiv target # print(ori_probs.size()) # confidence-based masking # >> [num_tasks, unsup_bsz] # KLdiv loss nn.KLDivLoss (kl_div) input : log_prob (log_softmax) target : prob (softmax) https://pytorch.org/docs/stable/nn.html unsup_loss is divied by number of unsup_loss_mask it is different from the google UDA official The official unsup_loss is divided by total https://github.com/google-research/uda/blob/master/text/uda.py#L175 # print(aug_log_probs.size()) # print(ori_probs.size()) # unsup_loss = torch.sum(self.unsup_criterion(aug_log_prob, ori_prob), dim=-1) # unsup_total_loss += torch.sum(unsup_loss_list[i] * torch.exp(-self.eta[self.num_tasks+i]) + self.eta[self.num_tasks+i]) # print(unsup_total_loss) # print(final_loss) # total_loss = torch.Tensor(loss).to(self.device) * torch.exp(-self.eta) + 
self.eta # total_loss = torch.Tensor(loss).to(self.device) / self.num_tasks # omit 1/2 # sup_y, unsup_y = model(sup_x, unsup_x) # print(sup_y.size(), unsup_y.size()) # y = model(x) # print(y[0].size())
| 2.40194
| 2
|
clean/taxi/taxispark.py
|
chrislin009/trafficDispatcher
| 1
|
6626544
|
import sys
import datetime
from pyspark import SparkContext
from csv import reader
from operator import add
def week(datetimestr):
    """Return the ISO week number for a 'YYYY-MM-DD HH:MM:SS' timestamp.

    Returns 0 when the ISO year differs from the calendar year (the date
    falls in the last ISO week of the previous year or the first of the
    next), matching the original bucketing behavior.
    """
    # Parse only the date part; the time component is not needed.
    date_part = datetimestr.split(' ')[0]
    d = datetime.date.fromisoformat(date_part)
    isoyear, isoweeknum, _ = d.isocalendar()
    return isoweeknum if isoyear == d.year else 0
if __name__ == "__main__":
    sc = SparkContext()
    # TaxiResult.txt is CSV: column 0 is a datetime string, column 1 an
    # integer neighbour count -- TODO confirm against the producer job.
    rddtaxi = sc.textFile('TaxiResult.txt')
    # Parse each partition with csv.reader so quoted fields are handled.
    lines = rddtaxi.mapPartitions(lambda x: reader(x))

    # Histogram of positive neighbour counts as "count,occurrences" lines.
    # BUG FIX: sorting previously ran *after* formatting to strings, so the
    # sortBy key was the first character only ("10,..." sorted before
    # "2,..."). Sort on the numeric key first, then format.
    neighbor_count = lines.filter(lambda x: int(x[1]) > 0) \
        .map(lambda x: (int(x[1]), 1)).reduceByKey(add) \
        .sortBy(lambda x: x[0]) \
        .map(lambda x: "%d,%d" % (x[0], x[1]))
    neighbor_count.saveAsTextFile("taxi_n_count.txt")

    # Per-ISO-week totals; week 0 collects dates outside their ISO year.
    # Same fix: sort numerically by week before formatting.
    week_count = lines.filter(lambda x: int(x[1]) > 0) \
        .map(lambda x: (week(x[0]), 1)).reduceByKey(add) \
        .sortBy(lambda x: x[0]) \
        .map(lambda x: "%d,%d" % (x[0], x[1]))
    week_count.saveAsTextFile("taxi_week_count.txt")
|
import sys
import datetime
from pyspark import SparkContext
from csv import reader
from operator import add
def week(datetimestr):
    """Return the ISO week number of the year for a
    'YYYY-MM-DD HH:MM:SS' timestamp, or 0 when the date's ISO year
    differs from its calendar year (e.g. Jan 1st landing in week 53)."""
    # Get the ISO week number of the year; only the date part is used.
    date, time = datetimestr.split(' ')
    yearstr, monthstr, daystr = date.split('-')
    year = int(yearstr)
    month = int(monthstr)
    day = int(daystr)
    isoyear, isoweeknum, isoweekdaynum = datetime.date(year, month, day).isocalendar()
    if isoyear == year:
        return isoweeknum
    else:
        # Dates whose ISO week belongs to a neighbouring year are
        # bucketed as week 0 so they can be filtered downstream.
        return 0
if __name__ == "__main__":
    sc = SparkContext()
    # TaxiResult.txt is CSV: column 0 is a datetime string, column 1 an
    # integer neighbour count -- TODO confirm against the producer job.
    rddtaxi = sc.textFile('TaxiResult.txt')
    # Parse each partition with csv.reader so quoted fields are handled.
    lines = rddtaxi.mapPartitions(lambda x: reader(x))
    # Histogram of positive counts, emitted as "count,occurrences" lines.
    # NOTE(review): sortBy runs on the formatted string's first character,
    # so "10,..." sorts before "2,..." -- likely meant to sort before map().
    neighbor_count = lines.filter(lambda x: int(x[1]) > 0).map(lambda x: (int(x[1]), 1)).reduceByKey(add) \
        .map(lambda x: "%d,%d" % (x[0], x[1])).sortBy(lambda x: x[0])
    neighbor_count.saveAsTextFile("taxi_n_count.txt")
    # Per-ISO-week totals; week 0 holds dates outside their ISO year.
    # NOTE(review): same first-character sort concern as above.
    week_count = lines.filter(lambda x: int(x[1]) > 0).map(lambda x:(week(x[0]), 1)).reduceByKey(add) \
        .map(lambda x: "%d,%d" % (x[0], x[1])).sortBy(lambda x: x[0])
    week_count.saveAsTextFile("taxi_week_count.txt")
|
en
| 0.799675
|
#get weeknumber of the yaer
| 3.0005
| 3
|
wotw_highlighter/block_styler.py
|
wizardsoftheweb/wotw_highlighter
| 0
|
6626545
|
<reponame>wizardsoftheweb/wotw_highlighter
"""This file provides a class to provide styling"""
from os.path import dirname, join
from wotw_highlighter.block_options import BlockOptions
class BlockStyler(BlockOptions):
    """Provides the CSS styles attached to a highlighted_blob.

    Styles come from two stylesheets bundled in this package's data
    directory: the Pygments Monokai theme and a file of local
    additions layered on top of it.
    """

    @staticmethod
    def _read_bundled_css(filename):
        """Read a CSS file from this package's data directory.

        Shared helper extracted from the two dump methods, which
        previously duplicated the same open/read/strip logic.
        """
        path = join(dirname(__file__), 'data', filename)
        with open(path, 'r') as css_file:
            return css_file.read().strip()

    @staticmethod
    def dump_pygments_styles():
        """Dumps all the styles from Pygments"""
        return BlockStyler._read_bundled_css('pygments-monokai.css')

    @staticmethod
    def dump_additional_styles():
        """Dumps all the additional styling"""
        return BlockStyler._read_bundled_css('pygments-monokai-additions.css')

    @staticmethod
    def dump_styles():
        """Return the Pygments styles followed by the additional styles.

        Concatenated with no separator, matching historic output.
        """
        return (
            BlockStyler.dump_pygments_styles()
            +
            BlockStyler.dump_additional_styles()
        )

    def set_styles(self):
        """Assigns the combined styles to the BlockOptions chain."""
        self.highlighted_blob_styles = self.dump_styles()
|
"""This file provides a class to provide styling"""
from os.path import dirname, join
from wotw_highlighter.block_options import BlockOptions
class BlockStyler(BlockOptions):
    """Supplies the CSS used to style a highlighted_blob."""

    @staticmethod
    def dump_pygments_styles():
        """Return the bundled Pygments Monokai stylesheet, stripped."""
        css_path = join(dirname(__file__), 'data', 'pygments-monokai.css')
        with open(css_path, 'r') as handle:
            contents = handle.read()
        return contents.strip()

    @staticmethod
    def dump_additional_styles():
        """Return the local additions to the Monokai theme, stripped."""
        css_path = join(dirname(__file__), 'data', 'pygments-monokai-additions.css')
        with open(css_path, 'r') as handle:
            contents = handle.read()
        return contents.strip()

    @staticmethod
    def dump_styles():
        """Return both stylesheets concatenated, Pygments theme first."""
        return BlockStyler.dump_pygments_styles() + BlockStyler.dump_additional_styles()

    def set_styles(self):
        """Store the combined styles on the BlockOptions chain."""
        self.highlighted_blob_styles = self.dump_styles()
|
en
| 0.681637
|
This file provides a class to provide styling This class provides styles for a highlighted_blob Dumps all the styles from Pygments Dumps all the additional styling Dumps all the style Assigns styles to the BlockOptions chain
| 2.804505
| 3
|
Py/font_big.py
|
ttempe/Pocket_Organ
| 11
|
6626546
|
# Code generated by font_to_py.py.
# Font: FreeSansBold.ttf Char set: #357ABCDEFGadgimu
# Cmd: ./font_to_py.py ttf/FreeSansBold.ttf 40 -x -c ABCDEFGm753#dimaug font_big.py
version = '0.33'
def height():
    # Total glyph cell height in pixel rows.
    return 42

def baseline():
    # Pixels from the top of the cell down to the text baseline.
    return 32

def max_width():
    # Width in pixels of the widest glyph in this character set.
    return 38

def hmap():
    # True: glyph bitmaps are stored as horizontal byte rows.
    return True

def reverse():
    # False: bit order within each byte is not reversed.
    return False

def monospaced():
    # False: glyph widths vary per character (proportional font).
    return False

def min_ch():
    # Lowest character code in the font ('#' == 35).
    return 35

def max_ch():
    # Highest character code in the font ('u' == 117).
    return 117
_font =\
b'\x1a\x00\x00\x3f\x80\x00\x01\xff\xf0\x00\x03\xff\xf8\x00\x07\xff'\
b'\xfe\x00\x0f\xff\xfe\x00\x0f\xf0\xff\x00\x0f\xe0\x7f\x00\x1f\xe0'\
b'\x7f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x00\x00'\
b'\x3f\x80\x00\x00\x3f\x80\x00\x00\x7f\x00\x00\x00\xff\x00\x00\x01'\
b'\xfe\x00\x00\x03\xfc\x00\x00\x07\xf0\x00\x00\x0f\xe0\x00\x00\x0f'\
b'\xc0\x00\x00\x1f\x80\x00\x00\x1f\x00\x00\x00\x1f\x00\x00\x00\x1f'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\xc0\x00\x00\x1f'\
b'\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f'\
b'\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xf0\x78\x00\xf0\x78\x01\xf0\xf8\x01\xf0'\
b'\xf8\x01\xf0\xf0\x01\xe0\xf0\x7f\xff\xff\x7f\xff\xff\x7f\xff\xff'\
b'\x7f\xff\xff\x7f\xff\xff\x03\xc3\xe0\x07\xc3\xe0\x07\xc3\xe0\x07'\
b'\x83\xc0\x07\x83\xc0\x0f\x87\xc0\xff\xff\xfc\xff\xff\xfc\xff\xff'\
b'\xfc\xff\xff\xfc\xff\xff\xfc\x1f\x0f\x80\x1f\x0f\x80\x1e\x0f\x00'\
b'\x1e\x0f\x00\x3e\x1f\x00\x3e\x1f\x00\x3e\x1f\x00\x3c\x1e\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x7f\x80\x03'\
b'\xff\xe0\x07\xff\xf0\x0f\xff\xf8\x1f\xff\xfc\x1f\xe3\xfc\x3f\xc1'\
b'\xfe\x3f\x80\xfe\x3f\x80\xfe\x3f\x80\xfe\x00\x00\xfe\x00\x00\xfc'\
b'\x00\x01\xfc\x00\x03\xf8\x00\x0f\xf0\x00\x0f\xe0\x00\x0f\xf0\x00'\
b'\x0f\xfc\x00\x01\xfe\x00\x00\xfe\x00\x00\x7f\x00\x00\x7f\x00\x00'\
b'\x7f\x00\x00\x7f\x3f\x80\x7f\x3f\x80\x7f\x3f\xc0\xfe\x1f\xe1\xfe'\
b'\x1f\xff\xfe\x0f\xff\xfc\x07\xff\xf8\x03\xff\xe0\x00\x7f\x80\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x07'\
b'\xff\xfc\x0f\xff\xfc\x0f\xff\xfc\x0f\xff\xfc\x0f\xff\xfc\x0f\x00'\
b'\x00\x0f\x00\x00\x1f\x00\x00\x1f\x00\x00\x1f\x00\x00\x1f\x3f\x00'\
b'\x1e\xff\xc0\x1f\xff\xf0\x1f\xff\xf8\x3f\xff\xf8\x3f\x87\xfc\x3f'\
b'\x01\xfc\x00\x01\xfe\x00\x00\xfe\x00\x00\xfe\x00\x00\xfe\x00\x00'\
b'\xfe\x00\x00\xfe\x00\x00\xfe\x7e\x01\xfc\x7e\x01\xfc\x7f\x07\xfc'\
b'\x3f\xff\xf8\x3f\xff\xf0\x1f\xff\xe0\x07\xff\x80\x01\xfe\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x3f'\
b'\xff\xfc\x3f\xff\xfc\x3f\xff\xfc\x3f\xff\xfc\x3f\xff\xfc\x00\x00'\
b'\xf8\x00\x01\xf8\x00\x03\xf0\x00\x03\xe0\x00\x07\xe0\x00\x0f\xc0'\
b'\x00\x0f\xc0\x00\x1f\x80\x00\x1f\x80\x00\x3f\x80\x00\x3f\x00\x00'\
b'\x7f\x00\x00\x7e\x00\x00\x7e\x00\x00\xfe\x00\x00\xfe\x00\x00\xfc'\
b'\x00\x01\xfc\x00\x01\xfc\x00\x01\xfc\x00\x01\xfc\x00\x01\xf8\x00'\
b'\x03\xf8\x00\x03\xf8\x00\x03\xf8\x00\x03\xf8\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x00'\
b'\x00\x1f\xe0\x00\x00\x1f\xe0\x00\x00\x1f\xf0\x00\x00\x3f\xf0\x00'\
b'\x00\x3f\xf0\x00\x00\x3f\xf8\x00\x00\x7f\xf8\x00\x00\x7f\xf8\x00'\
b'\x00\xff\xfc\x00\x00\xfc\xfc\x00\x00\xfc\xfc\x00\x01\xfc\x7e\x00'\
b'\x01\xf8\x7e\x00\x01\xf8\x7e\x00\x03\xf0\x3f\x00\x03\xf0\x3f\x00'\
b'\x03\xf0\x3f\x00\x07\xe0\x1f\x80\x07\xe0\x1f\x80\x07\xff\xff\xc0'\
b'\x0f\xff\xff\xc0\x0f\xff\xff\xc0\x1f\xff\xff\xe0\x1f\xff\xff\xe0'\
b'\x1f\x80\x07\xe0\x3f\x80\x07\xf0\x3f\x00\x03\xf0\x3f\x00\x03\xf0'\
b'\x7f\x00\x03\xf8\x7e\x00\x03\xf8\x7e\x00\x01\xf8\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x1f\xff\xfc\x00\x1f\xff'\
b'\xff\x00\x1f\xff\xff\x80\x1f\xff\xff\xc0\x1f\xff\xff\xe0\x1f\xc0'\
b'\x1f\xe0\x1f\xc0\x0f\xf0\x1f\xc0\x07\xf0\x1f\xc0\x07\xf0\x1f\xc0'\
b'\x07\xf0\x1f\xc0\x07\xf0\x1f\xc0\x0f\xe0\x1f\xc0\x1f\xc0\x1f\xff'\
b'\xff\x80\x1f\xff\xff\x00\x1f\xff\xff\x00\x1f\xff\xff\xc0\x1f\xff'\
b'\xff\xe0\x1f\xc0\x0f\xf0\x1f\xc0\x07\xf0\x1f\xc0\x03\xf8\x1f\xc0'\
b'\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x07\xf8\x1f\xc0'\
b'\x0f\xf0\x1f\xff\xff\xf0\x1f\xff\xff\xe0\x1f\xff\xff\xc0\x1f\xff'\
b'\xff\x80\x1f\xff\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00'\
b'\x00\x0f\xf8\x00\x00\x3f\xff\x00\x00\xff\xff\x80\x01\xff\xff\xc0'\
b'\x03\xff\xff\xe0\x07\xfc\x1f\xf0\x0f\xf0\x07\xf8\x0f\xe0\x03\xf8'\
b'\x1f\xe0\x03\xf8\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc\x3f\xc0\x00\x00'\
b'\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00'\
b'\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00'\
b'\x3f\x80\x00\x00\x3f\xc0\x00\x00\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc'\
b'\x1f\xe0\x03\xfc\x0f\xe0\x03\xf8\x0f\xf0\x07\xf8\x07\xfc\x1f\xf0'\
b'\x03\xff\xff\xf0\x01\xff\xff\xe0\x00\xff\xff\x80\x00\x3f\xff\x00'\
b'\x00\x0f\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\x00\x00\x1f\xff'\
b'\xf8\x00\x1f\xff\xfe\x00\x1f\xff\xff\x80\x1f\xff\xff\xc0\x1f\xff'\
b'\xff\xe0\x1f\xc0\x3f\xe0\x1f\xc0\x0f\xf0\x1f\xc0\x07\xf8\x1f\xc0'\
b'\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x01\xfc\x1f\xc0'\
b'\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0'\
b'\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0'\
b'\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x07\xf8\x1f\xc0'\
b'\x0f\xf0\x1f\xc0\x3f\xe0\x1f\xff\xff\xe0\x1f\xff\xff\xc0\x1f\xff'\
b'\xff\x80\x1f\xff\xfe\x00\x1f\xff\xf0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x1d\x00\x00\x00\x00\x00\x1f\xff\xff\x80\x1f\xff\xff\x80'\
b'\x1f\xff\xff\x80\x1f\xff\xff\x80\x1f\xff\xff\x80\x1f\xc0\x00\x00'\
b'\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00'\
b'\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xff\xff\x00'\
b'\x1f\xff\xff\x00\x1f\xff\xff\x00\x1f\xff\xff\x00\x1f\xff\xff\x00'\
b'\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00'\
b'\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00'\
b'\x1f\xff\xff\xc0\x1f\xff\xff\xc0\x1f\xff\xff\xc0\x1f\xff\xff\xc0'\
b'\x1f\xff\xff\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x00\x00\x00'\
b'\x00\x00\x1f\xff\xff\x80\x1f\xff\xff\x80\x1f\xff\xff\x80\x1f\xff'\
b'\xff\x80\x1f\xff\xff\x80\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0'\
b'\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0'\
b'\x00\x00\x1f\xc0\x00\x00\x1f\xff\xfe\x00\x1f\xff\xfe\x00\x1f\xff'\
b'\xfe\x00\x1f\xff\xfe\x00\x1f\xff\xfe\x00\x1f\xc0\x00\x00\x1f\xc0'\
b'\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0'\
b'\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0'\
b'\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x21\x00\x00\x07\xfc\x00\x00\x00\x3f\xff'\
b'\x80\x00\x00\x7f\xff\xc0\x00\x01\xff\xff\xf0\x00\x03\xff\xff\xf8'\
b'\x00\x07\xfe\x07\xf8\x00\x07\xf8\x01\xfc\x00\x0f\xf0\x00\xfc\x00'\
b'\x0f\xe0\x00\xfe\x00\x1f\xc0\x00\x7e\x00\x1f\xc0\x00\x00\x00\x1f'\
b'\xc0\x00\x00\x00\x3f\x80\x00\x00\x00\x3f\x80\x00\x00\x00\x3f\x80'\
b'\x00\x00\x00\x3f\x80\x1f\xfe\x00\x3f\x80\x1f\xfe\x00\x3f\x80\x1f'\
b'\xfe\x00\x3f\x80\x1f\xfe\x00\x3f\x80\x1f\xfe\x00\x3f\x80\x00\x7e'\
b'\x00\x1f\xc0\x00\x7e\x00\x1f\xc0\x00\x7e\x00\x1f\xe0\x00\x7e\x00'\
b'\x0f\xe0\x00\xfe\x00\x0f\xf0\x01\xfe\x00\x07\xf8\x03\xfe\x00\x03'\
b'\xfe\x0f\xfe\x00\x03\xff\xff\xfe\x00\x01\xff\xff\xbe\x00\x00\x7f'\
b'\xff\x1e\x00\x00\x3f\xfe\x1e\x00\x00\x07\xf8\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xc0\x03\xff\xf0\x07\xff\xf8'\
b'\x0f\xff\xfc\x1f\xff\xfe\x1f\xc1\xfe\x1f\xc0\xfe\x1f\x80\xfe\x00'\
b'\x00\xfe\x00\x07\xfe\x00\xff\xfe\x07\xff\xfe\x0f\xff\xfe\x1f\xf0'\
b'\xfe\x3f\xc0\xfe\x3f\x80\xfe\x3f\x80\xfe\x3f\x81\xfe\x3f\xc3\xfe'\
b'\x3f\xff\xfe\x1f\xff\xfe\x0f\xfe\xff\x07\xfc\xff\x01\xf0\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x00\x00\x00\x00\x00'\
b'\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80'\
b'\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80'\
b'\x00\x7e\x3f\x80\x01\xff\x3f\x80\x03\xff\xbf\x80\x07\xff\xff\x80'\
b'\x0f\xff\xff\x80\x0f\xf1\xff\x80\x1f\xe0\xff\x80\x1f\xc0\x7f\x80'\
b'\x3f\xc0\x7f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80'\
b'\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\xc0\x7f\x80'\
b'\x1f\xc0\x7f\x80\x1f\xe0\xff\x80\x1f\xf1\xff\x80\x0f\xff\xff\x80'\
b'\x07\xff\xff\x80\x03\xff\xbf\x80\x01\xff\x3f\x80\x00\x7e\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7e\x3f\x80\x01\xff'\
b'\x3f\x80\x03\xff\xbf\x80\x07\xff\xff\x80\x0f\xff\xff\x80\x0f\xf1'\
b'\xff\x80\x1f\xe0\xff\x80\x1f\xc0\x7f\x80\x3f\xc0\x7f\x80\x3f\x80'\
b'\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80'\
b'\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x1f\xc0\x7f\x80\x1f\xc0'\
b'\xff\x80\x1f\xf1\xff\x80\x0f\xff\xff\x80\x07\xff\xff\x80\x07\xff'\
b'\xbf\x80\x01\xff\x3f\x80\x00\x7e\x3f\x80\x00\x00\x3f\x80\x00\x00'\
b'\x3f\x80\x00\x00\x7f\x80\x1f\xc0\x7f\x00\x1f\xe0\xff\x00\x1f\xff'\
b'\xfe\x00\x0f\xff\xfc\x00\x03\xff\xf8\x00\x00\xff\xc0\x00\x0c\x00'\
b'\x00\x00\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x00\x00\x00\x00'\
b'\x00\x00\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0'\
b'\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0'\
b'\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x26\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x1f\xc7\xe0\x3f\x00\x1f\xdf\xf8\xff\xc0\x1f\xff\xfd'\
b'\xff\xe0\x1f\xff\xff\xff\xf0\x1f\xff\xff\xff\xf8\x1f\xf1\xff\x87'\
b'\xf8\x1f\xe0\xff\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8'\
b'\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f'\
b'\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0'\
b'\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe'\
b'\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03'\
b'\xf8\x1f\xc0\xfe\x03\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\xc0'\
b'\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0'\
b'\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0'\
b'\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0'\
b'\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0'\
b'\x3f\x80\x1f\xe0\x7f\x80\x1f\xf0\xff\x80\x0f\xff\xff\x80\x0f\xff'\
b'\xff\x80\x07\xff\xbf\x80\x03\xff\x3f\x80\x00\xfc\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00'
_index =\
b'\x00\x00\xaa\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x2a\x01\x00\x00\xaa\x01\x00\x00\x2a\x02\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\x02'\
b'\x54\x03\xfe\x03\xa8\x04\x52\x05\xfc\x05\xa6\x06\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7a\x07'\
b'\x00\x00\x00\x00\xfa\x07\x00\x00\x00\x00\xa4\x08\x00\x00\x4e\x09'\
b'\x00\x00\x00\x00\x00\x00\xa4\x09\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x78\x0a\x22\x0b'
# Zero-copy views over the glyph bitmap blob and its per-character index.
_mvfont = memoryview(_font)
_mvi = memoryview(_index)
# Decode a little-endian unsigned 16-bit integer from a byte sequence.
ifb = lambda l : l[0] | (l[1] << 8)

def get_ch(ch):
    """Look up the glyph for character *ch*.

    Returns (buffer, height, width): a memoryview over the glyph's row
    bitmap, the fixed height 42, and the glyph's pixel width.
    """
    oc = ord(ch)
    # Index entries are 2 bytes each; slot 0 is the fallback glyph used
    # for characters outside the supported range 35..117.
    ioff = 2 * (oc - 35 + 1) if oc >= 35 and oc <= 117 else 0
    doff = ifb(_mvi[ioff : ])
    width = ifb(_mvfont[doff : ])
    # Each of the 42 rows occupies ceil(width / 8) bytes; the first two
    # bytes at doff hold the width itself.
    next_offs = doff + 2 + ((width - 1)//8 + 1) * 42
    return _mvfont[doff + 2:next_offs], 42, width
|
# Code generated by font_to_py.py.
# Font: FreeSansBold.ttf Char set: #357ABCDEFGadgimu
# Cmd: ./font_to_py.py ttf/FreeSansBold.ttf 40 -x -c ABCDEFGm753#dimaug font_big.py
version = '0.33'
def height():
    # Total glyph cell height in pixel rows.
    return 42

def baseline():
    # Pixels from the top of the cell down to the text baseline.
    return 32

def max_width():
    # Width in pixels of the widest glyph in this character set.
    return 38

def hmap():
    # True: glyph bitmaps are stored as horizontal byte rows.
    return True

def reverse():
    # False: bit order within each byte is not reversed.
    return False

def monospaced():
    # False: glyph widths vary per character (proportional font).
    return False

def min_ch():
    # Lowest character code in the font ('#' == 35).
    return 35

def max_ch():
    # Highest character code in the font ('u' == 117).
    return 117
_font =\
b'\x1a\x00\x00\x3f\x80\x00\x01\xff\xf0\x00\x03\xff\xf8\x00\x07\xff'\
b'\xfe\x00\x0f\xff\xfe\x00\x0f\xf0\xff\x00\x0f\xe0\x7f\x00\x1f\xe0'\
b'\x7f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x00\x00'\
b'\x3f\x80\x00\x00\x3f\x80\x00\x00\x7f\x00\x00\x00\xff\x00\x00\x01'\
b'\xfe\x00\x00\x03\xfc\x00\x00\x07\xf0\x00\x00\x0f\xe0\x00\x00\x0f'\
b'\xc0\x00\x00\x1f\x80\x00\x00\x1f\x00\x00\x00\x1f\x00\x00\x00\x1f'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\xc0\x00\x00\x1f'\
b'\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f'\
b'\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xf0\x78\x00\xf0\x78\x01\xf0\xf8\x01\xf0'\
b'\xf8\x01\xf0\xf0\x01\xe0\xf0\x7f\xff\xff\x7f\xff\xff\x7f\xff\xff'\
b'\x7f\xff\xff\x7f\xff\xff\x03\xc3\xe0\x07\xc3\xe0\x07\xc3\xe0\x07'\
b'\x83\xc0\x07\x83\xc0\x0f\x87\xc0\xff\xff\xfc\xff\xff\xfc\xff\xff'\
b'\xfc\xff\xff\xfc\xff\xff\xfc\x1f\x0f\x80\x1f\x0f\x80\x1e\x0f\x00'\
b'\x1e\x0f\x00\x3e\x1f\x00\x3e\x1f\x00\x3e\x1f\x00\x3c\x1e\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x7f\x80\x03'\
b'\xff\xe0\x07\xff\xf0\x0f\xff\xf8\x1f\xff\xfc\x1f\xe3\xfc\x3f\xc1'\
b'\xfe\x3f\x80\xfe\x3f\x80\xfe\x3f\x80\xfe\x00\x00\xfe\x00\x00\xfc'\
b'\x00\x01\xfc\x00\x03\xf8\x00\x0f\xf0\x00\x0f\xe0\x00\x0f\xf0\x00'\
b'\x0f\xfc\x00\x01\xfe\x00\x00\xfe\x00\x00\x7f\x00\x00\x7f\x00\x00'\
b'\x7f\x00\x00\x7f\x3f\x80\x7f\x3f\x80\x7f\x3f\xc0\xfe\x1f\xe1\xfe'\
b'\x1f\xff\xfe\x0f\xff\xfc\x07\xff\xf8\x03\xff\xe0\x00\x7f\x80\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x07'\
b'\xff\xfc\x0f\xff\xfc\x0f\xff\xfc\x0f\xff\xfc\x0f\xff\xfc\x0f\x00'\
b'\x00\x0f\x00\x00\x1f\x00\x00\x1f\x00\x00\x1f\x00\x00\x1f\x3f\x00'\
b'\x1e\xff\xc0\x1f\xff\xf0\x1f\xff\xf8\x3f\xff\xf8\x3f\x87\xfc\x3f'\
b'\x01\xfc\x00\x01\xfe\x00\x00\xfe\x00\x00\xfe\x00\x00\xfe\x00\x00'\
b'\xfe\x00\x00\xfe\x00\x00\xfe\x7e\x01\xfc\x7e\x01\xfc\x7f\x07\xfc'\
b'\x3f\xff\xf8\x3f\xff\xf0\x1f\xff\xe0\x07\xff\x80\x01\xfe\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x3f'\
b'\xff\xfc\x3f\xff\xfc\x3f\xff\xfc\x3f\xff\xfc\x3f\xff\xfc\x00\x00'\
b'\xf8\x00\x01\xf8\x00\x03\xf0\x00\x03\xe0\x00\x07\xe0\x00\x0f\xc0'\
b'\x00\x0f\xc0\x00\x1f\x80\x00\x1f\x80\x00\x3f\x80\x00\x3f\x00\x00'\
b'\x7f\x00\x00\x7e\x00\x00\x7e\x00\x00\xfe\x00\x00\xfe\x00\x00\xfc'\
b'\x00\x01\xfc\x00\x01\xfc\x00\x01\xfc\x00\x01\xfc\x00\x01\xf8\x00'\
b'\x03\xf8\x00\x03\xf8\x00\x03\xf8\x00\x03\xf8\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x00'\
b'\x00\x1f\xe0\x00\x00\x1f\xe0\x00\x00\x1f\xf0\x00\x00\x3f\xf0\x00'\
b'\x00\x3f\xf0\x00\x00\x3f\xf8\x00\x00\x7f\xf8\x00\x00\x7f\xf8\x00'\
b'\x00\xff\xfc\x00\x00\xfc\xfc\x00\x00\xfc\xfc\x00\x01\xfc\x7e\x00'\
b'\x01\xf8\x7e\x00\x01\xf8\x7e\x00\x03\xf0\x3f\x00\x03\xf0\x3f\x00'\
b'\x03\xf0\x3f\x00\x07\xe0\x1f\x80\x07\xe0\x1f\x80\x07\xff\xff\xc0'\
b'\x0f\xff\xff\xc0\x0f\xff\xff\xc0\x1f\xff\xff\xe0\x1f\xff\xff\xe0'\
b'\x1f\x80\x07\xe0\x3f\x80\x07\xf0\x3f\x00\x03\xf0\x3f\x00\x03\xf0'\
b'\x7f\x00\x03\xf8\x7e\x00\x03\xf8\x7e\x00\x01\xf8\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x1f\xff\xfc\x00\x1f\xff'\
b'\xff\x00\x1f\xff\xff\x80\x1f\xff\xff\xc0\x1f\xff\xff\xe0\x1f\xc0'\
b'\x1f\xe0\x1f\xc0\x0f\xf0\x1f\xc0\x07\xf0\x1f\xc0\x07\xf0\x1f\xc0'\
b'\x07\xf0\x1f\xc0\x07\xf0\x1f\xc0\x0f\xe0\x1f\xc0\x1f\xc0\x1f\xff'\
b'\xff\x80\x1f\xff\xff\x00\x1f\xff\xff\x00\x1f\xff\xff\xc0\x1f\xff'\
b'\xff\xe0\x1f\xc0\x0f\xf0\x1f\xc0\x07\xf0\x1f\xc0\x03\xf8\x1f\xc0'\
b'\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x07\xf8\x1f\xc0'\
b'\x0f\xf0\x1f\xff\xff\xf0\x1f\xff\xff\xe0\x1f\xff\xff\xc0\x1f\xff'\
b'\xff\x80\x1f\xff\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00'\
b'\x00\x0f\xf8\x00\x00\x3f\xff\x00\x00\xff\xff\x80\x01\xff\xff\xc0'\
b'\x03\xff\xff\xe0\x07\xfc\x1f\xf0\x0f\xf0\x07\xf8\x0f\xe0\x03\xf8'\
b'\x1f\xe0\x03\xf8\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc\x3f\xc0\x00\x00'\
b'\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00'\
b'\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00'\
b'\x3f\x80\x00\x00\x3f\xc0\x00\x00\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc'\
b'\x1f\xe0\x03\xfc\x0f\xe0\x03\xf8\x0f\xf0\x07\xf8\x07\xfc\x1f\xf0'\
b'\x03\xff\xff\xf0\x01\xff\xff\xe0\x00\xff\xff\x80\x00\x3f\xff\x00'\
b'\x00\x0f\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\x00\x00\x1f\xff'\
b'\xf8\x00\x1f\xff\xfe\x00\x1f\xff\xff\x80\x1f\xff\xff\xc0\x1f\xff'\
b'\xff\xe0\x1f\xc0\x3f\xe0\x1f\xc0\x0f\xf0\x1f\xc0\x07\xf8\x1f\xc0'\
b'\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x01\xfc\x1f\xc0'\
b'\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0'\
b'\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0\x01\xfc\x1f\xc0'\
b'\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x03\xf8\x1f\xc0\x07\xf8\x1f\xc0'\
b'\x0f\xf0\x1f\xc0\x3f\xe0\x1f\xff\xff\xe0\x1f\xff\xff\xc0\x1f\xff'\
b'\xff\x80\x1f\xff\xfe\x00\x1f\xff\xf0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x1d\x00\x00\x00\x00\x00\x1f\xff\xff\x80\x1f\xff\xff\x80'\
b'\x1f\xff\xff\x80\x1f\xff\xff\x80\x1f\xff\xff\x80\x1f\xc0\x00\x00'\
b'\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00'\
b'\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xff\xff\x00'\
b'\x1f\xff\xff\x00\x1f\xff\xff\x00\x1f\xff\xff\x00\x1f\xff\xff\x00'\
b'\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00'\
b'\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00'\
b'\x1f\xff\xff\xc0\x1f\xff\xff\xc0\x1f\xff\xff\xc0\x1f\xff\xff\xc0'\
b'\x1f\xff\xff\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x00\x00\x00'\
b'\x00\x00\x1f\xff\xff\x80\x1f\xff\xff\x80\x1f\xff\xff\x80\x1f\xff'\
b'\xff\x80\x1f\xff\xff\x80\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0'\
b'\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0'\
b'\x00\x00\x1f\xc0\x00\x00\x1f\xff\xfe\x00\x1f\xff\xfe\x00\x1f\xff'\
b'\xfe\x00\x1f\xff\xfe\x00\x1f\xff\xfe\x00\x1f\xc0\x00\x00\x1f\xc0'\
b'\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0'\
b'\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0'\
b'\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x1f\xc0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x21\x00\x00\x07\xfc\x00\x00\x00\x3f\xff'\
b'\x80\x00\x00\x7f\xff\xc0\x00\x01\xff\xff\xf0\x00\x03\xff\xff\xf8'\
b'\x00\x07\xfe\x07\xf8\x00\x07\xf8\x01\xfc\x00\x0f\xf0\x00\xfc\x00'\
b'\x0f\xe0\x00\xfe\x00\x1f\xc0\x00\x7e\x00\x1f\xc0\x00\x00\x00\x1f'\
b'\xc0\x00\x00\x00\x3f\x80\x00\x00\x00\x3f\x80\x00\x00\x00\x3f\x80'\
b'\x00\x00\x00\x3f\x80\x1f\xfe\x00\x3f\x80\x1f\xfe\x00\x3f\x80\x1f'\
b'\xfe\x00\x3f\x80\x1f\xfe\x00\x3f\x80\x1f\xfe\x00\x3f\x80\x00\x7e'\
b'\x00\x1f\xc0\x00\x7e\x00\x1f\xc0\x00\x7e\x00\x1f\xe0\x00\x7e\x00'\
b'\x0f\xe0\x00\xfe\x00\x0f\xf0\x01\xfe\x00\x07\xf8\x03\xfe\x00\x03'\
b'\xfe\x0f\xfe\x00\x03\xff\xff\xfe\x00\x01\xff\xff\xbe\x00\x00\x7f'\
b'\xff\x1e\x00\x00\x3f\xfe\x1e\x00\x00\x07\xf8\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xc0\x03\xff\xf0\x07\xff\xf8'\
b'\x0f\xff\xfc\x1f\xff\xfe\x1f\xc1\xfe\x1f\xc0\xfe\x1f\x80\xfe\x00'\
b'\x00\xfe\x00\x07\xfe\x00\xff\xfe\x07\xff\xfe\x0f\xff\xfe\x1f\xf0'\
b'\xfe\x3f\xc0\xfe\x3f\x80\xfe\x3f\x80\xfe\x3f\x81\xfe\x3f\xc3\xfe'\
b'\x3f\xff\xfe\x1f\xff\xfe\x0f\xfe\xff\x07\xfc\xff\x01\xf0\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x00\x00\x00\x00\x00'\
b'\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80'\
b'\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80\x00\x00\x3f\x80'\
b'\x00\x7e\x3f\x80\x01\xff\x3f\x80\x03\xff\xbf\x80\x07\xff\xff\x80'\
b'\x0f\xff\xff\x80\x0f\xf1\xff\x80\x1f\xe0\xff\x80\x1f\xc0\x7f\x80'\
b'\x3f\xc0\x7f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80'\
b'\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\xc0\x7f\x80'\
b'\x1f\xc0\x7f\x80\x1f\xe0\xff\x80\x1f\xf1\xff\x80\x0f\xff\xff\x80'\
b'\x07\xff\xff\x80\x03\xff\xbf\x80\x01\xff\x3f\x80\x00\x7e\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7e\x3f\x80\x01\xff'\
b'\x3f\x80\x03\xff\xbf\x80\x07\xff\xff\x80\x0f\xff\xff\x80\x0f\xf1'\
b'\xff\x80\x1f\xe0\xff\x80\x1f\xc0\x7f\x80\x3f\xc0\x7f\x80\x3f\x80'\
b'\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80'\
b'\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x3f\x80\x1f\xc0\x7f\x80\x1f\xc0'\
b'\xff\x80\x1f\xf1\xff\x80\x0f\xff\xff\x80\x07\xff\xff\x80\x07\xff'\
b'\xbf\x80\x01\xff\x3f\x80\x00\x7e\x3f\x80\x00\x00\x3f\x80\x00\x00'\
b'\x3f\x80\x00\x00\x7f\x80\x1f\xc0\x7f\x00\x1f\xe0\xff\x00\x1f\xff'\
b'\xfe\x00\x0f\xff\xfc\x00\x03\xff\xf8\x00\x00\xff\xc0\x00\x0c\x00'\
b'\x00\x00\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x00\x00\x00\x00'\
b'\x00\x00\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0'\
b'\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0'\
b'\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0\x1f\xc0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x26\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x1f\xc7\xe0\x3f\x00\x1f\xdf\xf8\xff\xc0\x1f\xff\xfd'\
b'\xff\xe0\x1f\xff\xff\xff\xf0\x1f\xff\xff\xff\xf8\x1f\xf1\xff\x87'\
b'\xf8\x1f\xe0\xff\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8'\
b'\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f'\
b'\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0'\
b'\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe'\
b'\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03\xf8\x1f\xc0\xfe\x03'\
b'\xf8\x1f\xc0\xfe\x03\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\xc0'\
b'\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0'\
b'\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0'\
b'\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0'\
b'\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0\x3f\x80\x1f\xc0'\
b'\x3f\x80\x1f\xe0\x7f\x80\x1f\xf0\xff\x80\x0f\xff\xff\x80\x0f\xff'\
b'\xff\x80\x07\xff\xbf\x80\x03\xff\x3f\x80\x00\xfc\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00'
_index =\
b'\x00\x00\xaa\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x2a\x01\x00\x00\xaa\x01\x00\x00\x2a\x02\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\x02'\
b'\x54\x03\xfe\x03\xa8\x04\x52\x05\xfc\x05\xa6\x06\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7a\x07'\
b'\x00\x00\x00\x00\xfa\x07\x00\x00\x00\x00\xa4\x08\x00\x00\x4e\x09'\
b'\x00\x00\x00\x00\x00\x00\xa4\x09\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x78\x0a\x22\x0b'
# Zero-copy views over the glyph bitmap blob and its per-character index.
_mvfont = memoryview(_font)
_mvi = memoryview(_index)
# Decode a little-endian unsigned 16-bit integer from a byte sequence.
ifb = lambda l : l[0] | (l[1] << 8)

def get_ch(ch):
    """Look up the glyph for character *ch*.

    Returns (buffer, height, width): a memoryview over the glyph's row
    bitmap, the fixed height 42, and the glyph's pixel width.
    """
    oc = ord(ch)
    # Index entries are 2 bytes each; slot 0 is the fallback glyph used
    # for characters outside the supported range 35..117.
    ioff = 2 * (oc - 35 + 1) if oc >= 35 and oc <= 117 else 0
    doff = ifb(_mvi[ioff : ])
    width = ifb(_mvfont[doff : ])
    # Each of the 42 rows occupies ceil(width / 8) bytes; the first two
    # bytes at doff hold the width itself.
    next_offs = doff + 2 + ((width - 1)//8 + 1) * 42
    return _mvfont[doff + 2:next_offs], 42, width
|
en
| 0.307526
|
# Code generated by font_to_py.py. # Font: FreeSansBold.ttf Char set: #357ABCDEFGadgimu # Cmd: ./font_to_py.py ttf/FreeSansBold.ttf 40 -x -c ABCDEFGm753#dimaug font_big.py
| 2.086221
| 2
|
grundzeug/config/providers/common.py
|
nickguletskii/grundzeug
| 1
|
6626547
|
<reponame>nickguletskii/grundzeug<gh_stars>1-10
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from pathlib import Path
from typing import TextIO, Union, Any
from typing_extensions import Literal
from grundzeug.config.common import Configurable, ConfigPathT, MISSING, CanonicalConfigPathT
class ConfigurationProvider:
"""
ConfigurationProviders are queried when configuration values are being resolved.
"""
@abstractmethod
def get_value(self, path: CanonicalConfigPathT) -> Union[Any, Literal[MISSING]]:
"""
:param path: The requested config path (key).
:return: The requested config value, or :py:const:`~grundzeug.config.common.MISSING` if this provider does \
not have the requested config path (key).
"""
raise NotImplementedError()
class TextParserConfigurationProviderMixin:
"""
A helper mixin that allows instantiating ConfigurationProviders using files and TextIO.
Requires the implementing class to have a constructor that takes a single string as an argument.
"""
@classmethod
def from_string(cls, string: str) -> ConfigurationProvider:
"""
Construct the :py:class:`~grundzeug.config.providers.common.ConfigurationProvider` from a string that can
be directly translated to a configuration hierarchy.
:param string: The string that will be passed into the class's constructor.
:return: An instance of the class this method was called on.
"""
if not issubclass(cls, ConfigurationProvider):
raise TypeError(f"Classes that extend TextParserConfigurationProviderMixin should also implement "
f"grundzeug.config.providers.common.ConfigurationProvider")
return cls(string) # type: ignore
@classmethod
def from_file(cls, path: Path, encoding=None, errors=None) -> ConfigurationProvider:
"""
Construct the :py:class:`~grundzeug.config.providers.common.ConfigurationProvider` by reading the file at the
specified path and passing the contents into the class's constructor.
Uses :py:meth:`~pathlib.Path.read_text` behind the scenes.
:param path: The file to read.
:param encoding: See :py:func:`~builtins.open` for more details.
:param errors: See :py:func:`~builtins.open` for more details.
:return: An instance of the class this method was called on.
"""
return cls.from_string(path.read_text(encoding=encoding, errors=errors))
@classmethod
def read(cls, io: TextIO) -> ConfigurationProvider:
"""
Construct the :py:class:`~grundzeug.config.providers.common.ConfigurationProvider` by reading the specified
instance of :py:class:`~typing.TextIO` (obtained using :py:func:`~builtins.open` or similar) and passing the
contents into the class's constructor.
:param io: the instance of :py:class:`~typing.TextIO` to read and pass into the class's constructor.
:return: An instance of the class this method was called on.
"""
return cls.from_string(io.read())
class DictTreeConfigurationProvider(ConfigurationProvider):
def __init__(self, root: dict):
"""
A base class for ConfigurationProviders which can represent their contents as an immutable nested dictionary.
:param root: The root dictionary. A configuration value with the configuration key ("foo", "bar", "baz") will
be resolved by indexing ``root`` as follows: ``root["foo"]["bar"]["baz"]``.
"""
self._dict = root
def set_value(
self,
reference: Union[ConfigPathT, Configurable, Any],
value
):
if isinstance(reference, Configurable):
reference = reference.configurable_metadata.full_path
reference: ConfigPathT = reference
current_dictionary = self._dict
for i, name in enumerate(reference[:-1]):
if name not in current_dictionary:
current_dictionary[name] = {}
if not isinstance(current_dictionary[name], dict):
raise KeyError(f"Could not set the value for configuration key {reference} because the entry at "
f"{tuple(reference[:i + 1])} is not a dictionary.")
current_dictionary = current_dictionary[name]
current_dictionary[reference[-1]] = value
def get_value(self, path: ConfigPathT):
cur = self._dict
for x in path:
if x not in cur:
return MISSING
cur = cur[x]
return cur
__all__ = ["ConfigurationProvider", "TextParserConfigurationProviderMixin", "DictTreeConfigurationProvider"]
|
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from pathlib import Path
from typing import TextIO, Union, Any
from typing_extensions import Literal
from grundzeug.config.common import Configurable, ConfigPathT, MISSING, CanonicalConfigPathT
class ConfigurationProvider:
"""
ConfigurationProviders are queried when configuration values are being resolved.
"""
@abstractmethod
def get_value(self, path: CanonicalConfigPathT) -> Union[Any, Literal[MISSING]]:
"""
:param path: The requested config path (key).
:return: The requested config value, or :py:const:`~grundzeug.config.common.MISSING` if this provider does \
not have the requested config path (key).
"""
raise NotImplementedError()
class TextParserConfigurationProviderMixin:
"""
A helper mixin that allows instantiating ConfigurationProviders using files and TextIO.
Requires the implementing class to have a constructor that takes a single string as an argument.
"""
@classmethod
def from_string(cls, string: str) -> ConfigurationProvider:
"""
Construct the :py:class:`~grundzeug.config.providers.common.ConfigurationProvider` from a string that can
be directly translated to a configuration hierarchy.
:param string: The string that will be passed into the class's constructor.
:return: An instance of the class this method was called on.
"""
if not issubclass(cls, ConfigurationProvider):
raise TypeError(f"Classes that extend TextParserConfigurationProviderMixin should also implement "
f"grundzeug.config.providers.common.ConfigurationProvider")
return cls(string) # type: ignore
@classmethod
def from_file(cls, path: Path, encoding=None, errors=None) -> ConfigurationProvider:
"""
Construct the :py:class:`~grundzeug.config.providers.common.ConfigurationProvider` by reading the file at the
specified path and passing the contents into the class's constructor.
Uses :py:meth:`~pathlib.Path.read_text` behind the scenes.
:param path: The file to read.
:param encoding: See :py:func:`~builtins.open` for more details.
:param errors: See :py:func:`~builtins.open` for more details.
:return: An instance of the class this method was called on.
"""
return cls.from_string(path.read_text(encoding=encoding, errors=errors))
@classmethod
def read(cls, io: TextIO) -> ConfigurationProvider:
"""
Construct the :py:class:`~grundzeug.config.providers.common.ConfigurationProvider` by reading the specified
instance of :py:class:`~typing.TextIO` (obtained using :py:func:`~builtins.open` or similar) and passing the
contents into the class's constructor.
:param io: the instance of :py:class:`~typing.TextIO` to read and pass into the class's constructor.
:return: An instance of the class this method was called on.
"""
return cls.from_string(io.read())
class DictTreeConfigurationProvider(ConfigurationProvider):
def __init__(self, root: dict):
"""
A base class for ConfigurationProviders which can represent their contents as an immutable nested dictionary.
:param root: The root dictionary. A configuration value with the configuration key ("foo", "bar", "baz") will
be resolved by indexing ``root`` as follows: ``root["foo"]["bar"]["baz"]``.
"""
self._dict = root
def set_value(
self,
reference: Union[ConfigPathT, Configurable, Any],
value
):
if isinstance(reference, Configurable):
reference = reference.configurable_metadata.full_path
reference: ConfigPathT = reference
current_dictionary = self._dict
for i, name in enumerate(reference[:-1]):
if name not in current_dictionary:
current_dictionary[name] = {}
if not isinstance(current_dictionary[name], dict):
raise KeyError(f"Could not set the value for configuration key {reference} because the entry at "
f"{tuple(reference[:i + 1])} is not a dictionary.")
current_dictionary = current_dictionary[name]
current_dictionary[reference[-1]] = value
def get_value(self, path: ConfigPathT):
cur = self._dict
for x in path:
if x not in cur:
return MISSING
cur = cur[x]
return cur
__all__ = ["ConfigurationProvider", "TextParserConfigurationProviderMixin", "DictTreeConfigurationProvider"]
|
en
| 0.785472
|
# Copyright 2019 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ConfigurationProviders are queried when configuration values are being resolved. :param path: The requested config path (key). :return: The requested config value, or :py:const:`~grundzeug.config.common.MISSING` if this provider does \ not have the requested config path (key). A helper mixin that allows instantiating ConfigurationProviders using files and TextIO. Requires the implementing class to have a constructor that takes a single string as an argument. Construct the :py:class:`~grundzeug.config.providers.common.ConfigurationProvider` from a string that can be directly translated to a configuration hierarchy. :param string: The string that will be passed into the class's constructor. :return: An instance of the class this method was called on. # type: ignore Construct the :py:class:`~grundzeug.config.providers.common.ConfigurationProvider` by reading the file at the specified path and passing the contents into the class's constructor. Uses :py:meth:`~pathlib.Path.read_text` behind the scenes. :param path: The file to read. :param encoding: See :py:func:`~builtins.open` for more details. :param errors: See :py:func:`~builtins.open` for more details. :return: An instance of the class this method was called on. 
Construct the :py:class:`~grundzeug.config.providers.common.ConfigurationProvider` by reading the specified instance of :py:class:`~typing.TextIO` (obtained using :py:func:`~builtins.open` or similar) and passing the contents into the class's constructor. :param io: the instance of :py:class:`~typing.TextIO` to read and pass into the class's constructor. :return: An instance of the class this method was called on. A base class for ConfigurationProviders which can represent their contents as an immutable nested dictionary. :param root: The root dictionary. A configuration value with the configuration key ("foo", "bar", "baz") will be resolved by indexing ``root`` as follows: ``root["foo"]["bar"]["baz"]``.
| 2.233624
| 2
|
fhirclient/models/element.py
|
carolinarsm/client-py
| 418
|
6626548
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Element) on 2019-05-07.
# 2019, SMART Health IT.
from . import fhirabstractbase
class Element(fhirabstractbase.FHIRAbstractBase):
""" Base for all elements.
Base definition for all elements in a resource.
"""
resource_type = "Element"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.extension = None
""" Additional content defined by implementations.
List of `Extension` items (represented as `dict` in JSON). """
self.id = None
""" Unique id for inter-element referencing.
Type `str`. """
super(Element, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Element, self).elementProperties()
from . import extension
js.extend([
("extension", "extension", extension.Extension, True, None, False),
("id", "id", str, False, None, False),
])
return js
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Element) on 2019-05-07.
# 2019, SMART Health IT.
from . import fhirabstractbase
class Element(fhirabstractbase.FHIRAbstractBase):
""" Base for all elements.
Base definition for all elements in a resource.
"""
resource_type = "Element"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.extension = None
""" Additional content defined by implementations.
List of `Extension` items (represented as `dict` in JSON). """
self.id = None
""" Unique id for inter-element referencing.
Type `str`. """
super(Element, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Element, self).elementProperties()
from . import extension
js.extend([
("extension", "extension", extension.Extension, True, None, False),
("id", "id", str, False, None, False),
])
return js
|
en
| 0.675397
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Element) on 2019-05-07. # 2019, SMART Health IT. Base for all elements. Base definition for all elements in a resource. Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError Additional content defined by implementations. List of `Extension` items (represented as `dict` in JSON). Unique id for inter-element referencing. Type `str`.
| 2.33533
| 2
|
addons/source-python/plugins/warcraft/races/__init__.py
|
ThomasVieth/WCS-Remastered
| 0
|
6626549
|
<reponame>ThomasVieth/WCS-Remastered<gh_stars>0
"""
"""
## python imports
from configobj import ConfigObj
from glob import glob
from os.path import dirname, basename, isfile
## source.python imports
from engines.server import insert_command_string
from paths import CUSTOM_DATA_PATH
from messages import SayText2
## warcraft.package imports
from warcraft.race import Race
from warcraft.skill import Skill
from warcraft.utility import classproperty
## __all__ declaration
modules = glob(dirname(__file__) + '/*')
__all__ = ["ini_races", "load_ini_races"]
for f in modules:
f_basename = basename(f)
if isfile(f):
__all__.append(f_basename[:-3])
elif f_basename != "races":
__all__.append(f_basename)
## generate functions
def name_to_classname(name):
return "INI_" + name.replace(" ", "").replace("-", "_")
def generate_is_available(required_level):
def _func(cls, player):
return player.total_level >= required_level
return _func
def generate_description(description):
def _func(cls):
return description
return _func
def generate_requirement_string(required_level):
def _func(cls):
return f"TL {required_level}"
return _func
def generate_requirement_sort_key(required_level):
def _func(cls):
return required_level
return _func
## load ini races
with open(CUSTOM_DATA_PATH / "warcraft" / "events.txt", "r") as fo:
_event_options = list(
filter(lambda x: not x.startswith(' '),
map(lambda x: x.rstrip("\n"),
fo.readlines()
)
)
)
def make_skill_callback(fire_targets, fire_object):
targets = fire_targets.split(",")
events = list()
clientcommands = list()
for target in targets:
fire_type, name = target.split(':')
if fire_type == "clientcommand":
clientcommands.append(name)
elif fire_type == "event" and not name in _event_options:
raise ValueError(f"Cannot register skill callback for event ({name}). Please read the docs.")
elif fire_type == "event":
events.append(name)
else:
raise ValueError(f"Unsupported fire_type supplied in INI race ({fire_type}).")
def _func(self, *args, **kwargs):
if self.level == 0:
return
if _func._chance and _func._chance > randint(0, 101):
return
kwargs["value"] = self.values[self.level - 1]
for command in _func._commands:
insert_command_string(command.format(**kwargs))
_func._chance = fire_object.get("chance", None)
_func._commands = fire_object["cmd"].split(";")
_func._events = events
_func._clientcommands = clientcommands
return _func
def make_skill_class(skillname, skill_object):
## Gather information.
required_level = skill_object.as_int("required_level")
maximum_level = skill_object.as_int("maximum_level")
description = skill_object["description"]
values = skill_object["values"]
keyvalues = {
"max_level": maximum_level,
"is_available":
classmethod(generate_is_available(required_level)),
"description":
classproperty(generate_description(description)),
"name":
classproperty(lambda cls: skillname),
"values": values
}
for num, fire_targets in enumerate(skill_object.sections):
skill_method = make_skill_callback(fire_targets, skill_object[fire_targets])
ini_funcs.append(skill_method)
keyvalues[f"func{num}"] = skill_method
## Construct the skill class.
new_skill_class = type(
name_to_classname(skillname), ## Classname.
(Skill, ), ## Skill class to inherit from.
keyvalues
)
ini_skills.append(new_skill_class)
return new_skill_class
def make_race_class(racename, race_object):
## Gather information.
required_level = race_object.as_int("required_level")
maximum_level = race_object.as_int("maximum_level")
author = race_object["author"]
description = race_object["description"]
## Construct the race class.
new_race_class = type(
name_to_classname(racename), ## Classname.
(Race, ), ## Race class to inherit from.
{
"max_level": maximum_level,
"is_available":
classmethod(generate_is_available(required_level)),
"description":
classproperty(generate_description(description)),
"requirement_string":
classproperty(generate_requirement_string(required_level)),
"requirement_sort_key":
classproperty(generate_requirement_sort_key(required_level)),
"name":
classproperty(lambda cls: racename)
}
)
for skillname in race_object.sections:
new_skill_class = make_skill_class(skillname, race_object[skillname])
new_race_class.add_skill(new_skill_class)
ini_races.append(new_race_class)
return new_race_class
def create_race_classes(races_object):
for racename, race_object in races_object.items():
race_class = make_race_class(racename, race_object)
ini_races = list()
ini_skills = list()
ini_funcs = list()
def load_ini_races():
create_race_classes(
ConfigObj(CUSTOM_DATA_PATH / "warcraft" / "races.ini")
)
|
"""
"""
## python imports
from configobj import ConfigObj
from glob import glob
from os.path import dirname, basename, isfile
## source.python imports
from engines.server import insert_command_string
from paths import CUSTOM_DATA_PATH
from messages import SayText2
## warcraft.package imports
from warcraft.race import Race
from warcraft.skill import Skill
from warcraft.utility import classproperty
## __all__ declaration
modules = glob(dirname(__file__) + '/*')
__all__ = ["ini_races", "load_ini_races"]
for f in modules:
f_basename = basename(f)
if isfile(f):
__all__.append(f_basename[:-3])
elif f_basename != "races":
__all__.append(f_basename)
## generate functions
def name_to_classname(name):
return "INI_" + name.replace(" ", "").replace("-", "_")
def generate_is_available(required_level):
def _func(cls, player):
return player.total_level >= required_level
return _func
def generate_description(description):
def _func(cls):
return description
return _func
def generate_requirement_string(required_level):
def _func(cls):
return f"TL {required_level}"
return _func
def generate_requirement_sort_key(required_level):
def _func(cls):
return required_level
return _func
## load ini races
with open(CUSTOM_DATA_PATH / "warcraft" / "events.txt", "r") as fo:
_event_options = list(
filter(lambda x: not x.startswith(' '),
map(lambda x: x.rstrip("\n"),
fo.readlines()
)
)
)
def make_skill_callback(fire_targets, fire_object):
targets = fire_targets.split(",")
events = list()
clientcommands = list()
for target in targets:
fire_type, name = target.split(':')
if fire_type == "clientcommand":
clientcommands.append(name)
elif fire_type == "event" and not name in _event_options:
raise ValueError(f"Cannot register skill callback for event ({name}). Please read the docs.")
elif fire_type == "event":
events.append(name)
else:
raise ValueError(f"Unsupported fire_type supplied in INI race ({fire_type}).")
def _func(self, *args, **kwargs):
if self.level == 0:
return
if _func._chance and _func._chance > randint(0, 101):
return
kwargs["value"] = self.values[self.level - 1]
for command in _func._commands:
insert_command_string(command.format(**kwargs))
_func._chance = fire_object.get("chance", None)
_func._commands = fire_object["cmd"].split(";")
_func._events = events
_func._clientcommands = clientcommands
return _func
def make_skill_class(skillname, skill_object):
## Gather information.
required_level = skill_object.as_int("required_level")
maximum_level = skill_object.as_int("maximum_level")
description = skill_object["description"]
values = skill_object["values"]
keyvalues = {
"max_level": maximum_level,
"is_available":
classmethod(generate_is_available(required_level)),
"description":
classproperty(generate_description(description)),
"name":
classproperty(lambda cls: skillname),
"values": values
}
for num, fire_targets in enumerate(skill_object.sections):
skill_method = make_skill_callback(fire_targets, skill_object[fire_targets])
ini_funcs.append(skill_method)
keyvalues[f"func{num}"] = skill_method
## Construct the skill class.
new_skill_class = type(
name_to_classname(skillname), ## Classname.
(Skill, ), ## Skill class to inherit from.
keyvalues
)
ini_skills.append(new_skill_class)
return new_skill_class
def make_race_class(racename, race_object):
## Gather information.
required_level = race_object.as_int("required_level")
maximum_level = race_object.as_int("maximum_level")
author = race_object["author"]
description = race_object["description"]
## Construct the race class.
new_race_class = type(
name_to_classname(racename), ## Classname.
(Race, ), ## Race class to inherit from.
{
"max_level": maximum_level,
"is_available":
classmethod(generate_is_available(required_level)),
"description":
classproperty(generate_description(description)),
"requirement_string":
classproperty(generate_requirement_string(required_level)),
"requirement_sort_key":
classproperty(generate_requirement_sort_key(required_level)),
"name":
classproperty(lambda cls: racename)
}
)
for skillname in race_object.sections:
new_skill_class = make_skill_class(skillname, race_object[skillname])
new_race_class.add_skill(new_skill_class)
ini_races.append(new_race_class)
return new_race_class
def create_race_classes(races_object):
for racename, race_object in races_object.items():
race_class = make_race_class(racename, race_object)
ini_races = list()
ini_skills = list()
ini_funcs = list()
def load_ini_races():
create_race_classes(
ConfigObj(CUSTOM_DATA_PATH / "warcraft" / "races.ini")
)
|
en
| 0.373041
|
## python imports ## source.python imports ## warcraft.package imports ## __all__ declaration ## generate functions ## load ini races ## Gather information. ## Construct the skill class. ## Classname. ## Skill class to inherit from. ## Gather information. ## Construct the race class. ## Classname. ## Race class to inherit from.
| 2.385298
| 2
|
tests/python/unittest/test_lang_target.py
|
kpot/tvm
| 0
|
6626550
|
import tvm
@tvm.target.generic_func
def mygeneric(data):
# default generic function
return data + 1
@mygeneric.register(["cuda", "gpu"])
def cuda_func(data):
return data + 2
@mygeneric.register("rocm")
def rocm_func(data):
return data + 3
@mygeneric.register("cpu")
def rocm_func(data):
return data + 10
def test_target_dispatch():
with tvm.target.cuda():
assert mygeneric(1) == 3
with tvm.target.rocm():
assert mygeneric(1) == 4
with tvm.target.create("cuda"):
assert mygeneric(1) == 3
with tvm.target.rasp():
assert mygeneric(1) == 11
with tvm.target.create("metal"):
assert mygeneric(1) == 3
assert tvm.target.current_target() == None
def test_target_string_parse():
target = tvm.target.create("cuda -libs=cublas,cudnn")
assert target.target_name == "cuda"
assert target.options == ['-libs=cublas,cudnn']
assert target.keys == ['cuda', 'gpu']
assert target.libs == ['cublas', 'cudnn']
if __name__ == "__main__":
test_target_dispatch()
test_target_string_parse()
|
import tvm
@tvm.target.generic_func
def mygeneric(data):
# default generic function
return data + 1
@mygeneric.register(["cuda", "gpu"])
def cuda_func(data):
return data + 2
@mygeneric.register("rocm")
def rocm_func(data):
return data + 3
@mygeneric.register("cpu")
def rocm_func(data):
return data + 10
def test_target_dispatch():
with tvm.target.cuda():
assert mygeneric(1) == 3
with tvm.target.rocm():
assert mygeneric(1) == 4
with tvm.target.create("cuda"):
assert mygeneric(1) == 3
with tvm.target.rasp():
assert mygeneric(1) == 11
with tvm.target.create("metal"):
assert mygeneric(1) == 3
assert tvm.target.current_target() == None
def test_target_string_parse():
target = tvm.target.create("cuda -libs=cublas,cudnn")
assert target.target_name == "cuda"
assert target.options == ['-libs=cublas,cudnn']
assert target.keys == ['cuda', 'gpu']
assert target.libs == ['cublas', 'cudnn']
if __name__ == "__main__":
test_target_dispatch()
test_target_string_parse()
|
en
| 0.037051
|
# default generic function
| 2.27106
| 2
|