body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
@property
def z(self):
"\n Sets the aggregation data.\n\n The 'z' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n "
return self['z']
| 989,078,158,267,727,900
|
Sets the aggregation data.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
|
packages/python/plotly/plotly/graph_objs/_histogram2dcontour.py
|
z
|
labaran1/plotly.py
|
python
|
@property
def z(self):
"\n Sets the aggregation data.\n\n The 'z' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n "
return self['z']
|
@property
def zauto(self):
"\n Determines whether or not the color domain is computed with\n respect to the input data (here in `z`) or the bounds set in\n `zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`\n are set by the user.\n\n The 'zauto' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n "
return self['zauto']
| 1,912,562,263,594,213,400
|
Determines whether or not the color domain is computed with
respect to the input data (here in `z`) or the bounds set in
`zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`
are set by the user.
The 'zauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
|
packages/python/plotly/plotly/graph_objs/_histogram2dcontour.py
|
zauto
|
labaran1/plotly.py
|
python
|
@property
def zauto(self):
"\n Determines whether or not the color domain is computed with\n respect to the input data (here in `z`) or the bounds set in\n `zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`\n are set by the user.\n\n The 'zauto' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n "
return self['zauto']
|
@property
def zhoverformat(self):
"\n Sets the hover text formatting rulefor `z` using d3 formatting\n mini-languages which are very similar to those in Python. For\n numbers, see:\n https://github.com/d3/d3-format/tree/v1.4.5#d3-format.By\n default the values are formatted using generic number format.\n\n The 'zhoverformat' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n "
return self['zhoverformat']
| -2,554,998,917,856,491,500
|
Sets the hover text formatting rulefor `z` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.By
default the values are formatted using generic number format.
The 'zhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
|
packages/python/plotly/plotly/graph_objs/_histogram2dcontour.py
|
zhoverformat
|
labaran1/plotly.py
|
python
|
@property
def zhoverformat(self):
"\n Sets the hover text formatting rulefor `z` using d3 formatting\n mini-languages which are very similar to those in Python. For\n numbers, see:\n https://github.com/d3/d3-format/tree/v1.4.5#d3-format.By\n default the values are formatted using generic number format.\n\n The 'zhoverformat' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n "
return self['zhoverformat']
|
@property
def zmax(self):
"\n Sets the upper bound of the color domain. Value should have the\n same units as in `z` and if set, `zmin` must be set as well.\n\n The 'zmax' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n "
return self['zmax']
| -230,201,867,242,881,380
|
Sets the upper bound of the color domain. Value should have the
same units as in `z` and if set, `zmin` must be set as well.
The 'zmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
|
packages/python/plotly/plotly/graph_objs/_histogram2dcontour.py
|
zmax
|
labaran1/plotly.py
|
python
|
@property
def zmax(self):
"\n Sets the upper bound of the color domain. Value should have the\n same units as in `z` and if set, `zmin` must be set as well.\n\n The 'zmax' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n "
return self['zmax']
|
@property
def zmid(self):
"\n Sets the mid-point of the color domain by scaling `zmin` and/or\n `zmax` to be equidistant to this point. Value should have the\n same units as in `z`. Has no effect when `zauto` is `false`.\n\n The 'zmid' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n "
return self['zmid']
| 8,848,989,567,395,846,000
|
Sets the mid-point of the color domain by scaling `zmin` and/or
`zmax` to be equidistant to this point. Value should have the
same units as in `z`. Has no effect when `zauto` is `false`.
The 'zmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
|
packages/python/plotly/plotly/graph_objs/_histogram2dcontour.py
|
zmid
|
labaran1/plotly.py
|
python
|
@property
def zmid(self):
"\n Sets the mid-point of the color domain by scaling `zmin` and/or\n `zmax` to be equidistant to this point. Value should have the\n same units as in `z`. Has no effect when `zauto` is `false`.\n\n The 'zmid' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n "
return self['zmid']
|
@property
def zmin(self):
"\n Sets the lower bound of the color domain. Value should have the\n same units as in `z` and if set, `zmax` must be set as well.\n\n The 'zmin' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n "
return self['zmin']
| -7,033,344,151,506,177,000
|
Sets the lower bound of the color domain. Value should have the
same units as in `z` and if set, `zmax` must be set as well.
The 'zmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
|
packages/python/plotly/plotly/graph_objs/_histogram2dcontour.py
|
zmin
|
labaran1/plotly.py
|
python
|
@property
def zmin(self):
"\n Sets the lower bound of the color domain. Value should have the\n same units as in `z` and if set, `zmax` must be set as well.\n\n The 'zmin' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n "
return self['zmin']
|
@property
def zsrc(self):
"\n Sets the source reference on Chart Studio Cloud for `z`.\n\n The 'zsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['zsrc']
| 3,882,254,053,371,198,500
|
Sets the source reference on Chart Studio Cloud for `z`.
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
|
packages/python/plotly/plotly/graph_objs/_histogram2dcontour.py
|
zsrc
|
labaran1/plotly.py
|
python
|
@property
def zsrc(self):
"\n Sets the source reference on Chart Studio Cloud for `z`.\n\n The 'zsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['zsrc']
|
def __init__(self, arg=None, autobinx=None, autobiny=None, autocolorscale=None, autocontour=None, bingroup=None, coloraxis=None, colorbar=None, colorscale=None, contours=None, customdata=None, customdatasrc=None, histfunc=None, histnorm=None, hoverinfo=None, hoverinfosrc=None, hoverlabel=None, hovertemplate=None, hovertemplatesrc=None, ids=None, idssrc=None, legendgroup=None, legendgrouptitle=None, legendrank=None, line=None, marker=None, meta=None, metasrc=None, name=None, nbinsx=None, nbinsy=None, ncontours=None, opacity=None, reversescale=None, showlegend=None, showscale=None, stream=None, textfont=None, texttemplate=None, uid=None, uirevision=None, visible=None, x=None, xaxis=None, xbingroup=None, xbins=None, xcalendar=None, xhoverformat=None, xsrc=None, y=None, yaxis=None, ybingroup=None, ybins=None, ycalendar=None, yhoverformat=None, ysrc=None, z=None, zauto=None, zhoverformat=None, zmax=None, zmid=None, zmin=None, zsrc=None, **kwargs):
'\n Construct a new Histogram2dContour object\n\n The sample data from which statistics are computed is set in\n `x` and `y` (where `x` and `y` represent marginal\n distributions, binning is set in `xbins` and `ybins` in this\n case) or `z` (where `z` represent the 2D distribution and\n binning set, binning is set by `x` and `y` in this case). The\n resulting distribution is visualized as a contour plot.\n\n Parameters\n ----------\n arg\n dict of properties compatible with this constructor or\n an instance of\n :class:`plotly.graph_objs.Histogram2dContour`\n autobinx\n Obsolete: since v1.42 each bin attribute is auto-\n determined separately and `autobinx` is not needed.\n However, we accept `autobinx: true` or `false` and will\n update `xbins` accordingly before deleting `autobinx`\n from the trace.\n autobiny\n Obsolete: since v1.42 each bin attribute is auto-\n determined separately and `autobiny` is not needed.\n However, we accept `autobiny: true` or `false` and will\n update `ybins` accordingly before deleting `autobiny`\n from the trace.\n autocolorscale\n Determines whether the colorscale is a default palette\n (`autocolorscale: true`) or the palette determined by\n `colorscale`. In case `colorscale` is unspecified or\n `autocolorscale` is true, the default palette will be\n chosen according to whether numbers in the `color`\n array are all positive, all negative or mixed.\n autocontour\n Determines whether or not the contour level attributes\n are picked by an algorithm. If True, the number of\n contour levels can be set in `ncontours`. If False, set\n the contour level attributes in `contours`.\n bingroup\n Set the `xbingroup` and `ybingroup` default prefix For\n example, setting a `bingroup` of 1 on two histogram2d\n traces will make them their x-bins and y-bins match\n separately.\n coloraxis\n Sets a reference to a shared color axis. References to\n these shared color axes are "coloraxis", "coloraxis2",\n "coloraxis3", etc. 
Settings for these shared color axes\n are set in the layout, under `layout.coloraxis`,\n `layout.coloraxis2`, etc. Note that multiple color\n scales can be linked to the same color axis.\n colorbar\n :class:`plotly.graph_objects.histogram2dcontour.ColorBa\n r` instance or dict with compatible properties\n colorscale\n Sets the colorscale. The colorscale must be an array\n containing arrays mapping a normalized value to an rgb,\n rgba, hex, hsl, hsv, or named color string. At minimum,\n a mapping for the lowest (0) and highest (1) values are\n required. For example, `[[0, \'rgb(0,0,255)\'], [1,\n \'rgb(255,0,0)\']]`. To control the bounds of the\n colorscale in color space, use `zmin` and `zmax`.\n Alternatively, `colorscale` may be a palette name\n string of the following list: Blackbody,Bluered,Blues,C\n ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl\n and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.\n contours\n :class:`plotly.graph_objects.histogram2dcontour.Contour\n s` instance or dict with compatible properties\n customdata\n Assigns extra data each datum. This may be useful when\n listening to hover, click and selection events. Note\n that, "scatter" traces also appends customdata items in\n the markers DOM elements\n customdatasrc\n Sets the source reference on Chart Studio Cloud for\n `customdata`.\n histfunc\n Specifies the binning function used for this histogram\n trace. If "count", the histogram values are computed by\n counting the number of values lying inside each bin. If\n "sum", "avg", "min", "max", the histogram values are\n computed using the sum, the average, the minimum or the\n maximum of the values lying inside each bin\n respectively.\n histnorm\n Specifies the type of normalization used for this\n histogram trace. If "", the span of each bar\n corresponds to the number of occurrences (i.e. the\n number of data points lying inside the bins). 
If\n "percent" / "probability", the span of each bar\n corresponds to the percentage / fraction of occurrences\n with respect to the total number of sample points\n (here, the sum of all bin HEIGHTS equals 100% / 1). If\n "density", the span of each bar corresponds to the\n number of occurrences in a bin divided by the size of\n the bin interval (here, the sum of all bin AREAS equals\n the total number of sample points). If *probability\n density*, the area of each bar corresponds to the\n probability that an event will fall into the\n corresponding bin (here, the sum of all bin AREAS\n equals 1).\n hoverinfo\n Determines which trace information appear on hover. If\n `none` or `skip` are set, no information is displayed\n upon hovering. But, if `none` is set, click and hover\n events are still fired.\n hoverinfosrc\n Sets the source reference on Chart Studio Cloud for\n `hoverinfo`.\n hoverlabel\n :class:`plotly.graph_objects.histogram2dcontour.Hoverla\n bel` instance or dict with compatible properties\n hovertemplate\n Template string used for rendering the information that\n appear on hover box. Note that this will override\n `hoverinfo`. Variables are inserted using %{variable},\n for example "y: %{y}" as well as %{xother}, {%_xother},\n {%_xother_}, {%xother_}. When showing info for several\n points, "xother" will be added to those with different\n x positions from the first point. An underscore before\n or after "(x|y)other" will add a space on that side,\n only when this field is shown. Numbers are formatted\n using d3-format\'s syntax %{variable:d3-format}, for\n example "Price: %{y:$.2f}".\n https://github.com/d3/d3-format/tree/v1.4.5#d3-format\n for details on the formatting syntax. Dates are\n formatted using d3-time-format\'s syntax\n %{variable|d3-time-format}, for example "Day:\n %{2019-01-01|%A}". https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format for details on the\n date formatting syntax. 
The variables available in\n `hovertemplate` are the ones emitted as event data\n described at this link\n https://plotly.com/javascript/plotlyjs-events/#event-\n data. Additionally, every attributes that can be\n specified per-point (the ones that are `arrayOk: true`)\n are available. variable `z` Anything contained in tag\n `<extra>` is displayed in the secondary box, for\n example "<extra>{fullData.name}</extra>". To hide the\n secondary box completely, use an empty tag\n `<extra></extra>`.\n hovertemplatesrc\n Sets the source reference on Chart Studio Cloud for\n `hovertemplate`.\n ids\n Assigns id labels to each datum. These ids for object\n constancy of data points during animation. Should be an\n array of strings, not numbers or any other type.\n idssrc\n Sets the source reference on Chart Studio Cloud for\n `ids`.\n legendgroup\n Sets the legend group for this trace. Traces part of\n the same legend group hide/show at the same time when\n toggling legend items.\n legendgrouptitle\n :class:`plotly.graph_objects.histogram2dcontour.Legendg\n rouptitle` instance or dict with compatible properties\n legendrank\n Sets the legend rank for this trace. Items and groups\n with smaller ranks are presented on top/left side while\n with `*reversed* `legend.traceorder` they are on\n bottom/right side. 
The default legendrank is 1000, so\n that you can use ranks less than 1000 to place certain\n items before all unranked items, and ranks greater than\n 1000 to go after all unranked items.\n line\n :class:`plotly.graph_objects.histogram2dcontour.Line`\n instance or dict with compatible properties\n marker\n :class:`plotly.graph_objects.histogram2dcontour.Marker`\n instance or dict with compatible properties\n meta\n Assigns extra meta information associated with this\n trace that can be used in various text attributes.\n Attributes such as trace `name`, graph, axis and\n colorbar `title.text`, annotation `text`\n `rangeselector`, `updatemenues` and `sliders` `label`\n text all support `meta`. To access the trace `meta`\n values in an attribute in the same trace, simply use\n `%{meta[i]}` where `i` is the index or key of the\n `meta` item in question. To access trace `meta` in\n layout attributes, use `%{data[n[.meta[i]}` where `i`\n is the index or key of the `meta` and `n` is the trace\n index.\n metasrc\n Sets the source reference on Chart Studio Cloud for\n `meta`.\n name\n Sets the trace name. The trace name appear as the\n legend item and on hover.\n nbinsx\n Specifies the maximum number of desired bins. This\n value will be used in an algorithm that will decide the\n optimal bin size such that the histogram best\n visualizes the distribution of the data. Ignored if\n `xbins.size` is provided.\n nbinsy\n Specifies the maximum number of desired bins. This\n value will be used in an algorithm that will decide the\n optimal bin size such that the histogram best\n visualizes the distribution of the data. Ignored if\n `ybins.size` is provided.\n ncontours\n Sets the maximum number of contour levels. The actual\n number of contours will be chosen automatically to be\n less than or equal to the value of `ncontours`. 
Has an\n effect only if `autocontour` is True or if\n `contours.size` is missing.\n opacity\n Sets the opacity of the trace.\n reversescale\n Reverses the color mapping if true. If true, `zmin`\n will correspond to the last color in the array and\n `zmax` will correspond to the first color.\n showlegend\n Determines whether or not an item corresponding to this\n trace is shown in the legend.\n showscale\n Determines whether or not a colorbar is displayed for\n this trace.\n stream\n :class:`plotly.graph_objects.histogram2dcontour.Stream`\n instance or dict with compatible properties\n textfont\n For this trace it only has an effect if `coloring` is\n set to "heatmap". Sets the text font.\n texttemplate\n For this trace it only has an effect if `coloring` is\n set to "heatmap". Template string used for rendering\n the information text that appear on points. Note that\n this will override `textinfo`. Variables are inserted\n using %{variable}, for example "y: %{y}". Numbers are\n formatted using d3-format\'s syntax\n %{variable:d3-format}, for example "Price: %{y:$.2f}".\n https://github.com/d3/d3-format/tree/v1.4.5#d3-format\n for details on the formatting syntax. Dates are\n formatted using d3-time-format\'s syntax\n %{variable|d3-time-format}, for example "Day:\n %{2019-01-01|%A}". https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format for details on the\n date formatting syntax. Every attributes that can be\n specified per-point (the ones that are `arrayOk: true`)\n are available. variables `x`, `y`, `z` and `text`.\n uid\n Assign an id to this trace, Use this to provide object\n constancy between traces during animations and\n transitions.\n uirevision\n Controls persistence of some user-driven changes to the\n trace: `constraintrange` in `parcoords` traces, as well\n as some `editable: true` modifications such as `name`\n and `colorbar.title`. 
Defaults to `layout.uirevision`.\n Note that other user-driven trace attribute changes are\n controlled by `layout` attributes: `trace.visible` is\n controlled by `layout.legend.uirevision`,\n `selectedpoints` is controlled by\n `layout.selectionrevision`, and `colorbar.(x|y)`\n (accessible with `config: {editable: true}`) is\n controlled by `layout.editrevision`. Trace changes are\n tracked by `uid`, which only falls back on trace index\n if no `uid` is provided. So if your app can add/remove\n traces before the end of the `data` array, such that\n the same trace has a different index, you can still\n preserve user-driven changes if you give each trace a\n `uid` that stays with it as it moves.\n visible\n Determines whether or not this trace is visible. If\n "legendonly", the trace is not drawn, but can appear as\n a legend item (provided that the legend itself is\n visible).\n x\n Sets the sample data to be binned on the x axis.\n xaxis\n Sets a reference between this trace\'s x coordinates and\n a 2D cartesian x axis. If "x" (the default value), the\n x coordinates refer to `layout.xaxis`. If "x2", the x\n coordinates refer to `layout.xaxis2`, and so on.\n xbingroup\n Set a group of histogram traces which will have\n compatible x-bin settings. Using `xbingroup`,\n histogram2d and histogram2dcontour traces (on axes of\n the same axis type) can have compatible x-bin settings.\n Note that the same `xbingroup` value can be used to set\n (1D) histogram `bingroup`\n xbins\n :class:`plotly.graph_objects.histogram2dcontour.XBins`\n instance or dict with compatible properties\n xcalendar\n Sets the calendar system to use with `x` date data.\n xhoverformat\n Sets the hover text formatting rulefor `x` using d3\n formatting mini-languages which are very similar to\n those in Python. For numbers, see:\n https://github.com/d3/d3-format/tree/v1.4.5#d3-format.\n And for dates see: https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format. 
We add two items to\n d3\'s date formatter: "%h" for half of the year as a\n decimal number as well as "%{n}f" for fractional\n seconds with n digits. For example, *2016-10-13\n 09:15:23.456* with tickformat "%H~%M~%S.%2f" would\n display *09~15~23.46*By default the values are\n formatted using `xaxis.hoverformat`.\n xsrc\n Sets the source reference on Chart Studio Cloud for\n `x`.\n y\n Sets the sample data to be binned on the y axis.\n yaxis\n Sets a reference between this trace\'s y coordinates and\n a 2D cartesian y axis. If "y" (the default value), the\n y coordinates refer to `layout.yaxis`. If "y2", the y\n coordinates refer to `layout.yaxis2`, and so on.\n ybingroup\n Set a group of histogram traces which will have\n compatible y-bin settings. Using `ybingroup`,\n histogram2d and histogram2dcontour traces (on axes of\n the same axis type) can have compatible y-bin settings.\n Note that the same `ybingroup` value can be used to set\n (1D) histogram `bingroup`\n ybins\n :class:`plotly.graph_objects.histogram2dcontour.YBins`\n instance or dict with compatible properties\n ycalendar\n Sets the calendar system to use with `y` date data.\n yhoverformat\n Sets the hover text formatting rulefor `y` using d3\n formatting mini-languages which are very similar to\n those in Python. For numbers, see:\n https://github.com/d3/d3-format/tree/v1.4.5#d3-format.\n And for dates see: https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format. We add two items to\n d3\'s date formatter: "%h" for half of the year as a\n decimal number as well as "%{n}f" for fractional\n seconds with n digits. 
For example, *2016-10-13\n 09:15:23.456* with tickformat "%H~%M~%S.%2f" would\n display *09~15~23.46*By default the values are\n formatted using `yaxis.hoverformat`.\n ysrc\n Sets the source reference on Chart Studio Cloud for\n `y`.\n z\n Sets the aggregation data.\n zauto\n Determines whether or not the color domain is computed\n with respect to the input data (here in `z`) or the\n bounds set in `zmin` and `zmax` Defaults to `false`\n when `zmin` and `zmax` are set by the user.\n zhoverformat\n Sets the hover text formatting rulefor `z` using d3\n formatting mini-languages which are very similar to\n those in Python. For numbers, see: https://github.com/d\n 3/d3-format/tree/v1.4.5#d3-format.By default the values\n are formatted using generic number format.\n zmax\n Sets the upper bound of the color domain. Value should\n have the same units as in `z` and if set, `zmin` must\n be set as well.\n zmid\n Sets the mid-point of the color domain by scaling\n `zmin` and/or `zmax` to be equidistant to this point.\n Value should have the same units as in `z`. Has no\n effect when `zauto` is `false`.\n zmin\n Sets the lower bound of the color domain. Value should\n have the same units as in `z` and if set, `zmax` must\n be set as well.\n zsrc\n Sets the source reference on Chart Studio Cloud for\n `z`.\n\n Returns\n -------\n Histogram2dContour\n '
super(Histogram2dContour, self).__init__('histogram2dcontour')
if ('_parent' in kwargs):
self._parent = kwargs['_parent']
return
if (arg is None):
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError('The first argument to the plotly.graph_objs.Histogram2dContour\nconstructor must be a dict or\nan instance of :class:`plotly.graph_objs.Histogram2dContour`')
self._skip_invalid = kwargs.pop('skip_invalid', False)
self._validate = kwargs.pop('_validate', True)
_v = arg.pop('autobinx', None)
_v = (autobinx if (autobinx is not None) else _v)
if (_v is not None):
self['autobinx'] = _v
_v = arg.pop('autobiny', None)
_v = (autobiny if (autobiny is not None) else _v)
if (_v is not None):
self['autobiny'] = _v
_v = arg.pop('autocolorscale', None)
_v = (autocolorscale if (autocolorscale is not None) else _v)
if (_v is not None):
self['autocolorscale'] = _v
_v = arg.pop('autocontour', None)
_v = (autocontour if (autocontour is not None) else _v)
if (_v is not None):
self['autocontour'] = _v
_v = arg.pop('bingroup', None)
_v = (bingroup if (bingroup is not None) else _v)
if (_v is not None):
self['bingroup'] = _v
_v = arg.pop('coloraxis', None)
_v = (coloraxis if (coloraxis is not None) else _v)
if (_v is not None):
self['coloraxis'] = _v
_v = arg.pop('colorbar', None)
_v = (colorbar if (colorbar is not None) else _v)
if (_v is not None):
self['colorbar'] = _v
_v = arg.pop('colorscale', None)
_v = (colorscale if (colorscale is not None) else _v)
if (_v is not None):
self['colorscale'] = _v
_v = arg.pop('contours', None)
_v = (contours if (contours is not None) else _v)
if (_v is not None):
self['contours'] = _v
_v = arg.pop('customdata', None)
_v = (customdata if (customdata is not None) else _v)
if (_v is not None):
self['customdata'] = _v
_v = arg.pop('customdatasrc', None)
_v = (customdatasrc if (customdatasrc is not None) else _v)
if (_v is not None):
self['customdatasrc'] = _v
_v = arg.pop('histfunc', None)
_v = (histfunc if (histfunc is not None) else _v)
if (_v is not None):
self['histfunc'] = _v
_v = arg.pop('histnorm', None)
_v = (histnorm if (histnorm is not None) else _v)
if (_v is not None):
self['histnorm'] = _v
_v = arg.pop('hoverinfo', None)
_v = (hoverinfo if (hoverinfo is not None) else _v)
if (_v is not None):
self['hoverinfo'] = _v
_v = arg.pop('hoverinfosrc', None)
_v = (hoverinfosrc if (hoverinfosrc is not None) else _v)
if (_v is not None):
self['hoverinfosrc'] = _v
_v = arg.pop('hoverlabel', None)
_v = (hoverlabel if (hoverlabel is not None) else _v)
if (_v is not None):
self['hoverlabel'] = _v
_v = arg.pop('hovertemplate', None)
_v = (hovertemplate if (hovertemplate is not None) else _v)
if (_v is not None):
self['hovertemplate'] = _v
_v = arg.pop('hovertemplatesrc', None)
_v = (hovertemplatesrc if (hovertemplatesrc is not None) else _v)
if (_v is not None):
self['hovertemplatesrc'] = _v
_v = arg.pop('ids', None)
_v = (ids if (ids is not None) else _v)
if (_v is not None):
self['ids'] = _v
_v = arg.pop('idssrc', None)
_v = (idssrc if (idssrc is not None) else _v)
if (_v is not None):
self['idssrc'] = _v
_v = arg.pop('legendgroup', None)
_v = (legendgroup if (legendgroup is not None) else _v)
if (_v is not None):
self['legendgroup'] = _v
_v = arg.pop('legendgrouptitle', None)
_v = (legendgrouptitle if (legendgrouptitle is not None) else _v)
if (_v is not None):
self['legendgrouptitle'] = _v
_v = arg.pop('legendrank', None)
_v = (legendrank if (legendrank is not None) else _v)
if (_v is not None):
self['legendrank'] = _v
_v = arg.pop('line', None)
_v = (line if (line is not None) else _v)
if (_v is not None):
self['line'] = _v
_v = arg.pop('marker', None)
_v = (marker if (marker is not None) else _v)
if (_v is not None):
self['marker'] = _v
_v = arg.pop('meta', None)
_v = (meta if (meta is not None) else _v)
if (_v is not None):
self['meta'] = _v
_v = arg.pop('metasrc', None)
_v = (metasrc if (metasrc is not None) else _v)
if (_v is not None):
self['metasrc'] = _v
_v = arg.pop('name', None)
_v = (name if (name is not None) else _v)
if (_v is not None):
self['name'] = _v
_v = arg.pop('nbinsx', None)
_v = (nbinsx if (nbinsx is not None) else _v)
if (_v is not None):
self['nbinsx'] = _v
_v = arg.pop('nbinsy', None)
_v = (nbinsy if (nbinsy is not None) else _v)
if (_v is not None):
self['nbinsy'] = _v
_v = arg.pop('ncontours', None)
_v = (ncontours if (ncontours is not None) else _v)
if (_v is not None):
self['ncontours'] = _v
_v = arg.pop('opacity', None)
_v = (opacity if (opacity is not None) else _v)
if (_v is not None):
self['opacity'] = _v
_v = arg.pop('reversescale', None)
_v = (reversescale if (reversescale is not None) else _v)
if (_v is not None):
self['reversescale'] = _v
_v = arg.pop('showlegend', None)
_v = (showlegend if (showlegend is not None) else _v)
if (_v is not None):
self['showlegend'] = _v
_v = arg.pop('showscale', None)
_v = (showscale if (showscale is not None) else _v)
if (_v is not None):
self['showscale'] = _v
_v = arg.pop('stream', None)
_v = (stream if (stream is not None) else _v)
if (_v is not None):
self['stream'] = _v
_v = arg.pop('textfont', None)
_v = (textfont if (textfont is not None) else _v)
if (_v is not None):
self['textfont'] = _v
_v = arg.pop('texttemplate', None)
_v = (texttemplate if (texttemplate is not None) else _v)
if (_v is not None):
self['texttemplate'] = _v
_v = arg.pop('uid', None)
_v = (uid if (uid is not None) else _v)
if (_v is not None):
self['uid'] = _v
_v = arg.pop('uirevision', None)
_v = (uirevision if (uirevision is not None) else _v)
if (_v is not None):
self['uirevision'] = _v
_v = arg.pop('visible', None)
_v = (visible if (visible is not None) else _v)
if (_v is not None):
self['visible'] = _v
_v = arg.pop('x', None)
_v = (x if (x is not None) else _v)
if (_v is not None):
self['x'] = _v
_v = arg.pop('xaxis', None)
_v = (xaxis if (xaxis is not None) else _v)
if (_v is not None):
self['xaxis'] = _v
_v = arg.pop('xbingroup', None)
_v = (xbingroup if (xbingroup is not None) else _v)
if (_v is not None):
self['xbingroup'] = _v
_v = arg.pop('xbins', None)
_v = (xbins if (xbins is not None) else _v)
if (_v is not None):
self['xbins'] = _v
_v = arg.pop('xcalendar', None)
_v = (xcalendar if (xcalendar is not None) else _v)
if (_v is not None):
self['xcalendar'] = _v
_v = arg.pop('xhoverformat', None)
_v = (xhoverformat if (xhoverformat is not None) else _v)
if (_v is not None):
self['xhoverformat'] = _v
_v = arg.pop('xsrc', None)
_v = (xsrc if (xsrc is not None) else _v)
if (_v is not None):
self['xsrc'] = _v
_v = arg.pop('y', None)
_v = (y if (y is not None) else _v)
if (_v is not None):
self['y'] = _v
_v = arg.pop('yaxis', None)
_v = (yaxis if (yaxis is not None) else _v)
if (_v is not None):
self['yaxis'] = _v
_v = arg.pop('ybingroup', None)
_v = (ybingroup if (ybingroup is not None) else _v)
if (_v is not None):
self['ybingroup'] = _v
_v = arg.pop('ybins', None)
_v = (ybins if (ybins is not None) else _v)
if (_v is not None):
self['ybins'] = _v
_v = arg.pop('ycalendar', None)
_v = (ycalendar if (ycalendar is not None) else _v)
if (_v is not None):
self['ycalendar'] = _v
_v = arg.pop('yhoverformat', None)
_v = (yhoverformat if (yhoverformat is not None) else _v)
if (_v is not None):
self['yhoverformat'] = _v
_v = arg.pop('ysrc', None)
_v = (ysrc if (ysrc is not None) else _v)
if (_v is not None):
self['ysrc'] = _v
_v = arg.pop('z', None)
_v = (z if (z is not None) else _v)
if (_v is not None):
self['z'] = _v
_v = arg.pop('zauto', None)
_v = (zauto if (zauto is not None) else _v)
if (_v is not None):
self['zauto'] = _v
_v = arg.pop('zhoverformat', None)
_v = (zhoverformat if (zhoverformat is not None) else _v)
if (_v is not None):
self['zhoverformat'] = _v
_v = arg.pop('zmax', None)
_v = (zmax if (zmax is not None) else _v)
if (_v is not None):
self['zmax'] = _v
_v = arg.pop('zmid', None)
_v = (zmid if (zmid is not None) else _v)
if (_v is not None):
self['zmid'] = _v
_v = arg.pop('zmin', None)
_v = (zmin if (zmin is not None) else _v)
if (_v is not None):
self['zmin'] = _v
_v = arg.pop('zsrc', None)
_v = (zsrc if (zsrc is not None) else _v)
if (_v is not None):
self['zsrc'] = _v
self._props['type'] = 'histogram2dcontour'
arg.pop('type', None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| -2,595,120,491,532,694,500
|
Construct a new Histogram2dContour object
The sample data from which statistics are computed is set in
`x` and `y` (where `x` and `y` represent marginal
distributions, binning is set in `xbins` and `ybins` in this
case) or `z` (where `z` represent the 2D distribution and
binning set, binning is set by `x` and `y` in this case). The
resulting distribution is visualized as a contour plot.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.Histogram2dContour`
autobinx
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobinx` is not needed.
However, we accept `autobinx: true` or `false` and will
update `xbins` accordingly before deleting `autobinx`
from the trace.
autobiny
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobiny` is not needed.
However, we accept `autobiny: true` or `false` and will
update `ybins` accordingly before deleting `autobiny`
from the trace.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
autocontour
Determines whether or not the contour level attributes
are picked by an algorithm. If True, the number of
contour levels can be set in `ncontours`. If False, set
the contour level attributes in `contours`.
bingroup
Set the `xbingroup` and `ybingroup` default prefix For
example, setting a `bingroup` of 1 on two histogram2d
traces will make them their x-bins and y-bins match
separately.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.histogram2dcontour.ColorBa
r` instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
contours
:class:`plotly.graph_objects.histogram2dcontour.Contour
s` instance or dict with compatible properties
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
histfunc
Specifies the binning function used for this histogram
trace. If "count", the histogram values are computed by
counting the number of values lying inside each bin. If
"sum", "avg", "min", "max", the histogram values are
computed using the sum, the average, the minimum or the
maximum of the values lying inside each bin
respectively.
histnorm
Specifies the type of normalization used for this
histogram trace. If "", the span of each bar
corresponds to the number of occurrences (i.e. the
number of data points lying inside the bins). If
"percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences
with respect to the total number of sample points
(here, the sum of all bin HEIGHTS equals 100% / 1). If
"density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of
the bin interval (here, the sum of all bin AREAS equals
the total number of sample points). If *probability
density*, the area of each bar corresponds to the
probability that an event will fall into the
corresponding bin (here, the sum of all bin AREAS
equals 1).
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.histogram2dcontour.Hoverla
bel` instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variable `z` Anything contained in tag
`<extra>` is displayed in the secondary box, for
example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.histogram2dcontour.Legendg
rouptitle` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with `*reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
line
:class:`plotly.graph_objects.histogram2dcontour.Line`
instance or dict with compatible properties
marker
:class:`plotly.graph_objects.histogram2dcontour.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appear as the
legend item and on hover.
nbinsx
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`xbins.size` is provided.
nbinsy
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`ybins.size` is provided.
ncontours
Sets the maximum number of contour levels. The actual
number of contours will be chosen automatically to be
less than or equal to the value of `ncontours`. Has an
effect only if `autocontour` is True or if
`contours.size` is missing.
opacity
Sets the opacity of the trace.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.histogram2dcontour.Stream`
instance or dict with compatible properties
textfont
For this trace it only has an effect if `coloring` is
set to "heatmap". Sets the text font.
texttemplate
For this trace it only has an effect if `coloring` is
set to "heatmap". Template string used for rendering
the information text that appear on points. Note that
this will override `textinfo`. Variables are inserted
using %{variable}, for example "y: %{y}". Numbers are
formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variables `x`, `y`, `z` and `text`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the sample data to be binned on the x axis.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xbingroup
Set a group of histogram traces which will have
compatible x-bin settings. Using `xbingroup`,
histogram2d and histogram2dcontour traces (on axes of
the same axis type) can have compatible x-bin settings.
Note that the same `xbingroup` value can be used to set
(1D) histogram `bingroup`
xbins
:class:`plotly.graph_objects.histogram2dcontour.XBins`
instance or dict with compatible properties
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the sample data to be binned on the y axis.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ybingroup
Set a group of histogram traces which will have
compatible y-bin settings. Using `ybingroup`,
histogram2d and histogram2dcontour traces (on axes of
the same axis type) can have compatible y-bin settings.
Note that the same `ybingroup` value can be used to set
(1D) histogram `bingroup`
ybins
:class:`plotly.graph_objects.histogram2dcontour.YBins`
instance or dict with compatible properties
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
z
Sets the aggregation data.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax` Defaults to `false`
when `zmin` and `zmax` are set by the user.
zhoverformat
Sets the hover text formatting rulefor `z` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see: https://github.com/d
3/d3-format/tree/v1.4.5#d3-format.By default the values
are formatted using generic number format.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
Returns
-------
Histogram2dContour
|
packages/python/plotly/plotly/graph_objs/_histogram2dcontour.py
|
__init__
|
labaran1/plotly.py
|
python
|
def __init__(self, arg=None, autobinx=None, autobiny=None, autocolorscale=None, autocontour=None, bingroup=None, coloraxis=None, colorbar=None, colorscale=None, contours=None, customdata=None, customdatasrc=None, histfunc=None, histnorm=None, hoverinfo=None, hoverinfosrc=None, hoverlabel=None, hovertemplate=None, hovertemplatesrc=None, ids=None, idssrc=None, legendgroup=None, legendgrouptitle=None, legendrank=None, line=None, marker=None, meta=None, metasrc=None, name=None, nbinsx=None, nbinsy=None, ncontours=None, opacity=None, reversescale=None, showlegend=None, showscale=None, stream=None, textfont=None, texttemplate=None, uid=None, uirevision=None, visible=None, x=None, xaxis=None, xbingroup=None, xbins=None, xcalendar=None, xhoverformat=None, xsrc=None, y=None, yaxis=None, ybingroup=None, ybins=None, ycalendar=None, yhoverformat=None, ysrc=None, z=None, zauto=None, zhoverformat=None, zmax=None, zmid=None, zmin=None, zsrc=None, **kwargs):
    """Construct a new Histogram2dContour trace.

    The sample data from which statistics are computed is set in `x` and
    `y` (marginal distributions; binning set via `xbins`/`ybins`) or in
    `z` (pre-aggregated 2D distribution; binning driven by `x`/`y`).
    The resulting distribution is visualized as a contour plot.

    Parameters
    ----------
    arg : dict or Histogram2dContour, optional
        A dict of properties compatible with this constructor, or an
        existing :class:`plotly.graph_objs.Histogram2dContour` instance
        to copy. Explicit keyword arguments below take precedence over
        the corresponding entries in ``arg``.
    autobinx, autobiny, ..., zsrc
        Trace properties; each keyword maps one-to-one onto the plotly
        property of the same name (see the plotly figure reference for
        histogram2dcontour for the meaning of each property).
    **kwargs
        Additional property/value pairs forwarded to the underlying
        property-processing machinery.

    Returns
    -------
    Histogram2dContour
    """
    super(Histogram2dContour, self).__init__('histogram2dcontour')
    # Internal fast path used when plotly builds child objects: adopt the
    # parent and skip all property processing.
    if '_parent' in kwargs:
        self._parent = kwargs['_parent']
        return
    # Normalize `arg` into a plain dict we can consume destructively.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError('The first argument to the plotly.graph_objs.Histogram2dContour\nconstructor must be a dict or\nan instance of :class:`plotly.graph_objs.Histogram2dContour`')
    self._skip_invalid = kwargs.pop('skip_invalid', False)
    self._validate = kwargs.pop('_validate', True)
    # For each property: the explicitly-passed keyword wins over the value
    # found in `arg`; a value is only assigned when it is not None. The
    # mapping preserves the original assignment order.
    explicit = {
        'autobinx': autobinx,
        'autobiny': autobiny,
        'autocolorscale': autocolorscale,
        'autocontour': autocontour,
        'bingroup': bingroup,
        'coloraxis': coloraxis,
        'colorbar': colorbar,
        'colorscale': colorscale,
        'contours': contours,
        'customdata': customdata,
        'customdatasrc': customdatasrc,
        'histfunc': histfunc,
        'histnorm': histnorm,
        'hoverinfo': hoverinfo,
        'hoverinfosrc': hoverinfosrc,
        'hoverlabel': hoverlabel,
        'hovertemplate': hovertemplate,
        'hovertemplatesrc': hovertemplatesrc,
        'ids': ids,
        'idssrc': idssrc,
        'legendgroup': legendgroup,
        'legendgrouptitle': legendgrouptitle,
        'legendrank': legendrank,
        'line': line,
        'marker': marker,
        'meta': meta,
        'metasrc': metasrc,
        'name': name,
        'nbinsx': nbinsx,
        'nbinsy': nbinsy,
        'ncontours': ncontours,
        'opacity': opacity,
        'reversescale': reversescale,
        'showlegend': showlegend,
        'showscale': showscale,
        'stream': stream,
        'textfont': textfont,
        'texttemplate': texttemplate,
        'uid': uid,
        'uirevision': uirevision,
        'visible': visible,
        'x': x,
        'xaxis': xaxis,
        'xbingroup': xbingroup,
        'xbins': xbins,
        'xcalendar': xcalendar,
        'xhoverformat': xhoverformat,
        'xsrc': xsrc,
        'y': y,
        'yaxis': yaxis,
        'ybingroup': ybingroup,
        'ybins': ybins,
        'ycalendar': ycalendar,
        'yhoverformat': yhoverformat,
        'ysrc': ysrc,
        'z': z,
        'zauto': zauto,
        'zhoverformat': zhoverformat,
        'zmax': zmax,
        'zmid': zmid,
        'zmin': zmin,
        'zsrc': zsrc,
    }
    for prop_name, explicit_value in explicit.items():
        # Always pop from `arg` so leftover keys fall through to
        # _process_kwargs below, exactly as the unrolled original did.
        popped_value = arg.pop(prop_name, None)
        value = explicit_value if explicit_value is not None else popped_value
        if value is not None:
            self[prop_name] = value
    # Pin the trace type and discard any user-supplied override of it.
    self._props['type'] = 'histogram2dcontour'
    arg.pop('type', None)
    # Remaining entries (unknown or extra properties) are validated here.
    self._process_kwargs(**dict(arg, **kwargs))
    self._skip_invalid = False
|
def get_db_dir():
    """Return the default directory for the SQLite database.

    Simply returns the module-level ``defaultdir`` constant defined above.

    :return: the default location for the SQLite database
    """
    return defaultdir
| -7,457,437,982,406,235,000
|
Just return the default dir listed above
:return: the default location for the sqllite database
|
taxon/config.py
|
get_db_dir
|
linsalrob/EdwardsLab
|
python
|
def get_db_dir():
    """Return the default directory for the SQLite database.

    Simply returns the module-level ``defaultdir`` constant defined above.

    :return: the default location for the SQLite database
    """
    return defaultdir
|
def compute_corr_mse_accel_gyro(self, exclude_col_names: list=[], accel_column_names: list=['accelerometer_x', 'accelerometer_y', 'accelerometer_z'], gyro_column_names: list=['gyroscope_y', 'gyroscope_x', 'gyroscope_z'], windowDuration: int=None, slideDuration: int=None, groupByColumnName: List[str]=[], startTime=None):
    """
    Compute pairwise correlation and mean squared error between the axes of
    the accelerometer and gyroscope signals, one row of features per window.

    Args:
        exclude_col_names list(str): name of the columns on which features should not be computed
        accel_column_names list(str): names of the three accelerometer axis columns
        gyro_column_names list(str): names of the three gyroscope axis columns
        windowDuration (int): duration of a window in seconds
        slideDuration (int): slide duration of a window
        groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
        startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC
            with which to start window intervals. First time of data will be used as startTime
            if none is provided.

    Returns:
        DataStream object with one row per window containing start/end times and
        the correlation/MSE features for each accel and gyro axis pair
    """
    feature_names = ['ax_ay_corr', 'ax_az_corr', 'ay_az_corr', 'gx_gy_corr', 'gx_gz_corr', 'gy_gz_corr', 'ax_ay_mse', 'ax_az_mse', 'ay_az_mse', 'gx_gy_mse', 'gx_gz_mse', 'gy_gz_mse']
    # BUG FIX: the original called exclude_col_names.extend(...), mutating the
    # shared mutable default argument (and any caller-supplied list) so repeated
    # calls accumulated duplicate column names. Build a fresh list instead.
    exclude_cols = list(exclude_col_names) + ['timestamp', 'localtime', 'user', 'version']
    # NOTE(review): this dropped frame is immediately shadowed by the
    # self.compute(...) result below, as in the original — kept for parity.
    data = self._data.drop(*exclude_cols)
    basic_schema = StructType([StructField('timestamp', TimestampType()), StructField('localtime', TimestampType()), StructField('user', StringType()), StructField('version', IntegerType()), StructField('start_time', TimestampType()), StructField('end_time', TimestampType())])
    features_list = []
    for fn in feature_names:
        features_list.append(StructField(fn, FloatType(), True))
    features_schema = StructType((basic_schema.fields + features_list))

    @pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
    def get_corr_mse_features_udf(df):
        # One window's worth of samples arrives as a pandas DataFrame; the
        # window's identity columns are taken from its first row.
        timestamp = df['timestamp'].iloc[0]
        localtime = df['localtime'].iloc[0]
        user = df['user'].iloc[0]
        version = df['version'].iloc[0]
        start_time = timestamp
        end_time = df['timestamp'].iloc[(- 1)]
        # Pairwise Pearson correlations between accelerometer axes.
        ax_ay_corr = df[accel_column_names[0]].corr(df[accel_column_names[1]])
        ax_az_corr = df[accel_column_names[0]].corr(df[accel_column_names[2]])
        ay_az_corr = df[accel_column_names[1]].corr(df[accel_column_names[2]])
        # Pairwise Pearson correlations between gyroscope axes.
        gx_gy_corr = df[gyro_column_names[0]].corr(df[gyro_column_names[1]])
        gx_gz_corr = df[gyro_column_names[0]].corr(df[gyro_column_names[2]])
        gy_gz_corr = df[gyro_column_names[1]].corr(df[gyro_column_names[2]])
        # Mean squared error between accelerometer axis pairs.
        ax_ay_mse = ((df[accel_column_names[0]] - df[accel_column_names[1]]) ** 2).mean()
        ax_az_mse = ((df[accel_column_names[0]] - df[accel_column_names[2]]) ** 2).mean()
        ay_az_mse = ((df[accel_column_names[1]] - df[accel_column_names[2]]) ** 2).mean()
        # BUG FIX: these three previously reused accel_column_names, so the
        # "gyro MSE" features silently duplicated the accel MSE values.
        gx_gy_mse = ((df[gyro_column_names[0]] - df[gyro_column_names[1]]) ** 2).mean()
        gx_gz_mse = ((df[gyro_column_names[0]] - df[gyro_column_names[2]]) ** 2).mean()
        gy_gz_mse = ((df[gyro_column_names[1]] - df[gyro_column_names[2]]) ** 2).mean()
        basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time, ax_ay_corr, ax_az_corr, ay_az_corr, gx_gy_corr, gx_gz_corr, gy_gz_corr, ax_ay_mse, ax_az_mse, ay_az_mse, gx_gy_mse, gx_gz_mse, gy_gz_mse]], columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time', 'ax_ay_corr', 'ax_az_corr', 'ay_az_corr', 'gx_gy_corr', 'gx_gz_corr', 'gy_gz_corr', 'ax_ay_mse', 'ax_az_mse', 'ay_az_mse', 'gx_gy_mse', 'gx_gz_mse', 'gy_gz_mse'])
        return basic_df
    data = self.compute(get_corr_mse_features_udf, windowDuration=windowDuration, slideDuration=slideDuration, groupByColumnName=groupByColumnName, startTime=startTime)
    return DataStream(data=data._data, metadata=Metadata())
| 3,432,307,631,600,157,000
|
Compute correlation and mean standard error of accel and gyro sensors
Args:
exclude_col_names list(str): name of the columns on which features should not be computed
accel_column_names list(str): name of accel data column
gyro_column_names list(str): name of gyro data column
windowDuration (int): duration of a window in seconds
slideDuration (int): slide duration of a window
groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
Returns:
DataStream object with all the existing data columns and FFT features
|
cerebralcortex/markers/brushing/features.py
|
compute_corr_mse_accel_gyro
|
MD2Korg/CerebralCortex-2.0
|
python
|
def compute_corr_mse_accel_gyro(self, exclude_col_names: list=[], accel_column_names: list=['accelerometer_x', 'accelerometer_y', 'accelerometer_z'], gyro_column_names: list=['gyroscope_y', 'gyroscope_x', 'gyroscope_z'], windowDuration: int=None, slideDuration: int=None, groupByColumnName: List[str]=[], startTime=None):
    '''
    Compute correlation and mean squared error of accel and gyro sensors.

    Args:
        exclude_col_names list(str): name of the columns on which features should not be computed
        accel_column_names list(str): name of accel data columns
        gyro_column_names list(str): name of gyro data columns
        windowDuration (int): duration of a window in seconds
        slideDuration (int): slide duration of a window
        groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
        startTime (datetime): offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. First time of data will be used as startTime if none is provided

    Returns:
        DataStream object with per-window correlation and MSE features
    '''
    feature_names = ['ax_ay_corr', 'ax_az_corr', 'ay_az_corr', 'gx_gy_corr', 'gx_gz_corr', 'gy_gz_corr', 'ax_ay_mse', 'ax_az_mse', 'ay_az_mse', 'gx_gy_mse', 'gx_gz_mse', 'gy_gz_mse']
    # Copy instead of extend() so the caller's list (and the mutable default) is never mutated across calls.
    exclude_col_names = list(exclude_col_names) + ['timestamp', 'localtime', 'user', 'version']
    data = self._data.drop(*exclude_col_names)
    basic_schema = StructType([StructField('timestamp', TimestampType()), StructField('localtime', TimestampType()), StructField('user', StringType()), StructField('version', IntegerType()), StructField('start_time', TimestampType()), StructField('end_time', TimestampType())])
    features_list = [StructField(fn, FloatType(), True) for fn in feature_names]
    features_schema = StructType(basic_schema.fields + features_list)

    @pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
    def get_corr_mse_features_udf(df):
        # Window bookkeeping values come from the first row of the group.
        timestamp = df['timestamp'].iloc[0]
        localtime = df['localtime'].iloc[0]
        user = df['user'].iloc[0]
        version = df['version'].iloc[0]
        start_time = timestamp
        end_time = df['timestamp'].iloc[-1]
        ax, ay, az = (df[c] for c in accel_column_names)
        gx, gy, gz = (df[c] for c in gyro_column_names)
        ax_ay_corr = ax.corr(ay)
        ax_az_corr = ax.corr(az)
        ay_az_corr = ay.corr(az)
        gx_gy_corr = gx.corr(gy)
        gx_gz_corr = gx.corr(gz)
        gy_gz_corr = gy.corr(gz)
        ax_ay_mse = ((ax - ay) ** 2).mean()
        ax_az_mse = ((ax - az) ** 2).mean()
        ay_az_mse = ((ay - az) ** 2).mean()
        # BUGFIX: the gyro MSEs were previously computed from the accel columns.
        gx_gy_mse = ((gx - gy) ** 2).mean()
        gx_gz_mse = ((gx - gz) ** 2).mean()
        gy_gz_mse = ((gy - gz) ** 2).mean()
        basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time, ax_ay_corr, ax_az_corr, ay_az_corr, gx_gy_corr, gx_gz_corr, gy_gz_corr, ax_ay_mse, ax_az_mse, ay_az_mse, gx_gy_mse, gx_gz_mse, gy_gz_mse]], columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time'] + feature_names)
        return basic_df
    data = self.compute(get_corr_mse_features_udf, windowDuration=windowDuration, slideDuration=slideDuration, groupByColumnName=groupByColumnName, startTime=startTime)
    return DataStream(data=data._data, metadata=Metadata())
|
def compute_fourier_features(self, exclude_col_names: list=[], feature_names=['fft_centroid', 'fft_spread', 'spectral_entropy', 'spectral_entropy_old', 'fft_flux', 'spectral_falloff'], windowDuration: int=None, slideDuration: int=None, groupByColumnName: List[str]=[], startTime=None):
    '''
    Transforms data from time domain to frequency domain.

    Args:
        exclude_col_names list(str): name of the columns on which features should not be computed
        feature_names list(str): names of the features. Supported features are fft_centroid, fft_spread, spectral_entropy, spectral_entropy_old, fft_flux, spectral_falloff
        windowDuration (int): duration of a window in seconds
        slideDuration (int): slide duration of a window
        groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
        startTime (datetime): offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. First time of data will be used as startTime if none is provided

    Returns:
        DataStream object with all the existing data columns and FFT features
    '''
    eps = 1e-08
    # Copy instead of extend() so the caller's list (and the mutable default) is never mutated across calls.
    exclude_col_names = list(exclude_col_names) + ['timestamp', 'localtime', 'user', 'version']
    data = self._data.drop(*exclude_col_names)
    df_column_names = data.columns
    basic_schema = StructType([StructField('timestamp', TimestampType()), StructField('localtime', TimestampType()), StructField('user', StringType()), StructField('version', IntegerType()), StructField('start_time', TimestampType()), StructField('end_time', TimestampType())])
    # One output column per (data column, feature) pair, e.g. accelerometer_x_fft_centroid.
    features_list = [StructField(cn + '_' + sf, FloatType(), True) for cn in df_column_names for sf in feature_names]
    features_schema = StructType(basic_schema.fields + features_list)

    def stSpectralCentroidAndSpread(X, fs):
        """Computes spectral centroid and spread of frame (given abs(FFT))."""
        ind = np.arange(1, len(X) + 1) * (fs / (2.0 * len(X)))
        Xt = X.copy()
        Xt = Xt / Xt.max()
        NUM = np.sum(ind * Xt)
        DEN = np.sum(Xt) + eps
        C = NUM / DEN
        S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)
        # Normalize both by the Nyquist frequency so they fall in [0, 1].
        C = C / (fs / 2.0)
        S = S / (fs / 2.0)
        return (C, S)

    def stSpectralFlux(X, Xprev):
        """Computes spectral flux between the current frame X and previous frame Xprev (abs(fft) inputs)."""
        sumX = np.sum(X + eps)
        sumPrevX = np.sum(Xprev + eps)
        return np.sum(((X / sumX) - (Xprev / sumPrevX)) ** 2)

    def stSpectralRollOff(X, c, fs):
        """Computes spectral roll-off: fraction of FFT bins containing fraction c of total energy."""
        totalEnergy = np.sum(X ** 2)
        fftLength = len(X)
        Thres = c * totalEnergy
        CumSum = np.cumsum(X ** 2) + eps
        [a] = np.nonzero(CumSum > Thres)
        return (np.float64(a[0]) / float(fftLength)) if len(a) > 0 else 0.0

    def stSpectralEntropy(X, numOfShortBlocks=10):
        """Computes the spectral entropy over numOfShortBlocks sub-windows."""
        L = len(X)
        Eol = np.sum(X ** 2)
        subWinLength = int(np.floor(L / numOfShortBlocks))
        if L != (subWinLength * numOfShortBlocks):
            # Trim the tail so the frame divides evenly into sub-windows.
            X = X[0:(subWinLength * numOfShortBlocks)]
        subWindows = X.reshape(subWinLength, numOfShortBlocks, order='F').copy()
        s = np.sum(subWindows ** 2, axis=0) / (Eol + eps)
        return -np.sum(s * np.log2(s + eps))

    def spectral_entropy(data, sampling_freq, bands=None):
        """PSD-based spectral entropy, optionally restricted to frequency bands."""
        psd = np.abs(np.fft.rfft(data)) ** 2
        psd /= np.sum(psd)
        if bands is None:
            power_per_band = psd[psd > 0]
        else:
            freqs = np.fft.rfftfreq(data.size, 1 / float(sampling_freq))
            bands = np.asarray(bands)
            freq_limits_low = np.concatenate([[0.0], bands])
            freq_limits_up = np.concatenate([bands, [np.Inf]])
            # BUGFIX: wrap in an ndarray; boolean-masking the former Python list raised TypeError.
            power_per_band = np.asarray([np.sum(psd[np.bitwise_and(freqs >= low, freqs < up)]) for (low, up) in zip(freq_limits_low, freq_limits_up)])
            power_per_band = power_per_band[power_per_band > 0]
        return -np.sum(power_per_band * np.log2(power_per_band))

    def fourier_features_pandas_udf(data, frequency: float=16.0):
        results = []
        # One-sided, length-normalized magnitude spectrum.
        X = abs(np.fft.fft(data))
        nFFT = int(len(X) / 2) + 1
        X = X[0:nFFT]
        X = X / len(X)
        # BUGFIX: original tested `('fft_centroid' or ...)` which is always truthy.
        if ('fft_centroid' in feature_names) or ('fft_spread' in feature_names):
            (C, S) = stSpectralCentroidAndSpread(X, frequency)
            if 'fft_centroid' in feature_names:
                results.append(C)
            if 'fft_spread' in feature_names:
                results.append(S)
        if 'spectral_entropy' in feature_names:
            results.append(stSpectralEntropy(X))
        if 'spectral_entropy_old' in feature_names:
            results.append(spectral_entropy(X, frequency))
        if 'fft_flux' in feature_names:
            results.append(stSpectralFlux(X, X.copy()))
        # BUGFIX: was misspelled 'spectral_folloff', so the roll-off feature was never
        # emitted and the result length mismatched the schema for the default features.
        if 'spectral_falloff' in feature_names:
            results.append(stSpectralRollOff(X, 0.9, frequency))
        return pd.Series(results)

    @pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
    def get_fft_features(df):
        timestamp = df['timestamp'].iloc[0]
        localtime = df['localtime'].iloc[0]
        user = df['user'].iloc[0]
        version = df['version'].iloc[0]
        start_time = timestamp
        end_time = df['timestamp'].iloc[-1]
        df.drop(exclude_col_names, axis=1, inplace=True)
        df_ff = df.apply(fourier_features_pandas_udf)
        df3 = df_ff.T
        pd.set_option('display.max_colwidth', -1)
        df3.columns = feature_names
        # Flatten to one row with <column>_<feature> output columns.
        output = df3.unstack().to_frame().sort_index(level=1).T
        output.columns = [f'{j}_{i}' for (i, j) in output.columns]
        basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time]], columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time'])
        return basic_df.assign(**output)

    return self.compute(get_fft_features, windowDuration=windowDuration, slideDuration=slideDuration, groupByColumnName=groupByColumnName, startTime=startTime)
| -5,459,656,174,276,873,000
|
Transforms data from time domain to frequency domain.
Args:
exclude_col_names list(str): name of the columns on which features should not be computed
feature_names list(str): names of the features. Supported features are fft_centroid, fft_spread, spectral_entropy, spectral_entropy_old, fft_flux, spectral_falloff
windowDuration (int): duration of a window in seconds
slideDuration (int): slide duration of a window
groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
Returns:
DataStream object with all the existing data columns and FFT features
|
cerebralcortex/markers/brushing/features.py
|
compute_fourier_features
|
MD2Korg/CerebralCortex-2.0
|
python
|
def compute_fourier_features(self, exclude_col_names: list=[], feature_names=['fft_centroid', 'fft_spread', 'spectral_entropy', 'spectral_entropy_old', 'fft_flux', 'spectral_falloff'], windowDuration: int=None, slideDuration: int=None, groupByColumnName: List[str]=[], startTime=None):
    '''
    Transforms data from time domain to frequency domain.

    Args:
        exclude_col_names list(str): name of the columns on which features should not be computed
        feature_names list(str): names of the features. Supported features are fft_centroid, fft_spread, spectral_entropy, spectral_entropy_old, fft_flux, spectral_falloff
        windowDuration (int): duration of a window in seconds
        slideDuration (int): slide duration of a window
        groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
        startTime (datetime): offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. First time of data will be used as startTime if none is provided

    Returns:
        DataStream object with all the existing data columns and FFT features
    '''
    eps = 1e-08
    # Copy instead of extend() so the caller's list (and the mutable default) is never mutated across calls.
    exclude_col_names = list(exclude_col_names) + ['timestamp', 'localtime', 'user', 'version']
    data = self._data.drop(*exclude_col_names)
    df_column_names = data.columns
    basic_schema = StructType([StructField('timestamp', TimestampType()), StructField('localtime', TimestampType()), StructField('user', StringType()), StructField('version', IntegerType()), StructField('start_time', TimestampType()), StructField('end_time', TimestampType())])
    # One output column per (data column, feature) pair, e.g. accelerometer_x_fft_centroid.
    features_list = [StructField(cn + '_' + sf, FloatType(), True) for cn in df_column_names for sf in feature_names]
    features_schema = StructType(basic_schema.fields + features_list)

    def stSpectralCentroidAndSpread(X, fs):
        """Computes spectral centroid and spread of frame (given abs(FFT))."""
        ind = np.arange(1, len(X) + 1) * (fs / (2.0 * len(X)))
        Xt = X.copy()
        Xt = Xt / Xt.max()
        NUM = np.sum(ind * Xt)
        DEN = np.sum(Xt) + eps
        C = NUM / DEN
        S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)
        # Normalize both by the Nyquist frequency so they fall in [0, 1].
        C = C / (fs / 2.0)
        S = S / (fs / 2.0)
        return (C, S)

    def stSpectralFlux(X, Xprev):
        """Computes spectral flux between the current frame X and previous frame Xprev (abs(fft) inputs)."""
        sumX = np.sum(X + eps)
        sumPrevX = np.sum(Xprev + eps)
        return np.sum(((X / sumX) - (Xprev / sumPrevX)) ** 2)

    def stSpectralRollOff(X, c, fs):
        """Computes spectral roll-off: fraction of FFT bins containing fraction c of total energy."""
        totalEnergy = np.sum(X ** 2)
        fftLength = len(X)
        Thres = c * totalEnergy
        CumSum = np.cumsum(X ** 2) + eps
        [a] = np.nonzero(CumSum > Thres)
        return (np.float64(a[0]) / float(fftLength)) if len(a) > 0 else 0.0

    def stSpectralEntropy(X, numOfShortBlocks=10):
        """Computes the spectral entropy over numOfShortBlocks sub-windows."""
        L = len(X)
        Eol = np.sum(X ** 2)
        subWinLength = int(np.floor(L / numOfShortBlocks))
        if L != (subWinLength * numOfShortBlocks):
            # Trim the tail so the frame divides evenly into sub-windows.
            X = X[0:(subWinLength * numOfShortBlocks)]
        subWindows = X.reshape(subWinLength, numOfShortBlocks, order='F').copy()
        s = np.sum(subWindows ** 2, axis=0) / (Eol + eps)
        return -np.sum(s * np.log2(s + eps))

    def spectral_entropy(data, sampling_freq, bands=None):
        """PSD-based spectral entropy, optionally restricted to frequency bands."""
        psd = np.abs(np.fft.rfft(data)) ** 2
        psd /= np.sum(psd)
        if bands is None:
            power_per_band = psd[psd > 0]
        else:
            freqs = np.fft.rfftfreq(data.size, 1 / float(sampling_freq))
            bands = np.asarray(bands)
            freq_limits_low = np.concatenate([[0.0], bands])
            freq_limits_up = np.concatenate([bands, [np.Inf]])
            # BUGFIX: wrap in an ndarray; boolean-masking the former Python list raised TypeError.
            power_per_band = np.asarray([np.sum(psd[np.bitwise_and(freqs >= low, freqs < up)]) for (low, up) in zip(freq_limits_low, freq_limits_up)])
            power_per_band = power_per_band[power_per_band > 0]
        return -np.sum(power_per_band * np.log2(power_per_band))

    def fourier_features_pandas_udf(data, frequency: float=16.0):
        results = []
        # One-sided, length-normalized magnitude spectrum.
        X = abs(np.fft.fft(data))
        nFFT = int(len(X) / 2) + 1
        X = X[0:nFFT]
        X = X / len(X)
        # BUGFIX: original tested `('fft_centroid' or ...)` which is always truthy.
        if ('fft_centroid' in feature_names) or ('fft_spread' in feature_names):
            (C, S) = stSpectralCentroidAndSpread(X, frequency)
            if 'fft_centroid' in feature_names:
                results.append(C)
            if 'fft_spread' in feature_names:
                results.append(S)
        if 'spectral_entropy' in feature_names:
            results.append(stSpectralEntropy(X))
        if 'spectral_entropy_old' in feature_names:
            results.append(spectral_entropy(X, frequency))
        if 'fft_flux' in feature_names:
            results.append(stSpectralFlux(X, X.copy()))
        # BUGFIX: was misspelled 'spectral_folloff', so the roll-off feature was never
        # emitted and the result length mismatched the schema for the default features.
        if 'spectral_falloff' in feature_names:
            results.append(stSpectralRollOff(X, 0.9, frequency))
        return pd.Series(results)

    @pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
    def get_fft_features(df):
        timestamp = df['timestamp'].iloc[0]
        localtime = df['localtime'].iloc[0]
        user = df['user'].iloc[0]
        version = df['version'].iloc[0]
        start_time = timestamp
        end_time = df['timestamp'].iloc[-1]
        df.drop(exclude_col_names, axis=1, inplace=True)
        df_ff = df.apply(fourier_features_pandas_udf)
        df3 = df_ff.T
        pd.set_option('display.max_colwidth', -1)
        df3.columns = feature_names
        # Flatten to one row with <column>_<feature> output columns.
        output = df3.unstack().to_frame().sort_index(level=1).T
        output.columns = [f'{j}_{i}' for (i, j) in output.columns]
        basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time]], columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time'])
        return basic_df.assign(**output)

    return self.compute(get_fft_features, windowDuration=windowDuration, slideDuration=slideDuration, groupByColumnName=groupByColumnName, startTime=startTime)
|
def stSpectralCentroidAndSpread(X, fs, eps=1e-08):
    """Computes spectral centroid and spread of a frame (given abs(FFT)).

    Args:
        X: magnitude spectrum (abs(FFT)) of the frame.
        fs: sampling frequency in Hz.
        eps: small constant guarding against division by zero.
            BUGFIX: formerly an undefined module-global; now a parameter
            defaulting to the original closure value.

    Returns:
        (centroid, spread), both normalized to [0, 1] by the Nyquist frequency.
    """
    # Bin center frequencies for a one-sided spectrum of len(X) bins.
    ind = np.arange(1, len(X) + 1) * (fs / (2.0 * len(X)))
    Xt = X.copy()
    Xt = Xt / Xt.max()
    NUM = np.sum(ind * Xt)
    DEN = np.sum(Xt) + eps
    C = NUM / DEN
    S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)
    C = C / (fs / 2.0)
    S = S / (fs / 2.0)
    return (C, S)
| 917,355,619,372,886,900
|
Computes spectral centroid of frame (given abs(FFT))
|
cerebralcortex/markers/brushing/features.py
|
stSpectralCentroidAndSpread
|
MD2Korg/CerebralCortex-2.0
|
python
|
def stSpectralCentroidAndSpread(X, fs, eps=1e-08):
    """Computes spectral centroid and spread of a frame (given abs(FFT)).

    Args:
        X: magnitude spectrum (abs(FFT)) of the frame.
        fs: sampling frequency in Hz.
        eps: small constant guarding against division by zero.
            BUGFIX: formerly an undefined module-global; now a parameter
            defaulting to the original closure value.

    Returns:
        (centroid, spread), both normalized to [0, 1] by the Nyquist frequency.
    """
    # Bin center frequencies for a one-sided spectrum of len(X) bins.
    ind = np.arange(1, len(X) + 1) * (fs / (2.0 * len(X)))
    Xt = X.copy()
    Xt = Xt / Xt.max()
    NUM = np.sum(ind * Xt)
    DEN = np.sum(Xt) + eps
    C = NUM / DEN
    S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)
    C = C / (fs / 2.0)
    S = S / (fs / 2.0)
    return (C, S)
|
def stSpectralFlux(X, Xprev, eps=1e-08):
    """Computes the spectral flux feature of the current frame.

    Args:
        X: the abs(fft) of the current frame.
        Xprev: the abs(fft) of the previous frame.
        eps: small constant guarding against division by zero.
            BUGFIX: formerly an undefined module-global; now a parameter
            defaulting to the original closure value.

    Returns:
        Sum of squared differences of the two sum-normalized spectra.
    """
    sumX = np.sum(X + eps)
    sumPrevX = np.sum(Xprev + eps)
    F = np.sum(((X / sumX) - (Xprev / sumPrevX)) ** 2)
    return F
| 401,404,339,568,127,550
|
Computes the spectral flux feature of the current frame
ARGUMENTS:
X: the abs(fft) of the current frame
Xpre: the abs(fft) of the previous frame
|
cerebralcortex/markers/brushing/features.py
|
stSpectralFlux
|
MD2Korg/CerebralCortex-2.0
|
python
|
def stSpectralFlux(X, Xprev, eps=1e-08):
    """Computes the spectral flux feature of the current frame.

    Args:
        X: the abs(fft) of the current frame.
        Xprev: the abs(fft) of the previous frame.
        eps: small constant guarding against division by zero.
            BUGFIX: formerly an undefined module-global; now a parameter
            defaulting to the original closure value.

    Returns:
        Sum of squared differences of the two sum-normalized spectra.
    """
    sumX = np.sum(X + eps)
    sumPrevX = np.sum(Xprev + eps)
    F = np.sum(((X / sumX) - (Xprev / sumPrevX)) ** 2)
    return F
|
def stSpectralRollOff(X, c, fs, eps=1e-08):
    """Computes spectral roll-off.

    Args:
        X: magnitude spectrum (abs(FFT)) of the frame.
        c: energy fraction threshold (e.g. 0.90).
        fs: sampling frequency; accepted for interface compatibility but unused here.
        eps: small constant added to the cumulative sum.
            BUGFIX: formerly an undefined module-global; now a parameter
            defaulting to the original closure value.

    Returns:
        Fraction of the FFT length below which fraction ``c`` of the total
        spectral energy is contained (0.0 when the threshold is never crossed).
    """
    totalEnergy = np.sum(X ** 2)
    fftLength = len(X)
    Thres = c * totalEnergy
    # Cumulative energy; eps nudges exact ties over the threshold.
    CumSum = np.cumsum(X ** 2) + eps
    [a] = np.nonzero(CumSum > Thres)
    if len(a) > 0:
        mC = np.float64(a[0]) / float(fftLength)
    else:
        mC = 0.0
    return mC
| 413,782,549,393,534,600
|
Computes spectral roll-off
|
cerebralcortex/markers/brushing/features.py
|
stSpectralRollOff
|
MD2Korg/CerebralCortex-2.0
|
python
|
def stSpectralRollOff(X, c, fs, eps=1e-08):
    """Computes spectral roll-off.

    Args:
        X: magnitude spectrum (abs(FFT)) of the frame.
        c: energy fraction threshold (e.g. 0.90).
        fs: sampling frequency; accepted for interface compatibility but unused here.
        eps: small constant added to the cumulative sum.
            BUGFIX: formerly an undefined module-global; now a parameter
            defaulting to the original closure value.

    Returns:
        Fraction of the FFT length below which fraction ``c`` of the total
        spectral energy is contained (0.0 when the threshold is never crossed).
    """
    totalEnergy = np.sum(X ** 2)
    fftLength = len(X)
    Thres = c * totalEnergy
    # Cumulative energy; eps nudges exact ties over the threshold.
    CumSum = np.cumsum(X ** 2) + eps
    [a] = np.nonzero(CumSum > Thres)
    if len(a) > 0:
        mC = np.float64(a[0]) / float(fftLength)
    else:
        mC = 0.0
    return mC
|
def stSpectralEntropy(X, numOfShortBlocks=10, eps=1e-08):
    """Computes the spectral entropy.

    Args:
        X: magnitude spectrum (abs(FFT)) of the frame.
        numOfShortBlocks: number of sub-windows the frame is divided into.
        eps: small constant guarding against division by zero / log of zero.
            BUGFIX: formerly an undefined module-global; now a parameter
            defaulting to the original closure value.

    Returns:
        Entropy (base 2) of the per-sub-window energy distribution.
    """
    L = len(X)
    Eol = np.sum(X ** 2)
    subWinLength = int(np.floor(L / numOfShortBlocks))
    if L != (subWinLength * numOfShortBlocks):
        # Trim the tail so the frame divides evenly into sub-windows.
        X = X[0:(subWinLength * numOfShortBlocks)]
    subWindows = X.reshape(subWinLength, numOfShortBlocks, order='F').copy()
    s = np.sum(subWindows ** 2, axis=0) / (Eol + eps)
    En = -np.sum(s * np.log2(s + eps))
    return En
| -8,852,138,835,898,820,000
|
Computes the spectral entropy
|
cerebralcortex/markers/brushing/features.py
|
stSpectralEntropy
|
MD2Korg/CerebralCortex-2.0
|
python
|
def stSpectralEntropy(X, numOfShortBlocks=10, eps=1e-08):
    """Computes the spectral entropy.

    Args:
        X: magnitude spectrum (abs(FFT)) of the frame.
        numOfShortBlocks: number of sub-windows the frame is divided into.
        eps: small constant guarding against division by zero / log of zero.
            BUGFIX: formerly an undefined module-global; now a parameter
            defaulting to the original closure value.

    Returns:
        Entropy (base 2) of the per-sub-window energy distribution.
    """
    L = len(X)
    Eol = np.sum(X ** 2)
    subWinLength = int(np.floor(L / numOfShortBlocks))
    if L != (subWinLength * numOfShortBlocks):
        # Trim the tail so the frame divides evenly into sub-windows.
        X = X[0:(subWinLength * numOfShortBlocks)]
    subWindows = X.reshape(subWinLength, numOfShortBlocks, order='F').copy()
    s = np.sum(subWindows ** 2, axis=0) / (Eol + eps)
    En = -np.sum(s * np.log2(s + eps))
    return En
|
def __init__(self, after=None, link=None, local_vars_configuration=None):
    """NextPage - a model defined in OpenAPI"""
    # Fall back to a default client Configuration when none is supplied.
    self.local_vars_configuration = Configuration() if local_vars_configuration is None else local_vars_configuration
    # Backing fields for the `after` / `link` properties.
    self._after = None
    self._link = None
    self.discriminator = None
    # Go through the property setters so client-side validation runs.
    self.after = after
    if link is not None:
        self.link = link
| -554,981,027,761,478,850
|
NextPage - a model defined in OpenAPI
|
hubspot/files/files/models/next_page.py
|
__init__
|
Catchoom/hubspot-api-python
|
python
|
def __init__(self, after=None, link=None, local_vars_configuration=None):
    """NextPage - a model defined in OpenAPI"""
    # Fall back to a default client Configuration when none is supplied.
    self.local_vars_configuration = Configuration() if local_vars_configuration is None else local_vars_configuration
    # Backing fields for the `after` / `link` properties.
    self._after = None
    self._link = None
    self.discriminator = None
    # Go through the property setters so client-side validation runs.
    self.after = after
    if link is not None:
        self.link = link
|
@property
def after(self):
    """Gets the after of this NextPage.  # noqa: E501

    :return: The after of this NextPage.  # noqa: E501
    :rtype: str
    """
    # Plain accessor over the private backing field.
    return self._after
| -8,255,473,615,383,818,000
|
Gets the after of this NextPage. # noqa: E501
:return: The after of this NextPage. # noqa: E501
:rtype: str
|
hubspot/files/files/models/next_page.py
|
after
|
Catchoom/hubspot-api-python
|
python
|
@property
def after(self):
    """Gets the after of this NextPage.  # noqa: E501

    :return: The after of this NextPage.  # noqa: E501
    :rtype: str
    """
    # Plain accessor over the private backing field.
    return self._after
|
@after.setter
def after(self, after):
    """Sets the after of this NextPage.

    :param after: The after of this NextPage.  # noqa: E501
    :type: str
    """
    # `after` is a required field: reject None when client-side validation is enabled.
    if self.local_vars_configuration.client_side_validation and after is None:
        raise ValueError('Invalid value for `after`, must not be `None`')
    self._after = after
| -7,818,888,564,485,552,000
|
Sets the after of this NextPage.
:param after: The after of this NextPage. # noqa: E501
:type: str
|
hubspot/files/files/models/next_page.py
|
after
|
Catchoom/hubspot-api-python
|
python
|
@after.setter
def after(self, after):
    """Sets the after of this NextPage.

    :param after: The after of this NextPage.  # noqa: E501
    :type: str
    """
    # `after` is a required field: reject None when client-side validation is enabled.
    if self.local_vars_configuration.client_side_validation and after is None:
        raise ValueError('Invalid value for `after`, must not be `None`')
    self._after = after
|
@property
def link(self):
    """Gets the link of this NextPage.  # noqa: E501

    :return: The link of this NextPage.  # noqa: E501
    :rtype: str
    """
    # Plain accessor over the private backing field.
    return self._link
| 5,843,383,549,101,338,000
|
Gets the link of this NextPage. # noqa: E501
:return: The link of this NextPage. # noqa: E501
:rtype: str
|
hubspot/files/files/models/next_page.py
|
link
|
Catchoom/hubspot-api-python
|
python
|
@property
def link(self):
    """Gets the link of this NextPage.  # noqa: E501

    :return: The link of this NextPage.  # noqa: E501
    :rtype: str
    """
    # Plain accessor over the private backing field.
    return self._link
|
@link.setter
def link(self, link):
    """Sets the link of this NextPage.

    :param link: The link of this NextPage.  # noqa: E501
    :type: str
    """
    # No validation: `link` is optional in the OpenAPI model.
    self._link = link
| 6,429,752,145,295,531,000
|
Sets the link of this NextPage.
:param link: The link of this NextPage. # noqa: E501
:type: str
|
hubspot/files/files/models/next_page.py
|
link
|
Catchoom/hubspot-api-python
|
python
|
@link.setter
def link(self, link):
    """Sets the link of this NextPage.

    :param link: The link of this NextPage.  # noqa: E501
    :type: str
    """
    # No validation: `link` is optional in the OpenAPI model.
    self._link = link
|
def to_dict(self):
    """Returns the model properties as a dict.

    Values that expose ``to_dict`` (nested models) are serialized
    recursively, one level deep inside lists and dicts, matching the
    OpenAPI-generated behavior. The Python-2 compat `six.iteritems`
    dependency was replaced with native ``dict`` iteration.
    """
    result = {}
    for attr in self.openapi_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [x.to_dict() if hasattr(x, 'to_dict') else x for x in value]
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {k: (v.to_dict() if hasattr(v, 'to_dict') else v) for k, v in value.items()}
        else:
            result[attr] = value
    return result
| 8,442,519,487,048,767,000
|
Returns the model properties as a dict
|
hubspot/files/files/models/next_page.py
|
to_dict
|
Catchoom/hubspot-api-python
|
python
|
def to_dict(self):
    """Returns the model properties as a dict.

    Values that expose ``to_dict`` (nested models) are serialized
    recursively, one level deep inside lists and dicts, matching the
    OpenAPI-generated behavior. The Python-2 compat `six.iteritems`
    dependency was replaced with native ``dict`` iteration.
    """
    result = {}
    for attr in self.openapi_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [x.to_dict() if hasattr(x, 'to_dict') else x for x in value]
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {k: (v.to_dict() if hasattr(v, 'to_dict') else v) for k, v in value.items()}
        else:
            result[attr] = value
    return result
|
def to_str(self):
    """Returns the string representation of the model"""
    # Pretty-print the dict form for readable output.
    as_dict = self.to_dict()
    return pprint.pformat(as_dict)
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
hubspot/files/files/models/next_page.py
|
to_str
|
Catchoom/hubspot-api-python
|
python
|
def to_str(self):
    """Returns the string representation of the model"""
    # Pretty-print the dict form for readable output.
    as_dict = self.to_dict()
    return pprint.pformat(as_dict)
|
def __repr__(self):
    """For `print` and `pprint`"""
    # Delegate to the canonical pretty-printed form.
    return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
hubspot/files/files/models/next_page.py
|
__repr__
|
Catchoom/hubspot-api-python
|
python
|
def __repr__(self):
    """For `print` and `pprint`"""
    # Delegate to the canonical pretty-printed form.
    return self.to_str()
|
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Only another NextPage with an identical dict form compares equal.
    return isinstance(other, NextPage) and self.to_dict() == other.to_dict()
| -7,321,777,463,093,585,000
|
Returns true if both objects are equal
|
hubspot/files/files/models/next_page.py
|
__eq__
|
Catchoom/hubspot-api-python
|
python
|
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Only another NextPage with an identical dict form compares equal.
    return isinstance(other, NextPage) and self.to_dict() == other.to_dict()
|
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    # Anything that is not a NextPage, or differs in dict form, is unequal.
    return (not isinstance(other, NextPage)) or self.to_dict() != other.to_dict()
| -1,624,190,676,302,696,700
|
Returns true if both objects are not equal
|
hubspot/files/files/models/next_page.py
|
__ne__
|
Catchoom/hubspot-api-python
|
python
|
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    # Anything that is not a NextPage, or differs in dict form, is unequal.
    return (not isinstance(other, NextPage)) or self.to_dict() != other.to_dict()
|
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # Unary-unary stub: returns the requested feed item target in full detail.
    self.GetFeedItemTarget = channel.unary_unary('/google.ads.googleads.v2.services.FeedItemTargetService/GetFeedItemTarget', request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.GetFeedItemTargetRequest.SerializeToString, response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_feed__item__target__pb2.FeedItemTarget.FromString)
    # Unary-unary stub: creates or removes feed item targets; returns operation statuses.
    self.MutateFeedItemTargets = channel.unary_unary('/google.ads.googleads.v2.services.FeedItemTargetService/MutateFeedItemTargets', request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsRequest.SerializeToString, response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsResponse.FromString)
| 1,639,354,539,681,269,000
|
Constructor.
Args:
channel: A grpc.Channel.
|
google/ads/google_ads/v2/proto/services/feed_item_target_service_pb2_grpc.py
|
__init__
|
BenRKarl/google-ads-python
|
python
|
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # Unary-unary stub: returns the requested feed item target in full detail.
    self.GetFeedItemTarget = channel.unary_unary('/google.ads.googleads.v2.services.FeedItemTargetService/GetFeedItemTarget', request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.GetFeedItemTargetRequest.SerializeToString, response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_feed__item__target__pb2.FeedItemTarget.FromString)
    # Unary-unary stub: creates or removes feed item targets; returns operation statuses.
    self.MutateFeedItemTargets = channel.unary_unary('/google.ads.googleads.v2.services.FeedItemTargetService/MutateFeedItemTargets', request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsRequest.SerializeToString, response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsResponse.FromString)
|
def GetFeedItemTarget(self, request, context):
    """Returns the requested feed item targets in full detail."""
    # Server-side placeholder: concrete services must override this handler.
    msg = 'Method not implemented!'
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(msg)
    raise NotImplementedError(msg)
| -4,225,013,039,427,472,000
|
Returns the requested feed item targets in full detail.
|
google/ads/google_ads/v2/proto/services/feed_item_target_service_pb2_grpc.py
|
GetFeedItemTarget
|
BenRKarl/google-ads-python
|
python
|
def GetFeedItemTarget(self, request, context):
    """Returns the requested feed item targets in full detail."""
    # Server-side placeholder: concrete services must override this handler.
    msg = 'Method not implemented!'
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(msg)
    raise NotImplementedError(msg)
|
def MutateFeedItemTargets(self, request, context):
    """Creates or removes feed item targets. Operation statuses are returned."""
    # Server-side placeholder: concrete services must override this handler.
    msg = 'Method not implemented!'
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(msg)
    raise NotImplementedError(msg)
| 4,147,450,015,430,002,000
|
Creates or removes feed item targets. Operation statuses are returned.
|
google/ads/google_ads/v2/proto/services/feed_item_target_service_pb2_grpc.py
|
MutateFeedItemTargets
|
BenRKarl/google-ads-python
|
python
|
def MutateFeedItemTargets(self, request, context):
    """Creates or removes feed item targets. Operation statuses are returned."""
    # Server-side placeholder: concrete services must override this handler.
    msg = 'Method not implemented!'
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(msg)
    raise NotImplementedError(msg)
|
def check_dir_exists(dirname='./pickles'):
    """Ensure that ``dirname`` exists, creating it if necessary.

    This directory will contain all the pickle files.

    Args:
        dirname (str): path of the directory to check/create.
    """
    if not os.path.exists(dirname):
        # BUGFIX: the old message had a typo ('pickes') and always named
        # './pickles' even when a different dirname was passed.
        print('Directory to store pickles does not exist. Creating one now: %s' % dirname)
        os.mkdir(dirname)
| 1,241,679,966,958,779,100
|
Check if the given dirname exists. This directory will contain all the pickle files.
|
createpickles.py
|
check_dir_exists
|
ansrivas/keras-rest-server
|
python
|
def check_dir_exists(dirname='./pickles'):
    """Ensure that ``dirname`` exists, creating it if necessary.

    This directory will contain all the pickle files.

    Args:
        dirname (str): path of the directory to check/create.
    """
    if not os.path.exists(dirname):
        # BUGFIX: the old message had a typo ('pickes') and always named
        # './pickles' even when a different dirname was passed.
        print('Directory to store pickles does not exist. Creating one now: %s' % dirname)
        os.mkdir(dirname)
|
def save_x_y_scalar(X_train, Y_train):
    """Use a normalization method on your current dataset and save the coefficients.

    Args:
        X_train: Input X_train
        Y_train: Labels Y_train
    Returns:
        Normalized X_train, Y_train (currently using StandardScaler from scikit-learn)
    """
    scalar_x = StandardScaler()
    X_train = scalar_x.fit_transform(X_train)
    scalar_y = StandardScaler()
    Y_train = scalar_y.fit_transform(Y_train)
    print('dumping StandardScaler objects ..')
    # BUGFIX: the original left both pickle file handles open; `with` guarantees they close.
    with open('pickles/scalar_y.pickle', 'wb') as fy:
        pickle.dump(scalar_y, fy, protocol=pickle.HIGHEST_PROTOCOL)
    with open('pickles/scalar_x.pickle', 'wb') as fx:
        pickle.dump(scalar_x, fx, protocol=pickle.HIGHEST_PROTOCOL)
    return (X_train, Y_train)
| -4,774,427,860,463,725,000
|
Use a normalization method on your current dataset and save the coefficients.
Args:
X_train: Input X_train
Y_train: Labels Y_train
Returns:
Normalized X_train,Y_train ( currently using StandardScaler from scikit-learn)
|
createpickles.py
|
save_x_y_scalar
|
ansrivas/keras-rest-server
|
python
|
def save_x_y_scalar(X_train, Y_train):
    """Use a normalization method on your current dataset and save the coefficients.

    Args:
        X_train: Input X_train
        Y_train: Labels Y_train
    Returns:
        Normalized X_train, Y_train (currently using StandardScaler from scikit-learn)
    """
    scalar_x = StandardScaler()
    X_train = scalar_x.fit_transform(X_train)
    scalar_y = StandardScaler()
    Y_train = scalar_y.fit_transform(Y_train)
    print('dumping StandardScaler objects ..')
    # BUGFIX: the original left both pickle file handles open; `with` guarantees they close.
    with open('pickles/scalar_y.pickle', 'wb') as fy:
        pickle.dump(scalar_y, fy, protocol=pickle.HIGHEST_PROTOCOL)
    with open('pickles/scalar_x.pickle', 'wb') as fx:
        pickle.dump(scalar_x, fx, protocol=pickle.HIGHEST_PROTOCOL)
    return (X_train, Y_train)
|
def create_model(X_train, Y_train):
    """Build, train and persist a small feed-forward regression network.

    Architecture: Dense(4, tanh) -> Dense(4, linear) -> Dense(1), trained
    with RMSprop on mean-squared error.  The architecture JSON and the
    learned weights are written into the ``pickles/`` directory.

    Args:
        X_train: Input feature matrix.
        Y_train: Target labels.
    """
    n_features = X_train.shape[1]
    model = Sequential()
    model.add(Dense(units=4, input_shape=(n_features,)))
    model.add(Activation('tanh'))
    model.add(Dense(4))
    model.add(Activation('linear'))
    model.add(Dense(1))
    optimizer = kop.RMSprop()
    print('compiling now..')
    model.compile(loss='mse', optimizer=optimizer)
    model.fit(X_train, Y_train, epochs=1000, batch_size=1, verbose=2)
    score = model.evaluate(X_train, Y_train, batch_size=1)
    print('Evaluation results:', score)
    with open('pickles/my_model_architecture.json', 'w') as arch_file:
        arch_file.write(model.to_json())
    print('Saving weights in: ./pickles/my_model_weights.h5')
    model.save_weights('pickles/my_model_weights.h5')
| -6,434,802,664,474,911,000
|
create_model will create a very simple neural net model and save the weights in a predefined directory.
Args:
X_train: Input X_train
Y_train: Lables Y_train
|
createpickles.py
|
create_model
|
ansrivas/keras-rest-server
|
python
|
def create_model(X_train, Y_train):
'create_model will create a very simple neural net model and save the weights in a predefined directory.\n\n Args:\n X_train: Input X_train\n Y_train: Lables Y_train\n '
xin = X_train.shape[1]
model = Sequential()
model.add(Dense(units=4, input_shape=(xin,)))
model.add(Activation('tanh'))
model.add(Dense(4))
model.add(Activation('linear'))
model.add(Dense(1))
rms = kop.RMSprop()
print('compiling now..')
model.compile(loss='mse', optimizer=rms)
model.fit(X_train, Y_train, epochs=1000, batch_size=1, verbose=2)
score = model.evaluate(X_train, Y_train, batch_size=1)
print('Evaluation results:', score)
open('pickles/my_model_architecture.json', 'w').write(model.to_json())
print('Saving weights in: ./pickles/my_model_weights.h5')
model.save_weights('pickles/my_model_weights.h5')
|
def resize_return_buffer(buf_, size_):
    """ctypes callback that grows the thread-local return buffer.

    Invoked by the native G2 library when the buffer offered for a result
    is too small.  Existing contents are preserved across a resize.

    Args:
        buf_: Current buffer address (unused; the thread-local buffer is used).
        size_: Minimum size in bytes the return buffer must have.

    Returns:
        Address of the (possibly reallocated) thread-local buffer.
    """
    try:
        if not tls_var.buf:
            # First use on this thread: allocate a fresh buffer.
            tls_var.buf = create_string_buffer(size_)
            tls_var.bufSize = size_
        elif tls_var.bufSize < size_:
            # Grow the buffer, keeping what the library already wrote.
            old_buf = tls_var.buf
            tls_var.buf = create_string_buffer(size_)
            tls_var.bufSize = size_
            memmove(tls_var.buf, old_buf, sizeof(old_buf))
    except AttributeError:
        # Thread-local attributes not yet initialized for this thread.
        tls_var.buf = create_string_buffer(size_)
        tls_var.bufSize = size_
    return addressof(tls_var.buf)
| -1,352,476,546,927,327,500
|
callback function that resizes return buffer when it is too small
Args:
size_: size the return buffer needs to be
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
resize_return_buffer
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def resize_return_buffer(buf_, size_):
' callback function that resizes return buffer when it is too small\n Args:\n size_: size the return buffer needs to be\n '
try:
if (not tls_var.buf):
tls_var.buf = create_string_buffer(size_)
tls_var.bufSize = size_
elif (tls_var.bufSize < size_):
foo = tls_var.buf
tls_var.buf = create_string_buffer(size_)
tls_var.bufSize = size_
memmove(tls_var.buf, foo, sizeof(foo))
except AttributeError:
tls_var.buf = create_string_buffer(size_)
tls_var.bufSize = size_
return addressof(tls_var.buf)
|
def initV2(self, module_name_, ini_params_, debug_=False):
    """Initialize the G2 config manager (call once per process).

    Args:
        module_name_: Short name for this instance of the config module.
        ini_params_: JSON document containing G2 system parameters.
        debug_: When True, emit diagnostic messages via arcpy.

    Raises:
        G2ModuleNotInitialized: when the native init returns -1.
        TranslateG2ModuleException: for any other negative return code.
    """
    self._module_name = self.prepareStringArgument(module_name_)
    self._ini_params = self.prepareStringArgument(ini_params_)
    self._debug = debug_
    if self._debug:
        arcpy.AddMessage('Initializing G2 Config Manager')
    init_fn = self._lib_handle.G2ConfigMgr_init_V2
    init_fn.argtypes = [c_char_p, c_char_p, c_int]
    ret_code = init_fn(self._module_name, self._ini_params, self._debug)
    if self._debug:
        arcpy.AddMessage('Initialization Status: ' + str(ret_code))
    if ret_code == -1:
        raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
    if ret_code < 0:
        # Surface the native library's last exception message.
        self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
        raise TranslateG2ModuleException(tls_var.buf.value)
| -1,780,151,840,131,302,000
|
Initializes the G2 config manager
This should only be called once per process.
Args:
moduleName: A short name given to this instance of the config module
iniParams: A json document that contains G2 system parameters.
verboseLogging: Enable diagnostic logging which will arcpy.AddMessage a massive amount of information to stdout
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
initV2
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def initV2(self, module_name_, ini_params_, debug_=False):
' Initializes the G2 config manager\n This should only be called once per process.\n Args:\n moduleName: A short name given to this instance of the config module\n iniParams: A json document that contains G2 system parameters.\n verboseLogging: Enable diagnostic logging which will arcpy.AddMessage a massive amount of information to stdout\n '
self._module_name = self.prepareStringArgument(module_name_)
self._ini_params = self.prepareStringArgument(ini_params_)
self._debug = debug_
if self._debug:
arcpy.AddMessage('Initializing G2 Config Manager')
self._lib_handle.G2ConfigMgr_init_V2.argtypes = [c_char_p, c_char_p, c_int]
ret_code = self._lib_handle.G2ConfigMgr_init_V2(self._module_name, self._ini_params, self._debug)
if self._debug:
arcpy.AddMessage(('Initialization Status: ' + str(ret_code)))
if (ret_code == (- 1)):
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif (ret_code < 0):
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
|
def __init__(self):
    """Load the native G2 library and prepare the resize callback."""
    lib_name = 'G2.dll' if os.name == 'nt' else 'libG2.so'
    try:
        self._lib_handle = cdll.LoadLibrary(lib_name)
    except OSError:
        arcpy.AddMessage('ERROR: Unable to load G2. Did you remember to setup your environment by sourcing the setupEnv file?')
        arcpy.AddMessage('ERROR: For more information see https://senzing.zendesk.com/hc/en-us/articles/115002408867-Introduction-G2-Quickstart')
        arcpy.AddMessage('ERROR: If you are running Ubuntu or Debian please also review the ssl and crypto information at https://senzing.zendesk.com/hc/en-us/articles/115010259947-System-Requirements')
        raise G2ModuleGenericException('Failed to load the G2 library')
    # Callback signature the native library expects: char* (*)(char*, size_t)
    self._resize_func_def = CFUNCTYPE(c_char_p, c_char_p, c_size_t)
    self._resize_func = self._resize_func_def(resize_return_buffer)
| 6,399,571,678,208,304,000
|
Class initialization
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
__init__
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def __init__(self):
' \n '
try:
if (os.name == 'nt'):
self._lib_handle = cdll.LoadLibrary('G2.dll')
else:
self._lib_handle = cdll.LoadLibrary('libG2.so')
except OSError as ex:
arcpy.AddMessage('ERROR: Unable to load G2. Did you remember to setup your environment by sourcing the setupEnv file?')
arcpy.AddMessage('ERROR: For more information see https://senzing.zendesk.com/hc/en-us/articles/115002408867-Introduction-G2-Quickstart')
arcpy.AddMessage('ERROR: If you are running Ubuntu or Debian please also review the ssl and crypto information at https://senzing.zendesk.com/hc/en-us/articles/115010259947-System-Requirements')
raise G2ModuleGenericException('Failed to load the G2 library')
self._resize_func_def = CFUNCTYPE(c_char_p, c_char_p, c_size_t)
self._resize_func = self._resize_func_def(resize_return_buffer)
|
def prepareStringArgument(self, stringToPrepare):
    """Normalize a caller-supplied value into UTF-8 bytes for ctypes.

    Args:
        stringToPrepare: str, bytes, bytearray, or None.

    Returns:
        UTF-8 encoded ``bytes`` (or None when the input is None).
        Values of unexpected types are passed through unchanged.
    """
    if stringToPrepare is None:
        return None
    if isinstance(stringToPrepare, str):
        return stringToPrepare.encode('utf-8')
    if isinstance(stringToPrepare, bytearray):
        return bytes(stringToPrepare)
    if isinstance(stringToPrepare, bytes):
        # Bug fix: str(b'x') yields the literal "b'x'", corrupting the
        # payload passed to the native library; bytes are already valid.
        return stringToPrepare
    return stringToPrepare
| 8,941,194,383,144,176,000
|
Internal processing function
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
prepareStringArgument
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def prepareStringArgument(self, stringToPrepare):
' '
if (stringToPrepare == None):
return None
if (type(stringToPrepare) == str):
return stringToPrepare.encode('utf-8')
elif (type(stringToPrepare) == bytearray):
return stringToPrepare.decode().encode('utf-8')
elif (type(stringToPrepare) == bytes):
return str(stringToPrepare).encode('utf-8')
return stringToPrepare
|
def prepareIntArgument(self, valueToPrepare):
    """Coerce a caller-supplied value into an int for ctypes.

    Args:
        valueToPrepare: str, bytes, bytearray, int, or None.

    Returns:
        ``int`` (or None when the input is None).  Values of unexpected
        types are passed through unchanged.
    """
    if valueToPrepare is None:
        return None
    if isinstance(valueToPrepare, (str, bytes, bytearray)):
        # int() parses str/bytes/bytearray digit strings directly; the
        # old str branch's .encode('utf-8') round-trip was redundant.
        return int(valueToPrepare)
    return valueToPrepare
| 8,874,652,037,414,881,000
|
Internal processing function
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
prepareIntArgument
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def prepareIntArgument(self, valueToPrepare):
' '
' This converts many types of values to an integer '
if (valueToPrepare == None):
return None
if (type(valueToPrepare) == str):
return int(valueToPrepare.encode('utf-8'))
elif (type(valueToPrepare) == bytearray):
return int(valueToPrepare)
elif (type(valueToPrepare) == bytes):
return int(valueToPrepare)
return valueToPrepare
|
def addConfig(self, configStr, configComments, configID):
    """Register a new configuration document in the datastore.

    Args:
        configStr: Configuration document.
        configComments: Comment describing the configuration.
        configID: Mutable bytearray; receives the new ID as ASCII digits.

    Raises:
        G2ModuleNotInitialized: when the native call returns -1.
        TranslateG2ModuleException: for any other negative return code.
    """
    config_bytes = self.prepareStringArgument(configStr)
    comment_bytes = self.prepareStringArgument(configComments)
    configID[:] = b''
    new_id = c_longlong(0)
    add_fn = self._lib_handle.G2ConfigMgr_addConfig
    add_fn.argtypes = [c_char_p, c_char_p, POINTER(c_longlong)]
    add_fn.restype = c_int
    ret_code = add_fn(config_bytes, comment_bytes, new_id)
    if ret_code == -1:
        raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
    if ret_code < 0:
        self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
        raise TranslateG2ModuleException(tls_var.buf.value)
    configID += str(new_id.value).encode()
| 7,767,073,802,886,320,000
|
registers a new configuration document in the datastore
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
addConfig
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def addConfig(self, configStr, configComments, configID):
' \n '
_configStr = self.prepareStringArgument(configStr)
_configComments = self.prepareStringArgument(configComments)
    configID[:] = b''
cID = c_longlong(0)
self._lib_handle.G2ConfigMgr_addConfig.argtypes = [c_char_p, c_char_p, POINTER(c_longlong)]
self._lib_handle.G2ConfigMgr_addConfig.restype = c_int
ret_code = self._lib_handle.G2ConfigMgr_addConfig(_configStr, _configComments, cID)
if (ret_code == (- 1)):
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif (ret_code < 0):
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
configID += str(cID.value).encode()
|
def getConfig(self, configID, response):
    """Retrieve the registered configuration document with the given ID.

    Args:
        configID: Identifier of the configuration to fetch.
        response: Mutable bytearray; receives the configuration document.

    Raises:
        G2ModuleNotInitialized: when the native call returns -1.
        TranslateG2ModuleException: for any other negative return code.
    """
    config_id = self.prepareIntArgument(configID)
    response[:] = b''
    result_buf = c_char_p(addressof(tls_var.buf))
    result_size = c_size_t(tls_var.bufSize)
    get_fn = self._lib_handle.G2ConfigMgr_getConfig
    get_fn.restype = c_int
    get_fn.argtypes = [c_longlong, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
    ret_code = get_fn(config_id, pointer(result_buf), pointer(result_size), self._resize_func)
    if ret_code == -1:
        raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
    if ret_code < 0:
        self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
        raise TranslateG2ModuleException(tls_var.buf.value)
    response += tls_var.buf.value
| -3,640,614,544,183,717,400
|
retrieves the registered configuration document from the datastore
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
getConfig
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def getConfig(self, configID, response):
' \n '
configID_ = self.prepareIntArgument(configID)
    response[:] = b''
responseBuf = c_char_p(addressof(tls_var.buf))
responseSize = c_size_t(tls_var.bufSize)
self._lib_handle.G2ConfigMgr_getConfig.restype = c_int
self._lib_handle.G2ConfigMgr_getConfig.argtypes = [c_longlong, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2ConfigMgr_getConfig(configID_, pointer(responseBuf), pointer(responseSize), self._resize_func)
if (ret_code == (- 1)):
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif (ret_code < 0):
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
response += tls_var.buf.value
|
def getConfigList(self, response):
    """Retrieve the list of known configurations from the datastore.

    Args:
        response: Mutable bytearray; receives the configuration list.

    Raises:
        G2ModuleNotInitialized: when the native call returns -1.
        TranslateG2ModuleException: for any other negative return code.
    """
    response[:] = b''
    result_buf = c_char_p(addressof(tls_var.buf))
    result_size = c_size_t(tls_var.bufSize)
    list_fn = self._lib_handle.G2ConfigMgr_getConfigList
    list_fn.restype = c_int
    list_fn.argtypes = [POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
    ret_code = list_fn(pointer(result_buf), pointer(result_size), self._resize_func)
    if ret_code == -1:
        raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
    if ret_code < 0:
        self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
        raise TranslateG2ModuleException(tls_var.buf.value)
    response += tls_var.buf.value
| 6,758,571,486,106,685,000
|
retrieves a list of known configurations from the datastore
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
getConfigList
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def getConfigList(self, response):
' \n '
    response[:] = b''
responseBuf = c_char_p(addressof(tls_var.buf))
responseSize = c_size_t(tls_var.bufSize)
self._lib_handle.G2ConfigMgr_getConfigList.restype = c_int
self._lib_handle.G2ConfigMgr_getConfigList.argtypes = [POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2ConfigMgr_getConfigList(pointer(responseBuf), pointer(responseSize), self._resize_func)
if (ret_code == (- 1)):
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif (ret_code < 0):
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
response += tls_var.buf.value
|
def setDefaultConfigID(self, configID):
    """Mark *configID* as the default configuration in the datastore.

    Raises:
        G2ModuleNotInitialized: when the native call returns -1.
        TranslateG2ModuleException: for any other negative return code.
    """
    config_id = self.prepareIntArgument(configID)
    set_fn = self._lib_handle.G2ConfigMgr_setDefaultConfigID
    set_fn.restype = c_int
    set_fn.argtypes = [c_longlong]
    ret_code = set_fn(config_id)
    if ret_code == -1:
        raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
    if ret_code < 0:
        self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
        raise TranslateG2ModuleException(tls_var.buf.value)
| -7,938,155,852,795,214,000
|
sets the default config identifier in the datastore
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
setDefaultConfigID
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def setDefaultConfigID(self, configID):
' \n '
configID_ = self.prepareIntArgument(configID)
self._lib_handle.G2ConfigMgr_setDefaultConfigID.restype = c_int
self._lib_handle.G2ConfigMgr_setDefaultConfigID.argtypes = [c_longlong]
ret_code = self._lib_handle.G2ConfigMgr_setDefaultConfigID(configID_)
if (ret_code == (- 1)):
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif (ret_code < 0):
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
|
def replaceDefaultConfigID(self, oldConfigID, newConfigID):
    """Atomically replace the default config ID, guarding against races.

    The native call only succeeds when the current default still equals
    *oldConfigID*.

    Raises:
        G2ModuleNotInitialized: when the native call returns -1.
        TranslateG2ModuleException: for any other negative return code.
    """
    old_id = self.prepareIntArgument(oldConfigID)
    new_id = self.prepareIntArgument(newConfigID)
    replace_fn = self._lib_handle.G2ConfigMgr_replaceDefaultConfigID
    replace_fn.restype = c_int
    replace_fn.argtypes = [c_longlong, c_longlong]
    ret_code = replace_fn(old_id, new_id)
    if ret_code == -1:
        raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
    if ret_code < 0:
        self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
        raise TranslateG2ModuleException(tls_var.buf.value)
| 6,423,434,265,841,057,000
|
sets the default config identifier in the datastore
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
replaceDefaultConfigID
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def replaceDefaultConfigID(self, oldConfigID, newConfigID):
' \n '
oldConfigID_ = self.prepareIntArgument(oldConfigID)
newConfigID_ = self.prepareIntArgument(newConfigID)
self._lib_handle.G2ConfigMgr_replaceDefaultConfigID.restype = c_int
self._lib_handle.G2ConfigMgr_replaceDefaultConfigID.argtypes = [c_longlong, c_longlong]
ret_code = self._lib_handle.G2ConfigMgr_replaceDefaultConfigID(oldConfigID_, newConfigID_)
if (ret_code == (- 1)):
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif (ret_code < 0):
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
|
def getDefaultConfigID(self, configID):
    """Fetch the default configuration ID from the datastore.

    Args:
        configID: Mutable bytearray; receives the ID as ASCII digits,
            or is left empty when no default is set.

    Raises:
        G2ModuleNotInitialized: when the native call returns -1.
        TranslateG2ModuleException: for any other negative return code.
    """
    configID[:] = b''
    current_id = c_longlong(0)
    get_fn = self._lib_handle.G2ConfigMgr_getDefaultConfigID
    get_fn.argtypes = [POINTER(c_longlong)]
    get_fn.restype = c_int
    ret_code = get_fn(current_id)
    if ret_code == -1:
        raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
    if ret_code < 0:
        self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
        raise TranslateG2ModuleException(tls_var.buf.value)
    if current_id.value:
        # 0 means "no default configured" -> leave configID empty.
        configID += str(current_id.value).encode()
| 3,663,798,139,632,863,000
|
gets the default config identifier from the datastore
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
getDefaultConfigID
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def getDefaultConfigID(self, configID):
' \n '
    configID[:] = b''
cID = c_longlong(0)
self._lib_handle.G2ConfigMgr_getDefaultConfigID.argtypes = [POINTER(c_longlong)]
self._lib_handle.G2ConfigMgr_getDefaultConfigID.restype = c_int
ret_code = self._lib_handle.G2ConfigMgr_getDefaultConfigID(cID)
if (ret_code == (- 1)):
raise G2ModuleNotInitialized('G2ConfigMgr has not been succesfully initialized')
elif (ret_code < 0):
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
if cID.value:
configID += str(cID.value).encode()
|
def clearLastException(self):
    """Clear the last exception recorded by the native library."""
    clear_fn = self._lib_handle.G2ConfigMgr_clearLastException
    clear_fn.restype = None
    clear_fn.argtypes = []
    clear_fn()
| 8,328,367,716,224,782,000
|
Clears the last exception
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
clearLastException
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def clearLastException(self):
' \n '
self._lib_handle.G2ConfigMgr_clearLastException.restype = None
self._lib_handle.G2ConfigMgr_clearLastException.argtypes = []
self._lib_handle.G2ConfigMgr_clearLastException()
|
def getLastException(self):
    """Return the last native exception message as a UTF-8 string."""
    exc_fn = self._lib_handle.G2ConfigMgr_getLastException
    exc_fn.restype = c_int
    exc_fn.argtypes = [c_char_p, c_size_t]
    exc_fn(tls_var.buf, sizeof(tls_var.buf))
    return tls_var.buf.value.decode('utf-8')
| 6,679,493,333,561,609,000
|
Gets the last exception
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
getLastException
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def getLastException(self):
' \n '
self._lib_handle.G2ConfigMgr_getLastException.restype = c_int
self._lib_handle.G2ConfigMgr_getLastException.argtypes = [c_char_p, c_size_t]
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
resultString = tls_var.buf.value.decode('utf-8')
return resultString
|
def getLastExceptionCode(self):
    """Return the numeric code of the last native exception."""
    code_fn = self._lib_handle.G2ConfigMgr_getLastExceptionCode
    code_fn.restype = c_int
    code_fn.argtypes = []
    return code_fn()
| -2,972,673,154,366,856,700
|
Gets the last exception code
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
getLastExceptionCode
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def getLastExceptionCode(self):
' \n '
self._lib_handle.G2ConfigMgr_getLastExceptionCode.restype = c_int
self._lib_handle.G2ConfigMgr_getLastExceptionCode.argtypes = []
exception_code = self._lib_handle.G2ConfigMgr_getLastExceptionCode()
return exception_code
|
def destroy(self):
    """Uninitialize the engine.

    Call once per process after init(...); afterwards the engine no
    longer functions.

    Returns:
        None
    """
    self._lib_handle.G2ConfigMgr_destroy()
| -8,557,166,857,811,240,000
|
Uninitializes the engine
This should be done once per process after init(...) is called.
After it is called the engine will no longer function.
Args:
Return:
None
|
senzing/g2/sdk/python/G2ConfigMgr.py
|
destroy
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
python
|
def destroy(self):
' Uninitializes the engine\n This should be done once per process after init(...) is called.\n After it is called the engine will no longer function.\n\n Args:\n\n Return:\n None\n '
self._lib_handle.G2ConfigMgr_destroy()
|
def start(self):
    """Start the monitor by launching its fetch thread.

    Sets the running flag under the lock, then starts the fetch thread
    as a daemon so it never blocks interpreter shutdown.
    """
    with self.running_lock:
        self.running = True
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # daemon attribute directly instead.
    self.fetch_thread.daemon = True
    self.fetch_thread.start()
| 6,365,399,168,440,328,000
|
start monitor,
it will start a monitor thread.
|
python/paddle/fluid/trainer_factory.py
|
start
|
0x45f/Paddle
|
python
|
def start(self):
'\n start monitor,\n it will start a monitor thread.\n '
self.running_lock.acquire()
self.running = True
self.running_lock.release()
self.fetch_thread.setDaemon(True)
self.fetch_thread.start()
|
def update_board(self, position, flag=False):
    """Apply a move at 1-based *position* ([x, y]) and redraw the board.

    With ``flag=True`` the cell's flag marker is toggled instead of the
    cell being revealed.

    Returns:
        The rendered board string, or ``False`` when a mine was hit.
    """
    col = position[0] - 1
    row = position[1] - 1
    if flag == True:
        # Toggle between the unopened marker and the flag marker.
        cell = self.board_data[row][col]
        if cell == ' ◌ ':
            self.board_data[row][col] = ' ▶ '
        elif cell == ' ▶ ':
            self.board_data[row][col] = ' ◌ '
        return self.draw_board()
    mine_cell = self.mine_board[row][col]
    if mine_cell == '◉':
        # Stepped on a mine: reveal it and signal game over.
        self.board_data[row][col] = ' ◉ '
        return False
    if isinstance(mine_cell, int) and mine_cell > 0:
        # Numbered cell: show the adjacent-mine count.
        self.board_data[row][col] = ' ' + str(mine_cell) + ' '
    else:
        # Empty cell: reveal the whole connected empty region.
        self.flood_fill(col, row)
    return self.draw_board()
| 4,688,058,642,854,749,000
|
Takes position [x,y] as input
returns a updated board as a string
|
board.py
|
update_board
|
Epirius/minesweeper
|
python
|
def update_board(self, position, flag=False):
'Takes position [x,y] as input\n\t\t\treturns a updated board as a string\n\t\t'
x = (position[0] - 1)
y = (position[1] - 1)
if (flag == True):
if (self.board_data[y][x] == ' ◌ '):
self.board_data[y][x] = ' ▶ '
elif (self.board_data[y][x] == ' ▶ '):
self.board_data[y][x] = ' ◌ '
return self.draw_board()
if (self.mine_board[y][x] == '◉'):
self.board_data[y][x] = ' ◉ '
return False
elif (isinstance(self.mine_board[y][x], int) and (self.mine_board[y][x] > 0)):
self.board_data[y][x] = ((' ' + str(self.mine_board[y][x])) + ' ')
else:
self.flood_fill(x, y)
return self.draw_board()
|
def clean_extra_package_management_files():
    """Remove either the requirements files/folder or the Pipfile.

    Also drops the Heroku files when Heroku support is disabled, copies
    the example env file into place, and creates an empty dev database.
    Exits with status 1 if any filesystem operation fails.
    """
    use_pipenv = '{{cookiecutter.use_pipenv}}'
    use_heroku = '{{cookiecutter.use_heroku}}'
    if use_pipenv == 'yes':
        to_delete = ['requirements.txt', 'requirements']
    else:
        to_delete = ['Pipfile']
    if use_heroku == 'no':
        to_delete += ['Procfile', 'app.json']
    try:
        for target in to_delete:
            if os.path.isfile(target):
                os.remove(target)
            else:
                shutil.rmtree(target)
        shutil.copy('.env.example', '.env')
        # Ensure an (empty) development database file exists.
        open('dev.db', 'a').close()
    except OSError as e:
        _logger.warning('While attempting to remove file(s) an error occurred')
        _logger.warning(f'Error: {e}')
        sys.exit(1)
| 1,898,375,103,263,794,700
|
Removes either requirements files and folder or the Pipfile.
|
hooks/post_gen_project.py
|
clean_extra_package_management_files
|
HaeckelK/cookiecutter-flask
|
python
|
def clean_extra_package_management_files():
use_pipenv = '{{cookiecutter.use_pipenv}}'
use_heroku = '{{cookiecutter.use_heroku}}'
to_delete = []
if (use_pipenv == 'yes'):
to_delete = (to_delete + ['requirements.txt', 'requirements'])
else:
to_delete.append('Pipfile')
if (use_heroku == 'no'):
to_delete = (to_delete + ['Procfile', 'app.json'])
try:
for file_or_dir in to_delete:
if os.path.isfile(file_or_dir):
os.remove(file_or_dir)
else:
shutil.rmtree(file_or_dir)
shutil.copy('.env.example', '.env')
open('dev.db', 'a').close()
except OSError as e:
_logger.warning('While attempting to remove file(s) an error occurred')
_logger.warning(f'Error: {e}')
sys.exit(1)
|
def test_online_tokenizer_config(self):
    """Verify the hub-hosted tokenizer_config.json is fetched and applied.

    Fast (non-@slow) check run by normal CI: the language pair and both
    vocab sizes must come from the online tokenizer configuration.
    """
    tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2)
    lang_pair = [tokenizer.src_lang, tokenizer.tgt_lang]
    self.assertListEqual(lang_pair, ['en', 'ru'])
    for vocab_size in (tokenizer.src_vocab_size, tokenizer.tgt_vocab_size):
        self.assertEqual(vocab_size, 21)
| -580,064,580,574,381,400
|
this just tests that the online tokenizer files get correctly fetched and
loaded via its tokenizer_config.json and it's not slow so it's run by normal CI
|
tests/test_tokenization_fsmt.py
|
test_online_tokenizer_config
|
DATEXIS/adapter-transformers
|
python
|
def test_online_tokenizer_config(self):
"this just tests that the online tokenizer files get correctly fetched and\n loaded via its tokenizer_config.json and it's not slow so it's run by normal CI\n "
tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2)
self.assertListEqual([tokenizer.src_lang, tokenizer.tgt_lang], ['en', 'ru'])
self.assertEqual(tokenizer.src_vocab_size, 21)
self.assertEqual(tokenizer.tgt_vocab_size, 21)
|
def test_full_tokenizer(self):
    """BPE round-trip adapted from Sennrich et al. 2015 (subword-nmt)."""
    tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)
    expected_bpe = ['low', 'er</w>']
    produced = tokenizer.tokenize('lower')
    self.assertListEqual(produced, expected_bpe)
    # Unknown token maps to the <unk> id (20 in the tiny fixture vocab).
    with_unk = produced + ['<unk>']
    self.assertListEqual(tokenizer.convert_tokens_to_ids(with_unk), [14, 15, 20])
| 3,234,412,184,335,168,500
|
Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
|
tests/test_tokenization_fsmt.py
|
test_full_tokenizer
|
DATEXIS/adapter-transformers
|
python
|
def test_full_tokenizer(self):
' '
tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)
text = 'lower'
bpe_tokens = ['low', 'er</w>']
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = (tokens + ['<unk>'])
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
|
def fit(self, xi_train, xv_train, y_train, xi_valid=None, xv_valid=None, y_valid=None, early_stopping=False, refit=False):
    '\n :param xi_train: [[ind1_1, ind1_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]\n indi_j is the feature index of feature field j of sample i in the training set\n :param xv_train: [[val1_1, val1_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]\n vali_j is the feature value of feature field j of sample i in the training set\n vali_j can be either binary (1/0, for binary/categorical features)\n :param y_train: label of each sample in the training set\n :param xi_valid: list of list of feature indices of each sample in the validation set\n :param xv_valid: list of list of feature values of each sample in the validation set\n :param y_valid: label of each sample in the validation set\n :param early_stopping: perform early stopping or not\n :param refit: refit the model on the train+valid dataset or not\n :return: None\n '
    # Validation data is considered present when feature values were supplied.
    has_valid = (xv_valid is not None)
    for epoch in range(self.epoch):
        t1 = time()
        # Shuffle indices / values / labels in lockstep so rows stay aligned.
        self.shuffle_in_unison_scary(xi_train, xv_train, y_train)
        # NOTE: trailing partial batches are dropped by the int() truncation.
        total_batch = int((len(y_train) / self.batch_size))
        for i in range(total_batch):
            (xi_batch, xv_batch, y_batch) = self.get_batch(xi_train, xv_train, y_train, self.batch_size, i)
            self.fit_on_batch(xi_batch, xv_batch, y_batch)
        # Record per-epoch training (and optionally validation) loss.
        train_result = self.evaluate(xi_train, xv_train, y_train)
        self.train_result.append(train_result[0])
        if has_valid:
            valid_result = self.evaluate(xi_valid, xv_valid, y_valid)
            self.valid_result.append(valid_result[0])
        if ((self.verbose > 0) and ((epoch % self.verbose) == 0)):
            if has_valid:
                print(('[%d] train-loss=%.4f, valid-loss=%.4f [%.1f s]' % ((epoch + 1), train_result[0], valid_result[0], (time() - t1))))
            else:
                print(('[%d] train-loss=%.4f [%.1f s]' % ((epoch + 1), train_result[0], (time() - t1))))
        # Early stopping is driven solely by the validation-loss history.
        if (has_valid and early_stopping and self.training_termination(self.valid_result)):
            break
    if (has_valid and refit):
        # Refit on train+valid until the training score reaches (or beats)
        # the training score observed at the best validation epoch.
        if self.greater_is_better:
            best_valid_score = max(self.valid_result)
        else:
            best_valid_score = min(self.valid_result)
        best_epoch = self.valid_result.index(best_valid_score)
        best_train_score = self.train_result[best_epoch]
        xi_train = (xi_train + xi_valid)
        xv_train = (xv_train + xv_valid)
        y_train = (y_train + y_valid)
        # Hard cap of 100 refit epochs in case convergence is never reached.
        for epoch in range(100):
            self.shuffle_in_unison_scary(xi_train, xv_train, y_train)
            total_batch = int((len(y_train) / self.batch_size))
            for i in range(total_batch):
                (xi_batch, xv_batch, y_batch) = self.get_batch(xi_train, xv_train, y_train, self.batch_size, i)
                self.fit_on_batch(xi_batch, xv_batch, y_batch)
            train_result = self.evaluate(xi_train, xv_train, y_train)
            # NOTE(review): here train_result is used as a scalar, whereas the
            # main loop above indexes it with [0]; presumably evaluate()
            # returns a scalar and the earlier [0] relies on it being
            # indexable -- confirm evaluate()'s return type.
            ckp1 = (abs((train_result - best_train_score)) < 0.001)
            ckp2 = (self.greater_is_better and (train_result > best_train_score))
            ckp3 = ((not self.greater_is_better) and (train_result < best_train_score))
            if (ckp1 or ckp2 or ckp3):
                break
| -999,047,470,642,253,700
|
:param xi_train: [[ind1_1, ind1_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]
indi_j is the feature index of feature field j of sample i in the training set
:param xv_train: [[val1_1, val1_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]
vali_j is the feature value of feature field j of sample i in the training set
vali_j can be either binary (1/0, for binary/categorical features)
:param y_train: label of each sample in the training set
:param xi_valid: list of list of feature indices of each sample in the validation set
:param xv_valid: list of list of feature values of each sample in the validation set
:param y_valid: label of each sample in the validation set
:param early_stopping: perform early stopping or not
:param refit: refit the model on the train+valid dataset or not
:return: None
|
tutorials/chapter_05_ProductNN/ProductNN.py
|
fit
|
Daniel1586/Initiative_RecSys
|
python
|
def fit(self, xi_train, xv_train, y_train, xi_valid=None, xv_valid=None, y_valid=None, early_stopping=False, refit=False):
    """Train the model, optionally with validation, early stopping and a refit pass.

    :param xi_train: [[ind1_1, ind1_2, ...], ...]; indi_j is the feature index of
                     feature field j of sample i in the training set
    :param xv_train: [[val1_1, val1_2, ...], ...]; vali_j is the feature value of
                     feature field j of sample i (binary 1/0 for categorical features)
    :param y_train: label of each sample in the training set
    :param xi_valid: list of list of feature indices of each sample in the validation set
    :param xv_valid: list of list of feature values of each sample in the validation set
    :param y_valid: label of each sample in the validation set
    :param early_stopping: perform early stopping or not
    :param refit: refit the model on the train+valid dataset or not
    :return: None
    """
    # Validation data is optional; its presence drives logging, early
    # stopping and the optional refit phase below.
    has_valid = (xv_valid is not None)
    for epoch in range(self.epoch):
        t1 = time()
        # Shuffle the three parallel lists with one shared permutation.
        self.shuffle_in_unison_scary(xi_train, xv_train, y_train)
        # NOTE(review): trailing partial batch is dropped by the int() floor
        # division — confirm this is intended.
        total_batch = int((len(y_train) / self.batch_size))
        for i in range(total_batch):
            (xi_batch, xv_batch, y_batch) = self.get_batch(xi_train, xv_train, y_train, self.batch_size, i)
            self.fit_on_batch(xi_batch, xv_batch, y_batch)
        # One full-set evaluation per epoch; element [0] is the loss.
        train_result = self.evaluate(xi_train, xv_train, y_train)
        self.train_result.append(train_result[0])
        if has_valid:
            valid_result = self.evaluate(xi_valid, xv_valid, y_valid)
            self.valid_result.append(valid_result[0])
        if ((self.verbose > 0) and ((epoch % self.verbose) == 0)):
            if has_valid:
                print(('[%d] train-loss=%.4f, valid-loss=%.4f [%.1f s]' % ((epoch + 1), train_result[0], valid_result[0], (time() - t1))))
            else:
                print(('[%d] train-loss=%.4f [%.1f s]' % ((epoch + 1), train_result[0], (time() - t1))))
        if (has_valid and early_stopping and self.training_termination(self.valid_result)):
            break
    # Optional refit: retrain on train+valid until the training metric
    # reaches the score seen at the best validation epoch (max 100 epochs).
    if (has_valid and refit):
        if self.greater_is_better:
            best_valid_score = max(self.valid_result)
        else:
            best_valid_score = min(self.valid_result)
        best_epoch = self.valid_result.index(best_valid_score)
        best_train_score = self.train_result[best_epoch]
        xi_train = (xi_train + xi_valid)
        xv_train = (xv_train + xv_valid)
        y_train = (y_train + y_valid)
        for epoch in range(100):
            self.shuffle_in_unison_scary(xi_train, xv_train, y_train)
            total_batch = int((len(y_train) / self.batch_size))
            for i in range(total_batch):
                (xi_batch, xv_batch, y_batch) = self.get_batch(xi_train, xv_train, y_train, self.batch_size, i)
                self.fit_on_batch(xi_batch, xv_batch, y_batch)
            train_result = self.evaluate(xi_train, xv_train, y_train)
            # NOTE(review): here train_result is used as a scalar, but above
            # evaluate(...) was indexed with [0] — confirm evaluate's return type.
            ckp1 = (abs((train_result - best_train_score)) < 0.001)
            ckp2 = (self.greater_is_better and (train_result > best_train_score))
            ckp3 = ((not self.greater_is_better) and (train_result < best_train_score))
            if (ckp1 or ckp2 or ckp3):
                break
|
def list_buckets(client=s3_client):
    """
    Usage: [arg1]:[initialized s3 client object],
    Description: Gets the list of buckets
    Returns: [list of buckets]
    """
    # Bug fix: honour the `client` argument; the old code always called the
    # module-level `s3_client`, silently ignoring a caller-supplied client.
    response = client.list_buckets()
    return [bucket['Name'] for bucket in response['Buckets']]
| -6,498,878,433,956,038,000
|
Usage: [arg1]:[initialized s3 client object],
Description: Gets the list of buckets
Returns: [list of buckets]
|
ctrl4bi/aws_connect.py
|
list_buckets
|
vkreat-tech/ctrl4bi
|
python
|
def list_buckets(client=s3_client):
'\n Usage: [arg1]:[initialized s3 client object],\n Description: Gets the list of buckets\n Returns: [list of buckets]\n '
response = s3_client.list_buckets()
buckets = []
for bucket in response['Buckets']:
buckets.append(bucket['Name'])
return buckets
|
def list_objects(bucket, prefix='', client=s3_client):
    """
    Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[initialized s3 client object],
    Description: Gets the keys in the S3 location
    Returns: [list of keys], [list of directories]
    """
    keys = []
    dirs = set()
    next_token = ''
    base_kwargs = {'Bucket': bucket, 'Prefix': prefix}
    # Page through results; NextContinuationToken is absent (None) on the
    # last page, which terminates the loop.
    while (next_token is not None):
        kwargs = base_kwargs.copy()
        if (next_token != ''):
            kwargs.update({'ContinuationToken': next_token})
        results = client.list_objects_v2(**kwargs)
        # Robustness fix: 'Contents' is missing when the prefix matches no
        # objects; the old code crashed iterating over None.
        for i in (results.get('Contents') or []):
            k = i.get('Key')
            keys.append(k)
            # Everything up to (and including) the last '/' is the "directory".
            dirs.add(k[:(k.rfind('/') + 1)])
        next_token = results.get('NextContinuationToken')
    return (keys, list(dirs))
| -8,144,335,608,066,176,000
|
Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[initialized s3 client object],
Description: Gets the keys in the S3 location
Returns: [list of keys], [list of directories]
|
ctrl4bi/aws_connect.py
|
list_objects
|
vkreat-tech/ctrl4bi
|
python
|
def list_objects(bucket, prefix='', client=s3_client):
'\n Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[initialized s3 client object],\n Description: Gets the keys in the S3 location\n Returns: [list of keys], [list of directories]\n '
keys = []
dirs = set()
    next_token = ''
base_kwargs = {'Bucket': bucket, 'Prefix': prefix}
while (next_token is not None):
kwargs = base_kwargs.copy()
        if (next_token != ''):
kwargs.update({'ContinuationToken': next_token})
results = client.list_objects_v2(**kwargs)
contents = results.get('Contents')
for i in contents:
k = i.get('Key')
keys.append(k)
dirs.add(k[:(k.rfind('/') + 1)])
next_token = results.get('NextContinuationToken')
return (keys, list(dirs))
|
def download_dir(bucket, prefix, local_path, client=s3_client):
    """
    Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[local path to folder in which to place files],[arg4]:[initialized s3 client object],
    Description: Downloads the contents to the local path
    """
    keys = []
    dirs = set()
    next_token = ''
    base_kwargs = {'Bucket': bucket, 'Prefix': prefix}
    # NOTE(review): hard-coded Windows separator; on POSIX the bucket folder
    # name ends with a literal '\' — confirm this only runs on Windows.
    local = ((local_path + bucket) + '\\')
    # Page through the listing; token is None after the last page.
    while (next_token is not None):
        kwargs = base_kwargs.copy()
        if (next_token != ''):
            kwargs.update({'ContinuationToken': next_token})
        results = client.list_objects_v2(**kwargs)
        # Robustness fix: 'Contents' is missing when nothing matches the
        # prefix; the old code crashed iterating over None.
        for i in (results.get('Contents') or []):
            k = i.get('Key')
            keys.append(k)
            dirs.add(k[:(k.rfind('/') + 1)])
        next_token = results.get('NextContinuationToken')
    # Create the directory skeleton first, then download each object.
    for d in dirs:
        dest_pathname = os.path.join(local, d)
        if (not os.path.exists(os.path.dirname(dest_pathname))):
            os.makedirs(os.path.dirname(dest_pathname))
    for k in keys:
        dest_pathname = os.path.join(local, k)
        if (not os.path.exists(os.path.dirname(dest_pathname))):
            os.makedirs(os.path.dirname(dest_pathname))
        client.download_file(bucket, k, dest_pathname)
| 5,385,448,467,571,741,000
|
Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[local path to folder in which to place files],[arg4]:[initialized s3 client object],
Description: Downloads the contents to the local path
|
ctrl4bi/aws_connect.py
|
download_dir
|
vkreat-tech/ctrl4bi
|
python
|
def download_dir(bucket, prefix, local_path, client=s3_client):
'\n Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[local path to folder in which to place files],[arg4]:[initialized s3 client object],\n Description: Downloads the contents to the local path\n '
keys = []
dirs = set()
    next_token = ''
base_kwargs = {'Bucket': bucket, 'Prefix': prefix}
local = ((local_path + bucket) + '\\')
while (next_token is not None):
kwargs = base_kwargs.copy()
        if (next_token != ''):
kwargs.update({'ContinuationToken': next_token})
results = client.list_objects_v2(**kwargs)
contents = results.get('Contents')
for i in contents:
k = i.get('Key')
keys.append(k)
dirs.add(k[:(k.rfind('/') + 1)])
next_token = results.get('NextContinuationToken')
for d in dirs:
dest_pathname = os.path.join(local, d)
if (not os.path.exists(os.path.dirname(dest_pathname))):
os.makedirs(os.path.dirname(dest_pathname))
for k in keys:
dest_pathname = os.path.join(local, k)
if (not os.path.exists(os.path.dirname(dest_pathname))):
os.makedirs(os.path.dirname(dest_pathname))
client.download_file(bucket, k, dest_pathname)
|
def __init__(self, name=None, channels=None, dependencies=None, local_vars_configuration=None):
    """KernelSpec - a model defined in OpenAPI"""
    # Fall back to a default Configuration only when none was supplied.
    self.local_vars_configuration = (
        local_vars_configuration
        if local_vars_configuration is not None
        else Configuration()
    )
    self._name = None
    self._channels = None
    self._dependencies = None
    self.discriminator = None
    # Route provided values through the property setters.
    if name is not None:
        self.name = name
    if channels is not None:
        self.channels = channels
    if dependencies is not None:
        self.dependencies = dependencies
| 4,647,245,784,887,724,000
|
KernelSpec - a model defined in OpenAPI
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
__init__
|
KUAN-HSUN-LI/submarine
|
python
|
def __init__(self, name=None, channels=None, dependencies=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._channels = None
self._dependencies = None
self.discriminator = None
if (name is not None):
self.name = name
if (channels is not None):
self.channels = channels
if (dependencies is not None):
self.dependencies = dependencies
|
@property
def name(self):
    """Return the name of this KernelSpec.  # noqa: E501

    :return: The name of this KernelSpec.  # noqa: E501
    :rtype: str
    """
    return self._name
| 2,942,202,076,586,926,000
|
Gets the name of this KernelSpec. # noqa: E501
:return: The name of this KernelSpec. # noqa: E501
:rtype: str
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
name
|
KUAN-HSUN-LI/submarine
|
python
|
@property
def name(self):
'Gets the name of this KernelSpec. # noqa: E501\n\n\n :return: The name of this KernelSpec. # noqa: E501\n :rtype: str\n '
return self._name
|
@name.setter
def name(self, name):
    """Set the name of this KernelSpec.

    :param name: The name of this KernelSpec.  # noqa: E501
    :type: str
    """
    self._name = name
| 2,682,577,613,443,766,300
|
Sets the name of this KernelSpec.
:param name: The name of this KernelSpec. # noqa: E501
:type: str
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
name
|
KUAN-HSUN-LI/submarine
|
python
|
@name.setter
def name(self, name):
'Sets the name of this KernelSpec.\n\n\n :param name: The name of this KernelSpec. # noqa: E501\n :type: str\n '
self._name = name
|
@property
def channels(self):
    """Return the channels of this KernelSpec.  # noqa: E501

    :return: The channels of this KernelSpec.  # noqa: E501
    :rtype: list[str]
    """
    return self._channels
| -3,157,689,001,019,074,000
|
Gets the channels of this KernelSpec. # noqa: E501
:return: The channels of this KernelSpec. # noqa: E501
:rtype: list[str]
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
channels
|
KUAN-HSUN-LI/submarine
|
python
|
@property
def channels(self):
'Gets the channels of this KernelSpec. # noqa: E501\n\n\n :return: The channels of this KernelSpec. # noqa: E501\n :rtype: list[str]\n '
return self._channels
|
@channels.setter
def channels(self, channels):
    """Set the channels of this KernelSpec.

    :param channels: The channels of this KernelSpec.  # noqa: E501
    :type: list[str]
    """
    self._channels = channels
| 2,079,475,509,252,598,300
|
Sets the channels of this KernelSpec.
:param channels: The channels of this KernelSpec. # noqa: E501
:type: list[str]
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
channels
|
KUAN-HSUN-LI/submarine
|
python
|
@channels.setter
def channels(self, channels):
'Sets the channels of this KernelSpec.\n\n\n :param channels: The channels of this KernelSpec. # noqa: E501\n :type: list[str]\n '
self._channels = channels
|
@property
def dependencies(self):
    """Return the dependencies of this KernelSpec.  # noqa: E501

    :return: The dependencies of this KernelSpec.  # noqa: E501
    :rtype: list[str]
    """
    return self._dependencies
| -8,961,836,283,899,798,000
|
Gets the dependencies of this KernelSpec. # noqa: E501
:return: The dependencies of this KernelSpec. # noqa: E501
:rtype: list[str]
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
dependencies
|
KUAN-HSUN-LI/submarine
|
python
|
@property
def dependencies(self):
'Gets the dependencies of this KernelSpec. # noqa: E501\n\n\n :return: The dependencies of this KernelSpec. # noqa: E501\n :rtype: list[str]\n '
return self._dependencies
|
@dependencies.setter
def dependencies(self, dependencies):
    """Set the dependencies of this KernelSpec.

    :param dependencies: The dependencies of this KernelSpec.  # noqa: E501
    :type: list[str]
    """
    self._dependencies = dependencies
| 2,991,228,109,283,427,300
|
Sets the dependencies of this KernelSpec.
:param dependencies: The dependencies of this KernelSpec. # noqa: E501
:type: list[str]
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
dependencies
|
KUAN-HSUN-LI/submarine
|
python
|
@dependencies.setter
def dependencies(self, dependencies):
'Sets the dependencies of this KernelSpec.\n\n\n :param dependencies: The dependencies of this KernelSpec. # noqa: E501\n :type: list[str]\n '
self._dependencies = dependencies
|
def to_dict(self):
    """Returns the model properties as a dict"""
    result = {}
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            # Recurse into model elements, pass plain values through.
            result[attr] = [
                item.to_dict() if hasattr(item, 'to_dict') else item
                for item in value
            ]
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                key: (val.to_dict() if hasattr(val, 'to_dict') else val)
                for key, val in value.items()
            }
        else:
            result[attr] = value
    return result
| 8,442,519,487,048,767,000
|
Returns the model properties as a dict
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
to_dict
|
KUAN-HSUN-LI/submarine
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
def to_str(self):
    """Return the string representation of the model."""
    return pprint.pformat(self.to_dict())
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
to_str
|
KUAN-HSUN-LI/submarine
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def __repr__(self):
    """Support `print` and `pprint` via the model's string form."""
    return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
__repr__
|
KUAN-HSUN-LI/submarine
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
    """Return True if both objects are equal KernelSpec models."""
    if not isinstance(other, KernelSpec):
        return False
    return self.to_dict() == other.to_dict()
| -7,715,880,987,173,101,000
|
Returns true if both objects are equal
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
__eq__
|
KUAN-HSUN-LI/submarine
|
python
|
def __eq__(self, other):
if (not isinstance(other, KernelSpec)):
return False
return (self.to_dict() == other.to_dict())
|
def __ne__(self, other):
    """Return True if both objects are not equal KernelSpec models."""
    if not isinstance(other, KernelSpec):
        return True
    return self.to_dict() != other.to_dict()
| -3,783,474,172,759,675,000
|
Returns true if both objects are not equal
|
submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py
|
__ne__
|
KUAN-HSUN-LI/submarine
|
python
|
def __ne__(self, other):
if (not isinstance(other, KernelSpec)):
return True
return (self.to_dict() != other.to_dict())
|
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
    """
    Start a node with the requested rpcallowip and rpcbind parameters,
    then try to connect, and check that the set of bound addresses
    matches the expected set.
    """
    self.log.info('Bind test for %s' % str(addresses))
    expected = [(addr_to_hex(addr), port) for addr, port in expected]
    args = ['-disablewallet', '-nolisten']
    if allow_ips:
        args.extend('-rpcallowip=' + ip for ip in allow_ips)
    args.extend('-rpcbind=' + addr for addr in addresses)
    self.nodes[0].rpchost = connect_to
    self.start_node(0, args)
    # Inspect the actual sockets bound by the node process.
    pid = self.nodes[0].process.pid
    assert_equal(set(get_bind_addrs(pid)), set(expected))
    self.stop_nodes()
| -6,699,779,156,219,655,000
|
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
|
test/functional/rpcbind_test.py
|
run_bind_test
|
CaliforniaCoinCAC/californiacoin
|
python
|
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'\n Start a node with requested rpcallowip and rpcbind parameters,\n then try to connect, and check if the set of bound addresses\n matches the expected set.\n '
self.log.info(('Bind test for %s' % str(addresses)))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += [('-rpcallowip=' + x) for x in allow_ips]
binds = [('-rpcbind=' + addr) for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, (base_args + binds))
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
|
def run_allowip_test(self, allow_ips, rpchost, rpcport):
    """
    Start a node with rpcallow IP, and request getnetworkinfo
    at a non-localhost IP.
    """
    self.log.info('Allow IP test for %s:%d' % (rpchost, rpcport))
    base_args = ['-disablewallet', '-nolisten']
    base_args += ['-rpcallowip=' + ip for ip in allow_ips]
    self.nodes[0].rpchost = None
    self.start_nodes([base_args])
    # Connect from the given host:port; a successful RPC proves the allow rule.
    url = rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, '%s:%d' % (rpchost, rpcport))
    node = get_rpc_proxy(url, 0, coveragedir=self.options.coveragedir)
    node.getnetworkinfo()
    self.stop_nodes()
| -8,650,732,974,665,215,000
|
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
|
test/functional/rpcbind_test.py
|
run_allowip_test
|
CaliforniaCoinCAC/californiacoin
|
python
|
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'\n Start a node with rpcallow IP, and request getnetworkinfo\n at a non-localhost IP.\n '
self.log.info(('Allow IP test for %s:%d' % (rpchost, rpcport)))
base_args = (['-disablewallet', '-nolisten'] + [('-rpcallowip=' + x) for x in allow_ips])
self.nodes[0].rpchost = None
self.start_nodes([base_args])
node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, ('%s:%d' % (rpchost, rpcport))), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
|
def __init__(self, model_description: str, path: str=None, history=None, save_it: bool=True, new_style: bool=False):
    """
    The class constructor.
    Attention: File history plotting is not yet implemented!
    :param model_description:str: something to name the image unique and is also the file name
    :param path:str: path of a file containing a history
    :param history: a history
    :param save_it:bool: save the plot instead of showing
    :param new_style:bool: desired matplot lib standard or new style
    """
    try:
        # Fall back to a generic name so output files always have a name.
        self._model_description = (model_description if isNotNone(model_description) else 'undescribed_model')
        if (isNotNone(path) and isNone(history)):
            # File-based history; plotting from file is not implemented yet.
            self._path: str = path
        self._using_history = False
        if isNotNone(history):
            # Keras-History-based plotting; cache the metric key names.
            self._history = history
            self._history_keys = history.history.keys()
            self._history_keys_list = list(self._history_keys)
            self._using_history = True
        self._new_style: bool = new_style
        self._save_it: bool = save_it
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.Constructor]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
| 3,218,054,712,274,708,500
|
The class constructor.
Attention: File history plotting is not yet implemented!
:param model_description:str: something to name the image unique and is also the file name
:param path:str: path of a file containing a history
:param history: a history
:param save_it:bool: save the plot instead of showing
:param new_style:bool: desired matplot lib standard or new style
|
Scripts/Plotter/PlotHistory.py
|
__init__
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def __init__(self, model_description: str, path: str=None, history=None, save_it: bool=True, new_style: bool=False):
'\n The class constructor. \n Attention: File history plotting is not yet implemented!\n :param model_description:str: something to name the image unique and is also the file name\n :param path:str: path of a file containing a history\n :param history: a history\n :param save_it:bool: save the plot instead of showing\n :param new_style:bool: desired matplot lib standard or new style\n '
try:
self._model_description = (model_description if isNotNone(model_description) else 'undescribed_model')
if (isNotNone(path) and isNone(history)):
self._path: str = path
self._using_history = False
if isNotNone(history):
self._history = history
self._history_keys = history.history.keys()
self._history_keys_list = list(self._history_keys)
self._using_history = True
self._new_style: bool = new_style
self._save_it: bool = save_it
except Exception as ex:
template = 'An exception of type {0} occurred in [HistoryPlotter.Constructor]. Arguments:\n{1!r}'
message = template.format(type(ex).__name__, ex.args)
print(message)
|
def PlotHistory(self):
    """
    Plot a history taken directly from a keras history object.
    Plotting from a log file is not yet implemented!
    """
    try:
        # Nothing to do when no history was provided to the constructor.
        if not self._using_history:
            return
        if self._new_style:
            self.CollectFromHistory()
            self.DirectPlotHistory()
        else:
            self.OldPlotHistory()
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.PlotHistory]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
| -9,081,983,840,649,278,000
|
Thise method allow to plot a history from directly a keras history.
Plotting from log is not yet implemented!
|
Scripts/Plotter/PlotHistory.py
|
PlotHistory
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def PlotHistory(self):
'\n Thise method allow to plot a history from directly a keras history. \n Plotting from log is not yet implemented!\n '
try:
if self._using_history:
if self._new_style:
self.CollectFromHistory()
self.DirectPlotHistory()
else:
self.OldPlotHistory()
except Exception as ex:
template = 'An exception of type {0} occurred in [HistoryPlotter.PlotHistory]. Arguments:\n{1!r}'
message = template.format(type(ex).__name__, ex.args)
print(message)
|
def CollectAccFromHistory(self, name: str):
    """
    Collect the accuracy data for a given metric from the history into 2 lists.
    :param name:str: name of the used acc metric
    """
    try:
        acc_list: list = []
        val_acc_list: list = []
        # Normalise 'val_<metric>' to the bare metric name.
        name = re.sub('val_', '', name)
        if name in self._history_keys:
            acc_list = [key for key in self._history_keys if key == name]
            val_acc_list = [key for key in self._history_keys if key == ('val_' + name)]
            if isNotNone(acc_list) and isNotNone(val_acc_list):
                # Mark both keys as handled.
                self._history_keys_list.remove(name)
                self._history_keys_list.remove('val_' + name)
                print('Found accuracy metrics in history!')
            return (acc_list, val_acc_list)
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.CollectAccFromHistory]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
| -6,079,083,855,885,626,000
|
This method collect the accuracy data from the history into 2 lists.
:param name:str: name of the used acc metric
|
Scripts/Plotter/PlotHistory.py
|
CollectAccFromHistory
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def CollectAccFromHistory(self, name: str):
'\n This method collect the accuracy data from the history into 2 lists.\n :param name:str: name of the used acc metric\n '
try:
acc_list: list = []
val_acc_list: list = []
        name = re.sub('val_', '', name)
if (name in self._history_keys):
acc_list = [s for s in self._history_keys if (name == s)]
val_acc_list = [s for s in self._history_keys if (('val_' + name) == s)]
if (isNotNone(acc_list) and isNotNone(val_acc_list)):
self._history_keys_list.remove(name)
self._history_keys_list.remove(('val_' + name))
print('Found accuracy metrics in history!')
return (acc_list, val_acc_list)
except Exception as ex:
template = 'An exception of type {0} occurred in [HistoryPlotter.CollectAccFromHistory]. Arguments:\n{1!r}'
message = template.format(type(ex).__name__, ex.args)
print(message)
|
def CollectLossFromHistory(self):
    """
    Collect the loss metric data from the history.
    Sets _losses, _val_losses and _epochs from the cached history keys.
    """
    try:
        loss_val: str = 'loss'
        if (loss_val in self._history_keys):
            self._losses = [s for s in self._history_keys if (loss_val == s)]
            # Bug fix: the validation key is 'val_loss'; the old check built
            # 'valloss' ('val' + 'loss') and therefore never matched anything.
            self._val_losses = [s for s in self._history_keys if ((('val_' + loss_val)) in s)]
            self._epochs = len(self._history.epoch)
            if ((len(self._losses) == 0) or (len(self._val_losses) == 0)):
                print('Loss is missing in history')
                return
            if (isNotNone(self._losses) and isNotNone(self._val_losses)):
                # Mark both keys as handled.
                self._history_keys_list.remove(loss_val)
                self._history_keys_list.remove(('val_' + loss_val))
                print('Found losses in history!')
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.CollectLossFromHistory]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
| -2,332,289,488,565,356,000
|
This method collect the loss metric data from the history.
|
Scripts/Plotter/PlotHistory.py
|
CollectLossFromHistory
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def CollectLossFromHistory(self):
'\n \n '
try:
loss_val: str = 'loss'
if (loss_val in self._history_keys):
self._losses = [s for s in self._history_keys if (loss_val == s)]
self._val_losses = [s for s in self._history_keys if (('val' + loss_val) in s)]
self._epochs = len(self._history.epoch)
if ((len(self._losses) == 0) or (len(self._val_losses) == 0)):
print('Loss is missing in history')
return
if (isNotNone(self._losses) and isNotNone(self._val_losses)):
self._history_keys_list.remove(loss_val)
self._history_keys_list.remove(('val_' + loss_val))
print('Found losses in history!')
except Exception as ex:
template = 'An exception of type {0} occurred in [HistoryPlotter.CollectLossFromHistory]. Arguments:\n{1!r}'
message = template.format(type(ex).__name__, ex.args)
print(message)
|
def CollectLearningRatesFromHistory(self):
    """
    Collect the learning rate metric data from the history.
    """
    try:
        lr_val: str = 'lr'
        if lr_val in self._history_keys:
            self._learning_rates = [key for key in self._history_keys if key == lr_val]
            if isNotNone(self._learning_rates):
                # Mark the key as handled.
                self._history_keys_list.remove(lr_val)
                print('Found learning rates in history!')
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.CollectLearningRatesFromHistory]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
| -4,803,077,714,445,215,000
|
This method collect the learning rate metric data from the history.
|
Scripts/Plotter/PlotHistory.py
|
CollectLearningRatesFromHistory
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def CollectLearningRatesFromHistory(self):
'\n \n '
try:
lr_val: str = 'lr'
if (lr_val in self._history_keys):
self._learning_rates = [s for s in self._history_keys if (lr_val == s)]
if isNotNone(self._learning_rates):
self._history_keys_list.remove(lr_val)
print('Found learning rates in history!')
except Exception as ex:
template = 'An exception of type {0} occurred in [HistoryPlotter.CollectLearningRatesFromHistory]. Arguments:\n{1!r}'
message = template.format(type(ex).__name__, ex.args)
print(message)
|
def CollectFromHistory(self):
    """
    Collect all necessary train information (losses, learning rates,
    accuracy metrics) from the history.
    """
    if self._using_history:
        try:
            print('Collect losses from history...')
            self.CollectLossFromHistory()
            print('Collect learning rate from history...')
            self.CollectLearningRatesFromHistory()
            # NOTE(review): both calls below index _history_keys_list[0]; this
            # only targets two different metrics because CollectAccFromHistory
            # removes the handled keys from the list as a side effect. If that
            # removal is skipped, the same metric is collected twice — and an
            # empty list raises IndexError here. Confirm intended.
            print('Collect ', self._history_keys_list[0], ' from history...')
            (self._acc_stdcc_list, self._val_acc_stdcc_list) = self.CollectAccFromHistory(name=self._history_keys_list[0])
            print('Collect ', self._history_keys_list[0], ' from history...')
            (self._acc_topkcc_list, self._val_acc_topkcc_list) = self.CollectAccFromHistory(name=self._history_keys_list[0])
        except Exception as ex:
            template = 'An exception of type {0} occurred in [HistoryPlotter.CollectFromHistory]. Arguments:\n{1!r}'
            message = template.format(type(ex).__name__, ex.args)
            print(message)
    else:
        print('No history initialized!')
| -5,342,395,841,408,103,000
|
This method collect all necessary train informations from the history.
|
Scripts/Plotter/PlotHistory.py
|
CollectFromHistory
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def CollectFromHistory(self):
'\n \n '
if self._using_history:
try:
print('Collect losses from history...')
self.CollectLossFromHistory()
print('Collect learning rate from history...')
self.CollectLearningRatesFromHistory()
print('Collect ', self._history_keys_list[0], ' from history...')
(self._acc_stdcc_list, self._val_acc_stdcc_list) = self.CollectAccFromHistory(name=self._history_keys_list[0])
print('Collect ', self._history_keys_list[0], ' from history...')
(self._acc_topkcc_list, self._val_acc_topkcc_list) = self.CollectAccFromHistory(name=self._history_keys_list[0])
except Exception as ex:
template = 'An exception of type {0} occurred in [HistoryPlotter.CollectFromHistory]. Arguments:\n{1!r}'
message = template.format(type(ex).__name__, ex.args)
print(message)
else:
print('No history initialized!')
|
def DirectPlotHistory(self):
    """
    Plot a keras history containing losses, accuracy and, if present,
    learning rates. One figure per metric.
    """
    try:
        fig_num: int = 1
        # Loss is always collected, so plot it unconditionally.
        self.AccOrLossPlot(fig_num=fig_num, title='Model loss', metric='loss', axis_labels=['train', 'validation'], history_labels=['Loss', 'Epoch'], extender='loss_epoch_plot', train_val_lists=[self._losses, self._val_losses])
        fig_num += 1
        # Each accuracy variant is optional; plot only what the history has.
        if (('top_k_categorical_accuracy' in self._history_keys) and isNotNone(self._acc_topkcc_list) and isNotNone(self._val_acc_topkcc_list)):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Top k Categorical Accuracy', metric='top_k_categorical_accuracy', axis_labels=['train', 'validation'], history_labels=['Top k Categorical Accuracy', 'Epoch'], extender='top_k_categoriacal_epoch_plot', train_val_lists=[self._acc_topkcc_list, self._val_acc_topkcc_list])
            fig_num += 1
        if (('categorical_accuracy' in self._history_keys) and isNotNone(self._acc_stdcc_list) and isNotNone(self._val_acc_stdcc_list)):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Categorical Accuracy', metric='categorical_accuracy', axis_labels=['train', 'validation'], history_labels=['Categorical Accuracy', 'Epoch'], extender='categoriacal_epoch_plot', train_val_lists=[self._acc_stdcc_list, self._val_acc_stdcc_list])
            fig_num += 1
        if (('acc' in self._history_keys) and isNotNone(self._acc_stdcc_list) and isNotNone(self._val_acc_stdcc_list)):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Accuracy', metric='accuracy', axis_labels=['train', 'validation'], history_labels=['Accuracy', 'Epoch'], extender='accuracy_epoch_plot', train_val_lists=[self._acc_stdcc_list, self._val_acc_stdcc_list])
            fig_num += 1
        if (('lr' in self._history_keys) and isNotNone(self._learning_rates)):
            self.LearningPlot(fig_num=fig_num, title='Model Learning Rate')
            fig_num += 1
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.DirectPlotHistory]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
| 7,061,467,197,599,251,000
|
This method helps to plot a keras history containing losses, accuracy and possibly least learning rates.
|
Scripts/Plotter/PlotHistory.py
|
DirectPlotHistory
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def DirectPlotHistory(self):
    '\n This method plots a keras history containing losses, accuracy and possibly learning rates.\n '
    try:
        # Each metric gets its own matplotlib figure; fig_num keeps them distinct.
        fig_num: int = 1
        self.AccOrLossPlot(fig_num=fig_num, title='Model loss', metric='loss', axis_labels=['train', 'validation'], history_labels=['Loss', 'Epoch'], extender='loss_epoch_plot', train_val_lists=[self._losses, self._val_losses])
        fig_num += 1
        # Optional metrics are plotted only when their key was recorded in the history
        # AND the corresponding train/validation key lists were collected.
        if (('top_k_categorical_accuracy' in self._history_keys) and isNotNone(self._acc_topkcc_list) and isNotNone(self._val_acc_topkcc_list)):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Top k Categorical Accuracy', metric='top_k_categorical_accuracy', axis_labels=['train', 'validation'], history_labels=['Top k Categorical Accuracy', 'Epoch'], extender='top_k_categoriacal_epoch_plot', train_val_lists=[self._acc_topkcc_list, self._val_acc_topkcc_list])
            fig_num += 1
        if (('categorical_accuracy' in self._history_keys) and isNotNone(self._acc_stdcc_list) and isNotNone(self._val_acc_stdcc_list)):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Categorical Accuracy', metric='categorical_accuracy', axis_labels=['train', 'validation'], history_labels=['Categorical Accuracy', 'Epoch'], extender='categoriacal_epoch_plot', train_val_lists=[self._acc_stdcc_list, self._val_acc_stdcc_list])
            fig_num += 1
        if (('acc' in self._history_keys) and isNotNone(self._acc_stdcc_list) and isNotNone(self._val_acc_stdcc_list)):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Accuracy', metric='accuracy', axis_labels=['train', 'validation'], history_labels=['Accuracy', 'Epoch'], extender='accuracy_epoch_plot', train_val_lists=[self._acc_stdcc_list, self._val_acc_stdcc_list])
            fig_num += 1
        if (('lr' in self._history_keys) and isNotNone(self._learning_rates)):
            self.LearningPlot(fig_num=fig_num, title='Model Learning Rate')
            fig_num += 1
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.DirectPlotHistory]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
|
def OldPlotHistory(self):
    '\n This method plots the history in the old way.\n '
    try:
        # One figure per metric; optional metrics are drawn only when their key
        # exists among the recorded keras history keys.
        fig_num: int = 1
        self.AccOrLossPlot(fig_num=fig_num, title='Model loss', metric='loss', axis_labels=['train', 'validation'], history_labels=['Loss', 'Epoch'], extender='loss_epoch_plot')
        fig_num += 1
        if ('acc' in self._history_keys):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Accuracy', metric='acc', axis_labels=['train', 'validation'], history_labels=['Accuracy', 'Epoch'], extender='accuracy_epoch_plot')
            fig_num += 1
        if ('top_k_categorical_accuracy' in self._history_keys):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Top k Categorical Accuracy', metric='top_k_categorical_accuracy', axis_labels=['train', 'validation'], history_labels=['Top k Categorical Accuracy', 'Epoch'], extender='top_k_categoriacal_epoch_plot')
            fig_num += 1
        if ('categorical_accuracy' in self._history_keys):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Categorical Accuracy', metric='categorical_accuracy', axis_labels=['train', 'validation'], history_labels=['Categorical Accuracy', 'Epoch'], extender='categoriacal_epoch_plot')
            fig_num += 1
        if ('lr' in self._history_keys):
            self.LearningPlot(fig_num=fig_num, title='Model Learning Rate')
            fig_num += 1
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.OldPlotHistory]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
| 7,242,522,773,362,320,000
|
This method plot the history in the old way.
|
Scripts/Plotter/PlotHistory.py
|
OldPlotHistory
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def OldPlotHistory(self):
    '\n Plot the training history metric-by-metric (legacy single-key style).\n '
    try:
        fig_num: int = 1
        # Loss is always present; the remaining metrics depend on how the model was compiled.
        self.AccOrLossPlot(fig_num=fig_num, title='Model loss', metric='loss', axis_labels=['train', 'validation'], history_labels=['Loss', 'Epoch'], extender='loss_epoch_plot')
        fig_num += 1
        if ('acc' in self._history_keys):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Accuracy', metric='acc', axis_labels=['train', 'validation'], history_labels=['Accuracy', 'Epoch'], extender='accuracy_epoch_plot')
            fig_num += 1
        if ('top_k_categorical_accuracy' in self._history_keys):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Top k Categorical Accuracy', metric='top_k_categorical_accuracy', axis_labels=['train', 'validation'], history_labels=['Top k Categorical Accuracy', 'Epoch'], extender='top_k_categoriacal_epoch_plot')
            fig_num += 1
        if ('categorical_accuracy' in self._history_keys):
            self.AccOrLossPlot(fig_num=fig_num, title='Model Categorical Accuracy', metric='categorical_accuracy', axis_labels=['train', 'validation'], history_labels=['Categorical Accuracy', 'Epoch'], extender='categoriacal_epoch_plot')
            fig_num += 1
        if ('lr' in self._history_keys):
            self.LearningPlot(fig_num=fig_num, title='Model Learning Rate')
            fig_num += 1
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.OldPlotHistory]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
|
def AccOrLossPlot(self, fig_num: int, title: str, metric: str, axis_labels: list=None, history_labels: list=None, extender: str='_epoch_plot', train_val_lists: list=None):
    '\n This method wraps the plot creation for a single metric of the keras train history.\n :param fig_num:int: figure number\n :param title:str: figure title\n :param metric:str: desired metric\n :param axis_labels:list: axis labels \n :param history_labels:list: history labels\n :param extender:str: plot file name extender\n :param train_val_lists:list: a list containing the train and validation list of a defined metric\n '
    try:
        # Avoid mutable default arguments: materialize the list defaults per call.
        if axis_labels is None:
            axis_labels = ['train', 'validation']
        if history_labels is None:
            history_labels = ['Metric', 'Epoch']
        figure = plt.figure(fig_num)
        plt.suptitle(title, fontsize=14, fontweight='bold')
        # The subtitle reports the final metric values of the training run.
        if (metric == 'loss'):
            plt.title(self.CalcResultLoss(history=self._history))
        else:
            plt.title(self.CalcResultAccuracy(history=self._history, metric=metric))
        if (not self._new_style):
            plt.plot(self._history.history[metric], color='blue', label=axis_labels[0])
            plt.plot(self._history.history[('val_' + metric)], color='orange', label=axis_labels[1])
        elif ((train_val_lists is not None) and (len(train_val_lists) == 2)):
            # New style: train_val_lists holds [train_keys, validation_keys] into history.history.
            for l in train_val_lists[0]:
                plt.plot(self._epochs, self._history.history[l], color='b', label=(((('Training ' + metric) + ' (') + str(format(self._history.history[l][(- 1)], '.5f'))) + ')'))
            for l in train_val_lists[1]:
                plt.plot(self._epochs, self._history.history[l], color='g', label=(((('Validation ' + metric) + ' (') + str(format(self._history.history[l][(- 1)], '.5f'))) + ')'))
        plt.ylabel(history_labels[0])
        plt.xlabel(history_labels[1])
        # NOTE(review): passing axis_labels here overrides the per-line labels built above.
        plt.legend(axis_labels, loc='lower right')
        if self._save_it:
            PlotSaver(self._model_description, figure).SavePyPlotToFile(extender=extender)
        else:
            plt.show()
        figure.clf()
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.AccOrLossPlot]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
| -994,659,522,012,894,500
|
This method wraps the plot creation for a single metric of the keras train history.
:param fig_num:int: figure number
:param title:str: figure title
:param metric:str: desired metric
:param axis_labels:list: axis labels
:param history_labels:list: history labels
:param extender:str: plot file name extender
:param train_val_lists:list: a list containing the train and validation list of a defined metric
|
Scripts/Plotter/PlotHistory.py
|
AccOrLossPlot
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def AccOrLossPlot(self, fig_num: int, title: str, metric: str, axis_labels: list=['train', 'validation'], history_labels: list=['Metric', 'Epoch'], extender: str='_epoch_plot', train_val_lists: list=None):
    '\n This method wraps the plot creation for a single metric of the keras train history.\n :param fig_num:int: figure number\n :param title:str: figure title\n :param metric:str: desired metric\n :param axis_labels:list: axis labels \n :param history_labels:list: history labels\n :param extender:str: plot file name extender\n :param train_val_lists:list: a list containing the train and validation list of a defined metric\n '
    try:
        figure = plt.figure(fig_num)
        plt.suptitle(title, fontsize=14, fontweight='bold')
        # Subtitle reports the final metric values of the run.
        if (metric == 'loss'):
            plt.title(self.CalcResultLoss(history=self._history))
        else:
            plt.title(self.CalcResultAccuracy(history=self._history, metric=metric))
        if (not self._new_style):
            plt.plot(self._history.history[metric], color='blue', label=axis_labels[0])
            plt.plot(self._history.history[('val_' + metric)], color='orange', label=axis_labels[1])
        # NOTE(review): prefer 'is not None' over '!= None'.
        elif ((train_val_lists != None) and (len(train_val_lists) == 2)):
            # train_val_lists = [train_keys, validation_keys] into history.history.
            for l in train_val_lists[0]:
                plt.plot(self._epochs, self._history.history[l], color='b', label=(((('Training ' + metric) + ' (') + str(format(self._history.history[l][(- 1)], '.5f'))) + ')'))
            for l in train_val_lists[1]:
                plt.plot(self._epochs, self._history.history[l], color='g', label=(((('Validation ' + metric) + ' (') + str(format(self._history.history[l][(- 1)], '.5f'))) + ')'))
        plt.ylabel(history_labels[0])
        plt.xlabel(history_labels[1])
        # NOTE(review): passing axis_labels to legend() overrides the per-line labels built above.
        plt.legend(axis_labels, loc='lower right')
        if self._save_it:
            PlotSaver(self._model_description, figure).SavePyPlotToFile(extender=extender)
        else:
            plt.show()
        figure.clf()
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.AccOrLossPlot]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
|
def LearningPlot(self, fig_num: int, title: str='Model Learning Rate', metric: str='lr', axis_labels: list=None, history_labels: list=None, extender: str='learning_rate_epoch_plot'):
    '\n This method plots the single learning rate curve.\n :param fig_num:int: figure number\n :param title:str: figure title\n :param metric:str: desired metric\n :param axis_labels:list: axis labels \n :param history_labels:list: history labels\n :param extender:str: plot file name extender\n '
    try:
        # Avoid mutable default arguments: materialize the list defaults per call.
        if axis_labels is None:
            axis_labels = ['train', 'validation']
        if history_labels is None:
            history_labels = ['Learning Rate', 'Epoch']
        figure = plt.figure(fig_num)
        plt.suptitle(title, fontsize=14, fontweight='bold')
        plt.title(self.CalcResultLearnRate(history=self._history))
        if (not self._new_style):
            plt.plot(self._history.history[metric], color='red', label='learning rate')
        else:
            # New style: every learning-rate key found in the history gets its own curve.
            for l in self._learning_rates:
                plt.plot(self._epochs, self._history.history[l], color='r', label=(('Learning Rate (' + str(format(self._history.history[l][(- 1)], '.5f'))) + ')'))
        plt.ylabel(history_labels[0])
        plt.xlabel(history_labels[1])
        plt.legend(axis_labels, loc='upper right')
        if self._save_it:
            # Bug fix: honour the 'extender' parameter instead of a hard-coded string.
            PlotSaver(self._model_description, figure).SavePyPlotToFile(extender=extender)
        else:
            plt.show()
        figure.clf()
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.LearningPlot]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
| 8,425,297,315,928,478,000
|
This method plots the single learning rate curve.
:param fig_num:int: figure number
:param title:str: figure title
:param metric:str: desired metric
:param axis_labels:list: axis labels
:param history_labels:list: history labels
:param extender:str: plot file name extender
|
Scripts/Plotter/PlotHistory.py
|
LearningPlot
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def LearningPlot(self, fig_num: int, title: str='Model Learning Rate', metric: str='lr', axis_labels: list=['train', 'validation'], history_labels: list=['Learning Rate', 'Epoch'], extender: str='learning_rate_epoch_plot'):
    '\n This method plots the single learning rate curve.\n :param fig_num:int: figure number\n :param title:str: figure title\n :param metric:str: desired metric\n :param axis_labels:list: axis labels \n :param history_labels:list: history labels\n :param extender:str: plot file name extender\n '
    try:
        figure = plt.figure(fig_num)
        plt.suptitle(title, fontsize=14, fontweight='bold')
        plt.title(self.CalcResultLearnRate(history=self._history))
        if (not self._new_style):
            plt.plot(self._history.history[metric], color='red', label='learning rate')
        else:
            # New style: one curve per learning-rate key found in the history.
            for l in self._learning_rates:
                plt.plot(self._epochs, self._history.history[l], color='r', label=(('Learning Rate (' + str(format(self._history.history[l][(- 1)], '.5f'))) + ')'))
        plt.ylabel(history_labels[0])
        plt.xlabel(history_labels[1])
        plt.legend(axis_labels, loc='upper right')
        if self._save_it:
            # NOTE(review): the 'extender' parameter is ignored here; the string is hard-coded.
            PlotSaver(self._model_description, figure).SavePyPlotToFile(extender='learning_rate_epoch_plot')
        else:
            plt.show()
        figure.clf()
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.LearningPlot]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
|
def CalcResultAccuracy(self, history, metric: str='acc'):
    """Return a one-line summary of the final training/validation accuracy.

    :param history: keras training history object (exposes a ``history`` dict)
    :param metric: accuracy key inside ``history.history``
    """
    try:
        final_train = (100 * history.history[metric][-1])
        final_val = (100 * history.history[('val_' + metric)][-1])
        return ('Training accuracy: %.2f%% / Validation accuracy: %.2f%%' % (final_train, final_val))
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.CalcResultAccuracy]. Arguments:\n{1!r}'
        print(template.format(type(ex).__name__, ex.args))
| -3,302,066,514,028,731,000
|
This method show the train acc results.
:param history: history of the training
|
Scripts/Plotter/PlotHistory.py
|
CalcResultAccuracy
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def CalcResultAccuracy(self, history, metric: str='acc'):
    '\n This method shows the train acc results.\n :param history: history of the training\n :param metric: accuracy key inside history.history\n '
    try:
        # history.history[...][-1] is the metric value of the final epoch, scaled to percent.
        return ('Training accuracy: %.2f%% / Validation accuracy: %.2f%%' % ((100 * history.history[metric][(- 1)]), (100 * history.history[('val_' + metric)][(- 1)])))
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.CalcResultAccuracy]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
|
def CalcResultLoss(self, history):
    """Return a one-line summary of the final training/validation loss.

    The string form of each loss is crudely truncated by dropping its last
    six characters (same behaviour as the original implementation).

    :param history: keras training history object (exposes a ``history`` dict)
    """
    try:
        final_train = str(history.history['loss'][-1])[:-6]
        final_val = str(history.history['val_loss'][-1])[:-6]
        return f'Training loss: {final_train} / Validation loss: {final_val}'
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.CalcResultLoss]. Arguments:\n{1!r}'
        print(template.format(type(ex).__name__, ex.args))
| -9,213,006,050,517,600,000
|
This method show the train loss results.
:param history: history of the training
|
Scripts/Plotter/PlotHistory.py
|
CalcResultLoss
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def CalcResultLoss(self, history):
    '\n This method shows the train loss results.\n :param history: history of the training\n '
    try:
        # NOTE(review): [:-6] crudely truncates the float's string form; the kept
        # precision therefore depends on the length of repr(loss).
        return ((('Training loss: ' + str(history.history['loss'][(- 1)])[:(- 6)]) + ' / Validation loss: ') + str(history.history['val_loss'][(- 1)])[:(- 6)])
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.CalcResultLoss]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
|
def CalcResultLearnRate(self, history):
    """Return a one-line summary of the final learning rate.

    :param history: keras training history object (exposes a ``history`` dict)
    """
    try:
        final_lr = history.history['lr'][-1]
        return f'Training Learn Rate: {final_lr}'
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.CalcResultLearnRate]. Arguments:\n{1!r}'
        print(template.format(type(ex).__name__, ex.args))
| 2,398,961,352,070,724,000
|
This method show the train learn rate.
:param history: history of the training
|
Scripts/Plotter/PlotHistory.py
|
CalcResultLearnRate
|
ReleasedBrainiac/GraphToSequenceNN
|
python
|
def CalcResultLearnRate(self, history):
    '\n This method shows the train learn rate.\n :param history: history of the training\n '
    try:
        # Reports the learning rate recorded for the final epoch.
        return ('Training Learn Rate: ' + str(history.history['lr'][(- 1)]))
    except Exception as ex:
        template = 'An exception of type {0} occurred in [HistoryPlotter.CalcResultLearnRate]. Arguments:\n{1!r}'
        message = template.format(type(ex).__name__, ex.args)
        print(message)
|
@staticmethod
def add_prefix(label_words, prefix):
"Add prefix to label words. For example, if a label words is in the middle of a template,\n the prefix should be ``' '``.\n\n Args:\n label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.\n prefix (:obj:`str`, optional): The prefix string of the verbalizer.\n\n Returns:\n :obj:`Sequence[str]`: New label words with prefix.\n "
new_label_words = []
if isinstance(label_words[0], list):
assert (max([len(w) for w in label_words]) == 1), 'Providing multiple label words, you should use other verbalizers instead.'
label_words = [w[0] for w in label_words]
for word in label_words:
if word.startswith('<!>'):
new_label_words.append(word.split('<!>')[1])
else:
new_label_words.append((prefix + word))
return new_label_words
| 3,213,134,003,616,306,700
|
Add prefix to label words. For example, if a label words is in the middle of a template,
the prefix should be ``' '``.
Args:
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer.
Returns:
:obj:`Sequence[str]`: New label words with prefix.
|
openprompt/prompts/one2one_verbalizer.py
|
add_prefix
|
BIT-ENGD/OpenPrompt
|
python
|
@staticmethod
def add_prefix(label_words, prefix):
    "Add prefix to label words. For example, if a label words is in the middle of a template,\n the prefix should be ``' '``.\n\n Args:\n label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.\n prefix (:obj:`str`, optional): The prefix string of the verbalizer.\n\n Returns:\n :obj:`Sequence[str]`: New label words with prefix.\n "
    new_label_words = []
    if isinstance(label_words[0], list):
        # Only one label word per class is supported; unwrap the single-element lists.
        assert (max([len(w) for w in label_words]) == 1), 'Providing multiple label words, you should use other verbalizers instead.'
        label_words = [w[0] for w in label_words]
    for word in label_words:
        # The '<!>' marker opts a word out of prefixing; the marker is stripped.
        if word.startswith('<!>'):
            new_label_words.append(word.split('<!>')[1])
        else:
            new_label_words.append((prefix + word))
    return new_label_words
|
def generate_parameters(self) -> List:
    'In basic manual template, the parameters are generated from label words directly.\n In this implementation, the label_words should not be tokenized into more than one token.\n '
    words_ids = []
    for word in self.label_words:
        word_ids = self.tokenizer.encode(word, add_special_tokens=False)
        if (len(word_ids) > 1):
            # Multi-token label words are kept but flagged, since this verbalizer
            # expects one token per word.
            logger.warning('Word {} is split into multiple tokens: {}. If this is not what you expect, try using another word for this verbalizer'.format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))
        words_ids.append(word_ids)
    # Right-pad every id sequence to the longest one; the mask marks real tokens (1) vs padding (0).
    max_len = max([len(ids) for ids in words_ids])
    words_ids_mask = [(([1] * len(ids)) + ([0] * (max_len - len(ids)))) for ids in words_ids]
    words_ids = [(ids + ([0] * (max_len - len(ids)))) for ids in words_ids]
    words_ids_tensor = torch.tensor(words_ids)
    words_ids_mask = torch.tensor(words_ids_mask)
    # Stored as frozen parameters so they follow the module's device/dtype without being trained.
    self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)
    self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)
| 1,266,850,082,361,135,000
|
In basic manual template, the parameters are generated from label words directly.
In this implementation, the label_words should not be tokenized into more than one token.
|
openprompt/prompts/one2one_verbalizer.py
|
generate_parameters
|
BIT-ENGD/OpenPrompt
|
python
|
def generate_parameters(self) -> List:
    'In basic manual template, the parameters are generated from label words directly.\n In this implementation, the label_words should not be tokenized into more than one token.\n '
    words_ids = []
    for word in self.label_words:
        word_ids = self.tokenizer.encode(word, add_special_tokens=False)
        if (len(word_ids) > 1):
            # Warn when a label word does not map to exactly one token id.
            logger.warning('Word {} is split into multiple tokens: {}. If this is not what you expect, try using another word for this verbalizer'.format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))
        words_ids.append(word_ids)
    # Pad all id lists to a common length and build a matching 0/1 validity mask.
    max_len = max([len(ids) for ids in words_ids])
    words_ids_mask = [(([1] * len(ids)) + ([0] * (max_len - len(ids)))) for ids in words_ids]
    words_ids = [(ids + ([0] * (max_len - len(ids)))) for ids in words_ids]
    words_ids_tensor = torch.tensor(words_ids)
    words_ids_mask = torch.tensor(words_ids_mask)
    # Non-trainable parameters: moved with the module, excluded from optimization.
    self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)
    self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)
|
def project(self, logits: torch.Tensor, **kwargs) -> torch.Tensor:
    '\n Project the labels, the return value is the normalized (sum to 1) probs of label words.\n\n Args:\n logits (:obj:`torch.Tensor`): The original logits of label words.\n\n Returns:\n :obj:`torch.Tensor`: The normalized logits of label words\n '
    # Fancy-index the vocabulary axis with the stored label-word token ids,
    # then collapse multi-token words according to the stored mask.
    label_words_logits = logits[:, self.label_words_ids]
    label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)
    return label_words_logits
| 364,629,113,235,906,700
|
Project the labels, the return value is the normalized (sum to 1) probs of label words.
Args:
logits (:obj:`torch.Tensor`): The original logits of label words.
Returns:
:obj:`torch.Tensor`: The normalized logits of label words
|
openprompt/prompts/one2one_verbalizer.py
|
project
|
BIT-ENGD/OpenPrompt
|
python
|
def project(self, logits: torch.Tensor, **kwargs) -> torch.Tensor:
    '\n Project the labels, the return value is the normalized (sum to 1) probs of label words.\n\n Args:\n logits (:obj:`torch.Tensor`): The original logits of label words.\n\n Returns:\n :obj:`torch.Tensor`: The normalized logits of label words\n '
    # Gather, per batch row, the logits at every label-word token id; the
    # mask-aware reduction then merges multi-token words into one score each.
    label_words_logits = logits[:, self.label_words_ids]
    label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)
    return label_words_logits
|
def process_logits(self, logits: torch.Tensor, **kwargs):
    'A whole framework to process the original logits over the vocabulary, which contains four steps:\n\n (1) Project the logits into logits of label words\n\n if self.post_log_softmax is True:\n\n (2) Normalize over all label words\n\n (3) Calibrate (optional)\n\n Args:\n logits (:obj:`torch.Tensor`): The orginal logits.\n\n Returns:\n (:obj:`torch.Tensor`): The final processed logits over the label words set.\n '
    # Step 1: restrict the vocabulary logits to the label-word set.
    label_words_logits = self.project(logits, **kwargs)
    if self.post_log_softmax:
        # Step 2: convert to probabilities over all label words.
        label_words_probs = self.normalize(label_words_logits)
        # Step 3 (optional): divide out the contextual prior if calibration logits were set.
        if (hasattr(self, '_calibrate_logits') and (self._calibrate_logits is not None)):
            label_words_probs = self.calibrate(label_words_probs=label_words_probs)
        # 1e-15 keeps log() finite when a probability underflows to zero.
        label_words_logits = torch.log((label_words_probs + 1e-15))
    return label_words_logits
| 1,527,449,169,758,929,200
|
A whole framework to process the original logits over the vocabulary, which contains four steps:
(1) Project the logits into logits of label words
if self.post_log_softmax is True:
(2) Normalize over all label words
(3) Calibrate (optional)
Args:
logits (:obj:`torch.Tensor`): The original logits.
Returns:
(:obj:`torch.Tensor`): The final processed logits over the label words set.
|
openprompt/prompts/one2one_verbalizer.py
|
process_logits
|
BIT-ENGD/OpenPrompt
|
python
|
def process_logits(self, logits: torch.Tensor, **kwargs):
    'A whole framework to process the original logits over the vocabulary, which contains four steps:\n\n (1) Project the logits into logits of label words\n\n if self.post_log_softmax is True:\n\n (2) Normalize over all label words\n\n (3) Calibrate (optional)\n\n Args:\n logits (:obj:`torch.Tensor`): The orginal logits.\n\n Returns:\n (:obj:`torch.Tensor`): The final processed logits over the label words set.\n '
    # Project the full-vocabulary logits onto the label-word set.
    label_words_logits = self.project(logits, **kwargs)
    if self.post_log_softmax:
        # Normalize, optionally calibrate, and go back to log-space.
        label_words_probs = self.normalize(label_words_logits)
        if (hasattr(self, '_calibrate_logits') and (self._calibrate_logits is not None)):
            label_words_probs = self.calibrate(label_words_probs=label_words_probs)
        # The epsilon guards torch.log against zero probabilities.
        label_words_logits = torch.log((label_words_probs + 1e-15))
    return label_words_logits
|
def normalize(self, logits: torch.Tensor) -> torch.Tensor:
    """Softmax-normalize ``logits`` jointly over all non-batch dimensions.

    Args:
        logits (:obj:`Tensor`): logits over the entire vocabulary.

    Returns:
        :obj:`Tensor`: probabilities with the same shape as ``logits``.
    """
    original_shape = logits.shape
    flat = logits.reshape(original_shape[0], -1)
    probs = F.softmax(flat, dim=-1)
    return probs.reshape(*original_shape)
| -2,564,412,923,553,433,000
|
Given logits regarding the entire vocabulary, return the probs over the label words set.
Args:
logits (:obj:`Tensor`): The logits over the entire vocabulary.
Returns:
:obj:`Tensor`: The logits over the label words set.
|
openprompt/prompts/one2one_verbalizer.py
|
normalize
|
BIT-ENGD/OpenPrompt
|
python
|
def normalize(self, logits: torch.Tensor) -> torch.Tensor:
    '\n Given logits regarding the entire vocabulary, return the probs over the label words set.\n\n Args:\n logits (:obj:`Tensor`): The logits over the entire vocabulary.\n\n Returns:\n :obj:`Tensor`: The logits over the label words set.\n\n '
    # Flatten all non-batch dimensions, softmax jointly over them, then restore the shape.
    batch_size = logits.shape[0]
    return F.softmax(logits.reshape(batch_size, (- 1)), dim=(- 1)).reshape(*logits.shape)
|
def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:
    '\n\n Args:\n label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]\n\n Returns:\n :obj:`torch.Tensor`: The calibrated probability of label words.\n '
    shape = label_words_probs.shape
    assert (self._calibrate_logits.dim() == 1), 'self._calibrate_logits are not 1-d tensor'
    # Contextualized prior of each label word, derived from the stored calibration logits.
    calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))
    assert ((calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:]) and (calibrate_label_words_probs.shape[0] == 1)), 'shape not match'
    # Divide out the prior (epsilon avoids division by zero), then renormalize each batch row.
    label_words_probs /= (calibrate_label_words_probs + 1e-15)
    norm = label_words_probs.reshape(shape[0], (- 1)).sum(dim=(- 1), keepdim=True)
    label_words_probs /= norm
    return label_words_probs
| 5,181,480,780,885,066,000
|
Args:
label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]
Returns:
:obj:`torch.Tensor`: The calibrated probability of label words.
|
openprompt/prompts/one2one_verbalizer.py
|
calibrate
|
BIT-ENGD/OpenPrompt
|
python
|
def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:
    '\n\n Args:\n label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]\n\n Returns:\n :obj:`torch.Tensor`: The calibrated probability of label words.\n '
    shape = label_words_probs.shape
    assert (self._calibrate_logits.dim() == 1), 'self._calibrate_logits are not 1-d tensor'
    # Project/normalize the 1-d calibration logits to get a per-label-word prior.
    calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))
    assert ((calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:]) and (calibrate_label_words_probs.shape[0] == 1)), 'shape not match'
    # In-place calibration: divide by the prior and renormalize row-wise.
    # NOTE(review): this mutates the caller's tensor.
    label_words_probs /= (calibrate_label_words_probs + 1e-15)
    norm = label_words_probs.reshape(shape[0], (- 1)).sum(dim=(- 1), keepdim=True)
    label_words_probs /= norm
    return label_words_probs
|
def timeline_trimmed_to_range(in_timeline, trim_range):
    """Return a deep copy of *in_timeline* trimmed to *trim_range*.

    Items entirely outside the range are removed and items on the ends are
    trimmed to the range; the timeline is never expanded, only shortened.
    The input timeline is left untouched. The same effect can usually be
    achieved non-destructively by setting the Track's source_range, but this
    function really cuts away the material outside the range.
    """
    trimmed = copy.deepcopy(in_timeline)
    source_tracks = in_timeline.tracks
    for idx in range(len(source_tracks)):
        trimmed.tracks[idx] = track_algo.track_trimmed_to_range(source_tracks[idx], trim_range)
    return trimmed
| -1,156,443,789,120,338,000
|
Returns a new timeline that is a copy of the in_timeline, but with items
outside the trim_range removed and items on the ends trimmed to the
trim_range. Note that the timeline is never expanded, only shortened.
Please note that you could do nearly the same thing non-destructively by
just setting the Track's source_range but sometimes you want to really cut
away the stuff outside and that's what this function is meant for.
|
src/py-opentimelineio/opentimelineio/algorithms/timeline_algo.py
|
timeline_trimmed_to_range
|
AWhetter/OpenTimelineIO
|
python
|
def timeline_trimmed_to_range(in_timeline, trim_range):
    "Returns a new timeline that is a copy of the in_timeline, but with items\n outside the trim_range removed and items on the ends trimmed to the\n trim_range. Note that the timeline is never expanded, only shortened.\n Please note that you could do nearly the same thing non-destructively by\n just setting the Track's source_range but sometimes you want to really cut\n away the stuff outside and that's what this function is meant for."
    # Deep-copy first so the input timeline is left untouched; each track in the
    # copy is then replaced by its trimmed counterpart.
    new_timeline = copy.deepcopy(in_timeline)
    for (track_num, child_track) in enumerate(in_timeline.tracks):
        new_timeline.tracks[track_num] = track_algo.track_trimmed_to_range(child_track, trim_range)
    return new_timeline
|
@verbose
def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True, show=True, verbose=None):
    "Plot Covariance data.\n\n Parameters\n ----------\n cov : instance of Covariance\n The covariance matrix.\n info : dict\n Measurement info.\n exclude : list of str | str\n List of channels to exclude. If empty do not exclude any channel.\n If 'bads', exclude info['bads'].\n colorbar : bool\n Show colorbar or not.\n proj : bool\n Apply projections or not.\n show_svd : bool\n Plot also singular values of the noise covariance for each sensor\n type. We show square roots ie. standard deviations.\n show : bool\n Show figure if True.\n %(verbose)s\n\n Returns\n -------\n fig_cov : instance of matplotlib.figure.Figure\n The covariance plot.\n fig_svd : instance of matplotlib.figure.Figure | None\n The SVD spectra plot of the covariance.\n\n See Also\n --------\n mne.compute_rank\n\n Notes\n -----\n For each channel type, the rank is estimated using\n :func:`mne.compute_rank`.\n\n .. versionchanged:: 0.19\n Approximate ranks for each channel type are shown with red dashed lines.\n "
    from ..cov import Covariance
    import matplotlib.pyplot as plt
    from matplotlib.colors import Normalize
    # Split the covariance into per-channel-type sub-matrices and drop excluded channels.
    (info, C, ch_names, idx_names) = _index_info_cov(info, cov, exclude)
    del cov, exclude
    projs = []
    if proj:
        # Build an SSP operator from the (activated) projectors and apply it to C.
        projs = copy.deepcopy(info['projs'])
        for p in projs:
            p['active'] = True
        (P, ncomp, _) = make_projector(projs, ch_names)
        if (ncomp > 0):
            logger.info((' Created an SSP operator (subspace dimension = %d)' % ncomp))
            C = np.dot(P, np.dot(C, P.T))
        else:
            logger.info(' The projection vectors do not apply to these channels.')
    # One covariance image per channel type, with a symmetric color scale around zero.
    (fig_cov, axes) = plt.subplots(1, len(idx_names), squeeze=False, figsize=((3.8 * len(idx_names)), 3.7))
    for (k, (idx, name, _, _, _)) in enumerate(idx_names):
        vlim = np.max(np.abs(C[idx][:, idx]))
        im = axes[(0, k)].imshow(C[idx][:, idx], interpolation='nearest', norm=Normalize(vmin=(- vlim), vmax=vlim), cmap='RdBu_r')
        axes[(0, k)].set(title=name)
        if colorbar:
            from mpl_toolkits.axes_grid1 import make_axes_locatable
            divider = make_axes_locatable(axes[(0, k)])
            cax = divider.append_axes('right', size='5.5%', pad=0.05)
            plt.colorbar(im, cax=cax, format='%.0e')
    fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
    tight_layout(fig=fig_cov)
    fig_svd = None
    if show_svd:
        # Optional second figure: SVD spectra (square roots -> standard deviations).
        (fig_svd, axes) = plt.subplots(1, len(idx_names), squeeze=False, figsize=((3.8 * len(idx_names)), 3.7))
        for (k, (idx, name, unit, scaling, key)) in enumerate(idx_names):
            this_C = C[idx][:, idx]
            s = linalg.svd(this_C, compute_uv=False)
            this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx], [], [], 0)
            this_info = pick_info(info, idx)
            this_info['projs'] = []
            # Rank is estimated per channel type and marked with a dashed red line.
            this_rank = compute_rank(this_C, info=this_info)
            # Guard against non-positive singular values so the log-scale plot stays finite.
            s[(s <= 0)] = (1e-10 * s[(s > 0)].min())
            s = (np.sqrt(s) * scaling)
            axes[(0, k)].plot(s, color='k', zorder=3)
            this_rank = this_rank[key]
            axes[(0, k)].axvline((this_rank - 1), ls='--', color='r', alpha=0.5, zorder=4, clip_on=False)
            axes[(0, k)].text((this_rank - 1), axes[(0, k)].get_ylim()[1], ('rank ≈ %d' % (this_rank,)), ha='right', va='top', color='r', alpha=0.5, zorder=4)
            axes[(0, k)].set(ylabel=(u'Noise σ (%s)' % unit), yscale='log', xlabel='Eigenvalue index', title=name, xlim=[0, (len(s) - 1)])
        tight_layout(fig=fig_svd)
    plt_show(show)
    return (fig_cov, fig_svd)
| -5,992,722,798,221,997,000
|
Plot Covariance data.
Parameters
----------
cov : instance of Covariance
The covariance matrix.
info : dict
Measurement info.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
        type. We show square roots, i.e. standard deviations.
show : bool
Show figure if True.
%(verbose)s
Returns
-------
fig_cov : instance of matplotlib.figure.Figure
The covariance plot.
fig_svd : instance of matplotlib.figure.Figure | None
The SVD spectra plot of the covariance.
See Also
--------
mne.compute_rank
Notes
-----
For each channel type, the rank is estimated using
:func:`mne.compute_rank`.
.. versionchanged:: 0.19
Approximate ranks for each channel type are shown with red dashed lines.
|
mne/viz/misc.py
|
plot_cov
|
Aniket-Pradhan/mne-python
|
python
|
@verbose
def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True, show=True, verbose=None):
"Plot Covariance data.\n\n Parameters\n ----------\n cov : instance of Covariance\n The covariance matrix.\n info : dict\n Measurement info.\n exclude : list of str | str\n List of channels to exclude. If empty do not exclude any channel.\n If 'bads', exclude info['bads'].\n colorbar : bool\n Show colorbar or not.\n proj : bool\n Apply projections or not.\n show_svd : bool\n Plot also singular values of the noise covariance for each sensor\n type. We show square roots ie. standard deviations.\n show : bool\n Show figure if True.\n %(verbose)s\n\n Returns\n -------\n fig_cov : instance of matplotlib.figure.Figure\n The covariance plot.\n fig_svd : instance of matplotlib.figure.Figure | None\n The SVD spectra plot of the covariance.\n\n See Also\n --------\n mne.compute_rank\n\n Notes\n -----\n For each channel type, the rank is estimated using\n :func:`mne.compute_rank`.\n\n .. versionchanged:: 0.19\n Approximate ranks for each channel type are shown with red dashed lines.\n "
from ..cov import Covariance
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
(info, C, ch_names, idx_names) = _index_info_cov(info, cov, exclude)
del cov, exclude
projs = []
if proj:
projs = copy.deepcopy(info['projs'])
for p in projs:
p['active'] = True
(P, ncomp, _) = make_projector(projs, ch_names)
if (ncomp > 0):
logger.info((' Created an SSP operator (subspace dimension = %d)' % ncomp))
C = np.dot(P, np.dot(C, P.T))
else:
logger.info(' The projection vectors do not apply to these channels.')
(fig_cov, axes) = plt.subplots(1, len(idx_names), squeeze=False, figsize=((3.8 * len(idx_names)), 3.7))
for (k, (idx, name, _, _, _)) in enumerate(idx_names):
vlim = np.max(np.abs(C[idx][:, idx]))
im = axes[(0, k)].imshow(C[idx][:, idx], interpolation='nearest', norm=Normalize(vmin=(- vlim), vmax=vlim), cmap='RdBu_r')
axes[(0, k)].set(title=name)
if colorbar:
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(axes[(0, k)])
cax = divider.append_axes('right', size='5.5%', pad=0.05)
plt.colorbar(im, cax=cax, format='%.0e')
fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
tight_layout(fig=fig_cov)
fig_svd = None
if show_svd:
(fig_svd, axes) = plt.subplots(1, len(idx_names), squeeze=False, figsize=((3.8 * len(idx_names)), 3.7))
for (k, (idx, name, unit, scaling, key)) in enumerate(idx_names):
this_C = C[idx][:, idx]
s = linalg.svd(this_C, compute_uv=False)
this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx], [], [], 0)
this_info = pick_info(info, idx)
this_info['projs'] = []
this_rank = compute_rank(this_C, info=this_info)
s[(s <= 0)] = (1e-10 * s[(s > 0)].min())
s = (np.sqrt(s) * scaling)
axes[(0, k)].plot(s, color='k', zorder=3)
this_rank = this_rank[key]
axes[(0, k)].axvline((this_rank - 1), ls='--', color='r', alpha=0.5, zorder=4, clip_on=False)
axes[(0, k)].text((this_rank - 1), axes[(0, k)].get_ylim()[1], ('rank ≈ %d' % (this_rank,)), ha='right', va='top', color='r', alpha=0.5, zorder=4)
axes[(0, k)].set(ylabel=(u'Noise σ (%s)' % unit), yscale='log', xlabel='Eigenvalue index', title=name, xlim=[0, (len(s) - 1)])
tight_layout(fig=fig_svd)
plt_show(show)
return (fig_cov, fig_svd)
|
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None, source_index=None, colorbar=False, show=True):
'Plot source power in time-freqency grid.\n\n Parameters\n ----------\n stcs : list of SourceEstimate\n Source power for consecutive time windows, one SourceEstimate object\n should be provided for each frequency bin.\n freq_bins : list of tuples of float\n Start and end points of frequency bins of interest.\n tmin : float\n Minimum time instant to show.\n tmax : float\n Maximum time instant to show.\n source_index : int | None\n Index of source for which the spectrogram will be plotted. If None,\n the source with the largest activation will be selected.\n colorbar : bool\n If true, a colorbar will be added to the plot.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : instance of Figure\n The figure.\n '
import matplotlib.pyplot as plt
if (len(stcs) == 0):
raise ValueError('cannot plot spectrogram if len(stcs) == 0')
stc = stcs[0]
if ((tmin is not None) and (tmin < stc.times[0])):
raise ValueError('tmin cannot be smaller than the first time point provided in stcs')
if ((tmax is not None) and (tmax > (stc.times[(- 1)] + stc.tstep))):
raise ValueError('tmax cannot be larger than the sum of the last time point and the time step, which are provided in stcs')
if (tmin is None):
tmin = stc.times[0]
if (tmax is None):
tmax = (stc.times[(- 1)] + stc.tstep)
time_bounds = np.arange(tmin, (tmax + stc.tstep), stc.tstep)
freq_bounds = sorted(set(np.ravel(freq_bins)))
freq_ticks = copy.deepcopy(freq_bounds)
source_power = []
for stc in stcs:
stc = stc.copy()
stc.crop(tmin, (tmax - stc.tstep))
source_power.append(stc.data)
source_power = np.array(source_power)
if (source_index is None):
source_index = np.unravel_index(source_power.argmax(), source_power.shape)[1]
gap_bounds = []
for i in range((len(freq_bins) - 1)):
lower_bound = freq_bins[i][1]
upper_bound = freq_bins[(i + 1)][0]
if (lower_bound != upper_bound):
freq_bounds.remove(lower_bound)
gap_bounds.append((lower_bound, upper_bound))
(time_grid, freq_grid) = np.meshgrid(time_bounds, freq_bounds)
fig = plt.figure(figsize=(9, 6))
plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :], cmap='Reds')
ax = plt.gca()
ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')
time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
n_skip = (1 + (len(time_bounds) // 10))
for i in range(len(time_bounds)):
if ((i % n_skip) != 0):
time_tick_labels[i] = ''
ax.set_xticks(time_bounds)
ax.set_xticklabels(time_tick_labels)
plt.xlim(time_bounds[0], time_bounds[(- 1)])
plt.yscale('log')
ax.set_yticks(freq_ticks)
ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
plt.ylim(freq_bounds[0], freq_bounds[(- 1)])
plt.grid(True, ls='-')
if colorbar:
plt.colorbar()
tight_layout(fig=fig)
for (lower_bound, upper_bound) in gap_bounds:
plt.barh(lower_bound, (time_bounds[(- 1)] - time_bounds[0]), (upper_bound - lower_bound), time_bounds[0], color='#666666')
plt_show(show)
return fig
| -2,642,318,892,408,412,700
|
    Plot source power in time-frequency grid.
Parameters
----------
stcs : list of SourceEstimate
Source power for consecutive time windows, one SourceEstimate object
should be provided for each frequency bin.
freq_bins : list of tuples of float
Start and end points of frequency bins of interest.
tmin : float
Minimum time instant to show.
tmax : float
Maximum time instant to show.
source_index : int | None
Index of source for which the spectrogram will be plotted. If None,
the source with the largest activation will be selected.
colorbar : bool
If true, a colorbar will be added to the plot.
show : bool
Show figure if True.
Returns
-------
fig : instance of Figure
The figure.
|
mne/viz/misc.py
|
plot_source_spectrogram
|
Aniket-Pradhan/mne-python
|
python
|
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None, source_index=None, colorbar=False, show=True):
'Plot source power in time-freqency grid.\n\n Parameters\n ----------\n stcs : list of SourceEstimate\n Source power for consecutive time windows, one SourceEstimate object\n should be provided for each frequency bin.\n freq_bins : list of tuples of float\n Start and end points of frequency bins of interest.\n tmin : float\n Minimum time instant to show.\n tmax : float\n Maximum time instant to show.\n source_index : int | None\n Index of source for which the spectrogram will be plotted. If None,\n the source with the largest activation will be selected.\n colorbar : bool\n If true, a colorbar will be added to the plot.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : instance of Figure\n The figure.\n '
import matplotlib.pyplot as plt
if (len(stcs) == 0):
raise ValueError('cannot plot spectrogram if len(stcs) == 0')
stc = stcs[0]
if ((tmin is not None) and (tmin < stc.times[0])):
raise ValueError('tmin cannot be smaller than the first time point provided in stcs')
if ((tmax is not None) and (tmax > (stc.times[(- 1)] + stc.tstep))):
raise ValueError('tmax cannot be larger than the sum of the last time point and the time step, which are provided in stcs')
if (tmin is None):
tmin = stc.times[0]
if (tmax is None):
tmax = (stc.times[(- 1)] + stc.tstep)
time_bounds = np.arange(tmin, (tmax + stc.tstep), stc.tstep)
freq_bounds = sorted(set(np.ravel(freq_bins)))
freq_ticks = copy.deepcopy(freq_bounds)
source_power = []
for stc in stcs:
stc = stc.copy()
stc.crop(tmin, (tmax - stc.tstep))
source_power.append(stc.data)
source_power = np.array(source_power)
if (source_index is None):
source_index = np.unravel_index(source_power.argmax(), source_power.shape)[1]
gap_bounds = []
for i in range((len(freq_bins) - 1)):
lower_bound = freq_bins[i][1]
upper_bound = freq_bins[(i + 1)][0]
if (lower_bound != upper_bound):
freq_bounds.remove(lower_bound)
gap_bounds.append((lower_bound, upper_bound))
(time_grid, freq_grid) = np.meshgrid(time_bounds, freq_bounds)
fig = plt.figure(figsize=(9, 6))
plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :], cmap='Reds')
ax = plt.gca()
ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')
time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
n_skip = (1 + (len(time_bounds) // 10))
for i in range(len(time_bounds)):
if ((i % n_skip) != 0):
time_tick_labels[i] =
ax.set_xticks(time_bounds)
ax.set_xticklabels(time_tick_labels)
plt.xlim(time_bounds[0], time_bounds[(- 1)])
plt.yscale('log')
ax.set_yticks(freq_ticks)
ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
plt.ylim(freq_bounds[0], freq_bounds[(- 1)])
plt.grid(True, ls='-')
if colorbar:
plt.colorbar()
tight_layout(fig=fig)
for (lower_bound, upper_bound) in gap_bounds:
plt.barh(lower_bound, (time_bounds[(- 1)] - time_bounds[0]), (upper_bound - lower_bound), time_bounds[0], color='#666666')
plt_show(show)
return fig
|
def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal', slices=None, show=True, show_indices=False, show_orientation=False, img_output=False):
'Plot BEM contours on anatomical slices.'
import matplotlib.pyplot as plt
from matplotlib import patheffects
_check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
(_, vox_mri_t, _, _, _, nim) = _read_mri_info(mri_fname, units='mm', return_img=True)
mri_vox_t = invert_transform(vox_mri_t)['trans']
del vox_mri_t
((x, y, z), (flip_x, flip_y, flip_z), order) = _mri_orientation(nim, orientation)
transpose = (x < y)
data = _get_img_fdata(nim)
shift_x = (data.shape[x] if (flip_x < 0) else 0)
shift_y = (data.shape[y] if (flip_y < 0) else 0)
n_slices = data.shape[z]
if (slices is None):
slices = np.round(np.linspace(0, (n_slices - 1), 14)).astype(int)[1:(- 1)]
slices = np.atleast_1d(slices).copy()
slices[(slices < 0)] += n_slices
if ((not np.array_equal(np.sort(slices), slices)) or (slices.ndim != 1) or (slices.size < 1) or (slices[0] < 0) or (slices[(- 1)] >= n_slices) or (slices.dtype.kind not in 'iu')):
raise ValueError(('slices must be a sorted 1D array of int with unique elements, at least one element, and no elements greater than %d, got %s' % ((n_slices - 1), slices)))
if (flip_z < 0):
slices = slices[::(- 1)]
surfs = list()
for (file_name, color) in surfaces:
surf = dict()
(surf['rr'], surf['tris']) = read_surface(file_name)
surf['rr'] = apply_trans(mri_vox_t, surf['rr'])
surfs.append((surf, color))
sources = list()
if (src is not None):
_ensure_src(src, extra=' or None')
if (src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI):
raise ValueError(f"Source space must be in MRI coordinates, got {_frame_to_str[src[0]['coord_frame']]}")
for src_ in src:
points = src_['rr'][src_['inuse'].astype(bool)]
sources.append(apply_trans(mri_vox_t, (points * 1000.0)))
sources = np.concatenate(sources, axis=0)
if img_output:
n_col = n_axes = 1
(fig, ax) = plt.subplots(1, 1, figsize=(7.0, 7.0))
axs = ([ax] * len(slices))
w = fig.get_size_inches()[0]
fig.set_size_inches([w, ((w / data.shape[x]) * data.shape[y])])
plt.close(fig)
else:
n_col = 4
(fig, axs, _, _) = _prepare_trellis(len(slices), n_col)
n_axes = len(axs)
fig.set_facecolor('k')
bounds = np.concatenate([[(- np.inf)], (slices[:(- 1)] + (np.diff(slices) / 2.0)), [np.inf]])
slicer = ([slice(None)] * 3)
ori_labels = dict(R='LR', A='PA', S='IS')
(xlabels, ylabels) = (ori_labels[order[0]], ori_labels[order[1]])
path_effects = [patheffects.withStroke(linewidth=4, foreground='k', alpha=0.75)]
out = (list() if img_output else fig)
for (ai, (ax, sl, lower, upper)) in enumerate(zip(axs, slices, bounds[:(- 1)], bounds[1:])):
slicer[z] = sl
dat = data[tuple(slicer)]
dat = (dat.T if transpose else dat)
dat = dat[::flip_y, ::flip_x]
if img_output:
ax.clear()
ax.imshow(dat, cmap=plt.cm.gray, origin='lower')
ax.set_autoscale_on(False)
ax.axis('off')
ax.set_aspect('equal')
for (surf, color) in surfs:
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
ax.tricontour(((flip_x * surf['rr'][:, x]) + shift_x), ((flip_y * surf['rr'][:, y]) + shift_y), surf['tris'], surf['rr'][:, z], levels=[sl], colors=color, linewidths=1.0, zorder=1)
if len(sources):
in_slice = ((sources[:, z] >= lower) & (sources[:, z] < upper))
ax.scatter(((flip_x * sources[(in_slice, x)]) + shift_x), ((flip_y * sources[(in_slice, y)]) + shift_y), marker='.', color='#FF00FF', s=1, zorder=2)
if show_indices:
ax.text(((dat.shape[1] // 8) + 0.5), 0.5, str(sl), color='w', fontsize='x-small', va='bottom', ha='left')
kwargs = dict(color='#66CCEE', fontsize='medium', path_effects=path_effects, family='monospace', clip_on=False, zorder=5, weight='bold')
if show_orientation:
if ((ai % n_col) == 0):
ax.text(0, (dat.shape[0] / 2.0), xlabels[0], va='center', ha='left', **kwargs)
if (((ai % n_col) == (n_col - 1)) or (ai == (n_axes - 1))):
ax.text((dat.shape[1] - 1), (dat.shape[0] / 2.0), xlabels[1], va='center', ha='right', **kwargs)
if (ai >= (n_axes - n_col)):
ax.text((dat.shape[1] / 2.0), 0, ylabels[0], ha='center', va='bottom', **kwargs)
if ((ai < n_col) or (n_col == 1)):
ax.text((dat.shape[1] / 2.0), (dat.shape[0] - 1), ylabels[1], ha='center', va='top', **kwargs)
if img_output:
output = BytesIO()
fig.savefig(output, bbox_inches='tight', pad_inches=0, format='png')
out.append(base64.b64encode(output.getvalue()).decode('ascii'))
fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=0.0, hspace=0.0)
plt_show(show, fig=fig)
return out
| -5,006,268,207,408,695,000
|
Plot BEM contours on anatomical slices.
|
mne/viz/misc.py
|
_plot_mri_contours
|
Aniket-Pradhan/mne-python
|
python
|
def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal', slices=None, show=True, show_indices=False, show_orientation=False, img_output=False):
import matplotlib.pyplot as plt
from matplotlib import patheffects
_check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
(_, vox_mri_t, _, _, _, nim) = _read_mri_info(mri_fname, units='mm', return_img=True)
mri_vox_t = invert_transform(vox_mri_t)['trans']
del vox_mri_t
((x, y, z), (flip_x, flip_y, flip_z), order) = _mri_orientation(nim, orientation)
transpose = (x < y)
data = _get_img_fdata(nim)
shift_x = (data.shape[x] if (flip_x < 0) else 0)
shift_y = (data.shape[y] if (flip_y < 0) else 0)
n_slices = data.shape[z]
if (slices is None):
slices = np.round(np.linspace(0, (n_slices - 1), 14)).astype(int)[1:(- 1)]
slices = np.atleast_1d(slices).copy()
slices[(slices < 0)] += n_slices
if ((not np.array_equal(np.sort(slices), slices)) or (slices.ndim != 1) or (slices.size < 1) or (slices[0] < 0) or (slices[(- 1)] >= n_slices) or (slices.dtype.kind not in 'iu')):
raise ValueError(('slices must be a sorted 1D array of int with unique elements, at least one element, and no elements greater than %d, got %s' % ((n_slices - 1), slices)))
if (flip_z < 0):
slices = slices[::(- 1)]
surfs = list()
for (file_name, color) in surfaces:
surf = dict()
(surf['rr'], surf['tris']) = read_surface(file_name)
surf['rr'] = apply_trans(mri_vox_t, surf['rr'])
surfs.append((surf, color))
sources = list()
if (src is not None):
_ensure_src(src, extra=' or None')
if (src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI):
raise ValueError(f"Source space must be in MRI coordinates, got {_frame_to_str[src[0]['coord_frame']]}")
for src_ in src:
points = src_['rr'][src_['inuse'].astype(bool)]
sources.append(apply_trans(mri_vox_t, (points * 1000.0)))
sources = np.concatenate(sources, axis=0)
if img_output:
n_col = n_axes = 1
(fig, ax) = plt.subplots(1, 1, figsize=(7.0, 7.0))
axs = ([ax] * len(slices))
w = fig.get_size_inches()[0]
fig.set_size_inches([w, ((w / data.shape[x]) * data.shape[y])])
plt.close(fig)
else:
n_col = 4
(fig, axs, _, _) = _prepare_trellis(len(slices), n_col)
n_axes = len(axs)
fig.set_facecolor('k')
bounds = np.concatenate([[(- np.inf)], (slices[:(- 1)] + (np.diff(slices) / 2.0)), [np.inf]])
slicer = ([slice(None)] * 3)
ori_labels = dict(R='LR', A='PA', S='IS')
(xlabels, ylabels) = (ori_labels[order[0]], ori_labels[order[1]])
path_effects = [patheffects.withStroke(linewidth=4, foreground='k', alpha=0.75)]
out = (list() if img_output else fig)
for (ai, (ax, sl, lower, upper)) in enumerate(zip(axs, slices, bounds[:(- 1)], bounds[1:])):
slicer[z] = sl
dat = data[tuple(slicer)]
dat = (dat.T if transpose else dat)
dat = dat[::flip_y, ::flip_x]
if img_output:
ax.clear()
ax.imshow(dat, cmap=plt.cm.gray, origin='lower')
ax.set_autoscale_on(False)
ax.axis('off')
ax.set_aspect('equal')
for (surf, color) in surfs:
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
ax.tricontour(((flip_x * surf['rr'][:, x]) + shift_x), ((flip_y * surf['rr'][:, y]) + shift_y), surf['tris'], surf['rr'][:, z], levels=[sl], colors=color, linewidths=1.0, zorder=1)
if len(sources):
in_slice = ((sources[:, z] >= lower) & (sources[:, z] < upper))
ax.scatter(((flip_x * sources[(in_slice, x)]) + shift_x), ((flip_y * sources[(in_slice, y)]) + shift_y), marker='.', color='#FF00FF', s=1, zorder=2)
if show_indices:
ax.text(((dat.shape[1] // 8) + 0.5), 0.5, str(sl), color='w', fontsize='x-small', va='bottom', ha='left')
kwargs = dict(color='#66CCEE', fontsize='medium', path_effects=path_effects, family='monospace', clip_on=False, zorder=5, weight='bold')
if show_orientation:
if ((ai % n_col) == 0):
ax.text(0, (dat.shape[0] / 2.0), xlabels[0], va='center', ha='left', **kwargs)
if (((ai % n_col) == (n_col - 1)) or (ai == (n_axes - 1))):
ax.text((dat.shape[1] - 1), (dat.shape[0] / 2.0), xlabels[1], va='center', ha='right', **kwargs)
if (ai >= (n_axes - n_col)):
ax.text((dat.shape[1] / 2.0), 0, ylabels[0], ha='center', va='bottom', **kwargs)
if ((ai < n_col) or (n_col == 1)):
ax.text((dat.shape[1] / 2.0), (dat.shape[0] - 1), ylabels[1], ha='center', va='top', **kwargs)
if img_output:
output = BytesIO()
fig.savefig(output, bbox_inches='tight', pad_inches=0, format='png')
out.append(base64.b64encode(output.getvalue()).decode('ascii'))
fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=0.0, hspace=0.0)
plt_show(show, fig=fig)
return out
|
def plot_bem(subject=None, subjects_dir=None, orientation='coronal', slices=None, brain_surfaces=None, src=None, show=True, show_indices=True, mri='T1.mgz', show_orientation=True):
'Plot BEM contours on anatomical slices.\n\n Parameters\n ----------\n subject : str\n Subject name.\n subjects_dir : str | None\n Path to the SUBJECTS_DIR. If None, the path is obtained by using\n the environment variable SUBJECTS_DIR.\n orientation : str\n \'coronal\' or \'axial\' or \'sagittal\'.\n slices : list of int\n Slice indices.\n brain_surfaces : None | str | list of str\n One or more brain surface to plot (optional). Entries should correspond\n to files in the subject\'s ``surf`` directory (e.g. ``"white"``).\n src : None | SourceSpaces | str\n SourceSpaces instance or path to a source space to plot individual\n sources as scatter-plot. Sources will be shown on exactly one slice\n (whichever slice is closest to each source in the given orientation\n plane). Path can be absolute or relative to the subject\'s ``bem``\n folder.\n\n .. versionchanged:: 0.20\n All sources are shown on the nearest slice rather than some\n being omitted.\n show : bool\n Show figure if True.\n show_indices : bool\n Show slice indices if True.\n\n .. versionadded:: 0.20\n mri : str\n The name of the MRI to use. Can be a standard FreeSurfer MRI such as\n ``\'T1.mgz\'``, or a full path to a custom MRI file.\n\n .. versionadded:: 0.21\n show_orientation : str\n Show the orientation (L/R, P/A, I/S) of the data slices.\n\n .. versionadded:: 0.21\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure.\n\n See Also\n --------\n mne.viz.plot_alignment\n\n Notes\n -----\n Images are plotted in MRI voxel coordinates.\n\n If ``src`` is not None, for a given slice index, all source points are\n shown that are halfway between the previous slice and the given slice,\n and halfway between the given slice and the next slice.\n For large slice decimations, this can\n make some source points appear outside the BEM contour, which is shown\n for the given slice index. 
For example, in the case where the single\n midpoint slice is used ``slices=[128]``, all source points will be shown\n on top of the midpoint MRI slice with the BEM boundary drawn for that\n slice.\n '
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
mri_fname = _check_mri(mri, subject, subjects_dir)
bem_path = op.join(subjects_dir, subject, 'bem')
if (not op.isdir(bem_path)):
raise IOError(('Subject bem directory "%s" does not exist' % bem_path))
surfaces = _get_bem_plotting_surfaces(bem_path)
if (brain_surfaces is not None):
if isinstance(brain_surfaces, str):
brain_surfaces = (brain_surfaces,)
for surf_name in brain_surfaces:
for hemi in ('lh', 'rh'):
surf_fname = op.join(subjects_dir, subject, 'surf', ((hemi + '.') + surf_name))
if op.exists(surf_fname):
surfaces.append((surf_fname, '#00DD00'))
else:
raise IOError(('Surface %s does not exist.' % surf_fname))
if isinstance(src, str):
if (not op.exists(src)):
src_ = op.join(subjects_dir, subject, 'bem', src)
if op.exists(src_):
src = src_
else:
raise IOError(('%s does not exist' % src))
src = read_source_spaces(src)
elif ((src is not None) and (not isinstance(src, SourceSpaces))):
raise TypeError(('src needs to be None, str or SourceSpaces instance, not %s' % repr(src)))
if (len(surfaces) == 0):
raise IOError('No surface files found. Surface files must end with inner_skull.surf, outer_skull.surf or outer_skin.surf')
return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices, show, show_indices, show_orientation)
| 8,103,669,124,879,086,000
|
Plot BEM contours on anatomical slices.
Parameters
----------
subject : str
Subject name.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
orientation : str
'coronal' or 'axial' or 'sagittal'.
slices : list of int
Slice indices.
brain_surfaces : None | str | list of str
One or more brain surface to plot (optional). Entries should correspond
to files in the subject's ``surf`` directory (e.g. ``"white"``).
src : None | SourceSpaces | str
SourceSpaces instance or path to a source space to plot individual
sources as scatter-plot. Sources will be shown on exactly one slice
(whichever slice is closest to each source in the given orientation
plane). Path can be absolute or relative to the subject's ``bem``
folder.
.. versionchanged:: 0.20
All sources are shown on the nearest slice rather than some
being omitted.
show : bool
Show figure if True.
show_indices : bool
Show slice indices if True.
.. versionadded:: 0.20
mri : str
The name of the MRI to use. Can be a standard FreeSurfer MRI such as
``'T1.mgz'``, or a full path to a custom MRI file.
.. versionadded:: 0.21
show_orientation : str
Show the orientation (L/R, P/A, I/S) of the data slices.
.. versionadded:: 0.21
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
mne.viz.plot_alignment
Notes
-----
Images are plotted in MRI voxel coordinates.
If ``src`` is not None, for a given slice index, all source points are
shown that are halfway between the previous slice and the given slice,
and halfway between the given slice and the next slice.
For large slice decimations, this can
make some source points appear outside the BEM contour, which is shown
for the given slice index. For example, in the case where the single
midpoint slice is used ``slices=[128]``, all source points will be shown
on top of the midpoint MRI slice with the BEM boundary drawn for that
slice.
|
mne/viz/misc.py
|
plot_bem
|
Aniket-Pradhan/mne-python
|
python
|
def plot_bem(subject=None, subjects_dir=None, orientation='coronal', slices=None, brain_surfaces=None, src=None, show=True, show_indices=True, mri='T1.mgz', show_orientation=True):
'Plot BEM contours on anatomical slices.\n\n Parameters\n ----------\n subject : str\n Subject name.\n subjects_dir : str | None\n Path to the SUBJECTS_DIR. If None, the path is obtained by using\n the environment variable SUBJECTS_DIR.\n orientation : str\n \'coronal\' or \'axial\' or \'sagittal\'.\n slices : list of int\n Slice indices.\n brain_surfaces : None | str | list of str\n One or more brain surface to plot (optional). Entries should correspond\n to files in the subject\'s ``surf`` directory (e.g. ``"white"``).\n src : None | SourceSpaces | str\n SourceSpaces instance or path to a source space to plot individual\n sources as scatter-plot. Sources will be shown on exactly one slice\n (whichever slice is closest to each source in the given orientation\n plane). Path can be absolute or relative to the subject\'s ``bem``\n folder.\n\n .. versionchanged:: 0.20\n All sources are shown on the nearest slice rather than some\n being omitted.\n show : bool\n Show figure if True.\n show_indices : bool\n Show slice indices if True.\n\n .. versionadded:: 0.20\n mri : str\n The name of the MRI to use. Can be a standard FreeSurfer MRI such as\n ``\'T1.mgz\'``, or a full path to a custom MRI file.\n\n .. versionadded:: 0.21\n show_orientation : str\n Show the orientation (L/R, P/A, I/S) of the data slices.\n\n .. versionadded:: 0.21\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure.\n\n See Also\n --------\n mne.viz.plot_alignment\n\n Notes\n -----\n Images are plotted in MRI voxel coordinates.\n\n If ``src`` is not None, for a given slice index, all source points are\n shown that are halfway between the previous slice and the given slice,\n and halfway between the given slice and the next slice.\n For large slice decimations, this can\n make some source points appear outside the BEM contour, which is shown\n for the given slice index. 
For example, in the case where the single\n midpoint slice is used ``slices=[128]``, all source points will be shown\n on top of the midpoint MRI slice with the BEM boundary drawn for that\n slice.\n '
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
mri_fname = _check_mri(mri, subject, subjects_dir)
bem_path = op.join(subjects_dir, subject, 'bem')
if (not op.isdir(bem_path)):
raise IOError(('Subject bem directory "%s" does not exist' % bem_path))
surfaces = _get_bem_plotting_surfaces(bem_path)
if (brain_surfaces is not None):
if isinstance(brain_surfaces, str):
brain_surfaces = (brain_surfaces,)
for surf_name in brain_surfaces:
for hemi in ('lh', 'rh'):
surf_fname = op.join(subjects_dir, subject, 'surf', ((hemi + '.') + surf_name))
if op.exists(surf_fname):
surfaces.append((surf_fname, '#00DD00'))
else:
raise IOError(('Surface %s does not exist.' % surf_fname))
if isinstance(src, str):
if (not op.exists(src)):
src_ = op.join(subjects_dir, subject, 'bem', src)
if op.exists(src_):
src = src_
else:
raise IOError(('%s does not exist' % src))
src = read_source_spaces(src)
elif ((src is not None) and (not isinstance(src, SourceSpaces))):
raise TypeError(('src needs to be None, str or SourceSpaces instance, not %s' % repr(src)))
if (len(surfaces) == 0):
raise IOError('No surface files found. Surface files must end with inner_skull.surf, outer_skull.surf or outer_skin.surf')
return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices, show, show_indices, show_orientation)
|
@verbose
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None, axes=None, equal_spacing=True, show=True, on_missing='raise', verbose=None):
    "Plot events to get a visual display of the paradigm.\n\n    Parameters\n    ----------\n    events : array, shape (n_events, 3)\n        The events.\n    sfreq : float | None\n        The sample frequency. If None, data will be displayed in samples (not\n        seconds).\n    first_samp : int\n        The index of the first sample. Recordings made on Neuromag systems\n        number samples relative to the system start (not relative to the\n        beginning of the recording). In such cases the ``raw.first_samp``\n        attribute can be passed here. Default is 0.\n    color : dict | None\n        Dictionary of event_id integers as keys and colors as values. If None,\n        colors are automatically drawn from a default list (cycled through if\n        number of events longer than list of default colors). Color can be any\n        valid :doc:`matplotlib color <tutorials/colors/colors>`.\n    event_id : dict | None\n        Dictionary of event labels (e.g. 'aud_l') as keys and their associated\n        event_id values. Labels are used to plot a legend. If None, no legend\n        is drawn.\n    axes : instance of Axes\n        The subplot handle.\n    equal_spacing : bool\n        Use equal spacing between events in y-axis.\n    show : bool\n        Show figure if True.\n    %(on_missing_events)s\n    %(verbose)s\n\n    Returns\n    -------\n    fig : matplotlib.figure.Figure\n        The figure object containing the plot.\n\n    Notes\n    -----\n    .. versionadded:: 0.9.0\n    "
    if (sfreq is None):
        # Without a sampling rate, plot in raw sample numbers.
        sfreq = 1.0
        xlabel = 'Samples'
    else:
        xlabel = 'Time (s)'
    events = np.asarray(events)
    if (len(events) == 0):
        raise ValueError('No events in events array, cannot plot.')
    unique_events = np.unique(events[:, 2])
    if (event_id is not None):
        # Reverse mapping (integer id -> label) used for legend entries.
        event_id_rev = {v: k for (k, v) in event_id.items()}
        (conditions, unique_events_id) = zip(*sorted(event_id.items(), key=(lambda x: x[1])))
        keep = np.ones(len(unique_events_id), bool)
        for (ii, this_event) in enumerate(unique_events_id):
            if (this_event not in unique_events):
                msg = f'{this_event} from event_id is not present in events.'
                _on_missing(on_missing, msg)
                keep[ii] = False
        conditions = [cond for (cond, k) in zip(conditions, keep) if k]
        unique_events_id = [id_ for (id_, k) in zip(unique_events_id, keep) if k]
        if (len(unique_events_id) == 0):
            raise RuntimeError('No usable event IDs found')
        for this_event in unique_events:
            if (this_event not in unique_events_id):
                warn(('event %s missing from event_id will be ignored' % this_event))
    else:
        unique_events_id = unique_events
    color = _handle_event_colors(color, unique_events, event_id)
    import matplotlib.pyplot as plt
    fig = None
    if (axes is None):
        fig = plt.figure()
    # Fix: test explicitly against None (not truthiness) so any Axes instance
    # supplied by the caller is honored, consistent with the check above.
    ax = (axes if (axes is not None) else plt.gca())
    unique_events_id = np.array(unique_events_id)
    min_event = np.min(unique_events_id)
    max_event = np.max(unique_events_id)
    max_x = ((events[(np.in1d(events[:, 2], unique_events_id), 0)].max() - first_samp) / sfreq)
    (handles, labels) = (list(), list())
    for (idx, ev) in enumerate(unique_events_id):
        ev_mask = (events[:, 2] == ev)
        count = ev_mask.sum()
        if (count == 0):
            continue
        # Stack events evenly (idx + 1) or at their actual id on the y-axis.
        y = np.full(count, ((idx + 1) if equal_spacing else events[(ev_mask, 2)][0]))
        if (event_id is not None):
            event_label = ('%s (%s)' % (event_id_rev[ev], count))
        else:
            event_label = ('N=%d' % (count,))
        labels.append(event_label)
        kwargs = {}
        if (ev in color):
            kwargs['color'] = color[ev]
        handles.append(ax.plot(((events[(ev_mask, 0)] - first_samp) / sfreq), y, '.', clip_on=False, **kwargs)[0])
    if equal_spacing:
        ax.set_ylim(0, (unique_events_id.size + 1))
        ax.set_yticks((1 + np.arange(unique_events_id.size)))
        ax.set_yticklabels(unique_events_id)
    else:
        ax.set_ylim([(min_event - 1), (max_event + 1)])
    ax.set(xlabel=xlabel, ylabel='Events id', xlim=[0, max_x])
    ax.grid(True)
    # Fix: return the figure that actually contains ``ax``; ``plt.gcf()``
    # could return a different (current) figure when the caller supplied
    # ``axes`` belonging to a non-current figure.
    fig = (fig if (fig is not None) else ax.figure)
    (handles, labels) = (handles[::(- 1)], labels[::(- 1)])
    # Shrink the axes horizontally to leave room for the legend on the right.
    box = ax.get_position()
    factor = (0.8 if (event_id is not None) else 0.9)
    ax.set_position([box.x0, box.y0, (box.width * factor), box.height])
    ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5), fontsize='small')
    fig.canvas.draw()
    plt_show(show)
    return fig
| 1,551,174,879,459,412,200
|
Plot events to get a visual display of the paradigm.
Parameters
----------
events : array, shape (n_events, 3)
The events.
sfreq : float | None
The sample frequency. If None, data will be displayed in samples (not
seconds).
first_samp : int
The index of the first sample. Recordings made on Neuromag systems
number samples relative to the system start (not relative to the
beginning of the recording). In such cases the ``raw.first_samp``
attribute can be passed here. Default is 0.
color : dict | None
Dictionary of event_id integers as keys and colors as values. If None,
colors are automatically drawn from a default list (cycled through if
number of events longer than list of default colors). Color can be any
valid :doc:`matplotlib color <tutorials/colors/colors>`.
event_id : dict | None
Dictionary of event labels (e.g. 'aud_l') as keys and their associated
event_id values. Labels are used to plot a legend. If None, no legend
is drawn.
axes : instance of Axes
The subplot handle.
equal_spacing : bool
Use equal spacing between events in y-axis.
show : bool
Show figure if True.
%(on_missing_events)s
%(verbose)s
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
|
mne/viz/misc.py
|
plot_events
|
Aniket-Pradhan/mne-python
|
python
|
@verbose
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None, axes=None, equal_spacing=True, show=True, on_missing='raise', verbose=None):
    "Plot events to get a visual display of the paradigm.\n\n    Parameters\n    ----------\n    events : array, shape (n_events, 3)\n        The events.\n    sfreq : float | None\n        The sample frequency. If None, data will be displayed in samples (not\n        seconds).\n    first_samp : int\n        The index of the first sample. Recordings made on Neuromag systems\n        number samples relative to the system start (not relative to the\n        beginning of the recording). In such cases the ``raw.first_samp``\n        attribute can be passed here. Default is 0.\n    color : dict | None\n        Dictionary of event_id integers as keys and colors as values. If None,\n        colors are automatically drawn from a default list (cycled through if\n        number of events longer than list of default colors). Color can be any\n        valid :doc:`matplotlib color <tutorials/colors/colors>`.\n    event_id : dict | None\n        Dictionary of event labels (e.g. 'aud_l') as keys and their associated\n        event_id values. Labels are used to plot a legend. If None, no legend\n        is drawn.\n    axes : instance of Axes\n        The subplot handle.\n    equal_spacing : bool\n        Use equal spacing between events in y-axis.\n    show : bool\n        Show figure if True.\n    %(on_missing_events)s\n    %(verbose)s\n\n    Returns\n    -------\n    fig : matplotlib.figure.Figure\n        The figure object containing the plot.\n\n    Notes\n    -----\n    .. versionadded:: 0.9.0\n    "
    if (sfreq is None):
        # Without a sampling rate, plot in raw sample numbers.
        sfreq = 1.0
        xlabel = 'Samples'
    else:
        xlabel = 'Time (s)'
    events = np.asarray(events)
    if (len(events) == 0):
        raise ValueError('No events in events array, cannot plot.')
    unique_events = np.unique(events[:, 2])
    if (event_id is not None):
        # Reverse mapping (integer id -> label) used for legend entries.
        event_id_rev = {v: k for (k, v) in event_id.items()}
        (conditions, unique_events_id) = zip(*sorted(event_id.items(), key=(lambda x: x[1])))
        keep = np.ones(len(unique_events_id), bool)
        for (ii, this_event) in enumerate(unique_events_id):
            if (this_event not in unique_events):
                msg = f'{this_event} from event_id is not present in events.'
                _on_missing(on_missing, msg)
                keep[ii] = False
        conditions = [cond for (cond, k) in zip(conditions, keep) if k]
        unique_events_id = [id_ for (id_, k) in zip(unique_events_id, keep) if k]
        if (len(unique_events_id) == 0):
            raise RuntimeError('No usable event IDs found')
        for this_event in unique_events:
            if (this_event not in unique_events_id):
                warn(('event %s missing from event_id will be ignored' % this_event))
    else:
        unique_events_id = unique_events
    color = _handle_event_colors(color, unique_events, event_id)
    import matplotlib.pyplot as plt
    fig = None
    if (axes is None):
        fig = plt.figure()
    # Fix: test explicitly against None (not truthiness) so any Axes instance
    # supplied by the caller is honored, consistent with the check above.
    ax = (axes if (axes is not None) else plt.gca())
    unique_events_id = np.array(unique_events_id)
    min_event = np.min(unique_events_id)
    max_event = np.max(unique_events_id)
    max_x = ((events[(np.in1d(events[:, 2], unique_events_id), 0)].max() - first_samp) / sfreq)
    (handles, labels) = (list(), list())
    for (idx, ev) in enumerate(unique_events_id):
        ev_mask = (events[:, 2] == ev)
        count = ev_mask.sum()
        if (count == 0):
            continue
        # Stack events evenly (idx + 1) or at their actual id on the y-axis.
        y = np.full(count, ((idx + 1) if equal_spacing else events[(ev_mask, 2)][0]))
        if (event_id is not None):
            event_label = ('%s (%s)' % (event_id_rev[ev], count))
        else:
            event_label = ('N=%d' % (count,))
        labels.append(event_label)
        kwargs = {}
        if (ev in color):
            kwargs['color'] = color[ev]
        handles.append(ax.plot(((events[(ev_mask, 0)] - first_samp) / sfreq), y, '.', clip_on=False, **kwargs)[0])
    if equal_spacing:
        ax.set_ylim(0, (unique_events_id.size + 1))
        ax.set_yticks((1 + np.arange(unique_events_id.size)))
        ax.set_yticklabels(unique_events_id)
    else:
        ax.set_ylim([(min_event - 1), (max_event + 1)])
    ax.set(xlabel=xlabel, ylabel='Events id', xlim=[0, max_x])
    ax.grid(True)
    # Fix: return the figure that actually contains ``ax``; ``plt.gcf()``
    # could return a different (current) figure when the caller supplied
    # ``axes`` belonging to a non-current figure.
    fig = (fig if (fig is not None) else ax.figure)
    (handles, labels) = (handles[::(- 1)], labels[::(- 1)])
    # Shrink the axes horizontally to leave room for the legend on the right.
    box = ax.get_position()
    factor = (0.8 if (event_id is not None) else 0.9)
    ax.set_position([box.x0, box.y0, (box.width * factor), box.height])
    ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5), fontsize='small')
    fig.canvas.draw()
    plt_show(show)
    return fig
|
def _get_presser(fig):
    """Return the figure's registered ``partial`` button-press callback."""
    import matplotlib
    registry = fig.canvas.callbacks.callbacks['button_press_event']
    # matplotlib >= 3 stores weak references that must be called to
    # dereference; older versions store proxy objects with a ``func`` attr.
    use_weakref = (LooseVersion(matplotlib.__version__) >= '3')
    found = None
    for entry in registry.values():
        candidate = (entry() if use_weakref else entry.func)
        if (candidate.__class__.__name__ == 'partial'):
            found = candidate
            break
    assert (found is not None)
    return found
| -2,291,499,084,552,287,000
|
Get our press callback.
|
mne/viz/misc.py
|
_get_presser
|
Aniket-Pradhan/mne-python
|
python
|
def _get_presser(fig):
    """Return the figure's registered ``partial`` button-press callback."""
    import matplotlib
    registry = fig.canvas.callbacks.callbacks['button_press_event']
    # matplotlib >= 3 stores weak references that must be called to
    # dereference; older versions store proxy objects with a ``func`` attr.
    use_weakref = (LooseVersion(matplotlib.__version__) >= '3')
    found = None
    for entry in registry.values():
        candidate = (entry() if use_weakref else entry.func)
        if (candidate.__class__.__name__ == 'partial'):
            found = candidate
            break
    assert (found is not None)
    return found
|
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
    """Plot amplitude time courses for a set of dipoles.

    Parameters
    ----------
    dipoles : list of instance of Dipole
        The dipoles whose amplitudes should be shown.
    colors : list of color | None
        Color to plot with each dipole. If None default colors are used.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object containing the plot.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    import matplotlib.pyplot as plt
    if (colors is None):
        colors = cycle(_get_color_list())
    (fig, axis) = plt.subplots(1, 1)
    (t_min, t_max) = (np.inf, (- np.inf))
    for (dipole, line_color) in zip(dipoles, colors):
        # Amplitudes are stored in Am; scale by 1e9 to display nAm.
        axis.plot(dipole.times, (dipole.amplitude * 1e9), color=line_color, linewidth=1.5)
        t_min = min(t_min, dipole.times[0])
        t_max = max(t_max, dipole.times[(- 1)])
    axis.set(xlim=[t_min, t_max], xlabel='Time (s)', ylabel='Amplitude (nAm)')
    if show:
        fig.show(warn=False)
    return fig
| 4,548,696,912,232,993,000
|
Plot the amplitude traces of a set of dipoles.
Parameters
----------
dipoles : list of instance of Dipole
The dipoles whose amplitudes should be shown.
colors : list of color | None
Color to plot with each dipole. If None default colors are used.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
|
mne/viz/misc.py
|
plot_dipole_amplitudes
|
Aniket-Pradhan/mne-python
|
python
|
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
    """Plot amplitude time courses for a set of dipoles.

    Parameters
    ----------
    dipoles : list of instance of Dipole
        The dipoles whose amplitudes should be shown.
    colors : list of color | None
        Color to plot with each dipole. If None default colors are used.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object containing the plot.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    import matplotlib.pyplot as plt
    if (colors is None):
        colors = cycle(_get_color_list())
    (fig, axis) = plt.subplots(1, 1)
    (t_min, t_max) = (np.inf, (- np.inf))
    for (dipole, line_color) in zip(dipoles, colors):
        # Amplitudes are stored in Am; scale by 1e9 to display nAm.
        axis.plot(dipole.times, (dipole.amplitude * 1e9), color=line_color, linewidth=1.5)
        t_min = min(t_min, dipole.times[0])
        t_max = max(t_max, dipole.times[(- 1)])
    axis.set(xlim=[t_min, t_max], xlabel='Time (s)', ylabel='Amplitude (nAm)')
    if show:
        fig.show(warn=False)
    return fig
|
def adjust_axes(axes, remove_spines=('top', 'right'), grid=True):
    """Tweak cosmetic properties of one or more axes in place.

    Parameters
    ----------
    axes : axes | list of axes
        Axes (or a collection of axes) to process.
    remove_spines : list of str
        Names of the axis spines to hide on each axes.
    grid : bool
        If True, enable the grid (drawn behind the data via ``zorder=0``).
    """
    if not isinstance(axes, (list, tuple, np.ndarray)):
        axes = [axes]
    for axis in axes:
        if grid:
            # zorder=0 keeps grid lines behind plotted artists.
            axis.grid(zorder=0)
        for spine_name in remove_spines:
            axis.spines[spine_name].set_visible(False)
| 4,676,078,817,384,858,000
|
Adjust some properties of axes.
Parameters
----------
axes : list
List of axes to process.
remove_spines : list of str
Which axis spines to remove.
grid : bool
Turn grid on (True) or off (False).
|
mne/viz/misc.py
|
adjust_axes
|
Aniket-Pradhan/mne-python
|
python
|
def adjust_axes(axes, remove_spines=('top', 'right'), grid=True):
    """Tweak cosmetic properties of one or more axes in place.

    Parameters
    ----------
    axes : axes | list of axes
        Axes (or a collection of axes) to process.
    remove_spines : list of str
        Names of the axis spines to hide on each axes.
    grid : bool
        If True, enable the grid (drawn behind the data via ``zorder=0``).
    """
    if not isinstance(axes, (list, tuple, np.ndarray)):
        axes = [axes]
    for axis in axes:
        if grid:
            # zorder=0 keeps grid lines behind plotted artists.
            axis.grid(zorder=0)
        for spine_name in remove_spines:
            axis.spines[spine_name].set_visible(False)
|
def _filter_ticks(lims, fscale):
'Create approximately spaced ticks between lims.'
if (fscale == 'linear'):
return (None, None)
lims = np.array(lims)
ticks = list()
if (lims[1] > (20 * lims[0])):
base = np.array([1, 2, 4])
else:
base = np.arange(1, 11)
for exp in range(int(np.floor(np.log10(lims[0]))), (int(np.floor(np.log10(lims[1]))) + 1)):
ticks += (base * (10 ** exp)).tolist()
ticks = np.array(ticks)
ticks = ticks[((ticks >= lims[0]) & (ticks <= lims[1]))]
ticklabels = [(('%g' if (t < 1) else '%d') % t) for t in ticks]
return (ticks, ticklabels)
| 6,549,562,070,031,120,000
|
Create approximately spaced ticks between lims.
|
mne/viz/misc.py
|
_filter_ticks
|
Aniket-Pradhan/mne-python
|
python
|
def _filter_ticks(lims, fscale):
if (fscale == 'linear'):
return (None, None)
lims = np.array(lims)
ticks = list()
if (lims[1] > (20 * lims[0])):
base = np.array([1, 2, 4])
else:
base = np.arange(1, 11)
for exp in range(int(np.floor(np.log10(lims[0]))), (int(np.floor(np.log10(lims[1]))) + 1)):
ticks += (base * (10 ** exp)).tolist()
ticks = np.array(ticks)
ticks = ticks[((ticks >= lims[0]) & (ticks <= lims[1]))]
ticklabels = [(('%g' if (t < 1) else '%d') % t) for t in ticks]
return (ticks, ticklabels)
|
def _get_flim(flim, fscale, freq, sfreq=None):
'Get reasonable frequency limits.'
if (flim is None):
if (freq is None):
flim = [(0.1 if (fscale == 'log') else 0.0), (sfreq / 2.0)]
else:
if (fscale == 'linear'):
flim = [freq[0]]
else:
flim = [(freq[0] if (freq[0] > 0) else (0.1 * freq[1]))]
flim += [freq[(- 1)]]
if (fscale == 'log'):
if (flim[0] <= 0):
raise ValueError(('flim[0] must be positive, got %s' % flim[0]))
elif (flim[0] < 0):
raise ValueError(('flim[0] must be non-negative, got %s' % flim[0]))
return flim
| 2,384,666,132,643,033,000
|
Get reasonable frequency limits.
|
mne/viz/misc.py
|
_get_flim
|
Aniket-Pradhan/mne-python
|
python
|
def _get_flim(flim, fscale, freq, sfreq=None):
if (flim is None):
if (freq is None):
flim = [(0.1 if (fscale == 'log') else 0.0), (sfreq / 2.0)]
else:
if (fscale == 'linear'):
flim = [freq[0]]
else:
flim = [(freq[0] if (freq[0] > 0) else (0.1 * freq[1]))]
flim += [freq[(- 1)]]
if (fscale == 'log'):
if (flim[0] <= 0):
raise ValueError(('flim[0] must be positive, got %s' % flim[0]))
elif (flim[0] < 0):
raise ValueError(('flim[0] must be non-negative, got %s' % flim[0]))
return flim
|
def _check_fscale(fscale):
'Check for valid fscale.'
if ((not isinstance(fscale, str)) or (fscale not in ('log', 'linear'))):
raise ValueError(('fscale must be "log" or "linear", got %s' % (fscale,)))
| -175,384,852,521,488,900
|
Check for valid fscale.
|
mne/viz/misc.py
|
_check_fscale
|
Aniket-Pradhan/mne-python
|
python
|
def _check_fscale(fscale):
if ((not isinstance(fscale, str)) or (fscale not in ('log', 'linear'))):
raise ValueError(('fscale must be "log" or "linear", got %s' % (fscale,)))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.