body: stringlengths, 26 – 98.2k
body_hash: int64, -9,222,864,604,528,158,000 – 9,221,803,474B
docstring: stringlengths, 1 – 16.8k
path: stringlengths, 5 – 230
name: stringlengths, 1 – 96
repository_name: stringlengths, 7 – 89
lang: stringclasses, 1 value
body_without_docstring: stringlengths, 20 – 98.2k
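A minimal sketch of how rows in this schema might be consumed with the Hugging Face datasets library. The dataset identifier "user/code-docstring-corpus" below is a hypothetical placeholder, since the real repository name is not given in this preview:

from datasets import load_dataset

# Hypothetical dataset id; substitute the actual repository name.
ds = load_dataset("user/code-docstring-corpus", split="train")
for row in ds.select(range(3)):
    # Each row pairs a function body with its extracted docstring and provenance.
    print(row["repository_name"], row["path"], row["name"])
    print(row["docstring"][:80])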
@classmethod def from_custom_template(cls, searchpath, name): '\n Factory function for creating a subclass of ``Styler``\n with a custom template and Jinja environment.\n\n Parameters\n ----------\n searchpath : str or list\n Path or paths of directories containing the templates\n name : str\n Name of your custom template to use for rendering\n\n Returns\n -------\n MyStyler : subclass of Styler\n Has the correct ``env`` and ``template`` class attributes set.\n ' loader = ChoiceLoader([FileSystemLoader(searchpath), cls.loader]) class MyStyler(cls): env = Environment(loader=loader) template = env.get_template(name) return MyStyler
4,448,585,505,095,176,000
Factory function for creating a subclass of ``Styler`` with a custom template and Jinja environment. Parameters ---------- searchpath : str or list Path or paths of directories containing the templates name : str Name of your custom template to use for rendering Returns ------- MyStyler : subclass of Styler Has the correct ``env`` and ``template`` class attributes set.
pandas/io/formats/style.py
from_custom_template
harunpehlivan/pandas
python
@classmethod def from_custom_template(cls, searchpath, name): '\n Factory function for creating a subclass of ``Styler``\n with a custom template and Jinja environment.\n\n Parameters\n ----------\n searchpath : str or list\n Path or paths of directories containing the templates\n name : str\n Name of your custom template to use for rendering\n\n Returns\n -------\n MyStyler : subclass of Styler\n Has the correct ``env`` and ``template`` class attributes set.\n ' loader = ChoiceLoader([FileSystemLoader(searchpath), cls.loader]) class MyStyler(cls): env = Environment(loader=loader) template = env.get_template(name) return MyStyler
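A hedged usage sketch for the factory above; it assumes a Jinja template file ./templates/myhtml.tpl exists that extends the default Styler template, and uses the render() API of the pandas era this row was extracted from:

import pandas as pd
from pandas.io.formats.style import Styler

# Assumes ./templates/myhtml.tpl exists and extends the default template.
MyStyler = Styler.from_custom_template('./templates', 'myhtml.tpl')
df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
html = MyStyler(df).render()  # renders the DataFrame with the custom template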
def pipe(self, func, *args, **kwargs): '\n Apply ``func(self, *args, **kwargs)``, and return the result.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n func : function\n Function to apply to the Styler. Alternatively, a\n ``(callable, keyword)`` tuple where ``keyword`` is a string\n indicating the keyword of ``callable`` that expects the Styler.\n *args, **kwargs :\n Arguments passed to `func`.\n\n Returns\n -------\n object :\n The value returned by ``func``.\n\n See Also\n --------\n DataFrame.pipe : Analogous method for DataFrame.\n Styler.apply : Apply a function row-wise, column-wise, or table-wise to\n modify the dataframe\'s styling.\n\n Notes\n -----\n Like :meth:`DataFrame.pipe`, this method can simplify the\n application of several user-defined functions to a styler. Instead\n of writing:\n\n .. code-block:: python\n\n f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)\n\n users can write:\n\n .. code-block:: python\n\n (df.style.set_precision(3)\n .pipe(g, arg1=a)\n .pipe(f, arg2=b, arg3=c))\n\n In particular, this allows users to define functions that take a\n styler object, along with other parameters, and return the styler after\n making styling changes (such as calling :meth:`Styler.apply` or\n :meth:`Styler.set_properties`). Using ``.pipe``, these user-defined\n style "transformations" can be interleaved with calls to the built-in\n Styler interface.\n\n Examples\n --------\n >>> def format_conversion(styler):\n ... return (styler.set_properties(**{\'text-align\': \'right\'})\n ... .format({\'conversion\': \'{:.1%}\'}))\n\n The user-defined ``format_conversion`` function above can be called\n within a sequence of other style modifications:\n\n >>> df = pd.DataFrame({\'trial\': list(range(5)),\n ... \'conversion\': [0.75, 0.85, np.nan, 0.7, 0.72]})\n >>> (df.style\n ... .highlight_min(subset=[\'conversion\'], color=\'yellow\')\n ... .pipe(format_conversion)\n ... .set_caption("Results with minimum conversion highlighted."))\n ' return com._pipe(self, func, *args, **kwargs)
5,797,857,673,291,711,000
Apply ``func(self, *args, **kwargs)``, and return the result. .. versionadded:: 0.24.0 Parameters ---------- func : function Function to apply to the Styler. Alternatively, a ``(callable, keyword)`` tuple where ``keyword`` is a string indicating the keyword of ``callable`` that expects the Styler. *args, **kwargs : Arguments passed to `func`. Returns ------- object : The value returned by ``func``. See Also -------- DataFrame.pipe : Analogous method for DataFrame. Styler.apply : Apply a function row-wise, column-wise, or table-wise to modify the dataframe's styling. Notes ----- Like :meth:`DataFrame.pipe`, this method can simplify the application of several user-defined functions to a styler. Instead of writing: .. code-block:: python f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c) users can write: .. code-block:: python (df.style.set_precision(3) .pipe(g, arg1=a) .pipe(f, arg2=b, arg3=c)) In particular, this allows users to define functions that take a styler object, along with other parameters, and return the styler after making styling changes (such as calling :meth:`Styler.apply` or :meth:`Styler.set_properties`). Using ``.pipe``, these user-defined style "transformations" can be interleaved with calls to the built-in Styler interface. Examples -------- >>> def format_conversion(styler): ... return (styler.set_properties(**{'text-align': 'right'}) ... .format({'conversion': '{:.1%}'})) The user-defined ``format_conversion`` function above can be called within a sequence of other style modifications: >>> df = pd.DataFrame({'trial': list(range(5)), ... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]}) >>> (df.style ... .highlight_min(subset=['conversion'], color='yellow') ... .pipe(format_conversion) ... .set_caption("Results with minimum conversion highlighted."))
pandas/io/formats/style.py
pipe
harunpehlivan/pandas
python
def pipe(self, func, *args, **kwargs): '\n Apply ``func(self, *args, **kwargs)``, and return the result.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n func : function\n Function to apply to the Styler. Alternatively, a\n ``(callable, keyword)`` tuple where ``keyword`` is a string\n indicating the keyword of ``callable`` that expects the Styler.\n *args, **kwargs :\n Arguments passed to `func`.\n\n Returns\n -------\n object :\n The value returned by ``func``.\n\n See Also\n --------\n DataFrame.pipe : Analogous method for DataFrame.\n Styler.apply : Apply a function row-wise, column-wise, or table-wise to\n modify the dataframe\'s styling.\n\n Notes\n -----\n Like :meth:`DataFrame.pipe`, this method can simplify the\n application of several user-defined functions to a styler. Instead\n of writing:\n\n .. code-block:: python\n\n f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)\n\n users can write:\n\n .. code-block:: python\n\n (df.style.set_precision(3)\n .pipe(g, arg1=a)\n .pipe(f, arg2=b, arg3=c))\n\n In particular, this allows users to define functions that take a\n styler object, along with other parameters, and return the styler after\n making styling changes (such as calling :meth:`Styler.apply` or\n :meth:`Styler.set_properties`). Using ``.pipe``, these user-defined\n style "transformations" can be interleaved with calls to the built-in\n Styler interface.\n\n Examples\n --------\n >>> def format_conversion(styler):\n ... return (styler.set_properties(**{\'text-align\': \'right\'})\n ... .format({\'conversion\': \'{:.1%}\'}))\n\n The user-defined ``format_conversion`` function above can be called\n within a sequence of other style modifications:\n\n >>> df = pd.DataFrame({\'trial\': list(range(5)),\n ... \'conversion\': [0.75, 0.85, np.nan, 0.7, 0.72]})\n >>> (df.style\n ... .highlight_min(subset=[\'conversion\'], color=\'yellow\')\n ... .pipe(format_conversion)\n ... .set_caption("Results with minimum conversion highlighted."))\n ' return com._pipe(self, func, *args, **kwargs)
def css_bar(start, end, color): '\n Generate CSS code to draw a bar from start to end.\n ' css = 'width: 10em; height: 80%;' if (end > start): css += 'background: linear-gradient(90deg,' if (start > 0): css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(s=start, c=color) css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(e=min(end, width), c=color) return css
-5,143,326,183,301,107,000
Generate CSS code to draw a bar from start to end.
pandas/io/formats/style.py
css_bar
harunpehlivan/pandas
python
def css_bar(start, end, color): '\n \n ' css = 'width: 10em; height: 80%;' if (end > start): css += 'background: linear-gradient(90deg,' if (start > 0): css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(s=start, c=color) css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(e=min(end, width), c=color) return css
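In the pandas source this helper is nested inside the bar-styling method, so `width` is a closure variable (100 in the enclosing scope); the standalone extraction above loses that binding. A sketch under that assumption, where defining `width` at module level is enough to exercise the function:

width = 100  # assumption: the enclosing function's default bar width
print(css_bar(25, 75, '#d65f5f'))
# -> width: 10em; height: 80%;background: linear-gradient(90deg, transparent 25.0%, #d65f5f 25.0%, #d65f5f 75.0%, transparent 75.0%)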
def relative_luminance(rgba): '\n Calculate relative luminance of a color.\n\n The calculation adheres to the W3C standards\n (https://www.w3.org/WAI/GL/wiki/Relative_luminance)\n\n Parameters\n ----------\n color : rgb or rgba tuple\n\n Returns\n -------\n float\n The relative luminance as a value from 0 to 1\n ' (r, g, b) = (((x / 12.92) if (x <= 0.03928) else (((x + 0.055) / 1.055) ** 2.4)) for x in rgba[:3]) return (((0.2126 * r) + (0.7152 * g)) + (0.0722 * b))
2,695,997,616,953,070,600
Calculate relative luminance of a color. The calculation adheres to the W3C standards (https://www.w3.org/WAI/GL/wiki/Relative_luminance) Parameters ---------- color : rgb or rgba tuple Returns ------- float The relative luminance as a value from 0 to 1
pandas/io/formats/style.py
relative_luminance
harunpehlivan/pandas
python
def relative_luminance(rgba): '\n Calculate relative luminance of a color.\n\n The calculation adheres to the W3C standards\n (https://www.w3.org/WAI/GL/wiki/Relative_luminance)\n\n Parameters\n ----------\n color : rgb or rgba tuple\n\n Returns\n -------\n float\n The relative luminance as a value from 0 to 1\n ' (r, g, b) = (((x / 12.92) if (x <= 0.03928) else (((x + 0.055) / 1.055) ** 2.4)) for x in rgba[:3]) return (((0.2126 * r) + (0.7152 * g)) + (0.0722 * b))
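With the W3C parenthesisation fixed above (the gamma expansion is ((x + 0.055) / 1.055) ** 2.4, not (x + 0.055) / 1.055 ** 2.4), a quick sanity check is that pure white maps to 1.0 and pure black to 0.0, since 0.2126 + 0.7152 + 0.0722 = 1:

print(relative_luminance((1.0, 1.0, 1.0, 1.0)))  # 1.0 (white)
print(relative_luminance((0.0, 0.0, 0.0, 1.0)))  # 0.0 (black)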
def get_message(msg): 'Get metric instance from dictionary or string' if (not isinstance(msg, dict)): try: msg = json.loads(msg) except json.JSONDecodeError: return None typ = msg.pop('__type', None) if (typ == 'metric'): return Metric(**msg) return None
-1,440,209,607,654,485,200
Get metric instance from dictionary or string
csm_test_utils/message.py
get_message
opentelekomcloud-infra/csm-test-utils
python
def get_message(msg): if (not isinstance(msg, dict)): try: msg = json.loads(msg) except json.JSONDecodeError: return None typ = msg.pop('__type', None) if (typ == 'metric'): return Metric(**msg) return None
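A short usage sketch (the json.loads encoding kwarg, removed in Python 3.9, is dropped above, and pop is given a default so dicts without '__type' return None instead of raising KeyError). The fields accepted by Metric are not shown in this preview, so name and value below are hypothetical keyword arguments:

msg = '{"__type": "metric", "name": "latency", "value": 42}'
metric = get_message(msg)               # Metric(name='latency', value=42), assuming Metric takes these kwargs
assert get_message('not json') is None  # malformed input falls back to None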
def push_metric(data: Metric, message_socket_address): 'push metrics to socket' with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as _socket: try: _socket.connect(message_socket_address) msg = ('%s\n' % data.serialize()) _socket.sendall(msg.encode('utf8')) return 'success' except socket.error as err: LOGGER.exception('Error establishing connection to socket') raise err except Exception as ex: LOGGER.exception('Error writing message to socket') raise ex
-1,707,675,506,603,498,800
push metrics to socket
csm_test_utils/message.py
push_metric
opentelekomcloud-infra/csm-test-utils
python
def push_metric(data: Metric, message_socket_address): with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as _socket: try: _socket.connect(message_socket_address) msg = ('%s\n' % data.serialize()) _socket.sendall(msg.encode('utf8')) return 'success' except socket.error as err: LOGGER.exception('Error establishing connection to socket') raise err except Exception as ex: LOGGER.exception('Error writing message to socket') raise ex
def serialize(self) -> str: 'Serialize data as json string' try: return json.dumps(self, separators=(',', ':')) except TypeError as err: return str(err)
4,459,465,730,251,297,300
Serialize data as json string
csm_test_utils/message.py
serialize
opentelekomcloud-infra/csm-test-utils
python
def serialize(self) -> str: try: return json.dumps(self, separators=(',', ':')) except TypeError as err: return str(err)
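Note that json.dumps raises TypeError for unserialisable objects, never json.JSONDecodeError, hence the corrected except clause above. The call json.dumps(self) also only succeeds if the instance itself is JSON-native; a minimal self-contained sketch under the assumption that Metric is a dict subclass:

import json

class Metric(dict):
    # Assumption: dict subclass, so json.dumps(self) serialises the fields directly.
    def serialize(self) -> str:
        try:
            return json.dumps(self, separators=(',', ':'))
        except TypeError as err:
            return str(err)

print(Metric(name='latency', value=42).serialize())  # {"name":"latency","value":42}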
def __bytes__(self) -> bytes: 'Returns bytes interpretation of data' data = self.serialize() return ('%s\n' % data).encode('utf8')
6,820,283,154,981,992,000
Returns bytes interpretation of data
csm_test_utils/message.py
__bytes__
opentelekomcloud-infra/csm-test-utils
python
def __bytes__(self) -> bytes: data = self.serialize() return ('%s\n' % data).encode('utf8')
def _verbose_message(message, *args, **kwargs): 'Print the message to stderr if -v/PYTHONVERBOSE is turned on.' verbosity = kwargs.pop('verbosity', 1) if (sys.flags.verbose >= verbosity): if (not message.startswith(('#', 'import '))): message = ('# ' + message) print(message.format(*args), file=sys.stderr)
-9,013,888,047,320,691,000
Print the message to stderr if -v/PYTHONVERBOSE is turned on.
palimport/_utils.py
_verbose_message
asmodehn/lark_import
python
def _verbose_message(message, *args, **kwargs): verbosity = kwargs.pop('verbosity', 1) if (sys.flags.verbose >= verbosity): if (not message.startswith(('#', 'import '))): message = ('# ' + message) print(message.format(*args), file=sys.stderr)
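A brief usage sketch; output goes to stderr only when the interpreter runs with -v (or PYTHONVERBOSE set), and messages not already starting with '#' or 'import ' get a '# ' prefix:

_verbose_message('loading module {}', 'lark')  # prints "# loading module lark" under python -v
_verbose_message('extra detail', verbosity=2)  # only printed under python -vv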
def validate_station(station): 'Check that the station ID is well-formed.' if (station is None): return station = station.replace('.shtml', '') if (not re.fullmatch('ID[A-Z]\\d\\d\\d\\d\\d\\.\\d\\d\\d\\d\\d', station)): raise vol.error.Invalid('Malformed station ID') return station
-1,019,518,209,456,315,800
Check that the station ID is well-formed.
homeassistant/components/bom/sensor.py
validate_station
5mauggy/home-assistant
python
def validate_station(station): if (station is None): return station = station.replace('.shtml', '') if (not re.fullmatch('ID[A-Z]\\d\\d\\d\\d\\d\\.\\d\\d\\d\\d\\d', station)): raise vol.error.Invalid('Malformed station ID') return station
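A quick sketch of the accepted format: a BOM product id (ID, a letter, five digits), a dot, then a five-digit WMO id. The error path assumes the voluptuous library imported as vol:

print(validate_station('IDN60901.94767'))        # 'IDN60901.94767'
print(validate_station('IDN60901.94767.shtml'))  # '.shtml' is stripped first, then validated
# validate_station('bogus') raises vol.error.Invalid('Malformed station ID')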
def setup_platform(hass, config, add_entities, discovery_info=None): 'Set up the BOM sensor.' station = config.get(CONF_STATION) (zone_id, wmo_id) = (config.get(CONF_ZONE_ID), config.get(CONF_WMO_ID)) if (station is not None): if (zone_id and wmo_id): _LOGGER.warning('Using config %s, not %s and %s for BOM sensor', CONF_STATION, CONF_ZONE_ID, CONF_WMO_ID) elif (zone_id and wmo_id): station = '{}.{}'.format(zone_id, wmo_id) else: station = closest_station(config.get(CONF_LATITUDE), config.get(CONF_LONGITUDE), hass.config.config_dir) if (station is None): _LOGGER.error('Could not get BOM weather station from lat/lon') return bom_data = BOMCurrentData(station) try: bom_data.update() except ValueError as err: _LOGGER.error('Received error from BOM Current: %s', err) return add_entities([BOMCurrentSensor(bom_data, variable, config.get(CONF_NAME)) for variable in config[CONF_MONITORED_CONDITIONS]])
7,841,557,922,441,994,000
Set up the BOM sensor.
homeassistant/components/bom/sensor.py
setup_platform
5mauggy/home-assistant
python
def setup_platform(hass, config, add_entities, discovery_info=None): station = config.get(CONF_STATION) (zone_id, wmo_id) = (config.get(CONF_ZONE_ID), config.get(CONF_WMO_ID)) if (station is not None): if (zone_id and wmo_id): _LOGGER.warning('Using config %s, not %s and %s for BOM sensor', CONF_STATION, CONF_ZONE_ID, CONF_WMO_ID) elif (zone_id and wmo_id): station = '{}.{}'.format(zone_id, wmo_id) else: station = closest_station(config.get(CONF_LATITUDE), config.get(CONF_LONGITUDE), hass.config.config_dir) if (station is None): _LOGGER.error('Could not get BOM weather station from lat/lon') return bom_data = BOMCurrentData(station) try: bom_data.update() except ValueError as err: _LOGGER.error('Received error from BOM Current: %s', err) return add_entities([BOMCurrentSensor(bom_data, variable, config.get(CONF_NAME)) for variable in config[CONF_MONITORED_CONDITIONS]])
def _get_bom_stations(): 'Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.\n\n This function does several MB of internet requests, so please use the\n caching version to minimise latency and hit-count.\n ' latlon = {} with io.BytesIO() as file_obj: with ftplib.FTP('ftp.bom.gov.au') as ftp: ftp.login() ftp.cwd('anon2/home/ncc/metadata/sitelists') ftp.retrbinary('RETR stations.zip', file_obj.write) file_obj.seek(0) with zipfile.ZipFile(file_obj) as zipped: with zipped.open('stations.txt') as station_txt: for _ in range(4): station_txt.readline() while True: line = station_txt.readline().decode().strip() if (len(line) < 120): break (wmo, lat, lon) = (line[a:b].strip() for (a, b) in [(128, 134), (70, 78), (79, 88)]) if (wmo != '..'): latlon[wmo] = (float(lat), float(lon)) zones = {} pattern = '<a href="/products/(?P<zone>ID[A-Z]\\d\\d\\d\\d\\d)/(?P=zone)\\.(?P<wmo>\\d\\d\\d\\d\\d).shtml">' for state in ('nsw', 'vic', 'qld', 'wa', 'tas', 'nt'): url = 'http://www.bom.gov.au/{0}/observations/{0}all.shtml'.format(state) for (zone_id, wmo_id) in re.findall(pattern, requests.get(url).text): zones[wmo_id] = zone_id return {'{}.{}'.format(zones[k], k): latlon[k] for k in (set(latlon) & set(zones))}
3,295,056,305,154,763,000
Return {CONF_STATION: (lat, lon)} for all stations, for auto-config. This function does several MB of internet requests, so please use the caching version to minimise latency and hit-count.
homeassistant/components/bom/sensor.py
_get_bom_stations
5mauggy/home-assistant
python
def _get_bom_stations(): 'Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.\n\n This function does several MB of internet requests, so please use the\n caching version to minimise latency and hit-count.\n ' latlon = {} with io.BytesIO() as file_obj: with ftplib.FTP('ftp.bom.gov.au') as ftp: ftp.login() ftp.cwd('anon2/home/ncc/metadata/sitelists') ftp.retrbinary('RETR stations.zip', file_obj.write) file_obj.seek(0) with zipfile.ZipFile(file_obj) as zipped: with zipped.open('stations.txt') as station_txt: for _ in range(4): station_txt.readline() while True: line = station_txt.readline().decode().strip() if (len(line) < 120): break (wmo, lat, lon) = (line[a:b].strip() for (a, b) in [(128, 134), (70, 78), (79, 88)]) if (wmo != '..'): latlon[wmo] = (float(lat), float(lon)) zones = {} pattern = '<a href="/products/(?P<zone>ID[A-Z]\\d\\d\\d\\d\\d)/(?P=zone)\\.(?P<wmo>\\d\\d\\d\\d\\d).shtml">' for state in ('nsw', 'vic', 'qld', 'wa', 'tas', 'nt'): url = 'http://www.bom.gov.au/{0}/observations/{0}all.shtml'.format(state) for (zone_id, wmo_id) in re.findall(pattern, requests.get(url).text): zones[wmo_id] = zone_id return {'{}.{}'.format(zones[k], k): latlon[k] for k in (set(latlon) & set(zones))}
def bom_stations(cache_dir): 'Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.\n\n Results from internet requests are cached as compressed JSON, making\n subsequent calls very much faster.\n ' cache_file = os.path.join(cache_dir, '.bom-stations.json.gz') if (not os.path.isfile(cache_file)): stations = _get_bom_stations() with gzip.open(cache_file, 'wt') as cache: json.dump(stations, cache, sort_keys=True) return stations with gzip.open(cache_file, 'rt') as cache: return {k: tuple(v) for (k, v) in json.load(cache).items()}
-3,257,003,656,173,373,400
Return {CONF_STATION: (lat, lon)} for all stations, for auto-config. Results from internet requests are cached as compressed JSON, making subsequent calls very much faster.
homeassistant/components/bom/sensor.py
bom_stations
5mauggy/home-assistant
python
def bom_stations(cache_dir): 'Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.\n\n Results from internet requests are cached as compressed JSON, making\n subsequent calls very much faster.\n ' cache_file = os.path.join(cache_dir, '.bom-stations.json.gz') if (not os.path.isfile(cache_file)): stations = _get_bom_stations() with gzip.open(cache_file, 'wt') as cache: json.dump(stations, cache, sort_keys=True) return stations with gzip.open(cache_file, 'rt') as cache: return {k: tuple(v) for (k, v) in json.load(cache).items()}
def closest_station(lat, lon, cache_dir): 'Return the ZONE_ID.WMO_ID of the closest station to our lat/lon.' if ((lat is None) or (lon is None) or (not os.path.isdir(cache_dir))): return stations = bom_stations(cache_dir) def comparable_dist(wmo_id): 'Create a pseudo-distance from latitude/longitude.' (station_lat, station_lon) = stations[wmo_id] return (((lat - station_lat) ** 2) + ((lon - station_lon) ** 2)) return min(stations, key=comparable_dist)
6,523,936,549,118,849,000
Return the ZONE_ID.WMO_ID of the closest station to our lat/lon.
homeassistant/components/bom/sensor.py
closest_station
5mauggy/home-assistant
python
def closest_station(lat, lon, cache_dir): if ((lat is None) or (lon is None) or (not os.path.isdir(cache_dir))): return stations = bom_stations(cache_dir) def comparable_dist(wmo_id): 'Create a pseudo-distance from latitude/longitude.' (station_lat, station_lon) = stations[wmo_id] return (((lat - station_lat) ** 2) + ((lon - station_lon) ** 2)) return min(stations, key=comparable_dist)
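comparable_dist deliberately skips the square root: sqrt is monotonic, so min() over squared lat/lon differences selects the same station while saving a call per candidate. A tiny illustration with hypothetical station coordinates:

stations = {'94767': (-33.9, 151.2), '94610': (-31.9, 115.9)}  # hypothetical WMO ids
lat, lon = -33.8, 151.0
nearest = min(stations, key=lambda w: (lat - stations[w][0]) ** 2 + (lon - stations[w][1]) ** 2)
print(nearest)  # '94767'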
def __init__(self, bom_data, condition, stationname): 'Initialize the sensor.' self.bom_data = bom_data self._condition = condition self.stationname = stationname
143,747,721,404,573,150
Initialize the sensor.
homeassistant/components/bom/sensor.py
__init__
5mauggy/home-assistant
python
def __init__(self, bom_data, condition, stationname): self.bom_data = bom_data self._condition = condition self.stationname = stationname
@property def name(self): 'Return the name of the sensor.' if (self.stationname is None): return 'BOM {}'.format(SENSOR_TYPES[self._condition][0]) return 'BOM {} {}'.format(self.stationname, SENSOR_TYPES[self._condition][0])
-6,286,635,050,685,421,000
Return the name of the sensor.
homeassistant/components/bom/sensor.py
name
5mauggy/home-assistant
python
@property def name(self): if (self.stationname is None): return 'BOM {}'.format(SENSOR_TYPES[self._condition][0]) return 'BOM {} {}'.format(self.stationname, SENSOR_TYPES[self._condition][0])
@property def state(self): 'Return the state of the sensor.' return self.bom_data.get_reading(self._condition)
-2,573,970,461,134,171,600
Return the state of the sensor.
homeassistant/components/bom/sensor.py
state
5mauggy/home-assistant
python
@property def state(self): return self.bom_data.get_reading(self._condition)
@property def device_state_attributes(self): 'Return the state attributes of the device.' attr = {ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_LAST_UPDATE: self.bom_data.last_updated, ATTR_SENSOR_ID: self._condition, ATTR_STATION_ID: self.bom_data.latest_data['wmo'], ATTR_STATION_NAME: self.bom_data.latest_data['name'], ATTR_ZONE_ID: self.bom_data.latest_data['history_product']} return attr
-5,342,490,108,203,357,000
Return the state attributes of the device.
homeassistant/components/bom/sensor.py
device_state_attributes
5mauggy/home-assistant
python
@property def device_state_attributes(self): attr = {ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_LAST_UPDATE: self.bom_data.last_updated, ATTR_SENSOR_ID: self._condition, ATTR_STATION_ID: self.bom_data.latest_data['wmo'], ATTR_STATION_NAME: self.bom_data.latest_data['name'], ATTR_ZONE_ID: self.bom_data.latest_data['history_product']} return attr
@property def unit_of_measurement(self): 'Return the units of measurement.' return SENSOR_TYPES[self._condition][1]
-4,311,322,716,511,070,000
Return the units of measurement.
homeassistant/components/bom/sensor.py
unit_of_measurement
5mauggy/home-assistant
python
@property def unit_of_measurement(self): return SENSOR_TYPES[self._condition][1]
def update(self): 'Update current conditions.' self.bom_data.update()
439,338,767,930,620,200
Update current conditions.
homeassistant/components/bom/sensor.py
update
5mauggy/home-assistant
python
def update(self): self.bom_data.update()
def __init__(self, station_id): 'Initialize the data object.' (self._zone_id, self._wmo_id) = station_id.split('.') self._data = None self.last_updated = None
-3,496,315,959,322,159,600
Initialize the data object.
homeassistant/components/bom/sensor.py
__init__
5mauggy/home-assistant
python
def __init__(self, station_id): (self._zone_id, self._wmo_id) = station_id.split('.') self._data = None self.last_updated = None
def _build_url(self): 'Build the URL for the requests.' url = _RESOURCE.format(self._zone_id, self._zone_id, self._wmo_id) _LOGGER.debug('BOM URL: %s', url) return url
-6,698,946,057,005,399,000
Build the URL for the requests.
homeassistant/components/bom/sensor.py
_build_url
5mauggy/home-assistant
python
def _build_url(self): url = _RESOURCE.format(self._zone_id, self._zone_id, self._wmo_id) _LOGGER.debug('BOM URL: %s', url) return url
@property def latest_data(self): 'Return the latest data object.' if self._data: return self._data[0] return None
6,897,681,113,500,615,000
Return the latest data object.
homeassistant/components/bom/sensor.py
latest_data
5mauggy/home-assistant
python
@property def latest_data(self): if self._data: return self._data[0] return None
def get_reading(self, condition): 'Return the value for the given condition.\n\n BOM weather publishes condition readings for weather (and a few other\n conditions) at intervals throughout the day. To avoid a `-` value in\n the frontend for these conditions, we traverse the historical data\n for the latest value that is not `-`.\n\n Iterators are used in this method to avoid iterating needlessly\n through the entire BOM provided dataset.\n ' condition_readings = (entry[condition] for entry in self._data) return next((x for x in condition_readings if (x != '-')), None)
7,540,319,837,574,102,000
Return the value for the given condition. BOM weather publishes condition readings for weather (and a few other conditions) at intervals throughout the day. To avoid a `-` value in the frontend for these conditions, we traverse the historical data for the latest value that is not `-`. Iterators are used in this method to avoid iterating needlessly through the entire BOM provided dataset.
homeassistant/components/bom/sensor.py
get_reading
5mauggy/home-assistant
python
def get_reading(self, condition): 'Return the value for the given condition.\n\n BOM weather publishes condition readings for weather (and a few other\n conditions) at intervals throughout the day. To avoid a `-` value in\n the frontend for these conditions, we traverse the historical data\n for the latest value that is not `-`.\n\n Iterators are used in this method to avoid iterating needlessly\n through the entire BOM provided dataset.\n ' condition_readings = (entry[condition] for entry in self._data) return next((x for x in condition_readings if (x != '-')), None)
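The generator-plus-next idiom above stops at the first reading that is not '-' without scanning the whole history; a standalone sketch of the same pattern:

history = [{'air_temp': '-'}, {'air_temp': '-'}, {'air_temp': '17.3'}]
condition_readings = (entry['air_temp'] for entry in history)
print(next((x for x in condition_readings if x != '-'), None))  # '17.3'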
def should_update(self): 'Determine whether an update should occur.\n\n BOM provides updated data every 30 minutes. We manually define\n refreshing logic here rather than a throttle to keep updates\n in lock-step with BOM.\n\n If 35 minutes has passed since the last BOM data update, then\n an update should be done.\n ' if (self.last_updated is None): return True now = datetime.datetime.now() update_due_at = (self.last_updated + datetime.timedelta(minutes=35)) return (now > update_due_at)
742,864,539,779,868,200
Determine whether an update should occur. BOM provides updated data every 30 minutes. We manually define refreshing logic here rather than a throttle to keep updates in lock-step with BOM. If 35 minutes has passed since the last BOM data update, then an update should be done.
homeassistant/components/bom/sensor.py
should_update
5mauggy/home-assistant
python
def should_update(self): 'Determine whether an update should occur.\n\n BOM provides updated data every 30 minutes. We manually define\n refreshing logic here rather than a throttle to keep updates\n in lock-step with BOM.\n\n If 35 minutes has passed since the last BOM data update, then\n an update should be done.\n ' if (self.last_updated is None): return True now = datetime.datetime.now() update_due_at = (self.last_updated + datetime.timedelta(minutes=35)) return (now > update_due_at)
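The 35-minute rule is plain datetime arithmetic; a quick check:

import datetime
last_updated = datetime.datetime.now() - datetime.timedelta(minutes=40)
update_due_at = last_updated + datetime.timedelta(minutes=35)
print(datetime.datetime.now() > update_due_at)  # True: 40 minutes have passed, so an update is due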
@Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): 'Get the latest data from BOM.' if (not self.should_update()): _LOGGER.debug('BOM was updated %s minutes ago, skipping update as < 35 minutes, Now: %s, LastUpdate: %s', (datetime.datetime.now() - self.last_updated), datetime.datetime.now(), self.last_updated) return try: result = requests.get(self._build_url(), timeout=10).json() self._data = result['observations']['data'] self.last_updated = datetime.datetime.strptime(str(self._data[0]['local_date_time_full']), '%Y%m%d%H%M%S') return except ValueError as err: _LOGGER.error('Check BOM %s', err.args) self._data = None raise
8,597,626,351,255,408,000
Get the latest data from BOM.
homeassistant/components/bom/sensor.py
update
5mauggy/home-assistant
python
@Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): if (not self.should_update()): _LOGGER.debug('BOM was updated %s minutes ago, skipping update as < 35 minutes, Now: %s, LastUpdate: %s', (datetime.datetime.now() - self.last_updated), datetime.datetime.now(), self.last_updated) return try: result = requests.get(self._build_url(), timeout=10).json() self._data = result['observations']['data'] self.last_updated = datetime.datetime.strptime(str(self._data[0]['local_date_time_full']), '%Y%m%d%H%M%S') return except ValueError as err: _LOGGER.error('Check BOM %s', err.args) self._data = None raise
def comparable_dist(wmo_id): 'Create a pseudo-distance from latitude/longitude.' (station_lat, station_lon) = stations[wmo_id] return (((lat - station_lat) ** 2) + ((lon - station_lon) ** 2))
-6,675,706,677,488,623,000
Create a pseudo-distance from latitude/longitude.
homeassistant/components/bom/sensor.py
comparable_dist
5mauggy/home-assistant
python
def comparable_dist(wmo_id): (station_lat, station_lon) = stations[wmo_id] return (((lat - station_lat) ** 2) + ((lon - station_lon) ** 2))
def reset_train_val_dataloaders(self, model) -> None: '\n Resets train and val dataloaders if none are attached to the trainer.\n\n The val dataloader must be initialized before training loop starts, as the training loop\n inspects the val dataloader to determine whether to run the evaluation loop.\n ' if (self.trainer.train_dataloader is None): self.trainer.reset_train_dataloader(model) if (self.trainer.val_dataloaders is None): self.trainer.reset_val_dataloader(model)
-6,859,237,390,870,597,000
Resets train and val dataloaders if none are attached to the trainer. The val dataloader must be initialized before training loop starts, as the training loop inspects the val dataloader to determine whether to run the evaluation loop.
pytorch_lightning/trainer/training_loop.py
reset_train_val_dataloaders
dcfidalgo/pytorch-lightning
python
def reset_train_val_dataloaders(self, model) -> None: '\n Resets train and val dataloaders if none are attached to the trainer.\n\n The val dataloader must be initialized before training loop starts, as the training loop\n inspects the val dataloader to determine whether to run the evaluation loop.\n ' if (self.trainer.train_dataloader is None): self.trainer.reset_train_dataloader(model) if (self.trainer.val_dataloaders is None): self.trainer.reset_val_dataloader(model)
def get_optimizers_iterable(self, batch_idx=None): '\n Generates an iterable with (idx, optimizer) for each optimizer.\n ' if (not self.trainer.optimizer_frequencies): return list(enumerate(self.trainer.optimizers)) if (batch_idx is None): batch_idx = self.trainer.total_batch_idx optimizers_loop_length = self.optimizer_freq_cumsum[(- 1)] current_place_in_loop = (batch_idx % optimizers_loop_length) opt_idx = np.argmax((self.optimizer_freq_cumsum > current_place_in_loop)) return [[opt_idx, self.trainer.optimizers[opt_idx]]]
-5,717,690,482,004,592,000
Generates an iterable with (idx, optimizer) for each optimizer.
pytorch_lightning/trainer/training_loop.py
get_optimizers_iterable
dcfidalgo/pytorch-lightning
python
def get_optimizers_iterable(self, batch_idx=None): '\n \n ' if (not self.trainer.optimizer_frequencies): return list(enumerate(self.trainer.optimizers)) if (batch_idx is None): batch_idx = self.trainer.total_batch_idx optimizers_loop_length = self.optimizer_freq_cumsum[(- 1)] current_place_in_loop = (batch_idx % optimizers_loop_length) opt_idx = np.argmax((self.optimizer_freq_cumsum > current_place_in_loop)) return [[opt_idx, self.trainer.optimizers[opt_idx]]]
@staticmethod def _prepare_outputs(outputs: List[List[List[Result]]], batch_mode: bool) -> Union[(List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict)]: '\n Extract required information from batch or epoch end results.\n\n Args:\n outputs: A 3-dimensional list of ``Result`` objects with dimensions:\n [optimizer outs][batch outs][tbptt steps].\n\n batch_mode: If True, ignore the batch output dimension.\n\n Returns:\n The cleaned outputs with ``Result`` objects converted to dictionaries. All list dimensions of size one will\n be collapsed.\n ' processed_outputs = [] for opt_outputs in outputs: if (len(opt_outputs) == 0): continue processed_batch_outputs = [] if batch_mode: opt_outputs = [opt_outputs] for batch_outputs in opt_outputs: processed_tbptt_outputs = [] for tbptt_output in batch_outputs: out = tbptt_output.extra out['loss'] = tbptt_output.minimize processed_tbptt_outputs.append(out) if (len(processed_tbptt_outputs) == 1): processed_tbptt_outputs = processed_tbptt_outputs[0] processed_batch_outputs.append(processed_tbptt_outputs) if batch_mode: processed_batch_outputs = processed_batch_outputs[0] processed_outputs.append(processed_batch_outputs) if (len(processed_outputs) == 1): processed_outputs = processed_outputs[0] return processed_outputs
-2,936,267,877,877,756,000
Extract required information from batch or epoch end results. Args: outputs: A 3-dimensional list of ``Result`` objects with dimensions: [optimizer outs][batch outs][tbptt steps]. batch_mode: If True, ignore the batch output dimension. Returns: The cleaned outputs with ``Result`` objects converted to dictionaries. All list dimensions of size one will be collapsed.
pytorch_lightning/trainer/training_loop.py
_prepare_outputs
dcfidalgo/pytorch-lightning
python
@staticmethod def _prepare_outputs(outputs: List[List[List[Result]]], batch_mode: bool) -> Union[(List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict)]: '\n Extract required information from batch or epoch end results.\n\n Args:\n outputs: A 3-dimensional list of ``Result`` objects with dimensions:\n [optimizer outs][batch outs][tbptt steps].\n\n batch_mode: If True, ignore the batch output dimension.\n\n Returns:\n The cleaned outputs with ``Result`` objects converted to dictionaries. All list dimensions of size one will\n be collapsed.\n ' processed_outputs = [] for opt_outputs in outputs: if (len(opt_outputs) == 0): continue processed_batch_outputs = [] if batch_mode: opt_outputs = [opt_outputs] for batch_outputs in opt_outputs: processed_tbptt_outputs = [] for tbptt_output in batch_outputs: out = tbptt_output.extra out['loss'] = tbptt_output.minimize processed_tbptt_outputs.append(out) if (len(processed_tbptt_outputs) == 1): processed_tbptt_outputs = processed_tbptt_outputs[0] processed_batch_outputs.append(processed_tbptt_outputs) if batch_mode: processed_batch_outputs = processed_batch_outputs[0] processed_outputs.append(processed_batch_outputs) if (len(processed_outputs) == 1): processed_outputs = processed_outputs[0] return processed_outputs
@contextmanager def block_ddp_sync_behaviour(self, should_block_sync: bool=False): '\n automatic_optimization = True\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n\n automatic_optimization = False\n do not block ddp gradient sync when using manual optimization\n as gradients are needed within the training step\n\n Returns:\n context manager with sync behaviour off\n\n ' if (isinstance(self.trainer.training_type_plugin, ParallelPlugin) and (self.trainer.lightning_module.automatic_optimization or should_block_sync)): with self.trainer.training_type_plugin.block_backward_sync(): (yield None) else: (yield None)
6,418,188,747,189,470,000
automatic_optimization = True Blocks ddp sync gradients behaviour on backwards pass. This is useful for skipping sync when accumulating gradients, reducing communication overhead automatic_optimization = False do not block ddp gradient sync when using manual optimization as gradients are needed within the training step Returns: context manager with sync behaviour off
pytorch_lightning/trainer/training_loop.py
block_ddp_sync_behaviour
dcfidalgo/pytorch-lightning
python
@contextmanager def block_ddp_sync_behaviour(self, should_block_sync: bool=False): '\n automatic_optimization = True\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n\n automatic_optimization = False\n do not block ddp gradient sync when using manual optimization\n as gradients are needed within the training step\n\n Returns:\n context manager with sync behaviour off\n\n ' if (isinstance(self.trainer.training_type_plugin, ParallelPlugin) and (self.trainer.lightning_module.automatic_optimization or should_block_sync)): with self.trainer.training_type_plugin.block_backward_sync(): (yield None) else: (yield None)
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens): 'Wrap forward, zero_grad and backward in a closure so second order methods work' with self.trainer.profiler.profile('training_step_and_backward'): result = self.training_step(split_batch, batch_idx, opt_idx, hiddens) self._curr_step_result = result if ((not self._skip_backward) and self.trainer.lightning_module.automatic_optimization): is_first_batch_to_accumulate = ((batch_idx % self.trainer.accumulate_grad_batches) == 0) if is_first_batch_to_accumulate: self.on_before_zero_grad(optimizer) self.optimizer_zero_grad(batch_idx, optimizer, opt_idx) if (result is not None): with self.trainer.profiler.profile('backward'): self.backward(result, optimizer, opt_idx) if (not self.should_accumulate()): self.on_after_backward(result.training_step_output, batch_idx, result.loss) if self.trainer.terminate_on_nan: self._check_finite(result.loss) else: self.warning_cache.warn('training_step returned None. If this was on purpose, ignore this warning...') if (len(self.trainer.optimizers) > 1): self.trainer.lightning_module.untoggle_optimizer(opt_idx) return result
-7,326,739,331,186,369,000
Wrap forward, zero_grad and backward in a closure so second order methods work
pytorch_lightning/trainer/training_loop.py
training_step_and_backward
dcfidalgo/pytorch-lightning
python
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens): with self.trainer.profiler.profile('training_step_and_backward'): result = self.training_step(split_batch, batch_idx, opt_idx, hiddens) self._curr_step_result = result if ((not self._skip_backward) and self.trainer.lightning_module.automatic_optimization): is_first_batch_to_accumulate = ((batch_idx % self.trainer.accumulate_grad_batches) == 0) if is_first_batch_to_accumulate: self.on_before_zero_grad(optimizer) self.optimizer_zero_grad(batch_idx, optimizer, opt_idx) if (result is not None): with self.trainer.profiler.profile('backward'): self.backward(result, optimizer, opt_idx) if (not self.should_accumulate()): self.on_after_backward(result.training_step_output, batch_idx, result.loss) if self.trainer.terminate_on_nan: self._check_finite(result.loss) else: self.warning_cache.warn('training_step returned None. If this was on purpose, ignore this warning...') if (len(self.trainer.optimizers) > 1): self.trainer.lightning_module.untoggle_optimizer(opt_idx) return result
def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool, on_epoch: bool=False) -> bool: ' Decide if we should run validation. ' if (not self.trainer.enable_validation): return False if (((self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch) != 0): return False is_val_check_batch = False if (isinstance(self.trainer.limit_train_batches, int) and (self.trainer.val_check_batch == float('inf'))): is_val_check_batch = (((batch_idx + 1) % self.trainer.limit_train_batches) == 0) elif (self.trainer.val_check_batch != float('inf')): is_val_check_batch = (((batch_idx + 1) % self.trainer.val_check_batch) == 0) epoch_end_val_check = (((batch_idx + 1) % self.trainer.num_training_batches) == 0) is_last_batch_for_infinite_dataset = (is_last_batch and (self.trainer.val_check_batch == float('inf'))) if on_epoch: return ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop or is_last_batch_for_infinite_dataset) else: return (is_val_check_batch and (not epoch_end_val_check))
-6,102,987,013,807,913,000
Decide if we should run validation.
pytorch_lightning/trainer/training_loop.py
_should_check_val_fx
dcfidalgo/pytorch-lightning
python
def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool, on_epoch: bool=False) -> bool: ' ' if (not self.trainer.enable_validation): return False if (((self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch) != 0): return False is_val_check_batch = False if (isinstance(self.trainer.limit_train_batches, int) and (self.trainer.val_check_batch == float('inf'))): is_val_check_batch = (((batch_idx + 1) % self.trainer.limit_train_batches) == 0) elif (self.trainer.val_check_batch != float('inf')): is_val_check_batch = (((batch_idx + 1) % self.trainer.val_check_batch) == 0) epoch_end_val_check = (((batch_idx + 1) % self.trainer.num_training_batches) == 0) is_last_batch_for_infinite_dataset = (is_last_batch and (self.trainer.val_check_batch == float('inf'))) if on_epoch: return ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop or is_last_batch_for_infinite_dataset) else: return (is_val_check_batch and (not epoch_end_val_check))
def _truncated_bptt_enabled(self) -> bool: ' Temporary tbptt utilities until this flag is fully migrated to the lightning module. ' return (self._truncated_bptt_steps() > 0)
-3,175,895,986,339,829,000
Temporary tbptt utilities until this flag is fully migrated to the lightning module.
pytorch_lightning/trainer/training_loop.py
_truncated_bptt_enabled
dcfidalgo/pytorch-lightning
python
def _truncated_bptt_enabled(self) -> bool: ' ' return (self._truncated_bptt_steps() > 0)
@parameterized.parameters((512, 64, 32, 64, np.float32, 0.0001), (512, 64, 32, 64, np.float64, 1e-08), (512, 64, 64, 64, np.float32, 0.0001), (512, 64, 64, 64, np.float64, 1e-08), (512, 72, 64, 64, np.float32, 0.0001), (512, 72, 64, 64, np.float64, 1e-08), (512, 64, 25, 64, np.float32, 0.0001), (512, 64, 25, 64, np.float64, 1e-08), (512, 25, 15, 36, np.float32, 0.0001), (512, 25, 15, 36, np.float64, 1e-08), (123, 23, 5, 42, np.float32, 0.0001), (123, 23, 5, 42, np.float64, 1e-08)) def test_stft_and_inverse_stft(self, signal_length, frame_length, frame_step, fft_length, np_rtype, tol): 'Test that spectral_ops.stft/inverse_stft match a NumPy implementation.' signal = np.random.random(signal_length).astype(np_rtype) self._compare(signal, frame_length, frame_step, fft_length, tol)
-4,160,313,818,380,276,700
Test that spectral_ops.stft/inverse_stft match a NumPy implementation.
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
test_stft_and_inverse_stft
05259/tensorflow
python
@parameterized.parameters((512, 64, 32, 64, np.float32, 0.0001), (512, 64, 32, 64, np.float64, 1e-08), (512, 64, 64, 64, np.float32, 0.0001), (512, 64, 64, 64, np.float64, 1e-08), (512, 72, 64, 64, np.float32, 0.0001), (512, 72, 64, 64, np.float64, 1e-08), (512, 64, 25, 64, np.float32, 0.0001), (512, 64, 25, 64, np.float64, 1e-08), (512, 25, 15, 36, np.float32, 0.0001), (512, 25, 15, 36, np.float64, 1e-08), (123, 23, 5, 42, np.float32, 0.0001), (123, 23, 5, 42, np.float64, 1e-08)) def test_stft_and_inverse_stft(self, signal_length, frame_length, frame_step, fft_length, np_rtype, tol): signal = np.random.random(signal_length).astype(np_rtype) self._compare(signal, frame_length, frame_step, fft_length, tol)
@parameterized.parameters((256, 32), (256, 64), (128, 25), (127, 32), (128, 64)) def test_inverse_stft_window_fn(self, frame_length, frame_step): 'Test that inverse_stft_window_fn has unit gain at each window phase.' hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32) inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step) inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32) (hann_window, inverse_window) = self.evaluate([hann_window, inverse_window]) product_window = (hann_window * inverse_window) for i in range(frame_step): self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))
-6,633,481,258,799,354,000
Test that inverse_stft_window_fn has unit gain at each window phase.
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
test_inverse_stft_window_fn
05259/tensorflow
python
@parameterized.parameters((256, 32), (256, 64), (128, 25), (127, 32), (128, 64)) def test_inverse_stft_window_fn(self, frame_length, frame_step): hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32) inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step) inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32) (hann_window, inverse_window) = self.evaluate([hann_window, inverse_window]) product_window = (hann_window * inverse_window) for i in range(frame_step): self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))
@parameterized.parameters((256, 64), (128, 32)) def test_inverse_stft_window_fn_special_case(self, frame_length, frame_step): 'Test inverse_stft_window_fn in special overlap = 3/4 case.' hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32) inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step) inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32) self.assertAllClose(hann_window, (inverse_window * 1.5))
-8,461,162,554,945,300,000
Test inverse_stft_window_fn in special overlap = 3/4 case.
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
test_inverse_stft_window_fn_special_case
05259/tensorflow
python
@parameterized.parameters((256, 64), (128, 32)) def test_inverse_stft_window_fn_special_case(self, frame_length, frame_step): hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32) inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step) inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32) self.assertAllClose(hann_window, (inverse_window * 1.5))
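The 1.5 factor in the special case above comes from the constant-overlap-add property of the Hann window at 75% overlap: the squared window summed across the four overlapping phases is exactly 1.5, so the inverse window is hann / 1.5. A NumPy sketch of that identity, assuming the periodic Hann definition that tf.signal uses:

import numpy as np

frame_length, frame_step = 256, 64  # 75% overlap, as in the test parameters
n = np.arange(frame_length)
hann = 0.5 - 0.5 * np.cos(2 * np.pi * n / frame_length)  # periodic Hann window
for i in range(frame_step):
    # Squared-window overlap-add is constant across all phases: the sum is 1.5
    assert np.isclose(np.sum(hann[i::frame_step] ** 2), 1.5)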
@staticmethod def _compute_stft_gradient(signal, frame_length=32, frame_step=16, fft_length=32): 'Computes the gradient of the STFT with respect to `signal`.' stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length) magnitude_stft = math_ops.abs(stft) loss = math_ops.reduce_sum(magnitude_stft) return gradients_impl.gradients([loss], [signal])[0]
7,295,433,165,289,676,000
Computes the gradient of the STFT with respect to `signal`.
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
_compute_stft_gradient
05259/tensorflow
python
@staticmethod def _compute_stft_gradient(signal, frame_length=32, frame_step=16, fft_length=32): stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length) magnitude_stft = math_ops.abs(stft) loss = math_ops.reduce_sum(magnitude_stft) return gradients_impl.gradients([loss], [signal])[0]
def test_gradients(self): 'Test that spectral_ops.stft has a working gradient.' if context.executing_eagerly(): return with self.session() as sess: signal_length = 512 empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32) empty_signal_gradient = sess.run(self._compute_stft_gradient(empty_signal)) self.assertTrue((empty_signal_gradient == 0.0).all()) sinusoid = math_ops.sin(((2 * np.pi) * math_ops.linspace(0.0, 1.0, signal_length))) sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid)) self.assertFalse((sinusoid_gradient == 0.0).all())
823,800,399,930,374,500
Test that spectral_ops.stft has a working gradient.
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
test_gradients
05259/tensorflow
python
def test_gradients(self): if context.executing_eagerly(): return with self.session() as sess: signal_length = 512 empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32) empty_signal_gradient = sess.run(self._compute_stft_gradient(empty_signal)) self.assertTrue((empty_signal_gradient == 0.0).all()) sinusoid = math_ops.sin(((2 * np.pi) * math_ops.linspace(0.0, 1.0, signal_length))) sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid)) self.assertFalse((sinusoid_gradient == 0.0).all())
def test_reuse_input(self): 'Objects should be reusable after write()' original = b'original' tests = [bytearray(original), memoryview(bytearray(original))] for data in tests: self.buffer.write(data) data[:] = b'reused!!' self.assertEqual(self.buffer.read(), original)
-2,576,115,287,122,548,000
Objects should be reusable after write()
tests/test_buffer.py
test_reuse_input
18928172992817182/streamlink
python
def test_reuse_input(self): original = b'original' tests = [bytearray(original), memoryview(bytearray(original))] for data in tests: self.buffer.write(data) data[:] = b'reused!!' self.assertEqual(self.buffer.read(), original)
@property def customdata(self): '\n Assigns extra data to each datum. This may be useful when\n listening to hover, click and selection events. Note that,\n "scatter" traces also append customdata items in the markers\n DOM elements\n \n The \'customdata\' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ' return self['customdata']
1,177,023,494,794,418,000
Assigns extra data to each datum. This may be useful when listening to hover, click and selection events. Note that, "scatter" traces also append customdata items in the markers DOM elements The 'customdata' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
customdata
180Studios/LoginApp
python
@property def customdata(self): '\n Assigns extra data to each datum. This may be useful when\n listening to hover, click and selection events. Note that,\n "scatter" traces also append customdata items in the markers\n DOM elements\n \n The \'customdata\' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ' return self['customdata']
@property def customdatasrc(self): "\n Sets the source reference on plot.ly for customdata .\n \n The 'customdatasrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['customdatasrc']
-6,397,660,091,915,112,000
Sets the source reference on plot.ly for customdata . The 'customdatasrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
customdatasrc
180Studios/LoginApp
python
@property def customdatasrc(self): "\n Sets the source reference on plot.ly for customdata .\n \n The 'customdatasrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['customdatasrc']
@property def diagonal(self): "\n The 'diagonal' property is an instance of Diagonal\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Diagonal\n - A dict of string/value properties that will be passed\n to the Diagonal constructor\n \n Supported dict properties:\n \n visible\n Determines whether or not subplots on the\n diagonal are displayed.\n\n Returns\n -------\n plotly.graph_objs.splom.Diagonal\n " return self['diagonal']
-5,254,479,112,447,050,000
The 'diagonal' property is an instance of Diagonal that may be specified as: - An instance of plotly.graph_objs.splom.Diagonal - A dict of string/value properties that will be passed to the Diagonal constructor Supported dict properties: visible Determines whether or not subplots on the diagonal are displayed. Returns ------- plotly.graph_objs.splom.Diagonal
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
diagonal
180Studios/LoginApp
python
@property def diagonal(self): "\n The 'diagonal' property is an instance of Diagonal\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Diagonal\n - A dict of string/value properties that will be passed\n to the Diagonal constructor\n \n Supported dict properties:\n \n visible\n Determines whether or not subplots on the\n diagonal are displayed.\n\n Returns\n -------\n plotly.graph_objs.splom.Diagonal\n " return self['diagonal']
@property def dimensions(self): "\n The 'dimensions' property is a tuple of instances of\n Dimension that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.splom.Dimension\n - A list or tuple of dicts of string/value properties that\n will be passed to the Dimension constructor\n \n Supported dict properties:\n \n axis\n plotly.graph_objs.splom.dimension.Axis instance\n or dict with compatible properties\n label\n Sets the label corresponding to this splom\n dimension.\n name\n When used in a template, named items are\n created in the output figure in addition to any\n items the figure already has in this array. You\n can modify these items in the output figure by\n making your own item with `templateitemname`\n matching this `name` alongside your\n modifications (including `visible: false` or\n `enabled: false` to hide it). Has no effect\n outside of a template.\n templateitemname\n Used to refer to a named item in this array in\n the template. Named items from the template\n will be created even without a matching item in\n the input figure, but you can modify one by\n making an item with `templateitemname` matching\n its `name`, alongside your modifications\n (including `visible: false` or `enabled: false`\n to hide it). If there is no template or no\n matching item, this item will be hidden unless\n you explicitly show it with `visible: true`.\n values\n Sets the dimension values to be plotted.\n valuessrc\n Sets the source reference on plot.ly for\n values .\n visible\n Determines whether or not this dimension is\n shown on the graph. Note that even visible\n false dimensions contribute to the default grid\n generated by this splom trace.\n\n Returns\n -------\n tuple[plotly.graph_objs.splom.Dimension]\n " return self['dimensions']
7,061,134,127,882,084,000
The 'dimensions' property is a tuple of instances of Dimension that may be specified as: - A list or tuple of instances of plotly.graph_objs.splom.Dimension - A list or tuple of dicts of string/value properties that will be passed to the Dimension constructor Supported dict properties: axis plotly.graph_objs.splom.dimension.Axis instance or dict with compatible properties label Sets the label corresponding to this splom dimension. name When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. templateitemname Used to refer to a named item in this array in the template. Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. values Sets the dimension values to be plotted. valuessrc Sets the source reference on plot.ly for values . visible Determines whether or not this dimension is shown on the graph. Note that even visible false dimensions contribute to the default grid generated by this splom trace. Returns ------- tuple[plotly.graph_objs.splom.Dimension]
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
dimensions
180Studios/LoginApp
python
@property def dimensions(self): "\n The 'dimensions' property is a tuple of instances of\n Dimension that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.splom.Dimension\n - A list or tuple of dicts of string/value properties that\n will be passed to the Dimension constructor\n \n Supported dict properties:\n \n axis\n plotly.graph_objs.splom.dimension.Axis instance\n or dict with compatible properties\n label\n Sets the label corresponding to this splom\n dimension.\n name\n When used in a template, named items are\n created in the output figure in addition to any\n items the figure already has in this array. You\n can modify these items in the output figure by\n making your own item with `templateitemname`\n matching this `name` alongside your\n modifications (including `visible: false` or\n `enabled: false` to hide it). Has no effect\n outside of a template.\n templateitemname\n Used to refer to a named item in this array in\n the template. Named items from the template\n will be created even without a matching item in\n the input figure, but you can modify one by\n making an item with `templateitemname` matching\n its `name`, alongside your modifications\n (including `visible: false` or `enabled: false`\n to hide it). If there is no template or no\n matching item, this item will be hidden unless\n you explicitly show it with `visible: true`.\n values\n Sets the dimension values to be plotted.\n valuessrc\n Sets the source reference on plot.ly for\n values .\n visible\n Determines whether or not this dimension is\n shown on the graph. Note that even visible\n false dimensions contribute to the default grid\n generated by this splom trace.\n\n Returns\n -------\n tuple[plotly.graph_objs.splom.Dimension]\n " return self['dimensions']
@property def dimensiondefaults(self): "\n When used in a template (as\n layout.template.data.splom.dimensiondefaults), sets the default\n property values to use for elements of splom.dimensions\n \n The 'dimensiondefaults' property is an instance of Dimension\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Dimension\n - A dict of string/value properties that will be passed\n to the Dimension constructor\n \n Supported dict properties:\n\n Returns\n -------\n plotly.graph_objs.splom.Dimension\n " return self['dimensiondefaults']
-3,862,303,385,040,442,400
When used in a template (as layout.template.data.splom.dimensiondefaults), sets the default property values to use for elements of splom.dimensions The 'dimensiondefaults' property is an instance of Dimension that may be specified as: - An instance of plotly.graph_objs.splom.Dimension - A dict of string/value properties that will be passed to the Dimension constructor Supported dict properties: Returns ------- plotly.graph_objs.splom.Dimension
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
dimensiondefaults
180Studios/LoginApp
python
@property def dimensiondefaults(self): "\n When used in a template (as\n layout.template.data.splom.dimensiondefaults), sets the default\n property values to use for elements of splom.dimensions\n \n The 'dimensiondefaults' property is an instance of Dimension\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Dimension\n - A dict of string/value properties that will be passed\n to the Dimension constructor\n \n Supported dict properties:\n\n Returns\n -------\n plotly.graph_objs.splom.Dimension\n " return self['dimensiondefaults']
@property def hoverinfo(self): "\n Determines which trace information appear on hover. If `none`\n or `skip` are set, no information is displayed upon hovering.\n But, if `none` is set, click and hover events are still fired.\n \n The 'hoverinfo' property is a flaglist and may be specified\n as a string containing:\n - Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters\n (e.g. 'x+y')\n OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')\n - A list or array of the above\n\n Returns\n -------\n Any|numpy.ndarray\n " return self['hoverinfo']
1,056,236,944,801,603,700
Determines which trace information appears on hover. If `none` or `skip` are set, no information is displayed upon hovering. But, if `none` is set, click and hover events are still fired. The 'hoverinfo' property is a flaglist and may be specified as a string containing: - Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters (e.g. 'x+y') OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip') - A list or array of the above Returns ------- Any|numpy.ndarray
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
hoverinfo
180Studios/LoginApp
python
@property def hoverinfo(self): "\n Determines which trace information appear on hover. If `none`\n or `skip` are set, no information is displayed upon hovering.\n But, if `none` is set, click and hover events are still fired.\n \n The 'hoverinfo' property is a flaglist and may be specified\n as a string containing:\n - Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters\n (e.g. 'x+y')\n OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')\n - A list or array of the above\n\n Returns\n -------\n Any|numpy.ndarray\n " return self['hoverinfo']
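For illustration, a hedged sketch of the flaglist form versus one of the exclusive values; the `dims` list is a made-up placeholder:

.. code-block:: python

    import plotly.graph_objs as go

    dims = [dict(label='a', values=[1, 2, 3]),
            dict(label='b', values=[4, 5, 6])]
    # Combine individual flags with '+'.
    trace = go.Splom(dimensions=dims, hoverinfo='x+y')
    # 'none' suppresses the hover box, but click/hover events still fire.
    quiet = go.Splom(dimensions=dims, hoverinfo='none')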
@property def hoverinfosrc(self): "\n Sets the source reference on plot.ly for hoverinfo .\n \n The 'hoverinfosrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['hoverinfosrc']
7,963,201,236,316,905,000
Sets the source reference on plot.ly for hoverinfo . The 'hoverinfosrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
hoverinfosrc
180Studios/LoginApp
python
@property def hoverinfosrc(self): "\n Sets the source reference on plot.ly for hoverinfo .\n \n The 'hoverinfosrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['hoverinfosrc']
@property def hoverlabel(self): "\n The 'hoverlabel' property is an instance of Hoverlabel\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Hoverlabel\n - A dict of string/value properties that will be passed\n to the Hoverlabel constructor\n \n Supported dict properties:\n \n bgcolor\n Sets the background color of the hover labels\n for this trace\n bgcolorsrc\n Sets the source reference on plot.ly for\n bgcolor .\n bordercolor\n Sets the border color of the hover labels for\n this trace.\n bordercolorsrc\n Sets the source reference on plot.ly for\n bordercolor .\n font\n Sets the font used in hover labels.\n namelength\n Sets the length (in number of characters) of\n the trace name in the hover labels for this\n trace. -1 shows the whole name regardless of\n length. 0-3 shows the first 0-3 characters, and\n an integer >3 will show the whole name if it is\n less than that many characters, but if it is\n longer, will truncate to `namelength - 3`\n characters and add an ellipsis.\n namelengthsrc\n Sets the source reference on plot.ly for\n namelength .\n\n Returns\n -------\n plotly.graph_objs.splom.Hoverlabel\n " return self['hoverlabel']
-3,727,103,481,074,180,600
The 'hoverlabel' property is an instance of Hoverlabel that may be specified as: - An instance of plotly.graph_objs.splom.Hoverlabel - A dict of string/value properties that will be passed to the Hoverlabel constructor Supported dict properties: bgcolor Sets the background color of the hover labels for this trace bgcolorsrc Sets the source reference on plot.ly for bgcolor . bordercolor Sets the border color of the hover labels for this trace. bordercolorsrc Sets the source reference on plot.ly for bordercolor . font Sets the font used in hover labels. namelength Sets the length (in number of characters) of the trace name in the hover labels for this trace. -1 shows the whole name regardless of length. 0-3 shows the first 0-3 characters, and an integer >3 will show the whole name if it is less than that many characters, but if it is longer, will truncate to `namelength - 3` characters and add an ellipsis. namelengthsrc Sets the source reference on plot.ly for namelength . Returns ------- plotly.graph_objs.splom.Hoverlabel
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
hoverlabel
180Studios/LoginApp
python
@property def hoverlabel(self): "\n The 'hoverlabel' property is an instance of Hoverlabel\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Hoverlabel\n - A dict of string/value properties that will be passed\n to the Hoverlabel constructor\n \n Supported dict properties:\n \n bgcolor\n Sets the background color of the hover labels\n for this trace\n bgcolorsrc\n Sets the source reference on plot.ly for\n bgcolor .\n bordercolor\n Sets the border color of the hover labels for\n this trace.\n bordercolorsrc\n Sets the source reference on plot.ly for\n bordercolor .\n font\n Sets the font used in hover labels.\n namelength\n Sets the length (in number of characters) of\n the trace name in the hover labels for this\n trace. -1 shows the whole name regardless of\n length. 0-3 shows the first 0-3 characters, and\n an integer >3 will show the whole name if it is\n less than that many characters, but if it is\n longer, will truncate to `namelength - 3`\n characters and add an ellipsis.\n namelengthsrc\n Sets the source reference on plot.ly for\n namelength .\n\n Returns\n -------\n plotly.graph_objs.splom.Hoverlabel\n " return self['hoverlabel']
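A short sketch of styling the hover labels through a plain dict, which is passed to the Hoverlabel constructor as described above; the colors and sizes are arbitrary choices:

.. code-block:: python

    import plotly.graph_objs as go

    dims = [dict(label='a', values=[1, 2, 3]),
            dict(label='b', values=[4, 5, 6])]
    trace = go.Splom(
        dimensions=dims,
        name='my splom',
        # namelength=-1 shows the whole trace name in the hover label.
        hoverlabel=dict(bgcolor='white', font=dict(size=11), namelength=-1),
    )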
@property def hovertemplate(self): '\n Template string used for rendering the information that appear\n on hover box. Note that this will override `hoverinfo`.\n Variables are inserted using %{variable}, for example "y:\n %{y}". Numbers are formatted using d3-format\'s syntax\n %{variable:d3-format}, for example "Price: %{y:$.2f}". See http\n s://github.com/d3/d3-format/blob/master/README.md#locale_format\n for details on the formatting syntax. The variables available\n in `hovertemplate` are the ones emitted as event data described\n at this link https://plot.ly/javascript/plotlyjs-events/#event-\n data. Additionally, every attributes that can be specified per-\n point (the ones that are `arrayOk: true`) are available.\n Anything contained in tag `<extra>` is displayed in the\n secondary box, for example "<extra>{fullData.name}</extra>".\n \n The \'hovertemplate\' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n str|numpy.ndarray\n ' return self['hovertemplate']
7,679,512,898,802,646,000
Template string used for rendering the information that appears in the hover box. Note that this will override `hoverinfo`. Variables are inserted using %{variable}, for example "y: %{y}". Numbers are formatted using d3-format's syntax %{variable:d3-format}, for example "Price: %{y:$.2f}". See https://github.com/d3/d3-format/blob/master/README.md#locale_format for details on the formatting syntax. The variables available in `hovertemplate` are the ones emitted as event data described at this link https://plot.ly/javascript/plotlyjs-events/#event-data. Additionally, every attribute that can be specified per-point (the ones that are `arrayOk: true`) is available. Anything contained in the tag `<extra>` is displayed in the secondary box, for example "<extra>{fullData.name}</extra>". The 'hovertemplate' property is a string and must be specified as: - A string - A number that will be converted to a string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
hovertemplate
180Studios/LoginApp
python
@property def hovertemplate(self): '\n Template string used for rendering the information that appear\n on hover box. Note that this will override `hoverinfo`.\n Variables are inserted using %{variable}, for example "y:\n %{y}". Numbers are formatted using d3-format\'s syntax\n %{variable:d3-format}, for example "Price: %{y:$.2f}". See http\n s://github.com/d3/d3-format/blob/master/README.md#locale_format\n for details on the formatting syntax. The variables available\n in `hovertemplate` are the ones emitted as event data described\n at this link https://plot.ly/javascript/plotlyjs-events/#event-\n data. Additionally, every attributes that can be specified per-\n point (the ones that are `arrayOk: true`) are available.\n Anything contained in tag `<extra>` is displayed in the\n secondary box, for example "<extra>{fullData.name}</extra>".\n \n The \'hovertemplate\' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n str|numpy.ndarray\n ' return self['hovertemplate']
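A minimal sketch of the template syntax documented above; the wording inside `<extra>` and the data are made-up examples:

.. code-block:: python

    import plotly.graph_objs as go

    dims = [dict(label='a', values=[1, 2, 3]),
            dict(label='b', values=[4, 5, 6])]
    # %{x} and %{y} are substituted per point; ':.2f' is a d3-format
    # spec, and the <extra> tag fills the secondary hover box.
    trace = go.Splom(
        dimensions=dims,
        hovertemplate='x: %{x:.2f}<br>y: %{y:.2f}<extra>splom point</extra>',
    )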
@property def hovertemplatesrc(self): "\n Sets the source reference on plot.ly for hovertemplate .\n \n The 'hovertemplatesrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['hovertemplatesrc']
-8,271,637,640,725,401,000
Sets the source reference on plot.ly for hovertemplate . The 'hovertemplatesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
hovertemplatesrc
180Studios/LoginApp
python
@property def hovertemplatesrc(self): "\n Sets the source reference on plot.ly for hovertemplate .\n \n The 'hovertemplatesrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['hovertemplatesrc']
@property def hovertext(self): "\n Same as `text`.\n \n The 'hovertext' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n str|numpy.ndarray\n " return self['hovertext']
7,117,407,928,880,878,000
Same as `text`. The 'hovertext' property is a string and must be specified as: - A string - A number that will be converted to a string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
hovertext
180Studios/LoginApp
python
@property def hovertext(self): "\n Same as `text`.\n \n The 'hovertext' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n str|numpy.ndarray\n " return self['hovertext']
@property def hovertextsrc(self): "\n Sets the source reference on plot.ly for hovertext .\n \n The 'hovertextsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['hovertextsrc']
-3,061,199,869,597,252,000
Sets the source reference on plot.ly for hovertext . The 'hovertextsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
hovertextsrc
180Studios/LoginApp
python
@property def hovertextsrc(self): "\n Sets the source reference on plot.ly for hovertext .\n \n The 'hovertextsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['hovertextsrc']
@property def ids(self): "\n Assigns id labels to each datum. These ids for object constancy\n of data points during animation. Should be an array of strings,\n not numbers or any other type.\n \n The 'ids' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n " return self['ids']
-8,640,669,461,977,475,000
Assigns id labels to each datum. These ids provide object constancy for data points during animation. Should be an array of strings, not numbers or any other type. The 'ids' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
ids
180Studios/LoginApp
python
@property def ids(self): "\n Assigns id labels to each datum. These ids for object constancy\n of data points during animation. Should be an array of strings,\n not numbers or any other type.\n \n The 'ids' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n " return self['ids']
@property def idssrc(self): "\n Sets the source reference on plot.ly for ids .\n \n The 'idssrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['idssrc']
-5,876,914,191,141,589,000
Sets the source reference on plot.ly for ids . The 'idssrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
idssrc
180Studios/LoginApp
python
@property def idssrc(self): "\n Sets the source reference on plot.ly for ids .\n \n The 'idssrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['idssrc']
@property def legendgroup(self): "\n Sets the legend group for this trace. Traces part of the same\n legend group hide/show at the same time when toggling legend\n items.\n \n The 'legendgroup' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n " return self['legendgroup']
-1,439,907,517,046,329,900
Sets the legend group for this trace. Traces part of the same legend group hide/show at the same time when toggling legend items. The 'legendgroup' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
legendgroup
180Studios/LoginApp
python
@property def legendgroup(self): "\n Sets the legend group for this trace. Traces part of the same\n legend group hide/show at the same time when toggling legend\n items.\n \n The 'legendgroup' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n " return self['legendgroup']
@property def marker(self): '\n The \'marker\' property is an instance of Marker\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Marker\n - A dict of string/value properties that will be passed\n to the Marker constructor\n \n Supported dict properties:\n \n autocolorscale\n Determines whether the colorscale is a default\n palette (`autocolorscale: true`) or the palette\n determined by `marker.colorscale`. Has an\n effect only if in `marker.color`is set to a\n numerical array. In case `colorscale` is\n unspecified or `autocolorscale` is true, the\n default palette will be chosen according to\n whether numbers in the `color` array are all\n positive, all negative or mixed.\n cauto\n Determines whether or not the color domain is\n computed with respect to the input data (here\n in `marker.color`) or the bounds set in\n `marker.cmin` and `marker.cmax` Has an effect\n only if in `marker.color`is set to a numerical\n array. Defaults to `false` when `marker.cmin`\n and `marker.cmax` are set by the user.\n cmax\n Sets the upper bound of the color domain. Has\n an effect only if in `marker.color`is set to a\n numerical array. Value should have the same\n units as in `marker.color` and if set,\n `marker.cmin` must be set as well.\n cmid\n Sets the mid-point of the color domain by\n scaling `marker.cmin` and/or `marker.cmax` to\n be equidistant to this point. Has an effect\n only if in `marker.color`is set to a numerical\n array. Value should have the same units as in\n `marker.color`. Has no effect when\n `marker.cauto` is `false`.\n cmin\n Sets the lower bound of the color domain. Has\n an effect only if in `marker.color`is set to a\n numerical array. Value should have the same\n units as in `marker.color` and if set,\n `marker.cmax` must be set as well.\n color\n Sets themarkercolor. It accepts either a\n specific color or an array of numbers that are\n mapped to the colorscale relative to the max\n and min values of the array or relative to\n `marker.cmin` and `marker.cmax` if set.\n colorbar\n plotly.graph_objs.splom.marker.ColorBar\n instance or dict with compatible properties\n colorscale\n Sets the colorscale. Has an effect only if in\n `marker.color`is set to a numerical array. The\n colorscale must be an array containing arrays\n mapping a normalized value to an rgb, rgba,\n hex, hsl, hsv, or named color string. At\n minimum, a mapping for the lowest (0) and\n highest (1) values are required. For example,\n `[[0, \'rgb(0,0,255)\', [1, \'rgb(255,0,0)\']]`. To\n control the bounds of the colorscale in color\n space, use`marker.cmin` and `marker.cmax`.\n Alternatively, `colorscale` may be a palette\n name string of the following list: Greys,YlGnBu\n ,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,R\n ainbow,Portland,Jet,Hot,Blackbody,Earth,Electri\n c,Viridis,Cividis.\n colorsrc\n Sets the source reference on plot.ly for color\n .\n line\n plotly.graph_objs.splom.marker.Line instance or\n dict with compatible properties\n opacity\n Sets the marker opacity.\n opacitysrc\n Sets the source reference on plot.ly for\n opacity .\n reversescale\n Reverses the color mapping if true. Has an\n effect only if in `marker.color`is set to a\n numerical array. If true, `marker.cmin` will\n correspond to the last color in the array and\n `marker.cmax` will correspond to the first\n color.\n showscale\n Determines whether or not a colorbar is\n displayed for this trace. Has an effect only if\n in `marker.color`is set to a numerical array.\n size\n Sets the marker size (in px).\n sizemin\n Has an effect only if `marker.size` is set to a\n numerical array. Sets the minimum size (in px)\n of the rendered marker points.\n sizemode\n Has an effect only if `marker.size` is set to a\n numerical array. Sets the rule for which the\n data in `size` is converted to pixels.\n sizeref\n Has an effect only if `marker.size` is set to a\n numerical array. Sets the scale factor used to\n determine the rendered size of marker points.\n Use with `sizemin` and `sizemode`.\n sizesrc\n Sets the source reference on plot.ly for size\n .\n symbol\n Sets the marker symbol type. Adding 100 is\n equivalent to appending "-open" to a symbol\n name. Adding 200 is equivalent to appending\n "-dot" to a symbol name. Adding 300 is\n equivalent to appending "-open-dot" or "dot-\n open" to a symbol name.\n symbolsrc\n Sets the source reference on plot.ly for\n symbol .\n\n Returns\n -------\n plotly.graph_objs.splom.Marker\n ' return self['marker']
3,519,738,121,507,022,000
The 'marker' property is an instance of Marker that may be specified as: - An instance of plotly.graph_objs.splom.Marker - A dict of string/value properties that will be passed to the Marker constructor Supported dict properties: autocolorscale Determines whether the colorscale is a default palette (`autocolorscale: true`) or the palette determined by `marker.colorscale`. Has an effect only if `marker.color` is set to a numerical array. In case `colorscale` is unspecified or `autocolorscale` is true, the default palette will be chosen according to whether numbers in the `color` array are all positive, all negative or mixed. cauto Determines whether or not the color domain is computed with respect to the input data (here in `marker.color`) or the bounds set in `marker.cmin` and `marker.cmax`. Has an effect only if `marker.color` is set to a numerical array. Defaults to `false` when `marker.cmin` and `marker.cmax` are set by the user. cmax Sets the upper bound of the color domain. Has an effect only if `marker.color` is set to a numerical array. Value should have the same units as in `marker.color` and if set, `marker.cmin` must be set as well. cmid Sets the mid-point of the color domain by scaling `marker.cmin` and/or `marker.cmax` to be equidistant to this point. Has an effect only if `marker.color` is set to a numerical array. Value should have the same units as in `marker.color`. Has no effect when `marker.cauto` is `false`. cmin Sets the lower bound of the color domain. Has an effect only if `marker.color` is set to a numerical array. Value should have the same units as in `marker.color` and if set, `marker.cmax` must be set as well. color Sets the marker color. It accepts either a specific color or an array of numbers that are mapped to the colorscale relative to the max and min values of the array or relative to `marker.cmin` and `marker.cmax` if set. colorbar plotly.graph_objs.splom.marker.ColorBar instance or dict with compatible properties colorscale Sets the colorscale. Has an effect only if `marker.color` is set to a numerical array. The colorscale must be an array containing arrays mapping a normalized value to an rgb, rgba, hex, hsl, hsv, or named color string. At minimum, a mapping for the lowest (0) and highest (1) values is required. For example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the bounds of the colorscale in color space, use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale` may be a palette name string from the following list: Greys, YlGnBu, Greens, YlOrRd, Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis, Cividis. colorsrc Sets the source reference on plot.ly for color . line plotly.graph_objs.splom.marker.Line instance or dict with compatible properties opacity Sets the marker opacity. opacitysrc Sets the source reference on plot.ly for opacity . reversescale Reverses the color mapping if true. Has an effect only if `marker.color` is set to a numerical array. If true, `marker.cmin` will correspond to the last color in the array and `marker.cmax` will correspond to the first color. showscale Determines whether or not a colorbar is displayed for this trace. Has an effect only if `marker.color` is set to a numerical array. size Sets the marker size (in px). sizemin Has an effect only if `marker.size` is set to a numerical array. Sets the minimum size (in px) of the rendered marker points. sizemode Has an effect only if `marker.size` is set to a numerical array. Sets the rule for which the data in `size` is converted to pixels. sizeref Has an effect only if `marker.size` is set to a numerical array. Sets the scale factor used to determine the rendered size of marker points. Use with `sizemin` and `sizemode`. sizesrc Sets the source reference on plot.ly for size . symbol Sets the marker symbol type. Adding 100 is equivalent to appending "-open" to a symbol name. Adding 200 is equivalent to appending "-dot" to a symbol name. Adding 300 is equivalent to appending "-open-dot" or "dot-open" to a symbol name. symbolsrc Sets the source reference on plot.ly for symbol . Returns ------- plotly.graph_objs.splom.Marker
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
marker
180Studios/LoginApp
python
@property def marker(self): '\n The \'marker\' property is an instance of Marker\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Marker\n - A dict of string/value properties that will be passed\n to the Marker constructor\n \n Supported dict properties:\n \n autocolorscale\n Determines whether the colorscale is a default\n palette (`autocolorscale: true`) or the palette\n determined by `marker.colorscale`. Has an\n effect only if in `marker.color`is set to a\n numerical array. In case `colorscale` is\n unspecified or `autocolorscale` is true, the\n default palette will be chosen according to\n whether numbers in the `color` array are all\n positive, all negative or mixed.\n cauto\n Determines whether or not the color domain is\n computed with respect to the input data (here\n in `marker.color`) or the bounds set in\n `marker.cmin` and `marker.cmax` Has an effect\n only if in `marker.color`is set to a numerical\n array. Defaults to `false` when `marker.cmin`\n and `marker.cmax` are set by the user.\n cmax\n Sets the upper bound of the color domain. Has\n an effect only if in `marker.color`is set to a\n numerical array. Value should have the same\n units as in `marker.color` and if set,\n `marker.cmin` must be set as well.\n cmid\n Sets the mid-point of the color domain by\n scaling `marker.cmin` and/or `marker.cmax` to\n be equidistant to this point. Has an effect\n only if in `marker.color`is set to a numerical\n array. Value should have the same units as in\n `marker.color`. Has no effect when\n `marker.cauto` is `false`.\n cmin\n Sets the lower bound of the color domain. Has\n an effect only if in `marker.color`is set to a\n numerical array. Value should have the same\n units as in `marker.color` and if set,\n `marker.cmax` must be set as well.\n color\n Sets themarkercolor. It accepts either a\n specific color or an array of numbers that are\n mapped to the colorscale relative to the max\n and min values of the array or relative to\n `marker.cmin` and `marker.cmax` if set.\n colorbar\n plotly.graph_objs.splom.marker.ColorBar\n instance or dict with compatible properties\n colorscale\n Sets the colorscale. Has an effect only if in\n `marker.color`is set to a numerical array. The\n colorscale must be an array containing arrays\n mapping a normalized value to an rgb, rgba,\n hex, hsl, hsv, or named color string. At\n minimum, a mapping for the lowest (0) and\n highest (1) values are required. For example,\n `[[0, \'rgb(0,0,255)\', [1, \'rgb(255,0,0)\']]`. To\n control the bounds of the colorscale in color\n space, use`marker.cmin` and `marker.cmax`.\n Alternatively, `colorscale` may be a palette\n name string of the following list: Greys,YlGnBu\n ,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,R\n ainbow,Portland,Jet,Hot,Blackbody,Earth,Electri\n c,Viridis,Cividis.\n colorsrc\n Sets the source reference on plot.ly for color\n .\n line\n plotly.graph_objs.splom.marker.Line instance or\n dict with compatible properties\n opacity\n Sets the marker opacity.\n opacitysrc\n Sets the source reference on plot.ly for\n opacity .\n reversescale\n Reverses the color mapping if true. Has an\n effect only if in `marker.color`is set to a\n numerical array. If true, `marker.cmin` will\n correspond to the last color in the array and\n `marker.cmax` will correspond to the first\n color.\n showscale\n Determines whether or not a colorbar is\n displayed for this trace. Has an effect only if\n in `marker.color`is set to a numerical array.\n size\n Sets the marker size (in px).\n sizemin\n Has an effect only if `marker.size` is set to a\n numerical array. Sets the minimum size (in px)\n of the rendered marker points.\n sizemode\n Has an effect only if `marker.size` is set to a\n numerical array. Sets the rule for which the\n data in `size` is converted to pixels.\n sizeref\n Has an effect only if `marker.size` is set to a\n numerical array. Sets the scale factor used to\n determine the rendered size of marker points.\n Use with `sizemin` and `sizemode`.\n sizesrc\n Sets the source reference on plot.ly for size\n .\n symbol\n Sets the marker symbol type. Adding 100 is\n equivalent to appending "-open" to a symbol\n name. Adding 200 is equivalent to appending\n "-dot" to a symbol name. Adding 300 is\n equivalent to appending "-open-dot" or "dot-\n open" to a symbol name.\n symbolsrc\n Sets the source reference on plot.ly for\n symbol .\n\n Returns\n -------\n plotly.graph_objs.splom.Marker\n ' return self['marker']
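A hedged sketch of the numerical-color pathway described above: an array in `marker.color` activates the colorscale machinery, and `cmin`/`cmax` pin the color domain (all values are placeholders):

.. code-block:: python

    import plotly.graph_objs as go

    dims = [dict(label='a', values=[1, 2, 3]),
            dict(label='b', values=[4, 5, 6])]
    trace = go.Splom(
        dimensions=dims,
        marker=dict(
            color=[0.1, 0.5, 0.9],   # numerical array -> mapped through colorscale
            colorscale='Viridis',    # one of the named palettes listed above
            cmin=0, cmax=1,          # explicit color-domain bounds
            size=7,
            showscale=True,          # draw the colorbar
        ),
    )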
@property def name(self): "\n Sets the trace name. The trace name appear as the legend item\n and on hover.\n \n The 'name' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n " return self['name']
-6,361,504,644,165,565,000
Sets the trace name. The trace name appears as the legend item and on hover. The 'name' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
name
180Studios/LoginApp
python
@property def name(self): "\n Sets the trace name. The trace name appear as the legend item\n and on hover.\n \n The 'name' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n " return self['name']
@property def opacity(self): "\n Sets the opacity of the trace.\n \n The 'opacity' property is a number and may be specified as:\n - An int or float in the interval [0, 1]\n\n Returns\n -------\n int|float\n " return self['opacity']
3,079,945,175,595,132,400
Sets the opacity of the trace. The 'opacity' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- int|float
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
opacity
180Studios/LoginApp
python
@property def opacity(self): "\n Sets the opacity of the trace.\n \n The 'opacity' property is a number and may be specified as:\n - An int or float in the interval [0, 1]\n\n Returns\n -------\n int|float\n " return self['opacity']
@property def selected(self): "\n The 'selected' property is an instance of Selected\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Selected\n - A dict of string/value properties that will be passed\n to the Selected constructor\n \n Supported dict properties:\n \n marker\n plotly.graph_objs.splom.selected.Marker\n instance or dict with compatible properties\n\n Returns\n -------\n plotly.graph_objs.splom.Selected\n " return self['selected']
1,050,611,856,426,197,100
The 'selected' property is an instance of Selected that may be specified as: - An instance of plotly.graph_objs.splom.Selected - A dict of string/value properties that will be passed to the Selected constructor Supported dict properties: marker plotly.graph_objs.splom.selected.Marker instance or dict with compatible properties Returns ------- plotly.graph_objs.splom.Selected
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
selected
180Studios/LoginApp
python
@property def selected(self): "\n The 'selected' property is an instance of Selected\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Selected\n - A dict of string/value properties that will be passed\n to the Selected constructor\n \n Supported dict properties:\n \n marker\n plotly.graph_objs.splom.selected.Marker\n instance or dict with compatible properties\n\n Returns\n -------\n plotly.graph_objs.splom.Selected\n " return self['selected']
@property def selectedpoints(self): "\n Array containing integer indices of selected points. Has an\n effect only for traces that support selections. Note that an\n empty array means an empty selection where the `unselected` are\n turned on for all points, whereas, any other non-array values\n means no selection all where the `selected` and `unselected`\n styles have no effect.\n \n The 'selectedpoints' property accepts values of any type\n\n Returns\n -------\n Any\n " return self['selectedpoints']
-3,455,274,300,976,448,500
Array containing integer indices of selected points. Has an effect only for traces that support selections. Note that an empty array means an empty selection, where the `unselected` styles are turned on for all points, whereas any other non-array value means no selection at all, where the `selected` and `unselected` styles have no effect. The 'selectedpoints' property accepts values of any type Returns ------- Any
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
selectedpoints
180Studios/LoginApp
python
@property def selectedpoints(self): "\n Array containing integer indices of selected points. Has an\n effect only for traces that support selections. Note that an\n empty array means an empty selection where the `unselected` are\n turned on for all points, whereas, any other non-array values\n means no selection all where the `selected` and `unselected`\n styles have no effect.\n \n The 'selectedpoints' property accepts values of any type\n\n Returns\n -------\n Any\n " return self['selectedpoints']
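A sketch combining `selectedpoints` with the `selected`/`unselected` styles documented nearby; the indices and colors are arbitrary:

.. code-block:: python

    import plotly.graph_objs as go

    dims = [dict(label='a', values=[1, 2, 3]),
            dict(label='b', values=[4, 5, 6])]
    # Points 0 and 2 take the `selected` style; point 1 is faded.
    trace = go.Splom(
        dimensions=dims,
        selectedpoints=[0, 2],
        selected=dict(marker=dict(color='crimson')),
        unselected=dict(marker=dict(opacity=0.3)),
    )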
@property def showlegend(self): "\n Determines whether or not an item corresponding to this trace\n is shown in the legend.\n \n The 'showlegend' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n " return self['showlegend']
-7,652,109,045,393,845,000
Determines whether or not an item corresponding to this trace is shown in the legend. The 'showlegend' property must be specified as a bool (either True, or False) Returns ------- bool
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
showlegend
180Studios/LoginApp
python
@property def showlegend(self): "\n Determines whether or not an item corresponding to this trace\n is shown in the legend.\n \n The 'showlegend' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n " return self['showlegend']
@property def showlowerhalf(self): "\n Determines whether or not subplots on the lower half from the\n diagonal are displayed.\n \n The 'showlowerhalf' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n " return self['showlowerhalf']
7,164,965,194,827,310,000
Determines whether or not subplots on the lower half from the diagonal are displayed. The 'showlowerhalf' property must be specified as a bool (either True, or False) Returns ------- bool
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
showlowerhalf
180Studios/LoginApp
python
@property def showlowerhalf(self): "\n Determines whether or not subplots on the lower half from the\n diagonal are displayed.\n \n The 'showlowerhalf' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n " return self['showlowerhalf']
@property def showupperhalf(self): "\n Determines whether or not subplots on the upper half from the\n diagonal are displayed.\n \n The 'showupperhalf' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n " return self['showupperhalf']
-1,581,927,955,969,309,700
Determines whether or not subplots on the upper half from the diagonal are displayed. The 'showupperhalf' property must be specified as a bool (either True, or False) Returns ------- bool
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
showupperhalf
180Studios/LoginApp
python
@property def showupperhalf(self): "\n Determines whether or not subplots on the upper half from the\n diagonal are displayed.\n \n The 'showupperhalf' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n " return self['showupperhalf']
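Together with `diagonal.visible`, these two flags reduce the matrix to a single triangle; a minimal sketch with placeholder data:

.. code-block:: python

    import plotly.graph_objs as go

    dims = [dict(label='a', values=[1, 2, 3]),
            dict(label='b', values=[4, 5, 6])]
    # Keep only the lower-left triangle of subplots.
    trace = go.Splom(
        dimensions=dims,
        showupperhalf=False,
        diagonal=dict(visible=False),
    )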
@property def stream(self): "\n The 'stream' property is an instance of Stream\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Stream\n - A dict of string/value properties that will be passed\n to the Stream constructor\n \n Supported dict properties:\n \n maxpoints\n Sets the maximum number of points to keep on\n the plots from an incoming stream. If\n `maxpoints` is set to 50, only the newest 50\n points will be displayed on the plot.\n token\n The stream id number links a data trace on a\n plot with a stream. See\n https://plot.ly/settings for more details.\n\n Returns\n -------\n plotly.graph_objs.splom.Stream\n " return self['stream']
-661,828,426,000,341,100
The 'stream' property is an instance of Stream that may be specified as: - An instance of plotly.graph_objs.splom.Stream - A dict of string/value properties that will be passed to the Stream constructor Supported dict properties: maxpoints Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. token The stream id number links a data trace on a plot with a stream. See https://plot.ly/settings for more details. Returns ------- plotly.graph_objs.splom.Stream
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
stream
180Studios/LoginApp
python
@property def stream(self): "\n The 'stream' property is an instance of Stream\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Stream\n - A dict of string/value properties that will be passed\n to the Stream constructor\n \n Supported dict properties:\n \n maxpoints\n Sets the maximum number of points to keep on\n the plots from an incoming stream. If\n `maxpoints` is set to 50, only the newest 50\n points will be displayed on the plot.\n token\n The stream id number links a data trace on a\n plot with a stream. See\n https://plot.ly/settings for more details.\n\n Returns\n -------\n plotly.graph_objs.splom.Stream\n " return self['stream']
@property def text(self): "\n Sets text elements associated with each (x,y) pair to appear on\n hover. If a single string, the same string appears over all the\n data points. If an array of string, the items are mapped in\n order to the this trace's (x,y) coordinates.\n \n The 'text' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n str|numpy.ndarray\n " return self['text']
1,313,500,544,468,579,800
Sets text elements associated with each (x,y) pair to appear on hover. If a single string, the same string appears over all the data points. If an array of strings, the items are mapped in order to this trace's (x,y) coordinates. The 'text' property is a string and must be specified as: - A string - A number that will be converted to a string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
text
180Studios/LoginApp
python
@property def text(self): "\n Sets text elements associated with each (x,y) pair to appear on\n hover. If a single string, the same string appears over all the\n data points. If an array of string, the items are mapped in\n order to the this trace's (x,y) coordinates.\n \n The 'text' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n str|numpy.ndarray\n " return self['text']
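A sketch of per-point hover text, one string per row of the dimensions; the strings are placeholders:

.. code-block:: python

    import plotly.graph_objs as go

    dims = [dict(label='a', values=[1, 2, 3]),
            dict(label='b', values=[4, 5, 6])]
    # One text item per (x, y) pair, shown on hover in every subplot.
    trace = go.Splom(dimensions=dims, text=['row 0', 'row 1', 'row 2'])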
@property def textsrc(self): "\n Sets the source reference on plot.ly for text .\n \n The 'textsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['textsrc']
6,589,185,397,491,211,000
Sets the source reference on plot.ly for text . The 'textsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
textsrc
180Studios/LoginApp
python
@property def textsrc(self): "\n Sets the source reference on plot.ly for text .\n \n The 'textsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n " return self['textsrc']
@property def uid(self): "\n Assign an id to this trace, Use this to provide object\n constancy between traces during animations and transitions.\n \n The 'uid' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n " return self['uid']
3,958,919,285,292,402,000
Assigns an id to this trace. Use it to provide object constancy between traces during animations and transitions. The 'uid' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
uid
180Studios/LoginApp
python
@property def uid(self): "\n Assign an id to this trace, Use this to provide object\n constancy between traces during animations and transitions.\n \n The 'uid' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n " return self['uid']
@property def uirevision(self): "\n Controls persistence of some user-driven changes to the trace:\n `constraintrange` in `parcoords` traces, as well as some\n `editable: true` modifications such as `name` and\n `colorbar.title`. Defaults to `layout.uirevision`. Note that\n other user-driven trace attribute changes are controlled by\n `layout` attributes: `trace.visible` is controlled by\n `layout.legend.uirevision`, `selectedpoints` is controlled by\n `layout.selectionrevision`, and `colorbar.(x|y)` (accessible\n with `config: {editable: true}`) is controlled by\n `layout.editrevision`. Trace changes are tracked by `uid`,\n which only falls back on trace index if no `uid` is provided.\n So if your app can add/remove traces before the end of the\n `data` array, such that the same trace has a different index,\n you can still preserve user-driven changes if you give each\n trace a `uid` that stays with it as it moves.\n \n The 'uirevision' property accepts values of any type\n\n Returns\n -------\n Any\n " return self['uirevision']
6,291,104,720,439,785,000
Controls persistence of some user-driven changes to the trace: `constraintrange` in `parcoords` traces, as well as some `editable: true` modifications such as `name` and `colorbar.title`. Defaults to `layout.uirevision`. Note that other user-driven trace attribute changes are controlled by `layout` attributes: `trace.visible` is controlled by `layout.legend.uirevision`, `selectedpoints` is controlled by `layout.selectionrevision`, and `colorbar.(x|y)` (accessible with `config: {editable: true}`) is controlled by `layout.editrevision`. Trace changes are tracked by `uid`, which only falls back on trace index if no `uid` is provided. So if your app can add/remove traces before the end of the `data` array, such that the same trace has a different index, you can still preserve user-driven changes if you give each trace a `uid` that stays with it as it moves. The 'uirevision' property accepts values of any type Returns ------- Any
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
uirevision
180Studios/LoginApp
python
@property def uirevision(self): "\n Controls persistence of some user-driven changes to the trace:\n `constraintrange` in `parcoords` traces, as well as some\n `editable: true` modifications such as `name` and\n `colorbar.title`. Defaults to `layout.uirevision`. Note that\n other user-driven trace attribute changes are controlled by\n `layout` attributes: `trace.visible` is controlled by\n `layout.legend.uirevision`, `selectedpoints` is controlled by\n `layout.selectionrevision`, and `colorbar.(x|y)` (accessible\n with `config: {editable: true}`) is controlled by\n `layout.editrevision`. Trace changes are tracked by `uid`,\n which only falls back on trace index if no `uid` is provided.\n So if your app can add/remove traces before the end of the\n `data` array, such that the same trace has a different index,\n you can still preserve user-driven changes if you give each\n trace a `uid` that stays with it as it moves.\n \n The 'uirevision' property accepts values of any type\n\n Returns\n -------\n Any\n " return self['uirevision']
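A sketch of pairing a stable `uid` with `uirevision` so that user-driven state survives data updates; the id strings are arbitrary:

.. code-block:: python

    import plotly.graph_objs as go

    dims = [dict(label='a', values=[1, 2, 3]),
            dict(label='b', values=[4, 5, 6])]
    # As long as `uirevision` keeps the same value across updates,
    # user-driven state tied to this trace is preserved.
    trace = go.Splom(dimensions=dims, uid='splom-1', uirevision='keep')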
@property def unselected(self): "\n The 'unselected' property is an instance of Unselected\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Unselected\n - A dict of string/value properties that will be passed\n to the Unselected constructor\n \n Supported dict properties:\n \n marker\n plotly.graph_objs.splom.unselected.Marker\n instance or dict with compatible properties\n\n Returns\n -------\n plotly.graph_objs.splom.Unselected\n " return self['unselected']
8,059,231,958,851,131,000
The 'unselected' property is an instance of Unselected that may be specified as: - An instance of plotly.graph_objs.splom.Unselected - A dict of string/value properties that will be passed to the Unselected constructor Supported dict properties: marker plotly.graph_objs.splom.unselected.Marker instance or dict with compatible properties Returns ------- plotly.graph_objs.splom.Unselected
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
unselected
180Studios/LoginApp
python
@property def unselected(self): "\n The 'unselected' property is an instance of Unselected\n that may be specified as:\n - An instance of plotly.graph_objs.splom.Unselected\n - A dict of string/value properties that will be passed\n to the Unselected constructor\n \n Supported dict properties:\n \n marker\n plotly.graph_objs.splom.unselected.Marker\n instance or dict with compatible properties\n\n Returns\n -------\n plotly.graph_objs.splom.Unselected\n " return self['unselected']
@property def visible(self): '\n Determines whether or not this trace is visible. If\n "legendonly", the trace is not drawn, but can appear as a\n legend item (provided that the legend itself is visible).\n \n The \'visible\' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n [True, False, \'legendonly\']\n\n Returns\n -------\n Any\n ' return self['visible']
-710,799,896,792,870,900
Determines whether or not this trace is visible. If "legendonly", the trace is not drawn, but can appear as a legend item (provided that the legend itself is visible). The 'visible' property is an enumeration that may be specified as: - One of the following enumeration values: [True, False, 'legendonly'] Returns ------- Any
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
visible
180Studios/LoginApp
python
@property def visible(self): '\n Determines whether or not this trace is visible. If\n "legendonly", the trace is not drawn, but can appear as a\n legend item (provided that the legend itself is visible).\n \n The \'visible\' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n [True, False, \'legendonly\']\n\n Returns\n -------\n Any\n ' return self['visible']
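A sketch of the "legendonly" state: the trace starts hidden but can be toggled back on from the legend (names are placeholders):

.. code-block:: python

    import plotly.graph_objs as go

    dims = [dict(label='a', values=[1, 2, 3]),
            dict(label='b', values=[4, 5, 6])]
    trace = go.Splom(dimensions=dims, name='optional view',
                     visible='legendonly', showlegend=True)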
@property def xaxes(self): "\n Sets the list of x axes corresponding to dimensions of this\n splom trace. By default, a splom will match the first N xaxes\n where N is the number of input dimensions. Note that, in case\n where `diagonal.visible` is false and `showupperhalf` or\n `showlowerhalf` is false, this splom trace will generate one\n less x-axis and one less y-axis.\n \n The 'xaxes' property is an info array that may be specified as:\n * a list of elements where:\n The 'xaxes[i]' property is an identifier of a particular\n subplot, of type 'x', that may be specified as the string 'x'\n optionally followed by an integer >= 1\n (e.g. 'x', 'x1', 'x2', 'x3', etc.)\n\n Returns\n -------\n list\n " return self['xaxes']
-343,617,779,404,871,900
Sets the list of x axes corresponding to dimensions of this splom trace. By default, a splom will match the first N xaxes where N is the number of input dimensions. Note that, in the case where `diagonal.visible` is false and `showupperhalf` or `showlowerhalf` is false, this splom trace will generate one less x-axis and one less y-axis. The 'xaxes' property is an info array that may be specified as: * a list of elements where: The 'xaxes[i]' property is an identifier of a particular subplot, of type 'x', that may be specified as the string 'x' optionally followed by an integer >= 1 (e.g. 'x', 'x1', 'x2', 'x3', etc.) Returns ------- list
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
xaxes
180Studios/LoginApp
python
@property def xaxes(self): "\n Sets the list of x axes corresponding to dimensions of this\n splom trace. By default, a splom will match the first N xaxes\n where N is the number of input dimensions. Note that, in case\n where `diagonal.visible` is false and `showupperhalf` or\n `showlowerhalf` is false, this splom trace will generate one\n less x-axis and one less y-axis.\n \n The 'xaxes' property is an info array that may be specified as:\n * a list of elements where:\n The 'xaxes[i]' property is an identifier of a particular\n subplot, of type 'x', that may be specified as the string 'x'\n optionally followed by an integer >= 1\n (e.g. 'x', 'x1', 'x2', 'x3', etc.)\n\n Returns\n -------\n list\n " return self['xaxes']
@property def yaxes(self): "\n Sets the list of y axes corresponding to dimensions of this\n splom trace. By default, a splom will match the first N yaxes\n where N is the number of input dimensions. Note that, in case\n where `diagonal.visible` is false and `showupperhalf` or\n `showlowerhalf` is false, this splom trace will generate one\n less x-axis and one less y-axis.\n \n The 'yaxes' property is an info array that may be specified as:\n * a list of elements where:\n The 'yaxes[i]' property is an identifier of a particular\n subplot, of type 'y', that may be specified as the string 'y'\n optionally followed by an integer >= 1\n (e.g. 'y', 'y1', 'y2', 'y3', etc.)\n\n Returns\n -------\n list\n " return self['yaxes']
-7,748,419,616,988,008,000
Sets the list of y axes corresponding to dimensions of this splom trace. By default, a splom will match the first N yaxes where N is the number of input dimensions. Note that, in the case where `diagonal.visible` is false and `showupperhalf` or `showlowerhalf` is false, this splom trace will generate one less x-axis and one less y-axis. The 'yaxes' property is an info array that may be specified as: * a list of elements where: The 'yaxes[i]' property is an identifier of a particular subplot, of type 'y', that may be specified as the string 'y' optionally followed by an integer >= 1 (e.g. 'y', 'y1', 'y2', 'y3', etc.) Returns ------- list
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
yaxes
180Studios/LoginApp
python
@property def yaxes(self): "\n Sets the list of y axes corresponding to dimensions of this\n splom trace. By default, a splom will match the first N yaxes\n where N is the number of input dimensions. Note that, in case\n where `diagonal.visible` is false and `showupperhalf` or\n `showlowerhalf` is false, this splom trace will generate one\n less x-axis and one less y-axis.\n \n The 'yaxes' property is an info array that may be specified as:\n * a list of elements where:\n The 'yaxes[i]' property is an identifier of a particular\n subplot, of type 'y', that may be specified as the string 'y'\n optionally followed by an integer >= 1\n (e.g. 'y', 'y1', 'y2', 'y3', etc.)\n\n Returns\n -------\n list\n " return self['yaxes']
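A sketch of pinning the two dimensions of a splom to explicit axis ids rather than relying on the default first-N matching; the ids follow the 'x'/'x2', 'y'/'y2' pattern described above:

.. code-block:: python

    import plotly.graph_objs as go

    dims = [dict(label='a', values=[1, 2, 3]),
            dict(label='b', values=[4, 5, 6])]
    trace = go.Splom(dimensions=dims, xaxes=['x', 'x2'], yaxes=['y', 'y2'])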
def __init__(self, arg=None, customdata=None, customdatasrc=None, diagonal=None, dimensions=None, dimensiondefaults=None, hoverinfo=None, hoverinfosrc=None, hoverlabel=None, hovertemplate=None, hovertemplatesrc=None, hovertext=None, hovertextsrc=None, ids=None, idssrc=None, legendgroup=None, marker=None, name=None, opacity=None, selected=None, selectedpoints=None, showlegend=None, showlowerhalf=None, showupperhalf=None, stream=None, text=None, textsrc=None, uid=None, uirevision=None, unselected=None, visible=None, xaxes=None, yaxes=None, **kwargs): '\n Construct a new Splom object\n \n Splom traces generate scatter plot matrix visualizations. Each\n splom `dimensions` items correspond to a generated axis. Values\n for each of those dimensions are set in `dimensions[i].values`.\n Splom traces support all `scattergl` marker style attributes.\n Specify `layout.grid` attributes and/or layout x-axis and\n y-axis attributes for more control over the axis positioning\n and style.\n\n Parameters\n ----------\n arg\n dict of properties compatible with this constructor or\n an instance of plotly.graph_objs.Splom\n customdata\n Assigns extra data each datum. This may be useful when\n listening to hover, click and selection events. Note\n that, "scatter" traces also appends customdata items in\n the markers DOM elements\n customdatasrc\n Sets the source reference on plot.ly for customdata .\n diagonal\n plotly.graph_objs.splom.Diagonal instance or dict with\n compatible properties\n dimensions\n plotly.graph_objs.splom.Dimension instance or dict with\n compatible properties\n dimensiondefaults\n When used in a template (as\n layout.template.data.splom.dimensiondefaults), sets the\n default property values to use for elements of\n splom.dimensions\n hoverinfo\n Determines which trace information appear on hover. If\n `none` or `skip` are set, no information is displayed\n upon hovering. But, if `none` is set, click and hover\n events are still fired.\n hoverinfosrc\n Sets the source reference on plot.ly for hoverinfo .\n hoverlabel\n plotly.graph_objs.splom.Hoverlabel instance or dict\n with compatible properties\n hovertemplate\n Template string used for rendering the information that\n appear on hover box. Note that this will override\n `hoverinfo`. Variables are inserted using %{variable},\n for example "y: %{y}". Numbers are formatted using\n d3-format\'s syntax %{variable:d3-format}, for example\n "Price: %{y:$.2f}". See https://github.com/d3/d3-format\n /blob/master/README.md#locale_format for details on the\n formatting syntax. The variables available in\n `hovertemplate` are the ones emitted as event data\n described at this link\n https://plot.ly/javascript/plotlyjs-events/#event-data.\n Additionally, every attributes that can be specified\n per-point (the ones that are `arrayOk: true`) are\n available. Anything contained in tag `<extra>` is\n displayed in the secondary box, for example\n "<extra>{fullData.name}</extra>".\n hovertemplatesrc\n Sets the source reference on plot.ly for hovertemplate\n .\n hovertext\n Same as `text`.\n hovertextsrc\n Sets the source reference on plot.ly for hovertext .\n ids\n Assigns id labels to each datum. These ids for object\n constancy of data points during animation. Should be an\n array of strings, not numbers or any other type.\n idssrc\n Sets the source reference on plot.ly for ids .\n legendgroup\n Sets the legend group for this trace. Traces part of\n the same legend group hide/show at the same time when\n toggling legend items.\n marker\n plotly.graph_objs.splom.Marker instance or dict with\n compatible properties\n name\n Sets the trace name. The trace name appear as the\n legend item and on hover.\n opacity\n Sets the opacity of the trace.\n selected\n plotly.graph_objs.splom.Selected instance or dict with\n compatible properties\n selectedpoints\n Array containing integer indices of selected points.\n Has an effect only for traces that support selections.\n Note that an empty array means an empty selection where\n the `unselected` are turned on for all points, whereas,\n any other non-array values means no selection all where\n the `selected` and `unselected` styles have no effect.\n showlegend\n Determines whether or not an item corresponding to this\n trace is shown in the legend.\n showlowerhalf\n Determines whether or not subplots on the lower half\n from the diagonal are displayed.\n showupperhalf\n Determines whether or not subplots on the upper half\n from the diagonal are displayed.\n stream\n plotly.graph_objs.splom.Stream instance or dict with\n compatible properties\n text\n Sets text elements associated with each (x,y) pair to\n appear on hover. If a single string, the same string\n appears over all the data points. If an array of\n string, the items are mapped in order to the this\n trace\'s (x,y) coordinates.\n textsrc\n Sets the source reference on plot.ly for text .\n uid\n Assign an id to this trace, Use this to provide object\n constancy between traces during animations and\n transitions.\n uirevision\n Controls persistence of some user-driven changes to the\n trace: `constraintrange` in `parcoords` traces, as well\n as some `editable: true` modifications such as `name`\n and `colorbar.title`. Defaults to `layout.uirevision`.\n Note that other user-driven trace attribute changes are\n controlled by `layout` attributes: `trace.visible` is\n controlled by `layout.legend.uirevision`,\n `selectedpoints` is controlled by\n `layout.selectionrevision`, and `colorbar.(x|y)`\n (accessible with `config: {editable: true}`) is\n controlled by `layout.editrevision`. Trace changes are\n tracked by `uid`, which only falls back on trace index\n if no `uid` is provided. So if your app can add/remove\n traces before the end of the `data` array, such that\n the same trace has a different index, you can still\n preserve user-driven changes if you give each trace a\n `uid` that stays with it as it moves.\n unselected\n plotly.graph_objs.splom.Unselected instance or dict\n with compatible properties\n visible\n Determines whether or not this trace is visible. If\n "legendonly", the trace is not drawn, but can appear as\n a legend item (provided that the legend itself is\n visible).\n xaxes\n Sets the list of x axes corresponding to dimensions of\n this splom trace. By default, a splom will match the\n first N xaxes where N is the number of input\n dimensions. Note that, in case where `diagonal.visible`\n is false and `showupperhalf` or `showlowerhalf` is\n false, this splom trace will generate one less x-axis\n and one less y-axis.\n yaxes\n Sets the list of y axes corresponding to dimensions of\n this splom trace. By default, a splom will match the\n first N yaxes where N is the number of input\n dimensions. Note that, in case where `diagonal.visible`\n is false and `showupperhalf` or `showlowerhalf` is\n false, this splom trace will generate one less x-axis\n and one less y-axis.\n\n Returns\n -------\n Splom\n ' super(Splom, self).__init__('splom') if (arg is None): arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = copy.copy(arg) else: raise ValueError('The first argument to the plotly.graph_objs.Splom \nconstructor must be a dict or \nan instance of plotly.graph_objs.Splom') self._skip_invalid = kwargs.pop('skip_invalid', False) from plotly.validators import splom as v_splom self._validators['customdata'] = v_splom.CustomdataValidator() self._validators['customdatasrc'] = v_splom.CustomdatasrcValidator() self._validators['diagonal'] = v_splom.DiagonalValidator() self._validators['dimensions'] = v_splom.DimensionsValidator() self._validators['dimensiondefaults'] = v_splom.DimensionValidator() self._validators['hoverinfo'] = v_splom.HoverinfoValidator() self._validators['hoverinfosrc'] = v_splom.HoverinfosrcValidator() self._validators['hoverlabel'] = v_splom.HoverlabelValidator() self._validators['hovertemplate'] = v_splom.HovertemplateValidator() self._validators['hovertemplatesrc'] = v_splom.HovertemplatesrcValidator() self._validators['hovertext'] = v_splom.HovertextValidator() self._validators['hovertextsrc'] = v_splom.HovertextsrcValidator() self._validators['ids'] = v_splom.IdsValidator() self._validators['idssrc'] = v_splom.IdssrcValidator() self._validators['legendgroup'] = v_splom.LegendgroupValidator() self._validators['marker'] = v_splom.MarkerValidator() self._validators['name'] = v_splom.NameValidator() self._validators['opacity'] = v_splom.OpacityValidator() self._validators['selected'] = v_splom.SelectedValidator() self._validators['selectedpoints'] = v_splom.SelectedpointsValidator() self._validators['showlegend'] = v_splom.ShowlegendValidator() self._validators['showlowerhalf'] = v_splom.ShowlowerhalfValidator() self._validators['showupperhalf'] = v_splom.ShowupperhalfValidator() self._validators['stream'] = v_splom.StreamValidator() self._validators['text'] = v_splom.TextValidator() self._validators['textsrc'] = v_splom.TextsrcValidator() self._validators['uid'] = v_splom.UidValidator() self._validators['uirevision'] = v_splom.UirevisionValidator() self._validators['unselected'] = v_splom.UnselectedValidator() self._validators['visible'] = v_splom.VisibleValidator() self._validators['xaxes'] = v_splom.XaxesValidator() self._validators['yaxes'] = v_splom.YaxesValidator() _v = arg.pop('customdata', None) self['customdata'] = (customdata if (customdata is not None) else _v) _v = arg.pop('customdatasrc', None) self['customdatasrc'] = (customdatasrc if (customdatasrc is not None) else _v) _v = arg.pop('diagonal', None) self['diagonal'] = (diagonal if (diagonal is not None) else _v) _v = arg.pop('dimensions', None) self['dimensions'] = (dimensions if (dimensions is not None) else _v) _v = arg.pop('dimensiondefaults', None) self['dimensiondefaults'] = (dimensiondefaults if (dimensiondefaults is not None) else _v) _v = arg.pop('hoverinfo', None) self['hoverinfo'] = (hoverinfo if (hoverinfo is not None) else _v) _v = arg.pop('hoverinfosrc', None) self['hoverinfosrc'] = (hoverinfosrc if (hoverinfosrc is not None) else _v) _v = arg.pop('hoverlabel', None) self['hoverlabel'] = (hoverlabel if (hoverlabel is not None) else _v) _v = arg.pop('hovertemplate', None) self['hovertemplate'] = (hovertemplate if (hovertemplate is not None) 
else _v) _v = arg.pop('hovertemplatesrc', None) self['hovertemplatesrc'] = (hovertemplatesrc if (hovertemplatesrc is not None) else _v) _v = arg.pop('hovertext', None) self['hovertext'] = (hovertext if (hovertext is not None) else _v) _v = arg.pop('hovertextsrc', None) self['hovertextsrc'] = (hovertextsrc if (hovertextsrc is not None) else _v) _v = arg.pop('ids', None) self['ids'] = (ids if (ids is not None) else _v) _v = arg.pop('idssrc', None) self['idssrc'] = (idssrc if (idssrc is not None) else _v) _v = arg.pop('legendgroup', None) self['legendgroup'] = (legendgroup if (legendgroup is not None) else _v) _v = arg.pop('marker', None) self['marker'] = (marker if (marker is not None) else _v) _v = arg.pop('name', None) self['name'] = (name if (name is not None) else _v) _v = arg.pop('opacity', None) self['opacity'] = (opacity if (opacity is not None) else _v) _v = arg.pop('selected', None) self['selected'] = (selected if (selected is not None) else _v) _v = arg.pop('selectedpoints', None) self['selectedpoints'] = (selectedpoints if (selectedpoints is not None) else _v) _v = arg.pop('showlegend', None) self['showlegend'] = (showlegend if (showlegend is not None) else _v) _v = arg.pop('showlowerhalf', None) self['showlowerhalf'] = (showlowerhalf if (showlowerhalf is not None) else _v) _v = arg.pop('showupperhalf', None) self['showupperhalf'] = (showupperhalf if (showupperhalf is not None) else _v) _v = arg.pop('stream', None) self['stream'] = (stream if (stream is not None) else _v) _v = arg.pop('text', None) self['text'] = (text if (text is not None) else _v) _v = arg.pop('textsrc', None) self['textsrc'] = (textsrc if (textsrc is not None) else _v) _v = arg.pop('uid', None) self['uid'] = (uid if (uid is not None) else _v) _v = arg.pop('uirevision', None) self['uirevision'] = (uirevision if (uirevision is not None) else _v) _v = arg.pop('unselected', None) self['unselected'] = (unselected if (unselected is not None) else _v) _v = arg.pop('visible', None) self['visible'] = (visible if (visible is not None) else _v) _v = arg.pop('xaxes', None) self['xaxes'] = (xaxes if (xaxes is not None) else _v) _v = arg.pop('yaxes', None) self['yaxes'] = (yaxes if (yaxes is not None) else _v) from _plotly_utils.basevalidators import LiteralValidator self._props['type'] = 'splom' self._validators['type'] = LiteralValidator(plotly_name='type', parent_name='splom', val='splom') arg.pop('type', None) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
1,546,266,752,610,994,700
Construct a new Splom object Splom traces generate scatter plot matrix visualizations. Each splom `dimensions` items correspond to a generated axis. Values for each of those dimensions are set in `dimensions[i].values`. Splom traces support all `scattergl` marker style attributes. Specify `layout.grid` attributes and/or layout x-axis and y-axis attributes for more control over the axis positioning and style. Parameters ---------- arg dict of properties compatible with this constructor or an instance of plotly.graph_objs.Splom customdata Assigns extra data each datum. This may be useful when listening to hover, click and selection events. Note that, "scatter" traces also appends customdata items in the markers DOM elements customdatasrc Sets the source reference on plot.ly for customdata . diagonal plotly.graph_objs.splom.Diagonal instance or dict with compatible properties dimensions plotly.graph_objs.splom.Dimension instance or dict with compatible properties dimensiondefaults When used in a template (as layout.template.data.splom.dimensiondefaults), sets the default property values to use for elements of splom.dimensions hoverinfo Determines which trace information appear on hover. If `none` or `skip` are set, no information is displayed upon hovering. But, if `none` is set, click and hover events are still fired. hoverinfosrc Sets the source reference on plot.ly for hoverinfo . hoverlabel plotly.graph_objs.splom.Hoverlabel instance or dict with compatible properties hovertemplate Template string used for rendering the information that appear on hover box. Note that this will override `hoverinfo`. Variables are inserted using %{variable}, for example "y: %{y}". Numbers are formatted using d3-format's syntax %{variable:d3-format}, for example "Price: %{y:$.2f}". See https://github.com/d3/d3-format /blob/master/README.md#locale_format for details on the formatting syntax. The variables available in `hovertemplate` are the ones emitted as event data described at this link https://plot.ly/javascript/plotlyjs-events/#event-data. Additionally, every attributes that can be specified per-point (the ones that are `arrayOk: true`) are available. Anything contained in tag `<extra>` is displayed in the secondary box, for example "<extra>{fullData.name}</extra>". hovertemplatesrc Sets the source reference on plot.ly for hovertemplate . hovertext Same as `text`. hovertextsrc Sets the source reference on plot.ly for hovertext . ids Assigns id labels to each datum. These ids for object constancy of data points during animation. Should be an array of strings, not numbers or any other type. idssrc Sets the source reference on plot.ly for ids . legendgroup Sets the legend group for this trace. Traces part of the same legend group hide/show at the same time when toggling legend items. marker plotly.graph_objs.splom.Marker instance or dict with compatible properties name Sets the trace name. The trace name appear as the legend item and on hover. opacity Sets the opacity of the trace. selected plotly.graph_objs.splom.Selected instance or dict with compatible properties selectedpoints Array containing integer indices of selected points. Has an effect only for traces that support selections. Note that an empty array means an empty selection where the `unselected` are turned on for all points, whereas, any other non-array values means no selection all where the `selected` and `unselected` styles have no effect. showlegend Determines whether or not an item corresponding to this trace is shown in the legend. 
showlowerhalf Determines whether or not subplots on the lower half from the diagonal are displayed. showupperhalf Determines whether or not subplots on the upper half from the diagonal are displayed. stream plotly.graph_objs.splom.Stream instance or dict with compatible properties text Sets text elements associated with each (x,y) pair to appear on hover. If a single string, the same string appears over all the data points. If an array of string, the items are mapped in order to the this trace's (x,y) coordinates. textsrc Sets the source reference on plot.ly for text . uid Assign an id to this trace, Use this to provide object constancy between traces during animations and transitions. uirevision Controls persistence of some user-driven changes to the trace: `constraintrange` in `parcoords` traces, as well as some `editable: true` modifications such as `name` and `colorbar.title`. Defaults to `layout.uirevision`. Note that other user-driven trace attribute changes are controlled by `layout` attributes: `trace.visible` is controlled by `layout.legend.uirevision`, `selectedpoints` is controlled by `layout.selectionrevision`, and `colorbar.(x|y)` (accessible with `config: {editable: true}`) is controlled by `layout.editrevision`. Trace changes are tracked by `uid`, which only falls back on trace index if no `uid` is provided. So if your app can add/remove traces before the end of the `data` array, such that the same trace has a different index, you can still preserve user-driven changes if you give each trace a `uid` that stays with it as it moves. unselected plotly.graph_objs.splom.Unselected instance or dict with compatible properties visible Determines whether or not this trace is visible. If "legendonly", the trace is not drawn, but can appear as a legend item (provided that the legend itself is visible). xaxes Sets the list of x axes corresponding to dimensions of this splom trace. By default, a splom will match the first N xaxes where N is the number of input dimensions. Note that, in case where `diagonal.visible` is false and `showupperhalf` or `showlowerhalf` is false, this splom trace will generate one less x-axis and one less y-axis. yaxes Sets the list of y axes corresponding to dimensions of this splom trace. By default, a splom will match the first N yaxes where N is the number of input dimensions. Note that, in case where `diagonal.visible` is false and `showupperhalf` or `showlowerhalf` is false, this splom trace will generate one less x-axis and one less y-axis. Returns ------- Splom
venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py
__init__
180Studios/LoginApp
python
def __init__(self, arg=None, customdata=None, customdatasrc=None, diagonal=None, dimensions=None, dimensiondefaults=None, hoverinfo=None, hoverinfosrc=None, hoverlabel=None, hovertemplate=None, hovertemplatesrc=None, hovertext=None, hovertextsrc=None, ids=None, idssrc=None, legendgroup=None, marker=None, name=None, opacity=None, selected=None, selectedpoints=None, showlegend=None, showlowerhalf=None, showupperhalf=None, stream=None, text=None, textsrc=None, uid=None, uirevision=None, unselected=None, visible=None, xaxes=None, yaxes=None, **kwargs): '\n Construct a new Splom object\n \n Splom traces generate scatter plot matrix visualizations. Each\n splom `dimensions` items correspond to a generated axis. Values\n for each of those dimensions are set in `dimensions[i].values`.\n Splom traces support all `scattergl` marker style attributes.\n Specify `layout.grid` attributes and/or layout x-axis and\n y-axis attributes for more control over the axis positioning\n and style.\n\n Parameters\n ----------\n arg\n dict of properties compatible with this constructor or\n an instance of plotly.graph_objs.Splom\n customdata\n Assigns extra data each datum. This may be useful when\n listening to hover, click and selection events. Note\n that, "scatter" traces also appends customdata items in\n the markers DOM elements\n customdatasrc\n Sets the source reference on plot.ly for customdata .\n diagonal\n plotly.graph_objs.splom.Diagonal instance or dict with\n compatible properties\n dimensions\n plotly.graph_objs.splom.Dimension instance or dict with\n compatible properties\n dimensiondefaults\n When used in a template (as\n layout.template.data.splom.dimensiondefaults), sets the\n default property values to use for elements of\n splom.dimensions\n hoverinfo\n Determines which trace information appear on hover. If\n `none` or `skip` are set, no information is displayed\n upon hovering. But, if `none` is set, click and hover\n events are still fired.\n hoverinfosrc\n Sets the source reference on plot.ly for hoverinfo .\n hoverlabel\n plotly.graph_objs.splom.Hoverlabel instance or dict\n with compatible properties\n hovertemplate\n Template string used for rendering the information that\n appear on hover box. Note that this will override\n `hoverinfo`. Variables are inserted using %{variable},\n for example "y: %{y}". Numbers are formatted using\n d3-format\'s syntax %{variable:d3-format}, for example\n "Price: %{y:$.2f}". See https://github.com/d3/d3-format\n /blob/master/README.md#locale_format for details on the\n formatting syntax. The variables available in\n `hovertemplate` are the ones emitted as event data\n described at this link\n https://plot.ly/javascript/plotlyjs-events/#event-data.\n Additionally, every attributes that can be specified\n per-point (the ones that are `arrayOk: true`) are\n available. Anything contained in tag `<extra>` is\n displayed in the secondary box, for example\n "<extra>{fullData.name}</extra>".\n hovertemplatesrc\n Sets the source reference on plot.ly for hovertemplate\n .\n hovertext\n Same as `text`.\n hovertextsrc\n Sets the source reference on plot.ly for hovertext .\n ids\n Assigns id labels to each datum. These ids for object\n constancy of data points during animation. Should be an\n array of strings, not numbers or any other type.\n idssrc\n Sets the source reference on plot.ly for ids .\n legendgroup\n Sets the legend group for this trace. 
Traces part of\n the same legend group hide/show at the same time when\n toggling legend items.\n marker\n plotly.graph_objs.splom.Marker instance or dict with\n compatible properties\n name\n Sets the trace name. The trace name appear as the\n legend item and on hover.\n opacity\n Sets the opacity of the trace.\n selected\n plotly.graph_objs.splom.Selected instance or dict with\n compatible properties\n selectedpoints\n Array containing integer indices of selected points.\n Has an effect only for traces that support selections.\n Note that an empty array means an empty selection where\n the `unselected` are turned on for all points, whereas,\n any other non-array values means no selection all where\n the `selected` and `unselected` styles have no effect.\n showlegend\n Determines whether or not an item corresponding to this\n trace is shown in the legend.\n showlowerhalf\n Determines whether or not subplots on the lower half\n from the diagonal are displayed.\n showupperhalf\n Determines whether or not subplots on the upper half\n from the diagonal are displayed.\n stream\n plotly.graph_objs.splom.Stream instance or dict with\n compatible properties\n text\n Sets text elements associated with each (x,y) pair to\n appear on hover. If a single string, the same string\n appears over all the data points. If an array of\n string, the items are mapped in order to the this\n trace\'s (x,y) coordinates.\n textsrc\n Sets the source reference on plot.ly for text .\n uid\n Assign an id to this trace, Use this to provide object\n constancy between traces during animations and\n transitions.\n uirevision\n Controls persistence of some user-driven changes to the\n trace: `constraintrange` in `parcoords` traces, as well\n as some `editable: true` modifications such as `name`\n and `colorbar.title`. Defaults to `layout.uirevision`.\n Note that other user-driven trace attribute changes are\n controlled by `layout` attributes: `trace.visible` is\n controlled by `layout.legend.uirevision`,\n `selectedpoints` is controlled by\n `layout.selectionrevision`, and `colorbar.(x|y)`\n (accessible with `config: {editable: true}`) is\n controlled by `layout.editrevision`. Trace changes are\n tracked by `uid`, which only falls back on trace index\n if no `uid` is provided. So if your app can add/remove\n traces before the end of the `data` array, such that\n the same trace has a different index, you can still\n preserve user-driven changes if you give each trace a\n `uid` that stays with it as it moves.\n unselected\n plotly.graph_objs.splom.Unselected instance or dict\n with compatible properties\n visible\n Determines whether or not this trace is visible. If\n "legendonly", the trace is not drawn, but can appear as\n a legend item (provided that the legend itself is\n visible).\n xaxes\n Sets the list of x axes corresponding to dimensions of\n this splom trace. By default, a splom will match the\n first N xaxes where N is the number of input\n dimensions. Note that, in case where `diagonal.visible`\n is false and `showupperhalf` or `showlowerhalf` is\n false, this splom trace will generate one less x-axis\n and one less y-axis.\n yaxes\n Sets the list of y axes corresponding to dimensions of\n this splom trace. By default, a splom will match the\n first N yaxes where N is the number of input\n dimensions. 
Note that, in case where `diagonal.visible`\n is false and `showupperhalf` or `showlowerhalf` is\n false, this splom trace will generate one less x-axis\n and one less y-axis.\n\n Returns\n -------\n Splom\n ' super(Splom, self).__init__('splom') if (arg is None): arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = copy.copy(arg) else: raise ValueError('The first argument to the plotly.graph_objs.Splom \nconstructor must be a dict or \nan instance of plotly.graph_objs.Splom') self._skip_invalid = kwargs.pop('skip_invalid', False) from plotly.validators import splom as v_splom self._validators['customdata'] = v_splom.CustomdataValidator() self._validators['customdatasrc'] = v_splom.CustomdatasrcValidator() self._validators['diagonal'] = v_splom.DiagonalValidator() self._validators['dimensions'] = v_splom.DimensionsValidator() self._validators['dimensiondefaults'] = v_splom.DimensionValidator() self._validators['hoverinfo'] = v_splom.HoverinfoValidator() self._validators['hoverinfosrc'] = v_splom.HoverinfosrcValidator() self._validators['hoverlabel'] = v_splom.HoverlabelValidator() self._validators['hovertemplate'] = v_splom.HovertemplateValidator() self._validators['hovertemplatesrc'] = v_splom.HovertemplatesrcValidator() self._validators['hovertext'] = v_splom.HovertextValidator() self._validators['hovertextsrc'] = v_splom.HovertextsrcValidator() self._validators['ids'] = v_splom.IdsValidator() self._validators['idssrc'] = v_splom.IdssrcValidator() self._validators['legendgroup'] = v_splom.LegendgroupValidator() self._validators['marker'] = v_splom.MarkerValidator() self._validators['name'] = v_splom.NameValidator() self._validators['opacity'] = v_splom.OpacityValidator() self._validators['selected'] = v_splom.SelectedValidator() self._validators['selectedpoints'] = v_splom.SelectedpointsValidator() self._validators['showlegend'] = v_splom.ShowlegendValidator() self._validators['showlowerhalf'] = v_splom.ShowlowerhalfValidator() self._validators['showupperhalf'] = v_splom.ShowupperhalfValidator() self._validators['stream'] = v_splom.StreamValidator() self._validators['text'] = v_splom.TextValidator() self._validators['textsrc'] = v_splom.TextsrcValidator() self._validators['uid'] = v_splom.UidValidator() self._validators['uirevision'] = v_splom.UirevisionValidator() self._validators['unselected'] = v_splom.UnselectedValidator() self._validators['visible'] = v_splom.VisibleValidator() self._validators['xaxes'] = v_splom.XaxesValidator() self._validators['yaxes'] = v_splom.YaxesValidator() _v = arg.pop('customdata', None) self['customdata'] = (customdata if (customdata is not None) else _v) _v = arg.pop('customdatasrc', None) self['customdatasrc'] = (customdatasrc if (customdatasrc is not None) else _v) _v = arg.pop('diagonal', None) self['diagonal'] = (diagonal if (diagonal is not None) else _v) _v = arg.pop('dimensions', None) self['dimensions'] = (dimensions if (dimensions is not None) else _v) _v = arg.pop('dimensiondefaults', None) self['dimensiondefaults'] = (dimensiondefaults if (dimensiondefaults is not None) else _v) _v = arg.pop('hoverinfo', None) self['hoverinfo'] = (hoverinfo if (hoverinfo is not None) else _v) _v = arg.pop('hoverinfosrc', None) self['hoverinfosrc'] = (hoverinfosrc if (hoverinfosrc is not None) else _v) _v = arg.pop('hoverlabel', None) self['hoverlabel'] = (hoverlabel if (hoverlabel is not None) else _v) _v = arg.pop('hovertemplate', None) self['hovertemplate'] = (hovertemplate if (hovertemplate is not None) 
else _v) _v = arg.pop('hovertemplatesrc', None) self['hovertemplatesrc'] = (hovertemplatesrc if (hovertemplatesrc is not None) else _v) _v = arg.pop('hovertext', None) self['hovertext'] = (hovertext if (hovertext is not None) else _v) _v = arg.pop('hovertextsrc', None) self['hovertextsrc'] = (hovertextsrc if (hovertextsrc is not None) else _v) _v = arg.pop('ids', None) self['ids'] = (ids if (ids is not None) else _v) _v = arg.pop('idssrc', None) self['idssrc'] = (idssrc if (idssrc is not None) else _v) _v = arg.pop('legendgroup', None) self['legendgroup'] = (legendgroup if (legendgroup is not None) else _v) _v = arg.pop('marker', None) self['marker'] = (marker if (marker is not None) else _v) _v = arg.pop('name', None) self['name'] = (name if (name is not None) else _v) _v = arg.pop('opacity', None) self['opacity'] = (opacity if (opacity is not None) else _v) _v = arg.pop('selected', None) self['selected'] = (selected if (selected is not None) else _v) _v = arg.pop('selectedpoints', None) self['selectedpoints'] = (selectedpoints if (selectedpoints is not None) else _v) _v = arg.pop('showlegend', None) self['showlegend'] = (showlegend if (showlegend is not None) else _v) _v = arg.pop('showlowerhalf', None) self['showlowerhalf'] = (showlowerhalf if (showlowerhalf is not None) else _v) _v = arg.pop('showupperhalf', None) self['showupperhalf'] = (showupperhalf if (showupperhalf is not None) else _v) _v = arg.pop('stream', None) self['stream'] = (stream if (stream is not None) else _v) _v = arg.pop('text', None) self['text'] = (text if (text is not None) else _v) _v = arg.pop('textsrc', None) self['textsrc'] = (textsrc if (textsrc is not None) else _v) _v = arg.pop('uid', None) self['uid'] = (uid if (uid is not None) else _v) _v = arg.pop('uirevision', None) self['uirevision'] = (uirevision if (uirevision is not None) else _v) _v = arg.pop('unselected', None) self['unselected'] = (unselected if (unselected is not None) else _v) _v = arg.pop('visible', None) self['visible'] = (visible if (visible is not None) else _v) _v = arg.pop('xaxes', None) self['xaxes'] = (xaxes if (xaxes is not None) else _v) _v = arg.pop('yaxes', None) self['yaxes'] = (yaxes if (yaxes is not None) else _v) from _plotly_utils.basevalidators import LiteralValidator self._props['type'] = 'splom' self._validators['type'] = LiteralValidator(plotly_name='type', parent_name='splom', val='splom') arg.pop('type', None) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
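A minimal usage sketch for the constructor above, assuming the class is exposed as `plotly.graph_objs.Splom` (as its own docstring states); the dimension labels and values here are invented for illustration:

>>> import plotly.graph_objs as go
>>> splom = go.Splom(
...     dimensions=[dict(label='sepal_length', values=[5.1, 4.9, 4.7]),
...                 dict(label='sepal_width', values=[3.5, 3.0, 3.2])],
...     diagonal=dict(visible=False),
...     showupperhalf=False)
>>> fig = go.Figure(data=[splom])

Passing plain dicts for `dimensions` and `diagonal` relies on the validators wired up in `__init__`, which coerce compatible dicts into the corresponding `splom.*` objects.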
def send_email(message: str) -> None: "\n Sends an email to the target address with the given message.\n Args:\n message (str): the message you're sending\n " with open('../creds.json', 'r') as f: creds = json.load(f) gmail_user = creds['user'] gmail_pass = creds['pass'] try: server = smtplib.SMTP('smtp.gmail.com', 587) server.starttls() server.login(gmail_user, gmail_pass) server.sendmail(gmail_user, creds['target'], message) except (smtplib.SMTPException, OSError): print("Email didn't work...")
-795,476,533,735,353,900
Sends an email to the target address with the given message. Args: message (str): the message you're sending
vaccines.py
send_email
Karalius/get-vaccine-vilnius
python
def send_email(message: str) -> None: "\n Sends an email to the target address with the given message.\n Args:\n message (str): the message you're sending\n " with open('../creds.json', 'r') as f: creds = json.load(f) gmail_user = creds['user'] gmail_pass = creds['pass'] try: server = smtplib.SMTP('smtp.gmail.com', 587) server.starttls() server.login(gmail_user, gmail_pass) server.sendmail(gmail_user, creds['target'], message) except (smtplib.SMTPException, OSError): print("Email didn't work...")
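The function above reads its credentials from `../creds.json` with keys `user`, `pass`, and `target`; a sketch of that file and of a call, with made-up values (note that Gmail's SMTP login generally requires an app password rather than the account password):

>>> # Assumed ../creds.json contents (keys taken from the function body):
>>> # {"user": "me@gmail.com", "pass": "my-app-password", "target": "you@example.com"}
>>> send_email('Subject: Vaccine alert\n\nPfizer slots are available.')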
def get_data() -> None: '\n Requests the Vilnius vaccination center page every 10 minutes in an infinite loop.\n Collects the vaccine counts and adds them to a PostgreSQL database.\n Sends an email if the Pfizer vaccine is available.\n ' while True: sql_connection = psycopg2.connect(database=DATABASE, user=USER, password=PASSWORD, host=HOST) cur = sql_connection.cursor() headers = {'Connection': 'keep-alive', 'Cache-Control': 'max-age=0', 'sec-ch-ua': '^\\^', 'sec-ch-ua-mobile': '?0', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Sec-Fetch-Site': 'cross-site', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-User': '?1', 'Sec-Fetch-Dest': 'document', 'Accept-Language': 'en-US,en;q=0.9'} page = requests.get('https://vilnius-vac.myhybridlab.com/selfregister/vaccine', headers=headers) soup = BeautifulSoup(page.content, 'html.parser') vaccines = soup.find('vaccine-rooms', class_=None)[':vaccine-rooms'] json_object = json.loads(vaccines) time_raw = soup.find('small', class_='text-muted').get_text().split() time_str = ((time_raw[2] + ' ') + time_raw[3]) dt = datetime.fromisoformat(time_str) now = datetime.now().replace(microsecond=0) eet_dt = (now + timedelta(hours=3)) diff_secs = (eet_dt - dt).seconds total_sleep = (602 - diff_secs) moderna = json_object[0]['free_total'] pfizer = json_object[1]['free_total'] astra = json_object[2]['free_total'] janssen = json_object[3]['free_total'] cur.execute('INSERT INTO vilnius_vakcinos (time, moderna, pfizer, astra_zeneca, janssen) VALUES (%s, %s, %s, %s, %s);', (time_str, moderna, pfizer, astra, janssen)) sql_connection.commit() sql_connection.close() if (pfizer > 0): send_email(f'Pfizer count: {pfizer}, link to register: https://vilnius-vac.myhybridlab.com/selfregister/vaccine') time.sleep(total_sleep)
4,513,721,702,642,129,000
Requests the Vilnius vaccination center page every 10 minutes in an infinite loop. Collects the vaccine counts and adds them to a PostgreSQL database. Sends an email if the Pfizer vaccine is available.
vaccines.py
get_data
Karalius/get-vaccine-vilnius
python
def get_data() -> None: '\n Requests the Vilnius vaccination center page every 10 minutes in an infinite loop.\n Collects the vaccine counts and adds them to a PostgreSQL database.\n Sends an email if the Pfizer vaccine is available.\n ' while True: sql_connection = psycopg2.connect(database=DATABASE, user=USER, password=PASSWORD, host=HOST) cur = sql_connection.cursor() headers = {'Connection': 'keep-alive', 'Cache-Control': 'max-age=0', 'sec-ch-ua': '^\\^', 'sec-ch-ua-mobile': '?0', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Sec-Fetch-Site': 'cross-site', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-User': '?1', 'Sec-Fetch-Dest': 'document', 'Accept-Language': 'en-US,en;q=0.9'} page = requests.get('https://vilnius-vac.myhybridlab.com/selfregister/vaccine', headers=headers) soup = BeautifulSoup(page.content, 'html.parser') vaccines = soup.find('vaccine-rooms', class_=None)[':vaccine-rooms'] json_object = json.loads(vaccines) time_raw = soup.find('small', class_='text-muted').get_text().split() time_str = ((time_raw[2] + ' ') + time_raw[3]) dt = datetime.fromisoformat(time_str) now = datetime.now().replace(microsecond=0) eet_dt = (now + timedelta(hours=3)) diff_secs = (eet_dt - dt).seconds total_sleep = (602 - diff_secs) moderna = json_object[0]['free_total'] pfizer = json_object[1]['free_total'] astra = json_object[2]['free_total'] janssen = json_object[3]['free_total'] cur.execute('INSERT INTO vilnius_vakcinos (time, moderna, pfizer, astra_zeneca, janssen) VALUES (%s, %s, %s, %s, %s);', (time_str, moderna, pfizer, astra, janssen)) sql_connection.commit() sql_connection.close() if (pfizer > 0): send_email(f'Pfizer count: {pfizer}, link to register: https://vilnius-vac.myhybridlab.com/selfregister/vaccine') time.sleep(total_sleep)
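The INSERT above implies a table shaped roughly like the one below; the column types are assumptions, since the schema is not part of this function, and the connection constants are the same module-level `DATABASE`/`USER`/`PASSWORD`/`HOST` the loop uses:

>>> import psycopg2
>>> conn = psycopg2.connect(database=DATABASE, user=USER, password=PASSWORD, host=HOST)
>>> conn.cursor().execute('''
...     CREATE TABLE IF NOT EXISTS vilnius_vakcinos (
...         time TIMESTAMP,
...         moderna INTEGER,
...         pfizer INTEGER,
...         astra_zeneca INTEGER,
...         janssen INTEGER
...     );''')
>>> conn.commit()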
def _assert_tensorflow_version(): "Check that we're using a compatible TF version." (major, minor, _) = tf.version.VERSION.split('.') if ((int(major) not in (1, 2)) or ((int(major) == 1) and (int(minor) < 15))): raise RuntimeError(('Tensorflow version >= 1.15, < 3 is required. Found (%s). Please install the latest 1.x or 2.x version from https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)) if (int(major) == 2): tf.compat.v1.logging.warning(('Tensorflow version (%s) found. Note that TFMA support for TF 2.0 is currently in beta' % tf.version.VERSION))
4,537,565,554,868,918,000
Check that we're using a compatible TF version.
tensorflow_model_analysis/api/model_eval_lib.py
_assert_tensorflow_version
Bobgy/model-analysis
python
def _assert_tensorflow_version(): (major, minor, _) = tf.version.VERSION.split('.') if ((int(major) not in (1, 2)) or ((int(major) == 1) and (int(minor) < 15))): raise RuntimeError(('Tensorflow version >= 1.15, < 3 is required. Found (%s). Please install the latest 1.x or 2.x version from https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)) if (int(major) == 2): tf.compat.v1.logging.warning(('Tensorflow version (%s) found. Note that TFMA support for TF 2.0 is currently in beta' % tf.version.VERSION))
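A self-contained sketch of the version gate above, with the TensorFlow dependency replaced by sample version strings so the accept/reject behavior is easy to see (the helper name is hypothetical):

>>> def _is_supported(version):
...     (major, minor, _) = version.split('.')
...     return (int(major) in (1, 2)) and (not ((int(major) == 1) and (int(minor) < 15)))
>>> [_is_supported(v) for v in ('1.14.0', '1.15.2', '2.1.0', '3.0.0')]
[False, True, True, False]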
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel], eval_config: Optional[config.EvalConfig]): 'Returns True if legacy evaluation is being used.' return (eval_shared_model and (not isinstance(eval_shared_model, dict)) and (((not eval_shared_model.model_loader.tags) or (eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags)) and ((not eval_config) or (not eval_config.metrics_specs))))
4,020,011,206,858,171,400
Returns True if legacy evaluation is being used.
tensorflow_model_analysis/api/model_eval_lib.py
_is_legacy_eval
Bobgy/model-analysis
python
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel], eval_config: Optional[config.EvalConfig]): return (eval_shared_model and (not isinstance(eval_shared_model, dict)) and (((not eval_shared_model.model_loader.tags) or (eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags)) and ((not eval_config) or (not eval_config.metrics_specs))))
def _load_eval_run(output_path: Text) -> Tuple[(config.EvalConfig, Text, Text, Dict[(Text, Text)])]: 'Returns eval config, data location, file format, and model locations.' path = os.path.join(output_path, _EVAL_CONFIG_FILE) if tf.io.gfile.exists(path): with tf.io.gfile.GFile(path, 'r') as f: pb = json_format.Parse(f.read(), config_pb2.EvalRun()) _check_version(pb.version, output_path) return (pb.eval_config, pb.data_location, pb.file_format, pb.model_locations) else: path = os.path.splitext(path)[0] serialized_record = six.next(tf.compat.v1.python_io.tf_record_iterator(path)) final_dict = pickle.loads(serialized_record) _check_version(final_dict, output_path) old_config = final_dict['eval_config'] slicing_specs = None if old_config.slice_spec: slicing_specs = [s.to_proto() for s in old_config.slice_spec] options = config.Options() options.compute_confidence_intervals.value = old_config.compute_confidence_intervals options.k_anonymization_count.value = old_config.k_anonymization_count return (config.EvalConfig(slicing_specs=slicing_specs, options=options), old_config.data_location, '', {'': old_config.model_location})
-3,223,791,447,349,410,300
Returns eval config, data location, file format, and model locations.
tensorflow_model_analysis/api/model_eval_lib.py
_load_eval_run
Bobgy/model-analysis
python
def _load_eval_run(output_path: Text) -> Tuple[(config.EvalConfig, Text, Text, Dict[(Text, Text)])]: path = os.path.join(output_path, _EVAL_CONFIG_FILE) if tf.io.gfile.exists(path): with tf.io.gfile.GFile(path, 'r') as f: pb = json_format.Parse(f.read(), config_pb2.EvalRun()) _check_version(pb.version, output_path) return (pb.eval_config, pb.data_location, pb.file_format, pb.model_locations) else: path = os.path.splitext(path)[0] serialized_record = six.next(tf.compat.v1.python_io.tf_record_iterator(path)) final_dict = pickle.loads(serialized_record) _check_version(final_dict, output_path) old_config = final_dict['eval_config'] slicing_specs = None if old_config.slice_spec: slicing_specs = [s.to_proto() for s in old_config.slice_spec] options = config.Options() options.compute_confidence_intervals.value = old_config.compute_confidence_intervals options.k_anonymization_count.value = old_config.k_anonymization_count return (config.EvalConfig(slicing_specs=slicing_specs, options=options), old_config.data_location, '', {'': old_config.model_location})
def load_validation_result(validations_file: Text) -> Optional[ValidationResult]: 'Read and deserialize the ValidationResult.' validation_records = [] for record in tf.compat.v1.python_io.tf_record_iterator(validations_file): validation_records.append(ValidationResult.FromString(record)) if validation_records: assert (len(validation_records) == 1) return validation_records[0]
7,744,466,919,958,878,000
Read and deserialize the ValidationResult.
tensorflow_model_analysis/api/model_eval_lib.py
load_validation_result
Bobgy/model-analysis
python
def load_validation_result(validations_file: Text) -> Optional[ValidationResult]: validation_records = [] for record in tf.compat.v1.python_io.tf_record_iterator(validations_file): validation_records.append(ValidationResult.FromString(record)) if validation_records: assert (len(validation_records) == 1) return validation_records[0]
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults: 'Run model analysis for a single model on multiple data sets.\n\n Args:\n results: A list of TFMA evaluation results.\n mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and\n tfma.MODEL_CENTRIC_MODE are supported.\n\n Returns:\n An EvalResults containing all evaluation results. This can be used to\n construct a time series view.\n ' return EvalResults(results, mode)
1,152,483,092,745,140,900
Bundles multiple TFMA evaluation results into a single EvalResults object. Args: results: A list of TFMA evaluation results. mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and tfma.MODEL_CENTRIC_MODE are supported. Returns: An EvalResults containing all evaluation results. This can be used to construct a time series view.
tensorflow_model_analysis/api/model_eval_lib.py
make_eval_results
Bobgy/model-analysis
python
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults: 'Bundles multiple TFMA evaluation results into a single EvalResults object.\n\n Args:\n results: A list of TFMA evaluation results.\n mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and\n tfma.MODEL_CENTRIC_MODE are supported.\n\n Returns:\n An EvalResults containing all evaluation results. This can be used to\n construct a time series view.\n ' return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text], mode: Text, model_name: Optional[Text]=None) -> EvalResults: 'Loads evaluation results from multiple completed TFMA runs.\n\n Args:\n output_paths: A list of output paths of completed tfma runs.\n mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and\n tfma.MODEL_CENTRIC_MODE are supported.\n model_name: The name of the model if multiple models are evaluated together.\n\n Returns:\n An EvalResults containing the evaluation results serialized at output_paths.\n This can be used to construct a time series view.\n ' results = [load_eval_result(output_path, model_name=model_name) for output_path in output_paths] return make_eval_results(results, mode)
6,960,574,085,333,971,000
Loads evaluation results from multiple completed TFMA runs. Args: output_paths: A list of output paths of completed tfma runs. mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and tfma.MODEL_CENTRIC_MODE are supported. model_name: The name of the model if multiple models are evaluated together. Returns: An EvalResults containing the evaluation results serialized at output_paths. This can be used to construct a time series view.
tensorflow_model_analysis/api/model_eval_lib.py
load_eval_results
Bobgy/model-analysis
python
def load_eval_results(output_paths: List[Text], mode: Text, model_name: Optional[Text]=None) -> EvalResults: 'Loads evaluation results from multiple completed TFMA runs.\n\n Args:\n output_paths: A list of output paths of completed tfma runs.\n mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and\n tfma.MODEL_CENTRIC_MODE are supported.\n model_name: The name of the model if multiple models are evaluated together.\n\n Returns:\n An EvalResults containing the evaluation results serialized at output_paths.\n This can be used to construct a time series view.\n ' results = [load_eval_result(output_path, model_name=model_name) for output_path in output_paths] return make_eval_results(results, mode)
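A usage sketch for the loader above, assuming the module is imported under its conventional alias `tfma` and that the listed directories (hypothetical here) contain completed runs:

>>> import tensorflow_model_analysis as tfma
>>> eval_results = tfma.load_eval_results(
...     output_paths=['/tmp/eval_run_day1', '/tmp/eval_run_day2'],
...     mode=tfma.DATA_CENTRIC_MODE)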
def load_eval_result(output_path: Text, model_name: Optional[Text]=None) -> EvalResult: 'Creates an EvalResult object for use with the visualization functions.' (eval_config, data_location, file_format, model_locations) = _load_eval_run(output_path) metrics_proto_list = metrics_and_plots_serialization.load_and_deserialize_metrics(path=os.path.join(output_path, constants.METRICS_KEY), model_name=model_name) plots_proto_list = metrics_and_plots_serialization.load_and_deserialize_plots(path=os.path.join(output_path, constants.PLOTS_KEY)) if (model_name is None): model_location = list(model_locations.values())[0] else: model_location = model_locations[model_name] return EvalResult(slicing_metrics=metrics_proto_list, plots=plots_proto_list, config=eval_config, data_location=data_location, file_format=file_format, model_location=model_location)
2,867,715,658,580,579,300
Creates an EvalResult object for use with the visualization functions.
tensorflow_model_analysis/api/model_eval_lib.py
load_eval_result
Bobgy/model-analysis
python
def load_eval_result(output_path: Text, model_name: Optional[Text]=None) -> EvalResult: (eval_config, data_location, file_format, model_locations) = _load_eval_run(output_path) metrics_proto_list = metrics_and_plots_serialization.load_and_deserialize_metrics(path=os.path.join(output_path, constants.METRICS_KEY), model_name=model_name) plots_proto_list = metrics_and_plots_serialization.load_and_deserialize_plots(path=os.path.join(output_path, constants.PLOTS_KEY)) if (model_name is None): model_location = list(model_locations.values())[0] else: model_location = model_locations[model_name] return EvalResult(slicing_metrics=metrics_proto_list, plots=plots_proto_list, config=eval_config, data_location=data_location, file_format=file_format, model_location=model_location)
def default_eval_shared_model(eval_saved_model_path: Text, add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]]=None, include_default_metrics: Optional[bool]=True, example_weight_key: Optional[Union[(Text, Dict[(Text, Text)])]]=None, additional_fetches: Optional[List[Text]]=None, blacklist_feature_fetches: Optional[List[Text]]=None, tags: Optional[List[Text]]=None, eval_config: Optional[config.EvalConfig]=None) -> types.EvalSharedModel: 'Returns default EvalSharedModel.\n\n Args:\n eval_saved_model_path: Path to EvalSavedModel.\n add_metrics_callbacks: Optional list of callbacks for adding additional\n metrics to the graph (see EvalSharedModel for more information on how to\n configure additional metrics). Metrics for example count and example\n weights will be added automatically.\n include_default_metrics: True to include the default metrics that are part\n of the saved model graph during evaluation. Note that\n eval_config.options.include_default_metrics must also be true.\n example_weight_key: Example weight key (single-output model) or dict of\n example weight keys (multi-output model) keyed by output name.\n additional_fetches: Prefixes of additional tensors stored in\n signature_def.inputs that should be fetched at prediction time. The\n "features" and "labels" tensors are handled automatically and should not\n be included.\n blacklist_feature_fetches: List of tensor names in the features dictionary\n which should be excluded from the fetches request. This is useful in\n scenarios where features are large (e.g. images) and can lead to excessive\n memory use if stored.\n tags: Model tags (e.g. \'serve\' for serving or \'eval\' for EvalSavedModel).\n eval_config: Eval config. Only used for setting default tags.\n ' if (tags is None): if eval_config: signatures = [s.signature_name for s in eval_config.model_specs] if (eval_constants.EVAL_TAG in signatures): if (not all(((s == eval_constants.EVAL_TAG) for s in signatures))): tf.compat.v1.logging.warning('mixture of eval and non-eval signatures used: eval_config={}'.format(eval_config)) tags = [eval_constants.EVAL_TAG] else: tags = [tf.saved_model.SERVING] else: tags = [eval_constants.EVAL_TAG] if (tags == [eval_constants.EVAL_TAG]): if (not add_metrics_callbacks): add_metrics_callbacks = [] example_count_callback = post_export_metrics.example_count() add_metrics_callbacks.append(example_count_callback) if example_weight_key: if isinstance(example_weight_key, dict): for (output_name, key) in example_weight_key.items(): example_weight_callback = post_export_metrics.example_weight(key, metric_tag=output_name) add_metrics_callbacks.append(example_weight_callback) else: example_weight_callback = post_export_metrics.example_weight(example_weight_key) add_metrics_callbacks.append(example_weight_callback) return types.EvalSharedModel(model_path=eval_saved_model_path, add_metrics_callbacks=add_metrics_callbacks, include_default_metrics=include_default_metrics, example_weight_key=example_weight_key, additional_fetches=additional_fetches, model_loader=types.ModelLoader(tags=tags, construct_fn=model_util.model_construct_fn(eval_saved_model_path=eval_saved_model_path, add_metrics_callbacks=add_metrics_callbacks, include_default_metrics=include_default_metrics, additional_fetches=additional_fetches, blacklist_feature_fetches=blacklist_feature_fetches, tags=tags)))
4,766,532,646,388,441,000
Returns default EvalSharedModel. Args: eval_saved_model_path: Path to EvalSavedModel. add_metrics_callbacks: Optional list of callbacks for adding additional metrics to the graph (see EvalSharedModel for more information on how to configure additional metrics). Metrics for example count and example weights will be added automatically. include_default_metrics: True to include the default metrics that are part of the saved model graph during evaluation. Note that eval_config.options.include_default_metrics must also be true. example_weight_key: Example weight key (single-output model) or dict of example weight keys (multi-output model) keyed by output name. additional_fetches: Prefixes of additional tensors stored in signature_def.inputs that should be fetched at prediction time. The "features" and "labels" tensors are handled automatically and should not be included. blacklist_feature_fetches: List of tensor names in the features dictionary which should be excluded from the fetches request. This is useful in scenarios where features are large (e.g. images) and can lead to excessive memory use if stored. tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel). eval_config: Eval config. Only used for setting default tags.
tensorflow_model_analysis/api/model_eval_lib.py
default_eval_shared_model
Bobgy/model-analysis
python
def default_eval_shared_model(eval_saved_model_path: Text, add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]]=None, include_default_metrics: Optional[bool]=True, example_weight_key: Optional[Union[(Text, Dict[(Text, Text)])]]=None, additional_fetches: Optional[List[Text]]=None, blacklist_feature_fetches: Optional[List[Text]]=None, tags: Optional[List[Text]]=None, eval_config: Optional[config.EvalConfig]=None) -> types.EvalSharedModel: 'Returns default EvalSharedModel.\n\n Args:\n eval_saved_model_path: Path to EvalSavedModel.\n add_metrics_callbacks: Optional list of callbacks for adding additional\n metrics to the graph (see EvalSharedModel for more information on how to\n configure additional metrics). Metrics for example count and example\n weights will be added automatically.\n include_default_metrics: True to include the default metrics that are part\n of the saved model graph during evaluation. Note that\n eval_config.options.include_default_metrics must also be true.\n example_weight_key: Example weight key (single-output model) or dict of\n example weight keys (multi-output model) keyed by output name.\n additional_fetches: Prefixes of additional tensors stored in\n signature_def.inputs that should be fetched at prediction time. The\n "features" and "labels" tensors are handled automatically and should not\n be included.\n blacklist_feature_fetches: List of tensor names in the features dictionary\n which should be excluded from the fetches request. This is useful in\n scenarios where features are large (e.g. images) and can lead to excessive\n memory use if stored.\n tags: Model tags (e.g. \'serve\' for serving or \'eval\' for EvalSavedModel).\n eval_config: Eval config. Only used for setting default tags.\n ' if (tags is None): if eval_config: signatures = [s.signature_name for s in eval_config.model_specs] if (eval_constants.EVAL_TAG in signatures): if (not all(((s == eval_constants.EVAL_TAG) for s in signatures))): tf.compat.v1.logging.warning('mixture of eval and non-eval signatures used: eval_config={}'.format(eval_config)) tags = [eval_constants.EVAL_TAG] else: tags = [tf.saved_model.SERVING] else: tags = [eval_constants.EVAL_TAG] if (tags == [eval_constants.EVAL_TAG]): if (not add_metrics_callbacks): add_metrics_callbacks = [] example_count_callback = post_export_metrics.example_count() add_metrics_callbacks.append(example_count_callback) if example_weight_key: if isinstance(example_weight_key, dict): for (output_name, key) in example_weight_key.items(): example_weight_callback = post_export_metrics.example_weight(key, metric_tag=output_name) add_metrics_callbacks.append(example_weight_callback) else: example_weight_callback = post_export_metrics.example_weight(example_weight_key) add_metrics_callbacks.append(example_weight_callback) return types.EvalSharedModel(model_path=eval_saved_model_path, add_metrics_callbacks=add_metrics_callbacks, include_default_metrics=include_default_metrics, example_weight_key=example_weight_key, additional_fetches=additional_fetches, model_loader=types.ModelLoader(tags=tags, construct_fn=model_util.model_construct_fn(eval_saved_model_path=eval_saved_model_path, add_metrics_callbacks=add_metrics_callbacks, include_default_metrics=include_default_metrics, additional_fetches=additional_fetches, blacklist_feature_fetches=blacklist_feature_fetches, tags=tags)))
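A usage sketch for the function above; the export path is hypothetical, and `example_weight_key` is only needed when the model was trained with example weights:

>>> import tensorflow_model_analysis as tfma
>>> eval_shared_model = tfma.default_eval_shared_model(
...     eval_saved_model_path='/tmp/exported_eval_saved_model',
...     example_weight_key='weight')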
def default_extractors(eval_shared_model: Union[(types.EvalSharedModel, Dict[(Text, types.EvalSharedModel)])]=None, eval_config: config.EvalConfig=None, slice_spec: Optional[List[slicer.SingleSliceSpec]]=None, desired_batch_size: Optional[int]=None, materialize: Optional[bool]=True) -> List[extractor.Extractor]: 'Returns the default extractors for use in ExtractAndEvaluate.\n\n Args:\n eval_shared_model: Shared model (single-model evaluation) or dict of shared\n models keyed by model name (multi-model evaluation). Required unless the\n predictions are provided alongside of the features (i.e. model-agnostic\n evaluations).\n eval_config: Eval config.\n slice_spec: Deprecated (use EvalConfig).\n desired_batch_size: Optional batch size for batching in Predict.\n materialize: True to have extractors create materialized output.\n\n Raises:\n NotImplementedError: If eval_config contains mixed serving and eval models.\n ' if (eval_config is not None): eval_config = config.update_eval_config_with_defaults(eval_config) slice_spec = [slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs] if _is_legacy_eval(eval_shared_model, eval_config): return [predict_extractor.PredictExtractor(eval_shared_model, desired_batch_size, materialize=materialize), slice_key_extractor.SliceKeyExtractor(slice_spec, materialize=materialize)] elif eval_shared_model: model_types = model_util.get_model_types(eval_config) if (not model_types.issubset(constants.VALID_MODEL_TYPES)): raise NotImplementedError('model type must be one of: {}. evalconfig={}'.format(str(constants.VALID_MODEL_TYPES), eval_config)) if (model_types == set([constants.TF_LITE])): return [input_extractor.InputExtractor(eval_config=eval_config), tflite_predict_extractor.TFLitePredictExtractor(eval_config=eval_config, eval_shared_model=eval_shared_model, desired_batch_size=desired_batch_size), slice_key_extractor.SliceKeyExtractor(slice_spec, materialize=materialize)] elif (constants.TF_LITE in model_types): raise NotImplementedError('support for mixing tf_lite and non-tf_lite models is not implemented: eval_config={}'.format(eval_config)) elif (eval_config and all(((s.signature_name == eval_constants.EVAL_TAG) for s in eval_config.model_specs))): return [predict_extractor.PredictExtractor(eval_shared_model, desired_batch_size, materialize=materialize, eval_config=eval_config), slice_key_extractor.SliceKeyExtractor(slice_spec, materialize=materialize)] elif (eval_config and any(((s.signature_name == eval_constants.EVAL_TAG) for s in eval_config.model_specs))): raise NotImplementedError('support for mixing eval and non-eval models is not implemented: eval_config={}'.format(eval_config)) else: return [input_extractor.InputExtractor(eval_config=eval_config), predict_extractor_v2.PredictExtractor(eval_config=eval_config, eval_shared_model=eval_shared_model, desired_batch_size=desired_batch_size), slice_key_extractor.SliceKeyExtractor(slice_spec, materialize=materialize)] else: return [input_extractor.InputExtractor(eval_config=eval_config), slice_key_extractor.SliceKeyExtractor(slice_spec, materialize=materialize)]
5,195,463,914,530,202,000
Returns the default extractors for use in ExtractAndEvaluate. Args: eval_shared_model: Shared model (single-model evaluation) or dict of shared models keyed by model name (multi-model evaluation). Required unless the predictions are provided alongside of the features (i.e. model-agnostic evaluations). eval_config: Eval config. slice_spec: Deprecated (use EvalConfig). desired_batch_size: Optional batch size for batching in Predict. materialize: True to have extractors create materialized output. Raises: NotImplementedError: If eval_config contains mixed serving and eval models.
tensorflow_model_analysis/api/model_eval_lib.py
default_extractors
Bobgy/model-analysis
python
def default_extractors(eval_shared_model: Union[(types.EvalSharedModel, Dict[(Text, types.EvalSharedModel)])]=None, eval_config: config.EvalConfig=None, slice_spec: Optional[List[slicer.SingleSliceSpec]]=None, desired_batch_size: Optional[int]=None, materialize: Optional[bool]=True) -> List[extractor.Extractor]: 'Returns the default extractors for use in ExtractAndEvaluate.\n\n Args:\n eval_shared_model: Shared model (single-model evaluation) or dict of shared\n models keyed by model name (multi-model evaluation). Required unless the\n predictions are provided alongside of the features (i.e. model-agnostic\n evaluations).\n eval_config: Eval config.\n slice_spec: Deprecated (use EvalConfig).\n desired_batch_size: Optional batch size for batching in Predict.\n materialize: True to have extractors create materialized output.\n\n Raises:\n NotImplementedError: If eval_config contains mixed serving and eval models.\n ' if (eval_config is not None): eval_config = config.update_eval_config_with_defaults(eval_config) slice_spec = [slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs] if _is_legacy_eval(eval_shared_model, eval_config): return [predict_extractor.PredictExtractor(eval_shared_model, desired_batch_size, materialize=materialize), slice_key_extractor.SliceKeyExtractor(slice_spec, materialize=materialize)] elif eval_shared_model: model_types = model_util.get_model_types(eval_config) if (not model_types.issubset(constants.VALID_MODEL_TYPES)): raise NotImplementedError('model type must be one of: {}. evalconfig={}'.format(str(constants.VALID_MODEL_TYPES), eval_config)) if (model_types == set([constants.TF_LITE])): return [input_extractor.InputExtractor(eval_config=eval_config), tflite_predict_extractor.TFLitePredictExtractor(eval_config=eval_config, eval_shared_model=eval_shared_model, desired_batch_size=desired_batch_size), slice_key_extractor.SliceKeyExtractor(slice_spec, materialize=materialize)] elif (constants.TF_LITE in model_types): raise NotImplementedError('support for mixing tf_lite and non-tf_lite models is not implemented: eval_config={}'.format(eval_config)) elif (eval_config and all(((s.signature_name == eval_constants.EVAL_TAG) for s in eval_config.model_specs))): return [predict_extractor.PredictExtractor(eval_shared_model, desired_batch_size, materialize=materialize, eval_config=eval_config), slice_key_extractor.SliceKeyExtractor(slice_spec, materialize=materialize)] elif (eval_config and any(((s.signature_name == eval_constants.EVAL_TAG) for s in eval_config.model_specs))): raise NotImplementedError('support for mixing eval and non-eval models is not implemented: eval_config={}'.format(eval_config)) else: return [input_extractor.InputExtractor(eval_config=eval_config), predict_extractor_v2.PredictExtractor(eval_config=eval_config, eval_shared_model=eval_shared_model, desired_batch_size=desired_batch_size), slice_key_extractor.SliceKeyExtractor(slice_spec, materialize=materialize)] else: return [input_extractor.InputExtractor(eval_config=eval_config), slice_key_extractor.SliceKeyExtractor(slice_spec, materialize=materialize)]
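A sketch of composing the function above with a shared model built by `default_eval_shared_model`; `eval_config` is assumed to be a `config.EvalConfig` constructed elsewhere, and `stage_name` is the field each returned Extractor carries:

>>> extractors = default_extractors(
...     eval_shared_model=eval_shared_model,
...     eval_config=eval_config)
>>> [e.stage_name for e in extractors]  # e.g. prediction and slice-key stages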
def default_evaluators(eval_shared_model: Optional[Union[(types.EvalSharedModel, Dict[(Text, types.EvalSharedModel)])]]=None, eval_config: config.EvalConfig=None, compute_confidence_intervals: Optional[bool]=False, k_anonymization_count: int=1, desired_batch_size: Optional[int]=None, serialize: bool=False, random_seed_for_testing: Optional[int]=None) -> List[evaluator.Evaluator]: 'Returns the default evaluators for use in ExtractAndEvaluate.\n\n Args:\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). Only\n required if there are metrics to be computed in-graph using the model.\n eval_config: Eval config.\n compute_confidence_intervals: Deprecated (use eval_config).\n k_anonymization_count: Deprecated (use eval_config).\n desired_batch_size: Optional batch size for batching in combiner.\n serialize: Deprecated.\n random_seed_for_testing: Provide for deterministic tests only.\n ' disabled_outputs = [] if eval_config: eval_config = config.update_eval_config_with_defaults(eval_config) disabled_outputs = eval_config.options.disabled_outputs.values if (model_util.get_model_types(eval_config) == set([constants.TF_LITE])): if eval_shared_model: if isinstance(eval_shared_model, dict): eval_shared_model = {k: v._replace(include_default_metrics=False) for (k, v) in eval_shared_model.items()} else: eval_shared_model = eval_shared_model._replace(include_default_metrics=False) if ((constants.METRICS_KEY in disabled_outputs) and (constants.PLOTS_KEY in disabled_outputs)): return [] if _is_legacy_eval(eval_shared_model, eval_config): if (eval_config is not None): if eval_config.options.HasField('compute_confidence_intervals'): compute_confidence_intervals = eval_config.options.compute_confidence_intervals.value if eval_config.options.HasField('k_anonymization_count'): k_anonymization_count = eval_config.options.k_anonymization_count.value return [metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(eval_shared_model, compute_confidence_intervals=compute_confidence_intervals, k_anonymization_count=k_anonymization_count, desired_batch_size=desired_batch_size, serialize=serialize, random_seed_for_testing=random_seed_for_testing)] else: return [metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(eval_config=eval_config, eval_shared_model=eval_shared_model)]
1,749,821,193,430,307,300
Returns the default evaluators for use in ExtractAndEvaluate. Args: eval_shared_model: Optional shared model (single-model evaluation) or dict of shared models keyed by model name (multi-model evaluation). Only required if there are metrics to be computed in-graph using the model. eval_config: Eval config. compute_confidence_intervals: Deprecated (use eval_config). k_anonymization_count: Deprecated (use eval_config). desired_batch_size: Optional batch size for batching in combiner. serialize: Deprecated. random_seed_for_testing: Provide for deterministic tests only.
tensorflow_model_analysis/api/model_eval_lib.py
default_evaluators
Bobgy/model-analysis
python
def default_evaluators(eval_shared_model: Optional[Union[(types.EvalSharedModel, Dict[(Text, types.EvalSharedModel)])]]=None, eval_config: config.EvalConfig=None, compute_confidence_intervals: Optional[bool]=False, k_anonymization_count: int=1, desired_batch_size: Optional[int]=None, serialize: bool=False, random_seed_for_testing: Optional[int]=None) -> List[evaluator.Evaluator]: 'Returns the default evaluators for use in ExtractAndEvaluate.\n\n Args:\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). Only\n required if there are metrics to be computed in-graph using the model.\n eval_config: Eval config.\n compute_confidence_intervals: Deprecated (use eval_config).\n k_anonymization_count: Deprecated (use eval_config).\n desired_batch_size: Optional batch size for batching in combiner.\n serialize: Deprecated.\n random_seed_for_testing: Provide for deterministic tests only.\n ' disabled_outputs = [] if eval_config: eval_config = config.update_eval_config_with_defaults(eval_config) disabled_outputs = eval_config.options.disabled_outputs.values if (model_util.get_model_types(eval_config) == set([constants.TF_LITE])): if eval_shared_model: if isinstance(eval_shared_model, dict): eval_shared_model = {k: v._replace(include_default_metrics=False) for (k, v) in eval_shared_model.items()} else: eval_shared_model = eval_shared_model._replace(include_default_metrics=False) if ((constants.METRICS_KEY in disabled_outputs) and (constants.PLOTS_KEY in disabled_outputs)): return [] if _is_legacy_eval(eval_shared_model, eval_config): if (eval_config is not None): if eval_config.options.HasField('compute_confidence_intervals'): compute_confidence_intervals = eval_config.options.compute_confidence_intervals.value if eval_config.options.HasField('k_anonymization_count'): k_anonymization_count = eval_config.options.k_anonymization_count.value return [metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(eval_shared_model, compute_confidence_intervals=compute_confidence_intervals, k_anonymization_count=k_anonymization_count, desired_batch_size=desired_batch_size, serialize=serialize, random_seed_for_testing=random_seed_for_testing)] else: return [metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(eval_config=eval_config, eval_shared_model=eval_shared_model)]
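Continuing the illustrative sketch above (added): evaluators are built the same way, and the verify loop mirrors what ExtractEvaluateAndWriteResults does later in this file.

# Hedged sketch; reuses eval_config, shared_model and extractors from above.
evaluators = default_evaluators(eval_config=eval_config, eval_shared_model=shared_model)
for e in evaluators:
    # Each evaluator's run_after dependency must match an extractor stage.
    evaluator.verify_evaluator(e, extractors)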
def default_writers(output_path: Optional[Text], eval_shared_model: Optional[Union[(types.EvalSharedModel, Dict[(Text, types.EvalSharedModel)])]]=None) -> List[writer.Writer]: 'Returns the default writers for use in WriteResults.\n\n Args:\n output_path: Output path.\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). Only\n required if legacy add_metrics_callbacks are used.\n ' add_metric_callbacks = [] if (eval_shared_model and (not isinstance(eval_shared_model, dict))): add_metric_callbacks = eval_shared_model.add_metrics_callbacks output_paths = {constants.METRICS_KEY: os.path.join(output_path, constants.METRICS_KEY), constants.PLOTS_KEY: os.path.join(output_path, constants.PLOTS_KEY), constants.VALIDATIONS_KEY: os.path.join(output_path, constants.VALIDATIONS_KEY)} return [metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(output_paths=output_paths, add_metrics_callbacks=add_metric_callbacks)]
6,589,016,826,840,738,000
Returns the default writers for use in WriteResults. Args: output_path: Output path. eval_shared_model: Optional shared model (single-model evaluation) or dict of shared models keyed by model name (multi-model evaluation). Only required if legacy add_metrics_callbacks are used.
tensorflow_model_analysis/api/model_eval_lib.py
default_writers
Bobgy/model-analysis
python
def default_writers(output_path: Optional[Text], eval_shared_model: Optional[Union[(types.EvalSharedModel, Dict[(Text, types.EvalSharedModel)])]]=None) -> List[writer.Writer]: 'Returns the default writers for use in WriteResults.\n\n Args:\n output_path: Output path.\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). Only\n required if legacy add_metrics_callbacks are used.\n ' add_metric_callbacks = [] if (eval_shared_model and (not isinstance(eval_shared_model, dict))): add_metric_callbacks = eval_shared_model.add_metrics_callbacks output_paths = {constants.METRICS_KEY: os.path.join(output_path, constants.METRICS_KEY), constants.PLOTS_KEY: os.path.join(output_path, constants.PLOTS_KEY), constants.VALIDATIONS_KEY: os.path.join(output_path, constants.VALIDATIONS_KEY)} return [metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(output_paths=output_paths, add_metrics_callbacks=add_metric_callbacks)]
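A short illustrative call (added): per the code above, metrics, plots and validations are each written to their own subdirectory of output_path.

# Hedged sketch; '/tmp/tfma_output' is a made-up directory.
writers = default_writers('/tmp/tfma_output', eval_shared_model=shared_model)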
@beam.ptransform_fn @beam.typehints.with_input_types(bytes) @beam.typehints.with_output_types(types.Extracts) def InputsToExtracts(inputs: beam.pvalue.PCollection): 'Converts serialized inputs (e.g. examples) to Extracts.' return (inputs | beam.Map((lambda x: {constants.INPUT_KEY: x})))
1,328,535,458,801,781,500
Converts serialized inputs (e.g. examples) to Extracts.
tensorflow_model_analysis/api/model_eval_lib.py
InputsToExtracts
Bobgy/model-analysis
python
@beam.ptransform_fn @beam.typehints.with_input_types(bytes) @beam.typehints.with_output_types(types.Extracts) def InputsToExtracts(inputs: beam.pvalue.PCollection): return (inputs | beam.Map((lambda x: {constants.INPUT_KEY: x})))
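An illustrative pipeline fragment (added; the byte strings stand in for serialized examples, and apache_beam plus this module's names are assumed importable):

import apache_beam as beam

with beam.Pipeline() as p:
    extracts = (p
                | 'Create' >> beam.Create([b'example-1', b'example-2'])
                | 'InputsToExtracts' >> InputsToExtracts())
    # Each element is now a dict {constants.INPUT_KEY: original_bytes}.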
@beam.ptransform_fn @beam.typehints.with_input_types(types.Extracts) @beam.typehints.with_output_types(evaluator.Evaluation) def ExtractAndEvaluate(extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor], evaluators: List[evaluator.Evaluator]): 'Performs Extractions and Evaluations in provided order.' evaluation = {} def update(evaluation: Dict[(Text, Any)], new_evaluation: Dict[(Text, Any)]): for (k, v) in new_evaluation.items(): if (k not in evaluation): evaluation[k] = [] evaluation[k].append(v) return evaluation for v in evaluators: if (not v.run_after): update(evaluation, (extracts | (v.stage_name >> v.ptransform))) for x in extractors: extracts = (extracts | (x.stage_name >> x.ptransform)) for v in evaluators: if (v.run_after == x.stage_name): update(evaluation, (extracts | (v.stage_name >> v.ptransform))) for v in evaluators: if (v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME): update(evaluation, (extracts | (v.stage_name >> v.ptransform))) result = {} for (k, v) in evaluation.items(): if (len(v) == 1): result[k] = v[0] continue result[k] = ((v | (('FlattenEvaluationOutput(%s)' % k) >> beam.Flatten())) | (('CombineEvaluationOutput(%s)' % k) >> beam.CombinePerKey(_CombineEvaluationDictionariesFn()))) return result
-2,748,688,428,476,792,000
Performs Extractions and Evaluations in provided order.
tensorflow_model_analysis/api/model_eval_lib.py
ExtractAndEvaluate
Bobgy/model-analysis
python
@beam.ptransform_fn @beam.typehints.with_input_types(types.Extracts) @beam.typehints.with_output_types(evaluator.Evaluation) def ExtractAndEvaluate(extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor], evaluators: List[evaluator.Evaluator]): evaluation = {} def update(evaluation: Dict[(Text, Any)], new_evaluation: Dict[(Text, Any)]): for (k, v) in new_evaluation.items(): if (k not in evaluation): evaluation[k] = [] evaluation[k].append(v) return evaluation for v in evaluators: if (not v.run_after): update(evaluation, (extracts | (v.stage_name >> v.ptransform))) for x in extractors: extracts = (extracts | (x.stage_name >> x.ptransform)) for v in evaluators: if (v.run_after == x.stage_name): update(evaluation, (extracts | (v.stage_name >> v.ptransform))) for v in evaluators: if (v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME): update(evaluation, (extracts | (v.stage_name >> v.ptransform))) result = {} for (k, v) in evaluation.items(): if (len(v) == 1): result[k] = v[0] continue result[k] = ((v | (('FlattenEvaluationOutput(%s)' % k) >> beam.Flatten())) | (('CombineEvaluationOutput(%s)' % k) >> beam.CombinePerKey(_CombineEvaluationDictionariesFn()))) return result
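An illustrative composition (added): extractors run in the order given; each evaluator fires before extraction (empty run_after), after its named extractor stage, or after the final extractor (extractor.LAST_EXTRACTOR_STAGE_NAME).

# Hedged sketch; reuses extracts, extractors and evaluators from above.
evaluation = (extracts
              | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
                  extractors=extractors, evaluators=evaluators))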
@beam.ptransform_fn @beam.typehints.with_input_types(Union[(evaluator.Evaluation, validator.Validation)]) @beam.typehints.with_output_types(beam.pvalue.PDone) def WriteResults(evaluation_or_validation: Union[(evaluator.Evaluation, validator.Validation)], writers: List[writer.Writer]): 'Writes Evaluation or Validation results using given writers.\n\n Args:\n evaluation_or_validation: Evaluation or Validation output.\n writers: Writers to use for writing out output.\n\n Raises:\n ValueError: If Evaluation or Validation is empty.\n\n Returns:\n beam.pvalue.PDone.\n ' if (not evaluation_or_validation): raise ValueError('Evaluations and Validations cannot be empty') for w in writers: _ = (evaluation_or_validation | (w.stage_name >> w.ptransform)) return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)
8,322,397,795,302,271,000
Writes Evaluation or Validation results using given writers. Args: evaluation_or_validation: Evaluation or Validation output. writers: Writers to use for writing out output. Raises: ValueError: If Evaluation or Validation is empty. Returns: beam.pvalue.PDone.
tensorflow_model_analysis/api/model_eval_lib.py
WriteResults
Bobgy/model-analysis
python
@beam.ptransform_fn @beam.typehints.with_input_types(Union[(evaluator.Evaluation, validator.Validation)]) @beam.typehints.with_output_types(beam.pvalue.PDone) def WriteResults(evaluation_or_validation: Union[(evaluator.Evaluation, validator.Validation)], writers: List[writer.Writer]): 'Writes Evaluation or Validation results using given writers.\n\n Args:\n evaluation_or_validation: Evaluation or Validation output.\n writers: Writers to use for writing out output.\n\n Raises:\n ValueError: If Evaluation or Validation is empty.\n\n Returns:\n beam.pvalue.PDone.\n ' if (not evaluation_or_validation): raise ValueError('Evaluations and Validations cannot be empty') for w in writers: _ = (evaluation_or_validation | (w.stage_name >> w.ptransform)) return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)
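An illustrative final step (added), matching the call made inside ExtractEvaluateAndWriteResults below:

_ = (evaluation | 'WriteResults' >> WriteResults(writers=writers))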
@beam.ptransform_fn @beam.typehints.with_input_types(beam.Pipeline) @beam.typehints.with_output_types(beam.pvalue.PDone) def WriteEvalConfig(pipeline: beam.Pipeline, eval_config: config.EvalConfig, output_path: Text, data_location: Optional[Text]='', file_format: Optional[Text]='', model_locations: Optional[Dict[(Text, Text)]]=None): 'Writes EvalConfig to file.\n\n Args:\n pipeline: Beam pipeline.\n eval_config: EvalConfig.\n output_path: Output path.\n data_location: Optional location for data used with config.\n file_format: Optional format for data used with config.\n model_locations: Optional location(s) for model(s) used with config.\n\n Returns:\n beam.pvalue.PDone.\n ' return ((pipeline | ('CreateEvalConfig' >> beam.Create([_serialize_eval_run(eval_config, data_location, file_format, model_locations)]))) | ('WriteEvalConfig' >> beam.io.WriteToText(os.path.join(output_path, _EVAL_CONFIG_FILE), shard_name_template='')))
-1,003,215,287,247,355,600
Writes EvalConfig to file. Args: pipeline: Beam pipeline. eval_config: EvalConfig. output_path: Output path. data_location: Optional location for data used with config. file_format: Optional format for data used with config. model_locations: Optional location(s) for model(s) used with config. Returns: beam.pvalue.PDone.
tensorflow_model_analysis/api/model_eval_lib.py
WriteEvalConfig
Bobgy/model-analysis
python
@beam.ptransform_fn @beam.typehints.with_input_types(beam.Pipeline) @beam.typehints.with_output_types(beam.pvalue.PDone) def WriteEvalConfig(pipeline: beam.Pipeline, eval_config: config.EvalConfig, output_path: Text, data_location: Optional[Text]='', file_format: Optional[Text]='', model_locations: Optional[Dict[(Text, Text)]]=None): 'Writes EvalConfig to file.\n\n Args:\n pipeline: Beam pipeline.\n eval_config: EvalConfig.\n output_path: Output path.\n data_location: Optional location for data used with config.\n file_format: Optional format for data used with config.\n model_locations: Optional location(s) for model(s) used with config.\n\n Returns:\n beam.pvalue.PDone.\n ' return ((pipeline | ('CreateEvalConfig' >> beam.Create([_serialize_eval_run(eval_config, data_location, file_format, model_locations)]))) | ('WriteEvalConfig' >> beam.io.WriteToText(os.path.join(output_path, _EVAL_CONFIG_FILE), shard_name_template='')))
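An illustrative call (added; paths hypothetical), matching the usage inside ExtractEvaluateAndWriteResults below:

_ = (p | WriteEvalConfig(eval_config, '/tmp/tfma_output',
                         data_location='/data/eval.tfrecord',
                         file_format='tfrecords'))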
@beam.ptransform_fn @beam.typehints.with_output_types(beam.pvalue.PDone) def ExtractEvaluateAndWriteResults(examples: beam.pvalue.PCollection, eval_shared_model: Optional[Union[(types.EvalSharedModel, Dict[(Text, types.EvalSharedModel)])]]=None, eval_config: config.EvalConfig=None, extractors: Optional[List[extractor.Extractor]]=None, evaluators: Optional[List[evaluator.Evaluator]]=None, writers: Optional[List[writer.Writer]]=None, output_path: Optional[Text]=None, display_only_data_location: Optional[Text]=None, display_only_file_format: Optional[Text]=None, slice_spec: Optional[List[slicer.SingleSliceSpec]]=None, write_config: Optional[bool]=True, compute_confidence_intervals: Optional[bool]=False, k_anonymization_count: int=1, desired_batch_size: Optional[int]=None, random_seed_for_testing: Optional[int]=None) -> beam.pvalue.PDone: "PTransform for performing extraction, evaluation, and writing results.\n\n Users who want to construct their own Beam pipelines instead of using the\n lightweight run_model_analysis functions should use this PTransform.\n\n Example usage:\n eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])\n eval_shared_model = tfma.default_eval_shared_model(\n eval_saved_model_path=model_location, eval_config=eval_config)\n with beam.Pipeline(runner=...) as p:\n _ = (p\n | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)\n | 'ExtractEvaluateAndWriteResults' >>\n tfma.ExtractEvaluateAndWriteResults(\n eval_shared_model=eval_shared_model,\n eval_config=eval_config,\n ...))\n result = tfma.load_eval_result(output_path=output_path)\n tfma.view.render_slicing_metrics(result)\n\n Note that the exact serialization format is an internal implementation detail\n and subject to change. Users should only use the TFMA functions to write and\n read the results.\n\n Args:\n examples: PCollection of input examples. Can be any format the model accepts\n (e.g. string containing CSV row, TensorFlow.Example, etc).\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). Only\n required if needed by default extractors, evaluators, or writers and for\n display purposes of the model path.\n eval_config: Eval config.\n extractors: Optional list of Extractors to apply to Extracts. Typically\n these will be added by calling the default_extractors function. If no\n extractors are provided, default_extractors (non-materialized) will be\n used.\n evaluators: Optional list of Evaluators for evaluating Extracts. Typically\n these will be added by calling the default_evaluators function. If no\n evaluators are provided, default_evaluators will be used.\n writers: Optional list of Writers for writing Evaluation output. Typically\n these will be added by calling the default_writers function. If no writers\n are provided, default_writers will be used.\n output_path: Path to output metrics and plots results.\n display_only_data_location: Optional path indicating where the examples were\n read from. This is used only for display purposes - data will not actually\n be read from this path.\n display_only_file_format: Optional format of the examples. 
This is used only\n for display purposes.\n slice_spec: Deprecated (use EvalConfig).\n write_config: Deprecated (use EvalConfig).\n compute_confidence_intervals: Deprecated (use EvalConfig).\n k_anonymization_count: Deprecated (use EvalConfig).\n desired_batch_size: Optional batch size for batching in Predict.\n random_seed_for_testing: Provide for deterministic tests only.\n\n Raises:\n ValueError: If EvalConfig invalid or matching Extractor not found for an\n Evaluator.\n\n Returns:\n PDone.\n " eval_shared_models = eval_shared_model if (not isinstance(eval_shared_model, dict)): eval_shared_models = {'': eval_shared_model} if (eval_config is None): model_specs = [] for (model_name, shared_model) in eval_shared_models.items(): example_weight_key = shared_model.example_weight_key example_weight_keys = {} if (example_weight_key and isinstance(example_weight_key, dict)): example_weight_keys = example_weight_key example_weight_key = '' model_specs.append(config.ModelSpec(name=model_name, example_weight_key=example_weight_key, example_weight_keys=example_weight_keys)) slicing_specs = None if slice_spec: slicing_specs = [s.to_proto() for s in slice_spec] options = config.Options() options.compute_confidence_intervals.value = compute_confidence_intervals options.k_anonymization_count.value = k_anonymization_count if (not write_config): options.disabled_outputs.values.append(_EVAL_CONFIG_FILE) eval_config = config.EvalConfig(model_specs=model_specs, slicing_specs=slicing_specs, options=options) else: eval_config = config.update_eval_config_with_defaults(eval_config) config.verify_eval_config(eval_config) if (not extractors): extractors = default_extractors(eval_config=eval_config, eval_shared_model=eval_shared_model, materialize=False, desired_batch_size=desired_batch_size) if (not evaluators): evaluators = default_evaluators(eval_config=eval_config, eval_shared_model=eval_shared_model, random_seed_for_testing=random_seed_for_testing) for v in evaluators: evaluator.verify_evaluator(v, extractors) if (not writers): writers = default_writers(output_path=output_path, eval_shared_model=eval_shared_model) _ = (((examples | ('InputsToExtracts' >> InputsToExtracts())) | ('ExtractAndEvaluate' >> ExtractAndEvaluate(extractors=extractors, evaluators=evaluators))) | ('WriteResults' >> WriteResults(writers=writers))) if (_EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values): data_location = '<user provided PCollection>' if (display_only_data_location is not None): data_location = display_only_data_location file_format = '<unknown>' if (display_only_file_format is not None): file_format = display_only_file_format model_locations = {} for (k, v) in eval_shared_models.items(): model_locations[k] = ('<unknown>' if ((v is None) or (v.model_path is None)) else v.model_path) _ = (examples.pipeline | WriteEvalConfig(eval_config, output_path, data_location, file_format, model_locations)) return beam.pvalue.PDone(examples.pipeline)
-1,977,251,296,438,576,400
PTransform for performing extraction, evaluation, and writing results. Users who want to construct their own Beam pipelines instead of using the lightweight run_model_analysis functions should use this PTransform. Example usage: eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...]) eval_shared_model = tfma.default_eval_shared_model( eval_saved_model_path=model_location, eval_config=eval_config) with beam.Pipeline(runner=...) as p: _ = (p | 'ReadData' >> beam.io.ReadFromTFRecord(data_location) | 'ExtractEvaluateAndWriteResults' >> tfma.ExtractEvaluateAndWriteResults( eval_shared_model=eval_shared_model, eval_config=eval_config, ...)) result = tfma.load_eval_result(output_path=output_path) tfma.view.render_slicing_metrics(result) Note that the exact serialization format is an internal implementation detail and subject to change. Users should only use the TFMA functions to write and read the results. Args: examples: PCollection of input examples. Can be any format the model accepts (e.g. string containing CSV row, TensorFlow.Example, etc). eval_shared_model: Optional shared model (single-model evaluation) or dict of shared models keyed by model name (multi-model evaluation). Only required if needed by default extractors, evaluators, or writers and for display purposes of the model path. eval_config: Eval config. extractors: Optional list of Extractors to apply to Extracts. Typically these will be added by calling the default_extractors function. If no extractors are provided, default_extractors (non-materialized) will be used. evaluators: Optional list of Evaluators for evaluating Extracts. Typically these will be added by calling the default_evaluators function. If no evaluators are provided, default_evaluators will be used. writers: Optional list of Writers for writing Evaluation output. Typically these will be added by calling the default_writers function. If no writers are provided, default_writers will be used. output_path: Path to output metrics and plots results. display_only_data_location: Optional path indicating where the examples were read from. This is used only for display purposes - data will not actually be read from this path. display_only_file_format: Optional format of the examples. This is used only for display purposes. slice_spec: Deprecated (use EvalConfig). write_config: Deprecated (use EvalConfig). compute_confidence_intervals: Deprecated (use EvalConfig). k_anonymization_count: Deprecated (use EvalConfig). desired_batch_size: Optional batch size for batching in Predict. random_seed_for_testing: Provide for deterministic tests only. Raises: ValueError: If EvalConfig invalid or matching Extractor not found for an Evaluator. Returns: PDone.
tensorflow_model_analysis/api/model_eval_lib.py
ExtractEvaluateAndWriteResults
Bobgy/model-analysis
python
@beam.ptransform_fn @beam.typehints.with_output_types(beam.pvalue.PDone) def ExtractEvaluateAndWriteResults(examples: beam.pvalue.PCollection, eval_shared_model: Optional[Union[(types.EvalSharedModel, Dict[(Text, types.EvalSharedModel)])]]=None, eval_config: config.EvalConfig=None, extractors: Optional[List[extractor.Extractor]]=None, evaluators: Optional[List[evaluator.Evaluator]]=None, writers: Optional[List[writer.Writer]]=None, output_path: Optional[Text]=None, display_only_data_location: Optional[Text]=None, display_only_file_format: Optional[Text]=None, slice_spec: Optional[List[slicer.SingleSliceSpec]]=None, write_config: Optional[bool]=True, compute_confidence_intervals: Optional[bool]=False, k_anonymization_count: int=1, desired_batch_size: Optional[int]=None, random_seed_for_testing: Optional[int]=None) -> beam.pvalue.PDone: "PTransform for performing extraction, evaluation, and writing results.\n\n Users who want to construct their own Beam pipelines instead of using the\n lightweight run_model_analysis functions should use this PTransform.\n\n Example usage:\n eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])\n eval_shared_model = tfma.default_eval_shared_model(\n eval_saved_model_path=model_location, eval_config=eval_config)\n with beam.Pipeline(runner=...) as p:\n _ = (p\n | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)\n | 'ExtractEvaluateAndWriteResults' >>\n tfma.ExtractEvaluateAndWriteResults(\n eval_shared_model=eval_shared_model,\n eval_config=eval_config,\n ...))\n result = tfma.load_eval_result(output_path=output_path)\n tfma.view.render_slicing_metrics(result)\n\n Note that the exact serialization format is an internal implementation detail\n and subject to change. Users should only use the TFMA functions to write and\n read the results.\n\n Args:\n examples: PCollection of input examples. Can be any format the model accepts\n (e.g. string containing CSV row, TensorFlow.Example, etc).\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). Only\n required if needed by default extractors, evaluators, or writers and for\n display purposes of the model path.\n eval_config: Eval config.\n extractors: Optional list of Extractors to apply to Extracts. Typically\n these will be added by calling the default_extractors function. If no\n extractors are provided, default_extractors (non-materialized) will be\n used.\n evaluators: Optional list of Evaluators for evaluating Extracts. Typically\n these will be added by calling the default_evaluators function. If no\n evaluators are provided, default_evaluators will be used.\n writers: Optional list of Writers for writing Evaluation output. Typically\n these will be added by calling the default_writers function. If no writers\n are provided, default_writers will be used.\n output_path: Path to output metrics and plots results.\n display_only_data_location: Optional path indicating where the examples were\n read from. This is used only for display purposes - data will not actually\n be read from this path.\n display_only_file_format: Optional format of the examples. 
This is used only\n for display purposes.\n slice_spec: Deprecated (use EvalConfig).\n write_config: Deprecated (use EvalConfig).\n compute_confidence_intervals: Deprecated (use EvalConfig).\n k_anonymization_count: Deprecated (use EvalConfig).\n desired_batch_size: Optional batch size for batching in Predict.\n random_seed_for_testing: Provide for deterministic tests only.\n\n Raises:\n ValueError: If EvalConfig invalid or matching Extractor not found for an\n Evaluator.\n\n Returns:\n PDone.\n " eval_shared_models = eval_shared_model if (not isinstance(eval_shared_model, dict)): eval_shared_models = {'': eval_shared_model} if (eval_config is None): model_specs = [] for (model_name, shared_model) in eval_shared_models.items(): example_weight_key = shared_model.example_weight_key example_weight_keys = {} if (example_weight_key and isinstance(example_weight_key, dict)): example_weight_keys = example_weight_key example_weight_key = '' model_specs.append(config.ModelSpec(name=model_name, example_weight_key=example_weight_key, example_weight_keys=example_weight_keys)) slicing_specs = None if slice_spec: slicing_specs = [s.to_proto() for s in slice_spec] options = config.Options() options.compute_confidence_intervals.value = compute_confidence_intervals options.k_anonymization_count.value = k_anonymization_count if (not write_config): options.disabled_outputs.values.append(_EVAL_CONFIG_FILE) eval_config = config.EvalConfig(model_specs=model_specs, slicing_specs=slicing_specs, options=options) else: eval_config = config.update_eval_config_with_defaults(eval_config) config.verify_eval_config(eval_config) if (not extractors): extractors = default_extractors(eval_config=eval_config, eval_shared_model=eval_shared_model, materialize=False, desired_batch_size=desired_batch_size) if (not evaluators): evaluators = default_evaluators(eval_config=eval_config, eval_shared_model=eval_shared_model, random_seed_for_testing=random_seed_for_testing) for v in evaluators: evaluator.verify_evaluator(v, extractors) if (not writers): writers = default_writers(output_path=output_path, eval_shared_model=eval_shared_model) _ = (((examples | ('InputsToExtracts' >> InputsToExtracts())) | ('ExtractAndEvaluate' >> ExtractAndEvaluate(extractors=extractors, evaluators=evaluators))) | ('WriteResults' >> WriteResults(writers=writers))) if (_EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values): data_location = '<user provided PCollection>' if (display_only_data_location is not None): data_location = display_only_data_location file_format = '<unknown>' if (display_only_file_format is not None): file_format = display_only_file_format model_locations = {} for (k, v) in eval_shared_models.items(): model_locations[k] = ('<unknown>' if ((v is None) or (v.model_path is None)) else v.model_path) _ = (examples.pipeline | WriteEvalConfig(eval_config, output_path, data_location, file_format, model_locations)) return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(eval_shared_model: Optional[Union[(types.EvalSharedModel, Dict[(Text, types.EvalSharedModel)])]]=None, eval_config: config.EvalConfig=None, data_location: Text='', file_format: Text='tfrecords', output_path: Optional[Text]=None, extractors: Optional[List[extractor.Extractor]]=None, evaluators: Optional[List[evaluator.Evaluator]]=None, writers: Optional[List[writer.Writer]]=None, pipeline_options: Optional[Any]=None, slice_spec: Optional[List[slicer.SingleSliceSpec]]=None, write_config: Optional[bool]=True, compute_confidence_intervals: Optional[bool]=False, k_anonymization_count: int=1, desired_batch_size: Optional[int]=None, random_seed_for_testing: Optional[int]=None) -> Union[(EvalResult, EvalResults)]: "Runs TensorFlow model analysis.\n\n It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow\n Eval SavedModel and returns the results.\n\n This is a simplified API for users who want to quickly get something running\n locally. Users who wish to create their own Beam pipelines can use the\n Evaluate PTransform instead.\n\n Args:\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). Only\n required if needed by default extractors, evaluators, or writers.\n eval_config: Eval config.\n data_location: The location of the data files.\n file_format: The file format of the data, can be either 'text' or\n 'tfrecords' for now. By default, 'tfrecords' will be used.\n output_path: The directory to output metrics and results to. If None, we use\n a temporary directory.\n extractors: Optional list of Extractors to apply to Extracts. Typically\n these will be added by calling the default_extractors function. If no\n extractors are provided, default_extractors (non-materialized) will be\n used.\n evaluators: Optional list of Evaluators for evaluating Extracts. Typically\n these will be added by calling the default_evaluators function. If no\n evaluators are provided, default_evaluators will be used.\n writers: Optional list of Writers for writing Evaluation output. Typically\n these will be added by calling the default_writers function. 
If no writers\n are provided, default_writers will be used.\n pipeline_options: Optional arguments to run the Pipeline, for instance\n whether to run directly.\n slice_spec: Deprecated (use EvalConfig).\n write_config: Deprecated (use EvalConfig).\n compute_confidence_intervals: Deprecated (use EvalConfig).\n k_anonymization_count: Deprecated (use EvalConfig).\n desired_batch_size: Optional batch size for batching in Predict.\n random_seed_for_testing: Provide for deterministic tests only.\n\n Returns:\n An EvalResult that can be used with the TFMA visualization functions.\n\n Raises:\n ValueError: If the file_format is unknown to us.\n " _assert_tensorflow_version() if (output_path is None): output_path = tempfile.mkdtemp() if (not tf.io.gfile.exists(output_path)): tf.io.gfile.makedirs(output_path) if (eval_config is None): model_specs = [] eval_shared_models = eval_shared_model if (not isinstance(eval_shared_model, dict)): eval_shared_models = {'': eval_shared_model} for (model_name, shared_model) in eval_shared_models.items(): example_weight_key = shared_model.example_weight_key example_weight_keys = {} if (example_weight_key and isinstance(example_weight_key, dict)): example_weight_keys = example_weight_key example_weight_key = '' model_specs.append(config.ModelSpec(name=model_name, example_weight_key=example_weight_key, example_weight_keys=example_weight_keys)) slicing_specs = None if slice_spec: slicing_specs = [s.to_proto() for s in slice_spec] options = config.Options() options.compute_confidence_intervals.value = compute_confidence_intervals options.k_anonymization_count.value = k_anonymization_count if (not write_config): options.disabled_outputs.values.append(_EVAL_CONFIG_FILE) eval_config = config.EvalConfig(model_specs=model_specs, slicing_specs=slicing_specs, options=options) with beam.Pipeline(options=pipeline_options) as p: if (file_format == 'tfrecords'): data = (p | ('ReadFromTFRecord' >> beam.io.ReadFromTFRecord(file_pattern=data_location, compression_type=beam.io.filesystem.CompressionTypes.AUTO))) elif (file_format == 'text'): data = (p | ('ReadFromText' >> beam.io.textio.ReadFromText(data_location))) else: raise ValueError('unknown file_format: {}'.format(file_format)) _ = (data | ('ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(eval_config=eval_config, eval_shared_model=eval_shared_model, display_only_data_location=data_location, display_only_file_format=file_format, output_path=output_path, extractors=extractors, evaluators=evaluators, writers=writers, desired_batch_size=desired_batch_size, random_seed_for_testing=random_seed_for_testing))) if (len(eval_config.model_specs) <= 1): return load_eval_result(output_path) else: results = [] for spec in eval_config.model_specs: results.append(load_eval_result(output_path, model_name=spec.name)) return EvalResults(results, constants.MODEL_CENTRIC_MODE)
277,492,606,528,607,420
Runs TensorFlow model analysis. It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow Eval SavedModel and returns the results. This is a simplified API for users who want to quickly get something running locally. Users who wish to create their own Beam pipelines can use the Evaluate PTransform instead. Args: eval_shared_model: Optional shared model (single-model evaluation) or dict of shared models keyed by model name (multi-model evaluation). Only required if needed by default extractors, evaluators, or writers. eval_config: Eval config. data_location: The location of the data files. file_format: The file format of the data, can be either 'text' or 'tfrecords' for now. By default, 'tfrecords' will be used. output_path: The directory to output metrics and results to. If None, we use a temporary directory. extractors: Optional list of Extractors to apply to Extracts. Typically these will be added by calling the default_extractors function. If no extractors are provided, default_extractors (non-materialized) will be used. evaluators: Optional list of Evaluators for evaluating Extracts. Typically these will be added by calling the default_evaluators function. If no evaluators are provided, default_evaluators will be used. writers: Optional list of Writers for writing Evaluation output. Typically these will be added by calling the default_writers function. If no writers are provided, default_writers will be used. pipeline_options: Optional arguments to run the Pipeline, for instance whether to run directly. slice_spec: Deprecated (use EvalConfig). write_config: Deprecated (use EvalConfig). compute_confidence_intervals: Deprecated (use EvalConfig). k_anonymization_count: Deprecated (use EvalConfig). desired_batch_size: Optional batch size for batching in Predict. random_seed_for_testing: Provide for deterministic tests only. Returns: An EvalResult that can be used with the TFMA visualization functions. Raises: ValueError: If the file_format is unknown to us.
tensorflow_model_analysis/api/model_eval_lib.py
run_model_analysis
Bobgy/model-analysis
python
def run_model_analysis(eval_shared_model: Optional[Union[(types.EvalSharedModel, Dict[(Text, types.EvalSharedModel)])]]=None, eval_config: config.EvalConfig=None, data_location: Text='', file_format: Text='tfrecords', output_path: Optional[Text]=None, extractors: Optional[List[extractor.Extractor]]=None, evaluators: Optional[List[evaluator.Evaluator]]=None, writers: Optional[List[writer.Writer]]=None, pipeline_options: Optional[Any]=None, slice_spec: Optional[List[slicer.SingleSliceSpec]]=None, write_config: Optional[bool]=True, compute_confidence_intervals: Optional[bool]=False, k_anonymization_count: int=1, desired_batch_size: Optional[int]=None, random_seed_for_testing: Optional[int]=None) -> Union[(EvalResult, EvalResults)]: "Runs TensorFlow model analysis.\n\n It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow\n Eval SavedModel and returns the results.\n\n This is a simplified API for users who want to quickly get something running\n locally. Users who wish to create their own Beam pipelines can use the\n Evaluate PTransform instead.\n\n Args:\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). Only\n required if needed by default extractors, evaluators, or writers.\n eval_config: Eval config.\n data_location: The location of the data files.\n file_format: The file format of the data, can be either 'text' or\n 'tfrecords' for now. By default, 'tfrecords' will be used.\n output_path: The directory to output metrics and results to. If None, we use\n a temporary directory.\n extractors: Optional list of Extractors to apply to Extracts. Typically\n these will be added by calling the default_extractors function. If no\n extractors are provided, default_extractors (non-materialized) will be\n used.\n evaluators: Optional list of Evaluators for evaluating Extracts. Typically\n these will be added by calling the default_evaluators function. If no\n evaluators are provided, default_evaluators will be used.\n writers: Optional list of Writers for writing Evaluation output. Typically\n these will be added by calling the default_writers function. If no writers\n are provided, default_writers will be used.\n pipeline_options: Optional arguments to run the Pipeline, for instance\n whether to run directly.\n slice_spec: Deprecated (use EvalConfig).\n write_config: Deprecated (use EvalConfig).\n compute_confidence_intervals: Deprecated (use EvalConfig).\n k_anonymization_count: Deprecated (use EvalConfig).\n desired_batch_size: Optional batch size for batching in Predict.\n random_seed_for_testing: Provide for deterministic tests only.\n\n Returns:\n An EvalResult that can be used with the TFMA visualization functions.\n\n Raises:\n ValueError: If the file_format is unknown to us.\n " _assert_tensorflow_version() if (output_path is None): output_path = tempfile.mkdtemp() if (not tf.io.gfile.exists(output_path)): tf.io.gfile.makedirs(output_path) if (eval_config is None): model_specs = [] eval_shared_models = eval_shared_model if (not isinstance(eval_shared_model, dict)): eval_shared_models = {'': eval_shared_model} for (model_name, shared_model) in eval_shared_models.items(): example_weight_key = shared_model.example_weight_key example_weight_keys = {} if (example_weight_key and isinstance(example_weight_key, dict)): example_weight_keys = example_weight_key example_weight_key = '' model_specs.append(config.ModelSpec(name=model_name, example_weight_key=example_weight_key, example_weight_keys=example_weight_keys)) slicing_specs = None if slice_spec: slicing_specs = [s.to_proto() for s in slice_spec] options = config.Options() options.compute_confidence_intervals.value = compute_confidence_intervals options.k_anonymization_count.value = k_anonymization_count if (not write_config): options.disabled_outputs.values.append(_EVAL_CONFIG_FILE) eval_config = config.EvalConfig(model_specs=model_specs, slicing_specs=slicing_specs, options=options) with beam.Pipeline(options=pipeline_options) as p: if (file_format == 'tfrecords'): data = (p | ('ReadFromTFRecord' >> beam.io.ReadFromTFRecord(file_pattern=data_location, compression_type=beam.io.filesystem.CompressionTypes.AUTO))) elif (file_format == 'text'): data = (p | ('ReadFromText' >> beam.io.textio.ReadFromText(data_location))) else: raise ValueError('unknown file_format: {}'.format(file_format)) _ = (data | ('ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(eval_config=eval_config, eval_shared_model=eval_shared_model, display_only_data_location=data_location, display_only_file_format=file_format, output_path=output_path, extractors=extractors, evaluators=evaluators, writers=writers, desired_batch_size=desired_batch_size, random_seed_for_testing=random_seed_for_testing))) if (len(eval_config.model_specs) <= 1): return load_eval_result(output_path) else: results = [] for spec in eval_config.model_specs: results.append(load_eval_result(output_path, model_name=spec.name)) return EvalResults(results, constants.MODEL_CENTRIC_MODE)
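A minimal end-to-end sketch (added; paths hypothetical), written with the tfma aliases that this file's own docstrings use:

import tensorflow_model_analysis as tfma

result = tfma.run_model_analysis(
    eval_shared_model=tfma.default_eval_shared_model(
        eval_saved_model_path='/models/eval_saved_model'),
    data_location='/data/eval-*.tfrecord',
    file_format='tfrecords',
    output_path='/tmp/tfma_output')
# result can be passed to the TFMA visualization functions.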
def single_model_analysis(model_location: Text, data_location: Text, output_path: Optional[Text]=None, slice_spec: Optional[List[slicer.SingleSliceSpec]]=None) -> EvalResult: 'Run model analysis for a single model on a single data set.\n\n This is a convenience wrapper around run_model_analysis for a single model\n with a single data set. For more complex use cases, use\n tfma.run_model_analysis.\n\n Args:\n model_location: Path to the exported eval saved model.\n data_location: The location of the data files.\n output_path: The directory to output metrics and results to. If None, we use\n a temporary directory.\n slice_spec: An optional list of tfma.slicer.SingleSliceSpec.\n\n Returns:\n An EvalResult that can be used with the TFMA visualization functions.\n ' if (output_path is None): output_path = tempfile.mkdtemp() if (not tf.io.gfile.exists(output_path)): tf.io.gfile.makedirs(output_path) eval_config = config.EvalConfig(slicing_specs=([s.to_proto() for s in slice_spec] if slice_spec else None)) return run_model_analysis(eval_config=eval_config, eval_shared_model=default_eval_shared_model(eval_saved_model_path=model_location), data_location=data_location, output_path=output_path)
-1,324,916,261,838,926,800
Run model analysis for a single model on a single data set. This is a convenience wrapper around run_model_analysis for a single model with a single data set. For more complex use cases, use tfma.run_model_analysis. Args: model_location: Path to the exported eval saved model. data_location: The location of the data files. output_path: The directory to output metrics and results to. If None, we use a temporary directory. slice_spec: An optional list of tfma.slicer.SingleSliceSpec. Returns: An EvalResult that can be used with the TFMA visualization functions.
tensorflow_model_analysis/api/model_eval_lib.py
single_model_analysis
Bobgy/model-analysis
python
def single_model_analysis(model_location: Text, data_location: Text, output_path: Optional[Text]=None, slice_spec: Optional[List[slicer.SingleSliceSpec]]=None) -> EvalResult: 'Run model analysis for a single model on a single data set.\n\n This is a convenience wrapper around run_model_analysis for a single model\n with a single data set. For more complex use cases, use\n tfma.run_model_analysis.\n\n Args:\n model_location: Path to the exported eval saved model.\n data_location: The location of the data files.\n output_path: The directory to output metrics and results to. If None, we use\n a temporary directory.\n slice_spec: An optional list of tfma.slicer.SingleSliceSpec.\n\n Returns:\n An EvalResult that can be used with the TFMA visualization functions.\n ' if (output_path is None): output_path = tempfile.mkdtemp() if (not tf.io.gfile.exists(output_path)): tf.io.gfile.makedirs(output_path) eval_config = config.EvalConfig(slicing_specs=([s.to_proto() for s in slice_spec] if slice_spec else None)) return run_model_analysis(eval_config=eval_config, eval_shared_model=default_eval_shared_model(eval_saved_model_path=model_location), data_location=data_location, output_path=output_path)
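An illustrative call (added; paths hypothetical); an empty SingleSliceSpec is assumed to denote the overall slice:

result = single_model_analysis(
    model_location='/models/eval_saved_model',
    data_location='/data/eval.tfrecord',
    slice_spec=[slicer.SingleSliceSpec()])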
def multiple_model_analysis(model_locations: List[Text], data_location: Text, **kwargs) -> EvalResults: 'Run model analysis for multiple models on the same data set.\n\n Args:\n model_locations: A list of paths to the exported eval saved models.\n data_location: The location of the data files.\n **kwargs: The args used for evaluation. See tfma.single_model_analysis() for\n details.\n\n Returns:\n A tfma.EvalResults containing all the evaluation results with the same order\n as model_locations.\n ' results = [] for m in model_locations: results.append(single_model_analysis(m, data_location, **kwargs)) return EvalResults(results, constants.MODEL_CENTRIC_MODE)
-8,708,293,839,599,697,000
Run model analysis for multiple models on the same data set. Args: model_locations: A list of paths to the exported eval saved models. data_location: The location of the data files. **kwargs: The args used for evaluation. See tfma.single_model_analysis() for details. Returns: A tfma.EvalResults containing all the evaluation results with the same order as model_locations.
tensorflow_model_analysis/api/model_eval_lib.py
multiple_model_analysis
Bobgy/model-analysis
python
def multiple_model_analysis(model_locations: List[Text], data_location: Text, **kwargs) -> EvalResults: 'Run model analysis for multiple models on the same data set.\n\n Args:\n model_locations: A list of paths to the exported eval saved models.\n data_location: The location of the data files.\n **kwargs: The args used for evaluation. See tfma.single_model_analysis() for\n details.\n\n Returns:\n A tfma.EvalResults containing all the evaluation results with the same order\n as model_locations.\n ' results = [] for m in model_locations: results.append(single_model_analysis(m, data_location, **kwargs)) return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text], **kwargs) -> EvalResults: 'Run model analysis for a single model on multiple data sets.\n\n Args:\n model_location: The location of the exported eval saved model.\n data_locations: A list of data set locations.\n **kwargs: The args used for evaluation. See tfma.run_model_analysis() for\n details.\n\n Returns:\n A tfma.EvalResults containing all the evaluation results with the same order\n as data_locations.\n ' results = [] for d in data_locations: results.append(single_model_analysis(model_location, d, **kwargs)) return EvalResults(results, constants.DATA_CENTRIC_MODE)
8,351,321,221,426,479,000
Run model analysis for a single model on multiple data sets. Args: model_location: The location of the exported eval saved model. data_locations: A list of data set locations. **kwargs: The args used for evaluation. See tfma.run_model_analysis() for details. Returns: A tfma.EvalResults containing all the evaluation results with the same order as data_locations.
tensorflow_model_analysis/api/model_eval_lib.py
multiple_data_analysis
Bobgy/model-analysis
python
def multiple_data_analysis(model_location: Text, data_locations: List[Text], **kwargs) -> EvalResults: 'Run model analysis for a single model on multiple data sets.\n\n Args:\n model_location: The location of the exported eval saved model.\n data_locations: A list of data set locations.\n **kwargs: The args used for evaluation. See tfma.run_model_analysis() for\n details.\n\n Returns:\n A tfma.EvalResults containing all the evaluation results with the same order\n as data_locations.\n ' results = [] for d in data_locations: results.append(single_model_analysis(model_location, d, **kwargs)) return EvalResults(results, constants.DATA_CENTRIC_MODE)
def cross_channel_threshold_detector(multichannel, fs, **kwargs): "\n Parameters\n ----------\n multichannel : np.array\n Msamples x Nchannels audio data\n fs : float >0\n detector_function : function, optional \n The function used to detect the start and end of a signal. \n Any custom detector function can be given, the compulsory inputs\n are audio np.array, sample rate and the function should accept keyword\n arguments (even if it doesn't use them.)\n Defaults to dBrms_detector. \n \n \n Returns\n -------\n all_detections : list\n A list with sublists containing start-stop times of the detections \n in each channel. Each sublist contains the detections in one channel.\n \n Notes\n -----\n For further keyword arguments see the `threshold_detector` function\n \n See Also\n --------\n dBrms_detector\n \n " (samples, channels) = multichannel.shape detector_function = kwargs.get('detector_function', dBrms_detector) print(channels, samples) all_detections = [] for each in tqdm.tqdm(range(channels)): all_detections.append(detector_function(multichannel[:, each], fs, **kwargs)) return all_detections
-593,467,887,461,798,300
Parameters ---------- multichannel : np.array Msamples x Nchannels audio data fs : float >0 detector_function : function, optional The function used to detect the start and end of a signal. Any custom detector function can be given; the compulsory inputs are an audio np.array and the sample rate, and the function should accept keyword arguments (even if it doesn't use them). Defaults to dBrms_detector. Returns ------- all_detections : list A list with sublists containing start-stop times of the detections in each channel. Each sublist contains the detections in one channel. Notes ----- For further keyword arguments see the `threshold_detector` function. See Also -------- dBrms_detector
batracker/signal_detection/detection.py
cross_channel_threshold_detector
thejasvibr/batracker
python
def cross_channel_threshold_detector(multichannel, fs, **kwargs): "\n Parameters\n ----------\n multichannel : np.array\n Msamples x Nchannels audio data\n fs : float >0\n detector_function : function, optional \n The function used to detect the start and end of a signal. \n Any custom detector function can be given, the compulsory inputs\n are audio np.array, sample rate and the function should accept keyword\n arguments (even if it doesn't use them.)\n Defaults to dBrms_detector. \n \n \n Returns\n -------\n all_detections : list\n A list with sublists containing start-stop times of the detections \n in each channel. Each sublist contains the detections in one channel.\n \n Notes\n -----\n For further keyword arguments see the `threshold_detector` function\n \n See Also\n --------\n dBrms_detector\n \n " (samples, channels) = multichannel.shape detector_function = kwargs.get('detector_function', dBrms_detector) print(channels, samples) all_detections = [] for each in tqdm.tqdm(range(channels)): all_detections.append(detector_function(multichannel[:, each], fs, **kwargs)) return all_detections
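An illustrative run on synthetic audio (added; not from the source); dbrms_threshold is forwarded to the default dBrms_detector:

import numpy as np

fs = 48000
audio = np.random.normal(0, 1e-4, (fs, 2))  # 1 s of quiet noise, 2 channels
audio[9600:14400, :] += np.random.normal(0, 0.5, (4800, 2))  # burst at 0.2-0.3 s
detections = cross_channel_threshold_detector(audio, fs, dbrms_threshold=-40)
# detections[ch] holds (start, stop) times in seconds for channel ch,
# here roughly [(0.2, 0.3)] in each channel.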
def dBrms_detector(one_channel, fs, **kwargs): '\n Calculates the dB rms profile of the input audio and \n selects regions which are above the threshold. \n \n Parameters\n ----------\n one_channel\n fs\n dbrms_threshold: float, optional\n Defaults to -50 dB rms\n dbrms_window: float, optional\n The window which is used to calculate the dB rms profile\n in seconds. Defaults to 0.001 seconds.\n \n Returns\n -------\n detections : list with tuples\n Each tuple corresponds to a candidate signal region\n ' if (one_channel.ndim > 1): raise IndexError(f'Input audio must be flattened, and have only 1 dimension. Current audio has {one_channel.ndim} dimensions') dbrms_window = kwargs.get('dbrms_window', 0.001) dbrms_threshold = kwargs.get('dbrms_threshold', (- 50)) window_samples = int((fs * dbrms_window)) dBrms_profile = dB(moving_rms(one_channel, window_size=window_samples)) (labelled, num_regions) = ndimage.label((dBrms_profile > dbrms_threshold)) if (num_regions == 0): print(f'No regions above threshold: {dbrms_threshold} dBrms found in this channel!') regions_above = ndimage.find_objects(labelled.flatten()) regions_above_timestamps = [get_start_stop_times(each, fs) for each in regions_above] return regions_above_timestamps
8,576,930,007,192,636,000
Calculates the dB rms profile of the input audio and selects regions which are above the threshold. Parameters ---------- one_channel fs dbrms_threshold: float, optional Defaults to -50 dB rms dbrms_window: float, optional The window which is used to calculate the dB rms profile in seconds. Defaults to 0.001 seconds. Returns ------- detections : list with tuples Each tuple corresponds to a candidate signal region
batracker/signal_detection/detection.py
dBrms_detector
thejasvibr/batracker
python
def dBrms_detector(one_channel, fs, **kwargs): '\n Calculates the dB rms profile of the input audio and \n selects regions which are above the threshold. \n \n Parameters\n ----------\n one_channel\n fs\n dbrms_threshold: float, optional\n Defaults to -50 dB rms\n dbrms_window: float, optional\n The window which is used to calculate the dB rms profile\n in seconds. Defaults to 0.001 seconds.\n \n Returns\n -------\n detections : list with tuples\n Each tuple corresponds to a candidate signal region\n ' if (one_channel.ndim > 1): raise IndexError(f'Input audio must be flattened, and have only 1 dimension. Current audio has {one_channel.ndim} dimensions') dbrms_window = kwargs.get('dbrms_window', 0.001) dbrms_threshold = kwargs.get('dbrms_threshold', (- 50)) window_samples = int((fs * dbrms_window)) dBrms_profile = dB(moving_rms(one_channel, window_size=window_samples)) (labelled, num_regions) = ndimage.label((dBrms_profile > dbrms_threshold)) if (num_regions == 0): print(f'No regions above threshold: {dbrms_threshold} dBrms found in this channel!') regions_above = ndimage.find_objects(labelled.flatten()) regions_above_timestamps = [get_start_stop_times(each, fs) for each in regions_above] return regions_above_timestamps
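A worked threshold check (added): for a sine of amplitude a, rms = a/sqrt(2), so dB rms = 20*log10(a/sqrt(2)); an amplitude-0.7 tone sits near -6 dB rms and clears the -30 dB threshold used below. dB and moving_rms are assumed to be this module's helpers.

import numpy as np

fs = 96000
audio = np.random.normal(0, 1e-5, fs)  # noise floor around -100 dB rms
t = np.arange(24000, 48000) / fs
audio[24000:48000] += 0.7 * np.sin(2 * np.pi * 30000 * t)  # 30 kHz tone
regions = dBrms_detector(audio, fs, dbrms_threshold=-30, dbrms_window=0.001)
# expect approximately [(0.25, 0.5)].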
def envelope_detector(audio, fs, **kwargs): '\n Generates the Hilbert envelope of the audio. Signals are detected\n wherever the envelope goes beyond a user-defined threshold value.\n \n Two main options are to segment loud signals with reference to dB peak or \n with reference to dB above the floor level. \n \n Parameters\n ----------\n audio\n fs\n \n \n Keyword Arguments\n -----------------\n threshold_db_floor: float, optional\n The threshold for signal detection in dB above the floor level. The 5%ile level of the whole envelope is chosen as\n the floor level. If not specified, then threshold_dbpeak is used to segment signals.\n threshold_dbpeak : float, optional\n The value beyond which a signal is considered to start.\n Used only if threshold_db_floor is not given.\n lowpass_durn: float, optional\n The highest time-resolution of envelope fluctuation to keep. \n This effectively performs a low-pass at 1/lowpass_durn Hz on the raw envelope\n signal. \n \n\n Returns\n -------\n regions_above_timestamps \n \n \n \n ' envelope = np.abs(signal.hilbert(audio)) if (not (kwargs.get('lowpass_durn') is None)): lowpass_durn = kwargs['lowpass_durn'] freq = (1.0 / lowpass_durn) (b, a) = signal.butter(1, (freq / (fs * 0.5)), 'lowpass') envelope = signal.filtfilt(b, a, envelope) if (not (kwargs.get('threshold_db_floor', None) is None)): floor_level = np.percentile((20 * np.log10(envelope)), 5) threshold_db = (floor_level + kwargs['threshold_db_floor']) else: threshold_db = kwargs['threshold_dbpeak'] linear_threshold = (10 ** (threshold_db / 20)) (labelled, num_detections) = ndimage.label((envelope >= linear_threshold)) regions_above = ndimage.find_objects(labelled.flatten()) regions_above_timestamps = [get_start_stop_times(each, fs) for each in regions_above] return regions_above_timestamps
-5,478,177,382,828,583,000
Generates the Hilbert envelope of the audio. Signals are detected wherever the envelope goes beyond a user-defined threshold value. Two main options are to segment loud signals with reference to dB peak or with reference to dB above the floor level. Parameters ---------- audio fs Keyword Arguments ----------------- threshold_db_floor: float, optional The threshold for signal detection in dB above the floor level. The 5%ile level of the whole envelope is chosen as the floor level. If not specified, then threshold_dbpeak is used to segment signals. threshold_dbpeak : float, optional The value beyond which a signal is considered to start. Used only if threshold_db_floor is not given. lowpass_durn: float, optional The highest time-resolution of envelope fluctuation to keep. This effectively performs a low-pass at 1/lowpass_durn Hz on the raw envelope signal. Returns ------- regions_above_timestamps
batracker/signal_detection/detection.py
envelope_detector
thejasvibr/batracker
python
def envelope_detector(audio, fs, **kwargs): '\n Generates the Hilbert envelope of the audio. Signals are detected\n wherever the envelope goes beyond a user-defined threshold value.\n \n Two main options are to segment loud signals with reference to dB peak or \n with reference to dB above the floor level. \n \n Parameters\n ----------\n audio\n fs\n \n \n Keyword Arguments\n -----------------\n threshold_db_floor: float, optional\n The threshold for signal detection in dB above the floor level. The 5%ile level of the whole envelope is chosen as\n the floor level. If not specified, then threshold_dbpeak is used to segment signals.\n threshold_dbpeak : float, optional\n The value beyond which a signal is considered to start.\n Used only if threshold_db_floor is not given.\n lowpass_durn: float, optional\n The highest time-resolution of envelope fluctuation to keep. \n This effectively performs a low-pass at 1/lowpass_durn Hz on the raw envelope\n signal. \n \n\n Returns\n -------\n regions_above_timestamps \n \n \n \n ' envelope = np.abs(signal.hilbert(audio)) if (not (kwargs.get('lowpass_durn') is None)): lowpass_durn = kwargs['lowpass_durn'] freq = (1.0 / lowpass_durn) (b, a) = signal.butter(1, (freq / (fs * 0.5)), 'lowpass') envelope = signal.filtfilt(b, a, envelope) if (not (kwargs.get('threshold_db_floor', None) is None)): floor_level = np.percentile((20 * np.log10(envelope)), 5) threshold_db = (floor_level + kwargs['threshold_db_floor']) else: threshold_db = kwargs['threshold_dbpeak'] linear_threshold = (10 ** (threshold_db / 20)) (labelled, num_detections) = ndimage.label((envelope >= linear_threshold)) regions_above = ndimage.find_objects(labelled.flatten()) regions_above_timestamps = [get_start_stop_times(each, fs) for each in regions_above] return regions_above_timestamps
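A small conversion example (added): the code maps the dB threshold to linear amplitude as 10**(dB/20), so threshold_dbpeak=-20 corresponds to an envelope level of 0.1.

import numpy as np

fs = 48000
t = np.arange(fs) / fs
audio = np.zeros(fs)
audio[12000:24000] = 0.5 * np.sin(2 * np.pi * 8000 * t[12000:24000])  # burst
regions = envelope_detector(audio, fs, threshold_dbpeak=-20)
# The burst envelope (~0.5, i.e. -6 dB) clears the 0.1 linear threshold,
# so roughly [(0.25, 0.5)] is returned.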
def moving_rms(X, **kwargs):
    '''Calculates moving rms of a signal with a given window size.
    Outputs np.array of the *same* size as X. The rms values of the last
    few samples, which are less than window_size away from the end and so
    lack a full window, are set to np.nan.

    Parameters
    ----------
    X : np.array
        Signal of interest.
    window_size : int, optional
        Defaults to 125 samples.

    Returns
    -------
    all_rms : np.array
        Moving rms of the signal.
    '''
    window_size = kwargs.get('window_size', 125)
    starts = np.arange(0, X.size)
    stops = starts + window_size
    valid = stops < X.size
    valid_starts = np.int32(starts[valid])
    valid_stops = np.int32(stops[valid])
    # initialise the output as a 1D array the same size as X, filled with
    # NaN; entries without a full window at the end stay NaN
    all_rms = np.full(X.size, np.nan)
    for i, (start, stop) in enumerate(zip(valid_starts, valid_stops)):
        all_rms[i] = rms(X[start:stop])
    return all_rms
4,701,221,638,837,301,000
Calculates moving rms of a signal with a given window size.
Outputs np.array of the *same* size as X. The rms values of the last
few samples, which are less than window_size away from the end and so
lack a full window, are set to np.nan.

Parameters
----------
X : np.array
    Signal of interest.
window_size : int, optional
    Defaults to 125 samples.

Returns
-------
all_rms : np.array
    Moving rms of the signal.
batracker/signal_detection/detection.py
moving_rms
thejasvibr/batracker
python
def moving_rms(X, **kwargs):
    window_size = kwargs.get('window_size', 125)
    starts = np.arange(0, X.size)
    stops = starts + window_size
    valid = stops < X.size
    valid_starts = np.int32(starts[valid])
    valid_stops = np.int32(stops[valid])
    # initialise the output as a 1D array the same size as X, filled with
    # NaN; entries without a full window at the end stay NaN
    all_rms = np.full(X.size, np.nan)
    for i, (start, stop) in enumerate(zip(valid_starts, valid_stops)):
        all_rms[i] = rms(X[start:stop])
    return all_rms
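A short usage sketch for moving_rms. It assumes the rms helper that batracker defines elsewhere computes the root-mean-square of a window; a minimal stand-in is included here so the snippet runs on its own.

import numpy as np

def rms(x):
    # stand-in for the rms helper defined elsewhere in batracker
    return np.sqrt(np.mean(x ** 2))

noise = np.random.normal(0, 0.1, 10000)
smoothed = moving_rms(noise, window_size=250)
print(smoothed.shape)                   # (10000,): same size as the input
print(np.isnan(smoothed[-250:]).all())  # True: trailing samples lack a full window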
def parse_fn(line_words, line_tags):
    '''Encodes words into bytes for tensor

    :param line_words: one line with words (aka sentences) with space between each word/token
    :param line_tags: one line of tags (one tag per word in line_words)
    :return: (list of encoded words, len(words)), list of encoded tags
    '''
    words = [w.encode() for w in line_words.strip().split()]
    tags = [t.encode() for t in line_tags.strip().split()]
    assert len(words) == len(tags), \
        'Number of words {} and number of tags {} must be the same'.format(len(words), len(tags))
    return (words, len(words)), tags
-6,792,739,573,695,873,000
Encodes words into bytes for tensor

:param line_words: one line with words (aka sentences) with space between each word/token
:param line_tags: one line of tags (one tag per word in line_words)
:return: (list of encoded words, len(words)), list of encoded tags
src/model/lstm_crf/main.py
parse_fn
vikasbahirwani/SequenceTagging
python
def parse_fn(line_words, line_tags):
    words = [w.encode() for w in line_words.strip().split()]
    tags = [t.encode() for t in line_tags.strip().split()]
    assert len(words) == len(tags), \
        'Number of words {} and number of tags {} must be the same'.format(len(words), len(tags))
    return (words, len(words)), tags
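A quick illustration of parse_fn with a made-up sentence. The IOB-style NER tags are an assumption about the repo's tag scheme; any whitespace-separated tags of matching length would work.

line_words = 'John lives in New York'
line_tags = 'B-PER O O B-LOC I-LOC'

(words, n_words), tags = parse_fn(line_words, line_tags)
print(words)    # [b'John', b'lives', b'in', b'New', b'York']
print(n_words)  # 5
print(tags)     # [b'B-PER', b'O', b'O', b'B-LOC', b'I-LOC']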