edited_code stringlengths 17 978k | original_code stringlengths 17 978k |
|---|---|
"""Mark channels in an existing BIDS dataset as "bad".
example usage:
$ mne_bids mark_bad_channels --ch_name="MEG 0112" --description="noisy" \
--ch_name="MEG 0131" --description="flat" \
--subject_id=01 --task=experiment --session=test \
--bids_root=bids_root --overwrite
"""
# Authors: Richard Höchenberger <richard.hoechenberger@gmail.com>
#
# License: BSD-3-Clause
from mne.utils import logger
import mne_bids
from mne_bids.config import reader
from mne_bids import BIDSPath, mark_bad_channels
def run():
    """Run the mark_bad_channels command.

    Parses the command-line options, finds every matching recording in the
    BIDS dataset, and marks the requested channels as "bad" in each one.
    """
    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__, usage="usage: %prog options args",
                           prog_prefix='mne_bids',
                           version=mne_bids.__version__)

    parser.add_option('--ch_name', dest='ch_names', action='append',
                      default=[],
                      help='The names of the bad channels. If multiple '
                           'channels are bad, pass the --ch_name parameter '
                           'multiple times.')
    parser.add_option('--description', dest='descriptions', action='append',
                      default=[],
                      help='Descriptions as to why the channels are bad. '
                           'Must match the number of bad channels provided. '
                           'Pass multiple times to supply more than one '
                           'value in that case.')
    parser.add_option('--bids_root', dest='bids_root',
                      help='The path of the folder containing the BIDS '
                           'dataset')
    parser.add_option('--subject_id', dest='subject',
                      help='Subject name')
    parser.add_option('--session_id', dest='session',
                      help='Session name')
    parser.add_option('--task', dest='task',
                      help='Task name')
    parser.add_option('--acq', dest='acquisition',
                      help='Acquisition parameter')
    parser.add_option('--run', dest='run',
                      help='Run number')
    parser.add_option('--proc', dest='processing',
                      help='Processing label.')
    parser.add_option('--rec', dest='recording',
                      help='Recording name')
    parser.add_option('--type', dest='datatype',
                      help='Recording data type, e.g. meg, ieeg or eeg')
    parser.add_option('--suffix', dest='suffix',
                      help='The filename suffix, i.e. the last part before '
                           'the extension')
    parser.add_option('--ext', dest='extension',
                      help='The filename extension, including the leading '
                           'period, e.g. .fif')
    parser.add_option('--overwrite', dest='overwrite', action='store_true',
                      help='Replace existing channel status entries')
    # Typo fix: "Whether do generate" -> "Whether to generate".
    parser.add_option('--verbose', dest='verbose', action='store_true',
                      help='Whether to generate additional diagnostic output')

    opt, args = parser.parse_args()
    if args:
        parser.print_help()
        parser.error(f'Please do not specify arguments without flags. '
                     f'Got: {args}.\n')

    if opt.bids_root is None:
        parser.print_help()
        parser.error('You must specify bids_root')
    # NOTE(review): with ``default=[]`` above, ``opt.ch_names`` can never be
    # None, so this guard is effectively dead; kept for safety.
    if opt.ch_names is None:
        parser.print_help()
        parser.error('You must specify some --ch_name parameters.')

    # A single empty --ch_name means "no channels"; presumably combined with
    # --overwrite this resets the existing bad-channel status — confirm
    # against mark_bad_channels' documentation.
    ch_names = [] if opt.ch_names == [''] else opt.ch_names

    bids_path = BIDSPath(subject=opt.subject, session=opt.session,
                         task=opt.task, acquisition=opt.acquisition,
                         run=opt.run, processing=opt.processing,
                         recording=opt.recording, datatype=opt.datatype,
                         suffix=opt.suffix, extension=opt.extension,
                         root=opt.bids_root)

    bids_paths = bids_path.match()
    # Only keep data we can actually read & write.
    allowed_extensions = list(reader.keys())
    bids_paths = [p for p in bids_paths
                  if p.extension in allowed_extensions]

    if not bids_paths:
        logger.info('No matching files found. Please consider using a less '
                    'restrictive set of entities to broaden the search.')
        return  # XXX should be return with an error code?

    # Bug fix: the original nested single quotes inside a single-quoted
    # f-string ({', '.join(...)}) — a SyntaxError before Python 3.12
    # (PEP 701). Use double quotes for the separator instead.
    logger.info(f'Marking channels {", ".join(ch_names)} as bad in '
                f'{len(bids_paths)} recording(s) …')
    for bids_path in bids_paths:
        logger.info(f'Processing: {bids_path.basename}')
        mark_bad_channels(ch_names=ch_names, descriptions=opt.descriptions,
                          bids_path=bids_path, overwrite=opt.overwrite,
                          verbose=opt.verbose)


if __name__ == '__main__':
    run()
| """Mark channels in an existing BIDS dataset as "bad".
example usage:
$ mne_bids mark_bad_channels --ch_name="MEG 0112" --description="noisy" \
--ch_name="MEG 0131" --description="flat" \
--subject_id=01 --task=experiment --session=test \
--bids_root=bids_root --overwrite
"""
# Authors: Richard Höchenberger <richard.hoechenberger@gmail.com>
#
# License: BSD-3-Clause
from mne.utils import logger
import mne_bids
from mne_bids.config import reader
from mne_bids import BIDSPath, mark_bad_channels
def run():
    """Run the mark_bad_channels command.

    Parses the command-line options, finds every matching recording in the
    BIDS dataset, and marks the requested channels as "bad" in each one.
    """
    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__, usage="usage: %prog options args",
                           prog_prefix='mne_bids',
                           version=mne_bids.__version__)

    # Channel selection: --ch_name / --description may each be repeated;
    # mark_bad_channels pairs them up positionally.
    parser.add_option('--ch_name', dest='ch_names', action='append',
                      default=[],
                      help='The names of the bad channels. If multiple '
                      'channels are bad, pass the --ch_name parameter '
                      'multiple times.')
    parser.add_option('--description', dest='descriptions', action='append',
                      default=[],
                      help='Descriptions as to why the channels are bad. '
                      'Must match the number of bad channels provided. '
                      'Pass multiple times to supply more than one '
                      'value in that case.')
    # BIDS entities used to narrow the search for recordings.
    parser.add_option('--bids_root', dest='bids_root',
                      help='The path of the folder containing the BIDS '
                      'dataset')
    parser.add_option('--subject_id', dest='subject',
                      help=('Subject name'))
    parser.add_option('--session_id', dest='session',
                      help='Session name')
    parser.add_option('--task', dest='task',
                      help='Task name')
    parser.add_option('--acq', dest='acquisition',
                      help='Acquisition parameter')
    parser.add_option('--run', dest='run',
                      help='Run number')
    parser.add_option('--proc', dest='processing',
                      help='Processing label.')
    parser.add_option('--rec', dest='recording',
                      help='Recording name')
    parser.add_option('--type', dest='datatype',
                      help='Recording data type, e.g. meg, ieeg or eeg')
    parser.add_option('--suffix', dest='suffix',
                      help='The filename suffix, i.e. the last part before '
                      'the extension')
    parser.add_option('--ext', dest='extension',
                      help='The filename extension, including the leading '
                      'period, e.g. .fif')
    parser.add_option('--overwrite', dest='overwrite', action='store_true',
                      help='Replace existing channel status entries')
    parser.add_option('--verbose', dest='verbose', action='store_true',
                      help='Whether do generate additional diagnostic output')

    opt, args = parser.parse_args()
    # Positional arguments are not accepted; everything must be a flag.
    if args:
        parser.print_help()
        parser.error(f'Please do not specify arguments without flags. '
                     f'Got: {args}.\n')

    if opt.bids_root is None:
        parser.print_help()
        parser.error('You must specify bids_root')
    # NOTE(review): with ``default=[]`` above this can never be None — the
    # guard is effectively dead code.
    if opt.ch_names is None:
        parser.print_help()
        parser.error('You must specify some --ch_name parameters.')

    # A single empty --ch_name collapses to "no channels"; presumably used
    # with --overwrite to reset the bad-channel status — confirm against
    # mark_bad_channels' documentation.
    ch_names = [] if opt.ch_names == [''] else opt.ch_names

    bids_path = BIDSPath(subject=opt.subject, session=opt.session,
                         task=opt.task, acquisition=opt.acquisition,
                         run=opt.run, processing=opt.processing,
                         recording=opt.recording, datatype=opt.datatype,
                         suffix=opt.suffix, extension=opt.extension,
                         root=opt.bids_root)

    bids_paths = bids_path.match()
    # Only keep data we can actually read & write.
    allowed_extensions = list(reader.keys())
    bids_paths = [p for p in bids_paths
                  if p.extension in allowed_extensions]

    if not bids_paths:
        logger.info('No matching files found. Please consider using a less '
                    'restrictive set of entities to broaden the search.')
        return  # XXX should be return with an error code?

    logger.info(f'Marking channels {", ".join(ch_names)} as bad in '
                f'{len(bids_paths)} recording(s) …')
    for bids_path in bids_paths:
        logger.info(f'Processing: {bids_path.basename}')
        mark_bad_channels(ch_names=ch_names, descriptions=opt.descriptions,
                          bids_path=bids_path, overwrite=opt.overwrite,
                          verbose=opt.verbose)


if __name__ == '__main__':
    run()
|
# Alacritty config options
# Antonio Sarosi
# December 10, 2020
from typing import List, Dict, Any
from collections.abc import Mapping
from pathlib import Path
from sys import stderr
import yaml
import log
class ConfigError(Exception):
    """Error raised when an Alacritty configuration change cannot be applied."""

    def __init__(self, message: str = 'Error applying configuration'):
        # Delegate to Exception so str(exc) yields the message unchanged.
        super().__init__(message)
class Alacritty:
    """Load, mutate and save the user's Alacritty configuration.

    All state lives in ``self.config`` (the parsed ``alacritty.yml``);
    ``save()`` must be called to persist changes.
    """

    def __init__(self):
        """Locate ``~/.config/alacritty/alacritty.yml``, creating the file
        (but not the directory) if it is missing.

        Raises
        ------
        ConfigError
            If the Alacritty config directory does not exist.
        """
        self.base_path = Path().home() / '.config' / 'alacritty'
        if not self.base_path.exists():
            raise ConfigError(f'Config directory not found: {self.base_path}')
        self.config_file = self.base_path / 'alacritty.yml'
        if not self.config_file.is_file():
            log.warn('Config file not found')
            self.config_file.touch()
            print('Created config file =>', end=' ', file=stderr)
            log.color_print(self.config_file, log.Color.BLUE, file=stderr)
        self.config = self._load(self.config_file)
        if self.config is None:
            self.config = {}
            log.warn('Alacritty config file was empty')
        # Auxiliary resources, created lazily by _resource_path(). The
        # lambdas read back through self.resources so the paths stay
        # authoritative.
        self.resources = {
            'themes': {
                'type': 'Themes directory',
                'path': self.base_path / 'themes',
                'exists': lambda: self.resources['themes']['path'].is_dir(),
                'create': lambda: self.resources['themes']['path'].mkdir()
            },
            'fonts': {
                'type': 'Fonts file',
                'path': self.base_path / 'fonts.yaml',
                'exists': lambda: self.resources['fonts']['path'].is_file(),
                'create': lambda: self.resources['fonts']['path'].touch()
            }
        }

    def _load(self, yaml_file: Path) -> Dict[str, Any]:
        """Parse *yaml_file* and return its content (None for empty files).

        Raises
        ------
        ConfigError
            If the file is not valid YAML.
        """
        with open(yaml_file) as f:
            try:
                return yaml.load(f, Loader=yaml.FullLoader)
            except yaml.YAMLError as e:
                # NOTE(review): assumes the error carries a ``problem_mark``
                # (true for parser/scanner errors); confirm for other
                # YAMLError subclasses.
                raise ConfigError((
                    'YAML error at parsing file "{0}", '
                    'at line {1.problem_mark.line}, '
                    'column {1.problem_mark.column}:\n'
                    '{1.problem} {1.context}'
                ).format(yaml_file.name, e)) from e

    def _resource_path(self, resource: str) -> Path:
        """Return the path of *resource* ('themes' or 'fonts'), creating
        the underlying directory/file if it does not exist yet."""
        if resource not in self.resources:
            raise ConfigError(f'Path for resource "{resource}" not set')
        # Bug fix: the original rebound the ``resource`` parameter to the
        # dict and interpolated it with nested single quotes inside a
        # single-quoted f-string — a SyntaxError before Python 3.12
        # (PEP 701). Use a distinct local name and double quotes.
        res = self.resources[resource]
        if not res['exists']():
            log.warn(f'{res["type"]} not found')
            res['create']()
            print('Created resource =>', end=' ', file=stderr)
            log.color_print(res['path'], log.Color.BLUE, file=stderr)
        return res['path']

    def save(self):
        """Write the in-memory configuration back to ``alacritty.yml``."""
        with open(self.config_file, 'w') as f:
            yaml.dump(self.config, f)

    def apply(self, **config):
        """Apply every recognized option in *config*.

        Errors from individual actions are logged and counted; a single
        ConfigError summarizing the count is raised at the end.
        """
        # ``config`` is a kwargs dict and can never be None; an empty dict
        # is the only "no options" case.
        if not config:
            raise ConfigError('No options provided')
        actions = {
            'theme': self.change_theme,
            'font': self.change_font,
            'size': self.change_font_size,
            'opacity': self.change_opacity,
            'padding': self.change_padding,
            'offset': self.change_font_offset,
            'list': self.list,
            'print': self.print,
        }
        errors_found = 0
        for param, action in actions.items():
            if param in config:
                try:
                    action(config[param])
                except ConfigError as e:
                    log.err(e)
                    errors_found += 1
        if errors_found > 0:
            raise ConfigError(f'\n{errors_found} error(s) found')

    def change_theme(self, theme: str):
        """Replace the ``colors`` section with the named theme's colors.

        Missing color properties only produce warnings; the theme is still
        applied.
        """
        themes_directory = self._resource_path('themes')
        theme_file = themes_directory / f'{theme}.yaml'
        if not theme_file.is_file():
            raise ConfigError(f'Theme "{theme}" not found')
        theme_yaml = self._load(theme_file)
        if theme_yaml is None:
            raise ConfigError(f'File {theme_file.name} is empty')
        if 'colors' not in theme_yaml:
            raise ConfigError(f'{theme_file} does not contain color config')

        expected_colors = [
            'black', 'red', 'green', 'yellow',
            'blue', 'magenta', 'cyan', 'white',
        ]
        expected_props = {
            'primary': ['background', 'foreground'],
            'normal': expected_colors,
            'bright': expected_colors,
        }
        # Warn (but do not fail) about any missing color definitions.
        for k in expected_props:
            if k not in theme_yaml['colors']:
                log.warn(f'Missing "colors:{k}" for theme "{theme}"')
                continue
            for v in expected_props[k]:
                if v not in theme_yaml['colors'][k]:
                    log.warn(f'Missing "colors:{k}:{v}" for theme "{theme}"')

        self.config['colors'] = theme_yaml['colors']
        log.ok(f'Theme {theme} applied')

    def change_font_size(self, size: float):
        """Set ``font.size``; *size* must be strictly positive."""
        if size <= 0:
            raise ConfigError('Font size cannot be negative or zero')
        if 'font' not in self.config:
            self.config['font'] = {}
            log.warn('"font" prop config was not present in alacritty.yml')
        self.config['font']['size'] = size
        log.ok(f'Font size set to {size:.1f}')

    def change_font(self, font: str):
        """Set the font family for normal/bold/italic from ``fonts.yaml``.

        An entry that is a plain string is expanded to use the same family
        for all three font types.
        """
        if 'font' not in self.config:
            self.config['font'] = {}
            log.warn('"font" prop was not present in alacritty.yml')
        fonts_file = self._resource_path('fonts')
        fonts = self._load(fonts_file)
        if fonts is None:
            raise ConfigError(f'File "{fonts_file}" is empty')
        if 'fonts' not in fonts:
            raise ConfigError(f'No font config found in "{fonts_file}"')
        fonts = fonts['fonts']
        if font not in fonts:
            raise ConfigError(f'Config for font "{font}" not found')

        font_types = ['normal', 'bold', 'italic']
        if isinstance(fonts[font], str):
            # Shorthand: a bare family name applies to every font type.
            font_name = fonts[font]
            fonts[font] = {t: font_name for t in font_types}
        if not isinstance(fonts[font], Mapping):
            raise ConfigError(f'Font "{font}" has wrong format')

        for t in font_types:
            if t not in fonts[font]:
                raise ConfigError(f'Font "{font}" does not have "{t}" property')
            if t not in self.config['font']:
                # Placeholder dict; 'family' is overwritten right below.
                self.config['font'][t] = {'family': 'tmp'}
            self.config['font'][t]['family'] = fonts[font][t]

        log.ok(f'Font {font} applied')

    def change_opacity(self, opacity: float):
        """Set ``background_opacity`` (0.0 fully transparent, 1.0 opaque)."""
        if opacity < 0.0 or opacity > 1.0:
            raise ConfigError('Opacity should be between 0.0 and 1.0')
        self.config['background_opacity'] = opacity
        log.ok(f'Opacity set to {opacity:.2f}')

    def change_padding(self, padding: List[int]):
        """Set ``window.padding`` from a two-element ``[x, y]`` list."""
        if len(padding) != 2:
            raise ConfigError('Padding should only have an x and y value')
        x, y = padding
        if 'window' not in self.config:
            self.config['window'] = {}
            log.warn('"window" prop was not present in config file')
        if 'padding' not in self.config['window']:
            self.config['window']['padding'] = {}
            log.warn('"padding" prop was not present in config file')
        self.config['window']['padding']['x'] = x
        self.config['window']['padding']['y'] = y
        log.ok(f'Padding set to x: {x}, y: {y}')

    def change_font_offset(self, offset: List[int]):
        """Set ``font.offset`` (extra spacing) from an ``[x, y]`` list."""
        if len(offset) != 2:
            raise ConfigError('Wrong offset config, should be [x, y]')
        x, y = offset
        if 'font' not in self.config:
            self.config['font'] = {}
        if 'offset' not in self.config['font']:
            log.warn('"offset" prop was not set')
            self.config['font']['offset'] = {}
        self.config['font']['offset']['x'] = x
        self.config['font']['offset']['y'] = y
        log.ok(f'Offset set to x: {x}, y: {y}')

    def list(self, to_be_listed: str):
        """Print available themes and/or fonts ('themes', 'fonts', 'all')."""
        def list_themes():
            themes_dir = self._resource_path('themes')
            themes = [file.name.split('.')[0] for file in themes_dir.iterdir()]
            if len(themes) < 1:
                log.warn('Cannot list themes, themes directory is empty')
            else:
                log.color_print('Themes:', log.Color.BOLD)
                for theme in themes:
                    log.color_print(f'    {theme}', log.Color.BLUE)

        def list_fonts():
            fonts = self._load(self._resource_path('fonts'))
            if fonts is None or 'fonts' not in fonts:
                log.warn('Cannot list fonts, no fonts found')
            else:
                log.color_print('Fonts:', log.Color.BOLD)
                for font in fonts['fonts']:
                    log.color_print(f'    {font}', log.Color.PURPLE)

        options = {
            'themes': list_themes,
            'fonts': list_fonts,
        }
        if to_be_listed == 'all':
            for list_function in options.values():
                list_function()
        else:
            if to_be_listed not in options:
                raise ConfigError(f'Cannot list {to_be_listed}, unknown option')
            options[to_be_listed]()

    def print(self, to_be_printed: List[str]):
        """Dump config/fonts/themes as YAML; defaults to the main config.

        Any argument that is not 'config' or 'fonts' is treated as a theme
        name.
        """
        def print_config():
            log.color_print(self.config_file, log.Color.BOLD)
            print(yaml.dump(self.config))

        def print_fonts():
            fonts_file = self._resource_path('fonts')
            log.color_print(fonts_file, log.Color.BOLD)
            print(yaml.dump(self._load(fonts_file)))

        def print_theme(theme: str):
            themes_dir = self._resource_path('themes')
            theme_file = themes_dir / f'{theme}.yaml'
            if not theme_file.is_file():
                raise ConfigError(
                    f'Failed printing "{theme}" theme, "{theme_file}" not found'
                )
            log.color_print(theme_file, log.Color.BOLD)
            print(yaml.dump(self._load(theme_file)))

        options = {
            'fonts': print_fonts,
            'config': print_config,
        }
        if not to_be_printed:
            to_be_printed.append('config')
        for param in to_be_printed:
            if param not in options:
                print_theme(param)
            else:
                options[param]()
| # Alacritty config options
# Antonio Sarosi
# December 10, 2020
from typing import List, Dict, Any
from collections.abc import Mapping
from pathlib import Path
from sys import stderr
import yaml
import log
class ConfigError(Exception):
    """Signals a failure while reading or applying the configuration."""

    DEFAULT_MESSAGE = 'Error applying configuration'

    def __init__(self, message=DEFAULT_MESSAGE):
        super().__init__(message)
class Alacritty:
    """Load, mutate and save the user's Alacritty configuration.

    All state lives in ``self.config`` (the parsed ``alacritty.yml``);
    ``save()`` must be called to persist changes to disk.
    """

    def __init__(self):
        # Raises ConfigError if ~/.config/alacritty does not exist; creates
        # an empty alacritty.yml if the directory exists but the file doesn't.
        self.base_path = Path().home() / '.config' / 'alacritty'
        if not self.base_path.exists():
            raise ConfigError(f'Config directory not found: {self.base_path}')
        self.config_file = self.base_path / 'alacritty.yml'
        if not self.config_file.is_file():
            log.warn('Config file not found')
            self.config_file.touch()
            print('Created config file =>', end=' ', file=stderr)
            log.color_print(self.config_file, log.Color.BLUE, file=stderr)
        self.config = self._load(self.config_file)
        if self.config is None:
            self.config = {}
            log.warn('Alacritty config file was empty')
        # Auxiliary resources created lazily by _resource_path(); the
        # lambdas read back through self.resources so the paths stay
        # authoritative.
        self.resources = {
            'themes': {
                'type': 'Themes directory',
                'path': self.base_path / 'themes',
                'exists': lambda: self.resources['themes']['path'].is_dir(),
                'create': lambda: self.resources['themes']['path'].mkdir()
            },
            'fonts': {
                'type': 'Fonts file',
                'path': self.base_path / 'fonts.yaml',
                'exists': lambda: self.resources['fonts']['path'].is_file(),
                'create': lambda: self.resources['fonts']['path'].touch()
            }
        }

    def _load(self, yaml_file: Path) -> Dict[str, Any]:
        """Parse *yaml_file*, returning its content (None for empty files)
        or raising ConfigError on invalid YAML."""
        with open(yaml_file) as f:
            try:
                return yaml.load(f, Loader=yaml.FullLoader)
            except yaml.YAMLError as e:
                # NOTE(review): assumes ``e`` carries a ``problem_mark`` —
                # true for parser/scanner errors; confirm for other
                # YAMLError subclasses.
                raise ConfigError((
                    'YAML error at parsing file "{0}", '
                    'at line {1.problem_mark.line}, '
                    'column {1.problem_mark.column}:\n'
                    '{1.problem} {1.context}'
                ).format(yaml_file.name, e))

    def _resource_path(self, resource: str) -> Path:
        """Return the path of *resource* ('themes' or 'fonts'), creating the
        underlying directory/file if missing."""
        if resource not in self.resources:
            raise ConfigError(f'Path for resource "{resource}" not set')
        # Note: the parameter name is rebound here from str to the dict.
        resource = self.resources[resource]
        if not resource['exists']():
            log.warn(f'{resource["type"]} not found')
            resource['create']()
            print('Created resource =>', end=' ', file=stderr)
            log.color_print(resource['path'], log.Color.BLUE, file=stderr)
        return resource['path']

    def save(self):
        """Write the in-memory configuration back to ``alacritty.yml``."""
        with open(self.config_file, 'w') as f:
            yaml.dump(self.config, f)

    def apply(self, **config):
        """Apply every recognized option in *config*; individual failures
        are logged and a single summary ConfigError is raised at the end."""
        # ``config`` is a kwargs dict, so the ``is None`` branch can never
        # fire; only the empty-dict check matters.
        if config is None or len(config) < 1:
            raise ConfigError('No options provided')
        actions = {
            'theme': self.change_theme,
            'font': self.change_font,
            'size': self.change_font_size,
            'opacity': self.change_opacity,
            'padding': self.change_padding,
            'offset': self.change_font_offset,
            'list': self.list,
            'print': self.print,
        }
        errors_found = 0
        for param, action in actions.items():
            if param in config:
                try:
                    action(config[param])
                except ConfigError as e:
                    log.err(e)
                    errors_found += 1
        if errors_found > 0:
            raise ConfigError(f'\n{errors_found} error(s) found')

    def change_theme(self, theme: str):
        """Replace the ``colors`` section with the named theme's colors;
        missing color properties only warn."""
        themes_directory = self._resource_path('themes')
        theme_file = themes_directory / f'{theme}.yaml'
        if not theme_file.is_file():
            raise ConfigError(f'Theme "{theme}" not found')
        theme_yaml = self._load(theme_file)
        if theme_yaml is None:
            raise ConfigError(f'File {theme_file.name} is empty')
        if 'colors' not in theme_yaml:
            raise ConfigError(f'{theme_file} does not contain color config')
        expected_colors = [
            'black',
            'red',
            'green',
            'yellow',
            'blue',
            'magenta',
            'cyan',
            'white',
        ]
        expected_props = {
            'primary': ['background', 'foreground'],
            'normal': expected_colors,
            'bright': expected_colors,
        }
        # Warn (but do not fail) about missing color definitions.
        for k in expected_props:
            if k not in theme_yaml['colors']:
                log.warn(f'Missing "colors:{k}" for theme "{theme}"')
                continue
            for v in expected_props[k]:
                if v not in theme_yaml['colors'][k]:
                    log.warn(f'Missing "colors:{k}:{v}" for theme "{theme}"')
        self.config['colors'] = theme_yaml['colors']
        log.ok(f'Theme {theme} applied')

    def change_font_size(self, size: float):
        """Set ``font.size``; *size* must be strictly positive."""
        if size <= 0:
            raise ConfigError('Font size cannot be negative or zero')
        if 'font' not in self.config:
            self.config['font'] = {}
            log.warn('"font" prop config was not present in alacritty.yml')
        self.config['font']['size'] = size
        log.ok(f'Font size set to {size:.1f}')

    def change_font(self, font: str):
        """Set the font family for normal/bold/italic from ``fonts.yaml``;
        a plain-string entry applies to all three types."""
        if 'font' not in self.config:
            self.config['font'] = {}
            log.warn('"font" prop was not present in alacritty.yml')
        fonts_file = self._resource_path('fonts')
        fonts = self._load(fonts_file)
        if fonts is None:
            raise ConfigError(f'File "{fonts_file}" is empty')
        if 'fonts' not in fonts:
            raise ConfigError(f'No font config found in "{fonts_file}"')
        fonts = fonts['fonts']
        if font not in fonts:
            raise ConfigError(f'Config for font "{font}" not found')
        font_types = ['normal', 'bold', 'italic']
        if isinstance(fonts[font], str):
            # Shorthand: expand a bare family name to every font type.
            font_name = fonts[font]
            fonts[font] = {}
            for t in font_types:
                fonts[font][t] = font_name
        if not isinstance(fonts[font], Mapping):
            raise ConfigError(f'Font "{font}" has wrong format')
        for t in font_types:
            if t not in fonts[font]:
                raise ConfigError(f'Font "{font}" does not have "{t}" property')
            if t not in self.config['font']:
                # Placeholder; 'family' is overwritten on the next line.
                self.config['font'][t] = {'family': 'tmp'}
            self.config['font'][t]['family'] = fonts[font][t]
        log.ok(f'Font {font} applied')

    def change_opacity(self, opacity: float):
        """Set ``background_opacity`` (0.0 transparent … 1.0 opaque)."""
        if opacity < 0.0 or opacity > 1.0:
            raise ConfigError('Opacity should be between 0.0 and 1.0')
        self.config['background_opacity'] = opacity
        log.ok(f'Opacity set to {opacity:.2f}')

    def change_padding(self, padding: List[int]):
        """Set ``window.padding`` from a two-element ``[x, y]`` list."""
        if len(padding) != 2:
            raise ConfigError('Padding should only have an x and y value')
        x, y = padding
        if 'window' not in self.config:
            self.config['window'] = {}
            log.warn('"window" prop was not present in config file')
        if 'padding' not in self.config['window']:
            self.config['window']['padding'] = {}
            log.warn('"padding" prop was not present in config file')
        self.config['window']['padding']['x'] = x
        self.config['window']['padding']['y'] = y
        log.ok(f'Padding set to x: {x}, y: {y}')

    def change_font_offset(self, offset: List[int]):
        """Set ``font.offset`` (extra glyph spacing) from ``[x, y]``."""
        if len(offset) != 2:
            raise ConfigError('Wrong offset config, should be [x, y]')
        x, y = offset
        if 'font' not in self.config:
            self.config['font'] = {}
        if 'offset' not in self.config['font']:
            log.warn('"offset" prop was not set')
            self.config['font']['offset'] = {}
        self.config['font']['offset']['x'] = x
        self.config['font']['offset']['y'] = y
        log.ok(f'Offset set to x: {x}, y: {y}')

    def list(self, to_be_listed: str):
        """Print available themes and/or fonts ('themes', 'fonts', 'all')."""
        def list_themes():
            themes_dir = self._resource_path('themes')
            themes = [file.name.split('.')[0] for file in themes_dir.iterdir()]
            if len(themes) < 1:
                log.warn('Cannot list themes, themes directory is empty')
            else:
                log.color_print('Themes:', log.Color.BOLD)
                for theme in themes:
                    log.color_print(f'    {theme}', log.Color.BLUE)

        def list_fonts():
            fonts = self._load(self._resource_path('fonts'))
            if fonts is None or 'fonts' not in fonts:
                log.warn('Cannot list fonts, no fonts found')
            else:
                log.color_print('Fonts:', log.Color.BOLD)
                for font in fonts['fonts']:
                    log.color_print(f'    {font}', log.Color.PURPLE)

        options = {
            'themes': list_themes,
            'fonts': list_fonts,
        }
        if to_be_listed == 'all':
            for _, list_function in options.items():
                list_function()
        else:
            if to_be_listed not in options:
                raise ConfigError(f'Cannot list {to_be_listed}, unknown option')
            options[to_be_listed]()

    def print(self, to_be_printed: List[str]):
        """Dump config/fonts/themes as YAML; any name that is not 'config'
        or 'fonts' is treated as a theme name. Defaults to 'config'."""
        def print_config():
            log.color_print(self.config_file, log.Color.BOLD)
            print(yaml.dump(self.config))

        def print_fonts():
            fonts_file = self._resource_path('fonts')
            log.color_print(fonts_file, log.Color.BOLD)
            print(yaml.dump(self._load(fonts_file)))

        def print_theme(theme: str):
            themes_dir = self._resource_path('themes')
            theme_file = themes_dir / f'{theme}.yaml'
            if not theme_file.is_file():
                raise ConfigError(
                    f'Failed printing "{theme}" theme, "{theme_file}" not found'
                )
            log.color_print(theme_file, log.Color.BOLD)
            print(yaml.dump(self._load(theme_file)))

        options = {
            'fonts': print_fonts,
            'config': print_config,
        }
        if len(to_be_printed) == 0:
            to_be_printed.append('config')
        for param in to_be_printed:
            if param not in options:
                print_theme(param)
            else:
                options[param]()
|
#!/usr/bin/env python
"""A Verification, Storage and Query/Retrieve SCP application."""
import argparse
from configparser import ConfigParser
import os
import sys
import pydicom.config
from pydicom.dataset import Dataset
from pynetdicom import (
AE, evt, AllStoragePresentationContexts, ALL_TRANSFER_SYNTAXES
)
from pynetdicom import _config, _handlers
from pynetdicom.apps.common import setup_logging
from pynetdicom.sop_class import (
Verification,
PatientRootQueryRetrieveInformationModelFind,
PatientRootQueryRetrieveInformationModelMove,
PatientRootQueryRetrieveInformationModelGet,
StudyRootQueryRetrieveInformationModelFind,
StudyRootQueryRetrieveInformationModelMove,
StudyRootQueryRetrieveInformationModelGet
)
from pynetdicom.utils import validate_ae_title
from pynetdicom.apps.qrscp.handlers import (
handle_echo, handle_find, handle_get, handle_move, handle_store
)
from pynetdicom.apps.qrscp import db
# Use `None` for empty values
pydicom.config.use_none_as_empty_text_VR_value = True

# Don't log identifiers
_config.LOG_RESPONSE_IDENTIFIERS = False


# Override the standard logging handlers
def _dont_log(event):
    """No-op event handler used to silence pynetdicom's default logging."""
    pass


# Silence the per-message debug output for C-FIND/C-GET/C-MOVE responses
# and C-STORE sub-operations (these fire once per matched instance).
_handlers._send_c_find_rsp = _dont_log
_handlers._send_c_get_rsp = _dont_log
_handlers._send_c_move_rsp = _dont_log
_handlers._send_c_store_rq = _dont_log
_handlers._recv_c_store_rsp = _dont_log

__version__ = "1.0.0"
def _log_config(config, logger):
"""Log the configuration settings.
Parameters
----------
logger : logging.Logger
The application's logger.
"""
logger.debug("Configuration settings")
app = config["DEFAULT"]
aet, port, pdu = app["ae_title"], app["port"], app["max_pdu"]
logger.debug(
f" AE title: {aet}, Port: {port}, Max. PDU: {pdu}"
)
logger.debug(" Timeouts:")
acse, dimse = app["acse_timeout"], app["dimse_timeout"]
network = app["network_timeout"]
logger.debug(f" ACSE: {acse}, DIMSE: {dimse}, Network: {network}")
logger.debug(f" Storage directory: {app["instance_location"]}")
logger.debug(f" Database location: {app["database_location"]}")
if config.sections():
logger.debug(" Move destinations: ")
else:
logger.debug(" Move destinations: none")
for ae_title in config.sections():
addr = config[ae_title]["address"]
port = config[ae_title]["port"]
logger.debug(f' {ae_title}: ({addr}, {port})')
logger.debug("")
def _setup_argparser():
    """Set up the command line arguments.

    Reads ``sys.argv`` and returns the parsed ``argparse.Namespace``.
    """
    # Description
    parser = argparse.ArgumentParser(
        description=(
            "The qrscp application implements a Service Class Provider (SCP) "
            "for the Verification, Storage and Query/Retrieve (QR) Service "
            "Classes."
        ),
        usage="qrscp [options]"
    )

    # General Options
    gen_opts = parser.add_argument_group("General Options")
    gen_opts.add_argument(
        "--version",
        help="print version information and exit",
        action="store_true"
    )
    # -q/-v/-d are mutually exclusive verbosity presets, all stored in
    # ``log_type``.
    output = gen_opts.add_mutually_exclusive_group()
    output.add_argument(
        "-q", "--quiet",
        help="quiet mode, print no warnings and errors",
        action="store_const",
        dest="log_type", const="q"
    )
    output.add_argument(
        "-v", "--verbose",
        help="verbose mode, print processing details",
        action="store_const",
        dest="log_type", const="v"
    )
    output.add_argument(
        "-d", "--debug",
        help="debug mode, print debug information",
        action="store_const",
        dest="log_type", const="d"
    )
    gen_opts.add_argument(
        "-ll", "--log-level", metavar="[l]",
        help=(
            "use level l for the logger (critical, error, warn, info, debug)"
        ),
        type=str,
        choices=["critical", "error", "warn", "info", "debug"]
    )
    # Default configuration file ships alongside this module.
    fdir = os.path.abspath(os.path.dirname(__file__))
    fpath = os.path.join(fdir, "default.ini")
    gen_opts.add_argument(
        "-c", "--config", metavar="[f]ilename",
        help="use configuration file f",
        default=fpath,
    )

    # Networking Options — each overrides the corresponding config value.
    net_opts = parser.add_argument_group("Networking Options")
    net_opts.add_argument(
        "--port",
        help="override the configured TCP/IP listen port number",
    )
    net_opts.add_argument(
        "-aet", "--ae-title", metavar="[a]etitle",
        help="override the configured AE title",
    )
    net_opts.add_argument(
        "-ta", "--acse-timeout", metavar="[s]econds",
        help="override the configured timeout for ACSE messages",
    )
    net_opts.add_argument(
        "-td", "--dimse-timeout", metavar="[s]econds",
        help="override the configured timeout for DIMSE messages",
    )
    net_opts.add_argument(
        "-tn", "--network-timeout", metavar="[s]econds",
        help="override the configured timeout for the network",
    )
    net_opts.add_argument(
        "-pdu", "--max-pdu", metavar="[n]umber of bytes",
        help="override the configured max receive pdu to n bytes",
    )
    net_opts.add_argument(
        "-ba", "--bind-address", metavar="[a]ddress",
        help=(
            "override the configured address of the network interface to "
            "listen on"
        ),
    )

    # Database Options
    db_opts = parser.add_argument_group("Database Options")
    db_opts.add_argument(
        "--database-location", metavar="[f]ile",
        help="override the location of the database using file f",
        type=str,
    )
    db_opts.add_argument(
        "--instance-location", metavar="[d]irectory",
        help=(
            "override the configured instance storage location to directory d"
        ),
        type=str
    )
    db_opts.add_argument(
        "--clean",
        help=(
            "remove all entries from the database and delete the "
            "corresponding stored instances"
        ),
        action="store_true",
    )

    return parser.parse_args()
def clean(db_path, logger):
    """Remove all entries from the database and delete the corresponding
    stored instances.

    Parameters
    ----------
    db_path : str
        The database path to use with create_engine().
    logger : logging.Logger
        The application logger.

    Returns
    -------
    bool
        ``True`` if the storage directory and database were both cleaned
        successfully, ``False`` otherwise.
    """
    engine = create_engine(db_path)
    with engine.connect() as conn:
        Session = sessionmaker(bind=engine)
        session = Session()
        # Collect the stored file paths first; if the query itself fails
        # there is nothing further we can safely do.
        try:
            fpaths = [ii.filename for ii in session.query(Instance).all()]
        except Exception as exc:
            logger.error("Exception raised while querying the database")
            logger.exception(exc)
            session.rollback()
            session.close()
            # Bug fix: the original placed ``session.close(); return False``
            # in a ``finally`` block — a ``return`` in ``finally`` always
            # wins, so the function unconditionally returned False. The
            # early exit now happens only on query failure.
            return False

        storage_cleaned = True
        for fpath in fpaths:
            try:
                # NOTE(review): ``config`` is not defined in this module —
                # presumably the qrscp configuration module; confirm the
                # import at file level.
                os.remove(os.path.join(config.INSTANCE_LOCATION, fpath))
            except Exception as exc:
                logger.error(f"Unable to delete the instance at '{fpath}'")
                logger.exception(exc)
                storage_cleaned = False

        if storage_cleaned:
            logger.info("Storage directory cleaned successfully")
        else:
            logger.error("Failed to clean storage directory")

        database_cleaned = False
        try:
            clear(session)
            database_cleaned = True
            logger.info("Database cleaned successfully")
        except Exception as exc:
            logger.error("Failed to clean the database")
            logger.exception(exc)
            session.rollback()
        finally:
            session.close()

    return database_cleaned and storage_cleaned
def main(args=None):
    """Run the application.

    Parameters
    ----------
    args : list of str, optional
        If given, replaces ``sys.argv`` before argument parsing (useful
        for programmatic invocation/testing).
    """
    if args is not None:
        sys.argv = args

    args = _setup_argparser()

    if args.version:
        print(f"qrscp.py v{__version__}")
        sys.exit()

    APP_LOGGER = setup_logging(args, "qrscp")
    APP_LOGGER.debug(f"qrscp.py v{__version__}")
    APP_LOGGER.debug("")

    APP_LOGGER.debug("Using configuration from:")
    APP_LOGGER.debug(f" {args.config}")
    APP_LOGGER.debug("")
    config = ConfigParser()
    config.read(args.config)

    # Command-line options override the values from the config file.
    if args.ae_title:
        config["DEFAULT"]["ae_title"] = args.ae_title
    if args.port:
        config["DEFAULT"]["port"] = args.port
    if args.max_pdu:
        config["DEFAULT"]["max_pdu"] = args.max_pdu
    if args.acse_timeout:
        config["DEFAULT"]["acse_timeout"] = args.acse_timeout
    if args.dimse_timeout:
        config["DEFAULT"]["dimse_timeout"] = args.dimse_timeout
    if args.network_timeout:
        config["DEFAULT"]["network_timeout"] = args.network_timeout
    if args.bind_address:
        config["DEFAULT"]["bind_address"] = args.bind_address
    if args.database_location:
        config["DEFAULT"]["database_location"] = args.database_location
    if args.instance_location:
        config["DEFAULT"]["instance_location"] = args.instance_location

    # Log configuration settings
    _log_config(config, APP_LOGGER)
    app_config = config["DEFAULT"]

    # Each non-DEFAULT section describes one C-MOVE destination AE.
    dests = {}
    for ae_title in config.sections():
        dest = config[ae_title]
        # Convert to bytes and validate the AE title
        ae_title = validate_ae_title(ae_title.encode("ascii"), use_short=True)
        dests[ae_title] = (dest["address"], dest.getint("port"))

    # Use default or specified configuration file
    current_dir = os.path.abspath(os.path.dirname(__file__))
    instance_dir = os.path.join(current_dir, app_config["instance_location"])
    db_path = os.path.join(current_dir, app_config["database_location"])

    # The path to the database
    db_path = f"sqlite:///{db_path}"
    db.create(db_path)

    # Clean up the database and storage directory
    if args.clean:
        response = input(
            "This will delete all instances from both the storage directory "
            "and the database. Are you sure you wish to continue? [yes/no]: "
        )
        if response != "yes":
            sys.exit()

        if clean(db_path, APP_LOGGER):
            sys.exit()
        else:
            sys.exit(1)

    # Try to create the instance storage directory
    os.makedirs(instance_dir, exist_ok=True)

    ae = AE(app_config["ae_title"])
    ae.maximum_pdu_size = app_config.getint("max_pdu")
    ae.acse_timeout = app_config.getfloat("acse_timeout")
    ae.dimse_timeout = app_config.getfloat("dimse_timeout")
    ae.network_timeout = app_config.getfloat("network_timeout")

    ## Add supported presentation contexts
    # Verification SCP
    ae.add_supported_context(Verification, ALL_TRANSFER_SYNTAXES)

    # Storage SCP - support all transfer syntaxes
    for cx in AllStoragePresentationContexts:
        ae.add_supported_context(
            cx.abstract_syntax, ALL_TRANSFER_SYNTAXES,
            scp_role=True, scu_role=False
        )

    # Query/Retrieve SCP
    ae.add_supported_context(PatientRootQueryRetrieveInformationModelFind)
    ae.add_supported_context(PatientRootQueryRetrieveInformationModelMove)
    ae.add_supported_context(PatientRootQueryRetrieveInformationModelGet)
    ae.add_supported_context(StudyRootQueryRetrieveInformationModelFind)
    ae.add_supported_context(StudyRootQueryRetrieveInformationModelMove)
    ae.add_supported_context(StudyRootQueryRetrieveInformationModelGet)

    # Set our handler bindings
    handlers = [
        (evt.EVT_C_ECHO, handle_echo, [args, APP_LOGGER]),
        (evt.EVT_C_FIND, handle_find, [db_path, args, APP_LOGGER]),
        (evt.EVT_C_GET, handle_get, [db_path, args, APP_LOGGER]),
        (evt.EVT_C_MOVE, handle_move, [dests, db_path, args, APP_LOGGER]),
        (
            evt.EVT_C_STORE,
            handle_store,
            [instance_dir, db_path, args, APP_LOGGER]
        ),
    ]

    # Listen for incoming association requests
    ae.start_server(
        (app_config["bind_address"], app_config.getint("port")),
        evt_handlers=handlers
    )


if __name__ == "__main__":
    main()
| #!/usr/bin/env python
"""A Verification, Storage and Query/Retrieve SCP application."""
import argparse
from configparser import ConfigParser
import os
import sys
import pydicom.config
from pydicom.dataset import Dataset
from pynetdicom import (
AE, evt, AllStoragePresentationContexts, ALL_TRANSFER_SYNTAXES
)
from pynetdicom import _config, _handlers
from pynetdicom.apps.common import setup_logging
from pynetdicom.sop_class import (
Verification,
PatientRootQueryRetrieveInformationModelFind,
PatientRootQueryRetrieveInformationModelMove,
PatientRootQueryRetrieveInformationModelGet,
StudyRootQueryRetrieveInformationModelFind,
StudyRootQueryRetrieveInformationModelMove,
StudyRootQueryRetrieveInformationModelGet
)
from pynetdicom.utils import validate_ae_title
from pynetdicom.apps.qrscp.handlers import (
handle_echo, handle_find, handle_get, handle_move, handle_store
)
from pynetdicom.apps.qrscp import db
# Use `None` for empty values when decoding text VRs
pydicom.config.use_none_as_empty_text_VR_value = True
# Don't log identifiers in C-FIND/C-GET/C-MOVE responses
_config.LOG_RESPONSE_IDENTIFIERS = False
# Override the standard pynetdicom logging handlers with a no-op so that
# per-response/per-store messages are suppressed entirely.
def _dont_log(event):
    """No-op replacement for pynetdicom's default DIMSE logging handlers."""
    pass
_handlers._send_c_find_rsp = _dont_log
_handlers._send_c_get_rsp = _dont_log
_handlers._send_c_move_rsp = _dont_log
_handlers._send_c_store_rq = _dont_log
_handlers._recv_c_store_rsp = _dont_log
# Application version (independent of the pynetdicom release version)
__version__ = "1.0.0"
def _log_config(config, logger):
"""Log the configuration settings.
Parameters
----------
logger : logging.Logger
The application's logger.
"""
logger.debug("Configuration settings")
app = config["DEFAULT"]
aet, port, pdu = app["ae_title"], app["port"], app["max_pdu"]
logger.debug(
f" AE title: {aet}, Port: {port}, Max. PDU: {pdu}"
)
logger.debug(" Timeouts:")
acse, dimse = app["acse_timeout"], app["dimse_timeout"]
network = app["network_timeout"]
logger.debug(f" ACSE: {acse}, DIMSE: {dimse}, Network: {network}")
logger.debug(f" Storage directory: {app['instance_location']}")
logger.debug(f" Database location: {app['database_location']}")
if config.sections():
logger.debug(" Move destinations: ")
else:
logger.debug(" Move destinations: none")
for ae_title in config.sections():
addr = config[ae_title]["address"]
port = config[ae_title]["port"]
logger.debug(f' {ae_title}: ({addr}, {port})')
logger.debug("")
def _setup_argparser():
"""Setup the command line arguments"""
# Description
parser = argparse.ArgumentParser(
description=(
"The qrscp application implements a Service Class Provider (SCP) "
"for the Verification, Storage and Query/Retrieve (QR) Service "
"Classes."
),
usage="qrscp [options]"
)
# General Options
gen_opts = parser.add_argument_group("General Options")
gen_opts.add_argument(
"--version",
help="print version information and exit",
action="store_true"
)
output = gen_opts.add_mutually_exclusive_group()
output.add_argument(
"-q", "--quiet",
help="quiet mode, print no warnings and errors",
action="store_const",
dest="log_type", const="q"
)
output.add_argument(
"-v", "--verbose",
help="verbose mode, print processing details",
action="store_const",
dest="log_type", const="v"
)
output.add_argument(
"-d", "--debug",
help="debug mode, print debug information",
action="store_const",
dest="log_type", const="d"
)
gen_opts.add_argument(
"-ll", "--log-level", metavar="[l]",
help=(
"use level l for the logger (critical, error, warn, info, debug)"
),
type=str,
choices=["critical", "error", "warn", "info", "debug"]
)
fdir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(fdir, "default.ini")
gen_opts.add_argument(
"-c", "--config", metavar="[f]ilename",
help="use configuration file f",
default=fpath,
)
net_opts = parser.add_argument_group("Networking Options")
net_opts.add_argument(
"--port",
help="override the configured TCP/IP listen port number",
)
net_opts.add_argument(
"-aet", "--ae-title", metavar="[a]etitle",
help="override the configured AE title",
)
net_opts.add_argument(
"-ta", "--acse-timeout", metavar="[s]econds",
help="override the configured timeout for ACSE messages",
)
net_opts.add_argument(
"-td", "--dimse-timeout", metavar="[s]econds",
help="override the configured timeout for DIMSE messages",
)
net_opts.add_argument(
"-tn", "--network-timeout", metavar="[s]econds",
help="override the configured timeout for the network",
)
net_opts.add_argument(
"-pdu", "--max-pdu", metavar="[n]umber of bytes",
help="override the configured max receive pdu to n bytes",
)
net_opts.add_argument(
"-ba", "--bind-address", metavar="[a]ddress",
help=(
"override the configured address of the network interface to "
"listen on"
),
)
db_opts = parser.add_argument_group("Database Options")
db_opts.add_argument(
"--database-location", metavar="[f]ile",
help="override the location of the database using file f",
type=str,
)
db_opts.add_argument(
"--instance-location", metavar="[d]irectory",
help=(
"override the configured instance storage location to directory d"
),
type=str
)
db_opts.add_argument(
"--clean",
help=(
"remove all entries from the database and delete the "
"corresponding stored instances"
),
action="store_true",
)
return parser.parse_args()
def clean(db_path, logger):
    """Remove all entries from the database and delete the corresponding
    stored instances.

    Parameters
    ----------
    db_path : str
        The database path to use with create_engine().
    logger : logging.Logger
        The application logger.

    Returns
    -------
    bool
        ``True`` if the storage directory and database were both cleaned
        successfully, ``False`` otherwise.
    """
    engine = create_engine(db_path)
    with engine.connect() as conn:
        Session = sessionmaker(bind=engine)
        session = Session()
        # Fetch the stored file names up front; if the query itself fails
        # there is nothing safe to delete.
        query_success = True
        try:
            fpaths = [ii.filename for ii in session.query(Instance).all()]
        except Exception as exc:
            logger.error("Exception raised while querying the database")
            logger.exception(exc)
            session.rollback()
            query_success = False
        finally:
            # BUG FIX: the original had ``return False`` here inside the
            # ``finally`` block, so the function *always* returned False
            # and never performed any clean-up, even on success.
            session.close()

    if not query_success:
        return False

    # Delete each stored instance file; keep going on individual failures
    # so one bad file doesn't leave the rest behind.
    storage_cleaned = True
    for fpath in fpaths:
        try:
            # NOTE(review): ``config`` is not defined in this module's
            # visible imports -- confirm it is in scope, otherwise this
            # raises NameError instead of removing the file.
            os.remove(os.path.join(config.INSTANCE_LOCATION, fpath))
        except Exception as exc:
            logger.error(f"Unable to delete the instance at '{fpath}'")
            logger.exception(exc)
            storage_cleaned = False

    if storage_cleaned:
        logger.info("Storage directory cleaned successfully")
    else:
        logger.error("Failed to clean storage directory")

    # Clear the database tables using a fresh session (the original reused
    # the session it had already closed).
    database_cleaned = False
    session = Session()
    try:
        clear(session)
        database_cleaned = True
        logger.info("Database cleaned successfully")
    except Exception as exc:
        logger.error("Failed to clean the database")
        logger.exception(exc)
        session.rollback()
    finally:
        session.close()

    return database_cleaned and storage_cleaned
def main(args=None):
    """Run the qrscp application.

    Parses the command line, applies any overrides on top of the
    configuration file, optionally cleans the database and storage
    directory, then starts a blocking Verification/Storage/QR SCP server.

    Parameters
    ----------
    args : list of str, optional
        If supplied, replaces ``sys.argv`` before argument parsing.
    """
    if args is not None:
        sys.argv = args
    args = _setup_argparser()
    # Print the version and exit
    if args.version:
        print(f"qrscp.py v{__version__}")
        sys.exit()
    APP_LOGGER = setup_logging(args, "qrscp")
    APP_LOGGER.debug(f"qrscp.py v{__version__}")
    APP_LOGGER.debug("")
    APP_LOGGER.debug("Using configuration from:")
    APP_LOGGER.debug(f"  {args.config}")
    APP_LOGGER.debug("")
    config = ConfigParser()
    config.read(args.config)
    # Command line switches override the configuration file values
    if args.ae_title:
        config["DEFAULT"]["ae_title"] = args.ae_title
    if args.port:
        config["DEFAULT"]["port"] = args.port
    if args.max_pdu:
        config["DEFAULT"]["max_pdu"] = args.max_pdu
    if args.acse_timeout:
        config["DEFAULT"]["acse_timeout"] = args.acse_timeout
    if args.dimse_timeout:
        config["DEFAULT"]["dimse_timeout"] = args.dimse_timeout
    if args.network_timeout:
        config["DEFAULT"]["network_timeout"] = args.network_timeout
    if args.bind_address:
        config["DEFAULT"]["bind_address"] = args.bind_address
    if args.database_location:
        config["DEFAULT"]["database_location"] = args.database_location
    if args.instance_location:
        config["DEFAULT"]["instance_location"] = args.instance_location
    # Log configuration settings
    _log_config(config, APP_LOGGER)
    app_config = config["DEFAULT"]
    # Each non-DEFAULT section describes a C-MOVE destination:
    # AE title -> (address, port)
    dests = {}
    for ae_title in config.sections():
        dest = config[ae_title]
        # Convert to bytes and validate the AE title
        ae_title = validate_ae_title(ae_title.encode("ascii"), use_short=True)
        dests[ae_title] = (dest["address"], dest.getint("port"))
    # Resolve storage directory and database file relative to this module
    current_dir = os.path.abspath(os.path.dirname(__file__))
    instance_dir = os.path.join(current_dir, app_config["instance_location"])
    db_path = os.path.join(current_dir, app_config["database_location"])
    # The path to the database (SQLAlchemy URL); creates it if missing
    db_path = f"sqlite:///{db_path}"
    db.create(db_path)
    # Clean up the database and storage directory (prompts for confirmation)
    if args.clean:
        response = input(
            "This will delete all instances from both the storage directory "
            "and the database. Are you sure you wish to continue? [yes/no]: "
        )
        if response != "yes":
            sys.exit()
        if clean(db_path, APP_LOGGER):
            sys.exit()
        else:
            sys.exit(1)
    # Try to create the instance storage directory
    os.makedirs(instance_dir, exist_ok=True)
    ae = AE(app_config["ae_title"])
    ae.maximum_pdu_size = app_config.getint("max_pdu")
    ae.acse_timeout = app_config.getfloat("acse_timeout")
    ae.dimse_timeout = app_config.getfloat("dimse_timeout")
    ae.network_timeout = app_config.getfloat("network_timeout")
    ## Add supported presentation contexts
    # Verification SCP
    ae.add_supported_context(Verification, ALL_TRANSFER_SYNTAXES)
    # Storage SCP - support all transfer syntaxes
    for cx in AllStoragePresentationContexts:
        ae.add_supported_context(
            cx.abstract_syntax, ALL_TRANSFER_SYNTAXES,
            scp_role=True, scu_role=False
        )
    # Query/Retrieve SCP
    ae.add_supported_context(PatientRootQueryRetrieveInformationModelFind)
    ae.add_supported_context(PatientRootQueryRetrieveInformationModelMove)
    ae.add_supported_context(PatientRootQueryRetrieveInformationModelGet)
    ae.add_supported_context(StudyRootQueryRetrieveInformationModelFind)
    ae.add_supported_context(StudyRootQueryRetrieveInformationModelMove)
    ae.add_supported_context(StudyRootQueryRetrieveInformationModelGet)
    # Set our handler bindings
    handlers = [
        (evt.EVT_C_ECHO, handle_echo, [args, APP_LOGGER]),
        (evt.EVT_C_FIND, handle_find, [db_path, args, APP_LOGGER]),
        (evt.EVT_C_GET, handle_get, [db_path, args, APP_LOGGER]),
        (evt.EVT_C_MOVE, handle_move, [dests, db_path, args, APP_LOGGER]),
        (
            evt.EVT_C_STORE,
            handle_store,
            [instance_dir, db_path, args, APP_LOGGER]
        ),
    ]
    # Listen for incoming association requests (blocks until interrupted)
    ae.start_server(
        (app_config["bind_address"], app_config.getint("port")),
        evt_handlers=handlers
    )
# Script entry point: run the SCP application (blocks in ae.start_server).
if __name__ == "__main__":
    main()
|
import pytest, os, logging, uuid
import pytest_html
from selenium import webdriver
from py.xml import html
from framework.action_framework import Actions
from framework.configuration import Config
from framework.element_provider import BasicWebElementProvider
# Fixture scope for ``driver``/``actions``; override via the DRIVER_SCOPE env var.
DRIVER_SCOPE = os.environ.get('DRIVER_SCOPE', 'function')
# Path to the YAML test configuration; falls back to the bundled default.
CONFIG_PATH = os.environ.get('SELENIUM_CONFIG_PATH', None)
if not CONFIG_PATH:
    logging.warning('SELENIUM_CONFIG_PATH not set! using default config.yml')
    CONFIG_PATH = 'configurationz/config.yml'
# Report output locations; screenshots are linked relative to the report dir.
ROOT_DIR = os.path.dirname(__file__)
REPORT_DIR = os.environ.get('REPORT_DIR', ROOT_DIR + '/reports')
SCREENSHOTS_DIR = REPORT_DIR + '/screenshots'
# NOTE(review): Windows-specific absolute path -- confirm it matches the host.
CHROME_DRIVER_PATH = 'C:/webdriver/chromedriver.exe'
@pytest.fixture(scope=DRIVER_SCOPE)
def driver(conf):
    """Yield a Chrome WebDriver: remote (Grid/Selenoid) when a hub URL is
    configured, otherwise a local chromedriver; quits it on teardown."""
    if conf.wd_hub_url:
        options = webdriver.ChromeOptions()
        options.headless = conf.headless
        options.add_argument('--start-maximized')
        capabilities = {
            "browser_name": conf.browser,
            "version": str(conf.browser_version),
            "enableVNC": True,
            "enableVideo": False,
            "acceptInsecureCerts": True
        }
        wd = webdriver.Remote(
            command_executor=conf.wd_hub_url,
            desired_capabilities=capabilities,
            options=options
        )
    else:
        wd = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH)
        wd.maximize_window()
    yield wd
    wd.quit()
@pytest.fixture(scope=DRIVER_SCOPE)
def actions(driver, conf):
    """Provide an Actions facade bound to the current driver and config."""
    provider = BasicWebElementProvider(driver, conf)
    return Actions(provider)
@pytest.fixture(scope='session')
def conf():
    """Load the test configuration once per session."""
    return Config.from_file(CONFIG_PATH)
def pytest_html_report_title(report):
    """Use TEST_JOB_NAME as the HTML report title when it is set."""
    job_name = os.environ.get('TEST_JOB_NAME', 'Tests report')
    report.title = job_name
def pytest_html_results_table_header(cells):
    """Drop the trailing column, add a 'Title' header and reorder the
    columns to Result, Title, Test, Duration."""
    cells.pop()
    cells.insert(4, html.th('Title'))
    # Pull the last three cells off the end (title, duration, test) and
    # splice them back in the desired order after the first column.
    title, duration, test = cells.pop(), cells.pop(), cells.pop()
    cells[1:1] = [title, test, duration]
def pytest_html_results_table_row(report, cells):
    """Mirror the header hook for each row: drop the trailing cell, insert
    the test title and reorder to Result, Title, Test, Duration.

    Reads ``report.test_tile`` (sic) which is set by
    ``pytest_runtest_makereport``.
    """
    cells.pop()
    cells.insert(4, html.td(report.test_tile))
    title, duration, test = cells.pop(), cells.pop(), cells.pop()
    cells[1:1] = [title, test, duration]
# noinspection PyUnresolvedReferences
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item):
    """Attach the docstring title/description and a failure screenshot to
    the HTML report.

    The second docstring line is used as the test title and stored on
    ``report.test_tile`` (name kept as-is: the results-table hook reads
    it).  On failure a screenshot is saved to SCREENSHOTS_DIR and embedded
    in the report.
    """
    outcome = yield
    report = outcome.get_result()
    docs = item.function.__doc__.split('\n') if item.function.__doc__ else []
    # BUG FIX: the original guarded docs[1] with ``len(docs) >= 1`` which
    # raises IndexError for a single-line docstring; at least two elements
    # are required to index [1].
    report.test_tile = docs[1] if len(docs) > 1 else ''
    extra = getattr(report, 'extra', [])
    if report.when == 'call':
        htmldocs = []
        if len(docs) >= 2:
            docs = docs[1:]
            for doc in docs:
                if doc.strip() == '':
                    htmldocs.append('<br>')
                else:
                    htmldocs.append(f'<p style="margin: 5px; margin-left: 12px;">{doc}</p>')
            docstr = ''.join(htmldocs[2:len(htmldocs) - 1])
            extra.append(pytest_html.extras.html(f'<h4 style="margin: 8px;">Test description</h4>{docstr}'))
        if report.failed:
            # Prefer the driver wrapped by the actions fixture; fall back to
            # a bare driver fixture if present.
            actions: Actions = item.funcargs.get('actions', None)
            driver = item.funcargs.get('driver', None)
            wd = None
            if actions:
                wd = actions.element_provider.driver
            if driver:
                wd = driver
            if wd:
                # BUG FIX: the original nested single quotes inside a
                # single-quoted f-string (.replace('-', '')), which is a
                # SyntaxError on Python < 3.12; uuid4().hex is equivalent.
                uid = uuid.uuid4().hex[:15]
                name = f'{item.function.__name__}__{uid}.png'
                spath = f'{SCREENSHOTS_DIR}/{name}'
                print('taking screenshot ->', spath)
                wd.save_screenshot(spath)
                dom = '<div class="image">'
                dom += f'<a href="screenshots/{name}" target="_blank">'
                dom += f'<img src="screenshots/{name}"></a></div>'
                extra.append(pytest_html.extras.html(dom))
    report.extra = extra
| import pytest, os, logging, uuid
import pytest_html
from selenium import webdriver
from py.xml import html
from framework.action_framework import Actions
from framework.configuration import Config
from framework.element_provider import BasicWebElementProvider
# Fixture scope for ``driver``/``actions``; override via the DRIVER_SCOPE env var.
DRIVER_SCOPE = os.environ.get('DRIVER_SCOPE', 'function')
# Path to the YAML test configuration; falls back to the bundled default.
CONFIG_PATH = os.environ.get('SELENIUM_CONFIG_PATH', None)
if not CONFIG_PATH:
    logging.warning('SELENIUM_CONFIG_PATH not set! using default config.yml')
    CONFIG_PATH = 'configurationz/config.yml'
# Report output locations; screenshots are linked relative to the report dir.
ROOT_DIR = os.path.dirname(__file__)
REPORT_DIR = os.environ.get('REPORT_DIR', ROOT_DIR + '/reports')
SCREENSHOTS_DIR = REPORT_DIR + '/screenshots'
# NOTE(review): Windows-specific absolute path -- confirm it matches the host.
CHROME_DRIVER_PATH = 'C:/webdriver/chromedriver.exe'
@pytest.fixture(scope=DRIVER_SCOPE)
def driver(conf):
    """Yield a Chrome WebDriver: remote (Grid/Selenoid) when a hub URL is
    configured, otherwise a local chromedriver; quits it on teardown."""
    if conf.wd_hub_url:
        options = webdriver.ChromeOptions()
        options.headless = conf.headless
        options.add_argument('--start-maximized')
        capabilities = {
            "browser_name": conf.browser,
            "version": str(conf.browser_version),
            "enableVNC": True,
            "enableVideo": False,
            "acceptInsecureCerts": True
        }
        wd = webdriver.Remote(
            command_executor=conf.wd_hub_url,
            desired_capabilities=capabilities,
            options=options
        )
    else:
        wd = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH)
        wd.maximize_window()
    yield wd
    wd.quit()
@pytest.fixture(scope=DRIVER_SCOPE)
def actions(driver, conf):
    """Provide an Actions facade bound to the current driver and config."""
    provider = BasicWebElementProvider(driver, conf)
    return Actions(provider)
@pytest.fixture(scope='session')
def conf():
    """Load the test configuration once per session."""
    return Config.from_file(CONFIG_PATH)
def pytest_html_report_title(report):
    """Use TEST_JOB_NAME as the HTML report title when it is set."""
    job_name = os.environ.get('TEST_JOB_NAME', 'Tests report')
    report.title = job_name
def pytest_html_results_table_header(cells):
    """Drop the trailing column, add a 'Title' header and reorder the
    columns to Result, Title, Test, Duration."""
    cells.pop()
    cells.insert(4, html.th('Title'))
    # Pull the last three cells off the end (title, duration, test) and
    # splice them back in the desired order after the first column.
    title, duration, test = cells.pop(), cells.pop(), cells.pop()
    cells[1:1] = [title, test, duration]
def pytest_html_results_table_row(report, cells):
    """Mirror the header hook for each row: drop the trailing cell, insert
    the test title and reorder to Result, Title, Test, Duration.

    Reads ``report.test_tile`` (sic) which is set by
    ``pytest_runtest_makereport``.
    """
    cells.pop()
    cells.insert(4, html.td(report.test_tile))
    title, duration, test = cells.pop(), cells.pop(), cells.pop()
    cells[1:1] = [title, test, duration]
# noinspection PyUnresolvedReferences
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item):
    """Attach the docstring title/description and a failure screenshot to
    the HTML report.

    The second docstring line is used as the test title and stored on
    ``report.test_tile`` (name kept as-is: the results-table hook reads
    it).  On failure a screenshot is saved to SCREENSHOTS_DIR and embedded
    in the report.
    """
    outcome = yield
    report = outcome.get_result()
    docs = item.function.__doc__.split('\n') if item.function.__doc__ else []
    # BUG FIX: the original guarded docs[1] with ``len(docs) >= 1`` which
    # raises IndexError for a single-line docstring; at least two elements
    # are required to index [1].
    report.test_tile = docs[1] if len(docs) > 1 else ''
    extra = getattr(report, 'extra', [])
    if report.when == 'call':
        htmldocs = []
        if len(docs) >= 2:
            docs = docs[1:]
            for doc in docs:
                if doc.strip() == '':
                    htmldocs.append('<br>')
                else:
                    htmldocs.append(f'<p style="margin: 5px; margin-left: 12px;">{doc}</p>')
            docstr = ''.join(htmldocs[2:len(htmldocs) - 1])
            extra.append(pytest_html.extras.html(f'<h4 style="margin: 8px;">Test description</h4>{docstr}'))
        if report.failed:
            # Prefer the driver wrapped by the actions fixture; fall back to
            # a bare driver fixture if present.
            actions: Actions = item.funcargs.get('actions', None)
            driver = item.funcargs.get('driver', None)
            wd = None
            if actions:
                wd = actions.element_provider.driver
            if driver:
                wd = driver
            if wd:
                # uuid4().hex is the dashless form of str(uuid4())
                uid = uuid.uuid4().hex[:15]
                name = f'{item.function.__name__}__{uid}.png'
                spath = f'{SCREENSHOTS_DIR}/{name}'
                print('taking screenshot ->', spath)
                wd.save_screenshot(spath)
                dom = '<div class="image">'
                dom += f'<a href="screenshots/{name}" target="_blank">'
                dom += f'<img src="screenshots/{name}"></a></div>'
                extra.append(pytest_html.extras.html(dom))
    report.extra = extra
|
import logging
from typing import List, Callable, Dict, Optional, Union
import math
import qtawesome as qta
from qtpy.QtCore import Qt, QTimer
from qtpy.QtGui import QFont, QCloseEvent
from qtpy.QtWidgets import QDialog, QFrame, QGridLayout, QHBoxLayout, QToolButton, QButtonGroup, QLabel, QSlider, \
QDoubleSpinBox, QSpacerItem, QSizePolicy, QWidget, QAbstractSpinBox, QListWidgetItem
from acoustics.standards.iec_61260_1_2014 import NOMINAL_OCTAVE_CENTER_FREQUENCIES
from model.iir import LowShelf, HighShelf, PeakingEQ, CompleteFilter, Passthrough, SOS
from model.limits import PhaseRangeCalculator, DecibelRangeCalculator
from model.magnitude import MagnitudeModel
from model.preferences import GEQ_GEOMETRY, GEQ_GRAPH_X_MIN, GEQ_GRAPH_X_MAX, get_filter_colour, Preferences
from model.xy import MagnitudeData
from ui.geq import Ui_geqDialog
# Name of the graphic-EQ preset (also the dialog's default preset).
GEQ = 'GEQ'
# Module level logger
logger = logging.getLogger('geq')
class GeqDialog(QDialog, Ui_geqDialog):
    """Dialog for editing a bank of PEQ/shelf filters graphic-EQ style.

    Shows one ``PeqEditor`` per filter plus a live preview chart of the
    combined (and optionally individual) magnitude/phase responses.  The
    selected channels and resulting filters are passed to ``on_save`` when
    the dialog is accepted.
    """

    def __init__(self, parent, prefs: Preferences, channels: Dict[str, bool], existing_filters: List[SOS],
                 on_save: Callable[[List[str], List[SOS]], None], preset: str = GEQ, **kwargs):
        super(GeqDialog, self).__init__(parent)
        self.__on_save = on_save
        self.prefs = prefs
        self.setupUi(self)
        from model.report import block_signals
        with block_signals(self.presetSelector):
            self.presetSelector.addItem('')
            self.presetSelector.addItem(GEQ)
            self.presetSelector.addItem('BEQ')
        for c in channels.keys():
            self.channelList.addItem(c)
        self.presetSelector.currentTextChanged.connect(self.__load_preset)
        self.limitsButton.setIcon(qta.icon('fa5s.arrows-alt'))
        self.showPhase.setIcon(qta.icon('mdi.cosine-wave'))
        self.advancedMode.setIcon(qta.icon('mdi.toggle-switch'))
        self.showIndividual.setIcon(qta.icon('fa5s.chart-line'))
        # single-shot timer debounces chart redraws while sliders move
        self.__mag_update_timer = QTimer(self)
        self.__mag_update_timer.setSingleShot(True)
        self.__peq_editors: List[PeqEditor] = []
        self.scrollableLayout.addSpacerItem(QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum))
        if preset:
            self.presetSelector.setCurrentText(preset)
        if existing_filters:
            self.__load_filters(existing_filters)
        else:
            self.update_peq_editors()
        self.__magnitude_model = MagnitudeModel('preview', self.previewChart, prefs,
                                               self.__get_data(), 'Filter', fill_primary=False,
                                               x_min_pref_key=GEQ_GRAPH_X_MIN, x_max_pref_key=GEQ_GRAPH_X_MAX,
                                               secondary_data_provider=self.__get_data('phase'),
                                               secondary_name='Phase', secondary_prefix='deg', fill_secondary=False,
                                               y_range_calc=DecibelRangeCalculator(60),
                                               y2_range_calc=PhaseRangeCalculator(), show_y2_in_legend=False, **kwargs)
        self.limitsButton.setToolTip('Set graph axis limits')
        self.showPhase.toggled.connect(self.__trigger_redraw)
        self.showPhase.setToolTip('Display phase response')
        self.showIndividual.toggled.connect(self.__trigger_redraw)
        self.showIndividual.setToolTip('Display individual filter responses')
        self.advancedMode.toggled.connect(lambda b: [p.advanced(b) for p in self.__peq_editors])
        # BUG FIX: the original reused double quotes inside a double-quoted
        # f-string (f"mdi.toggle-switch{"" if b else "-off"}"), which is a
        # SyntaxError on Python < 3.12.
        self.advancedMode.toggled.connect(
            lambda b: self.advancedMode.setIcon(qta.icon(f"mdi.toggle-switch{'' if b else '-off'}")))
        self.advancedMode.setToolTip('Show Q and Frequency Sliders')
        self.channelList.itemSelectionChanged.connect(self.__trigger_redraw)
        selected_channels = [c for c, b in channels.items() if b]
        for i in range(self.channelList.count()):
            item: QListWidgetItem = self.channelList.item(i)
            item.setSelected(item.text() in selected_channels)
        self.__mag_update_timer.timeout.connect(self.__magnitude_model.redraw)

    def __load_filters(self, to_load: List[SOS]):
        """Populate the editors from saved filters (PEQ/shelf types only)."""
        valid = [f for f in to_load if isinstance(f, (PeakingEQ, LowShelf, HighShelf))]
        self.peqCount.setValue(len(valid))
        self.update_peq_editors()
        for i, f in enumerate(valid):
            self.__peq_editors[i].load(f)

    def __load_preset(self, preset: str):
        """Reset all editors to a named preset layout."""
        if preset == GEQ:
            # low shelf + one PEQ per nominal octave centre + high shelf
            freqs = NOMINAL_OCTAVE_CENTER_FREQUENCIES.tolist()
            self.peqCount.setValue(len(freqs) + 2)
            self.__peq_editors[0].reset('LS', 40, 0.707)
            for i, freq in enumerate(freqs):
                self.__peq_editors[i+1].reset('PEQ', freq, 1.0)
            self.__peq_editors[-1].reset('HS', 8000, 0.707)
        elif preset == 'BEQ':
            self.peqCount.setValue(10)
            for i in range(10):
                self.__peq_editors[i].reset('LS', 20, 0.8)

    def __trigger_redraw(self):
        """Schedule a debounced redraw of the preview chart."""
        if not self.__mag_update_timer.isActive():
            self.__mag_update_timer.start(20)

    def __get_data(self, mode='mag'):
        """Return a data provider callable bound to ``mode`` ('mag'/'phase')."""
        return lambda *args, **kwargs: self.get_curve_data(mode, *args, **kwargs)

    def get_curve_data(self, mode, reference=None):
        ''' preview of the filter to display on the chart '''
        result = []
        final_filter = CompleteFilter(fs=48000, filters=self.__get_filters(), sort_by_id=True)
        if mode == 'mag' or self.showPhase.isChecked():
            extra = 0
            if len(final_filter) > 0:
                result.append(final_filter.get_transfer_function()
                              .get_data(mode=mode, colour=get_filter_colour(len(result))))
            else:
                extra += 1
            for i, f in enumerate(final_filter):
                if self.showIndividual.isChecked():
                    colour = get_filter_colour(len(result) + extra)
                    data: MagnitudeData = f.get_transfer_function().get_data(mode=mode, colour=colour, linestyle=':')
                    data.override_name = f"PEQ {i}"
                    result.append(data)
        return result

    def __get_filters(self, include_zero=False) -> List[SOS]:
        """Collect filters from the active editors, dropping 0 dB entries
        unless ``include_zero`` is set."""
        filters = [e.make_filter(include_zero) for i, e in enumerate(self.__peq_editors) if i < self.peqCount.value()]
        return [f for f in filters if f]

    def show_limits(self):
        ''' shows the limits dialog for the filter chart. '''
        self.__magnitude_model.show_limits()

    def update_peq_editors(self):
        """Show or create editors up to peqCount, hiding any surplus."""
        for i in range(self.peqCount.value()):
            if i >= len(self.__peq_editors):
                self.__create_peq_editor(i)
            else:
                self.__peq_editors[i].show()
        if self.peqCount.value() < len(self.__peq_editors):
            for i in range(self.peqCount.value(), len(self.__peq_editors)):
                self.__peq_editors[i].hide()

    def __create_peq_editor(self, i: int):
        """Append a new editor at position ``i`` in the scrollable layout."""
        editor = PeqEditor(self.scrollable, i, self.__trigger_redraw)
        self.__peq_editors.append(editor)
        self.scrollableLayout.insertWidget(i, editor.widget)

    def accept(self):
        """Pass selected channels and filters to the save callback, persist
        the window geometry and close."""
        self.__on_save([c.text() for c in self.channelList.selectedItems()], self.__get_filters(include_zero=True))
        self.prefs.set(GEQ_GEOMETRY, self.saveGeometry())
        super().accept()

    def __restore_geometry(self):
        ''' loads the saved window size '''
        geometry = self.prefs.get(GEQ_GEOMETRY)
        if geometry is not None:
            self.restoreGeometry(geometry)

    def reject(self):
        """Persist the window geometry on cancel and close."""
        self.prefs.set(GEQ_GEOMETRY, self.saveGeometry())
        super().reject()
class PeqEditor:
    """Editor widget for a single PEQ/shelf filter: exclusive filter-type
    buttons plus gain/frequency/Q sliders and spin boxes laid out in a
    frame.  ``idx`` is the zero based editor position (used as the filter
    id) and ``on_change`` is invoked whenever any parameter or the filter
    type changes.
    """
    def __init__(self, parent: QWidget, idx: int, on_change: Callable[[], None]):
        font = QFont()
        font.setPointSize(10)
        self.__idx = idx
        # frame containing the whole editor
        self.__geq_frame = QFrame(parent)
        self.__geq_frame.setFrameShape(QFrame.StyledPanel)
        self.__geq_frame.setFrameShadow(QFrame.Raised)
        self.__grid_layout = QGridLayout(self.__geq_frame)
        # title label ("PEQ <n>", 1-based for display)
        title_font = QFont()
        title_font.setPointSize(10)
        title_font.setItalic(True)
        title_font.setBold(True)
        self.__title = QLabel(self.__geq_frame)
        self.__title.setFont(title_font)
        self.__title.setText(f"PEQ {idx + 1}")
        self.__title.setAlignment(Qt.AlignCenter)
        # exclusive filter-type buttons: P (peaking), LS, HS
        self.__filter_selector_layout = QHBoxLayout()
        self.__peq_button = QToolButton(self.__geq_frame)
        self.__peq_button.setCheckable(True)
        self.__peq_button.setChecked(True)
        self.__peq_button.setText("P")
        self.__button_group = QButtonGroup(self.__geq_frame)
        self.__button_group.addButton(self.__peq_button)
        self.__filter_selector_layout.addWidget(self.__peq_button)
        self.__ls_button = QToolButton(self.__geq_frame)
        self.__ls_button.setCheckable(True)
        self.__ls_button.setText("LS")
        self.__button_group.addButton(self.__ls_button)
        self.__filter_selector_layout.addWidget(self.__ls_button)
        self.__hs_button = QToolButton(self.__geq_frame)
        self.__hs_button.setCheckable(True)
        self.__hs_button.setText("HS")
        self.__button_group.addButton(self.__hs_button)
        self.__filter_selector_layout.addWidget(self.__hs_button)
        self.__grid_layout.addWidget(self.__title, 0, 0, 1, 4)
        self.__grid_layout.addLayout(self.__filter_selector_layout, 1, 0, 1, 4)
        # gain: slider in centi-dB (-3000..3000) mirrored by a dB spin box
        self.__gain_label = QLabel(self.__geq_frame)
        self.__gain_label.setFont(font)
        self.__gain_label.setText("Gain (dB)")
        self.__gain_slider = QSlider(self.__geq_frame)
        self.__gain_slider.setMinimum(-3000)
        self.__gain_slider.setMaximum(3000)
        self.__gain_slider.setOrientation(Qt.Vertical)
        self.__gain_slider.setTickPosition(QSlider.TicksBelow)
        self.__gain_slider.setTickInterval(300)
        self.__gain_slider.setToolTip('Gain (dB)')
        self.__gain = QDoubleSpinBox(self.__geq_frame)
        self.__gain.setFont(font)
        self.__gain.setMinimum(-30)
        self.__gain.setMaximum(30)
        self.__gain.setSingleStep(0.1)
        self.__gain.setDecimals(2)
        # frequency: slider on a log2 scale (100 steps per octave, see
        # to/from_freq_slider below) mirrored by a Hz spin box
        self.__freq_slider = QSlider(self.__geq_frame)
        self.__freq_slider.setOrientation(Qt.Vertical)
        self.__freq_slider.setTickPosition(QSlider.TicksBelow)
        self.__freq_slider.setMinimum(1)
        self.__freq_slider.setMaximum(1500)
        self.__freq_slider.setTickInterval(100)
        self.__freq_slider.setToolTip('Frequency (Hz)')
        self.__freq_label = QLabel(self.__geq_frame)
        self.__freq_label.setFont(font)
        self.__freq_label.setText("Freq (Hz)")
        self.__freq = QDoubleSpinBox(self.__geq_frame)
        self.__freq.setFont(font)
        self.__freq.setMinimum(1)
        self.__freq.setMaximum(24000)
        self.__freq.setDecimals(1)
        self.__freq.setStepType(QAbstractSpinBox.AdaptiveDecimalStepType)
        # Q: slider on an exponential scale (see to/from_q_slider below)
        # mirrored by a spin box
        self.__q_slider = QSlider(self.__geq_frame)
        self.__q_slider.setOrientation(Qt.Vertical)
        self.__q_slider.setTickPosition(QSlider.TicksBelow)
        self.__q_slider.setMinimum(1)
        self.__q_slider.setMaximum(20000000)
        self.__q_slider.setTickInterval(1000000)
        self.__q_slider.setToolTip('Q')
        self.__q_label = QLabel(self.__geq_frame)
        self.__q_label.setFont(font)
        self.__q_label.setText("Q")
        self.__q = QDoubleSpinBox(self.__geq_frame)
        self.__q.setFont(font)
        self.__q.setMinimum(0.001)
        self.__q.setMaximum(20)
        self.__q.setDecimals(3)
        self.__q.setStepType(QAbstractSpinBox.AdaptiveDecimalStepType)
        # slider<->value mappings; each to_* is the inverse of from_*
        def to_q_slider(v: float) -> float:
            # map Q onto the slider's exponential scale (finer steps at low Q)
            return (math.log((v + 0.15) / 0.15) / math.log(1.28)) * 1000000
        def from_q_slider(v: float) -> float:
            return (0.15 * (1.28 ** (v / 1000000))) - 0.15
        def to_freq_slider(v: float) -> float:
            # log2 scale: 100 slider steps per octave
            return (math.log(v) / math.log(2)) * 100
        def from_freq_slider(v: float) -> float:
            return 2**(v/100)
        # keep sliders and spin boxes in sync; the slider->spinbox direction
        # fires on_change via the spin box, while the spinbox->slider
        # direction goes through __update_slider which blocks signals to
        # avoid a feedback loop
        self.__freq_slider.valueChanged.connect(lambda v: self.__freq.setValue(from_freq_slider(v)))
        self.__q_slider.valueChanged.connect(lambda v: self.__q.setValue(from_q_slider(v)))
        self.__gain_slider.valueChanged.connect(lambda v: self.__gain.setValue(v/100))
        self.__gain.valueChanged.connect(on_change)
        self.__q.valueChanged.connect(on_change)
        self.__freq.valueChanged.connect(on_change)
        self.__gain.valueChanged.connect(lambda v: self.__update_slider(self.__gain_slider, v, lambda v: v*100))
        self.__q.valueChanged.connect(lambda v: self.__update_slider(self.__q_slider, v, to_q_slider))
        self.__freq.valueChanged.connect(lambda v: self.__update_slider(self.__freq_slider, v, to_freq_slider))
        self.__ls_button.toggled.connect(on_change)
        self.__hs_button.toggled.connect(on_change)
        self.__peq_button.toggled.connect(on_change)
        # defaults: 1000 on the log2 slider ~= 1024 Hz, Q = 0.707
        self.__freq_slider.setValue(1000)
        self.__q_slider.setValue(to_q_slider(0.707))
        # lay out the controls: sliders side by side, labels/spin boxes in
        # the first column, expanding spacer at the bottom
        self.__grid_layout.addWidget(self.__gain_label, 2, 0, 1, 1)
        self.__grid_layout.addWidget(self.__gain_slider, 2, 1, 7, 1)
        self.__grid_layout.addWidget(self.__freq_slider, 2, 2, 7, 1)
        self.__grid_layout.addWidget(self.__q_slider, 2, 3, 7, 1)
        self.__grid_layout.addWidget(self.__gain, 3, 0, 1, 1)
        self.__grid_layout.addWidget(self.__freq_label, 4, 0, 1, 1)
        self.__grid_layout.addWidget(self.__freq, 5, 0, 1, 1)
        self.__grid_layout.addWidget(self.__q_label, 6, 0, 1, 1)
        self.__grid_layout.addWidget(self.__q, 7, 0, 1, 1)
        self.__grid_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding), 8, 0, 1, 1)
    @staticmethod
    def __update_slider(slider: QSlider, v: float, translate: Callable[[float], float] = lambda x: x) -> None:
        """Set the slider value without re-firing valueChanged back at the
        spin box that triggered the update."""
        from model.report import block_signals
        with block_signals(slider):
            slider.setValue(translate(v))
    def make_filter(self, include_zero) -> Optional[Union[LowShelf, HighShelf, PeakingEQ, Passthrough]]:
        """Create a filter from the current UI state.

        Returns ``None`` for a 0 dB filter unless ``include_zero`` is True.
        The ``Passthrough`` branch is only reachable if no type button is
        checked, which the exclusive button group normally prevents.
        """
        if math.isclose(self.__gain.value(), 0.0) and not include_zero:
            return None
        if self.__ls_button.isChecked():
            return LowShelf(48000, self.__freq.value(), self.__q.value(), self.__gain.value(), f_id=self.__idx)
        elif self.__hs_button.isChecked():
            return HighShelf(48000, self.__freq.value(), self.__q.value(), self.__gain.value(), f_id=self.__idx)
        elif self.__peq_button.isChecked():
            return PeakingEQ(48000, self.__freq.value(), self.__q.value(), self.__gain.value(), f_id=self.__idx)
        else:
            return Passthrough(fs=48000)
    def show(self) -> None:
        """Make the editor visible."""
        self.__geq_frame.show()
    def hide(self) -> None:
        """Hide the editor (kept alive for reuse)."""
        self.__geq_frame.hide()
    @property
    def widget(self) -> QWidget:
        """The editor's top level frame, for insertion into a layout."""
        return self.__geq_frame
    def reset(self, filter_type: str, freq: float, q: float, gain: float = 0.0) -> None:
        """Set the filter type ('LS', 'HS' or anything else for PEQ) and
        the frequency/Q/gain values."""
        if filter_type == 'LS':
            self.__ls_button.setChecked(True)
        elif filter_type == 'HS':
            self.__hs_button.setChecked(True)
        else:
            self.__peq_button.setChecked(True)
        self.__freq.setValue(freq)
        self.__q.setValue(q)
        self.__gain.setValue(gain)
    def advanced(self, on: bool) -> None:
        """Show or hide the Q and frequency sliders (advanced mode)."""
        self.__q_slider.setVisible(on)
        self.__freq_slider.setVisible(on)
    def load(self, f: Union[LowShelf, HighShelf, PeakingEQ]) -> None:
        """Populate the editor from an existing filter."""
        self.reset(f.filter_type, f.freq, f.q, gain= f.gain)
| import logging
from typing import List, Callable, Dict, Optional, Union
import math
import qtawesome as qta
from qtpy.QtCore import Qt, QTimer
from qtpy.QtGui import QFont, QCloseEvent
from qtpy.QtWidgets import QDialog, QFrame, QGridLayout, QHBoxLayout, QToolButton, QButtonGroup, QLabel, QSlider, \
QDoubleSpinBox, QSpacerItem, QSizePolicy, QWidget, QAbstractSpinBox, QListWidgetItem
from acoustics.standards.iec_61260_1_2014 import NOMINAL_OCTAVE_CENTER_FREQUENCIES
from model.iir import LowShelf, HighShelf, PeakingEQ, CompleteFilter, Passthrough, SOS
from model.limits import PhaseRangeCalculator, DecibelRangeCalculator
from model.magnitude import MagnitudeModel
from model.preferences import GEQ_GEOMETRY, GEQ_GRAPH_X_MIN, GEQ_GRAPH_X_MAX, get_filter_colour, Preferences
from model.xy import MagnitudeData
from ui.geq import Ui_geqDialog
# Name of the graphic-EQ preset (also the dialog's default preset).
GEQ = 'GEQ'
# Module level logger
logger = logging.getLogger('geq')
class GeqDialog(QDialog, Ui_geqDialog):
def __init__(self, parent, prefs: Preferences, channels: Dict[str, bool], existing_filters: List[SOS],
on_save: Callable[[List[str], List[SOS]], None], preset: str = GEQ, **kwargs):
super(GeqDialog, self).__init__(parent)
self.__on_save = on_save
self.prefs = prefs
self.setupUi(self)
from model.report import block_signals
with block_signals(self.presetSelector):
self.presetSelector.addItem('')
self.presetSelector.addItem(GEQ)
self.presetSelector.addItem('BEQ')
for c in channels.keys():
self.channelList.addItem(c)
self.presetSelector.currentTextChanged.connect(self.__load_preset)
self.limitsButton.setIcon(qta.icon('fa5s.arrows-alt'))
self.showPhase.setIcon(qta.icon('mdi.cosine-wave'))
self.advancedMode.setIcon(qta.icon('mdi.toggle-switch'))
self.showIndividual.setIcon(qta.icon('fa5s.chart-line'))
self.__mag_update_timer = QTimer(self)
self.__mag_update_timer.setSingleShot(True)
self.__peq_editors: List[PeqEditor] = []
self.scrollableLayout.addSpacerItem(QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum))
if preset:
self.presetSelector.setCurrentText(preset)
if existing_filters:
self.__load_filters(existing_filters)
else:
self.update_peq_editors()
self.__magnitude_model = MagnitudeModel('preview', self.previewChart, prefs,
self.__get_data(), 'Filter', fill_primary=False,
x_min_pref_key=GEQ_GRAPH_X_MIN, x_max_pref_key=GEQ_GRAPH_X_MAX,
secondary_data_provider=self.__get_data('phase'),
secondary_name='Phase', secondary_prefix='deg', fill_secondary=False,
y_range_calc=DecibelRangeCalculator(60),
y2_range_calc=PhaseRangeCalculator(), show_y2_in_legend=False, **kwargs)
self.limitsButton.setToolTip('Set graph axis limits')
self.showPhase.toggled.connect(self.__trigger_redraw)
self.showPhase.setToolTip('Display phase response')
self.showIndividual.toggled.connect(self.__trigger_redraw)
self.showIndividual.setToolTip('Display individual filter responses')
self.advancedMode.toggled.connect(lambda b: [p.advanced(b) for p in self.__peq_editors])
self.advancedMode.toggled.connect(lambda b: self.advancedMode.setIcon(qta.icon(f"mdi.toggle-switch{'' if b else '-off'}")))
self.advancedMode.setToolTip('Show Q and Frequency Sliders')
self.channelList.itemSelectionChanged.connect(self.__trigger_redraw)
selected_channels = [c for c, b in channels.items() if b]
for i in range(self.channelList.count()):
item: QListWidgetItem = self.channelList.item(i)
item.setSelected(item.text() in selected_channels)
self.__mag_update_timer.timeout.connect(self.__magnitude_model.redraw)
def __load_filters(self, to_load: List[SOS]):
valid = [f for f in to_load if isinstance(f, (PeakingEQ, LowShelf, HighShelf))]
self.peqCount.setValue(len(valid))
self.update_peq_editors()
for i, f in enumerate(valid):
self.__peq_editors[i].load(f)
def __load_preset(self, preset: str):
if preset == 'GEQ':
freqs = NOMINAL_OCTAVE_CENTER_FREQUENCIES.tolist()
self.peqCount.setValue(len(freqs) + 2)
self.__peq_editors[0].reset('LS', 40, 0.707)
for i, freq in enumerate(freqs):
self.__peq_editors[i+1].reset('PEQ', freq, 1.0)
self.__peq_editors[-1].reset('HS', 8000, 0.707)
elif preset == 'BEQ':
self.peqCount.setValue(10)
for i in range(10):
self.__peq_editors[i].reset('LS', 20, 0.8)
def __trigger_redraw(self):
if not self.__mag_update_timer.isActive():
self.__mag_update_timer.start(20)
def __get_data(self, mode='mag'):
return lambda *args, **kwargs: self.get_curve_data(mode, *args, **kwargs)
def get_curve_data(self, mode, reference=None):
''' preview of the filter to display on the chart '''
result = []
final_filter = CompleteFilter(fs=48000, filters=self.__get_filters(), sort_by_id=True)
if mode == 'mag' or self.showPhase.isChecked():
extra = 0
if len(final_filter) > 0:
result.append(final_filter.get_transfer_function()
.get_data(mode=mode, colour=get_filter_colour(len(result))))
else:
extra += 1
for i, f in enumerate(final_filter):
if self.showIndividual.isChecked():
colour = get_filter_colour(len(result) + extra)
data: MagnitudeData = f.get_transfer_function().get_data(mode=mode, colour=colour, linestyle=':')
data.override_name = f"PEQ {i}"
result.append(data)
return result
def __get_filters(self, include_zero=False) -> List[SOS]:
filters = [e.make_filter(include_zero) for i, e in enumerate(self.__peq_editors) if i < self.peqCount.value()]
return [f for f in filters if f]
def show_limits(self):
''' shows the limits dialog for the filter chart. '''
self.__magnitude_model.show_limits()
def update_peq_editors(self):
for i in range(self.peqCount.value()):
if i >= len(self.__peq_editors):
self.__create_peq_editor(i)
else:
self.__peq_editors[i].show()
if self.peqCount.value() < len(self.__peq_editors):
for i in range(self.peqCount.value(), len(self.__peq_editors)):
self.__peq_editors[i].hide()
def __create_peq_editor(self, i: int):
editor = PeqEditor(self.scrollable, i, self.__trigger_redraw)
self.__peq_editors.append(editor)
self.scrollableLayout.insertWidget(i, editor.widget)
def accept(self):
self.__on_save([c.text() for c in self.channelList.selectedItems()], self.__get_filters(include_zero=True))
self.prefs.set(GEQ_GEOMETRY, self.saveGeometry())
super().accept()
def __restore_geometry(self):
''' loads the saved window size '''
geometry = self.prefs.get(GEQ_GEOMETRY)
if geometry is not None:
self.restoreGeometry(geometry)
def reject(self):
self.prefs.set(GEQ_GEOMETRY, self.saveGeometry())
super().reject()
class PeqEditor:
    '''
    A single parametric EQ band editor.

    Renders a titled frame containing filter type buttons (PEQ / low shelf / high shelf)
    plus gain, frequency and Q controls. Each control is a spin box paired with a
    vertical slider; slider positions map to spin box values via log scales for
    frequency and Q and a x100 linear scale for gain. Every value change is reported
    to the owner via the on_change callback.
    '''

    def __init__(self, parent: QWidget, idx: int, on_change: Callable[[], None]):
        '''
        :param parent: the widget that owns the editor frame.
        :param idx: zero based index of this band, used for the title and the filter id.
        :param on_change: invoked whenever any control value or filter type changes.
        '''
        font = QFont()
        font.setPointSize(10)
        self.__idx = idx
        self.__geq_frame = QFrame(parent)
        self.__geq_frame.setFrameShape(QFrame.StyledPanel)
        self.__geq_frame.setFrameShadow(QFrame.Raised)
        self.__grid_layout = QGridLayout(self.__geq_frame)
        title_font = QFont()
        title_font.setPointSize(10)
        title_font.setItalic(True)
        title_font.setBold(True)
        self.__title = QLabel(self.__geq_frame)
        self.__title.setFont(title_font)
        self.__title.setText(f"PEQ {idx + 1}")
        self.__title.setAlignment(Qt.AlignCenter)
        # exclusive filter type selector: P (peaking, default) / LS / HS
        self.__filter_selector_layout = QHBoxLayout()
        self.__peq_button = QToolButton(self.__geq_frame)
        self.__peq_button.setCheckable(True)
        self.__peq_button.setChecked(True)
        self.__peq_button.setText("P")
        self.__button_group = QButtonGroup(self.__geq_frame)
        self.__button_group.addButton(self.__peq_button)
        self.__filter_selector_layout.addWidget(self.__peq_button)
        self.__ls_button = QToolButton(self.__geq_frame)
        self.__ls_button.setCheckable(True)
        self.__ls_button.setText("LS")
        self.__button_group.addButton(self.__ls_button)
        self.__filter_selector_layout.addWidget(self.__ls_button)
        self.__hs_button = QToolButton(self.__geq_frame)
        self.__hs_button.setCheckable(True)
        self.__hs_button.setText("HS")
        self.__button_group.addButton(self.__hs_button)
        self.__filter_selector_layout.addWidget(self.__hs_button)
        self.__grid_layout.addWidget(self.__title, 0, 0, 1, 4)
        self.__grid_layout.addLayout(self.__filter_selector_layout, 1, 0, 1, 4)
        # gain: the int slider works in hundredths of a dB so it can span -30..+30 dB
        self.__gain_label = QLabel(self.__geq_frame)
        self.__gain_label.setFont(font)
        self.__gain_label.setText("Gain (dB)")
        self.__gain_slider = QSlider(self.__geq_frame)
        self.__gain_slider.setMinimum(-3000)
        self.__gain_slider.setMaximum(3000)
        self.__gain_slider.setOrientation(Qt.Vertical)
        self.__gain_slider.setTickPosition(QSlider.TicksBelow)
        self.__gain_slider.setTickInterval(300)
        self.__gain_slider.setToolTip('Gain (dB)')
        self.__gain = QDoubleSpinBox(self.__geq_frame)
        self.__gain.setFont(font)
        self.__gain.setMinimum(-30)
        self.__gain.setMaximum(30)
        self.__gain.setSingleStep(0.1)
        self.__gain.setDecimals(2)
        # frequency: slider position maps to 2^(v/100) Hz (see from_freq_slider below)
        self.__freq_slider = QSlider(self.__geq_frame)
        self.__freq_slider.setOrientation(Qt.Vertical)
        self.__freq_slider.setTickPosition(QSlider.TicksBelow)
        self.__freq_slider.setMinimum(1)
        self.__freq_slider.setMaximum(1500)
        self.__freq_slider.setTickInterval(100)
        self.__freq_slider.setToolTip('Frequency (Hz)')
        self.__freq_label = QLabel(self.__geq_frame)
        self.__freq_label.setFont(font)
        self.__freq_label.setText("Freq (Hz)")
        self.__freq = QDoubleSpinBox(self.__geq_frame)
        self.__freq.setFont(font)
        self.__freq.setMinimum(1)
        self.__freq.setMaximum(24000)
        self.__freq.setDecimals(1)
        self.__freq.setStepType(QAbstractSpinBox.AdaptiveDecimalStepType)
        # Q: wide int range mapped to 0.001..20 via the log scale in to_q_slider/from_q_slider
        self.__q_slider = QSlider(self.__geq_frame)
        self.__q_slider.setOrientation(Qt.Vertical)
        self.__q_slider.setTickPosition(QSlider.TicksBelow)
        self.__q_slider.setMinimum(1)
        self.__q_slider.setMaximum(20000000)
        self.__q_slider.setTickInterval(1000000)
        self.__q_slider.setToolTip('Q')
        self.__q_label = QLabel(self.__geq_frame)
        self.__q_label.setFont(font)
        self.__q_label.setText("Q")
        self.__q = QDoubleSpinBox(self.__geq_frame)
        self.__q.setFont(font)
        self.__q.setMinimum(0.001)
        self.__q.setMaximum(20)
        self.__q.setDecimals(3)
        self.__q.setStepType(QAbstractSpinBox.AdaptiveDecimalStepType)
        # slider <-> spinbox scale conversions; to_* and from_* are inverses of each
        # other. Constants 0.15/1.28 shape the Q curve so low Q values get finer
        # slider resolution — presumably tuned empirically; NOTE(review): verify.
        # NOTE(review): these return floats but QSlider.setValue takes an int —
        # relies on the binding's implicit conversion; confirm against the Qt wrapper in use.
        def to_q_slider(v: float) -> float:
            return (math.log((v + 0.15) / 0.15) / math.log(1.28)) * 1000000
        def from_q_slider(v: float) -> float:
            return (0.15 * (1.28 ** (v / 1000000))) - 0.15
        def to_freq_slider(v: float) -> float:
            return (math.log(v) / math.log(2)) * 100
        def from_freq_slider(v: float) -> float:
            return 2**(v/100)
        # sliders drive the spin boxes...
        self.__freq_slider.valueChanged.connect(lambda v: self.__freq.setValue(from_freq_slider(v)))
        self.__q_slider.valueChanged.connect(lambda v: self.__q.setValue(from_q_slider(v)))
        self.__gain_slider.valueChanged.connect(lambda v: self.__gain.setValue(v/100))
        # ...spin boxes notify the owner and sync the sliders back (with signals
        # blocked, see __update_slider, to avoid feedback loops)
        self.__gain.valueChanged.connect(on_change)
        self.__q.valueChanged.connect(on_change)
        self.__freq.valueChanged.connect(on_change)
        self.__gain.valueChanged.connect(lambda v: self.__update_slider(self.__gain_slider, v, lambda v: v*100))
        self.__q.valueChanged.connect(lambda v: self.__update_slider(self.__q_slider, v, to_q_slider))
        self.__freq.valueChanged.connect(lambda v: self.__update_slider(self.__freq_slider, v, to_freq_slider))
        self.__ls_button.toggled.connect(on_change)
        self.__hs_button.toggled.connect(on_change)
        self.__peq_button.toggled.connect(on_change)
        # seed defaults (1 kHz, Q=0.707) AFTER the connections above so the spin
        # boxes are synced by the slider signals — do not reorder.
        self.__freq_slider.setValue(1000)
        self.__q_slider.setValue(to_q_slider(0.707))
        self.__grid_layout.addWidget(self.__gain_label, 2, 0, 1, 1)
        self.__grid_layout.addWidget(self.__gain_slider, 2, 1, 7, 1)
        self.__grid_layout.addWidget(self.__freq_slider, 2, 2, 7, 1)
        self.__grid_layout.addWidget(self.__q_slider, 2, 3, 7, 1)
        self.__grid_layout.addWidget(self.__gain, 3, 0, 1, 1)
        self.__grid_layout.addWidget(self.__freq_label, 4, 0, 1, 1)
        self.__grid_layout.addWidget(self.__freq, 5, 0, 1, 1)
        self.__grid_layout.addWidget(self.__q_label, 6, 0, 1, 1)
        self.__grid_layout.addWidget(self.__q, 7, 0, 1, 1)
        self.__grid_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding), 8, 0, 1, 1)

    @staticmethod
    def __update_slider(slider: QSlider, v: float, translate: Callable[[float], float] = lambda x: x) -> None:
        ''' Moves a slider to match a spinbox value without re-firing valueChanged. '''
        from model.report import block_signals
        with block_signals(slider):
            slider.setValue(translate(v))

    def make_filter(self, include_zero) -> Optional[Union[LowShelf, HighShelf, PeakingEQ, Passthrough]]:
        '''
        Builds a filter from the current control values.
        :param include_zero: if False, a band with ~0 dB gain yields None (it has no audible effect).
        :return: the filter matching the selected type, a Passthrough if no type
            button is checked, or None for a skipped zero-gain band.
        '''
        if math.isclose(self.__gain.value(), 0.0) and not include_zero:
            return None
        if self.__ls_button.isChecked():
            return LowShelf(48000, self.__freq.value(), self.__q.value(), self.__gain.value(), f_id=self.__idx)
        elif self.__hs_button.isChecked():
            return HighShelf(48000, self.__freq.value(), self.__q.value(), self.__gain.value(), f_id=self.__idx)
        elif self.__peq_button.isChecked():
            return PeakingEQ(48000, self.__freq.value(), self.__q.value(), self.__gain.value(), f_id=self.__idx)
        else:
            return Passthrough(fs=48000)

    def show(self) -> None:
        ''' Shows the editor frame. '''
        self.__geq_frame.show()

    def hide(self) -> None:
        ''' Hides the editor frame. '''
        self.__geq_frame.hide()

    @property
    def widget(self) -> QWidget:
        ''' The root frame of this editor, for insertion into a layout. '''
        return self.__geq_frame

    def reset(self, filter_type: str, freq: float, q: float, gain: float = 0.0) -> None:
        '''
        Sets all controls in one shot.
        :param filter_type: 'LS', 'HS' or anything else for PEQ.
        :param freq: centre/corner frequency in Hz.
        :param q: filter Q.
        :param gain: gain in dB, defaults to 0.
        '''
        if filter_type == 'LS':
            self.__ls_button.setChecked(True)
        elif filter_type == 'HS':
            self.__hs_button.setChecked(True)
        else:
            self.__peq_button.setChecked(True)
        self.__freq.setValue(freq)
        self.__q.setValue(q)
        self.__gain.setValue(gain)

    def advanced(self, on: bool) -> None:
        ''' Toggles visibility of the Q and frequency sliders (advanced mode). '''
        self.__q_slider.setVisible(on)
        self.__freq_slider.setVisible(on)

    def load(self, f: Union[LowShelf, HighShelf, PeakingEQ]) -> None:
        ''' Copies an existing filter's type, frequency, Q and gain into the controls. '''
        self.reset(f.filter_type, f.freq, f.q, gain= f.gain)
# (file boundary: stray '|' concatenation artifact converted to a comment so the module parses)
import numpy as np
import tensorflow as tf
from pyuvdata import UVData, UVCal, UVFlag
from . import utils
import copy
import argparse
import itertools
import datetime
from pyuvdata import utils as uvutils
from .utils import echo
from .utils import PBARS
from . import cal_utils
from . import modeling
import re
# Registry of supported tf.keras optimizers, keyed by the name accepted by the
# `optimizer` argument of fit_gains_and_foregrounds.
OPTIMIZERS = {
    "Adadelta": tf.optimizers.Adadelta,
    "Adam": tf.optimizers.Adam,
    "Adamax": tf.optimizers.Adamax,
    "Ftrl": tf.optimizers.Ftrl,
    "Nadam": tf.optimizers.Nadam,
    "SGD": tf.optimizers.SGD,
    "RMSprop": tf.optimizers.RMSprop,
    "Adagrad": tf.optimizers.Adagrad
}
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
    """
    Order dict keys in order of number of baselines in each group

    chunk fit_groups in fg_model_comps_dict into chunks where all groups in the
    same chunk have the same number of baselines in each group.

    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group' visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
    use_redundancy: bool, optional
        If False, break fitting groups with the same number of baselines in each redundant
        sub_group into different fitting groups with no redundancy in each
        redundant subgroup. This is to prevent fitting groups with single
        redundant groups of varying lengths from being lumped into different chunks
        increasing the number of chunks has a more significant impact on run-time
        then increasing the number of baselines in each chunk.
        default is False.
    grp_size_threshold: int, optional
        when use_redundancy is False, only fitting groups with fewer than this many
        redundant sub-groups are broken apart; larger groups are left intact.
        default is 5.

    Returns
    -------
    fg_model_comps_dict_chunked: dict
        dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
        of baselines in each vector and the number of vectors. Each 2-tuple points to
        a dictionary where each key is the fitting group in fg_comps_dict that includes
        nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
        numpy.ndarray describing the modeling components for each fitting group in the chunk.
    """
    chunked_keys = {}
    maxvecs = {}
    # work on a copy so the caller's dict is never mutated.
    fg_model_comps_dict = copy.deepcopy(fg_model_comps_dict)
    if not use_redundancy:
        # We can remove redundancies for fitting groups of baselines that have the same
        # number of elements in each redundant group.
        for fit_grp in list(fg_model_comps_dict.keys()):
            rlens = np.asarray([len(red_grp) for red_grp in fit_grp])
            # only break up groups with small numbers of group elements.
            if np.allclose(rlens, np.mean(rlens)) and len(rlens) < grp_size_threshold:
                # split up groups: one new fitting group per redundant-group position.
                modeling_vectors = fg_model_comps_dict.pop(fit_grp)
                for rednum in range(int(rlens[0])):
                    fit_grp_new = tuple((red_grp[rednum],) for red_grp in fit_grp)
                    fg_model_comps_dict[fit_grp_new] = modeling_vectors
    for fit_grp in fg_model_comps_dict:
        # total number of baselines across all redundant groups in this fitting group.
        nbl = sum(len(red_grp) for red_grp in fit_grp)
        nvec = fg_model_comps_dict[fit_grp].shape[1]
        if nbl in chunked_keys:
            chunked_keys[nbl].append(fit_grp)
            maxvecs[nbl] = max(maxvecs[nbl], nvec)
        else:
            chunked_keys[nbl] = [fit_grp]
            maxvecs[nbl] = nvec
    return {
        (nbl, maxvecs[nbl]): {k: fg_model_comps_dict[k] for k in chunked_keys[nbl]}
        for nbl in chunked_keys
    }
def tensorize_fg_model_comps_dict(
    fg_model_comps_dict,
    ants_map,
    nfreqs,
    use_redundancy=False,
    dtype=np.float32,
    notebook_progressbar=False,
    verbose=False,
    grp_size_threshold=5,
):
    """Convert per-baseline model components into a Ndata x Ncomponent tensor

    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary where each key is a fitting group (tuple of tuples of 2-tuple
        antenna pairs) pointing to an (nred_grps * nfreqs x nvecs) numpy.ndarray
        of modeling components (see chunk_fg_comp_dict_by_nbls for details).
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    nfreqs: int, optional
        number of frequency channels
    use_redundancy: bool, optional
        passed through to chunk_fg_comp_dict_by_nbls; if False, small uniform
        fitting groups are broken apart so each chunk only holds groups with
        the same baseline count. default is False.
    dtype: numpy.dtype
        tensor data types
        default is np.float32
    notebook_progressbar: bool, optional
        accepted for interface consistency with sibling functions; unused here.
    verbose: bool, optional
        lots of text output. default is False.
    grp_size_threshold: int, optional
        passed through to chunk_fg_comp_dict_by_nbls. default is 5.

    Returns
    -------
    fg_model_comps: list
        list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
        where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same
        modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls
        to be the maximum number of vectors representing any of the ngrps baseline grps
        which means that many rows in nvecs will be zero. This is wasteful of memory but
        it allows us to take advantage of the fast dense matrix operations on a GPU.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
            group
                baseline - (int 2-tuple)
    """
    echo(
        f"{datetime.datetime.now()} Computing foreground components matrices...\n",
        verbose=verbose,
    )
    # chunk foreground components so every tensor has uniform (nbls, nvecs).
    fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(
        fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold
    )
    fg_model_comps = []
    corr_inds = []
    for nbls, nvecs in fg_model_comps_dict:
        chunk_groups = fg_model_comps_dict[(nbls, nvecs)]
        ngrps = len(chunk_groups)
        modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
        corr_inds_chunk = []
        for grpnum, modeling_grp in enumerate(chunk_groups):
            corr_inds_grp = []
            nbl = 0
            for rgrpnum, red_grp in enumerate(modeling_grp):
                for ap in red_grp:
                    i, j = ants_map[ap[0]], ants_map[ap[1]]
                    corr_inds_grp.append((i, j))
                    # copy this redundant group's frequency block for as many
                    # vectors as this fitting group actually has; rows beyond
                    # that stay zero (nvecs is the chunk-wide maximum).
                    vecslice = slice(0, chunk_groups[modeling_grp].shape[1])
                    compslice = slice(rgrpnum * nfreqs, (rgrpnum + 1) * nfreqs)
                    modeling_matrix[vecslice, grpnum, nbl] = chunk_groups[modeling_grp][compslice].T
                    nbl += 1
            corr_inds_chunk.append(corr_inds_grp)
        fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
        corr_inds.append(corr_inds_chunk)
    return fg_model_comps, corr_inds
def tensorize_data(
    uvdata,
    corr_inds,
    ants_map,
    polarization,
    time,
    data_scale_factor=1.0,
    weights=None,
    nsamples_in_weights=False,
    dtype=np.float32,
):
    """Convert data in uvdata object to a tensor

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing data, flags, and nsamples to tensorize.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
            group
                baseline - (int 2-tuple)
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    polarization: str
        pol-str of gain to extract.
    time: float
        time of data to convert to tensor.
    data_scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0
    weights: UVFlag object, optional
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is False.
    dtype: numpy.dtype
        data-type to store in tensor.
        default is np.float32

    Returns
    -------
    data_r: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the real components of the baselines specified by these 2-tuples.
    data_i: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the imag components of the baselines specified by these 2-tuples.
    wgts: tf.Tensor object
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the weights of the baselines specified by these 2-tuples.
    """
    # invert the antenna->index map so tensor indices can be turned back into antenna pairs
    ants_map_inv = {ants_map[i]: i for i in ants_map}
    dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
    data_r = np.zeros(dshape, dtype=dtype)
    data_i = np.zeros_like(data_r)
    wgts = np.zeros_like(data_r)
    wgtsum = 0.0
    for chunk in corr_inds:
        for fitgrp in chunk:
            for (i, j) in fitgrp:
                ap = ants_map_inv[i], ants_map_inv[j]
                bl = ap + (polarization,)
                # NOTE(review): _key2inds is a private pyuvdata API; dinds1 holds
                # blt indices for the pair as stored, dinds2 for the conjugated
                # orientation — confirm this contract against the pinned pyuvdata version.
                dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
                if len(dinds1) > 0:
                    dinds = dinds1
                    conjugate = False
                    pol_ind = pol_ind[0]
                else:
                    dinds = dinds2
                    conjugate = True
                    pol_ind = pol_ind[1]
                # pick the single blt index matching the requested time (raises IndexError if absent)
                dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
                data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
                iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
                nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
                # NOTE(review): if the indexing above yields a view, this in-place
                # division also scales uvdata.data_array itself — confirm whether
                # callers rely on that or whether a copy is intended.
                data /= data_scale_factor
                if conjugate:
                    data = np.conj(data)
                data_r[i, j] = data.real.astype(dtype)
                data_i[i, j] = data.imag.astype(dtype)
                if weights is None:
                    # default weights: unflagged mask, optionally scaled by nsamples
                    wgts[i, j] = iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                else:
                    # the UVFlag object may store the pair in either orientation
                    if ap in weights.get_antpairs():
                        dinds = weights.antpair2ind(*ap)
                    else:
                        dinds = weights.antpair2ind(*ap[::-1])
                    dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
                    polnum = np.where(
                        weights.polarization_array
                        == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
                    )[0][0]
                    wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                wgtsum += np.sum(wgts[i, j])
    data_r = tf.convert_to_tensor(data_r, dtype=dtype)
    data_i = tf.convert_to_tensor(data_i, dtype=dtype)
    # normalize weights so they sum to unity across all baselines/frequencies
    wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
    nchunks = len(corr_inds)
    # gather the per-chunk (ngrps, nbls, nfreqs) slices out of the dense (nant, nant, nfreq) arrays
    data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
    data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
    wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
    return data_r, data_i, wgts
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
    """Remove arbitrary phase and amplitude from deconvolved model and gains.

    Parameters
    ----------
    uvdata_reference_model: UVData object
        Reference model for "true" visibilities.
    uvdata_deconv: UVData object
        "Deconvolved" data solved for in self-cal loop.
    gains: UVCal object
        Gains solved for in self-cal loop.
    polarization: str
        Polarization string to compute phase and amplitude correction for.
    time: float
        time to renormalize.
    additional_flags: np.ndarray
        Any additional flags you wish to use for excluding data from normalization
        fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
        default is None -> Only exclude data in flags from reference model and deconv from
        determinging normalization.

    Returns
    -------
    N/A: Modifies uvdata_deconv and gains in-place.
    """
    # locate the polarization column in the deconvolved data.
    pol_index = np.where(
        uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    # select the blt rows for the requested time.
    blt_selection = np.isclose(uvdata_deconv.time_array, time, atol=1e-7, rtol=0.0)
    # only use samples unflagged in both the model and the deconvolved data.
    unflagged = (
        ~uvdata_deconv.flag_array[blt_selection, :, :, pol_index]
        & ~uvdata_reference_model.flag_array[blt_selection, :, :, pol_index]
    )
    if additional_flags is not None:
        unflagged = unflagged & ~additional_flags[blt_selection, :, :, pol_index]
    ratio = (
        uvdata_reference_model.data_array[blt_selection, :, :, pol_index][unflagged]
        / uvdata_deconv.data_array[blt_selection, :, :, pol_index][unflagged]
    )
    # mask divisions by zero / invalid samples so the nan-aware means skip them.
    ratio[~np.isfinite(ratio)] = np.nan
    scale_factor_phase = np.angle(np.nanmean(ratio))
    scale_factor_abs = np.sqrt(np.nanmean(np.abs(ratio) ** 2.0))
    scale_factor = scale_factor_abs  # * np.exp(1j * scale_factor_phase) Need to figure this out later.
    # push the amplitude degeneracy into the deconvolved data...
    uvdata_deconv.data_array[blt_selection, :, :, pol_index] *= scale_factor
    # ...and divide its square root out of the gains so gain * gain * model stays fixed.
    jones_index = np.where(
        gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    time_index = np.where(np.isclose(gains.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    gains.gain_array[:, :, :, time_index, jones_index] *= (scale_factor) ** -0.5
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
    """Helper function to extract gains into fitting tensors.

    Parameters
    ----------
    uvcal: UVCal object
        UVCal object holding gain data to tensorize.
    polarization: str
        pol-str of gain to extract.
    time: float
        JD of time to convert to tensor.
    dtype: numpy.dtype
        dtype of tensors to output.

    Returns
    -------
    gains_re: tf.Tensor object.
        real component of gains for the selected time and polarization, shape Nant x Nfreq
    gains_im: tf.Tensor object.
        imag component of gains for the selected time and polarization, shape Nant x Nfreq
    """
    # resolve the jones and time axes for the requested polarization / JD.
    jones_index = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
    time_index = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    selected = uvcal.gain_array[:, 0, :, time_index, jones_index].squeeze()
    gains_re = tf.convert_to_tensor(selected.real, dtype=dtype)
    gains_im = tf.convert_to_tensor(selected.imag, dtype=dtype)
    return gains_re, gains_im
def yield_fg_model_array(
    nants,
    nfreqs,
    fg_model_comps,
    fg_coeffs,
    corr_inds,
):
    """Compute tensor foreground model.

    Parameters
    ----------
    nants: int
        number of antennas in data to model.
    nfreqs: int
        number of frequencies in data to model.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
    fg_coeffs: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling coefficients.
        Each tensor is (nvecs, ngrps, 1, 1)
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
            group
                baseline - (int 2-tuple)

    Returns
    -------
    model: np.ndarray
        nants x nants x nfreqs model of the visibility data
    """
    model = np.zeros((nants, nants, nfreqs))
    nchunks = len(fg_model_comps)
    for cnum in range(nchunks):
        ngrps = fg_model_comps[cnum].shape[1]
        # contract coefficients with components over the vector axis to get
        # each group's modeled spectra, then scatter each baseline's spectrum
        # into its (ant_i, ant_j) slot.
        gchunk = tf.reduce_sum(fg_coeffs[cnum] * fg_model_comps[cnum], axis=0).numpy()
        for gnum in range(ngrps):
            for blnum, (i, j) in enumerate(corr_inds[cnum][gnum]):
                model[i, j] = gchunk[gnum, blnum]
    return model
def fit_gains_and_foregrounds(
    g_r,
    g_i,
    fg_r,
    fg_i,
    data_r,
    data_i,
    wgts,
    fg_comps,
    corr_inds,
    use_min=False,
    tol=1e-14,
    maxsteps=10000,
    optimizer="Adamax",
    freeze_model=False,
    verbose=False,
    notebook_progressbar=False,
    dtype=np.float32,
    graph_mode=False,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    sky_model_r=None,
    sky_model_i=None,
    model_regularization=None,
    graph_args_dict=None,
    **opt_kwargs,
):
    """Run optimization loop to fit gains and foreground components.

    Parameters
    ----------
    g_r: tf.Tensor object.
        tf.Tensor object holding real parts of gains.
    g_i: tf.Tensor object.
        tf.Tensor object holding imag parts of gains.
    fg_r: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding foreground coeffs.
    fg_i: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding imag coeffs.
    data_r: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        real part of data to fit.
    data_i: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        imag part of data to fit.
    wgts: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
    fg_comps: list:
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
        represents vectors to be used in modeling visibilities.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
            group
                baseline - (int 2-tuple)
    use_min: bool, optional
        if True, use the value that minimizes the loss function
        regardless of where optimization loop ended up
        (prevents overshooting due to excess momentum)
    tol: float, optional
        halt optimization loop once the loss changes by less then this value.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    dtype: numpy.dtype, optional
        precision reported in log output; tensors should already be in this precision.
        default is np.float32.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    n_profile_steps: int, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    sky_model_r: list of tf.Tensor objects, optional
        chunked tensors containing model in same format as data_r
    sky_model_i: list of tf.Tensor objects, optional
        chunked tensors containing model in the same format as data_i
    model_regularization: str, optional
        type of model regularization to perform. Currently support "sum"
        where the sums of real and imaginary parts (across all bls and freqs)
        are constrained to be the same as the sum of real and imag parts
        of data.
    graph_args_dict: dict, optional
        kwargs passed to tf.function when graph_mode is True.
        default is None -> {}.
    opt_kwargs: kwarg dict
        additional kwargs for tf.opt.Optimizer(). See tensorflow docs.

    Returns
    -------
    g_r_opt: tf.Tensor object
        real part of optimized gains.
    g_i_opt: tf.Tensor object
        imag part of optimized gains.
    fg_r_opt: tf.Tensor object
        real part of foreground coeffs.
    fg_i_opt: tf.Tensor object.
        imag part of optimized foreground coeffs.
    fit_history: dict
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    if graph_args_dict is None:
        graph_args_dict = {}
    # initialize the optimizer.
    echo(f"Using {str(dtype)} precision.")
    echo(f"{datetime.datetime.now()} Provided the following opt_kwargs")
    for k in opt_kwargs:
        echo(f"{k}: {opt_kwargs[k]}")
    opt = OPTIMIZERS[optimizer](**opt_kwargs)
    # set up history recording
    fit_history = {"loss": []}
    min_loss = 9e99
    ant0_inds = []
    ant1_inds = []
    nchunks = len(fg_comps)
    # build up list of lists of ant0 and ant1 for gather ops
    for cnum in range(nchunks):
        ant0_chunk = []
        ant1_chunk = []
        ngrps = len(corr_inds[cnum])
        for gnum in range(ngrps):
            ant0_grp = []
            ant1_grp = []
            for cpair in corr_inds[cnum][gnum]:
                ant0_grp.append(cpair[0])
                ant1_grp.append(cpair[1])
            ant0_chunk.append(ant0_grp)
            ant1_chunk.append(ant1_grp)
        ant0_inds.append(ant0_chunk)
        ant1_inds.append(ant1_chunk)
    g_r = tf.Variable(g_r)
    g_i = tf.Variable(g_i)
    if not freeze_model:
        fg_r = [tf.Variable(fgr) for fgr in fg_r]
        fg_i = [tf.Variable(fgi) for fgi in fg_i]
        opt_vars = [g_r, g_i] + fg_r + fg_i
    else:
        opt_vars = [g_r, g_i]
    # initialize the outputs so they are defined even when the loop records no
    # improvement (e.g. maxsteps == 0) and when use_min is combined with
    # freeze_model (previously fg_r_opt/fg_i_opt were unbound in that case).
    g_r_opt = g_r.value()
    g_i_opt = g_i.value()
    if not freeze_model:
        fg_r_opt = [fgr.value() for fgr in fg_r]
        fg_i_opt = [fgi.value() for fgi in fg_i]
    else:
        fg_r_opt = fg_r
        fg_i_opt = fg_i
    echo(
        f"{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...",
        verbose=verbose,
    )
    if not freeze_model:
        echo(
            f"Performing gradient descent on total of {int(np.sum([fgr.shape[0] * fgr.shape[1] for fgr in fg_r]))} complex foreground parameters",
            verbose=verbose,
        )
        # built outside the f-string: nesting double quotes inside a
        # double-quoted f-string is a SyntaxError before Python 3.12.
        chunk_shapes = [str(fgr.shape[:2]) + ':' + str(dc.shape[1]) for fgr, dc in zip(fg_r, data_r)]
        echo(
            f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {chunk_shapes}",
            verbose=verbose,
        )
    if model_regularization == "sum":
        # weighted sums of the sky model, used to pin the overall flux scale.
        prior_r_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_r[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )
        prior_i_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_i[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )

        def loss_function():
            return mse_chunked_sum_regularized(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
                prior_r_sum=prior_r_sum,
                prior_i_sum=prior_i_sum,
            )

    else:

        def loss_function():
            return mse_chunked(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
            )

    def train_step_code():
        # single gradient step: evaluate loss, backprop, apply update.
        with tf.GradientTape() as tape:
            loss = loss_function()
        grads = tape.gradient(loss, opt_vars)
        opt.apply_gradients(zip(grads, opt_vars))
        return loss

    if graph_mode:

        @tf.function(**graph_args_dict)
        def train_step():
            return train_step_code()

    else:

        def train_step():
            return train_step_code()

    if n_profile_steps > 0:
        echo(f"{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...")
        tf.profiler.experimental.start(profile_log_dir)
        for step in PBARS[notebook_progressbar](range(n_profile_steps)):
            with tf.profiler.experimental.Trace("train", step_num=step):
                train_step()
        tf.profiler.experimental.stop()

    echo(
        f"{datetime.datetime.now()} Building Computational Graph...\n",
        verbose=verbose,
    )
    loss = train_step()
    echo(
        f"{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...\n",
        verbose=verbose,
    )
    for step in PBARS[notebook_progressbar](range(maxsteps)):
        loss = train_step()
        fit_history["loss"].append(loss.numpy())
        if use_min and fit_history["loss"][-1] < min_loss:
            # store the g_r, g_i, fg_r, fg_i values that minimize loss
            # in case of overshoot.
            min_loss = fit_history["loss"][-1]
            g_r_opt = g_r.value()
            g_i_opt = g_i.value()
            if not freeze_model:
                fg_r_opt = [fgr.value() for fgr in fg_r]
                fg_i_opt = [fgi.value() for fgi in fg_i]
        if step >= 1:
            # delta computed outside the f-string: double-quoted subscripts inside
            # a double-quoted f-string are a SyntaxError before Python 3.12.
            delta = np.abs(fit_history["loss"][-1] - fit_history["loss"][-2])
            if delta < tol:
                echo(
                    f"Tolerance threshold met with delta of {delta:.2e}. Terminating...\n ",
                    verbose=verbose,
                )
                break
    # if we dont use use_min, then the last
    # visited set of parameters will be used
    # to set the ML params.
    if not use_min:
        min_loss = fit_history["loss"][-1]
        g_r_opt = g_r.value()
        g_i_opt = g_i.value()
        if not freeze_model:
            fg_r_opt = [fgr.value() for fgr in fg_r]
            fg_i_opt = [fgi.value() for fgi in fg_i]
        else:
            fg_r_opt = fg_r
            fg_i_opt = fg_i
    echo(
        f"{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...\n",
        verbose=verbose,
    )
    return g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history
def insert_model_into_uvdata_tensor(
    uvdata,
    time,
    polarization,
    ants_map,
    red_grps,
    model_r,
    model_i,
    scale_factor=1.0,
):
    """Insert fitted tensor values back into uvdata object for tensor mode.

    Parameters
    ----------
    uvdata: UVData object
        uvdata object to insert model data into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    red_grps: list of lists of int 2-tuples
        a list of lists of 2-tuples where all antenna pairs within each sublist
        are redundant with eachother. Assumes that conjugates are correctly taken.
    model_r: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data
    model_i: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model
    scale_factor: float, optional
        overall scaling factor that the tensorized data was divided by;
        model values are multiplied by it on insertion to restore the
        original flux scale. default is 1.0

    Returns
    -------
    N/A: Modifies uvdata inplace.
    """
    antpairs_data = uvdata.get_antpairs()
    # index of the requested polarization along the UVData polarization axis.
    polnum = np.where(
        uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)
    )[0][0]
    for red_grp in red_grps:
        for ap in red_grp:
            i, j = ants_map[ap[0]], ants_map[ap[1]]
            if ap in antpairs_data:
                # data holds this antenna pair directly: insert model as-is.
                dinds = uvdata.antpair2ind(ap)
                # select the single blt index matching the requested JD.
                dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-7, rtol=0.0))[0][0]]
                model = model_r[i, j] + 1j * model_i[i, j]
            else:
                # data holds the conjugate pair: insert the conjugated model.
                dinds = uvdata.antpair2ind(ap[::-1])
                dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-7, rtol=0.0))[0][0]]
                model = model_r[i, j] - 1j * model_i[i, j]
            # NOTE(review): assumes the legacy 4-axis data_array layout
            # (Nblts, 1, Nfreqs, Npols) — confirm pyuvdata version in use.
            uvdata.data_array[dinds, 0, :, polnum] = model * scale_factor
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
    """Write tensorized gain solutions back into a UVCal object in place.

    Parameters
    ----------
    uvcal: UVCal object
        calibration object whose gain_array is overwritten.
    time: float
        JD of the time slot to update (matched with atol=1e-7).
    polarization: str
        polarization string selecting the jones axis entry to update.
    gains_re: indexable collection of tf.Tensor objects
        real parts of the per-antenna complex gains, indexed 0..Nants_data-1,
        each an Nfreq 1d tensor.
    gains_im: indexable collection of tf.Tensor objects
        imaginary parts of the per-antenna complex gains, same indexing.

    Returns
    -------
    N/A: Modifies uvcal inplace.
    """
    jones_num = uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)
    jones_index = np.where(uvcal.jones_array == jones_num)[0][0]
    # single time index matching the requested JD.
    time_index = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    for antnum in range(uvcal.Nants_data):
        complex_gain = gains_re[antnum].numpy() + 1j * gains_im[antnum].numpy()
        uvcal.gain_array[antnum, 0, :, time_index, jones_index] = complex_gain
def tensorize_fg_coeffs(
    data,
    wgts,
    fg_model_comps,
    notebook_progressbar=False,
    verbose=False,
):
    """Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.

    Parameters
    ----------
    data: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing data
    wgts: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing weights.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
        see description in tensorize_fg_model_comps_dict
        docstring.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.

    Returns
    -------
    fg_coeffs_re: tf.Tensor object
        1d tensor containing real parts of coeffs for each modeling vector.
        ordering is over foreground modeling vector per redundant group and then
        redundant group in the order of groups appearing in red_grps
    fg_coeffs_im: tf.Tensor object
        1d tensor containing imag parts of coeffs for each modeling vector.
        ordering is over foreground modeling vector per redundant group and then
        redundant group in the order of groups appearing in red_grps
    """
    echo(
        f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
        verbose=verbose,
    )
    fg_coeffs = []
    nchunks = len(data)
    # 1.0 where a sample carries nonzero weight, 0.0 where it is flagged /
    # zero-weighted; used to mask data before the least-squares fit.
    binary_wgts = [
        tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks)
    ]
    for cnum in PBARS[notebook_progressbar](range(nchunks)):
        # set up linear leastsq
        fg_coeff_chunk = []
        ngrps = data[cnum].shape[0]
        # number of data points per group (baselines x freqs, raveled).
        ndata = data[cnum].shape[1] * data[cnum].shape[2]
        nvecs = fg_model_comps[cnum].shape[0]
        # pad with zeros
        for gnum in range(ngrps):
            # NOTE(review): despite the name, nonzero_rows holds the indices of
            # modeling-vector rows that are entirely ~zero. The first such index
            # marks where trailing zero padding begins (assumes all-zero rows
            # only occur as a contiguous tail — TODO confirm upstream guarantee).
            nonzero_rows = np.where(
                np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1)
            )[0]
            if len(nonzero_rows) > 0:
                nvecs_nonzero = np.min(nonzero_rows)
            else:
                nvecs_nonzero = nvecs
            # solve linear leastsq for this group using only the non-padded vectors.
            fg_coeff_chunk.append(
                tf.reshape(
                    tf.linalg.lstsq(
                        tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_nonzero],
                        tf.reshape(data[cnum][gnum] * binary_wgts[cnum][gnum], (ndata, 1)),
                    ),
                    (nvecs_nonzero,),
                )
            )
            # pad zeros at the end back up to nvecs.
            fg_coeff_chunk[-1] = tf.pad(fg_coeff_chunk[-1], [(0, nvecs - nvecs_nonzero)])
        # add two additional dummy indices to satify broadcasting rules.
        fg_coeff_chunk = tf.reshape(tf.transpose(tf.stack(fg_coeff_chunk)), (nvecs, ngrps, 1, 1))
        fg_coeffs.append(fg_coeff_chunk)
    echo(
        f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
        verbose=verbose,
    )
    return fg_coeffs
def get_auto_weights(uvdata, delay_extent=25.0):
    """Compute inverse variance weights from interpolated autocorrelation data.

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing autocorrelation data to use for computing inverse noise weights.
    delay_extent: float, optional
        fit autocorrelations with delay (DPSS) components out to this width
        (passed as `offset` to modeling.yield_dpss_model_comps_bl_grp;
        presumably in ns — confirm against that helper's docstring).
        default is 25.0

    Returns
    -------
    data_weights: UVFlag object
        UVFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.
    """
    dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
    data_weights = UVFlag(uvdata, mode="flag")
    data_weights.weights_array = np.zeros(uvdata.data_array.shape)
    # compute autocorrelation weights
    auto_fit_dict = {}
    bls = uvdata.get_antpairpols()
    for bl in bls:
        if bl[0] == bl[1]:
            d_wf = uvdata.get_data(bl)
            w_wf = ~uvdata.get_flags(bl)
            auto_fit_dict[bl] = []
            for ds, fs in zip(d_wf, w_wf):
                # fit autocorr waterfall to DPSS modes.
                nunflagged = np.count_nonzero(fs)
                amat = tf.convert_to_tensor(dpss_components[fs])
                dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
                # evaluate the smooth DPSS model over all channels (including flagged).
                model = dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze()
                auto_fit_dict[bl].append(model)
            auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
    # from autocorrelation fits, weights
    for bl in bls:
        # radiometer-style inverse variance: 1 / (auto_i * auto_j), zeroed where flagged.
        smooth_weights = 1.0 / (auto_fit_dict[bl[0], bl[0], bl[-1]] * auto_fit_dict[bl[1], bl[1], bl[-1]])
        smooth_weights *= ~uvdata.get_flags(bl)
        dinds = data_weights.antpair2ind(*bl[:2])
        polnum = np.where(
            data_weights.polarization_array == uvutils.polstr2num(bl[-1], x_orientation=data_weights.x_orientation)
        )[0][0]
        data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
    return data_weights
def calibrate_and_model_tensor(
    uvdata,
    fg_model_comps_dict,
    gains=None,
    freeze_model=False,
    optimizer="Adamax",
    tol=1e-14,
    maxsteps=10000,
    include_autos=False,
    verbose=False,
    sky_model=None,
    dtype=np.float32,
    use_min=False,
    use_redundancy=False,
    notebook_progressbar=False,
    correct_resid=False,
    correct_model=True,
    weights=None,
    nsamples_in_weights=True,
    graph_mode=False,
    grp_size_threshold=5,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    model_regularization="sum",
    init_guesses_from_previous_time_step=False,
    skip_threshold=0.5,
    use_model_snr_weights=False,
    **opt_kwargs,
):
    """Perform simultaneous calibration and foreground fitting using tensors.

    Parameters
    ----------
    uvdata: UVData object
        uvdata objet of data to be calibrated.
    fg_model_comps_dict: dictionary
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group' visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
        values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
    gains: UVCal object
        UVCal with initial gain estimates.
        There many smart ways to obtain initial gain estimates
        but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
        Users can determine initial gains with their favorite established cal algorithm.
        default is None -> start with unity gains.
        WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    tol: float, optional
        halting condition for optimizer loop. Stop loop when the change in the cost function falls
        below tol.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    include_autos: bool, optional
        include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        generate lots of text.
        default is False.
    sky_model: UVData object, optional
        a sky-model to use for initial estimates of foreground coeffs and
        to set overall flux scale and phases.
        Note that this model is not used to obtain initial gain estimates.
        These must be provided through the gains argument.
    dtype: numpy dtype, optional
        the float precision to be used in tensorflow gradient descent.
        runtime scales roughly inversely linear with precision.
        default is np.float32
    use_min: bool, optional
        If True, use the set of parameters that determine minimum as the ML params
        If False, use the last set of parameters visited by the optimization loop.
    use_redundancy: bool, optional
        if true, solve for one set of foreground coeffs per redundant baseline group
        instead of per baseline.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    correct_resid: bool, optional
        if True, gain correct residual.
        default is False
    correct_model: bool, optional
        if True, gain correct model.
        default is True
    weights: UVFlag object, optional.
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is True.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    grp_size_threshold: int, optional
        modeling groups with fewer elements than this value are split up
        into single baselines (passed through to tensorize_fg_model_comps_dict).
        default is 5.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    model_regularization: str, optional
        option to regularize model
        supported 'post_hoc', 'sum'
        default is 'sum'
        'post_hoc' sets sum of amps equal and sum of phases equal.
    init_guesses_from_previous_time_step: bool, optional
        if True, then use foreground coeffs and gains from previous time-step to
        initialize gains for next time step.
    skip_threshold: float, optional
        if less then this fraction of data is unflagged on a particular poltime,
        flag the entire poltime.
    use_model_snr_weights: bool, optional
        if True, reweight the data by the squared magnitude of the initial
        foreground model (an SNR-like weighting) before fitting.
        default is False.
    opt_kwargs: kwarg_dict
        kwargs for tf.optimizers

    Returns
    -------
    model: UVData object
        uvdata object containing model of the foregrounds
    resid: UVData object
        uvdata object containing resids which are the data minus
        the model with gains multiplied and then with the gains divided out.
    gains: UVCal object
        uvcal object containing estimates of the gain solutions. These solutions
        are not referenced to any sky model and may therefore differ from the
        true gains by overall degenerate (amplitude/phase) parameters.
    fit_history:
        dictionary containing fit history with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    antpairs_data = uvdata.get_antpairs()
    if not include_autos:
        # drop autocorrelations from the fit.
        antpairs_data = set([ap for ap in antpairs_data if ap[0] != ap[1]])
    uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
    resid = copy.deepcopy(uvdata)
    model = copy.deepcopy(uvdata)
    model.data_array[:] = 0.0
    model.flag_array[:] = False
    # get redundant groups
    red_grps = []
    for fit_grp in fg_model_comps_dict.keys():
        for red_grp in fit_grp:
            red_grps.append(red_grp)
    if gains is None:
        echo(
            f"{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...\n",
            verbose=verbose,
        )
        gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
    if sky_model is None and model_regularization is not None:
        echo(
            f"{datetime.datetime.now()} Sky model is None. Initializing from data...\n",
            verbose=verbose,
        )
        sky_model = cal_utils.apply_gains(uvdata, gains)
    else:
        # NOTE(review): if sky_model is None AND model_regularization is None,
        # this branch calls .select on None and raises — confirm callers always
        # pass a sky model when regularization is disabled.
        sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
    fit_history = {}
    ants_map = {ant: i for i, ant in enumerate(gains.ant_array)}
    # generate tensors to hold foreground components.
    fg_model_comps, corr_inds = tensorize_fg_model_comps_dict(
        fg_model_comps_dict=fg_model_comps_dict,
        ants_map=ants_map,
        dtype=dtype,
        nfreqs=sky_model.Nfreqs,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        grp_size_threshold=grp_size_threshold,
    )
    echo(
        f"{datetime.datetime.now()}Finished Converting Foreground Modeling Components to Tensors...\n",
        verbose=verbose,
    )
    # delete fg_model_comps_dict. It can take up a lot of memory.
    del fg_model_comps_dict
    # loop through polarization and times.
    for polnum, pol in enumerate(uvdata.get_pols()):
        echo(
            f"{datetime.datetime.now()} Working on pol {pol}, {polnum + 1} of {uvdata.Npols}...\n",
            verbose=verbose,
        )
        fit_history_p = {}
        first_time = True
        for time_index, time in enumerate(np.unique(uvdata.time_array)):
            echo(
                f"{datetime.datetime.now()} Working on time {time_index + 1} of {uvdata.Ntimes}...\n",
                verbose=verbose,
            )
            bltsel = np.isclose(uvdata.time_array, time, atol=1e-7, rtol=0.0)
            frac_unflagged = np.count_nonzero(~uvdata.flag_array[bltsel, 0, :, polnum]) / (
                uvdata.Nbls * uvdata.Nfreqs
            )
            # check that fraction of unflagged data > skip_threshold.
            if frac_unflagged >= skip_threshold:
                # rms of unflagged data; used to normalize data before fitting.
                rmsdata = np.sqrt(
                    np.mean(
                        np.abs(uvdata.data_array[bltsel, 0, :, polnum][~uvdata.flag_array[bltsel, 0, :, polnum]]) ** 2.0
                    )
                )
                echo(f"{datetime.datetime.now()} Tensorizing data...\n", verbose=verbose)
                data_r, data_i, wgts = tensorize_data(
                    uvdata,
                    corr_inds=corr_inds,
                    ants_map=ants_map,
                    polarization=pol,
                    time=time,
                    data_scale_factor=rmsdata,
                    weights=weights,
                    nsamples_in_weights=nsamples_in_weights,
                    dtype=dtype,
                )
                if sky_model is not None:
                    echo(f"{datetime.datetime.now()} Tensorizing sky model...\n", verbose=verbose)
                    sky_model_r, sky_model_i, _ = tensorize_data(
                        sky_model,
                        corr_inds=corr_inds,
                        ants_map=ants_map,
                        polarization=pol,
                        time=time,
                        data_scale_factor=rmsdata,
                        weights=weights,
                        dtype=dtype,
                    )
                else:
                    sky_model_r, sky_model_i = None, None
                if first_time or not init_guesses_from_previous_time_step:
                    # (re)initialize gains and foreground coefficients; when
                    # init_guesses_from_previous_time_step is True this only
                    # happens on the first fitted time.
                    first_time = False
                    echo(f"{datetime.datetime.now()} Tensorizing Gains...\n", verbose=verbose)
                    g_r, g_i = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
                    # generate initial guess for foreground coeffs.
                    echo(
                        f"{datetime.datetime.now()} Tensorizing Foreground coeffs...\n",
                        verbose=verbose,
                    )
                    fg_r = tensorize_fg_coeffs(
                        data=data_r,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                    fg_i = tensorize_fg_coeffs(
                        data=data_i,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                if use_model_snr_weights:
                    # scale weights by |initial foreground model|^2 (SNR-like weighting).
                    wgts_model = [fg_model(fgr, fgi, fgc) for fgr, fgi, fgc in zip(fg_r, fg_i, fg_model_comps)]
                    wgts = [(tf.square(wm[0]) + tf.square(wm[1])) * w for wm, w in zip(wgts_model, wgts)]
                    del wgts_model
                    # renormalize
                    wgts_sum = np.sum([np.sum(w) for w in wgts])
                    wgts = [w / wgts_sum for w in wgts]
                (g_r, g_i, fg_r, fg_i, fit_history_p[time_index],) = fit_gains_and_foregrounds(
                    g_r=g_r,
                    g_i=g_i,
                    fg_r=fg_r,
                    fg_i=fg_i,
                    data_r=data_r,
                    data_i=data_i,
                    wgts=wgts,
                    fg_comps=fg_model_comps,
                    corr_inds=corr_inds,
                    optimizer=optimizer,
                    use_min=use_min,
                    freeze_model=freeze_model,
                    notebook_progressbar=notebook_progressbar,
                    verbose=verbose,
                    tol=tol,
                    dtype=dtype,
                    maxsteps=maxsteps,
                    graph_mode=graph_mode,
                    n_profile_steps=n_profile_steps,
                    profile_log_dir=profile_log_dir,
                    sky_model_r=sky_model_r,
                    sky_model_i=sky_model_i,
                    model_regularization=model_regularization,
                    **opt_kwargs,
                )
                # insert into model uvdata.
                insert_model_into_uvdata_tensor(
                    uvdata=model,
                    time=time,
                    polarization=pol,
                    ants_map=ants_map,
                    red_grps=red_grps,
                    model_r=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_r,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    model_i=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_i,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    scale_factor=rmsdata,
                )
                # insert gains into uvcal
                insert_gains_into_uvcal(
                    uvcal=gains,
                    time=time,
                    polarization=pol,
                    gains_re=g_r,
                    gains_im=g_i,
                )
            else:
                echo(
                    f"{datetime.datetime.now()}: Only {frac_unflagged * 100}-percent of data unflagged. Skipping...\n",
                    verbose=verbose,
                )
                flag_poltime(resid, time=time, polarization=pol)
                flag_poltime(gains, time=time, polarization=pol)
                flag_poltime(model, time=time, polarization=pol)
                # NOTE(review): this marker is overwritten by the per-pol
                # assignment of fit_history_p below — confirm intended.
                fit_history[polnum] = "skipped!"
            # normalize on sky model if we use post-hoc regularization
            if not freeze_model and model_regularization == "post_hoc" and np.any(~model.flag_array[bltsel]):
                renormalize(
                    uvdata_reference_model=sky_model,
                    uvdata_deconv=model,
                    gains=gains,
                    polarization=pol,
                    time=time,
                    additional_flags=uvdata.flag_array,
                )
        fit_history[polnum] = fit_history_p
    # model with gains applied (uncalibrated-frame model) for residual computation.
    model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
    if not correct_model:
        model = model_with_gains
    resid.data_array -= model_with_gains.data_array
    resid.data_array[model_with_gains.flag_array] = 0.0  # set resid to zero where model is flagged.
    resid.data_array[uvdata.flag_array] = 0.0  # also set resid to zero where data is flagged.
    if correct_resid:
        resid = cal_utils.apply_gains(resid, gains)
    return model, resid, gains, fit_history
def flag_poltime(data_object, time, polarization):
    """Flag a single (time, polarization) slice of a UVData or UVCal object in place.

    For UVData: set flags True and zero the data at all baseline-times matching
    `time` for the given polarization. For UVCal: set flags True and reset gains
    to unity at the matching time index for the given jones polarization.

    Parameters
    ----------
    data_object: UVData or UVCal object
        object to flag (modified in place).
    time: float
        JD of the time to flag (matched with atol=1e-7, rtol=0.0).
    polarization: str
        polarization string identifying the pol/jones axis entry to flag.

    Returns
    -------
    N/A: Modifies data_object inplace.

    Raises
    ------
    ValueError
        if data_object is neither a UVData nor a UVCal instance.
    """
    if isinstance(data_object, UVData):
        bltsel = np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0)
        polnum = np.where(
            data_object.polarization_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        # NOTE(review): this branch slices all spectral windows (':'), while the
        # UVCal branch below indexes spw 0 explicitly — confirm intentional.
        data_object.flag_array[bltsel, :, :, polnum] = True
        data_object.data_array[bltsel, :, :, polnum] = 0.0
    elif isinstance(data_object, UVCal):
        polnum = np.where(
            data_object.jones_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        gindt = np.where(np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0))[0][0]
        # reset gains to unity and flag them at this time/jones index.
        data_object.gain_array[:, 0, :, gindt, polnum] = 1.0
        data_object.flag_array[:, 0, :, gindt, polnum] = True
    else:
        raise ValueError("only supports data_object that is UVCal or UVData.")
def calibrate_and_model_mixed(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    ant_dly=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    red_tol_freq=0.5,
    n_angle_bins=200,
    notebook_progressbar=False,
    use_redundancy=False,
    use_tensorflow_to_derive_modeling_comps=False,
    eigenval_cutoff=1e-10,
    dtype_matinv=np.float64,
    require_exact_angle_match=True,
    angle_match_tol=1e-3,
    grp_size_threshold=5,
    model_comps_dict=None,
    save_dict_to=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with a mix of DPSS vectors
    for baselines with no frequency redundancy and simple_cov components for
    groups of baselines that have some frequency redundancy.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    ant_dly: float, optional
        intrinsic chromaticity of each antenna element
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    red_tol_freq: float, optional
        tolerance for treating two baselines as having some
        frequency redundancy. When frequency redundancy exists, baselines
        will be modeled jointly.
    n_angle_bins: int, optional
        number of angular bins to use between -pi and pi to compare baselines
        default is 200
    notebook_progressbar: bool, optional
        if True, show graphical notebook progress bar that looks good in jupyter.
        default is False.
    use_redundancy: bool, optional
        If True, model all baselines within each redundant group with the same components
        If False, model each baseline within each redundant group with sepearate components.
        default is False.
    use_tensorflow_to_derive_modeling_comps: bool, optional
        Use tensorflow methods to derive multi-baseline modeling components.
        recommended if you have a GPU with enough memory to perform spectral decomposition
        of multi-baseline covariance matrices.
    eigenval_cutoff: float, optional
        threshold of eigenvectors to include in modeling components.
    dtype_matinv: numpy.dtype, optional
        data type to use for deriving modeling components.
        default is np.float64 (need higher precision for cov-mat like calculation)
    require_exact_angle_match: bool, optional
        if True, require baselines to match in angle exactly (within angle_match_tol)
        when grouping them.
    angle_match_tol: float, optional
        tolerance for matching baseline angles.
        default is 1e-3.
    grp_size_threshold: int, optional
        groups with number of elements less then this value are split up into single baselines.
        default is 5.
    model_comps_dict: dict, optional
        dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps
        for more specifics.
        default is None -> compute fitting groups automatically.
    save_dict_to: str, optional
        path to save the (possibly computed) model_comps_dict to via np.save.
        default is None -> do not save.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # group baselines by overlapping uv coverage (conjugated as needed).
    fitting_grps, blvecs, _, _ = modeling.get_uv_overlapping_grps_conjugated(
        uvdata,
        red_tol=red_tol,
        include_autos=include_autos,
        red_tol_freq=red_tol_freq,
        n_angle_bins=n_angle_bins,
        notebook_progressbar=notebook_progressbar,
        require_exact_angle_match=require_exact_angle_match,
        angle_match_tol=angle_match_tol,
    )
    # derive mixed modeling components unless the caller supplied them.
    if model_comps_dict is None:
        model_comps_dict = modeling.yield_mixed_comps(
            fitting_grps,
            blvecs,
            uvdata.freq_array[0],
            eigenval_cutoff=eigenval_cutoff,
            use_tensorflow=use_tensorflow_to_derive_modeling_comps,
            ant_dly=ant_dly,
            horizon=horizon,
            offset=offset,
            min_dly=min_dly,
            verbose=verbose,
            dtype=dtype_matinv,
            notebook_progressbar=notebook_progressbar,
            grp_size_threshold=grp_size_threshold,
        )
    # optionally persist the modeling dictionary for reuse.
    if save_dict_to is not None:
        np.save(save_dict_to, model_comps_dict)
    # delegate the heavy lifting to the tensor-based cal/model driver.
    return calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        **fitting_kwargs,
    )
def calibrate_and_model_dpss(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    notebook_progressbar=False,
    fg_model_comps_dict=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with DPSS vectors.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    fg_model_comps_dict: dict, optional
        dictionary containing precomputed foreground model components.
        Currently only supported if use_redundancy is False.
        default is None -> compute DPSS components from uvdata.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # BUGFIX: previously fg_model_comps_dict was accepted but silently ignored
    # (DPSS components were always recomputed). Honor a caller-supplied dict;
    # behavior with fg_model_comps_dict=None is unchanged.
    if fg_model_comps_dict is None:
        fg_model_comps_dict = modeling.yield_pbl_dpss_model_comps(
            uvdata,
            horizon=horizon,
            min_dly=min_dly,
            offset=offset,
            include_autos=include_autos,
            red_tol=red_tol,
            notebook_progressbar=notebook_progressbar,
            verbose=verbose,
        )
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=fg_model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def fg_model(fg_r, fg_i, fg_comps):
    """Evaluate the foreground model from coefficients and modeling components.

    Sums coefficient-weighted modeling vectors over the component axis (axis 0),
    returning the real and imaginary parts of the modeled visibilities.
    """
    real_part = tf.reduce_sum(fg_comps * fg_r, axis=0)
    imag_part = tf.reduce_sum(fg_comps * fg_i, axis=0)
    return real_part, imag_part
def data_model(g_r, g_i, fg_r, fg_i, fg_comps, ant0_inds, ant1_inds):
    """Compute model visibilities v_ij = g_i * conj(g_j) * v_fg in real arithmetic.

    Gathers per-antenna gains onto the baseline axis via ant0_inds/ant1_inds,
    evaluates the foreground model, and applies the gain product split into
    real and imaginary parts. Returns (model_r, model_i).
    """
    gain_r_0 = tf.gather(g_r, ant0_inds)
    gain_r_1 = tf.gather(g_r, ant1_inds)
    gain_i_0 = tf.gather(g_i, ant0_inds)
    gain_i_1 = tf.gather(g_i, ant1_inds)
    fg_vr, fg_vi = fg_model(fg_r, fg_i, fg_comps)
    # real and imaginary parts of g_0 * conj(g_1).
    gg_real = gain_r_0 * gain_r_1 + gain_i_0 * gain_i_1
    gg_imag = gain_i_0 * gain_r_1 - gain_r_0 * gain_i_1
    # complex product (gg_real + i*gg_imag) * (fg_vr + i*fg_vi).
    model_r = gg_real * fg_vr - gg_imag * fg_vi
    model_i = gg_imag * fg_vr + gg_real * fg_vi
    return model_r, model_i
def mse(model_r, model_i, data_r, data_i, wgts):
    """Weighted sum of squared residuals between model and data (real plus imag)."""
    resid_r = data_r - model_r
    resid_i = data_i - model_i
    return tf.reduce_sum(wgts * (tf.square(resid_r) + tf.square(resid_i)))
def mse_chunked(g_r, g_i, fg_r, fg_i, fg_comps, nchunks, data_r, data_i, wgts, ant0_inds, ant1_inds, dtype=np.float32):
    """Total weighted mean-squared-error summed over all data chunks."""
    chunk_losses = []
    for chunk in range(nchunks):
        mr, mi = data_model(
            g_r, g_i, fg_r[chunk], fg_i[chunk], fg_comps[chunk], ant0_inds[chunk], ant1_inds[chunk]
        )
        chunk_losses.append(mse(mr, mi, data_r[chunk], data_i[chunk], wgts[chunk]))
    return tf.reduce_sum(tf.stack(chunk_losses))
def mse_chunked_sum_regularized(
    g_r,
    g_i,
    fg_r,
    fg_i,
    fg_comps,
    nchunks,
    data_r,
    data_i,
    wgts,
    ant0_inds,
    ant1_inds,
    prior_r_sum,
    prior_i_sum,
    dtype=np.float32,
):
    """Chunked weighted MSE plus a sum-regularization penalty.

    In addition to the per-chunk weighted MSE, penalizes the squared deviation
    of the weighted sums of the model's real and imaginary parts from the
    corresponding prior sums (prior_r_sum / prior_i_sum).
    """
    chunk_losses = []
    model_r_totals = []
    model_i_totals = []
    for chunk in range(nchunks):
        mr, mi = data_model(
            g_r, g_i, fg_r[chunk], fg_i[chunk], fg_comps[chunk], ant0_inds[chunk], ant1_inds[chunk]
        )
        # weighted sums of model real/imag parts feed the regularization term.
        model_r_totals.append(tf.reduce_sum(mr * wgts[chunk]))
        model_i_totals.append(tf.reduce_sum(mi * wgts[chunk]))
        chunk_losses.append(mse(mr, mi, data_r[chunk], data_i[chunk], wgts[chunk]))
    data_term = tf.reduce_sum(tf.stack(chunk_losses))
    reg_r = tf.square(tf.reduce_sum(tf.stack(model_r_totals)) - prior_r_sum)
    reg_i = tf.square(tf.reduce_sum(tf.stack(model_i_totals)) - prior_i_sum)
    return data_term + reg_r + reg_i
def read_calibrate_and_model_dpss(
    input_data_files,
    input_model_files=None,
    input_gain_files=None,
    resid_outfilename=None,
    gain_outfilename=None,
    model_outfilename=None,
    fitted_info_outfilename=None,
    x_orientation="east",
    clobber=False,
    bllen_min=0.0,
    bllen_max=np.inf,
    bl_ew_min=0.0,
    ex_ants=None,
    select_ants=None,
    gpu_index=None,
    gpu_memory_limit=None,
    precision=32,
    use_autocorrs_in_weights=False,
    **calibration_kwargs,
):
    """
    Driver function for using calamity with DPSS modeling.

    Reads data / model / gain files, applies baseline selection, and dispatches
    to calibrate_and_model_dpss (optionally pinned to a single GPU), then
    writes any requested outputs.

    Parameters
    ----------
    input_data_files: list of strings or UVData object.
        list of paths to input files to read in and calibrate.
    input_model_files: list of strings or UVData object, optional
        list of paths to model files for overal phase/amp reference.
        Default is None -> use input files as model for overall
        phase and amplitude calibration.
    input_gain_files: list of strings or UVCal object, optional
        list of paths to gain files to use as initial guesses for calibration.
    resid_outfilename: str, optional
        path for file to write residuals.
        default is None -> don't write out residuals.
    gain_outfilename: str, optional
        path to gain calfits to write fitted gains.
        default is None -> don't write out gains.
    model_outfilename, str, optional
        path to file to write model output.
        default is None -> Don't write model.
    fitted_info_outfilename, str, optional
        string to pickel fitting info to. Currently unused (fit info is only
        returned, not written).
    x_orientation: str, optional
        x_orientation of feeds, written into the output gains.
        default is "east".
    clobber: bool, optional
        overwrite existing output files. default is False.
    bllen_min: float, optional
        select all baselines with length greater then this value [meters].
        default is 0.0
    bllen_max: float, optional
        select only baselines with length less then this value [meters].
        default is np.inf.
    bl_ew_min: float, optional
        select all baselines with EW projected length greater then this value [meters].
        default is 0.0
    ex_ants: list of int, optional
        antennas to exclude from the data selection. default is None.
    select_ants: list of int, optional
        antennas to select exclusively in the data selection. default is None.
    gpu_index: int, optional
        limit visible GPUs to be the index of this GPU.
        default: None -> all GPUs are visible.
    gpu_memory_limit: float, optional
        GiB of memory on GPU that can be used.
        default None -> all memory available.
    precision: int, optional
        floating-point precision, 32 or 64 bits. default is 32.
    use_autocorrs_in_weights: bool, optional
        if True, use smooth fits to autocorrelations as
        inverse variance weights.
        default is False.
    calibration_kwargs: kwarg dict
        see kwrags for calibration_and_model_dpss()

    Returns
    -------
    model_fit: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid_fit: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains_fit: UVCal object
        uvcal object containing fitted gains.
    fit_info:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    gpus = tf.config.list_physical_devices("GPU")
    if gpu_index is not None:
        # See https://www.tensorflow.org/guide/gpu
        if gpus:
            if gpu_memory_limit is None:
                tf.config.set_visible_devices(gpus[gpu_index], "GPU")
            else:
                # LogicalDeviceConfiguration takes MiB; convert from GiB.
                tf.config.set_logical_device_configuration(
                    gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=gpu_memory_limit * 1024)]
                )
            logical_gpus = tf.config.list_logical_devices("GPU")
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
    # Accept a single path, a list of paths, or an already-read UVData object.
    if isinstance(input_data_files, str):
        input_data_files = [input_data_files]
    if isinstance(input_data_files, list):
        uvd = UVData()
        uvd.read(input_data_files)
    else:
        uvd = input_data_files
    if use_autocorrs_in_weights:
        weights = get_auto_weights(uvd)
    else:
        weights = None
    utils.select_baselines(
        uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants
    )
    if isinstance(input_model_files, str):
        input_model_files = [input_model_files]
    if input_model_files is not None:
        if isinstance(input_model_files, list):
            uvd_model = UVData()
            uvd_model.read(input_model_files)
        else:
            uvd_model = input_model_files
    else:
        uvd_model = None
    if uvd_model is not None:
        # BUGFIX: previously this re-selected baselines on `uvd` (the data,
        # already selected above) instead of `uvd_model`, leaving the sky
        # model's baselines out of sync with the data's selection.
        utils.select_baselines(uvd_model, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
    if isinstance(input_gain_files, str):
        input_gain_files = [input_gain_files]
    if input_gain_files is not None:
        if isinstance(input_gain_files, list):
            uvc = UVCal()
            uvc.read_calfits(input_gain_files)
        else:
            uvc = input_gain_files
    else:
        uvc = None
    # run calibration with specified GPU device.
    dtype = {32: np.float32, 64: np.float64}[precision]
    if gpu_index is not None and gpus:
        with tf.device(f"/device:GPU:{gpus[gpu_index].name[-1]}"):
            model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
                uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
            )
    else:
        model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
            uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
        )
    if resid_outfilename is not None:
        resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
    if gain_outfilename is not None:
        gains_fit.x_orientation = x_orientation
        gains_fit.write_calfits(gain_outfilename, clobber=clobber)
    if model_outfilename is not None:
        model_fit.write_uvh5(model_outfilename, clobber=clobber)
    # don't write fitting_info_outfilename for now.
    fit_info["calibration_kwargs"] = calibration_kwargs
    fit_info["calibration_kwargs"]["dtype"] = dtype
    return model_fit, resid_fit, gains_fit, fit_info
def input_output_parser():
    """Build an argparse parser holding the input/output arguments shared by calibration scripts.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser with an "Input and Output Arguments." group attached.
    """
    ap = argparse.ArgumentParser()
    sp = ap.add_argument_group("Input and Output Arguments.")
    sp.add_argument("--input_data_files", type=str, nargs="+", help="paths to data files to calibrate.", required=True)
    sp.add_argument(
        "--input_model_files", type=str, nargs="+", help="paths to model files to set overal amplitude and phase."
    )
    sp.add_argument("--input_gain_files", type=str, nargs="+", help="paths to gains to use as a staring point.")
    sp.add_argument("--resid_outfilename", type=str, default=None, help="postfix for resid output file.")
    sp.add_argument("--model_outfilename", type=str, default=None, help="postfix for foreground model file.")
    sp.add_argument("--gain_outfilename", type=str, default=None, help="path for writing fitted gains.")
    # BUGFIX: default was the *string* "False", which is truthy, so clobber was
    # effectively always enabled even without the flag. Use the boolean False.
    sp.add_argument("--clobber", action="store_true", default=False, help="Overwrite existing outputs.")
    sp.add_argument("--x_orientation", default="east", type=str, help="x_orientation of feeds to set in output gains.")
    sp.add_argument(
        "--bllen_min", default=0.0, type=float, help="minimum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bllen_max", default=np.inf, type=float, help="maximum baseline length to include in calbration and outputs."
    )
    sp.add_argument(
        "--bl_ew_min",
        default=0.0,
        type=float,
        help="minimum EW baseline component to include in calibration and outputs.",
    )
    sp.add_argument(
        "--ex_ants", default=None, type=int, nargs="+", help="Antennas to exclude from calibration and modeling."
    )
    sp.add_argument(
        "--select_ants",
        default=None,
        type=int,
        nargs="+",
        help="Antennas to select exclusively for calibration and modeling.",
    )
    sp.add_argument("--gpu_index", default=None, type=int, help="Index of GPU to run on (if on a multi-GPU machine).")
    sp.add_argument("--gpu_memory_limit", default=None, type=int, help="Limit GPU memory use to this many GBytes.")
    sp.add_argument("--precision", default=32, type=int, help="Number of bits to keep track of.")
    return ap
def fitting_argparser():
    """Extend input_output_parser() with general gradient-descent fitting arguments.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser with the I/O group plus a "General Fitting Arguments." group.
    """
    ap = input_output_parser()
    sp = ap.add_argument_group("General Fitting Arguments.")
    sp.add_argument(
        "--tol",
        type=float,
        default=1e-14,
        help="Stop gradient descent after cost function converges to within this value.",
    )
    sp.add_argument(
        "--optimizer", type=str, default="Adamax", help="First order optimizer to use for gradient descent."
    )
    sp.add_argument("--maxsteps", type=int, default=10000, help="Max number of steps to iterate during optimization.")
    sp.add_argument("--verbose", default=False, action="store_true", help="lots of text ouputs.")
    sp.add_argument(
        "--use_min",
        default=False,
        action="store_true",
        help="Use params for mimimum cost function derived. Otherwise, use the params last visited by the descent. Avoids momentum overshoot.",
    )
    sp.add_argument(
        "--use_redundancy",
        default=False,
        action="store_true",
        help="Model redundant visibilities with the same set of foreground parameters.",
    )
    # BUGFIX: --correct_model had action="store_true" with default=True, so the
    # flag was a no-op and there was no way to disable model correction from
    # the command line. Keep the original flag for backward compatibility and
    # add an explicit complementary flag to turn correction off.
    sp.add_argument(
        "--correct_model", default=True, action="store_true", help="Remove gain effects from foreground model."
    )
    sp.add_argument(
        "--no_correct_model",
        dest="correct_model",
        action="store_false",
        help="Do not remove gain effects from foreground model.",
    )
    sp.add_argument(
        "--correct_resid", default=False, action="store_true", help="Apply fitted gains to the fitted residuals."
    )
    sp.add_argument(
        "--graph_mode",
        default=False,
        action="store_true",
        help="Pre-compile computational graph before running gradient descent. Not reccomended for GPUs.",
    )
    sp.add_argument(
        "--init_guesses_from_previous_time_step",
        default=False,
        action="store_true",
        help="initialize gain and foreground guesses from previous time step when calibrating multiple times.",
    )
    sp.add_argument("--learning_rate", type=float, default=1e-2, help="gradient descent learning rate.")
    sp.add_argument(
        "--red_tol", type=float, default=1.0, help="Tolerance for determining redundancy between baselines [meters]."
    )
    sp.add_argument(
        "--skip_threshold",
        type=float,
        default=0.5,
        help="Skip and flag time/polarization if more then this fractionf of data is flagged.",
    )
    sp.add_argument("--model_regularization", type=str, default="post_hoc")
    sp.add_argument(
        "--nsamples_in_weights", default=False, action="store_true", help="Weight contributions to MSE by nsamples."
    )
    sp.add_argument(
        "--use_model_snr_weights",
        default=False,
        action="store_true",
        help="If True, weight contributions to MSE as proportional to SNR.",
    )
    sp.add_argument(
        "--use_autocorrs_in_weights",
        default=False,
        action="store_true",
        help="If True, use autocorrelations to derive relative SNR weights.",
    )
    return ap
def dpss_fit_argparser():
    """Extend fitting_argparser() with DPSS-specific window/delay arguments.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser with fitting args plus a "DPSS Specific Fitting Arguments." group.
    """
    ap = fitting_argparser()
    dpss_group = ap.add_argument_group("DPSS Specific Fitting Arguments.")
    dpss_group.add_argument(
        "--horizon", default=1.0, type=float, help="Fraction of horizon delay to model with DPSS modes."
    )
    dpss_group.add_argument(
        "--min_dly", default=0.0, type=float, help="Minimum delay [ns] to model with DPSS modes."
    )
    dpss_group.add_argument(
        "--offset", default=0.0, type=float, help="Offset from horizon delay [ns] to model with DPSS modes."
    )
    return ap
| import numpy as np
import tensorflow as tf
from pyuvdata import UVData, UVCal, UVFlag
from . import utils
import copy
import argparse
import itertools
import datetime
from pyuvdata import utils as uvutils
from .utils import echo
from .utils import PBARS
from . import cal_utils
from . import modeling
import re
# Registry mapping the optimizer name accepted by fit_gains_and_foregrounds
# (and the --optimizer command-line flag) to the corresponding tf.optimizers
# first-order gradient-descent class.
OPTIMIZERS = {
    "Adadelta": tf.optimizers.Adadelta,
    "Adam": tf.optimizers.Adam,
    "Adamax": tf.optimizers.Adamax,
    "Ftrl": tf.optimizers.Ftrl,
    "Nadam": tf.optimizers.Nadam,
    "SGD": tf.optimizers.SGD,
    "RMSprop": tf.optimizers.RMSprop,
    "Adagrad": tf.optimizers.Adagrad
}
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
    """Group fitting groups into chunks keyed by (baseline count, max vector count).

    Reorganizes fg_model_comps_dict so that all fitting groups spanning the
    same number of baselines land in the same chunk, which lets them be
    stacked into one dense modeling tensor later.

    Parameters
    ----------
    fg_model_comps_dict: dict
        keys are fitting groups: tuples of redundant groups, each a tuple of
        2-tuple antenna pairs. Values are (nred_grps * nfreqs, nvecs) ndarrays
        of modeling components spanning the group's baselines.
    use_redundancy: bool, optional
        If False, fitting groups whose redundant sub-groups all have the same
        (small) length are split so each redundant element becomes its own
        fitting group, preventing groups of varying redundancy from scattering
        into many chunks. default is False.
    grp_size_threshold: int, optional
        only split groups with fewer than this many redundant sub-groups.
        default is 5.

    Returns
    -------
    fg_model_comps_dict_chunked: dict
        maps (nbl, maxvecs) -> {fitting group with nbl baselines: components
        ndarray}, where maxvecs is the largest nvecs among those groups.
    """
    comps = copy.deepcopy(fg_model_comps_dict)
    if not use_redundancy:
        # Break up small fitting groups whose redundant sub-groups all have
        # the same length, so each redundant element is fit independently.
        for fit_grp in list(comps.keys()):
            grp_lens = np.asarray([len(red_grp) for red_grp in fit_grp])
            if np.allclose(grp_lens, np.mean(grp_lens)) and len(grp_lens) < grp_size_threshold:
                vectors = comps.pop(fit_grp)
                for eltnum in range(int(grp_lens[0])):
                    # Each split group keeps the same modeling vectors.
                    comps[tuple((red_grp[eltnum],) for red_grp in fit_grp)] = vectors
    grouped_keys = {}
    max_nvecs = {}
    for fit_grp, vectors in comps.items():
        # Total number of baselines spanned by this fitting group.
        nbl = sum(len(red_grp) for red_grp in fit_grp)
        grouped_keys.setdefault(nbl, []).append(fit_grp)
        max_nvecs[nbl] = max(max_nvecs.get(nbl, 0), vectors.shape[1])
    return {
        (nbl, max_nvecs[nbl]): {grp: comps[grp] for grp in grps}
        for nbl, grps in grouped_keys.items()
    }
def tensorize_fg_model_comps_dict(
    fg_model_comps_dict,
    ants_map,
    nfreqs,
    use_redundancy=False,
    dtype=np.float32,
    notebook_progressbar=False,
    verbose=False,
    grp_size_threshold=5,
):
    """Convert per-baseline model components into a Ndata x Ncomponent tensor

    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
        of baselines in each vector and the number of vectors. Each 2-tuple points to
        a dictionary where each key is the fitting group in fg_comps_dict that includes
        nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
        numpy.ndarray describing the modeling components for each fitting group in the chunk.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    nfreqs: int, optional
        number of frequency channels
    use_redundancy: bool, optional
        forwarded to chunk_fg_comp_dict_by_nbls. default is False.
    dtype: numpy.dtype
        tensor data types
        default is np.float32
    notebook_progressbar: bool, optional
        currently unused here; kept for interface compatibility.
    verbose: bool, optional
        text output. default is False.
    grp_size_threshold: int, optional
        forwarded to chunk_fg_comp_dict_by_nbls. default is 5.

    Returns
    -------
    fg_model_comps: list
        list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
        where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same
        modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls
        to be the maximum number of vectors representing any of the ngrps baseline grps
        which means that many rows in nvecs will be zero. This is wasteful of memory
        but it allows us to take advantage of the fast dense matrix operations on a GPU.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
            chunk
                group
                    baseline - (int 2-tuple)
    """
    echo(
        f"{datetime.datetime.now()} Computing foreground components matrices...\n",
        verbose=verbose,
    )
    # chunk foreground components so all groups in a chunk span the same nbls.
    fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(
        fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold
    )
    fg_model_comps = []
    corr_inds = []
    for nbls, nvecs in fg_model_comps_dict:
        ngrps = len(fg_model_comps_dict[(nbls, nvecs)])
        # Zero-padded up to nvecs; groups with fewer vectors leave trailing rows zero.
        modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
        corr_inds_chunk = []
        for grpnum, modeling_grp in enumerate(fg_model_comps_dict[(nbls, nvecs)]):
            corr_inds_grp = []
            nbl = 0
            for rgrpnum, red_grp in enumerate(modeling_grp):
                for ap in red_grp:
                    i, j = ants_map[ap[0]], ants_map[ap[1]]
                    corr_inds_grp.append((i, j))
                    vecslice = slice(0, fg_model_comps_dict[(nbls, nvecs)][modeling_grp].shape[1])
                    # All baselines in the same redundant group share the same
                    # component rows (indexed by rgrpnum), encoding redundancy.
                    compslice = slice(rgrpnum * nfreqs, (rgrpnum + 1) * nfreqs)
                    modeling_matrix[vecslice, grpnum, nbl] = fg_model_comps_dict[(nbls, nvecs)][modeling_grp][
                        compslice
                    ].T
                    nbl += 1
            corr_inds_chunk.append(corr_inds_grp)
        fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
        corr_inds.append(corr_inds_chunk)
    return fg_model_comps, corr_inds
def tensorize_data(
    uvdata,
    corr_inds,
    ants_map,
    polarization,
    time,
    data_scale_factor=1.0,
    weights=None,
    nsamples_in_weights=False,
    dtype=np.float32,
):
    """Convert data in uvdata object to a tensor

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing data, flags, and nsamples to tensorize.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
            chunk
                group
                    baseline - (int 2-tuple)
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    polarization: str
        pol-str of gain to extract.
    time: float
        time of data to convert to tensor.
    data_scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0
    weights: UVFlag object, optional
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is False.
    dtype: numpy.dtype
        data-type to store in tensor.
        default is np.float32

    Returns
    -------
    data_r: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the real components of the baselines specified by these 2-tuples.
    data_i: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the imag components of the baselines specified by these 2-tuples.
    wgts: tf.Tensor object
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the weights of the baselines specified by these 2-tuples.
        Weights are normalized so they sum to unity over all baselines.
    """
    # Invert ants_map so tensor indices can be mapped back to antenna numbers.
    ants_map_inv = {ants_map[i]: i for i in ants_map}
    # Dense (ant_i, ant_j, freq) staging arrays; unpopulated entries stay zero.
    dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
    data_r = np.zeros(dshape, dtype=dtype)
    data_i = np.zeros_like(data_r)
    wgts = np.zeros_like(data_r)
    wgtsum = 0.0
    for chunk in corr_inds:
        for fitgrp in chunk:
            for (i, j) in fitgrp:
                ap = ants_map_inv[i], ants_map_inv[j]
                bl = ap + (polarization,)
                # _key2inds (pyuvdata private API) reports whether the baseline
                # is stored as (a, b) or as its conjugate (b, a).
                dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
                if len(dinds1) > 0:
                    dinds = dinds1
                    conjugate = False
                    pol_ind = pol_ind[0]
                else:
                    dinds = dinds2
                    conjugate = True
                    pol_ind = pol_ind[1]
                # Select the blt index matching the requested JD (atol 1e-7 days).
                dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
                data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
                iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
                nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
                # NOTE(review): if this indexing yields a view rather than a copy,
                # the in-place division below scales uvdata.data_array itself —
                # verify against the pol_ind type returned by _key2inds.
                data /= data_scale_factor
                if conjugate:
                    data = np.conj(data)
                data_r[i, j] = data.real.astype(dtype)
                data_i[i, j] = data.imag.astype(dtype)
                if weights is None:
                    # Flag-based weights, optionally scaled by nsamples.
                    wgts[i, j] = iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                else:
                    # Pull weights from the UVFlag object; try both antpair orders.
                    if ap in weights.get_antpairs():
                        dinds = weights.antpair2ind(*ap)
                    else:
                        dinds = weights.antpair2ind(*ap[::-1])
                    dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
                    polnum = np.where(
                        weights.polarization_array
                        == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
                    )[0][0]
                    wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                wgtsum += np.sum(wgts[i, j])
    data_r = tf.convert_to_tensor(data_r, dtype=dtype)
    data_i = tf.convert_to_tensor(data_i, dtype=dtype)
    # Normalize weights to sum to unity over all gathered baselines.
    wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
    nchunks = len(corr_inds)
    # Gather the dense (ant_i, ant_j) arrays into per-chunk (ngrps, nbls, nfreqs) tensors.
    data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
    data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
    wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
    return data_r, data_i, wgts
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
    """Remove arbitrary phase and amplitude from deconvolved model and gains.

    Parameters
    ----------
    uvdata_reference_model: UVData object
        Reference model for "true" visibilities.
    uvdata_deconv: UVData object
        "Deconvolved" data solved for in self-cal loop.
    gains: UVCal object
        Gains solved for in self-cal loop.
    polarization: str
        Polarization string to compute phase and amplitude correction for.
    time: float
        JD of the time slice to renormalize.
    additional_flags: np.ndarray
        Any additional flags you wish to use for excluding data from normalization
        fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
        default is None -> Only exclude data in flags from reference model and deconv from
        determinging normalization.

    Returns
    -------
    N/A: Modifies uvdata_deconv and gains in-place.
    """
    # compute and multiply out scale-factor accounting for overall amplitude and phase degeneracy.
    polnum_data = np.where(
        uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    # Boolean blt selection for the requested time (atol 1e-7 days).
    bltsel = np.isclose(uvdata_deconv.time_array, time, atol=1e-7, rtol=0.0)
    # Only use samples unflagged in BOTH the deconvolved data and the reference model.
    selection = (
        ~uvdata_deconv.flag_array[bltsel, :, :, polnum_data]
        & ~uvdata_reference_model.flag_array[bltsel, :, :, polnum_data]
    )
    if additional_flags is not None:
        selection = selection & ~additional_flags[bltsel, :, :, polnum_data]
    data_ratio = (
        uvdata_reference_model.data_array[bltsel, :, :, polnum_data][selection]
        / uvdata_deconv.data_array[bltsel, :, :, polnum_data][selection]
    )
    # Mask divide-by-zero / invalid ratios so the nan-aware means ignore them.
    data_ratio[~np.isfinite(data_ratio)] = np.nan
    # NOTE: the phase factor is computed but deliberately not applied (see inline
    # comment below); only the amplitude degeneracy is removed here.
    scale_factor_phase = np.angle(np.nanmean(data_ratio))
    scale_factor_abs = np.sqrt(np.nanmean(np.abs(data_ratio) ** 2.0))
    scale_factor = scale_factor_abs  # * np.exp(1j * scale_factor_phase) Need to figure this out later.
    uvdata_deconv.data_array[bltsel, :, :, polnum_data] *= scale_factor
    polnum_gains = np.where(
        gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    gindt = np.where(np.isclose(gains.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # Split the inverse amplitude correction evenly between the two gains of
    # each baseline (hence the -0.5 power).
    gains.gain_array[:, :, :, gindt, polnum_gains] *= (scale_factor) ** -0.5
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
    """Extract one time/polarization slice of gains as real and imag tensors.

    Parameters
    ----------
    uvcal: UVCal object
        UVCal object holding gain data to tensorize.
    polarization: str
        pol-str of gain to extract.
    time: float
        JD of time to convert to tensor.
    dtype: numpy.dtype
        dtype of tensors to output.

    Returns
    -------
    gains_re: tf.Tensor object.
        real component of the gains for this time and polarization, Nant x Nfreq.
    gains_im: tf.Tensor object.
        imag component of the gains for this time and polarization, Nant x Nfreq.
    """
    jones_ind = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
    time_ind = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # Slice once, then split into real/imag parts.
    gain_slice = uvcal.gain_array[:, 0, :, time_ind, jones_ind].squeeze()
    gains_re = tf.convert_to_tensor(gain_slice.real, dtype=dtype)
    gains_im = tf.convert_to_tensor(gain_slice.imag, dtype=dtype)
    return gains_re, gains_im
def yield_fg_model_array(
    nants,
    nfreqs,
    fg_model_comps,
    fg_coeffs,
    corr_inds,
):
    """Evaluate the foreground model and scatter it into an antenna-pair array.

    Parameters
    ----------
    nants: int
        number of antennas in data to model.
    nfreqs: int
        number of frequencies in data to model.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
    fg_coeffs: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling coefficients.
        Each tensor is (nvecs, ngrps, 1, 1)
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
            chunk
                group
                    baseline - (int 2-tuple)

    Returns
    -------
    model: np.ndarray
        (nants, nants, nfreqs) numpy array holding the modeled visibilities,
        indexed by the antenna-pair indices in corr_inds.
    """
    model = np.zeros((nants, nants, nfreqs))
    for cnum, comps in enumerate(fg_model_comps):
        # Contract over the vector axis -> (ngrps, nbls, nfreqs) visibilities.
        chunk_model = tf.reduce_sum(fg_coeffs[cnum] * comps, axis=0).numpy()
        for gnum in range(comps.shape[1]):
            for blnum, (i, j) in enumerate(corr_inds[cnum][gnum]):
                model[i, j] = chunk_model[gnum, blnum]
    return model
def fit_gains_and_foregrounds(
    g_r,
    g_i,
    fg_r,
    fg_i,
    data_r,
    data_i,
    wgts,
    fg_comps,
    corr_inds,
    use_min=False,
    tol=1e-14,
    maxsteps=10000,
    optimizer="Adamax",
    freeze_model=False,
    verbose=False,
    notebook_progressbar=False,
    dtype=np.float32,
    graph_mode=False,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    sky_model_r=None,
    sky_model_i=None,
    model_regularization=None,
    graph_args_dict=None,
    **opt_kwargs,
):
    """Run optimization loop to fit gains and foreground components.

    Parameters
    ----------
    g_r: tf.Tensor object.
        tf.Tensor object holding real parts of gains.
    g_i: tf.Tensor object.
        tf.Tensor object holding imag parts of gains.
    fg_r: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding foreground coeffs.
    fg_i: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding imag coeffs.
    data_r: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        real part of data to fit.
    data_i: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        imag part of data to fit.
    wgts: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
    fg_comps: list:
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
        represents vectors to be used in modeling visibilities.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
            chunk
                group
                    baseline - (int 2-tuple)
    use_min: bool, optional
        if True, use the value that minimizes the loss function
        regardless of where optimization loop ended up
        (prevents overshooting due to excess momentum)
    tol: float, optional
        halt optimization loop once the loss changes by less then this value.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    sky_model_r: list of tf.Tensor objects, optional
        chunked tensors containing model in same format as data_r
    sky_model_i: list of tf.Tensor objects, optional
        chunked tensors containing model in the same format as data_i
    model_regularization: str, optional
        type of model regularization to perform. Currently support "sum"
        where the sums of real and imaginary parts (across all bls and freqs)
        are constrained to be the same as the sum of real and imag parts
        of data.
    graph_args_dict: dict, optional
        kwargs forwarded to tf.function when graph_mode is True.
    opt_kwargs: kwarg dict
        additional kwargs for tf.opt.Optimizer(). See tensorflow docs.

    Returns
    -------
    g_r_opt: tf.Tensor object
        real part of optimized gains.
    g_i_opt: tf.Tensor object
        imag part of optimized gains.
    fg_r_opt: tf.Tensor object
        real part of foreground coeffs.
    fg_i_opt: tf.Tensor object.
        imag part of optimized foreground coeffs.
    fit_history: dict
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    if graph_args_dict is None:
        graph_args_dict = {}
    # initialize the optimizer.
    echo(f"Using {str(dtype)} precision.")
    echo(f"{datetime.datetime.now()} Provided the following opt_kwargs")
    for k in opt_kwargs:
        echo(f"{k}: {opt_kwargs[k]}")
    opt = OPTIMIZERS[optimizer](**opt_kwargs)
    # set up history recording
    fit_history = {"loss": []}
    # Sentinel "infinity"; any real first-step loss should beat this.
    min_loss = 9e99
    nants = g_r.shape[0]
    nfreqs = g_r.shape[1]
    ant0_inds = []
    ant1_inds = []
    nchunks = len(fg_comps)
    # build up list of lists of ant0 and ant1 for gather ops
    for cnum in range(nchunks):
        ant0_chunk = []
        ant1_chunk = []
        ngrps = len(corr_inds[cnum])
        for gnum in range(ngrps):
            ant0_grp = []
            ant1_grp = []
            for cpair in corr_inds[cnum][gnum]:
                ant0_grp.append(cpair[0])
                ant1_grp.append(cpair[1])
            ant0_chunk.append(ant0_grp)
            ant1_chunk.append(ant1_grp)
        ant0_inds.append(ant0_chunk)
        ant1_inds.append(ant1_chunk)
    # Promote fit parameters to tf.Variable so gradients flow to them.
    g_r = tf.Variable(g_r)
    g_i = tf.Variable(g_i)
    if not freeze_model:
        fg_r = [tf.Variable(fgr) for fgr in fg_r]
        fg_i = [tf.Variable(fgi) for fgi in fg_i]
        # NOTE(review): `vars` shadows the builtin of the same name.
        vars = [g_r, g_i] + fg_r + fg_i
    else:
        vars = [g_r, g_i]
    echo(
        f"{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...",
        verbose=verbose,
    )
    if not freeze_model:
        echo(
            f"Performing gradient descent on total of {int(np.sum([fgr.shape[0] * fgr.shape[1] for fgr in fg_r]))} complex foreground parameters",
            verbose=verbose,
        )
        echo(
            f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[str(fgr.shape[:2]) + ':' + str(dc.shape[1]) for fgr, dc in zip(fg_r, data_r)]}",
            verbose=verbose,
        )
    # Choose the loss: plain chunked MSE, or MSE plus a sum-constraint tying
    # the weighted model sums to the weighted sky-model sums.
    if model_regularization == "sum":
        prior_r_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_r[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )
        prior_i_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_i[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )

        def loss_function():
            return mse_chunked_sum_regularized(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
                prior_r_sum=prior_r_sum,
                prior_i_sum=prior_i_sum,
            )

    else:

        def loss_function():
            return mse_chunked(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
            )

    def train_step_code():
        # One gradient-descent update of all fit variables.
        with tf.GradientTape() as tape:
            loss = loss_function()
        grads = tape.gradient(loss, vars)
        opt.apply_gradients(zip(grads, vars))
        return loss

    if graph_mode:

        @tf.function(**graph_args_dict)
        def train_step():
            return train_step_code()

    else:

        def train_step():
            return train_step_code()

    if n_profile_steps > 0:
        # Optional TensorBoard profiling run before the real fit.
        echo(f"{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...")
        tf.profiler.experimental.start(profile_log_dir)
        for step in PBARS[notebook_progressbar](range(n_profile_steps)):
            with tf.profiler.experimental.Trace("train", step_num=step):
                train_step()
        tf.profiler.experimental.stop()
    echo(
        f"{datetime.datetime.now()} Building Computational Graph...\n",
        verbose=verbose,
    )
    # First call triggers graph construction (when graph_mode) and gives the
    # initial loss; note this already performs one optimizer update.
    loss = train_step()
    echo(
        f"{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...\n",
        verbose=verbose,
    )
    for step in PBARS[notebook_progressbar](range(maxsteps)):
        loss = train_step()
        fit_history["loss"].append(loss.numpy())
        if use_min and fit_history["loss"][-1] < min_loss:
            # store the g_r, g_i, fg_r, fg_i values that minimize loss
            # in case of overshoot.
            # NOTE(review): if the loss is NaN on every step (or maxsteps == 0)
            # these *_opt names are never bound and the return below raises
            # NameError — confirm whether that is acceptable upstream.
            min_loss = fit_history["loss"][-1]
            g_r_opt = g_r.value()
            g_i_opt = g_i.value()
            if not freeze_model:
                fg_r_opt = [fgr.value() for fgr in fg_r]
                fg_i_opt = [fgi.value() for fgi in fg_i]
        if step >= 1 and np.abs(fit_history["loss"][-1] - fit_history["loss"][-2]) < tol:
            # Converged: successive losses differ by less than tol.
            echo(
                f"Tolerance thresshold met with delta of {np.abs(fit_history['loss'][-1] - fit_history['loss'][-2]):.2e}. Terminating...\n ",
                verbose=verbose,
            )
            break

    # if we dont use use_min, then the last
    # visited set of parameters will be used
    # to set the ML params.
    if not use_min:
        min_loss = fit_history["loss"][-1]
        g_r_opt = g_r.value()
        g_i_opt = g_i.value()
        if not freeze_model:
            fg_r_opt = [fgr.value() for fgr in fg_r]
            fg_i_opt = [fgi.value() for fgi in fg_i]
        else:
            # NOTE(review): with use_min=True and freeze_model=True the
            # frozen fg_r/fg_i are never assigned to fg_r_opt/fg_i_opt
            # (this branch is skipped), which would raise NameError at the
            # return — confirm that combination is not used by callers.
            fg_r_opt = fg_r
            fg_i_opt = fg_i

    echo(
        f"{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...\n",
        verbose=verbose,
    )
    return g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history
def insert_model_into_uvdata_tensor(
    uvdata,
    time,
    polarization,
    ants_map,
    red_grps,
    model_r,
    model_i,
    scale_factor=1.0,
):
    """Write a fitted visibility model for one time / polarization back into a UVData object.

    Parameters
    ----------
    uvdata: UVData object
        object whose data_array receives the model visibilities (modified in place).
    time: float
        JD of the time slot to write.
    polarization: str
        polarization string of the slot to write.
    ants_map: dict mapping integers to integers
        maps each antenna number to a unique index in [0, Nants_data).
    red_grps: list of lists of int 2-tuples
        lists of antenna-pair 2-tuples; pairs within a sublist are treated as
        redundant with one another (conjugation assumed already consistent).
    model_r: np.ndarray
        (Nants_data, Nants_data, Nfreqs) array holding real parts of the model.
    model_i: np.ndarray
        (Nants_data, Nants_data, Nfreqs) array holding imag parts of the model.
    scale_factor: float, optional
        multiplicative factor applied on write-back (undoes tensorization scaling).
        default is 1.0.

    Returns
    -------
    N/A: uvdata is modified in place.
    """
    pairs_in_data = uvdata.get_antpairs()
    polnum = np.where(
        uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)
    )[0][0]
    for grp in red_grps:
        for antpair in grp:
            row, col = ants_map[antpair[0]], ants_map[antpair[1]]
            if antpair in pairs_in_data:
                blt_inds = uvdata.antpair2ind(antpair)
                vis = model_r[row, col] + 1j * model_i[row, col]
            else:
                # data stores the conjugate baseline; conjugate the model to match.
                blt_inds = uvdata.antpair2ind(antpair[::-1])
                vis = model_r[row, col] - 1j * model_i[row, col]
            tind = np.where(np.isclose(time, uvdata.time_array[blt_inds], atol=1e-7, rtol=0.0))[0][0]
            uvdata.data_array[blt_inds[tind], 0, :, polnum] = vis * scale_factor
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
    """Write tensorized gain solutions for one time / polarization into a UVCal object.

    Parameters
    ----------
    uvcal: UVCal object
        calibration object whose gain_array is written (modified in place).
    time: float
        JD of the time slot to write.
    polarization: str
        jones/polarization string of the slot to write.
    gains_re: dict with int keys and tf.Tensor object values
        maps antenna index to an Nfreq 1d tf.Tensor holding the real part
        of that antenna's complex gain.
    gains_im: dict with int keys and tf.Tensor object values
        maps antenna index to an Nfreq 1d tf.Tensor holding the imag part
        of that antenna's complex gain.

    Returns
    -------
    N/A: uvcal is modified in place.
    """
    jones_ind = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
    time_ind = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    for antnum in range(uvcal.Nants_data):
        complex_gain = gains_re[antnum].numpy() + 1j * gains_im[antnum].numpy()
        uvcal.gain_array[antnum, 0, :, time_ind, jones_ind] = complex_gain
def tensorize_fg_coeffs(
    data,
    wgts,
    fg_model_comps,
    notebook_progressbar=False,
    verbose=False,
):
    """Initialize foreground coefficient tensors via per-group linear least squares.

    Parameters
    ----------
    data: list
        list of tf.Tensor objects, each shaped (ngrps, nbls, nfreqs), holding data.
    wgts: list
        list of tf.Tensor objects, each shaped (ngrps, nbls, nfreqs), holding weights.
    fg_model_comps: list
        list of tf.Tensor objects, each shaped (nvecs, ngrps, nbls, nfreqs),
        holding foreground modeling vectors (see tensorize_fg_model_comps_dict).
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    verbose: bool, optional
        lots of text output.
        default is False.

    Returns
    -------
    fg_coeffs: list of tf.Tensor objects
        per-chunk coefficient tensors of shape (nvecs, ngrps, 1, 1); the two
        trailing singleton axes satisfy broadcasting against the data tensors.
    """
    echo(
        f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
        verbose=verbose,
    )
    nchunks = len(data)
    # 0/1 masks marking which weights are nonzero.
    zero_one_wgts = [
        tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks)
    ]
    fg_coeffs = []
    for cnum in PBARS[notebook_progressbar](range(nchunks)):
        ngrps = data[cnum].shape[0]
        ndata = data[cnum].shape[1] * data[cnum].shape[2]
        nvecs = fg_model_comps[cnum].shape[0]
        grp_coeffs = []
        for gnum in range(ngrps):
            comp_mat = tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata))
            # rows that are identically zero are padding; solve only over the
            # leading non-padded rows.
            zero_rows = np.where(np.all(np.isclose(comp_mat.numpy(), 0.0), axis=1))[0]
            if len(zero_rows) > 0:
                ncoeffs = np.min(zero_rows)
            else:
                ncoeffs = nvecs
            rhs = tf.reshape(data[cnum][gnum] * zero_one_wgts[cnum][gnum], (ndata, 1))
            soln = tf.reshape(
                tf.linalg.lstsq(tf.transpose(comp_mat)[:, :ncoeffs], rhs),
                (ncoeffs,),
            )
            # zero-pad the solution back up to nvecs coefficients.
            grp_coeffs.append(tf.pad(soln, [(0, nvecs - ncoeffs)]))
        # add two trailing singleton axes to satisfy broadcasting rules.
        fg_coeffs.append(tf.reshape(tf.transpose(tf.stack(grp_coeffs)), (nvecs, ngrps, 1, 1)))
    echo(
        f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
        verbose=verbose,
    )
    return fg_coeffs
def get_auto_weights(uvdata, delay_extent=25.0):
    """
    inverse variance weights from interpolated autocorrelation data

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing autocorrelation data to use for computing inverse noise weights.
    delay_extent: float, optional
        Fit autocorrelation to delay components with this width.
        default is 25.0.

    Returns
    -------
    data_weights: UVFlag object
        UVFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.
    """
    # DPSS basis for a zero-length baseline; delay_extent alone sets the modeled delay width.
    dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
    data_weights = UVFlag(uvdata, mode="flag")
    data_weights.weights_array = np.zeros(uvdata.data_array.shape)
    # compute autocorrelation weights
    auto_fit_dict = {}
    bls = uvdata.get_antpairpols()
    for bl in bls:
        if bl[0] == bl[1]:
            d_wf = uvdata.get_data(bl)
            w_wf = ~uvdata.get_flags(bl)
            auto_fit_dict[bl] = []
            for ds, fs in zip(d_wf, w_wf):
                # fit autocorr waterfall to DPSS modes.
                nunflagged = np.count_nonzero(fs)
                amat = tf.convert_to_tensor(dpss_components[fs])
                dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
                # least-squares fit over unflagged channels, then evaluate the smooth
                # model across the full band (fills in flagged channels).
                model = dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze()
                auto_fit_dict[bl].append(model)
            auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
    # from autocorrelation fits, weights
    for bl in bls:
        # weight ~ 1 / (auto_i * auto_j), zeroed where the baseline is flagged.
        smooth_weights = 1.0 / (auto_fit_dict[bl[0], bl[0], bl[-1]] * auto_fit_dict[bl[1], bl[1], bl[-1]])
        smooth_weights *= ~uvdata.get_flags(bl)
        dinds = data_weights.antpair2ind(*bl[:2])
        polnum = np.where(
            data_weights.polarization_array == uvutils.polstr2num(bl[-1], x_orientation=data_weights.x_orientation)
        )[0][0]
        data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
    return data_weights
def calibrate_and_model_tensor(
    uvdata,
    fg_model_comps_dict,
    gains=None,
    freeze_model=False,
    optimizer="Adamax",
    tol=1e-14,
    maxsteps=10000,
    include_autos=False,
    verbose=False,
    sky_model=None,
    dtype=np.float32,
    use_min=False,
    use_redundancy=False,
    notebook_progressbar=False,
    correct_resid=False,
    correct_model=True,
    weights=None,
    nsamples_in_weights=True,
    graph_mode=False,
    grp_size_threshold=5,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    model_regularization="sum",
    init_guesses_from_previous_time_step=False,
    skip_threshold=0.5,
    use_model_snr_weights=False,
    **opt_kwargs,
):
    """Perform simultaneous calibration and foreground fitting using tensors.

    Parameters
    ----------
    uvdata: UVData object
        uvdata objet of data to be calibrated.
    fg_model_comps_dict: dictionary
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group' visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
        values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
    gains: UVCal object
        UVCal with initial gain estimates.
        There many smart ways to obtain initial gain estimates
        but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
        Users can determine initial gains with their favorite established cal algorithm.
        default is None -> start with unity gains.
        WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    tol: float, optional
        halting condition for optimizer loop. Stop loop when the change in the cost function falls
        below tol.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    include_autos: bool, optional
        include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        generate lots of text.
        default is False.
    sky_model: UVData object, optional
        a sky-model to use for initial estimates of foreground coeffs and
        to set overall flux scale and phases.
        Note that this model is not used to obtain initial gain estimates.
        These must be provided through the gains argument.
    dtype: numpy dtype, optional
        the float precision to be used in tensorflow gradient descent.
        runtime scales roughly inversely linear with precision.
        default is np.float32
    use_min: bool, optional
        If True, use the set of parameters that determine minimum as the ML params
        If False, use the last set of parameters visited by the optimization loop.
    use_redundancy: bool, optional
        if true, solve for one set of foreground coeffs per redundant baseline group
        instead of per baseline.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    correct_resid: bool, optional
        if True, gain correct residual.
        default is False
    correct_model: bool, optional
        if True, gain correct model.
        default is True
    weights: UVFlag object, optional.
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is True.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
        default is False.
    grp_size_threshold: int, optional
        passed through to tensorize_fg_model_comps_dict.
        default is 5.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    model_regularization: str, optional
        option to regularize model
        supported 'post_hoc', 'sum'
        default is 'sum'.
        'post_hoc' sets sum of amps equal and sum of phases equal.
    init_guesses_from_previous_time_step: bool, optional
        if True, then use foreground coeffs and gains from previous time-step to
        initialize gains for next time step.
    skip_threshold: float, optional
        if less than this fraction of data is unflagged on a particular poltime,
        flag the entire poltime.
    use_model_snr_weights: bool, optional
        if True, reweight the fitting weights by the model power (|v_model|^2)
        and renormalize before gradient descent.
        default is False.
    opt_kwargs: kwarg_dict
        kwargs for tf.optimizers

    Returns
    -------
    model: UVData object
        uvdata object containing model of the foregrounds
    resid: UVData object
        uvdata object containing resids which are the data minus
        the model with gains multiplied and then with the gains divided out.
    gains: UVCal object
        uvcal object containing estimates of the gain solutions. These solutions
        are not referenced to any sky model.
    fit_history:
        dictionary containing fit history with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    antpairs_data = uvdata.get_antpairs()
    if not include_autos:
        # drop autocorrelations before fitting.
        antpairs_data = set([ap for ap in antpairs_data if ap[0] != ap[1]])
    uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
    resid = copy.deepcopy(uvdata)
    model = copy.deepcopy(uvdata)
    model.data_array[:] = 0.0
    model.flag_array[:] = False
    # get redundant groups
    red_grps = []
    for fit_grp in fg_model_comps_dict.keys():
        for red_grp in fit_grp:
            red_grps.append(red_grp)
    if gains is None:
        echo(
            f"{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...\n",
            verbose=verbose,
        )
        gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
    # NOTE(review): if sky_model is None AND model_regularization is None, the
    # else-branch below calls .select on None and will raise -- confirm intent.
    if sky_model is None and model_regularization is not None:
        echo(
            f"{datetime.datetime.now()} Sky model is None. Initializing from data...\n",
            verbose=verbose,
        )
        sky_model = cal_utils.apply_gains(uvdata, gains)
    else:
        sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
    fit_history = {}
    ants_map = {ant: i for i, ant in enumerate(gains.ant_array)}
    # generate tensors to hold foreground components.
    fg_model_comps, corr_inds = tensorize_fg_model_comps_dict(
        fg_model_comps_dict=fg_model_comps_dict,
        ants_map=ants_map,
        dtype=dtype,
        nfreqs=sky_model.Nfreqs,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        grp_size_threshold=grp_size_threshold,
    )
    echo(
        f"{datetime.datetime.now()}Finished Converting Foreground Modeling Components to Tensors...\n",
        verbose=verbose,
    )
    # delete fg_model_comps_dict. It can take up a lot of memory.
    del fg_model_comps_dict
    # loop through polarization and times.
    for polnum, pol in enumerate(uvdata.get_pols()):
        echo(
            f"{datetime.datetime.now()} Working on pol {pol}, {polnum + 1} of {uvdata.Npols}...\n",
            verbose=verbose,
        )
        fit_history_p = {}
        first_time = True
        for time_index, time in enumerate(np.unique(uvdata.time_array)):
            echo(
                f"{datetime.datetime.now()} Working on time {time_index + 1} of {uvdata.Ntimes}...\n",
                verbose=verbose,
            )
            bltsel = np.isclose(uvdata.time_array, time, atol=1e-7, rtol=0.0)
            frac_unflagged = np.count_nonzero(~uvdata.flag_array[bltsel, 0, :, polnum]) / (
                uvdata.Nbls * uvdata.Nfreqs
            )
            # check that fraction of unflagged data > skip_threshold.
            if frac_unflagged >= skip_threshold:
                # rms of the unflagged data; used to normalize tensorized data so
                # gradient-descent quantities are near unity.
                rmsdata = np.sqrt(
                    np.mean(
                        np.abs(uvdata.data_array[bltsel, 0, :, polnum][~uvdata.flag_array[bltsel, 0, :, polnum]]) ** 2.0
                    )
                )
                echo(f"{datetime.datetime.now()} Tensorizing data...\n", verbose=verbose)
                data_r, data_i, wgts = tensorize_data(
                    uvdata,
                    corr_inds=corr_inds,
                    ants_map=ants_map,
                    polarization=pol,
                    time=time,
                    data_scale_factor=rmsdata,
                    weights=weights,
                    nsamples_in_weights=nsamples_in_weights,
                    dtype=dtype,
                )
                if sky_model is not None:
                    echo(f"{datetime.datetime.now()} Tensorizing sky model...\n", verbose=verbose)
                    sky_model_r, sky_model_i, _ = tensorize_data(
                        sky_model,
                        corr_inds=corr_inds,
                        ants_map=ants_map,
                        polarization=pol,
                        time=time,
                        data_scale_factor=rmsdata,
                        weights=weights,
                        dtype=dtype,
                    )
                else:
                    sky_model_r, sky_model_i = None, None
                if first_time or not init_guesses_from_previous_time_step:
                    # (re)initialize gains and foreground coefficients; later time
                    # steps may warm-start from the previous step's solution.
                    first_time = False
                    echo(f"{datetime.datetime.now()} Tensorizing Gains...\n", verbose=verbose)
                    g_r, g_i = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
                    # generate initial guess for foreground coeffs.
                    echo(
                        f"{datetime.datetime.now()} Tensorizing Foreground coeffs...\n",
                        verbose=verbose,
                    )
                    fg_r = tensorize_fg_coeffs(
                        data=data_r,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                    fg_i = tensorize_fg_coeffs(
                        data=data_i,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                if use_model_snr_weights:
                    # reweight by model power |v|^2, then renormalize the weights.
                    wgts_model = [fg_model(fgr, fgi, fgc) for fgr, fgi, fgc in zip(fg_r, fg_i, fg_model_comps)]
                    wgts = [(tf.square(wm[0]) + tf.square(wm[1])) * w for wm, w in zip(wgts_model, wgts)]
                    del wgts_model
                    # renormalize
                    wgts_sum = np.sum([np.sum(w) for w in wgts])
                    wgts = [w / wgts_sum for w in wgts]
                (g_r, g_i, fg_r, fg_i, fit_history_p[time_index],) = fit_gains_and_foregrounds(
                    g_r=g_r,
                    g_i=g_i,
                    fg_r=fg_r,
                    fg_i=fg_i,
                    data_r=data_r,
                    data_i=data_i,
                    wgts=wgts,
                    fg_comps=fg_model_comps,
                    corr_inds=corr_inds,
                    optimizer=optimizer,
                    use_min=use_min,
                    freeze_model=freeze_model,
                    notebook_progressbar=notebook_progressbar,
                    verbose=verbose,
                    tol=tol,
                    dtype=dtype,
                    maxsteps=maxsteps,
                    graph_mode=graph_mode,
                    n_profile_steps=n_profile_steps,
                    profile_log_dir=profile_log_dir,
                    sky_model_r=sky_model_r,
                    sky_model_i=sky_model_i,
                    model_regularization=model_regularization,
                    **opt_kwargs,
                )
                # insert into model uvdata.
                insert_model_into_uvdata_tensor(
                    uvdata=model,
                    time=time,
                    polarization=pol,
                    ants_map=ants_map,
                    red_grps=red_grps,
                    model_r=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_r,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    model_i=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_i,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    scale_factor=rmsdata,
                )
                # insert gains into uvcal
                insert_gains_into_uvcal(
                    uvcal=gains,
                    time=time,
                    polarization=pol,
                    gains_re=g_r,
                    gains_im=g_i,
                )
            else:
                echo(
                    f"{datetime.datetime.now()}: Only {frac_unflagged * 100}-percent of data unflagged. Skipping...\n",
                    verbose=verbose,
                )
                flag_poltime(resid, time=time, polarization=pol)
                flag_poltime(gains, time=time, polarization=pol)
                flag_poltime(model, time=time, polarization=pol)
                # NOTE(review): this marker is overwritten by the fit_history_p
                # assignment after the time loop -- confirm intended bookkeeping.
                fit_history[polnum] = "skipped!"
            # normalize on sky model if we use post-hoc regularization
            if not freeze_model and model_regularization == "post_hoc" and np.any(~model.flag_array[bltsel]):
                renormalize(
                    uvdata_reference_model=sky_model,
                    uvdata_deconv=model,
                    gains=gains,
                    polarization=pol,
                    time=time,
                    additional_flags=uvdata.flag_array,
                )
        fit_history[polnum] = fit_history_p
    # apply fitted gains to the model and form gain-corrected residuals.
    model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
    if not correct_model:
        model = model_with_gains
    resid.data_array -= model_with_gains.data_array
    resid.data_array[model_with_gains.flag_array] = 0.0  # set resid to zero where model is flagged.
    resid.data_array[uvdata.flag_array] = 0.0  # also set resid to zero where data is flagged.
    if correct_resid:
        resid = cal_utils.apply_gains(resid, gains)
    return model, resid, gains, fit_history
def flag_poltime(data_object, time, polarization):
    """Flag a single time / polarization slot in a UVData or UVCal object, in place.

    For UVData the matching data are zeroed and flagged; for UVCal the matching
    gains are set to unity and flagged. Raises ValueError for any other type.
    """
    if isinstance(data_object, UVData):
        blt_mask = np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0)
        pol_ind = np.where(
            data_object.polarization_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        data_object.flag_array[blt_mask, :, :, pol_ind] = True
        data_object.data_array[blt_mask, :, :, pol_ind] = 0.0
    elif isinstance(data_object, UVCal):
        pol_ind = np.where(
            data_object.jones_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        time_ind = np.where(np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0))[0][0]
        data_object.gain_array[:, 0, :, time_ind, pol_ind] = 1.0
        data_object.flag_array[:, 0, :, time_ind, pol_ind] = True
    else:
        raise ValueError("only supports data_object that is UVCal or UVData.")
def calibrate_and_model_mixed(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    ant_dly=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    red_tol_freq=0.5,
    n_angle_bins=200,
    notebook_progressbar=False,
    use_redundancy=False,
    use_tensorflow_to_derive_modeling_comps=False,
    eigenval_cutoff=1e-10,
    dtype_matinv=np.float64,
    require_exact_angle_match=True,
    angle_match_tol=1e-3,
    grp_size_threshold=5,
    model_comps_dict=None,
    save_dict_to=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with a mix of DPSS vectors
    for baselines with no frequency redundancy and simple_cov components for
    groups of baselines that have some frequency redundancy.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    ant_dly: float, optional
        intrinsic chromaticity of each antenna element
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    red_tol_freq: float, optional
        tolerance for treating two baselines as having some
        frequency redundancy. When frequency redundancy exists, baselines
        will be modeled jointly.
    n_angle_bins: int, optional
        number of angular bins to use between -pi and pi to compare baselines
        default is 200
    notebook_progressbar: bool, optional
        if True, show graphical notebook progress bar that looks good in jupyter.
        default is False.
    use_redundancy: bool, optional
        If True, model all baselines within each redundant group with the same components
        If False, model each baseline within each redundant group with sepearate components.
        default is False.
    use_tensorflow_to_derive_modeling_comps: bool, optional
        Use tensorflow methods to derive multi-baseline modeling components.
        recommended if you have a GPU with enough memory to perform spectral decomposition
        of multi-baseline covariance matrices.
    eigenval_cutoff: float, optional
        threshold of eigenvectors to include in modeling components.
    dtype_matinv: numpy.dtype, optional
        data type to use for deriving modeling components.
        default is np.float64 (need higher precision for cov-mat like calculation)
    require_exact_angle_match: bool, optional
        passed through to modeling.get_uv_overlapping_grps_conjugated.
        default is True.
    angle_match_tol: float, optional
        angle matching tolerance passed through to
        modeling.get_uv_overlapping_grps_conjugated.
        default is 1e-3.
    grp_size_threshold: int, optional
        groups with number of elements less then this value are split up into single baselines.
        default is 5.
    model_comps_dict: dict, optional
        dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps
        for more specifics.
        default is None -> compute fitting groups automatically.
    save_dict_to: str, optional
        if provided, save model_comps_dict to this path with np.save.
        default is None -> do not save.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # get fitting groups
    fitting_grps, blvecs, _, _ = modeling.get_uv_overlapping_grps_conjugated(
        uvdata,
        red_tol=red_tol,
        include_autos=include_autos,
        red_tol_freq=red_tol_freq,
        n_angle_bins=n_angle_bins,
        notebook_progressbar=notebook_progressbar,
        require_exact_angle_match=require_exact_angle_match,
        angle_match_tol=angle_match_tol,
    )
    # derive modeling components unless a precomputed dict was supplied.
    if model_comps_dict is None:
        model_comps_dict = modeling.yield_mixed_comps(
            fitting_grps,
            blvecs,
            uvdata.freq_array[0],
            eigenval_cutoff=eigenval_cutoff,
            use_tensorflow=use_tensorflow_to_derive_modeling_comps,
            ant_dly=ant_dly,
            horizon=horizon,
            offset=offset,
            min_dly=min_dly,
            verbose=verbose,
            dtype=dtype_matinv,
            notebook_progressbar=notebook_progressbar,
            grp_size_threshold=grp_size_threshold,
        )
    if save_dict_to is not None:
        np.save(save_dict_to, model_comps_dict)
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def calibrate_and_model_dpss(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    notebook_progressbar=False,
    fg_model_comps_dict=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with DPSS vectors.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    fg_model_comps_dict: dict, optional
        dictionary containing precomputed foreground model components.
        Currently only supported if use_redundancy is False.
        default is None -> compute DPSS components from uvdata.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # Bug fix: fg_model_comps_dict was previously accepted but ignored and the
    # DPSS components were always recomputed. Honor a precomputed dict when given.
    if fg_model_comps_dict is None:
        fg_model_comps_dict = modeling.yield_pbl_dpss_model_comps(
            uvdata,
            horizon=horizon,
            min_dly=min_dly,
            offset=offset,
            include_autos=include_autos,
            red_tol=red_tol,
            notebook_progressbar=notebook_progressbar,
            verbose=verbose,
        )
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=fg_model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def fg_model(fg_r, fg_i, fg_comps):
    """Evaluate the foreground model: component-weighted sums along the vector axis.

    Returns the (real, imag) model tensors obtained by contracting the
    coefficient tensors against the component tensor over axis 0.
    """
    real_part = tf.reduce_sum(fg_comps * fg_r, axis=0)
    imag_part = tf.reduce_sum(fg_comps * fg_i, axis=0)
    return real_part, imag_part
def data_model(g_r, g_i, fg_r, fg_i, fg_comps, ant0_inds, ant1_inds):
    """Form the per-baseline data model g_0 * conj(g_1) * v in real/imag parts.

    Gains are gathered per baseline via ant0_inds / ant1_inds; the visibility
    model v comes from fg_model(fg_r, fg_i, fg_comps).
    """
    g0r = tf.gather(g_r, ant0_inds)
    g0i = tf.gather(g_i, ant0_inds)
    g1r = tf.gather(g_r, ant1_inds)
    g1i = tf.gather(g_i, ant1_inds)
    vr, vi = fg_model(fg_r, fg_i, fg_comps)
    # expand g0 * conj(g1) * (vr + i vi) into real and imaginary pieces.
    rr = g0r * g1r
    ii = g0i * g1i
    ri = g0r * g1i
    ir = g0i * g1r
    model_r = (rr + ii) * vr + (ri - ir) * vi
    model_i = (ir - ri) * vr + (rr + ii) * vi
    return model_r, model_i
def mse(model_r, model_i, data_r, data_i, wgts):
    """Weighted sum of squared residuals between model and data (real plus imag)."""
    resid_r = data_r - model_r
    resid_i = data_i - model_i
    return tf.reduce_sum((tf.square(resid_r) + tf.square(resid_i)) * wgts)
def mse_chunked(g_r, g_i, fg_r, fg_i, fg_comps, nchunks, data_r, data_i, wgts, ant0_inds, ant1_inds, dtype=np.float32):
    """Total weighted MSE summed over all data chunks."""
    chunk_losses = []
    for cnum in range(nchunks):
        mdl_r, mdl_i = data_model(
            g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
        )
        # the zero constant pins the accumulator dtype, matching the original behavior.
        chunk_losses.append(tf.constant(0.0, dtype) + mse(mdl_r, mdl_i, data_r[cnum], data_i[cnum], wgts[cnum]))
    return tf.reduce_sum(tf.stack(chunk_losses))
def mse_chunked_sum_regularized(
    g_r,
    g_i,
    fg_r,
    fg_i,
    fg_comps,
    nchunks,
    data_r,
    data_i,
    wgts,
    ant0_inds,
    ant1_inds,
    prior_r_sum,
    prior_i_sum,
    dtype=np.float32,
):
    """Chunked weighted MSE plus a quadratic penalty tying the weighted model sums
    (real and imag separately) to the prior sums prior_r_sum / prior_i_sum."""
    chunk_losses = []
    wsum_r = []
    wsum_i = []
    for cnum in range(nchunks):
        mdl_r, mdl_i = data_model(
            g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
        )
        # weighted sums of model real/imag parts feed the regularization terms;
        # the zero constants pin the accumulator dtype as in the original.
        wsum_r.append(tf.constant(0.0, dtype) + tf.reduce_sum(mdl_r * wgts[cnum]))
        wsum_i.append(tf.constant(0.0, dtype) + tf.reduce_sum(mdl_i * wgts[cnum]))
        chunk_losses.append(tf.constant(0.0, dtype) + mse(mdl_r, mdl_i, data_r[cnum], data_i[cnum], wgts[cnum]))
    total = tf.reduce_sum(tf.stack(chunk_losses))
    total = total + tf.square(tf.reduce_sum(tf.stack(wsum_r)) - prior_r_sum)
    total = total + tf.square(tf.reduce_sum(tf.stack(wsum_i)) - prior_i_sum)
    return total
def read_calibrate_and_model_dpss(
    input_data_files,
    input_model_files=None,
    input_gain_files=None,
    resid_outfilename=None,
    gain_outfilename=None,
    model_outfilename=None,
    fitted_info_outfilename=None,
    x_orientation="east",
    clobber=False,
    bllen_min=0.0,
    bllen_max=np.inf,
    bl_ew_min=0.0,
    ex_ants=None,
    select_ants=None,
    gpu_index=None,
    gpu_memory_limit=None,
    precision=32,
    use_autocorrs_in_weights=False,
    **calibration_kwargs,
):
    """
    Driver function for using calamity with DPSS modeling.

    Reads data / model / gain files, applies baseline selection, configures the
    GPU (optionally), runs calibrate_and_model_dpss, and writes any requested
    outputs.

    Parameters
    ----------
    input_data_files: list of strings or UVData object.
        list of paths to input files to read in and calibrate.
    input_model_files: list of strings or UVData object, optional
        list of paths to model files for overall phase/amp reference.
        Default is None -> use input files as model for overall
        phase and amplitude calibration.
    input_gain_files: list of strings or UVCal object, optional
        list of paths to gain files to use as initial guesses for calibration.
    resid_outfilename: str, optional
        path for file to write residuals.
        default is None -> don't write out residuals.
    gain_outfilename: str, optional
        path to gain calfits to write fitted gains.
        default is None -> don't write out gains.
    model_outfilename, str, optional
        path to file to write model output.
        default is None -> Don't write model.
    fitted_info_outfilename, str, optional
        string to pickle fitting info to.
        NOTE: currently unused -- fit info is returned but never written.
    x_orientation: str, optional
        x_orientation of feeds, stamped onto output gains.
        default is "east".
    clobber: bool, optional
        overwrite existing output files. default is False.
    bllen_min: float, optional
        select all baselines with length greater than this value [meters].
        default is 0.0
    bllen_max: float, optional
        select only baselines with length less than this value [meters].
        default is np.inf.
    bl_ew_min: float, optional
        select all baselines with EW projected length greater than this value [meters].
        default is 0.0
    ex_ants: list, optional
        antennas to exclude from calibration and modeling. default is None.
    select_ants: list, optional
        antennas to select exclusively. default is None.
    gpu_index: int, optional
        limit visible GPUs to be the index of this GPU.
        default: None -> all GPUs are visible.
    gpu_memory_limit: float, optional
        GiB of memory on GPU that can be used.
        default None -> all memory available.
    precision: int, optional
        32 or 64; float precision used throughout. default is 32.
    use_autocorrs_in_weights: bool, optional
        if True, use smooth fits to autocorrelations as
        inverse variance weights.
        default is False.
    calibration_kwargs: kwarg dict
        see kwargs for calibration_and_model_dpss()

    Returns
    -------
    model_fit: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid_fit: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains_fit: UVCal object
        uvcal object containing fitted gains.
    fit_info:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    gpus = tf.config.list_physical_devices("GPU")
    if gpu_index is not None:
        # See https://www.tensorflow.org/guide/gpu
        if gpus:
            if gpu_memory_limit is None:
                tf.config.set_visible_devices(gpus[gpu_index], "GPU")
            else:
                # memory_limit is specified in MiB; convert from GiB.
                tf.config.set_logical_device_configuration(
                    gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=gpu_memory_limit * 1024)]
                )
            logical_gpus = tf.config.list_logical_devices("GPU")
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
    # Accept a single path, a list of paths, or an already-loaded UVData object.
    if isinstance(input_data_files, str):
        input_data_files = [input_data_files]
    if isinstance(input_data_files, list):
        uvd = UVData()
        uvd.read(input_data_files)
    else:
        uvd = input_data_files
    if use_autocorrs_in_weights:
        weights = get_auto_weights(uvd)
    else:
        weights = None
    utils.select_baselines(
        uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants
    )
    if isinstance(input_model_files, str):
        input_model_files = [input_model_files]
    if input_model_files is not None:
        if isinstance(input_model_files, list):
            uvd_model = UVData()
            uvd_model.read(input_model_files)
        else:
            uvd_model = input_model_files
    else:
        uvd_model = None
    if uvd_model is not None:
        # BUGFIX: apply the same baseline cuts to the *model*; previously this
        # re-selected on `uvd` (already filtered above), leaving `uvd_model`
        # unfiltered and mismatched with the data.
        utils.select_baselines(uvd_model, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
    if isinstance(input_gain_files, str):
        input_gain_files = [input_gain_files]
    if input_gain_files is not None:
        if isinstance(input_gain_files, list):
            uvc = UVCal()
            uvc.read_calfits(input_gain_files)
        else:
            uvc = input_gain_files
    else:
        uvc = None
    # run calibration with specified GPU device.
    dtype = {32: np.float32, 64: np.float64}[precision]
    if gpu_index is not None and gpus:
        with tf.device(f"/device:GPU:{gpus[gpu_index].name[-1]}"):
            model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
                uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
            )
    else:
        model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
            uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
        )
    if resid_outfilename is not None:
        resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
    if gain_outfilename is not None:
        gains_fit.x_orientation = x_orientation
        gains_fit.write_calfits(gain_outfilename, clobber=clobber)
    if model_outfilename is not None:
        model_fit.write_uvh5(model_outfilename, clobber=clobber)
    # don't write fitting_info_outfilename for now.
    fit_info["calibration_kwargs"] = calibration_kwargs
    fit_info["calibration_kwargs"]["dtype"] = dtype
    return model_fit, resid_fit, gains_fit, fit_info
def input_output_parser():
    """Build the argument parser for input/output options shared by all drivers.

    Returns
    -------
    argparse.ArgumentParser
        Parser populated with file I/O, baseline-selection, and GPU options.
    """
    ap = argparse.ArgumentParser()
    sp = ap.add_argument_group("Input and Output Arguments.")
    sp.add_argument("--input_data_files", type=str, nargs="+", help="paths to data files to calibrate.", required=True)
    sp.add_argument(
        "--input_model_files", type=str, nargs="+", help="paths to model files to set overall amplitude and phase."
    )
    sp.add_argument("--input_gain_files", type=str, nargs="+", help="paths to gains to use as a starting point.")
    sp.add_argument("--resid_outfilename", type=str, default=None, help="postfix for resid output file.")
    sp.add_argument("--model_outfilename", type=str, default=None, help="postfix for foreground model file.")
    sp.add_argument("--gain_outfilename", type=str, default=None, help="path for writing fitted gains.")
    # BUGFIX: default was the *string* "False", which is truthy -- making
    # clobber effectively always on. store_true flags need a boolean default.
    sp.add_argument("--clobber", action="store_true", default=False, help="Overwrite existing outputs.")
    sp.add_argument("--x_orientation", default="east", type=str, help="x_orientation of feeds to set in output gains.")
    sp.add_argument(
        "--bllen_min", default=0.0, type=float, help="minimum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bllen_max", default=np.inf, type=float, help="maximum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bl_ew_min",
        default=0.0,
        type=float,
        help="minimum EW baseline component to include in calibration and outputs.",
    )
    sp.add_argument(
        "--ex_ants", default=None, type=int, nargs="+", help="Antennas to exclude from calibration and modeling."
    )
    sp.add_argument(
        "--select_ants",
        default=None,
        type=int,
        nargs="+",
        help="Antennas to select exclusively for calibration and modeling.",
    )
    sp.add_argument("--gpu_index", default=None, type=int, help="Index of GPU to run on (if on a multi-GPU machine).")
    sp.add_argument("--gpu_memory_limit", default=None, type=int, help="Limit GPU memory use to this many GBytes.")
    sp.add_argument("--precision", default=32, type=int, help="Number of bits to keep track of.")
    return ap
def fitting_argparser():
    """Extend the I/O parser with general gradient-descent fitting options.

    Returns
    -------
    argparse.ArgumentParser
        The parser from input_output_parser() with optimizer, convergence,
        and weighting controls added.
    """
    parser = input_output_parser()
    group = parser.add_argument_group("General Fitting Arguments.")
    group.add_argument(
        "--tol",
        type=float,
        default=1e-14,
        help="Stop gradient descent after cost function converges to within this value.",
    )
    group.add_argument(
        "--optimizer", type=str, default="Adamax", help="First order optimizer to use for gradient descent."
    )
    group.add_argument("--maxsteps", type=int, default=10000, help="Max number of steps to iterate during optimization.")
    group.add_argument("--verbose", default=False, action="store_true", help="lots of text ouputs.")
    group.add_argument(
        "--use_min",
        default=False,
        action="store_true",
        help="Use params for mimimum cost function derived. Otherwise, use the params last visited by the descent. Avoids momentum overshoot.",
    )
    group.add_argument(
        "--use_redundancy",
        default=False,
        action="store_true",
        help="Model redundant visibilities with the same set of foreground parameters.",
    )
    # NOTE(review): with action="store_true" and default=True, passing
    # --correct_model is a no-op and the flag can never disable correction;
    # confirm intent before changing (a store_false flag would alter the CLI).
    group.add_argument(
        "--correct_model", default=True, action="store_true", help="Remove gain effects from foreground model."
    )
    group.add_argument(
        "--correct_resid", default=False, action="store_true", help="Apply fitted gains to the fitted residuals."
    )
    group.add_argument(
        "--graph_mode",
        default=False,
        action="store_true",
        help="Pre-compile computational graph before running gradient descent. Not reccomended for GPUs.",
    )
    group.add_argument(
        "--init_guesses_from_previous_time_step",
        default=False,
        action="store_true",
        help="initialize gain and foreground guesses from previous time step when calibrating multiple times.",
    )
    group.add_argument("--learning_rate", type=float, default=1e-2, help="gradient descent learning rate.")
    group.add_argument(
        "--red_tol", type=float, default=1.0, help="Tolerance for determining redundancy between baselines [meters]."
    )
    group.add_argument(
        "--skip_threshold",
        type=float,
        default=0.5,
        help="Skip and flag time/polarization if more then this fractionf of data is flagged.",
    )
    group.add_argument("--model_regularization", type=str, default="post_hoc")
    group.add_argument(
        "--nsamples_in_weights", default=False, action="store_true", help="Weight contributions to MSE by nsamples."
    )
    group.add_argument(
        "--use_model_snr_weights",
        default=False,
        action="store_true",
        help="If True, weight contributions to MSE as proportional to SNR.",
    )
    group.add_argument(
        "--use_autocorrs_in_weights",
        default=False,
        action="store_true",
        help="If True, use autocorrelations to derive relative SNR weights.",
    )
    return parser
def dpss_fit_argparser():
    """Return the general fitting argparser extended with DPSS-specific options."""
    parser = fitting_argparser()
    group = parser.add_argument_group("DPSS Specific Fitting Arguments.")
    group.add_argument("--horizon", default=1.0, type=float, help="Fraction of horizon delay to model with DPSS modes.")
    group.add_argument("--min_dly", default=0.0, type=float, help="Minimum delay [ns] to model with DPSS modes.")
    group.add_argument(
        "--offset", default=0.0, type=float, help="Offset from horizon delay [ns] to model with DPSS modes."
    )
    return parser
|
import json
import os
import uuid
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from unittest import mock
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import override_settings
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from talents.models import Agency, Talent
from orders.models import (
AgencyProfit,
AgencyProfitPercentage,
Buyer,
Charge,
CreditCard,
CustomTalentProfitPercentage,
TalentProfit,
DefaultTalentProfitPercentage,
Order,
)
from request_shoutout.domain.models import Charge as DomainCharge
from shoutouts.models import ShoutoutVideo
from utils.telegram import TELEGRAM_BOT_API_URL
from wirecard.models import WirecardTransactionData
User = get_user_model()
FAKE_WIRECARD_ORDER_HASH = 'ORD-O5DLMAJZPTHV'
FAKE_WIRECARD_PAYMENT_HASH = 'PAY-HL7QRKFEQNHV'
def get_wirecard_mocked_abriged_response():
    """Build a mock of the Wirecard capture-payment HTTP response.

    Returns a Mock whose .status_code and .json() mimic an abridged
    ("abriged" in the legacy name) AUTHORIZED payment payload.
    """
    response = mock.Mock()
    response.status_code = 200
    response.json.return_value = {
        'id': FAKE_WIRECARD_PAYMENT_HASH,
        'status': 'AUTHORIZED',
    }
    return response
@override_settings(
    task_eager_propagates=True,
    task_always_eager=True,
    broker_url='memory://',
    backend='memory'
)
@mock.patch('wirecard.services.requests.post', return_value=get_wirecard_mocked_abriged_response())
class FulfillShoutoutRequestTest(APITestCase):
    """Tests for a talent fulfilling a shoutout request (video upload endpoint).

    Celery runs eagerly via the settings override, and the class-level patch
    replaces ``requests.post`` as imported by the wirecard service, injecting
    an extra mock argument into every test method.
    """

    def do_login(self, user, password):
        """Sign the user in through the API and attach the JWT to the test client."""
        data = {
            'email': user.email,
            'first_name': user.first_name,
            'last_name': user.last_name,
            'password': password,
        }
        response = self.client.post(reverse('accounts:signin'), data, format='json')
        token = response.data['access']
        self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {token}')

    def setUp(self):
        """Create a logged-in talent and a pre-authorized, unexpired order to fulfill."""
        self.maxDiff = None
        password = 'senha123'
        user = User(
            email='talent1@viggio.com.br',
            first_name='Nome',
            last_name='Sobrenome',
        )
        user.set_password(password)
        user.save()
        self.do_login(user, password)
        self.talent = Talent.objects.create(
            user=user,
            price=1000,
            phone_number=1,
            area_code=1,
            main_social_media='',
            social_media_username='',
            number_of_followers=1,
        )
        self.order = Order.objects.create(
            hash_id=uuid.uuid4(),
            talent_id=self.talent.id,
            video_is_for='someone_else',
            is_from='MJ',
            is_to='Peter',
            instruction="Go Get 'em, Tiger",
            email='mary.jane.watson@spiderman.com',
            is_public=True,
            expiration_datetime=datetime.now(timezone.utc) + timedelta(days=4),
        )
        charge = Charge.objects.create(
            order=self.order,
            amount_paid=1000,
            payment_date=datetime.now(timezone.utc) - timedelta(days=3),
            status=DomainCharge.PRE_AUTHORIZED,
        )
        CreditCard.objects.create(
            charge=charge,
            fullname='Peter Parker',
            birthdate='2019-12-31',
            tax_document='12346578910',
            credit_card_hash='<encrypted-credit-card-hash>',
        )
        Buyer.objects.create(
            charge=charge,
            fullname='Mary Jane Watson',
            birthdate='2019-12-31',
            tax_document='09876543210',
        )
        WirecardTransactionData.objects.create(
            order=self.order,
            wirecard_order_hash=FAKE_WIRECARD_ORDER_HASH,
            wirecard_payment_hash=FAKE_WIRECARD_PAYMENT_HASH,
        )
        DefaultTalentProfitPercentage.objects.create(value='0.75')
        self.request_data = {
            'talent_id': self.talent.id,
            'order_hash': self.order.hash_id,
            'order_video': SimpleUploadedFile("file.mp4", b"filecontentstring"),
        }
        self.agency = Agency.objects.create(name='Agency')
        AgencyProfitPercentage.objects.create(agency=self.agency, value='0.05')

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_fulfilling_a_shoutout_request_create_a_shoutout_video(self, mock1):
        """A successful fulfill creates the ShoutoutVideo with the expected file path."""
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(ShoutoutVideo.objects.count(), 1)
        shoutout = ShoutoutVideo.objects.first()
        expected_file_url = f'orders/talent-1/order-{shoutout.order.hash_id}/viggio-para-peter.mp4'
        self.assertEqual(shoutout.hash_id, response.data['shoutout_hash'])
        self.assertTrue(shoutout.file.url.endswith(expected_file_url))

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_fulfilling_a_shoutout_request_create_a_talent_profit(self, mock1):
        """Fulfilling records a TalentProfit using the default 75% percentage."""
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(TalentProfit.objects.count(), 1)
        talent_profit_qs = TalentProfit.objects.filter(
            talent=self.talent,
            order=self.order,
            shoutout_price=1000,
            profit_percentage=Decimal('0.75'),
            profit=Decimal('750.00'),
            paid=False
        )
        self.assertTrue(talent_profit_qs.exists())

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_fulfilling_a_shoutout_request_create_a_agency_profit_when_talent_is_managed(self, mock1):  # noqa: E501
        """An agency-managed talent also generates an AgencyProfit (5%)."""
        self.talent.agency = self.agency
        self.talent.save()
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(AgencyProfit.objects.count(), 1)
        agency_profit_qs = AgencyProfit.objects.filter(
            agency=self.agency,
            order=self.order,
            shoutout_price=1000,
            profit_percentage=Decimal('0.05'),
            profit=Decimal('50.00'),
            paid=False
        )
        self.assertTrue(agency_profit_qs.exists())

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_fulfilling_a_shoutout_request_dont_create_a_agency_profit_when_talent_isnt_managed(self, mock1):  # noqa: E501
        """No AgencyProfit is created for an unmanaged talent."""
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(AgencyProfit.objects.count(), 0)

    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_after_upload_a_shoutout_transcode_process_is_triggered(self, mock1):
        """The transcode task is invoked once with the new shoutout and 'mp4'."""
        with mock.patch('transcoder.tasks.transcode') as mocked_transcoder:
            response = self.client.post(
                reverse('request_shoutout:fulfill'),
                self.request_data,
                format='multipart'
            )
        self.assertEqual(ShoutoutVideo.objects.count(), 1)
        shoutout = ShoutoutVideo.objects.first()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        mocked_transcoder.assert_called_once_with(shoutout, 'mp4')

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    def test_send_email_to_customer_after_transcode_process_ending(self, mock1):
        """The customer notification email is sent through Mailgun with the video URL."""
        with mock.patch('post_office.mailgun.requests') as mocked_requests:
            response = self.client.post(
                reverse('request_shoutout:fulfill'),
                self.request_data,
                format='multipart'
            )
        shoutout = ShoutoutVideo.objects.first()
        expected_calls = [
            mock.call(
                auth=('api', os.environ['MAILGUN_API_KEY']),
                url=os.environ['MAILGUN_API_URL'],
                data={
                    'from': os.environ['CONTACT_EMAIL'],
                    'to': 'MJ <mary.jane.watson@spiderman.com>',
                    'subject': 'Seu viggio para Peter está pronto',
                    'template': 'notify-customer-that-his-viggio-is-ready',
                    'v:order_is_to': 'Peter',
                    'v:customer_name': 'MJ',
                    'v:talent_name': 'Nome Sobrenome',
                    # BUGFIX: use double quotes for the subscript inside the
                    # single-quoted f-string -- reusing single quotes is a
                    # SyntaxError on Python < 3.12.
                    'v:shoutout_absolute_url': f'{os.environ["SITE_URL"]}v/{shoutout.hash_id}'
                },
            ),
        ]
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(mocked_requests.post.mock_calls, expected_calls)

    @mock.patch('request_shoutout.adapters.db.orm.DjangoTalentProfit.persist', side_effect=Exception())
    def test_rollback_when_fulfilling_a_shoutout_request_fails(self, mock1, mock2):
        """A persistence failure rolls back both the profit and the shoutout rows."""
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
        self.assertEqual(
            response.data,
            {'error': 'It happened an issue when persisting shoutout video'},
        )
        self.assertEqual(TalentProfit.objects.count(), 0)
        self.assertEqual(ShoutoutVideo.objects.count(), 0)

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_when_talent_profit_percentage_is_not_the_default(self, mock1):
        """A CustomTalentProfitPercentage overrides the default percentage."""
        CustomTalentProfitPercentage.objects.create(talent=self.talent, value=Decimal('0.80'))
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(TalentProfit.objects.count(), 1)
        talent_profit_qs = TalentProfit.objects.filter(
            talent=self.talent,
            order=self.order,
            shoutout_price=1000,
            profit_percentage=Decimal('0.80'),
            profit=Decimal('800.00'),
            paid=False
        )
        self.assertTrue(talent_profit_qs.exists())

    def test_cant_fulfill_same_order_twice(self, mock1):
        """An order that already has a shoutout attached is rejected with 400."""
        ShoutoutVideo.objects.create(
            hash_id=uuid.uuid4(),
            order=self.order,
            talent=self.talent,
            file=SimpleUploadedFile("file.mp4", b"filecontentstring"),
        )
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'error': 'Order already has a shoutout attached.'})

    def test_cant_fulfill_an_expired_order(self, mock1):
        """An expired order is rejected with 400."""
        self.order.expiration_datetime = datetime.now(timezone.utc) - timedelta(hours=1)
        self.order.save()
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'error': "Can't fulfill an expired order."})

    def test_a_talent_cant_fulfill_an_order_requested_to_another_talent(self, mock1):
        """An order belonging to a different talent is rejected with 400."""
        user = User.objects.create(email='talent100@youtuber.com')
        talent = Talent.objects.create(
            user=user,
            price=10,
            phone_number=1,
            area_code=1,
            main_social_media='',
            social_media_username='',
            number_of_followers=1,
        )
        self.order.talent_id = talent.id
        self.order.save()
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'error': 'Order belongs to another Talent.'})

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    @mock.patch('utils.telegram.requests.post')
    def test_when_capture_payment_fails_it_should_send_alert_message_to_staff(self, mock1, telegram_request_post):  # noqa: E501
        """A payment-capture failure still returns 201 but alerts staff on Telegram."""
        expected_call = mock.call(
            url=f'{TELEGRAM_BOT_API_URL}/sendMessage',
            data=json.dumps({
                'chat_id': os.environ['TELEGRAM_GROUP_ID'],
                'text': (
                    'OCORREU UM ERRO AO CAPTURAR UM PAGAMENTO. '
                    'Verifique o Sentry: '
                    'https://sentry.io/organizations/viggio-sandbox/issues/?project=1770932'
                )
            }),
            headers={'Content-Type': 'application/json'}
        )
        method_path = 'request_shoutout.adapters.db.orm.WirecardPaymentApi.capture_payment'
        with mock.patch(method_path, side_effect=Exception):
            response = self.client.post(
                reverse('request_shoutout:fulfill'),
                self.request_data,
                format='multipart'
            )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(telegram_request_post.mock_calls, [expected_call])
| import json
import os
import uuid
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from unittest import mock
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import override_settings
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from talents.models import Agency, Talent
from orders.models import (
AgencyProfit,
AgencyProfitPercentage,
Buyer,
Charge,
CreditCard,
CustomTalentProfitPercentage,
TalentProfit,
DefaultTalentProfitPercentage,
Order,
)
from request_shoutout.domain.models import Charge as DomainCharge
from shoutouts.models import ShoutoutVideo
from utils.telegram import TELEGRAM_BOT_API_URL
from wirecard.models import WirecardTransactionData
User = get_user_model()
FAKE_WIRECARD_ORDER_HASH = 'ORD-O5DLMAJZPTHV'
FAKE_WIRECARD_PAYMENT_HASH = 'PAY-HL7QRKFEQNHV'
def get_wirecard_mocked_abriged_response():
    """Return a Mock simulating Wirecard's capture-payment HTTP response.

    The mocked response reports status 200 and a minimal AUTHORIZED payload.
    """
    payload = {
        'id': FAKE_WIRECARD_PAYMENT_HASH,
        'status': 'AUTHORIZED',
    }
    mocked_response = mock.Mock()
    mocked_response.status_code = 200
    mocked_response.json.return_value = payload
    return mocked_response
@override_settings(
    task_eager_propagates=True,
    task_always_eager=True,
    broker_url='memory://',
    backend='memory'
)
@mock.patch('wirecard.services.requests.post', return_value=get_wirecard_mocked_abriged_response())
class FulfillShoutoutRequestTest(APITestCase):
    """Tests for a talent fulfilling a shoutout request (video upload endpoint).

    Celery runs eagerly via the settings override, and the class-level patch
    replaces ``requests.post`` as imported by the wirecard service, injecting
    an extra mock argument into every test method.
    """

    def do_login(self, user, password):
        """Sign the user in through the API and attach the JWT to the test client."""
        data = {
            'email': user.email,
            'first_name': user.first_name,
            'last_name': user.last_name,
            'password': password,
        }
        response = self.client.post(reverse('accounts:signin'), data, format='json')
        token = response.data['access']
        self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {token}')

    def setUp(self):
        """Create a logged-in talent and a pre-authorized, unexpired order to fulfill."""
        self.maxDiff = None
        password = 'senha123'
        user = User(
            email='talent1@viggio.com.br',
            first_name='Nome',
            last_name='Sobrenome',
        )
        user.set_password(password)
        user.save()
        self.do_login(user, password)
        self.talent = Talent.objects.create(
            user=user,
            price=1000,
            phone_number=1,
            area_code=1,
            main_social_media='',
            social_media_username='',
            number_of_followers=1,
        )
        self.order = Order.objects.create(
            hash_id=uuid.uuid4(),
            talent_id=self.talent.id,
            video_is_for='someone_else',
            is_from='MJ',
            is_to='Peter',
            instruction="Go Get 'em, Tiger",
            email='mary.jane.watson@spiderman.com',
            is_public=True,
            expiration_datetime=datetime.now(timezone.utc) + timedelta(days=4),
        )
        charge = Charge.objects.create(
            order=self.order,
            amount_paid=1000,
            payment_date=datetime.now(timezone.utc) - timedelta(days=3),
            status=DomainCharge.PRE_AUTHORIZED,
        )
        CreditCard.objects.create(
            charge=charge,
            fullname='Peter Parker',
            birthdate='2019-12-31',
            tax_document='12346578910',
            credit_card_hash='<encrypted-credit-card-hash>',
        )
        Buyer.objects.create(
            charge=charge,
            fullname='Mary Jane Watson',
            birthdate='2019-12-31',
            tax_document='09876543210',
        )
        WirecardTransactionData.objects.create(
            order=self.order,
            wirecard_order_hash=FAKE_WIRECARD_ORDER_HASH,
            wirecard_payment_hash=FAKE_WIRECARD_PAYMENT_HASH,
        )
        DefaultTalentProfitPercentage.objects.create(value='0.75')
        self.request_data = {
            'talent_id': self.talent.id,
            'order_hash': self.order.hash_id,
            'order_video': SimpleUploadedFile("file.mp4", b"filecontentstring"),
        }
        self.agency = Agency.objects.create(name='Agency')
        AgencyProfitPercentage.objects.create(agency=self.agency, value='0.05')

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_fulfilling_a_shoutout_request_create_a_shoutout_video(self, mock1):
        """A successful fulfill creates the ShoutoutVideo with the expected file path."""
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(ShoutoutVideo.objects.count(), 1)
        shoutout = ShoutoutVideo.objects.first()
        expected_file_url = f'orders/talent-1/order-{shoutout.order.hash_id}/viggio-para-peter.mp4'
        self.assertEqual(shoutout.hash_id, response.data['shoutout_hash'])
        self.assertTrue(shoutout.file.url.endswith(expected_file_url))

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_fulfilling_a_shoutout_request_create_a_talent_profit(self, mock1):
        """Fulfilling records a TalentProfit using the default 75% percentage."""
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(TalentProfit.objects.count(), 1)
        talent_profit_qs = TalentProfit.objects.filter(
            talent=self.talent,
            order=self.order,
            shoutout_price=1000,
            profit_percentage=Decimal('0.75'),
            profit=Decimal('750.00'),
            paid=False
        )
        self.assertTrue(talent_profit_qs.exists())

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_fulfilling_a_shoutout_request_create_a_agency_profit_when_talent_is_managed(self, mock1):  # noqa: E501
        """An agency-managed talent also generates an AgencyProfit (5%)."""
        self.talent.agency = self.agency
        self.talent.save()
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(AgencyProfit.objects.count(), 1)
        agency_profit_qs = AgencyProfit.objects.filter(
            agency=self.agency,
            order=self.order,
            shoutout_price=1000,
            profit_percentage=Decimal('0.05'),
            profit=Decimal('50.00'),
            paid=False
        )
        self.assertTrue(agency_profit_qs.exists())

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_fulfilling_a_shoutout_request_dont_create_a_agency_profit_when_talent_isnt_managed(self, mock1):  # noqa: E501
        """No AgencyProfit is created for an unmanaged talent."""
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(AgencyProfit.objects.count(), 0)

    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_after_upload_a_shoutout_transcode_process_is_triggered(self, mock1):
        """The transcode task is invoked once with the new shoutout and 'mp4'."""
        with mock.patch('transcoder.tasks.transcode') as mocked_transcoder:
            response = self.client.post(
                reverse('request_shoutout:fulfill'),
                self.request_data,
                format='multipart'
            )
        self.assertEqual(ShoutoutVideo.objects.count(), 1)
        shoutout = ShoutoutVideo.objects.first()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        mocked_transcoder.assert_called_once_with(shoutout, 'mp4')

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    def test_send_email_to_customer_after_transcode_process_ending(self, mock1):
        """The customer notification email is sent through Mailgun with the video URL."""
        with mock.patch('post_office.mailgun.requests') as mocked_requests:
            response = self.client.post(
                reverse('request_shoutout:fulfill'),
                self.request_data,
                format='multipart'
            )
        shoutout = ShoutoutVideo.objects.first()
        expected_calls = [
            mock.call(
                auth=('api', os.environ['MAILGUN_API_KEY']),
                url=os.environ['MAILGUN_API_URL'],
                data={
                    'from': os.environ['CONTACT_EMAIL'],
                    'to': 'MJ <mary.jane.watson@spiderman.com>',
                    'subject': 'Seu viggio para Peter está pronto',
                    'template': 'notify-customer-that-his-viggio-is-ready',
                    'v:order_is_to': 'Peter',
                    'v:customer_name': 'MJ',
                    'v:talent_name': 'Nome Sobrenome',
                    'v:shoutout_absolute_url': f'{os.environ["SITE_URL"]}v/{shoutout.hash_id}'
                },
            ),
        ]
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(mocked_requests.post.mock_calls, expected_calls)

    @mock.patch('request_shoutout.adapters.db.orm.DjangoTalentProfit.persist', side_effect=Exception())
    def test_rollback_when_fulfilling_a_shoutout_request_fails(self, mock1, mock2):
        """A persistence failure rolls back both the profit and the shoutout rows."""
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
        self.assertEqual(
            response.data,
            {'error': 'It happened an issue when persisting shoutout video'},
        )
        self.assertEqual(TalentProfit.objects.count(), 0)
        self.assertEqual(ShoutoutVideo.objects.count(), 0)

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    def test_when_talent_profit_percentage_is_not_the_default(self, mock1):
        """A CustomTalentProfitPercentage overrides the default percentage."""
        CustomTalentProfitPercentage.objects.create(talent=self.talent, value=Decimal('0.80'))
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(TalentProfit.objects.count(), 1)
        talent_profit_qs = TalentProfit.objects.filter(
            talent=self.talent,
            order=self.order,
            shoutout_price=1000,
            profit_percentage=Decimal('0.80'),
            profit=Decimal('800.00'),
            paid=False
        )
        self.assertTrue(talent_profit_qs.exists())

    def test_cant_fulfill_same_order_twice(self, mock1):
        """An order that already has a shoutout attached is rejected with 400."""
        ShoutoutVideo.objects.create(
            hash_id=uuid.uuid4(),
            order=self.order,
            talent=self.talent,
            file=SimpleUploadedFile("file.mp4", b"filecontentstring"),
        )
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'error': 'Order already has a shoutout attached.'})

    def test_cant_fulfill_an_expired_order(self, mock1):
        """An expired order is rejected with 400."""
        self.order.expiration_datetime = datetime.now(timezone.utc) - timedelta(hours=1)
        self.order.save()
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'error': "Can't fulfill an expired order."})

    def test_a_talent_cant_fulfill_an_order_requested_to_another_talent(self, mock1):
        """An order belonging to a different talent is rejected with 400."""
        user = User.objects.create(email='talent100@youtuber.com')
        talent = Talent.objects.create(
            user=user,
            price=10,
            phone_number=1,
            area_code=1,
            main_social_media='',
            social_media_username='',
            number_of_followers=1,
        )
        self.order.talent_id = talent.id
        self.order.save()
        response = self.client.post(
            reverse('request_shoutout:fulfill'),
            self.request_data,
            format='multipart'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {'error': 'Order belongs to another Talent.'})

    @mock.patch('transcoder.tasks.transcode', mock.Mock())
    @mock.patch('post_office.mailgun.requests', mock.Mock())
    @mock.patch('utils.telegram.requests.post')
    def test_when_capture_payment_fails_it_should_send_alert_message_to_staff(self, mock1, telegram_request_post):  # noqa: E501
        """A payment-capture failure still returns 201 but alerts staff on Telegram."""
        expected_call = mock.call(
            url=f'{TELEGRAM_BOT_API_URL}/sendMessage',
            data=json.dumps({
                'chat_id': os.environ['TELEGRAM_GROUP_ID'],
                'text': (
                    'OCORREU UM ERRO AO CAPTURAR UM PAGAMENTO. '
                    'Verifique o Sentry: '
                    'https://sentry.io/organizations/viggio-sandbox/issues/?project=1770932'
                )
            }),
            headers={'Content-Type': 'application/json'}
        )
        method_path = 'request_shoutout.adapters.db.orm.WirecardPaymentApi.capture_payment'
        with mock.patch(method_path, side_effect=Exception):
            response = self.client.post(
                reverse('request_shoutout:fulfill'),
                self.request_data,
                format='multipart'
            )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(telegram_request_post.mock_calls, [expected_call])
|
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
    import pycocotools
    # The open-mmlab fork ("mmpycocotools") reports __version__ '12.0.2'.
    # Compare numerically: the previous plain string comparison
    # (__version__ >= '12.0.2') wrongly ACCEPTED upstream pycocotools,
    # because '2...' >= '12...' lexicographically.
    _pycoco_version = tuple(
        int(part) for part in pycocotools.__version__.split('.')[:3])
    assert _pycoco_version >= (12, 0, 2)
except (AssertionError, ValueError):
    # ValueError covers non-numeric version components, which also indicate
    # a package other than the expected mmpycocotools fork.
    raise AssertionError('Incompatible version of pycocotools is installed. '
                         'Run pip uninstall pycocotools first. Then run pip '
                         'install mmpycocotools to install open-mmlab forked '
                         'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
    """COCO-style detection dataset with five vehicle/vessel categories."""

    CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')

    def load_annotations(self, ann_file):
        """Load annotation from COCO style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from COCO api.
        """
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            data_infos.append(info)
        return data_infos

    def get_ann_info(self, idx):
        """Get COCO annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return self._parse_ann_info(self.data_infos[idx], ann_info)

    def get_cat_ids(self, idx):
        """Get COCO category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return [ann['category_id'] for ann in ann_info]

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # obtain images that contain annotation
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # obtain images that contain annotations of the required categories
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # merge the image id sets of the two conditions and use the merged set
        # to filter out images if self.filter_empty_gt=True
        ids_in_cat &= ids_with_ann
        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if self.filter_empty_gt and img_id not in ids_in_cat:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            img_info (dict): Image info; must contain 'width', 'height' and
                'filename'.
            ann_info (list[dict]): Annotation info of an image.

        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,\
                labels, masks, seg_map. "masks" are raw annotations and not \
                decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            # Intersection of the box with the image canvas; drop boxes that
            # lie entirely outside the image.
            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
            if inter_w * inter_h == 0:
                continue
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann.get('segmentation', None))
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        # NOTE: substring replace — also rewrites 'jpg' occurring mid-filename.
        seg_map = img_info['filename'].replace('jpg', 'png')
        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=seg_map)
        return ann

    def xyxy2xywh(self, bbox):
        """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
        evaluation.

        Args:
            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
                ``xyxy`` order.

        Returns:
            list[float]: The converted bounding boxes, in ``xywh`` order.
        """
        _bbox = bbox.tolist()
        return [
            _bbox[0],
            _bbox[1],
            _bbox[2] - _bbox[0],
            _bbox[3] - _bbox[1],
        ]

    def _proposal2json(self, results):
        """Convert proposal results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            bboxes = results[idx]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = 1
                json_results.append(data)
        return json_results

    def _det2json(self, results):
        """Convert detection results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            result = results[idx]
            for label in range(len(result)):
                bboxes = result[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    json_results.append(data)
        return json_results

    def _segm2json(self, results):
        """Convert instance segmentation results to COCO json style."""
        bbox_json_results = []
        segm_json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            det, seg = results[idx]
            for label in range(len(det)):
                # bbox results
                bboxes = det[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    bbox_json_results.append(data)
                # segm results
                # some detectors use different scores for bbox and mask
                if isinstance(seg, tuple):
                    segms = seg[0][label]
                    mask_score = seg[1][label]
                else:
                    segms = seg[label]
                    mask_score = [bbox[4] for bbox in bboxes]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(mask_score[i])
                    data['category_id'] = self.cat_ids[label]
                    if isinstance(segms[i]['counts'], bytes):
                        segms[i]['counts'] = segms[i]['counts'].decode()
                    data['segmentation'] = segms[i]
                    segm_json_results.append(data)
        return bbox_json_results, segm_json_results

    def results2json(self, results, outfile_prefix):
        """Dump the detection results to a COCO style json file.

        There are 3 types of results: proposals, bbox predictions, mask
        predictions, and they have different data types. This method will
        automatically recognize the type, and dump them to json files.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files. If the
                prefix is "somepath/xxx", the json files will be named
                "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
                "somepath/xxx.proposal.json".

        Returns:
            dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
                values are corresponding filenames.
        """
        result_files = dict()
        if isinstance(results[0], list):
            json_results = self._det2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            mmcv.dump(json_results, result_files['bbox'])
        elif isinstance(results[0], tuple):
            json_results = self._segm2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            result_files['segm'] = f'{outfile_prefix}.segm.json'
            mmcv.dump(json_results[0], result_files['bbox'])
            mmcv.dump(json_results[1], result_files['segm'])
        elif isinstance(results[0], np.ndarray):
            json_results = self._proposal2json(results)
            result_files['proposal'] = f'{outfile_prefix}.proposal.json'
            mmcv.dump(json_results, result_files['proposal'])
        else:
            raise TypeError('invalid type of results')
        return result_files

    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
        """Evaluate proposal recall directly from in-memory results.

        Args:
            results (list[ndarray]): Proposal boxes per image.
            proposal_nums (Sequence[int]): Proposal numbers to evaluate.
            iou_thrs (Sequence[float]): IoU thresholds.
            logger (logging.Logger | str | None): Logger used for printing.

        Returns:
            ndarray: Average recall per proposal number.
        """
        gt_bboxes = []
        for i in range(len(self.img_ids)):
            ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
            ann_info = self.coco.load_anns(ann_ids)
            if len(ann_info) == 0:
                gt_bboxes.append(np.zeros((0, 4)))
                continue
            bboxes = []
            for ann in ann_info:
                if ann.get('ignore', False) or ann['iscrowd']:
                    continue
                x1, y1, w, h = ann['bbox']
                bboxes.append([x1, y1, x1 + w, y1 + h])
            bboxes = np.array(bboxes, dtype=np.float32)
            if bboxes.shape[0] == 0:
                bboxes = np.zeros((0, 4))
            gt_bboxes.append(bboxes)
        recalls = eval_recalls(
            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
        ar = recalls.mean(axis=1)
        return ar

    def format_results(self, results, jsonfile_prefix=None, **kwargs):
        """Format the results to json (standard format for COCO evaluation).

        Args:
            results (list[tuple | numpy.ndarray]): Testing results of the
                dataset.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the json filepaths, tmp_dir is the temporal directory created \
                for saving json files when jsonfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))
        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
                ``metric=='bbox' or metric=='segm'``.

        Returns:
            dict[str, float]: COCO style evaluation metric.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            iou_thrs = np.linspace(
                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
        eval_results = {}
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)
            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue
            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break
            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of cocoEval.stats
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_s': 3,
                'mAP_m': 4,
                'mAP_l': 5,
                'AR@100': 6,
                'AR@300': 7,
                'AR@1000': 8,
                'AR_s@1000': 9,
                'AR_m@1000': 10,
                'AR_l@1000': 11
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')
            if metric == 'proposal':
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]
                for item in metric_items:
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]
                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        # FIX: the original f'{nm['name']}' reused single
                        # quotes inside a single-quoted f-string, which is a
                        # SyntaxError before Python 3.12.
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))
                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)
                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]
                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
| import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
    import pycocotools
    # The open-mmlab fork ("mmpycocotools") reports __version__ '12.0.2'.
    # Compare numerically: the previous plain string comparison
    # (__version__ >= '12.0.2') wrongly ACCEPTED upstream pycocotools,
    # because '2...' >= '12...' lexicographically.
    _pycoco_version = tuple(
        int(part) for part in pycocotools.__version__.split('.')[:3])
    assert _pycoco_version >= (12, 0, 2)
except (AssertionError, ValueError):
    # ValueError covers non-numeric version components, which also indicate
    # a package other than the expected mmpycocotools fork.
    raise AssertionError('Incompatible version of pycocotools is installed. '
                         'Run pip uninstall pycocotools first. Then run pip '
                         'install mmpycocotools to install open-mmlab forked '
                         'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
    """COCO-style detection dataset with five vehicle/vessel categories."""
    CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')
    def load_annotations(self, ann_file):
        """Load annotation from COCO style annotation file.
        Args:
            ann_file (str): Path of annotation file.
        Returns:
            list[dict]: Annotation info from COCO api.
        """
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            data_infos.append(info)
        return data_infos
    def get_ann_info(self, idx):
        """Get COCO annotation by index.
        Args:
            idx (int): Index of data.
        Returns:
            dict: Annotation info of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return self._parse_ann_info(self.data_infos[idx], ann_info)
    def get_cat_ids(self, idx):
        """Get COCO category ids by index.
        Args:
            idx (int): Index of data.
        Returns:
            list[int]: All categories in the image of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return [ann['category_id'] for ann in ann_info]
    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # obtain images that contain annotation
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # obtain images that contain annotations of the required categories
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # merge the image id sets of the two conditions and use the merged set
        # to filter out images if self.filter_empty_gt=True
        ids_in_cat &= ids_with_ann
        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if self.filter_empty_gt and img_id not in ids_in_cat:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds
    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.
        Args:
            ann_info (list[dict]): Annotation info of an image.
            with_mask (bool): Whether to parse mask annotations.
        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,\
                labels, masks, seg_map. "masks" are raw annotations and not \
                decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            # Intersection of the box with the image canvas; boxes fully
            # outside the image are dropped below.
            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
            if inter_w * inter_h == 0:
                continue
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann.get('segmentation', None))
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        # NOTE: substring replace — also rewrites 'jpg' occurring mid-filename.
        seg_map = img_info['filename'].replace('jpg', 'png')
        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=seg_map)
        return ann
    def xyxy2xywh(self, bbox):
        """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
        evaluation.
        Args:
            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
                ``xyxy`` order.
        Returns:
            list[float]: The converted bounding boxes, in ``xywh`` order.
        """
        _bbox = bbox.tolist()
        return [
            _bbox[0],
            _bbox[1],
            _bbox[2] - _bbox[0],
            _bbox[3] - _bbox[1],
        ]
    def _proposal2json(self, results):
        """Convert proposal results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            bboxes = results[idx]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = 1
                json_results.append(data)
        return json_results
    def _det2json(self, results):
        """Convert detection results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            result = results[idx]
            for label in range(len(result)):
                bboxes = result[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    json_results.append(data)
        return json_results
    def _segm2json(self, results):
        """Convert instance segmentation results to COCO json style."""
        bbox_json_results = []
        segm_json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            det, seg = results[idx]
            for label in range(len(det)):
                # bbox results
                bboxes = det[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    bbox_json_results.append(data)
                # segm results
                # some detectors use different scores for bbox and mask
                if isinstance(seg, tuple):
                    segms = seg[0][label]
                    mask_score = seg[1][label]
                else:
                    segms = seg[label]
                    mask_score = [bbox[4] for bbox in bboxes]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(mask_score[i])
                    data['category_id'] = self.cat_ids[label]
                    if isinstance(segms[i]['counts'], bytes):
                        segms[i]['counts'] = segms[i]['counts'].decode()
                    data['segmentation'] = segms[i]
                    segm_json_results.append(data)
        return bbox_json_results, segm_json_results
    def results2json(self, results, outfile_prefix):
        """Dump the detection results to a COCO style json file.
        There are 3 types of results: proposals, bbox predictions, mask
        predictions, and they have different data types. This method will
        automatically recognize the type, and dump them to json files.
        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files. If the
                prefix is "somepath/xxx", the json files will be named
                "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
                "somepath/xxx.proposal.json".
        Returns:
            dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
                values are corresponding filenames.
        """
        result_files = dict()
        if isinstance(results[0], list):
            json_results = self._det2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            mmcv.dump(json_results, result_files['bbox'])
        elif isinstance(results[0], tuple):
            json_results = self._segm2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            result_files['segm'] = f'{outfile_prefix}.segm.json'
            mmcv.dump(json_results[0], result_files['bbox'])
            mmcv.dump(json_results[1], result_files['segm'])
        elif isinstance(results[0], np.ndarray):
            json_results = self._proposal2json(results)
            result_files['proposal'] = f'{outfile_prefix}.proposal.json'
            mmcv.dump(json_results, result_files['proposal'])
        else:
            raise TypeError('invalid type of results')
        return result_files
    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
        """Evaluate proposal recall directly from in-memory results.
        Args:
            results (list[ndarray]): Proposal boxes per image.
            proposal_nums (Sequence[int]): Proposal numbers to evaluate.
            iou_thrs (Sequence[float]): IoU thresholds.
            logger (logging.Logger | str | None): Logger used for printing.
        Returns:
            ndarray: Average recall per proposal number.
        """
        gt_bboxes = []
        for i in range(len(self.img_ids)):
            ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
            ann_info = self.coco.load_anns(ann_ids)
            if len(ann_info) == 0:
                gt_bboxes.append(np.zeros((0, 4)))
                continue
            bboxes = []
            for ann in ann_info:
                if ann.get('ignore', False) or ann['iscrowd']:
                    continue
                x1, y1, w, h = ann['bbox']
                bboxes.append([x1, y1, x1 + w, y1 + h])
            bboxes = np.array(bboxes, dtype=np.float32)
            if bboxes.shape[0] == 0:
                bboxes = np.zeros((0, 4))
            gt_bboxes.append(bboxes)
        recalls = eval_recalls(
            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
        ar = recalls.mean(axis=1)
        return ar
    def format_results(self, results, jsonfile_prefix=None, **kwargs):
        """Format the results to json (standard format for COCO evaluation).
        Args:
            results (list[tuple | numpy.ndarray]): Testing results of the
                dataset.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the json filepaths, tmp_dir is the temporal directory created \
                for saving json files when jsonfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))
        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)
        return result_files, tmp_dir
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.
        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
                ``metric=='bbox' or metric=='segm'``.
        Returns:
            dict[str, float]: COCO style evaluation metric.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            iou_thrs = np.linspace(
                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        # NOTE(review): `metric_items`, once defaulted inside the loop below,
        # persists across subsequent metrics — this matches the code as
        # written; confirm it is the intended behavior when evaluating
        # several metrics in one call.
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
        eval_results = {}
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)
            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue
            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break
            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of cocoEval.stats
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_s': 3,
                'mAP_m': 4,
                'mAP_l': 5,
                'AR@100': 6,
                'AR@300': 7,
                'AR@1000': 8,
                'AR_s@1000': 9,
                'AR_m@1000': 10,
                'AR_l@1000': 11
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')
            if metric == 'proposal':
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]
                for item in metric_items:
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]
                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))
                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)
                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]
                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
|
import torch
from torchvision.transforms import functional as TFF
import matplotlib.pyplot as plt
from theseus.base.trainer.supervised_trainer import SupervisedTrainer
from theseus.utilities.loading import load_state_dict
from theseus.classification.utilities.gradcam import CAMWrapper, show_cam_on_image
from theseus.utilities.visualization.visualizer import Visualizer
from theseus.utilities.analysis.analyzer import ClassificationAnalyzer
from theseus.utilities.loggers.observer import LoggerObserver
LOGGER = LoggerObserver.getLogger("main")
class ClassificationTrainer(SupervisedTrainer):
    """Trainer for classification tasks.

    Extends ``SupervisedTrainer`` with checkpoint saving/loading,
    best-metric ('bl_acc') tracking, dataset sanity checks and
    Grad-CAM based prediction visualization.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def check_best(self, metric_dict):
        """
        Hook function, called after metrics are calculated.

        Args:
            metric_dict (dict): computed metrics; must contain 'bl_acc'
                (balanced accuracy), the value tracked for "best".
        """
        if metric_dict['bl_acc'] > self.best_value:
            if self.iters > 0:  # Have been training, else in evaluation-only mode or just sanity check
                # Fixed: single-quoted dict key inside the f-string; nested
                # double quotes are a SyntaxError on Python < 3.12.
                LOGGER.text(
                    f"Evaluation improved from {self.best_value} to {metric_dict['bl_acc']}",
                    level=LoggerObserver.INFO)
                self.best_value = metric_dict['bl_acc']
                self.save_checkpoint('best')
        else:
            if self.visualize_when_val:
                self.visualize_pred()

    def save_checkpoint(self, outname='last'):
        """
        Save all information of the current iteration.

        Args:
            outname (str): checkpoint name, e.g. 'last' or 'best'.
        """
        weights = {
            'model': self.model.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'iters': self.iters,
            'best_value': self.best_value,
        }

        # AMP grad-scaler state is only present when mixed precision is on.
        if self.scaler is not None:
            weights[self.scaler.state_dict_key] = self.scaler.state_dict()

        self.checkpoint.save(weights, outname)

    def load_checkpoint(self, path: str):
        """
        Load all information of the current iteration from checkpoint.

        Args:
            path (str): path to a checkpoint produced by save_checkpoint().
        """
        LOGGER.text("Loading checkpoints...", level=LoggerObserver.INFO)
        state_dict = torch.load(path, map_location='cpu')
        self.iters = load_state_dict(self.iters, state_dict, 'iters')
        self.best_value = load_state_dict(self.best_value, state_dict, 'best_value')
        # NOTE(review): assumes self.scaler is not None when resuming —
        # confirm that resume is only used together with AMP training.
        self.scaler = load_state_dict(self.scaler, state_dict, self.scaler.state_dict_key)

    def _log_image_grid(self, visualizer, tensors, tag, figsize):
        """Render `tensors` as one image grid figure and send it to the logger."""
        grid_img = visualizer.make_grid(tensors)
        fig = plt.figure(figsize=figsize)
        plt.imshow(grid_img)
        plt.axis('off')
        plt.tight_layout(pad=0)
        LOGGER.log([{
            'tag': tag,
            'value': fig,
            'type': LoggerObserver.FIGURE,
            'kwargs': {'step': self.iters}
        }])

    def visualize_gt(self):
        """
        Visualize one train batch and one validation batch for sanity check.
        """
        LOGGER.text("Visualizing dataset...", level=LoggerObserver.DEBUG)
        visualizer = Visualizer()

        # Same rendering for the train and validation batches; the duplicated
        # loop bodies from the original are factored into _log_image_grid.
        for loader, tag in ((self.trainloader, "Sanitycheck/batch/train"),
                            (self.valloader, "Sanitycheck/batch/val")):
            batch = next(iter(loader))
            tensors = [TFF.to_tensor(visualizer.denormalize(inputs))
                       for inputs in batch["inputs"]]
            self._log_image_grid(visualizer, tensors, tag, figsize=(8, 8))

    @torch.enable_grad()  # enable grad for CAM
    def visualize_pred(self):
        r"""Visualize model prediction and CAM.

        Runs Grad-CAM on one validation batch, draws a caption
        (ground truth / prediction / confidence) on each image and logs
        both the CAM grid and the prediction grid.
        """
        LOGGER.text("Visualizing model predictions...", level=LoggerObserver.DEBUG)

        visualizer = Visualizer()
        batch = next(iter(self.valloader))
        images = batch["inputs"]
        targets = batch["targets"]

        self.model.eval()
        model_name = self.model.model.name
        grad_cam = CAMWrapper.get_method(
            name='gradcam',
            model=self.model.model.get_model(),
            model_name=model_name, use_cuda=next(self.model.parameters()).is_cuda)

        grayscale_cams, label_indices, scores = grad_cam(images, return_probs=True)

        gradcam_batch = []
        pred_batch = []
        for idx in range(len(grayscale_cams)):
            image = images[idx]
            target = targets[idx].item()
            label = label_indices[idx]
            grayscale_cam = grayscale_cams[idx, :]
            score = scores[idx]

            img_show = visualizer.denormalize(image)
            visualizer.set_image(img_show)
            # Map class indices to readable names when the dataset provides them.
            if self.valloader.dataset.classnames is not None:
                label = self.valloader.dataset.classnames[label]
                target = self.valloader.dataset.classnames[target]

            # Green caption for correct predictions, red for wrong ones.
            if label == target:
                color = [0, 1, 0]
            else:
                color = [1, 0, 0]

            visualizer.draw_label(
                f"GT: {target}\nP: {label}\nC: {score:.4f}",
                fontColor=color,
                fontScale=0.8,
                thickness=2,
                outline=None,
                offset=100
            )

            img_cam = show_cam_on_image(img_show, grayscale_cam, use_rgb=True)
            gradcam_batch.append(TFF.to_tensor(img_cam))
            pred_batch.append(TFF.to_tensor(visualizer.get_image()))

            if idx == 63:  # limit number of images
                break

        # GradCAM images
        self._log_image_grid(visualizer, gradcam_batch, "Validation/gradcam", figsize=(8, 8))
        # Prediction images
        self._log_image_grid(visualizer, pred_batch, "Validation/prediction", figsize=(10, 10))

        # Zeroing gradients in optimizer for safety
        self.optimizer.zero_grad()

    @torch.no_grad()
    def visualize_model(self):
        """Log the model architecture graph (traced with one val batch)."""
        LOGGER.text("Visualizing architecture...", level=LoggerObserver.DEBUG)

        batch = next(iter(self.valloader))
        images = batch["inputs"].to(self.model.device)
        LOGGER.log([{
            'tag': "Sanitycheck/analysis/architecture",
            'value': self.model.model.get_model(),
            'type': LoggerObserver.TORCH_MODULE,
            'kwargs': {
                'inputs': images
            }
        }])

    def _log_dataset_analysis(self, dataset, tag):
        """Run ClassificationAnalyzer on `dataset` and log the resulting figure."""
        analyzer = ClassificationAnalyzer()
        analyzer.add_dataset(dataset)
        fig = analyzer.analyze(figsize=(10, 5))
        LOGGER.log([{
            'tag': tag,
            'value': fig,
            'type': LoggerObserver.FIGURE,
            'kwargs': {'step': self.iters}
        }])

    def analyze_gt(self):
        """
        Perform simple data analysis on train and validation datasets.
        """
        LOGGER.text("Analyzing datasets...", level=LoggerObserver.DEBUG)
        self._log_dataset_analysis(self.trainloader.dataset, "Sanitycheck/analysis/train")
        self._log_dataset_analysis(self.valloader.dataset, "Sanitycheck/analysis/val")

    def on_evaluate_end(self):
        """Hook run after each evaluation: optional viz + 'last' checkpoint."""
        if self.visualize_when_val:
            self.visualize_pred()
        self.save_checkpoint()

    def on_start(self):
        """Hook run once at trainer start: resume from checkpoint if requested."""
        if self.resume is not None:
            self.load_checkpoint(self.resume)

    def sanitycheck(self):
        """Sanity check before training: data viz, analysis, model graph, eval.
        """
        self.visualize_gt()
        self.analyze_gt()
        self.visualize_model()
        self.evaluate_epoch()
| import torch
from torchvision.transforms import functional as TFF
import matplotlib.pyplot as plt
from theseus.base.trainer.supervised_trainer import SupervisedTrainer
from theseus.utilities.loading import load_state_dict
from theseus.classification.utilities.gradcam import CAMWrapper, show_cam_on_image
from theseus.utilities.visualization.visualizer import Visualizer
from theseus.utilities.analysis.analyzer import ClassificationAnalyzer
from theseus.utilities.loggers.observer import LoggerObserver
LOGGER = LoggerObserver.getLogger("main")
class ClassificationTrainer(SupervisedTrainer):
    """Trainer for classification tasks
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def check_best(self, metric_dict):
        """
        Hook function, called after metrics are calculated
        """
        # A new best balanced accuracy is only persisted after real training
        # steps (iters > 0); otherwise fall through to optional visualization.
        if metric_dict['bl_acc'] > self.best_value:
            if self.iters > 0: # Have been training, else in evaluation-only mode or just sanity check
                LOGGER.text(
                    f"Evaluation improved from {self.best_value} to {metric_dict['bl_acc']}",
                    level=LoggerObserver.INFO)
                self.best_value = metric_dict['bl_acc']
                self.save_checkpoint('best')
        else:
            if self.visualize_when_val:
                self.visualize_pred()

    def save_checkpoint(self, outname='last'):
        """
        Save all information of the current iteration

        outname is the checkpoint name, e.g. 'last' or 'best'.
        """
        weights = {
            'model': self.model.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'iters': self.iters,
            'best_value': self.best_value,
        }

        # AMP grad-scaler state is only saved when mixed precision is enabled.
        if self.scaler is not None:
            weights[self.scaler.state_dict_key] = self.scaler.state_dict()

        self.checkpoint.save(weights, outname)

    def load_checkpoint(self, path: str):
        """
        Load all information the current iteration from checkpoint
        """
        LOGGER.text("Loading checkpoints...", level=LoggerObserver.INFO)
        state_dict = torch.load(path, map_location='cpu')
        self.iters = load_state_dict(self.iters, state_dict, 'iters')
        self.best_value = load_state_dict(self.best_value, state_dict, 'best_value')
        # NOTE(review): assumes self.scaler is not None when resuming —
        # confirm resume is only used together with AMP training.
        self.scaler = load_state_dict(self.scaler, state_dict, self.scaler.state_dict_key)

    def visualize_gt(self):
        """
        Visualize dataloader for sanity check

        Logs one denormalized image grid for the train loader and one for
        the validation loader.
        """
        LOGGER.text("Visualizing dataset...", level=LoggerObserver.DEBUG)
        visualizer = Visualizer()

        # Train batch
        batch = next(iter(self.trainloader))
        images = batch["inputs"]

        # Undo normalization so images are displayable, then collect tensors.
        batch = []
        for idx, inputs in enumerate(images):
            img_show = visualizer.denormalize(inputs)
            img_cam = TFF.to_tensor(img_show)
            batch.append(img_cam)
        grid_img = visualizer.make_grid(batch)

        fig = plt.figure(figsize=(8,8))
        plt.axis('off')
        plt.imshow(grid_img)
        plt.tight_layout(pad=0)

        LOGGER.log([{
            'tag': "Sanitycheck/batch/train",
            'value': fig,
            'type': LoggerObserver.FIGURE,
            'kwargs': {
                'step': self.iters
            }
        }])

        # Validation batch
        batch = next(iter(self.valloader))
        images = batch["inputs"]

        batch = []
        for idx, inputs in enumerate(images):
            img_show = visualizer.denormalize(inputs)
            img_cam = TFF.to_tensor(img_show)
            batch.append(img_cam)
        grid_img = visualizer.make_grid(batch)

        fig = plt.figure(figsize=(8,8))
        plt.axis('off')
        plt.imshow(grid_img)
        plt.tight_layout(pad=0)

        LOGGER.log([{
            'tag': "Sanitycheck/batch/val",
            'value': fig,
            'type': LoggerObserver.FIGURE,
            'kwargs': {
                'step': self.iters
            }
        }])

    @torch.enable_grad() #enable grad for CAM
    def visualize_pred(self):
        r"""Visualize model prediction and CAM

        Runs Grad-CAM on one validation batch, draws a caption
        (ground truth / prediction / confidence) on each image, and logs
        both the CAM grid and the prediction grid.
        """
        # Vizualize Grad Class Activation Mapping and model predictions
        LOGGER.text("Visualizing model predictions...", level=LoggerObserver.DEBUG)

        visualizer = Visualizer()
        batch = next(iter(self.valloader))
        images = batch["inputs"]
        targets = batch["targets"]

        self.model.eval()

        model_name = self.model.model.name
        grad_cam = CAMWrapper.get_method(
            name='gradcam',
            model=self.model.model.get_model(),
            model_name=model_name, use_cuda=next(self.model.parameters()).is_cuda)

        grayscale_cams, label_indices, scores = grad_cam(images, return_probs=True)

        gradcam_batch = []
        pred_batch = []
        for idx in range(len(grayscale_cams)):
            image = images[idx]
            target = targets[idx].item()
            label = label_indices[idx]
            grayscale_cam = grayscale_cams[idx, :]
            score = scores[idx]

            img_show = visualizer.denormalize(image)
            visualizer.set_image(img_show)
            # Map class indices to readable names when the dataset has them.
            if self.valloader.dataset.classnames is not None:
                label = self.valloader.dataset.classnames[label]
                target = self.valloader.dataset.classnames[target]

            # Green caption for correct predictions, red for wrong ones.
            if label == target:
                color = [0,1,0]
            else:
                color = [1,0,0]

            visualizer.draw_label(
                f"GT: {target}\nP: {label}\nC: {score:.4f}",
                fontColor = color,
                fontScale=0.8,
                thickness=2,
                outline=None,
                offset=100
            )

            img_cam =show_cam_on_image(img_show, grayscale_cam, use_rgb=True)
            img_cam = TFF.to_tensor(img_cam)
            gradcam_batch.append(img_cam)

            pred_img = visualizer.get_image()
            pred_img = TFF.to_tensor(pred_img)
            pred_batch.append(pred_img)

            if idx == 63: # limit number of images
                break

        # GradCAM images
        gradcam_grid_img = visualizer.make_grid(gradcam_batch)
        fig = plt.figure(figsize=(8,8))
        plt.imshow(gradcam_grid_img)
        plt.axis("off")
        plt.tight_layout(pad=0)
        LOGGER.log([{
            'tag': "Validation/gradcam",
            'value': fig,
            'type': LoggerObserver.FIGURE,
            'kwargs': {
                'step': self.iters
            }
        }])

        # Prediction images
        pred_grid_img = visualizer.make_grid(pred_batch)
        fig = plt.figure(figsize=(10,10))
        plt.imshow(pred_grid_img)
        plt.axis("off")
        plt.tight_layout(pad=0)
        LOGGER.log([{
            'tag': "Validation/prediction",
            'value': fig,
            'type': LoggerObserver.FIGURE,
            'kwargs': {
                'step': self.iters
            }
        }])

        # Zeroing gradients in optimizer for safety
        self.optimizer.zero_grad()

    @torch.no_grad()
    def visualize_model(self):
        """Log the model architecture graph, traced with one val batch."""
        # Vizualize Model Graph
        LOGGER.text("Visualizing architecture...", level=LoggerObserver.DEBUG)

        batch = next(iter(self.valloader))
        images = batch["inputs"].to(self.model.device)
        LOGGER.log([{
            'tag': "Sanitycheck/analysis/architecture",
            'value': self.model.model.get_model(),
            'type': LoggerObserver.TORCH_MODULE,
            'kwargs': {
                'inputs': images
            }
        }])

    def analyze_gt(self):
        """
        Perform simple data analysis

        Runs ClassificationAnalyzer over the train and validation datasets
        and logs the resulting figures.
        """
        LOGGER.text("Analyzing datasets...", level=LoggerObserver.DEBUG)
        analyzer = ClassificationAnalyzer()
        analyzer.add_dataset(self.trainloader.dataset)
        fig = analyzer.analyze(figsize=(10,5))
        LOGGER.log([{
            'tag': "Sanitycheck/analysis/train",
            'value': fig,
            'type': LoggerObserver.FIGURE,
            'kwargs': {
                'step': self.iters
            }
        }])

        analyzer = ClassificationAnalyzer()
        analyzer.add_dataset(self.valloader.dataset)
        fig = analyzer.analyze(figsize=(10,5))
        LOGGER.log([{
            'tag': "Sanitycheck/analysis/val",
            'value': fig,
            'type': LoggerObserver.FIGURE,
            'kwargs': {
                'step': self.iters
            }
        }])

    def on_evaluate_end(self):
        # Hook run after each evaluation: optional viz + 'last' checkpoint.
        if self.visualize_when_val:
            self.visualize_pred()
        self.save_checkpoint()

    def on_start(self):
        # Hook run once at trainer start: resume from checkpoint if requested.
        if self.resume is not None:
            self.load_checkpoint(self.resume)

    def sanitycheck(self):
        """Sanity check before training
        """
        self.visualize_gt()
        self.analyze_gt()
        self.visualize_model()
        self.evaluate_epoch()
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from typing import Dict, List, Tuple, Type, Union
import librosa
import numpy as np
import soundfile as sf
import torch
from omegaconf import OmegaConf
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.metrics.wer import WER
from nemo.collections.asr.metrics.wer_bpe import WERBPE
from nemo.collections.asr.models import EncDecCTCModel, EncDecCTCModelBPE
from nemo.collections.asr.parts.utils.speaker_utils import audio_rttm_map, get_uniqname_from_filepath
from nemo.collections.asr.parts.utils.streaming_utils import AudioFeatureIterator, FrameBatchASR
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.utils import logging
__all__ = ['ASR_TIMESTAMPS']
try:
from pyctcdecode import build_ctcdecoder
PYCTCDECODE = True
except ImportError:
PYCTCDECODE = False
def if_none_get_default(param, default_value):
    """Return `param` unless it is None, in which case return `default_value`."""
    return default_value if param is None else param
class WERBPE_TS(WERBPE):
    """
    This is WERBPE_TS class that is modified for generating word_timestamps with logits.
    The functions in WER class is modified to save the word_timestamps whenever BPE token
    is being saved into a list.
    This class is designed to support ASR models based on CTC and BPE.
    Please refer to the definition of WERBPE class for more information.
    """

    def __init__(
        self,
        tokenizer: TokenizerSpec,
        batch_dim_index=0,
        use_cer=False,
        ctc_decode=True,
        log_prediction=True,
        dist_sync_on_step=False,
    ):
        # All arguments are forwarded unchanged to WERBPE; this subclass only
        # adds timestamp-aware decoding helpers.
        super().__init__(tokenizer, batch_dim_index, use_cer, ctc_decode, log_prediction, dist_sync_on_step)

    def ctc_decoder_predictions_tensor_with_ts(
        self, time_stride, predictions: torch.Tensor, predictions_len: torch.Tensor = None
    ) -> List[str]:
        """
        Greedy CTC decoding that also records a timestamp (in seconds, at
        `time_stride` resolution) per emitted token, plus [start, end]
        times per word.

        Returns:
            hypotheses: decoded text per batch element.
            timestamps: per-token timestamps per batch element.
            word_timestamps: per-word [start, end] pairs per batch element.
        """
        hypotheses, timestamps, word_timestamps = [], [], []
        # '⁇' string should be removed since it causes error during string split.
        unk = '⁇'
        prediction_cpu_tensor = predictions.long().cpu()
        # iterate over batch
        self.time_stride = time_stride
        for ind in range(prediction_cpu_tensor.shape[self.batch_dim_index]):
            prediction = prediction_cpu_tensor[ind].detach().numpy().tolist()
            if predictions_len is not None:
                prediction = prediction[: predictions_len[ind]]

            # CTC decoding procedure
            decoded_prediction, char_ts, timestamp_list = [], [], []
            previous = self.blank_id
            for pdx, p in enumerate(prediction):
                # Standard CTC collapse: keep a token only when it differs from
                # the previous frame (or follows a blank) and is not blank itself.
                if (p != previous or previous == self.blank_id) and p != self.blank_id:
                    decoded_prediction.append(p)
                    char_ts.append(round(pdx * self.time_stride, 2))
                    timestamp_list.append(round(pdx * self.time_stride, 2))
                previous = p

            hypothesis = self.decode_tokens_to_str_with_ts(decoded_prediction)
            hypothesis = hypothesis.replace(unk, '')
            word_ts = self.get_ts_from_decoded_prediction(decoded_prediction, hypothesis, char_ts)

            hypotheses.append(hypothesis)
            timestamps.append(timestamp_list)
            word_timestamps.append(word_ts)
        return hypotheses, timestamps, word_timestamps

    def decode_tokens_to_str_with_ts(self, tokens: List[int]) -> str:
        """Convert a list of BPE token ids into a text hypothesis."""
        hypothesis = self.tokenizer.ids_to_text(tokens)
        return hypothesis

    def decode_ids_to_tokens_with_ts(self, tokens: List[int]) -> List[str]:
        """Convert a list of BPE token ids into their token strings."""
        token_list = self.tokenizer.ids_to_tokens(tokens)
        return token_list

    def get_ts_from_decoded_prediction(self, decoded_prediction: List[str], hypothesis: List[str], char_ts: List[str]):
        """Stitch BPE tokens back into words and derive [start, end] times per word."""
        decoded_char_list = self.tokenizer.ids_to_tokens(decoded_prediction)
        stt_idx, end_idx = 0, len(decoded_char_list) - 1
        stt_ch_idx, end_ch_idx = 0, 0
        # '▁' is the sentencepiece word-boundary marker.
        space = '▁'
        word_ts, word_seq = [], []
        word_open_flag = False
        for idx, ch in enumerate(decoded_char_list):
            # Skip a bare boundary marker that is immediately followed by a
            # token which itself starts a new word (carries the marker).
            if idx != end_idx and (space == ch and space in decoded_char_list[idx + 1]):
                continue

            # A word opens at the first token, after a bare marker, or at a
            # token that carries the marker as a prefix.
            if (idx == stt_idx or space == decoded_char_list[idx - 1] or (space in ch and len(ch) > 1)) and (
                ch != space
            ):
                _stt = char_ts[idx]
                stt_ch_idx = idx
                word_open_flag = True

            # A word closes at the last token, or right before the next
            # marker-bearing token; record its [start, end] span.
            if word_open_flag and ch != space and (idx == end_idx or space in decoded_char_list[idx + 1]):
                _end = round(char_ts[idx] + self.time_stride, 2)
                end_ch_idx = idx
                word_open_flag = False
                word_ts.append([_stt, _end])
                stitched_word = ''.join(decoded_char_list[stt_ch_idx : end_ch_idx + 1]).replace(space, '')
                word_seq.append(stitched_word)
        assert len(word_ts) == len(hypothesis.split()), "Hypothesis does not match word time stamp."
        return word_ts
class WER_TS(WER):
    """
    This is WER class that is modified for generating timestamps with logits.
    The functions in WER class is modified to save the timestamps whenever character
    is being saved into a list.
    This class is designed to support ASR models based on CTC and Character-level tokens.
    Please refer to the definition of WER class for more information.
    """

    def __init__(
        self,
        vocabulary,
        batch_dim_index=0,
        use_cer=False,
        ctc_decode=True,
        log_prediction=True,
        dist_sync_on_step=False,
    ):
        # All arguments are forwarded unchanged to WER; this subclass only
        # adds timestamp-aware decoding helpers.
        super().__init__(vocabulary, batch_dim_index, use_cer, ctc_decode, log_prediction, dist_sync_on_step)

    def decode_tokens_to_str_with_ts(self, tokens: List[int], timestamps: List[int]) -> str:
        """
        Take frame-level tokens and timestamp list and collect the timestamps for
        start and end of each word.
        """
        token_list, timestamp_list = self.decode_ids_to_tokens_with_ts(tokens, timestamps)
        hypothesis = ''.join(self.decode_ids_to_tokens(tokens))
        return hypothesis, timestamp_list

    def decode_ids_to_tokens_with_ts(self, tokens: List[int], timestamps: List[int]) -> List[str]:
        # Drop CTC blanks; keep the surviving labels with their frame indices.
        token_list, timestamp_list = [], []
        for i, c in enumerate(tokens):
            if c != self.blank_id:
                token_list.append(self.labels_map[c])
                timestamp_list.append(timestamps[i])
        return token_list, timestamp_list

    def ctc_decoder_predictions_tensor_with_ts(
        self, predictions: torch.Tensor, predictions_len: torch.Tensor = None,
    ) -> List[str]:
        """
        A shortened version of the original function ctc_decoder_predictions_tensor().
        Replaced decode_tokens_to_str() function with decode_tokens_to_str_with_ts().
        """
        hypotheses, timestamps = [], []
        prediction_cpu_tensor = predictions.long().cpu()
        for ind in range(prediction_cpu_tensor.shape[self.batch_dim_index]):
            prediction = prediction_cpu_tensor[ind].detach().numpy().tolist()
            if predictions_len is not None:
                prediction = prediction[: predictions_len[ind]]

            # CTC decoding procedure with timestamps
            decoded_prediction, decoded_timing_list = [], []
            previous = self.blank_id
            for pdx, p in enumerate(prediction):
                # Standard CTC collapse: emit a token only when it differs from
                # the previous frame (or follows a blank) and is not blank.
                if (p != previous or previous == self.blank_id) and p != self.blank_id:
                    decoded_prediction.append(p)
                    decoded_timing_list.append(pdx)
                previous = p

            text, timestamp_list = self.decode_tokens_to_str_with_ts(decoded_prediction, decoded_timing_list)
            hypotheses.append(text)
            timestamps.append(timestamp_list)
        return hypotheses, timestamps
def get_wer_feat_logit(audio_file_path, asr, frame_len, tokens_per_chunk, delay, model_stride_in_secs):
    """
    Run buffered ASR inference on one audio file.

    Normalization is done per buffer in frame_bufferer, so no extra
    preprocessing is set up here.

    Returns:
        (hypothesis_text, token_sequence, log_probabilities)
    """
    # Start from a clean buffer state before streaming the new file.
    asr.reset()
    asr.read_audio_file_and_return(audio_file_path, delay, model_stride_in_secs)
    # transcribe_with_ts already yields the (hyp, tokens, log_prob) triple.
    return asr.transcribe_with_ts(tokens_per_chunk, delay)
def get_samples(audio_file, target_sr=16000):
    """
    Read the samples from the given audio_file path.

    Args:
        audio_file (str): path to the audio file.
        target_sr (int): desired sample rate in Hz (default 16 kHz).

    Returns:
        Samples as float32 scaled to [-1, 1), resampled to `target_sr`
        when the file's native rate differs, and transposed so channels
        come first for multi-channel input.
    """
    with sf.SoundFile(audio_file, 'r') as f:
        sample_rate = f.samplerate
        samples = f.read(dtype='int16')
    # Scale int16 PCM to [-1, 1) BEFORE resampling: librosa's resampler
    # requires floating-point input (resampling is linear, so scaling
    # before or after yields the same values).
    samples = samples.astype('float32') / 32768
    if sample_rate != target_sr:
        # Fixed: librosa >= 0.10 removed the positional sample-rate
        # arguments; orig_sr/target_sr must be passed by keyword.
        samples = librosa.core.resample(samples, orig_sr=sample_rate, target_sr=target_sr)
    samples = samples.transpose()
    return samples
class FrameBatchASR_Logits(FrameBatchASR):
    """
    A class for streaming frame-based ASR.
    Inherits from FrameBatchASR and adds new capability of returning the logit output.
    Please refer to FrameBatchASR for more detailed information.
    """

    def __init__(
        self,
        asr_model: Type[EncDecCTCModelBPE],
        frame_len: float = 1.6,
        total_buffer: float = 4.0,
        batch_size: int = 4,
    ):
        super().__init__(asr_model, frame_len, total_buffer, batch_size)
        # Accumulates per-chunk log-probability tensors alongside the
        # predictions collected by the parent class.
        self.all_logprobs = []

    def clear_buffer(self):
        # Reset both the logprob and prediction buffers between audio files.
        self.all_logprobs = []
        self.all_preds = []

    def read_audio_file_and_return(self, audio_filepath: str, delay: float, model_stride_in_secs: float):
        samples = get_samples(audio_filepath)
        # Pad the tail so the final chunk still yields `delay` worth of frames.
        samples = np.pad(samples, (0, int(delay * model_stride_in_secs * self.asr_model._cfg.sample_rate)))
        frame_reader = AudioFeatureIterator(samples, self.frame_len, self.raw_preprocessor, self.asr_model.device)
        self.set_frame_reader(frame_reader)

    @torch.no_grad()
    def _get_batch_preds(self):
        device = self.asr_model.device
        for batch in iter(self.data_loader):

            feat_signal, feat_signal_len = batch
            feat_signal, feat_signal_len = feat_signal.to(device), feat_signal_len.to(device)
            log_probs, encoded_len, predictions = self.asr_model(
                processed_signal=feat_signal, processed_signal_length=feat_signal_len
            )
            preds = torch.unbind(predictions)
            for pred in preds:
                self.all_preds.append(pred.cpu().numpy())
            # Keep the full log-probability tensors as well — this is the
            # capability this subclass adds on top of FrameBatchASR.
            log_probs_tup = torch.unbind(log_probs)
            for log_prob in log_probs_tup:
                self.all_logprobs.append(log_prob)
            del encoded_len
            del predictions

    def transcribe_with_ts(
        self, tokens_per_chunk: int, delay: int,
    ):
        self.infer_logits()
        self.unmerged = []
        self.part_logprobs = []
        for idx, pred in enumerate(self.all_preds):
            decoded = pred.tolist()
            # Keep only the central, non-overlapping span of each chunk so
            # consecutive chunks stitch together without duplication.
            _stt, _end = len(decoded) - 1 - delay, len(decoded) - 1 - delay + tokens_per_chunk
            self.unmerged += decoded[len(decoded) - 1 - delay : len(decoded) - 1 - delay + tokens_per_chunk]
            self.part_logprobs.append(self.all_logprobs[idx][_stt:_end, :])
        self.unmerged_logprobs = torch.cat(self.part_logprobs, 0)
        assert (
            len(self.unmerged) == self.unmerged_logprobs.shape[0]
        ), "Unmerged decoded result and log prob lengths are different."
        return self.greedy_merge(self.unmerged), self.unmerged, self.unmerged_logprobs
class ASR_TIMESTAMPS:
"""
A class designed for extracting word timestamps while the ASR decoding process.
This class contains a few setups for a slew of NeMo ASR models such as QuartzNet, CitriNet and ConformerCTC models.
"""
    def __init__(self, **cfg_diarizer):
        # Diarization manifest and ASR-related configuration sections.
        self.manifest_filepath = cfg_diarizer['manifest_filepath']
        self.params = cfg_diarizer['asr']['parameters']
        self.ctc_decoder_params = cfg_diarizer['asr']['ctc_decoder_parameters']
        self.ASR_model_name = cfg_diarizer['asr']['model_path']
        # Threshold used for ASR-based voice activity detection.
        self.nonspeech_threshold = self.params['asr_based_vad_threshold']
        self.root_path = None
        # run_ASR / encdec_class are populated later by set_asr_model().
        self.run_ASR = None
        self.encdec_class = None
        # Map of unique audio id -> manifest entry; used to list input files.
        self.AUDIO_RTTM_MAP = audio_rttm_map(self.manifest_filepath)
        self.audio_file_list = [value['audio_filepath'] for _, value in self.AUDIO_RTTM_MAP.items()]
def set_asr_model(self):
"""
Initialize the parameters for the given ASR model.
Currently, the following NGC models are supported:
stt_en_quartznet15x5,
stt_en_citrinet*,
stt_en_conformer_ctc*
To assign a proper decoding function for generating timestamp output,
the name of .nemo file should include the architecture name such as:
'quartznet', 'conformer', and 'citrinet'.
decoder_delay_in_sec is the amount of delay that is compensated during the word timestamp extraction.
word_ts_anchor_offset is the reference point for a word and used for matching the word with diarization labels.
Each ASR model has a different optimal decoder delay and word timestamp anchor offset.
To obtain an optimized diarization result with ASR, decoder_delay_in_sec and word_ts_anchor_offset
need to be searched on a development set.
"""
if 'quartznet' in self.ASR_model_name.lower():
self.run_ASR = self.run_ASR_QuartzNet_CTC
self.encdec_class = EncDecCTCModel
self.decoder_delay_in_sec = if_none_get_default(self.params['decoder_delay_in_sec'], 0.04)
self.word_ts_anchor_offset = if_none_get_default(self.params['word_ts_anchor_offset'], 0.12)
self.asr_batch_size = if_none_get_default(self.params['asr_batch_size'], 4)
self.model_stride_in_secs = 0.02
elif 'conformer' in self.ASR_model_name.lower():
self.run_ASR = self.run_ASR_BPE_CTC
self.encdec_class = EncDecCTCModelBPE
self.decoder_delay_in_sec = if_none_get_default(self.params['decoder_delay_in_sec'], 0.08)
self.word_ts_anchor_offset = if_none_get_default(self.params['word_ts_anchor_offset'], 0.12)
self.asr_batch_size = if_none_get_default(self.params['asr_batch_size'], 16)
self.model_stride_in_secs = 0.04
# Conformer requires buffered inference and the parameters for buffered processing.
self.chunk_len_in_sec = 5
self.total_buffer_in_secs = 25
elif 'citrinet' in self.ASR_model_name.lower():
self.run_ASR = self.run_ASR_CitriNet_CTC
self.encdec_class = EncDecCTCModelBPE
self.decoder_delay_in_sec = if_none_get_default(self.params['decoder_delay_in_sec'], 0.16)
self.word_ts_anchor_offset = if_none_get_default(self.params['word_ts_anchor_offset'], 0.2)
self.asr_batch_size = if_none_get_default(self.params['asr_batch_size'], 4)
self.model_stride_in_secs = 0.08
else:
raise ValueError(f"Cannot find the ASR model class for: {self.params["self.ASR_model_name"]}")
if self.ASR_model_name.endswith('.nemo'):
asr_model = self.encdec_class.restore_from(restore_path=self.ASR_model_name)
else:
asr_model = self.encdec_class.from_pretrained(model_name=self.ASR_model_name, strict=False)
if self.ctc_decoder_params['pretrained_language_model']:
if not PYCTCDECODE:
raise ImportError(
'LM for beam search decoding is provided but pyctcdecode is not installed. Install pyctcdecode using PyPI: pip install pyctcdecode'
)
self.beam_search_decoder = self.load_LM_for_CTC_decoder(asr_model)
else:
self.beam_search_decoder = None
asr_model.eval()
return asr_model
def load_LM_for_CTC_decoder(self, asr_model: Type[Union[EncDecCTCModel, EncDecCTCModelBPE]]):
"""
Load a language model for CTC decoder (pyctcdecode).
Note that only EncDecCTCModel and EncDecCTCModelBPE models can use pyctcdecode.
"""
kenlm_model = self.ctc_decoder_params['pretrained_language_model']
logging.info(f"Loading language model : {self.ctc_decoder_params["pretrained_language_model"]}")
if 'EncDecCTCModel' in str(type(asr_model)):
labels = asr_model.decoder.vocabulary
elif 'EncDecCTCModelBPE' in str(type(asr_model)):
vocab = asr_model.tokenizer.tokenizer.get_vocab()
labels = list(vocab.keys())
labels[0] = "<unk>"
else:
raise ValueError(f"Cannot find a vocabulary or tokenizer for: {self.params["self.ASR_model_name"]}")
decoder = build_ctcdecoder(
labels, kenlm_model, alpha=self.ctc_decoder_params['alpha'], beta=self.ctc_decoder_params['beta']
)
return decoder
def run_ASR_QuartzNet_CTC(self, asr_model: Type[EncDecCTCModel]) -> Tuple[Dict, Dict]:
"""
Launch QuartzNet ASR model and collect logit, timestamps and text output.
Args:
asr_model (class):
The loaded NeMo ASR model.
Returns:
words_dict (dict):
Dictionary of the sequence of words from hypothesis.
word_ts_dict (dict):
Dictionary of the time-stamps of words.
"""
words_dict, word_ts_dict = {}, {}
wer_ts = WER_TS(
vocabulary=asr_model.decoder.vocabulary,
batch_dim_index=0,
use_cer=asr_model._cfg.get('use_cer', False),
ctc_decode=True,
dist_sync_on_step=True,
log_prediction=asr_model._cfg.get("log_prediction", False),
)
with torch.cuda.amp.autocast():
transcript_logits_list = asr_model.transcribe(
self.audio_file_list, batch_size=self.asr_batch_size, logprobs=True
)
for idx, logit_np in enumerate(transcript_logits_list):
uniq_id = get_uniqname_from_filepath(self.audio_file_list[idx])
if self.beam_search_decoder:
logging.info(
f"Running beam-search decoder on {uniq_id} with LM {self.ctc_decoder_params["pretrained_language_model"]}"
)
hyp_words, word_ts = self.run_pyctcdecode(logit_np)
else:
log_prob = torch.from_numpy(logit_np)
logits_len = torch.from_numpy(np.array([log_prob.shape[0]]))
greedy_predictions = log_prob.argmax(dim=-1, keepdim=False).unsqueeze(0)
text, char_ts = wer_ts.ctc_decoder_predictions_tensor_with_ts(
greedy_predictions, predictions_len=logits_len
)
trans, char_ts_in_feature_frame_idx = self.clean_trans_and_TS(text[0], char_ts[0])
spaces_in_sec, hyp_words = self._get_spaces(
trans, char_ts_in_feature_frame_idx, self.model_stride_in_secs
)
word_ts = self.get_word_ts_from_spaces(
char_ts_in_feature_frame_idx, spaces_in_sec, end_stamp=logit_np.shape[0]
)
word_ts = self.align_decoder_delay(word_ts, self.decoder_delay_in_sec)
assert len(hyp_words) == len(word_ts), "Words and word timestamp list length does not match."
words_dict[uniq_id] = hyp_words
word_ts_dict[uniq_id] = word_ts
return words_dict, word_ts_dict
@staticmethod
def clean_trans_and_TS(trans: str, char_ts: List[str]) -> Tuple[str, List[str]]:
"""
Remove the spaces in the beginning and the end.
The char_ts need to be changed and synced accordingly.
Args:
trans (list):
List of character output (str).
char_ts (list):
List of timestamps (int) for each character.
Returns:
trans (list):
List of the cleaned character output.
char_ts (list):
List of the cleaned timestamps for each character.
"""
assert (len(trans) > 0) and (len(char_ts) > 0)
assert len(trans) == len(char_ts)
trans = trans.lstrip()
diff_L = len(char_ts) - len(trans)
char_ts = char_ts[diff_L:]
trans = trans.rstrip()
diff_R = len(char_ts) - len(trans)
if diff_R > 0:
char_ts = char_ts[: -1 * diff_R]
return trans, char_ts
    def _get_spaces(self, trans: str, char_ts: List[str], time_stride: float) -> Tuple[float, List[str]]:
        """
        Collect the space symbols with a list of words.

        Args:
            trans (list):
                List of character output (str).
            char_ts (list):
                List of timestamps of the characters.
            time_stride (float):
                The size of stride of the model in second.

        Returns:
            spaces_in_sec (list):
                List of the ranges of spaces
            word_list (list):
                List of the words from ASR inference.
        """
        assert (len(trans) > 0) and (len(char_ts) > 0), "Transcript and char_ts length should not be 0."
        assert len(trans) == len(char_ts), "Transcript and timestamp lengths do not match."

        spaces_in_sec, word_list = [], []
        stt_idx = 0
        for k, s in enumerate(trans):
            if s == ' ':
                # A space spans from its own timestamp to just before the next
                # character's timestamp, both converted to seconds.
                # NOTE(review): assumes a space is never the final character —
                # char_ts[k + 1] would raise IndexError otherwise; confirm
                # callers strip trailing spaces first.
                spaces_in_sec.append(
                    [round(char_ts[k] * time_stride, 2), round((char_ts[k + 1] - 1) * time_stride, 2)]
                )
                word_list.append(trans[stt_idx:k])
                stt_idx = k + 1
        # Append the trailing word after the last space, if any.
        if len(trans) > stt_idx and trans[stt_idx] != ' ':
            word_list.append(trans[stt_idx:])
        return spaces_in_sec, word_list
def run_ASR_CitriNet_CTC(self, asr_model: Type[EncDecCTCModelBPE]) -> Tuple[Dict, Dict]:
"""
Launch CitriNet ASR model and collect logit, timestamps and text output.
Args:
asr_model (class):
The loaded NeMo ASR model.
Returns:
words_dict (dict):
Dictionary of the sequence of words from hypothesis.
word_ts_dict (dict):
Dictionary of the timestamps of hypothesis words.
"""
words_dict, word_ts_dict = {}, {}
werbpe_ts = WERBPE_TS(
tokenizer=asr_model.tokenizer,
batch_dim_index=0,
use_cer=asr_model._cfg.get('use_cer', False),
ctc_decode=True,
dist_sync_on_step=True,
log_prediction=asr_model._cfg.get("log_prediction", False),
)
with torch.cuda.amp.autocast():
transcript_logits_list = asr_model.transcribe(
self.audio_file_list, batch_size=self.asr_batch_size, logprobs=True
)
for idx, logit_np in enumerate(transcript_logits_list):
uniq_id = get_uniqname_from_filepath(self.audio_file_list[idx])
if self.beam_search_decoder:
logging.info(
f"Running beam-search decoder with LM {self.ctc_decoder_params["pretrained_language_model"]}"
)
hyp_words, word_ts = self.run_pyctcdecode(logit_np)
else:
log_prob = torch.from_numpy(logit_np)
greedy_predictions = log_prob.argmax(dim=-1, keepdim=False).unsqueeze(0)
logits_len = torch.from_numpy(np.array([log_prob.shape[0]]))
text, char_ts, word_ts = werbpe_ts.ctc_decoder_predictions_tensor_with_ts(
self.model_stride_in_secs, greedy_predictions, predictions_len=logits_len
)
hyp_words, word_ts = text[0].split(), word_ts[0]
word_ts = self.align_decoder_delay(word_ts, self.decoder_delay_in_sec)
assert len(hyp_words) == len(word_ts), "Words and word timestamp list length does not match."
words_dict[uniq_id] = hyp_words
word_ts_dict[uniq_id] = word_ts
return words_dict, word_ts_dict
    def set_buffered_infer_params(self, asr_model: Type[EncDecCTCModelBPE]) -> Tuple[float, float, float]:
        """
        Prepare the parameters for the buffered inference.

        Returns a tuple (onset_delay, mid_delay, tokens_per_chunk), all counted in
        model-stride frames, used to trim the overlapping buffer context.
        """
        cfg = copy.deepcopy(asr_model._cfg)
        OmegaConf.set_struct(cfg.preprocessor, False)
        # some changes for streaming scenario
        cfg.preprocessor.dither = 0.0
        cfg.preprocessor.pad_to = 0
        cfg.preprocessor.normalize = "None"
        preprocessor = nemo_asr.models.EncDecCTCModelBPE.from_config_dict(cfg.preprocessor)
        preprocessor.to(asr_model.device)
        # NOTE(review): `normalize` was overwritten to "None" just above, so this
        # condition is always true and the error is always logged; presumably the
        # intent was to check the model's ORIGINAL normalization setting before
        # the override — confirm against upstream NeMo.
        if cfg.preprocessor.normalize != "per_feature":
            logging.error(
                "Only EncDecCTCModelBPE models trained with per_feature normalization are supported currently"
            )
        # Disable config overwriting
        OmegaConf.set_struct(cfg.preprocessor, True)
        # Frames to drop at the start of each buffer (half the context padding), plus one.
        onset_delay = (
            math.ceil(((self.total_buffer_in_secs - self.chunk_len_in_sec) / 2) / self.model_stride_in_secs) + 1
        )
        # Frame offset of the end of the chunk within the buffer.
        mid_delay = math.ceil(
            (self.chunk_len_in_sec + (self.total_buffer_in_secs - self.chunk_len_in_sec) / 2)
            / self.model_stride_in_secs
        )
        tokens_per_chunk = math.ceil(self.chunk_len_in_sec / self.model_stride_in_secs)
        return onset_delay, mid_delay, tokens_per_chunk
def run_ASR_BPE_CTC(self, asr_model: Type[EncDecCTCModelBPE]) -> Tuple[Dict, Dict]:
"""
Launch CTC-BPE based ASR model and collect logit, timestamps and text output.
Args:
asr_model (class):
The loaded NeMo ASR model.
Returns:
words_dict (dict):
Dictionary of the sequence of words from hypothesis.
word_ts_dict (dict):
Dictionary of the time-stamps of words.
"""
torch.manual_seed(0)
torch.set_grad_enabled(False)
words_dict, word_ts_dict = {}, {}
werbpe_ts = WERBPE_TS(
tokenizer=asr_model.tokenizer,
batch_dim_index=0,
use_cer=asr_model._cfg.get('use_cer', False),
ctc_decode=True,
dist_sync_on_step=True,
log_prediction=asr_model._cfg.get("log_prediction", False),
)
frame_asr = FrameBatchASR_Logits(
asr_model=asr_model,
frame_len=self.chunk_len_in_sec,
total_buffer=self.total_buffer_in_secs,
batch_size=self.asr_batch_size,
)
onset_delay, mid_delay, tokens_per_chunk = self.set_buffered_infer_params(asr_model)
onset_delay_in_sec = round(onset_delay * self.model_stride_in_secs, 2)
with torch.cuda.amp.autocast():
logging.info(f"Running ASR model {self.ASR_model_name}")
for idx, audio_file_path in enumerate(self.audio_file_list):
uniq_id = get_uniqname_from_filepath(audio_file_path)
logging.info(f"[{idx+1}/{len(self.audio_file_list)}] FrameBatchASR: {audio_file_path}")
frame_asr.clear_buffer()
hyp, greedy_predictions_list, log_prob = get_wer_feat_logit(
audio_file_path,
frame_asr,
self.chunk_len_in_sec,
tokens_per_chunk,
mid_delay,
self.model_stride_in_secs,
)
if self.beam_search_decoder:
logging.info(
f"Running beam-search decoder with LM {self.ctc_decoder_params["pretrained_language_model"]}"
)
log_prob = log_prob.unsqueeze(0).cpu().numpy()[0]
hyp_words, word_ts = self.run_pyctcdecode(log_prob, onset_delay_in_sec=onset_delay_in_sec)
else:
logits_len = torch.from_numpy(np.array([len(greedy_predictions_list)]))
greedy_predictions_list = greedy_predictions_list[onset_delay:-mid_delay]
greedy_predictions = torch.from_numpy(np.array(greedy_predictions_list)).unsqueeze(0)
text, char_ts, word_ts = werbpe_ts.ctc_decoder_predictions_tensor_with_ts(
self.model_stride_in_secs, greedy_predictions, predictions_len=logits_len
)
hyp_words, word_ts = text[0].split(), word_ts[0]
word_ts = self.align_decoder_delay(word_ts, self.decoder_delay_in_sec)
assert len(hyp_words) == len(word_ts), "Words and word timestamp list length does not match."
words_dict[uniq_id] = hyp_words
word_ts_dict[uniq_id] = word_ts
return words_dict, word_ts_dict
def get_word_ts_from_spaces(self, char_ts: List[float], spaces_in_sec: List[float], end_stamp: float) -> List[str]:
"""
Take word timestamps from the spaces from the decoded prediction.
Args:
char_ts (list):
List containing the timestamp for each character.
spaces_in_sec (list):
List containing the start and the end time of each space token.
end_stamp (float):
The end time of the session in sec.
Returns:
word_timestamps (list):
List of the timestamps for the resulting words.
"""
start_stamp_in_sec = round(char_ts[0] * self.model_stride_in_secs, 2)
end_stamp_in_sec = round(end_stamp * self.model_stride_in_secs, 2)
word_timetamps_middle = [
[round(spaces_in_sec[k][1], 2), round(spaces_in_sec[k + 1][0], 2),] for k in range(len(spaces_in_sec) - 1)
]
word_timestamps = (
[[start_stamp_in_sec, round(spaces_in_sec[0][0], 2)]]
+ word_timetamps_middle
+ [[round(spaces_in_sec[-1][1], 2), end_stamp_in_sec]]
)
return word_timestamps
def run_pyctcdecode(
self, logprob: np.ndarray, onset_delay_in_sec: float = 0, beam_width: int = 32
) -> Tuple[List[str], List[str]]:
"""
Launch pyctcdecode with the loaded pretrained language model.
Args:
logprob (np.ndarray):
The log probability from the ASR model inference in numpy array format.
onset_delay_in_sec (float):
The amount of delay that needs to be compensated for the timestamp outputs froM pyctcdecode.
beam_width (int):
The beam width parameter for beam search decodring.
Returns:
hyp_words (list):
List of words in the hypothesis.
word_ts (list):
List of word timestamps from the decoder.
"""
beams = self.beam_search_decoder.decode_beams(logprob, beam_width=self.ctc_decoder_params['beam_width'])
word_ts_beam, words_beam = [], []
for idx, (word, _) in enumerate(beams[0][2]):
ts = self.get_word_ts_from_wordframes(idx, beams[0][2], self.model_stride_in_secs, onset_delay_in_sec)
word_ts_beam.append(ts)
words_beam.append(word)
hyp_words, word_ts = words_beam, word_ts_beam
return hyp_words, word_ts
@staticmethod
def get_word_ts_from_wordframes(idx, word_frames: List[List[float]], frame_duration: float, onset_delay: float):
"""
Extract word timestamps from word frames generated from pyctcdecode.
"""
offset = -1 * 2.25 * frame_duration - onset_delay
frame_begin = word_frames[idx][1][0]
if frame_begin == -1:
frame_begin = word_frames[idx - 1][1][1] if idx != 0 else 0
frame_end = word_frames[idx][1][1]
return [
round(max(frame_begin * frame_duration + offset, 0), 2),
round(max(frame_end * frame_duration + offset, 0), 2),
]
@staticmethod
def align_decoder_delay(word_ts, decoder_delay_in_sec: float):
"""
Subtract decoder_delay_in_sec from the word timestamp output.
"""
for k in range(len(word_ts)):
word_ts[k] = [
round(word_ts[k][0] - decoder_delay_in_sec, 2),
round(word_ts[k][1] - decoder_delay_in_sec, 2),
]
return word_ts
| # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from typing import Dict, List, Tuple, Type, Union
import librosa
import numpy as np
import soundfile as sf
import torch
from omegaconf import OmegaConf
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.metrics.wer import WER
from nemo.collections.asr.metrics.wer_bpe import WERBPE
from nemo.collections.asr.models import EncDecCTCModel, EncDecCTCModelBPE
from nemo.collections.asr.parts.utils.speaker_utils import audio_rttm_map, get_uniqname_from_filepath
from nemo.collections.asr.parts.utils.streaming_utils import AudioFeatureIterator, FrameBatchASR
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.utils import logging
__all__ = ['ASR_TIMESTAMPS']
# pyctcdecode is an optional dependency: LM-based beam-search decoding is only
# available when it is installed; PYCTCDECODE records its availability.
try:
    from pyctcdecode import build_ctcdecoder
    PYCTCDECODE = True
except ImportError:
    PYCTCDECODE = False
def if_none_get_default(param, default_value):
    """Return ``param`` unless it is None, in which case return ``default_value``."""
    return default_value if param is None else param
class WERBPE_TS(WERBPE):
    """
    This is WERBPE_TS class that is modified for generating word_timestamps with logits.
    The functions in WER class is modified to save the word_timestamps whenever BPE token
    is being saved into a list.
    This class is designed to support ASR models based on CTC and BPE.
    Please refer to the definition of WERBPE class for more information.
    """
    def __init__(
        self,
        tokenizer: TokenizerSpec,
        batch_dim_index=0,
        use_cer=False,
        ctc_decode=True,
        log_prediction=True,
        dist_sync_on_step=False,
    ):
        super().__init__(tokenizer, batch_dim_index, use_cer, ctc_decode, log_prediction, dist_sync_on_step)
    def ctc_decoder_predictions_tensor_with_ts(
        self, time_stride, predictions: torch.Tensor, predictions_len: torch.Tensor = None
    ) -> List[str]:
        """
        Greedy CTC decoding that also records per-token and per-word timestamps.

        Returns (hypotheses, timestamps, word_timestamps), one entry per batch item.
        """
        hypotheses, timestamps, word_timestamps = [], [], []
        # '⁇' string should be removed since it causes error during string split.
        unk = '⁇'
        prediction_cpu_tensor = predictions.long().cpu()
        # iterate over batch
        self.time_stride = time_stride
        for ind in range(prediction_cpu_tensor.shape[self.batch_dim_index]):
            prediction = prediction_cpu_tensor[ind].detach().numpy().tolist()
            if predictions_len is not None:
                prediction = prediction[: predictions_len[ind]]
            # CTC decoding procedure
            decoded_prediction, char_ts, timestamp_list = [], [], []
            previous = self.blank_id
            for pdx, p in enumerate(prediction):
                # Standard CTC collapse: drop blanks and repeats, and record the
                # frame time (in seconds) of each emitted token.
                if (p != previous or previous == self.blank_id) and p != self.blank_id:
                    decoded_prediction.append(p)
                    char_ts.append(round(pdx * self.time_stride, 2))
                    timestamp_list.append(round(pdx * self.time_stride, 2))
                previous = p
            hypothesis = self.decode_tokens_to_str_with_ts(decoded_prediction)
            hypothesis = hypothesis.replace(unk, '')
            word_ts = self.get_ts_from_decoded_prediction(decoded_prediction, hypothesis, char_ts)
            hypotheses.append(hypothesis)
            timestamps.append(timestamp_list)
            word_timestamps.append(word_ts)
        return hypotheses, timestamps, word_timestamps
    def decode_tokens_to_str_with_ts(self, tokens: List[int]) -> str:
        # Convert BPE token ids back into a plain-text hypothesis.
        hypothesis = self.tokenizer.ids_to_text(tokens)
        return hypothesis
    def decode_ids_to_tokens_with_ts(self, tokens: List[int]) -> List[str]:
        token_list = self.tokenizer.ids_to_tokens(tokens)
        return token_list
    def get_ts_from_decoded_prediction(self, decoded_prediction: List[str], hypothesis: List[str], char_ts: List[str]):
        """
        Reconstruct [start, end] timestamps for each word from the decoded BPE
        token sequence; '▁' marks the start of a new word in the token stream.
        """
        decoded_char_list = self.tokenizer.ids_to_tokens(decoded_prediction)
        stt_idx, end_idx = 0, len(decoded_char_list) - 1
        stt_ch_idx, end_ch_idx = 0, 0
        space = '▁'
        word_ts, word_seq = [], []
        word_open_flag = False
        for idx, ch in enumerate(decoded_char_list):
            # Skip a bare space token that is immediately followed by another
            # space-prefixed token; it carries no word content of its own.
            if idx != end_idx and (space == ch and space in decoded_char_list[idx + 1]):
                continue
            # A word opens at the first token, right after a space token, or at
            # a multi-character token that carries the leading '▁' itself.
            if (idx == stt_idx or space == decoded_char_list[idx - 1] or (space in ch and len(ch) > 1)) and (
                ch != space
            ):
                _stt = char_ts[idx]
                stt_ch_idx = idx
                word_open_flag = True
            # A word closes at the last token or just before the next space-prefixed token.
            if word_open_flag and ch != space and (idx == end_idx or space in decoded_char_list[idx + 1]):
                _end = round(char_ts[idx] + self.time_stride, 2)
                end_ch_idx = idx
                word_open_flag = False
                word_ts.append([_stt, _end])
                stitched_word = ''.join(decoded_char_list[stt_ch_idx : end_ch_idx + 1]).replace(space, '')
                word_seq.append(stitched_word)
        assert len(word_ts) == len(hypothesis.split()), "Hypothesis does not match word time stamp."
        return word_ts
class WER_TS(WER):
    """
    This is WER class that is modified for generating timestamps with logits.
    The functions in WER class is modified to save the timestamps whenever character
    is being saved into a list.
    This class is designed to support ASR models based on CTC and Character-level tokens.
    Please refer to the definition of WER class for more information.
    """
    def __init__(
        self,
        vocabulary,
        batch_dim_index=0,
        use_cer=False,
        ctc_decode=True,
        log_prediction=True,
        dist_sync_on_step=False,
    ):
        super().__init__(vocabulary, batch_dim_index, use_cer, ctc_decode, log_prediction, dist_sync_on_step)
    def decode_tokens_to_str_with_ts(self, tokens: List[int], timestamps: List[int]) -> str:
        """
        Take frame-level tokens and timestamp list and collect the timestamps for
        start and end of each word.
        """
        token_list, timestamp_list = self.decode_ids_to_tokens_with_ts(tokens, timestamps)
        hypothesis = ''.join(self.decode_ids_to_tokens(tokens))
        return hypothesis, timestamp_list
    def decode_ids_to_tokens_with_ts(self, tokens: List[int], timestamps: List[int]) -> List[str]:
        # Map non-blank token ids to their labels, keeping the matching timestamps.
        token_list, timestamp_list = [], []
        for i, c in enumerate(tokens):
            if c != self.blank_id:
                token_list.append(self.labels_map[c])
                timestamp_list.append(timestamps[i])
        return token_list, timestamp_list
    def ctc_decoder_predictions_tensor_with_ts(
        self, predictions: torch.Tensor, predictions_len: torch.Tensor = None,
    ) -> List[str]:
        """
        A shortened version of the original function ctc_decoder_predictions_tensor().
        Replaced decode_tokens_to_str() function with decode_tokens_to_str_with_ts().
        """
        hypotheses, timestamps = [], []
        prediction_cpu_tensor = predictions.long().cpu()
        for ind in range(prediction_cpu_tensor.shape[self.batch_dim_index]):
            prediction = prediction_cpu_tensor[ind].detach().numpy().tolist()
            if predictions_len is not None:
                prediction = prediction[: predictions_len[ind]]
            # CTC decoding procedure with timestamps
            decoded_prediction, decoded_timing_list = [], []
            previous = self.blank_id
            for pdx, p in enumerate(prediction):
                # Standard CTC collapse: drop blanks and repeats; keep the frame index.
                if (p != previous or previous == self.blank_id) and p != self.blank_id:
                    decoded_prediction.append(p)
                    decoded_timing_list.append(pdx)
                previous = p
            text, timestamp_list = self.decode_tokens_to_str_with_ts(decoded_prediction, decoded_timing_list)
            hypotheses.append(text)
            timestamps.append(timestamp_list)
        return hypotheses, timestamps
def get_wer_feat_logit(audio_file_path, asr, frame_len, tokens_per_chunk, delay, model_stride_in_secs):
    """
    Run buffered inference on one audio file and return (hypothesis, tokens, log_prob).

    Normalization is performed per buffer inside the frame bufferer, so the
    preprocessor converts audio samples into raw (unnormalized) features.
    """
    asr.reset()
    asr.read_audio_file_and_return(audio_file_path, delay, model_stride_in_secs)
    return asr.transcribe_with_ts(tokens_per_chunk, delay)
def get_samples(audio_file, target_sr=16000):
    """
    Read audio samples from the given audio_file path.

    Args:
        audio_file (str):
            Path to the audio file to read.
        target_sr (int):
            Target sampling rate; the signal is resampled if the file's rate differs.
    Returns:
        samples:
            Samples scaled to [-1, 1) as float32 and transposed.
    """
    # The `with` block both reads the file and closes the handle; the original
    # code's trailing `del f` on the already-closed handle was redundant.
    with sf.SoundFile(audio_file, 'r') as f:
        sample_rate = f.samplerate
        samples = f.read(dtype='int16')
    if sample_rate != target_sr:
        samples = librosa.core.resample(samples, sample_rate, target_sr)
    # Scale int16 range to [-1, 1) floats.
    samples = samples.astype('float32') / 32768
    samples = samples.transpose()
    return samples
class FrameBatchASR_Logits(FrameBatchASR):
    """
    A class for streaming frame-based ASR.
    Inherits from FrameBatchASR and adds new capability of returning the logit output.
    Please refer to FrameBatchASR for more detailed information.
    """
    def __init__(
        self,
        asr_model: Type[EncDecCTCModelBPE],
        frame_len: float = 1.6,
        total_buffer: float = 4.0,
        batch_size: int = 4,
    ):
        super().__init__(asr_model, frame_len, total_buffer, batch_size)
        # Accumulated per-buffer log probabilities, filled by _get_batch_preds().
        self.all_logprobs = []
    def clear_buffer(self):
        # Reset accumulated state between audio files.
        self.all_logprobs = []
        self.all_preds = []
    def read_audio_file_and_return(self, audio_filepath: str, delay: float, model_stride_in_secs: float):
        samples = get_samples(audio_filepath)
        # Pad the tail so the final chunk still yields `delay` worth of frames.
        samples = np.pad(samples, (0, int(delay * model_stride_in_secs * self.asr_model._cfg.sample_rate)))
        frame_reader = AudioFeatureIterator(samples, self.frame_len, self.raw_preprocessor, self.asr_model.device)
        self.set_frame_reader(frame_reader)
    @torch.no_grad()
    def _get_batch_preds(self):
        # Run the model over each buffered feature batch, keeping both greedy
        # predictions and the raw log probabilities.
        device = self.asr_model.device
        for batch in iter(self.data_loader):
            feat_signal, feat_signal_len = batch
            feat_signal, feat_signal_len = feat_signal.to(device), feat_signal_len.to(device)
            log_probs, encoded_len, predictions = self.asr_model(
                processed_signal=feat_signal, processed_signal_length=feat_signal_len
            )
            preds = torch.unbind(predictions)
            for pred in preds:
                self.all_preds.append(pred.cpu().numpy())
            log_probs_tup = torch.unbind(log_probs)
            for log_prob in log_probs_tup:
                self.all_logprobs.append(log_prob)
            del encoded_len
            del predictions
    def transcribe_with_ts(
        self, tokens_per_chunk: int, delay: int,
    ):
        """
        Merge per-buffer predictions into one sequence, returning the decoded
        text, the merged token list, and the matching log probabilities.
        """
        self.infer_logits()
        self.unmerged = []
        self.part_logprobs = []
        for idx, pred in enumerate(self.all_preds):
            decoded = pred.tolist()
            # Keep only the `tokens_per_chunk` frames belonging to this chunk,
            # skipping the `delay` frames of right-context at the buffer end.
            _stt, _end = len(decoded) - 1 - delay, len(decoded) - 1 - delay + tokens_per_chunk
            self.unmerged += decoded[len(decoded) - 1 - delay : len(decoded) - 1 - delay + tokens_per_chunk]
            self.part_logprobs.append(self.all_logprobs[idx][_stt:_end, :])
        self.unmerged_logprobs = torch.cat(self.part_logprobs, 0)
        assert (
            len(self.unmerged) == self.unmerged_logprobs.shape[0]
        ), "Unmerged decoded result and log prob lengths are different."
        return self.greedy_merge(self.unmerged), self.unmerged, self.unmerged_logprobs
class ASR_TIMESTAMPS:
    """
    A class designed for extracting word timestamps while the ASR decoding process.
    This class contains a few setups for a slew of NeMo ASR models such as QuartzNet, CitriNet and ConformerCTC models.
    """
    def __init__(self, **cfg_diarizer):
        self.manifest_filepath = cfg_diarizer['manifest_filepath']
        self.params = cfg_diarizer['asr']['parameters']
        self.ctc_decoder_params = cfg_diarizer['asr']['ctc_decoder_parameters']
        self.ASR_model_name = cfg_diarizer['asr']['model_path']
        self.nonspeech_threshold = self.params['asr_based_vad_threshold']
        self.root_path = None
        # run_ASR and encdec_class are assigned by set_asr_model() based on the model name.
        self.run_ASR = None
        self.encdec_class = None
        self.AUDIO_RTTM_MAP = audio_rttm_map(self.manifest_filepath)
        self.audio_file_list = [value['audio_filepath'] for _, value in self.AUDIO_RTTM_MAP.items()]
    def set_asr_model(self):
        """
        Initialize the parameters for the given ASR model.
        Currently, the following NGC models are supported:
            stt_en_quartznet15x5,
            stt_en_citrinet*,
            stt_en_conformer_ctc*
        To assign a proper decoding function for generating timestamp output,
        the name of .nemo file should include the architecture name such as:
        'quartznet', 'conformer', and 'citrinet'.
        decoder_delay_in_sec is the amount of delay that is compensated during the word timestamp extraction.
        word_ts_anchor_offset is the reference point for a word and used for matching the word with diarization labels.
        Each ASR model has a different optimal decoder delay and word timestamp anchor offset.
        To obtain an optimized diarization result with ASR, decoder_delay_in_sec and word_ts_anchor_offset
        need to be searched on a development set.
        """
        if 'quartznet' in self.ASR_model_name.lower():
            self.run_ASR = self.run_ASR_QuartzNet_CTC
            self.encdec_class = EncDecCTCModel
            self.decoder_delay_in_sec = if_none_get_default(self.params['decoder_delay_in_sec'], 0.04)
            self.word_ts_anchor_offset = if_none_get_default(self.params['word_ts_anchor_offset'], 0.12)
            self.asr_batch_size = if_none_get_default(self.params['asr_batch_size'], 4)
            self.model_stride_in_secs = 0.02
        elif 'conformer' in self.ASR_model_name.lower():
            self.run_ASR = self.run_ASR_BPE_CTC
            self.encdec_class = EncDecCTCModelBPE
            self.decoder_delay_in_sec = if_none_get_default(self.params['decoder_delay_in_sec'], 0.08)
            self.word_ts_anchor_offset = if_none_get_default(self.params['word_ts_anchor_offset'], 0.12)
            self.asr_batch_size = if_none_get_default(self.params['asr_batch_size'], 16)
            self.model_stride_in_secs = 0.04
            # Conformer requires buffered inference and the parameters for buffered processing.
            self.chunk_len_in_sec = 5
            self.total_buffer_in_secs = 25
        elif 'citrinet' in self.ASR_model_name.lower():
            self.run_ASR = self.run_ASR_CitriNet_CTC
            self.encdec_class = EncDecCTCModelBPE
            self.decoder_delay_in_sec = if_none_get_default(self.params['decoder_delay_in_sec'], 0.16)
            self.word_ts_anchor_offset = if_none_get_default(self.params['word_ts_anchor_offset'], 0.2)
            self.asr_batch_size = if_none_get_default(self.params['asr_batch_size'], 4)
            self.model_stride_in_secs = 0.08
        else:
            # NOTE(review): self.params['self.ASR_model_name'] looks like a wrong key;
            # this error path would raise KeyError instead of the intended ValueError —
            # presumably self.ASR_model_name itself was meant. Confirm before relying on it.
            raise ValueError(f"Cannot find the ASR model class for: {self.params['self.ASR_model_name']}")
        if self.ASR_model_name.endswith('.nemo'):
            asr_model = self.encdec_class.restore_from(restore_path=self.ASR_model_name)
        else:
            asr_model = self.encdec_class.from_pretrained(model_name=self.ASR_model_name, strict=False)
        if self.ctc_decoder_params['pretrained_language_model']:
            if not PYCTCDECODE:
                raise ImportError(
                    'LM for beam search decoding is provided but pyctcdecode is not installed. Install pyctcdecode using PyPI: pip install pyctcdecode'
                )
            self.beam_search_decoder = self.load_LM_for_CTC_decoder(asr_model)
        else:
            self.beam_search_decoder = None
        asr_model.eval()
        return asr_model
    def load_LM_for_CTC_decoder(self, asr_model: Type[Union[EncDecCTCModel, EncDecCTCModelBPE]]):
        """
        Load a language model for CTC decoder (pyctcdecode).
        Note that only EncDecCTCModel and EncDecCTCModelBPE models can use pyctcdecode.
        """
        kenlm_model = self.ctc_decoder_params['pretrained_language_model']
        logging.info(f"Loading language model : {self.ctc_decoder_params['pretrained_language_model']}")
        if 'EncDecCTCModel' in str(type(asr_model)):
            labels = asr_model.decoder.vocabulary
        elif 'EncDecCTCModelBPE' in str(type(asr_model)):
            vocab = asr_model.tokenizer.tokenizer.get_vocab()
            labels = list(vocab.keys())
            # Replace the first vocabulary entry so pyctcdecode treats it as the unknown token.
            labels[0] = "<unk>"
        else:
            # NOTE(review): same suspicious key as in set_asr_model() —
            # self.params['self.ASR_model_name'] would raise KeyError here; confirm.
            raise ValueError(f"Cannot find a vocabulary or tokenizer for: {self.params['self.ASR_model_name']}")
        decoder = build_ctcdecoder(
            labels, kenlm_model, alpha=self.ctc_decoder_params['alpha'], beta=self.ctc_decoder_params['beta']
        )
        return decoder
    def run_ASR_QuartzNet_CTC(self, asr_model: Type[EncDecCTCModel]) -> Tuple[Dict, Dict]:
        """
        Launch QuartzNet ASR model and collect logit, timestamps and text output.
        Args:
            asr_model (class):
                The loaded NeMo ASR model.
        Returns:
            words_dict (dict):
                Dictionary of the sequence of words from hypothesis.
            word_ts_dict (dict):
                Dictionary of the time-stamps of words.
        """
        words_dict, word_ts_dict = {}, {}
        wer_ts = WER_TS(
            vocabulary=asr_model.decoder.vocabulary,
            batch_dim_index=0,
            use_cer=asr_model._cfg.get('use_cer', False),
            ctc_decode=True,
            dist_sync_on_step=True,
            log_prediction=asr_model._cfg.get("log_prediction", False),
        )
        with torch.cuda.amp.autocast():
            transcript_logits_list = asr_model.transcribe(
                self.audio_file_list, batch_size=self.asr_batch_size, logprobs=True
            )
            for idx, logit_np in enumerate(transcript_logits_list):
                uniq_id = get_uniqname_from_filepath(self.audio_file_list[idx])
                if self.beam_search_decoder:
                    logging.info(
                        f"Running beam-search decoder on {uniq_id} with LM {self.ctc_decoder_params['pretrained_language_model']}"
                    )
                    hyp_words, word_ts = self.run_pyctcdecode(logit_np)
                else:
                    log_prob = torch.from_numpy(logit_np)
                    logits_len = torch.from_numpy(np.array([log_prob.shape[0]]))
                    greedy_predictions = log_prob.argmax(dim=-1, keepdim=False).unsqueeze(0)
                    text, char_ts = wer_ts.ctc_decoder_predictions_tensor_with_ts(
                        greedy_predictions, predictions_len=logits_len
                    )
                    # Character-level model: word boundaries come from space characters.
                    trans, char_ts_in_feature_frame_idx = self.clean_trans_and_TS(text[0], char_ts[0])
                    spaces_in_sec, hyp_words = self._get_spaces(
                        trans, char_ts_in_feature_frame_idx, self.model_stride_in_secs
                    )
                    word_ts = self.get_word_ts_from_spaces(
                        char_ts_in_feature_frame_idx, spaces_in_sec, end_stamp=logit_np.shape[0]
                    )
                word_ts = self.align_decoder_delay(word_ts, self.decoder_delay_in_sec)
                assert len(hyp_words) == len(word_ts), "Words and word timestamp list length does not match."
                words_dict[uniq_id] = hyp_words
                word_ts_dict[uniq_id] = word_ts
        return words_dict, word_ts_dict
    @staticmethod
    def clean_trans_and_TS(trans: str, char_ts: List[str]) -> Tuple[str, List[str]]:
        """
        Remove the spaces in the beginning and the end.
        The char_ts need to be changed and synced accordingly.
        Args:
            trans (list):
                List of character output (str).
            char_ts (list):
                List of timestamps (int) for each character.
        Returns:
            trans (list):
                List of the cleaned character output.
            char_ts (list):
                List of the cleaned timestamps for each character.
        """
        assert (len(trans) > 0) and (len(char_ts) > 0)
        assert len(trans) == len(char_ts)
        trans = trans.lstrip()
        diff_L = len(char_ts) - len(trans)
        char_ts = char_ts[diff_L:]
        trans = trans.rstrip()
        diff_R = len(char_ts) - len(trans)
        if diff_R > 0:
            char_ts = char_ts[: -1 * diff_R]
        return trans, char_ts
    def _get_spaces(self, trans: str, char_ts: List[str], time_stride: float) -> Tuple[float, List[str]]:
        """
        Collect the space symbols with a list of words.
        Args:
            trans (list):
                List of character output (str).
            char_ts (list):
                List of timestamps of the characters.
            time_stride (float):
                The size of stride of the model in second.
        Returns:
            spaces_in_sec (list):
                List of the ranges of spaces
            word_list (list):
                List of the words from ASR inference.
        """
        assert (len(trans) > 0) and (len(char_ts) > 0), "Transcript and char_ts length should not be 0."
        assert len(trans) == len(char_ts), "Transcript and timestamp lengths do not match."
        spaces_in_sec, word_list = [], []
        stt_idx = 0
        for k, s in enumerate(trans):
            if s == ' ':
                # Record the space span in seconds and close off the preceding word.
                spaces_in_sec.append(
                    [round(char_ts[k] * time_stride, 2), round((char_ts[k + 1] - 1) * time_stride, 2)]
                )
                word_list.append(trans[stt_idx:k])
                stt_idx = k + 1
        # Append the trailing word after the last space, if any.
        if len(trans) > stt_idx and trans[stt_idx] != ' ':
            word_list.append(trans[stt_idx:])
        return spaces_in_sec, word_list
    def run_ASR_CitriNet_CTC(self, asr_model: Type[EncDecCTCModelBPE]) -> Tuple[Dict, Dict]:
        """
        Launch CitriNet ASR model and collect logit, timestamps and text output.
        Args:
            asr_model (class):
                The loaded NeMo ASR model.
        Returns:
            words_dict (dict):
                Dictionary of the sequence of words from hypothesis.
            word_ts_dict (dict):
                Dictionary of the timestamps of hypothesis words.
        """
        words_dict, word_ts_dict = {}, {}
        werbpe_ts = WERBPE_TS(
            tokenizer=asr_model.tokenizer,
            batch_dim_index=0,
            use_cer=asr_model._cfg.get('use_cer', False),
            ctc_decode=True,
            dist_sync_on_step=True,
            log_prediction=asr_model._cfg.get("log_prediction", False),
        )
        with torch.cuda.amp.autocast():
            transcript_logits_list = asr_model.transcribe(
                self.audio_file_list, batch_size=self.asr_batch_size, logprobs=True
            )
            for idx, logit_np in enumerate(transcript_logits_list):
                uniq_id = get_uniqname_from_filepath(self.audio_file_list[idx])
                if self.beam_search_decoder:
                    logging.info(
                        f"Running beam-search decoder with LM {self.ctc_decoder_params['pretrained_language_model']}"
                    )
                    hyp_words, word_ts = self.run_pyctcdecode(logit_np)
                else:
                    log_prob = torch.from_numpy(logit_np)
                    greedy_predictions = log_prob.argmax(dim=-1, keepdim=False).unsqueeze(0)
                    logits_len = torch.from_numpy(np.array([log_prob.shape[0]]))
                    text, char_ts, word_ts = werbpe_ts.ctc_decoder_predictions_tensor_with_ts(
                        self.model_stride_in_secs, greedy_predictions, predictions_len=logits_len
                    )
                    hyp_words, word_ts = text[0].split(), word_ts[0]
                word_ts = self.align_decoder_delay(word_ts, self.decoder_delay_in_sec)
                assert len(hyp_words) == len(word_ts), "Words and word timestamp list length does not match."
                words_dict[uniq_id] = hyp_words
                word_ts_dict[uniq_id] = word_ts
        return words_dict, word_ts_dict
    def set_buffered_infer_params(self, asr_model: Type[EncDecCTCModelBPE]) -> Tuple[float, float, float]:
        """
        Prepare the parameters for the buffered inference.
        """
        cfg = copy.deepcopy(asr_model._cfg)
        OmegaConf.set_struct(cfg.preprocessor, False)
        # some changes for streaming scenario
        cfg.preprocessor.dither = 0.0
        cfg.preprocessor.pad_to = 0
        cfg.preprocessor.normalize = "None"
        preprocessor = nemo_asr.models.EncDecCTCModelBPE.from_config_dict(cfg.preprocessor)
        preprocessor.to(asr_model.device)
        # NOTE(review): `normalize` was overwritten to "None" just above, so this
        # condition is always true and the error is always logged; presumably the
        # ORIGINAL config value was meant to be checked — confirm.
        if cfg.preprocessor.normalize != "per_feature":
            logging.error(
                "Only EncDecCTCModelBPE models trained with per_feature normalization are supported currently"
            )
        # Disable config overwriting
        OmegaConf.set_struct(cfg.preprocessor, True)
        # Delay values are counted in model-stride frames and trim the buffer context.
        onset_delay = (
            math.ceil(((self.total_buffer_in_secs - self.chunk_len_in_sec) / 2) / self.model_stride_in_secs) + 1
        )
        mid_delay = math.ceil(
            (self.chunk_len_in_sec + (self.total_buffer_in_secs - self.chunk_len_in_sec) / 2)
            / self.model_stride_in_secs
        )
        tokens_per_chunk = math.ceil(self.chunk_len_in_sec / self.model_stride_in_secs)
        return onset_delay, mid_delay, tokens_per_chunk
    def run_ASR_BPE_CTC(self, asr_model: Type[EncDecCTCModelBPE]) -> Tuple[Dict, Dict]:
        """
        Launch CTC-BPE based ASR model and collect logit, timestamps and text output.
        Args:
            asr_model (class):
                The loaded NeMo ASR model.
        Returns:
            words_dict (dict):
                Dictionary of the sequence of words from hypothesis.
            word_ts_dict (dict):
                Dictionary of the time-stamps of words.
        """
        torch.manual_seed(0)
        torch.set_grad_enabled(False)
        words_dict, word_ts_dict = {}, {}
        werbpe_ts = WERBPE_TS(
            tokenizer=asr_model.tokenizer,
            batch_dim_index=0,
            use_cer=asr_model._cfg.get('use_cer', False),
            ctc_decode=True,
            dist_sync_on_step=True,
            log_prediction=asr_model._cfg.get("log_prediction", False),
        )
        frame_asr = FrameBatchASR_Logits(
            asr_model=asr_model,
            frame_len=self.chunk_len_in_sec,
            total_buffer=self.total_buffer_in_secs,
            batch_size=self.asr_batch_size,
        )
        onset_delay, mid_delay, tokens_per_chunk = self.set_buffered_infer_params(asr_model)
        onset_delay_in_sec = round(onset_delay * self.model_stride_in_secs, 2)
        with torch.cuda.amp.autocast():
            logging.info(f"Running ASR model {self.ASR_model_name}")
            for idx, audio_file_path in enumerate(self.audio_file_list):
                uniq_id = get_uniqname_from_filepath(audio_file_path)
                logging.info(f"[{idx+1}/{len(self.audio_file_list)}] FrameBatchASR: {audio_file_path}")
                frame_asr.clear_buffer()
                hyp, greedy_predictions_list, log_prob = get_wer_feat_logit(
                    audio_file_path,
                    frame_asr,
                    self.chunk_len_in_sec,
                    tokens_per_chunk,
                    mid_delay,
                    self.model_stride_in_secs,
                )
                if self.beam_search_decoder:
                    logging.info(
                        f"Running beam-search decoder with LM {self.ctc_decoder_params['pretrained_language_model']}"
                    )
                    log_prob = log_prob.unsqueeze(0).cpu().numpy()[0]
                    hyp_words, word_ts = self.run_pyctcdecode(log_prob, onset_delay_in_sec=onset_delay_in_sec)
                else:
                    logits_len = torch.from_numpy(np.array([len(greedy_predictions_list)]))
                    # Trim the buffer context so only the chunk's own frames remain.
                    greedy_predictions_list = greedy_predictions_list[onset_delay:-mid_delay]
                    greedy_predictions = torch.from_numpy(np.array(greedy_predictions_list)).unsqueeze(0)
                    text, char_ts, word_ts = werbpe_ts.ctc_decoder_predictions_tensor_with_ts(
                        self.model_stride_in_secs, greedy_predictions, predictions_len=logits_len
                    )
                    hyp_words, word_ts = text[0].split(), word_ts[0]
                word_ts = self.align_decoder_delay(word_ts, self.decoder_delay_in_sec)
                assert len(hyp_words) == len(word_ts), "Words and word timestamp list length does not match."
                words_dict[uniq_id] = hyp_words
                word_ts_dict[uniq_id] = word_ts
        return words_dict, word_ts_dict
    def get_word_ts_from_spaces(self, char_ts: List[float], spaces_in_sec: List[float], end_stamp: float) -> List[str]:
        """
        Take word timestamps from the spaces from the decoded prediction.
        Args:
            char_ts (list):
                List containing the timestamp for each character.
            spaces_in_sec (list):
                List containing the start and the end time of each space token.
            end_stamp (float):
                The end time of the session in sec.
        Returns:
            word_timestamps (list):
                List of the timestamps for the resulting words.
        """
        start_stamp_in_sec = round(char_ts[0] * self.model_stride_in_secs, 2)
        end_stamp_in_sec = round(end_stamp * self.model_stride_in_secs, 2)
        # Interior words span from the end of one space to the start of the next.
        word_timetamps_middle = [
            [round(spaces_in_sec[k][1], 2), round(spaces_in_sec[k + 1][0], 2),] for k in range(len(spaces_in_sec) - 1)
        ]
        word_timestamps = (
            [[start_stamp_in_sec, round(spaces_in_sec[0][0], 2)]]
            + word_timetamps_middle
            + [[round(spaces_in_sec[-1][1], 2), end_stamp_in_sec]]
        )
        return word_timestamps
    def run_pyctcdecode(
        self, logprob: np.ndarray, onset_delay_in_sec: float = 0, beam_width: int = 32
    ) -> Tuple[List[str], List[str]]:
        """
        Launch pyctcdecode with the loaded pretrained language model.
        Args:
            logprob (np.ndarray):
                The log probability from the ASR model inference in numpy array format.
            onset_delay_in_sec (float):
                The amount of delay that needs to be compensated for the timestamp outputs from pyctcdecode.
            beam_width (int):
                The beam width parameter for beam search decoding. Note that the
                value configured in ctc_decoder_params is what is actually used.
        Returns:
            hyp_words (list):
                List of words in the hypothesis.
            word_ts (list):
                List of word timestamps from the decoder.
        """
        beams = self.beam_search_decoder.decode_beams(logprob, beam_width=self.ctc_decoder_params['beam_width'])
        word_ts_beam, words_beam = [], []
        # beams[0] is the best beam; its third element holds (word, (start, end)) frame pairs.
        for idx, (word, _) in enumerate(beams[0][2]):
            ts = self.get_word_ts_from_wordframes(idx, beams[0][2], self.model_stride_in_secs, onset_delay_in_sec)
            word_ts_beam.append(ts)
            words_beam.append(word)
        hyp_words, word_ts = words_beam, word_ts_beam
        return hyp_words, word_ts
    @staticmethod
    def get_word_ts_from_wordframes(idx, word_frames: List[List[float]], frame_duration: float, onset_delay: float):
        """
        Extract word timestamps from word frames generated from pyctcdecode.
        """
        # Empirical alignment shift: 2.25 frames to the left, plus the onset delay.
        offset = -1 * 2.25 * frame_duration - onset_delay
        frame_begin = word_frames[idx][1][0]
        if frame_begin == -1:
            # Missing onset frame: fall back to the previous word's end (or 0 for the first word).
            frame_begin = word_frames[idx - 1][1][1] if idx != 0 else 0
        frame_end = word_frames[idx][1][1]
        return [
            round(max(frame_begin * frame_duration + offset, 0), 2),
            round(max(frame_end * frame_duration + offset, 0), 2),
        ]
    @staticmethod
    def align_decoder_delay(word_ts, decoder_delay_in_sec: float):
        """
        Subtract decoder_delay_in_sec from the word timestamp output.
        """
        for k in range(len(word_ts)):
            word_ts[k] = [
                round(word_ts[k][0] - decoder_delay_in_sec, 2),
                round(word_ts[k][1] - decoder_delay_in_sec, 2),
            ]
        return word_ts
|
"""Common classes and elements for Omnilogic Integration."""
from datetime import timedelta
import logging
from omnilogic import OmniLogicException
from homeassistant.const import ATTR_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ALL_ITEM_KINDS,
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
class OmniLogicUpdateCoordinator(DataUpdateCoordinator):
    """Class to manage fetching update data from single endpoint."""

    def __init__(
        self,
        hass: HomeAssistant,
        api: str,
        name: str,
        polling_interval: int,
    ):
        """Initialize the global Omnilogic data updater."""
        self.api = api
        super().__init__(
            hass=hass,
            logger=_LOGGER,
            name=name,
            update_interval=timedelta(seconds=polling_interval),
        )

    async def _async_update_data(self):
        """Fetch telemetry and flatten it into a dict keyed by (kind, systemId) paths."""
        try:
            telemetry = await self.api.get_telemetry_data()
        except OmniLogicException as error:
            raise UpdateFailed(f"Error updating from OmniLogic: {error}") from error

        def flatten(node, node_kind, path, acc):
            """Index one API node (or list of nodes) into the accumulator."""
            # Lists are unwrapped by recursing on every element.
            if isinstance(node, list):
                for child in node:
                    acc = flatten(child, node_kind, path, acc)
            # Nodes carrying a systemId are stored under their (kind, id) path.
            if "systemId" in node:
                path = path + (node_kind, node["systemId"])
                acc[path] = node
            # Recurse into any nested item kinds present on this node.
            for child_kind in ALL_ITEM_KINDS:
                if child_kind in node:
                    acc = flatten(node[child_kind], child_kind, path, acc)
            return acc

        return flatten(telemetry, "Backyard", (), {})
class OmniLogicEntity(CoordinatorEntity):
    """Defines the base OmniLogic entity."""

    def __init__(
        self,
        coordinator: OmniLogicUpdateCoordinator,
        kind: str,
        name: str,
        item_id: tuple,
        icon: str,
    ):
        """Initialize the OmniLogic Entity.

        item_id is the (kind, systemId, ...) tuple path built by the
        coordinator; a length of 6 means the item sits under a body of water.
        """
        super().__init__(coordinator)
        bow_id = None
        entity_data = coordinator.data[item_id]

        backyard_id = item_id[:2]
        if len(item_id) == 6:
            bow_id = item_id[:4]

        msp_system_id = coordinator.data[backyard_id]["systemId"]
        # Single quotes inside the f-string expressions: nesting the same quote
        # type is a SyntaxError before Python 3.12.
        entity_friendly_name = f"{coordinator.data[backyard_id]['BackyardName']} "
        unique_id = f"{msp_system_id}"

        if bow_id is not None:
            unique_id = f"{unique_id}_{coordinator.data[bow_id]['systemId']}"
            entity_friendly_name = (
                f"{entity_friendly_name}{coordinator.data[bow_id]['Name']} "
            )

        unique_id = f"{unique_id}_{coordinator.data[item_id]['systemId']}_{kind}"

        if entity_data.get("Name") is not None:
            entity_friendly_name = f"{entity_friendly_name} {entity_data['Name']}"

        entity_friendly_name = f"{entity_friendly_name} {name}"
        # Spaces are not valid in unique IDs.
        unique_id = unique_id.replace(" ", "_")

        self._kind = kind
        self._name = entity_friendly_name
        self._unique_id = unique_id
        self._item_id = item_id
        self._icon = icon
        self._attrs = {}
        self._msp_system_id = msp_system_id
        self._backyard_name = coordinator.data[backyard_id]["BackyardName"]

    @property
    def unique_id(self) -> str:
        """Return a unique, Home Assistant friendly identifier for this entity."""
        return self._unique_id

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def icon(self):
        """Return the icon for the entity."""
        return self._icon

    @property
    def device_state_attributes(self):
        """Return the attributes."""
        return self._attrs

    @property
    def device_info(self):
        """Define the device as back yard/MSP System."""
        return {
            ATTR_IDENTIFIERS: {(DOMAIN, self._msp_system_id)},
            ATTR_NAME: self._backyard_name,
            ATTR_MANUFACTURER: "Hayward",
            ATTR_MODEL: "OmniLogic",
        }
| """Common classes and elements for Omnilogic Integration."""
from datetime import timedelta
import logging
from omnilogic import OmniLogicException
from homeassistant.const import ATTR_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ALL_ITEM_KINDS,
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
class OmniLogicUpdateCoordinator(DataUpdateCoordinator):
    """Class to manage fetching update data from single endpoint."""

    def __init__(
        self,
        hass: HomeAssistant,
        api: str,
        name: str,
        polling_interval: int,
    ):
        """Initialize the global Omnilogic data updater."""
        # NOTE(review): `api` is annotated as str but is awaited below as an
        # OmniLogic client object (get_telemetry_data) — confirm the type.
        self.api = api
        super().__init__(
            hass=hass,
            logger=_LOGGER,
            name=name,
            update_interval=timedelta(seconds=polling_interval),
        )

    async def _async_update_data(self):
        """Fetch data from OmniLogic."""
        try:
            data = await self.api.get_telemetry_data()
        except OmniLogicException as error:
            raise UpdateFailed(f"Error updating from OmniLogic: {error}") from error
        parsed_data = {}

        def get_item_data(item, item_kind, current_id, data):
            """Get data per kind of Omnilogic API item."""
            # Lists are unwrapped by recursing on each element.
            if isinstance(item, list):
                for single_item in item:
                    data = get_item_data(single_item, item_kind, current_id, data)
            # Items carrying a systemId are indexed under their (kind, id) path.
            if "systemId" in item:
                system_id = item["systemId"]
                current_id = current_id + (item_kind, system_id)
                data[current_id] = item
            # Recurse into any nested item kinds present on this node.
            for kind in ALL_ITEM_KINDS:
                if kind in item:
                    data = get_item_data(item[kind], kind, current_id, data)
            return data

        parsed_data = get_item_data(data, "Backyard", (), parsed_data)
        return parsed_data
class OmniLogicEntity(CoordinatorEntity):
    """Defines the base OmniLogic entity."""

    def __init__(
        self,
        coordinator: OmniLogicUpdateCoordinator,
        kind: str,
        name: str,
        item_id: tuple,
        icon: str,
    ):
        """Initialize the OmniLogic Entity."""
        super().__init__(coordinator)
        bow_id = None
        # item_id is the (kind, systemId, ...) tuple path built by the
        # coordinator; a length of 6 means the item sits under a body of water.
        entity_data = coordinator.data[item_id]
        backyard_id = item_id[:2]
        if len(item_id) == 6:
            bow_id = item_id[:4]
        msp_system_id = coordinator.data[backyard_id]["systemId"]
        entity_friendly_name = f"{coordinator.data[backyard_id]['BackyardName']} "
        unique_id = f"{msp_system_id}"
        if bow_id is not None:
            # Prefix the body-of-water system id and name when applicable.
            unique_id = f"{unique_id}_{coordinator.data[bow_id]['systemId']}"
            entity_friendly_name = (
                f"{entity_friendly_name}{coordinator.data[bow_id]['Name']} "
            )
        unique_id = f"{unique_id}_{coordinator.data[item_id]['systemId']}_{kind}"
        if entity_data.get("Name") is not None:
            entity_friendly_name = f"{entity_friendly_name} {entity_data['Name']}"
        entity_friendly_name = f"{entity_friendly_name} {name}"
        # Spaces are not valid in unique IDs.
        unique_id = unique_id.replace(" ", "_")
        self._kind = kind
        self._name = entity_friendly_name
        self._unique_id = unique_id
        self._item_id = item_id
        self._icon = icon
        self._attrs = {}
        self._msp_system_id = msp_system_id
        self._backyard_name = coordinator.data[backyard_id]["BackyardName"]

    @property
    def unique_id(self) -> str:
        """Return a unique, Home Assistant friendly identifier for this entity."""
        return self._unique_id

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def icon(self):
        """Return the icon for the entity."""
        return self._icon

    @property
    def device_state_attributes(self):
        """Return the attributes."""
        return self._attrs

    @property
    def device_info(self):
        """Define the device as back yard/MSP System."""
        return {
            ATTR_IDENTIFIERS: {(DOMAIN, self._msp_system_id)},
            ATTR_NAME: self._backyard_name,
            ATTR_MANUFACTURER: "Hayward",
            ATTR_MODEL: "OmniLogic",
        }
|
from bs4 import BeautifulSoup
from spotipy.oauth2 import SpotifyOAuth
import requests
import spotipy
# Spotify app credentials (placeholders — supply real values before running).
SPOTIFY_CLIENT_ID = "YOUR_SPOTIFY_CLIENT_ID"
SPOTIFY_CLIENT_SECRET = "YOUR_SPOTIFY_CLIENT_SECRET"

# Authenticate against the Spotify Web API; the OAuth token is cached in token.txt.
sp = spotipy.Spotify(
    auth_manager=SpotifyOAuth(
        client_id=SPOTIFY_CLIENT_ID,
        client_secret=SPOTIFY_CLIENT_SECRET,
        redirect_uri="https://www.example.com",
        scope="playlist-modify-private",
        show_dialog=True,
        cache_path="token.txt"
    )
)
user_id = sp.current_user()["id"]

travel_date = input("Which year do you want to travel to? Type the date in this format YYYY-MM-DD:")
travel_year = travel_date[:4]

# Scrape the Billboard Hot 100 chart page for the requested date.
billboard_url = f"https://www.billboard.com/charts/hot-100/{travel_date}"
response = requests.get(billboard_url)
soup = BeautifulSoup(response.text, "html.parser")
song_names = [name.getText() for name in soup.select(".chart-element__information__song")]
song_artists = [name.getText() for name in soup.select(".chart-element__information__artist")]
songs = [{
    "artist": song_artists[i],
    "name": song_names[i]
} for i in range(len(song_artists))]
print(songs)

# Look up each scraped song on Spotify.  Single quotes inside the f-string
# expressions: nesting the same quote type is a SyntaxError before Python 3.12.
song_urls = []
for song in songs:
    sp_song = sp.search(f"track:{song['name']} year:{travel_year}", type="track")
    try:
        url = sp_song["tracks"]["items"][0]["uri"]
        song_urls.append(url)
    except IndexError:
        print(f"{song['name']} doesn't exist in Spotify. Skipped.")

# Create a private playlist and add every track that was found.
playlist = sp.user_playlist_create(user=user_id, name=f"{travel_date} Billboard 100", public=False)
sp.playlist_add_items(playlist_id=playlist["id"], items=song_urls)
| from bs4 import BeautifulSoup
from spotipy.oauth2 import SpotifyOAuth
import requests
import spotipy
# Spotify app credentials (placeholders — supply real values before running).
SPOTIFY_CLIENT_ID = "YOUR_SPOTIFY_CLIENT_ID"
SPOTIFY_CLIENT_SECRET = "YOUR_SPOTIFY_CLIENT_SECRET"
# Authenticate against the Spotify Web API; the OAuth token is cached in token.txt.
sp = spotipy.Spotify(
    auth_manager=SpotifyOAuth(
        client_id=SPOTIFY_CLIENT_ID,
        client_secret=SPOTIFY_CLIENT_SECRET,
        redirect_uri="https://www.example.com",
        scope="playlist-modify-private",
        show_dialog=True,
        cache_path="token.txt"
    )
)
user_id = sp.current_user()["id"]
travel_date = input("Which year do you want to travel to? Type the date in this format YYYY-MM-DD:")
travel_year = travel_date[:4]
# Scrape the Billboard Hot 100 chart page for the requested date.
billboard_url = f"https://www.billboard.com/charts/hot-100/{travel_date}"
response = requests.get(billboard_url)
soup = BeautifulSoup(response.text, "html.parser")
song_names = [name.getText() for name in soup.select(".chart-element__information__song")]
song_artists = [name.getText() for name in soup.select(".chart-element__information__artist")]
songs = [{
    "artist": song_artists[i],
    "name": song_names[i]
} for i in range(len(song_artists))]
print(songs)
# Look up each scraped song on Spotify; skip songs that return no results.
song_urls = []
for song in songs:
    sp_song = sp.search(f"track:{song['name']} year:{travel_year}", type="track")
    try:
        url = sp_song["tracks"]["items"][0]["uri"]
        song_urls.append(url)
    except IndexError:
        print(f"{song['name']} doesn't exist in Spotify. Skipped.")
# Create a private playlist and add every track that was found.
playlist = sp.user_playlist_create(user=user_id, name=f"{travel_date} Billboard 100", public=False)
sp.playlist_add_items(playlist_id=playlist["id"], items=song_urls)
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2022 Kyle Balisnomo, Abram Hindle, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse
def help():
    """Print command-line usage for the client."""
    usage = "httpclient.py [GET/POST] [URL]\n"
    print(usage)
class HTTPResponse(object):
    """Minimal container for an HTTP status code and response body."""

    def __init__(self, code=200, body=""):
        self.code, self.body = code, body
class HTTPClient(object):
    """Minimal HTTP/1.1 client implementing GET and POST over raw sockets."""
    # def get_host_port(self,url):

    def connect(self, host, port):
        """Open a TCP connection to host:port and store it on self.socket."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((host, port))
        return None

    def get_code(self, data):
        """Return the HTTP status code (as a string) from a raw response.

        The status line looks like "HTTP/1.1 200 OK"; the code is its
        second whitespace-separated token.
        """
        lines = []
        for line in data.splitlines():
            lines.append(line)
        code = lines[0].split()[1]
        return code

    def get_headers(self, data):
        """Not implemented; headers are not needed by the callers here."""
        return None

    def get_body(self, data):
        """Return everything after the first blank line (the response body)."""
        # maxsplit=1 keeps bodies that themselves contain "\r\n\r\n" intact.
        body = data.split("\r\n\r\n", 1)[1]
        return body

    def sendall(self, data):
        """Encode `data` as UTF-8 and send it on the open socket."""
        self.socket.sendall(data.encode('utf-8'))

    def close(self):
        """Close the underlying socket."""
        self.socket.close()

    # read everything from the socket
    def recvall(self, sock):
        """Read from `sock` until EOF and return the decoded UTF-8 text."""
        buffer = bytearray()
        done = False
        while not done:
            part = sock.recv(1024)
            if (part):
                buffer.extend(part)
            else:
                done = not part
        return buffer.decode('utf-8')

    def GET(self, url, args=None):
        """Send an HTTP/1.1 GET request to `url`; return an HTTPResponse.

        `args` is accepted for interface symmetry with POST but unused.
        """
        parsed_url = urllib.parse.urlparse(url)
        port = parsed_url.port
        if port is None:
            port = 80  # default HTTP port
        self.connect(parsed_url.hostname, port)
        path = parsed_url.path if parsed_url.path != "" else "/"
        host = parsed_url.hostname + ":" + str(port)
        # The trailing "\r\n\r\n" on the Accept line terminates the headers.
        lines = [f"GET {path} HTTP/1.1",
                 f"Host: {host}",
                 f"Connection: close",
                 f"Accept: */*\r\n\r\n"
                 ]
        request = "\r\n".join(lines)
        self.sendall(request)
        response = self.recvall(self.socket)
        print(response)
        code = self.get_code(response)
        body = self.get_body(response)
        self.close()
        return HTTPResponse(int(code), body)

    def POST(self, url, args=None):
        """Send a form-encoded HTTP/1.1 POST to `url`; return an HTTPResponse.

        `args` is a dict of form fields (may be None for an empty body).
        """
        parsed_url = urllib.parse.urlparse(url)
        port = parsed_url.port
        if port is None:
            port = 80  # default HTTP port
        self.connect(parsed_url.hostname, port)
        # Create body
        args_list = [f"{key}={value}" for key, value in args.items()] if args else []
        body = "&".join(args_list)
        path = parsed_url.path if parsed_url.path != "" else "/"
        host = parsed_url.hostname + ":" + str(port)
        # Single quotes around 'utf-8': nesting the same quote type inside an
        # f-string is a SyntaxError before Python 3.12.
        lines = [f"POST {path} HTTP/1.1",
                 f"Host: {host}",
                 f"Content-Type: application/x-www-form-urlencoded",
                 f"Content-Length: {len(body.encode('utf-8'))}",
                 f"Connection: close",
                 f"Accept: */*",
                 f"\r\n{body}"
                 ]
        request = "\r\n".join(lines)
        self.sendall(request)
        response = self.recvall(self.socket)
        print(response)
        code = self.get_code(response)
        body = self.get_body(response)
        self.close()
        return HTTPResponse(int(code), body)

    def command(self, url, command="GET", args=None):
        """Dispatch to POST or GET based on the `command` string."""
        if (command == "POST"):
            return self.POST(url, args)
        else:
            return self.GET(url, args)
if __name__ == "__main__":
    # Usage: httpclient.py [GET/POST] URL — or just a URL for a GET.
    client = HTTPClient()
    command = "GET"
    argc = len(sys.argv)
    if argc <= 1:
        help()
        sys.exit(1)
    elif argc == 3:
        print(client.command(sys.argv[2], sys.argv[1]))
    else:
        print(client.command(sys.argv[1]))
| #!/usr/bin/env python3
# coding: utf-8
# Copyright 2022 Kyle Balisnomo, Abram Hindle, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse
def help():
    """Print command-line usage for the client."""
    print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
    """Minimal container for an HTTP status code and response body."""

    def __init__(self, code=200, body=""):
        self.code = code
        self.body = body
class HTTPClient(object):
    """Minimal HTTP/1.1 client implementing GET and POST over raw sockets."""
    #def get_host_port(self,url):

    def connect(self, host, port):
        """Open a TCP connection to host:port and store it on self.socket."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((host, port))
        return None

    def get_code(self, data):
        """Return the HTTP status code (as a string) from a raw response."""
        lines = []
        for line in data.splitlines():
            lines.append(line)
        # Status line looks like "HTTP/1.1 200 OK"; the code is the 2nd token.
        code = lines[0].split()[1]
        return code

    def get_headers(self, data):
        """Not implemented; headers are not needed by the callers here."""
        return None

    def get_body(self, data):
        """Return the text after the first blank line (the response body)."""
        # NOTE(review): without maxsplit=1 a body that itself contains
        # "\r\n\r\n" is truncated at that point — confirm intended.
        body = data.split("\r\n\r\n")[1]
        return body

    def sendall(self, data):
        """Encode `data` as UTF-8 and send it on the open socket."""
        self.socket.sendall(data.encode('utf-8'))

    def close(self):
        """Close the underlying socket."""
        self.socket.close()

    # read everything from the socket
    def recvall(self, sock):
        """Read from `sock` until EOF and return the decoded UTF-8 text."""
        buffer = bytearray()
        done = False
        while not done:
            part = sock.recv(1024)
            if (part):
                buffer.extend(part)
            else:
                done = not part
        return buffer.decode('utf-8')

    def GET(self, url, args=None):
        """Send an HTTP/1.1 GET request to `url`; return an HTTPResponse."""
        parsed_url = urllib.parse.urlparse(url)
        port = parsed_url.port
        if port == None:
            # Default HTTP port when the URL carries none.
            port = 80
        self.connect(parsed_url.hostname, port)
        path = parsed_url.path if parsed_url.path != "" else "/"
        host = parsed_url.hostname + ":" + str(port)
        # The trailing "\r\n\r\n" on the Accept line terminates the headers.
        lines = [f"GET {path} HTTP/1.1",
                 f"Host: {host}",
                 f"Connection: close",
                 f"Accept: */*\r\n\r\n"
                 ]
        request = "\r\n".join(lines)
        self.sendall(request)
        response = self.recvall(self.socket)
        print(response)
        code = self.get_code(response)
        body = self.get_body(response)
        self.close()
        return HTTPResponse(int(code), body)

    def POST(self, url, args=None):
        """Send a form-encoded HTTP/1.1 POST to `url`; return an HTTPResponse."""
        parsed_url = urllib.parse.urlparse(url)
        port = parsed_url.port
        if port == None:
            # Default HTTP port when the URL carries none.
            port = 80
        self.connect(parsed_url.hostname, port)
        # Create body
        args_list = [f"{key}={value}" for key, value in args.items()] if args else []
        body = "&".join(args_list)
        path = parsed_url.path if parsed_url.path != "" else "/"
        host = parsed_url.hostname + ":" + str(port)
        lines = [f"POST {path} HTTP/1.1",
                 f"Host: {host}",
                 f"Content-Type: application/x-www-form-urlencoded",
                 f"Content-Length: {len(body.encode('utf-8'))}",
                 f"Connection: close",
                 f"Accept: */*",
                 f"\r\n{body}"
                 ]
        request = "\r\n".join(lines)
        self.sendall(request)
        response = self.recvall(self.socket)
        print(response)
        code = self.get_code(response)
        body = self.get_body(response)
        self.close()
        return HTTPResponse(int(code), body)

    def command(self, url, command="GET", args=None):
        """Dispatch to POST or GET based on the `command` string."""
        if (command == "POST"):
            return self.POST( url, args )
        else:
            return self.GET( url, args )
if __name__ == "__main__":
    # Usage: httpclient.py [GET/POST] URL — or just a URL for a GET.
    client = HTTPClient()
    command = "GET"
    if (len(sys.argv) <= 1):
        help()
        sys.exit(1)
    elif (len(sys.argv) == 3):
        # Two arguments: method then URL.
        print(client.command( sys.argv[2], sys.argv[1] ))
    else:
        # One argument: URL only, defaults to GET.
        print(client.command( sys.argv[1] ))
|
import os
import time
import json
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import vgtk
# TODO add dataparallel
# TODO add the_world = ipdb.set_trace
class Trainer():
    """Base training harness.

    Seeds all RNGs, creates the per-run experiment directory, wires up
    logging and checkpointing, and drives the train/test loops.  Subclasses
    must implement step(), epoch_step(), _setup_datasets(), _setup_model()
    and _setup_metric().
    """

    def __init__(self, opt):
        super(Trainer, self).__init__()
        opt_dict = vgtk.dump_args(opt)
        self.check_opt(opt)

        # set random seed across python / numpy / torch for reproducibility
        random.seed(self.opt.seed)
        np.random.seed(self.opt.seed)
        torch.backends.cudnn.deterministic = True
        torch.manual_seed(self.opt.seed)
        torch.cuda.manual_seed_all(self.opt.seed)
        # np.set_printoptions(precision=3, suppress=True)

        # create model dir (experiment id gets a mode suffix outside training)
        experiment_id = self.opt.experiment_id if self.opt.mode == 'train' else f"{self.opt.experiment_id}_{self.opt.mode}"
        # Double quotes inside the single-quoted f-string: nesting the same
        # quote type is a SyntaxError before Python 3.12.
        # NOTE(review): ':' in the timestamp is not a valid filename character
        # on Windows — confirm the target platform.
        model_id = f'model_{time.strftime("%Y%m%d_%H:%M:%S")}'
        self.root_dir = os.path.join(self.opt.model_dir, experiment_id, model_id)
        os.makedirs(self.root_dir, exist_ok=True)

        # saving opt
        opt_path = os.path.join(self.root_dir, 'opt.txt')
        # TODO: hierarchical args are not compatible wit json dump
        with open(opt_path, 'w') as fout:
            json.dump(opt_dict, fout, indent=2)

        # create logger
        log_path = os.path.join(self.root_dir, 'log.txt')
        self.logger = vgtk.Logger(log_file=log_path)
        self.logger.log('Setup', f'Logger created! Hello World!')
        self.logger.log('Setup', f'Random seed has been set to {self.opt.seed}')
        self.logger.log('Setup', f'Experiment id: {experiment_id}')
        self.logger.log('Setup', f'Model id: {model_id}')

        # ckpt dir
        self.ckpt_dir = os.path.join(self.root_dir, 'ckpt')
        os.makedirs(self.ckpt_dir, exist_ok=True)
        self.logger.log('Setup', f'Checkpoint dir created!')

        # build dataset
        self._setup_datasets()
        # create network
        self._setup_model()
        self._setup_optim()
        self._setup_metric()

        # init
        self.start_epoch = 0
        self.start_iter = 0

        # check resuming: weights are loaded before DataParallel wrapping so
        # the state-dict keys match the bare model
        self._resume_from_ckpt(opt.resume_path)
        self._setup_model_multi_gpu()

        # setup summary
        self.summary = vgtk.Summary()
        # setup timer
        self.timer = vgtk.Timer()
        self.summary.register(['Time'])

        # done
        self.logger.log('Setup', 'Setup finished!')

    def train(self):
        """Run training: epoch-driven when num_epochs is set, else iteration-driven."""
        self.opt.mode = 'train'
        self.model.train()
        if self.opt.num_epochs is not None:
            self.train_epoch()
        else:
            self.train_iter()

    def test(self):
        """Switch the model to evaluation mode."""
        self.opt.mode = 'test'
        self.model.eval()

    def train_iter(self):
        """Iteration-based training loop with periodic logging and saving."""
        for i in range(self.opt.num_iterations):
            self.timer.set_point('train_iter')
            # NOTE(review): scheduler stepped before self.step() — confirm
            # this ordering is intended.
            self.lr_schedule.step()
            self.step()
            # print({'Time': self.timer.reset_point('train_iter')})
            self.summary.update({'Time': self.timer.reset_point('train_iter')})
            if i % self.opt.log_freq == 0:
                if hasattr(self, 'epoch_counter'):
                    step = f'Epoch {self.epoch_counter}, Iter {i}'
                else:
                    step = f'Iter {i}'
                self._print_running_stats(step)
            if i > 0 and i % self.opt.save_freq == 0:
                self._save_network(f'Iter{i}')
        self.test()

    def train_epoch(self):
        """Epoch-based training loop with periodic logging and saving."""
        for i in range(self.opt.num_epochs):
            self.lr_schedule.step()
            self.epoch_step()
            if i % self.opt.log_freq == 0:
                self._print_running_stats(f'Epoch {i}')
            if i > 0 and i % self.opt.save_freq == 0:
                self._save_network(f'Epoch{i}')

    # TODO: check that the options have the required key collection
    def check_opt(self, opt, print_opt=True):
        """Store options and pin the default device to CUDA."""
        self.opt = opt
        self.opt.device = torch.device('cuda')

    def _print_running_stats(self, step):
        """Log the accumulated summary statistics for the given step label."""
        stats = self.summary.get()
        self.logger.log('Training', f'{step}: {stats}')

    def step(self):
        """One optimization step; must be provided by the subclass."""
        raise NotImplementedError('Not implemented')

    def epoch_step(self):
        """One full epoch; must be provided by the subclass."""
        raise NotImplementedError('Not implemented')

    def _setup_datasets(self):
        """Create train/val/test datasets; must be provided by the subclass."""
        self.logger.log('Setup', 'Setup datasets!')
        self.dataset_train = None
        self.dataset_val = None
        self.dataset_test = None
        raise NotImplementedError('Not implemented')

    def _setup_model(self):
        """Build self.model; must be provided by the subclass."""
        self.logger.log('Setup', 'Setup model!')
        self.model = None
        raise NotImplementedError('Not implemented')

    def _setup_model_multi_gpu(self):
        """Wrap the model in DataParallel when more than one GPU is visible."""
        if torch.cuda.device_count() > 1:
            self.logger.log('Setup', 'Using Multi-gpu and DataParallel!')
            self._use_multi_gpu = True
            self.model = nn.DataParallel(self.model)
        else:
            self.logger.log('Setup', 'Using Single-gpu!')
            self._use_multi_gpu = False

    def _setup_optim(self):
        """Create the Adam optimizer and the vgtk learning-rate scheduler."""
        self.logger.log('Setup', 'Setup optimizer!')
        # torch.autograd.set_detect_anomaly(True)
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.opt.train_lr.init_lr)
        self.lr_schedule = vgtk.LearningRateScheduler(self.optimizer,
                                                      **vars(self.opt.train_lr))
        self.logger.log('Setup', 'Optimizer all-set!')

    def _setup_metric(self):
        """Build the evaluation metric; must be provided by the subclass."""
        self.logger.log('Setup', 'Setup metric!')
        self.metric = None
        raise NotImplementedError('Not implemented')

    def _resume_from_ckpt(self, resume_path):
        """Load model weights from a checkpoint (raw state_dict only).

        Optimizer state and epoch/iter counters are not restored here.
        """
        if resume_path is None:
            self.logger.log('Setup', f'Seems like we train from scratch!')
            return
        self.logger.log('Setup', f'Resume from checkpoint: {resume_path}')
        state_dicts = torch.load(resume_path)
        self.model.load_state_dict(state_dicts)
        # self.optimizer.load_state_dict(state_dicts['optimizer'])
        # self.start_epoch = state_dicts['epoch']
        # self.start_iter = state_dicts['iter']
        self.logger.log('Setup', f'Resume finished! Great!')

    # TODO
    def _save_network(self, step, label=None, path=None):
        """Save model weights to <root>/ckpt (or to an explicit path prefix).

        The model is moved to CPU for saving and moved back to the configured
        device afterwards.
        """
        label = self.opt.experiment_id if label is None else label
        if path is None:
            save_filename = '%s_net_%s.pth' % (label, step)
            save_path = os.path.join(self.root_dir, 'ckpt', save_filename)
        else:
            save_path = f'{path}.pth'
        if self._use_multi_gpu:
            # Unwrap DataParallel so keys match a single-GPU model.
            params = self.model.module.cpu().state_dict()
        else:
            params = self.model.cpu().state_dict()
        torch.save(params, save_path)
        if torch.cuda.is_available():
            # torch.cuda.device(gpu_id)
            self.model.to(self.opt.device)
        self.logger.log('Training', f'Checkpoint saved to: {save_path}!')
|
import os
import time
import json
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import vgtk
# TODO add dataparallel
# TODO add the_world = ipdb.set_trace
class Trainer():
    """Base training harness.

    Seeds all RNGs, creates the per-run experiment directory, wires up
    logging and checkpointing, and drives the train/test loops.  Subclasses
    implement step()/epoch_step() and the _setup_*() hooks.
    """

    def __init__(self, opt):
        super(Trainer, self).__init__()
        opt_dict = vgtk.dump_args(opt)
        self.check_opt(opt)
        # set random seed across python / numpy / torch for reproducibility
        random.seed(self.opt.seed)
        np.random.seed(self.opt.seed)
        torch.backends.cudnn.deterministic = True
        torch.manual_seed(self.opt.seed)
        torch.cuda.manual_seed_all(self.opt.seed)
        # np.set_printoptions(precision=3, suppress=True)
        # create model dir (experiment id gets a mode suffix outside training)
        experiment_id = self.opt.experiment_id if self.opt.mode == 'train' else f"{self.opt.experiment_id}_{self.opt.mode}"
        # NOTE(review): ':' in the timestamp is not a valid filename character
        # on Windows — confirm the target platform.
        model_id = f'model_{time.strftime("%Y%m%d_%H:%M:%S")}'
        self.root_dir = os.path.join(self.opt.model_dir, experiment_id, model_id)
        os.makedirs(self.root_dir, exist_ok=True)
        # saving opt
        opt_path = os.path.join(self.root_dir, 'opt.txt')
        # TODO: hierarchical args are not compatible wit json dump
        with open(opt_path, 'w') as fout:
            json.dump(opt_dict, fout, indent=2)
        # create logger
        log_path = os.path.join(self.root_dir, 'log.txt')
        self.logger = vgtk.Logger(log_file=log_path)
        self.logger.log('Setup', f'Logger created! Hello World!')
        self.logger.log('Setup', f'Random seed has been set to {self.opt.seed}')
        self.logger.log('Setup', f'Experiment id: {experiment_id}')
        self.logger.log('Setup', f'Model id: {model_id}')
        # ckpt dir
        self.ckpt_dir = os.path.join(self.root_dir, 'ckpt')
        os.makedirs(self.ckpt_dir, exist_ok=True)
        self.logger.log('Setup', f'Checkpoint dir created!')
        # build dataset
        self._setup_datasets()
        # create network
        self._setup_model()
        self._setup_optim()
        self._setup_metric()
        # init
        self.start_epoch = 0
        self.start_iter = 0
        # check resuming (weights load before DataParallel wrapping so the
        # state-dict keys match the bare model)
        self._resume_from_ckpt(opt.resume_path)
        self._setup_model_multi_gpu()
        # setup summary
        self.summary = vgtk.Summary()
        # setup timer
        self.timer = vgtk.Timer()
        self.summary.register(['Time'])
        # done
        self.logger.log('Setup', 'Setup finished!')

    def train(self):
        """Run training: epoch-driven when num_epochs is set, else iteration-driven."""
        self.opt.mode = 'train'
        self.model.train()
        if self.opt.num_epochs is not None:
            self.train_epoch()
        else:
            self.train_iter()

    def test(self):
        """Switch the model to evaluation mode."""
        self.opt.mode = 'test'
        self.model.eval()

    def train_iter(self):
        """Iteration-based training loop with periodic logging and saving."""
        for i in range(self.opt.num_iterations):
            self.timer.set_point('train_iter')
            # NOTE(review): scheduler stepped before self.step() — confirm
            # this ordering is intended.
            self.lr_schedule.step()
            self.step()
            # print({'Time': self.timer.reset_point('train_iter')})
            self.summary.update({'Time': self.timer.reset_point('train_iter')})
            if i % self.opt.log_freq == 0:
                if hasattr(self, 'epoch_counter'):
                    step = f'Epoch {self.epoch_counter}, Iter {i}'
                else:
                    step = f'Iter {i}'
                self._print_running_stats(step)
            if i > 0 and i % self.opt.save_freq == 0:
                self._save_network(f'Iter{i}')
        self.test()

    def train_epoch(self):
        """Epoch-based training loop with periodic logging and saving."""
        for i in range(self.opt.num_epochs):
            self.lr_schedule.step()
            self.epoch_step()
            if i % self.opt.log_freq == 0:
                self._print_running_stats(f'Epoch {i}')
            if i > 0 and i % self.opt.save_freq == 0:
                self._save_network(f'Epoch{i}')

    # TODO: check that the options have the required key collection
    def check_opt(self, opt, print_opt=True):
        """Store options and pin the default device to CUDA."""
        self.opt = opt
        self.opt.device = torch.device('cuda')

    def _print_running_stats(self, step):
        """Log the accumulated summary statistics for the given step label."""
        stats = self.summary.get()
        self.logger.log('Training', f'{step}: {stats}')

    def step(self):
        """One optimization step; must be provided by the subclass."""
        raise NotImplementedError('Not implemented')

    def epoch_step(self):
        """One full epoch; must be provided by the subclass."""
        raise NotImplementedError('Not implemented')

    def _setup_datasets(self):
        """Create train/val/test datasets; must be provided by the subclass."""
        self.logger.log('Setup', 'Setup datasets!')
        self.dataset_train = None
        self.dataset_val = None
        self.dataset_test = None
        raise NotImplementedError('Not implemented')

    def _setup_model(self):
        """Build self.model; must be provided by the subclass."""
        self.logger.log('Setup', 'Setup model!')
        self.model = None
        raise NotImplementedError('Not implemented')

    def _setup_model_multi_gpu(self):
        """Wrap the model in DataParallel when more than one GPU is visible."""
        if torch.cuda.device_count() > 1:
            self.logger.log('Setup', 'Using Multi-gpu and DataParallel!')
            self._use_multi_gpu = True
            self.model = nn.DataParallel(self.model)
        else:
            self.logger.log('Setup', 'Using Single-gpu!')
            self._use_multi_gpu = False

    def _setup_optim(self):
        """Create the Adam optimizer and the vgtk learning-rate scheduler."""
        self.logger.log('Setup', 'Setup optimizer!')
        # torch.autograd.set_detect_anomaly(True)
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.opt.train_lr.init_lr)
        self.lr_schedule = vgtk.LearningRateScheduler(self.optimizer,
                                                      **vars(self.opt.train_lr))
        self.logger.log('Setup', 'Optimizer all-set!')

    def _setup_metric(self):
        """Build the evaluation metric; must be provided by the subclass."""
        self.logger.log('Setup', 'Setup metric!')
        self.metric = None
        raise NotImplementedError('Not implemented')

    # def _resume_from_ckpt(self, resume_path):
    #     if resume_path is None:
    #         self.logger.log('Setup', f'Seems like we train from scratch!')
    #         return
    #     self.logger.log('Setup', f'Resume from checkpoint: {resume_path}')
    #     state_dicts = torch.load(resume_path)
    #     self.model.load_state_dict(state_dicts['model'])
    #     self.optimizer.load_state_dict(state_dicts['optimizer'])
    #     self.start_epoch = state_dicts['epoch']
    #     self.start_iter = state_dicts['iter']
    #     self.logger.log('Setup', f'Resume finished! Great!')
    def _resume_from_ckpt(self, resume_path):
        """Load model weights from a checkpoint (raw state_dict only).

        Optimizer state and epoch/iter counters are not restored here.
        """
        if resume_path is None:
            self.logger.log('Setup', f'Seems like we train from scratch!')
            return
        self.logger.log('Setup', f'Resume from checkpoint: {resume_path}')
        state_dicts = torch.load(resume_path)
        # self.model = nn.DataParallel(self.model)
        self.model.load_state_dict(state_dicts)
        # self.model = self.model.module
        # self.optimizer.load_state_dict(state_dicts['optimizer'])
        # self.start_epoch = state_dicts['epoch']
        # self.start_iter = state_dicts['iter']
        self.logger.log('Setup', f'Resume finished! Great!')

    # TODO
    def _save_network(self, step, label=None, path=None):
        """Save model weights to <root>/ckpt (or to an explicit path prefix).

        The model is moved to CPU for saving and moved back afterwards.
        """
        label = self.opt.experiment_id if label is None else label
        if path is None:
            save_filename = '%s_net_%s.pth' % (label, step)
            save_path = os.path.join(self.root_dir, 'ckpt', save_filename)
        else:
            save_path = f'{path}.pth'
        if self._use_multi_gpu:
            # Unwrap DataParallel so keys match a single-GPU model.
            params = self.model.module.cpu().state_dict()
        else:
            params = self.model.cpu().state_dict()
        torch.save(params, save_path)
        if torch.cuda.is_available():
            # torch.cuda.device(gpu_id)
            self.model.to(self.opt.device)
        self.logger.log('Training', f'Checkpoint saved to: {save_path}!')
|
import os
import subprocess
from refactorings.utils.utils2 import get_program, Rewriter, get_filenames_in_dir
from refactorings.utils.scope_listener import get_program2
from refactorings.utils.utils_listener_fast import TokensInfo, Field, Class, Method, LocalVariable, ClassImport
from antlr4.TokenStreamRewriter import TokenStreamRewriter
class UnResolvedMetaError(Exception):
    """Project-specific error type (raising sites are not in this file chunk)."""
class NonStaticFieldRefactorError(Exception):
    """Project-specific error type (raising sites are not in this file chunk)."""
class MoveFieldRefactoring:
def __init__(self, source_filenames: list, package_name: str,
             class_name: str, field_name: str, target_class_name: str,
             target_package_name: str):
    """Store the move-field refactoring parameters and locate the formatter jar."""
    (self.source_filenames, self.package_name, self.class_name,
     self.field_name, self.target_class_name, self.target_package_name) = (
        source_filenames, package_name, class_name,
        field_name, target_class_name, target_package_name)
    # Google Java formatter bundled with the project assets.
    self.formatter = os.path.abspath("../assets/formatter/google-java-format-1.10.0-all-deps.jar")
def get_metadata(self, program):
    """
    :param program: The program which is extracted from the get_program() method
    :return: The source class, target_class and the field which is to be moved
    if there are no such classes or fields or packages in the program, KeyError will be raised
    """
    source_class = program.packages[self.package_name].classes[self.class_name]
    target_class = program.packages[self.target_package_name].classes[self.target_class_name]
    moved_field = source_class.fields[self.field_name]
    return source_class, target_class, moved_field
@staticmethod
def __stringify(tokens, start, end):
    """
    :param tokens: a list of tokens
    :param start: the index of the first token you want
    :param end: the index of the last token you want
    :return: String of the desired tokens

    Concatenates the text of tokens[start:end], skipping single-space tokens.
    """
    parts = [tok.text for tok in tokens[start:end] if tok.text != ' ']
    return "".join(parts)
def __is_var_in_method_params(self, tokens, token, method):
    """
    :param tokens: all tokens of the file containing ``method``
    :param token: the token of the identifier being classified
    :param method: the method whose parameter list is consulted
    :return: True when the token refers to a method parameter, not the field
    Checks if the given token is a method-parameter occurrence.
    """
    method_params = list(map(lambda p: p[1], method.parameters))
    if token.text in method_params:
        # The two tokens before the identifier form its qualifier,
        # e.g. "this." or "ClassName.".
        selector = self.__stringify(tokens, token.tokenIndex - 2, token.tokenIndex)
        if method.class_name == self.class_name:
            # Inside the source class an unqualified name shadows the field,
            # while "this."/"SourceClass." still means the field itself.
            return selector not in ['this.', self.class_name + '.']
        # NOTE(review): in foreign classes a "SourceClass." qualifier is
        # treated as a parameter match — looks inverted; confirm intent.
        return selector == self.class_name + '.'
    return False
def __is_declared_in_method(self, tokens, token, method):
    """
    :param tokens: all tokens of the file containing ``method``
    :param token: the token of the identifier being classified
    :param method: the method whose local variables are consulted
    :return: True when the token refers to a local variable declared before it
    Checks if the given token is a locally declared variable of the method.
    """
    selector = self.__stringify(tokens, token.tokenIndex - 2, token.tokenIndex)
    if method.class_name == self.class_name:
        if selector in ['this.', self.class_name + '.']:
            # Qualified access always means the field, never a local.
            return False
    elif selector == self.class_name + '.':
        return False
    local_exp_var = method.body_local_vars_and_expr_names
    try:
        # First LocalVariable entry with the same identifier.
        local_var_definition = next(filter(lambda x: isinstance(x, LocalVariable) and
                                           x.identifier == token.text, local_exp_var))
        # Local usage only when its declaration precedes the token.
        start = local_var_definition.parser_context.start.start
        if start <= token.start:
            return True
        return False
    except StopIteration:
        # No local variable of that name exists in the method body.
        return False
def __is_declared_in_class(self, tokens, token, method):
    """
    :param tokens: all tokens of the file containing ``method``
    :param token: the token of the identifier being checked
    :param method: the method whose enclosing class is consulted
    :return: False for qualified accesses; otherwise falls through and
        implicitly returns None (falsy)
    NOTE(review): this helper looks unfinished — it never returns True and
    is not called anywhere in this file; confirm before relying on it.
    """
    selector = self.__stringify(tokens, token.tokenIndex - 2, token.tokenIndex)
    if method.class_name == self.class_name:
        if selector in ['this.', self.class_name + '.']:
            return False
    elif selector == self.class_name + '.':
        return False
    # NOTE(review): implicit ``return None`` on every other path.
def __is_a_usage(self, tokens, token, method):
    """
    :param tokens: all tokens of the file containing ``method``
    :param token: the token of the identifier being classified
    :param method: the method the token occurs in
    :return: True when the token is a genuine usage of the moved static field
    Checks if the given token refers to the static field being moved.
    """
    selector = self.__stringify(tokens, token.tokenIndex - 2, token.tokenIndex)
    method_package_name = method.package_name or ""
    # A same-named class in an unrelated package is a different type entirely.
    if method.class_name == self.class_name and method_package_name != self.package_name and method_package_name != self.target_package_name:
        return False
    if selector == 'this.':
        # "this.<field>" is the field only inside the source class itself.
        if method.class_name == self.class_name:
            return True
        return False
    return True
def __is_a_usage_in_class(self, tokens, token, field):
    """
    :param tokens: all tokens of the file containing the declaration
    :param token: the token of the identifier being classified
    :param field: the field declaration the token occurs in
    :return: True when the token is a usage of the moved static field
    Checks if the given token (inside a field initializer) refers to the
    static field being moved.
    """
    selector = self.__stringify(tokens, token.tokenIndex - 2, token.tokenIndex)
    if selector == self.class_name + '.':
        # Explicitly qualified with the source class name.
        return True
    if selector == 'this.':
        # "this.<field>" matches only inside the source class itself.
        if field.class_name == self.class_name:
            return True
        return False
    # Unqualified: a usage only when the declaration lives in the source class.
    return field.class_name == self.class_name
def __get_usages_in_class_body(self, src):
    """
    :param src: the class whose field initializers are scanned
    :return: a list of {'meta_data': field, 'tokens': [...]} dicts, one per usage
    Finds usages of the moved field inside other field declarations.
    """
    usages = list()
    fields: dict = src.fields
    for field_name, field in fields.items():
        # Skip the declaration of the moved field itself.
        if field_name == self.field_name and src.name == self.class_name:
            continue
        tokens_info = TokensInfo(field.parser_context)  # tokens of ctx method
        exps = tokens_info.get_token_index(tokens_info.token_stream.tokens, tokens_info.start, tokens_info.stop)
        for token in exps:
            if token.text == self.field_name:
                if self.__is_a_usage_in_class(tokens_info.token_stream.tokens, token, field):
                    new_case = {
                        'meta_data': field,
                        # Keep every token on the usage line so __propagate
                        # can inspect the qualifier tokens before the name.
                        'tokens': list(filter(lambda t: t.line == token.line, exps))
                    }
                    usages.append(new_case)
    return usages
def __get_usages_in_methods(self, src):
    """
    :param src: the class whose methods are scanned for usages of the field
    :return: a list of {'meta_data': method, 'tokens': [...]} dicts, one per usage
    Finds method-based usages of the moved field.
    """
    usages = list()
    # Dropped the unused ``param_tokens_info`` / ``method_declaration_info``
    # locals and dead commented-out scope code from the original.
    for method in src.methods.values():
        tokens_info = TokensInfo(method.parser_context)  # tokens of the method ctx
        exps = tokens_info.get_token_index(
            tokens_info.token_stream.tokens, tokens_info.start, tokens_info.stop)
        for token in exps:
            if token.text != self.field_name:
                continue
            # Skip tokens that are really parameters or local variables,
            # or that do not actually refer to the moved static field.
            if self.__is_var_in_method_params(tokens_info.token_stream.tokens, token, method):
                continue
            if self.__is_declared_in_method(tokens_info.token_stream.tokens, token, method):
                continue
            if not self.__is_a_usage(tokens_info.token_stream.tokens, token, method):
                continue
            usages.append({
                'meta_data': method,
                # Keep every token on the usage line so __propagate can
                # inspect the qualifier tokens before the field name.
                'tokens': [t for t in exps if t.line == token.line],
            })
    return usages
def __should_add_import(self, klass: Class):
    """
    :param klass: the class whose file might need an import statement
    :return: True when the file must gain an import of the target class
    Checks whether the file still lacks any import covering the target class.
    """
    # The target class never needs to import itself.
    if klass.name == self.target_class_name:
        return False
    # Already covered by a package-level import?
    if any(pkg_imp.package_name == self.target_package_name
           for pkg_imp in klass.file_info.package_imports):
        return False
    # Already imported explicitly by class name?
    if any(cls_imp.class_name == self.target_class_name
           for cls_imp in klass.file_info.class_imports):
        return False
    return True
def __is_field_in_class(self, field, target_class):
    """
    :param field: the field which is to be checked
    :param target_class: the class that might already declare it
    :return: True when ``target_class`` already has a field of the same name
    """
    # ``fields`` is a mapping keyed by field name, so iterating it yields
    # names; a plain membership test is equivalent to the original loop.
    return field.name in target_class.fields
def __get_usage(self):
    """
    Collect every usage of the field across the whole program.

    :return: (usages, program) tuple; ``usages`` holds usage dicts plus
        {'import': cls} markers for files that need a new import
    :raises UnResolvedMetaError: when source/target/field cannot be resolved
    :raises NonStaticFieldRefactorError: when the field is not static
    :raises Exception: when the target class already declares the field
    """
    program = get_program(self.source_filenames)
    try:
        source_class, target_class, field = self.get_metadata(program)
    except KeyError:
        raise UnResolvedMetaError("Source or destination not found!")
    if 'static' not in field.modifiers:
        raise NonStaticFieldRefactorError("Non-static fields cannot be refactored!")
    if self.__is_field_in_class(field, target_class):
        raise Exception("A field with the same name exists in target class!")
    usages = list()
    for p_name, package in program.packages.items():
        for cls_name, cls in package.classes.items():
            new_usages = self.__get_usages_in_methods(cls)
            usages.extend(new_usages)
            new_usages = self.__get_usages_in_class_body(cls)
            usages.extend(new_usages)
            should_import = self.__should_add_import(cls)
            if not should_import:
                continue
            # Mark this class's file as needing "import target.TargetClass;".
            usages.append({
                "import": cls,
            })
    return usages, program
def __propagate(self, usages: list, rewriter: Rewriter):
    """
    Rewrite every recorded usage to ``TargetClass.field`` and add imports.

    :param usages: the usage dicts produced by __get_usage()
    :param rewriter: the rewriter object which is going to rewrite the files
    :return: void
    """
    local_var_declared = False  # NOTE(review): never read; leftover state.
    for usage in usages:
        if "import" in usage:
            self.__add_import(usage["import"], rewriter)
            continue
        method_tokens = TokensInfo(usage["meta_data"].parser_context)
        for i, token in enumerate(usage['tokens']):
            if token.text != self.field_name:
                continue
            # Replace just the field token ...
            method_tokens.start = token.tokenIndex
            method_tokens.stop = token.tokenIndex
            if i > 1:
                # ... or the whole "this.field" / "SourceClass.field" chain.
                if usage["tokens"][i - 2].text == "this" or \
                        usage["tokens"][i - 2].text == self.class_name:
                    method_tokens.start -= 2
            # Lazily register a TokenStreamRewriter for this file's stream.
            token_stream = usage["meta_data"].parser_context.parser.getTokenStream()
            if token_stream not in rewriter.token_streams.keys():
                rewriter.token_streams[token_stream] = (
                    usage["meta_data"].filename,
                    TokenStreamRewriter(token_stream),
                    usage["meta_data"].filename
                )
            rewriter.replace(method_tokens, f'{self.target_class_name}.{self.field_name}')
def move(self):
    """
    Perform the move-field refactoring and reformat every touched file.

    :return: True when the refactoring completed
    """
    usages, program = self.__get_usage()
    source_class = program.packages[self.package_name].classes[self.class_name]
    target_class = program.packages[self.target_package_name].classes[self.target_class_name]
    field = source_class.fields[self.field_name]
    rewriter = Rewriter(program,
                        lambda x: f"{os.path.dirname(x)}/{os.path.splitext(os.path.basename(x))[0]}.java")
    self.__remove_field_from_src(field, rewriter)
    self.__move_field_to_dst(target_class, field, rewriter)
    self.__propagate(usages, rewriter)
    rewriter.apply()
    modified_files = {u["meta_data"].filename for u in usages if "meta_data" in u}
    # BUG FIX: ``set.union`` returns a new set and the original discarded
    # the result, so files that only gained an import were never reformatted.
    # Use in-place union instead.
    modified_files |= {u["import"].filename for u in usages if "import" in u}
    modified_files.add(source_class.filename)
    modified_files.add(target_class.filename)
    self.__reformat(list(modified_files))
    return True
def __remove_field_from_src(self, field: Field, rewriter: Rewriter):
    """
    Delete the field declaration (and its modifiers) from the source class.

    :param field: the field which is to be moved in the refactoring
    :param rewriter: the rewriter object which is going to rewrite the files
    :return: void
    """
    tokens = TokensInfo(field.parser_context)
    tokens.stop += 1  # widen by one token (presumably the trailing ';') — TODO confirm
    rewriter.replace(tokens, "")
    for mod_ctx in field.modifiers_parser_contexts:
        mod_tokens = TokensInfo(mod_ctx)
        mod_tokens.stop += 1  # include the token after the modifier as well
        rewriter.replace(mod_tokens, "")
def __move_field_to_dst(self, target: Class, field: Field, rewriter: Rewriter):
    """
    Insert the (now public) field at the start of the target class body.

    :param target: the target class that the field is going to be moved to
    :param field: the field which is to be moved in the refactoring
    :param rewriter: the rewriter object which is going to rewrite the files
    :return: void
    """
    # The grammar glues "new" onto the class name (newSomeClass());
    # put the space back.
    if field.initializer is not None and field.initializer.startswith("new"):
        field.initializer = field.initializer.replace("new", "new ", 1)
    self.__modify_access_modifiers(field)
    # BUG FIX: the original nested single quotes inside a single-quoted
    # f-string, which is a SyntaxError before Python 3.12 (PEP 701).
    # Build the pieces separately with distinct quote characters.
    initializer = f" = {field.initializer};" if field.initializer else ";"
    new_field = f"\n\t{' '.join(field.modifiers)} {field.datatype} {field.name}{initializer}\n"
    target_class_tokens = TokensInfo(target.body_context)
    rewriter.insert_after_start(target_class_tokens, new_field)
def __modify_access_modifiers(self, field: Field):
    """
    Make the moved field ``public`` so every former user can still reach it.

    :param field: the field which is going to be modified
    :return: void
    """
    # Drop the first existing access modifier, if any.
    for i, modifier in enumerate(field.modifiers):
        if modifier in ("private", "protected", "public"):
            del field.modifiers[i]
            break
    # NOTE(review): dump lost indentation — assuming the original inserted
    # "public" unconditionally (package-private fields must also become
    # public); confirm against the upstream repository.
    field.modifiers.insert(0, "public")
def __reformat(self, modified_files: list):
    """
    Reformat the given Java files in place with google-java-format.

    :param modified_files: the files touched by the refactoring
    :return: void
    """
    command = ["java", "-jar", self.formatter, "--replace", *modified_files]
    subprocess.call(command)
def __add_import(self, klass: Class, rewriter):
    """
    Add ``import <target_package>.<TargetClass>;`` to ``klass``'s file.

    :param klass: the class whose file gets the import
    :param rewriter: the rewriter object which is going to rewrite the files
    :return: void
    """
    # Default-package targets need no import statement at all.
    if not self.target_package_name:
        return
    import_line = f"import {self.target_package_name}.{self.target_class_name};"
    if len(klass.file_info.all_imports) == 0:
        # No imports yet: insert just before the class declaration,
        # stepping back over its modifier tokens first.
        tokens_info = TokensInfo(klass.parser_context)
        tokens_info.start -= len(klass.modifiers_parser_contexts) * 2
        tokens_info.stop += 1
        rewriter.insert_before_start(tokens_info, import_line)
        return
    # Otherwise append right after the last existing import.
    tokens_info = TokensInfo(klass.file_info.all_imports[-1].parser_context)
    tokens_info.stop += 1
    rewriter.insert_after(tokens_info, import_line)
if __name__ == '__main__':
    # Demo driver with a hard-coded local project path — adjust before running.
    path = "/home/ali/Desktop/JavaTestProject/"
    my_list = get_filenames_in_dir(path)
    # Move the static field "field_for_move" from SourceClass to TargetClass
    # (both in the default package, hence the empty package names).
    refactoring = MoveFieldRefactoring(my_list, "", "SourceClass", "field_for_move",
                                       "TargetClass", "")
    refac = refactoring.move()
    print(refac)
| import os
import subprocess
from refactorings.utils.utils2 import get_program, Rewriter, get_filenames_in_dir
from refactorings.utils.scope_listener import get_program2
from refactorings.utils.utils_listener_fast import TokensInfo, Field, Class, Method, LocalVariable, ClassImport
from antlr4.TokenStreamRewriter import TokenStreamRewriter
class UnResolvedMetaError(Exception):
    """Raised when the source/target package, class or field cannot be resolved."""
class NonStaticFieldRefactorError(Exception):
    """Raised when the field selected for moving is not declared ``static``."""
class MoveFieldRefactoring:
def __init__(self, source_filenames: list, package_name: str,
class_name: str, field_name: str, target_class_name: str,
target_package_name: str):
self.source_filenames = source_filenames
self.package_name = package_name
self.class_name = class_name
self.field_name = field_name
self.target_class_name = target_class_name
self.target_package_name = target_package_name
self.formatter = os.path.abspath("../assets/formatter/google-java-format-1.10.0-all-deps.jar")
def get_metadata(self, program):
"""
:param program: The program which is extracted from the get_program() method
:return: The source class, target_class and the field which is to be moved
if there are no such classes or fields or packages in the program, KeyError will be raised
"""
class_name = program.packages[self.package_name].classes[self.class_name]
target_class = program.packages[self.target_package_name].classes[self.target_class_name]
field = program.packages[self.package_name].classes[self.class_name].fields[self.field_name]
return class_name, target_class, field
@staticmethod
def __stringify(tokens, start, end):
"""
:param tokens: a list of tokens
:param start: the index of the first token you want
:param end: the index of the last token you want
:return: String of the desired tokens
Converts list of tokens into strings
"""
string = ""
for t in tokens[start: end]:
if t.text != ' ':
string += t.text
return string
def __is_var_in_method_params(self, tokens, token, method):
"""
:param tokens: a list of all the tokens of the file in which the method is
:param token: The token of the var that is to be checked
:param method: The method that is going to be checked for its parameters
:return: Whether the variable is in the method's parameters or not
Checks if given token is related to a method parameter or not
"""
method_params = list(map(lambda p: p[1], method.parameters))
if token.text in method_params:
selector = self.__stringify(tokens, token.tokenIndex - 2, token.tokenIndex)
if method.class_name == self.class_name:
return selector not in ['this.', self.class_name + '.']
return selector == self.class_name + '.'
return False
def __is_declared_in_method(self, tokens, token, method):
"""
:param tokens: a list of all the tokens of the file in which the method is
:param token: The token of the var that is to be checked
:param method: The method that is going to be checked for its local variables
:return: Whether the variable is declared in the method or not
Checks if given token is related to a new declared variable in a method
"""
selector = self.__stringify(tokens, token.tokenIndex - 2, token.tokenIndex)
if method.class_name == self.class_name:
if selector in ['this.', self.class_name + '.']:
return False
elif selector == self.class_name + '.':
return False
local_exp_var = method.body_local_vars_and_expr_names
try:
local_var_definition = next(filter(lambda x: isinstance(x, LocalVariable) and
x.identifier == token.text, local_exp_var))
start = local_var_definition.parser_context.start.start
if start <= token.start:
return True
return False
except StopIteration:
return False
def __is_declared_in_class(self, tokens, token, method):
"""
:param tokens: a list of all the tokens of the file in which the method is
:param token: The token of the var that is to be checked
:param method: The method that is going to be checked for its fields
:return: Whether the variable is declared in the method's class or not
Checks if given token is related to a new declared variable in a method
"""
selector = self.__stringify(tokens, token.tokenIndex - 2, token.tokenIndex)
if method.class_name == self.class_name:
if selector in ['this.', self.class_name + '.']:
return False
elif selector == self.class_name + '.':
return False
def __is_a_usage(self, tokens, token, method):
"""
:param tokens: a list of all the tokens of the file in which the method is
:param token: The token of the var that is to be checked
:param method: The method that is going to be checked
:return: Whether the field is used or not
Checks if given token is related to the static field, program searching for
"""
selector = self.__stringify(tokens, token.tokenIndex - 2, token.tokenIndex)
method_package_name = method.package_name or ""
if method.class_name == self.class_name and method_package_name != self.package_name and method_package_name != self.target_package_name:
return False
if selector == 'this.':
if method.class_name == self.class_name:
return True
return False
return True
def __is_a_usage_in_class(self, tokens, token, field):
"""
:param tokens: a list of all the tokens of the file in which the method is
:param token: The token of the var that is to be checked
:param field: The field that is going to be checked
:return: Whether the field is used in the class or not
Checks if given token is related to the static field, program searching for
"""
selector = self.__stringify(tokens, token.tokenIndex - 2, token.tokenIndex)
if selector == self.class_name + '.':
return True
if selector == 'this.':
if field.class_name == self.class_name:
return True
return False
return field.class_name == self.class_name
def __get_usages_in_class_body(self, src):
"""
:param src: The source in which we want to extract the field's usages
:return: A `list` of all the field's usages in the class body
"""
usages = list()
fields: dict = src.fields
for field_name, field in fields.items():
if field_name == self.field_name and src.name == self.class_name:
continue
tokens_info = TokensInfo(field.parser_context) # tokens of ctx method
exps = tokens_info.get_token_index(tokens_info.token_stream.tokens, tokens_info.start, tokens_info.stop)
for token in exps:
if token.text == self.field_name:
if self.__is_a_usage_in_class(tokens_info.token_stream.tokens, token, field):
new_case = {
'meta_data': field,
'tokens': list(filter(lambda t: t.line == token.line, exps))
}
usages.append(new_case)
return usages
def __get_usages_in_methods(self, src):
    """
    :param src: the class whose methods are scanned for usages of the field
    :return: a list of {'meta_data': method, 'tokens': [...]} dicts, one per usage
    Finds method-based usages of the moved field.
    """
    usages = list()
    # Dropped the unused ``param_tokens_info`` / ``method_declaration_info``
    # locals and dead commented-out scope code from the original.
    for method in src.methods.values():
        tokens_info = TokensInfo(method.parser_context)  # tokens of the method ctx
        exps = tokens_info.get_token_index(
            tokens_info.token_stream.tokens, tokens_info.start, tokens_info.stop)
        for token in exps:
            if token.text != self.field_name:
                continue
            # Skip tokens that are really parameters or local variables,
            # or that do not actually refer to the moved static field.
            if self.__is_var_in_method_params(tokens_info.token_stream.tokens, token, method):
                continue
            if self.__is_declared_in_method(tokens_info.token_stream.tokens, token, method):
                continue
            if not self.__is_a_usage(tokens_info.token_stream.tokens, token, method):
                continue
            usages.append({
                'meta_data': method,
                # Keep every token on the usage line so __propagate can
                # inspect the qualifier tokens before the field name.
                'tokens': [t for t in exps if t.line == token.line],
            })
    return usages
def __should_add_import(self, klass: Class):
    """
    :param klass: the class whose file might need an import statement
    :return: True when the file must gain an import of the target class
    Checks whether the file still lacks any import covering the target class.
    """
    # The target class never needs to import itself.
    if klass.name == self.target_class_name:
        return False
    # Already covered by a package-level import?
    if any(pkg_imp.package_name == self.target_package_name
           for pkg_imp in klass.file_info.package_imports):
        return False
    # Already imported explicitly by class name?
    if any(cls_imp.class_name == self.target_class_name
           for cls_imp in klass.file_info.class_imports):
        return False
    return True
def __is_field_in_class(self, field, target_class):
    """
    :param field: the field which is to be checked
    :param target_class: the class that might already declare it
    :return: True when ``target_class`` already has a field of the same name
    """
    # ``fields`` is a mapping keyed by field name, so iterating it yields
    # names; a plain membership test is equivalent to the original loop.
    return field.name in target_class.fields
def __get_usage(self):
"""
:return: A list of all the usages of the field
Finds usages of a field inside project files
"""
program = get_program(self.source_filenames)
try:
source_class, target_class, field = self.get_metadata(program)
except KeyError:
raise UnResolvedMetaError("Source or destination not found!")
if 'static' not in field.modifiers:
raise NonStaticFieldRefactorError("Non-static fields cannot be refactored!")
if self.__is_field_in_class(field, target_class):
raise Exception("A field with the same name exists in target class!")
usages = list()
for p_name, package in program.packages.items():
for cls_name, cls in package.classes.items():
new_usages = self.__get_usages_in_methods(cls)
usages.extend(new_usages)
new_usages = self.__get_usages_in_class_body(cls)
usages.extend(new_usages)
should_import = self.__should_add_import(cls)
if not should_import:
continue
usages.append({
"import": cls,
})
return usages, program
def __propagate(self, usages: list, rewriter: Rewriter):
"""
:param rewriter: The rewriter object which is going to rewrite the files
:param usages: the usages of the field in the program
:return: void
Propagates the changes made to the files and the field
"""
local_var_declared = False
for usage in usages:
if "import" in usage:
self.__add_import(usage["import"], rewriter)
continue
method_tokens = TokensInfo(usage["meta_data"].parser_context)
for i, token in enumerate(usage['tokens']):
if token.text != self.field_name:
continue
method_tokens.start = token.tokenIndex
method_tokens.stop = token.tokenIndex
if i > 1:
if usage["tokens"][i - 2].text == "this" or \
usage["tokens"][i - 2].text == self.class_name:
method_tokens.start -= 2
# else:
# if local_var_declared:
# continue
# local_var_declared = True
# continue
# else:
# if local_var_declared:
# continue
token_stream = usage["meta_data"].parser_context.parser.getTokenStream()
if token_stream not in rewriter.token_streams.keys():
rewriter.token_streams[token_stream] = (
usage["meta_data"].filename,
TokenStreamRewriter(token_stream),
usage["meta_data"].filename
)
rewriter.replace(method_tokens, f'{self.target_class_name}.{self.field_name}')
def move(self):
    """
    Perform the move-field refactoring and reformat every touched file.

    :return: True when the refactoring completed
    """
    usages, program = self.__get_usage()
    source_class = program.packages[self.package_name].classes[self.class_name]
    target_class = program.packages[self.target_package_name].classes[self.target_class_name]
    field = source_class.fields[self.field_name]
    rewriter = Rewriter(program,
                        lambda x: f"{os.path.dirname(x)}/{os.path.splitext(os.path.basename(x))[0]}.java")
    self.__remove_field_from_src(field, rewriter)
    self.__move_field_to_dst(target_class, field, rewriter)
    self.__propagate(usages, rewriter)
    rewriter.apply()
    modified_files = {u["meta_data"].filename for u in usages if "meta_data" in u}
    # BUG FIX: ``set.union`` returns a new set and the original discarded
    # the result, so files that only gained an import were never reformatted.
    # Use in-place union instead.
    modified_files |= {u["import"].filename for u in usages if "import" in u}
    modified_files.add(source_class.filename)
    modified_files.add(target_class.filename)
    self.__reformat(list(modified_files))
    return True
def __remove_field_from_src(self, field: Field, rewriter: Rewriter):
"""
:param field: The field which is to be moved in the refactoring
:param rewriter: The rewriter object which is going to rewrite the files
:return: void
Remove the field from the source class
"""
tokens = TokensInfo(field.parser_context)
tokens.stop += 1
rewriter.replace(tokens, "")
for mod_ctx in field.modifiers_parser_contexts:
mod_tokens = TokensInfo(mod_ctx)
mod_tokens.stop += 1
rewriter.replace(mod_tokens, "")
def __move_field_to_dst(self, target: Class, field: Field, rewriter: Rewriter):
"""
:param target: The target class that the field is going to be moved to
:param field: The field which is to be moved in the refactoring
:param rewriter: The rewriter object which is going to rewrite the files
:return: void
Move the field from the source to the target class
"""
# this nasty if is because the grammar sucks. converts new SomeClass() to newSomeClass()
if field.initializer is not None and field.initializer.startswith("new"):
field.initializer = field.initializer.replace("new", "new ", 1)
self.__modify_access_modifiers(field)
new_field = f'\n\t{" ".join(field.modifiers)} {field.datatype} {field.name}{f" = {field.initializer};" if field.initializer else ";"}\n'
target_class_tokens = TokensInfo(target.body_context)
rewriter.insert_after_start(target_class_tokens, new_field)
def __modify_access_modifiers(self, field: Field):
    """
    Make the moved field ``public`` so every former user can still reach it.

    :param field: the field which is going to be modified
    :return: void
    """
    # Drop the first existing access modifier, if any.
    for i, modifier in enumerate(field.modifiers):
        if modifier in ("private", "protected", "public"):
            del field.modifiers[i]
            break
    # NOTE(review): dump lost indentation — assuming the original inserted
    # "public" unconditionally (package-private fields must also become
    # public); confirm against the upstream repository.
    field.modifiers.insert(0, "public")
def __reformat(self, modified_files: list):
    """
    Reformat the given Java files in place with google-java-format.

    :param modified_files: the files touched by the refactoring
    :return: void
    """
    command = ["java", "-jar", self.formatter, "--replace", *modified_files]
    subprocess.call(command)
def __add_import(self, klass: Class, rewriter):
"""
:param klass: The class where the import should be added to
:param rewriter: The rewriter object which is going to rewrite the files
:return: void
Adds the imports that are needed in the file since the refactorings
"""
# if there are no imports in the class appends before the start of class
if not self.target_package_name:
return
import_line = f"import {self.target_package_name}.{self.target_class_name};"
if len(klass.file_info.all_imports) == 0:
tokens_info = TokensInfo(klass.parser_context)
tokens_info.start -= len(klass.modifiers_parser_contexts) * 2
tokens_info.stop += 1
rewriter.insert_before_start(tokens_info, import_line)
return
# if however we have some imports append new import at the end of last import
tokens_info = TokensInfo(klass.file_info.all_imports[-1].parser_context)
tokens_info.stop += 1
rewriter.insert_after(tokens_info, import_line)
if __name__ == '__main__':
path = "/home/ali/Desktop/JavaTestProject/"
my_list = get_filenames_in_dir(path)
refactoring = MoveFieldRefactoring(my_list, "", "SourceClass", "field_for_move",
"TargetClass", "")
refac = refactoring.move()
print(refac)
|
from argparse import ArgumentTypeError
from enum import Enum
from typing import TYPE_CHECKING
from shogun.argparse_.action import FieldAction
from shogun.dispatch.base import DispatchPriority, DispatcherIsSubclass
from shogun.dispatch.concrete.default import DispatcherDefault
if TYPE_CHECKING:
from shogun.records.generic import RecordField
class DispatcherEnum(DispatcherIsSubclass):
    """Dispatcher that builds argparse actions for ``Enum``-typed fields."""

    priority: int = DispatchPriority.SIMPLE_TYPES
    type_ = Enum

    @classmethod
    def build_action(cls, field: "RecordField") -> FieldAction:
        """Return a FieldAction that parses a member *name* into ``field.type``.

        The type function raises ArgumentTypeError for values that are not
        member names of the enum.
        """

        def enum_type_func(value: str):
            # Look the member up by name; ``get`` returns None on a miss.
            result = field.type.__members__.get(value)
            # BUG FIX: compare against None explicitly — a falsy member
            # (e.g. an IntEnum member with value 0) is still a valid choice
            # and must not be rejected.
            if result is None:
                raise ArgumentTypeError(
                    f"invalid choice: {value!r} (choose from {[e.name for e in field.type]})"
                )
            return result

        return DispatcherDefault.build_action(
            field,
            type=enum_type_func,
            choices=field.type,
            # BUG FIX: use a distinct inner quote character — reusing the
            # same quote inside an f-string is a SyntaxError before
            # Python 3.12 (PEP 701).
            metavar=f"{{{','.join(field.type.__members__)}}}",
        )
| from argparse import ArgumentTypeError
from enum import Enum
from typing import TYPE_CHECKING
from shogun.argparse_.action import FieldAction
from shogun.dispatch.base import DispatchPriority, DispatcherIsSubclass
from shogun.dispatch.concrete.default import DispatcherDefault
if TYPE_CHECKING:
from shogun.records.generic import RecordField
class DispatcherEnum(DispatcherIsSubclass):
    """Dispatcher that builds argparse actions for ``Enum``-typed fields."""

    priority: int = DispatchPriority.SIMPLE_TYPES
    type_ = Enum

    @classmethod
    def build_action(cls, field: "RecordField") -> FieldAction:
        """Return a FieldAction that parses a member *name* into ``field.type``.

        The type function raises ArgumentTypeError for values that are not
        member names of the enum.
        """

        def enum_type_func(value: str):
            # Look the member up by name; ``get`` returns None on a miss.
            result = field.type.__members__.get(value)
            # BUG FIX: compare against None explicitly — a falsy member
            # (e.g. an IntEnum member with value 0) is still a valid choice
            # and must not be rejected.
            if result is None:
                raise ArgumentTypeError(
                    f"invalid choice: {value!r} (choose from {[e.name for e in field.type]})"
                )
            return result

        return DispatcherDefault.build_action(
            field,
            type=enum_type_func,
            choices=field.type,
            metavar=f"{{{','.join(field.type.__members__)}}}",
        )
|
import youtube_dl
from requests_html import HTMLSession
import re
import sys
import json
def atoi(text):
    """Return ``text`` as an int when it is all digits, otherwise unchanged."""
    if text.isdigit():
        return int(text)
    return text

def natural_keys(text):
    """Split ``text`` into [str, int, str, ...] chunks for natural sorting."""
    return [atoi(chunk) for chunk in re.split(r'(\d+)', text)]
try:
    # Positional CLI arguments: language flag and download-mode flag.
    language = sys.argv[1]
    arg = sys.argv[2]
except IndexError:
    # BUG FIX: catch IndexError specifically instead of a bare ``except``,
    # which also swallowed KeyboardInterrupt/SystemExit.
    print("""Arguments available:
Language :
-en : English
-fr : French
Arguments :
-e = Download episode per episode
-s = Download season per season
""")
else:
    saison = "0"
    episode = "0"
    # Optional third argument toggles youtube-dl's verbose mode.
    verbose = sys.argv[3] if len(sys.argv) > 3 else None
    if verbose == '-v':
        class MyLogger(object):
            """Logger that forwards every youtube-dl message to stdout."""
            def debug(self, msg):
                print(msg)
            def warning(self, msg):
                print(msg)
            def error(self, msg):
                print(msg)
        ydl_opts = {
            'format': 'bestaudio/best',
            'outtmpl': f'The Simpson/The Simpson - S{saison}E{episode}.mp4',
            'no_warnings': True,
            'logger': MyLogger(),
            'verbose': True
        }
        print("Verbose mode enabled.")
    else:
        # BUG FIX: the original only defined ``ydl_opts`` when argv[3] was
        # absent (except path) or exactly '-v'; any other third argument
        # left ``ydl_opts`` undefined and crashed later with NameError.
        class MyLogger(object):
            """Logger that prints debug output and swallows warnings/errors."""
            def debug(self, msg):
                print(msg)
            def warning(self, msg):
                pass
            def error(self, msg):
                pass
        ydl_opts = {
            'format': 'bestaudio/best',
            'outtmpl': f'The Simpson/The Simpson - S{saison}E{episode}.mp4',
            'no_warnings': True,
            'logger': MyLogger()
        }
def bypassLogin(saison, episode, link):
    """
    Download one episode by guessing direct-mp4 URLs on pixavideo.club,
    bypassing the pixa-club login; tries the S01E01-style URL first and
    falls back to the per-episode slug URL.

    :param saison: season number (assumed str when it has a leading zero — TODO confirm)
    :param episode: episode number (same assumption)
    :param link: pixa-club episode page URL used to build the fallback slug
    """
    # Normalize the season to both a stripped number and a zero-padded form.
    if(int(saison) < 10):
        tmp = str(saison)
        if(tmp[0] == "0"):
            saison = saison[1:]
        seasonZero = "0" + str(saison)
    else:
        seasonZero = str(saison)
    # Same normalization for the episode number.
    if(int(episode) < 10):
        tmp = str(episode)
        if(tmp[0] == "0"):
            episode = episode[1:]
        episodeZero = "0" + str(episode)
    else:
        episodeZero = str(episode)
    # Candidate 1: .../video/simpsons/<season>/S01E01EN.mp4
    ver1 = "https://pixavideo.club/video/simpsons/" + f"{saison}/S{seasonZero}E{episodeZero}EN.mp4"
    # Candidate 2: .../video/the-simpsons/<season-slug>/<episode>-<slug>-EN.mp4
    link = link.split("/the-simpsons/")[1]
    linkSplited = link.split("/")
    linkSplited[1] = f"{episode}-" + linkSplited[1] + "-EN.mp4"
    ver2 = "https://pixavideo.club/video/the-simpsons/" + linkSplited[0] + "/" + linkSplited[1]
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([ver1])
    # NOTE(review): bare except also swallows KeyboardInterrupt;
    # youtube_dl.utils.DownloadError would be the precise catch here.
    except:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([ver2])
def englishEpisode():
    """Prompt for season and episode, scrape the season's episode links from
    pixa-club.com and download the chosen episode via bypassLogin()."""
    saison = input("Season ? \n")
    if(str(saison[0]) == "0"):
        saison = saison[1:]
    session = HTMLSession()
    url = f'http://pixa-club.com/en/the-simpsons/season-{saison}/'
    API = session.get(url)
    episodeURL = API.html.find("div.col-md-8 > div.row")
    links = episodeURL[0].absolute_links
    episodeLink = []
    for link in links:
        episodeLink.append(link)
    print(f"{len(episodeLink)} episodes found for this season")
    # The homepage link presumably sorts to index 0 after the natural sort,
    # so episode N lands at index N — verify against the site's link format.
    episodeLink.append("http://pixa-club.com/en/")
    episodeLink.sort(key=natural_keys)
    episode = input("Episode ? \n")
    if(str(episode[0]) == "0"):
        episode = episode[1:]
    print("---------------------DOWNLOAD---------------------")
    bypassLogin(saison, episode, episodeLink[int(episode)])
def englishSeason():
    """Prompt for a season, scrape its episode page links from pixa-club.com
    and download every episode in order via bypassLogin()."""
    saison = input("Season ? \n")
    if(str(saison[0]) == "0"):
        saison = saison[1:]
    session = HTMLSession()
    url = f'http://pixa-club.com/en/the-simpsons/season-{saison}/'
    API = session.get(url)
    episodeURL = API.html.find("div.col-md-8 > div.row")
    links = episodeURL[0].absolute_links
    episodeLink = []
    for link in links:
        episodeLink.append(link)
    print(f"{len(episodeLink)} episodes found for this season")
    # BUG FIX: capture the real episode count BEFORE appending the homepage
    # link. The original took len() after the append and iterated
    # range(len(episodeLink)), so the final iteration indexed one past the
    # end of the list (IndexError on the last episode).
    episodes = len(episodeLink)
    # The homepage link presumably sorts to index 0, so episode N is at index N.
    episodeLink.append("http://pixa-club.com/en/")
    episodeLink.sort(key=natural_keys)
    print("---------------------DOWNLOAD---------------------")
    for episode in range(episodes):
        episode = episode + 1
        print(f'{episode}/{episodes}')
        url = episodeLink[int(episode)]
        bypassLogin(saison, episode, url)
def frenchEpisode():
    """Download one French episode using the URL map stored in links.json."""
    with open("links.json", "r") as json_file :
        library = json.load(json_file)
    print("Répertoire chargé.")
    saison = input("Saison ? \n")
    if(str(saison[0]) == "0"):
        saison = saison[1:]
    # BUG FIX: the original nested double quotes inside a double-quoted
    # f-string (library[f"S{saison}"]) — a SyntaxError before Python 3.12.
    print(f"{len(library[f'S{saison}'])} épisodes trouvés pour cette saison.")
    episode = input("Épisode ? \n")
    # Local youtube-dl options (no custom logger, unlike the English path).
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': f'The Simpson/The Simpson - S{saison}E{episode}.mp4',
        'no_warnings' : True
    }
    print("---------------------TÉLÉCHARGEMENT---------------------")
    session = HTMLSession()
    url = library[f'S{saison}'][f'E{episode}']
    API = session.get(url)
    # Render the page's JavaScript so the <video> source element is populated.
    API.html.render()
    downloadable = API.html.find('video#videoPlayer_html5_api > source')[0].attrs['src']
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([downloadable])
    except:
        print("Téléchargement échoué")
        print("Vous pouvez créer une issue sur Github : https://github.com/LLinoor/Simpson-Downloader")
def frenchSeason():
    """Download every French episode of a season listed in links.json."""
    with open("links.json", "r") as json_file :
        library = json.load(json_file)
    print("Répertoire chargé.")
    saison = input("Saison ? \n")
    if(str(saison[0]) == "0"):
        saison = saison[1:]
    # BUG FIX: same nested-quote f-string as frenchEpisode — invalid syntax
    # before Python 3.12; use single quotes for the inner key expression.
    print(f"{len(library[f'S{saison}'])} épisodes trouvés pour cette saison.")
    print("---------------------TÉLÉCHARGEMENT---------------------")
    for episode in range(len(library[f'S{saison}'])):
        episode = episode + 1
        episode = str(episode)
        ydl_opts = {
            'format': 'bestaudio/best',
            'outtmpl': f'The Simpson/The Simpson - S{saison}E{episode}.mp4',
            'no_warnings' : True
        }
        session = HTMLSession()
        url = library[f'S{saison}'][f'E{episode}']
        API = session.get(url)
        # Render JavaScript so the <video> source is populated.
        API.html.render()
        downloadable = API.html.find('video#videoPlayer_html5_api > source')[0].attrs['src']
        try:
            with youtube_dl.YoutubeDL(ydl_opts) as ydl:
                ydl.download([downloadable])
        except:
            print("Téléchargement échoué")
            print("Vous pouvez créer une issue sur Github : https://github.com/LLinoor/Simpson-Downloader")
# Dispatch on the parsed CLI flags.
# NOTE(review): if argv parsing failed in the try block above, `language`
# and `arg` are undefined and this chain raises NameError right after the
# usage text was printed — confirm intended.
# NOTE(review): the elif guard that follows this block
# (language != '-en' or language != '-fr' or ...) is a tautology — it is
# always True and behaves as a plain else.
if(language == '-en' and arg == '-e'):
    englishEpisode()
elif(language == "-en" and arg == "-s"):
    englishSeason()
elif(language == "-fr" and arg == "-e"):
    frenchEpisode()
elif(language == "-fr" and arg == "-s"):
    frenchSeason()
elif(language != '-en' or language !='-fr' or arg != '-e' or arg != '-s' or arg != '-u'):
print("""Arguments available:
Language :
-en : English
-fr : French
Arguments :
-e = Download episode per episode
-s = Download season per season
""") | import youtube_dl
from requests_html import HTMLSession
import re
import sys
import json
def atoi(text):
    """Coerce a purely numeric string to int; leave any other string alone."""
    return int(text) if text.isdigit() else text
def natural_keys(text):
    """Key function for natural sorting: split *text* into digit and
    non-digit runs so that embedded numbers compare numerically."""
    pieces = re.split(r'(\d+)', text)
    return [atoi(piece) for piece in pieces]
# Read the two required CLI arguments (language flag and download mode).
try:
    language = sys.argv[1]
    arg = sys.argv[2]
except:
    # Not enough arguments: show the usage text.
    print("""Arguments available:
    Language :
        -en : English
        -fr : French
    Arguments :
        -e = Download episode per episode
        -s = Download season per season
        """)
else:
    # Placeholders interpolated into the youtube-dl output template below.
    saison = "0"
    episode = "0"
    # Optional '-v' third argument switches youtube-dl to verbose output.
    try:
        verbose = sys.argv[3]
        if(verbose == '-v'):
            class MyLogger(object):
                # Verbose: echo all youtube-dl messages.
                def debug(self, msg):
                    print(msg)
                def warning(self, msg):
                    print(msg)
                def error(self, msg):
                    print(msg)
            # NOTE(review): a third argument other than '-v' leaves ydl_opts
            # undefined, causing NameError at download time — confirm.
            ydl_opts = {
                'format': 'bestaudio/best',
                'outtmpl': f'The Simpson/The Simpson - S{saison}E{episode}.mp4',
                'no_warnings' : True,
                'logger': MyLogger(),
                'verbose': True
            }
            print("Verbose mode enabled.")
    except:
        # Default: quiet logger (debug printed, warnings/errors swallowed).
        class MyLogger(object):
            def debug(self, msg):
                print(msg)
            def warning(self, msg):
                pass
            def error(self, msg):
                pass
        ydl_opts = {
            'format': 'bestaudio/best',
            'outtmpl': f'The Simpson/The Simpson - S{saison}E{episode}.mp4',
            'no_warnings' : True,
            'logger': MyLogger()
        }
def bypassLogin(saison, episode, link):
    """Construct two candidate direct-download URLs on pixavideo.club for
    the season/episode pair and download whichever works, bypassing the
    login page. Relies on the module-level ydl_opts."""
    # Strip a single leading zero and also build a zero-padded variant.
    if(int(saison) < 10):
        tmp = str(saison)
        if(tmp[0] == "0"):
            saison = saison[1:]
        seasonZero = "0" + str(saison)
    else:
        seasonZero = str(saison)
    if(int(episode) < 10):
        tmp = str(episode)
        if(tmp[0] == "0"):
            episode = episode[1:]
        episodeZero = "0" + str(episode)
    else:
        episodeZero = str(episode)
    # Candidate URL 1: /video/simpsons/<season>/S<xx>E<yy>EN.mp4
    ver1 = "https://pixavideo.club/video/simpsons/" + f"{saison}/S{seasonZero}E{episodeZero}EN.mp4"
    # Candidate URL 2: rebuilt from the scraped episode page link.
    link = link.split("/the-simpsons/")[1]
    linkSplited = link.split("/")
    linkSplited[1] = f"{episode}-" + linkSplited[1] + "-EN.mp4"
    ver2 = "https://pixavideo.club/video/the-simpsons/" + linkSplited[0] + "/" + linkSplited[1]
    # Attempt URL 1 first; any failure falls back to URL 2.
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([ver1])
    except:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([ver2])
def englishEpisode():
    """Ask for season/episode, scrape that season's links and download the
    selected episode via bypassLogin()."""
    saison = input("Season ? \n")
    if(str(saison[0]) == "0"):
        saison = saison[1:]
    session = HTMLSession()
    url = f'http://pixa-club.com/en/the-simpsons/season-{saison}/'
    API = session.get(url)
    episodeURL = API.html.find("div.col-md-8 > div.row")
    links = episodeURL[0].absolute_links
    episodeLink = []
    for link in links:
        episodeLink.append(link)
    print(f"{len(episodeLink)} episodes found for this season")
    # Homepage link presumably sorts to index 0, mapping episode N to index N
    # after the natural sort — verify against the scraped link format.
    episodeLink.append("http://pixa-club.com/en/")
    episodeLink.sort(key=natural_keys)
    episode = input("Episode ? \n")
    if(str(episode[0]) == "0"):
        episode = episode[1:]
    print("---------------------DOWNLOAD---------------------")
    bypassLogin(saison, episode, episodeLink[int(episode)])
def englishSeason():
    """Ask for a season, scrape all of its episode links and download each
    episode in order via bypassLogin()."""
    saison = input("Season ? \n")
    if(str(saison[0]) == "0"):
        saison = saison[1:]
    session = HTMLSession()
    url = f'http://pixa-club.com/en/the-simpsons/season-{saison}/'
    API = session.get(url)
    episodeURL = API.html.find("div.col-md-8 > div.row")
    links = episodeURL[0].absolute_links
    episodeLink = []
    for link in links:
        episodeLink.append(link)
    print(f"{len(episodeLink)} episodes found for this season")
    # BUG FIX: record the true episode count before appending the homepage
    # link. The original computed len() after the append and looped
    # range(len(episodeLink)), overrunning the list by one on the final
    # iteration (IndexError).
    episodes = len(episodeLink)
    # Homepage link presumably sorts to index 0 so episode N is at index N.
    episodeLink.append("http://pixa-club.com/en/")
    episodeLink.sort(key=natural_keys)
    print("---------------------DOWNLOAD---------------------")
    for episode in range(episodes):
        episode = episode + 1
        print(f'{episode}/{episodes}')
        url = episodeLink[int(episode)]
        bypassLogin(saison, episode, url)
def frenchEpisode():
    """Download one French episode using the URL map stored in links.json."""
    with open("links.json", "r") as json_file :
        library = json.load(json_file)
    print("Répertoire chargé.")
    saison = input("Saison ? \n")
    if(str(saison[0]) == "0"):
        saison = saison[1:]
    print(f"{len(library[f'S{saison}'])} épisodes trouvés pour cette saison.")
    episode = input("Épisode ? \n")
    # Local youtube-dl options (no custom logger on the French path).
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': f'The Simpson/The Simpson - S{saison}E{episode}.mp4',
        'no_warnings' : True
    }
    print("---------------------TÉLÉCHARGEMENT---------------------")
    session = HTMLSession()
    url = library[f'S{saison}'][f'E{episode}']
    API = session.get(url)
    # Render JavaScript so the <video> source element is populated.
    API.html.render()
    downloadable = API.html.find('video#videoPlayer_html5_api > source')[0].attrs['src']
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([downloadable])
    except:
        print("Téléchargement échoué")
        print("Vous pouvez créer une issue sur Github : https://github.com/LLinoor/Simpson-Downloader")
def frenchSeason():
    """Download every French episode of a season listed in links.json."""
    with open("links.json", "r") as json_file :
        library = json.load(json_file)
    print("Répertoire chargé.")
    saison = input("Saison ? \n")
    if(str(saison[0]) == "0"):
        saison = saison[1:]
    print(f"{len(library[f'S{saison}'])} épisodes trouvés pour cette saison.")
    print("---------------------TÉLÉCHARGEMENT---------------------")
    for episode in range(len(library[f'S{saison}'])):
        # Episode keys in links.json are 1-based strings ("E1", "E2", ...).
        episode = episode + 1
        episode = str(episode)
        ydl_opts = {
            'format': 'bestaudio/best',
            'outtmpl': f'The Simpson/The Simpson - S{saison}E{episode}.mp4',
            'no_warnings' : True
        }
        session = HTMLSession()
        url = library[f'S{saison}'][f'E{episode}']
        API = session.get(url)
        # Render JavaScript so the <video> source element is populated.
        API.html.render()
        downloadable = API.html.find('video#videoPlayer_html5_api > source')[0].attrs['src']
        try:
            with youtube_dl.YoutubeDL(ydl_opts) as ydl:
                ydl.download([downloadable])
        except:
            print("Téléchargement échoué")
            print("Vous pouvez créer une issue sur Github : https://github.com/LLinoor/Simpson-Downloader")
# Dispatch on the parsed CLI flags; unrecognised combinations show usage.
# NOTE(review): if argv parsing failed earlier, `language`/`arg` are
# undefined here and this chain raises NameError — confirm intended.
if(language == '-en' and arg == '-e'):
    englishEpisode()
elif(language == "-en" and arg == "-s"):
    englishSeason()
elif(language == "-fr" and arg == "-e"):
    frenchEpisode()
elif(language == "-fr" and arg == "-s"):
    frenchSeason()
else:
    # BUG FIX: the original guard `language != '-en' or language != '-fr'
    # or ...` is a tautology (always True for any value), so this branch is
    # simply the default fallback.
    print("""Arguments available:
    Language :
        -en : English
        -fr : French
    Arguments :
        -e = Download episode per episode
        -s = Download season per season
    """)
#!/usr/bin/python3
import importlib
import json
import os
import shutil
import sys
import warnings
import zipfile
from base64 import b64encode
from hashlib import sha1
from io import BytesIO
from pathlib import Path
from types import ModuleType
from typing import Any, Dict, Iterator, KeysView, List, Optional, Set, Tuple, Union
from urllib.parse import urlparse
import requests
import yaml
from semantic_version import Version
from tqdm import tqdm
from brownie._config import (
CONFIG,
REQUEST_HEADERS,
_get_data_folder,
_load_project_compiler_config,
_load_project_config,
_load_project_dependencies,
_load_project_structure_config,
)
from brownie.exceptions import (
BrownieEnvironmentWarning,
InvalidPackage,
ProjectAlreadyLoaded,
ProjectNotFound,
)
from brownie.network import web3
from brownie.network.contract import (
Contract,
ContractContainer,
InterfaceContainer,
ProjectContract,
)
from brownie.network.state import _add_contract, _remove_contract, _revert_register
from brownie.project import compiler, ethpm
from brownie.project.build import BUILD_KEYS, INTERFACE_KEYS, Build
from brownie.project.ethpm import get_deployment_addresses, get_manifest
from brownie.project.sources import Sources, get_pragma_spec
from brownie.utils import notify
# Sub-folders created inside a project's build directory.
BUILD_FOLDERS = ["contracts", "deployments", "interfaces"]
# Template URL for downloading brownie-mix project templates from Github.
MIXES_URL = "https://github.com/brownie-mix/{}-mix/archive/master.zip"
# Default .gitignore contents written into newly created projects.
GITIGNORE = """__pycache__
.history
.hypothesis/
build/
reports/
"""
# Default .gitattributes (language hints for GitHub's linguist).
GITATTRIBUTES = """*.sol linguist-language=Solidity
*.vy linguist-language=Python
"""
# Currently-active Project instances (appended in Project.load, removed in close).
_loaded_projects = []
class _ProjectBase:
    """Shared behaviour for Project and TempProject: contract compilation
    plus a dict-like interface over the ContractContainer objects."""

    _path: Optional[Path]
    _build_path: Optional[Path]
    _sources: Sources
    _build: Build

    def _compile(self, contract_sources: Dict, compiler_config: Dict, silent: bool) -> None:
        """Compile `contract_sources` and add the results to the build.

        When the project has an on-disk path, declared dependencies are
        installed first and the resulting artifacts are written to the
        project's build folder.
        """
        compiler_config.setdefault("solc", {})
        allow_paths = None
        cwd = os.getcwd()
        if self._path is not None:
            _install_dependencies(self._path)
            allow_paths = self._path.as_posix()
            # compile from within the project root so relative imports resolve
            os.chdir(self._path)
        try:
            build_json = compiler.compile_and_format(
                contract_sources,
                solc_version=compiler_config["solc"].get("version", None),
                optimize=compiler_config["solc"].get("optimize", None),
                runs=compiler_config["solc"].get("runs", None),
                evm_version=compiler_config["evm_version"],
                silent=silent,
                allow_paths=allow_paths,
                remappings=compiler_config["solc"].get("remappings", []),
                optimizer=compiler_config["solc"].get("optimizer", None),
            )
        finally:
            os.chdir(cwd)
        for data in build_json.values():
            if self._build_path is not None:
                # BUG FIX: the original nested double quotes inside a
                # double-quoted f-string (data["contractName"]) — a
                # SyntaxError on Python < 3.12.
                path = self._build_path.joinpath(f"contracts/{data['contractName']}.json")
                with path.open("w") as fp:
                    json.dump(data, fp, sort_keys=True, indent=2, default=sorted)
            self._build._add(data)

    def _create_containers(self) -> None:
        """Create InterfaceContainer / ContractContainer objects and expose
        each deployable contract as an attribute of the project."""
        self.interface = InterfaceContainer(self)
        self._containers: Dict = {}
        for key, data in self._build.items():
            if data["type"] == "interface":
                self.interface._add(data["contractName"], data["abi"])
            if data.get("bytecode"):
                container = ContractContainer(self, data)
                self._containers[key] = container
                setattr(self, container._name, container)

    def __getitem__(self, key: str) -> ContractContainer:
        return self._containers[key]

    def __iter__(self) -> Iterator[ContractContainer]:
        # iterate containers in deterministic (sorted-by-name) order
        return iter(self._containers[i] for i in sorted(self._containers))

    def __len__(self) -> int:
        return len(self._containers)

    def __contains__(self, item: ContractContainer) -> bool:
        return item in self._containers

    def dict(self) -> Dict:
        """Return a shallow copy of the container mapping."""
        return dict(self._containers)

    def keys(self) -> KeysView[Any]:
        return self._containers.keys()
class Project(_ProjectBase):
    """
    Top level dict-like container that holds data and objects related to
    a brownie project.
    Attributes:
        _path: Path object, absolute path to the project
        _name: Name that the project is loaded as
        _sources: project Source object
        _build: project Build object
    """
    def __init__(self, name: str, project_path: Path) -> None:
        # Resolve the project layout from its structure config, then load.
        self._path: Path = project_path
        self._structure = _load_project_structure_config(project_path)
        self._build_path: Path = project_path.joinpath(self._structure["build"])
        self._name = name
        self._active = False
        self.load()
    def load(self) -> None:
        """Compiles the project contracts, creates ContractContainer objects and
        populates the namespace."""
        if self._active:
            raise ProjectAlreadyLoaded("Project is already active")
        contract_sources = _load_sources(self._path, self._structure["contracts"], False)
        interface_sources = _load_sources(self._path, self._structure["interfaces"], True)
        self._sources = Sources(contract_sources, interface_sources)
        self._build = Build(self._sources)
        contract_list = self._sources.get_contract_list()
        # Reuse valid cached contract artifacts; prune stale or malformed ones.
        for path in list(self._build_path.glob("contracts/*.json")):
            try:
                with path.open() as fp:
                    build_json = json.load(fp)
            except json.JSONDecodeError:
                build_json = {}
            if not set(BUILD_KEYS).issubset(build_json) or path.stem not in contract_list:
                path.unlink()
                continue
            if isinstance(build_json["allSourcePaths"], list):
                # this handles the format change in v1.7.0, it can be removed in a future release
                path.unlink()
                test_path = self._build_path.joinpath("tests.json")
                if test_path.exists():
                    test_path.unlink()
                continue
            if not self._path.joinpath(build_json["sourcePath"]).exists():
                path.unlink()
                continue
            self._build._add(build_json)
        # Reuse valid cached interface artifacts, remembering their hashes.
        interface_hashes = {}
        interface_list = self._sources.get_interface_list()
        for path in list(self._build_path.glob("interfaces/*.json")):
            try:
                with path.open() as fp:
                    build_json = json.load(fp)
            except json.JSONDecodeError:
                build_json = {}
            if not set(INTERFACE_KEYS).issubset(build_json) or path.stem not in interface_list:
                path.unlink()
                continue
            self._build._add(build_json)
            interface_hashes[path.stem] = build_json["sha1"]
        self._compiler_config = _load_project_compiler_config(self._path)
        # compile updated sources, update build
        changed = self._get_changed_contracts(interface_hashes)
        self._compile(changed, self._compiler_config, False)
        self._compile_interfaces(interface_hashes)
        self._create_containers()
        self._load_deployments()
        # add project to namespaces, apply import blackmagic
        name = self._name
        self.__all__ = list(self._containers) + ["interface"]
        sys.modules[f"brownie.project.{name}"] = self # type: ignore
        sys.modules["brownie.project"].__dict__[name] = self
        sys.modules["brownie.project"].__all__.append(name) # type: ignore
        sys.modules["brownie.project"].__console_dir__.append(name) # type: ignore
        self._namespaces = [
            sys.modules["__main__"].__dict__,
            sys.modules["brownie.project"].__dict__,
        ]
        # register project for revert and reset
        _revert_register(self)
        self._active = True
        _loaded_projects.append(self)
    def _get_changed_contracts(self, compiled_hashes: Dict) -> Dict:
        """Return {source_path: source} for every contract that must be
        recompiled, removing the outdated build artifacts along the way."""
        # get list of changed interfaces and contracts
        new_hashes = self._sources.get_interface_hashes()
        interfaces = [k for k, v in new_hashes.items() if compiled_hashes.get(k, None) != v]
        contracts = [i for i in self._sources.get_contract_list() if self._compare_build_json(i)]
        # get dependents of changed sources
        final = set(contracts + interfaces)
        for contract_name in list(final):
            final.update(self._build.get_dependents(contract_name))
        # remove outdated build artifacts
        for name in [i for i in final if self._build.contains(i)]:
            self._build._remove(name)
        # get final list of changed source paths
        final.difference_update(interfaces)
        changed_set: Set = set(self._sources.get_source_path(i) for i in final)
        return {i: self._sources.get(i) for i in changed_set}
    def _compare_build_json(self, contract_name: str) -> bool:
        """Return True when the cached artifact for *contract_name* is stale
        (missing, source changed, or compiler settings changed)."""
        config = self._compiler_config
        # confirm that this contract was previously compiled
        try:
            source = self._sources.get(contract_name)
            build_json = self._build.get(contract_name)
        except KeyError:
            return True
        # compare source hashes
        if build_json["sha1"] != sha1(source.encode()).hexdigest():
            return True
        # compare compiler settings
        if _compare_settings(config, build_json["compiler"]):
            return True
        if build_json["language"] == "Solidity":
            # compare solc-specific compiler settings
            solc_config = config["solc"].copy()
            solc_config["remappings"] = None
            if _compare_settings(solc_config, build_json["compiler"]):
                return True
            # compare solc pragma against compiled version
            if Version(build_json["compiler"]["version"]) not in get_pragma_spec(source):
                return True
        return False
    def _compile_interfaces(self, compiled_hashes: Dict) -> None:
        """Regenerate ABI artifacts for interfaces whose source hash changed."""
        new_hashes = self._sources.get_interface_hashes()
        changed_paths = [
            self._sources.get_source_path(k)
            for k, v in new_hashes.items()
            if compiled_hashes.get(k, None) != v
        ]
        if not changed_paths:
            return
        print("Generating interface ABIs...")
        changed_sources = {i: self._sources.get(i) for i in changed_paths}
        abi_json = compiler.get_abi(
            changed_sources,
            allow_paths=self._path.as_posix(),
            remappings=self._compiler_config["solc"].get("remappings", []),
        )
        for name, abi in abi_json.items():
            with self._build_path.joinpath(f"interfaces/{name}.json").open("w") as fp:
                json.dump(abi, fp, sort_keys=True, indent=2, default=sorted)
            self._build._add(abi)
    def _load_deployments(self) -> None:
        """Attach previously saved deployment artifacts for the active chain
        to their ContractContainer objects."""
        if CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"]:
            return
        chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
        path = self._build_path.joinpath(f"deployments/{chainid}")
        path.mkdir(exist_ok=True)
        deployments = list(path.glob("*.json"))
        # oldest first, so the most recent deployment ends up at index 0 below
        deployments.sort(key=lambda k: k.stat().st_mtime)
        deployment_map = self._load_deployment_map()
        for build_json in deployments:
            with build_json.open() as fp:
                build = json.load(fp)
            contract_name = build["contractName"]
            if contract_name not in self._containers:
                build_json.unlink()
                continue
            if "pcMap" in build:
                contract = ProjectContract(self, build, build_json.stem)
            else:
                # artifact lacks debug info - fall back to an ABI-only Contract
                contract = Contract.from_abi( # type: ignore
                    contract_name, build_json.stem, build["abi"]
                )
                contract._project = self
            container = self._containers[contract_name]
            _add_contract(contract)
            container._contracts.append(contract)
            # update deployment map for the current chain
            instances = deployment_map.setdefault(chainid, {}).setdefault(contract_name, [])
            if build_json.stem in instances:
                instances.remove(build_json.stem)
            instances.insert(0, build_json.stem)
        self._save_deployment_map(deployment_map)
    def _load_deployment_map(self) -> Dict:
        """Read deployments/map.json, returning an empty dict when absent."""
        deployment_map: Dict = {}
        map_path = self._build_path.joinpath("deployments/map.json")
        if map_path.exists():
            with map_path.open("r") as fp:
                deployment_map = json.load(fp)
        return deployment_map
    def _save_deployment_map(self, deployment_map: Dict) -> None:
        """Persist the deployment map to deployments/map.json."""
        with self._build_path.joinpath("deployments/map.json").open("w") as fp:
            json.dump(deployment_map, fp, sort_keys=True, indent=2, default=sorted)
    def _remove_from_deployment_map(self, contract: ProjectContract) -> None:
        """Drop *contract* from the deployment map, pruning empty entries."""
        if CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"]:
            return
        chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
        deployment_map = self._load_deployment_map()
        try:
            deployment_map[chainid][contract._name].remove(contract.address)
            if not deployment_map[chainid][contract._name]:
                del deployment_map[chainid][contract._name]
            if not deployment_map[chainid]:
                del deployment_map[chainid]
        except (KeyError, ValueError):
            pass
        self._save_deployment_map(deployment_map)
    def _add_to_deployment_map(self, contract: ProjectContract) -> None:
        """Record *contract* as the most recent deployment for its chain."""
        if CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"]:
            return
        chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
        deployment_map = self._load_deployment_map()
        try:
            # avoid duplicates - the address is re-inserted at the front below
            deployment_map[chainid][contract._name].remove(contract.address)
        except (ValueError, KeyError):
            pass
        deployment_map.setdefault(chainid, {}).setdefault(contract._name, []).insert(
            0, contract.address
        )
        self._save_deployment_map(deployment_map)
    def _update_and_register(self, dict_: Any) -> None:
        """Copy the project's containers into *dict_* and track it so the
        names can be removed again on close()."""
        dict_.update(self)
        if "interface" not in dict_:
            dict_["interface"] = self.interface
        self._namespaces.append(dict_)
    def _add_to_main_namespace(self) -> None:
        # temporarily adds project objects to the main namespace
        brownie: Any = sys.modules["brownie"]
        if "interface" not in brownie.__dict__:
            brownie.__dict__["interface"] = self.interface
        brownie.__dict__.update(self._containers)
        brownie.__all__.extend(self.__all__)
    def _remove_from_main_namespace(self) -> None:
        # removes project objects from the main namespace
        brownie: Any = sys.modules["brownie"]
        if brownie.__dict__.get("interface") == self.interface:
            del brownie.__dict__["interface"]
        for key in self._containers:
            brownie.__dict__.pop(key, None)
        for key in self.__all__:
            if key in brownie.__all__:
                brownie.__all__.remove(key)
    def __repr__(self) -> str:
        return f"<Project '{self._name}'>"
    def load_config(self) -> None:
        """Loads the project config file settings"""
        if isinstance(self._path, Path):
            _load_project_config(self._path)
    def close(self, raises: bool = True) -> None:
        """Removes pointers to the project's ContractContainer objects and this object."""
        if not self._active:
            if not raises:
                return
            raise ProjectNotFound("Project is not currently loaded.")
        # remove objects from namespace
        for dict_ in self._namespaces:
            for key in [
                k
                for k, v in dict_.items()
                if v == self or (k in self and v == self[k]) # type: ignore
            ]:
                del dict_[key]
        # remove contracts
        for contract in [x for v in self._containers.values() for x in v._contracts]:
            _remove_contract(contract)
        for container in self._containers.values():
            container._contracts.clear()
        self._containers.clear()
        # undo black-magic
        self._remove_from_main_namespace()
        name = self._name
        del sys.modules[f"brownie.project.{name}"]
        sys.modules["brownie.project"].__all__.remove(name) # type: ignore
        sys.modules["brownie.project"].__console_dir__.remove(name) # type: ignore
        self._active = False
        _loaded_projects.remove(self)
        # clear paths
        try:
            sys.path.remove(str(self._path))
        except ValueError:
            pass
    def _clear_dev_deployments(self, height: int) -> None:
        """Delete dev-chain deployment artifacts created after block *height*
        (height == 0 clears everything) and update the deployment map."""
        path = self._build_path.joinpath("deployments/dev")
        if path.exists():
            deployment_map = self._load_deployment_map()
            for deployment in path.glob("*.json"):
                if height == 0:
                    deployment.unlink()
                else:
                    with deployment.open("r") as fp:
                        deployment_artifact = json.load(fp)
                    block_height = deployment_artifact["deployment"]["blockHeight"]
                    address = deployment_artifact["deployment"]["address"]
                    contract_name = deployment_artifact["contractName"]
                    if block_height > height:
                        deployment.unlink()
                        try:
                            deployment_map["dev"][contract_name].remove(address)
                        except (KeyError, ValueError):
                            pass
            if "dev" in deployment_map and (height == 0 or not deployment_map["dev"]):
                del deployment_map["dev"]
                shutil.rmtree(path)
            self._save_deployment_map(deployment_map)
    def _revert(self, height: int) -> None:
        # chain was reverted to *height*: drop newer dev deployments
        self._clear_dev_deployments(height)
    def _reset(self) -> None:
        # chain was reset: drop all dev deployments
        self._clear_dev_deployments(0)
class TempProject(_ProjectBase):
    """Simplified Project class used to hold temporary contracts that are
    compiled via project.compile_source"""
    def __init__(self, name: str, contract_sources: Dict, compiler_config: Dict) -> None:
        # No on-disk project: path and build path are None, so _compile
        # keeps all artifacts in memory only.
        self._path = None
        self._build_path = None
        self._name = name
        self._sources = Sources(contract_sources, {})
        self._build = Build(self._sources)
        self._compile(contract_sources, compiler_config, True)
        self._create_containers()
    def __repr__(self) -> str:
        return f"<TempProject '{self._name}'>"
def check_for_project(path: Union[Path, str] = ".") -> Optional[Path]:
    """Walk from *path* up through its parents and return the first folder
    that looks like a Brownie project, or None when nothing matches."""
    start = Path(path).resolve()
    for candidate in (start, *start.parents):
        structure = _load_project_structure_config(candidate)
        contracts_dir = candidate.joinpath(structure["contracts"])
        tests_dir = candidate.joinpath(structure["tests"])
        # A folder qualifies if it holds any Solidity/Vyper source file...
        has_sources = any(
            item.suffix in (".vy", ".sol") for item in contracts_dir.glob("**/*")
        )
        # ...or at least has both the contracts and tests directories.
        if has_sources or (contracts_dir.is_dir() and tests_dir.is_dir()):
            return candidate
    return None
def get_loaded_projects() -> List["Project"]:
    """Return a shallow copy of the currently loaded Project objects."""
    return list(_loaded_projects)
def new(
    project_path_str: str = ".", ignore_subfolder: bool = False, ignore_existing: bool = False
) -> str:
    """Initializes a new project.

    Args:
        project_path_str: Path to initialize the project at. If not exists, it will be created.
        ignore_subfolder: (deprecated)
        ignore_existing: If True, will not raise when initiating in a non-empty directory.

    Returns the path to the project as a string.
    """
    project_path = Path(project_path_str).resolve()
    if not ignore_existing and project_path.exists() and list(project_path.glob("*")):
        raise FileExistsError(f"Directory is not empty: {project_path}")
    # BUG FIX: also create missing intermediate directories - the docstring
    # promises the path is created, but mkdir(exist_ok=True) alone raises
    # FileNotFoundError when a parent folder does not exist.
    project_path.mkdir(parents=True, exist_ok=True)
    _create_folders(project_path)
    _create_gitfiles(project_path)
    _add_to_sys_path(project_path)
    return str(project_path)
def from_brownie_mix(
    project_name: str, project_path: Union[Path, str] = None, ignore_subfolder: bool = False
) -> str:
    """Initializes a new project via a template. Templates are downloaded from
    https://www.github.com/brownie-mix

    Args:
        project_name: Name of the template (a trailing "-mix" is optional).
        project_path: Path to initialize the project at. Defaults to
            ./<project_name> when not given.
        ignore_subfolder: (deprecated)

    Returns the path to the project as a string.
    """
    project_name = str(project_name).replace("-mix", "")
    url = MIXES_URL.format(project_name)
    if project_path is None:
        project_path = Path(".").joinpath(project_name)
    project_path = Path(project_path).resolve()
    if project_path.exists() and list(project_path.glob("*")):
        raise FileExistsError(f"Folder already exists - {project_path}")
    print(f"Downloading from {url}...")
    _stream_download(url, str(project_path.parent))
    # Github zipballs extract to "<name>-mix-master"; rename to the target path.
    project_path.parent.joinpath(project_name + "-mix-master").rename(project_path)
    _create_folders(project_path)
    _create_gitfiles(project_path)
    _add_to_sys_path(project_path)
    return str(project_path)
def from_ethpm(uri: str) -> "TempProject":
    """
    Generates a TempProject from an ethPM package.

    The package manifest and sources are fetched from *uri*; when a web3
    connection is active, known deployment addresses are attached to the
    resulting containers.
    """
    manifest = get_manifest(uri)
    compiler_config = {
        "evm_version": None,
        "solc": {"version": None, "optimize": True, "runs": 200},
    }
    project = TempProject(manifest["package_name"], manifest["sources"], compiler_config)
    if web3.isConnected():
        for contract_name in project.keys():
            for address in get_deployment_addresses(manifest, contract_name):
                project[contract_name].at(address)
    return project
def compile_source(
    source: str,
    solc_version: Optional[str] = None,
    optimize: bool = True,
    runs: Optional[int] = 200,
    evm_version: Optional[str] = None,
) -> "TempProject":
    """Compiles the given source code string and returns a TempProject container with
    the ContractContainer instances."""
    compiler_config: Dict = {"evm_version": evm_version}
    # An explicit solc version or a leading `pragma` marks the source as
    # Solidity; anything else is treated as Vyper.
    if solc_version is not None or source.lstrip().startswith("pragma"):
        compiler_config["solc"] = {"version": solc_version, "optimize": optimize, "runs": runs}
        return TempProject("TempSolcProject", {"<stdin>.sol": source}, compiler_config)
    return TempProject("TempVyperProject", {"<stdin>.vy": source}, compiler_config)
def load(project_path: Union[Path, str, None] = None, name: Optional[str] = None) -> "Project":
    """Loads a project and instantiates various related objects.

    Args:
        project_path: Path of the project to load. If None, will attempt to
            locate a project using check_for_project()
        name: Name to assign to the project. If None, the name is generated
            from the name of the project folder

    Returns a Project object.
    """
    # checks
    if project_path is None:
        project_path = check_for_project(".")
        # warn when the auto-discovered project root is not the cwd
        if project_path is not None and project_path != Path(".").absolute():
            warnings.warn(
                f"Loaded project has a root folder of '{project_path}' "
                "which is different from the current working directory",
                BrownieEnvironmentWarning,
            )
    elif Path(project_path).resolve() != check_for_project(project_path):
        project_path = None
    if project_path is None:
        raise ProjectNotFound("Could not find Brownie project")
    project_path = Path(project_path).resolve()
    # derive an alpha-only title-case name such as "FooProject" from the folder
    if name is None:
        name = project_path.name
        if not name.lower().endswith("project"):
            name += " project"
        name = "".join(i for i in name.title() if i.isalpha())
    if next((True for i in _loaded_projects if i._name == name), False):
        raise ProjectAlreadyLoaded("There is already a project loaded with this name")
    # paths
    _create_folders(project_path)
    _add_to_sys_path(project_path)
    # load sources and build
    return Project(name, project_path)
def _install_dependencies(path: Path) -> None:
    """Install every package the project at *path* declares as a dependency.

    Packages that are already installed raise FileExistsError, which is
    deliberately swallowed so installation is idempotent.
    """
    for dependency_id in _load_project_dependencies(path):
        try:
            install_package(dependency_id)
        except FileExistsError:
            continue
def install_package(package_id: str) -> str:
    """
    Install a package.

    Arguments
    ---------
    package_id : str
        Package ID or ethPM URI.

    Returns
    -------
    str
        ID of the installed package.
    """
    # ethPM URIs are recognised by their URL scheme; anything else is
    # treated as a Github "[ORG]/[REPO]@[VERSION]" identifier.
    scheme = urlparse(package_id).scheme
    if scheme in ("erc1319", "ethpm"):
        return _install_from_ethpm(package_id)
    return _install_from_github(package_id)
def _install_from_ethpm(uri: str) -> str:
    """Install a package from an ethPM registry URI.

    Creates a minimal project under the data folder, installs the manifest's
    sources into it and verifies that it loads; on any failure the partially
    installed package is removed before the error propagates.

    Returns the package ID as "<org>/<repo>@<version>".
    """
    manifest = get_manifest(uri)
    org = manifest["meta_brownie"]["registry_address"]
    repo = manifest["package_name"]
    version = manifest["version"]
    install_path = _get_data_folder().joinpath(f"packages/{org}")
    install_path.mkdir(exist_ok=True)
    install_path = install_path.joinpath(f"{repo}@{version}")
    if install_path.exists():
        # BUG FIX: corrected typo in the error message ("aleady" -> "already")
        raise FileExistsError("Package is already installed")
    try:
        new(str(install_path), ignore_existing=True)
        ethpm.install_package(install_path, uri)
        project = load(install_path)
        project.close()
    except Exception as e:
        # clean up the half-installed package before propagating the error
        shutil.rmtree(install_path)
        raise e
    return f"{org}/{repo}@{version}"
def _install_from_github(package_id: str) -> str:
try:
path, version = package_id.split("@")
org, repo = path.split("/")
except ValueError:
raise ValueError(
"Invalid package ID. Must be given as [ORG]/[REPO]@[VERSION]"
"\ne.g. 'OpenZeppelin/openzeppelin-contracts@v2.5.0'"
) from None
base_install_path = _get_data_folder().joinpath("packages")
install_path = base_install_path.joinpath(f"{org}")
install_path.mkdir(exist_ok=True)
install_path = install_path.joinpath(f"{repo}@{version}")
if install_path.exists():
raise FileExistsError("Package is aleady installed")
headers = REQUEST_HEADERS.copy()
if os.getenv("GITHUB_TOKEN"):
auth = b64encode(os.environ["GITHUB_TOKEN"].encode()).decode()
headers.update({"Authorization": "Basic {}".format(auth)})
response = requests.get(
f"https://api.github.com/repos/{org}/{repo}/tags?per_page=100", headers=headers
)
if response.status_code != 200:
msg = "Status {} when getting package versions from Github: '{}'".format(
response.status_code, response.json()["message"]
)
if response.status_code == 403:
msg += (
"\n\nIf this issue persists, generate a Github API token and store"
" it as the environment variable `GITHUB_TOKEN`:\n"
"https://github.blog/2013-05-16-personal-api-tokens/"
)
raise ConnectionError(msg)
data = response.json()
if not data:
raise ValueError("Github repository has no tags set")
org, repo = data[0]["zipball_url"].split("/")[3:5]
tags = [i["name"].lstrip("v") for i in data]
if version not in tags:
raise ValueError(
"Invalid version for this package. Available versions are:\n" + ", ".join(tags)
) from None
download_url = next(i["zipball_url"] for i in data if i["name"].lstrip("v") == version)
existing = list(install_path.parent.iterdir())
_stream_download(download_url, str(install_path.parent))
installed = next(i for i in install_path.parent.iterdir() if i not in existing)
shutil.move(installed, install_path)
try:
if not install_path.joinpath("brownie-config.yaml").exists():
brownie_config: Dict = {"project_structure": {}}
contract_paths = set(
i.relative_to(install_path).parts[0] for i in install_path.glob("**/*.sol")
)
contract_paths.update(
i.relative_to(install_path).parts[0] for i in install_path.glob("**/*.vy")
)
if not contract_paths:
raise InvalidPackage(f"{package_id} does not contain any .sol or .vy files")
if install_path.joinpath("contracts").is_dir():
brownie_config["project_structure"]["contracts"] = "contracts"
elif len(contract_paths) == 1:
brownie_config["project_structure"]["contracts"] = contract_paths.pop()
else:
raise InvalidPackage(
f"{package_id} has no `contracts/` subdirectory, and "
"multiple directories containing source files"
)
with install_path.joinpath("brownie-config.yaml").open("w") as fp:
yaml.dump(brownie_config, fp)
project = load(install_path)
project.close()
except InvalidPackage:
shutil.rmtree(install_path)
raise
except Exception as e:
notify(
"WARNING",
f"Unable to compile {package_id} due to a {type(e).__name__} - you may still be able to"
" import sources from the package, but will be unable to load the package directly.\n",
)
return f"{org}/{repo}@{version}"
def _create_gitfiles(project_path: Path) -> None:
gitignore = project_path.joinpath(".gitignore")
if not gitignore.exists():
with gitignore.open("w") as fp:
fp.write(GITIGNORE)
gitattributes = project_path.joinpath(".gitattributes")
if not gitattributes.exists():
with gitattributes.open("w") as fp:
fp.write(GITATTRIBUTES)
def _create_folders(project_path: Path) -> None:
structure = _load_project_structure_config(project_path)
for path in structure.values():
project_path.joinpath(path).mkdir(exist_ok=True)
build_path = project_path.joinpath(structure["build"])
for path in BUILD_FOLDERS:
build_path.joinpath(path).mkdir(exist_ok=True)
def _add_to_sys_path(project_path: Path) -> None:
project_path_string = str(project_path)
if project_path_string in sys.path:
return
sys.path.insert(0, project_path_string)
def _compare_settings(left: Dict, right: Dict) -> bool:
return next(
(True for k, v in left.items() if v and not isinstance(v, dict) and v != right.get(k)),
False,
)
def _load_sources(project_path: Path, subfolder: str, allow_json: bool) -> Dict:
contract_sources: Dict = {}
suffixes: Tuple = (".sol", ".vy")
if allow_json:
suffixes = suffixes + (".json",)
# one day this will be a beautiful plugin system
hooks: Optional[ModuleType] = None
if project_path.joinpath("brownie_hooks.py").exists():
hooks = importlib.import_module("brownie_hooks")
for path in project_path.glob(f"{subfolder}/**/*"):
if path.suffix not in suffixes:
continue
if next((i for i in path.relative_to(project_path).parts if i.startswith("_")), False):
continue
with path.open() as fp:
source = fp.read()
if hasattr(hooks, "brownie_load_source"):
source = hooks.brownie_load_source(path, source) # type: ignore
path_str: str = path.relative_to(project_path).as_posix()
contract_sources[path_str] = source
return contract_sources
def _stream_download(download_url: str, target_path: str) -> None:
response = requests.get(download_url, stream=True, headers=REQUEST_HEADERS)
if response.status_code == 404:
raise ConnectionError(
f"404 error when attempting to download from {download_url} - "
"are you sure this is a valid mix? https://github.com/brownie-mix"
)
if response.status_code != 200:
raise ConnectionError(
f"Received status code {response.status_code} when attempting "
f"to download from {download_url}"
)
total_size = int(response.headers.get("content-length", 0))
progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
content = bytes()
for data in response.iter_content(1024, decode_unicode=True):
progress_bar.update(len(data))
content += data
progress_bar.close()
with zipfile.ZipFile(BytesIO(content)) as zf:
zf.extractall(target_path)
| #!/usr/bin/python3
import importlib
import json
import os
import shutil
import sys
import warnings
import zipfile
from base64 import b64encode
from hashlib import sha1
from io import BytesIO
from pathlib import Path
from types import ModuleType
from typing import Any, Dict, Iterator, KeysView, List, Optional, Set, Tuple, Union
from urllib.parse import urlparse
import requests
import yaml
from semantic_version import Version
from tqdm import tqdm
from brownie._config import (
CONFIG,
REQUEST_HEADERS,
_get_data_folder,
_load_project_compiler_config,
_load_project_config,
_load_project_dependencies,
_load_project_structure_config,
)
from brownie.exceptions import (
BrownieEnvironmentWarning,
InvalidPackage,
ProjectAlreadyLoaded,
ProjectNotFound,
)
from brownie.network import web3
from brownie.network.contract import (
Contract,
ContractContainer,
InterfaceContainer,
ProjectContract,
)
from brownie.network.state import _add_contract, _remove_contract, _revert_register
from brownie.project import compiler, ethpm
from brownie.project.build import BUILD_KEYS, INTERFACE_KEYS, Build
from brownie.project.ethpm import get_deployment_addresses, get_manifest
from brownie.project.sources import Sources, get_pragma_spec
from brownie.utils import notify
BUILD_FOLDERS = ["contracts", "deployments", "interfaces"]
MIXES_URL = "https://github.com/brownie-mix/{}-mix/archive/master.zip"
GITIGNORE = """__pycache__
.history
.hypothesis/
build/
reports/
"""
GITATTRIBUTES = """*.sol linguist-language=Solidity
*.vy linguist-language=Python
"""
_loaded_projects = []
class _ProjectBase:
_path: Optional[Path]
_build_path: Optional[Path]
_sources: Sources
_build: Build
def _compile(self, contract_sources: Dict, compiler_config: Dict, silent: bool) -> None:
compiler_config.setdefault("solc", {})
allow_paths = None
cwd = os.getcwd()
if self._path is not None:
_install_dependencies(self._path)
allow_paths = self._path.as_posix()
os.chdir(self._path)
try:
build_json = compiler.compile_and_format(
contract_sources,
solc_version=compiler_config["solc"].get("version", None),
optimize=compiler_config["solc"].get("optimize", None),
runs=compiler_config["solc"].get("runs", None),
evm_version=compiler_config["evm_version"],
silent=silent,
allow_paths=allow_paths,
remappings=compiler_config["solc"].get("remappings", []),
optimizer=compiler_config["solc"].get("optimizer", None),
)
finally:
os.chdir(cwd)
for data in build_json.values():
if self._build_path is not None:
path = self._build_path.joinpath(f"contracts/{data['contractName']}.json")
with path.open("w") as fp:
json.dump(data, fp, sort_keys=True, indent=2, default=sorted)
self._build._add(data)
def _create_containers(self) -> None:
# create container objects
self.interface = InterfaceContainer(self)
self._containers: Dict = {}
for key, data in self._build.items():
if data["type"] == "interface":
self.interface._add(data["contractName"], data["abi"])
if data.get("bytecode"):
container = ContractContainer(self, data)
self._containers[key] = container
setattr(self, container._name, container)
def __getitem__(self, key: str) -> ContractContainer:
return self._containers[key]
def __iter__(self) -> Iterator[ContractContainer]:
return iter(self._containers[i] for i in sorted(self._containers))
def __len__(self) -> int:
return len(self._containers)
def __contains__(self, item: ContractContainer) -> bool:
return item in self._containers
def dict(self) -> Dict:
return dict(self._containers)
def keys(self) -> KeysView[Any]:
return self._containers.keys()
class Project(_ProjectBase):
"""
Top level dict-like container that holds data and objects related to
a brownie project.
Attributes:
_path: Path object, absolute path to the project
_name: Name that the project is loaded as
_sources: project Source object
_build: project Build object
"""
def __init__(self, name: str, project_path: Path) -> None:
self._path: Path = project_path
self._structure = _load_project_structure_config(project_path)
self._build_path: Path = project_path.joinpath(self._structure["build"])
self._name = name
self._active = False
self.load()
def load(self) -> None:
"""Compiles the project contracts, creates ContractContainer objects and
populates the namespace."""
if self._active:
raise ProjectAlreadyLoaded("Project is already active")
contract_sources = _load_sources(self._path, self._structure["contracts"], False)
interface_sources = _load_sources(self._path, self._structure["interfaces"], True)
self._sources = Sources(contract_sources, interface_sources)
self._build = Build(self._sources)
contract_list = self._sources.get_contract_list()
for path in list(self._build_path.glob("contracts/*.json")):
try:
with path.open() as fp:
build_json = json.load(fp)
except json.JSONDecodeError:
build_json = {}
if not set(BUILD_KEYS).issubset(build_json) or path.stem not in contract_list:
path.unlink()
continue
if isinstance(build_json["allSourcePaths"], list):
# this handles the format change in v1.7.0, it can be removed in a future release
path.unlink()
test_path = self._build_path.joinpath("tests.json")
if test_path.exists():
test_path.unlink()
continue
if not self._path.joinpath(build_json["sourcePath"]).exists():
path.unlink()
continue
self._build._add(build_json)
interface_hashes = {}
interface_list = self._sources.get_interface_list()
for path in list(self._build_path.glob("interfaces/*.json")):
try:
with path.open() as fp:
build_json = json.load(fp)
except json.JSONDecodeError:
build_json = {}
if not set(INTERFACE_KEYS).issubset(build_json) or path.stem not in interface_list:
path.unlink()
continue
self._build._add(build_json)
interface_hashes[path.stem] = build_json["sha1"]
self._compiler_config = _load_project_compiler_config(self._path)
# compile updated sources, update build
changed = self._get_changed_contracts(interface_hashes)
self._compile(changed, self._compiler_config, False)
self._compile_interfaces(interface_hashes)
self._create_containers()
self._load_deployments()
# add project to namespaces, apply import blackmagic
name = self._name
self.__all__ = list(self._containers) + ["interface"]
sys.modules[f"brownie.project.{name}"] = self # type: ignore
sys.modules["brownie.project"].__dict__[name] = self
sys.modules["brownie.project"].__all__.append(name) # type: ignore
sys.modules["brownie.project"].__console_dir__.append(name) # type: ignore
self._namespaces = [
sys.modules["__main__"].__dict__,
sys.modules["brownie.project"].__dict__,
]
# register project for revert and reset
_revert_register(self)
self._active = True
_loaded_projects.append(self)
def _get_changed_contracts(self, compiled_hashes: Dict) -> Dict:
# get list of changed interfaces and contracts
new_hashes = self._sources.get_interface_hashes()
interfaces = [k for k, v in new_hashes.items() if compiled_hashes.get(k, None) != v]
contracts = [i for i in self._sources.get_contract_list() if self._compare_build_json(i)]
# get dependents of changed sources
final = set(contracts + interfaces)
for contract_name in list(final):
final.update(self._build.get_dependents(contract_name))
# remove outdated build artifacts
for name in [i for i in final if self._build.contains(i)]:
self._build._remove(name)
# get final list of changed source paths
final.difference_update(interfaces)
changed_set: Set = set(self._sources.get_source_path(i) for i in final)
return {i: self._sources.get(i) for i in changed_set}
def _compare_build_json(self, contract_name: str) -> bool:
config = self._compiler_config
# confirm that this contract was previously compiled
try:
source = self._sources.get(contract_name)
build_json = self._build.get(contract_name)
except KeyError:
return True
# compare source hashes
if build_json["sha1"] != sha1(source.encode()).hexdigest():
return True
# compare compiler settings
if _compare_settings(config, build_json["compiler"]):
return True
if build_json["language"] == "Solidity":
# compare solc-specific compiler settings
solc_config = config["solc"].copy()
solc_config["remappings"] = None
if _compare_settings(solc_config, build_json["compiler"]):
return True
# compare solc pragma against compiled version
if Version(build_json["compiler"]["version"]) not in get_pragma_spec(source):
return True
return False
def _compile_interfaces(self, compiled_hashes: Dict) -> None:
new_hashes = self._sources.get_interface_hashes()
changed_paths = [
self._sources.get_source_path(k)
for k, v in new_hashes.items()
if compiled_hashes.get(k, None) != v
]
if not changed_paths:
return
print("Generating interface ABIs...")
changed_sources = {i: self._sources.get(i) for i in changed_paths}
abi_json = compiler.get_abi(
changed_sources,
allow_paths=self._path.as_posix(),
remappings=self._compiler_config["solc"].get("remappings", []),
)
for name, abi in abi_json.items():
with self._build_path.joinpath(f"interfaces/{name}.json").open("w") as fp:
json.dump(abi, fp, sort_keys=True, indent=2, default=sorted)
self._build._add(abi)
def _load_deployments(self) -> None:
if CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"]:
return
chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
path = self._build_path.joinpath(f"deployments/{chainid}")
path.mkdir(exist_ok=True)
deployments = list(path.glob("*.json"))
deployments.sort(key=lambda k: k.stat().st_mtime)
deployment_map = self._load_deployment_map()
for build_json in deployments:
with build_json.open() as fp:
build = json.load(fp)
contract_name = build["contractName"]
if contract_name not in self._containers:
build_json.unlink()
continue
if "pcMap" in build:
contract = ProjectContract(self, build, build_json.stem)
else:
contract = Contract.from_abi( # type: ignore
contract_name, build_json.stem, build["abi"]
)
contract._project = self
container = self._containers[contract_name]
_add_contract(contract)
container._contracts.append(contract)
# update deployment map for the current chain
instances = deployment_map.setdefault(chainid, {}).setdefault(contract_name, [])
if build_json.stem in instances:
instances.remove(build_json.stem)
instances.insert(0, build_json.stem)
self._save_deployment_map(deployment_map)
def _load_deployment_map(self) -> Dict:
deployment_map: Dict = {}
map_path = self._build_path.joinpath("deployments/map.json")
if map_path.exists():
with map_path.open("r") as fp:
deployment_map = json.load(fp)
return deployment_map
def _save_deployment_map(self, deployment_map: Dict) -> None:
with self._build_path.joinpath("deployments/map.json").open("w") as fp:
json.dump(deployment_map, fp, sort_keys=True, indent=2, default=sorted)
def _remove_from_deployment_map(self, contract: ProjectContract) -> None:
if CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"]:
return
chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
deployment_map = self._load_deployment_map()
try:
deployment_map[chainid][contract._name].remove(contract.address)
if not deployment_map[chainid][contract._name]:
del deployment_map[chainid][contract._name]
if not deployment_map[chainid]:
del deployment_map[chainid]
except (KeyError, ValueError):
pass
self._save_deployment_map(deployment_map)
def _add_to_deployment_map(self, contract: ProjectContract) -> None:
if CONFIG.network_type != "live" and not CONFIG.settings["dev_deployment_artifacts"]:
return
chainid = CONFIG.active_network["chainid"] if CONFIG.network_type == "live" else "dev"
deployment_map = self._load_deployment_map()
try:
deployment_map[chainid][contract._name].remove(contract.address)
except (ValueError, KeyError):
pass
deployment_map.setdefault(chainid, {}).setdefault(contract._name, []).insert(
0, contract.address
)
self._save_deployment_map(deployment_map)
def _update_and_register(self, dict_: Any) -> None:
dict_.update(self)
if "interface" not in dict_:
dict_["interface"] = self.interface
self._namespaces.append(dict_)
def _add_to_main_namespace(self) -> None:
# temporarily adds project objects to the main namespace
brownie: Any = sys.modules["brownie"]
if "interface" not in brownie.__dict__:
brownie.__dict__["interface"] = self.interface
brownie.__dict__.update(self._containers)
brownie.__all__.extend(self.__all__)
def _remove_from_main_namespace(self) -> None:
# removes project objects from the main namespace
brownie: Any = sys.modules["brownie"]
if brownie.__dict__.get("interface") == self.interface:
del brownie.__dict__["interface"]
for key in self._containers:
brownie.__dict__.pop(key, None)
for key in self.__all__:
if key in brownie.__all__:
brownie.__all__.remove(key)
def __repr__(self) -> str:
return f"<Project '{self._name}'>"
def load_config(self) -> None:
"""Loads the project config file settings"""
if isinstance(self._path, Path):
_load_project_config(self._path)
def close(self, raises: bool = True) -> None:
"""Removes pointers to the project's ContractContainer objects and this object."""
if not self._active:
if not raises:
return
raise ProjectNotFound("Project is not currently loaded.")
# remove objects from namespace
for dict_ in self._namespaces:
for key in [
k
for k, v in dict_.items()
if v == self or (k in self and v == self[k]) # type: ignore
]:
del dict_[key]
# remove contracts
for contract in [x for v in self._containers.values() for x in v._contracts]:
_remove_contract(contract)
for container in self._containers.values():
container._contracts.clear()
self._containers.clear()
# undo black-magic
self._remove_from_main_namespace()
name = self._name
del sys.modules[f"brownie.project.{name}"]
sys.modules["brownie.project"].__all__.remove(name) # type: ignore
sys.modules["brownie.project"].__console_dir__.remove(name) # type: ignore
self._active = False
_loaded_projects.remove(self)
# clear paths
try:
sys.path.remove(str(self._path))
except ValueError:
pass
def _clear_dev_deployments(self, height: int) -> None:
path = self._build_path.joinpath("deployments/dev")
if path.exists():
deployment_map = self._load_deployment_map()
for deployment in path.glob("*.json"):
if height == 0:
deployment.unlink()
else:
with deployment.open("r") as fp:
deployment_artifact = json.load(fp)
block_height = deployment_artifact["deployment"]["blockHeight"]
address = deployment_artifact["deployment"]["address"]
contract_name = deployment_artifact["contractName"]
if block_height > height:
deployment.unlink()
try:
deployment_map["dev"][contract_name].remove(address)
except (KeyError, ValueError):
pass
if "dev" in deployment_map and (height == 0 or not deployment_map["dev"]):
del deployment_map["dev"]
shutil.rmtree(path)
self._save_deployment_map(deployment_map)
def _revert(self, height: int) -> None:
self._clear_dev_deployments(height)
def _reset(self) -> None:
self._clear_dev_deployments(0)
class TempProject(_ProjectBase):
"""Simplified Project class used to hold temporary contracts that are
compiled via project.compile_source"""
def __init__(self, name: str, contract_sources: Dict, compiler_config: Dict) -> None:
self._path = None
self._build_path = None
self._name = name
self._sources = Sources(contract_sources, {})
self._build = Build(self._sources)
self._compile(contract_sources, compiler_config, True)
self._create_containers()
def __repr__(self) -> str:
return f"<TempProject '{self._name}'>"
def check_for_project(path: Union[Path, str] = ".") -> Optional[Path]:
"""Checks for a Brownie project."""
path = Path(path).resolve()
for folder in [path] + list(path.parents):
structure_config = _load_project_structure_config(folder)
contracts_path = folder.joinpath(structure_config["contracts"])
tests_path = folder.joinpath(structure_config["tests"])
if next((i for i in contracts_path.glob("**/*") if i.suffix in (".vy", ".sol")), None):
return folder
if contracts_path.is_dir() and tests_path.is_dir():
return folder
return None
def get_loaded_projects() -> List["Project"]:
"""Returns a list of currently loaded Project objects."""
return _loaded_projects.copy()
def new(
project_path_str: str = ".", ignore_subfolder: bool = False, ignore_existing: bool = False
) -> str:
"""Initializes a new project.
Args:
project_path: Path to initialize the project at. If not exists, it will be created.
ignore_subfolder: (deprecated)
ignore_existing: If True, will not raise when initiating in a non-empty directory.
Returns the path to the project as a string.
"""
project_path = Path(project_path_str).resolve()
if not ignore_existing and project_path.exists() and list(project_path.glob("*")):
raise FileExistsError(f"Directory is not empty: {project_path}")
project_path.mkdir(exist_ok=True)
_create_folders(project_path)
_create_gitfiles(project_path)
_add_to_sys_path(project_path)
return str(project_path)
def from_brownie_mix(
project_name: str, project_path: Union[Path, str] = None, ignore_subfolder: bool = False
) -> str:
"""Initializes a new project via a template. Templates are downloaded from
https://www.github.com/brownie-mix
Args:
project_path: Path to initialize the project at.
ignore_subfolders: (deprecated)
Returns the path to the project as a string.
"""
project_name = str(project_name).replace("-mix", "")
url = MIXES_URL.format(project_name)
if project_path is None:
project_path = Path(".").joinpath(project_name)
project_path = Path(project_path).resolve()
if project_path.exists() and list(project_path.glob("*")):
raise FileExistsError(f"Folder already exists - {project_path}")
print(f"Downloading from {url}...")
_stream_download(url, str(project_path.parent))
project_path.parent.joinpath(project_name + "-mix-master").rename(project_path)
_create_folders(project_path)
_create_gitfiles(project_path)
_add_to_sys_path(project_path)
return str(project_path)
def from_ethpm(uri: str) -> "TempProject":
"""
Generates a TempProject from an ethPM package.
"""
manifest = get_manifest(uri)
compiler_config = {
"evm_version": None,
"solc": {"version": None, "optimize": True, "runs": 200},
}
project = TempProject(manifest["package_name"], manifest["sources"], compiler_config)
if web3.isConnected():
for contract_name in project.keys():
for address in get_deployment_addresses(manifest, contract_name):
project[contract_name].at(address)
return project
def compile_source(
source: str,
solc_version: Optional[str] = None,
optimize: bool = True,
runs: Optional[int] = 200,
evm_version: Optional[str] = None,
) -> "TempProject":
"""Compiles the given source code string and returns a TempProject container with
the ContractContainer instances."""
compiler_config: Dict = {"evm_version": evm_version}
if solc_version is not None or source.lstrip().startswith("pragma"):
compiler_config["solc"] = {"version": solc_version, "optimize": optimize, "runs": runs}
return TempProject("TempSolcProject", {"<stdin>.sol": source}, compiler_config)
return TempProject("TempVyperProject", {"<stdin>.vy": source}, compiler_config)
def load(project_path: Union[Path, str, None] = None, name: Optional[str] = None) -> "Project":
"""Loads a project and instantiates various related objects.
Args:
project_path: Path of the project to load. If None, will attempt to
locate a project using check_for_project()
name: Name to assign to the project. If None, the name is generated
from the name of the project folder
Returns a Project object.
"""
# checks
if project_path is None:
project_path = check_for_project(".")
if project_path is not None and project_path != Path(".").absolute():
warnings.warn(
f"Loaded project has a root folder of '{project_path}' "
"which is different from the current working directory",
BrownieEnvironmentWarning,
)
elif Path(project_path).resolve() != check_for_project(project_path):
project_path = None
if project_path is None:
raise ProjectNotFound("Could not find Brownie project")
project_path = Path(project_path).resolve()
if name is None:
name = project_path.name
if not name.lower().endswith("project"):
name += " project"
name = "".join(i for i in name.title() if i.isalpha())
if next((True for i in _loaded_projects if i._name == name), False):
raise ProjectAlreadyLoaded("There is already a project loaded with this name")
# paths
_create_folders(project_path)
_add_to_sys_path(project_path)
# load sources and build
return Project(name, project_path)
def _install_dependencies(path: Path) -> None:
for package_id in _load_project_dependencies(path):
try:
install_package(package_id)
except FileExistsError:
pass
def install_package(package_id: str) -> str:
"""
Install a package.
Arguments
---------
package_id : str
Package ID or ethPM URI.
Returns
-------
str
ID of the installed package.
"""
if urlparse(package_id).scheme in ("erc1319", "ethpm"):
return _install_from_ethpm(package_id)
else:
return _install_from_github(package_id)
def _install_from_ethpm(uri: str) -> str:
manifest = get_manifest(uri)
org = manifest["meta_brownie"]["registry_address"]
repo = manifest["package_name"]
version = manifest["version"]
install_path = _get_data_folder().joinpath(f"packages/{org}")
install_path.mkdir(exist_ok=True)
install_path = install_path.joinpath(f"{repo}@{version}")
if install_path.exists():
raise FileExistsError("Package is aleady installed")
try:
new(str(install_path), ignore_existing=True)
ethpm.install_package(install_path, uri)
project = load(install_path)
project.close()
except Exception as e:
shutil.rmtree(install_path)
raise e
return f"{org}/{repo}@{version}"
def _install_from_github(package_id: str) -> str:
try:
path, version = package_id.split("@")
org, repo = path.split("/")
except ValueError:
raise ValueError(
"Invalid package ID. Must be given as [ORG]/[REPO]@[VERSION]"
"\ne.g. 'OpenZeppelin/openzeppelin-contracts@v2.5.0'"
) from None
base_install_path = _get_data_folder().joinpath("packages")
install_path = base_install_path.joinpath(f"{org}")
install_path.mkdir(exist_ok=True)
install_path = install_path.joinpath(f"{repo}@{version}")
if install_path.exists():
raise FileExistsError("Package is aleady installed")
headers = REQUEST_HEADERS.copy()
if os.getenv("GITHUB_TOKEN"):
auth = b64encode(os.environ["GITHUB_TOKEN"].encode()).decode()
headers.update({"Authorization": "Basic {}".format(auth)})
response = requests.get(
f"https://api.github.com/repos/{org}/{repo}/tags?per_page=100", headers=headers
)
if response.status_code != 200:
msg = "Status {} when getting package versions from Github: '{}'".format(
response.status_code, response.json()["message"]
)
if response.status_code == 403:
msg += (
"\n\nIf this issue persists, generate a Github API token and store"
" it as the environment variable `GITHUB_TOKEN`:\n"
"https://github.blog/2013-05-16-personal-api-tokens/"
)
raise ConnectionError(msg)
data = response.json()
if not data:
raise ValueError("Github repository has no tags set")
org, repo = data[0]["zipball_url"].split("/")[3:5]
tags = [i["name"].lstrip("v") for i in data]
if version not in tags:
raise ValueError(
"Invalid version for this package. Available versions are:\n" + ", ".join(tags)
) from None
download_url = next(i["zipball_url"] for i in data if i["name"].lstrip("v") == version)
existing = list(install_path.parent.iterdir())
_stream_download(download_url, str(install_path.parent))
installed = next(i for i in install_path.parent.iterdir() if i not in existing)
shutil.move(installed, install_path)
try:
if not install_path.joinpath("brownie-config.yaml").exists():
brownie_config: Dict = {"project_structure": {}}
contract_paths = set(
i.relative_to(install_path).parts[0] for i in install_path.glob("**/*.sol")
)
contract_paths.update(
i.relative_to(install_path).parts[0] for i in install_path.glob("**/*.vy")
)
if not contract_paths:
raise InvalidPackage(f"{package_id} does not contain any .sol or .vy files")
if install_path.joinpath("contracts").is_dir():
brownie_config["project_structure"]["contracts"] = "contracts"
elif len(contract_paths) == 1:
brownie_config["project_structure"]["contracts"] = contract_paths.pop()
else:
raise InvalidPackage(
f"{package_id} has no `contracts/` subdirectory, and "
"multiple directories containing source files"
)
with install_path.joinpath("brownie-config.yaml").open("w") as fp:
yaml.dump(brownie_config, fp)
project = load(install_path)
project.close()
except InvalidPackage:
shutil.rmtree(install_path)
raise
except Exception as e:
notify(
"WARNING",
f"Unable to compile {package_id} due to a {type(e).__name__} - you may still be able to"
" import sources from the package, but will be unable to load the package directly.\n",
)
return f"{org}/{repo}@{version}"
def _create_gitfiles(project_path: Path) -> None:
gitignore = project_path.joinpath(".gitignore")
if not gitignore.exists():
with gitignore.open("w") as fp:
fp.write(GITIGNORE)
gitattributes = project_path.joinpath(".gitattributes")
if not gitattributes.exists():
with gitattributes.open("w") as fp:
fp.write(GITATTRIBUTES)
def _create_folders(project_path: Path) -> None:
structure = _load_project_structure_config(project_path)
for path in structure.values():
project_path.joinpath(path).mkdir(exist_ok=True)
build_path = project_path.joinpath(structure["build"])
for path in BUILD_FOLDERS:
build_path.joinpath(path).mkdir(exist_ok=True)
def _add_to_sys_path(project_path: Path) -> None:
project_path_string = str(project_path)
if project_path_string in sys.path:
return
sys.path.insert(0, project_path_string)
def _compare_settings(left: Dict, right: Dict) -> bool:
return next(
(True for k, v in left.items() if v and not isinstance(v, dict) and v != right.get(k)),
False,
)
def _load_sources(project_path: Path, subfolder: str, allow_json: bool) -> Dict:
contract_sources: Dict = {}
suffixes: Tuple = (".sol", ".vy")
if allow_json:
suffixes = suffixes + (".json",)
# one day this will be a beautiful plugin system
hooks: Optional[ModuleType] = None
if project_path.joinpath("brownie_hooks.py").exists():
hooks = importlib.import_module("brownie_hooks")
for path in project_path.glob(f"{subfolder}/**/*"):
if path.suffix not in suffixes:
continue
if next((i for i in path.relative_to(project_path).parts if i.startswith("_")), False):
continue
with path.open() as fp:
source = fp.read()
if hasattr(hooks, "brownie_load_source"):
source = hooks.brownie_load_source(path, source) # type: ignore
path_str: str = path.relative_to(project_path).as_posix()
contract_sources[path_str] = source
return contract_sources
def _stream_download(download_url: str, target_path: str) -> None:
    """Download a zip archive from `download_url` and extract it into `target_path`.

    Shows a tqdm progress bar sized from the Content-Length header (0 when absent).

    Raises:
        ConnectionError: on a 404 (likely an invalid mix name) or any other
            non-200 status code.
    """
    response = requests.get(download_url, stream=True, headers=REQUEST_HEADERS)
    if response.status_code == 404:
        raise ConnectionError(
            f"404 error when attempting to download from {download_url} - "
            "are you sure this is a valid mix? https://github.com/brownie-mix"
        )
    if response.status_code != 200:
        raise ConnectionError(
            f"Received status code {response.status_code} when attempting "
            f"to download from {download_url}"
        )
    total_size = int(response.headers.get("content-length", 0))
    progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
    # `decode_unicode=True` was removed: the payload is a binary zip archive,
    # and decoding (triggered whenever the server reports a charset) would
    # yield `str` chunks that cannot be appended to a bytes buffer.
    # A bytearray also avoids quadratic `bytes +=` concatenation.
    content = bytearray()
    for data in response.iter_content(1024):
        progress_bar.update(len(data))
        content.extend(data)
    progress_bar.close()
    with zipfile.ZipFile(BytesIO(bytes(content))) as zf:
        zf.extractall(target_path)
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_showdoc.ipynb (unless otherwise specified).
__all__ = ['is_enum', 'is_lib_module', 're_digits_first', 'try_external_doc_link', 'is_doc_name', 'doc_link',
'add_doc_links', 'colab_link', 'get_nb_source_link', 'nb_source_link', 'type_repr', 'format_param',
'is_source_available', 'show_doc', 'md2html', 'get_doc_link', 'doc']
# Cell
from .imports import *
from .export import *
from .sync import *
from nbconvert import HTMLExporter
from fastcore.docments import docments, isclass, _clean_comment, _tokens, _param_locs, _get_comment
from fastcore.utils import IN_NOTEBOOK
from fastcore.xtras import get_source_link, _unwrapped_type_dispatch_func
import string
from tokenize import COMMENT
if IN_NOTEBOOK:
from IPython.display import Markdown,display
from IPython.core import page
# Cell
def is_enum(cls):
    "Check if `cls` is an enum or another type of class"
    # Exact-type membership: only `enum.Enum` / `enum.EnumMeta` themselves match.
    return type(cls) in {enum.Enum, enum.EnumMeta}
# Cell
def is_lib_module(name):
    "Test if `name` is a library module."
    # Leading underscore marks private modules, which are never doc modules.
    if name.startswith('_'): return False
    try:
        _ = importlib.import_module(f'{get_config().lib_name}.{name}')
        return True
    # Any failure (missing module, import-time error, missing config) means
    # `name` is not an importable library module. `Exception` rather than a
    # bare `except` so KeyboardInterrupt/SystemExit still propagate.
    except Exception: return False
# Cell
# Matches a leading notebook-ordering prefix such as "02_" or "10a_".
re_digits_first = re.compile('^[0-9]+[a-z]*_')
# Cell
def try_external_doc_link(name, packages):
    "Try to find a doc link for `name` in `packages`"
    for p in packages:
        try:
            mod = importlib.import_module(f"{p}._nbdev")
            try_pack = source_nb(name, is_name=True, mod=mod)
            if try_pack:
                page = re_digits_first.sub('', try_pack).replace('.ipynb', '')
                return f'{mod.doc_url}{page}#{name}'
        # A package without a `_nbdev` module cannot host the doc page, but a
        # later package still might: keep searching instead of giving up (the
        # original returned None as soon as one package was missing).
        except ModuleNotFoundError: continue
    return None
# Cell
def is_doc_name(name):
    "Test if `name` corresponds to a notebook that could be converted to a doc page"
    # A match requires an exact `<name>.ipynb` once the ordering prefix is stripped.
    for f in get_config().path("nbs_path").glob(f'*{name}.ipynb'):
        if re_digits_first.sub('', f.name) == f'{name}.ipynb': return True
    return False
# Cell
def doc_link(name, include_bt=True):
    """Create link to documentation for `name`.

    Returns a markdown link when `name` is a library module, a locally
    defined symbol, or covered by `custom_doc_links`; otherwise returns the
    name unchanged (backticked unless `include_bt` is False).
    """
    cname = f'`{name}`' if include_bt else name
    try:
        #Link to modules
        if is_lib_module(name) and is_doc_name(name): return f"[{cname}]({get_config().doc_baseurl}{name}.html)"
        #Link to local functions
        try_local = source_nb(name, is_name=True)
        if try_local:
            page = re_digits_first.sub('', try_local).replace('.ipynb', '')
            return f'[{cname}]({get_config().doc_baseurl}{page}.html#{name})'
        ##Custom links
        mod = get_nbdev_module()
        link = mod.custom_doc_links(name)
        return f'[{cname}]({link})' if link is not None else cname
    # Best-effort: any lookup failure falls back to the plain name.
    except: return cname
# Cell
_re_backticks = re.compile(r"""
# Catches any link of the form \[`obj`\](old_link) or just `obj`,
# to either update old links or add the link to the docs of obj
\[` # Opening [ and `
([^`]*) # Catching group with anything but a `
`\] # ` then closing ]
(?: # Beginning of non-catching group
\( # Opening (
[^)]* # Anything but a closing )
\) # Closing )
) # End of non-catching group
| # OR
` # Opening `
([^`]*) # Anything but a `
` # Closing `
""", re.VERBOSE)
# Cell
def add_doc_links(text, elt=None):
    "Search for doc links for any item between backticks in `text` and insert them"
    def _insert(match):
        bare = match.group(2)
        try:
            # Parameter names of `elt` stay as plain backticked text.
            if bare in inspect.signature(elt).parameters: return f'`{bare}`'
        except: pass
        return doc_link(match.group(1) or bare)
    return _re_backticks.sub(_insert, text)
# Cell
_re_header = re.compile(r"""
# Catches any header in markdown with the title in group 1
^\s* # Beginning of text followed by any number of whitespace
\#+ # One # or more
\s* # Any number of whitespace
(.*) # Catching group with anything
$ # End of text
""", re.VERBOSE)
# Cell
def colab_link(path):
    "Get a link to the notebook at `path` on Colab"
    cfg = get_config()
    # Pull the nbs dir name out of the f-string: the original nested single
    # quotes inside a single-quoted f-string, which is only legal on
    # Python 3.12+ (PEP 701) and a SyntaxError on older interpreters.
    nb_dir = cfg.path("nbs_path").name
    res = f'https://colab.research.google.com/github/{cfg.user}/{cfg.lib_name}/blob/{cfg.branch}/{nb_dir}/{path}.ipynb'
    display(Markdown(f'[Open `{path}` in Colab]({res})'))
# Cell
def get_nb_source_link(func, local=False, is_name=None):
    """Return a link to the notebook where `func` is defined.

    When `local` is False the link is prefixed with an nbviewer URL derived
    from the configured git URL; otherwise it is relative. Falls back to
    `get_source_link`/`get_function_source` when no notebook source is found.
    """
    func = _unwrapped_type_dispatch_func(func)
    pref = '' if local else get_config().git_url.replace('github.com', 'nbviewer.jupyter.org/github')+ get_config().path("nbs_path").name+'/'
    is_name = is_name or isinstance(func, str)
    src = source_nb(func, is_name=is_name, return_all=True)
    if src is None: return '' if is_name else get_source_link(func)
    find_name,nb_name = src
    nb = read_nb(nb_name)
    # Raw f-strings: the originals relied on `\s` not being a recognised
    # escape, which raises a SyntaxWarning on modern Python.
    pat = re.compile(rf'^{find_name}\s+=|^(def|class)\s+{find_name}\s*\(', re.MULTILINE)
    if len(find_name.split('.')) == 2:
        # `Class.method` names may instead come from an `@patch` definition.
        clas,func = find_name.split('.')
        pat2 = re.compile(rf'@patch\s*\ndef\s+{func}\s*\([^:]*:\s*{clas}\s*(?:,|\))')
    else: pat2 = None
    for i,cell in enumerate(nb['cells']):
        if cell['cell_type'] == 'code':
            if re.search(pat, cell['source']): break
            if pat2 is not None and re.search(pat2, cell['source']): break
    if re.search(pat, cell['source']) is None and (pat2 is not None and re.search(pat2, cell['source']) is None):
        return '' if is_name else get_function_source(func)
    # Walk backwards from the defining cell to the nearest markdown header so
    # the link can anchor to it. (The unused `header_pat` local was removed.)
    while i >= 0:
        cell = nb['cells'][i]
        if cell['cell_type'] == 'markdown' and _re_header.search(cell['source']):
            title = _re_header.search(cell['source']).groups()[0]
            anchor = '-'.join([s for s in title.split(' ') if len(s) > 0])
            return f'{pref}{nb_name}#{anchor}'
        i-=1
    return f'{pref}{nb_name}'
# Cell
def nb_source_link(func, is_name=None, disp=True, local=True):
    "Show a relative link to the notebook where `func` is defined"
    is_name = is_name or isinstance(func, str)
    label = func if is_name else qual_name(func)
    link = get_nb_source_link(func, local=local, is_name=is_name)
    if not local: label = f'{label} (GitHub)'
    if disp: display(Markdown(f'[{label}]({link})'))
    else: return link
# Cell
from fastcore.script import Param
# Cell
def _format_annos(anno, highlight=False):
    "Returns a clean string representation of `anno` from either the `__qualname__` if it is a base class, or `str()` if not"
    annos = listify(anno)
    if len(annos) == 0: return "None" # If anno is none, listify has a length of 0
    # Multiple annotations render as a parenthesised, comma-separated tuple.
    new_anno = "(" if len(annos) > 1 else ""
    # `<` in str(o) marks reprs like "<class 'int'>"; prefer the qualname then.
    def _inner(o): return getattr(o, '__qualname__', str(o)) if '<' in str(o) else str(o)
    for i, anno in enumerate(annos):
        # `highlight` wraps each entry in a documentation link.
        new_anno += _inner(anno) if not highlight else f'{doc_link(_inner(anno))}'
        # if "." in new_anno: new_anno = new_anno.split('.')[-1]
        if len(annos) > 1 and i < len(annos) - 1:
            new_anno += ', '
    return f'{new_anno})' if len(annos) > 1 else new_anno
# Cell
def type_repr(t):
    "Representation of type `t` (in a type annotation)"
    # A `Param` wraps a help string rather than a type.
    if (isinstance(t, Param)): return f'"{t.help}"'
    if getattr(t, '__args__', None):
        args = t.__args__
        # Union[X, None] renders as Optional[X].
        if len(args)==2 and args[1] == type(None):
            return f'`Optional`\[{type_repr(args[0])}\]'
        reprs = ', '.join([_format_annos(o, highlight=True) for o in args])
        return f'{doc_link(get_name(t))}\[{reprs}\]'
    else: return doc_link(_format_annos(t))
# Cell
# Markdown-escaped prefixes for *args / **kwargs parameters.
_arg_prefixes = {inspect._VAR_POSITIONAL: '\*', inspect._VAR_KEYWORD:'\*\*'}
def format_param(p):
    "Formats function param to `param:Type=val` with font weights: param=bold, val=italic"
    arg_prefix = _arg_prefixes.get(p.kind, '') # asterisk prefix for *args and **kwargs
    res = f"**{arg_prefix}`{p.name}`**"
    if hasattr(p, 'annotation') and p.annotation != p.empty: res += f':{type_repr(p.annotation)}'
    if p.default != p.empty:
        default = getattr(p.default, 'func', p.default) #For partials
        # Prefer the callable's name; fall back to repr for plain values.
        if hasattr(default,'__name__'): default = getattr(default, '__name__')
        else: default = repr(default)
        if is_enum(default.__class__): #Enum have a crappy repr
            res += f'=*`{default.__class__.__name__}.{default.name}`*'
        else: res += f'=*`{default}`*'
    return res
# Cell
def _format_enum_doc(enum, full_name):
"Formatted `enum` definition to show in documentation"
vals = ', '.join(enum.__members__.keys())
return f'<code>{full_name}</code>',f'<code>Enum</code> = [{vals}]'
# Cell
def _escape_chars(s):
return s.replace('_', '\_')
def _format_func_doc(func, full_name=None):
"Formatted `func` definition to show in documentation"
try:
sig = inspect.signature(func)
fmt_params = [format_param(param) for name,param
in sig.parameters.items() if name not in ('self','cls')]
except: fmt_params = []
name = f'<code>{full_name or func.__name__}</code>'
arg_str = f"({", ".join(fmt_params)})"
f_name = f"<code>class</code> {name}" if inspect.isclass(func) else name
return f'{f_name}',f'{name}{arg_str}'
# Cell
def _format_cls_doc(cls, full_name):
    "Formatted `cls` definition to show in documentation"
    # NOTE(review): assumes getclasstree's last entry holds `cls` with its
    # bases, so this picks the first base class - confirm against inspect docs.
    parent_class = inspect.getclasstree([cls])[-1][0][1][0]
    name,args = _format_func_doc(cls, full_name)
    # Show inheritance for everything except direct subclasses of `object`.
    if parent_class != object: args += f' :: {doc_link(get_name(parent_class))}'
    return name,args
# Cell
def _has_docment(elt):
    # True when at least one parameter (or the return) of `elt` carries a
    # docment-style trailing comment (fastcore's `# description` convention).
    comments = {o.start[0]:_clean_comment(o.string) for o in _tokens(elt) if o.type==COMMENT}
    params = _param_locs(elt, returns=True)
    comments = [_get_comment(line,arg,comments,params) for line,arg in params.items()]
    return any(c is not None for c in comments)
# Cell
def _generate_arg_string(argument_dict, has_docment=False, monospace=False):
    """Turn a dictionary of argument information into a markdown table.

    `argument_dict` maps parameter names to dicts with `anno`, `default` and
    `docment` keys (as produced by `fastcore.docments`). The `return` entry
    is skipped. `monospace` is accepted for interface compatibility.
    Note: mutates the `item` dicts in place (default/docment rewriting).
    """
    arg_string = '||Type|Default|'
    border_string = '|---|---|---|'
    if has_docment:
        arg_string += 'Details|'
        border_string += '---|'
    arg_string+= f'\n{border_string}\n'
    for key, item in argument_dict.items():
        is_required=True
        if key == 'return': continue
        if item['default'] != inspect._empty:
            if item['default'] == '':
                item['default'] = '""'  # render the empty-string default visibly
            is_required = False
        arg_string += f"|**`{key}`**|"
        details_string = ""
        if item['anno'] is None: item['anno'] = NoneType
        if (item["default"] is None and item['anno'] == NoneType) or item['anno'] == inspect._empty:
            details_string += "|"
        else:
            # Single quotes inside these f-strings: the original nested double
            # quotes in double-quoted f-strings, a SyntaxError before
            # Python 3.12 (PEP 701).
            details_string += f"`{_format_annos(item['anno']).replace('|', 'or')}`|"
        details_string += "|" if is_required else f"`{_format_annos(item['default'])}`|"
        if has_docment:
            if item['docment']:
                item['docment'] = item['docment'].replace('\n', '<br />')
            details_string += f"{item['docment']}|" if item['docment'] is not None else "*No Content*|"
        arg_string += add_doc_links(details_string)
        arg_string += '\n'
    return arg_string
# Cell
def _generate_return_string(return_dict:dict, has_docment=False):
    """Turn a dictionary of return information into a markdown table row.

    Returns '' when there is neither an annotation nor a docment for the
    return value. Note: mutates `return_dict` in place.
    """
    if return_dict['anno'] is None:
        if not return_dict['docment']: return ''
        else: return_dict['anno'] = NoneType
    anno = _format_annos(return_dict['anno']).replace('|', 'or')
    return_string = f"|**Returns**|`{anno}`||"
    if has_docment:
        if return_dict['docment']:
            # Keep the table row on a single line.
            return_dict['docment'] = return_dict['docment'].replace('\n', '<br />')
        else: return_dict['docment'] = ''
    # Single quotes inside the f-string: the original nested double quotes in
    # a double-quoted f-string, a SyntaxError before Python 3.12 (PEP 701).
    return return_string if not has_docment else f"{return_string}{return_dict['docment']}|"
# Cell
def _is_static(func):
    "Checks whether `func` is a static method in a class"
    qualified = qual_name(func)
    parts = qualified.split('.')
    if len(parts) != 2: return False
    cls_name, meth_name = parts
    owner = getattr(sys.modules[func.__module__], cls_name)
    return isinstance(inspect.getattr_static(owner, meth_name), staticmethod)
# Cell
def _format_args(elt, ment_dict:dict = None, kwargs = [], monospace=False, is_class=False):
    """Generate a formatted argument string, potentially from an existing `ment_dict`.

    `kwargs` lists parameter names that should be rendered in a separate
    "Valid Keyword Arguments" section (used for `@delegates` handling).
    """
    if ment_dict is None:
        ment_dict = docments(elt, full=True)
    arg_string = ""
    return_string = ""
    # Instance/class methods carry an implicit first parameter that should
    # not be documented; static methods do not.
    if not _is_static(elt) and is_class:
        ment_dict.pop("self", {})
        ment_dict.pop("cls", {})
    ret = ment_dict.pop("return", None)
    has_docment = _has_docment(elt)
    if len(ment_dict.keys()) > 0:
        if len(kwargs) > 0:
            kwarg_dict = filter_keys(ment_dict, lambda x: x in kwargs)
            ment_dict = filter_keys(ment_dict, lambda x: x not in kwargs)
            arg_string = _generate_arg_string(ment_dict, has_docment)
            arg_string += "|||**Valid Keyword Arguments**||\n"
            # Strip the duplicated header from the second table.
            arg_string += _generate_arg_string(kwarg_dict, has_docment, monospace=monospace).replace("||Type|Default|Details|\n|---|---|---|---|\n", "")
        else:
            arg_string = _generate_arg_string(ment_dict, has_docment, monospace=monospace)
    # Guard against a missing `return` entry: `ret` is None then, and the
    # original unconditional subscript would raise TypeError.
    if ret is not None and ret["anno"] != inspect._empty:
        return_string = _generate_return_string(ret, has_docment)
    return arg_string + return_string
# Cell
def is_source_available(
    elt, # A python object
):
    """Check whether source code for `elt` could be retrieved, mimicking
    `inspect.getfile`: True for modules/classes with a backing file and for
    functions, methods, tracebacks, frames and code objects."""
    if inspect.ismodule(elt):
        # Bug fix: the original tested the builtin `object` (a leftover from
        # inspect.getfile's parameter name) instead of `elt`, so modules were
        # always reported as unavailable.
        return bool(getattr(elt, '__file__', None))
    elif isclass(elt):
        if hasattr(elt, '__module__'):
            module = sys.modules.get(elt.__module__)
            return bool(getattr(module, '__file__', None))
    elif getattr(elt, '__name__', None) == "<lambda>":
        return False
    elif inspect.ismethod(elt) or inspect.isfunction(elt) or inspect.istraceback(elt) or inspect.isframe(elt) or inspect.iscode(elt):
        return True
    elif is_enum(elt):
        return False
    return False
# Cell
def _handle_delegates(elt):
    """Generate a `docment` dict handling `@delegates`, returning it together
    with the names of the delegated kwargs in `elt`.

    Undocumented parameters that originate from `elt.__delwrap__` inherit its
    docments (or a generic note) and are collected in `kwargs`.
    """
    kwargs = []
    arg_dict = docments(elt, full=True)
    delwrap_dict = docments(elt.__delwrap__, full=True)
    # (The original's unused `drop = arg_dict.keys()` local was removed.)
    for k,v in arg_dict.items():
        if k in delwrap_dict.keys() and v["docment"] is None and k != "return":
            kwargs.append(k)
            if delwrap_dict[k]["docment"] is not None:
                v["docment"] = delwrap_dict[k]["docment"] + f" passed to `{qual_name(elt.__delwrap__)}`"
            else:
                v['docment'] = f"Argument passed to `{qual_name(elt.__delwrap__)}`"
    return arg_dict, kwargs
# Cell
def _get_docments(elt, with_return=False, ment_dict=None, kwargs=[], monospace=False, is_class=False):
    "Grabs docments for `elt` and formats with a potential `ment_dict` and valid kwarg names"
    formatted = _format_args(elt, ment_dict=ment_dict, kwargs=kwargs, monospace=monospace, is_class=is_class)
    out = f"\n\n{formatted}"
    # Drop the returns row unless explicitly requested.
    return out if with_return else out.split("|**Returns**|")[0]
# Cell
def show_doc(elt, doc_string:bool=True, name=None, title_level=None, disp=True, default_cls_level=2, show_all_docments=False, verbose=False):
    """Show documentation for element `elt` with potential input documentation.

    Supported types: class, function, and enum. Displays the rendered
    markdown when `disp` is True, otherwise returns it as a string.
    """
    elt = getattr(elt, '__func__', elt)
    qname = name or qual_name(elt)
    # Bug fix: the original evaluated the *function* `inspect.isclass`
    # (always truthy) instead of calling it on `elt`, making `is_class`
    # unconditionally True.
    is_class = '.' in qname or inspect.isclass(elt)
    if inspect.isclass(elt):
        if is_enum(elt): name,args = _format_enum_doc(elt, qname)
        else: name,args = _format_cls_doc (elt, qname)
    elif callable(elt): name,args = _format_func_doc(elt, qname)
    else: name,args = f"<code>{qname}</code>", ''
    link = get_source_link(elt)
    source_link = f'<a href="{link}" class="source_link" style="float:right">[source]</a>'
    title_level = title_level or (default_cls_level if inspect.isclass(elt) else 4)
    doc = f'<h{title_level} id="{qname}" class="doc_header">{name}{source_link}</h{title_level}>'
    doc += f'\n\n> {args}\n\n' if len(args) > 0 else '\n\n'
    s = ''
    try:
        monospace = get_config().d.getboolean('monospace_docstrings', False)
    except FileNotFoundError:
        # No settings file: fall back to the default rendering.
        monospace = False
    if doc_string and inspect.getdoc(elt):
        s = inspect.getdoc(elt)
        # doc links don't work inside markdown pre/code blocks
        s = f'```\n{s}\n```' if monospace else add_doc_links(s, elt)
        doc += s
    if len(args) > 0:
        if hasattr(elt, '__init__') and isclass(elt):
            # Document the constructor's arguments for classes.
            elt = elt.__init__
        if is_source_available(elt):
            if show_all_docments or _has_docment(elt):
                if hasattr(elt, "__delwrap__"):
                    arg_dict, kwargs = _handle_delegates(elt)
                    doc += _get_docments(elt, ment_dict=arg_dict, with_return=True, kwargs=kwargs, monospace=monospace, is_class=is_class)
                else:
                    doc += _get_docments(elt, monospace=monospace, is_class=is_class)
            elif verbose:
                print(f'Warning: `docments` annotations will not work for built-in modules, classes, functions, and `enums` and are unavailable for {qual_name(elt)}. They will not be shown')
    if disp: display(Markdown(doc))
    else: return doc
# Cell
def md2html(md):
    "Convert markdown `md` to HTML code"
    import nbconvert
    # Compare versions numerically: the original lexicographic string
    # comparison misorders e.g. "10.0.0" < "5.5.0".
    try:
        version = tuple(int(p) for p in nbconvert.__version__.split('.')[:3])
    except ValueError:
        version = (5, 5, 0)  # non-numeric component: assume a modern release
    if version < (5, 5, 0): return HTMLExporter().markdown2html(md)
    else: return HTMLExporter().markdown2html(collections.defaultdict(lambda: collections.defaultdict(dict)), md)
# Cell
def get_doc_link(func):
    # Build an absolute doc URL for `func` from its package's `_nbdev` index,
    # or None when the package has no nbdev docs.
    mod = inspect.getmodule(func)
    module = mod.__name__.replace('.', '/') + '.py'  # NOTE(review): computed but never used
    try:
        nbdev_mod = importlib.import_module(mod.__package__.split('.')[0] + '._nbdev')
        try_pack = source_nb(func, mod=nbdev_mod)
        if try_pack:
            page = '.'.join(try_pack.partition('_')[-1:]).replace('.ipynb', '')
            return f'{nbdev_mod.doc_url}{page}#{qual_name(func)}'
    # Best-effort: any failure means no doc link.
    except: return None
# Cell
# Fancy CSS needed to make raw Jupyter rendering look nice; appended to the
# HTML produced by `doc` before it is paged/displayed.
_TABLE_CSS = """<style>
table { border-collapse: collapse; border:thin solid #dddddd; margin: 25px 0px; ; }
table tr:first-child { background-color: #FFF}
table thead th { background-color: #eee; color: #000; text-align: center;}
tr, th, td { border: 1px solid #ccc; border-width: 1px 0 0 1px; border-collapse: collapse;
padding: 5px; }
tr:nth-child(even) {background: #eee;}</style>"""
# Cell
def doc(elt, show_all_docments:bool=True):
    """Show `show_doc` info in the preview window when used in a notebook.

    Note: the original annotated `elt` as `int`, but any documentable object
    (function, class, enum) is accepted, so the annotation was dropped.
    """
    md = show_doc(elt, disp=False, show_all_docments=show_all_docments)
    doc_link = get_doc_link(elt)
    if doc_link is not None:
        md += f'\n\n<a href="{doc_link}" target="_blank" rel="noreferrer noopener">Show in docs</a>'
    output = md2html(md)
    if IN_COLAB: get_ipython().run_cell_magic(u'html', u'', output + _TABLE_CSS)
    else:
        try: page.page({'text/html': output + _TABLE_CSS})
        # Terminal IPython cannot page HTML: fall back to plain markdown.
        except: display(Markdown(md))
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_showdoc.ipynb (unless otherwise specified).
__all__ = ['is_enum', 'is_lib_module', 're_digits_first', 'try_external_doc_link', 'is_doc_name', 'doc_link',
'add_doc_links', 'colab_link', 'get_nb_source_link', 'nb_source_link', 'type_repr', 'format_param',
'is_source_available', 'show_doc', 'md2html', 'get_doc_link', 'doc']
# Cell
from .imports import *
from .export import *
from .sync import *
from nbconvert import HTMLExporter
from fastcore.docments import docments, isclass, _clean_comment, _tokens, _param_locs, _get_comment
from fastcore.utils import IN_NOTEBOOK
from fastcore.xtras import get_source_link, _unwrapped_type_dispatch_func
import string
from tokenize import COMMENT
if IN_NOTEBOOK:
from IPython.display import Markdown,display
from IPython.core import page
# Cell
def is_enum(cls):
    "Check if `cls` is an enum or another type of class"
    cls_meta = type(cls)
    # Exact-type check: only `enum.Enum` / `enum.EnumMeta` themselves match.
    return cls_meta is enum.Enum or cls_meta is enum.EnumMeta
# Cell
def is_lib_module(name):
    "Test if `name` is a library module."
    # Leading underscore marks private modules, which are never doc modules.
    if name.startswith('_'): return False
    try:
        _ = importlib.import_module(f'{get_config().lib_name}.{name}')
        return True
    # Any failure (missing module, import-time error, missing config) means
    # `name` is not an importable library module. `Exception` rather than a
    # bare `except` so KeyboardInterrupt/SystemExit still propagate.
    except Exception: return False
# Cell
# Matches a leading notebook-ordering prefix such as "02_" or "10a_".
re_digits_first = re.compile('^[0-9]+[a-z]*_')
# Cell
def try_external_doc_link(name, packages):
    "Try to find a doc link for `name` in `packages`"
    for p in packages:
        try:
            mod = importlib.import_module(f"{p}._nbdev")
            try_pack = source_nb(name, is_name=True, mod=mod)
            if try_pack:
                page = re_digits_first.sub('', try_pack).replace('.ipynb', '')
                return f'{mod.doc_url}{page}#{name}'
        # A package without a `_nbdev` module cannot host the doc page, but a
        # later package still might: keep searching instead of giving up (the
        # original returned None as soon as one package was missing).
        except ModuleNotFoundError: continue
    return None
# Cell
def is_doc_name(name):
    "Test if `name` corresponds to a notebook that could be converted to a doc page"
    # A match requires an exact `<name>.ipynb` once the ordering prefix is stripped.
    for f in get_config().path("nbs_path").glob(f'*{name}.ipynb'):
        if re_digits_first.sub('', f.name) == f'{name}.ipynb': return True
    return False
# Cell
def doc_link(name, include_bt=True):
    """Create link to documentation for `name`.

    Returns a markdown link when `name` is a library module, a locally
    defined symbol, or covered by `custom_doc_links`; otherwise returns the
    name unchanged (backticked unless `include_bt` is False).
    """
    cname = f'`{name}`' if include_bt else name
    try:
        #Link to modules
        if is_lib_module(name) and is_doc_name(name): return f"[{cname}]({get_config().doc_baseurl}{name}.html)"
        #Link to local functions
        try_local = source_nb(name, is_name=True)
        if try_local:
            page = re_digits_first.sub('', try_local).replace('.ipynb', '')
            return f'[{cname}]({get_config().doc_baseurl}{page}.html#{name})'
        ##Custom links
        mod = get_nbdev_module()
        link = mod.custom_doc_links(name)
        return f'[{cname}]({link})' if link is not None else cname
    # Best-effort: any lookup failure falls back to the plain name.
    except: return cname
# Cell
_re_backticks = re.compile(r"""
# Catches any link of the form \[`obj`\](old_link) or just `obj`,
# to either update old links or add the link to the docs of obj
\[` # Opening [ and `
([^`]*) # Catching group with anything but a `
`\] # ` then closing ]
(?: # Beginning of non-catching group
\( # Opening (
[^)]* # Anything but a closing )
\) # Closing )
) # End of non-catching group
| # OR
` # Opening `
([^`]*) # Anything but a `
` # Closing `
""", re.VERBOSE)
# Cell
def add_doc_links(text, elt=None):
    "Search for doc links for any item between backticks in `text` and insert them"
    def _repl(m):
        try:
            params = inspect.signature(elt).parameters
            # Parameter names of `elt` stay as plain backticked text.
            if m.group(2) in params: return f'`{m.group(2)}`'
        except: pass
        return doc_link(m.group(1) or m.group(2))
    return _re_backticks.sub(_repl, text)
# Cell
_re_header = re.compile(r"""
# Catches any header in markdown with the title in group 1
^\s* # Beginning of text followed by any number of whitespace
\#+ # One # or more
\s* # Any number of whitespace
(.*) # Catching group with anything
$ # End of text
""", re.VERBOSE)
# Cell
def colab_link(path):
    "Get a link to the notebook at `path` on Colab"
    cfg = get_config()
    base = f'https://colab.research.google.com/github/{cfg.user}/{cfg.lib_name}'
    url = f'{base}/blob/{cfg.branch}/{cfg.path("nbs_path").name}/{path}.ipynb'
    display(Markdown(f'[Open `{path}` in Colab]({url})'))
# Cell
def get_nb_source_link(func, local=False, is_name=None):
    """Return a link to the notebook where `func` is defined.

    When `local` is False the link is prefixed with an nbviewer URL derived
    from the configured git URL; otherwise it is relative. Falls back to
    `get_source_link`/`get_function_source` when no notebook source is found.
    """
    func = _unwrapped_type_dispatch_func(func)
    pref = '' if local else get_config().git_url.replace('github.com', 'nbviewer.jupyter.org/github')+ get_config().path("nbs_path").name+'/'
    is_name = is_name or isinstance(func, str)
    src = source_nb(func, is_name=is_name, return_all=True)
    if src is None: return '' if is_name else get_source_link(func)
    find_name,nb_name = src
    nb = read_nb(nb_name)
    # Raw f-strings: the originals relied on `\s` not being a recognised
    # escape, which raises a SyntaxWarning on modern Python.
    pat = re.compile(rf'^{find_name}\s+=|^(def|class)\s+{find_name}\s*\(', re.MULTILINE)
    if len(find_name.split('.')) == 2:
        # `Class.method` names may instead come from an `@patch` definition.
        clas,func = find_name.split('.')
        pat2 = re.compile(rf'@patch\s*\ndef\s+{func}\s*\([^:]*:\s*{clas}\s*(?:,|\))')
    else: pat2 = None
    for i,cell in enumerate(nb['cells']):
        if cell['cell_type'] == 'code':
            if re.search(pat, cell['source']): break
            if pat2 is not None and re.search(pat2, cell['source']): break
    if re.search(pat, cell['source']) is None and (pat2 is not None and re.search(pat2, cell['source']) is None):
        return '' if is_name else get_function_source(func)
    # Walk backwards from the defining cell to the nearest markdown header so
    # the link can anchor to it. (The unused `header_pat` local was removed.)
    while i >= 0:
        cell = nb['cells'][i]
        if cell['cell_type'] == 'markdown' and _re_header.search(cell['source']):
            title = _re_header.search(cell['source']).groups()[0]
            anchor = '-'.join([s for s in title.split(' ') if len(s) > 0])
            return f'{pref}{nb_name}#{anchor}'
        i-=1
    return f'{pref}{nb_name}'
# Cell
def nb_source_link(func, is_name=None, disp=True, local=True):
    "Show a relative link to the notebook where `func` is defined"
    is_name = is_name or isinstance(func, str)
    link = get_nb_source_link(func, local=local, is_name=is_name)
    base_name = func if is_name else qual_name(func)
    text = base_name if local else f'{base_name} (GitHub)'
    if not disp: return link
    display(Markdown(f'[{text}]({link})'))
# Cell
from fastcore.script import Param
# Cell
def _format_annos(anno, highlight=False):
    "Returns a clean string representation of `anno` from either the `__qualname__` if it is a base class, or `str()` if not"
    annos = listify(anno)
    if len(annos) == 0: return "None" # If anno is none, listify has a length of 0
    # Multiple annotations render as a parenthesised, comma-separated tuple.
    new_anno = "(" if len(annos) > 1 else ""
    # `<` in str(o) marks reprs like "<class 'int'>"; prefer the qualname then.
    def _inner(o): return getattr(o, '__qualname__', str(o)) if '<' in str(o) else str(o)
    for i, anno in enumerate(annos):
        # `highlight` wraps each entry in a documentation link.
        new_anno += _inner(anno) if not highlight else f'{doc_link(_inner(anno))}'
        # if "." in new_anno: new_anno = new_anno.split('.')[-1]
        if len(annos) > 1 and i < len(annos) - 1:
            new_anno += ', '
    return f'{new_anno})' if len(annos) > 1 else new_anno
# Cell
def type_repr(t):
    "Representation of type `t` (in a type annotation)"
    # A `Param` wraps a help string rather than a type.
    if (isinstance(t, Param)): return f'"{t.help}"'
    if getattr(t, '__args__', None):
        args = t.__args__
        # Union[X, None] renders as Optional[X].
        if len(args)==2 and args[1] == type(None):
            return f'`Optional`\[{type_repr(args[0])}\]'
        reprs = ', '.join([_format_annos(o, highlight=True) for o in args])
        return f'{doc_link(get_name(t))}\[{reprs}\]'
    else: return doc_link(_format_annos(t))
# Cell
# Markdown-escaped prefixes for *args / **kwargs parameters.
_arg_prefixes = {inspect._VAR_POSITIONAL: '\*', inspect._VAR_KEYWORD:'\*\*'}
def format_param(p):
    "Formats function param to `param:Type=val` with font weights: param=bold, val=italic"
    arg_prefix = _arg_prefixes.get(p.kind, '') # asterisk prefix for *args and **kwargs
    res = f"**{arg_prefix}`{p.name}`**"
    if hasattr(p, 'annotation') and p.annotation != p.empty: res += f':{type_repr(p.annotation)}'
    if p.default != p.empty:
        default = getattr(p.default, 'func', p.default) #For partials
        # Prefer the callable's name; fall back to repr for plain values.
        if hasattr(default,'__name__'): default = getattr(default, '__name__')
        else: default = repr(default)
        if is_enum(default.__class__): #Enum have a crappy repr
            res += f'=*`{default.__class__.__name__}.{default.name}`*'
        else: res += f'=*`{default}`*'
    return res
# Cell
def _format_enum_doc(enum, full_name):
"Formatted `enum` definition to show in documentation"
vals = ', '.join(enum.__members__.keys())
return f'<code>{full_name}</code>',f'<code>Enum</code> = [{vals}]'
# Cell
def _escape_chars(s):
return s.replace('_', '\_')
def _format_func_doc(func, full_name=None):
"Formatted `func` definition to show in documentation"
try:
sig = inspect.signature(func)
fmt_params = [format_param(param) for name,param
in sig.parameters.items() if name not in ('self','cls')]
except: fmt_params = []
name = f'<code>{full_name or func.__name__}</code>'
arg_str = f"({', '.join(fmt_params)})"
f_name = f"<code>class</code> {name}" if inspect.isclass(func) else name
return f'{f_name}',f'{name}{arg_str}'
# Cell
def _format_cls_doc(cls, full_name):
    "Formatted `cls` definition to show in documentation"
    # NOTE(review): assumes getclasstree's last entry holds `cls` with its
    # bases, so this picks the first base class - confirm against inspect docs.
    parent_class = inspect.getclasstree([cls])[-1][0][1][0]
    name,args = _format_func_doc(cls, full_name)
    # Show inheritance for everything except direct subclasses of `object`.
    if parent_class != object: args += f' :: {doc_link(get_name(parent_class))}'
    return name,args
# Cell
def _has_docment(elt):
    # True when at least one parameter (or the return) of `elt` carries a
    # docment-style trailing comment (fastcore's `# description` convention).
    comments = {o.start[0]:_clean_comment(o.string) for o in _tokens(elt) if o.type==COMMENT}
    params = _param_locs(elt, returns=True)
    comments = [_get_comment(line,arg,comments,params) for line,arg in params.items()]
    return any(c is not None for c in comments)
# Cell
def _generate_arg_string(argument_dict, has_docment=False, monospace=False):
    """Turn a dictionary of argument information into a markdown table.

    `argument_dict` maps parameter names to dicts with `anno`, `default` and
    `docment` keys (as produced by `fastcore.docments`). The `return` entry
    is skipped. `monospace` is accepted for interface compatibility.
    Note: mutates the `item` dicts in place (default/docment rewriting).
    """
    arg_string = '||Type|Default|'
    border_string = '|---|---|---|'
    if has_docment:
        arg_string += 'Details|'
        border_string += '---|'
    arg_string+= f'\n{border_string}\n'
    for key, item in argument_dict.items():
        is_required=True
        if key == 'return': continue
        if item['default'] != inspect._empty:
            if item['default'] == '':
                item['default'] = '""'  # render the empty-string default visibly
            is_required = False
        arg_string += f"|**`{key}`**|"
        details_string = ""
        # `is None` rather than `== None`: identity is the correct (and safe)
        # test for the None singleton.
        if item['anno'] is None: item['anno'] = NoneType
        if (item["default"] is None and item['anno'] == NoneType) or item['anno'] == inspect._empty:
            details_string += "|"
        else:
            details_string += f"`{_format_annos(item['anno']).replace('|', 'or')}`|"
        details_string += "|" if is_required else f"`{_format_annos(item['default'])}`|"
        if has_docment:
            if item['docment']:
                item['docment'] = item['docment'].replace('\n', '<br />')
            details_string += f"{item['docment']}|" if item['docment'] is not None else "*No Content*|"
        arg_string += add_doc_links(details_string)
        arg_string += '\n'
    return arg_string
# Cell
def _generate_return_string(return_dict:dict, has_docment=False):
    "Turns a dictionary of return information into a useful docstring"
    # No annotation and no docment: nothing to show.
    if return_dict['anno'] is None:
        if not return_dict['docment']: return ''
        else: return_dict['anno'] = NoneType
    anno = _format_annos(return_dict['anno']).replace('|', 'or')
    return_string = f"|**Returns**|`{anno}`||"
    if has_docment:
        if return_dict['docment']:
            # Keep the table row on a single line.
            return_dict['docment'] = return_dict['docment'].replace('\n', '<br />')
        else: return_dict['docment'] = ''
    return return_string if not has_docment else f"{return_string}{return_dict['docment']}|"
# Cell
def _is_static(func):
    "Checks whether `func` is a static method in a class"
    qualified = qual_name(func)
    if qualified.count('.') != 1: return False
    cls_name, meth_name = qualified.split('.')
    owner = getattr(sys.modules[func.__module__], cls_name)
    attr = inspect.getattr_static(owner, meth_name)
    return isinstance(attr, staticmethod)
# Cell
def _format_args(elt, ment_dict:dict = None, kwargs = [], monospace=False, is_class=False):
    """Generate a formatted argument string, potentially from an existing `ment_dict`.

    `kwargs` lists parameter names that should be rendered in a separate
    "Valid Keyword Arguments" section (used for `@delegates` handling).
    """
    if ment_dict is None:
        ment_dict = docments(elt, full=True)
    arg_string = ""
    return_string = ""
    # Instance/class methods carry an implicit first parameter that should
    # not be documented; static methods do not.
    if not _is_static(elt) and is_class:
        ment_dict.pop("self", {})
        ment_dict.pop("cls", {})
    ret = ment_dict.pop("return", None)
    has_docment = _has_docment(elt)
    if len(ment_dict.keys()) > 0:
        if len(kwargs) > 0:
            kwarg_dict = filter_keys(ment_dict, lambda x: x in kwargs)
            ment_dict = filter_keys(ment_dict, lambda x: x not in kwargs)
            arg_string = _generate_arg_string(ment_dict, has_docment)
            arg_string += "|||**Valid Keyword Arguments**||\n"
            # Strip the duplicated header from the second table.
            arg_string += _generate_arg_string(kwarg_dict, has_docment, monospace=monospace).replace("||Type|Default|Details|\n|---|---|---|---|\n", "")
        else:
            arg_string = _generate_arg_string(ment_dict, has_docment, monospace=monospace)
    # Guard against a missing `return` entry: `ret` is None then, and the
    # original unconditional subscript would raise TypeError.
    if ret is not None and ret["anno"] != inspect._empty:
        return_string = _generate_return_string(ret, has_docment)
    return arg_string + return_string
# Cell
def is_source_available(
    elt, # A python object
):
    """Check whether source code for `elt` could be retrieved, mimicking
    `inspect.getfile`: True for modules/classes with a backing file and for
    functions, methods, tracebacks, frames and code objects."""
    if inspect.ismodule(elt):
        # Bug fix: the original tested the builtin `object` (a leftover from
        # inspect.getfile's parameter name) instead of `elt`, so modules were
        # always reported as unavailable.
        return bool(getattr(elt, '__file__', None))
    elif isclass(elt):
        if hasattr(elt, '__module__'):
            module = sys.modules.get(elt.__module__)
            return bool(getattr(module, '__file__', None))
    elif getattr(elt, '__name__', None) == "<lambda>":
        return False
    elif inspect.ismethod(elt) or inspect.isfunction(elt) or inspect.istraceback(elt) or inspect.isframe(elt) or inspect.iscode(elt):
        return True
    elif is_enum(elt):
        return False
    return False
# Cell
def _handle_delegates(elt):
    """Generate a `docment` dict handling `@delegates`, returning it together
    with the names of the delegated kwargs in `elt`.

    Undocumented parameters that originate from `elt.__delwrap__` inherit its
    docments (or a generic note) and are collected in `kwargs`.
    """
    kwargs = []
    arg_dict = docments(elt, full=True)
    delwrap_dict = docments(elt.__delwrap__, full=True)
    # (The original's unused `drop = arg_dict.keys()` local was removed.)
    for k,v in arg_dict.items():
        if k in delwrap_dict.keys() and v["docment"] is None and k != "return":
            kwargs.append(k)
            if delwrap_dict[k]["docment"] is not None:
                v["docment"] = delwrap_dict[k]["docment"] + f" passed to `{qual_name(elt.__delwrap__)}`"
            else:
                v['docment'] = f"Argument passed to `{qual_name(elt.__delwrap__)}`"
    return arg_dict, kwargs
# Cell
def _get_docments(elt, with_return=False, ment_dict=None, kwargs=[], monospace=False, is_class=False):
    "Grabs docments for `elt` and formats with a potential `ment_dict` and valid kwarg names"
    s = "\n\n" + _format_args(elt, ment_dict=ment_dict, kwargs=kwargs, monospace=monospace, is_class=is_class)
    # Drop the returns row unless explicitly requested.
    return s if with_return else s.split("|**Returns**|")[0]
# Cell
def show_doc(elt, doc_string:bool=True, name=None, title_level=None, disp=True, default_cls_level=2, show_all_docments=False, verbose=False):
    "Show documentation for element `elt` with potential input documentation. Supported types: class, function, and enum."
    elt = getattr(elt, '__func__', elt)
    qname = name or qual_name(elt)
    # Fix: `inspect.isclass` was missing its call — the bare function object is
    # always truthy, so `is_class` was True for every element.
    is_class = '.' in qname or inspect.isclass(elt)
    if inspect.isclass(elt):
        if is_enum(elt): name,args = _format_enum_doc(elt, qname)
        else: name,args = _format_cls_doc (elt, qname)
    elif callable(elt): name,args = _format_func_doc(elt, qname)
    else: name,args = f"<code>{qname}</code>", ''
    link = get_source_link(elt)
    source_link = f'<a href="{link}" class="source_link" style="float:right">[source]</a>'
    title_level = title_level or (default_cls_level if inspect.isclass(elt) else 4)
    doc = f'<h{title_level} id="{qname}" class="doc_header">{name}{source_link}</h{title_level}>'
    doc += f'\n\n> {args}\n\n' if len(args) > 0 else '\n\n'
    s = ''
    try:
        monospace = get_config().d.getboolean('monospace_docstrings', False)
    except FileNotFoundError:
        # No nbdev config available; fall back to rendered markdown docstrings.
        monospace = False
    if doc_string and inspect.getdoc(elt):
        s = inspect.getdoc(elt)
        # doc links don't work inside markdown pre/code blocks
        s = f'```\n{s}\n```' if monospace else add_doc_links(s, elt)
        doc += s
    if len(args) > 0:
        # For classes, the argument docments live on __init__.
        if hasattr(elt, '__init__') and isclass(elt):
            elt = elt.__init__
        if is_source_available(elt):
            if show_all_docments or _has_docment(elt):
                if hasattr(elt, "__delwrap__"):
                    arg_dict, kwargs = _handle_delegates(elt)
                    doc += _get_docments(elt, ment_dict=arg_dict, with_return=True, kwargs=kwargs, monospace=monospace, is_class=is_class)
                else:
                    doc += _get_docments(elt, monospace=monospace, is_class=is_class)
            elif verbose:
                print(f'Warning: `docments` annotations will not work for built-in modules, classes, functions, and `enums` and are unavailable for {qual_name(elt)}. They will not be shown')
    if disp: display(Markdown(doc))
    else: return doc
# Cell
def md2html(md):
    "Convert markdown `md` to HTML code"
    import nbconvert
    # Fix: compare numeric version components. The previous plain string
    # comparison misorders versions (e.g. '10.0' < '5.5.0' is True).
    version = tuple(int(part) for part in nbconvert.__version__.split('.')[:3] if part.isdigit())
    if version < (5, 5, 0):
        return HTMLExporter().markdown2html(md)
    # nbconvert >= 5.5.0 expects a context argument before the markdown text.
    return HTMLExporter().markdown2html(collections.defaultdict(lambda: collections.defaultdict(dict)), md)
# Cell
def get_doc_link(func):
    "Return the docs-site URL for `func` if it belongs to an nbdev project, else None"
    mod = inspect.getmodule(func)
    # (removed unused local `module`)
    try:
        nbdev_mod = importlib.import_module(mod.__package__.split('.')[0] + '._nbdev')
        try_pack = source_nb(func, mod=nbdev_mod)
        if try_pack:
            # Notebook name "NN_page.ipynb" -> docs page "page".
            page = '.'.join(try_pack.partition('_')[-1:]).replace('.ipynb', '')
            return f'{nbdev_mod.doc_url}{page}#{qual_name(func)}'
    # Fix: narrowed from a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; any failure still means "no doc link available".
    except Exception:
        return None
# Cell
# Fancy CSS needed to make raw Jupyter rendering look nice
# Appended to the HTML emitted by `doc` so tables get borders, header shading,
# and striped rows when previewed outside the generated docs site.
_TABLE_CSS = """<style>
table { border-collapse: collapse; border:thin solid #dddddd; margin: 25px 0px; ; }
table tr:first-child { background-color: #FFF}
table thead th { background-color: #eee; color: #000; text-align: center;}
tr, th, td { border: 1px solid #ccc; border-width: 1px 0 0 1px; border-collapse: collapse;
padding: 5px; }
tr:nth-child(even) {background: #eee;}</style>"""
# Cell
def doc(elt, show_all_docments:bool=True):
    "Show `show_doc` info in preview window when used in a notebook"
    # Fix: dropped the incorrect `elt:int` annotation — `elt` is any documented
    # object (class, function, enum), never specifically an int.
    md = show_doc(elt, disp=False, show_all_docments=show_all_docments)
    doc_link = get_doc_link(elt)
    if doc_link is not None:
        md += f'\n\n<a href="{doc_link}" target="_blank" rel="noreferrer noopener">Show in docs</a>'
    output = md2html(md)
    if IN_COLAB: get_ipython().run_cell_magic(u'html', u'', output + _TABLE_CSS)
    else:
        try: page.page({'text/html': output + _TABLE_CSS})
        # Fix: narrowed bare `except:`; fall back to plain markdown display
        # when the pager can't render HTML.
        except Exception: display(Markdown(md))
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from typing import Tuple
from pants.backend.python.lint.docformatter.subsystem import Docformatter
from pants.backend.python.lint.python_fmt import PythonFmtConfigurations
from pants.backend.python.rules import download_pex_bin, pex
from pants.backend.python.rules.pex import (
Pex,
PexInterpreterConstraints,
PexRequest,
PexRequirements,
)
from pants.backend.python.subsystems import python_native_code, subprocess_environment
from pants.backend.python.subsystems.subprocess_environment import SubprocessEncodingEnvironment
from pants.backend.python.target_types import PythonSources
from pants.core.goals.fmt import FmtConfiguration, FmtConfigurations, FmtResult
from pants.core.goals.lint import LinterConfigurations, LintResult
from pants.core.util_rules import determine_source_files, strip_source_roots
from pants.core.util_rules.determine_source_files import (
AllSourceFilesRequest,
SourceFiles,
SpecifiedSourceFilesRequest,
)
from pants.engine.fs import Digest, DirectoriesToMerge
from pants.engine.process import FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import named_rule, rule, subsystem_rule
from pants.engine.selectors import Get
from pants.engine.unions import UnionRule
from pants.python.python_setup import PythonSetup
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class DocformatterConfiguration(FmtConfiguration):
    # Only targets that own Python sources are eligible for docformatter.
    required_fields = (PythonSources,)
    sources: PythonSources
class DocformatterConfigurations(FmtConfigurations):
    # Collection of per-target configurations consumed by the fmt/lint rules below.
    config_type = DocformatterConfiguration
@dataclass(frozen=True)
class SetupRequest:
    configs: DocformatterConfigurations
    # True -> run docformatter with --check (lint); False -> --in-place (fmt).
    check_only: bool
@dataclass(frozen=True)
class Setup:
    # Fully configured docformatter invocation, ready for the engine to execute.
    process: Process
def generate_args(
    *, specified_source_files: SourceFiles, docformatter: Docformatter, check_only: bool,
) -> Tuple[str, ...]:
    """Build the docformatter CLI: mode flag, user-supplied options, sorted files."""
    mode_flag = "--check" if check_only else "--in-place"
    file_args = sorted(specified_source_files.snapshot.files)
    return (mode_flag, *docformatter.options.args, *file_args)
@rule
async def setup(
    request: SetupRequest,
    docformatter: Docformatter,
    python_setup: PythonSetup,
    subprocess_encoding_environment: SubprocessEncodingEnvironment,
) -> Setup:
    """Resolve the docformatter PEX and assemble the Process that will run it.

    Fix: the description f-string nested double quotes inside a
    double-quoted f-string (`pluralize(len(request.configs), "target")`),
    which is a SyntaxError on the pre-3.12 Pythons this code targets.
    """
    requirements_pex = await Get[Pex](
        PexRequest(
            output_filename="docformatter.pex",
            requirements=PexRequirements(docformatter.get_requirement_specs()),
            interpreter_constraints=PexInterpreterConstraints(
                docformatter.default_interpreter_constraints
            ),
            entry_point=docformatter.get_entry_point(),
        )
    )
    # Chain off a prior formatter's output when one exists so formatters
    # compose instead of clobbering each other's edits.
    if request.configs.prior_formatter_result is None:
        all_source_files = await Get[SourceFiles](
            AllSourceFilesRequest(config.sources for config in request.configs)
        )
        all_source_files_snapshot = all_source_files.snapshot
    else:
        all_source_files_snapshot = request.configs.prior_formatter_result
    specified_source_files = await Get[SourceFiles](
        SpecifiedSourceFilesRequest((config.sources, config.origin) for config in request.configs)
    )
    merged_input_files = await Get[Digest](
        DirectoriesToMerge(
            directories=(
                all_source_files_snapshot.directory_digest,
                requirements_pex.directory_digest,
            )
        ),
    )
    address_references = ", ".join(sorted(config.address.reference() for config in request.configs))
    process = requirements_pex.create_process(
        python_setup=python_setup,
        subprocess_encoding_environment=subprocess_encoding_environment,
        pex_path="./docformatter.pex",
        pex_args=generate_args(
            specified_source_files=specified_source_files,
            docformatter=docformatter,
            check_only=request.check_only,
        ),
        input_files=merged_input_files,
        output_files=all_source_files_snapshot.files,
        description=(
            f"Run Docformatter on {pluralize(len(request.configs), 'target')}: "
            f"{address_references}."
        ),
    )
    return Setup(process)
@named_rule(desc="Format Python docstrings with docformatter")
async def docformatter_fmt(
configs: DocformatterConfigurations, docformatter: Docformatter
) -> FmtResult:
if docformatter.options.skip:
return FmtResult.noop()
setup = await Get[Setup](SetupRequest(configs, check_only=False))
result = await Get[ProcessResult](Process, setup.process)
return FmtResult.from_process_result(result)
@named_rule(desc="Lint Python docstrings with docformatter")
async def docformatter_lint(
configs: DocformatterConfigurations, docformatter: Docformatter
) -> LintResult:
if docformatter.options.skip:
return LintResult.noop()
setup = await Get[Setup](SetupRequest(configs, check_only=True))
result = await Get[FallibleProcessResult](Process, setup.process)
return LintResult.from_fallible_process_result(result)
def rules():
    """Return every rule needed to register docformatter fmt/lint support."""
    docformatter_rules = [
        setup,
        docformatter_fmt,
        docformatter_lint,
        subsystem_rule(Docformatter),
    ]
    union_rules = [
        UnionRule(PythonFmtConfigurations, DocformatterConfigurations),
        UnionRule(LinterConfigurations, DocformatterConfigurations),
    ]
    helper_rules = [
        *download_pex_bin.rules(),
        *determine_source_files.rules(),
        *pex.rules(),
        *python_native_code.rules(),
        *strip_source_roots.rules(),
        *subprocess_environment.rules(),
    ]
    return [*docformatter_rules, *union_rules, *helper_rules]
| # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from typing import Tuple
from pants.backend.python.lint.docformatter.subsystem import Docformatter
from pants.backend.python.lint.python_fmt import PythonFmtConfigurations
from pants.backend.python.rules import download_pex_bin, pex
from pants.backend.python.rules.pex import (
Pex,
PexInterpreterConstraints,
PexRequest,
PexRequirements,
)
from pants.backend.python.subsystems import python_native_code, subprocess_environment
from pants.backend.python.subsystems.subprocess_environment import SubprocessEncodingEnvironment
from pants.backend.python.target_types import PythonSources
from pants.core.goals.fmt import FmtConfiguration, FmtConfigurations, FmtResult
from pants.core.goals.lint import LinterConfigurations, LintResult
from pants.core.util_rules import determine_source_files, strip_source_roots
from pants.core.util_rules.determine_source_files import (
AllSourceFilesRequest,
SourceFiles,
SpecifiedSourceFilesRequest,
)
from pants.engine.fs import Digest, DirectoriesToMerge
from pants.engine.process import FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import named_rule, rule, subsystem_rule
from pants.engine.selectors import Get
from pants.engine.unions import UnionRule
from pants.python.python_setup import PythonSetup
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class DocformatterConfiguration(FmtConfiguration):
    # Only targets that own Python sources are eligible for docformatter.
    required_fields = (PythonSources,)
    sources: PythonSources
class DocformatterConfigurations(FmtConfigurations):
    # Collection of per-target configurations consumed by the fmt/lint rules below.
    config_type = DocformatterConfiguration
@dataclass(frozen=True)
class SetupRequest:
    configs: DocformatterConfigurations
    # True -> run docformatter with --check (lint); False -> --in-place (fmt).
    check_only: bool
@dataclass(frozen=True)
class Setup:
    # Fully configured docformatter invocation, ready for the engine to execute.
    process: Process
def generate_args(
    *, specified_source_files: SourceFiles, docformatter: Docformatter, check_only: bool,
) -> Tuple[str, ...]:
    """Assemble docformatter's argv: the mode flag, then options, then files."""
    parts = ["--check" if check_only else "--in-place"]
    parts.extend(docformatter.options.args)
    parts.extend(sorted(specified_source_files.snapshot.files))
    return tuple(parts)
@rule
async def setup(
    request: SetupRequest,
    docformatter: Docformatter,
    python_setup: PythonSetup,
    subprocess_encoding_environment: SubprocessEncodingEnvironment,
) -> Setup:
    """Resolve the docformatter PEX and assemble the Process that will run it."""
    requirements_pex = await Get[Pex](
        PexRequest(
            output_filename="docformatter.pex",
            requirements=PexRequirements(docformatter.get_requirement_specs()),
            interpreter_constraints=PexInterpreterConstraints(
                docformatter.default_interpreter_constraints
            ),
            entry_point=docformatter.get_entry_point(),
        )
    )
    # Chain off a prior formatter's output when one exists so formatters
    # compose instead of clobbering each other's edits.
    if request.configs.prior_formatter_result is None:
        all_source_files = await Get[SourceFiles](
            AllSourceFilesRequest(config.sources for config in request.configs)
        )
        all_source_files_snapshot = all_source_files.snapshot
    else:
        all_source_files_snapshot = request.configs.prior_formatter_result
    # Only the files matching the user's CLI specs are passed on the command line.
    specified_source_files = await Get[SourceFiles](
        SpecifiedSourceFilesRequest((config.sources, config.origin) for config in request.configs)
    )
    merged_input_files = await Get[Digest](
        DirectoriesToMerge(
            directories=(
                all_source_files_snapshot.directory_digest,
                requirements_pex.directory_digest,
            )
        ),
    )
    address_references = ", ".join(sorted(config.address.reference() for config in request.configs))
    process = requirements_pex.create_process(
        python_setup=python_setup,
        subprocess_encoding_environment=subprocess_encoding_environment,
        pex_path="./docformatter.pex",
        pex_args=generate_args(
            specified_source_files=specified_source_files,
            docformatter=docformatter,
            check_only=request.check_only,
        ),
        input_files=merged_input_files,
        # All sources are captured as outputs so in-place edits are preserved.
        output_files=all_source_files_snapshot.files,
        description=(
            f"Run Docformatter on {pluralize(len(request.configs), 'target')}: "
            f"{address_references}."
        ),
    )
    return Setup(process)
@named_rule(desc="Format Python docstrings with docformatter")
async def docformatter_fmt(
configs: DocformatterConfigurations, docformatter: Docformatter
) -> FmtResult:
if docformatter.options.skip:
return FmtResult.noop()
setup = await Get[Setup](SetupRequest(configs, check_only=False))
result = await Get[ProcessResult](Process, setup.process)
return FmtResult.from_process_result(result)
@named_rule(desc="Lint Python docstrings with docformatter")
async def docformatter_lint(
configs: DocformatterConfigurations, docformatter: Docformatter
) -> LintResult:
if docformatter.options.skip:
return LintResult.noop()
setup = await Get[Setup](SetupRequest(configs, check_only=True))
result = await Get[FallibleProcessResult](Process, setup.process)
return LintResult.from_fallible_process_result(result)
def rules():
    """All rules required to register docformatter's fmt and lint support."""
    registered = [
        setup,
        docformatter_fmt,
        docformatter_lint,
        subsystem_rule(Docformatter),
        UnionRule(PythonFmtConfigurations, DocformatterConfigurations),
        UnionRule(LinterConfigurations, DocformatterConfigurations),
    ]
    for helper_module in (
        download_pex_bin,
        determine_source_files,
        pex,
        python_native_code,
        strip_source_roots,
        subprocess_environment,
    ):
        registered.extend(helper_module.rules())
    return registered
import json
import requests
import csv
from glob import glob
import pandas as pd
from pathlib import Path
from tqdm import trange
def main():
    """Prepare the output folder, download all trial CSVs, then extract criteria text."""
    folder_setup()
    download_trials()
    write_txt()
def folder_setup():
    """Create the 'Full_Studies_test' download directory and clear stale outputs.

    Sets the module-level global `studies_directory` that the download and
    extraction steps rely on.
    """
    global studies_directory
    studies_directory = Path.cwd() / 'Full_Studies_test'
    # Same effect as the original exists()/mkdir() pair.
    studies_directory.mkdir(exist_ok=True)
    # criteria.txt is opened in append mode later, so leftovers from previous
    # runs must be removed; log.txt is likewise cleared if present.
    for stale in (studies_directory / 'log.txt', studies_directory / 'criteria.txt'):
        if stale.exists():
            stale.unlink()
def build_url(expr: str='Cancer',
              country: str='United States',
              status: str='Recruiting',
              study_type: str='Interventional',
              field_names: list=None,
              min_rnk: int=1,
              max_rnk: int=999,
              fmt: str='csv'
              ) -> str:
    """Return a study_fields API URL for clinicaltrials.gov
    (https://clinicaltrials.gov/api/gui/demo/simple_study_fields).

    expr - search expression (default 'Cancer'); '' or None disables the term.
    country / status - location filters; spaces are '+'-encoded.
    study_type - 'Interventional' (default) or 'Observational'; anything else
        prints a warning but the URL is still built, matching prior behavior.
    field_names - API field names per the crosswalk documentation
        (https://clinicaltrials.gov/api/gui/ref/crosswalks); None uses the
        default field set below.
    min_rnk / max_rnk - record range (API allows at most 1000 per request).
    fmt - 'csv' (default), 'json', or 'xml'.
    """
    # Fixes: (1) the f-strings nested double quotes inside double quotes
    # (a SyntaxError before Python 3.12); (2) the default field list was a
    # shared mutable default argument — replaced with a None sentinel.
    if field_names is None:
        field_names = ['NCTId', 'OfficialTitle', 'StartDate',
                       'PrimaryCompletionDate', 'LastUpdatePostDate',
                       'Condition', 'Gender', 'MaximumAge', 'EligibilityCriteria',
                       'CentralContactName', 'CentralContactPhone', 'CentralContactEMail',
                       'LocationFacility', 'LocationCity', 'LocationState',
                       'LocationZip', 'LeadSponsorName']
    base_url = 'https://clinicaltrials.gov/api/query/study_fields?'
    expr = f"{expr.replace(' ', '+')}+AND+" if expr else ''
    status = status.replace(' ', '+') if status else ''
    # (removed the no-op `study_type = study_type` branch)
    if study_type not in ('Observational', 'Interventional'):
        print(""" This paramater only accepts Observational or Interventional.
                  The url will not build if other parameters are entered.
              """)
    country = country.replace(' ', '+')
    age = 'AND+AREA%5BMinimumAge%5D18+Years&'
    fields = "%2C+".join(field_names)
    api_url = f'{base_url}expr={expr}AREA%5BLocationCountry%5D{country}+AND+AREA%5BLocationStatus%5D{status}+AND+AREA%5BStudyType%5D{study_type}+{age}fields={fields}&min_rnk={min_rnk}&max_rnk={max_rnk}&fmt={fmt}'
    return api_url
def generate_urls():
    """Probe the API for the total study count, then build one URL per 1000-record page."""
    probe_url = build_url(expr='Cancer', max_rnk=1, fmt='json')
    response = requests.get(probe_url)
    payload = response.json()
    n_studies = payload['StudyFieldsResponse']['NStudiesFound']
    print(f'{n_studies} studies found.\n')
    print('\nGenerating request urls...')
    return [
        build_url(expr='Cancer', field_names=['EligibilityCriteria'],
                  min_rnk=f'{i}', max_rnk=f'{i+999}', fmt='csv')
        for i in range(1, n_studies, 1000)
    ]
def download_trials():
    """Download each generated page URL as CSV and save it as trial_set_<i>.csv.

    Fix: the original iterated `zip(urls, trange(1, len(urls)))`; trange
    yields one fewer element than `urls`, so the final page was silently
    never downloaded. The range now covers all pages (still 1-based names).
    """
    urls = generate_urls()
    print('\n-----Downloading trials-----\n')
    for i, url in zip(trange(1, len(urls) + 1), urls):
        df = pd.read_csv(url, skiprows=9)
        # 'Rank' is an API artifact, not trial data.
        df = df.drop(columns='Rank')
        df.to_csv(f'{studies_directory}/trial_set_{i}.csv', index=False)
    print('\n-----Downloads complete-----\n')
def write_txt():
    """Append every eligibility criterion found in the downloaded CSVs to criteria.txt."""
    for csv_path in glob(f'{studies_directory}/*.csv'):
        print(f'Working on file {csv_path}')
        entries = []
        with open(csv_path, 'r', encoding='utf-8', errors='ignore') as f:
            for row in csv.DictReader(f):
                criteria_text = row['EligibilityCriteria'].replace('Inclusion Criteria:||', '')
                entries.extend(f'\n{item}' for item in criteria_text.split('|'))
        with open(f'{studies_directory}/criteria.txt', 'a+', encoding='utf-8', errors='ignore') as out:
            out.writelines(entries)
    print('\n-----Process complete-----')
# Script entry point: download trials and extract eligibility criteria.
if __name__=='__main__':
    main()
# https://bioportal.bioontology.org/annotatorplus
| import json
import requests
import csv
from glob import glob
import pandas as pd
from pathlib import Path
from tqdm import trange
def main():
    """Prepare the output folder, download all trial CSVs, then extract criteria text."""
    folder_setup()
    download_trials()
    write_txt()
def folder_setup():
    """Create the 'Full_Studies_test' download directory and clear stale outputs.

    Sets the module-level global `studies_directory` that the download and
    extraction steps rely on.
    """
    global studies_directory
    studies_directory = Path.cwd() / 'Full_Studies_test'
    # Same effect as the original exists()/mkdir() pair.
    studies_directory.mkdir(exist_ok=True)
    # criteria.txt is opened in append mode later, so leftovers from previous
    # runs must be removed; log.txt is likewise cleared if present.
    for stale in (studies_directory / 'log.txt', studies_directory / 'criteria.txt'):
        if stale.exists():
            stale.unlink()
def build_url(expr: str='Cancer',
              country: str='United States',
              status: str='Recruiting',
              study_type: str='Interventional',
              field_names: list=None,
              min_rnk: int=1,
              max_rnk: int=999,
              fmt: str='csv'
              ) -> str:
    """Return a study_fields API URL for clinicaltrials.gov
    (https://clinicaltrials.gov/api/gui/demo/simple_study_fields).

    expr - search expression (default 'Cancer'); '' or None disables the term.
    country / status - location filters; spaces are '+'-encoded.
    study_type - 'Interventional' (default) or 'Observational'; anything else
        prints a warning but the URL is still built, matching prior behavior.
    field_names - API field names per the crosswalk documentation
        (https://clinicaltrials.gov/api/gui/ref/crosswalks); None uses the
        default field set below.
    min_rnk / max_rnk - record range (API allows at most 1000 per request).
    fmt - 'csv' (default), 'json', or 'xml'.
    """
    # Fix: the default field list was a shared mutable default argument —
    # replaced with a None sentinel; also removed the no-op
    # `study_type = study_type` branch.
    if field_names is None:
        field_names = ['NCTId', 'OfficialTitle', 'StartDate',
                       'PrimaryCompletionDate', 'LastUpdatePostDate',
                       'Condition', 'Gender', 'MaximumAge', 'EligibilityCriteria',
                       'CentralContactName', 'CentralContactPhone', 'CentralContactEMail',
                       'LocationFacility', 'LocationCity', 'LocationState',
                       'LocationZip', 'LeadSponsorName']
    base_url = 'https://clinicaltrials.gov/api/query/study_fields?'
    expr = f"{expr.replace(' ', '+')}+AND+" if expr else ''
    status = status.replace(' ', '+') if status else ''
    if study_type not in ('Observational', 'Interventional'):
        print(""" This paramater only accepts Observational or Interventional.
                  The url will not build if other parameters are entered.
              """)
    country = country.replace(' ', '+')
    age = 'AND+AREA%5BMinimumAge%5D18+Years&'
    fields = "%2C+".join(field_names)
    api_url = f'{base_url}expr={expr}AREA%5BLocationCountry%5D{country}+AND+AREA%5BLocationStatus%5D{status}+AND+AREA%5BStudyType%5D{study_type}+{age}fields={fields}&min_rnk={min_rnk}&max_rnk={max_rnk}&fmt={fmt}'
    return api_url
def generate_urls():
    """Probe the API for the total study count, then build one URL per 1000-record page."""
    probe_url = build_url(expr='Cancer', max_rnk=1, fmt='json')
    response = requests.get(probe_url)
    payload = response.json()
    n_studies = payload['StudyFieldsResponse']['NStudiesFound']
    print(f'{n_studies} studies found.\n')
    print('\nGenerating request urls...')
    return [
        build_url(expr='Cancer', field_names=['EligibilityCriteria'],
                  min_rnk=f'{i}', max_rnk=f'{i+999}', fmt='csv')
        for i in range(1, n_studies, 1000)
    ]
def download_trials():
    """Download each generated page URL as CSV and save it as trial_set_<i>.csv.

    Fix: the original iterated `zip(urls, trange(1, len(urls)))`; trange
    yields one fewer element than `urls`, so the final page was silently
    never downloaded. The range now covers all pages (still 1-based names).
    """
    urls = generate_urls()
    print('\n-----Downloading trials-----\n')
    for i, url in zip(trange(1, len(urls) + 1), urls):
        df = pd.read_csv(url, skiprows=9)
        # 'Rank' is an API artifact, not trial data.
        df = df.drop(columns='Rank')
        df.to_csv(f'{studies_directory}/trial_set_{i}.csv', index=False)
    print('\n-----Downloads complete-----\n')
def write_txt():
    """Append every eligibility criterion found in the downloaded CSVs to criteria.txt."""
    for csv_path in glob(f'{studies_directory}/*.csv'):
        print(f'Working on file {csv_path}')
        entries = []
        with open(csv_path, 'r', encoding='utf-8', errors='ignore') as f:
            for row in csv.DictReader(f):
                criteria_text = row['EligibilityCriteria'].replace('Inclusion Criteria:||', '')
                entries.extend(f'\n{item}' for item in criteria_text.split('|'))
        with open(f'{studies_directory}/criteria.txt', 'a+', encoding='utf-8', errors='ignore') as out:
            out.writelines(entries)
    print('\n-----Process complete-----')
# Script entry point: download trials and extract eligibility criteria.
if __name__=='__main__':
    main()
# https://bioportal.bioontology.org/annotatorplus
|
import binascii
from collections import (
abc,
namedtuple,
)
import copy
import itertools
import re
from typing import (
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
from eth_abi import (
codec,
decoding,
encoding,
)
from eth_abi.base import (
parse_type_str,
)
from eth_abi.exceptions import (
ValueOutOfBounds,
)
from eth_abi.grammar import (
ABIType,
BasicType,
TupleType,
parse,
)
from eth_abi.registry import (
ABIRegistry,
BaseEquals,
registry as default_registry,
)
from eth_typing import (
HexStr,
TypeStr,
)
from eth_utils import (
combomethod,
decode_hex,
is_bytes,
is_list_like,
is_text,
to_text,
to_tuple,
)
from eth_utils.abi import (
collapse_if_tuple,
)
from eth_utils.toolz import (
curry,
partial,
pipe,
)
from web3._utils.ens import (
is_ens_name,
)
from web3._utils.formatters import (
recursive_map,
)
from web3.exceptions import (
FallbackNotFound,
)
from web3.types import (
ABI,
ABIEvent,
ABIEventParams,
ABIFunction,
ABIFunctionParams,
)
def filter_by_type(_type: str, contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:
    """Return every ABI element whose ``type`` field equals ``_type``."""
    matches = []
    for element in contract_abi:
        if element['type'] == _type:
            matches.append(element)
    return matches
def filter_by_name(name: str, contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:
    """Return the named functions/events; unnamed element kinds are skipped."""
    named_matches = []
    for element in contract_abi:
        # fallback/constructor/receive entries carry no 'name' key.
        if element['type'] in ('fallback', 'constructor', 'receive'):
            continue
        if element['name'] == name:
            named_matches.append(element)
    return named_matches
def get_abi_input_types(abi: ABIFunction) -> List[str]:
    """Return collapsed input type strings; fallback/receive without inputs give []."""
    has_no_declared_inputs = 'inputs' not in abi and abi['type'] in ('fallback', 'receive')
    if has_no_declared_inputs:
        return []
    return [collapse_if_tuple(cast(Dict[str, Any], arg)) for arg in abi['inputs']]
def get_abi_output_types(abi: ABIFunction) -> List[str]:
    """Return collapsed output type strings; fallback functions have none."""
    if abi['type'] == 'fallback':
        return []
    return [collapse_if_tuple(cast(Dict[str, Any], arg)) for arg in abi['outputs']]
def get_abi_input_names(abi: Union[ABIFunction, ABIEvent]) -> List[str]:
    """Return the declared input names; a fallback without inputs gives []."""
    if abi['type'] == 'fallback' and 'inputs' not in abi:
        return []
    return [arg['name'] for arg in abi['inputs']]
def get_receive_func_abi(contract_abi: ABI) -> ABIFunction:
    """Return the contract's receive-function ABI, raising if none exists."""
    matches = filter_by_type('receive', contract_abi)
    if not matches:
        raise FallbackNotFound("No receive function was found in the contract ABI.")
    return cast(ABIFunction, matches[0])
def get_fallback_func_abi(contract_abi: ABI) -> ABIFunction:
    """Return the contract's fallback-function ABI, raising if none exists."""
    matches = filter_by_type('fallback', contract_abi)
    if not matches:
        raise FallbackNotFound("No fallback function was found in the contract ABI.")
    return cast(ABIFunction, matches[0])
def fallback_func_abi_exists(contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:
    """Return the fallback entries of `contract_abi` (empty list when absent)."""
    return [element for element in contract_abi if element['type'] == 'fallback']
def receive_func_abi_exists(contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:
    """Return the receive entries of `contract_abi` (empty list when absent)."""
    return [element for element in contract_abi if element['type'] == 'receive']
def get_indexed_event_inputs(event_abi: ABIEvent) -> List[ABIEventParams]:
    """Return only the event inputs explicitly marked ``indexed: True``."""
    indexed_args = []
    for arg in event_abi['inputs']:
        if arg['indexed'] is True:
            indexed_args.append(arg)
    return indexed_args
def exclude_indexed_event_inputs(event_abi: ABIEvent) -> List[ABIEventParams]:
    """Return only the event inputs explicitly marked ``indexed: False``."""
    unindexed_args = []
    for arg in event_abi['inputs']:
        if arg['indexed'] is False:
            unindexed_args.append(arg)
    return unindexed_args
def get_normalized_abi_arg_type(abi_arg: ABIEventParams) -> str:
    """
    Return the normalized type for the abi argument provided. In order to account for tuple argument
    types, this abstraction makes use of `collapse_if_tuple()` to collapse the appropriate component
    types within a tuple type, if present.
    """
    # dict() converts the TypedDict view into the plain dict collapse_if_tuple expects.
    return collapse_if_tuple(dict(abi_arg))
def filter_by_argument_count(
    num_arguments: int, contract_abi: ABI
) -> List[Union[ABIFunction, ABIEvent]]:
    """Return the ABI elements that declare exactly `num_arguments` inputs."""
    return [element for element in contract_abi if len(element['inputs']) == num_arguments]
def filter_by_argument_name(
    argument_names: Collection[str], contract_abi: ABI
) -> List[Union[ABIFunction, ABIEvent]]:
    """Return the ABI elements whose input names include all of `argument_names`."""
    wanted = set(argument_names)
    # issubset of the element's input names is equivalent to the original
    # intersection == wanted comparison.
    return [
        element for element in contract_abi
        if wanted.issubset(get_abi_input_names(element))
    ]
class AddressEncoder(encoding.AddressEncoder):
    """Address encoder that also accepts ENS names as valid values."""
    @classmethod
    def validate_value(cls, value: Any) -> None:
        # ENS names bypass hex-address validation; they are resolved elsewhere.
        if is_ens_name(value):
            return
        super().validate_value(value)
class AcceptsHexStrEncoder(encoding.BaseEncoder):
    """Wraps a sub-encoder so callers may pass hex strings as well as bytes."""
    # Set by concrete subclasses; None here means "not configured".
    subencoder_cls: Type[encoding.BaseEncoder] = None
    is_strict: bool = None
    def __init__(self, subencoder: encoding.BaseEncoder) -> None:
        self.subencoder = subencoder
    # type ignored b/c conflict w/ defined BaseEncoder.is_dynamic = False
    @property
    def is_dynamic(self) -> bool:  # type: ignore
        return self.subencoder.is_dynamic
    @classmethod
    def from_type_str(cls, abi_type: TypeStr, registry: ABIRegistry) -> "AcceptsHexStrEncoder":
        subencoder_cls = cls.get_subencoder_class()
        # cast b/c expects BaseCoder but `from_type_string` restricted to BaseEncoder subclasses
        subencoder = cast(encoding.BaseEncoder, subencoder_cls.from_type_str(abi_type, registry))
        return cls(subencoder)
    @classmethod
    def get_subencoder_class(cls) -> Type[encoding.BaseEncoder]:
        if cls.subencoder_cls is None:
            raise AttributeError(f'No subencoder class is set. {cls.__name__}')
        return cls.subencoder_cls
    # type ignored b/c combomethod makes signature conflict w/ defined BaseEncoder.validate_value()
    @combomethod
    def validate_value(self, value: Any) -> None:  # type: ignore
        normalized_value = self.validate_and_normalize(value)
        return self.subencoder.validate_value(normalized_value)
    def encode(self, value: Any) -> bytes:
        normalized_value = self.validate_and_normalize(value)
        return self.subencoder.encode(normalized_value)
    def validate_and_normalize(self, value: Any) -> HexStr:
        """Decode hex-string input to bytes, enforcing the 0x-prefix policy."""
        raw_value = value
        if is_text(value):
            try:
                value = decode_hex(value)
            except binascii.Error:
                self.invalidate_value(
                    value,
                    msg=f'{value} is an invalid hex string',
                )
            else:
                if raw_value[:2] != '0x':
                    if self.is_strict:
                        self.invalidate_value(
                            raw_value,
                            msg='hex string must be prefixed with 0x'
                        )
                    else:
                        # Simplified from `elif raw_value[:2] != '0x':` — that
                        # condition is always True inside this branch, so a
                        # plain else is equivalent.
                        warnings.warn(
                            'in v6 it will be invalid to pass a hex string without the "0x" prefix',
                            category=DeprecationWarning
                        )
        return value
class BytesEncoder(AcceptsHexStrEncoder):
    """Hex-string-tolerant wrapper around eth-abi's fixed bytes encoder."""
    subencoder_cls = encoding.BytesEncoder
    is_strict = False
class ByteStringEncoder(AcceptsHexStrEncoder):
    """Hex-string-tolerant wrapper around eth-abi's dynamic bytes encoder."""
    subencoder_cls = encoding.ByteStringEncoder
    is_strict = False
class StrictByteStringEncoder(AcceptsHexStrEncoder):
    """Like ByteStringEncoder, but hex strings must carry the 0x prefix."""
    subencoder_cls = encoding.ByteStringEncoder
    is_strict = True
class ExactLengthBytesEncoder(encoding.BaseEncoder):
    """Encoder for fixed-size ``bytesN`` values that also accepts hex strings."""
    # TODO: move this to eth-abi once the api is stabilized
    is_big_endian = False
    # Both sizes are injected as __call__ kwargs by from_type_str below.
    value_bit_size = None
    data_byte_size = None
    def validate(self) -> None:
        """Check that the encoder instance was configured with consistent sizes."""
        super().validate()
        if self.value_bit_size is None:
            raise ValueError("`value_bit_size` may not be none")
        if self.data_byte_size is None:
            raise ValueError("`data_byte_size` may not be none")
        if self.encode_fn is None:
            raise ValueError("`encode_fn` may not be none")
        if self.is_big_endian is None:
            raise ValueError("`is_big_endian` may not be none")
        if self.value_bit_size % 8 != 0:
            raise ValueError(
                f"Invalid value bit size: {self.value_bit_size}. Must be a multiple of 8"
            )
        if self.value_bit_size > self.data_byte_size * 8:
            raise ValueError("Value byte size exceeds data size")
    def encode(self, value: Any) -> bytes:
        """Normalize `value` (bytes or 0x-prefixed hex string) and encode it."""
        normalized_value = self.validate_value(value)
        return self.encode_fn(normalized_value)
    # type ignored b/c conflict with defined BaseEncoder.validate_value() -> None
    def validate_value(self, value: Any) -> bytes:  # type: ignore
        """Decode hex input, require the 0x prefix, and bounds-check the byte length."""
        if not is_bytes(value) and not is_text(value):
            self.invalidate_value(value)
        raw_value = value
        if is_text(value):
            try:
                value = decode_hex(value)
            except binascii.Error:
                self.invalidate_value(
                    value,
                    msg=f'{value} is not a valid hex string',
                )
            else:
                # Unlike AcceptsHexStrEncoder, the 0x prefix is always mandatory here.
                if raw_value[:2] != '0x':
                    self.invalidate_value(
                        raw_value,
                        msg='hex string must be prefixed with 0x'
                    )
        byte_size = self.value_bit_size // 8
        if len(value) > byte_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg=f"exceeds total byte size for bytes{byte_size} encoding",
            )
        elif len(value) < byte_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg=f"less than total byte size for bytes{byte_size} encoding",
            )
        return value
    @staticmethod
    def encode_fn(value: Any) -> bytes:
        # Identity: the value is already the raw bytes after validation.
        return value
    @parse_type_str('bytes')
    def from_type_str(cls, abi_type: BasicType, registry: ABIRegistry) -> bytes:
        # type ignored b/c kwargs are set in superclass init
        # Unexpected keyword argument "value_bit_size" for "__call__" of "BaseEncoder"
        return cls(  # type: ignore
            value_bit_size=abi_type.sub * 8,
            data_byte_size=abi_type.sub,
        )
class BytesDecoder(decoding.FixedByteSizeDecoder):
    """Decoder for fixed-size ``bytesM`` values that returns the raw bytes."""
    # FixedByteSizeDecoder.is_big_endian is defined as None
    is_big_endian = False  # type: ignore

    # FixedByteSizeDecoder.decoder_fn is defined as None
    @staticmethod
    def decoder_fn(data: bytes) -> bytes:  # type: ignore
        # Identity: the fixed-size frame is already the value.
        return data

    @parse_type_str('bytes')
    def from_type_str(cls, abi_type: BasicType, registry: ABIRegistry) -> bytes:
        # type ignored b/c kwargs are set in superclass init
        # Unexpected keyword argument "value_bit_size" for "__call__" of "BaseDecoder"
        return cls(  # type: ignore
            value_bit_size=abi_type.sub * 8,
            data_byte_size=abi_type.sub,
        )
class TextStringEncoder(encoding.TextStringEncoder):
    """ABI ``string`` encoder that also accepts UTF-8-decodable ``bytes``."""
    @classmethod
    def validate_value(cls, value: Any) -> None:
        if is_bytes(value):
            # Bytes input is validated as text; undecodable bytes are invalid.
            try:
                value = to_text(value)
            except UnicodeDecodeError:
                cls.invalidate_value(
                    value,
                    msg='not decodable as unicode string',
                )
        super().validate_value(value)
def filter_by_encodability(
    abi_codec: codec.ABIEncoder, args: Sequence[Any], kwargs: Dict[str, Any], contract_abi: ABI
) -> List[ABIFunction]:
    """Return the function ABIs whose inputs can encode ``args``/``kwargs``."""
    return [
        cast(ABIFunction, fn_abi)
        for fn_abi in contract_abi
        if check_if_arguments_can_be_encoded(
            cast(ABIFunction, fn_abi), abi_codec, args, kwargs
        )
    ]
def check_if_arguments_can_be_encoded(
    function_abi: ABIFunction,
    abi_codec: codec.ABIEncoder,
    args: Sequence[Any],
    kwargs: Dict[str, Any],
) -> bool:
    """
    Return True when ``args``/``kwargs`` can be merged, aligned and encoded
    against ``function_abi``; False on any mismatch.
    """
    try:
        merged = merge_args_and_kwargs(function_abi, args, kwargs)
    except TypeError:
        return False

    if len(function_abi.get('inputs', [])) != len(merged):
        return False

    try:
        types, aligned = get_aligned_abi_inputs(function_abi, merged)
    except TypeError:
        return False

    return all(
        abi_codec.is_encodable(type_str, arg)
        for type_str, arg in zip(types, aligned)
    )
def merge_args_and_kwargs(
    function_abi: ABIFunction, args: Sequence[Any], kwargs: Dict[str, Any]
) -> Tuple[Any, ...]:
    """
    Takes a list of positional args (``args``) and a dict of keyword args
    (``kwargs``) defining values to be passed to a call to the contract function
    described by ``function_abi``. Checks to ensure that the correct number of
    args were given, no duplicate args were given, and no unknown args were
    given. Returns a list of argument values aligned to the order of inputs
    defined in ``function_abi``.

    Raises ``TypeError`` on a count mismatch, duplicate, or unknown argument.
    """
    # BUGFIX: the f-strings below reused the enclosing quote character
    # (e.g. function_abi["inputs"] inside a double-quoted f-string), which is
    # a SyntaxError on Python < 3.12; quotes are now alternated.
    # Ensure the function is being applied to the correct number of args
    if len(args) + len(kwargs) != len(function_abi.get('inputs', [])):
        raise TypeError(
            f"Incorrect argument count. Expected '{len(function_abi['inputs'])}"
            f". Got '{len(args) + len(kwargs)}'"
        )

    # If no keyword args were given, we don't need to align them
    if not kwargs:
        return cast(Tuple[Any, ...], args)

    kwarg_names = set(kwargs.keys())
    sorted_arg_names = tuple(arg_abi['name'] for arg_abi in function_abi['inputs'])
    args_as_kwargs = dict(zip(sorted_arg_names, args))

    # Check for duplicate args
    duplicate_args = kwarg_names.intersection(args_as_kwargs.keys())
    if duplicate_args:
        raise TypeError(
            f"{function_abi.get('name')}() got multiple values for argument(s) "
            f"'{', '.join(duplicate_args)}'"
        )

    # Check for unknown args
    unknown_args = kwarg_names.difference(sorted_arg_names)
    if unknown_args:
        if function_abi.get('name'):
            raise TypeError(
                f"{function_abi.get('name')}() got unexpected keyword argument(s)"
                f" '{', '.join(unknown_args)}'"
            )
        raise TypeError(
            f"Type: '{function_abi.get('type')}' got unexpected keyword argument(s)"
            f" '{', '.join(unknown_args)}'"
        )

    # Sort args according to their position in the ABI and unzip them from their
    # names
    sorted_args = tuple(zip(
        *sorted(
            itertools.chain(kwargs.items(), args_as_kwargs.items()),
            key=lambda kv: sorted_arg_names.index(kv[0]),
        )
    ))

    if sorted_args:
        return sorted_args[1]
    else:
        return tuple()
TUPLE_TYPE_STR_RE = re.compile(r'^(tuple)(\[([1-9][0-9]*)?\])?$')


def get_tuple_type_str_parts(s: str) -> Optional[Tuple[str, Optional[str]]]:
    """
    Split a JSON ABI type string into its tuple prefix and array dimension.

    Returns ``(prefix, dims)`` for tuple type strings — ``dims`` is ``None``
    for a bare ``tuple`` — and ``None`` for every non-tuple type string.
    """
    match = TUPLE_TYPE_STR_RE.match(s)
    if match is None:
        return None
    return match.group(1), match.group(2)
def _align_abi_input(arg_abi: ABIFunctionParams, arg: Any) -> Tuple[Any, ...]:
    """
    Aligns the values of any mapping at any level of nesting in ``arg``
    according to the layout of the corresponding abi spec.
    """
    tuple_parts = get_tuple_type_str_parts(arg_abi['type'])
    if tuple_parts is None:
        # Arg is non-tuple. Just return value.
        return arg

    tuple_prefix, tuple_dims = tuple_parts
    if tuple_dims is None:
        # Arg is non-list tuple. Each sub arg in `arg` will be aligned
        # according to its corresponding abi.
        sub_abis = arg_abi['components']
    else:
        # Arg is list tuple. A non-list version of its abi will be used to
        # align each element in `arg`.
        new_abi = copy.copy(arg_abi)
        new_abi['type'] = tuple_prefix
        sub_abis = itertools.repeat(new_abi)  # type: ignore

    if isinstance(arg, abc.Mapping):
        # Arg is mapping. Align values according to abi order.
        aligned_arg = tuple(arg[abi['name']] for abi in sub_abis)
    else:
        aligned_arg = arg

    if not is_list_like(aligned_arg):
        # BUGFIX: the f-string reused single quotes inside a single-quoted
        # literal (SyntaxError on Python < 3.12); quotes now alternate.
        raise TypeError(
            f'Expected non-string sequence for "{arg_abi.get("type")}" '
            f'component type: got {aligned_arg}'
        )

    # convert NamedTuple to regular tuple
    typing = tuple if isinstance(aligned_arg, tuple) else type(aligned_arg)

    return typing(
        _align_abi_input(sub_abi, sub_arg)
        for sub_abi, sub_arg in zip(sub_abis, aligned_arg)
    )
def get_aligned_abi_inputs(
    abi: ABIFunction, args: Union[Tuple[Any, ...], Mapping[Any, Any]]
) -> Tuple[Tuple[Any, ...], Tuple[Any, ...]]:
    """
    Return ``(types, aligned_args)`` for the function ABI ``abi``.

    ``args`` may be a sequence or a mapping keyed by input name; nested
    mappings/sequences are aligned recursively against tuple-encoded inputs.
    """
    input_abis = abi.get('inputs', [])
    if isinstance(args, abc.Mapping):
        # Mapping input: reorder values into the declared input order.
        args = tuple(args[input_abi['name']] for input_abi in input_abis)

    # typed dict cannot be used w/ a normal Dict
    # https://github.com/python/mypy/issues/4976
    types = tuple(collapse_if_tuple(input_abi) for input_abi in input_abis)  # type: ignore
    aligned = type(args)(
        _align_abi_input(input_abi, arg)
        for input_abi, arg in zip(input_abis, args)
    )
    return types, aligned
def get_constructor_abi(contract_abi: ABI) -> ABIFunction:
    """
    Return the constructor entry of ``contract_abi``, or ``None`` when no
    constructor is declared.

    Raises ``ValueError`` when more than one constructor is present.
    """
    candidates = [
        abi for abi in contract_abi if abi['type'] == 'constructor'
    ]
    # BUGFIX: the if/elif chain was exhaustive, so the trailing
    # `return None` was unreachable dead code; restructured.
    if len(candidates) > 1:
        raise ValueError("Found multiple constructors.")
    if candidates:
        return candidates[0]
    return None
# Vocabulary of ABI type names used to build TYPE_REGEX below.
DYNAMIC_TYPES = ['bytes', 'string']

# Integer widths 8..256 in steps of 8 bits.
INT_SIZES = range(8, 257, 8)
# Fixed-bytes widths 1..32.
BYTES_SIZES = range(1, 33)

UINT_TYPES = [f'uint{i}' for i in INT_SIZES]
INT_TYPES = [f'int{i}' for i in INT_SIZES]
# NOTE(review): 'bytes32.byte' presumably covers a legacy `byte` alias —
# confirm against the ABI spec this file targets.
BYTES_TYPES = [f'bytes{i}' for i in BYTES_SIZES] + ['bytes32.byte']

STATIC_TYPES = list(itertools.chain(
    ['address', 'bool'],
    UINT_TYPES,
    INT_TYPES,
    BYTES_TYPES,
))

# Alternation of every base type name; the lookahead rejects prefix matches
# (e.g. 'uint8' must not match inside 'uint88').
BASE_TYPE_REGEX = '|'.join((
    _type + '(?![a-z0-9])'
    for _type
    in itertools.chain(STATIC_TYPES, DYNAMIC_TYPES)
))

# A single array dimension, sized or not: '[]', '[10]', ...
SUB_TYPE_REGEX = (
    r'\['
    '[0-9]*'
    r'\]'
)

# Full ABI type string: one base type followed by zero or more dimensions.
TYPE_REGEX = (
    '^'
    '(?:{base_type})'
    '(?:(?:{sub_type})*)?'
    '$'
).format(
    base_type=BASE_TYPE_REGEX,
    sub_type=SUB_TYPE_REGEX,
)
def is_recognized_type(abi_type: TypeStr) -> bool:
    """True when ``abi_type`` matches the supported ABI type grammar."""
    return bool(re.match(TYPE_REGEX, abi_type))


def is_bool_type(abi_type: TypeStr) -> bool:
    """True for the ``bool`` ABI type."""
    return abi_type == 'bool'


def is_uint_type(abi_type: TypeStr) -> bool:
    """True for any ``uintN`` ABI type."""
    return abi_type in UINT_TYPES


def is_int_type(abi_type: TypeStr) -> bool:
    """True for any ``intN`` ABI type."""
    return abi_type in INT_TYPES


def is_address_type(abi_type: TypeStr) -> bool:
    """True for the ``address`` ABI type."""
    return abi_type == 'address'


def is_bytes_type(abi_type: TypeStr) -> bool:
    """True for fixed ``bytesM`` or dynamic ``bytes``."""
    return abi_type in BYTES_TYPES + ['bytes']


def is_string_type(abi_type: TypeStr) -> bool:
    """True for the ``string`` ABI type."""
    return abi_type == 'string'
@curry
def is_length(target_length: int, value: abc.Sized) -> bool:
    """Curried predicate: True when ``len(value) == target_length``."""
    return len(value) == target_length
def size_of_type(abi_type: TypeStr) -> Optional[int]:
    """
    Returns size in bits of abi_type, or ``None`` for types without a fixed
    bit size (strings, bytes and arrays).
    """
    # BUGFIX: the return annotation said `int`, but several paths return
    # None; annotation corrected to Optional[int] (no runtime change).
    if 'string' in abi_type:
        return None
    if 'byte' in abi_type:
        return None
    if '[' in abi_type:
        return None
    if abi_type == 'bool':
        return 8
    if abi_type == 'address':
        return 160
    # Remaining types are intN/uintN: the digits are the bit size.
    return int(re.sub(r"\D", "", abi_type))
# Matches the outermost (trailing) array dimension, e.g. '[2]' or '[]'.
END_BRACKETS_OF_ARRAY_TYPE_REGEX = r"\[[^]]*\]$"


def sub_type_of_array_type(abi_type: TypeStr) -> str:
    """
    Strip the outermost array dimension from ``abi_type``
    (e.g. ``'uint256[2][]'`` -> ``'uint256[2]'``).

    Raises ``ValueError`` if ``abi_type`` is not an array type.
    """
    if not is_array_type(abi_type):
        raise ValueError(
            f"Cannot parse subtype of nonarray abi-type: {abi_type}"
        )
    # `count` passed by keyword: positional count for re.sub is deprecated
    # as of Python 3.13.
    return re.sub(END_BRACKETS_OF_ARRAY_TYPE_REGEX, '', abi_type, count=1)
def length_of_array_type(abi_type: TypeStr) -> Optional[int]:
    """
    Return the declared length of the outermost array dimension of
    ``abi_type``, or ``None`` for dynamically sized arrays (``'[]'``).

    Raises ``ValueError`` if ``abi_type`` is not an array type.
    """
    # BUGFIX: annotation said `int` but None is returned for dynamic
    # arrays; corrected to Optional[int] (no runtime change).
    if not is_array_type(abi_type):
        raise ValueError(
            f"Cannot parse length of nonarray abi-type: {abi_type}"
        )

    inner_brackets = re.search(END_BRACKETS_OF_ARRAY_TYPE_REGEX, abi_type).group(0).strip("[]")
    if not inner_brackets:
        return None
    return int(inner_brackets)
# A base name followed by one or more array dimensions.
ARRAY_REGEX = (
    "^"
    "[a-zA-Z0-9_]+"
    "({sub_type})+"
    "$"
).format(sub_type=SUB_TYPE_REGEX)


def is_array_type(abi_type: TypeStr) -> bool:
    """True when ``abi_type`` ends in one or more array dimensions."""
    return re.match(ARRAY_REGEX, abi_type) is not None
# A single identifier.
NAME_REGEX = (
    '[a-zA-Z_]'
    '[a-zA-Z0-9_]*'
)

# A dotted pair of identifiers, e.g. `Library.EnumName`.
ENUM_REGEX = (
    '^'
    '{lib_name}'
    r'\.'
    '{enum_name}'
    '$'
).format(lib_name=NAME_REGEX, enum_name=NAME_REGEX)


def is_probably_enum(abi_type: TypeStr) -> bool:
    """True when ``abi_type`` looks like a ``Lib.Enum`` reference."""
    return re.match(ENUM_REGEX, abi_type) is not None
@to_tuple
def normalize_event_input_types(
    abi_args: Collection[Union[ABIFunction, ABIEvent]]
) -> Iterable[Union[ABIFunction, ABIEvent, Dict[TypeStr, Any]]]:
    """
    Yield each ABI arg unchanged, except that args whose type is not a
    recognized ABI type but looks like an enum reference (``Lib.Enum``)
    are rewritten with type ``uint8``.
    """
    for arg in abi_args:
        if not is_recognized_type(arg['type']) and is_probably_enum(arg['type']):
            yield {k: 'uint8' if k == 'type' else v for k, v in arg.items()}
        else:
            yield arg
def abi_to_signature(abi: Union[ABIFunction, ABIEvent]) -> str:
    """Return the canonical signature, e.g. ``transfer(address,uint256)``."""
    input_types = ','.join(
        arg['type']
        for arg in normalize_event_input_types(abi.get('inputs', []))
    )
    return "{fn_name}({fn_input_types})".format(
        fn_name=abi['name'],
        fn_input_types=input_types,
    )
########################################################
#
# Conditionally modifying data, tagged with ABI Types
#
########################################################
@curry
def map_abi_data(
    normalizers: Sequence[Callable[[TypeStr, Any], Tuple[TypeStr, Any]]],
    types: Sequence[TypeStr],
    data: Sequence[Any],
) -> Any:
    """
    This function will apply normalizers to your data, in the
    context of the relevant types. Each normalizer is in the format:

    def normalizer(datatype, data):
        # Conditionally modify data
        return (datatype, data)

    Where datatype is a valid ABI type string, like "uint".

    In case of an array, like "bool[2]", normalizer will receive `data`
    as an iterable of typed data, like `[("bool", True), ("bool", False)]`.

    Internals
    ---

    This is accomplished by:

    1. Decorating the data tree with types
    2. Recursively mapping each of the normalizers to the data
    3. Stripping the types back out of the tree
    """
    pipeline = itertools.chain(
        [abi_data_tree(types)],                     # 1. decorate with types
        map(data_tree_map, normalizers),            # 2. apply each normalizer
        [partial(recursive_map, strip_abi_type)],   # 3. strip types back out
    )

    return pipe(data, *pipeline)
@curry
def abi_data_tree(types: Sequence[TypeStr], data: Sequence[Any]) -> List[Any]:
    """
    Decorate the data tree with pairs of (type, data). The pair tuple is
    actually an ABITypedData, but can be accessed as a tuple.

    As an example:

    >>> abi_data_tree(types=["bool[2]", "uint"], data=[[True, False], 0])
    [("bool[2]", [("bool", True), ("bool", False)]), ("uint256", 0)]
    """
    return [
        abi_sub_tree(data_type, data_value)
        for data_type, data_value in zip(types, data)
    ]
@curry
def data_tree_map(
    func: Callable[[TypeStr, Any], Tuple[TypeStr, Any]], data_tree: Any
) -> "ABITypedData":
    """
    Map ``func`` over every typed node in ``data_tree``. ``func`` receives
    ``(abi_type, data)`` and returns the replacement pair.
    """
    def map_to_typed_data(elements: Any) -> "ABITypedData":
        if not isinstance(elements, ABITypedData) or elements.abi_type is None:
            # Untyped nodes pass through unchanged.
            return elements
        return ABITypedData(func(*elements))
    return recursive_map(map_to_typed_data, data_tree)
class ABITypedData(namedtuple('ABITypedData', 'abi_type, data')):
    """
    A two-field namedtuple marking a piece of data with its ABI type.

    >>> a1 = ABITypedData(['address', addr1])
    >>> a2 = ABITypedData(['address', addr2])
    >>> addrs = ABITypedData(['address[]', [a1, a2]])

    Fields are reachable both by tuple index and by attribute:

    >>> assert a1.abi_type == a1[0]
    >>> assert a1.data == a1[1]

    Unlike a typical namedtuple, construction takes a single iterable
    positional argument, matching the init interface of the other relevant
    collection types.
    """
    def __new__(cls, iterable: Iterable[Any]) -> "ABITypedData":
        # Unpack the single iterable into the (abi_type, data) fields.
        return super().__new__(cls, *iterable)
def abi_sub_tree(
    type_str_or_abi_type: Optional[Union[TypeStr, ABIType]], data_value: Any
) -> ABITypedData:
    """
    Build an ``ABITypedData`` tree pairing ``data_value`` (and, recursively,
    its elements) with the given ABI type.
    """
    if type_str_or_abi_type is None:
        return ABITypedData([None, data_value])

    if isinstance(type_str_or_abi_type, TypeStr):
        abi_type = parse(type_str_or_abi_type)
    else:
        abi_type = type_str_or_abi_type

    # In the two special cases below, we rebuild the given data structures with
    # annotated items
    if abi_type.is_array:
        # If type is array, determine item type and annotate all
        # items in iterable with that type
        item_type_str = abi_type.item_type.to_type_str()
        value_to_annotate = [
            abi_sub_tree(item_type_str, item_value)
            for item_value in data_value
        ]
    elif isinstance(abi_type, TupleType):
        # Otherwise, if type is tuple, determine component types and annotate
        # tuple components in iterable respectively with those types
        value_to_annotate = type(data_value)(
            abi_sub_tree(comp_type.to_type_str(), comp_value)
            for comp_type, comp_value in zip(abi_type.components, data_value)
        )
    else:
        value_to_annotate = data_value

    return ABITypedData([
        abi_type.to_type_str(),
        value_to_annotate,
    ])
def strip_abi_type(elements: Any) -> Any:
    """Unwrap an ``ABITypedData`` node; return anything else unchanged."""
    return elements.data if isinstance(elements, ABITypedData) else elements
def build_default_registry() -> ABIRegistry:
    """
    Return a copy of eth-abi's default registry with web3's lenient,
    hex-string-accepting encoders swapped in for address/bytes/string.
    """
    # We make a copy here just to make sure that eth-abi's default registry is not
    # affected by our custom encoder subclasses
    registry = default_registry.copy()

    registry.unregister('address')
    registry.unregister('bytes<M>')
    registry.unregister('bytes')
    registry.unregister('string')

    registry.register(
        BaseEquals('address'),
        AddressEncoder, decoding.AddressDecoder,
        label='address',
    )
    registry.register(
        BaseEquals('bytes', with_sub=True),
        BytesEncoder, decoding.BytesDecoder,
        label='bytes<M>',
    )
    registry.register(
        BaseEquals('bytes', with_sub=False),
        ByteStringEncoder, decoding.ByteStringDecoder,
        label='bytes',
    )
    registry.register(
        BaseEquals('string'),
        TextStringEncoder, decoding.StringDecoder,
        label='string',
    )
    return registry
def build_strict_registry() -> ABIRegistry:
    """
    Return a copy of eth-abi's default registry using the strict encoders:
    exact-length ``bytes<M>`` and "0x"-prefix-required byte strings.
    """
    # Copy so the shared eth-abi default registry is left untouched.
    registry = default_registry.copy()

    registry.unregister('address')
    registry.unregister('bytes<M>')
    registry.unregister('bytes')
    registry.unregister('string')

    registry.register(
        BaseEquals('address'),
        AddressEncoder, decoding.AddressDecoder,
        label='address',
    )
    registry.register(
        BaseEquals('bytes', with_sub=True),
        ExactLengthBytesEncoder, BytesDecoder,
        label='bytes<M>',
    )
    registry.register(
        BaseEquals('bytes', with_sub=False),
        StrictByteStringEncoder, decoding.ByteStringDecoder,
        label='bytes',
    )
    registry.register(
        BaseEquals('string'),
        TextStringEncoder, decoding.StringDecoder,
        label='string',
    )
    return registry
| import binascii
from collections import (
abc,
namedtuple,
)
import copy
import itertools
import re
from typing import (
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
from eth_abi import (
codec,
decoding,
encoding,
)
from eth_abi.base import (
parse_type_str,
)
from eth_abi.exceptions import (
ValueOutOfBounds,
)
from eth_abi.grammar import (
ABIType,
BasicType,
TupleType,
parse,
)
from eth_abi.registry import (
ABIRegistry,
BaseEquals,
registry as default_registry,
)
from eth_typing import (
HexStr,
TypeStr,
)
from eth_utils import (
combomethod,
decode_hex,
is_bytes,
is_list_like,
is_text,
to_text,
to_tuple,
)
from eth_utils.abi import (
collapse_if_tuple,
)
from eth_utils.toolz import (
curry,
partial,
pipe,
)
from web3._utils.ens import (
is_ens_name,
)
from web3._utils.formatters import (
recursive_map,
)
from web3.exceptions import (
FallbackNotFound,
)
from web3.types import (
ABI,
ABIEvent,
ABIEventParams,
ABIFunction,
ABIFunctionParams,
)
def filter_by_type(_type: str, contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:
    """Return every ABI element whose ``type`` field equals ``_type``."""
    return [abi for abi in contract_abi if _type == abi['type']]
def filter_by_name(name: str, contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:
    """
    Return the ABI elements named ``name``.

    Fallback, constructor and receive entries carry no name and are skipped.
    """
    unnamed_types = ('fallback', 'constructor', 'receive')
    return [
        abi for abi in contract_abi
        if abi['type'] not in unnamed_types and abi['name'] == name
    ]
def get_abi_input_types(abi: ABIFunction) -> List[str]:
    """
    Return the collapsed type strings of the function's inputs.

    Fallback and receive functions may omit the 'inputs' key entirely;
    they have no inputs, so an empty list is returned.
    """
    if 'inputs' not in abi and abi['type'] in ('fallback', 'receive'):
        return []
    return [collapse_if_tuple(cast(Dict[str, Any], arg)) for arg in abi['inputs']]
def get_abi_output_types(abi: ABIFunction) -> List[str]:
    """
    Return the collapsed type strings of the function's outputs.

    Fallback functions have no outputs. BUGFIX: receive functions may also
    omit the 'outputs' key (mirroring get_abi_input_types); previously that
    raised KeyError — now it returns an empty list. Behavior for all other
    ABIs is unchanged.
    """
    if abi['type'] == 'fallback' or (abi['type'] == 'receive' and 'outputs' not in abi):
        return []
    else:
        return [collapse_if_tuple(cast(Dict[str, Any], arg)) for arg in abi['outputs']]
def get_abi_input_names(abi: Union[ABIFunction, ABIEvent]) -> List[str]:
    """
    Return the declared input names of ``abi``.

    BUGFIX: receive functions, like fallback functions, may omit the
    'inputs' key; previously only 'fallback' was handled, so a receive ABI
    raised KeyError. This mirrors get_abi_input_types; all other ABIs are
    handled exactly as before.
    """
    if 'inputs' not in abi and abi['type'] in ('fallback', 'receive'):
        return []
    else:
        return [arg['name'] for arg in abi['inputs']]
def get_receive_func_abi(contract_abi: ABI) -> ABIFunction:
    """
    Return the receive function ABI.

    Raises ``FallbackNotFound`` when the contract declares none.
    """
    for abi in filter_by_type('receive', contract_abi):
        return cast(ABIFunction, abi)
    raise FallbackNotFound("No receive function was found in the contract ABI.")
def get_fallback_func_abi(contract_abi: ABI) -> ABIFunction:
    """
    Return the fallback function ABI.

    Raises ``FallbackNotFound`` when the contract declares none.
    """
    for abi in filter_by_type('fallback', contract_abi):
        return cast(ABIFunction, abi)
    raise FallbackNotFound("No fallback function was found in the contract ABI.")
def fallback_func_abi_exists(contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:
    """Truthy (non-empty list) when the ABI declares a fallback function."""
    return filter_by_type('fallback', contract_abi)


def receive_func_abi_exists(contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:
    """Truthy (non-empty list) when the ABI declares a receive function."""
    return filter_by_type('receive', contract_abi)
def get_indexed_event_inputs(event_abi: ABIEvent) -> List[ABIEventParams]:
    """Return only the event inputs whose 'indexed' flag is exactly True."""
    # NOTE: identity check (`is True`) is deliberate — truthy non-bool
    # values do not qualify.
    return [arg for arg in event_abi['inputs'] if arg['indexed'] is True]


def exclude_indexed_event_inputs(event_abi: ABIEvent) -> List[ABIEventParams]:
    """Return only the event inputs whose 'indexed' flag is exactly False."""
    return [arg for arg in event_abi['inputs'] if arg['indexed'] is False]
def get_normalized_abi_arg_type(abi_arg: ABIEventParams) -> str:
    """
    Return the normalized type for the abi argument provided. In order to account for tuple argument
    types, this abstraction makes use of `collapse_if_tuple()` to collapse the appropriate component
    types within a tuple type, if present.
    """
    # dict() copy: collapse_if_tuple expects a plain Dict, not a TypedDict.
    return collapse_if_tuple(dict(abi_arg))
def filter_by_argument_count(
    num_arguments: int, contract_abi: ABI
) -> List[Union[ABIFunction, ABIEvent]]:
    """Return ABI elements declaring exactly ``num_arguments`` inputs."""
    return [
        abi
        for abi
        in contract_abi
        if len(abi['inputs']) == num_arguments
    ]


def filter_by_argument_name(
    argument_names: Collection[str], contract_abi: ABI
) -> List[Union[ABIFunction, ABIEvent]]:
    """Return ABI elements whose input names include every ``argument_names``."""
    # Hoisted: the target set was previously rebuilt (twice) for every ABI
    # element; issubset is equivalent to the old intersection == set test.
    wanted = set(argument_names)
    return [
        abi
        for abi in contract_abi
        if wanted.issubset(get_abi_input_names(abi))
    ]
class AddressEncoder(encoding.AddressEncoder):
    """Address encoder that additionally accepts ENS names as valid input."""
    @classmethod
    def validate_value(cls, value: Any) -> None:
        if is_ens_name(value):
            # ENS names are resolved elsewhere; accept them as-is here.
            return

        super().validate_value(value)
class AcceptsHexStrEncoder(encoding.BaseEncoder):
    """
    Wraps an eth-abi encoder so values may also be supplied as hex strings.

    Subclasses set ``subencoder_cls`` (the wrapped eth-abi encoder) and
    ``is_strict`` (whether hex strings must carry the "0x" prefix).
    """
    subencoder_cls: Type[encoding.BaseEncoder] = None
    is_strict: bool = None

    def __init__(self, subencoder: encoding.BaseEncoder) -> None:
        self.subencoder = subencoder

    # type ignored b/c conflict w/ defined BaseEncoder.is_dynamic = False
    @property
    def is_dynamic(self) -> bool:  # type: ignore
        return self.subencoder.is_dynamic

    @classmethod
    def from_type_str(cls, abi_type: TypeStr, registry: ABIRegistry) -> "AcceptsHexStrEncoder":
        subencoder_cls = cls.get_subencoder_class()
        # cast b/c expects BaseCoder but `from_type_string` restricted to BaseEncoder subclasses
        subencoder = cast(encoding.BaseEncoder, subencoder_cls.from_type_str(abi_type, registry))
        return cls(subencoder)

    @classmethod
    def get_subencoder_class(cls) -> Type[encoding.BaseEncoder]:
        if cls.subencoder_cls is None:
            raise AttributeError(f'No subencoder class is set. {cls.__name__}')
        return cls.subencoder_cls

    # type ignored b/c combomethod makes signature conflict w/ defined BaseEncoder.validate_value()
    @combomethod
    def validate_value(self, value: Any) -> None:  # type: ignore
        normalized_value = self.validate_and_normalize(value)
        return self.subencoder.validate_value(normalized_value)

    def encode(self, value: Any) -> bytes:
        normalized_value = self.validate_and_normalize(value)
        return self.subencoder.encode(normalized_value)

    def validate_and_normalize(self, value: Any) -> HexStr:
        """Decode hex-string input to bytes, enforcing/warning on "0x" prefix."""
        raw_value = value
        if is_text(value):
            try:
                value = decode_hex(value)
            except binascii.Error:
                self.invalidate_value(
                    value,
                    msg=f'{value} is an invalid hex string',
                )
            else:
                if raw_value[:2] != '0x':
                    if self.is_strict:
                        self.invalidate_value(
                            raw_value,
                            msg='hex string must be prefixed with 0x'
                        )
                    else:
                        # CLEANUP: the original branch re-tested
                        # `raw_value[:2] != '0x'` here, which is always true
                        # on this path; simplified to a plain else.
                        warnings.warn(
                            'in v6 it will be invalid to pass a hex string without the "0x" prefix',
                            category=DeprecationWarning
                        )
        return value
class BytesEncoder(AcceptsHexStrEncoder):
    # Fixed-size `bytes<M>`; unprefixed hex strings warn rather than fail.
    subencoder_cls = encoding.BytesEncoder
    is_strict = False


class ByteStringEncoder(AcceptsHexStrEncoder):
    # Dynamic `bytes`; unprefixed hex strings warn rather than fail.
    subencoder_cls = encoding.ByteStringEncoder
    is_strict = False


class StrictByteStringEncoder(AcceptsHexStrEncoder):
    # Dynamic `bytes`; hex strings must carry the "0x" prefix.
    subencoder_cls = encoding.ByteStringEncoder
    is_strict = True
class ExactLengthBytesEncoder(encoding.BaseEncoder):
    """
    Encoder for fixed-size ``bytesM`` values that requires the input to be
    exactly M bytes long: short values are rejected, not padded.
    """
    # TODO: move this to eth-abi once the api is stabilized
    is_big_endian = False
    value_bit_size = None
    data_byte_size = None

    def validate(self) -> None:
        # Configuration sanity checks; raises ValueError on a misconfigured
        # encoder (sizes unset, non-multiple-of-8 bit size, or overflow).
        super().validate()

        if self.value_bit_size is None:
            raise ValueError("`value_bit_size` may not be none")
        if self.data_byte_size is None:
            raise ValueError("`data_byte_size` may not be none")
        if self.encode_fn is None:
            raise ValueError("`encode_fn` may not be none")
        if self.is_big_endian is None:
            raise ValueError("`is_big_endian` may not be none")

        if self.value_bit_size % 8 != 0:
            raise ValueError(
                f"Invalid value bit size: {self.value_bit_size}. Must be a multiple of 8"
            )

        if self.value_bit_size > self.data_byte_size * 8:
            raise ValueError("Value byte size exceeds data size")

    def encode(self, value: Any) -> bytes:
        normalized_value = self.validate_value(value)
        return self.encode_fn(normalized_value)

    # type ignored b/c conflict with defined BaseEncoder.validate_value() -> None
    def validate_value(self, value: Any) -> bytes:  # type: ignore
        # Accepts raw bytes or a "0x"-prefixed hex string; returns the
        # decoded bytes after checking the exact expected length.
        if not is_bytes(value) and not is_text(value):
            self.invalidate_value(value)

        raw_value = value
        if is_text(value):
            try:
                value = decode_hex(value)
            except binascii.Error:
                self.invalidate_value(
                    value,
                    msg=f'{value} is not a valid hex string',
                )
            else:
                # Strict: unprefixed hex strings are invalid here.
                if raw_value[:2] != '0x':
                    self.invalidate_value(
                        raw_value,
                        msg='hex string must be prefixed with 0x'
                    )

        byte_size = self.value_bit_size // 8
        if len(value) > byte_size:
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg=f"exceeds total byte size for bytes{byte_size} encoding",
            )
        elif len(value) < byte_size:
            # Exact length required: short values are an error, not padded.
            self.invalidate_value(
                value,
                exc=ValueOutOfBounds,
                msg=f"less than total byte size for bytes{byte_size} encoding",
            )
        return value

    @staticmethod
    def encode_fn(value: Any) -> bytes:
        # Identity: validate_value already produced the final byte string.
        return value

    @parse_type_str('bytes')
    def from_type_str(cls, abi_type: BasicType, registry: ABIRegistry) -> bytes:
        # type ignored b/c kwargs are set in superclass init
        # Unexpected keyword argument "value_bit_size" for "__call__" of "BaseEncoder"
        return cls(  # type: ignore
            value_bit_size=abi_type.sub * 8,
            data_byte_size=abi_type.sub,
        )
class BytesDecoder(decoding.FixedByteSizeDecoder):
    """Decoder for fixed-size ``bytesM`` values that returns the raw bytes."""
    # FixedByteSizeDecoder.is_big_endian is defined as None
    is_big_endian = False  # type: ignore

    # FixedByteSizeDecoder.decoder_fn is defined as None
    @staticmethod
    def decoder_fn(data: bytes) -> bytes:  # type: ignore
        # Identity: the fixed-size frame is already the value.
        return data

    @parse_type_str('bytes')
    def from_type_str(cls, abi_type: BasicType, registry: ABIRegistry) -> bytes:
        # type ignored b/c kwargs are set in superclass init
        # Unexpected keyword argument "value_bit_size" for "__call__" of "BaseDecoder"
        return cls(  # type: ignore
            value_bit_size=abi_type.sub * 8,
            data_byte_size=abi_type.sub,
        )
class TextStringEncoder(encoding.TextStringEncoder):
    """ABI ``string`` encoder that also accepts UTF-8-decodable ``bytes``."""
    @classmethod
    def validate_value(cls, value: Any) -> None:
        if is_bytes(value):
            # Bytes input is validated as text; undecodable bytes are invalid.
            try:
                value = to_text(value)
            except UnicodeDecodeError:
                cls.invalidate_value(
                    value,
                    msg='not decodable as unicode string',
                )
        super().validate_value(value)
def filter_by_encodability(
    abi_codec: codec.ABIEncoder, args: Sequence[Any], kwargs: Dict[str, Any], contract_abi: ABI
) -> List[ABIFunction]:
    """Return the function ABIs whose inputs can encode ``args``/``kwargs``."""
    return [
        cast(ABIFunction, fn_abi)
        for fn_abi in contract_abi
        if check_if_arguments_can_be_encoded(
            cast(ABIFunction, fn_abi), abi_codec, args, kwargs
        )
    ]
def check_if_arguments_can_be_encoded(
    function_abi: ABIFunction,
    abi_codec: codec.ABIEncoder,
    args: Sequence[Any],
    kwargs: Dict[str, Any],
) -> bool:
    """
    Return True when ``args``/``kwargs`` can be merged, aligned and encoded
    against ``function_abi``; False on any mismatch.
    """
    try:
        merged = merge_args_and_kwargs(function_abi, args, kwargs)
    except TypeError:
        return False

    if len(function_abi.get('inputs', [])) != len(merged):
        return False

    try:
        types, aligned = get_aligned_abi_inputs(function_abi, merged)
    except TypeError:
        return False

    return all(
        abi_codec.is_encodable(type_str, arg)
        for type_str, arg in zip(types, aligned)
    )
def merge_args_and_kwargs(
    function_abi: ABIFunction, args: Sequence[Any], kwargs: Dict[str, Any]
) -> Tuple[Any, ...]:
    """
    Takes a list of positional args (``args``) and a dict of keyword args
    (``kwargs``) defining values to be passed to a call to the contract function
    described by ``function_abi``. Checks to ensure that the correct number of
    args were given, no duplicate args were given, and no unknown args were
    given. Returns a list of argument values aligned to the order of inputs
    defined in ``function_abi``.

    Raises ``TypeError`` on a count mismatch, duplicate, or unknown argument.
    """
    # Ensure the function is being applied to the correct number of args
    if len(args) + len(kwargs) != len(function_abi.get('inputs', [])):
        raise TypeError(
            f"Incorrect argument count. Expected '{len(function_abi['inputs'])}"
            f". Got '{len(args) + len(kwargs)}'"
        )

    # If no keyword args were given, we don't need to align them
    if not kwargs:
        return cast(Tuple[Any, ...], args)

    kwarg_names = set(kwargs.keys())
    sorted_arg_names = tuple(arg_abi['name'] for arg_abi in function_abi['inputs'])
    # Positional args paired with the leading declared input names.
    args_as_kwargs = dict(zip(sorted_arg_names, args))

    # Check for duplicate args
    duplicate_args = kwarg_names.intersection(args_as_kwargs.keys())
    if duplicate_args:
        raise TypeError(
            f"{function_abi.get('name')}() got multiple values for argument(s) "
            f"'{', '.join(duplicate_args)}'"
        )

    # Check for unknown args
    unknown_args = kwarg_names.difference(sorted_arg_names)
    if unknown_args:
        if function_abi.get('name'):
            raise TypeError(
                f"{function_abi.get('name')}() got unexpected keyword argument(s)"
                f" '{', '.join(unknown_args)}'"
            )
        raise TypeError(
            f"Type: '{function_abi.get('type')}' got unexpected keyword argument(s)"
            f" '{', '.join(unknown_args)}'"
        )

    # Sort args according to their position in the ABI and unzip them from their
    # names
    sorted_args = tuple(zip(
        *sorted(
            itertools.chain(kwargs.items(), args_as_kwargs.items()),
            key=lambda kv: sorted_arg_names.index(kv[0]),
        )
    ))

    if sorted_args:
        # zip(*...) yields (names, values); return only the values.
        return sorted_args[1]
    else:
        return tuple()
TUPLE_TYPE_STR_RE = re.compile(r'^(tuple)(\[([1-9][0-9]*)?\])?$')


def get_tuple_type_str_parts(s: str) -> Optional[Tuple[str, Optional[str]]]:
    """
    Split a JSON ABI type string into its tuple prefix and array dimension.

    Returns ``(prefix, dims)`` for tuple type strings — ``dims`` is ``None``
    for a bare ``tuple`` — and ``None`` for every non-tuple type string.
    """
    match = TUPLE_TYPE_STR_RE.match(s)
    if match is None:
        return None
    return match.group(1), match.group(2)
def _align_abi_input(arg_abi: ABIFunctionParams, arg: Any) -> Tuple[Any, ...]:
    """
    Aligns the values of any mapping at any level of nesting in ``arg``
    according to the layout of the corresponding abi spec.
    """
    tuple_parts = get_tuple_type_str_parts(arg_abi['type'])
    if tuple_parts is None:
        # Arg is non-tuple. Just return value.
        return arg

    tuple_prefix, tuple_dims = tuple_parts
    if tuple_dims is None:
        # Arg is non-list tuple. Each sub arg in `arg` will be aligned
        # according to its corresponding abi.
        sub_abis = arg_abi['components']
    else:
        # Arg is list tuple. A non-list version of its abi will be used to
        # align each element in `arg`.
        new_abi = copy.copy(arg_abi)
        new_abi['type'] = tuple_prefix
        sub_abis = itertools.repeat(new_abi)  # type: ignore

    if isinstance(arg, abc.Mapping):
        # Arg is mapping. Align values according to abi order.
        aligned_arg = tuple(arg[abi['name']] for abi in sub_abis)
    else:
        aligned_arg = arg

    if not is_list_like(aligned_arg):
        raise TypeError(
            f'Expected non-string sequence for "{arg_abi.get("type")}" '
            f'component type: got {aligned_arg}'
        )

    # convert NamedTuple to regular tuple
    typing = tuple if isinstance(aligned_arg, tuple) else type(aligned_arg)

    return typing(
        _align_abi_input(sub_abi, sub_arg)
        for sub_abi, sub_arg in zip(sub_abis, aligned_arg)
    )
def get_aligned_abi_inputs(
    abi: ABIFunction, args: Union[Tuple[Any, ...], Mapping[Any, Any]]
) -> Tuple[Tuple[Any, ...], Tuple[Any, ...]]:
    """
    Return ``(types, aligned_args)`` for the function ABI ``abi``.

    ``args`` may be a sequence or a mapping keyed by input name; nested
    mappings/sequences are aligned recursively against tuple-encoded inputs.
    """
    input_abis = abi.get('inputs', [])
    if isinstance(args, abc.Mapping):
        # Mapping input: reorder values into the declared input order.
        args = tuple(args[input_abi['name']] for input_abi in input_abis)

    # typed dict cannot be used w/ a normal Dict
    # https://github.com/python/mypy/issues/4976
    types = tuple(collapse_if_tuple(input_abi) for input_abi in input_abis)  # type: ignore
    aligned = type(args)(
        _align_abi_input(input_abi, arg)
        for input_abi, arg in zip(input_abis, args)
    )
    return types, aligned
def get_constructor_abi(contract_abi: ABI) -> ABIFunction:
    """
    Return the constructor entry of ``contract_abi``, or ``None`` when no
    constructor is declared.

    Raises ``ValueError`` when more than one constructor is present.
    """
    candidates = [
        abi for abi in contract_abi if abi['type'] == 'constructor'
    ]
    # BUGFIX: the if/elif chain was exhaustive, so the trailing
    # `return None` was unreachable dead code; restructured.
    if len(candidates) > 1:
        raise ValueError("Found multiple constructors.")
    if candidates:
        return candidates[0]
    return None
# Vocabulary of ABI type names used to build TYPE_REGEX below.
DYNAMIC_TYPES = ['bytes', 'string']

# Integer widths 8..256 in steps of 8 bits.
INT_SIZES = range(8, 257, 8)
# Fixed-bytes widths 1..32.
BYTES_SIZES = range(1, 33)

UINT_TYPES = [f'uint{i}' for i in INT_SIZES]
INT_TYPES = [f'int{i}' for i in INT_SIZES]
# NOTE(review): 'bytes32.byte' presumably covers a legacy `byte` alias —
# confirm against the ABI spec this file targets.
BYTES_TYPES = [f'bytes{i}' for i in BYTES_SIZES] + ['bytes32.byte']

STATIC_TYPES = list(itertools.chain(
    ['address', 'bool'],
    UINT_TYPES,
    INT_TYPES,
    BYTES_TYPES,
))

# Alternation of every base type name; the lookahead rejects prefix matches
# (e.g. 'uint8' must not match inside 'uint88').
BASE_TYPE_REGEX = '|'.join((
    _type + '(?![a-z0-9])'
    for _type
    in itertools.chain(STATIC_TYPES, DYNAMIC_TYPES)
))

# A single array dimension, sized or not: '[]', '[10]', ...
SUB_TYPE_REGEX = (
    r'\['
    '[0-9]*'
    r'\]'
)

# Full ABI type string: one base type followed by zero or more dimensions.
TYPE_REGEX = (
    '^'
    '(?:{base_type})'
    '(?:(?:{sub_type})*)?'
    '$'
).format(
    base_type=BASE_TYPE_REGEX,
    sub_type=SUB_TYPE_REGEX,
)
def is_recognized_type(abi_type: TypeStr) -> bool:
    """True when ``abi_type`` matches the supported ABI type grammar."""
    return bool(re.match(TYPE_REGEX, abi_type))


def is_bool_type(abi_type: TypeStr) -> bool:
    """True for the ``bool`` ABI type."""
    return abi_type == 'bool'


def is_uint_type(abi_type: TypeStr) -> bool:
    """True for any ``uintN`` ABI type."""
    return abi_type in UINT_TYPES


def is_int_type(abi_type: TypeStr) -> bool:
    """True for any ``intN`` ABI type."""
    return abi_type in INT_TYPES


def is_address_type(abi_type: TypeStr) -> bool:
    """True for the ``address`` ABI type."""
    return abi_type == 'address'


def is_bytes_type(abi_type: TypeStr) -> bool:
    """True for fixed ``bytesM`` or dynamic ``bytes``."""
    return abi_type in BYTES_TYPES + ['bytes']


def is_string_type(abi_type: TypeStr) -> bool:
    """True for the ``string`` ABI type."""
    return abi_type == 'string'
@curry
def is_length(target_length: int, value: abc.Sized) -> bool:
    """Curried predicate: True when ``len(value) == target_length``."""
    return len(value) == target_length
def size_of_type(abi_type: TypeStr) -> Optional[int]:
    """
    Returns size in bits of abi_type, or ``None`` for types without a fixed
    bit size (strings, bytes and arrays).
    """
    # BUGFIX: the return annotation said `int`, but several paths return
    # None; annotation corrected to Optional[int] (no runtime change).
    if 'string' in abi_type:
        return None
    if 'byte' in abi_type:
        return None
    if '[' in abi_type:
        return None
    if abi_type == 'bool':
        return 8
    if abi_type == 'address':
        return 160
    # Remaining types are intN/uintN: the digits are the bit size.
    return int(re.sub(r"\D", "", abi_type))
END_BRACKETS_OF_ARRAY_TYPE_REGEX = r"\[[^]]*\]$"
def sub_type_of_array_type(abi_type: TypeStr) -> str:
    """Strip the outermost array suffix from ``abi_type``.

    E.g. ``'uint256[2][3]'`` -> ``'uint256[2]'``.

    Raises:
        ValueError: if ``abi_type`` is not an array type.
    """
    if not is_array_type(abi_type):
        raise ValueError(
            f"Cannot parse subtype of nonarray abi-type: {abi_type}"
        )
    # count= must be a keyword: passing it positionally is deprecated
    # as of Python 3.13.
    return re.sub(END_BRACKETS_OF_ARRAY_TYPE_REGEX, '', abi_type, count=1)
def length_of_array_type(abi_type: TypeStr) -> Optional[int]:
    """Return the declared length of the outermost array dimension of
    ``abi_type``, or ``None`` for a dynamically sized array (``'[]'``).

    Raises:
        ValueError: if ``abi_type`` is not an array type.
    """
    if not is_array_type(abi_type):
        raise ValueError(
            f"Cannot parse length of nonarray abi-type: {abi_type}"
        )
    # is_array_type guarantees a trailing bracket group, so search() matches.
    inner_brackets = re.search(END_BRACKETS_OF_ARRAY_TYPE_REGEX, abi_type).group(0).strip("[]")
    if not inner_brackets:
        return None
    else:
        return int(inner_brackets)
# An array type: a bare name followed by one or more '[]'/'[N]' suffixes.
ARRAY_REGEX = (
    "^"
    "[a-zA-Z0-9_]+"
    "({sub_type})+"
    "$"
).format(sub_type=SUB_TYPE_REGEX)
def is_array_type(abi_type: TypeStr) -> bool:
    """Return True when ``abi_type`` ends in one or more array suffixes."""
    return re.match(ARRAY_REGEX, abi_type) is not None
# A Solidity identifier: letter or underscore, then alphanumerics/underscores.
NAME_REGEX = (
    '[a-zA-Z_]'
    '[a-zA-Z0-9_]*'
)
# An enum reference as it appears in ABIs: "LibraryName.EnumName".
ENUM_REGEX = (
    '^'
    '{lib_name}'
    r'\.'
    '{enum_name}'
    '$'
).format(lib_name=NAME_REGEX, enum_name=NAME_REGEX)
def is_probably_enum(abi_type: TypeStr) -> bool:
    """Return True when ``abi_type`` looks like a 'Library.EnumName' reference."""
    return re.match(ENUM_REGEX, abi_type) is not None
@to_tuple
def normalize_event_input_types(
    abi_args: Collection[Union[ABIFunction, ABIEvent]]
) -> Iterable[Union[ABIFunction, ABIEvent, Dict[TypeStr, Any]]]:
    """Yield each arg unchanged, except that args whose type is not a
    recognized ABI type but looks like a 'Library.EnumName' reference are
    rewritten with type 'uint8' (the ABI encoding of a Solidity enum).
    """
    for arg in abi_args:
        arg_type = arg['type']
        if not is_recognized_type(arg_type) and is_probably_enum(arg_type):
            yield {
                key: ('uint8' if key == 'type' else value)
                for key, value in arg.items()
            }
        else:
            yield arg
def abi_to_signature(abi: Union[ABIFunction, ABIEvent]) -> str:
    """Return the canonical signature string for a function/event ABI entry,
    e.g. ``transfer(address,uint256)``; enum input types are normalized first.
    """
    joined_input_types = ','.join(
        arg['type']
        for arg in normalize_event_input_types(abi.get('inputs', []))
    )
    return f"{abi['name']}({joined_input_types})"
########################################################
#
# Conditionally modifying data, tagged with ABI Types
#
########################################################
@curry
def map_abi_data(
    normalizers: Sequence[Callable[[TypeStr, Any], Tuple[TypeStr, Any]]],
    types: Sequence[TypeStr],
    data: Sequence[Any],
) -> Any:
    """
    Apply each normalizer to ``data`` in the context of ``types``.

    A normalizer has the shape::

        def normalizer(datatype, data):
            # conditionally modify data
            return (datatype, data)

    where ``datatype`` is a valid ABI type string, like "uint".  For an
    array type such as "bool[2]" the normalizer receives ``data`` as an
    iterable of typed pairs, e.g. ``[("bool", True), ("bool", False)]``.

    Implementation: decorate the data tree with types, map every
    normalizer over the decorated tree, then strip the types back out.
    """
    decorate_step = abi_data_tree(types)
    normalize_steps = [data_tree_map(normalizer) for normalizer in normalizers]
    strip_step = partial(recursive_map, strip_abi_type)
    return pipe(data, decorate_step, *normalize_steps, strip_step)
@curry
def abi_data_tree(types: Sequence[TypeStr], data: Sequence[Any]) -> List[Any]:
    """
    Pair each value in ``data`` with its ABI type, recursively, producing a
    tree of ABITypedData nodes (each also usable as a (type, data) tuple).

    As an example:

    >>> abi_data_tree(types=["bool[2]", "uint"], data=[[True, False], 0])
    [("bool[2]", [("bool", True), ("bool", False)]), ("uint256", 0)]
    """
    return [
        abi_sub_tree(abi_type, value)
        for abi_type, value in zip(types, data)
    ]
@curry
def data_tree_map(
    func: Callable[[TypeStr, Any], Tuple[TypeStr, Any]], data_tree: Any
) -> "ABITypedData":
    """
    Apply ``func`` to every typed node in the tree.  ``func`` receives two
    arguments per node: the abi_type and the data.
    """
    def apply_if_typed(node: Any) -> "ABITypedData":
        # Untyped nodes (abi_type is None) and plain values pass through.
        if isinstance(node, ABITypedData) and node.abi_type is not None:
            return ABITypedData(func(*node))
        return node
    return recursive_map(apply_if_typed, data_tree)
class ABITypedData(namedtuple('ABITypedData', 'abi_type, data')):
    """
    An (abi_type, data) pair marking ``data`` as having the given ABI type.

    >>> a1 = ABITypedData(['address', addr1])
    >>> a2 = ABITypedData(['address', addr2])
    >>> addrs = ABITypedData(['address[]', [a1, a2]])

    Both fields are reachable by tuple position and by attribute:

    >>> assert a1.abi_type == a1[0]
    >>> assert a1.data == a1[1]

    Unlike a plain ``namedtuple``, construction takes one positional
    iterable argument, mirroring the init signature of the other relevant
    collection types.
    """
    def __new__(cls, iterable: Iterable[Any]) -> "ABITypedData":
        return super().__new__(cls, *iterable)
def abi_sub_tree(
    type_str_or_abi_type: Optional[Union[TypeStr, ABIType]], data_value: Any
) -> ABITypedData:
    """Recursively wrap ``data_value`` in an ABITypedData tree annotated with
    the given ABI type (a type string or an already-parsed ``ABIType``).
    ``None`` yields an untyped node with the value attached as-is.
    """
    if type_str_or_abi_type is None:
        return ABITypedData([None, data_value])
    # NOTE(review): assumes TypeStr is a plain ``str`` alias so that
    # isinstance works at runtime -- confirm against eth_typing.
    if isinstance(type_str_or_abi_type, TypeStr):
        abi_type = parse(type_str_or_abi_type)
    else:
        abi_type = type_str_or_abi_type
    # In the two special cases below, we rebuild the given data structures with
    # annotated items
    if abi_type.is_array:
        # If type is array, determine item type and annotate all
        # items in iterable with that type
        item_type_str = abi_type.item_type.to_type_str()
        value_to_annotate = [
            abi_sub_tree(item_type_str, item_value)
            for item_value in data_value
        ]
    elif isinstance(abi_type, TupleType):
        # Otherwise, if type is tuple, determine component types and annotate
        # tuple components in iterable respectively with those types
        value_to_annotate = type(data_value)(
            abi_sub_tree(comp_type.to_type_str(), comp_value)
            for comp_type, comp_value in zip(abi_type.components, data_value)
        )
    else:
        value_to_annotate = data_value
    return ABITypedData([
        abi_type.to_type_str(),
        value_to_annotate,
    ])
def strip_abi_type(elements: Any) -> Any:
    """Unwrap an ABITypedData node to its raw data; pass anything else through."""
    return elements.data if isinstance(elements, ABITypedData) else elements
def build_default_registry() -> ABIRegistry:
    """Return a copy of eth-abi's default registry with this module's
    custom address/bytes/string encoders swapped in.
    """
    # Work on a copy so eth-abi's shared default registry is never mutated
    # by our custom encoder subclasses.
    registry = default_registry.copy()
    for label in ('address', 'bytes<M>', 'bytes', 'string'):
        registry.unregister(label)
    registry.register(
        BaseEquals('address'),
        AddressEncoder, decoding.AddressDecoder,
        label='address',
    )
    registry.register(
        BaseEquals('bytes', with_sub=True),
        BytesEncoder, decoding.BytesDecoder,
        label='bytes<M>',
    )
    registry.register(
        BaseEquals('bytes', with_sub=False),
        ByteStringEncoder, decoding.ByteStringDecoder,
        label='bytes',
    )
    registry.register(
        BaseEquals('string'),
        TextStringEncoder, decoding.StringDecoder,
        label='string',
    )
    return registry
def build_strict_registry() -> ABIRegistry:
    """Return a copy of eth-abi's default registry using the strict
    (exact-length / strict byte-string) encoder variants.
    """
    # Copy first so the shared eth-abi default registry is left untouched.
    registry = default_registry.copy()
    for label in ('address', 'bytes<M>', 'bytes', 'string'):
        registry.unregister(label)
    registry.register(
        BaseEquals('address'),
        AddressEncoder, decoding.AddressDecoder,
        label='address',
    )
    registry.register(
        BaseEquals('bytes', with_sub=True),
        ExactLengthBytesEncoder, BytesDecoder,
        label='bytes<M>',
    )
    registry.register(
        BaseEquals('bytes', with_sub=False),
        StrictByteStringEncoder, decoding.ByteStringDecoder,
        label='bytes',
    )
    registry.register(
        BaseEquals('string'),
        TextStringEncoder, decoding.StringDecoder,
        label='string',
    )
    return registry
|
ALETHIO_SIMPLE_TOKEN_BALANCES = '{"data":[{"type":"TokenBalance","id":"0x9531c059098e3d194ff87febb587ab07b30b13066b175474e89094c44da98b954eedeac495271d0f","attributes":{"balance":"5700000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0x9531c059098e3d194ff87febb587ab07b30b1306"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b13066b175474e89094c44da98b954eedeac495271d0f/account"}},"token":{"data":{"type":"Token","id":"0x6b175474e89094c44da98b954eedeac495271d0f"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b13066b175474e89094c44da98b954eedeac495271d0f/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b13066b175474e89094c44da98b954eedeac495271d0f"}},{"type":"TokenBalance","id":"0x9531c059098e3d194ff87febb587ab07b30b130689d24a6b4ccb1b6faa2625fe562bdd9a23260359","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0x9531c059098e3d194ff87febb587ab07b30b1306"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b130689d24a6b4ccb1b6faa2625fe562bdd9a23260359/account"}},"token":{"data":{"type":"Token","id":"0x89d24a6b4ccb1b6faa2625fe562bdd9a23260359"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b130689d24a6b4ccb1b6faa2625fe562bdd9a23260359/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b130689d24a6b4ccb1b6faa2625fe562bdd9a23260359"}},{"type":"TokenBalance","id":"0x9531c059098e3d194ff87febb587ab07b30b1306a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48","attributes":{"balance":"20000000"},"relationships":{"account":{"data":{"type":"Account","id":"0x9531c059098e3d194ff87febb587ab07b30b1306"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b1306a0b86991c6218b36c1d19d4a2e9eb0ce36
06eb48/account"}},"token":{"data":{"type":"Token","id":"0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b1306a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b1306a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"}}],"links":{"next":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0x9531c059098e3d194ff87febb587ab07b30b1306\u0026page%5Blimit%5D=10\u0026page%5Bnext%5D=0x9531c059098e3d194ff87febb587ab07b30b1306a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48","prev":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0x9531c059098e3d194ff87febb587ab07b30b1306\u0026page%5Blimit%5D=10\u0026page%5Bprev%5D=0x9531c059098e3d194ff87febb587ab07b30b13066b175474e89094c44da98b954eedeac495271d0f"},"meta":{"confirmedBlock":{"number":9655856,"blockCreationTime":1584004250,"blockHash":"0x903df6c4a94da01dacf53f7417b4941e268a14fa58147af5b6053884a293f8a1"},"count":3,"latestBlock":{"number":9655856,"blockCreationTime":1584004250,"blockHash":"0x903df6c4a94da01dacf53f7417b4941e268a14fa58147af5b6053884a293f8a1"},"page":{"hasNext":false,"hasPrev":false},"query":{"block":{"data":{"id":"0x598cc41b881d8afa271f0b3e2f89e355e10ec822cb32ed7743e7e47cb89432d1","type":"Block"},"links":{"self":"https://api.aleth.io/v1/blocks/0x598cc41b881d8afa271f0b3e2f89e355e10ec822cb32ed7743e7e47cb89432d1"}}}}}' # noqa: E501
ALETHIO_MULTIPAGE_TOKEN_BALANCES1 = '{"data":[{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000085d4780b73119b644ae5ecd22b376","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000085d4780b73119b644ae5ecd22b376/account"}},"token":{"data":{"type":"Token","id":"0x0000000000085d4780b73119b644ae5ecd22b376"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000085d4780b73119b644ae5ecd22b376/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000085d4780b73119b644ae5ecd22b376"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000b3f879cb30fe243b4dfee438691c04","attributes":{"balance":"60922"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000b3f879cb30fe243b4dfee438691c04/account"}},"token":{"data":{"type":"Token","id":"0x0000000000b3f879cb30fe243b4dfee438691c04"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000b3f879cb30fe243b4dfee438691c04/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000b3f879cb30fe243b4dfee438691c04"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf02f2d4a04e6e01ace88bd2cd632875543b2ef577","attributes":{"balance":"100000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf02f2d4a04e6e01ace88bd2cd6328755
43b2ef577/account"}},"token":{"data":{"type":"Token","id":"0x02f2d4a04e6e01ace88bd2cd632875543b2ef577"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf02f2d4a04e6e01ace88bd2cd632875543b2ef577/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf02f2d4a04e6e01ace88bd2cd632875543b2ef577"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0b150524707e66a3d85f15de9ab3865572e4e5f4","attributes":{"balance":"50000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0b150524707e66a3d85f15de9ab3865572e4e5f4/account"}},"token":{"data":{"type":"Token","id":"0x0b150524707e66a3d85f15de9ab3865572e4e5f4"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0b150524707e66a3d85f15de9ab3865572e4e5f4/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0b150524707e66a3d85f15de9ab3865572e4e5f4"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0d8775f648430679a709e98d2b0cb6250d2887ef","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0d8775f648430679a709e98d2b0cb6250d2887ef/account"}},"token":{"data":{"type":"Token","id":"0x0d8775f648430679a709e98d2b0cb6250d2887ef"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0d8775f648430679a709e98d2b0cb6250d2887ef/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0d8775f648430679a709e98d2b0cb6250d2887ef"}},{"type":"TokenBalance","id":"0xa57bd0013
4b2850b2a1c55860c9e9ea100fdd6cf0f8b6440a1f7be3354fe072638a5c0f500b044be","attributes":{"balance":"777000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0f8b6440a1f7be3354fe072638a5c0f500b044be/account"}},"token":{"data":{"type":"Token","id":"0x0f8b6440a1f7be3354fe072638a5c0f500b044be"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0f8b6440a1f7be3354fe072638a5c0f500b044be/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0f8b6440a1f7be3354fe072638a5c0f500b044be"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf1985365e9f78359a9b6ad760e32412f4a445e862","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf1985365e9f78359a9b6ad760e32412f4a445e862/account"}},"token":{"data":{"type":"Token","id":"0x1985365e9f78359a9b6ad760e32412f4a445e862"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf1985365e9f78359a9b6ad760e32412f4a445e862/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf1985365e9f78359a9b6ad760e32412f4a445e862"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf21d1f49d699d1f6324ccb5b6ba9847e2bf470811","attributes":{"balance":"10000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf21d1f49d699d1f6324ccb5b6ba9847e2bf470811/account"}},"token":{"data":{"type":"Token","id":"0x21d1f49d69
9d1f6324ccb5b6ba9847e2bf470811"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf21d1f49d699d1f6324ccb5b6ba9847e2bf470811/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf21d1f49d699d1f6324ccb5b6ba9847e2bf470811"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf2260fac5e5542a773aa44fbcfedf7c193bc2c599","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf2260fac5e5542a773aa44fbcfedf7c193bc2c599/account"}},"token":{"data":{"type":"Token","id":"0x2260fac5e5542a773aa44fbcfedf7c193bc2c599"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf2260fac5e5542a773aa44fbcfedf7c193bc2c599/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf2260fac5e5542a773aa44fbcfedf7c193bc2c599"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf432555e5c898f83fc5f00df631bd9c2801fea289","attributes":{"balance":"500000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf432555e5c898f83fc5f00df631bd9c2801fea289/account"}},"token":{"data":{"type":"Token","id":"0x432555e5c898f83fc5f00df631bd9c2801fea289"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf432555e5c898f83fc5f00df631bd9c2801fea289/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf432555e5c898f83fc5f00df631bd9c2801fea289"}}],"links":{"next":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55
860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bnext%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf432555e5c898f83fc5f00df631bd9c2801fea289","prev":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bprev%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000085d4780b73119b644ae5ecd22b376"},"meta":{"confirmedBlock":{"number":9804841,"blockCreationTime":1585994599,"blockHash":"0x126a7e8212bab68a7d7058eee6937016750ac512a2d38632ccc4d384a9c620e3"},"count":10,"latestBlock":{"number":9804841,"blockCreationTime":1585994599,"blockHash":"0x126a7e8212bab68a7d7058eee6937016750ac512a2d38632ccc4d384a9c620e3"},"page":{"hasNext":true,"hasPrev":false},"query":{"block":{"data":{"id":"0xef0ed2b6a0ceeeae5102467952f1c0bf31da95dafaaf368341616b7c210af21d","type":"Block"},"links":{"self":"https://api.aleth.io/v1/blocks/0xef0ed2b6a0ceeeae5102467952f1c0bf31da95dafaaf368341616b7c210af21d"}}}}}' # noqa: E501
ALETHIO_MULTIPAGE_TOKEN_BALANCES2 = '{"data":[{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf4dc3643dbc642b72c158e7f3d2ff232df61cb6ce","attributes":{"balance":"100000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf4dc3643dbc642b72c158e7f3d2ff232df61cb6ce/account"}},"token":{"data":{"type":"Token","id":"0x4dc3643dbc642b72c158e7f3d2ff232df61cb6ce"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf4dc3643dbc642b72c158e7f3d2ff232df61cb6ce/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf4dc3643dbc642b72c158e7f3d2ff232df61cb6ce"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf514910771af9ca656af840dff83e8264ecf986ca","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf514910771af9ca656af840dff83e8264ecf986ca/account"}},"token":{"data":{"type":"Token","id":"0x514910771af9ca656af840dff83e8264ecf986ca"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf514910771af9ca656af840dff83e8264ecf986ca/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf514910771af9ca656af840dff83e8264ecf986ca"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf5c406d99e04b8494dc253fcc52943ef82bca7d75","attributes":{"balance":"1000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf5c406d99e04b8494dc253fcc52943ef8
2bca7d75/account"}},"token":{"data":{"type":"Token","id":"0x5c406d99e04b8494dc253fcc52943ef82bca7d75"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf5c406d99e04b8494dc253fcc52943ef82bca7d75/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf5c406d99e04b8494dc253fcc52943ef82bca7d75"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6b175474e89094c44da98b954eedeac495271d0f","attributes":{"balance":"106029466350183178691413"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6b175474e89094c44da98b954eedeac495271d0f/account"}},"token":{"data":{"type":"Token","id":"0x6b175474e89094c44da98b954eedeac495271d0f"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6b175474e89094c44da98b954eedeac495271d0f/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6b175474e89094c44da98b954eedeac495271d0f"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6ff313fb38d53d7a458860b1bf7512f54a03e968","attributes":{"balance":"49950000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6ff313fb38d53d7a458860b1bf7512f54a03e968/account"}},"token":{"data":{"type":"Token","id":"0x6ff313fb38d53d7a458860b1bf7512f54a03e968"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6ff313fb38d53d7a458860b1bf7512f54a03e968/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6ff313fb38d53d7a458860b1bf7512f54a03e968"}},{"type":"
TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf7e96ecc14fa5c77e1eab08eb175b434b47470760","attributes":{"balance":"1000000000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf7e96ecc14fa5c77e1eab08eb175b434b47470760/account"}},"token":{"data":{"type":"Token","id":"0x7e96ecc14fa5c77e1eab08eb175b434b47470760"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf7e96ecc14fa5c77e1eab08eb175b434b47470760/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf7e96ecc14fa5c77e1eab08eb175b434b47470760"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf89d24a6b4ccb1b6faa2625fe562bdd9a23260359","attributes":{"balance":"24974762270881137775"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf89d24a6b4ccb1b6faa2625fe562bdd9a23260359/account"}},"token":{"data":{"type":"Token","id":"0x89d24a6b4ccb1b6faa2625fe562bdd9a23260359"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf89d24a6b4ccb1b6faa2625fe562bdd9a23260359/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf89d24a6b4ccb1b6faa2625fe562bdd9a23260359"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf98976a6dfaaf97b16a4bb06035cc84be12e79110","attributes":{"balance":"200000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf98976a6dfaaf97b16a4bb06035cc84be12e79110/account"}
},"token":{"data":{"type":"Token","id":"0x98976a6dfaaf97b16a4bb06035cc84be12e79110"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf98976a6dfaaf97b16a4bb06035cc84be12e79110/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf98976a6dfaaf97b16a4bb06035cc84be12e79110"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48","attributes":{"balance":"1000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48/account"}},"token":{"data":{"type":"Token","id":"0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfac9bb427953ac7fddc562adca86cf42d988047fd","attributes":{"balance":"1000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfac9bb427953ac7fddc562adca86cf42d988047fd/account"}},"token":{"data":{"type":"Token","id":"0xac9bb427953ac7fddc562adca86cf42d988047fd"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfac9bb427953ac7fddc562adca86cf42d988047fd/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfac9bb427953ac7fddc562adca86cf42d988047fd"}}],"links":{"next":"https://api.aleth.io/v1/
token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bnext%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfac9bb427953ac7fddc562adca86cf42d988047fd","prev":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bprev%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf4dc3643dbc642b72c158e7f3d2ff232df61cb6ce"},"meta":{"confirmedBlock":{"number":9805054,"blockCreationTime":1585997401,"blockHash":"0x8a797dc0fc4af3531d144066346dede91b17488103a1ec6e224375abe318d5fa"},"count":10,"latestBlock":{"number":9805054,"blockCreationTime":1585997401,"blockHash":"0x8a797dc0fc4af3531d144066346dede91b17488103a1ec6e224375abe318d5fa"},"page":{"hasNext":true,"hasPrev":true},"query":{"block":{"data":{"id":"0xa4b142abab23bc32c95a09abfa2b4395c86989f282f14030004bf6217b29122a","type":"Block"},"links":{"self":"https://api.aleth.io/v1/blocks/0xa4b142abab23bc32c95a09abfa2b4395c86989f282f14030004bf6217b29122a"}}}}}' # noqa: E501
ALETHIO_MULTIPAGE_TOKEN_BALANCES3 = '{"data":[{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbe22ec66710caa72ab690bf816f8bce785fbbac2","attributes":{"balance":"7579529152"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbe22ec66710caa72ab690bf816f8bce785fbbac2/account"}},"token":{"data":{"type":"Token","id":"0xbe22ec66710caa72ab690bf816f8bce785fbbac2"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbe22ec66710caa72ab690bf816f8bce785fbbac2/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbe22ec66710caa72ab690bf816f8bce785fbbac2"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbf4a2ddaa16148a9d0fa2093ffac450adb7cd4aa","attributes":{"balance":"1410000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbf4a2ddaa16148a9d0fa2093ffac450adb7cd4aa/account"}},"token":{"data":{"type":"Token","id":"0xbf4a2ddaa16148a9d0fa2093ffac450adb7cd4aa"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbf4a2ddaa16148a9d0fa2093ffac450adb7cd4aa/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbf4a2ddaa16148a9d0fa2093ffac450adb7cd4aa"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2","attributes":{"balance":"1139687612957391312303"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc02aaa39b223fe8d0a0
e5c4f27ead9083c756cc2/account"}},"token":{"data":{"type":"Token","id":"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e","attributes":{"balance":"88888800000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e/account"}},"token":{"data":{"type":"Token","id":"0xc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfd110bb8a24b100c37af7310416e685af807c1f10","attributes":{"balance":"600000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfd110bb8a24b100c37af7310416e685af807c1f10/account"}},"token":{"data":{"type":"Token","id":"0xd110bb8a24b100c37af7310416e685af807c1f10"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfd110bb8a24b100c37af7310416e685af807c1f10/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfd110bb8a24b100c37af7310416e685af807c1f10"}},{"type":"To
kenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfdd974d5c2e2928dea5f71b9825b8b646686bd200","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfdd974d5c2e2928dea5f71b9825b8b646686bd200/account"}},"token":{"data":{"type":"Token","id":"0xdd974d5c2e2928dea5f71b9825b8b646686bd200"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfdd974d5c2e2928dea5f71b9825b8b646686bd200/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfdd974d5c2e2928dea5f71b9825b8b646686bd200"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfe41d2489571d322189246dafa5ebde1f4699f498","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfe41d2489571d322189246dafa5ebde1f4699f498/account"}},"token":{"data":{"type":"Token","id":"0xe41d2489571d322189246dafa5ebde1f4699f498"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfe41d2489571d322189246dafa5ebde1f4699f498/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfe41d2489571d322189246dafa5ebde1f4699f498"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff18432ef894ef4b2a5726f933718f5a8cf9ff831","attributes":{"balance":"50000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff18432ef894ef4b2a5726f933718f5a8cf9ff831/account"}},"token":{"data":{"type":"Token","id":"0xf18432ef89
4ef4b2a5726f933718f5a8cf9ff831"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff18432ef894ef4b2a5726f933718f5a8cf9ff831/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff18432ef894ef4b2a5726f933718f5a8cf9ff831"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff53ad2c6851052a81b42133467480961b2321c09","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff53ad2c6851052a81b42133467480961b2321c09/account"}},"token":{"data":{"type":"Token","id":"0xf53ad2c6851052a81b42133467480961b2321c09"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff53ad2c6851052a81b42133467480961b2321c09/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff53ad2c6851052a81b42133467480961b2321c09"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cffe2786d7d1ccab8b015f6ef7392f67d778f8d8d7","attributes":{"balance":"200000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cffe2786d7d1ccab8b015f6ef7392f67d778f8d8d7/account"}},"token":{"data":{"type":"Token","id":"0xfe2786d7d1ccab8b015f6ef7392f67d778f8d8d7"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cffe2786d7d1ccab8b015f6ef7392f67d778f8d8d7/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cffe2786d7d1ccab8b015f6ef7392f67d778f8d8d7"}}],"links":{"next":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55
860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bnext%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cffe2786d7d1ccab8b015f6ef7392f67d778f8d8d7","prev":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bprev%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbe22ec66710caa72ab690bf816f8bce785fbbac2"},"meta":{"confirmedBlock":{"number":9805056,"blockCreationTime":1585997452,"blockHash":"0xc40a8a6e81173ef149fcb7c57e4f79a67be6946c0fe3cf2e60f3afa4a43d0dc9"},"count":10,"latestBlock":{"number":9805056,"blockCreationTime":1585997452,"blockHash":"0xc40a8a6e81173ef149fcb7c57e4f79a67be6946c0fe3cf2e60f3afa4a43d0dc9"},"page":{"hasNext":false,"hasPrev":true},"query":{"block":{"data":{"id":"0x3f1b9826210202fad3023e3ccbd87c025728a3f6bdbccac9ca84944205860bdc","type":"Block"},"links":{"self":"https://api.aleth.io/v1/blocks/0x3f1b9826210202fad3023e3ccbd87c025728a3f6bdbccac9ca84944205860bdc"}}}}}' # noqa: E501
| ALETHIO_SIMPLE_TOKEN_BALANCES = '{"data":[{"type":"TokenBalance","id":"0x9531c059098e3d194ff87febb587ab07b30b13066b175474e89094c44da98b954eedeac495271d0f","attributes":{"balance":"5700000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0x9531c059098e3d194ff87febb587ab07b30b1306"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b13066b175474e89094c44da98b954eedeac495271d0f/account"}},"token":{"data":{"type":"Token","id":"0x6b175474e89094c44da98b954eedeac495271d0f"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b13066b175474e89094c44da98b954eedeac495271d0f/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b13066b175474e89094c44da98b954eedeac495271d0f"}},{"type":"TokenBalance","id":"0x9531c059098e3d194ff87febb587ab07b30b130689d24a6b4ccb1b6faa2625fe562bdd9a23260359","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0x9531c059098e3d194ff87febb587ab07b30b1306"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b130689d24a6b4ccb1b6faa2625fe562bdd9a23260359/account"}},"token":{"data":{"type":"Token","id":"0x89d24a6b4ccb1b6faa2625fe562bdd9a23260359"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b130689d24a6b4ccb1b6faa2625fe562bdd9a23260359/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b130689d24a6b4ccb1b6faa2625fe562bdd9a23260359"}},{"type":"TokenBalance","id":"0x9531c059098e3d194ff87febb587ab07b30b1306a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48","attributes":{"balance":"20000000"},"relationships":{"account":{"data":{"type":"Account","id":"0x9531c059098e3d194ff87febb587ab07b30b1306"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b1306a0b86991c6218b36c1d19d4a2e9eb0ce
3606eb48/account"}},"token":{"data":{"type":"Token","id":"0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"},"links":{"related":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b1306a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0x9531c059098e3d194ff87febb587ab07b30b1306a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"}}],"links":{"next":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0x9531c059098e3d194ff87febb587ab07b30b1306\u0026page%5Blimit%5D=10\u0026page%5Bnext%5D=0x9531c059098e3d194ff87febb587ab07b30b1306a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48","prev":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0x9531c059098e3d194ff87febb587ab07b30b1306\u0026page%5Blimit%5D=10\u0026page%5Bprev%5D=0x9531c059098e3d194ff87febb587ab07b30b13066b175474e89094c44da98b954eedeac495271d0f"},"meta":{"confirmedBlock":{"number":9655856,"blockCreationTime":1584004250,"blockHash":"0x903df6c4a94da01dacf53f7417b4941e268a14fa58147af5b6053884a293f8a1"},"count":3,"latestBlock":{"number":9655856,"blockCreationTime":1584004250,"blockHash":"0x903df6c4a94da01dacf53f7417b4941e268a14fa58147af5b6053884a293f8a1"},"page":{"hasNext":false,"hasPrev":false},"query":{"block":{"data":{"id":"0x598cc41b881d8afa271f0b3e2f89e355e10ec822cb32ed7743e7e47cb89432d1","type":"Block"},"links":{"self":"https://api.aleth.io/v1/blocks/0x598cc41b881d8afa271f0b3e2f89e355e10ec822cb32ed7743e7e47cb89432d1"}}}}}' # noqa: E501
ALETHIO_MULTIPAGE_TOKEN_BALANCES1 = '{"data":[{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000085d4780b73119b644ae5ecd22b376","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000085d4780b73119b644ae5ecd22b376/account"}},"token":{"data":{"type":"Token","id":"0x0000000000085d4780b73119b644ae5ecd22b376"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000085d4780b73119b644ae5ecd22b376/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000085d4780b73119b644ae5ecd22b376"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000b3f879cb30fe243b4dfee438691c04","attributes":{"balance":"60922"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000b3f879cb30fe243b4dfee438691c04/account"}},"token":{"data":{"type":"Token","id":"0x0000000000b3f879cb30fe243b4dfee438691c04"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000b3f879cb30fe243b4dfee438691c04/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000b3f879cb30fe243b4dfee438691c04"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf02f2d4a04e6e01ace88bd2cd632875543b2ef577","attributes":{"balance":"100000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf02f2d4a04e6e01ace88bd2cd6328755
43b2ef577/account"}},"token":{"data":{"type":"Token","id":"0x02f2d4a04e6e01ace88bd2cd632875543b2ef577"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf02f2d4a04e6e01ace88bd2cd632875543b2ef577/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf02f2d4a04e6e01ace88bd2cd632875543b2ef577"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0b150524707e66a3d85f15de9ab3865572e4e5f4","attributes":{"balance":"50000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0b150524707e66a3d85f15de9ab3865572e4e5f4/account"}},"token":{"data":{"type":"Token","id":"0x0b150524707e66a3d85f15de9ab3865572e4e5f4"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0b150524707e66a3d85f15de9ab3865572e4e5f4/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0b150524707e66a3d85f15de9ab3865572e4e5f4"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0d8775f648430679a709e98d2b0cb6250d2887ef","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0d8775f648430679a709e98d2b0cb6250d2887ef/account"}},"token":{"data":{"type":"Token","id":"0x0d8775f648430679a709e98d2b0cb6250d2887ef"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0d8775f648430679a709e98d2b0cb6250d2887ef/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0d8775f648430679a709e98d2b0cb6250d2887ef"}},{"type":"TokenBalance","id":"0xa57bd0013
4b2850b2a1c55860c9e9ea100fdd6cf0f8b6440a1f7be3354fe072638a5c0f500b044be","attributes":{"balance":"777000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0f8b6440a1f7be3354fe072638a5c0f500b044be/account"}},"token":{"data":{"type":"Token","id":"0x0f8b6440a1f7be3354fe072638a5c0f500b044be"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0f8b6440a1f7be3354fe072638a5c0f500b044be/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0f8b6440a1f7be3354fe072638a5c0f500b044be"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf1985365e9f78359a9b6ad760e32412f4a445e862","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf1985365e9f78359a9b6ad760e32412f4a445e862/account"}},"token":{"data":{"type":"Token","id":"0x1985365e9f78359a9b6ad760e32412f4a445e862"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf1985365e9f78359a9b6ad760e32412f4a445e862/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf1985365e9f78359a9b6ad760e32412f4a445e862"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf21d1f49d699d1f6324ccb5b6ba9847e2bf470811","attributes":{"balance":"10000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf21d1f49d699d1f6324ccb5b6ba9847e2bf470811/account"}},"token":{"data":{"type":"Token","id":"0x21d1f49d69
9d1f6324ccb5b6ba9847e2bf470811"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf21d1f49d699d1f6324ccb5b6ba9847e2bf470811/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf21d1f49d699d1f6324ccb5b6ba9847e2bf470811"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf2260fac5e5542a773aa44fbcfedf7c193bc2c599","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf2260fac5e5542a773aa44fbcfedf7c193bc2c599/account"}},"token":{"data":{"type":"Token","id":"0x2260fac5e5542a773aa44fbcfedf7c193bc2c599"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf2260fac5e5542a773aa44fbcfedf7c193bc2c599/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf2260fac5e5542a773aa44fbcfedf7c193bc2c599"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf432555e5c898f83fc5f00df631bd9c2801fea289","attributes":{"balance":"500000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf432555e5c898f83fc5f00df631bd9c2801fea289/account"}},"token":{"data":{"type":"Token","id":"0x432555e5c898f83fc5f00df631bd9c2801fea289"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf432555e5c898f83fc5f00df631bd9c2801fea289/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf432555e5c898f83fc5f00df631bd9c2801fea289"}}],"links":{"next":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55
860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bnext%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf432555e5c898f83fc5f00df631bd9c2801fea289","prev":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bprev%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf0000000000085d4780b73119b644ae5ecd22b376"},"meta":{"confirmedBlock":{"number":9804841,"blockCreationTime":1585994599,"blockHash":"0x126a7e8212bab68a7d7058eee6937016750ac512a2d38632ccc4d384a9c620e3"},"count":10,"latestBlock":{"number":9804841,"blockCreationTime":1585994599,"blockHash":"0x126a7e8212bab68a7d7058eee6937016750ac512a2d38632ccc4d384a9c620e3"},"page":{"hasNext":true,"hasPrev":false},"query":{"block":{"data":{"id":"0xef0ed2b6a0ceeeae5102467952f1c0bf31da95dafaaf368341616b7c210af21d","type":"Block"},"links":{"self":"https://api.aleth.io/v1/blocks/0xef0ed2b6a0ceeeae5102467952f1c0bf31da95dafaaf368341616b7c210af21d"}}}}}' # noqa: E501
ALETHIO_MULTIPAGE_TOKEN_BALANCES2 = '{"data":[{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf4dc3643dbc642b72c158e7f3d2ff232df61cb6ce","attributes":{"balance":"100000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf4dc3643dbc642b72c158e7f3d2ff232df61cb6ce/account"}},"token":{"data":{"type":"Token","id":"0x4dc3643dbc642b72c158e7f3d2ff232df61cb6ce"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf4dc3643dbc642b72c158e7f3d2ff232df61cb6ce/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf4dc3643dbc642b72c158e7f3d2ff232df61cb6ce"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf514910771af9ca656af840dff83e8264ecf986ca","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf514910771af9ca656af840dff83e8264ecf986ca/account"}},"token":{"data":{"type":"Token","id":"0x514910771af9ca656af840dff83e8264ecf986ca"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf514910771af9ca656af840dff83e8264ecf986ca/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf514910771af9ca656af840dff83e8264ecf986ca"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf5c406d99e04b8494dc253fcc52943ef82bca7d75","attributes":{"balance":"1000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf5c406d99e04b8494dc253fcc52943ef8
2bca7d75/account"}},"token":{"data":{"type":"Token","id":"0x5c406d99e04b8494dc253fcc52943ef82bca7d75"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf5c406d99e04b8494dc253fcc52943ef82bca7d75/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf5c406d99e04b8494dc253fcc52943ef82bca7d75"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6b175474e89094c44da98b954eedeac495271d0f","attributes":{"balance":"106029466350183178691413"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6b175474e89094c44da98b954eedeac495271d0f/account"}},"token":{"data":{"type":"Token","id":"0x6b175474e89094c44da98b954eedeac495271d0f"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6b175474e89094c44da98b954eedeac495271d0f/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6b175474e89094c44da98b954eedeac495271d0f"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6ff313fb38d53d7a458860b1bf7512f54a03e968","attributes":{"balance":"49950000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6ff313fb38d53d7a458860b1bf7512f54a03e968/account"}},"token":{"data":{"type":"Token","id":"0x6ff313fb38d53d7a458860b1bf7512f54a03e968"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6ff313fb38d53d7a458860b1bf7512f54a03e968/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf6ff313fb38d53d7a458860b1bf7512f54a03e968"}},{"type":"
TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf7e96ecc14fa5c77e1eab08eb175b434b47470760","attributes":{"balance":"1000000000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf7e96ecc14fa5c77e1eab08eb175b434b47470760/account"}},"token":{"data":{"type":"Token","id":"0x7e96ecc14fa5c77e1eab08eb175b434b47470760"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf7e96ecc14fa5c77e1eab08eb175b434b47470760/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf7e96ecc14fa5c77e1eab08eb175b434b47470760"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf89d24a6b4ccb1b6faa2625fe562bdd9a23260359","attributes":{"balance":"24974762270881137775"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf89d24a6b4ccb1b6faa2625fe562bdd9a23260359/account"}},"token":{"data":{"type":"Token","id":"0x89d24a6b4ccb1b6faa2625fe562bdd9a23260359"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf89d24a6b4ccb1b6faa2625fe562bdd9a23260359/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf89d24a6b4ccb1b6faa2625fe562bdd9a23260359"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf98976a6dfaaf97b16a4bb06035cc84be12e79110","attributes":{"balance":"200000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf98976a6dfaaf97b16a4bb06035cc84be12e79110/account"}
},"token":{"data":{"type":"Token","id":"0x98976a6dfaaf97b16a4bb06035cc84be12e79110"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf98976a6dfaaf97b16a4bb06035cc84be12e79110/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf98976a6dfaaf97b16a4bb06035cc84be12e79110"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48","attributes":{"balance":"1000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48/account"}},"token":{"data":{"type":"Token","id":"0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfac9bb427953ac7fddc562adca86cf42d988047fd","attributes":{"balance":"1000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfac9bb427953ac7fddc562adca86cf42d988047fd/account"}},"token":{"data":{"type":"Token","id":"0xac9bb427953ac7fddc562adca86cf42d988047fd"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfac9bb427953ac7fddc562adca86cf42d988047fd/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfac9bb427953ac7fddc562adca86cf42d988047fd"}}],"links":{"next":"https://api.aleth.io/v1/
token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bnext%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfac9bb427953ac7fddc562adca86cf42d988047fd","prev":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bprev%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf4dc3643dbc642b72c158e7f3d2ff232df61cb6ce"},"meta":{"confirmedBlock":{"number":9805054,"blockCreationTime":1585997401,"blockHash":"0x8a797dc0fc4af3531d144066346dede91b17488103a1ec6e224375abe318d5fa"},"count":10,"latestBlock":{"number":9805054,"blockCreationTime":1585997401,"blockHash":"0x8a797dc0fc4af3531d144066346dede91b17488103a1ec6e224375abe318d5fa"},"page":{"hasNext":true,"hasPrev":true},"query":{"block":{"data":{"id":"0xa4b142abab23bc32c95a09abfa2b4395c86989f282f14030004bf6217b29122a","type":"Block"},"links":{"self":"https://api.aleth.io/v1/blocks/0xa4b142abab23bc32c95a09abfa2b4395c86989f282f14030004bf6217b29122a"}}}}}' # noqa: E501
ALETHIO_MULTIPAGE_TOKEN_BALANCES3 = '{"data":[{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbe22ec66710caa72ab690bf816f8bce785fbbac2","attributes":{"balance":"7579529152"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbe22ec66710caa72ab690bf816f8bce785fbbac2/account"}},"token":{"data":{"type":"Token","id":"0xbe22ec66710caa72ab690bf816f8bce785fbbac2"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbe22ec66710caa72ab690bf816f8bce785fbbac2/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbe22ec66710caa72ab690bf816f8bce785fbbac2"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbf4a2ddaa16148a9d0fa2093ffac450adb7cd4aa","attributes":{"balance":"1410000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbf4a2ddaa16148a9d0fa2093ffac450adb7cd4aa/account"}},"token":{"data":{"type":"Token","id":"0xbf4a2ddaa16148a9d0fa2093ffac450adb7cd4aa"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbf4a2ddaa16148a9d0fa2093ffac450adb7cd4aa/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbf4a2ddaa16148a9d0fa2093ffac450adb7cd4aa"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2","attributes":{"balance":"1139687612957391312303"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc02aaa39b223fe8d0a0
e5c4f27ead9083c756cc2/account"}},"token":{"data":{"type":"Token","id":"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e","attributes":{"balance":"88888800000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e/account"}},"token":{"data":{"type":"Token","id":"0xc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfd110bb8a24b100c37af7310416e685af807c1f10","attributes":{"balance":"600000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfd110bb8a24b100c37af7310416e685af807c1f10/account"}},"token":{"data":{"type":"Token","id":"0xd110bb8a24b100c37af7310416e685af807c1f10"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfd110bb8a24b100c37af7310416e685af807c1f10/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfd110bb8a24b100c37af7310416e685af807c1f10"}},{"type":"To
kenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfdd974d5c2e2928dea5f71b9825b8b646686bd200","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfdd974d5c2e2928dea5f71b9825b8b646686bd200/account"}},"token":{"data":{"type":"Token","id":"0xdd974d5c2e2928dea5f71b9825b8b646686bd200"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfdd974d5c2e2928dea5f71b9825b8b646686bd200/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfdd974d5c2e2928dea5f71b9825b8b646686bd200"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfe41d2489571d322189246dafa5ebde1f4699f498","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfe41d2489571d322189246dafa5ebde1f4699f498/account"}},"token":{"data":{"type":"Token","id":"0xe41d2489571d322189246dafa5ebde1f4699f498"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfe41d2489571d322189246dafa5ebde1f4699f498/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfe41d2489571d322189246dafa5ebde1f4699f498"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff18432ef894ef4b2a5726f933718f5a8cf9ff831","attributes":{"balance":"50000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff18432ef894ef4b2a5726f933718f5a8cf9ff831/account"}},"token":{"data":{"type":"Token","id":"0xf18432ef89
4ef4b2a5726f933718f5a8cf9ff831"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff18432ef894ef4b2a5726f933718f5a8cf9ff831/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff18432ef894ef4b2a5726f933718f5a8cf9ff831"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff53ad2c6851052a81b42133467480961b2321c09","attributes":{"balance":"0"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff53ad2c6851052a81b42133467480961b2321c09/account"}},"token":{"data":{"type":"Token","id":"0xf53ad2c6851052a81b42133467480961b2321c09"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff53ad2c6851052a81b42133467480961b2321c09/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cff53ad2c6851052a81b42133467480961b2321c09"}},{"type":"TokenBalance","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cffe2786d7d1ccab8b015f6ef7392f67d778f8d8d7","attributes":{"balance":"200000000000000000000"},"relationships":{"account":{"data":{"type":"Account","id":"0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cffe2786d7d1ccab8b015f6ef7392f67d778f8d8d7/account"}},"token":{"data":{"type":"Token","id":"0xfe2786d7d1ccab8b015f6ef7392f67d778f8d8d7"},"links":{"related":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cffe2786d7d1ccab8b015f6ef7392f67d778f8d8d7/token"}}},"links":{"self":"https://api.aleth.io/v1/token-balances/0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cffe2786d7d1ccab8b015f6ef7392f67d778f8d8d7"}}],"links":{"next":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55
860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bnext%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cffe2786d7d1ccab8b015f6ef7392f67d778f8d8d7","prev":"https://api.aleth.io/v1/token-balances?filter%5Baccount%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cf\u0026page%5Blimit%5D=10\u0026page%5Bprev%5D=0xa57bd00134b2850b2a1c55860c9e9ea100fdd6cfbe22ec66710caa72ab690bf816f8bce785fbbac2"},"meta":{"confirmedBlock":{"number":9805056,"blockCreationTime":1585997452,"blockHash":"0xc40a8a6e81173ef149fcb7c57e4f79a67be6946c0fe3cf2e60f3afa4a43d0dc9"},"count":10,"latestBlock":{"number":9805056,"blockCreationTime":1585997452,"blockHash":"0xc40a8a6e81173ef149fcb7c57e4f79a67be6946c0fe3cf2e60f3afa4a43d0dc9"},"page":{"hasNext":false,"hasPrev":true},"query":{"block":{"data":{"id":"0x3f1b9826210202fad3023e3ccbd87c025728a3f6bdbccac9ca84944205860bdc","type":"Block"},"links":{"self":"https://api.aleth.io/v1/blocks/0x3f1b9826210202fad3023e3ccbd87c025728a3f6bdbccac9ca84944205860bdc"}}}}}' # noqa: E501
|
'''
Running Dynamic Sednet simulations using OpenWater
'''
import os
import json
import pandas as pd
import geopandas as gpd
import shutil
import numpy as np
from openwater import OWTemplate, OWLink
from openwater.template import TAG_MODEL
import openwater.nodes as n
from collections import defaultdict
from openwater.examples.from_source import get_default_node_template, DEFAULT_NODE_TEMPLATES, storage_template_builder
from openwater.catchments import \
DOWNSTREAM_FLOW_FLUX, DOWNSTREAM_LOAD_FLUX, \
UPSTREAM_FLOW_FLUX, UPSTREAM_LOAD_FLUX
from openwater.results import OpenwaterResults
from openwater.template import ModelFile
from .const import *
from openwater.file import _tabulate_model_scalars_from_file
# Landscape (out-of-stream) constituent source processes modelled per CGU.
LANDSCAPE_CONSTITUENT_SOURCES=['Hillslope','Gully']
# Canonical constituent names for the two modelled sediment size classes.
FINE_SEDIMENT = 'Sediment - Fine'
COARSE_SEDIMENT = 'Sediment - Coarse'
# CGUs whose dissolved inorganic nitrogen (N_DIN) uses a timeseries load model.
CGUS_TS_N_DIN = ['Sugarcane','Bananas']
SEDIMENT_CLASSES = [FINE_SEDIMENT,COARSE_SEDIMENT]
STANDARD_NUTRIENTS = ['TN','TP']
STANDARD_CONSTITUENTS = SEDIMENT_CLASSES + STANDARD_NUTRIENTS
# Candidate input flux names on generation models for quick/base flow —
# used for conditional linking (a model may expose any one of these).
QUICKFLOW_INPUTS = ['quickflow','flow']
BASEFLOW_INPUTS = ['baseflow','slowflow']
# Source-model type names that represent "no constituent generation".
NIL_MODELS = {
    'Dynamic_SedNet.Models.SedNet_Blank_Constituent_Generation_Model',
    'RiverSystem.Catchments.Models.ContaminantGenerationModels.NilConstituent'
}
# Source-model name -> Openwater model name overrides (currently none).
MODEL_NAME_TRANSLATIONS = {
}
# def default_generation_model(constituent,landuse):
# if constituent=='TSS':
# return n.USLEFineSedimentGeneration
# return n.EmcDwc
# def build_catchment_template(constituents,hrus,landuses,generation_model=default_generation_model):
# template = OWTemplate()
# routing_node = template.add_node(n.Muskingum,process='FlowRouting')
# for con in constituents:
# # transport_node = 'Transport-%s'%(con)
# transport_node = template.add_node(n.LumpedConstituentRouting,process='ConstituentRouting',constituent=con)
# template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))
# for hru in hrus:
# runoff_node = template.add_node(n.Simhyd,process='RR',hru=hru)
# runoff_scale_node = template.add_node(n.DepthToRate,process='ArealScale',hru=hru,component='Runoff')
# quickflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',hru=hru,component='Quickflow')
# baseflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',hru=hru,component='Baseflow')
# template.add_link(OWLink(runoff_node,'runoff',runoff_scale_node,'input'))
# template.add_link(OWLink(runoff_node,'quickflow',quickflow_scale_node,'input'))
# template.add_link(OWLink(runoff_node,'baseflow',baseflow_scale_node,'input'))
# template.add_link(OWLink(runoff_scale_node,'outflow',routing_node,'lateral'))
# for con in constituents:
# # transport_node = 'Transport-%s'%(con)
# transport_node = template.add_node(n.LumpedConstituentRouting,process='ConstituentRouting',constituent=con) #!!!ERROR
# template.add_link(OWLink(runoff_scale_node,'outflow',transport_node,'inflow'))
# for lu in landuses[hru]:
# #gen_node = 'Generation-%s-%s'%(con,lu)
# gen_node = template.add_node(generation_model(con,lu),process='ConstituentGeneration',constituent=con,lu=lu)
# template.add_link(OWLink(quickflow_scale_node,'outflow',gen_node,'quickflow'))
# template.add_link(OWLink(baseflow_scale_node,'outflow',gen_node,'baseflow'))
# template.add_link(OWLink(gen_node,'totalLoad',transport_node,'lateralLoad'))
# return template
# def link_catchments(graph,from_cat,to_cat,constituents):
# linkages = [('%d-FlowRouting (Muskingum)','outflow','inflow')] + \
# [('%%d-ConstituentRouting-%s (LumpedConstituentRouting)'%c,'outflowLoad','inflowLoad') for c in constituents]
# for (lt,src,dest) in linkages:
# dest_node = lt%from_cat
# src_node = lt%to_cat#'%d/%s'%(to_cat,lt)
# graph.add_edge(src_node,dest_node,src=[src],dest=[dest])
# def generation_models(constituent,cgu):
# if constituent in STANDARD_NUTRIENTS:
# return n.EmcDwc
# # if pesticide
# return n.EmcDwc
class Reach(object):
    """Placeholder for reach-level data/behaviour (no behaviour defined in this module)."""
    pass
class HydrologicalResponseUnit(object):
    """Placeholder for HRU-level data/behaviour (no behaviour defined in this module)."""
    pass
class DynamicSednetCGU(object):
    """Template builder for a single Constituent Generation Unit (CGU).

    A CGU template wires together areal runoff scaling, hillslope (USLE)
    and gully erosion, timeseries ("cropping") load inputs, EMC/DWC
    fallback models and nutrient/pesticide generation into an Openwater
    sub-template that the parent catchment template nests per CGU.
    """

    def __init__(self,cropping_cgu=True,sediment_fallback_model=False,gully_cgu=False,hillslope_cgu=False,ts_load_with_dwc=None):
        """Configure which generation processes this CGU includes.

        Parameters
        ----------
        cropping_cgu : bool
            Use timeseries load inputs (PassLoadIfFlow) for sediment and
            pesticides.
        sediment_fallback_model : bool
            Use the regular (cg lookup, typically EMC/DWC) model for the
            sediment constituents instead of hillslope/gully process models.
        gully_cgu, hillslope_cgu : bool
            Include the gully / hillslope erosion process models.
        ts_load_with_dwc : dict or None
            Optional dict with 'constituents' and 'cgus' keys selecting
            combinations that pair a timeseries load with an EMC/DWC model.
        """
        self.cropping_cgu = cropping_cgu
        # self.erosion_processes = erosion_processes
        self.gully_cgu = gully_cgu
        self.hillslope_cgu = hillslope_cgu
        self.sediment_fallback_model = sediment_fallback_model
        self.ts_load_with_dwc = ts_load_with_dwc
        # Gully process modelling and the sediment fallback model are
        # mutually exclusive configurations.
        assert (not bool(gully_cgu)) or (not bool(sediment_fallback_model))

    def generation_model(self,constituent,catchment_template,**kwargs):
        """Resolve the generation model for `constituent` via the catchment's
        constituent-generation (cg) lookup."""
        return catchment_template.model_for(catchment_template.cg,constituent,**kwargs)

    def get_template(self,catchment_template,**kwargs):
        """Build and return the OWTemplate for this CGU.

        `kwargs` carry the template tags (at least 'cgu'); they are applied
        to every node added so results can be dissected by tag later.
        """
        cgu = kwargs.get('cgu','?')
        template = OWTemplate('cgu:%s'%cgu)

        # Areal scaling (depth -> volumetric rate) of the HRU runoff fluxes.
        # Only present when the catchment models rainfall-runoff at all.
        runoff_scale_node = None
        quickflow_scale_node = None
        baseflow_scale_node = None
        if catchment_template.rr is not None:
            runoff_scale_node = template.add_node(n.DepthToRate,process='ArealScale',component='Runoff',**kwargs)
            quickflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',component='Quickflow',**kwargs)
            baseflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',component='Baseflow',**kwargs)

        def link_runoff(dest_node,qf_input,bf_input):
            # Link the scaled quick/base flow into dest_node's named inputs,
            # skipping either when the input name (or runoff itself) is absent.
            if quickflow_scale_node is None:
                return

            if qf_input is not None:
                template.add_link(OWLink(quickflow_scale_node,'outflow',dest_node,qf_input))

            if bf_input is not None:
                template.add_link(OWLink(baseflow_scale_node,'outflow',dest_node,bf_input))

        def add_emc_dwc(con):
            # Standard EMC (quickflow) / DWC (baseflow) generation node for `con`.
            dwc_node = template.add_node(n.EmcDwc,process='ConstituentDryWeatherGeneration',constituent=con,**kwargs)
            link_runoff(dwc_node,'quickflow','baseflow')
            return dwc_node

        if runoff_scale_node is not None:
            template.define_input(runoff_scale_node,'input','runoff')
            template.define_input(quickflow_scale_node,'input','quickflow')
            template.define_input(baseflow_scale_node,'input','baseflow')
            template.define_output(runoff_scale_node,'outflow','lateral')
        # This should be able to be done automatically... any input not defined

        # Track which node/flux supplies the hillslope fine/coarse sediment
        # loads — needed later by the particulate nutrient model.
        hillslope_fine_sed_gen = None
        hillslope_coarse_sed_gen = None
        hillslope_fine_sed_gen_flux = None
        hillslope_coarse_sed_gen_flux = None
        # fine_ts_scale = None
        # coarse_ts_scale = None
        gully_gen = None

        if self.hillslope_cgu:
            # Hillslope
            sed_gen = template.add_node(n.USLEFineSedimentGeneration,process="HillslopeGeneration",**kwargs)
            link_runoff(sed_gen,'quickflow','baseflow')

            hillslope_fine_sed_gen = sed_gen
            hillslope_coarse_sed_gen = sed_gen
            hillslope_fine_sed_gen_flux = 'generatedLoadFine'
            hillslope_coarse_sed_gen_flux = 'generatedLoadCoarse'

        if self.gully_cgu:
            # Gully
            gully_gen = template.add_node(n.DynamicSednetGullyAlt,process="GullyGeneration",**kwargs)
            link_runoff(gully_gen,'quickflow',None)

            # Fine/coarse totals = gully (i2) + hillslope or fallback (i1).
            fine_sum = template.add_node(n.Sum,process='ConstituentGeneration',constituent=FINE_SEDIMENT,**kwargs)
            coarse_sum = template.add_node(n.Sum,process='ConstituentGeneration',constituent=COARSE_SEDIMENT,**kwargs)

            template.add_link(OWLink(gully_gen,'fineLoad',fine_sum,'i2'))
            template.add_link(OWLink(gully_gen,'coarseLoad',coarse_sum,'i2'))

            if self.hillslope_cgu:
                template.add_link(OWLink(sed_gen,'totalLoad',fine_sum,'i1')) # was quickLoadFine
                template.add_link(OWLink(sed_gen,'quickLoadCoarse',coarse_sum,'i1'))
            else:
                fine_dwc_node = add_emc_dwc(FINE_SEDIMENT)
                template.add_link(OWLink(fine_dwc_node,'totalLoad',fine_sum,'i1'))

                coarse_dwc_node = add_emc_dwc(COARSE_SEDIMENT)
                template.add_link(OWLink(coarse_dwc_node,'totalLoad',coarse_sum,'i1'))

                if self.cropping_cgu:
                    # Timeseries sediment load, split fine/coarse, scaled and
                    # attenuated by a delivery ratio before summation.
                    # NOTE(review): both the DWC node and the SDR outputs link
                    # into 'i1' of the sums — presumably Sum accepts multiple
                    # links per input; confirm against openwater.
                    ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent=FINE_SEDIMENT,**kwargs)
                    link_runoff(ts_node,'flow',None)

                    ts_split_node = template.add_node(n.FixedPartition,process='FineCoarseSplit',**kwargs)
                    template.add_link(OWLink(ts_node,'outputLoad',ts_split_node,'input'))

                    fine_ts_scale = template.add_node(n.ApplyScalingFactor,process='ConstituentScaling',constituent=FINE_SEDIMENT,**kwargs)
                    template.add_link(OWLink(ts_split_node,'output1',fine_ts_scale,'input')) # fraction

                    fine_ts_sdr = template.add_node(n.DeliveryRatio,process='SDR',constituent=FINE_SEDIMENT,**kwargs)
                    template.add_link(OWLink(fine_ts_scale,'output',fine_ts_sdr,'input')) # fraction

                    coarse_ts_sdr = template.add_node(n.DeliveryRatio,process='SDR',constituent=COARSE_SEDIMENT,**kwargs)
                    template.add_link(OWLink(ts_split_node,'output2',coarse_ts_sdr,'input')) # 1-fraction

                    template.add_link(OWLink(fine_ts_sdr,'output',fine_sum,'i1'))
                    template.add_link(OWLink(coarse_ts_sdr,'output',coarse_sum,'i1'))

                    #TODO Will this always be the right thing to link? Should it ideally be i1 of the sum node going into other constituent models?
                    hillslope_fine_sed_gen = fine_ts_scale
                    hillslope_fine_sed_gen_flux = 'output'

                    hillslope_coarse_sed_gen = ts_split_node
                    hillslope_coarse_sed_gen_flux = 'output2'
                    # HACK - Just seeing if this is what we need in order to get
                    # the pre-SDR sediment loads?
                    # But it includes the load conversion factor on the fine...
                    # So *probably not*

                    # hillslope_fine_sed_gen = fine_ts_scale
                    # hillslope_fine_sed_gen_flux = 'output'
                    # hillslope_coarse_sed_gen = coarse_ts_scale
                    # hillslope_coarse_sed_gen_flux = 'output'
                else:
                    # TODO: HACK - Check that quickLoad should apply whenever we have an EMC/DWC model for sediment
                    hillslope_fine_sed_gen = fine_dwc_node
                    hillslope_fine_sed_gen_flux = 'quickLoad'
                    hillslope_coarse_sed_gen = coarse_dwc_node
                    hillslope_coarse_sed_gen_flux = 'quickLoad'

            template.define_output(fine_sum,'out','generatedLoad',constituent=FINE_SEDIMENT)
            template.define_output(coarse_sum,'out','generatedLoad',constituent=COARSE_SEDIMENT)

        for con in catchment_template.pesticides:
            # Pesticides always get an EMC/DWC node; cropping CGUs add a
            # timeseries load on top and export the sum.
            # NOTE(review): when cropping_cgu is False no output is defined
            # for the pesticide dwc_node — confirm this is intentional.
            dwc_node = add_emc_dwc(con)

            if self.cropping_cgu:
                ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent=con,**kwargs)
                link_runoff(ts_node,'flow',None)

                sum_node = template.add_node(n.Sum,process='ConstituentGeneration',constituent=con,**kwargs)
                template.add_link(OWLink(dwc_node,'totalLoad',sum_node,'i1'))
                template.add_link(OWLink(ts_node,'outputLoad',sum_node,'i2'))
                template.define_output(sum_node,'out','generatedLoad')

        for con in catchment_template.constituents:
            # Sediment handled above unless the fallback model is requested;
            # pesticides handled in the loop above.
            if not self.sediment_fallback_model and (con in [FINE_SEDIMENT,COARSE_SEDIMENT]):
                continue

            if con in catchment_template.pesticides:
                continue

            # Combinations that use a timeseries load plus an EMC/DWC model.
            ts_cane_din = (cgu in CGUS_TS_N_DIN) and (con=='N_DIN')
            # FIX: was `not cgu=='Sugarcane'` — same parse, idiomatic form.
            ts_crop_part_p = (con == 'P_Particulate') and self.cropping_cgu and cgu != 'Sugarcane'
            ts_load_with_dwc = self.ts_load_with_dwc and \
                (con in self.ts_load_with_dwc['constituents']) and \
                (cgu in self.ts_load_with_dwc['cgus'])
            if ts_cane_din or ts_crop_part_p or ts_load_with_dwc:
                ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent=con,**kwargs)
                link_runoff(ts_node,'flow',None)

                ts_scale_node = template.add_node(n.ApplyScalingFactor,process='ConstituentScaling',constituent=con,**kwargs)
                template.add_link(OWLink(ts_node,'outputLoad',ts_scale_node,'input'))

                dwc_node = add_emc_dwc(con)

                sum_node = template.add_node(n.Sum,process='ConstituentGeneration',constituent=con,**kwargs)
                template.add_link(OWLink(ts_scale_node,'output',sum_node,'i1'))
                template.add_link(OWLink(dwc_node,'totalLoad',sum_node,'i2'))

                if ts_cane_din:
                    # Sugarcane/bananas DIN also includes a leached (baseflow
                    # driven) timeseries component.
                    leached_ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent='NLeached',**kwargs)
                    link_runoff(leached_ts_node,None,'flow')

                    leached_ts_scale_node = template.add_node(n.ApplyScalingFactor,process='ConstituentScaling',constituent='NLeached',**kwargs)
                    template.add_link(OWLink(leached_ts_node,'outputLoad',leached_ts_scale_node,'input'))

                    template.add_link(OWLink(leached_ts_scale_node,'output',sum_node,'i2'))

                template.define_output(sum_node,'out','generatedLoad')
                continue

            # Regular case: look up the generation model and wire quick/base
            # flow to whichever input names the model exposes.
            model = self.generation_model(con,catchment_template,**kwargs)
            if model is None:
                print('No regular constituent generation model for %s'%con)
                continue

            gen_node = template.add_node(model,process='ConstituentGeneration',constituent=con,**kwargs)
            if quickflow_scale_node is not None:
                template.add_conditional_link(quickflow_scale_node,'outflow',gen_node,QUICKFLOW_INPUTS,model)
                template.add_conditional_link(baseflow_scale_node, 'outflow',gen_node,BASEFLOW_INPUTS,model)

            if model.name == 'SednetParticulateNutrientGeneration':
                # Particulate nutrients need the gully and hillslope sediment
                # generation fluxes recorded above.
                template.add_link(OWLink(gully_gen,'generatedFine',gen_node,'fineSedModelFineGullyGeneratedKg'))
                template.add_link(OWLink(gully_gen,'generatedCoarse',gen_node,'fineSedModelCoarseGullyGeneratedKg'))

                template.add_link(OWLink(hillslope_fine_sed_gen,hillslope_fine_sed_gen_flux,
                                         gen_node,'fineSedModelFineSheetGeneratedKg'))
                template.add_link(OWLink(hillslope_coarse_sed_gen,hillslope_coarse_sed_gen_flux,
                                         gen_node,'fineSedModelCoarseSheetGeneratedKg'))

            template.define_output(gen_node,main_output_flux(model),'generatedLoad')

        return template
class DynamicSednetAgCGU(DynamicSednetCGU):
    """Agricultural CGU variant; currently identical to DynamicSednetCGU
    (the specialised generation_model override below is disabled)."""
    pass
    # def generation_model(self,constituent,catchment):
    #     if constituent == FINE_SEDIMENT:
    #         return n.USLEFineSedimentGeneration
    #     return super(DynamicSednetAgCGU,self).generation_model(constituent)
class NilCGU(DynamicSednetCGU):
    """CGU with no constituent generation at all (e.g. water bodies)."""

    def generation_model(self, *args, **kwargs):
        # Suppress every regular generation model for this CGU.
        return None
class DynamicSednetCatchment(object):
    """Builds Openwater templates for a Dynamic Sednet style catchment:
    climate inputs, per-HRU rainfall-runoff, per-CGU constituent
    generation (via DynamicSednetCGU) and a reach template with flow and
    in-stream constituent routing.
    """

    def __init__(self,
                 dissolved_nutrients=None,
                 particulate_nutrients=None,
                 pesticides=None,
                 particulate_nutrient_cgus=None,
                 ts_load_with_dwc=None):
        """Set up default model choices and constituent lists.

        Defaults (when None): dissolved_nutrients=['DisN','DisP'],
        particulate_nutrients=['PartN','PartP'], pesticides=['Pesticide1'].
        """
        # FIX: use None sentinels instead of mutable list default arguments
        # (a shared default list would be aliased across instances).
        if dissolved_nutrients is None:
            dissolved_nutrients = ['DisN','DisP']
        if particulate_nutrients is None:
            particulate_nutrients = ['PartN','PartP']
        if pesticides is None:
            pesticides = ['Pesticide1']
        self.hrus = ['HRU']
        self.cgus = ['CGU']
        self.cgu_hrus = {'CGU':'HRU'}
        self.constituents = SEDIMENT_CLASSES + dissolved_nutrients + particulate_nutrients + pesticides
        self.particulate_nutrients = particulate_nutrients
        self.particulate_nutrient_cgus = particulate_nutrient_cgus
        self.dissolved_nutrients = dissolved_nutrients
        self.pesticides = pesticides
        # The following are expected to be assigned by callers before
        # building templates (they default to None = "all"/"none" depending
        # on where they are tested).
        self.pesticide_cgus = None
        self.timeseries_sediment_cgus = None
        self.hillslope_cgus = None
        self.gully_cgus = None
        self.sediment_fallback_cgu = None
        self.ts_load_with_dwc = ts_load_with_dwc
        self.climate_inputs = ['rainfall','pet']
        # Default model choices: Sacramento rainfall-runoff, EMC/DWC
        # generation, Muskingum flow routing, lumped constituent routing
        # (with in-stream process models for sediment and nutrients).
        self.rr = n.Sacramento
        self.cg = defaultdict(lambda:n.EmcDwc,{})
        # {
        #     FINE_SEDIMENT:None,
        #     COARSE_SEDIMENT:None
        # })
        self.routing = n.Muskingum
        self.transport = defaultdict(lambda:n.LumpedConstituentRouting,{
            FINE_SEDIMENT:n.InstreamFineSediment,
            COARSE_SEDIMENT:n.InstreamCoarseSediment
        })
        # Cache used by link_catchments for node-name lookups per graph.
        self._g = None
        self._g_lookup = {}
        self.node_templates = DEFAULT_NODE_TEMPLATES.copy()
        self.node_templates['Storage'] = storage_template_builder(constituent_model_map=defaultdict(lambda:n.LumpedConstituentRouting,{
            FINE_SEDIMENT:n.StorageParticulateTrapping,
            COARSE_SEDIMENT:n.StorageParticulateTrapping
        }))

        def get_model_dissolved_nutrient(*args,**kwargs):
            # args[0] is the constituent; kwargs carry template tags (cgu).
            cgu = kwargs['cgu']
            # NOTE(review): assumes self.pesticide_cgus has been assigned a
            # collection before templates are built — it defaults to None,
            # which would raise here. Confirm caller contract.
            if cgu in self.pesticide_cgus:
                constituent = args[0]
                if cgu=='Sugarcane':
                    if constituent=='N_DIN':
                        return None
                    elif constituent=='N_DON':
                        return n.EmcDwc
                    elif constituent.startswith('P'):
                        return n.EmcDwc
                if constituent.startswith('P'):
                    return n.PassLoadIfFlow
            # if cgu is a cropping FU:
            #   look at constituent
            # constituent = args[0]

            if cgu in ['Water']: # 'Conservation','Horticulture','Other','Urban','Forestry'
                return n.EmcDwc
            # print(args)
            # print(kwargs)
            return n.SednetDissolvedNutrientGeneration

        def get_model_particulate_nutrient(*args,**kwargs):
            cgu = kwargs['cgu']
            if self.particulate_nutrient_cgus is None:
                if cgu in ['Water','Conservation','Horticulture','Other','Urban','Forestry']:
                    return n.EmcDwc
                # if cropping (but not sugarcane) and constituent == P_Particulate
                # Timeseries model...
                return n.SednetParticulateNutrientGeneration
            if cgu in self.particulate_nutrient_cgus:
                return n.SednetParticulateNutrientGeneration
            return n.EmcDwc

        for dn in dissolved_nutrients:
            self.cg[dn] = get_model_dissolved_nutrient
            self.transport[dn] = n.InstreamDissolvedNutrientDecay

        for pn in particulate_nutrients:
            self.cg[pn] = get_model_particulate_nutrient
            self.transport[pn] = n.InstreamParticulateNutrient

    def model_for(self,provider,*args,**kwargs):
        """Resolve a model from `provider`, which may be a callable, a
        mapping (indexed by args[0]) or a model itself — recursing until a
        concrete model is reached."""
        if hasattr(provider,'__call__'):
            return self.model_for(provider(*args,**kwargs),*args,**kwargs)
        if hasattr(provider,'__getitem__'):
            return self.model_for(provider[args[0]],*args,**kwargs)
        return provider

    def get_link_template(self,**kwargs) -> OWTemplate:
        """Build the reach template: flow lag + routing, bank erosion and a
        per-constituent in-stream transport node, with the fluxes each
        transport model expects."""
        tag_values = list(kwargs.values())
        reach_template = OWTemplate('reach')

        routing_node = None
        if self.routing is not None:
            # Lateral inflow is lagged before entering the routing model.
            lag_node = reach_template.add_node(n.Lag,process='FlowLag',constituent='_flow',**kwargs)
            reach_template.define_input(lag_node,'inflow','lateral')

            routing_node = reach_template.add_node(self.routing,process='FlowRouting',**kwargs)
            reach_template.add_link(OWLink(lag_node,'outflow',routing_node,'lateral'))
            reach_template.define_output(routing_node,'outflow')

            reach_template.define_input(routing_node,'inflow',UPSTREAM_FLOW_FLUX,**kwargs)
            reach_template.define_output(routing_node,'outflow',DOWNSTREAM_FLOW_FLUX,**kwargs)

        bank_erosion = reach_template.add_node(n.BankErosion,process='BankErosion',**kwargs)
        if routing_node is not None:
            reach_template.add_link(OWLink(routing_node,'storage',bank_erosion,'totalVolume'))
            reach_template.add_link(OWLink(routing_node,'outflow',bank_erosion,'downstreamFlowVolume'))

        dis_nut_models = []
        par_nut_models = []
        fine_sed_model = None
        fine_sed_con_lag_model = None

        #     n.InstreamFineSediment.name: ('upstreamMass','loadDownstream'),
        #     n.InstreamCoarseSediment.name: ('upstreamMass','loadDownstream'),
        #     n.InstreamDissolvedNutrientDecay.name: ('incomingMassUpstream','loadDownstream'),
        #     n.InstreamParticulateNutrient.name: ('incomingMassUpstream','loadDownstream')

        for con in self.constituents:
            model_type = self.model_for(self.transport,con,*tag_values)
            # Generated (lateral) load is lagged before entering transport.
            constituent_lag_node = reach_template.add_node(n.Lag,process='FlowLag',constituent=con,**kwargs)
            reach_template.define_input(constituent_lag_node,'inflow','generatedLoad')

            transport_node = reach_template.add_node(model_type,process='ConstituentRouting',constituent=con,**kwargs)

            if model_type == n.InstreamFineSediment:
                fine_sed_con_lag_model = constituent_lag_node
                # reach_template.define_input(transport_node,'incomingMass','generatedLoad')
                reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'lateralMass'))
                if self.routing is not None:
                    reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))
                    reach_template.add_link(OWLink(routing_node,'storage',transport_node,'reachVolume'))
                reach_template.add_link(OWLink(bank_erosion,'bankErosionFine',transport_node,'reachLocalMass'))

                load_out_flux = 'loadDownstream'
                load_in_flux = 'upstreamMass'
                fine_sed_model = transport_node
            elif model_type == n.InstreamCoarseSediment:
                # reach_template.define_input(transport_node,'incomingMass','generatedLoad')
                reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'lateralMass'))
                reach_template.add_link(OWLink(bank_erosion,'bankErosionCoarse',transport_node,'reachLocalMass'))

                load_out_flux = 'loadDownstream'
                load_in_flux = 'upstreamMass'
            elif model_type == n.InstreamDissolvedNutrientDecay:
                dis_nut_models.append(transport_node)
                # reach_template.define_input(transport_node,'incomingMassLateral','generatedLoad')
                reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'incomingMassLateral'))
                if self.routing is not None:
                    reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))
                    reach_template.add_link(OWLink(routing_node,'storage',transport_node,'reachVolume'))

                load_out_flux = 'loadDownstream'
                load_in_flux = 'incomingMassUpstream'
            # elif model_type == n.InstreamParticulateNutrient: TODO
            elif model_type == n.InstreamParticulateNutrient:
                par_nut_models.append(transport_node)
                reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'incomingMassLateral'))
                if self.routing is not None:
                    reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))
                    reach_template.add_link(OWLink(routing_node,'storage',transport_node,'reachVolume'))
                # NOTE(review): both fine and coarse bank erosion feed the same
                # 'streambankErosion' input — confirm both are intended.
                reach_template.add_link(OWLink(bank_erosion,'bankErosionFine',transport_node,'streambankErosion'))
                reach_template.add_link(OWLink(bank_erosion,'bankErosionCoarse',transport_node,'streambankErosion'))

                load_out_flux = 'loadDownstream'
                load_in_flux = 'incomingMassUpstream'
            else:
                # Lumped constituent routing
                # reach_template.define_input(transport_node,'lateralLoad','generatedLoad')
                reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'lateralLoad'))
                # reach_template.add_link(OWLink(lag_node,'outflow',transport_node,'inflow')) # inflow removed from LumpedConstituentRouting. Unused
                if self.routing is not None:
                    reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))
                    reach_template.add_link(OWLink(routing_node,'storage',transport_node,'storage'))

                load_out_flux = 'outflowLoad'
                load_in_flux = 'inflowLoad'

            reach_template.define_output(transport_node,load_out_flux,DOWNSTREAM_LOAD_FLUX,constituent=con,**kwargs)
            reach_template.define_input(transport_node,load_in_flux,UPSTREAM_LOAD_FLUX,constituent=con,**kwargs)

        if fine_sed_model is not None:
            # Nutrient transport models take their deposition fractions (and
            # lateral sediment) from the fine sediment transport model.
            for dnm in dis_nut_models:
                reach_template.add_link(OWLink(fine_sed_model,'floodplainDepositionFraction',dnm,'floodplainDepositionFraction'))

            for pnm in par_nut_models:
                reach_template.add_link(OWLink(fine_sed_model,'floodplainDepositionFraction',pnm,'floodplainDepositionFraction'))
                reach_template.add_link(OWLink(fine_sed_model,'channelDepositionFraction',pnm,'channelDepositionFraction'))
                # reach_template.add_link(OWLink(fine_sed_model,'channelDepositionFraction',pnm,'channelDepositionFraction'))
                reach_template.add_link(OWLink(fine_sed_con_lag_model,'outflow',pnm,'lateralSediment'))

        return reach_template

    def cgu_factory(self,cgu):
        """Choose and configure the CGU builder for `cgu` based on the
        per-process CGU lists (None means the process applies to all)."""
        cropping_cgu = (self.pesticide_cgus is not None) and (cgu in self.pesticide_cgus)
        cropping_cgu = cropping_cgu or ((self.timeseries_sediment_cgus is not None) and (cgu in self.timeseries_sediment_cgus))
        gully_proc = (self.gully_cgus is None) or (cgu in self.gully_cgus)
        hillslope_proc = (self.hillslope_cgus is None) or (cgu in self.hillslope_cgus)
        emc_proc = False
        if self.sediment_fallback_cgu is not None:
            emc_proc = cgu in self.sediment_fallback_cgu

        if cgu=='Water/lakes':
            return NilCGU()

        # if cgu in ['Dryland', 'Irrigation', 'Horticulture', 'Irrigated Grazing']:
        #     return DynamicSednetAgCGU()
        return DynamicSednetCGU(cropping_cgu=cropping_cgu,
                                sediment_fallback_model=emc_proc,
                                gully_cgu=gully_proc,
                                hillslope_cgu=hillslope_proc,
                                ts_load_with_dwc=self.ts_load_with_dwc)

    def get_template(self,**kwargs):
        """Build the full catchment template: climate input nodes, one nested
        runoff template per HRU, one nested CGU template per CGU (placed
        under its HRU) and the reach template."""
        tag_values = list(kwargs.values())
        template = OWTemplate('catchment')
        climate_nodes = {cvar: template.add_node(n.Input,process='input',variable=cvar,**kwargs) for cvar in self.climate_inputs}

        hrus={}
        for hru in self.hrus:
            hru_template = OWTemplate('hru:%s'%hru)

            if self.rr is not None:
                runoff_template = OWTemplate('runoff:%s'%hru)
                runoff_node = runoff_template.add_node(self.model_for(self.rr,hru,*tag_values),process='RR',hru=hru,**kwargs)

                for clim_var, clim_node in climate_nodes.items():
                    template.add_link(OWLink(clim_node,'output',runoff_node,clim_var))

                runoff_template.define_output(runoff_node,'runoff')
                runoff_template.define_output(runoff_node,'surfaceRunoff','quickflow')
                runoff_template.define_output(runoff_node,'baseflow')
                hru_template.nest(runoff_template)

            hrus[hru] = hru_template
            template.nest(hru_template)

        for cgu in self.cgus:
            hru = self.cgu_hrus[cgu]
            cgu_builder = self.cgu_factory(cgu)
            cgu_template = cgu_builder.get_template(self,cgu=cgu,**kwargs)
            hrus[hru].nest(cgu_template)

        template.nest(self.get_link_template(**kwargs))
        return template

    def get_node_template(self,node_type,**kwargs):
        """Delegate to the configured per-node-type template builders."""
        assert self.node_templates is not None
        return get_default_node_template(node_type,constituents=self.constituents,templates=self.node_templates,**kwargs)

    def link_catchments(self,graph,upstream,downstream):
        """Connect the flow routing and every constituent routing node of
        catchment `upstream` to catchment `downstream` in `graph`."""
        # Per-model (input flux, output flux) overrides; default [None,None]
        # falls back to the flux names in `linkages` below.
        STANDARD_LINKS = defaultdict(lambda:[None,None],{
            n.InstreamFineSediment.name: ('upstreamMass','loadDownstream'),
            n.InstreamCoarseSediment.name: ('upstreamMass','loadDownstream'),
            n.InstreamDissolvedNutrientDecay.name: ('incomingMassUpstream','loadDownstream'),
            n.InstreamParticulateNutrient.name: ('incomingMassUpstream','loadDownstream')
        })

        # Reset the node-name cache when linking within a new graph.
        # FIX: was `self._g == None` (PEP 8 E711) — use identity test.
        if (self._g is None) or (self._g != graph):
            self._g = graph
            self._g_lookup = {}

        def match_node(nm):
            # Find (and cache) the first graph node whose name starts with nm.
            if nm in self._g_lookup:
                return self._g_lookup[nm]
            for nn in graph.nodes:
                if nn.startswith(nm):
                    self._g_lookup[nm] = nn
                    return nn
            return None

        linkages = [('%s-FlowRouting','outflow','inflow')] + \
                   [('%%s-ConstituentRouting-%s'%c,'outflowLoad','inflowLoad') for c in self.constituents]
        for (lt,src,dest) in linkages:
            src_node = lt%(str(upstream))
            dest_node = lt%(str(downstream))#'%d/%s'%(to_cat,lt)
            src_node = match_node(src_node)#[n for n in graph.nodes if n.startswith(src_node)][0]
            dest_node = match_node(dest_node)#[n for n in graph.nodes if n.startswith(dest_node)][0]

            if (src_node is None) and (dest_node is None):
                # If both are missing then assume process is not being modelled
                continue
            # NOTE(review): if exactly one of src/dest is missing this raises a
            # KeyError on graph.nodes[None] below — looks unintended; confirm.

            src_model = graph.nodes[src_node][TAG_MODEL]
            dest_model = graph.nodes[dest_node][TAG_MODEL]

            src = STANDARD_LINKS[src_model][1] or src
            dest = STANDARD_LINKS[dest_model][0] or dest
            # print(src_node,src,dest_node,dest)
            graph.add_edge(src_node,dest_node,src=[src],dest=[dest])
def main_output_flux(model):
    """Return the name of a generation model's primary output flux.

    PassLoadIfFlow exposes its load as 'outputLoad'; every other
    generation model uses 'totalLoad'.
    """
    return 'outputLoad' if model.name == 'PassLoadIfFlow' else 'totalLoad'
class OpenwaterDynamicSednetResults(object):
    """Wraps a completed (or about-to-run) Openwater Dynamic Sednet model:
    model file, metadata, network geometry and results access."""

    def __init__(self, fn, res_fn=None):
        """`fn` is the base model filename (with or without '.h5');
        `res_fn` optionally overrides the results filename."""
        self.fn = fn
        self.ow_model_fn = self.filename_from_base('.h5')
        # FIX: close the metadata file (was json.load(open(...)) which
        # leaked the file handle).
        with open(self.filename_from_base('.meta.json')) as meta_fp:
            self.meta = json.load(meta_fp)
        self.init_network(fn)
        self.ow_results_fn = res_fn or self.filename_from_base('_outputs.h5')
        self.dates = pd.date_range(self.meta['start'], self.meta['end'])
        self.open_files()

    def filename_from_base(self,fn):
        """Derive a sibling filename by replacing the '.h5' suffix of the
        base filename with `fn` (a suffix such as '.meta.json')."""
        return self.fn.replace('.h5','')+fn

    def init_network(self,fn):
        """Load node/link/catchment geometry and assemble the combined
        network feature collection (extended with veneer helpers)."""
        from veneer.general import _extend_network
        self.nodes = gpd.read_file(self.filename_from_base('.nodes.json'))
        self.links = gpd.read_file(self.filename_from_base('.links.json'))
        self.catchments = gpd.read_file(self.filename_from_base('.catchments.json'))
        # FIX: close each GeoJSON file (previously leaked handles in a
        # json.load(open(...)) comprehension).
        raw = []
        for c in ['nodes','links','catchments']:
            with open(self.filename_from_base('.'+c+'.json'),'r') as fp:
                raw.append(json.load(fp))
        self.network = {
            'type':'FeatureCollection',
            'crs':raw[0]['crs'],
            'features':sum([r['features'] for r in raw],[])
        }
        self.network = _extend_network(self.network)

    def run_model(self):
        """Run the model over self.dates, overwriting any existing results,
        then reopen the model/results files."""
        self.model.run(self.dates, self.ow_results_fn, overwrite=True)
        self.open_files()

    def open_files(self):
        """(Re)open the model and results files, decompressing if needed."""
        # NOTE(review): _ensure_uncompressed is defined elsewhere in this module.
        _ensure_uncompressed(self.ow_model_fn)
        _ensure_uncompressed(self.ow_results_fn)

        self.results = OpenwaterResults(self.ow_model_fn,
                                        self.ow_results_fn,
                                        self.dates)
        self.model = ModelFile(self.ow_model_fn)

    def regulated_links(self):
        """Return names of links downstream of a storage or extraction node
        (i.e. links whose flows are regulated)."""
        from veneer.extensions import _feature_id
        network = self.network
        outlet_nodes = network.outlet_nodes()
        outlets = [n['properties']['name'] for n in outlet_nodes]

        network.partition(outlets,'outlet')
        storages = network['features'].find_by_icon('/resources/StorageNodeModel')
        extractions = network['features'].find_by_icon('/resources/ExtractionNodeModel')

        impacted_by_storage = []
        for s in storages._list+extractions._list:
            # Everything on the path from the structure to its outlet is regulated.
            outlet = s['properties']['outlet']
            outlet_id = _feature_id(network['features'].find_by_name(outlet)[0])
            impacted_by_storage += network.path_between(s,outlet_id)

        ids = set([_feature_id(f) for f in impacted_by_storage])
        network_df = network.as_dataframe()
        impacted_by_storage = network_df[network_df['id'].isin(ids)]
        links_downstream_storage = [l.replace('link for catchment ','') for l in impacted_by_storage[impacted_by_storage.feature_type=='link'].name]
        return links_downstream_storage

    def generation_model(self,c,fu):
        """Return the (model name, output flux) pair used to report the
        generated load of constituent `c` for functional unit `fu` —
        mirroring the template wiring in DynamicSednetCGU."""
        EMC = 'EmcDwc','totalLoad'
        SUM = 'Sum','out'
        if c in self.meta['sediments']:
            if fu in (self.meta['usle_cgus']+self.meta['cropping_cgus']+self.meta['gully_cgus']):
                return SUM
            return EMC

        if c in self.meta['pesticides']:
            if fu in self.meta['cropping_cgus']:
                return SUM
            return EMC

        if c in self.meta['dissolved_nutrients']:
            if fu in ['Water']: #,'Conservation','Horticulture','Other','Urban','Forestry']:
                return EMC

            if (self.meta['ts_load'] is not None) and \
               (fu in self.meta['ts_load']['cgus']) and \
               (c in self.meta['ts_load']['constituents']):
                return SUM

            if fu == 'Sugarcane':
                if c=='N_DIN':
                    return SUM
                elif c=='N_DON':
                    return EMC
                elif c.startswith('P'):
                    return EMC
            if (fu == 'Bananas') and (c=='N_DIN'):
                return SUM

            if fu in self.meta['cropping_cgus'] or fu in self.meta.get('pesticide_cgus',[]):
                if c.startswith('P'):
                    return 'PassLoadIfFlow', 'outputLoad'
            return 'SednetDissolvedNutrientGeneration', 'totalLoad'

        if c in self.meta['particulate_nutrients']:
            if (fu != 'Sugarcane') and (c == 'P_Particulate'):
                if (fu in self.meta['cropping_cgus']) or (fu in self.meta.get('timeseries_sediment',[])):
                    return SUM
            for fu_cat in ['cropping_cgus','hillslope_emc_cgus','gully_cgus','erosion_cgus']:
                if fu in self.meta.get(fu_cat,[]):
                    return 'SednetParticulateNutrientGeneration', 'totalLoad'

        return EMC

    def transport_model(self,c):
        """Return the (model name, downstream flux) pair of the in-stream
        transport model used for constituent `c`."""
        LCR = 'LumpedConstituentRouting','outflowLoad'
        if c in self.meta['pesticides']:
            return LCR
        if c in self.meta['dissolved_nutrients']:
            return 'InstreamDissolvedNutrientDecay', 'loadDownstream'
        if c in self.meta['particulate_nutrients']:
            return 'InstreamParticulateNutrient', 'loadDownstream'
        if c == 'Sediment - Coarse':
            return 'InstreamCoarseSediment', 'loadDownstream'
        if c == 'Sediment - Fine':
            return 'InstreamFineSediment', 'loadDownstream'
        # Unknown constituent — should not happen for a consistent meta file.
        assert False
class DynamicSednetStandardReporting(object):
def __init__(self,ow_impl):
self.impl = ow_impl
self.results = ow_impl.results
self.model = ow_impl.model
def _get_states(self,f,model,**tags):
mmap = self.model._map_model_dims(model)
return _tabulate_model_scalars_from_file(f,model,mmap,'states',**tags)
def get_final_states(self,model,**tags):
f = self.results.results
return self._get_states(f,model,**tags)
def get_initial_states(self,model,**tags):
f = self.results.model
return self._get_states(f,model,**tags)
def outlet_nodes_time_series(self,dest,overwrite=False):
if os.path.exists(dest):
if overwrite and os.path.isdir(dest):
shutil.rmtree(dest)
else:
raise Exception("Destination exists")
os.makedirs(dest)
outlets = self.impl.network.outlet_nodes()
final_links = [l['properties']['name'].replace('link for catchment ','') \
for l in sum([self.impl.network.upstream_links(n['properties']['id'])._list for n in outlets],[])]
assert len(final_links)==len(outlets)
total_fn = os.path.join(dest,'TotalDaily_%s_ModelTotal_%s.csv')
flow_l = self.results.time_series('StorageRouting','outflow','catchment')[final_links]*PER_SECOND_TO_PER_DAY * M3_TO_L
for outlet,final_link in zip(outlets,final_links):
fn = os.path.join(dest,f'node_flow_{outlet['properties']['name']}_Litres.csv')
flow_l[final_link].to_csv(fn)
flow_l.sum(axis=1).to_csv(total_fn%('Flow','Litres'))
for c in self.impl.meta['constituents']:
mod, flux = self.impl.transport_model(c)
constituent_loads_kg = self.results.time_series(mod,flux,'catchment',constituent=c)[final_links]*PER_SECOND_TO_PER_DAY
for outlet,final_link in zip(outlets,final_links):
fn = os.path.join(dest,f'link_const_{outlet['properties']['name']}_{c}_Kilograms.csv')
constituent_loads_kg[final_link].to_csv(fn)
constituent_loads_kg.sum(axis=1).to_csv(total_fn%(c,'Kilograms'))
    def outlet_nodes_rates_table(self):
        """Build a whole-of-model summary table (one row per constituent)
        of total load, total outlet flow and modelled area.

        Concentration and LoadPerArea are currently placeholders (0.0).
        """
        # NOTE(review): local `n` shadows the module alias `openwater.nodes as n`
        # within this comprehension (harmless here, but worth renaming).
        outlets = [n['properties']['id'] for n in self.impl.network.outlet_nodes()]
        final_links = [l['properties']['name'].replace('link for catchment ','') for l in sum([self.impl.network.upstream_links(n)._list for n in outlets],[])]
        # m^3/s -> litres/day for the final link of each outlet
        flow_l = np.array(self.results.time_series('StorageRouting','outflow','catchment')[final_links])*PER_SECOND_TO_PER_DAY * M3_TO_L
        total_area = sum(self.model.parameters('DepthToRate',component='Runoff').area)
        records = []
        for c in self.impl.meta['constituents']:
            mod, flux = self.impl.transport_model(c)
            # kg/s -> kg/day at each outlet's final link
            constituent_loads_kg = np.array(self.results.time_series(mod,flux,'catchment',constituent=c)[final_links])*PER_SECOND_TO_PER_DAY
            records.append(dict(
                Region='ModelTotal',
                Constituent=c,
                Area=total_area,
                Total_Load_in_Kg=constituent_loads_kg.sum(),
                Flow_Litres=flow_l.sum(),
                Concentration=0.0,
                LoadPerArea=0.0,
                NumDays=flow_l.shape[0]
            ))
        return pd.DataFrame(records)
    def climate_table(self):
        """Tabulate water-balance elements per catchment and FU, in metres depth.

        Produces a long-format table (Catchment, FU, Depth_m, Element) covering
        rainfall, actual ET, baseflow and quickflow from the Sacramento
        rainfall-runoff results.
        """
        orig_tbls = []
        melted_tbls = []
        # Order matters: 'runoff' must come after 'baseflow' (see subtraction below)
        variables = [('rainfall','Rainfall'),('actualET','Actual ET'),('baseflow','Baseflow'),('runoff','Runoff (Quickflow)')]
        for v,lbl in variables:
            tbl = self.results.table('Sacramento',v,'catchment','hru','sum','sum')*MM_TO_M
            if v=='runoff':
                # Subtract the previously computed baseflow table (orig_tbls[-1])
                # so the reported element matches its 'Runoff (Quickflow)' label.
                tbl = tbl - orig_tbls[-1]
            orig_tbls.append(tbl)
            # Melt catchment (index) x FU (columns) into long format
            tbl = tbl.reset_index().melt(id_vars=['index'],value_vars=list(tbl.columns)).rename(columns={'index':'Catchment','variable':'FU','value':'Depth_m'})
            tbl['Element']=lbl
            melted_tbls.append(tbl)
        return pd.concat(melted_tbls).sort_values(['Catchment','FU','Element'])
def fu_areas_table(self):
tbl = self.model.parameters('DepthToRate',component='Runoff')
tbl = tbl[['catchment','cgu','area']].sort_values(['catchment','cgu']).rename(columns={'catchment':'Catchment','cgu':'CGU'})
return tbl
def fu_summary_table(self):
summary = []
seen = {}
for con in self.impl.meta['constituents']:
for fu in self.impl.meta['fus']:
combo = self.impl.generation_model(con,fu)
if not combo in seen:
model,flux = combo
tbl = self.results.table(model,flux,'constituent','cgu','sum','sum') * PER_SECOND_TO_PER_DAY
seen[combo]=tbl
tbl = seen[combo]
summary.append((con,fu,tbl.loc[con,fu]))
return pd.DataFrame(summary,columns=['Constituent','FU','Total_Load_in_Kg'])
def regional_summary_table(self):
tables = [self.mass_balance_summary_table(self,region) for region in self.impl.meta['regions']]
for tbl,region in zip(tables,self.impl.meta['regions']):
tbl['SummaryRegion']=region
return pd.concat(tables)
def overall_summary_table(self):
return self.mass_balance_summary_table()
    def constituent_loss_table(self,region=None):
        """Placeholder for tabulating in-stream constituent losses.

        Not yet implemented - falls through and returns None, which
        mass_balance_summary_table treats as a missing element. The lists
        below sketch the fluxes and states expected to contribute to losses.
        """
        loss_fluxes = [
            ('InstreamFineSediment','loadToFloodplain','Sediment - Fine'),
            ('InstreamDissolvedNutrientDecay','loadToFloodplain'),
            # ('InstreamDissolvedNutrientDecay','loadDecayed'), #TODO
        ]
        loss_states = [
            ('InstreamFineSediment','channelStoreFine','Sediment - Fine')
        ]
        pass
def residual_constituent_table(self,region=None):
# Need to query final states (and initial states?)
mass_states = [
('LumpedConstituentRouting','storedMass'),
('InstreamFineSediment','totalStoredMass', 'Sediment - Fine'),
('InstreamDissolvedNutrientDecay','totalStoredMass'),
('InstreamCoarseSediment','totalStoredMass', 'Sediment - Coarse')
]
tables = []
for state in mass_states:
m = state[0]
v = state[1]
values = self.get_final_states(m)
if len(state)>2:
values['constituent']=state[2]
tbl = values[['constituent',v]].groupby('constituent').sum().reset_index().rename(columns={
'constituent':'Constituent',
v:'Total_Load_in_Kg'
})
tables.append(tbl)
return pd.concat(tables).groupby('Constituent').sum().reset_index()
def mass_balance_summary_table(self,region=None):
cols =['Constituent','Total_Load_in_Kg']
input_tables = {
'Supply':self.fu_summary_table(),
'Export':self.outlet_nodes_rates_table(),
'Loss':self.constituent_loss_table(region),
'Residual':self.residual_constituent_table(region)
}
result = []
for k,tbl in input_tables.items():
if tbl is None:
print(f'Missing table {k}')
continue
tbl = tbl[cols].groupby('Constituent').sum().reset_index()
tbl['MassBalanceElement'] = k
tbl = tbl[['Constituent','MassBalanceElement','Total_Load_in_Kg']]
result.append(tbl)
return pd.concat(result).sort_values(['Constituent','MassBalanceElement'])
def _ensure_uncompressed(fn):
if os.path.exists(fn):
return
gzfn = fn + '.gz'
if not os.path.exists(gzfn):
raise Exception('File not found (compressed or uncompressed): %s'%fn)
os.system('gunzip %s'%gzfn)
assert os.path.exists(fn)
| '''
Running Dynamic Sednet simulations using OpenWater
'''
import os
import json
import pandas as pd
import geopandas as gpd
import shutil
import numpy as np
from openwater import OWTemplate, OWLink
from openwater.template import TAG_MODEL
import openwater.nodes as n
from collections import defaultdict
from openwater.examples.from_source import get_default_node_template, DEFAULT_NODE_TEMPLATES, storage_template_builder
from openwater.catchments import \
DOWNSTREAM_FLOW_FLUX, DOWNSTREAM_LOAD_FLUX, \
UPSTREAM_FLOW_FLUX, UPSTREAM_LOAD_FLUX
from openwater.results import OpenwaterResults
from openwater.template import ModelFile
from .const import *
from openwater.file import _tabulate_model_scalars_from_file
LANDSCAPE_CONSTITUENT_SOURCES=['Hillslope','Gully']
# Canonical constituent names used throughout the templates and reporting
FINE_SEDIMENT = 'Sediment - Fine'
COARSE_SEDIMENT = 'Sediment - Coarse'
# CGUs whose N_DIN generation is driven by input time series
# (see the ts_cane_din handling in DynamicSednetCGU.get_template)
CGUS_TS_N_DIN = ['Sugarcane','Bananas']
SEDIMENT_CLASSES = [FINE_SEDIMENT,COARSE_SEDIMENT]
STANDARD_NUTRIENTS = ['TN','TP']
STANDARD_CONSTITUENTS = SEDIMENT_CLASSES + STANDARD_NUTRIENTS
# Candidate input flux names used when conditionally linking runoff into
# generation models - a model may expose either name (see add_conditional_link)
QUICKFLOW_INPUTS = ['quickflow','flow']
BASEFLOW_INPUTS = ['baseflow','slowflow']
# Source (eWater) model types representing "no generation" -
# presumably mapped to a nil CGU during import; confirm against from_source
NIL_MODELS = {
    'Dynamic_SedNet.Models.SedNet_Blank_Constituent_Generation_Model',
    'RiverSystem.Catchments.Models.ContaminantGenerationModels.NilConstituent'
}
# Source-model-name -> openwater translations (currently none required)
MODEL_NAME_TRANSLATIONS = {
}
# def default_generation_model(constituent,landuse):
# if constituent=='TSS':
# return n.USLEFineSedimentGeneration
# return n.EmcDwc
# def build_catchment_template(constituents,hrus,landuses,generation_model=default_generation_model):
# template = OWTemplate()
# routing_node = template.add_node(n.Muskingum,process='FlowRouting')
# for con in constituents:
# # transport_node = 'Transport-%s'%(con)
# transport_node = template.add_node(n.LumpedConstituentRouting,process='ConstituentRouting',constituent=con)
# template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))
# for hru in hrus:
# runoff_node = template.add_node(n.Simhyd,process='RR',hru=hru)
# runoff_scale_node = template.add_node(n.DepthToRate,process='ArealScale',hru=hru,component='Runoff')
# quickflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',hru=hru,component='Quickflow')
# baseflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',hru=hru,component='Baseflow')
# template.add_link(OWLink(runoff_node,'runoff',runoff_scale_node,'input'))
# template.add_link(OWLink(runoff_node,'quickflow',quickflow_scale_node,'input'))
# template.add_link(OWLink(runoff_node,'baseflow',baseflow_scale_node,'input'))
# template.add_link(OWLink(runoff_scale_node,'outflow',routing_node,'lateral'))
# for con in constituents:
# # transport_node = 'Transport-%s'%(con)
# transport_node = template.add_node(n.LumpedConstituentRouting,process='ConstituentRouting',constituent=con) #!!!ERROR
# template.add_link(OWLink(runoff_scale_node,'outflow',transport_node,'inflow'))
# for lu in landuses[hru]:
# #gen_node = 'Generation-%s-%s'%(con,lu)
# gen_node = template.add_node(generation_model(con,lu),process='ConstituentGeneration',constituent=con,lu=lu)
# template.add_link(OWLink(quickflow_scale_node,'outflow',gen_node,'quickflow'))
# template.add_link(OWLink(baseflow_scale_node,'outflow',gen_node,'baseflow'))
# template.add_link(OWLink(gen_node,'totalLoad',transport_node,'lateralLoad'))
# return template
# def link_catchments(graph,from_cat,to_cat,constituents):
# linkages = [('%d-FlowRouting (Muskingum)','outflow','inflow')] + \
# [('%%d-ConstituentRouting-%s (LumpedConstituentRouting)'%c,'outflowLoad','inflowLoad') for c in constituents]
# for (lt,src,dest) in linkages:
# dest_node = lt%from_cat
# src_node = lt%to_cat#'%d/%s'%(to_cat,lt)
# graph.add_edge(src_node,dest_node,src=[src],dest=[dest])
# def generation_models(constituent,cgu):
# if constituent in STANDARD_NUTRIENTS:
# return n.EmcDwc
# # if pesticide
# return n.EmcDwc
class Reach(object):
    """Placeholder for a stream reach; appears unused in this module."""
    pass
class HydrologicalResponseUnit(object):
    """Placeholder for an HRU; appears unused in this module."""
    pass
class DynamicSednetCGU(object):
    """Template builder for a Constituent Generation Unit (CGU).

    Wires up areal scaling of runoff components, hillslope/gully sediment
    generation, cropping time-series loads and per-constituent generation
    models into an OWTemplate nested within a catchment template.
    """
    def __init__(self,cropping_cgu=True,sediment_fallback_model=False,gully_cgu=False,hillslope_cgu=False,ts_load_with_dwc=None):
        """Configure which generation processes apply to this CGU.

        Parameters
        ----------
        cropping_cgu : bool
            Use time-series (PassLoadIfFlow) loads for sediment/pesticides.
        sediment_fallback_model : bool
            Generate sediment through the regular per-constituent model
            lookup instead of hillslope/gully processes.
        gully_cgu, hillslope_cgu : bool
            Enable gully / hillslope (USLE) sediment generation.
        ts_load_with_dwc : dict or None
            Optional {'constituents': [...], 'cgus': [...]} spec for
            constituents fed by time series plus an EMC/DWC component.
        """
        self.cropping_cgu = cropping_cgu
        # self.erosion_processes = erosion_processes
        self.gully_cgu = gully_cgu
        self.hillslope_cgu = hillslope_cgu
        self.sediment_fallback_model = sediment_fallback_model
        self.ts_load_with_dwc = ts_load_with_dwc
        # Gully generation and the fallback sediment model are mutually exclusive
        assert (not bool(gully_cgu)) or (not bool(sediment_fallback_model))
    def generation_model(self,constituent,catchment_template,**kwargs):
        """Resolve the generation model for ``constituent`` via the catchment's cg map."""
        return catchment_template.model_for(catchment_template.cg,constituent,**kwargs)
    def get_template(self,catchment_template,**kwargs):
        """Build the OWTemplate for this CGU.

        Defines 'runoff'/'quickflow'/'baseflow' inputs (when a rainfall-runoff
        model is configured) and one 'generatedLoad' output per constituent.
        """
        tag_values = list(kwargs.values())
        cgu = kwargs.get('cgu','?')
        template = OWTemplate('cgu:%s'%cgu)
        runoff_scale_node = None
        quickflow_scale_node = None
        baseflow_scale_node = None
        # Depth->volume scaling nodes, only when a rainfall-runoff model exists
        if catchment_template.rr is not None:
            runoff_scale_node = template.add_node(n.DepthToRate,process='ArealScale',component='Runoff',**kwargs)
            quickflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',component='Quickflow',**kwargs)
            baseflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',component='Baseflow',**kwargs)
        def link_runoff(dest_node,qf_input,bf_input):
            # Link scaled quick/base flow into dest_node's named inputs;
            # either input name may be None to skip that link.
            if quickflow_scale_node is None:
                return
            if qf_input is not None:
                template.add_link(OWLink(quickflow_scale_node,'outflow',dest_node,qf_input))
            if bf_input is not None:
                template.add_link(OWLink(baseflow_scale_node,'outflow',dest_node,bf_input))
        def add_emc_dwc(con):
            # Standard EMC/DWC generation node fed by quickflow and baseflow
            dwc_node = template.add_node(n.EmcDwc,process='ConstituentDryWeatherGeneration',constituent=con,**kwargs)
            link_runoff(dwc_node,'quickflow','baseflow')
            return dwc_node
        if runoff_scale_node is not None:
            template.define_input(runoff_scale_node,'input','runoff')
            template.define_input(quickflow_scale_node,'input','quickflow')
            template.define_input(baseflow_scale_node,'input','baseflow')
            template.define_output(runoff_scale_node,'outflow','lateral')
        # This should be able to be done automatically... any input not defined
        # Track which node/flux supplies hillslope fine/coarse sediment so the
        # particulate nutrient model can be linked to it later.
        hillslope_fine_sed_gen = None
        hillslope_coarse_sed_gen = None
        hillslope_fine_sed_gen_flux = None
        hillslope_coarse_sed_gen_flux = None
        # fine_ts_scale = None
        # coarse_ts_scale = None
        gully_gen = None
        if self.hillslope_cgu:
            # Hillslope
            sed_gen = template.add_node(n.USLEFineSedimentGeneration,process="HillslopeGeneration",**kwargs)
            link_runoff(sed_gen,'quickflow','baseflow')
            hillslope_fine_sed_gen = sed_gen
            hillslope_coarse_sed_gen = sed_gen
            hillslope_fine_sed_gen_flux = 'generatedLoadFine'
            hillslope_coarse_sed_gen_flux = 'generatedLoadCoarse'
        if self.gully_cgu:
            # Gully
            gully_gen = template.add_node(n.DynamicSednetGullyAlt,process="GullyGeneration",**kwargs)
            link_runoff(gully_gen,'quickflow',None)
            # Fine/coarse sums combine gully (i2) with hillslope/cropping/EMC (i1)
            fine_sum = template.add_node(n.Sum,process='ConstituentGeneration',constituent=FINE_SEDIMENT,**kwargs)
            coarse_sum = template.add_node(n.Sum,process='ConstituentGeneration',constituent=COARSE_SEDIMENT,**kwargs)
            template.add_link(OWLink(gully_gen,'fineLoad',fine_sum,'i2'))
            template.add_link(OWLink(gully_gen,'coarseLoad',coarse_sum,'i2'))
            if self.hillslope_cgu:
                template.add_link(OWLink(sed_gen,'totalLoad',fine_sum,'i1')) # was quickLoadFine
                template.add_link(OWLink(sed_gen,'quickLoadCoarse',coarse_sum,'i1'))
            else:
                fine_dwc_node = add_emc_dwc(FINE_SEDIMENT)
                template.add_link(OWLink(fine_dwc_node,'totalLoad',fine_sum,'i1'))
                coarse_dwc_node = add_emc_dwc(COARSE_SEDIMENT)
                template.add_link(OWLink(coarse_dwc_node,'totalLoad',coarse_sum,'i1'))
            if self.cropping_cgu:
                # Cropping: time-series load split into fine/coarse, scaled,
                # passed through a delivery ratio before summation.
                ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent=FINE_SEDIMENT,**kwargs)
                link_runoff(ts_node,'flow',None)
                ts_split_node = template.add_node(n.FixedPartition,process='FineCoarseSplit',**kwargs)
                template.add_link(OWLink(ts_node,'outputLoad',ts_split_node,'input'))
                fine_ts_scale = template.add_node(n.ApplyScalingFactor,process='ConstituentScaling',constituent=FINE_SEDIMENT,**kwargs)
                template.add_link(OWLink(ts_split_node,'output1',fine_ts_scale,'input')) # fraction
                fine_ts_sdr = template.add_node(n.DeliveryRatio,process='SDR',constituent=FINE_SEDIMENT,**kwargs)
                template.add_link(OWLink(fine_ts_scale,'output',fine_ts_sdr,'input')) # fraction
                coarse_ts_sdr = template.add_node(n.DeliveryRatio,process='SDR',constituent=COARSE_SEDIMENT,**kwargs)
                template.add_link(OWLink(ts_split_node,'output2',coarse_ts_sdr,'input')) # 1-fraction
                template.add_link(OWLink(fine_ts_sdr,'output',fine_sum,'i1'))
                template.add_link(OWLink(coarse_ts_sdr,'output',coarse_sum,'i1'))
                #TODO Will this always be the right thing to link? Should it ideally be i1 of the sum node going into other constituent models?
                hillslope_fine_sed_gen = fine_ts_scale
                hillslope_fine_sed_gen_flux = 'output'
                hillslope_coarse_sed_gen = ts_split_node
                hillslope_coarse_sed_gen_flux = 'output2'
                # HACK - Just seeing if this is what we need in order to get
                # the pre-SDR sediment loads?
                # But it includes the load conversion factor on the fine...
                # So *probably not*
                # hillslope_fine_sed_gen = fine_ts_scale
                # hillslope_fine_sed_gen_flux = 'output'
                # hillslope_coarse_sed_gen = coarse_ts_scale
                # hillslope_coarse_sed_gen_flux = 'output'
            else:
                # TODO: HACK - Check that quickLoad should apply whenever we have an EMC/DWC model for sediment
                hillslope_fine_sed_gen = fine_dwc_node
                hillslope_fine_sed_gen_flux = 'quickLoad'
                hillslope_coarse_sed_gen = coarse_dwc_node
                hillslope_coarse_sed_gen_flux = 'quickLoad'
            template.define_output(fine_sum,'out','generatedLoad',constituent=FINE_SEDIMENT)
            template.define_output(coarse_sum,'out','generatedLoad',constituent=COARSE_SEDIMENT)
        # Pesticides: EMC/DWC always; cropping CGUs add a time-series component
        # NOTE(review): when cropping_cgu is False no 'generatedLoad' output is
        # defined for the pesticide dwc_node - confirm whether that is intended.
        for con in catchment_template.pesticides:
            dwc_node = add_emc_dwc(con)
            if self.cropping_cgu:
                ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent=con,**kwargs)
                link_runoff(ts_node,'flow',None)
                sum_node = template.add_node(n.Sum,process='ConstituentGeneration',constituent=con,**kwargs)
                template.add_link(OWLink(dwc_node,'totalLoad',sum_node,'i1'))
                template.add_link(OWLink(ts_node,'outputLoad',sum_node,'i2'))
                template.define_output(sum_node,'out','generatedLoad')
        # Remaining constituents (nutrients, and sediment when falling back)
        for con in catchment_template.constituents:
            if not self.sediment_fallback_model and (con in [FINE_SEDIMENT,COARSE_SEDIMENT]):
                continue
            if con in catchment_template.pesticides:
                continue
            # Special cases driven by time series plus EMC/DWC
            ts_cane_din = (cgu in CGUS_TS_N_DIN) and (con=='N_DIN')
            ts_crop_part_p = (con == 'P_Particulate') and self.cropping_cgu and not cgu=='Sugarcane'
            ts_load_with_dwc = self.ts_load_with_dwc and \
                               (con in self.ts_load_with_dwc['constituents']) and \
                               (cgu in self.ts_load_with_dwc['cgus'])
            if ts_cane_din or ts_crop_part_p or ts_load_with_dwc:
                ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent=con,**kwargs)
                link_runoff(ts_node,'flow',None)
                ts_scale_node = template.add_node(n.ApplyScalingFactor,process='ConstituentScaling',constituent=con,**kwargs)
                template.add_link(OWLink(ts_node,'outputLoad',ts_scale_node,'input'))
                dwc_node = add_emc_dwc(con)
                sum_node = template.add_node(n.Sum,process='ConstituentGeneration',constituent=con,**kwargs)
                template.add_link(OWLink(ts_scale_node,'output',sum_node,'i1'))
                template.add_link(OWLink(dwc_node,'totalLoad',sum_node,'i2'))
                if ts_cane_din:
                    # Cane DIN also receives a leached component via baseflow
                    leached_ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent='NLeached',**kwargs)
                    link_runoff(leached_ts_node,None,'flow')
                    leached_ts_scale_node = template.add_node(n.ApplyScalingFactor,process='ConstituentScaling',constituent='NLeached',**kwargs)
                    template.add_link(OWLink(leached_ts_node,'outputLoad',leached_ts_scale_node,'input'))
                    template.add_link(OWLink(leached_ts_scale_node,'output',sum_node,'i2'))
                template.define_output(sum_node,'out','generatedLoad')
                continue
            model = self.generation_model(con,catchment_template,**kwargs)
            if model is None:
                print('No regular constituent generation model for %s'%con)
                continue
            gen_node = template.add_node(model,process='ConstituentGeneration',constituent=con,**kwargs)
            if quickflow_scale_node is not None:
                # Models differ in input flux naming - link whichever matches
                template.add_conditional_link(quickflow_scale_node,'outflow',gen_node,QUICKFLOW_INPUTS,model)
                template.add_conditional_link(baseflow_scale_node, 'outflow',gen_node,BASEFLOW_INPUTS,model)
            if model.name == 'SednetParticulateNutrientGeneration':
                # Particulate nutrients scale off gully and hillslope sediment generation
                template.add_link(OWLink(gully_gen,'generatedFine',gen_node,'fineSedModelFineGullyGeneratedKg'))
                template.add_link(OWLink(gully_gen,'generatedCoarse',gen_node,'fineSedModelCoarseGullyGeneratedKg'))
                template.add_link(OWLink(hillslope_fine_sed_gen,hillslope_fine_sed_gen_flux,
                                         gen_node,'fineSedModelFineSheetGeneratedKg'))
                template.add_link(OWLink(hillslope_coarse_sed_gen,hillslope_coarse_sed_gen_flux,
                                         gen_node,'fineSedModelCoarseSheetGeneratedKg'))
            template.define_output(gen_node,main_output_flux(model),'generatedLoad')
        return template
class DynamicSednetAgCGU(DynamicSednetCGU):
    """Agricultural CGU; currently identical in behaviour to DynamicSednetCGU."""
    pass
    # def generation_model(self,constituent,catchment):
    #     if constituent == FINE_SEDIMENT:
    #         return n.USLEFineSedimentGeneration
    #     return super(DynamicSednetAgCGU,self).generation_model(constituent)
class NilCGU(DynamicSednetCGU):
    """CGU for land uses (e.g. water bodies) that generate no constituents."""
    def generation_model(self, *args, **kwargs):
        # No generation model for any constituent - callers treat None as 'skip'.
        return None
class DynamicSednetCatchment(object):
    """Builds OWTemplates for a Dynamic Sednet catchment.

    Holds the configuration of rainfall-runoff, constituent generation and
    in-stream transport models, and produces catchment/reach templates plus
    the graph edges linking catchments together.
    """
    def __init__(self,
                 dissolved_nutrients=['DisN','DisP'],
                 particulate_nutrients=['PartN','PartP'],
                 pesticides=['Pesticide1'],
                 particulate_nutrient_cgus=None,
                 ts_load_with_dwc=None):
        # NOTE(review): mutable default arguments - safe here because they are
        # only read (concatenated into new lists), never mutated.
        self.hrus = ['HRU']
        self.cgus = ['CGU']
        self.cgu_hrus = {'CGU':'HRU'}
        self.constituents = SEDIMENT_CLASSES + dissolved_nutrients + particulate_nutrients + pesticides
        self.particulate_nutrients = particulate_nutrients
        self.particulate_nutrient_cgus = particulate_nutrient_cgus
        self.dissolved_nutrients = dissolved_nutrients
        self.pesticides = pesticides
        # Optional per-process CGU lists; None means "applies to all CGUs"
        self.pesticide_cgus = None
        self.timeseries_sediment_cgus = None
        self.hillslope_cgus = None
        self.gully_cgus = None
        self.sediment_fallback_cgu = None
        self.ts_load_with_dwc = ts_load_with_dwc
        self.climate_inputs = ['rainfall','pet']
        # Default model choices; cg/transport are constituent->model maps
        self.rr = n.Sacramento
        self.cg = defaultdict(lambda:n.EmcDwc,{})
        # {
        #     FINE_SEDIMENT:None,
        #     COARSE_SEDIMENT:None
        # })
        self.routing = n.Muskingum
        self.transport = defaultdict(lambda:n.LumpedConstituentRouting,{
            FINE_SEDIMENT:n.InstreamFineSediment,
            COARSE_SEDIMENT:n.InstreamCoarseSediment
        })
        # Cache used by link_catchments to avoid rescanning graph nodes
        self._g = None
        self._g_lookup = {}
        self.node_templates = DEFAULT_NODE_TEMPLATES.copy()
        self.node_templates['Storage'] = storage_template_builder(constituent_model_map=defaultdict(lambda:n.LumpedConstituentRouting,{
            FINE_SEDIMENT:n.StorageParticulateTrapping,
            COARSE_SEDIMENT:n.StorageParticulateTrapping
        }))
        def get_model_dissolved_nutrient(*args,**kwargs):
            # Choose the dissolved-nutrient generation model by CGU/constituent.
            # NOTE(review): args[0] is assumed to be the constituent name.
            cgu = kwargs['cgu']
            if cgu in self.pesticide_cgus:
                constituent = args[0]
                if cgu=='Sugarcane':
                    if constituent=='N_DIN':
                        return None
                    elif constituent=='N_DON':
                        return n.EmcDwc
                    elif constituent.startswith('P'):
                        return n.EmcDwc
                if constituent.startswith('P'):
                    return n.PassLoadIfFlow
            # if cgu is a cropping FU:
            #   look at constituent
            # constituent = args[0]
            if cgu in ['Water']: # 'Conservation','Horticulture','Other','Urban','Forestry'
                return n.EmcDwc
            # print(args)
            # print(kwargs)
            return n.SednetDissolvedNutrientGeneration
        def get_model_particulate_nutrient(*args,**kwargs):
            # Choose the particulate-nutrient generation model by CGU.
            cgu = kwargs['cgu']
            if self.particulate_nutrient_cgus is None:
                if cgu in ['Water','Conservation','Horticulture','Other','Urban','Forestry']:
                    return n.EmcDwc
                # if cropping (but not sugarcane) and constituent == P_Particulate
                # Timeseries model...
                return n.SednetParticulateNutrientGeneration
            if cgu in self.particulate_nutrient_cgus:
                return n.SednetParticulateNutrientGeneration
            return n.EmcDwc
        for dn in dissolved_nutrients:
            self.cg[dn] = get_model_dissolved_nutrient
            self.transport[dn] = n.InstreamDissolvedNutrientDecay
        for pn in particulate_nutrients:
            self.cg[pn] = get_model_particulate_nutrient
            self.transport[pn] = n.InstreamParticulateNutrient
    def model_for(self,provider,*args,**kwargs):
        """Resolve ``provider`` to a model: callables are invoked, mappings
        indexed by the first positional argument, anything else returned as-is."""
        if hasattr(provider,'__call__'):
            return self.model_for(provider(*args,**kwargs),*args,**kwargs)
        if hasattr(provider,'__getitem__'):
            return self.model_for(provider[args[0]],*args,**kwargs)
        return provider
    def get_link_template(self,**kwargs) -> OWTemplate:
        """Build the reach (link) template: flow lag/routing, bank erosion and
        one constituent routing node per constituent, with upstream/downstream
        flux definitions used for inter-catchment linking."""
        tag_values = list(kwargs.values())
        reach_template = OWTemplate('reach')
        routing_node = None
        if self.routing is not None:
            # Lag lateral inflow before flow routing
            lag_node = reach_template.add_node(n.Lag,process='FlowLag',constituent='_flow',**kwargs)
            reach_template.define_input(lag_node,'inflow','lateral')
            routing_node = reach_template.add_node(self.routing,process='FlowRouting',**kwargs)
            reach_template.add_link(OWLink(lag_node,'outflow',routing_node,'lateral'))
            reach_template.define_output(routing_node,'outflow')
            reach_template.define_input(routing_node,'inflow',UPSTREAM_FLOW_FLUX,**kwargs)
            reach_template.define_output(routing_node,'outflow',DOWNSTREAM_FLOW_FLUX,**kwargs)
        bank_erosion = reach_template.add_node(n.BankErosion,process='BankErosion',**kwargs)
        if routing_node is not None:
            reach_template.add_link(OWLink(routing_node,'storage',bank_erosion,'totalVolume'))
            reach_template.add_link(OWLink(routing_node,'outflow',bank_erosion,'downstreamFlowVolume'))
        # Collected so fine-sediment deposition fractions can be fed into
        # the nutrient models after the loop
        dis_nut_models = []
        par_nut_models = []
        fine_sed_model = None
        fine_sed_con_lag_model = None
        # n.InstreamFineSediment.name: ('upstreamMass','loadDownstream'),
        # n.InstreamCoarseSediment.name: ('upstreamMass','loadDownstream'),
        # n.InstreamDissolvedNutrientDecay.name: ('incomingMassUpstream','loadDownstream'),
        # n.InstreamParticulateNutrient.name: ('incomingMassUpstream','loadDownstream')
        for con in self.constituents:
            model_type = self.model_for(self.transport,con,*tag_values)
            constituent_lag_node = reach_template.add_node(n.Lag,process='FlowLag',constituent=con,**kwargs)
            reach_template.define_input(constituent_lag_node,'inflow','generatedLoad')
            transport_node = reach_template.add_node(model_type,process='ConstituentRouting',constituent=con,**kwargs)
            # Each transport model family uses different input/output flux names
            if model_type == n.InstreamFineSediment:
                fine_sed_con_lag_model = constituent_lag_node
                # reach_template.define_input(transport_node,'incomingMass','generatedLoad')
                reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'lateralMass'))
                if self.routing is not None:
                    reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))
                    reach_template.add_link(OWLink(routing_node,'storage',transport_node,'reachVolume'))
                reach_template.add_link(OWLink(bank_erosion,'bankErosionFine',transport_node,'reachLocalMass'))
                load_out_flux = 'loadDownstream'
                load_in_flux = 'upstreamMass'
                fine_sed_model = transport_node
            elif model_type == n.InstreamCoarseSediment:
                # reach_template.define_input(transport_node,'incomingMass','generatedLoad')
                reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'lateralMass'))
                reach_template.add_link(OWLink(bank_erosion,'bankErosionCoarse',transport_node,'reachLocalMass'))
                load_out_flux = 'loadDownstream'
                load_in_flux = 'upstreamMass'
            elif model_type == n.InstreamDissolvedNutrientDecay:
                dis_nut_models.append(transport_node)
                # reach_template.define_input(transport_node,'incomingMassLateral','generatedLoad')
                reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'incomingMassLateral'))
                if self.routing is not None:
                    reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))
                    reach_template.add_link(OWLink(routing_node,'storage',transport_node,'reachVolume'))
                load_out_flux = 'loadDownstream'
                load_in_flux = 'incomingMassUpstream'
            # elif model_type == n.InstreamParticulateNutrient: TODO
            elif model_type == n.InstreamParticulateNutrient:
                par_nut_models.append(transport_node)
                reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'incomingMassLateral'))
                if self.routing is not None:
                    reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))
                    reach_template.add_link(OWLink(routing_node,'storage',transport_node,'reachVolume'))
                # NOTE(review): both fine and coarse bank erosion link to the
                # same 'streambankErosion' input - confirm the node sums inputs.
                reach_template.add_link(OWLink(bank_erosion,'bankErosionFine',transport_node,'streambankErosion'))
                reach_template.add_link(OWLink(bank_erosion,'bankErosionCoarse',transport_node,'streambankErosion'))
                load_out_flux = 'loadDownstream'
                load_in_flux = 'incomingMassUpstream'
            else:
                # Lumped constituent routing
                # reach_template.define_input(transport_node,'lateralLoad','generatedLoad')
                reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'lateralLoad'))
                # reach_template.add_link(OWLink(lag_node,'outflow',transport_node,'inflow')) # inflow removed from LumpedConstituentRouting. Unused
                if self.routing is not None:
                    reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))
                    reach_template.add_link(OWLink(routing_node,'storage',transport_node,'storage'))
                load_out_flux = 'outflowLoad'
                load_in_flux = 'inflowLoad'
            reach_template.define_output(transport_node,load_out_flux,DOWNSTREAM_LOAD_FLUX,constituent=con,**kwargs)
            reach_template.define_input(transport_node,load_in_flux,UPSTREAM_LOAD_FLUX,constituent=con,**kwargs)
        if fine_sed_model is not None:
            # Nutrient models share the fine sediment deposition behaviour
            for dnm in dis_nut_models:
                reach_template.add_link(OWLink(fine_sed_model,'floodplainDepositionFraction',dnm,'floodplainDepositionFraction'))
            for pnm in par_nut_models:
                reach_template.add_link(OWLink(fine_sed_model,'floodplainDepositionFraction',pnm,'floodplainDepositionFraction'))
                reach_template.add_link(OWLink(fine_sed_model,'channelDepositionFraction',pnm,'channelDepositionFraction'))
                # reach_template.add_link(OWLink(fine_sed_model,'channelDepositionFraction',pnm,'channelDepositionFraction'))
                reach_template.add_link(OWLink(fine_sed_con_lag_model,'outflow',pnm,'lateralSediment'))
        return reach_template
    def cgu_factory(self,cgu):
        """Create the CGU builder for ``cgu`` based on the configured
        per-process CGU lists (None means the process applies everywhere)."""
        cropping_cgu = (self.pesticide_cgus is not None) and (cgu in self.pesticide_cgus)
        cropping_cgu = cropping_cgu or ((self.timeseries_sediment_cgus is not None) and (cgu in self.timeseries_sediment_cgus))
        gully_proc = (self.gully_cgus is None) or (cgu in self.gully_cgus)
        hillslope_proc = (self.hillslope_cgus is None) or (cgu in self.hillslope_cgus)
        emc_proc = False
        if self.sediment_fallback_cgu is not None:
            emc_proc = cgu in self.sediment_fallback_cgu
        if cgu=='Water/lakes':
            return NilCGU()
        # if cgu in ['Dryland', 'Irrigation', 'Horticulture', 'Irrigated Grazing']:
        #     return DynamicSednetAgCGU()
        return DynamicSednetCGU(cropping_cgu=cropping_cgu,
                                sediment_fallback_model=emc_proc,
                                gully_cgu=gully_proc,
                                hillslope_cgu=hillslope_proc,
                                ts_load_with_dwc=self.ts_load_with_dwc)
    def get_template(self,**kwargs):
        """Build the full catchment template: climate inputs, per-HRU runoff,
        nested CGU templates and the reach (link) template."""
        tag_values = list(kwargs.values())
        template = OWTemplate('catchment')
        climate_nodes = {cvar: template.add_node(n.Input,process='input',variable=cvar,**kwargs) for cvar in self.climate_inputs}
        hrus={}
        for hru in self.hrus:
            hru_template = OWTemplate('hru:%s'%hru)
            if self.rr is not None:
                runoff_template = OWTemplate('runoff:%s'%hru)
                runoff_node = runoff_template.add_node(self.model_for(self.rr,hru,*tag_values),process='RR',hru=hru,**kwargs)
                for clim_var, clim_node in climate_nodes.items():
                    template.add_link(OWLink(clim_node,'output',runoff_node,clim_var))
                runoff_template.define_output(runoff_node,'runoff')
                runoff_template.define_output(runoff_node,'surfaceRunoff','quickflow')
                runoff_template.define_output(runoff_node,'baseflow')
                hru_template.nest(runoff_template)
            hrus[hru] = hru_template
            template.nest(hru_template)
        for cgu in self.cgus:
            hru = self.cgu_hrus[cgu]
            cgu_builder = self.cgu_factory(cgu)
            cgu_template = cgu_builder.get_template(self,cgu=cgu,**kwargs)
            hrus[hru].nest(cgu_template)
        template.nest(self.get_link_template(**kwargs))
        return template
    def get_node_template(self,node_type,**kwargs):
        """Delegate node-template construction to the configured builders."""
        assert self.node_templates is not None
        return get_default_node_template(node_type,constituents=self.constituents,templates=self.node_templates,**kwargs)
    def link_catchments(self,graph,upstream,downstream):
        """Add graph edges routing flow and constituent loads from
        ``upstream`` to ``downstream`` catchment nodes.

        Flux names are overridden per transport model via STANDARD_LINKS.
        """
        STANDARD_LINKS = defaultdict(lambda:[None,None],{
            n.InstreamFineSediment.name: ('upstreamMass','loadDownstream'),
            n.InstreamCoarseSediment.name: ('upstreamMass','loadDownstream'),
            n.InstreamDissolvedNutrientDecay.name: ('incomingMassUpstream','loadDownstream'),
            n.InstreamParticulateNutrient.name: ('incomingMassUpstream','loadDownstream')
        })
        # Reset the node-name cache when a different graph is supplied.
        # NOTE(review): '== None' / '!= graph' should idiomatically be
        # 'is None' / 'is not graph'; behaviour is equivalent only if the
        # graph type does not define value-based __eq__ - confirm.
        if (self._g == None) or (self._g != graph):
            self._g = graph
            self._g_lookup = {}
        def match_node(nm):
            # Find (and cache) the first graph node whose name starts with nm
            if nm in self._g_lookup:
                return self._g_lookup[nm]
            for nn in graph.nodes:
                if nn.startswith(nm):
                    self._g_lookup[nm] = nn
                    return nn
            return None
        linkages = [('%s-FlowRouting','outflow','inflow')] + \
                   [('%%s-ConstituentRouting-%s'%c,'outflowLoad','inflowLoad') for c in self.constituents]
        for (lt,src,dest) in linkages:
            src_node = lt%(str(upstream))
            dest_node = lt%(str(downstream))#'%d/%s'%(to_cat,lt)
            src_node = match_node(src_node)#[n for n in graph.nodes if n.startswith(src_node)][0]
            dest_node = match_node(dest_node)#[n for n in graph.nodes if n.startswith(dest_node)][0]
            if (src_node is None) and (dest_node is None):
                # If both are missing then assume process is not being modelled
                # NOTE(review): if only ONE side is missing, the lookup below
                # raises on the None key - confirm that is the intended failure.
                continue
            src_model = graph.nodes[src_node][TAG_MODEL]
            dest_model = graph.nodes[dest_node][TAG_MODEL]
            src = STANDARD_LINKS[src_model][1] or src
            dest = STANDARD_LINKS[dest_model][0] or dest
            # print(src_node,src,dest_node,dest)
            graph.add_edge(src_node,dest_node,src=[src],dest=[dest])
def main_output_flux(model):
    """Name of the flux carrying a generation model's primary load.

    ``PassLoadIfFlow`` emits its load on ``outputLoad``; every other
    generation model used here exposes ``totalLoad``.
    """
    return 'outputLoad' if model.name == 'PassLoadIfFlow' else 'totalLoad'
class OpenwaterDynamicSednetResults(object):
def __init__(self, fn, res_fn=None):
self.fn = fn
self.ow_model_fn = self.filename_from_base('.h5')
self.meta = json.load(open(self.filename_from_base('.meta.json')))
self.init_network(fn)
self.ow_results_fn = res_fn or self.filename_from_base('_outputs.h5')
self.dates = pd.date_range(self.meta['start'], self.meta['end'])
self.open_files()
def filename_from_base(self,fn):
return self.fn.replace('.h5','')+fn
def init_network(self,fn):
from veneer.general import _extend_network
self.nodes = gpd.read_file(self.filename_from_base('.nodes.json'))
self.links = gpd.read_file(self.filename_from_base('.links.json'))
self.catchments = gpd.read_file(self.filename_from_base('.catchments.json'))
raw = [json.load(open(self.filename_from_base('.'+c+'.json'),'r')) for c in ['nodes','links','catchments']]
self.network = {
'type':'FeatureCollection',
'crs':raw[0]['crs'],
'features':sum([r['features'] for r in raw],[])
}
self.network = _extend_network(self.network)
def run_model(self):
self.model.run(self.dates, self.ow_results_fn, overwrite=True)
self.open_files()
def open_files(self):
_ensure_uncompressed(self.ow_model_fn)
_ensure_uncompressed(self.ow_results_fn)
self.results = OpenwaterResults(self.ow_model_fn,
self.ow_results_fn,
self.dates)
self.model = ModelFile(self.ow_model_fn)
def regulated_links(self):
from veneer.extensions import _feature_id
network = self.network
outlet_nodes = network.outlet_nodes()
outlets = [n['properties']['name'] for n in outlet_nodes]
network.partition(outlets,'outlet')
storages = network['features'].find_by_icon('/resources/StorageNodeModel')
extractions = network['features'].find_by_icon('/resources/ExtractionNodeModel')
impacted_by_storage = []
for s in storages._list+extractions._list:
outlet = s['properties']['outlet']
outlet_id = _feature_id(network['features'].find_by_name(outlet)[0])
impacted_by_storage += network.path_between(s,outlet_id)
ids = set([_feature_id(f) for f in impacted_by_storage])
network_df = network.as_dataframe()
impacted_by_storage = network_df[network_df['id'].isin(ids)]
links_downstream_storage = [l.replace('link for catchment ','') for l in impacted_by_storage[impacted_by_storage.feature_type=='link'].name]
return links_downstream_storage
def generation_model(self,c,fu):
EMC = 'EmcDwc','totalLoad'
SUM = 'Sum','out'
if c in self.meta['sediments']:
if fu in (self.meta['usle_cgus']+self.meta['cropping_cgus']+self.meta['gully_cgus']):
return SUM
return EMC
if c in self.meta['pesticides']:
if fu in self.meta['cropping_cgus']:
return SUM
return EMC
if c in self.meta['dissolved_nutrients']:
if fu in ['Water']: #,'Conservation','Horticulture','Other','Urban','Forestry']:
return EMC
if (self.meta['ts_load'] is not None) and \
(fu in self.meta['ts_load']['cgus']) and \
(c in self.meta['ts_load']['constituents']):
return SUM
if fu == 'Sugarcane':
if c=='N_DIN':
return SUM
elif c=='N_DON':
return EMC
elif c.startswith('P'):
return EMC
if (fu == 'Bananas') and (c=='N_DIN'):
return SUM
if fu in self.meta['cropping_cgus'] or fu in self.meta.get('pesticide_cgus',[]):
if c.startswith('P'):
return 'PassLoadIfFlow', 'outputLoad'
return 'SednetDissolvedNutrientGeneration', 'totalLoad'
if c in self.meta['particulate_nutrients']:
if (fu != 'Sugarcane') and (c == 'P_Particulate'):
if (fu in self.meta['cropping_cgus']) or (fu in self.meta.get('timeseries_sediment',[])):
return SUM
for fu_cat in ['cropping_cgus','hillslope_emc_cgus','gully_cgus','erosion_cgus']:
if fu in self.meta.get(fu_cat,[]):
return 'SednetParticulateNutrientGeneration', 'totalLoad'
return EMC
def transport_model(self,c):
    """Return the (model, flux) pair used for in-stream transport of constituent `c`.

    Args:
        c: Constituent name (e.g. 'Sediment - Fine', a pesticide, a nutrient).

    Returns:
        tuple(str, str): (model name, downstream-load flux name)

    Raises:
        ValueError: if `c` is not a recognised constituent.
    """
    LCR = 'LumpedConstituentRouting','outflowLoad'
    if c in self.meta['pesticides']:
        return LCR
    if c in self.meta['dissolved_nutrients']:
        return 'InstreamDissolvedNutrientDecay', 'loadDownstream'
    if c in self.meta['particulate_nutrients']:
        return 'InstreamParticulateNutrient', 'loadDownstream'
    if c == 'Sediment - Coarse':
        return 'InstreamCoarseSediment', 'loadDownstream'
    if c == 'Sediment - Fine':
        return 'InstreamFineSediment', 'loadDownstream'
    # Was `assert False`: assertions are stripped under `python -O`, which
    # would make this silently return None. Fail loudly instead.
    raise ValueError('No transport model known for constituent: %s' % c)
class DynamicSednetStandardReporting(object):
    """Standard reporting tables and time series for a Dynamic Sednet run.

    Wraps an Openwater implementation object (`ow_impl`) which provides the
    run results (`results`), the model (`model`) and the node-link network.
    """
    def __init__(self,ow_impl):
        self.impl = ow_impl
        self.results = ow_impl.results
        self.model = ow_impl.model

    def _get_states(self,f,model,**tags):
        # Tabulate scalar state values for `model` from data file `f`.
        mmap = self.model._map_model_dims(model)
        return _tabulate_model_scalars_from_file(f,model,mmap,'states',**tags)

    def get_final_states(self,model,**tags):
        """Return end-of-run states for `model` (read from the results file)."""
        f = self.results.results
        return self._get_states(f,model,**tags)

    def get_initial_states(self,model,**tags):
        """Return initial states for `model` (read from the model file)."""
        f = self.results.model
        return self._get_states(f,model,**tags)

    def outlet_nodes_time_series(self,dest,overwrite=False):
        """Write daily flow (litres) and constituent-load (kg) CSVs for every outlet node into `dest`.

        Raises:
            Exception: if `dest` exists, unless `overwrite` is set and
                `dest` is a directory (in which case it is replaced).
        """
        if os.path.exists(dest):
            if overwrite and os.path.isdir(dest):
                shutil.rmtree(dest)
            else:
                raise Exception("Destination exists")
        os.makedirs(dest)
        outlets = self.impl.network.outlet_nodes()
        # Exactly one link is expected immediately upstream of each outlet node.
        final_links = [l['properties']['name'].replace('link for catchment ','')
                       for l in sum([self.impl.network.upstream_links(n['properties']['id'])._list for n in outlets],[])]
        assert len(final_links)==len(outlets)
        total_fn = os.path.join(dest,'TotalDaily_%s_ModelTotal_%s.csv')
        flow_l = self.results.time_series('StorageRouting','outflow','catchment')[final_links]*PER_SECOND_TO_PER_DAY * M3_TO_L
        for outlet,final_link in zip(outlets,final_links):
            fn = os.path.join(dest,f'node_flow_{outlet["properties"]["name"]}_Litres.csv')
            flow_l[final_link].to_csv(fn)
        flow_l.sum(axis=1).to_csv(total_fn%('Flow','Litres'))
        for c in self.impl.meta['constituents']:
            mod, flux = self.impl.transport_model(c)
            constituent_loads_kg = self.results.time_series(mod,flux,'catchment',constituent=c)[final_links]*PER_SECOND_TO_PER_DAY
            for outlet,final_link in zip(outlets,final_links):
                fn = os.path.join(dest,f'link_const_{outlet["properties"]["name"]}_{c}_Kilograms.csv')
                constituent_loads_kg[final_link].to_csv(fn)
            constituent_loads_kg.sum(axis=1).to_csv(total_fn%(c,'Kilograms'))

    def outlet_nodes_rates_table(self):
        """Return one whole-of-model summary row per constituent (total load, flow, area)."""
        outlets = [n['properties']['id'] for n in self.impl.network.outlet_nodes()]
        final_links = [l['properties']['name'].replace('link for catchment ','')
                       for l in sum([self.impl.network.upstream_links(n)._list for n in outlets],[])]
        flow_l = np.array(self.results.time_series('StorageRouting','outflow','catchment')[final_links])*PER_SECOND_TO_PER_DAY * M3_TO_L
        total_area = sum(self.model.parameters('DepthToRate',component='Runoff').area)
        records = []
        for c in self.impl.meta['constituents']:
            mod, flux = self.impl.transport_model(c)
            constituent_loads_kg = np.array(self.results.time_series(mod,flux,'catchment',constituent=c)[final_links])*PER_SECOND_TO_PER_DAY
            records.append(dict(
                Region='ModelTotal',
                Constituent=c,
                Area=total_area,
                Total_Load_in_Kg=constituent_loads_kg.sum(),
                Flow_Litres=flow_l.sum(),
                Concentration=0.0,  # placeholder — not yet computed
                LoadPerArea=0.0,    # placeholder — not yet computed
                NumDays=flow_l.shape[0]
            ))
        return pd.DataFrame(records)

    def climate_table(self):
        """Return a long-format table of summed water-balance depths (m) per catchment/FU/element."""
        orig_tbls = []
        melted_tbls = []
        variables = [('rainfall','Rainfall'),('actualET','Actual ET'),('baseflow','Baseflow'),('runoff','Runoff (Quickflow)')]
        for v,lbl in variables:
            tbl = self.results.table('Sacramento',v,'catchment','hru','sum','sum')*MM_TO_M
            if v=='runoff':
                # Quickflow = total runoff minus baseflow (the previous table in the list).
                tbl = tbl - orig_tbls[-1]
            orig_tbls.append(tbl)
            tbl = tbl.reset_index().melt(id_vars=['index'],value_vars=list(tbl.columns)).rename(columns={'index':'Catchment','variable':'FU','value':'Depth_m'})
            tbl['Element']=lbl
            melted_tbls.append(tbl)
        return pd.concat(melted_tbls).sort_values(['Catchment','FU','Element'])

    def fu_areas_table(self):
        """Return per-catchment functional-unit areas."""
        tbl = self.model.parameters('DepthToRate',component='Runoff')
        tbl = tbl[['catchment','cgu','area']].sort_values(['catchment','cgu']).rename(columns={'catchment':'Catchment','cgu':'CGU'})
        return tbl

    def fu_summary_table(self):
        """Return the total generated load (kg) for each (constituent, FU) combination."""
        summary = []
        seen = {}  # cache of summed tables keyed by (model, flux) to avoid re-tabulating
        for con in self.impl.meta['constituents']:
            for fu in self.impl.meta['fus']:
                combo = self.impl.generation_model(con,fu)
                if not combo in seen:
                    model,flux = combo
                    tbl = self.results.table(model,flux,'constituent','cgu','sum','sum') * PER_SECOND_TO_PER_DAY
                    seen[combo]=tbl
                tbl = seen[combo]
                summary.append((con,fu,tbl.loc[con,fu]))
        return pd.DataFrame(summary,columns=['Constituent','FU','Total_Load_in_Kg'])

    def regional_summary_table(self):
        """Return mass-balance summaries for every reporting region, concatenated."""
        # BUG FIX: was `self.mass_balance_summary_table(self,region)` — passing
        # `self` explicitly through a bound method supplies it twice and raises
        # TypeError (the method only accepts an optional `region`).
        tables = [self.mass_balance_summary_table(region) for region in self.impl.meta['regions']]
        for tbl,region in zip(tables,self.impl.meta['regions']):
            tbl['SummaryRegion']=region
        return pd.concat(tables)

    def overall_summary_table(self):
        """Return the whole-of-model mass-balance summary."""
        return self.mass_balance_summary_table()

    def constituent_loss_table(self,region=None):
        """Not yet implemented — returns None, which mass_balance_summary_table skips."""
        loss_fluxes = [
            ('InstreamFineSediment','loadToFloodplain','Sediment - Fine'),
            ('InstreamDissolvedNutrientDecay','loadToFloodplain'),
            # ('InstreamDissolvedNutrientDecay','loadDecayed'), #TODO
        ]
        loss_states = [
            ('InstreamFineSediment','channelStoreFine','Sediment - Fine')
        ]
        pass

    def residual_constituent_table(self,region=None):
        """Return total constituent mass (kg) still stored in the system at end of run."""
        # Need to query final states (and initial states?)
        mass_states = [
            ('LumpedConstituentRouting','storedMass'),
            ('InstreamFineSediment','totalStoredMass', 'Sediment - Fine'),
            ('InstreamDissolvedNutrientDecay','totalStoredMass'),
            ('InstreamCoarseSediment','totalStoredMass', 'Sediment - Coarse')
        ]
        tables = []
        for state in mass_states:
            m = state[0]
            v = state[1]
            values = self.get_final_states(m)
            if len(state)>2:
                # Single-constituent model: tag every row with its constituent name.
                values['constituent']=state[2]
            tbl = values[['constituent',v]].groupby('constituent').sum().reset_index().rename(columns={
                'constituent':'Constituent',
                v:'Total_Load_in_Kg'
            })
            tables.append(tbl)
        return pd.concat(tables).groupby('Constituent').sum().reset_index()

    def mass_balance_summary_table(self,region=None):
        """Return Supply/Export/Loss/Residual loads per constituent as one long table."""
        cols =['Constituent','Total_Load_in_Kg']
        input_tables = {
            'Supply':self.fu_summary_table(),
            'Export':self.outlet_nodes_rates_table(),
            'Loss':self.constituent_loss_table(region),
            'Residual':self.residual_constituent_table(region)
        }
        result = []
        for k,tbl in input_tables.items():
            if tbl is None:
                # Best-effort: report and continue without the missing element.
                print(f'Missing table {k}')
                continue
            tbl = tbl[cols].groupby('Constituent').sum().reset_index()
            tbl['MassBalanceElement'] = k
            tbl = tbl[['Constituent','MassBalanceElement','Total_Load_in_Kg']]
            result.append(tbl)
        return pd.concat(result).sort_values(['Constituent','MassBalanceElement'])
def _ensure_uncompressed(fn):
if os.path.exists(fn):
return
gzfn = fn + '.gz'
if not os.path.exists(gzfn):
raise Exception('File not found (compressed or uncompressed): %s'%fn)
os.system('gunzip %s'%gzfn)
assert os.path.exists(fn)
|
"""Config flow to configure the Netgear integration."""
import logging
from urllib.parse import urlparse
from pynetgear import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_USER
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from .const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
DEFAULT_NAME,
DOMAIN,
MODELS_PORT_80,
MODELS_PORT_5555,
PORT_80,
PORT_5555,
)
from .errors import CannotLoginException
from .router import get_api
_LOGGER = logging.getLogger(__name__)
def _discovery_schema_with_defaults(discovery_info):
    """Build the discovery-step form schema, pre-filled from `discovery_info`."""
    shared_fields = _ordered_shared_schema(discovery_info)
    return vol.Schema(shared_fields)
def _user_schema_with_defaults(user_input):
    """Build the manual-setup form schema: a host field plus the shared credential fields."""
    fields = {
        vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, "")): str,
        **_ordered_shared_schema(user_input),
    }
    return vol.Schema(fields)
def _ordered_shared_schema(schema_input):
    """Return the username/password fields shared by every setup form."""
    username_default = schema_input.get(CONF_USERNAME, "")
    password_default = schema_input.get(CONF_PASSWORD, "")
    return {
        vol.Optional(CONF_USERNAME, default=username_default): str,
        vol.Required(CONF_PASSWORD, default=password_default): str,
    }
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Options for the component."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Store the config entry whose options are being edited."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Show and process the single options form."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)
        current_consider_home = self.config_entry.options.get(
            CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
        )
        settings_schema = vol.Schema(
            {vol.Optional(CONF_CONSIDER_HOME, default=current_consider_home): int}
        )
        return self.async_show_form(step_id="init", data_schema=settings_schema)
class NetgearFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow (manual setup and SSDP discovery)."""

    VERSION = 1

    def __init__(self):
        """Initialize the netgear config flow."""
        # Defaults used both to pre-fill the form and as fallbacks for
        # fields the user leaves empty.
        self.placeholders = {
            CONF_HOST: DEFAULT_HOST,
            CONF_PORT: DEFAULT_PORT,
            CONF_USERNAME: DEFAULT_USER,
            CONF_SSL: False,
        }
        self.discovered = False

    @staticmethod
    @callback
    def async_get_options_flow(
        config_entry: config_entries.ConfigEntry,
    ) -> OptionsFlowHandler:
        """Get the options flow."""
        return OptionsFlowHandler(config_entry)

    async def _show_setup_form(self, user_input=None, errors=None):
        """Show the setup form to the user."""
        if not user_input:
            user_input = {}
        if self.discovered:
            # Discovered devices already have a host; don't ask for one.
            data_schema = _discovery_schema_with_defaults(user_input)
        else:
            data_schema = _user_schema_with_defaults(user_input)
        return self.async_show_form(
            step_id="user",
            data_schema=data_schema,
            errors=errors or {},
            description_placeholders=self.placeholders,
        )

    async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
        """Initialize flow from ssdp."""
        updated_data = {}

        device_url = urlparse(discovery_info.ssdp_location)
        if device_url.hostname:
            updated_data[CONF_HOST] = device_url.hostname
        _LOGGER.debug("Netgear ssdp discovery info: %s", discovery_info)

        await self.async_set_unique_id(discovery_info.upnp[ssdp.ATTR_UPNP_SERIAL])
        self._abort_if_unique_id_configured(updates=updated_data)

        updated_data[CONF_SSL] = device_url.scheme == "https"

        # Port selection: start with the default, then apply model-specific
        # overrides — MODELS_PORT_5555 wins over MODELS_PORT_80 and forces SSL.
        updated_data[CONF_PORT] = DEFAULT_PORT
        for model in MODELS_PORT_80:
            if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
                model
            ) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
                model
            ):
                updated_data[CONF_PORT] = PORT_80
        for model in MODELS_PORT_5555:
            if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
                model
            ) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
                model
            ):
                updated_data[CONF_PORT] = PORT_5555
                updated_data[CONF_SSL] = True

        self.placeholders.update(updated_data)
        self.discovered = True
        return await self.async_step_user()

    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        errors = {}
        if user_input is None:
            return await self._show_setup_form()

        host = user_input.get(CONF_HOST, self.placeholders[CONF_HOST])
        port = self.placeholders[CONF_PORT]
        ssl = self.placeholders[CONF_SSL]
        username = user_input.get(CONF_USERNAME, self.placeholders[CONF_USERNAME])
        password = user_input[CONF_PASSWORD]
        if not username:
            username = self.placeholders[CONF_USERNAME]

        # Open connection and check authentication
        try:
            api = await self.hass.async_add_executor_job(
                get_api, password, host, username, port, ssl
            )
        except CannotLoginException:
            errors["base"] = "config"

        if errors:
            return await self._show_setup_form(user_input, errors)

        # Check if already configured
        info = await self.hass.async_add_executor_job(api.get_info)
        await self.async_set_unique_id(info["SerialNumber"], raise_on_progress=False)
        self._abort_if_unique_id_configured()

        config_data = {
            CONF_USERNAME: username,
            CONF_PASSWORD: password,
            CONF_HOST: host,
            CONF_PORT: api.port,
            CONF_SSL: api.ssl,
        }

        if info.get("ModelName") is not None and info.get("DeviceName") is not None:
            # BUG FIX: double quotes nested inside a double-quoted f-string are
            # a SyntaxError before Python 3.12 — use single quotes inside.
            name = f"{info['ModelName']} - {info['DeviceName']}"
        else:
            name = info.get("ModelName", DEFAULT_NAME)

        return self.async_create_entry(
            title=name,
            data=config_data,
        )
| """Config flow to configure the Netgear integration."""
import logging
from urllib.parse import urlparse
from pynetgear import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_USER
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from .const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
DEFAULT_NAME,
DOMAIN,
MODELS_PORT_80,
MODELS_PORT_5555,
PORT_80,
PORT_5555,
)
from .errors import CannotLoginException
from .router import get_api
_LOGGER = logging.getLogger(__name__)
def _discovery_schema_with_defaults(discovery_info):
    """Return the discovery-step schema, defaults taken from `discovery_info`."""
    return vol.Schema(dict(_ordered_shared_schema(discovery_info)))
def _user_schema_with_defaults(user_input):
    """Return the manual-setup schema: host field followed by the shared fields."""
    host_default = user_input.get(CONF_HOST, "")
    schema_dict = {vol.Optional(CONF_HOST, default=host_default): str}
    schema_dict.update(_ordered_shared_schema(user_input))
    return vol.Schema(schema_dict)
def _ordered_shared_schema(schema_input):
    """Return the credential fields (optional username, required password) common to all forms."""
    fields = {}
    fields[vol.Optional(CONF_USERNAME, default=schema_input.get(CONF_USERNAME, ""))] = str
    fields[vol.Required(CONF_PASSWORD, default=schema_input.get(CONF_PASSWORD, ""))] = str
    return fields
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Options for the component."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Keep a reference to the entry being configured."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Handle the options form; save on submit, otherwise display it."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)
        default_consider_home = self.config_entry.options.get(
            CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
        )
        schema = vol.Schema(
            {vol.Optional(CONF_CONSIDER_HOME, default=default_consider_home): int}
        )
        return self.async_show_form(step_id="init", data_schema=schema)
class NetgearFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow."""

    VERSION = 1

    def __init__(self):
        """Initialize the netgear config flow."""
        # Defaults used to pre-fill the form and as fallbacks for fields
        # the user leaves empty.
        self.placeholders = {
            CONF_HOST: DEFAULT_HOST,
            CONF_PORT: DEFAULT_PORT,
            CONF_USERNAME: DEFAULT_USER,
            CONF_SSL: False,
        }
        self.discovered = False

    @staticmethod
    @callback
    def async_get_options_flow(
        config_entry: config_entries.ConfigEntry,
    ) -> OptionsFlowHandler:
        """Get the options flow."""
        return OptionsFlowHandler(config_entry)

    async def _show_setup_form(self, user_input=None, errors=None):
        """Show the setup form to the user."""
        if not user_input:
            user_input = {}
        if self.discovered:
            # Discovered devices already have a host; omit the host field.
            data_schema = _discovery_schema_with_defaults(user_input)
        else:
            data_schema = _user_schema_with_defaults(user_input)
        return self.async_show_form(
            step_id="user",
            data_schema=data_schema,
            errors=errors or {},
            description_placeholders=self.placeholders,
        )

    async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
        """Initialize flow from ssdp."""
        updated_data = {}

        device_url = urlparse(discovery_info.ssdp_location)
        if device_url.hostname:
            updated_data[CONF_HOST] = device_url.hostname
        _LOGGER.debug("Netgear ssdp discovery info: %s", discovery_info)

        # Abort early if this serial number is already configured, while
        # still pushing the freshly discovered host into the existing entry.
        await self.async_set_unique_id(discovery_info.upnp[ssdp.ATTR_UPNP_SERIAL])
        self._abort_if_unique_id_configured(updates=updated_data)

        if device_url.scheme == "https":
            updated_data[CONF_SSL] = True
        else:
            updated_data[CONF_SSL] = False

        # Port selection: default first, then model-specific overrides.
        # The MODELS_PORT_5555 loop runs last, so it wins and forces SSL.
        updated_data[CONF_PORT] = DEFAULT_PORT
        for model in MODELS_PORT_80:
            if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
                model
            ) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
                model
            ):
                updated_data[CONF_PORT] = PORT_80
        for model in MODELS_PORT_5555:
            if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
                model
            ) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
                model
            ):
                updated_data[CONF_PORT] = PORT_5555
                updated_data[CONF_SSL] = True

        self.placeholders.update(updated_data)
        self.discovered = True
        return await self.async_step_user()

    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        errors = {}
        if user_input is None:
            return await self._show_setup_form()

        host = user_input.get(CONF_HOST, self.placeholders[CONF_HOST])
        port = self.placeholders[CONF_PORT]
        ssl = self.placeholders[CONF_SSL]
        username = user_input.get(CONF_USERNAME, self.placeholders[CONF_USERNAME])
        password = user_input[CONF_PASSWORD]
        if not username:
            username = self.placeholders[CONF_USERNAME]

        # Open connection and check authentication
        try:
            api = await self.hass.async_add_executor_job(
                get_api, password, host, username, port, ssl
            )
        except CannotLoginException:
            errors["base"] = "config"

        if errors:
            return await self._show_setup_form(user_input, errors)

        # Check if already configured
        info = await self.hass.async_add_executor_job(api.get_info)
        await self.async_set_unique_id(info["SerialNumber"], raise_on_progress=False)
        self._abort_if_unique_id_configured()

        config_data = {
            CONF_USERNAME: username,
            CONF_PASSWORD: password,
            CONF_HOST: host,
            # Use the port/ssl the API actually connected with (get_api may
            # have auto-detected different values than requested).
            CONF_PORT: api.port,
            CONF_SSL: api.ssl,
        }

        if info.get("ModelName") is not None and info.get("DeviceName") is not None:
            name = f"{info['ModelName']} - {info['DeviceName']}"
        else:
            name = info.get("ModelName", DEFAULT_NAME)

        return self.async_create_entry(
            title=name,
            data=config_data,
        )
|
"""Classes to help gather user submissions."""
from __future__ import annotations
import abc
import asyncio
from collections.abc import Mapping
from types import MappingProxyType
from typing import Any, TypedDict
import uuid
import voluptuous as vol
from .core import HomeAssistant, callback
from .exceptions import HomeAssistantError
RESULT_TYPE_FORM = "form"
RESULT_TYPE_CREATE_ENTRY = "create_entry"
RESULT_TYPE_ABORT = "abort"
RESULT_TYPE_EXTERNAL_STEP = "external"
RESULT_TYPE_EXTERNAL_STEP_DONE = "external_done"
RESULT_TYPE_SHOW_PROGRESS = "progress"
RESULT_TYPE_SHOW_PROGRESS_DONE = "progress_done"
# Event that is fired when a flow is progressed via external or progress source.
EVENT_DATA_ENTRY_FLOW_PROGRESSED = "data_entry_flow_progressed"
class FlowError(HomeAssistantError):
    """Base error while configuring an account via a data entry flow."""
class UnknownHandler(FlowError):
    """Raised when an unknown handler is specified."""
class UnknownFlow(FlowError):
    """Raised when an unknown flow (or one that could not be created) is specified."""
class UnknownStep(FlowError):
    """Raised when a flow handler does not implement the requested step."""
class AbortFlow(FlowError):
    """Exception to indicate a flow needs to be aborted."""

    def __init__(
        self, reason: str, description_placeholders: dict | None = None
    ) -> None:
        """Initialize an abort flow exception.

        reason: machine-readable abort reason, surfaced in the abort result.
        description_placeholders: optional values substituted into the
            abort description shown to the user.
        """
        super().__init__(f"Flow aborted: {reason}")
        self.reason = reason
        self.description_placeholders = description_placeholders
class FlowResult(TypedDict, total=False):
    """Typed result dict.

    All keys are optional (``total=False``); which keys are present depends
    on the result ``type`` (one of the RESULT_TYPE_* constants).
    """

    version: int
    type: str  # one of the RESULT_TYPE_* constants
    flow_id: str
    handler: str
    title: str
    data: Mapping[str, Any]
    step_id: str
    data_schema: vol.Schema
    extra: str
    required: bool
    errors: dict[str, str] | None
    description: str | None
    description_placeholders: dict[str, Any] | None
    progress_action: str
    url: str
    reason: str
    context: dict[str, Any]
    result: Any
    last_step: bool | None
    options: Mapping[str, Any]
class FlowManager(abc.ABC):
    """Manage all the flows that are in progress."""

    def __init__(
        self,
        hass: HomeAssistant,
    ) -> None:
        """Initialize the flow manager."""
        self.hass = hass
        # Per-handler futures resolved when a flow's first step completes.
        self._initializing: dict[str, list[asyncio.Future]] = {}
        # Per-handler init tasks, kept so they can be cancelled at shutdown.
        self._initialize_tasks: dict[str, list[asyncio.Task]] = {}
        # Flows in progress, keyed by flow_id.
        self._progress: dict[str, Any] = {}

    async def async_wait_init_flow_finish(self, handler: str) -> None:
        """Wait till all flows in progress are initialized."""
        if not (current := self._initializing.get(handler)):
            return
        await asyncio.wait(current)

    @abc.abstractmethod
    async def async_create_flow(
        self,
        handler_key: Any,
        *,
        context: dict[str, Any] | None = None,
        data: dict[str, Any] | None = None,
    ) -> FlowHandler:
        """Create a flow for specified handler.

        Handler key is the domain of the component that we want to set up.
        """

    @abc.abstractmethod
    async def async_finish_flow(
        self, flow: FlowHandler, result: FlowResult
    ) -> FlowResult:
        """Finish a config flow and add an entry."""

    async def async_post_init(self, flow: FlowHandler, result: FlowResult) -> None:
        """Entry has finished executing its first step asynchronously."""

    @callback
    def async_has_matching_flow(
        self, handler: str, context: dict[str, Any], data: Any
    ) -> bool:
        """Check if an existing matching flow is in progress with the same handler, context, and data."""
        return any(
            flow
            for flow in self._progress.values()
            if flow.handler == handler
            and flow.context["source"] == context["source"]
            and flow.init_data == data
        )

    @callback
    def async_progress(self, include_uninitialized: bool = False) -> list[FlowResult]:
        """Return the flows in progress."""
        return [
            {
                "flow_id": flow.flow_id,
                "handler": flow.handler,
                "context": flow.context,
                "step_id": flow.cur_step["step_id"] if flow.cur_step else None,
            }
            for flow in self._progress.values()
            if include_uninitialized or flow.cur_step is not None
        ]

    async def async_init(
        self, handler: str, *, context: dict[str, Any] | None = None, data: Any = None
    ) -> FlowResult:
        """Start a configuration flow."""
        if context is None:
            context = {}

        init_done: asyncio.Future = asyncio.Future()
        self._initializing.setdefault(handler, []).append(init_done)

        # Run the init in a task so async_shutdown can cancel it.
        task = asyncio.create_task(self._async_init(init_done, handler, context, data))
        self._initialize_tasks.setdefault(handler, []).append(task)

        try:
            flow, result = await task
        finally:
            self._initialize_tasks[handler].remove(task)
            self._initializing[handler].remove(init_done)

        if result["type"] != RESULT_TYPE_ABORT:
            await self.async_post_init(flow, result)

        return result

    async def _async_init(
        self,
        init_done: asyncio.Future,
        handler: str,
        context: dict,
        data: Any,
    ) -> tuple[FlowHandler, FlowResult]:
        """Run the init in a task to allow it to be canceled at shutdown."""
        flow = await self.async_create_flow(handler, context=context, data=data)
        if not flow:
            raise UnknownFlow("Flow was not created")
        flow.hass = self.hass
        flow.handler = handler
        flow.flow_id = uuid.uuid4().hex
        flow.context = context
        flow.init_data = data
        self._progress[flow.flow_id] = flow
        result = await self._async_handle_step(flow, flow.init_step, data, init_done)
        return flow, result

    async def async_shutdown(self) -> None:
        """Cancel any initializing flows."""
        for task_list in self._initialize_tasks.values():
            for task in task_list:
                task.cancel()

    async def async_configure(
        self, flow_id: str, user_input: dict | None = None
    ) -> FlowResult:
        """Continue a configuration flow."""
        if (flow := self._progress.get(flow_id)) is None:
            raise UnknownFlow

        cur_step = flow.cur_step

        # Validate user input against the step's schema before dispatching.
        if cur_step.get("data_schema") is not None and user_input is not None:
            user_input = cur_step["data_schema"](user_input)

        result = await self._async_handle_step(flow, cur_step["step_id"], user_input)

        if cur_step["type"] in (RESULT_TYPE_EXTERNAL_STEP, RESULT_TYPE_SHOW_PROGRESS):
            if cur_step["type"] == RESULT_TYPE_EXTERNAL_STEP and result["type"] not in (
                RESULT_TYPE_EXTERNAL_STEP,
                RESULT_TYPE_EXTERNAL_STEP_DONE,
            ):
                raise ValueError(
                    "External step can only transition to "
                    "external step or external step done."
                )
            if cur_step["type"] == RESULT_TYPE_SHOW_PROGRESS and result["type"] not in (
                RESULT_TYPE_SHOW_PROGRESS,
                RESULT_TYPE_SHOW_PROGRESS_DONE,
            ):
                raise ValueError(
                    "Show progress can only transition to show progress or show progress done."
                )

            # If the result has changed from last result, fire event to update
            # the frontend.
            if (
                cur_step["step_id"] != result.get("step_id")
                or result["type"] == RESULT_TYPE_SHOW_PROGRESS
            ):
                # Tell frontend to reload the flow state.
                self.hass.bus.async_fire(
                    EVENT_DATA_ENTRY_FLOW_PROGRESSED,
                    {"handler": flow.handler, "flow_id": flow_id, "refresh": True},
                )

        return result

    @callback
    def async_abort(self, flow_id: str) -> None:
        """Abort a flow."""
        if self._progress.pop(flow_id, None) is None:
            raise UnknownFlow

    async def _async_handle_step(
        self,
        flow: Any,
        step_id: str,
        user_input: dict | None,
        step_done: asyncio.Future | None = None,
    ) -> FlowResult:
        """Handle a step of a flow."""
        method = f"async_step_{step_id}"

        if not hasattr(flow, method):
            self._progress.pop(flow.flow_id)
            if step_done:
                step_done.set_result(None)
            raise UnknownStep(
                f"Handler {flow.__class__.__name__} doesn't support step {step_id}"
            )

        try:
            result: FlowResult = await getattr(flow, method)(user_input)
        except AbortFlow as err:
            result = _create_abort_data(
                flow.flow_id, flow.handler, err.reason, err.description_placeholders
            )

        # Mark the step as done.
        # We do this before calling async_finish_flow because config entries will hit a
        # circular dependency where async_finish_flow sets up new entry, which needs the
        # integration to be set up, which is waiting for init to be done.
        if step_done:
            step_done.set_result(None)

        if result["type"] not in (
            RESULT_TYPE_FORM,
            RESULT_TYPE_EXTERNAL_STEP,
            RESULT_TYPE_CREATE_ENTRY,
            RESULT_TYPE_ABORT,
            RESULT_TYPE_EXTERNAL_STEP_DONE,
            RESULT_TYPE_SHOW_PROGRESS,
            RESULT_TYPE_SHOW_PROGRESS_DONE,
        ):
            # BUG FIX: double quotes nested inside a double-quoted f-string
            # are a SyntaxError before Python 3.12 — use single quotes inside.
            raise ValueError(f"Handler returned incorrect type: {result['type']}")

        if result["type"] in (
            RESULT_TYPE_FORM,
            RESULT_TYPE_EXTERNAL_STEP,
            RESULT_TYPE_EXTERNAL_STEP_DONE,
            RESULT_TYPE_SHOW_PROGRESS,
            RESULT_TYPE_SHOW_PROGRESS_DONE,
        ):
            flow.cur_step = result
            return result

        # We pass a copy of the result because we're mutating our version
        result = await self.async_finish_flow(flow, result.copy())

        # _async_finish_flow may change result type, check it again
        if result["type"] == RESULT_TYPE_FORM:
            flow.cur_step = result
            return result

        # Abort and Success results both finish the flow
        self._progress.pop(flow.flow_id)
        return result
class FlowHandler:
    """Handle the configuration flow of a component.

    Subclasses implement ``async_step_<step_id>`` methods and use the
    ``async_*`` helpers below to build FlowResult dicts for the manager.
    """

    # Set by flow manager
    cur_step: dict[str, str] | None = None

    # While not purely typed, it makes typehinting more useful for us
    # and removes the need for constant None checks or asserts.
    flow_id: str = None  # type: ignore
    hass: HomeAssistant = None  # type: ignore
    handler: str = None  # type: ignore

    # Ensure the attribute has a subscriptable, but immutable, default value.
    context: dict[str, Any] = MappingProxyType({})  # type: ignore

    # Set by _async_create_flow callback
    init_step = "init"

    # The initial data that was used to start the flow
    init_data: Any = None

    # Set by developer
    VERSION = 1

    @property
    def source(self) -> str | None:
        """Source that initialized the flow."""
        if not hasattr(self, "context"):
            return None
        return self.context.get("source", None)

    @property
    def show_advanced_options(self) -> bool:
        """If we should show advanced options."""
        if not hasattr(self, "context"):
            return False
        return self.context.get("show_advanced_options", False)

    @callback
    def async_show_form(
        self,
        *,
        step_id: str,
        data_schema: vol.Schema = None,
        errors: dict[str, str] | None = None,
        description_placeholders: dict[str, Any] | None = None,
        last_step: bool | None = None,
    ) -> FlowResult:
        """Return the definition of a form to gather user input."""
        return {
            "type": RESULT_TYPE_FORM,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": step_id,
            "data_schema": data_schema,
            "errors": errors,
            "description_placeholders": description_placeholders,
            "last_step": last_step,  # Display next or submit button in frontend
        }

    @callback
    def async_create_entry(
        self,
        *,
        title: str,
        data: Mapping[str, Any],
        description: str | None = None,
        description_placeholders: dict | None = None,
    ) -> FlowResult:
        """Finish config flow and create a config entry."""
        return {
            "version": self.VERSION,
            "type": RESULT_TYPE_CREATE_ENTRY,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "title": title,
            "data": data,
            "description": description,
            "description_placeholders": description_placeholders,
        }

    @callback
    def async_abort(
        self, *, reason: str, description_placeholders: dict | None = None
    ) -> FlowResult:
        """Abort the config flow."""
        return _create_abort_data(
            self.flow_id, self.handler, reason, description_placeholders
        )

    @callback
    def async_external_step(
        self, *, step_id: str, url: str, description_placeholders: dict | None = None
    ) -> FlowResult:
        """Return the definition of an external step for the user to take."""
        return {
            "type": RESULT_TYPE_EXTERNAL_STEP,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": step_id,
            "url": url,
            "description_placeholders": description_placeholders,
        }

    @callback
    def async_external_step_done(self, *, next_step_id: str) -> FlowResult:
        """Return the result marking the external step as done and naming the next step."""
        return {
            "type": RESULT_TYPE_EXTERNAL_STEP_DONE,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": next_step_id,
        }

    @callback
    def async_show_progress(
        self,
        *,
        step_id: str,
        progress_action: str,
        description_placeholders: dict | None = None,
    ) -> FlowResult:
        """Show a progress message to the user, without user input allowed."""
        return {
            "type": RESULT_TYPE_SHOW_PROGRESS,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": step_id,
            "progress_action": progress_action,
            "description_placeholders": description_placeholders,
        }

    @callback
    def async_show_progress_done(self, *, next_step_id: str) -> FlowResult:
        """Mark the progress done."""
        return {
            "type": RESULT_TYPE_SHOW_PROGRESS_DONE,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": next_step_id,
        }
@callback
def _create_abort_data(
    flow_id: str,
    handler: str,
    reason: str,
    description_placeholders: dict | None = None,
) -> FlowResult:
    """Return a FlowResult dict describing an aborted flow.

    (Previous docstring incorrectly described an external step.)
    """
    return {
        "type": RESULT_TYPE_ABORT,
        "flow_id": flow_id,
        "handler": handler,
        "reason": reason,
        "description_placeholders": description_placeholders,
    }
| """Classes to help gather user submissions."""
from __future__ import annotations
import abc
import asyncio
from collections.abc import Mapping
from types import MappingProxyType
from typing import Any, TypedDict
import uuid
import voluptuous as vol
from .core import HomeAssistant, callback
from .exceptions import HomeAssistantError
RESULT_TYPE_FORM = "form"
RESULT_TYPE_CREATE_ENTRY = "create_entry"
RESULT_TYPE_ABORT = "abort"
RESULT_TYPE_EXTERNAL_STEP = "external"
RESULT_TYPE_EXTERNAL_STEP_DONE = "external_done"
RESULT_TYPE_SHOW_PROGRESS = "progress"
RESULT_TYPE_SHOW_PROGRESS_DONE = "progress_done"
# Event that is fired when a flow is progressed via external or progress source.
EVENT_DATA_ENTRY_FLOW_PROGRESSED = "data_entry_flow_progressed"
class FlowError(HomeAssistantError):
    """Base error while configuring an account via a data entry flow."""
class UnknownHandler(FlowError):
    """Raised when an unknown handler is specified."""
class UnknownFlow(FlowError):
    """Raised when an unknown flow (or one that could not be created) is specified."""
class UnknownStep(FlowError):
    """Raised when a flow handler does not implement the requested step."""
class AbortFlow(FlowError):
    """Exception to indicate a flow needs to be aborted."""

    def __init__(
        self, reason: str, description_placeholders: dict | None = None
    ) -> None:
        """Initialize an abort flow exception.

        reason: machine-readable abort reason, surfaced in the abort result.
        description_placeholders: optional values substituted into the
            abort description shown to the user.
        """
        super().__init__(f"Flow aborted: {reason}")
        self.reason = reason
        self.description_placeholders = description_placeholders
class FlowResult(TypedDict, total=False):
    """Typed result dict.

    All keys are optional (``total=False``); which keys are present depends
    on the result ``type`` (one of the RESULT_TYPE_* constants).
    """

    version: int
    type: str  # one of the RESULT_TYPE_* constants
    flow_id: str
    handler: str
    title: str
    data: Mapping[str, Any]
    step_id: str
    data_schema: vol.Schema
    extra: str
    required: bool
    errors: dict[str, str] | None
    description: str | None
    description_placeholders: dict[str, Any] | None
    progress_action: str
    url: str
    reason: str
    context: dict[str, Any]
    result: Any
    last_step: bool | None
    options: Mapping[str, Any]
class FlowManager(abc.ABC):
"""Manage all the flows that are in progress."""
def __init__(
    self,
    hass: HomeAssistant,
) -> None:
    """Initialize the flow manager."""
    self.hass = hass
    # Per-handler futures resolved when a flow's first step completes.
    self._initializing: dict[str, list[asyncio.Future]] = {}
    # Per-handler init tasks, kept so they can be cancelled at shutdown.
    self._initialize_tasks: dict[str, list[asyncio.Task]] = {}
    # Flows in progress, keyed by flow_id.
    self._progress: dict[str, Any] = {}
async def async_wait_init_flow_finish(self, handler: str) -> None:
    """Wait till all flows in progress for `handler` are initialized."""
    # No pending initializations for this handler: nothing to wait on.
    if not (current := self._initializing.get(handler)):
        return
    await asyncio.wait(current)
@abc.abstractmethod
async def async_create_flow(
    self,
    handler_key: Any,
    *,
    context: dict[str, Any] | None = None,
    data: dict[str, Any] | None = None,
) -> FlowHandler:
    """Create a flow for specified handler.

    Handler key is the domain of the component that we want to set up.
    """
@abc.abstractmethod
async def async_finish_flow(
self, flow: FlowHandler, result: FlowResult
) -> FlowResult:
"""Finish a config flow and add an entry."""
async def async_post_init(self, flow: FlowHandler, result: FlowResult) -> None:
"""Entry has finished executing its first step asynchronously."""
@callback
def async_has_matching_flow(
self, handler: str, context: dict[str, Any], data: Any
) -> bool:
"""Check if an existing matching flow is in progress with the same handler, context, and data."""
return any(
flow
for flow in self._progress.values()
if flow.handler == handler
and flow.context["source"] == context["source"]
and flow.init_data == data
)
@callback
def async_progress(self, include_uninitialized: bool = False) -> list[FlowResult]:
"""Return the flows in progress."""
return [
{
"flow_id": flow.flow_id,
"handler": flow.handler,
"context": flow.context,
"step_id": flow.cur_step["step_id"] if flow.cur_step else None,
}
for flow in self._progress.values()
if include_uninitialized or flow.cur_step is not None
]
async def async_init(
self, handler: str, *, context: dict[str, Any] | None = None, data: Any = None
) -> FlowResult:
"""Start a configuration flow."""
if context is None:
context = {}
init_done: asyncio.Future = asyncio.Future()
self._initializing.setdefault(handler, []).append(init_done)
task = asyncio.create_task(self._async_init(init_done, handler, context, data))
self._initialize_tasks.setdefault(handler, []).append(task)
try:
flow, result = await task
finally:
self._initialize_tasks[handler].remove(task)
self._initializing[handler].remove(init_done)
if result["type"] != RESULT_TYPE_ABORT:
await self.async_post_init(flow, result)
return result
async def _async_init(
self,
init_done: asyncio.Future,
handler: str,
context: dict,
data: Any,
) -> tuple[FlowHandler, FlowResult]:
"""Run the init in a task to allow it to be canceled at shutdown."""
flow = await self.async_create_flow(handler, context=context, data=data)
if not flow:
raise UnknownFlow("Flow was not created")
flow.hass = self.hass
flow.handler = handler
flow.flow_id = uuid.uuid4().hex
flow.context = context
flow.init_data = data
self._progress[flow.flow_id] = flow
result = await self._async_handle_step(flow, flow.init_step, data, init_done)
return flow, result
async def async_shutdown(self) -> None:
"""Cancel any initializing flows."""
for task_list in self._initialize_tasks.values():
for task in task_list:
task.cancel()
async def async_configure(
self, flow_id: str, user_input: dict | None = None
) -> FlowResult:
"""Continue a configuration flow."""
if (flow := self._progress.get(flow_id)) is None:
raise UnknownFlow
cur_step = flow.cur_step
if cur_step.get("data_schema") is not None and user_input is not None:
user_input = cur_step["data_schema"](user_input)
result = await self._async_handle_step(flow, cur_step["step_id"], user_input)
if cur_step["type"] in (RESULT_TYPE_EXTERNAL_STEP, RESULT_TYPE_SHOW_PROGRESS):
if cur_step["type"] == RESULT_TYPE_EXTERNAL_STEP and result["type"] not in (
RESULT_TYPE_EXTERNAL_STEP,
RESULT_TYPE_EXTERNAL_STEP_DONE,
):
raise ValueError(
"External step can only transition to "
"external step or external step done."
)
if cur_step["type"] == RESULT_TYPE_SHOW_PROGRESS and result["type"] not in (
RESULT_TYPE_SHOW_PROGRESS,
RESULT_TYPE_SHOW_PROGRESS_DONE,
):
raise ValueError(
"Show progress can only transition to show progress or show progress done."
)
# If the result has changed from last result, fire event to update
# the frontend.
if (
cur_step["step_id"] != result.get("step_id")
or result["type"] == RESULT_TYPE_SHOW_PROGRESS
):
# Tell frontend to reload the flow state.
self.hass.bus.async_fire(
EVENT_DATA_ENTRY_FLOW_PROGRESSED,
{"handler": flow.handler, "flow_id": flow_id, "refresh": True},
)
return result
@callback
def async_abort(self, flow_id: str) -> None:
"""Abort a flow."""
if self._progress.pop(flow_id, None) is None:
raise UnknownFlow
async def _async_handle_step(
self,
flow: Any,
step_id: str,
user_input: dict | None,
step_done: asyncio.Future | None = None,
) -> FlowResult:
"""Handle a step of a flow."""
method = f"async_step_{step_id}"
if not hasattr(flow, method):
self._progress.pop(flow.flow_id)
if step_done:
step_done.set_result(None)
raise UnknownStep(
f"Handler {flow.__class__.__name__} doesn't support step {step_id}"
)
try:
result: FlowResult = await getattr(flow, method)(user_input)
except AbortFlow as err:
result = _create_abort_data(
flow.flow_id, flow.handler, err.reason, err.description_placeholders
)
# Mark the step as done.
# We do this before calling async_finish_flow because config entries will hit a
# circular dependency where async_finish_flow sets up new entry, which needs the
# integration to be set up, which is waiting for init to be done.
if step_done:
step_done.set_result(None)
if result["type"] not in (
RESULT_TYPE_FORM,
RESULT_TYPE_EXTERNAL_STEP,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_ABORT,
RESULT_TYPE_EXTERNAL_STEP_DONE,
RESULT_TYPE_SHOW_PROGRESS,
RESULT_TYPE_SHOW_PROGRESS_DONE,
):
raise ValueError(f"Handler returned incorrect type: {result['type']}")
if result["type"] in (
RESULT_TYPE_FORM,
RESULT_TYPE_EXTERNAL_STEP,
RESULT_TYPE_EXTERNAL_STEP_DONE,
RESULT_TYPE_SHOW_PROGRESS,
RESULT_TYPE_SHOW_PROGRESS_DONE,
):
flow.cur_step = result
return result
# We pass a copy of the result because we're mutating our version
result = await self.async_finish_flow(flow, result.copy())
# _async_finish_flow may change result type, check it again
if result["type"] == RESULT_TYPE_FORM:
flow.cur_step = result
return result
# Abort and Success results both finish the flow
self._progress.pop(flow.flow_id)
return result
class FlowHandler:
    """Base class that implements a component's configuration flow."""
    # Filled in by the flow manager while the flow progresses.
    cur_step: dict[str, str] | None = None
    # Declared non-optional for convenient typing; the manager assigns the
    # real values right after the handler is created.
    flow_id: str = None  # type: ignore
    hass: HomeAssistant = None  # type: ignore
    handler: str = None  # type: ignore
    # Subscriptable but immutable placeholder until a real context is set.
    context: dict[str, Any] = MappingProxyType({})  # type: ignore
    # Set by the flow-creation callback.
    init_step = "init"
    # Data the flow was started with.
    init_data: Any = None
    # Set by the integration author.
    VERSION = 1
    @property
    def source(self) -> str | None:
        """Return the source that initialized the flow, if any."""
        if hasattr(self, "context"):
            return self.context.get("source", None)
        return None
    @property
    def show_advanced_options(self) -> bool:
        """Return whether advanced options should be shown."""
        if hasattr(self, "context"):
            return self.context.get("show_advanced_options", False)
        return False
    @callback
    def async_show_form(
        self,
        *,
        step_id: str,
        data_schema: vol.Schema = None,
        errors: dict[str, str] | None = None,
        description_placeholders: dict[str, Any] | None = None,
        last_step: bool | None = None,
    ) -> FlowResult:
        """Build a result describing a form for the user to fill in."""
        form_result: FlowResult = {
            "type": RESULT_TYPE_FORM,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": step_id,
            "data_schema": data_schema,
            "errors": errors,
            "description_placeholders": description_placeholders,
            "last_step": last_step,  # Frontend shows "next" vs "submit" button.
        }
        return form_result
    @callback
    def async_create_entry(
        self,
        *,
        title: str,
        data: Mapping[str, Any],
        description: str | None = None,
        description_placeholders: dict | None = None,
    ) -> FlowResult:
        """Build the result that finishes the flow and creates an entry."""
        entry_result: FlowResult = {
            "version": self.VERSION,
            "type": RESULT_TYPE_CREATE_ENTRY,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "title": title,
            "data": data,
            "description": description,
            "description_placeholders": description_placeholders,
        }
        return entry_result
    @callback
    def async_abort(
        self, *, reason: str, description_placeholders: dict | None = None
    ) -> FlowResult:
        """Build the result that aborts this flow."""
        return _create_abort_data(
            self.flow_id, self.handler, reason, description_placeholders
        )
    @callback
    def async_external_step(
        self, *, step_id: str, url: str, description_placeholders: dict | None = None
    ) -> FlowResult:
        """Build a result pointing the user at an external URL."""
        external_result: FlowResult = {
            "type": RESULT_TYPE_EXTERNAL_STEP,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": step_id,
            "url": url,
            "description_placeholders": description_placeholders,
        }
        return external_result
    @callback
    def async_external_step_done(self, *, next_step_id: str) -> FlowResult:
        """Build the result that marks the external step as completed."""
        return {
            "type": RESULT_TYPE_EXTERNAL_STEP_DONE,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": next_step_id,
        }
    @callback
    def async_show_progress(
        self,
        *,
        step_id: str,
        progress_action: str,
        description_placeholders: dict | None = None,
    ) -> FlowResult:
        """Build a result that shows progress to the user without accepting input."""
        progress_result: FlowResult = {
            "type": RESULT_TYPE_SHOW_PROGRESS,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": step_id,
            "progress_action": progress_action,
            "description_placeholders": description_placeholders,
        }
        return progress_result
    @callback
    def async_show_progress_done(self, *, next_step_id: str) -> FlowResult:
        """Build the result that marks the progress step as finished."""
        return {
            "type": RESULT_TYPE_SHOW_PROGRESS_DONE,
            "flow_id": self.flow_id,
            "handler": self.handler,
            "step_id": next_step_id,
        }
@callback
def _create_abort_data(
    flow_id: str,
    handler: str,
    reason: str,
    description_placeholders: dict | None = None,
) -> FlowResult:
    """Build the result payload for an aborted flow."""
    abort_result: FlowResult = {
        "type": RESULT_TYPE_ABORT,
        "flow_id": flow_id,
        "handler": handler,
        "reason": reason,
        "description_placeholders": description_placeholders,
    }
    return abort_result
# ---- boundary between two concatenated source files (was a stray "|") ----
import logging
import numpy
import os
import random
import shutil
import time
import torch
from draugr import AverageMeter, find_unclaimed_port
from draugr.numpy_utilities import Split
from draugr.torch_utilities import TensorBoardPytorchWriter
from pathlib import Path
from torch import distributed, multiprocessing, nn
from torch.backends import cudnn
from torch.optim import lr_scheduler
from neodroidvision.classification.architectures.self_attention_network import (
SelfAttentionTypeEnum,
make_san,
)
from san_utilities import (
cal_accuracy,
intersection_and_union_gpu,
mixup_data,
mixup_loss,
smooth_loss,
)
def get_logger():
    """Create and return the script's shared console logger.

    Returns:
        logging.Logger: the "main-logger" logger at INFO level with a
        stream handler emitting timestamped, per-process records.
    """
    log = logging.getLogger("main-logger")
    log.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter(
            "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
        )
    )
    log.addHandler(stream_handler)
    return log
def worker_init_fn(worker_id):
    """Seed Python's RNG differently per dataloader worker, reproducibly.

    Args:
        worker_id: Index of the dataloader worker process.
    """
    random.seed(worker_id + CONFIG.manual_seed)
def is_main_process():
    """Return True when this worker should perform logging and checkpointing.

    Without multiprocessing there is only one process; with it, only the
    first rank on each node counts as "main".
    """
    if not CONFIG.multiprocessing_distributed:
        return True
    return CONFIG.rank % CONFIG.ngpus_per_node == 0
def main_worker(gpu, ngpus_per_node, config):
    """Train and validate a SAN classifier on one (possibly distributed) worker.

    Args:
        gpu: GPU index for this worker (the full train_gpu list when not
            launched via multiprocessing.spawn).
        ngpus_per_node: Number of GPUs on this node.
        config: Training configuration namespace; stored in the CONFIG global.
    """
    # Declare every module-level name once, up front: a `global` statement that
    # appears after the name has already been used in the function is a
    # SyntaxError ("name is used prior to global declaration"), which the
    # previous version triggered with its repeated `global logger/writer`.
    global CONFIG, best_acc1, logger, writer
    CONFIG, best_acc1 = config, 0
    train_set = config.dataset_type(CONFIG.dataset_path, Split.Training)
    val_set = config.dataset_type(CONFIG.dataset_path, Split.Validation)
    if CONFIG.distributed:
        if CONFIG.dist_url == "env://" and CONFIG.rank == -1:
            CONFIG.rank = int(os.environ["RANK"])
        if CONFIG.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            CONFIG.rank = CONFIG.rank * ngpus_per_node + gpu
        distributed.init_process_group(
            backend=CONFIG.dist_backend,
            init_method=CONFIG.dist_url,
            world_size=CONFIG.world_size,
            rank=CONFIG.rank,
        )
    model = make_san(
        self_attention_type=SelfAttentionTypeEnum(CONFIG.self_attention_type),
        layers=CONFIG.layers,
        kernels=CONFIG.kernels,
        num_classes=train_set.response_shape[0],
    )
    criterion = nn.CrossEntropyLoss(ignore_index=CONFIG.ignore_label)
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=CONFIG.base_lr,
        momentum=CONFIG.momentum,
        weight_decay=CONFIG.weight_decay,
    )
    if CONFIG.scheduler == "step":
        scheduler = lr_scheduler.MultiStepLR(
            optimizer, milestones=CONFIG.step_epochs, gamma=0.1
        )
    elif CONFIG.scheduler == "cosine":
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=CONFIG.epochs)
    if is_main_process():
        logger = get_logger()
        writer = TensorBoardPytorchWriter(str(CONFIG.save_path))
        logger.info(CONFIG)
        logger.info("=> creating model ...")
        logger.info(f"Classes: {train_set.response_shape[0]}")
        logger.info(model)
    if CONFIG.distributed:
        torch.cuda.set_device(gpu)
        # Split batch sizes and workers evenly across this node's GPUs.
        CONFIG.batch_size = int(CONFIG.batch_size / ngpus_per_node)
        CONFIG.batch_size_val = int(CONFIG.batch_size_val / ngpus_per_node)
        CONFIG.workers = int((CONFIG.workers + ngpus_per_node - 1) / ngpus_per_node)
        model = torch.nn.parallel.DistributedDataParallel(
            model.cuda(), device_ids=[gpu]
        )
    else:
        model = torch.nn.DataParallel(model.cuda())
    if CONFIG.weight:
        if Path(CONFIG.weight).is_file():
            if is_main_process():
                logger.info(f"=> loading weight '{CONFIG.weight}'")
            checkpoint = torch.load(CONFIG.weight)
            model.load_state_dict(checkpoint["state_dict"])
            if is_main_process():
                logger.info(f"=> loaded weight '{CONFIG.weight}'")
        else:
            if is_main_process():
                logger.info(f"=> no weight found at '{CONFIG.weight}'")
    if CONFIG.resume:
        if Path(CONFIG.resume).is_file():
            if is_main_process():
                logger.info(f"=> loading checkpoint '{CONFIG.resume}'")
            checkpoint = torch.load(
                CONFIG.resume, map_location=lambda storage, loc: storage.cuda(gpu)
            )
            CONFIG.start_epoch = checkpoint["epoch"]
            best_acc1 = checkpoint["top1_val"]
            model.load_state_dict(checkpoint["state_dict"])
            optimizer.load_state_dict(checkpoint["optimizer"])
            scheduler.load_state_dict(checkpoint["scheduler"])
            if is_main_process():
                # Fixed: the original f-string had mismatched quotes (SyntaxError).
                logger.info(
                    f"=> loaded checkpoint '{CONFIG.resume}' (epoch {checkpoint['epoch']})"
                )
        else:
            if is_main_process():
                logger.info(f"=> no checkpoint found at '{CONFIG.resume}'")
    if CONFIG.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_set)
        val_sampler = torch.utils.data.distributed.DistributedSampler(val_set)
    else:
        train_sampler = None
        val_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=CONFIG.batch_size,
        shuffle=(train_sampler is None),
        num_workers=CONFIG.workers,
        pin_memory=True,
        sampler=train_sampler,
    )
    val_loader = torch.utils.data.DataLoader(
        val_set,
        batch_size=CONFIG.batch_size_val,
        shuffle=False,
        num_workers=CONFIG.workers,
        pin_memory=True,
        sampler=val_sampler,
    )
    for epoch in range(CONFIG.start_epoch, CONFIG.epochs):
        if CONFIG.distributed:
            # Reshuffle the distributed sampler differently every epoch.
            train_sampler.set_epoch(epoch)
        (
            loss_train,
            mIoU_train,
            mAcc_train,
            allAcc_train,
            top1_train,
            top5_train,
        ) = train(train_loader, model, criterion, optimizer, epoch)
        loss_val, mIoU_val, mAcc_val, allAcc_val, top1_val, top5_val = validate(
            val_loader, model, criterion
        )
        scheduler.step()
        epoch_log = epoch + 1
        if is_main_process():
            writer.scalar("loss_train", loss_train, epoch_log)
            writer.scalar("mIoU_train", mIoU_train, epoch_log)
            writer.scalar("mAcc_train", mAcc_train, epoch_log)
            writer.scalar("allAcc_train", allAcc_train, epoch_log)
            writer.scalar("top1_train", top1_train, epoch_log)
            writer.scalar("top5_train", top5_train, epoch_log)
            writer.scalar("loss_val", loss_val, epoch_log)
            writer.scalar("mIoU_val", mIoU_val, epoch_log)
            writer.scalar("mAcc_val", mAcc_val, epoch_log)
            writer.scalar("allAcc_val", allAcc_val, epoch_log)
            writer.scalar("top1_val", top1_val, epoch_log)
            writer.scalar("top5_val", top5_val, epoch_log)
        if (epoch_log % CONFIG.save_freq == 0) and is_main_process():
            # Fixed: `Path / "x_" + str(n) + ".pth"` raised TypeError
            # (pathlib.Path does not support `+`); build the name in one piece.
            filename = CONFIG.save_path / f"train_epoch_{epoch_log}.pth"
            logger.info(f"Saving checkpoint to: {filename}")
            torch.save(
                {
                    "epoch": epoch_log,
                    "state_dict": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "scheduler": scheduler.state_dict(),
                    "top1_val": top1_val,
                    "top5_val": top5_val,
                },
                filename,
            )
            if top1_val > best_acc1:
                best_acc1 = top1_val
                shutil.copyfile(filename, CONFIG.save_path / "model_best.pth")
            if epoch_log / CONFIG.save_freq > 2:
                # Keep only the two most recent periodic checkpoints.
                deletename = (
                    CONFIG.save_path
                    / f"train_epoch_{epoch_log - CONFIG.save_freq * 2}.pth"
                )
                os.remove(deletename)
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch and return aggregate metrics.

    Args:
        train_loader: DataLoader yielding (input, target) batches.
        model: Network being trained (already wrapped for (D)DP, on GPU).
        criterion: Loss used when mixup is disabled and no label smoothing.
        optimizer: Optimizer stepped once per batch.
        epoch: Zero-based index of the current epoch (for logging/ETA).

    Returns:
        Tuple of (avg loss, mIoU, mAcc, allAcc, avg top-1, avg top-5).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    top1_meter = AverageMeter()
    top5_meter = AverageMeter()
    model.train()
    end = time.time()
    max_iter = CONFIG.epochs * len(train_loader)
    for i, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        if CONFIG.mixup_alpha:
            # Mixup path: interpolate inputs and blend the two targets' losses.
            eps = CONFIG.label_smoothing if CONFIG.label_smoothing else 0.0
            input, target_a, target_b, lam = mixup_data(
                input, target, CONFIG.mixup_alpha
            )
            output = model(input)
            loss = mixup_loss(output, target_a, target_b, lam, eps)
        else:
            output = model(input)
            loss = (
                smooth_loss(output, target, CONFIG.label_smoothing)
                if CONFIG.label_smoothing
                else criterion(output, target)
            )
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        top1, top5 = cal_accuracy(output, target, topk=(1, 5))
        n = input.size(0)
        if CONFIG.multiprocessing_distributed:
            # Average loss/accuracy across workers, weighted by batch size.
            with torch.no_grad():
                loss, top1, top5 = loss.detach() * n, top1 * n, top5 * n
                count = target.new_tensor([n], dtype=torch.long)
                distributed.all_reduce(loss)
                distributed.all_reduce(top1)
                distributed.all_reduce(top5)
                distributed.all_reduce(count)
                n = count.item()
                loss, top1, top5 = loss / n, top1 / n, top5 / n
        loss_meter.update(loss.item(), n), top1_meter.update(
            top1.item(), n
        ), top5_meter.update(top5.item(), n)
        output = output.max(1)[1]
        intersection, union, target = intersection_and_union_gpu(
            output, target, train_loader.dataset.response_shape[0], CONFIG.ignore_label
        )
        if CONFIG.multiprocessing_distributed:
            distributed.all_reduce(intersection)
            distributed.all_reduce(union)
            distributed.all_reduce(target)
        intersection, union, target = (
            intersection.cpu().numpy(),
            union.cpu().numpy(),
            target.cpu().numpy(),
        )
        intersection_meter.update(intersection), union_meter.update(
            union
        ), target_meter.update(target)
        accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        batch_time.update(time.time() - end)
        end = time.time()
        # calculate remain time
        current_iter = epoch * len(train_loader) + i + 1
        remain_iter = max_iter - current_iter
        remain_time = remain_iter * batch_time.avg
        t_m, t_s = divmod(remain_time, 60)
        t_h, t_m = divmod(t_m, 60)
        remain_time = f"{int(t_h):02d}:{int(t_m):02d}:{int(t_s):02d}"
        if ((i + 1) % CONFIG.print_freq == 0) and is_main_process():
            logger.info(
                f"Epoch: [{epoch + 1}/{CONFIG.epochs}][{i + 1}/{len(train_loader)}] Data {data_time.val:.3f} ("
                f"{data_time.avg:.3f}) Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) Remain {remain_time} Loss "
                f"{loss_meter.val:.4f} Accuracy {accuracy:.4f} Acc@1 {top1_meter.val:.3f} ({top1_meter.avg:.3f}) "
                f"Acc@5 {top5_meter.val:.3f} ({top5_meter.avg:.3f})."
            )
        if is_main_process():
            writer.scalar("loss_train_batch", loss_meter.val, current_iter)
            writer.scalar(
                "mIoU_train_batch",
                numpy.mean(intersection / (union + 1e-10)),
                current_iter,
            )
            writer.scalar(
                "mAcc_train_batch",
                numpy.mean(intersection / (target + 1e-10)),
                current_iter,
            )
            writer.scalar("allAcc_train_batch", accuracy, current_iter)
            writer.scalar("top1_train_batch", top1, current_iter)
            writer.scalar("top5_train_batch", top5, current_iter)
    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = numpy.mean(iou_class)
    mAcc = numpy.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    if is_main_process():
        logger.info(
            f"Train result at epoch [{epoch + 1}/{CONFIG.epochs}]: mIoU/mAcc/allAcc/top1/top5 {mIoU:.4f}/"
            f"{mAcc:.4f}/{allAcc:.4f}/{top1_meter.avg:.4f}/{top5_meter.avg:.4f}."
        )
    return loss_meter.avg, mIoU, mAcc, allAcc, top1_meter.avg, top5_meter.avg
def validate(val_loader, model, criterion):
    """Evaluate the model on the validation set and return aggregate metrics.

    Args:
        val_loader: DataLoader yielding (input, target) validation batches.
        model: Trained network (already on GPU).
        criterion: Loss used for reporting the validation loss.

    Returns:
        Tuple of (avg loss, mIoU, mAcc, allAcc, avg top-1, avg top-5).
    """
    # NOTE(review): the forward pass below runs without torch.no_grad(); it
    # still returns correct metrics but tracks gradients — confirm intended.
    if is_main_process():
        logger.info(">>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>")
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    top1_meter = AverageMeter()
    top5_meter = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output = model(input)
        loss = criterion(output, target)
        top1, top5 = cal_accuracy(output, target, topk=(1, 5))
        n = input.size(0)
        if CONFIG.multiprocessing_distributed:
            # Average loss/accuracy across workers, weighted by batch size.
            with torch.no_grad():
                loss, top1, top5 = loss.detach() * n, top1 * n, top5 * n
                count = target.new_tensor([n], dtype=torch.long)
                distributed.all_reduce(loss), distributed.all_reduce(
                    top1
                ), distributed.all_reduce(top5), distributed.all_reduce(count)
                n = count.item()
                loss, top1, top5 = loss / n, top1 / n, top5 / n
        loss_meter.update(loss.item(), n), top1_meter.update(
            top1.item(), n
        ), top5_meter.update(top5.item(), n)
        output = output.max(1)[1]
        intersection, union, target = intersection_and_union_gpu(
            output, target, val_loader.dataset.response_shape[0], CONFIG.ignore_label
        )
        if CONFIG.multiprocessing_distributed:
            distributed.all_reduce(intersection), distributed.all_reduce(
                union
            ), distributed.all_reduce(target)
        intersection, union, target = (
            intersection.cpu().numpy(),
            union.cpu().numpy(),
            target.cpu().numpy(),
        )
        intersection_meter.update(intersection), union_meter.update(
            union
        ), target_meter.update(target)
        accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % CONFIG.print_freq == 0) and is_main_process():
            logger.info(
                f"Test: [{i + 1}/{len(val_loader)}] Data {data_time.val:.3f} ({data_time.avg:.3f}) Batch "
                f"{batch_time.val:.3f} ({batch_time.avg:.3f}) Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) "
                f"Accuracy {accuracy:.4f} Acc@1 {top1_meter.val:.3f} ({top1_meter.avg:.3f}) Acc@5 "
                f"{top5_meter.val:.3f} ({top5_meter.avg:.3f})."
            )
    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = numpy.mean(iou_class)
    mAcc = numpy.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    if is_main_process():
        logger.info(
            f"Val result: mIoU/mAcc/allAcc/top1/top5 {mIoU:.4f}/{mAcc:.4f}/{allAcc:.4f}/{top1_meter.avg:.4f}/"
            f"{top5_meter.avg:.4f}."
        )
        # Per-class breakdown for classes actually present in the val set.
        for i in range(val_loader.dataset.response_shape[0]):
            if target_meter.sum[i] > 0:
                logger.info(
                    f"Class_{i} Result: iou/accuracy {iou_class[i]:.4f}/{accuracy_class[i]:.4f} Count:"
                    f"{target_meter.sum[i]}"
                )
        logger.info("<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<")
    return loss_meter.avg, mIoU, mAcc, allAcc, top1_meter.avg, top5_meter.avg
if __name__ == "__main__":
    def main():
        """Entry point: seed RNGs, configure distribution, launch workers."""
        from samples.classification.san.configs.imagenet_san10_pairwise import (
            SAN_CONFIG,
        )
        os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
            str(x) for x in SAN_CONFIG.train_gpu
        )
        if SAN_CONFIG.manual_seed is not None:
            random.seed(SAN_CONFIG.manual_seed)
            numpy.random.seed(SAN_CONFIG.manual_seed)
            # Fixed: these three previously read SAN_CONFIG.manualSeed, which
            # does not exist — the attribute is `manual_seed` everywhere else
            # in this file (see the two lines above and worker_init_fn).
            torch.manual_seed(SAN_CONFIG.manual_seed)
            torch.cuda.manual_seed(SAN_CONFIG.manual_seed)
            torch.cuda.manual_seed_all(SAN_CONFIG.manual_seed)
            # Deterministic kernels for reproducibility.
            cudnn.benchmark = False
            cudnn.deterministic = True
        if SAN_CONFIG.dist_url == "env://" and SAN_CONFIG.world_size == -1:
            SAN_CONFIG.world_size = int(os.environ["WORLD_SIZE"])
        SAN_CONFIG.distributed = (
            SAN_CONFIG.world_size > 1 or SAN_CONFIG.multiprocessing_distributed
        )
        SAN_CONFIG.ngpus_per_node = len(SAN_CONFIG.train_gpu)
        if len(SAN_CONFIG.train_gpu) == 1:
            # Single GPU: no point paying distributed overhead.
            SAN_CONFIG.sync_bn = False
            SAN_CONFIG.distributed = False
            SAN_CONFIG.multiprocessing_distributed = False
        if SAN_CONFIG.multiprocessing_distributed:
            port = find_unclaimed_port()
            SAN_CONFIG.dist_url = f"tcp://127.0.0.1:{port}"
            SAN_CONFIG.world_size *= SAN_CONFIG.ngpus_per_node
            # spawn passes the local gpu index as the first argument.
            multiprocessing.spawn(
                main_worker,
                nprocs=SAN_CONFIG.ngpus_per_node,
                args=(SAN_CONFIG.ngpus_per_node, SAN_CONFIG),
            )
        else:
            main_worker(SAN_CONFIG.train_gpu, SAN_CONFIG.ngpus_per_node, SAN_CONFIG)
    main()
import logging
import numpy
import os
import random
import shutil
import time
import torch
from draugr import AverageMeter, find_unclaimed_port
from draugr.numpy_utilities import Split
from draugr.torch_utilities import TensorBoardPytorchWriter
from pathlib import Path
from torch import distributed, multiprocessing, nn
from torch.backends import cudnn
from torch.optim import lr_scheduler
from neodroidvision.classification.architectures.self_attention_network import (
SelfAttentionTypeEnum,
make_san,
)
from san_utilities import (
cal_accuracy,
intersection_and_union_gpu,
mixup_data,
mixup_loss,
smooth_loss,
)
def get_logger():
    """Return the "main-logger" console logger configured at INFO level.

    Returns:
        logging.Logger: Logger emitting timestamped, per-process records
        through a stream handler.
    """
    result = logging.getLogger("main-logger")
    result.setLevel(logging.INFO)
    console = logging.StreamHandler()
    console.setFormatter(
        logging.Formatter(
            "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
        )
    )
    result.addHandler(console)
    return result
def worker_init_fn(worker_id):
    """Give each dataloader worker its own deterministic Python RNG seed.

    Args:
        worker_id: Index of the dataloader worker process.
    """
    seed = CONFIG.manual_seed + worker_id
    random.seed(seed)
def is_main_process():
    """Return True when this worker should log and write checkpoints."""
    return (
        not CONFIG.multiprocessing_distributed
        or CONFIG.rank % CONFIG.ngpus_per_node == 0
    )
def main_worker(gpu, ngpus_per_node, config):
    """Train and validate a SAN classifier on one (possibly distributed) worker.

    Args:
        gpu: GPU index for this worker (the full train_gpu list when not
            launched via multiprocessing.spawn).
        ngpus_per_node: Number of GPUs on this node.
        config: Training configuration namespace; stored in the CONFIG global.
    """
    # Declare every module-level name once, up front: a `global` statement that
    # appears after the name has already been used in the function is a
    # SyntaxError ("name is used prior to global declaration"), which the
    # previous version triggered with its repeated `global logger/writer`.
    global CONFIG, best_acc1, logger, writer
    CONFIG, best_acc1 = config, 0
    train_set = config.dataset_type(CONFIG.dataset_path, Split.Training)
    val_set = config.dataset_type(CONFIG.dataset_path, Split.Validation)
    if CONFIG.distributed:
        if CONFIG.dist_url == "env://" and CONFIG.rank == -1:
            CONFIG.rank = int(os.environ["RANK"])
        if CONFIG.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            CONFIG.rank = CONFIG.rank * ngpus_per_node + gpu
        distributed.init_process_group(
            backend=CONFIG.dist_backend,
            init_method=CONFIG.dist_url,
            world_size=CONFIG.world_size,
            rank=CONFIG.rank,
        )
    model = make_san(
        self_attention_type=SelfAttentionTypeEnum(CONFIG.self_attention_type),
        layers=CONFIG.layers,
        kernels=CONFIG.kernels,
        num_classes=train_set.response_shape[0],
    )
    criterion = nn.CrossEntropyLoss(ignore_index=CONFIG.ignore_label)
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=CONFIG.base_lr,
        momentum=CONFIG.momentum,
        weight_decay=CONFIG.weight_decay,
    )
    if CONFIG.scheduler == "step":
        scheduler = lr_scheduler.MultiStepLR(
            optimizer, milestones=CONFIG.step_epochs, gamma=0.1
        )
    elif CONFIG.scheduler == "cosine":
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=CONFIG.epochs)
    if is_main_process():
        logger = get_logger()
        writer = TensorBoardPytorchWriter(str(CONFIG.save_path))
        logger.info(CONFIG)
        logger.info("=> creating model ...")
        logger.info(f"Classes: {train_set.response_shape[0]}")
        logger.info(model)
    if CONFIG.distributed:
        torch.cuda.set_device(gpu)
        # Split batch sizes and workers evenly across this node's GPUs.
        CONFIG.batch_size = int(CONFIG.batch_size / ngpus_per_node)
        CONFIG.batch_size_val = int(CONFIG.batch_size_val / ngpus_per_node)
        CONFIG.workers = int((CONFIG.workers + ngpus_per_node - 1) / ngpus_per_node)
        model = torch.nn.parallel.DistributedDataParallel(
            model.cuda(), device_ids=[gpu]
        )
    else:
        model = torch.nn.DataParallel(model.cuda())
    if CONFIG.weight:
        if Path(CONFIG.weight).is_file():
            if is_main_process():
                logger.info(f"=> loading weight '{CONFIG.weight}'")
            checkpoint = torch.load(CONFIG.weight)
            model.load_state_dict(checkpoint["state_dict"])
            if is_main_process():
                logger.info(f"=> loaded weight '{CONFIG.weight}'")
        else:
            if is_main_process():
                logger.info(f"=> no weight found at '{CONFIG.weight}'")
    if CONFIG.resume:
        if Path(CONFIG.resume).is_file():
            if is_main_process():
                logger.info(f"=> loading checkpoint '{CONFIG.resume}'")
            checkpoint = torch.load(
                CONFIG.resume, map_location=lambda storage, loc: storage.cuda(gpu)
            )
            CONFIG.start_epoch = checkpoint["epoch"]
            best_acc1 = checkpoint["top1_val"]
            model.load_state_dict(checkpoint["state_dict"])
            optimizer.load_state_dict(checkpoint["optimizer"])
            scheduler.load_state_dict(checkpoint["scheduler"])
            if is_main_process():
                logger.info(
                    f"=> loaded checkpoint '{CONFIG.resume}' (epoch {checkpoint['epoch']})"
                )
        else:
            if is_main_process():
                logger.info(f"=> no checkpoint found at '{CONFIG.resume}'")
    if CONFIG.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_set)
        val_sampler = torch.utils.data.distributed.DistributedSampler(val_set)
    else:
        train_sampler = None
        val_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=CONFIG.batch_size,
        shuffle=(train_sampler is None),
        num_workers=CONFIG.workers,
        pin_memory=True,
        sampler=train_sampler,
    )
    val_loader = torch.utils.data.DataLoader(
        val_set,
        batch_size=CONFIG.batch_size_val,
        shuffle=False,
        num_workers=CONFIG.workers,
        pin_memory=True,
        sampler=val_sampler,
    )
    for epoch in range(CONFIG.start_epoch, CONFIG.epochs):
        if CONFIG.distributed:
            # Reshuffle the distributed sampler differently every epoch.
            train_sampler.set_epoch(epoch)
        (
            loss_train,
            mIoU_train,
            mAcc_train,
            allAcc_train,
            top1_train,
            top5_train,
        ) = train(train_loader, model, criterion, optimizer, epoch)
        loss_val, mIoU_val, mAcc_val, allAcc_val, top1_val, top5_val = validate(
            val_loader, model, criterion
        )
        scheduler.step()
        epoch_log = epoch + 1
        if is_main_process():
            writer.scalar("loss_train", loss_train, epoch_log)
            writer.scalar("mIoU_train", mIoU_train, epoch_log)
            writer.scalar("mAcc_train", mAcc_train, epoch_log)
            writer.scalar("allAcc_train", allAcc_train, epoch_log)
            writer.scalar("top1_train", top1_train, epoch_log)
            writer.scalar("top5_train", top5_train, epoch_log)
            writer.scalar("loss_val", loss_val, epoch_log)
            writer.scalar("mIoU_val", mIoU_val, epoch_log)
            writer.scalar("mAcc_val", mAcc_val, epoch_log)
            writer.scalar("allAcc_val", allAcc_val, epoch_log)
            writer.scalar("top1_val", top1_val, epoch_log)
            writer.scalar("top5_val", top5_val, epoch_log)
        if (epoch_log % CONFIG.save_freq == 0) and is_main_process():
            # Fixed: `Path / "x_" + str(n) + ".pth"` raised TypeError
            # (pathlib.Path does not support `+`); build the name in one piece.
            filename = CONFIG.save_path / f"train_epoch_{epoch_log}.pth"
            logger.info(f"Saving checkpoint to: {filename}")
            torch.save(
                {
                    "epoch": epoch_log,
                    "state_dict": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "scheduler": scheduler.state_dict(),
                    "top1_val": top1_val,
                    "top5_val": top5_val,
                },
                filename,
            )
            if top1_val > best_acc1:
                best_acc1 = top1_val
                shutil.copyfile(filename, CONFIG.save_path / "model_best.pth")
            if epoch_log / CONFIG.save_freq > 2:
                # Keep only the two most recent periodic checkpoints.
                deletename = (
                    CONFIG.save_path
                    / f"train_epoch_{epoch_log - CONFIG.save_freq * 2}.pth"
                )
                os.remove(deletename)
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch and return epoch-level metrics.

    Args:
        train_loader: iterable of (input, target) batches; its dataset must
            expose ``response_shape`` (class count is read from
            ``response_shape[0]`` — presumably number of classes, TODO confirm).
        model: the network to train (switched to train mode here).
        criterion: loss used when label smoothing is disabled.
        optimizer: optimizer stepped once per batch.
        epoch: zero-based index of the current epoch (for logging and the
            remaining-time estimate).

    Returns:
        Tuple ``(avg_loss, mIoU, mAcc, allAcc, avg_top1, avg_top5)``
        aggregated over the whole epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    top1_meter = AverageMeter()
    top5_meter = AverageMeter()
    model.train()
    end = time.time()
    max_iter = CONFIG.epochs * len(train_loader)
    for i, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        if CONFIG.mixup_alpha:
            # Mixup branch: train on a convex combination of two samples and
            # compute the loss against both labels, weighted by lam.
            eps = CONFIG.label_smoothing if CONFIG.label_smoothing else 0.0
            input, target_a, target_b, lam = mixup_data(
                input, target, CONFIG.mixup_alpha
            )
            output = model(input)
            loss = mixup_loss(output, target_a, target_b, lam, eps)
        else:
            output = model(input)
            loss = (
                smooth_loss(output, target, CONFIG.label_smoothing)
                if CONFIG.label_smoothing
                else criterion(output, target)
            )
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        top1, top5 = cal_accuracy(output, target, topk=(1, 5))
        n = input.size(0)
        if CONFIG.multiprocessing_distributed:
            # Average loss/accuracy across workers weighted by batch size:
            # scale by the local n, all-reduce the sums, then divide by the
            # global sample count.
            with torch.no_grad():
                loss, top1, top5 = loss.detach() * n, top1 * n, top5 * n
                count = target.new_tensor([n], dtype=torch.long)
                distributed.all_reduce(loss)
                distributed.all_reduce(top1)
                distributed.all_reduce(top5)
                distributed.all_reduce(count)
                n = count.item()
                loss, top1, top5 = loss / n, top1 / n, top5 / n
        loss_meter.update(loss.item(), n), top1_meter.update(
            top1.item(), n
        ), top5_meter.update(top5.item(), n)
        # Hard predictions for the IoU-style metrics; note `target` is
        # rebound to the per-class target counts below.
        output = output.max(1)[1]
        intersection, union, target = intersection_and_union_gpu(
            output, target, train_loader.dataset.response_shape[0], CONFIG.ignore_label
        )
        if CONFIG.multiprocessing_distributed:
            distributed.all_reduce(intersection)
            distributed.all_reduce(union)
            distributed.all_reduce(target)
        intersection, union, target = (
            intersection.cpu().numpy(),
            union.cpu().numpy(),
            target.cpu().numpy(),
        )
        intersection_meter.update(intersection), union_meter.update(
            union
        ), target_meter.update(target)
        accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        batch_time.update(time.time() - end)
        end = time.time()
        # calculate remain time
        current_iter = epoch * len(train_loader) + i + 1
        remain_iter = max_iter - current_iter
        remain_time = remain_iter * batch_time.avg
        t_m, t_s = divmod(remain_time, 60)
        t_h, t_m = divmod(t_m, 60)
        remain_time = f"{int(t_h):02d}:{int(t_m):02d}:{int(t_s):02d}"
        if ((i + 1) % CONFIG.print_freq == 0) and is_main_process():
            logger.info(
                f"Epoch: [{epoch + 1}/{CONFIG.epochs}][{i + 1}/{len(train_loader)}] Data {data_time.val:.3f} ("
                f"{data_time.avg:.3f}) Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) Remain {remain_time} Loss "
                f"{loss_meter.val:.4f} Accuracy {accuracy:.4f} Acc@1 {top1_meter.val:.3f} ({top1_meter.avg:.3f}) "
                f"Acc@5 {top5_meter.val:.3f} ({top5_meter.avg:.3f})."
            )
        if is_main_process():
            # Per-batch scalars; `writer` and `logger` are module-level
            # globals set up elsewhere in this script.
            writer.scalar("loss_train_batch", loss_meter.val, current_iter)
            writer.scalar(
                "mIoU_train_batch",
                numpy.mean(intersection / (union + 1e-10)),
                current_iter,
            )
            writer.scalar(
                "mAcc_train_batch",
                numpy.mean(intersection / (target + 1e-10)),
                current_iter,
            )
            writer.scalar("allAcc_train_batch", accuracy, current_iter)
            writer.scalar("top1_train_batch", top1, current_iter)
            writer.scalar("top5_train_batch", top5, current_iter)
    # Epoch-level aggregates over the accumulated per-class counts.
    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = numpy.mean(iou_class)
    mAcc = numpy.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    if is_main_process():
        logger.info(
            f"Train result at epoch [{epoch + 1}/{CONFIG.epochs}]: mIoU/mAcc/allAcc/top1/top5 {mIoU:.4f}/"
            f"{mAcc:.4f}/{allAcc:.4f}/{top1_meter.avg:.4f}/{top5_meter.avg:.4f}."
        )
    return loss_meter.avg, mIoU, mAcc, allAcc, top1_meter.avg, top5_meter.avg
def validate(val_loader, model, criterion):
    """Evaluate the model over the validation set.

    Args:
        val_loader: iterable of (input, target) batches; its dataset must
            expose ``response_shape`` (class count read from
            ``response_shape[0]`` — presumably number of classes, TODO confirm).
        model: network to evaluate (switched to eval mode here).
        criterion: loss function applied per batch.

    Returns:
        Tuple ``(avg_loss, mIoU, mAcc, allAcc, avg_top1, avg_top5)``
        aggregated over the full validation pass.

    NOTE(review): the forward pass is not wrapped in ``torch.no_grad()``,
    so autograd buffers are still allocated during evaluation — confirm
    whether that is intentional.
    """
    if is_main_process():
        logger.info(">>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>")
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    top1_meter = AverageMeter()
    top5_meter = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output = model(input)
        loss = criterion(output, target)
        top1, top5 = cal_accuracy(output, target, topk=(1, 5))
        n = input.size(0)
        if CONFIG.multiprocessing_distributed:
            # Average loss/accuracy across workers weighted by batch size:
            # scale by local n, all-reduce sums, divide by the global count.
            with torch.no_grad():
                loss, top1, top5 = loss.detach() * n, top1 * n, top5 * n
                count = target.new_tensor([n], dtype=torch.long)
                distributed.all_reduce(loss), distributed.all_reduce(
                    top1
                ), distributed.all_reduce(top5), distributed.all_reduce(count)
                n = count.item()
                loss, top1, top5 = loss / n, top1 / n, top5 / n
        loss_meter.update(loss.item(), n), top1_meter.update(
            top1.item(), n
        ), top5_meter.update(top5.item(), n)
        # Hard predictions; `target` is rebound to per-class target counts.
        output = output.max(1)[1]
        intersection, union, target = intersection_and_union_gpu(
            output, target, val_loader.dataset.response_shape[0], CONFIG.ignore_label
        )
        if CONFIG.multiprocessing_distributed:
            distributed.all_reduce(intersection), distributed.all_reduce(
                union
            ), distributed.all_reduce(target)
        intersection, union, target = (
            intersection.cpu().numpy(),
            union.cpu().numpy(),
            target.cpu().numpy(),
        )
        intersection_meter.update(intersection), union_meter.update(
            union
        ), target_meter.update(target)
        accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % CONFIG.print_freq == 0) and is_main_process():
            logger.info(
                f"Test: [{i + 1}/{len(val_loader)}] Data {data_time.val:.3f} ({data_time.avg:.3f}) Batch "
                f"{batch_time.val:.3f} ({batch_time.avg:.3f}) Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) "
                f"Accuracy {accuracy:.4f} Acc@1 {top1_meter.val:.3f} ({top1_meter.avg:.3f}) Acc@5 "
                f"{top5_meter.val:.3f} ({top5_meter.avg:.3f})."
            )
    # Aggregate metrics over the full pass, plus per-class diagnostics.
    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = numpy.mean(iou_class)
    mAcc = numpy.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    if is_main_process():
        logger.info(
            f"Val result: mIoU/mAcc/allAcc/top1/top5 {mIoU:.4f}/{mAcc:.4f}/{allAcc:.4f}/{top1_meter.avg:.4f}/"
            f"{top5_meter.avg:.4f}."
        )
        for i in range(val_loader.dataset.response_shape[0]):
            # Only report classes that actually occur in the val targets.
            if target_meter.sum[i] > 0:
                logger.info(
                    f"Class_{i} Result: iou/accuracy {iou_class[i]:.4f}/{accuracy_class[i]:.4f} Count:"
                    f"{target_meter.sum[i]}"
                )
        logger.info("<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<")
    return loss_meter.avg, mIoU, mAcc, allAcc, top1_meter.avg, top5_meter.avg
if __name__ == "__main__":

    def main():
        """Script entry point.

        Loads the SAN training config, pins the visible GPUs, applies
        deterministic seeding when requested, derives the distributed-training
        settings, and launches one ``main_worker`` per GPU (or a single
        in-process worker when multiprocessing is disabled).
        """
        from samples.classification.san.configs.imagenet_san10_pairwise import (
            SAN_CONFIG,
        )

        os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
            str(x) for x in SAN_CONFIG.train_gpu
        )
        if SAN_CONFIG.manual_seed is not None:
            random.seed(SAN_CONFIG.manual_seed)
            numpy.random.seed(SAN_CONFIG.manual_seed)
            # BUG FIX: these three calls previously read the non-existent
            # attribute ``SAN_CONFIG.manualSeed`` (the config spells it
            # ``manual_seed``, as used just above), raising AttributeError
            # whenever a seed was configured.
            torch.manual_seed(SAN_CONFIG.manual_seed)
            torch.cuda.manual_seed(SAN_CONFIG.manual_seed)
            torch.cuda.manual_seed_all(SAN_CONFIG.manual_seed)
            cudnn.benchmark = False
            cudnn.deterministic = True
        if SAN_CONFIG.dist_url == "env://" and SAN_CONFIG.world_size == -1:
            SAN_CONFIG.world_size = int(os.environ["WORLD_SIZE"])
        SAN_CONFIG.distributed = (
            SAN_CONFIG.world_size > 1 or SAN_CONFIG.multiprocessing_distributed
        )
        SAN_CONFIG.ngpus_per_node = len(SAN_CONFIG.train_gpu)
        if len(SAN_CONFIG.train_gpu) == 1:
            # Single-GPU run: distributed machinery would only add overhead.
            SAN_CONFIG.sync_bn = False
            SAN_CONFIG.distributed = False
            SAN_CONFIG.multiprocessing_distributed = False
        if SAN_CONFIG.multiprocessing_distributed:
            # Spawn one process per GPU, rendezvousing on a free local port.
            port = find_unclaimed_port()
            SAN_CONFIG.dist_url = f"tcp://127.0.0.1:{port}"
            SAN_CONFIG.world_size *= SAN_CONFIG.ngpus_per_node
            multiprocessing.spawn(
                main_worker,
                nprocs=SAN_CONFIG.ngpus_per_node,
                args=(SAN_CONFIG.ngpus_per_node, SAN_CONFIG),
            )
        else:
            main_worker(SAN_CONFIG.train_gpu, SAN_CONFIG.ngpus_per_node, SAN_CONFIG)

    main()
|
from collections import Counter
import logging
import numpy as np
import os
from prob_cbr.data.data_utils import get_inv_relation, is_inv_relation
# Module-level logger: INFO-and-above records go to stderr via a stream
# handler with a timestamped format.
logger = logging.getLogger('stream_utils')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("[%(asctime)s \t %(message)s]",
                              "%Y-%m-%d %H:%M:%S")
ch.setFormatter(formatter)
logger.addHandler(ch)
def read_triple_raw(file_path, dataset_name):
    """Load raw (head, relation, tail) string triples from a TSV file.

    Lines whose relation is an inverse (per ``is_inv_relation``) are
    dropped, so each fact is kept in its forward direction only.
    """
    with open(file_path) as fin:
        return [
            (h, r, t)
            for h, r, t in (line.strip().split('\t') for line in fin)
            if not is_inv_relation(r, dataset_name)
        ]
class KBStream:
    """Serve a knowledge base as an initial snapshot plus a stream of updates.

    The entity vocabulary is split into an initial portion (the top-degree
    "hub" nodes plus a random sample, sized by ``stream_init_proportion``)
    and ``n_stream_updates`` batches of newly revealed entities.  A triple
    becomes visible once both of its endpoints are visible.
    """

    def __init__(self, dataset_name, data_path, test_file_name=None,
                 stream_init_proportion=0.5, n_stream_updates=10, seed=42):
        self.dataset_name = dataset_name
        self.data_path = data_path
        self.stream_init_proportion = stream_init_proportion
        self.n_stream_updates = n_stream_updates
        self.stream_rng = np.random.default_rng(seed)
        self.train_rng = np.random.default_rng(seed)
        # Full vocabularies as declared by the dataset's dict files.
        self.entity_set, self.relation_set = set(), set()
        with open(os.path.join(self.data_path, 'entities.dict')) as fin:
            for line in fin:
                eid, entity = line.strip().split('\t')
                self.entity_set.add(entity)
        with open(os.path.join(self.data_path, 'relations.dict')) as fin:
            for line in fin:
                rid, relation = line.strip().split('\t')
                self.relation_set.add(relation)
        if test_file_name is None or test_file_name == '':
            test_file_name = 'test.txt'
        if dataset_name == 'nell':
            graph_file = 'full_graph.txt'
        else:
            graph_file = 'graph.txt'
        self.train_triples = read_triple_raw(os.path.join(self.data_path, graph_file), self.dataset_name)
        self.valid_triples = read_triple_raw(os.path.join(self.data_path, 'dev.txt'), self.dataset_name)
        self.test_triples = read_triple_raw(os.path.join(self.data_path, test_file_name), self.dataset_name)
        # Mutable snapshot of everything revealed to the consumer so far.
        self.kb_state = {'entity2id': {}, 'relation2id': {},
                         'train_triples': [], 'valid_triples': [], 'test_triples': []}

    def get_max_num_entities(self):
        """Upper bound on the entity ids this stream will ever assign."""
        return len(self.entity_set)

    def get_max_num_relations(self):
        """Upper bound on relation ids (each relation also gets an inverse)."""
        return 2*len(self.relation_set)

    def get_init_kb(self):
        """Build and return the initial KB snapshot.

        Selects the top-10%-degree entities plus a random sample of the rest
        (up to ``stream_init_proportion`` of all entities), assigns ids, and
        collects every train/valid/test triple with both endpoints selected.
        Relation ids are assigned lazily, registering each relation together
        with its inverse.

        Returns:
            ``(entity2id, id2entity, relation2id, id2relation, all_triples,
            train_triples, valid_triples, test_triples)`` where the triple
            lists already include the reversed (inverse-relation) copies.
        """
        # INIT
        # Sample 10% of the most common nodes (hubs)
        # Sample (stream_init_proportion - 10)% of the remaining nodes randomly
        node_usage_train = Counter([e for (e, _, _) in self.train_triples] + [e for (_, _, e) in self.train_triples])
        init_entities = [_ent for _ent, _ in node_usage_train.most_common(len(node_usage_train) // 10)]
        for _ent in init_entities:
            del node_usage_train[_ent]
        permutation = self.stream_rng.permutation(len(node_usage_train))
        usage_list = list(node_usage_train.most_common())
        sample_size = int(np.ceil(max(self.stream_init_proportion - 0.1, 0.0)*len(self.entity_set)))
        init_entities.extend([usage_list[j][0] for j in permutation[:sample_size]])
        assert len(init_entities) == len(set(init_entities))
        init_entities = set(init_entities)
        entity2id, relation2id = {}, {}
        id2entity, id2relation = {}, {}
        for eid, entity in enumerate(sorted(init_entities)):
            entity2id[entity] = eid
            id2entity[eid] = entity
        edge_coverage = {'train': 0, 'valid': 0, 'test': 0}
        init_train_triples, init_valid_triples, init_test_triples = [], [], []
        for edge in self.train_triples:
            e1, r, e2 = edge
            if e1 in init_entities and e2 in init_entities:
                if r not in relation2id:
                    # Register the relation and its inverse back-to-back.
                    new_id = len(relation2id)
                    relation2id[r] = new_id
                    id2relation[new_id] = r
                    new_id = len(relation2id)
                    r_inv = get_inv_relation(r, self.dataset_name)
                    relation2id[r_inv] = new_id
                    id2relation[new_id] = r_inv
                init_train_triples.append((e1, r, e2))
                edge_coverage['train'] += 1
        for edge in self.valid_triples:
            e1, r, e2 = edge
            if e1 in init_entities and e2 in init_entities:
                if r not in relation2id:
                    new_id = len(relation2id)
                    relation2id[r] = new_id
                    id2relation[new_id] = r
                    new_id = len(relation2id)
                    r_inv = get_inv_relation(r, self.dataset_name)
                    relation2id[r_inv] = new_id
                    id2relation[new_id] = r_inv
                init_valid_triples.append((e1, r, e2))
                edge_coverage['valid'] += 1
        for edge in self.test_triples:
            e1, r, e2 = edge
            if e1 in init_entities and e2 in init_entities:
                if r not in relation2id:
                    new_id = len(relation2id)
                    relation2id[r] = new_id
                    id2relation[new_id] = r
                    new_id = len(relation2id)
                    r_inv = get_inv_relation(r, self.dataset_name)
                    relation2id[r_inv] = new_id
                    id2relation[new_id] = r_inv
                init_test_triples.append((e1, r, e2))
                edge_coverage['test'] += 1
        # BUG FIX: the dict subscripts below previously reused the f-string's
        # own double quotes (e.g. {edge_coverage["train"]}), which is a
        # SyntaxError on Python < 3.12; single quotes are used instead.
        logger.info(f"[STREAM] Init edge_coverage: "
                    f"train: {edge_coverage['train']} ({edge_coverage['train'] / len(self.train_triples) * 100:0.2f}%) "
                    f"valid: {edge_coverage['valid']} ({edge_coverage['valid'] / len(self.valid_triples) * 100:0.2f}%) "
                    f"test: {edge_coverage['test']} ({edge_coverage['test'] / len(self.test_triples) * 100:0.2f}%)")
        logger.info(f'[STREAM] Init entity_coverage:'
                    f' {len(init_entities)} ({len(init_entities) / (len(self.entity_set)) * 100:0.2f}%)')
        self.kb_state['entity2id'] = entity2id.copy()
        self.kb_state['relation2id'] = relation2id.copy()
        self.kb_state['id2entity'] = id2entity.copy()
        self.kb_state['id2relation'] = id2relation.copy()
        self.kb_state['train_triples'] = init_train_triples.copy()
        self.kb_state['valid_triples'] = init_valid_triples.copy()
        self.kb_state['test_triples'] = init_test_triples.copy()
        # RotatE explicitly adds them in model
        rev_train_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in init_train_triples]
        rev_valid_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in init_valid_triples]
        rev_test_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in init_test_triples]
        init_train_triples = init_train_triples + rev_train_triples
        init_valid_triples = init_valid_triples + rev_valid_triples
        init_test_triples = init_test_triples + rev_test_triples
        return entity2id, id2entity, relation2id, id2relation, \
            init_train_triples + init_valid_triples + init_test_triples,\
            init_train_triples, init_valid_triples, init_test_triples

    def batch_generator(self):
        """Yield ``n_stream_updates`` incremental KB snapshots.

        Each step reveals a fresh random sample of unseen entities (all
        remaining ones on the final step), assigns ids to them, collects the
        triples newly covered by the grown entity set, updates
        ``self.kb_state``, and yields the cumulative mappings and triple
        lists (with reversed copies appended, mirroring ``get_init_kb``).
        """
        for step in range(self.n_stream_updates):
            logger.info(f'[STREAM] Generating batch {step + 1}...')
            entity2id, relation2id = self.kb_state['entity2id'], self.kb_state['relation2id']
            id2entity, id2relation = self.kb_state['id2entity'], self.kb_state['id2relation']
            curr_train_triples, curr_valid_triples, curr_test_triples = \
                self.kb_state['train_triples'], self.kb_state['valid_triples'], self.kb_state['test_triples']
            new_train_triples, new_valid_triples, new_test_triples = [], [], []
            seen_entities = set(entity2id.keys())
            unseen_entities = sorted(self.entity_set.difference(seen_entities))
            permutation = self.stream_rng.permutation(len(unseen_entities))
            sample_size = int(np.ceil((1 - self.stream_init_proportion) / self.n_stream_updates * len(self.entity_set)))
            if step == self.n_stream_updates - 1:
                # Last update: flush every remaining entity.
                sample_size = len(unseen_entities)
            new_entities = [unseen_entities[j] for j in permutation[:sample_size]]
            new_entities = set(new_entities)
            for entity in sorted(new_entities):
                if entity not in entity2id:
                    new_id = len(entity2id)
                    entity2id[entity] = new_id
                    id2entity[new_id] = entity
            for edge in self.train_triples:
                e1, r, e2 = edge
                if e1 in seen_entities and e2 in seen_entities:
                    # Already delivered in an earlier snapshot.
                    continue
                if (e1 in new_entities or e1 in seen_entities) and (e2 in new_entities or e2 in seen_entities):
                    if r not in relation2id:
                        new_id = len(relation2id)
                        relation2id[r] = new_id
                        id2relation[new_id] = r
                        new_id = len(relation2id)
                        r_inv = get_inv_relation(r, self.dataset_name)
                        relation2id[r_inv] = new_id
                        id2relation[new_id] = r_inv
                    new_train_triples.append((e1, r, e2))
            for edge in self.valid_triples:
                e1, r, e2 = edge
                if e1 in seen_entities and e2 in seen_entities:
                    continue
                if (e1 in new_entities or e1 in seen_entities) and (e2 in new_entities or e2 in seen_entities):
                    if r not in relation2id:
                        new_id = len(relation2id)
                        relation2id[r] = new_id
                        id2relation[new_id] = r
                        new_id = len(relation2id)
                        r_inv = get_inv_relation(r, self.dataset_name)
                        relation2id[r_inv] = new_id
                        id2relation[new_id] = r_inv
                    new_valid_triples.append((e1, r, e2))
            for edge in self.test_triples:
                e1, r, e2 = edge
                if e1 in seen_entities and e2 in seen_entities:
                    continue
                if (e1 in new_entities or e1 in seen_entities) and (e2 in new_entities or e2 in seen_entities):
                    if r not in relation2id:
                        new_id = len(relation2id)
                        relation2id[r] = new_id
                        id2relation[new_id] = r
                        new_id = len(relation2id)
                        r_inv = get_inv_relation(r, self.dataset_name)
                        relation2id[r_inv] = new_id
                        id2relation[new_id] = r_inv
                    new_test_triples.append((e1, r, e2))
            all_train_triples = new_train_triples + curr_train_triples
            all_valid_triples = new_valid_triples + curr_valid_triples
            all_test_triples = new_test_triples + curr_test_triples
            logger.info(f"[STREAM] Batch edge_coverage: "
                        f"train: {len(new_train_triples)} ({len(new_train_triples) / len(self.train_triples) * 100:0.2f}%) "
                        f"valid: {len(new_valid_triples)} ({len(new_valid_triples) / len(self.valid_triples) * 100:0.2f}%) "
                        f"test: {len(new_test_triples)} ({len(new_test_triples) / len(self.test_triples) * 100:0.2f}%)")
            logger.info(f"[STREAM] Total edge_coverage: "
                        f"train: {len(all_train_triples)} ({len(all_train_triples) / len(self.train_triples) * 100:0.2f}%) "
                        f"valid: {len(all_valid_triples)} ({len(all_valid_triples) / len(self.valid_triples) * 100:0.2f}%) "
                        f"test: {len(all_test_triples)} ({len(all_test_triples) / len(self.test_triples) * 100:0.2f}%)")
            logger.info(f'[STREAM] Total entity_coverage:'
                        f' {len(entity2id)} ({len(entity2id) / (len(self.entity_set)) * 100:0.2f}%)')
            self.kb_state['entity2id'] = entity2id.copy()
            self.kb_state['relation2id'] = relation2id.copy()
            self.kb_state['id2entity'] = id2entity.copy()
            self.kb_state['id2relation'] = id2relation.copy()
            self.kb_state['train_triples'] = all_train_triples.copy()
            self.kb_state['valid_triples'] = all_valid_triples.copy()
            self.kb_state['test_triples'] = all_test_triples.copy()
            # RotatE explicitly adds them in model
            rev_train_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in all_train_triples]
            rev_valid_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in all_valid_triples]
            rev_test_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in all_test_triples]
            all_train_triples = all_train_triples + rev_train_triples
            all_valid_triples = all_valid_triples + rev_valid_triples
            all_test_triples = all_test_triples + rev_test_triples
            rev_valid_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in new_valid_triples]
            rev_test_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in new_test_triples]
            new_valid_triples = new_valid_triples + rev_valid_triples
            new_test_triples = new_test_triples + rev_test_triples
            yield entity2id, id2entity, relation2id, id2relation, \
                all_train_triples + all_valid_triples + all_test_triples, \
                all_train_triples, all_valid_triples, new_valid_triples, all_test_triples, new_test_triples
| from collections import Counter
import logging
import numpy as np
import os
from prob_cbr.data.data_utils import get_inv_relation, is_inv_relation
# Module-level logger: INFO-and-above records go to stderr via a stream
# handler with a timestamped format.
logger = logging.getLogger('stream_utils')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("[%(asctime)s \t %(message)s]",
                              "%Y-%m-%d %H:%M:%S")
ch.setFormatter(formatter)
logger.addHandler(ch)
def read_triple_raw(file_path, dataset_name):
    """Read raw string triples (h, r, t) from a tab-separated file.

    Only forward facts are returned: any line whose relation is classified
    as an inverse by ``is_inv_relation`` is skipped.
    """
    kept = []
    with open(file_path) as fin:
        for raw_line in fin:
            head, rel, tail = raw_line.strip().split('\t')
            if is_inv_relation(rel, dataset_name):
                continue
            kept.append((head, rel, tail))
    return kept
class KBStream:
    """Serve a knowledge base as an initial snapshot plus a stream of updates.

    The entity vocabulary is split into an initial portion (the top-degree
    "hub" nodes plus a random sample, sized by ``stream_init_proportion``)
    and ``n_stream_updates`` batches of newly revealed entities.  A triple
    becomes visible once both of its endpoints are visible.
    """

    def __init__(self, dataset_name, data_path, test_file_name=None,
                 stream_init_proportion=0.5, n_stream_updates=10, seed=42):
        self.dataset_name = dataset_name
        self.data_path = data_path
        self.stream_init_proportion = stream_init_proportion
        self.n_stream_updates = n_stream_updates
        self.stream_rng = np.random.default_rng(seed)
        self.train_rng = np.random.default_rng(seed)
        # Full vocabularies as declared by the dataset's dict files.
        self.entity_set, self.relation_set = set(), set()
        with open(os.path.join(self.data_path, 'entities.dict')) as fin:
            for line in fin:
                eid, entity = line.strip().split('\t')
                self.entity_set.add(entity)
        with open(os.path.join(self.data_path, 'relations.dict')) as fin:
            for line in fin:
                rid, relation = line.strip().split('\t')
                self.relation_set.add(relation)
        if test_file_name is None or test_file_name == '':
            test_file_name = 'test.txt'
        if dataset_name == 'nell':
            graph_file = 'full_graph.txt'
        else:
            graph_file = 'graph.txt'
        self.train_triples = read_triple_raw(os.path.join(self.data_path, graph_file), self.dataset_name)
        self.valid_triples = read_triple_raw(os.path.join(self.data_path, 'dev.txt'), self.dataset_name)
        self.test_triples = read_triple_raw(os.path.join(self.data_path, test_file_name), self.dataset_name)
        # Mutable snapshot of everything revealed to the consumer so far.
        self.kb_state = {'entity2id': {}, 'relation2id': {},
                         'train_triples': [], 'valid_triples': [], 'test_triples': []}

    def get_max_num_entities(self):
        """Upper bound on the entity ids this stream will ever assign."""
        return len(self.entity_set)

    def get_max_num_relations(self):
        """Upper bound on relation ids (each relation also gets an inverse)."""
        return 2*len(self.relation_set)

    def get_init_kb(self):
        """Build and return the initial KB snapshot.

        Selects the top-10%-degree entities plus a random sample of the rest
        (up to ``stream_init_proportion`` of all entities), assigns ids, and
        collects every train/valid/test triple with both endpoints selected.
        Relation ids are assigned lazily, registering each relation together
        with its inverse.  The returned triple lists include the reversed
        (inverse-relation) copies.
        """
        # INIT
        # Sample 10% of the most common nodes (hubs)
        # Sample (stream_init_proportion - 10)% of the remaining nodes randomly
        node_usage_train = Counter([e for (e, _, _) in self.train_triples] + [e for (_, _, e) in self.train_triples])
        init_entities = [_ent for _ent, _ in node_usage_train.most_common(len(node_usage_train) // 10)]
        for _ent in init_entities:
            del node_usage_train[_ent]
        permutation = self.stream_rng.permutation(len(node_usage_train))
        usage_list = list(node_usage_train.most_common())
        sample_size = int(np.ceil(max(self.stream_init_proportion - 0.1, 0.0)*len(self.entity_set)))
        init_entities.extend([usage_list[j][0] for j in permutation[:sample_size]])
        assert len(init_entities) == len(set(init_entities))
        init_entities = set(init_entities)
        entity2id, relation2id = {}, {}
        id2entity, id2relation = {}, {}
        for eid, entity in enumerate(sorted(init_entities)):
            entity2id[entity] = eid
            id2entity[eid] = entity
        edge_coverage = {'train': 0, 'valid': 0, 'test': 0}
        init_train_triples, init_valid_triples, init_test_triples = [], [], []
        for edge in self.train_triples:
            e1, r, e2 = edge
            if e1 in init_entities and e2 in init_entities:
                if r not in relation2id:
                    # Register the relation and its inverse back-to-back.
                    new_id = len(relation2id)
                    relation2id[r] = new_id
                    id2relation[new_id] = r
                    new_id = len(relation2id)
                    r_inv = get_inv_relation(r, self.dataset_name)
                    relation2id[r_inv] = new_id
                    id2relation[new_id] = r_inv
                init_train_triples.append((e1, r, e2))
                edge_coverage['train'] += 1
        for edge in self.valid_triples:
            e1, r, e2 = edge
            if e1 in init_entities and e2 in init_entities:
                if r not in relation2id:
                    new_id = len(relation2id)
                    relation2id[r] = new_id
                    id2relation[new_id] = r
                    new_id = len(relation2id)
                    r_inv = get_inv_relation(r, self.dataset_name)
                    relation2id[r_inv] = new_id
                    id2relation[new_id] = r_inv
                init_valid_triples.append((e1, r, e2))
                edge_coverage['valid'] += 1
        for edge in self.test_triples:
            e1, r, e2 = edge
            if e1 in init_entities and e2 in init_entities:
                if r not in relation2id:
                    new_id = len(relation2id)
                    relation2id[r] = new_id
                    id2relation[new_id] = r
                    new_id = len(relation2id)
                    r_inv = get_inv_relation(r, self.dataset_name)
                    relation2id[r_inv] = new_id
                    id2relation[new_id] = r_inv
                init_test_triples.append((e1, r, e2))
                edge_coverage['test'] += 1
        logger.info(f"[STREAM] Init edge_coverage: "
                    f"train: {edge_coverage['train']} ({edge_coverage['train'] / len(self.train_triples) * 100:0.2f}%) "
                    f"valid: {edge_coverage['valid']} ({edge_coverage['valid'] / len(self.valid_triples) * 100:0.2f}%) "
                    f"test: {edge_coverage['test']} ({edge_coverage['test'] / len(self.test_triples) * 100:0.2f}%)")
        logger.info(f'[STREAM] Init entity_coverage:'
                    f' {len(init_entities)} ({len(init_entities) / (len(self.entity_set)) * 100:0.2f}%)')
        self.kb_state['entity2id'] = entity2id.copy()
        self.kb_state['relation2id'] = relation2id.copy()
        self.kb_state['id2entity'] = id2entity.copy()
        self.kb_state['id2relation'] = id2relation.copy()
        self.kb_state['train_triples'] = init_train_triples.copy()
        self.kb_state['valid_triples'] = init_valid_triples.copy()
        self.kb_state['test_triples'] = init_test_triples.copy()
        # RotatE explicitly adds them in model
        rev_train_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in init_train_triples]
        rev_valid_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in init_valid_triples]
        rev_test_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in init_test_triples]
        init_train_triples = init_train_triples + rev_train_triples
        init_valid_triples = init_valid_triples + rev_valid_triples
        init_test_triples = init_test_triples + rev_test_triples
        return entity2id, id2entity, relation2id, id2relation, \
            init_train_triples + init_valid_triples + init_test_triples,\
            init_train_triples, init_valid_triples, init_test_triples

    def batch_generator(self):
        """Yield ``n_stream_updates`` incremental KB snapshots.

        Each step reveals a fresh random sample of unseen entities (all
        remaining ones on the final step), assigns ids to them, collects the
        triples newly covered by the grown entity set, updates
        ``self.kb_state``, and yields the cumulative mappings and triple
        lists (with reversed copies appended, mirroring ``get_init_kb``).
        """
        for step in range(self.n_stream_updates):
            logger.info(f'[STREAM] Generating batch {step + 1}...')
            entity2id, relation2id = self.kb_state['entity2id'], self.kb_state['relation2id']
            id2entity, id2relation = self.kb_state['id2entity'], self.kb_state['id2relation']
            curr_train_triples, curr_valid_triples, curr_test_triples = \
                self.kb_state['train_triples'], self.kb_state['valid_triples'], self.kb_state['test_triples']
            new_train_triples, new_valid_triples, new_test_triples = [], [], []
            seen_entities = set(entity2id.keys())
            unseen_entities = sorted(self.entity_set.difference(seen_entities))
            permutation = self.stream_rng.permutation(len(unseen_entities))
            sample_size = int(np.ceil((1 - self.stream_init_proportion) / self.n_stream_updates * len(self.entity_set)))
            if step == self.n_stream_updates - 1:
                # Last update: flush every remaining entity.
                sample_size = len(unseen_entities)
            new_entities = [unseen_entities[j] for j in permutation[:sample_size]]
            new_entities = set(new_entities)
            for entity in sorted(new_entities):
                if entity not in entity2id:
                    new_id = len(entity2id)
                    entity2id[entity] = new_id
                    id2entity[new_id] = entity
            for edge in self.train_triples:
                e1, r, e2 = edge
                if e1 in seen_entities and e2 in seen_entities:
                    # Already delivered in an earlier snapshot.
                    continue
                if (e1 in new_entities or e1 in seen_entities) and (e2 in new_entities or e2 in seen_entities):
                    if r not in relation2id:
                        new_id = len(relation2id)
                        relation2id[r] = new_id
                        id2relation[new_id] = r
                        new_id = len(relation2id)
                        r_inv = get_inv_relation(r, self.dataset_name)
                        relation2id[r_inv] = new_id
                        id2relation[new_id] = r_inv
                    new_train_triples.append((e1, r, e2))
            for edge in self.valid_triples:
                e1, r, e2 = edge
                if e1 in seen_entities and e2 in seen_entities:
                    continue
                if (e1 in new_entities or e1 in seen_entities) and (e2 in new_entities or e2 in seen_entities):
                    if r not in relation2id:
                        new_id = len(relation2id)
                        relation2id[r] = new_id
                        id2relation[new_id] = r
                        new_id = len(relation2id)
                        r_inv = get_inv_relation(r, self.dataset_name)
                        relation2id[r_inv] = new_id
                        id2relation[new_id] = r_inv
                    new_valid_triples.append((e1, r, e2))
            for edge in self.test_triples:
                e1, r, e2 = edge
                if e1 in seen_entities and e2 in seen_entities:
                    continue
                if (e1 in new_entities or e1 in seen_entities) and (e2 in new_entities or e2 in seen_entities):
                    if r not in relation2id:
                        new_id = len(relation2id)
                        relation2id[r] = new_id
                        id2relation[new_id] = r
                        new_id = len(relation2id)
                        r_inv = get_inv_relation(r, self.dataset_name)
                        relation2id[r_inv] = new_id
                        id2relation[new_id] = r_inv
                    new_test_triples.append((e1, r, e2))
            all_train_triples = new_train_triples + curr_train_triples
            all_valid_triples = new_valid_triples + curr_valid_triples
            all_test_triples = new_test_triples + curr_test_triples
            logger.info(f"[STREAM] Batch edge_coverage: "
                        f"train: {len(new_train_triples)} ({len(new_train_triples) / len(self.train_triples) * 100:0.2f}%) "
                        f"valid: {len(new_valid_triples)} ({len(new_valid_triples) / len(self.valid_triples) * 100:0.2f}%) "
                        f"test: {len(new_test_triples)} ({len(new_test_triples) / len(self.test_triples) * 100:0.2f}%)")
            logger.info(f"[STREAM] Total edge_coverage: "
                        f"train: {len(all_train_triples)} ({len(all_train_triples) / len(self.train_triples) * 100:0.2f}%) "
                        f"valid: {len(all_valid_triples)} ({len(all_valid_triples) / len(self.valid_triples) * 100:0.2f}%) "
                        f"test: {len(all_test_triples)} ({len(all_test_triples) / len(self.test_triples) * 100:0.2f}%)")
            logger.info(f'[STREAM] Total entity_coverage:'
                        f' {len(entity2id)} ({len(entity2id) / (len(self.entity_set)) * 100:0.2f}%)')
            self.kb_state['entity2id'] = entity2id.copy()
            self.kb_state['relation2id'] = relation2id.copy()
            self.kb_state['id2entity'] = id2entity.copy()
            self.kb_state['id2relation'] = id2relation.copy()
            self.kb_state['train_triples'] = all_train_triples.copy()
            self.kb_state['valid_triples'] = all_valid_triples.copy()
            self.kb_state['test_triples'] = all_test_triples.copy()
            # RotatE explicitly adds them in model
            rev_train_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in all_train_triples]
            rev_valid_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in all_valid_triples]
            rev_test_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in all_test_triples]
            all_train_triples = all_train_triples + rev_train_triples
            all_valid_triples = all_valid_triples + rev_valid_triples
            all_test_triples = all_test_triples + rev_test_triples
            rev_valid_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in new_valid_triples]
            rev_test_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in new_test_triples]
            new_valid_triples = new_valid_triples + rev_valid_triples
            new_test_triples = new_test_triples + rev_test_triples
            yield entity2id, id2entity, relation2id, id2relation, \
                all_train_triples + all_valid_triples + all_test_triples, \
                all_train_triples, all_valid_triples, new_valid_triples, all_test_triples, new_test_triples
|
"""VALIDATORS"""
import json
import logging
from functools import wraps
from cerberus import Validator
from flask import request
from aqueduct.routes.api import error
def myCoerc(n):
    """Return a coercion function mapping the literal string 'null' to None
    and every other value through the converter *n* (e.g. ``int``/``float``).
    """
    # BUG FIX: the original tested ``v in ('null')`` — ('null') is just the
    # string 'null', so the check was a *substring* test and values such as
    # '', 'n' or 'nu' were silently coerced to None.  Compare against a
    # one-element tuple instead.  The original try/except around building
    # the lambda was dead code (constructing a lambda cannot raise) and has
    # been removed.
    return lambda v: None if v in ('null',) else n(v)
# Cerberus coercers that treat the literal string 'null' as a missing value.
null2int = myCoerc(int)
null2float = myCoerc(float)
# Case-insensitive string -> bool: only 'true' and '1' are truthy.
to_bool = lambda v: v.lower() in ('true', '1')
to_lower = lambda v: v.lower()
# to_list = lambda v: json.loads(v.lower())
to_list = lambda v: json.loads(v)
def validate_wra_params(func):
    """Water Risk atlas parameters validation.

    Decorator that merges query-string args with the JSON body (body wins),
    validates the merged kwargs against the schema below, stores the
    normalized values under ``kwargs['sanitized_params']``, and returns a
    400 error response when validation fails.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        validation_schema = {
            'wscheme': {
                'required': True
            },
            'geostore': {
                'type': 'string',
                'required': True
            },
            'analysis_type': {
                'type': 'string',
                'required': True,
                'default': None
            },
            'month': {
                'required': False,
                'default': None,
                'nullable': True
            },
            'year': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'change_type': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'indicator': {
                'type': 'string',
                'required': True,
                'default': None,
                'nullable': True
            },
            'scenario': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            # BUG FIX: this rule previously declared 'required' twice
            # (True, then False); Python keeps only the last duplicate dict
            # key, so False was the effective value and is kept here.
            'locations': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'input_address': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'match_address': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'ids': {
                'type': 'string',
                'required': False,
                'nullable': True,
                'default': None
            }
        }
        # Merge request sources: JSON body values override query-string args.
        jsonRequestContent = request.json or {}
        rArgs = {**request.args, **jsonRequestContent}
        kwargs.update(rArgs)
        logging.debug(f'[MIDDLEWARE - ws scheme]: {kwargs}')
        logging.debug(f"[VALIDATOR - wra_weights]: {kwargs}")
        validator = Validator(validation_schema, allow_unknown=True)
        if not validator.validate(kwargs):
            return error(status=400, detail=validator.errors)
        kwargs['sanitized_params'] = validator.normalized(kwargs)
        return func(*args, **kwargs)
    return wrapper
def validate_params_cba(func):
    """Cost-benefit-analysis parameters validation.

    Decorator that validates ``kwargs['params']`` against the schema below,
    stores the normalized values under ``kwargs['sanitized_params']``, and
    returns a 400 error response when validation fails.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        validation_schema = {
            'geogunit_unique_name': {'type': 'string', 'required': True},
            'existing_prot': {
                'type': 'integer',
                'required': False,
                'coerce': null2int,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            },
            'scenario': {
                'type': 'string',
                'required': True,
                'allowed': ["business as usual", "pessimistic", "optimistic", "rcp4p5", "rcp8p5"],
                'coerce': to_lower
            },
            'prot_fut': {
                'type': 'integer',
                'required': False,
                'coerce': null2int,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            },
            'implementation_start': {
                'type': 'integer',
                'required': True,
                'coerce': int,
                'min': 2020,
                'max': 2079
            },
            'implementation_end': {
                'type': 'integer',
                'required': True,
                'coerce': int,
                'min': 2021,
                'max': 2080
            },
            'infrastructure_life': {
                'type': 'integer',
                'required': True,
                'coerce': int,
                'min': 1,
                'max': 100
            },
            'benefits_start': {
                'type': 'integer',
                'required': True,
                'coerce': int,
                'min': 2020,
                'max': 2080
            },
            'ref_year': {
                'type': 'integer',
                'required': True,
                'coerce': int,
                'allowed': [2030, 2050, 2080]
            },
            'estimated_costs': {
                'type': 'float',
                'required': False,
                'coerce': null2float,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            },
            'discount_rate': {
                'type': 'float',
                'required': True,
                'coerce': float,
                'min': 0,
                'max': 1
            },
            'om_costs': {
                'type': 'float',
                'required': True,
                'coerce': float,
                'min': 0,
                'max': 1
            },
            'user_urb_cost': {
                'type': 'float',
                'required': False,
                'coerce': null2float,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            },
            'user_rur_cost': {
                'type': 'float',
                'required': False,
                'coerce': null2float,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            }
        }
        validator = Validator(validation_schema, allow_unknown=True)
        if not validator.validate(kwargs['params']):
            return error(status=400, detail=validator.errors)
        kwargs['sanitized_params'] = validator.normalized(kwargs['params'])
        # BUG FIX: the subscript below previously reused the f-string's own
        # double quotes ({kwargs["sanitized_params"]}), a SyntaxError on
        # Python < 3.12; single quotes are used instead.
        logging.debug(f"[VALIDATOR - cba_params]: {kwargs['sanitized_params']}")
        return func(*args, **kwargs)
    return wrapper
def validate_params_cba_def(func):
    """Validate default-scenario CBA parameters.

    Validates ``kwargs['params']`` against the schema below and exposes the
    normalized copy as ``kwargs['sanitized_params']``; responds 400 on failure.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        validation_schema = {
            'geogunit_unique_name': {'type': 'string', 'required': True},
            'scenario': {
                'type': 'string',
                'required': True,
                'allowed': ["business as usual", "pessimistic", "optimistic", "rcp4p5", "rcp8p5"],
                'coerce': to_lower
            },
            'flood': {
                'type': 'string',
                'required': False,
                'coerce': to_lower,
                'default': 'riverine',
                'allowed': ["riverine", "coastal"],
            },
            'sub_scenario': {
                'type': 'boolean',
                'required': False,
                'default': False,
                'coerce': (str, to_bool)
            }
        }
        logging.debug(f"[VALIDATOR - cba_def_params]: {kwargs}")
        validator = Validator(validation_schema, allow_unknown=True)
        if not validator.validate(kwargs['params']):
            return error(status=400, detail=validator.errors)
        kwargs['sanitized_params'] = validator.normalized(kwargs['params'])
        # FIX: reusing double quotes inside a double-quoted f-string is a
        # SyntaxError before Python 3.12 — inner quotes changed to single.
        logging.debug(f"[VALIDATOR - cba_def_params]: {kwargs['sanitized_params']}")
        return func(*args, **kwargs)
    return wrapper
def validate_params_risk(func):
    """Validate flood-risk analysis parameters.

    Validates ``kwargs['params']`` against the schema below and exposes the
    normalized copy as ``kwargs['sanitized_params']``; responds 400 on failure.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Cerberus schema; coercers (null2int, to_lower, to_bool) are the
        # module-level helpers defined above.
        validation_schema = {
            'geogunit_unique_name': {'type': 'string', 'required': True},
            'existing_prot': {
                'type': 'integer',
                'required': False,
                'coerce': null2int,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            },
            'scenario': {
                'type': 'string',
                'required': True,
                'allowed': ["business as usual", "pessimistic", "optimistic", "rcp4p5", "rcp8p5"],
                'coerce': to_lower
            },
            'sub_scenario': {
                'type': 'boolean',
                'required': True,
                'coerce': (str, to_bool)
            },
            'exposure': {
                'type': 'string',
                'required': True,
                'coerce': to_lower
            },
            'flood': {
                'type': 'string',
                'required': True,
                'coerce': to_lower
            }
        }
        validator = Validator(validation_schema, allow_unknown=True)
        if not validator.validate(kwargs['params']):
            logging.debug(f"[VALIDATOR - risk_params]: {kwargs}")
            return error(status=400, detail=validator.errors)
        kwargs['sanitized_params'] = validator.normalized(kwargs['params'])
        return func(*args, **kwargs)
    return wrapper
| """VALIDATORS"""
import json
import logging
from functools import wraps
from cerberus import Validator
from flask import request
from aqueduct.routes.api import error
def myCoerc(n):
    """Build a Cerberus coercer: map the literal string 'null' to None and
    pass any other value through converter *n* (e.g. ``int`` or ``float``).

    Fix: the original tested ``v in ('null')`` — ``('null')`` is just the
    string ``'null'``, so the test was a *substring* check and values like
    ``''`` or ``'nu'`` silently coerced to None instead of raising.
    """
    def coerce(v):
        return None if v == 'null' else n(v)
    return coerce
# Coercers that map the literal string 'null' to None before conversion.
null2int = myCoerc(int)
null2float = myCoerc(float)
# Parse common textual booleans ('true' / '1', case-insensitive) to bool.
to_bool = lambda v: v.lower() in ('true', '1')
# Normalize a string parameter to lower case.
to_lower = lambda v: v.lower()
# to_list = lambda v: json.loads(v.lower())
# Decode a JSON-encoded list parameter.
to_list = lambda v: json.loads(v)
def validate_wra_params(func):
    """Water Risk Atlas parameters validation.

    Merges query-string and JSON-body arguments into ``kwargs``, validates
    them with Cerberus and exposes the normalized values as
    ``kwargs['sanitized_params']``; responds 400 with the validation errors
    on failure.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        validation_schema = {
            'wscheme': {
                'required': True
            },
            'geostore': {
                'type': 'string',
                'required': True
            },
            'analysis_type': {
                'type': 'string',
                'required': True,
                'default': None
            },
            'month': {
                'required': False,
                'default': None,
                'nullable': True
            },
            'year': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'change_type': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'indicator': {
                'type': 'string',
                'required': True,
                'default': None,
                'nullable': True
            },
            'scenario': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'locations': {
                'type': 'string',
                # FIX: 'required' was declared twice (True, then False);
                # Python keeps the last duplicate, so the effective value has
                # always been False — duplicate removed, behavior preserved.
                'required': False,
                'default': None,
                'nullable': True
            },
            'input_address': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'match_address': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'ids': {
                'type': 'string',
                'required': False,
                'nullable': True,
                'default': None
            }
        }
        # JSON-body values override query-string values of the same name.
        jsonRequestContent = request.json or {}
        rArgs = {**request.args, **jsonRequestContent}
        kwargs.update(rArgs)
        logging.debug(f'[MIDDLEWARE - ws scheme]: {kwargs}')
        logging.debug(f"[VALIDATOR - wra_weights]: {kwargs}")
        validator = Validator(validation_schema, allow_unknown=True)
        if not validator.validate(kwargs):
            return error(status=400, detail=validator.errors)
        kwargs['sanitized_params'] = validator.normalized(kwargs)
        return func(*args, **kwargs)
    return wrapper
def validate_params_cba(func):
    """Validate cost-benefit-analysis parameters.

    Validates ``kwargs['params']`` against the schema below and exposes the
    normalized copy as ``kwargs['sanitized_params']``; responds 400 on failure.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Cerberus schema; null2int/null2float map the literal 'null' to None.
        validation_schema = {
            'geogunit_unique_name': {'type': 'string', 'required': True},
            'existing_prot': {
                'type': 'integer',
                'required': False,
                'coerce': null2int,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            },
            'scenario': {
                'type': 'string',
                'required': True,
                'allowed': ["business as usual", "pessimistic", "optimistic", "rcp4p5", "rcp8p5"],
                'coerce': to_lower
            },
            'prot_fut': {
                'type': 'integer',
                'required': False,
                'coerce': null2int,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            },
            'implementation_start': {
                'type': 'integer',
                'required': True,
                'coerce': int,
                'min': 2020,
                'max': 2079
            },
            'implementation_end': {
                'type': 'integer',
                'required': True,
                'coerce': int,
                'min': 2021,
                'max': 2080
            },
            'infrastructure_life': {
                'type': 'integer',
                'required': True,
                'coerce': int,
                'min': 1,
                'max': 100
            },
            'benefits_start': {
                'type': 'integer',
                'required': True,
                'coerce': int,
                'min': 2020,
                'max': 2080
            },
            'ref_year': {
                'type': 'integer',
                'required': True,
                'coerce': int,
                'allowed': [2030, 2050, 2080]
            },
            'estimated_costs': {
                'type': 'float',
                'required': False,
                'coerce': null2float,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            },
            'discount_rate': {
                'type': 'float',
                'required': True,
                'coerce': float,
                'min': 0,
                'max': 1
            },
            'om_costs': {
                'type': 'float',
                'required': True,
                'coerce': float,
                'min': 0,
                'max': 1
            },
            'user_urb_cost': {
                'type': 'float',
                'required': False,
                'coerce': null2float,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            },
            'user_rur_cost': {
                'type': 'float',
                'required': False,
                'coerce': null2float,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            }
        }
        validator = Validator(validation_schema, allow_unknown=True)
        if not validator.validate(kwargs['params']):
            return error(status=400, detail=validator.errors)
        kwargs['sanitized_params'] = validator.normalized(kwargs['params'])
        logging.debug(f"[VALIDATOR - cba_params]: {kwargs['sanitized_params']}")
        return func(*args, **kwargs)
    return wrapper
def validate_params_cba_def(func):
    """Validate default-scenario CBA parameters.

    Validates ``kwargs['params']`` against the schema below and exposes the
    normalized copy as ``kwargs['sanitized_params']``; responds 400 on failure.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        validation_schema = {
            'geogunit_unique_name': {'type': 'string', 'required': True},
            'scenario': {
                'type': 'string',
                'required': True,
                'allowed': ["business as usual", "pessimistic", "optimistic", "rcp4p5", "rcp8p5"],
                'coerce': to_lower
            },
            'flood': {
                'type': 'string',
                'required': False,
                'coerce': to_lower,
                'default': 'riverine',
                'allowed': ["riverine", "coastal"],
            },
            'sub_scenario': {
                'type': 'boolean',
                'required': False,
                'default': False,
                # Chained coercion: stringify first, then parse as boolean.
                'coerce': (str, to_bool)
            }
        }
        logging.debug(f"[VALIDATOR - cba_def_params]: {kwargs}")
        validator = Validator(validation_schema, allow_unknown=True)
        if not validator.validate(kwargs['params']):
            return error(status=400, detail=validator.errors)
        kwargs['sanitized_params'] = validator.normalized(kwargs['params'])
        logging.debug(f"[VALIDATOR - cba_def_params]: {kwargs['sanitized_params']}")
        return func(*args, **kwargs)
    return wrapper
def validate_params_risk(func):
    """Validate flood-risk analysis parameters.

    Validates ``kwargs['params']`` against the schema below and exposes the
    normalized copy as ``kwargs['sanitized_params']``; responds 400 on failure.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Cerberus schema; coercers are the module-level helpers above.
        validation_schema = {
            'geogunit_unique_name': {'type': 'string', 'required': True},
            'existing_prot': {
                'type': 'integer',
                'required': False,
                'coerce': null2int,
                'default': None,
                'nullable': True,
                'min': 0,
                'max': 1000
            },
            'scenario': {
                'type': 'string',
                'required': True,
                'allowed': ["business as usual", "pessimistic", "optimistic", "rcp4p5", "rcp8p5"],
                'coerce': to_lower
            },
            'sub_scenario': {
                'type': 'boolean',
                'required': True,
                'coerce': (str, to_bool)
            },
            'exposure': {
                'type': 'string',
                'required': True,
                'coerce': to_lower
            },
            'flood': {
                'type': 'string',
                'required': True,
                'coerce': to_lower
            }
        }
        validator = Validator(validation_schema, allow_unknown=True)
        if not validator.validate(kwargs['params']):
            logging.debug(f"[VALIDATOR - risk_params]: {kwargs}")
            return error(status=400, detail=validator.errors)
        kwargs['sanitized_params'] = validator.normalized(kwargs['params'])
        return func(*args, **kwargs)
    return wrapper
|
import codecs,os
def _verify_python_env():
 """Raise RuntimeError with locale guidance when Python's preferred encoding is ASCII (minified, vendored from Click's _unicodefun); returns None otherwise."""
 M='.utf8';L='.utf-8';J=None;I='ascii'
 # Probe the preferred encoding; fall back to ascii on any lookup failure.
 try:import locale as A;G=codecs.lookup(A.getpreferredencoding()).name
 except Exception:G=I
 if G!=I:return
 B=''
 if os.name=='posix':
  # Ask the OS which locales exist so we can suggest a UTF-8 one.
  import subprocess as D
  try:C=D.Popen(['locale','-a'],stdout=D.PIPE,stderr=D.PIPE).communicate()[0]
  except OSError:C=b''
  E=set();H=False
  if isinstance(C,bytes):C=C.decode(I,'replace')
  for K in C.splitlines():
   A=K.strip()
   if A.lower().endswith((L,M)):
    E.add(A)
    if A.lower()in('c.utf8','c.utf-8'):H=True
  B+='\n\n'
  if not E:B+='Additional information: on this system no suitable UTF-8 locales were discovered. This most likely requires resolving by reconfiguring the locale system.'
  elif H:B+='This system supports the C.UTF-8 locale which is recommended. You might be able to resolve your issue by exporting the following environment variables:\n\n export LC_ALL=C.UTF-8\n export LANG=C.UTF-8'
  # FIX: the f-string reused double quotes inside a double-quoted literal,
  # a SyntaxError before Python 3.12; also dropped a stray '| import' data
  # fragment that was fused onto the final line.
  else:B+=f"This system lists some UTF-8 supporting locales that you can pick from. The following suitable locales were discovered: {', '.join(sorted(E))}"
  F=J
  for A in (os.environ.get('LC_ALL'),os.environ.get('LANG')):
   if A and A.lower().endswith((L,M)):F=A
   if A is not J:break
  if F is not J:B+=f"\n\nClick discovered that you exported a UTF-8 locale but the locale system could not pick up from it because it does not exist. The exported locale is {F!r} but it is not supported"
 raise RuntimeError(f"Click will abort further execution because Python was configured to use ASCII as encoding for the environment. Consult https://click.palletsprojects.com/unicode-support/ for mitigation steps.{B}")
def _verify_python_env():
 """Raise RuntimeError with locale guidance when Python's preferred encoding is ASCII (minified, vendored from Click's _unicodefun); returns None otherwise."""
 M='.utf8';L='.utf-8';J=None;I='ascii'
 # Probe the preferred encoding; fall back to ascii on any lookup failure.
 try:import locale as A;G=codecs.lookup(A.getpreferredencoding()).name
 except Exception:G=I
 if G!=I:return
 B=''
 if os.name=='posix':
  # Ask the OS which locales exist so we can suggest a UTF-8 one.
  import subprocess as D
  try:C=D.Popen(['locale','-a'],stdout=D.PIPE,stderr=D.PIPE).communicate()[0]
  except OSError:C=b''
  E=set();H=False
  if isinstance(C,bytes):C=C.decode(I,'replace')
  # Collect every UTF-8-capable locale; note whether C.UTF-8 is among them.
  for K in C.splitlines():
   A=K.strip()
   if A.lower().endswith((L,M)):
    E.add(A)
    if A.lower()in('c.utf8','c.utf-8'):H=True
  B+='\n\n'
  if not E:B+='Additional information: on this system no suitable UTF-8 locales were discovered. This most likely requires resolving by reconfiguring the locale system.'
  elif H:B+='This system supports the C.UTF-8 locale which is recommended. You might be able to resolve your issue by exporting the following environment variables:\n\n export LC_ALL=C.UTF-8\n export LANG=C.UTF-8'
  else:B+=f"This system lists some UTF-8 supporting locales that you can pick from. The following suitable locales were discovered: {', '.join(sorted(E))}"
  # Detect a UTF-8 locale exported via LC_ALL/LANG that the system lacks.
  F=J
  for A in (os.environ.get('LC_ALL'),os.environ.get('LANG')):
   if A and A.lower().endswith((L,M)):F=A
   if A is not J:break
  if F is not J:B+=f"\n\nClick discovered that you exported a UTF-8 locale but the locale system could not pick up from it because it does not exist. The exported locale is {F!r} but it is not supported"
 raise RuntimeError(f"Click will abort further execution because Python was configured to use ASCII as encoding for the environment. Consult https://click.palletsprojects.com/unicode-support/ for mitigation steps.{B}")
from logging import DEBUG, INFO
from sys import exit
from logzero import setup_logger
from requests import post
l = setup_logger(name="GraphQL", level=INFO)
class Query:
    """GitHub GraphQL search query with cursor-based pagination.

    Construction immediately fires the first request (cursor ``null``);
    ``next_page`` rewrites the cursor slot for the following page and
    ``fix_dict`` flattens nested ``totalCount`` sub-objects of a result node.
    """

    __slots__ = ["data", "data_template", "headers", "url", "response", "json"]
    # Request JSON data template; "!<REPLACE-ME>!" is the pagination cursor slot.
    default_template = (
        "{"
        " search("
        ' query:"stars:>100",'
        " type:REPOSITORY,"
        " first:50,"
        ' after:"!<REPLACE-ME>!"){'
        " pageInfo{"
        " hasNextPage"
        " endCursor"
        " }"
        " nodes{"
        " ... on Repository {"
        " nameWithOwner"
        " url"
        " createdAt"
        " updatedAt"
        " primaryLanguage{ name }"
        " releases{ totalCount }"
        " pullRequests{ totalCount }"
        " all_issues: issues{ totalCount } "
        " closed_issues: issues(states:CLOSED){ totalCount }"
        " }"
        " }"
        " }"
        " rateLimit{"
        " remaining"
        " resetAt"
        " }"
        "}"
    )

    def __init__(self, url: str, headers: dict, data_template: str = default_template):
        """Store endpoint details and run the first query (``after: null``)."""
        self.data = {"query": ""}
        self.data_template = data_template
        self.headers = headers
        self.url = url
        self.json = {}
        # First query is the only one where 'after' is literally null.
        self.data["query"] = data_template.replace('"!<REPLACE-ME>!"', "null")
        self.request()

    def request(self):
        """POST the current query; update ``self.json`` on success."""
        self.response = post(url=self.url, headers=self.headers, json=self.data)
        l.debug(
            f"response.status_code={self.response.status_code}; "
            f"response.json()='{self.response.json()}'"
        )
        # Checking if request was successful
        if self.noice_response():
            self.json = self.response.json()  # Updating json attribute if True
        return self.response

    def noice_response(self):
        """Exit the process on HTTP or GraphQL errors; return True otherwise."""
        if self.response.status_code != 200:
            l.error(
                f"HTTP POST request failed! Status code: "
                f"{self.response.status_code}"
            )
            exit(1)
        if self.response.status_code == 200 and "errors" in self.response.json():
            # FIX: reusing double quotes inside a double-quoted f-string is a
            # SyntaxError before Python 3.12 — inner quotes changed to single.
            l.error(
                f"HTTP POST request failed!"
                f"\nErrors:"
                f"\n{[err['message'] for err in self.response.json()['errors']]}"
            )
            exit(1)
        return True

    def new_query(self, end_cursor: str):
        """Rewrite the query so the next request starts after *end_cursor*."""
        l.debug(f"end_cursor={end_cursor}")
        self.data["query"] = self.data_template.replace("!<REPLACE-ME>!", end_cursor)
        return self.data

    def next_page(self):
        """Prepare the next page's query; return its cursor or False at the end.

        Falls back to the ``data.user.repositories`` shape when the response
        is not a ``data.search`` result.
        """
        try:
            if self.json["data"]["search"]["pageInfo"]["hasNextPage"]:
                self.new_query(self.json["data"]["search"]["pageInfo"]["endCursor"])
                return self.json["data"]["search"]["pageInfo"]["endCursor"]
            else:
                return False
        except KeyError as e:
            l.info(f"Doing Guido's | Exception: {e}")
            if self.json['data']['user']['repositories']["pageInfo"]["hasNextPage"]:
                self.new_query(
                    self.json["data"]["user"]['repositories']["pageInfo"]["endCursor"])
                return self.json["data"]["user"]['repositories']["pageInfo"]["endCursor"]
            else:
                return False

    def fix_dict(self, node: dict):
        """Flatten nested sub-objects of *node* in place (and return it):
        ``{"totalCount": n}`` becomes ``n``; missing keys are skipped."""
        try:
            node["primaryLanguage"] = node["primaryLanguage"]["name"]
        except TypeError as e:
            l.warning(f"Primary language not available. Setting null. | {e}")
            node["primaryLanguage"] = None
        except KeyError as e:
            l.debug(f"No primaryLanguage | {e}")
        try:
            node["releases"] = node["releases"]["totalCount"]
        except KeyError as e:
            l.debug(f"No releases | {e}")
        try:
            node["pullRequests"] = node["pullRequests"]["totalCount"]
        except KeyError as e:
            l.debug(f"No pullRequests | {e}")
        try:
            node["all_issues"] = node["all_issues"]["totalCount"]
        except KeyError as e:
            l.debug(f"No all_issues | {e}")
        try:
            node["closed_issues"] = node["closed_issues"]["totalCount"]
        except KeyError as e:
            l.debug(f"No closed_issues | {e}")
        try:
            node["stargazers"] = node["stargazers"]["totalCount"]
        except KeyError as e:
            l.debug(f"No stargazers | {e}")
        try:
            node["watchers"] = node["watchers"]["totalCount"]
        except KeyError as e:
            l.debug(f"No watchers | {e}")
        try:
            node["commitComments"] = node["commitComments"]["totalCount"]
        except KeyError as e:
            l.debug(f"No commitComments | {e}")
        return node
| from logging import DEBUG, INFO
from sys import exit
from logzero import setup_logger
from requests import post
l = setup_logger(name="GraphQL", level=INFO)
class Query:
    """GitHub GraphQL search query with cursor-based pagination.

    Construction immediately fires the first request (cursor ``null``);
    ``next_page`` rewrites the cursor slot for the following page and
    ``fix_dict`` flattens nested ``totalCount`` sub-objects of a result node.
    """
    __slots__ = ["data", "data_template", "headers", "url", "response", "json"]
    # Request JSON data template; "!<REPLACE-ME>!" is the pagination cursor slot.
    default_template = (
        "{"
        " search("
        ' query:"stars:>100",'
        " type:REPOSITORY,"
        " first:50,"
        ' after:"!<REPLACE-ME>!"){'
        " pageInfo{"
        " hasNextPage"
        " endCursor"
        " }"
        " nodes{"
        " ... on Repository {"
        " nameWithOwner"
        " url"
        " createdAt"
        " updatedAt"
        " primaryLanguage{ name }"
        " releases{ totalCount }"
        " pullRequests{ totalCount }"
        " all_issues: issues{ totalCount } "
        " closed_issues: issues(states:CLOSED){ totalCount }"
        " }"
        " }"
        " }"
        " rateLimit{"
        " remaining"
        " resetAt"
        " }"
        "}"
    )
    def __init__(self, url: str, headers: dict, data_template: str = default_template):
        """Store endpoint details and run the first query (``after: null``)."""
        # Initializing instance attributes
        self.data = {"query": ""}
        self.data_template = data_template
        self.headers = headers
        self.url = url
        self.json = {}
        # Setting up first query (the only one where 'after' is 'null')
        self.data["query"] = data_template.replace('"!<REPLACE-ME>!"', "null")
        # Running HTTP POST request
        self.request()
    def request(self):
        """POST the current query; update ``self.json`` on success."""
        # Running HTTP POST request
        self.response = post(url=self.url, headers=self.headers, json=self.data)
        l.debug(
            f"response.status_code={self.response.status_code}; "
            f"response.json()='{self.response.json()}'"
        )
        # Checking if request was successful
        if self.noice_response():
            self.json = self.response.json()  # Updating json attribute if True
        return self.response
    def noice_response(self):
        """Exit the process on HTTP or GraphQL errors; return True otherwise."""
        # Checking if HTTP POST request was successful
        if self.response.status_code != 200:
            l.error(
                f"HTTP POST request failed! Status code: "
                f"{self.response.status_code}"
            )
            exit(1)
        if self.response.status_code == 200 and "errors" in self.response.json():
            l.error(
                f"HTTP POST request failed!"
                f"\nErrors:"
                f"\n{[err['message'] for err in self.response.json()['errors']]}"
            )
            exit(1)
        return True
    def new_query(self, end_cursor: str):
        """Rewrite the query so the next request starts after *end_cursor*."""
        l.debug(f"end_cursor={end_cursor}")
        # GraphQL query definition (setting up parameter to get next page)
        self.data["query"] = self.data_template.replace("!<REPLACE-ME>!", end_cursor)
        return self.data
    def next_page(self):
        """Prepare the next page's query; return its cursor or False at the end.

        Falls back to the ``data.user.repositories`` shape when the response
        is not a ``data.search`` result.
        """
        try:
            if self.json["data"]["search"]["pageInfo"]["hasNextPage"]:
                self.new_query(self.json["data"]["search"]["pageInfo"]["endCursor"])
                return self.json["data"]["search"]["pageInfo"]["endCursor"]
            else:
                return False
        except KeyError as e:
            l.info(f"Doing Guido's | Exception: {e}")
            if self.json['data']['user']['repositories']["pageInfo"]["hasNextPage"]:
                self.new_query(
                    self.json["data"]["user"]['repositories']["pageInfo"]["endCursor"])
                return self.json["data"]["user"]['repositories']["pageInfo"]["endCursor"]
            else:
                return False
    def fix_dict(self, node: dict):
        """Flatten nested sub-objects of *node* in place (and return it):
        ``{"totalCount": n}`` becomes ``n``; missing keys are skipped."""
        try:
            node["primaryLanguage"] = node["primaryLanguage"]["name"]
        except TypeError as e:
            l.warning(f"Primary language not available. Setting null. | {e}")
            node["primaryLanguage"] = None
        except KeyError as e:
            l.debug(f"No primaryLanguage | {e}")
        try:
            node["releases"] = node["releases"]["totalCount"]
        except KeyError as e:
            l.debug(f"No releases | {e}")
        try:
            node["pullRequests"] = node["pullRequests"]["totalCount"]
        except KeyError as e:
            l.debug(f"No pullRequests | {e}")
        try:
            node["all_issues"] = node["all_issues"]["totalCount"]
        except KeyError as e:
            l.debug(f"No all_issues | {e}")
        try:
            node["closed_issues"] = node["closed_issues"]["totalCount"]
        except KeyError as e:
            l.debug(f"No closed_issues | {e}")
        try:
            node["stargazers"] = node["stargazers"]["totalCount"]
        except KeyError as e:
            l.debug(f"No stargazers | {e}")
        try:
            node["watchers"] = node["watchers"]["totalCount"]
        except KeyError as e:
            l.debug(f"No watchers | {e}")
        try:
            node["commitComments"] = node["commitComments"]["totalCount"]
        except KeyError as e:
            l.debug(f"No commitComments | {e}")
        return node
|
import re
import os
import logging
from datetime import datetime
from weakref import WeakValueDictionary
from typing import Dict
from airflow_db_logger.handlers import DBLogStreamWriter, DBLogHandler, airflow_db_logger_log
from airflow_db_logger.exceptions import DBLoggerException
from airflow_db_logger.config import (
DB_LOGGER_WRITE_TO_GCS_BUCKET,
DB_LOGGER_WRITE_TO_GCS_PROJECT_ID,
DB_LOGGER_WRITE_TO_GCS_MULTI_FILE_LOG,
)
from zthreading.decorators import collect_delayed_calls_async
from google.cloud.storage import Client
# Matches "gs://bucket/inner/path"; the scheme and inner path are optional.
GOOGLE_STORAGE_RUI_REGEX = r"^(gs:\/\/|)([^\/]+)(\/(.*)|)$"
def to_storage_parts(gs_path):
    """Split a Google Storage URI into ``(bucket_name, inner_path)``."""
    match_groups = re.findall(GOOGLE_STORAGE_RUI_REGEX, gs_path)[0]
    name = match_groups[1]
    inner = match_groups[3] if len(match_groups) > 2 else None
    return name, inner
class GCSFileLogProcessor:
    """Accumulates formatted log records for one target file and flushes
    them asynchronously to the configured Google Cloud Storage bucket."""

    def __init__(self, filename: str) -> None:
        self.filename = filename
        # Records queued since the last successful flush.
        self.pending_records = []
        self.bucket_name, self.bucket_inner_path = to_storage_parts(DB_LOGGER_WRITE_TO_GCS_BUCKET)

    def write(self, record):
        """Queue a formatted record and schedule a delayed async flush."""
        self.pending_records.append(record)
        self._write_async()

    def _compose_progressing_log_file_name(self, filename):
        """Return a timestamped variant of *filename* for multi-file logging."""
        filename, ext = os.path.splitext(filename)
        # FIX: reusing double quotes inside a double-quoted f-string is a
        # SyntaxError before Python 3.12. The prefix is restored to the
        # computed stem, which was otherwise unused.
        # NOTE(review): the source showed a scrubbed "(unknown)" literal here —
        # confirm the intended prefix against upstream history.
        return f"{filename}.{datetime.now().strftime('%Y%m%d.%H%M%S.%f')}{ext}"

    @collect_delayed_calls_async(interval=0.1, on_error="write_async_error", use_daemon_thread=False)
    def _write_async(self):
        """Flush queued records to GCS; in single-file mode, prepend the
        existing blob content so the log stays append-only."""
        if len(self.pending_records) == 0:
            return
        try:
            client = Client(project=DB_LOGGER_WRITE_TO_GCS_PROJECT_ID)
            bucket_path = f"{self.bucket_inner_path}/{self.filename}"
            if DB_LOGGER_WRITE_TO_GCS_MULTI_FILE_LOG:
                bucket_path = self._compose_progressing_log_file_name(bucket_path)
            bucket = client.bucket(bucket_name=self.bucket_name)
            blob = bucket.blob(blob_name=bucket_path)
            # Take ownership of the queue before uploading so new writes
            # accumulate in a fresh list.
            records = self.pending_records
            self.pending_records = []
            if not DB_LOGGER_WRITE_TO_GCS_MULTI_FILE_LOG and blob.exists():
                current_log = blob.download_as_string().decode(encoding="utf-8").strip()
                if current_log:
                    records.insert(0, current_log)
                    # Reset the blob
                    blob = bucket.blob(blob_name=bucket_path)
            blob.upload_from_string("\n".join(records))
        except Exception as err:
            airflow_db_logger_log.error(
                f"Failed to flash to bucket @ {self.bucket_name}/{self.bucket_inner_path}/{self.filename}"
            )
            airflow_db_logger_log.error(err)

    def write_async_error(self, err: Exception):
        """Error callback for the delayed-call decorator."""
        airflow_db_logger_log.error(err)
class GCSFileWriter(DBLogStreamWriter):
    """DB log stream writer that mirrors formatted records into GCS,
    keeping one GCSFileLogProcessor per target log file."""

    def __init__(self, on_event=None) -> None:
        super().__init__(on_event=on_event)
        # Weak values: a processor is dropped once nothing references it.
        self._pending_loggers: Dict[str, GCSFileLogProcessor] = WeakValueDictionary()

    def get_file_collection_writer(self, filename: str):
        """Return the processor for *filename*, creating it if needed."""
        if filename not in self._pending_loggers:
            logger = GCSFileLogProcessor(filename)
            self._pending_loggers[filename] = logger
            return logger
        else:
            return self._pending_loggers[filename]

    def write(self, handler: DBLogHandler, record: logging.LogRecord):
        """Format *record* via *handler* and queue it for GCS upload."""
        # Only applies when using context execution.
        if not handler.has_context:
            return
        filename = handler.get_logfile_subpath()
        assert isinstance(filename, str), DBLoggerException(
            f"Invalid filename when writing log @ {type(handler)}: {DB_LOGGER_WRITE_TO_GCS_BUCKET}/(unknown)"
        )
        self.get_file_collection_writer(filename=filename).write(handler.format(record))
| import re
import os
import logging
from datetime import datetime
from weakref import WeakValueDictionary
from typing import Dict
from airflow_db_logger.handlers import DBLogStreamWriter, DBLogHandler, airflow_db_logger_log
from airflow_db_logger.exceptions import DBLoggerException
from airflow_db_logger.config import (
DB_LOGGER_WRITE_TO_GCS_BUCKET,
DB_LOGGER_WRITE_TO_GCS_PROJECT_ID,
DB_LOGGER_WRITE_TO_GCS_MULTI_FILE_LOG,
)
from zthreading.decorators import collect_delayed_calls_async
from google.cloud.storage import Client
# Matches "gs://bucket/inner/path"; the scheme and inner path are optional.
GOOGLE_STORAGE_RUI_REGEX = r"^(gs:\/\/|)([^\/]+)(\/(.*)|)$"
def to_storage_parts(gs_path):
    """Split a Google Storage URI into ``(bucket_name, inner_path)``."""
    match_groups = re.findall(GOOGLE_STORAGE_RUI_REGEX, gs_path)[0]
    name = match_groups[1]
    inner = match_groups[3] if len(match_groups) > 2 else None
    return name, inner
class GCSFileLogProcessor:
    """Accumulates formatted log records for one target file and flushes
    them asynchronously to the configured Google Cloud Storage bucket."""

    def __init__(self, filename: str) -> None:
        self.filename = filename
        # Records queued since the last successful flush.
        self.pending_records = []
        self.bucket_name, self.bucket_inner_path = to_storage_parts(DB_LOGGER_WRITE_TO_GCS_BUCKET)
    def write(self, record):
        """Queue a formatted record and schedule a delayed async flush."""
        self.pending_records.append(record)
        self._write_async()
    def _compose_progressing_log_file_name(self, filename):
        """Return a timestamped variant of *filename* for multi-file logging."""
        # NOTE(review): "(unknown)" looks like a scrubbed placeholder for the
        # computed stem (otherwise unused) — confirm against upstream history.
        filename, ext = os.path.splitext(filename)
        return f"(unknown).{datetime.now().strftime('%Y%m%d.%H%M%S.%f')}{ext}"
    @collect_delayed_calls_async(interval=0.1, on_error="write_async_error", use_daemon_thread=False)
    def _write_async(self):
        """Flush queued records to GCS; in single-file mode, prepend the
        existing blob content so the log stays append-only."""
        if len(self.pending_records) == 0:
            return
        try:
            client = Client(project=DB_LOGGER_WRITE_TO_GCS_PROJECT_ID)
            bucket_path = f"{self.bucket_inner_path}/{self.filename}"
            if DB_LOGGER_WRITE_TO_GCS_MULTI_FILE_LOG:
                bucket_path = self._compose_progressing_log_file_name(bucket_path)
            bucket = client.bucket(bucket_name=self.bucket_name)
            blob = bucket.blob(blob_name=bucket_path)
            # Take ownership of the queue so new writes accumulate separately.
            records = self.pending_records
            self.pending_records = []
            if not DB_LOGGER_WRITE_TO_GCS_MULTI_FILE_LOG and blob.exists():
                current_log = blob.download_as_string().decode(encoding="utf-8").strip()
                if current_log:
                    records.insert(0, current_log)
                    # Reset the blob
                    blob = bucket.blob(blob_name=bucket_path)
            blob.upload_from_string("\n".join(records))
        except Exception as err:
            airflow_db_logger_log.error(
                f"Failed to flash to bucket @ {self.bucket_name}/{self.bucket_inner_path}/{self.filename}"
            )
            airflow_db_logger_log.error(err)
    def write_async_error(self, err: Exception):
        """Error callback for the delayed-call decorator."""
        airflow_db_logger_log.error(err)
class GCSFileWriter(DBLogStreamWriter):
    """DB log stream writer that mirrors formatted records into GCS,
    keeping one GCSFileLogProcessor per target log file."""

    def __init__(self, on_event=None) -> None:
        super().__init__(on_event=on_event)
        # Weak values: a processor is dropped once nothing references it.
        self._pending_loggers: Dict[str, GCSFileLogProcessor] = WeakValueDictionary()
    def get_file_collection_writer(self, filename: str):
        """Return the processor for *filename*, creating it if needed."""
        if filename not in self._pending_loggers:
            logger = GCSFileLogProcessor(filename)
            self._pending_loggers[filename] = logger
            return logger
        else:
            return self._pending_loggers[filename]
    def write(self, handler: DBLogHandler, record: logging.LogRecord):
        """Format *record* via *handler* and queue it for GCS upload."""
        # Only applies when using context execution.
        if not handler.has_context:
            return
        filename = handler.get_logfile_subpath()
        assert isinstance(filename, str), DBLoggerException(
            f"Invalid filename when writing log @ {type(handler)}: {DB_LOGGER_WRITE_TO_GCS_BUCKET}/(unknown)"
        )
        self.get_file_collection_writer(filename=filename).write(handler.format(record))
|
import os.path
import sys
from os import path
from traceback import format_exc
from unittest import TestCase
from unittest.loader import defaultTestLoader, makeSuite
from unittest.runner import TextTestRunner
from unittest.suite import TestSuite
from pdip.logging.loggers.console import ConsoleLogger
from pdip.utils import ModuleFinder, Utils
if __name__ == "__main__":
class TestRunner:
def __init__(self, test_folder):
self.root_directory = path.abspath(
path.join(path.dirname(path.abspath(__file__))))
self.logger = ConsoleLogger()
self.test_folder = test_folder
def run(self):
all_test_modules = self.find_test_modules()
test_results = self.run_all_tests(all_test_modules)
total = self.print_results(test_results)
# if total["runs"]!=total["successes"]:
# raise Exception("Tests getting error")
def find_test_modules(self):
module_finder = ModuleFinder(
root_directory=self.root_directory, initialize=False)
folder = os.path.join(self.root_directory,
'tests', self.test_folder)
module_finder.find_all_modules(folder=folder)
test_modules = []
for module in module_finder.modules:
if module["module_name"].startswith('test_') and module["module_address"].startswith('tests'):
test_modules.append(module)
return test_modules
def run_all_tests(self, test_modules):
results = []
for t in test_modules:
suite = TestSuite()
try:
try:
mod = __import__(t["module_address"],
globals(), locals(), ['suite'])
except KeyError:
self.logger.debug(
"!!!!Module Address : " + t["module_address"])
pass
module = None
for c in TestCase.__subclasses__():
if c.__module__.startswith(t["module_address"]):
module = c
if module is not None:
# suitefn = getattr(module, 'suite')
suite.addTest(makeSuite(module))
except (ImportError, AttributeError) as ex:
# else, just load all the test cases from the module.
trace = format_exc()
self.logger.debug(trace)
suite.addTest(
defaultTestLoader.loadTestsFromName(t["module_name"]))
header_string = f'{'Case':80}|{'Runs'.center(10)}|{'Success'.center(10)}|{'Errors'.center(10)}|{'Failures'.center(10)}'
self.logger.debug(f"{t["module_address"]} tests started".center(
len(header_string) + 2, '-'))
test_result = TextTestRunner().run(suite)
result = {
"test_namespace": t["module_address"], "result": test_result}
results.append(result)
self.print_results(results=[result])
self.logger.debug(f"{t["module_address"]} tests finished".center(
len(header_string) + 2, '-'))
self.logger.debug("-" * (len(header_string) + 2))
modules = [y for y in sys.modules if 'pdip' in y]
for module in modules:
del module
modules = [y for y in sys.modules if 'tests.unittests' in y]
for module in modules:
del module
return results
def print_results(self, results):
header_string = f'|{'Case':80}|{'Runs'.center(10)}|{'Success'.center(10)}|{'Errors'.center(10)}|{'Failures'.center(10)}|'
self.logger.debug("-" * len(header_string))
self.logger.debug(header_string)
self.logger.debug("-" * len(header_string))
total = {
"runs": 0,
"successes": 0,
"errors": 0,
"failures": 0,
}
for result in results:
runs = result["result"].testsRun
errors = len(result["result"].errors)
failures = len(result["result"].failures)
successes = runs - errors - failures
total["runs"] += runs
total["successes"] += successes
total["errors"] += errors
total["failures"] += failures
result_string = f'|{result['test_namespace']:80}|{runs:10}|{successes:10}|{errors:10}|{failures:10}|'
self.logger.debug(result_string)
total_string = f'|{'Total':80}|{total['runs']:10}|{total['successes']:10}|{total['errors']:10}|{total['failures']:10}|'
self.logger.debug(total_string)
self.logger.debug("-" * len(header_string))
return total
TestRunner('unittests').run()
#TestRunner('integrationtests').run()
| import os.path
import sys
from os import path
from traceback import format_exc
from unittest import TestCase
from unittest.loader import defaultTestLoader, makeSuite
from unittest.runner import TextTestRunner
from unittest.suite import TestSuite
from pdip.logging.loggers.console import ConsoleLogger
from pdip.utils import ModuleFinder, Utils
if __name__ == "__main__":
class TestRunner:
        def __init__(self, test_folder):
            """Remember the tests sub-folder and set up paths and logging."""
            # Root = the directory containing this script.
            self.root_directory = path.abspath(
                path.join(path.dirname(path.abspath(__file__))))
            self.logger = ConsoleLogger()
            self.test_folder = test_folder
        def run(self):
            """Discover, execute and report all test modules."""
            all_test_modules = self.find_test_modules()
            test_results = self.run_all_tests(all_test_modules)
            total = self.print_results(test_results)
            # if total["runs"]!=total["successes"]:
            #     raise Exception("Tests getting error")
def find_test_modules(self):
module_finder = ModuleFinder(
root_directory=self.root_directory, initialize=False)
folder = os.path.join(self.root_directory,
'tests', self.test_folder)
module_finder.find_all_modules(folder=folder)
test_modules = []
for module in module_finder.modules:
if module["module_name"].startswith('test_') and module["module_address"].startswith('tests'):
test_modules.append(module)
return test_modules
def run_all_tests(self, test_modules):
results = []
for t in test_modules:
suite = TestSuite()
try:
try:
mod = __import__(t["module_address"],
globals(), locals(), ['suite'])
except KeyError:
self.logger.debug(
"!!!!Module Address : " + t["module_address"])
pass
module = None
for c in TestCase.__subclasses__():
if c.__module__.startswith(t["module_address"]):
module = c
if module is not None:
# suitefn = getattr(module, 'suite')
suite.addTest(makeSuite(module))
except (ImportError, AttributeError) as ex:
# else, just load all the test cases from the module.
trace = format_exc()
self.logger.debug(trace)
suite.addTest(
defaultTestLoader.loadTestsFromName(t["module_name"]))
header_string = f'{"Case":80}|{"Runs".center(10)}|{"Success".center(10)}|{"Errors".center(10)}|{"Failures".center(10)}'
self.logger.debug(f"{t['module_address']} tests started".center(
len(header_string) + 2, '-'))
test_result = TextTestRunner().run(suite)
result = {
"test_namespace": t["module_address"], "result": test_result}
results.append(result)
self.print_results(results=[result])
self.logger.debug(f"{t['module_address']} tests finished".center(
len(header_string) + 2, '-'))
self.logger.debug("-" * (len(header_string) + 2))
modules = [y for y in sys.modules if 'pdip' in y]
for module in modules:
del module
modules = [y for y in sys.modules if 'tests.unittests' in y]
for module in modules:
del module
return results
def print_results(self, results):
header_string = f'|{"Case":80}|{"Runs".center(10)}|{"Success".center(10)}|{"Errors".center(10)}|{"Failures".center(10)}|'
self.logger.debug("-" * len(header_string))
self.logger.debug(header_string)
self.logger.debug("-" * len(header_string))
total = {
"runs": 0,
"successes": 0,
"errors": 0,
"failures": 0,
}
for result in results:
runs = result["result"].testsRun
errors = len(result["result"].errors)
failures = len(result["result"].failures)
successes = runs - errors - failures
total["runs"] += runs
total["successes"] += successes
total["errors"] += errors
total["failures"] += failures
result_string = f'|{result["test_namespace"]:80}|{runs:10}|{successes:10}|{errors:10}|{failures:10}|'
self.logger.debug(result_string)
total_string = f'|{"Total":80}|{total["runs"]:10}|{total["successes"]:10}|{total["errors"]:10}|{total["failures"]:10}|'
self.logger.debug(total_string)
self.logger.debug("-" * len(header_string))
return total
TestRunner('unittests').run()
#TestRunner('integrationtests').run()
|
import logging
import os
import io
import matplotlib
import matplotlib.pyplot as plt
import pytz
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from telegram.ext.dispatcher import run_async
import youtube_data as ytd
import user
from user import User, UserState
# Disabling matplotlib from opening a window on the server
matplotlib.use("Agg")
plt.ioff()
# Enable logging
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def send_plot(
    user: User, context, plot: matplotlib.axes.Axes, caption: str = ""
):
    """Render a matplotlib/pandas plot to an in-memory PNG and send it to the
    user's Telegram chat.

    :param user: recipient; only ``user.telegram_id`` is read here.
    :param context: telegram handler context providing ``context.bot``.
    :param plot: an Axes object as returned by the pandas ``.plot`` API.
    :param caption: optional Markdown caption for the photo.
    """
    fig = plot.get_figure()
    fig.tight_layout()
    image = io.BytesIO()
    # 300 dpi keeps the chart readable after Telegram's photo scaling.
    fig.savefig(image, format="png", dpi=300)
    # Close the figure explicitly: the bot renders many plots per request and
    # matplotlib keeps figures alive otherwise.
    plt.close(fig)
    image.seek(0)
    context.bot.send_photo(
        chat_id=user.telegram_id,
        photo=image,
        parse_mode="Markdown",
        caption=caption,
    )
@run_async
def start_command(update, context):
    """Send a message when the command /start is issued."""
    # NOTE(review): User.load appears to fetch-or-create the per-user record
    # (a default timezone is already present below) — confirm in user.py.
    user = User.load(update.effective_user.id)
    message = (
        f"Hi {update.effective_user.name} 😊\n"
        "YouTube saves a lot of data about you, however I can help you to "
        "get some insight into this data. So for me to help you, you need to "
        "download your data and send me the files `watch-history.json` and "
        "`search-history.json`. Here is a [Guide]"
        "(https://github.com/flofriday/youtube-data/blob/master/Download_Guide.md)"
        " on how to download your personal data.\n\n"
        "Some of the graphs I can create are time-sensetive, so it is "
        "important that I know in which timezone you live in. At the moment I "
        f"think/asume you live in `{user.timezone}`, if this is wrong you "
        "can correct me with the /timezone command.\n\n"
        "This bot is free software, and is developed in the hope to "
        "be useful. Its code is publicly available on "
        "[GitHub](https://github.com/flofriday/youtube-data).\n\n"
        "*Disclaimer:* This is not an official YouTube application, nor am I "
        "[flofriday](https://github.com/flofriday), in any way "
        "associated with YouTube or Google."
    )
    # Disable the link preview so the guide URL does not expand into a card.
    update.message.reply_text(
        message, parse_mode="Markdown", disable_web_page_preview=True
    )
    # Also show all the available commands
    help_command(update, context)
@run_async
def privacy_command(update, context):
    """Tell the user how this bot manages their data"""
    # Static informational text; no user state is read or written here.
    message = (
        "*Privarcy* 🔒\n"
        "Privacy clearly is important, and this bot takes this subject "
        "seriously. Thats why *this bot doesn't save your personal "
        "YouTube data*.\n"
        "However, this bot does save some userdata, which are either "
        "collected to enable some feature, or to enable some kind of "
        "analytics. Having this said, I will promise to allways make it "
        "clear, what this bot collects. Therefore, I created the /info and "
        "/statitic commands. The info command shows you all the data this bot "
        "knows about you."
    )
    update.message.reply_text(message, parse_mode="Markdown")
@run_async
def help_command(update, context):
    """Send a message when the command /help is issued."""
    # Keep this list in sync with the CommandHandlers registered in main().
    message = (
        "*Things I can do* 🤓\n"
        "/timezone - Set your timezone\n"
        "/privacy - How this bot handles your data\n"
        "/info - Informations the bot has about you\n"
        "/statistic - Informations on the bots usage\n"
        "/help - This help message"
    )
    update.message.reply_text(message, parse_mode="Markdown")
@run_async
def info_command(update, context):
    """Show the user what the bot thinks about them"""
    user = User.load(update.effective_user.id)
    # These four fields are all the per-user data this handler exposes
    # (telegram id, conversation state, timezone, report counter).
    message = (
        "*User Info*\n"
        f"Telegram ID: {user.telegram_id}\n"
        f"State: {UserState(user.state).name}\n"
        f"Timezone: {user.timezone}\n"
        f"Number of reports: {user.analyzes}"
    )
    update.message.reply_text(
        message, parse_mode="Markdown", disable_web_page_preview=True
    )
@run_async
def statistic_command(update, context):
    """Tell the user how many users their are"""
    # Aggregate counters across all users; no per-user data is shown.
    users, analyzes = User.statistics()
    message = (
        f"*Statistics*\nUsers: *{users}*\nAnalyzes calculated: *{analyzes}*"
    )
    update.message.reply_text(message, parse_mode="Markdown")
@run_async
def timezone_command(update, context):
    """Set the timezone for the user"""
    user = User.load(update.effective_user.id)
    # Flag the user so the *next* free-text message is interpreted as a
    # timezone name (handled in text_message).
    user.state = UserState.send_timezone
    user.update()
    message = (
        "Send me the timezone you live in.\n"
        "Unfortunatly, I am very strict about the format 😅.\n"
        "The format must be like `Europe/Vienna`.\n"
        "Here is the [Wikipedia Link]"
        "(https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) "
        "to help you out."
    )
    update.message.reply_text(
        message, parse_mode="Markdown", disable_web_page_preview=True
    )
@run_async
def document_message(update, context):
    """React to files the user sends the bot"""
    # Dispatch on the exact filename; anything else is rejected below.
    analyzers = {
        "search-history.json": analyze_search,
        "watch-history.json": analyze_watch,
    }
    analyzer = analyzers.get(update.message.document.file_name)
    if analyzer is not None:
        # Show a "typing…" indicator while the (potentially slow) analysis runs.
        context.bot.send_chat_action(
            chat_id=update.effective_user.id,
            action=telegram.ChatAction.TYPING,
        )
        analyzer(update, context)
        return
    reply = (
        "Sorry, the file must either be named `search-history.json` or "
        "`watch-history.json`. 😔"
    )
    update.message.reply_text(
        reply, parse_mode="Markdown", disable_web_page_preview=True
    )
@run_async
def text_message(update, context):
    """Handle normal messages"""
    user = User.load(update.effective_user.id)
    # Free text is only meaningful while we are waiting for a timezone.
    if user.state != UserState.send_timezone:
        update.message.reply_text("Sorry, I don't know what you want. 😔")
        return
    tz_name = update.message.text
    if tz_name not in pytz.all_timezones:
        update.message.reply_text("Sorry, I don't know that timezone. 😰")
        return
    # Valid timezone: persist it and leave the conversation state.
    user.timezone = tz_name
    user.state = UserState.idle
    user.update()
    update.message.reply_text("Great, set your new timezone. 😄")
@run_async
def unknown_message(update, context):
    """Fallback handler for any update no other handler matched."""
    update.message.reply_text("Sorry, I don't know what you want. 😔")
def analyze_search(update, context):
    """Download an uploaded ``search-history.json``, then reply with summary
    statistics and two plots about the user's search behaviour."""
    document = update.message.document
    f = None
    try:
        # 30 second timeout for fetching the file from Telegram's servers.
        f = document.get_file(30)
    except telegram.TelegramError:
        update.message.reply_text(
            "An error occoured while downloading your file."
        )
        return
    # Load the user and the data into a dataframe
    user = User.load(update.effective_user.id)
    json = f.download_as_bytearray().decode("utf-8")
    df = None
    try:
        df = ytd.load_search_history(json, user.timezone)
    except Exception:
        update.message.reply_text(
            "An error occoured while parsing your file. 😵\n"
            "Maybe you uploaded a corrrupted file ?"
        )
        return
    # Overall information about the searches
    # BUGFIX: the f-strings reused double quotes inside double-quoted
    # f-strings (df["time"] inside f"..."), a SyntaxError before Python 3.12;
    # single quotes inside are portable.
    # BUGFIX: max(..., 1) guards against a zero-day span (all searches on the
    # same calendar day), which previously raised ZeroDivisionError.
    span_days = max((df['time'].max() - df['time'].min()).days, 1)
    info_message = (
        "*Absolut numbers*\n"
        f"Searches since {df['time'].min().strftime('%b %d %Y')}: "
        f"*{len(df)}*\n"
        f"Average searches per day: "
        f"*{len(df) / span_days:.2f}*"
    )
    update.message.reply_text(info_message, parse_mode="Markdown")
    # Plot the words used most often
    plt1 = ytd.searchword_plot(df, 24)
    send_plot(user, context, plt1)
    # Plot the search activity over time
    plt2 = ytd.search_timeline_plot(df)
    send_plot(user, context, plt2)
    # Update the counter for the user
    user.analyzes += 1
    user.update()
    update.message.reply_text("Done 😊", parse_mode="Markdown")
def analyze_watch(update, context):
    """Download an uploaded ``watch-history.json``, then reply with summary
    statistics and three plots about the user's watch behaviour."""
    document = update.message.document
    f = None
    try:
        # 30 second timeout for fetching the file from Telegram's servers.
        f = document.get_file(30)
    except telegram.TelegramError:
        update.message.reply_text(
            "An error occoured while downloading your file."
        )
        return
    # Load the user and the data into a dataframe
    user = User.load(update.effective_user.id)
    json = f.download_as_bytearray().decode("utf-8")
    df = None
    try:
        df = ytd.load_watch_history(json, user.timezone)
    except Exception:
        update.message.reply_text(
            "An error occoured while parsing your file. 😵\n"
            "Maybe you uploaded a corrrupted file ?"
        )
        return
    # Overall information about the searches
    # BUGFIX: the f-strings reused double quotes inside double-quoted
    # f-strings, a SyntaxError before Python 3.12; single quotes inside are
    # portable.
    # BUGFIX: max(..., 1) guards against a zero-day span (all videos watched
    # on the same calendar day), which previously raised ZeroDivisionError.
    span_days = max((df['time'].max() - df['time'].min()).days, 1)
    info_message = (
        "*Absolut numbers*\n"
        f"Videos watched since {df['time'].min().strftime('%b %d %Y')}: "
        f"*{len(df)}*\n"
        f"Average videos per day: "
        f"*{len(df) / span_days:.2f}*"
    )
    update.message.reply_text(info_message, parse_mode="Markdown")
    # Plot the most watched creators
    plt = ytd.creator_plot(df, 24)
    send_plot(user, context, plt)
    # Plot the watch timeline
    plt = ytd.watch_timeline_plot(df)
    send_plot(user, context, plt)
    # Plot the hours the users watches
    plt = ytd.watch_hour_plot(df)
    send_plot(user, context, plt)
    # Update the counter for the user
    user.analyzes += 1
    user.update()
    update.message.reply_text("Done 😊", parse_mode="Markdown")
def main():
    """Start the bot."""
    # Initialize the database
    # NOTE(review): this calls the module-level ``__init__`` attribute of the
    # imported ``user`` module (not a constructor) — confirm in user.py.
    user.__init__("data/bot.db")
    # Read the config from the environment
    token = os.environ["TELEGRAM_TOKEN"]
    # Create the Updater and pass it your bot's token.
    # Make sure to set use_context=True to use the new context based callbacks
    # Post version 12 this will no longer be necessary
    updater = Updater(token, use_context=True)
    print("Bot running...")
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("start", start_command))
    dp.add_handler(CommandHandler("timezone", timezone_command))
    dp.add_handler(CommandHandler("privacy", privacy_command))
    dp.add_handler(CommandHandler("info", info_command))
    dp.add_handler(CommandHandler("statistic", statistic_command))
    dp.add_handler(CommandHandler("help", help_command))
    # on noncommand i.e message - echo the message on Telegram
    # Handler order matters: text before document before the catch-all.
    dp.add_handler(MessageHandler(Filters.text, text_message))
    dp.add_handler(MessageHandler(Filters.document, document_message))
    dp.add_handler(MessageHandler(Filters.all, unknown_message))
    # Start the Bot
    updater.start_polling()
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
if __name__ == "__main__":
    main()
| import logging
import os
import io
import matplotlib
import matplotlib.pyplot as plt
import pytz
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from telegram.ext.dispatcher import run_async
import youtube_data as ytd
import user
from user import User, UserState
# Disabling matplotlib from opening a window on the server
matplotlib.use("Agg")
plt.ioff()
# Enable logging
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def send_plot(
    user: User, context, plot: matplotlib.axes.Axes, caption: str = ""
):
    """Render a matplotlib/pandas plot to an in-memory PNG and send it to the
    user's Telegram chat.

    :param user: recipient; only ``user.telegram_id`` is read here.
    :param context: telegram handler context providing ``context.bot``.
    :param plot: an Axes object as returned by the pandas ``.plot`` API.
    :param caption: optional Markdown caption for the photo.
    """
    fig = plot.get_figure()
    fig.tight_layout()
    image = io.BytesIO()
    # 300 dpi keeps the chart readable after Telegram's photo scaling.
    fig.savefig(image, format="png", dpi=300)
    # Close the figure explicitly: the bot renders many plots per request and
    # matplotlib keeps figures alive otherwise.
    plt.close(fig)
    image.seek(0)
    context.bot.send_photo(
        chat_id=user.telegram_id,
        photo=image,
        parse_mode="Markdown",
        caption=caption,
    )
@run_async
def start_command(update, context):
"""Send a message when the command /start is issued."""
user = User.load(update.effective_user.id)
message = (
f"Hi {update.effective_user.name} 😊\n"
"YouTube saves a lot of data about you, however I can help you to "
"get some insight into this data. So for me to help you, you need to "
"download your data and send me the files `watch-history.json` and "
"`search-history.json`. Here is a [Guide]"
"(https://github.com/flofriday/youtube-data/blob/master/Download_Guide.md)"
" on how to download your personal data.\n\n"
"Some of the graphs I can create are time-sensetive, so it is "
"important that I know in which timezone you live in. At the moment I "
f"think/asume you live in `{user.timezone}`, if this is wrong you "
"can correct me with the /timezone command.\n\n"
"This bot is free software, and is developed in the hope to "
"be useful. Its code is publicly available on "
"[GitHub](https://github.com/flofriday/youtube-data).\n\n"
"*Disclaimer:* This is not an official YouTube application, nor am I "
"[flofriday](https://github.com/flofriday), in any way "
"associated with YouTube or Google."
)
update.message.reply_text(
message, parse_mode="Markdown", disable_web_page_preview=True
)
# Also show all the available commands
help_command(update, context)
@run_async
def privacy_command(update, context):
"""Tell the user how this bot manages their data"""
message = (
"*Privarcy* 🔒\n"
"Privacy clearly is important, and this bot takes this subject "
"seriously. Thats why *this bot doesn't save your personal "
"YouTube data*.\n"
"However, this bot does save some userdata, which are either "
"collected to enable some feature, or to enable some kind of "
"analytics. Having this said, I will promise to allways make it "
"clear, what this bot collects. Therefore, I created the /info and "
"/statitic commands. The info command shows you all the data this bot "
"knows about you."
)
update.message.reply_text(message, parse_mode="Markdown")
@run_async
def help_command(update, context):
"""Send a message when the command /help is issued."""
message = (
"*Things I can do* 🤓\n"
"/timezone - Set your timezone\n"
"/privacy - How this bot handles your data\n"
"/info - Informations the bot has about you\n"
"/statistic - Informations on the bots usage\n"
"/help - This help message"
)
update.message.reply_text(message, parse_mode="Markdown")
@run_async
def info_command(update, context):
"""Show the user what the bot thinks about them"""
user = User.load(update.effective_user.id)
message = (
"*User Info*\n"
f"Telegram ID: {user.telegram_id}\n"
f"State: {UserState(user.state).name}\n"
f"Timezone: {user.timezone}\n"
f"Number of reports: {user.analyzes}"
)
update.message.reply_text(
message, parse_mode="Markdown", disable_web_page_preview=True
)
@run_async
def statistic_command(update, context):
"""Tell the user how many users their are"""
users, analyzes = User.statistics()
message = (
f"*Statistics*\nUsers: *{users}*\nAnalyzes calculated: *{analyzes}*"
)
update.message.reply_text(message, parse_mode="Markdown")
@run_async
def timezone_command(update, context):
"""Set the timezone for the user"""
user = User.load(update.effective_user.id)
user.state = UserState.send_timezone
user.update()
message = (
"Send me the timezone you live in.\n"
"Unfortunatly, I am very strict about the format 😅.\n"
"The format must be like `Europe/Vienna`.\n"
"Here is the [Wikipedia Link]"
"(https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) "
"to help you out."
)
update.message.reply_text(
message, parse_mode="Markdown", disable_web_page_preview=True
)
@run_async
def document_message(update, context):
"""React to files the user sends the bot"""
filename = update.message.document.file_name
if filename == "search-history.json":
context.bot.send_chat_action(
chat_id=update.effective_user.id,
action=telegram.ChatAction.TYPING,
)
analyze_search(update, context)
return
elif filename == "watch-history.json":
context.bot.send_chat_action(
chat_id=update.effective_user.id,
action=telegram.ChatAction.TYPING,
)
analyze_watch(update, context)
return
message = (
"Sorry, the file must either be named `search-history.json` or "
"`watch-history.json`. 😔"
)
update.message.reply_text(
message, parse_mode="Markdown", disable_web_page_preview=True
)
@run_async
def text_message(update, context):
"""Handle normal messages"""
user = User.load(update.effective_user.id)
if user.state == UserState.send_timezone:
if update.message.text not in pytz.all_timezones:
update.message.reply_text("Sorry, I don't know that timezone. 😰")
return
user.timezone = update.message.text
user.state = UserState.idle
user.update()
update.message.reply_text("Great, set your new timezone. 😄")
return
# I don't know what else to do
update.message.reply_text("Sorry, I don't know what you want. 😔")
@run_async
def unknown_message(update, context):
update.message.reply_text("Sorry, I don't know what you want. 😔")
def analyze_search(update, context):
    """Download an uploaded ``search-history.json``, then reply with summary
    statistics and two plots about the user's search behaviour."""
    document = update.message.document
    f = None
    try:
        # 30 second timeout for fetching the file from Telegram's servers.
        f = document.get_file(30)
    except telegram.TelegramError:
        update.message.reply_text(
            "An error occoured while downloading your file."
        )
        return
    # Load the user and the data into a dataframe
    user = User.load(update.effective_user.id)
    json = f.download_as_bytearray().decode("utf-8")
    df = None
    try:
        df = ytd.load_search_history(json, user.timezone)
    except Exception:
        update.message.reply_text(
            "An error occoured while parsing your file. 😵\n"
            "Maybe you uploaded a corrrupted file ?"
        )
        return
    # Overall information about the searches
    # BUGFIX: max(..., 1) guards against a zero-day span (all searches on the
    # same calendar day), which previously raised ZeroDivisionError.
    span_days = max((df['time'].max() - df['time'].min()).days, 1)
    info_message = (
        "*Absolut numbers*\n"
        f"Searches since {df['time'].min().strftime('%b %d %Y')}: "
        f"*{len(df)}*\n"
        f"Average searches per day: "
        f"*{len(df) / span_days:.2f}*"
    )
    update.message.reply_text(info_message, parse_mode="Markdown")
    # Plot the words used most often
    plt1 = ytd.searchword_plot(df, 24)
    send_plot(user, context, plt1)
    # Plot the search activity over time
    plt2 = ytd.search_timeline_plot(df)
    send_plot(user, context, plt2)
    # Update the counter for the user
    user.analyzes += 1
    user.update()
    update.message.reply_text("Done 😊", parse_mode="Markdown")
def analyze_watch(update, context):
    """Download an uploaded ``watch-history.json``, then reply with summary
    statistics and three plots about the user's watch behaviour."""
    document = update.message.document
    f = None
    try:
        # 30 second timeout for fetching the file from Telegram's servers.
        f = document.get_file(30)
    except telegram.TelegramError:
        update.message.reply_text(
            "An error occoured while downloading your file."
        )
        return
    # Load the user and the data into a dataframe
    user = User.load(update.effective_user.id)
    json = f.download_as_bytearray().decode("utf-8")
    df = None
    try:
        df = ytd.load_watch_history(json, user.timezone)
    except Exception:
        update.message.reply_text(
            "An error occoured while parsing your file. 😵\n"
            "Maybe you uploaded a corrrupted file ?"
        )
        return
    # Overall information about the searches
    # BUGFIX: max(..., 1) guards against a zero-day span (all videos watched
    # on the same calendar day), which previously raised ZeroDivisionError.
    span_days = max((df['time'].max() - df['time'].min()).days, 1)
    info_message = (
        "*Absolut numbers*\n"
        f"Videos watched since {df['time'].min().strftime('%b %d %Y')}: "
        f"*{len(df)}*\n"
        f"Average videos per day: "
        f"*{len(df) / span_days:.2f}*"
    )
    update.message.reply_text(info_message, parse_mode="Markdown")
    # Plot the most watched creators
    plt = ytd.creator_plot(df, 24)
    send_plot(user, context, plt)
    # Plot the watch timeline
    plt = ytd.watch_timeline_plot(df)
    send_plot(user, context, plt)
    # Plot the hours the users watches
    plt = ytd.watch_hour_plot(df)
    send_plot(user, context, plt)
    # Update the counter for the user
    user.analyzes += 1
    user.update()
    update.message.reply_text("Done 😊", parse_mode="Markdown")
def main():
"""Start the bot."""
# Initialize the database
user.__init__("data/bot.db")
# Read the config from the environment
token = os.environ["TELEGRAM_TOKEN"]
# Create the Updater and pass it your bot's token.
# Make sure to set use_context=True to use the new context based callbacks
# Post version 12 this will no longer be necessary
updater = Updater(token, use_context=True)
print("Bot running...")
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start_command))
dp.add_handler(CommandHandler("timezone", timezone_command))
dp.add_handler(CommandHandler("privacy", privacy_command))
dp.add_handler(CommandHandler("info", info_command))
dp.add_handler(CommandHandler("statistic", statistic_command))
dp.add_handler(CommandHandler("help", help_command))
# on noncommand i.e message - echo the message on Telegram
dp.add_handler(MessageHandler(Filters.text, text_message))
dp.add_handler(MessageHandler(Filters.document, document_message))
dp.add_handler(MessageHandler(Filters.all, unknown_message))
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == "__main__":
main()
|
from unittest import TestCase
import logging
import os
import shutil
from datetime import datetime
from pathlib import Path
from shutil import rmtree
from random import randint
from offload import utils
from offload.utils import File, FileList
utils.setup_logger('debug')
class TestFile(TestCase):
def setUp(self):
self.test_file_name = "test_file.txt"
self.test_data_path = Path(__file__).parent / "test_data"
self.test_file_path = self.test_data_path / "test_files" / self.test_file_name
self.test_pic_path = Path(__file__).parent / "test_pic.jpg"
self.test_file_path.parent.mkdir(exist_ok=True, parents=True)
def tearDown(self):
if self.test_file_path.is_file():
self.test_file_path.unlink()
rmtree(self.test_data_path)
def test_size(self):
test_file = File(self.test_pic_path)
print(test_file.size)
self.assertGreater(test_file.size, 0)
def test_increment_filename(self):
test_file = File(self.test_file_name)
self.assertEqual(test_file.filename, "test_file.txt")
test_file.increment_filename()
self.assertEqual(test_file.filename, "test_file_001.txt")
test_file.inc = 52
test_file.increment_filename()
self.assertEqual(test_file.filename, "test_file_053.txt")
test_file.inc_pad = 5
test_file.increment_filename()
self.assertEqual(test_file.filename, "test_file_00054.txt")
def test_add_prefix(self):
test_file = File(self.test_pic_path)
self.assertEqual(test_file.filename, "test_pic.jpg")
self.assertEqual(test_file.prefix, None)
test_file.set_prefix('hest')
self.assertEqual(test_file.filename, "hest_test_pic.jpg")
test_file.prefix = "fest"
self.assertEqual(test_file.filename, "fest_test_pic.jpg")
test_file.set_prefix("taken_date")
logging.info(test_file.prefix)
logging.info(test_file.path.resolve())
self.assertEqual("200307_test_pic.jpg", test_file.filename)
test_file.set_prefix("taken_date_time")
self.assertEqual(test_file.prefix, "200307_192133")
self.assertEqual(test_file.filename, "200307_192133_test_pic.jpg")
logging.debug(f'test_file.prefix = {test_file.prefix}')
test_file.prefix = "offload_date"
today = datetime.now().strftime("%y%m%d")
logging.info(today)
logging.info(test_file.filename)
self.assertEqual(f"{today}_test_pic.jpg", test_file.filename)
test_file.prefix = "empty"
self.assertEqual(test_file.prefix, None)
test_file.set_prefix("")
self.assertEqual(test_file.prefix, None)
def test_update_relative_path(self):
test_file = File(self.test_file_path)
relative_to = Path(__file__).parent
test_file.set_relative_path(relative_to)
self.assertEqual(str(test_file.relative_path), "test_data/test_files/test_file.txt")
def test_update_path(self):
test_file = File(self.test_file_path)
test_file.path = "/test"
self.assertEqual(str(test_file.path), "/test/test_file.txt")
def test_update_checksum(self):
self.test_file_path.parent.mkdir(exist_ok=True, parents=True)
self.test_file_path.write_text("test")
test_file = File(self.test_file_path)
self.assertEqual("9ec9f7918d7dfc40", test_file.checksum)
def test_set_name(self):
test_file = File(self.test_file_path)
test_file.name = "jens"
self.assertEqual(test_file.name, "jens")
self.assertEqual(test_file.filename, "jens.txt")
self.assertTrue(str(test_file.path).endswith("jens.txt"))
def test_set_name_using_preset(self):
test_pic = File(self.test_pic_path)
logging.info(f'Testing set name using preset with file {test_pic._path}')
test_pic.name = "camera_model"
self.assertEqual(test_pic.filename, "ilce-7m3.jpg")
test_pic.name = 'camera_make'
self.assertEqual(test_pic.name, "sony")
class TestFileList(TestCase):
    """Tests for ``offload.utils.FileList`` against a generated directory."""

    def setUp(self):
        self.test_directory = Path(__file__).parent / "test_data" / "test_files"
        self.test_directory.mkdir(exist_ok=True, parents=True)
        # Test files: 100 jpg-named files with random sizes of 1..4096 bytes.
        for i in range(100):
            f = Path(self.test_directory / f"{i:04}.jpg")
            f.write_text(utils.random_string(randint(1, 4096)))

    def tearDown(self):
        # Remove the generated tree so every test starts from scratch.
        # (Dropped the dead ``pass`` that followed this call.)
        rmtree(self.test_directory)

    def test_get_file_list(self):
        test_list = FileList(self.test_directory)
        self.assertEqual(len(test_list.files), 100)

    def test_update_total_size(self):
        test_list = FileList(self.test_directory)
        self.assertIsInstance(test_list.size, int)

    def test_sort(self):
        test_list = FileList(self.test_directory)
        # Expected order: ascending modification time.
        list_sorted = sorted(test_list.files, key=lambda f: f.mtime)
        test_list.sort()
        self.assertEqual(list_sorted, test_list.files)
class TestUtils(TestCase):
def setUp(self):
# Set variables
self.tests_path = Path(__file__).parent
logging.info(self.tests_path)
self.test_data_path = self.tests_path / "test_data"
self.test_data_path.mkdir(exist_ok=True, parents=True)
self.test_pic_path = self.tests_path / "test_pic.jpg"
self.test_file_source = self.test_data_path / "test_file_source.txt"
self.test_file_source.write_text("test")
self.test_file_dest = self.test_data_path / "test_file_destination.txt"
self.test_file_dest.write_text("destination")
self.test_file_compare_checksums = self.test_data_path / "test_file_compare_checksums.txt"
self.test_source_xxhash = "9ec9f7918d7dfc40"
self.test_source_md5 = "098f6bcd4621d373cade4e832627b4f6"
self.test_source_sha256 = "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
self.test_dest_xxhash = "d07d9411d9203216"
self.test_dest_md5 = "6990a54322d9232390a784c5c9247dd6"
self.test_dest_sha256 = "b5c755aaab1038b3d5627bbde7f47ca80c5f5c0481c6d33f04139d07aa1530e7"
def tearDown(self):
shutil.rmtree(self.test_data_path)
def test_file_checksum(self):
self.assertEqual(self.test_source_xxhash, utils.checksum_xxhash(self.test_file_source))
self.assertEqual(self.test_source_md5, utils.checksum_md5(self.test_file_source))
self.assertEqual(self.test_source_sha256, utils.checksum_sha256(self.test_file_source))
def test_checksum_xxhash(self):
test_hash = self.test_source_xxhash
f_a = Path('ol_test_file_a.txt')
f_b = Path('ol_test_file_b.txt')
f_a.write_bytes(b'test')
f_b.write_bytes(b'test')
self.assertEqual(utils.checksum_xxhash(f_a), utils.checksum_xxhash(f_a))
def test_checksum_md5(self):
test_hash = self.test_source_md5
self.assertEqual(utils.checksum_md5(self.test_file_source), test_hash)
def test_checksum_sha256(self):
test_hash = self.test_source_sha256
self.assertEqual(utils.checksum_sha256(self.test_file_source), test_hash)
def test_convert_date(self):
test_timestamp = 1586115849.30226
self.assertIsInstance(utils.timestamp_to_datetime(test_timestamp), datetime)
def test_create_folder(self):
test_folder = self.test_data_path / "test_folder"
utils.create_folder(test_folder)
self.assertTrue(os.path.exists(test_folder))
def test_convert_size(self):
self.assertEqual(utils.convert_size(1000, binary=False), "1.0 KB")
self.assertEqual(utils.convert_size(10000, binary=False), "10.0 KB")
self.assertEqual(utils.convert_size(1000000, binary=False), "1.0 MB")
def test_move_file(self):
content = "Test string!"
source = self.test_data_path / "test_file.txt"
source.write_text(content)
destination = source.parent / "test_dest" / "test_file.txt"
destination.parent.mkdir()
utils.move_file(source, destination)
self.assertFalse(source.is_file())
def test_copy_file(self):
content = "Test string!"
source = self.test_data_path / "test_file.txt"
source.write_text(content)
destination = source.parent / "test_dest" / "test_file.txt"
destination.parent.mkdir()
utils.copy_file(source, destination)
st = source.stat()
for i in dir(st):
if i.startswith('st_'):
logging.info(i)
if i in ['st_birthtime', 'st_ctime', 'st_ctime_ns', 'st_ino']:
self.assertNotEqual(getattr(source.stat(), i), getattr(destination.stat(), i))
else:
self.assertEqual(getattr(source.stat(), i), getattr(destination.stat(), i))
# self.assertEqual(source.stat().st_size, destination.stat().st_size)
# self.assertEqual(source.stat().st_mtime, destination.stat().st_mtime)
# self.assertEqual(source.stat().st_ctime, destination.stat().st_ctime)
self.assertEqual(utils.checksum_md5(source), utils.checksum_md5(destination))
def test_get_file_info(self):
test_info = utils.get_file_info(self.test_file_source)
self.assertEqual(test_info["name"], self.test_file_source.name)
self.assertEqual(test_info["path"], self.test_file_source)
self.assertEqual(test_info["timestamp"], self.test_file_source.stat().st_mtime)
self.assertEqual(test_info["date"], datetime.fromtimestamp(self.test_file_source.stat().st_mtime))
self.assertEqual(test_info["size"], self.test_file_source.stat().st_size)
def test_compare_checksums(self):
shutil.copy2(self.test_file_source, self.test_file_compare_checksums)
self.assertTrue(utils.compare_checksums(utils.checksum_md5(self.test_file_source),
utils.checksum_md5(self.test_file_compare_checksums)))
def test_get_recent_paths(self):
self.assertIsInstance(utils.get_recent_paths(), list)
def test_exiftool_exists(self):
self.assertTrue(utils.exiftool_exists())
def test_exiftool(self):
self.assertIsInstance(utils.exiftool(self.test_file_source), str)
def test_file_metadata(self):
test_metadata = utils.file_metadata(self.test_pic_path)
self.assertIsInstance(test_metadata, dict)
test_metadata = utils.file_metadata(self.test_pic_path)
self.assertTrue(test_metadata.get("EXIF:Make"))
def test_pad_number(self):
self.assertEqual(utils.pad_number(2, padding=3), "002")
self.assertEqual(utils.pad_number(328, padding=4), "0328")
self.assertEqual(utils.pad_number(110328, padding=1), "110328")
self.assertEqual(utils.pad_number("3", padding=4), "0003")
def test_validate_string(self):
self.assertEqual(utils.validate_string("Tårtan 2"), "Tartan_2")
self.assertEqual(utils.validate_string("snel hest!"), "snel_hest")
self.assertEqual(utils.validate_string("snövits häst"), "snovits_hast")
self.assertEqual(utils.validate_string("Öland"), "Oland")
def test_file_modification_date(self):
    # The checked-in sample picture carries a fixed mtime (2020-03-07).
    test_file = Path(__file__).parent / "test_pic.jpg"
    self.assertEqual(utils.file_mod_date(test_file), 1583605293.0)
def test_destination_folder(self):
    """Each preset maps a file date to a relative destination folder string."""
    test_file_date = datetime(2020, 3, 7, 19, 21, 33, 167691)
    today = datetime.now()
    # self.assertEqual(utils.destination_folder(test_file_date, preset="original"), "")
    self.assertEqual(utils.destination_folder(test_file_date, preset="taken_date"), "2020/2020-03-07")
    # Use single quotes inside the f-strings below: reusing the outer quote
    # character inside an f-string is a SyntaxError on Python < 3.12.
    self.assertEqual(utils.destination_folder(test_file_date, preset="offload_date"),
                     f"{today.year}/{today.strftime('%Y-%m-%d')}")
    self.assertEqual(utils.destination_folder(test_file_date, preset="year"), str(test_file_date.year))
    self.assertEqual(utils.destination_folder(test_file_date, preset="year_month"),
                     f"{test_file_date.year}/{test_file_date.strftime('%m')}")
    self.assertEqual(utils.destination_folder(test_file_date, preset="flat"), "")
def test_random_string(self):
    # random_string returns exactly the requested number of characters.
    random_string = utils.random_string(62)
    self.assertIsInstance(random_string, str)
    self.assertEqual(len(random_string), 62)
def test_folder_size(self):
    # Recursive size of the tests directory, in bytes.
    result = utils.folder_size(Path(__file__).parent)
    print(utils.convert_size(result))
    print(result)
    self.assertIsInstance(result, int)
def test_exifdata(self):
    # The sample picture has EXIF tags; a plain text file yields none.
    result = utils.exifdata(self.test_pic_path)
    print(result)
    self.assertIsInstance(result, dict)
    self.assertTrue(result.get('Make'))
    result2 = utils.exifdata(self.test_file_source)
    self.assertFalse(result2.get('Model', False))
def test_get_camera_make(self):
    # Camera make is read from the sample picture's EXIF data.
    result = utils.get_camera_make(self.test_pic_path)
    logging.info(result)
    self.assertIsInstance(result, str)
    self.assertEqual('SONY', result)
def test_get_camera_model(self):
    # Camera model is read from the sample picture's EXIF data.
    result = utils.get_camera_model(self.test_pic_path)
    logging.info(result)
    self.assertIsInstance(result, str)
    self.assertEqual('ILCE-7M3', result)
def test_pathlib_copy(self):
    # Copy a 10 MB file, then overwrite the copy with a 100 MB version;
    # size and MD5 must match the source after each copy.
    source = self.test_data_path / "test_file.txt"
    source.write_bytes(bytes('0' * 1024 ** 2 * 10, 'utf-8'))
    destination = source.parent / "test_dest" / "test_file.txt"
    destination.parent.mkdir()
    utils.pathlib_copy(source, destination)
    self.assertEqual(source.stat().st_size, destination.stat().st_size)
    self.assertEqual(utils.checksum_md5(source), utils.checksum_md5(destination))
    source.write_bytes(bytes('0' * 1024 ** 2 * 100, 'utf-8'))
    utils.pathlib_copy(source, destination)
    self.assertEqual(source.stat().st_size, destination.stat().st_size)
    self.assertEqual(utils.checksum_md5(source), utils.checksum_md5(destination))
def test_time_to_string(self):
    # Seconds are rendered as a human-readable duration; fractional
    # seconds are dropped.
    result = utils.time_to_string(123)
    self.assertEqual(result, '2 minutes and 3 seconds')
    result = utils.time_to_string(12334)
    self.assertEqual(result, '3 hours, 25 minutes and 34 seconds')
    result = utils.time_to_string(2.44)
    self.assertEqual(result, '2 seconds')
def test_compare_file_mtime(self):
    # Distinct files differ; shutil.copy2 preserves mtime, so afterwards
    # the two compare equal.
    a = self.test_file_source
    b = self.test_file_dest
    result = utils.compare_file_mtime(a, b)
    self.assertFalse(result)
    shutil.copy2(a, b)
    result = utils.compare_file_mtime(a, b)
    self.assertTrue(result)
def test_compare_file_size(self):
    # Files with different content lengths differ; after copying they match.
    a = self.test_file_source
    b = self.test_file_dest
    result = utils.compare_file_size(a, b)
    self.assertFalse(result)
    shutil.copy2(a, b)
    result = utils.compare_file_size(a, b)
    self.assertTrue(result)
def test_compare_files(self):
    # compare_files takes File wrapper objects rather than raw paths.
    a = File(self.test_file_source)
    b = File(self.test_file_dest)
    result = utils.compare_files(a, b)
    self.assertFalse(result)
    shutil.copy2(a.path, b.path)
    result = utils.compare_files(a, b)
    self.assertTrue(result)
class TestPreset(TestCase):
    """Tests for the Preset template lookups (structure/filename/prefix)."""

    def setUp(self) -> None:
        self.preset = utils.Preset()

    def test_structure(self):
        # Folder-structure templates; offload_date resolves to today's date.
        self.assertEqual('{date.year}/{date:%Y-%m-%d}', self.preset.structure('taken_date'))
        self.assertEqual(f'{datetime.now():%Y}/{datetime.now():%Y-%m-%d}', self.preset.structure('offload_date'))
        self.assertEqual('{date.year}', self.preset.structure('year'))
        self.assertEqual('{date.year}/{date.strftime("%m")}', self.preset.structure('year_month'))
        self.assertEqual('', self.preset.structure('flat'))

    def test_filename(self):
        self.assertIsNone(self.preset.filename('original'))
        self.assertEqual('Make', self.preset.filename('make'))
        self.assertEqual('Model', self.preset.filename('model'))

    def test_prefix(self):
        self.assertEqual('{date:%y%m%d}', self.preset.prefix('taken_date'))
        self.assertEqual('{date:%y%m%d_%H%M%S}', self.preset.prefix('taken_date_time'))
        # Double quotes inside the single-quoted f-string: reusing the outer
        # quote character inside an f-string is a SyntaxError on Python < 3.12.
        self.assertEqual(f'{datetime.now().strftime("%y%m%d")}', self.preset.prefix('offload_date'))
| from unittest import TestCase
import logging
import os
import shutil
from datetime import datetime
from pathlib import Path
from shutil import rmtree
from random import randint
from offload import utils
from offload.utils import File, FileList
utils.setup_logger('debug')
class TestFile(TestCase):
    """Tests for the File wrapper: naming, prefixes, paths and checksums."""

    def setUp(self):
        # A scratch tree under tests/test_data plus a checked-in sample picture.
        self.test_file_name = "test_file.txt"
        self.test_data_path = Path(__file__).parent / "test_data"
        self.test_file_path = self.test_data_path / "test_files" / self.test_file_name
        self.test_pic_path = Path(__file__).parent / "test_pic.jpg"
        self.test_file_path.parent.mkdir(exist_ok=True, parents=True)

    def tearDown(self):
        # Remove the scratch file (if a test created it) and the whole tree.
        if self.test_file_path.is_file():
            self.test_file_path.unlink()
        rmtree(self.test_data_path)

    def test_size(self):
        test_file = File(self.test_pic_path)
        print(test_file.size)
        self.assertGreater(test_file.size, 0)

    def test_increment_filename(self):
        # increment_filename appends a zero-padded counter before the suffix;
        # `inc` is the counter and `inc_pad` its width.
        test_file = File(self.test_file_name)
        self.assertEqual(test_file.filename, "test_file.txt")
        test_file.increment_filename()
        self.assertEqual(test_file.filename, "test_file_001.txt")
        test_file.inc = 52
        test_file.increment_filename()
        self.assertEqual(test_file.filename, "test_file_053.txt")
        test_file.inc_pad = 5
        test_file.increment_filename()
        self.assertEqual(test_file.filename, "test_file_00054.txt")

    def test_add_prefix(self):
        # Prefixes may be literal strings or preset names such as
        # taken_date / taken_date_time / offload_date.
        test_file = File(self.test_pic_path)
        self.assertEqual(test_file.filename, "test_pic.jpg")
        self.assertEqual(test_file.prefix, None)
        test_file.set_prefix('hest')
        self.assertEqual(test_file.filename, "hest_test_pic.jpg")
        test_file.prefix = "fest"
        self.assertEqual(test_file.filename, "fest_test_pic.jpg")
        test_file.set_prefix("taken_date")
        logging.info(test_file.prefix)
        logging.info(test_file.path.resolve())
        self.assertEqual("200307_test_pic.jpg", test_file.filename)
        test_file.set_prefix("taken_date_time")
        self.assertEqual(test_file.prefix, "200307_192133")
        self.assertEqual(test_file.filename, "200307_192133_test_pic.jpg")
        logging.debug(f'test_file.prefix = {test_file.prefix}')
        test_file.prefix = "offload_date"
        today = datetime.now().strftime("%y%m%d")
        logging.info(today)
        logging.info(test_file.filename)
        self.assertEqual(f"{today}_test_pic.jpg", test_file.filename)
        # An "empty" preset or empty string clears the prefix again.
        test_file.prefix = "empty"
        self.assertEqual(test_file.prefix, None)
        test_file.set_prefix("")
        self.assertEqual(test_file.prefix, None)

    def test_update_relative_path(self):
        test_file = File(self.test_file_path)
        relative_to = Path(__file__).parent
        test_file.set_relative_path(relative_to)
        self.assertEqual(str(test_file.relative_path), "test_data/test_files/test_file.txt")

    def test_update_path(self):
        # Assigning a directory to .path keeps the filename.
        test_file = File(self.test_file_path)
        test_file.path = "/test"
        self.assertEqual(str(test_file.path), "/test/test_file.txt")

    def test_update_checksum(self):
        # Expected value is the xxhash digest of the literal content "test".
        self.test_file_path.parent.mkdir(exist_ok=True, parents=True)
        self.test_file_path.write_text("test")
        test_file = File(self.test_file_path)
        self.assertEqual("9ec9f7918d7dfc40", test_file.checksum)

    def test_set_name(self):
        # Renaming updates name, filename and path while keeping the suffix.
        test_file = File(self.test_file_path)
        test_file.name = "jens"
        self.assertEqual(test_file.name, "jens")
        self.assertEqual(test_file.filename, "jens.txt")
        self.assertTrue(str(test_file.path).endswith("jens.txt"))

    def test_set_name_using_preset(self):
        # Preset names pull (lower-cased) values from the picture's EXIF data.
        test_pic = File(self.test_pic_path)
        logging.info(f'Testing set name using preset with file {test_pic._path}')
        test_pic.name = "camera_model"
        self.assertEqual(test_pic.filename, "ilce-7m3.jpg")
        test_pic.name = 'camera_make'
        self.assertEqual(test_pic.name, "sony")
class TestFileList(TestCase):
    """Tests for FileList scanning, total size and mtime sorting."""

    def setUp(self):
        self.test_directory = Path(__file__).parent / "test_data" / "test_files"
        self.test_directory.mkdir(exist_ok=True, parents=True)
        # Test files
        for i in range(100):
            f = Path(self.test_directory / f"{i:04}.jpg")
            f.write_text(utils.random_string(randint(1, 4096)))

    def tearDown(self):
        rmtree(self.test_directory)
        pass

    def test_get_file_list(self):
        test_list = FileList(self.test_directory)
        self.assertEqual(len(test_list.files), 100)

    def test_update_total_size(self):
        test_list = FileList(self.test_directory)
        self.assertIsInstance(test_list.size, int)

    def test_sort(self):
        # FileList.sort() must order entries by modification time, oldest first.
        test_list = FileList(self.test_directory)
        list_sorted = sorted(test_list.files, key=lambda f: f.mtime)
        test_list.sort()
        self.assertEqual(list_sorted, test_list.files)
class TestUtils(TestCase):
    """Tests for the free functions in offload.utils.

    setUp creates a scratch test_data directory with a known source file
    (content "test") and destination file (content "destination"), and records
    their expected xxhash/MD5/SHA-256 digests.
    """

    def setUp(self):
        # Set variables
        self.tests_path = Path(__file__).parent
        logging.info(self.tests_path)
        self.test_data_path = self.tests_path / "test_data"
        self.test_data_path.mkdir(exist_ok=True, parents=True)
        self.test_pic_path = self.tests_path / "test_pic.jpg"
        self.test_file_source = self.test_data_path / "test_file_source.txt"
        self.test_file_source.write_text("test")
        self.test_file_dest = self.test_data_path / "test_file_destination.txt"
        self.test_file_dest.write_text("destination")
        self.test_file_compare_checksums = self.test_data_path / "test_file_compare_checksums.txt"
        # Known digests of the content "test".
        self.test_source_xxhash = "9ec9f7918d7dfc40"
        self.test_source_md5 = "098f6bcd4621d373cade4e832627b4f6"
        self.test_source_sha256 = "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
        # Known digests of the content "destination".
        self.test_dest_xxhash = "d07d9411d9203216"
        self.test_dest_md5 = "6990a54322d9232390a784c5c9247dd6"
        self.test_dest_sha256 = "b5c755aaab1038b3d5627bbde7f47ca80c5f5c0481c6d33f04139d07aa1530e7"

    def tearDown(self):
        shutil.rmtree(self.test_data_path)

    def test_file_checksum(self):
        self.assertEqual(self.test_source_xxhash, utils.checksum_xxhash(self.test_file_source))
        self.assertEqual(self.test_source_md5, utils.checksum_md5(self.test_file_source))
        self.assertEqual(self.test_source_sha256, utils.checksum_sha256(self.test_file_source))

    def test_checksum_xxhash(self):
        test_hash = self.test_source_xxhash
        f_a = Path('ol_test_file_a.txt')
        f_b = Path('ol_test_file_b.txt')
        f_a.write_bytes(b'test')
        f_b.write_bytes(b'test')
        # BUG FIX: the original compared f_a against itself, which is
        # trivially true. Two files with identical content must hash equal,
        # and both must match the known digest of "test".
        self.assertEqual(utils.checksum_xxhash(f_a), utils.checksum_xxhash(f_b))
        self.assertEqual(test_hash, utils.checksum_xxhash(f_a))
        # Clean up the scratch files created in the working directory.
        f_a.unlink()
        f_b.unlink()

    def test_checksum_md5(self):
        test_hash = self.test_source_md5
        self.assertEqual(utils.checksum_md5(self.test_file_source), test_hash)

    def test_checksum_sha256(self):
        test_hash = self.test_source_sha256
        self.assertEqual(utils.checksum_sha256(self.test_file_source), test_hash)

    def test_convert_date(self):
        test_timestamp = 1586115849.30226
        self.assertIsInstance(utils.timestamp_to_datetime(test_timestamp), datetime)

    def test_create_folder(self):
        test_folder = self.test_data_path / "test_folder"
        utils.create_folder(test_folder)
        self.assertTrue(os.path.exists(test_folder))

    def test_convert_size(self):
        # binary=False uses decimal (SI) units.
        self.assertEqual(utils.convert_size(1000, binary=False), "1.0 KB")
        self.assertEqual(utils.convert_size(10000, binary=False), "10.0 KB")
        self.assertEqual(utils.convert_size(1000000, binary=False), "1.0 MB")

    def test_move_file(self):
        content = "Test string!"
        source = self.test_data_path / "test_file.txt"
        source.write_text(content)
        destination = source.parent / "test_dest" / "test_file.txt"
        destination.parent.mkdir()
        utils.move_file(source, destination)
        # The source must be gone after a move.
        self.assertFalse(source.is_file())

    def test_copy_file(self):
        content = "Test string!"
        source = self.test_data_path / "test_file.txt"
        source.write_text(content)
        destination = source.parent / "test_dest" / "test_file.txt"
        destination.parent.mkdir()
        utils.copy_file(source, destination)
        st = source.stat()
        # Every stat field except the inode/creation-time ones must be
        # preserved by the copy (copy_file is expected to keep metadata).
        for i in dir(st):
            if i.startswith('st_'):
                logging.info(i)
                if i in ['st_birthtime', 'st_ctime', 'st_ctime_ns', 'st_ino']:
                    self.assertNotEqual(getattr(source.stat(), i), getattr(destination.stat(), i))
                else:
                    self.assertEqual(getattr(source.stat(), i), getattr(destination.stat(), i))
        # self.assertEqual(source.stat().st_size, destination.stat().st_size)
        # self.assertEqual(source.stat().st_mtime, destination.stat().st_mtime)
        # self.assertEqual(source.stat().st_ctime, destination.stat().st_ctime)
        self.assertEqual(utils.checksum_md5(source), utils.checksum_md5(destination))

    def test_get_file_info(self):
        test_info = utils.get_file_info(self.test_file_source)
        self.assertEqual(test_info["name"], self.test_file_source.name)
        self.assertEqual(test_info["path"], self.test_file_source)
        self.assertEqual(test_info["timestamp"], self.test_file_source.stat().st_mtime)
        self.assertEqual(test_info["date"], datetime.fromtimestamp(self.test_file_source.stat().st_mtime))
        self.assertEqual(test_info["size"], self.test_file_source.stat().st_size)

    def test_compare_checksums(self):
        shutil.copy2(self.test_file_source, self.test_file_compare_checksums)
        self.assertTrue(utils.compare_checksums(utils.checksum_md5(self.test_file_source),
                                                utils.checksum_md5(self.test_file_compare_checksums)))

    def test_get_recent_paths(self):
        self.assertIsInstance(utils.get_recent_paths(), list)

    def test_exiftool_exists(self):
        # Requires the external `exiftool` binary to be installed.
        self.assertTrue(utils.exiftool_exists())

    def test_exiftool(self):
        self.assertIsInstance(utils.exiftool(self.test_file_source), str)

    def test_file_metadata(self):
        test_metadata = utils.file_metadata(self.test_pic_path)
        self.assertIsInstance(test_metadata, dict)
        test_metadata = utils.file_metadata(self.test_pic_path)
        self.assertTrue(test_metadata.get("EXIF:Make"))

    def test_pad_number(self):
        self.assertEqual(utils.pad_number(2, padding=3), "002")
        self.assertEqual(utils.pad_number(328, padding=4), "0328")
        self.assertEqual(utils.pad_number(110328, padding=1), "110328")
        self.assertEqual(utils.pad_number("3", padding=4), "0003")

    def test_validate_string(self):
        self.assertEqual(utils.validate_string("Tårtan 2"), "Tartan_2")
        self.assertEqual(utils.validate_string("snel hest!"), "snel_hest")
        self.assertEqual(utils.validate_string("snövits häst"), "snovits_hast")
        self.assertEqual(utils.validate_string("Öland"), "Oland")

    def test_file_modification_date(self):
        test_file = Path(__file__).parent / "test_pic.jpg"
        self.assertEqual(utils.file_mod_date(test_file), 1583605293.0)

    def test_destination_folder(self):
        test_file_date = datetime(2020, 3, 7, 19, 21, 33, 167691)
        today = datetime.now()
        # self.assertEqual(utils.destination_folder(test_file_date, preset="original"), "")
        self.assertEqual(utils.destination_folder(test_file_date, preset="taken_date"), "2020/2020-03-07")
        self.assertEqual(utils.destination_folder(test_file_date, preset="offload_date"),
                         f"{today.year}/{today.strftime('%Y-%m-%d')}")
        self.assertEqual(utils.destination_folder(test_file_date, preset="year"), str(test_file_date.year))
        self.assertEqual(utils.destination_folder(test_file_date, preset="year_month"),
                         f"{test_file_date.year}/{test_file_date.strftime('%m')}")
        self.assertEqual(utils.destination_folder(test_file_date, preset="flat"), "")

    def test_random_string(self):
        random_string = utils.random_string(62)
        self.assertIsInstance(random_string, str)
        self.assertEqual(len(random_string), 62)

    def test_folder_size(self):
        result = utils.folder_size(Path(__file__).parent)
        print(utils.convert_size(result))
        print(result)
        self.assertIsInstance(result, int)

    def test_exifdata(self):
        result = utils.exifdata(self.test_pic_path)
        print(result)
        self.assertIsInstance(result, dict)
        self.assertTrue(result.get('Make'))
        result2 = utils.exifdata(self.test_file_source)
        self.assertFalse(result2.get('Model', False))

    def test_get_camera_make(self):
        result = utils.get_camera_make(self.test_pic_path)
        logging.info(result)
        self.assertIsInstance(result, str)
        self.assertEqual('SONY', result)

    def test_get_camera_model(self):
        result = utils.get_camera_model(self.test_pic_path)
        logging.info(result)
        self.assertIsInstance(result, str)
        self.assertEqual('ILCE-7M3', result)

    def test_pathlib_copy(self):
        source = self.test_data_path / "test_file.txt"
        source.write_bytes(bytes('0' * 1024 ** 2 * 10, 'utf-8'))
        destination = source.parent / "test_dest" / "test_file.txt"
        destination.parent.mkdir()
        utils.pathlib_copy(source, destination)
        self.assertEqual(source.stat().st_size, destination.stat().st_size)
        self.assertEqual(utils.checksum_md5(source), utils.checksum_md5(destination))
        # Overwriting an existing destination must also work.
        source.write_bytes(bytes('0' * 1024 ** 2 * 100, 'utf-8'))
        utils.pathlib_copy(source, destination)
        self.assertEqual(source.stat().st_size, destination.stat().st_size)
        self.assertEqual(utils.checksum_md5(source), utils.checksum_md5(destination))

    def test_time_to_string(self):
        result = utils.time_to_string(123)
        self.assertEqual(result, '2 minutes and 3 seconds')
        result = utils.time_to_string(12334)
        self.assertEqual(result, '3 hours, 25 minutes and 34 seconds')
        result = utils.time_to_string(2.44)
        self.assertEqual(result, '2 seconds')

    def test_compare_file_mtime(self):
        a = self.test_file_source
        b = self.test_file_dest
        result = utils.compare_file_mtime(a, b)
        self.assertFalse(result)
        # copy2 preserves mtime, so the files now compare equal.
        shutil.copy2(a, b)
        result = utils.compare_file_mtime(a, b)
        self.assertTrue(result)

    def test_compare_file_size(self):
        a = self.test_file_source
        b = self.test_file_dest
        result = utils.compare_file_size(a, b)
        self.assertFalse(result)
        shutil.copy2(a, b)
        result = utils.compare_file_size(a, b)
        self.assertTrue(result)

    def test_compare_files(self):
        # compare_files takes File wrapper objects rather than raw paths.
        a = File(self.test_file_source)
        b = File(self.test_file_dest)
        result = utils.compare_files(a, b)
        self.assertFalse(result)
        shutil.copy2(a.path, b.path)
        result = utils.compare_files(a, b)
        self.assertTrue(result)
class TestPreset(TestCase):
    """Tests for the Preset template lookups (structure/filename/prefix)."""

    def setUp(self) -> None:
        self.preset = utils.Preset()

    def test_structure(self):
        # Folder-structure templates; offload_date resolves to today's date.
        self.assertEqual('{date.year}/{date:%Y-%m-%d}', self.preset.structure('taken_date'))
        self.assertEqual(f'{datetime.now():%Y}/{datetime.now():%Y-%m-%d}', self.preset.structure('offload_date'))
        self.assertEqual('{date.year}', self.preset.structure('year'))
        self.assertEqual('{date.year}/{date.strftime("%m")}', self.preset.structure('year_month'))
        self.assertEqual('', self.preset.structure('flat'))

    def test_filename(self):
        # 'original' means "keep the existing filename".
        self.assertIsNone(self.preset.filename('original'))
        self.assertEqual('Make', self.preset.filename('make'))
        self.assertEqual('Model', self.preset.filename('model'))

    def test_prefix(self):
        self.assertEqual('{date:%y%m%d}', self.preset.prefix('taken_date'))
        self.assertEqual('{date:%y%m%d_%H%M%S}', self.preset.prefix('taken_date_time'))
        self.assertEqual(f'{datetime.now().strftime("%y%m%d")}', self.preset.prefix('offload_date'))
|
"""Support for NiceHash sensors."""
import asyncio
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.core import callback
# from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
)
from homeassistant.helpers.typing import HomeAssistantType
from custom_components.nicehash.nicehash import NiceHashPrivateAPI
from custom_components.nicehash.common import NiceHashSensorDataUpdateCoordinator
from custom_components.nicehash.const import (
API,
DOMAIN,
RIGS_OBJ,
SENSOR_DATA_COORDINATOR,
SWITCH_ASYNC_UPDATE_AFTER_SECONDS,
UNSUB,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM = "switch"
async def async_setup_entry(
    hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the NiceHash sensor using config entry."""
    coordinator: NiceHashSensorDataUpdateCoordinator = hass.data[DOMAIN][
        config_entry.entry_id
    ][SENSOR_DATA_COORDINATOR]

    @callback
    def _update_entities():
        # Remember already-added unique_ids on the function object so repeated
        # coordinator refreshes only create switches for newly appearing rigs.
        if not hasattr(_update_entities, "dev"):
            _update_entities.dev = []
        if not coordinator.last_update_success:
            return
        new_dev = []
        for rig in coordinator.data.get(RIGS_OBJ).get("miningRigs"):
            rig_id = rig.get("rigId")
            switch = NiceHashRigSwitch(
                hass.data[DOMAIN][config_entry.entry_id][API],
                coordinator,
                config_entry,
                rig_id,
            )
            if switch.unique_id not in _update_entities.dev:
                new_dev.append(switch)
                _update_entities.dev.append(switch.unique_id)
        async_add_entities(new_dev)

    # Re-run entity discresh on every coordinator update; keep the
    # unsubscribe callback so the entry can clean it up on unload.
    unsub = coordinator.async_add_listener(_update_entities)
    hass.data[DOMAIN][config_entry.entry_id][UNSUB].append(unsub)
    await coordinator.async_refresh()
class NiceHashRigSwitch(CoordinatorEntity, ToggleEntity):
    """Switch entity that starts or stops mining on a single NiceHash rig."""

    DOMAIN = PLATFORM

    def __init__(
        self, api: NiceHashPrivateAPI, coordinator, config_entry, rigId
    ) -> None:
        super().__init__(coordinator)
        self._rig_id = rigId
        self._config_entry = config_entry
        self._data_type = RIGS_OBJ
        self._api = api

    @property
    def available(self):
        """Return availability"""
        rig = self.get_rig()
        return (
            self.coordinator.last_update_success
            and rig is not None
            and rig.get("minerStatus", "UNKNOWN")
            not in ["DISABLED", "TRANSFERED", "UNKNOWN", "OFFLINE"]
        )

    def get_rig(self):
        """Return the rig object."""
        rig = None
        for rig_entry in self.coordinator.data[self._data_type].get("miningRigs", []):
            if rig_entry.get("rigId") == self._rig_id:
                rig = rig_entry
        return rig

    @property
    def name(self):
        # Entity name derived from the rig's reported name.
        rig = self.get_rig()
        if rig is not None:
            # Single quotes inside the f-string: reusing the outer quote
            # character inside an f-string is a SyntaxError on Python < 3.12.
            name = f"NH - {rig.get('name')} - Power"
            return name
        return None

    @property
    def unique_id(self):
        unique_id = f"nh-{self._rig_id}-power"
        return unique_id

    @property
    def device_info(self):
        """Information about this entity/device."""
        # NOTE(review): assumes the rig is present in the coordinator data;
        # if get_rig() returned None this would raise — confirm HA only calls
        # this after the entity was discovered from that same data.
        rig = self.get_rig()
        return {
            "identifiers": {(DOMAIN, self._rig_id)},
            # If desired, the name for the device could be different to the entity
            "name": rig.get("name"),
            "sw_version": rig.get("softwareVersions"),
            "model": rig.get("softwareVersions"),
            "manufacturer": "NiceHash",
        }

    @property
    def is_on(self):
        """Return true if switch is on."""
        rig = self.get_rig()
        if rig is not None:
            status = rig.get("minerStatus", "UNKNOWN")
            if status in ["BENCHMARKING", "MINING"]:
                return True
        return False

    async def async_turn_on(self, **kwargs):
        """Turn the switch on."""
        try:
            await self._api.set_rig_status(self._rig_id, True)
            # Give NiceHash time to apply the change before refreshing state.
            await asyncio.sleep(SWITCH_ASYNC_UPDATE_AFTER_SECONDS)
        except Exception as err:
            _LOGGER.error("Failed to set the status of '%s': %s", self.entity_id, err)
        await self.coordinator.async_request_refresh()

    async def async_turn_off(self, **kwargs):
        """Turn the switch off."""
        try:
            await self._api.set_rig_status(self._rig_id, False)
            await asyncio.sleep(SWITCH_ASYNC_UPDATE_AFTER_SECONDS)
        except Exception as err:
            _LOGGER.error("Failed to set the status of '%s': %s", self.entity_id, err)
        await self.coordinator.async_request_refresh()
| """Support for NiceHash sensors."""
import asyncio
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.core import callback
# from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
)
from homeassistant.helpers.typing import HomeAssistantType
from custom_components.nicehash.nicehash import NiceHashPrivateAPI
from custom_components.nicehash.common import NiceHashSensorDataUpdateCoordinator
from custom_components.nicehash.const import (
API,
DOMAIN,
RIGS_OBJ,
SENSOR_DATA_COORDINATOR,
SWITCH_ASYNC_UPDATE_AFTER_SECONDS,
UNSUB,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM = "switch"
async def async_setup_entry(
    hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the NiceHash sensor using config entry."""
    coordinator: NiceHashSensorDataUpdateCoordinator = hass.data[DOMAIN][
        config_entry.entry_id
    ][SENSOR_DATA_COORDINATOR]

    @callback
    def _update_entities():
        # Remember already-added unique_ids on the function object so repeated
        # coordinator refreshes only create switches for newly appearing rigs.
        if not hasattr(_update_entities, "dev"):
            _update_entities.dev = []
        if not coordinator.last_update_success:
            return
        new_dev = []
        for rig in coordinator.data.get(RIGS_OBJ).get("miningRigs"):
            rig_id = rig.get("rigId")
            switch = NiceHashRigSwitch(
                hass.data[DOMAIN][config_entry.entry_id][API],
                coordinator,
                config_entry,
                rig_id,
            )
            if switch.unique_id not in _update_entities.dev:
                new_dev.append(switch)
                _update_entities.dev.append(switch.unique_id)
        async_add_entities(new_dev)

    # Re-run entity discovery on every coordinator update; keep the
    # unsubscribe callback so the entry can clean it up on unload.
    unsub = coordinator.async_add_listener(_update_entities)
    hass.data[DOMAIN][config_entry.entry_id][UNSUB].append(unsub)
    await coordinator.async_refresh()
class NiceHashRigSwitch(CoordinatorEntity, ToggleEntity):
    """Class describing a rig switch"""

    DOMAIN = PLATFORM

    def __init__(
        self, api: NiceHashPrivateAPI, coordinator, config_entry, rigId
    ) -> None:
        super().__init__(coordinator)
        self._rig_id = rigId
        self._config_entry = config_entry
        self._data_type = RIGS_OBJ
        self._api = api

    @property
    def available(self):
        """Return availability"""
        rig = self.get_rig()
        # Unavailable when the rig is missing from the coordinator data or in
        # a state where toggling it makes no sense.
        return (
            self.coordinator.last_update_success
            and rig is not None
            and rig.get("minerStatus", "UNKNOWN")
            not in ["DISABLED", "TRANSFERED", "UNKNOWN", "OFFLINE"]
        )

    def get_rig(self):
        """Return the rig object."""
        rig = None
        for rig_entry in self.coordinator.data[self._data_type].get("miningRigs", []):
            if rig_entry.get("rigId") == self._rig_id:
                rig = rig_entry
        return rig

    @property
    def name(self):
        # Entity name derived from the rig's reported name; None if the rig
        # is absent from the latest coordinator data.
        rig = self.get_rig()
        if rig is not None:
            name = f"NH - {rig.get('name')} - Power"
            return name
        return None

    @property
    def unique_id(self):
        unique_id = f"nh-{self._rig_id}-power"
        return unique_id

    @property
    def device_info(self):
        """Information about this entity/device."""
        # NOTE(review): assumes the rig is present in the coordinator data;
        # if get_rig() returned None this would raise — confirm HA only calls
        # this after the entity was discovered from that same data.
        rig = self.get_rig()
        return {
            "identifiers": {(DOMAIN, self._rig_id)},
            # If desired, the name for the device could be different to the entity
            "name": rig.get("name"),
            "sw_version": rig.get("softwareVersions"),
            "model": rig.get("softwareVersions"),
            "manufacturer": "NiceHash",
        }

    @property
    def is_on(self):
        """Return true if switch is on."""
        rig = self.get_rig()
        if rig is not None:
            status = rig.get("minerStatus", "UNKNOWN")
            if status in ["BENCHMARKING", "MINING"]:
                return True
        return False

    async def async_turn_on(self, **kwargs):
        """Turn the switch on."""
        try:
            await self._api.set_rig_status(self._rig_id, True)
            # Give NiceHash time to apply the change before refreshing state.
            await asyncio.sleep(SWITCH_ASYNC_UPDATE_AFTER_SECONDS)
        except Exception as err:
            _LOGGER.error("Failed to set the status of '%s': %s", self.entity_id, err)
        await self.coordinator.async_request_refresh()

    async def async_turn_off(self, **kwargs):
        """Turn the switch off."""
        try:
            await self._api.set_rig_status(self._rig_id, False)
            await asyncio.sleep(SWITCH_ASYNC_UPDATE_AFTER_SECONDS)
        except Exception as err:
            _LOGGER.error("Failed to set the status of '%s': %s", self.entity_id, err)
        await self.coordinator.async_request_refresh()
|
"""A small utility for generating slurm scripts.
The slurm template contains tags of the form {{variable-name}}, which are then
replaced with values for submission.
Examples:
EXP_LIST="seeded-exps.txt"
EXP_LIST="msrvtt-ablations.txt"
EXP_LIST="lsmdc-ablations.txt"
EXP_LIST="activity-net-ablations.txt"
EXP_LIST="didemo-ablations.txt"
EXP_LIST="msvd-ablations.txt"
python misc/generate_slurm_scripts.py --job_queue "slurm/${EXP_LIST}" \
&& source data/slurm/scripts/slurm-dependencies.sh
"""
import re
import uuid
import copy
import argparse
import itertools
from pathlib import Path
from itertools import zip_longest
from collections import OrderedDict
def get_short_uuid():
    """Return an 8-character random hex string (the first group of a uuid4).

    (The original docstring said 7 characters; the first dash-separated group
    of a UUID is 8 hex digits.) We could use the full uuid4() for better
    uniqueness properties, but it makes the filenames long and that is not
    needed for our purpose (simply grouping experiments that were run with
    the same configuration).
    """
    return str(uuid.uuid4()).split("-")[0]
def parse_grid(x):
    """Expand a compact command line string into a grid of concrete strings.

    Input of the form:
        --key1 val_a|val_b --key2 val_c|val_d
    (a vertical bar separates alternative values) expands to one string per
    combination:
        --key1 val_a --key2 val_c
        --key1 val_a --key2 val_d
        --key1 val_b --key2 val_c
        --key1 val_b --key2 val_d
    Every expanded string is tagged with the same random --group_id so the
    runs can be grouped later.
    """
    tokens = x.split(" ")
    group_id = get_short_uuid()
    # Token positions that carry alternatives, and the alternatives themselves.
    positions = [idx for idx, tok in enumerate(tokens) if "|" in tok]
    choices = [tokens[idx].split("|") for idx in positions]
    expanded = []
    for combo in itertools.product(*choices):
        variant = list(tokens)
        for idx, value in zip(positions, combo):
            variant[idx] = value
        variant.append(f"--group_id {group_id}")
        expanded.append(" ".join(variant))
    return expanded
def fill_template(template_path, rules):
    """Render the template at *template_path*.

    Each {{key}} tag is replaced with str(rules[key]); a missing key raises
    KeyError. Returns the rendered template as a single newline-joined string.
    """
    tag_pattern = re.compile(r"\{\{(.*?)\}\}")

    def _substitute(match):
        # Look the tag name up in the rules and splice in its string form.
        return str(rules[match.group(1)])

    with open(template_path, "r") as handle:
        template_lines = handle.read().splitlines()
    rendered = [tag_pattern.sub(_substitute, line) for line in template_lines]
    return "\n".join(rendered)
def parse_group_ids(parsed_cmds):
    """Map each group_id to the slurm array indices of its commands.

    Array indices are 1-based (slurm convention) and follow the order of
    *parsed_cmds*; group ids keep first-seen order.
    """
    groups = OrderedDict()
    for position, command in enumerate(parsed_cmds, start=1):
        tokens = command.split(" ")
        gid = tokens[tokens.index("--group_id") + 1]
        groups.setdefault(gid, []).append(position)
    return groups
def generate_slurm_dependency_script(group_ids, dependency_template, aggregation_scripts,
                                     generated_script_paths):
    """Render the top-level script that chains aggregation jobs to array tasks.

    For each group, an `sbatch --dependency=afterok:...` line is generated so
    the group's aggregation script only runs after all of its array tasks
    (`${job_id}_<idx>`) finished successfully.
    """
    dependency_lines = []
    for group_id, aggregation_script in aggregation_scripts.items():
        task_ids = group_ids[group_id]
        afterok = ":".join(f"${{job_id}}_{task}" for task in task_ids)
        dependency_lines.append(
            f"sbatch --dependency=afterok:{afterok} {aggregation_script}"
        )
    rules = {
        "dependencies": "\n".join(dependency_lines),
        "job_script_path": str(generated_script_paths["array-job"]),
    }
    return fill_template(template_path=dependency_template, rules=rules)
def jobn_name2agg_log_path(exp_dir, job_name):
    """Location of an aggregation job's log file inside the experiment dir."""
    return Path(exp_dir, "data", "slurm", job_name, "log.txt")
def generate_aggregation_script(exp_dir, group_id, aggregation_template,
                                aggregation_script_path):
    """Render the aggregation slurm script for one experiment group.

    Side effect: creates the job's slurm log directory on disk so the job can
    write its log immediately.
    """
    job_name = aggregation_script_path2job_name(aggregation_script_path)
    log_path = jobn_name2agg_log_path(exp_dir, job_name)
    log_path.parent.mkdir(exist_ok=True, parents=True)
    rules = {"job-name": job_name, "group_id": group_id, "log-path": log_path}
    return fill_template(template_path=aggregation_template, rules=rules)
def aggregation_script_path2job_name(aggregation_script_path):
    """Derive a job name of the form '<parent-dir>-<script-stem>'."""
    parent_stem = aggregation_script_path.parent.stem
    return "-".join((parent_stem, aggregation_script_path.stem))
def generate_script(template_path, slurm_script_dir, job_queue, exp_dir,
                    monitor_script, constraints, dependency_template,
                    aggregation_template):
    """Generate the slurm array-job, aggregation and dependency scripts.

    Reads the job queue file, expands each line into a grid of commands,
    prepares (and touches) every watched log file, writes a log-monitoring
    helper to `monitor_script`, and renders all generated scripts under
    `slurm_script_dir`.
    """
    with open(job_queue, "r") as f:
        custom_args = f.read().splitlines()
    # remove blank lines
    custom_args = [x for x in custom_args if x]
    parsed = []
    for line in custom_args:
        parsed.extend(parse_grid(line))
    num_array_workers = len(parsed)
    # Name the array job after the queue file, unless it is the generic
    # "latest" queue, in which case fall back to the first config path.
    if Path(job_queue).stem != "latest":
        array_job_name = Path(job_queue).stem
    else:
        config = parsed[0].split(" ")[1]
        array_job_name = config.replace("/", "-")
    generated_script_paths = {
        "main": "slurm-dependencies.sh",
        "array-job": "slurm-job.sh",
        "backup": f"{array_job_name}.sh",
    }
    group_ids = parse_group_ids(parsed)
    generated_script_paths = {key: Path(slurm_script_dir) / val
                              for key, val in generated_script_paths.items()}
    # One aggregation script per experiment group.
    aggregation_scripts = {}
    for group_id, arg_list in zip(group_ids, custom_args):
        # Sanitise the raw argument string so it can be embedded in a filename.
        arg_list = arg_list.replace("--", "").replace(" ", "_").replace(".json", "")
        arg_list = arg_list.replace("|", "_")
        fname = f"{array_job_name}-{arg_list}_agg_{group_id}.sh"
        path = Path(slurm_script_dir) / fname
        aggregation_scripts[group_id] = path
    generated_script_paths.update(aggregation_scripts)
    # worker logs (%4a is slurm's zero-padded array-task-id placeholder)
    array_log_path = Path(exp_dir) / "data/slurm" / array_job_name / "%4a-log.txt"
    array_log_path.parent.mkdir(exist_ok=True, parents=True)
    watched_logs = {"paths": [], "dividers": []}
    for idx in range(num_array_workers):
        slurm_id = idx + 1
        watched_log = Path(str(array_log_path).replace("%4a", f"{slurm_id:04d}"))
        msg = f">> START OF NEW JOB [{idx}/{num_array_workers}] <<\n"
        watched_logs["paths"].append(watched_log)
        watched_logs["dividers"].append(msg)
    for aggregation_script_path in aggregation_scripts.values():
        job_name = aggregation_script_path2job_name(aggregation_script_path)
        watched_log = jobn_name2agg_log_path(exp_dir, job_name)
        watched_logs["paths"].append(watched_log)
        watched_logs["dividers"].append(f">> STARTING AGGREGATION job [{job_name}] <<\n")
    for watched_log, divider in zip(watched_logs["paths"], watched_logs["dividers"]):
        watched_log.parent.mkdir(exist_ok=True, parents=True)
        if not watched_log.exists():
            print(f"Creating watch log: {watched_log} for the first time")
            watched_log.touch()
        else:
            with open(str(watched_log), "a") as f:
                f.write(divider)
    # FIX: reusing the same quote type inside an f-string replacement field is
    # a SyntaxError before Python 3.12 (PEP 701) — build the list up front.
    watched_log_csv = ",".join(str(x) for x in watched_logs["paths"])
    with open(monitor_script, "w") as f:
        f.write(f"watchlogs {watched_log_csv}\n")
    # FIX: the original joined `watched_logs` itself, which iterates the dict
    # KEYS ("paths", "dividers") instead of the log files being watched.
    print(f"Watching logs: {watched_log_csv}")
    for script_name, dest_path in generated_script_paths.items():
        dest_path.parent.mkdir(exist_ok=True, parents=True)
        if script_name in {"array-job", "backup"}:
            rules = {
                "job-name": array_job_name,
                "job_queue": " ".join([f'"{x}"' for x in parsed]),
                "constraints": constraints,
                "array-range": f"1-{num_array_workers}",
                "log-path": str(array_log_path),
            }
            script = fill_template(template_path, rules)
        elif script_name in aggregation_scripts:
            script = generate_aggregation_script(
                exp_dir=exp_dir,
                group_id=script_name,
                aggregation_script_path=dest_path,
                aggregation_template=aggregation_template,
            )
        elif script_name == "main":
            script = generate_slurm_dependency_script(
                group_ids=group_ids,
                generated_script_paths=generated_script_paths,
                aggregation_scripts=aggregation_scripts,
                dependency_template=dependency_template,
            )
        with open(str(dest_path), "w") as f:
            print(f"Writing slurm script ({script_name}) to {dest_path}")
            f.write(script)
def main():
    """CLI entry point: parse arguments and generate all slurm scripts."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--job_queue", default="data/job-queues/latest.txt")
    parser.add_argument("--slurm_script_dir", default="data/slurm/scripts")
    parser.add_argument("--slurm_template", default="misc/slurm/gpu-template_v2.sh")
    parser.add_argument("--dependency_template", default="misc/slurm/dependencies.sh")
    parser.add_argument("--aggregation_template",
                        default="misc/slurm/aggregate-logs-and-stats.sh")
    parser.add_argument("--constraints", default="")
    parser.add_argument("--exp_dir",
                        default="/users/albanie/coding/libs/pt/speedy-experts")
    args = parser.parse_args()
    # FIX: was an f-string with no placeholders (lint F541) — plain literal.
    monitor_script = "slurm/monitor-jobs.sh"
    generate_script(
        exp_dir=args.exp_dir,
        job_queue=args.job_queue,
        monitor_script=monitor_script,
        template_path=args.slurm_template,
        slurm_script_dir=args.slurm_script_dir,
        dependency_template=args.dependency_template,
        aggregation_template=args.aggregation_template,
        constraints=args.constraints,
    )
if __name__ == "__main__":
    # Entry point when the module is executed directly as a script.
    main()
| """A small utility for generating slurm scripts.
The slurm template contains tags of the form {{variable-name}}, which are then
replaced with values for submission.
Examples:
EXP_LIST="seeded-exps.txt"
EXP_LIST="msrvtt-ablations.txt"
EXP_LIST="lsmdc-ablations.txt"
EXP_LIST="activity-net-ablations.txt"
EXP_LIST="didemo-ablations.txt"
EXP_LIST="msvd-ablations.txt"
python misc/generate_slurm_scripts.py --job_queue "slurm/${EXP_LIST}" \
&& source data/slurm/scripts/slurm-dependencies.sh
"""
import re
import uuid
import copy
import argparse
import itertools
from pathlib import Path
from itertools import zip_longest
from collections import OrderedDict
def get_short_uuid():
    """Return a short (8 hex character) random identifier.

    A full uuid4 would have stronger uniqueness guarantees, but a short prefix
    keeps filenames readable and is plenty for simply grouping experiments
    that were launched with the same configuration.
    """
    full_uuid = str(uuid.uuid4())
    return full_uuid.split("-")[0]
def parse_grid(x):
    """Parse compact command line strings of the form:
        --key1 val_a|val_b --key2 val_c|val_d
    (a vertical bar separates alternative values) into a grid of separate
    command strings, e.g.:
        --key1 val_a --key2 val_c
        --key1 val_a --key2 val_d
        --key1 val_b --key2 val_c
        --key1 val_b --key2 val_d

    Every expanded command is tagged with a shared `--group_id` so the grid
    can be re-grouped later (see `parse_group_ids`). A line with no "|"
    tokens yields exactly one command (plus the group tag).
    """
    args = x.split(" ")
    # One id per input line: all commands expanded from it share this group.
    group_id = get_short_uuid()
    grid_opts, parsed = {}, []
    # Record, keyed by token position, every token offering multiple values.
    for ii, token in enumerate(args):
        if "|" in token:
            grid_opts[ii] = token.split("|")
    grid_idx, grid_vals = [], []
    for ii, val in grid_opts.items():
        grid_idx.append(ii)
        grid_vals.append(val)
    # Cartesian product over all multi-valued positions.
    grid_vals = list(itertools.product(*grid_vals))
    for cfg in grid_vals:
        base = copy.deepcopy(args)
        # Substitute each chosen value back into its original token position.
        for ii, val in zip(grid_idx, cfg):
            base[ii] = val
        base.append(f"--group_id {group_id}")
        parsed.append(" ".join(base))
    return parsed
def fill_template(template_path, rules):
    """Read `template_path` and substitute every {{key}} tag with
    str(rules[key]).

    Raises KeyError if the template references a key absent from `rules`.
    """
    tag_pattern = re.compile(r"\{\{(.*?)\}\}")
    with open(template_path, "r") as f:
        lines = f.read().splitlines()
    # Replace every tag in place; untouched lines pass through unchanged.
    filled = [tag_pattern.sub(lambda m: str(rules[m.group(1)]), line)
              for line in lines]
    return "\n".join(filled)
def parse_group_ids(parsed_cmds):
    """Map each group_id found in `parsed_cmds` to the 1-indexed positions
    (slurm array task ids) of the commands that carry it."""
    group_ids = OrderedDict()
    for ii, cmd in enumerate(parsed_cmds):
        tokens = cmd.split(" ")
        # The id is the token immediately following the --group_id flag.
        group_id = tokens[tokens.index("--group_id") + 1]
        if group_id not in group_ids:
            group_ids[group_id] = []
        group_ids[group_id].append(ii + 1)  # slurm arrays are 1-indexed
    return group_ids
def generate_slurm_dependency_script(group_ids, dependency_template, aggregation_scripts,
                                     generated_script_paths):
    """Render the top-level script that submits the array job and chains each
    group's aggregation script onto its array tasks with --dependency."""
    deps = []
    for group_id, aggregation_script in aggregation_scripts.items():
        array_id_list = group_ids[group_id]
        # ${job_id} is resolved by the dependency template at submit time.
        array_deps = ":".join([f"${{job_id}}_{x}" for x in array_id_list])
        dep = f"sbatch --dependency=afterok:{array_deps} {aggregation_script}"
        deps.append(dep)
    # Keys must match the {{...}} tags in the dependency template.
    rules = {
        "dependencies": "\n".join(deps),
        "job_script_path": str(generated_script_paths["array-job"]),
    }
    return fill_template(template_path=dependency_template, rules=rules)
def jobn_name2agg_log_path(exp_dir, job_name):
    """Return the aggregation-log file location for `job_name` under `exp_dir`."""
    return Path(exp_dir) / "data/slurm" / job_name / "log.txt"
def generate_aggregation_script(exp_dir, group_id, aggregation_template,
                                aggregation_script_path):
    """Render the per-group aggregation script from `aggregation_template`.

    Side effect: creates the directory that will hold the aggregation log.
    """
    # Derive the slurm job name from where the script lives on disk.
    job_name = aggregation_script_path2job_name(aggregation_script_path)
    log_path = jobn_name2agg_log_path(exp_dir, job_name)
    log_path.parent.mkdir(exist_ok=True, parents=True)
    # Keys must match the {{...}} tags used in the template file.
    rules = {"job-name": job_name, "group_id": group_id, "log-path": log_path}
    return fill_template(template_path=aggregation_template, rules=rules)
def aggregation_script_path2job_name(aggregation_script_path):
    """Combine the parent-directory stem and file stem into a slurm job name."""
    job_name = f"{aggregation_script_path.parent.stem}-{aggregation_script_path.stem}"
    return job_name
def generate_script(template_path, slurm_script_dir, job_queue, exp_dir,
                    monitor_script, constraints, dependency_template,
                    aggregation_template):
    """Generate the slurm array-job, aggregation and dependency scripts.

    Reads the job queue file, expands each line into a grid of commands,
    prepares (and touches) every watched log file, writes a log-monitoring
    helper to `monitor_script`, and renders all generated scripts under
    `slurm_script_dir`.
    """
    with open(job_queue, "r") as f:
        custom_args = f.read().splitlines()
    # remove blank lines
    custom_args = [x for x in custom_args if x]
    parsed = []
    for line in custom_args:
        parsed.extend(parse_grid(line))
    num_array_workers = len(parsed)
    # Name the array job after the queue file, unless it is the generic
    # "latest" queue, in which case fall back to the first config path.
    if Path(job_queue).stem != "latest":
        array_job_name = Path(job_queue).stem
    else:
        config = parsed[0].split(" ")[1]
        array_job_name = config.replace("/", "-")
    generated_script_paths = {
        "main": "slurm-dependencies.sh",
        "array-job": "slurm-job.sh",
        "backup": f"{array_job_name}.sh",
    }
    group_ids = parse_group_ids(parsed)
    generated_script_paths = {key: Path(slurm_script_dir) / val
                              for key, val in generated_script_paths.items()}
    # One aggregation script per experiment group.
    aggregation_scripts = {}
    for group_id, arg_list in zip(group_ids, custom_args):
        # Sanitise the raw argument string so it can be embedded in a filename.
        arg_list = arg_list.replace("--", "").replace(" ", "_").replace(".json", "")
        arg_list = arg_list.replace("|", "_")
        fname = f"{array_job_name}-{arg_list}_agg_{group_id}.sh"
        path = Path(slurm_script_dir) / fname
        aggregation_scripts[group_id] = path
    generated_script_paths.update(aggregation_scripts)
    # worker logs (%4a is slurm's zero-padded array-task-id placeholder)
    array_log_path = Path(exp_dir) / "data/slurm" / array_job_name / "%4a-log.txt"
    array_log_path.parent.mkdir(exist_ok=True, parents=True)
    watched_logs = {"paths": [], "dividers": []}
    for idx in range(num_array_workers):
        slurm_id = idx + 1
        watched_log = Path(str(array_log_path).replace("%4a", f"{slurm_id:04d}"))
        msg = f">> START OF NEW JOB [{idx}/{num_array_workers}] <<\n"
        watched_logs["paths"].append(watched_log)
        watched_logs["dividers"].append(msg)
    for aggregation_script_path in aggregation_scripts.values():
        job_name = aggregation_script_path2job_name(aggregation_script_path)
        watched_log = jobn_name2agg_log_path(exp_dir, job_name)
        watched_logs["paths"].append(watched_log)
        watched_logs["dividers"].append(f">> STARTING AGGREGATION job [{job_name}] <<\n")
    for watched_log, divider in zip(watched_logs["paths"], watched_logs["dividers"]):
        watched_log.parent.mkdir(exist_ok=True, parents=True)
        if not watched_log.exists():
            print(f"Creating watch log: {watched_log} for the first time")
            watched_log.touch()
        else:
            with open(str(watched_log), "a") as f:
                f.write(divider)
    watched_log_csv = ",".join(str(x) for x in watched_logs["paths"])
    with open(monitor_script, "w") as f:
        f.write(f"watchlogs {watched_log_csv}\n")
    # FIX: the original joined `watched_logs` itself, which iterates the dict
    # KEYS ("paths", "dividers") instead of the log files being watched.
    print(f"Watching logs: {watched_log_csv}")
    for script_name, dest_path in generated_script_paths.items():
        dest_path.parent.mkdir(exist_ok=True, parents=True)
        if script_name in {"array-job", "backup"}:
            rules = {
                "job-name": array_job_name,
                "job_queue": " ".join([f'"{x}"' for x in parsed]),
                "constraints": constraints,
                "array-range": f"1-{num_array_workers}",
                "log-path": str(array_log_path),
            }
            script = fill_template(template_path, rules)
        elif script_name in aggregation_scripts:
            script = generate_aggregation_script(
                exp_dir=exp_dir,
                group_id=script_name,
                aggregation_script_path=dest_path,
                aggregation_template=aggregation_template,
            )
        elif script_name == "main":
            script = generate_slurm_dependency_script(
                group_ids=group_ids,
                generated_script_paths=generated_script_paths,
                aggregation_scripts=aggregation_scripts,
                dependency_template=dependency_template,
            )
        with open(str(dest_path), "w") as f:
            print(f"Writing slurm script ({script_name}) to {dest_path}")
            f.write(script)
def main():
    """CLI entry point: parse arguments and generate all slurm scripts."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--job_queue", default="data/job-queues/latest.txt")
    parser.add_argument("--slurm_script_dir", default="data/slurm/scripts")
    parser.add_argument("--slurm_template", default="misc/slurm/gpu-template_v2.sh")
    parser.add_argument("--dependency_template", default="misc/slurm/dependencies.sh")
    parser.add_argument("--aggregation_template",
                        default="misc/slurm/aggregate-logs-and-stats.sh")
    parser.add_argument("--constraints", default="")
    parser.add_argument("--exp_dir",
                        default="/users/albanie/coding/libs/pt/speedy-experts")
    args = parser.parse_args()
    # FIX: was an f-string with no placeholders (lint F541) — plain literal.
    monitor_script = "slurm/monitor-jobs.sh"
    generate_script(
        exp_dir=args.exp_dir,
        job_queue=args.job_queue,
        monitor_script=monitor_script,
        template_path=args.slurm_template,
        slurm_script_dir=args.slurm_script_dir,
        dependency_template=args.dependency_template,
        aggregation_template=args.aggregation_template,
        constraints=args.constraints,
    )
if __name__ == "__main__":
    # Entry point when the module is executed directly as a script.
    main()
|
import colorsys
import logging
import pprint
import textwrap
from collections import Counter, defaultdict
from string import Template
from typing import Any, Mapping, Optional, Union
from discord import Colour, Embed, Member, Message, Role, Status, utils
from discord.ext.commands import BucketType, Cog, Context, Paginator, command, group
from discord.utils import escape_markdown
from bot import constants
from bot.bot import Bot
from bot.decorators import InChannelCheckFailure, in_channel, with_role
from bot.pagination import LinePaginator
from bot.utils.checks import cooldown_with_role_bypass, with_role_check
from bot.utils.time import time_since
log = logging.getLogger(__name__)
class Information(Cog):
    """A cog with commands for generating embeds with server info, such as server stats and user info."""

    def __init__(self, bot: Bot):
        self.bot = bot

    @with_role(*constants.MODERATION_ROLES)
    @command(name="roles")
    async def roles_info(self, ctx: Context) -> None:
        """Returns a list of all roles and their corresponding IDs."""
        # Sort the roles alphabetically and remove the @everyone role
        roles = sorted(ctx.guild.roles[1:], key=lambda role: role.name)

        # Build a list
        role_list = []
        for role in roles:
            role_list.append(f"`{role.id}` - {role.mention}")

        # Build an embed
        embed = Embed(
            # FIX: reusing the same quote type inside an f-string replacement
            # field is a SyntaxError before Python 3.12 (PEP 701) — use
            # single quotes for the pluralisation literal.
            title=f"Role information (Total {len(roles)} role{'s' * (len(role_list) > 1)})",
            colour=Colour.blurple()
        )

        await LinePaginator.paginate(role_list, ctx, embed, empty=False)

    @with_role(*constants.MODERATION_ROLES)
    @command(name="role")
    async def role_info(self, ctx: Context, *roles: Union[Role, str]) -> None:
        """
        Return information on a role or list of roles.

        To specify multiple roles just add to the arguments, delimit roles with spaces in them using quotation marks.
        """
        parsed_roles = []
        failed_roles = []

        for role_name in roles:
            if isinstance(role_name, Role):
                # Role conversion has already succeeded
                parsed_roles.append(role_name)
                continue

            role = utils.find(lambda r: r.name.lower() == role_name.lower(), ctx.guild.roles)

            if not role:
                failed_roles.append(role_name)
                continue

            parsed_roles.append(role)

        if failed_roles:
            # FIX: without the explicit "+", implicit string-literal
            # concatenation turned the whole ":x: ..." preamble into the
            # separator passed to str.join, silently dropping the message
            # prefix (and emitting only the role names).
            await ctx.send(
                ":x: I could not convert the following role names to a role: \n- "
                + "\n- ".join(failed_roles)
            )

        for role in parsed_roles:
            h, s, v = colorsys.rgb_to_hsv(*role.colour.to_rgb())

            embed = Embed(
                title=f"{role.name} info",
                colour=role.colour,
            )
            embed.add_field(name="ID", value=role.id, inline=True)
            embed.add_field(name="Colour (RGB)", value=f"#{role.colour.value:0>6x}", inline=True)
            # NOTE(review): `v` is left unformatted (unlike h/s with :.2f) —
            # confirm whether that asymmetry is intentional.
            embed.add_field(name="Colour (HSV)", value=f"{h:.2f} {s:.2f} {v}", inline=True)
            embed.add_field(name="Member count", value=len(role.members), inline=True)
            embed.add_field(name="Position", value=role.position)
            embed.add_field(name="Permission code", value=role.permissions.value, inline=True)

            await ctx.send(embed=embed)

    @command(name="server", aliases=["server_info", "guild", "guild_info"])
    async def server_info(self, ctx: Context) -> None:
        """Returns an embed full of server information."""
        created = time_since(ctx.guild.created_at, precision="days")
        features = ", ".join(ctx.guild.features)
        region = ctx.guild.region
        roles = len(ctx.guild.roles)
        member_count = ctx.guild.member_count

        # How many of each type of channel?
        channels = Counter(c.type for c in ctx.guild.channels)
        channel_counts = "".join(sorted(f"{str(ch).title()} channels: {channels[ch]}\n" for ch in channels)).strip()

        # How many of each user status?
        statuses = Counter(member.status for member in ctx.guild.members)

        embed = Embed(colour=Colour.blurple())

        # Because channel_counts lacks leading whitespace, it breaks the dedent if it's inserted directly by the
        # f-string. While this is correctly formated by Discord, it makes unit testing difficult. To keep the formatting
        # without joining a tuple of strings we can use a Template string to insert the already-formatted channel_counts
        # after the dedent is made.
        embed.description = Template(
            textwrap.dedent(f"""
                **Server information**
                Created: {created}
                Voice region: {region}
                Features: {features}

                **Counts**
                Members: {member_count:,}
                Roles: {roles}
                $channel_counts

                **Members**
                {constants.Emojis.status_online} {statuses[Status.online]:,}
                {constants.Emojis.status_idle} {statuses[Status.idle]:,}
                {constants.Emojis.status_dnd} {statuses[Status.dnd]:,}
                {constants.Emojis.status_offline} {statuses[Status.offline]:,}
            """)
        ).substitute({"channel_counts": channel_counts})

        embed.set_thumbnail(url=ctx.guild.icon_url)

        await ctx.send(embed=embed)

    @command(name="user", aliases=["user_info", "member", "member_info"])
    async def user_info(self, ctx: Context, user: Member = None) -> None:
        """Returns info about a user."""
        if user is None:
            user = ctx.author

        # Do a role check if this is being executed on someone other than the caller
        elif user != ctx.author and not with_role_check(ctx, *constants.MODERATION_ROLES):
            await ctx.send("You may not use this command on users other than yourself.")
            return

        # Non-staff may only do this in #bot-commands
        if not with_role_check(ctx, *constants.STAFF_ROLES):
            if not ctx.channel.id == constants.Channels.bot_commands:
                raise InChannelCheckFailure(constants.Channels.bot_commands)

        embed = await self.create_user_embed(ctx, user)

        await ctx.send(embed=embed)

    async def create_user_embed(self, ctx: Context, user: Member) -> Embed:
        """Creates an embed containing information on the `user`."""
        created = time_since(user.created_at, max_units=3)

        # Custom status
        custom_status = ''
        for activity in user.activities:
            # Check activity.state for None value if user has a custom status set
            # This guards against a custom status with an emoji but no text, which will cause
            # escape_markdown to raise an exception
            # This can be reworked after a move to d.py 1.3.0+, which adds a CustomActivity class
            if activity.name == 'Custom Status' and activity.state:
                state = escape_markdown(activity.state)
                custom_status = f'Status: {state}\n'

        name = str(user)
        if user.nick:
            name = f"{user.nick} ({name})"

        joined = time_since(user.joined_at, precision="days")
        roles = ", ".join(role.mention for role in user.roles[1:])

        description = [
            textwrap.dedent(f"""
                **User Information**
                Created: {created}
                Profile: {user.mention}
                ID: {user.id}
                {custom_status}
                **Member Information**
                Joined: {joined}
                Roles: {roles or None}
            """).strip()
        ]

        # Show more verbose output in moderation channels for infractions and nominations
        if ctx.channel.id in constants.MODERATION_CHANNELS:
            description.append(await self.expanded_user_infraction_counts(user))
            description.append(await self.user_nomination_counts(user))
        else:
            description.append(await self.basic_user_infraction_counts(user))

        # Let's build the embed now
        embed = Embed(
            title=name,
            description="\n\n".join(description)
        )

        embed.set_thumbnail(url=user.avatar_url_as(format="png"))
        embed.colour = user.top_role.colour if roles else Colour.blurple()

        return embed

    async def basic_user_infraction_counts(self, member: Member) -> str:
        """Gets the total and active infraction counts for the given `member`."""
        infractions = await self.bot.api_client.get(
            'bot/infractions',
            params={
                'hidden': 'False',
                'user__id': str(member.id)
            }
        )

        total_infractions = len(infractions)
        active_infractions = sum(infraction['active'] for infraction in infractions)

        infraction_output = f"**Infractions**\nTotal: {total_infractions}\nActive: {active_infractions}"

        return infraction_output

    async def expanded_user_infraction_counts(self, member: Member) -> str:
        """
        Gets expanded infraction counts for the given `member`.

        The counts will be split by infraction type and the number of active infractions for each type will indicated
        in the output as well.
        """
        infractions = await self.bot.api_client.get(
            'bot/infractions',
            params={
                'user__id': str(member.id)
            }
        )

        infraction_output = ["**Infractions**"]
        if not infractions:
            infraction_output.append("This user has never received an infraction.")
        else:
            # Count infractions split by `type` and `active` status for this user
            infraction_types = set()
            infraction_counter = defaultdict(int)
            for infraction in infractions:
                infraction_type = infraction["type"]
                infraction_active = 'active' if infraction["active"] else 'inactive'

                infraction_types.add(infraction_type)
                infraction_counter[f"{infraction_active} {infraction_type}"] += 1

            # Format the output of the infraction counts
            for infraction_type in sorted(infraction_types):
                active_count = infraction_counter[f"active {infraction_type}"]
                total_count = active_count + infraction_counter[f"inactive {infraction_type}"]

                line = f"{infraction_type.capitalize()}s: {total_count}"
                if active_count:
                    line += f" ({active_count} active)"

                infraction_output.append(line)

        return "\n".join(infraction_output)

    async def user_nomination_counts(self, member: Member) -> str:
        """Gets the active and historical nomination counts for the given `member`."""
        nominations = await self.bot.api_client.get(
            'bot/nominations',
            params={
                'user__id': str(member.id)
            }
        )

        output = ["**Nominations**"]

        if not nominations:
            output.append("This user has never been nominated.")
        else:
            count = len(nominations)
            is_currently_nominated = any(nomination["active"] for nomination in nominations)
            nomination_noun = "nomination" if count == 1 else "nominations"

            if is_currently_nominated:
                output.append(f"This user is **currently** nominated ({count} {nomination_noun} in total).")
            else:
                output.append(f"This user has {count} historical {nomination_noun}, but is currently not nominated.")

        return "\n".join(output)

    def format_fields(self, mapping: Mapping[str, Any], field_width: Optional[int] = None) -> str:
        """Format a mapping to be readable to a human."""
        # sorting is technically superfluous but nice if you want to look for a specific field
        fields = sorted(mapping.items(), key=lambda item: item[0])

        if field_width is None:
            field_width = len(max(mapping.keys(), key=len))

        out = ''

        for key, val in fields:
            if isinstance(val, dict):
                # if we have dicts inside dicts we want to apply the same treatment to the inner dictionaries
                inner_width = int(field_width * 1.6)
                val = '\n' + self.format_fields(val, field_width=inner_width)

            elif isinstance(val, str):
                # split up text since it might be long
                text = textwrap.fill(val, width=100, replace_whitespace=False)

                # indent it, I guess you could do this with `wrap` and `join` but this is nicer
                val = textwrap.indent(text, ' ' * (field_width + len(': ')))

                # the first line is already indented so we `str.lstrip` it
                val = val.lstrip()

            if key == 'color':
                # makes the base 10 representation of a hex number readable to humans
                val = hex(val)

            out += '{0:>{width}}: {1}\n'.format(key, val, width=field_width)

        # remove trailing whitespace
        return out.rstrip()

    @cooldown_with_role_bypass(2, 60 * 3, BucketType.member, bypass_roles=constants.STAFF_ROLES)
    @group(invoke_without_command=True)
    @in_channel(constants.Channels.bot_commands, bypass_roles=constants.STAFF_ROLES)
    async def raw(self, ctx: Context, *, message: Message, json: bool = False) -> None:
        """Shows information about the raw API response."""
        # I *guess* it could be deleted right as the command is invoked but I felt like it wasn't worth handling
        # doing this extra request is also much easier than trying to convert everything back into a dictionary again
        raw_data = await ctx.bot.http.get_message(message.channel.id, message.id)

        paginator = Paginator()

        def add_content(title: str, content: str) -> None:
            paginator.add_line(f'== {title} ==\n')
            # replace backticks as it breaks out of code blocks. Spaces seemed to be the most reasonable solution.
            # we hope it's not close to 2000
            paginator.add_line(content.replace('```', '`` `'))
            paginator.close_page()

        if message.content:
            add_content('Raw message', message.content)

        transformer = pprint.pformat if json else self.format_fields
        for field_name in ('embeds', 'attachments'):
            data = raw_data[field_name]

            if not data:
                continue

            total = len(data)
            for current, item in enumerate(data, start=1):
                title = f'Raw {field_name} ({current}/{total})'
                add_content(title, transformer(item))

        for page in paginator.pages:
            await ctx.send(page)

    @raw.command()
    async def json(self, ctx: Context, message: Message) -> None:
        """Shows information about the raw API response in a copy-pasteable Python format."""
        await ctx.invoke(self.raw, message=message, json=True)
def setup(bot: Bot) -> None:
    """Load the Information cog."""
    cog = Information(bot)
    bot.add_cog(cog)
| import colorsys
import logging
import pprint
import textwrap
from collections import Counter, defaultdict
from string import Template
from typing import Any, Mapping, Optional, Union
from discord import Colour, Embed, Member, Message, Role, Status, utils
from discord.ext.commands import BucketType, Cog, Context, Paginator, command, group
from discord.utils import escape_markdown
from bot import constants
from bot.bot import Bot
from bot.decorators import InChannelCheckFailure, in_channel, with_role
from bot.pagination import LinePaginator
from bot.utils.checks import cooldown_with_role_bypass, with_role_check
from bot.utils.time import time_since
log = logging.getLogger(__name__)
class Information(Cog):
"""A cog with commands for generating embeds with server info, such as server stats and user info."""
def __init__(self, bot: Bot):
self.bot = bot
@with_role(*constants.MODERATION_ROLES)
@command(name="roles")
async def roles_info(self, ctx: Context) -> None:
"""Returns a list of all roles and their corresponding IDs."""
# Sort the roles alphabetically and remove the @everyone role
roles = sorted(ctx.guild.roles[1:], key=lambda role: role.name)
# Build a list
role_list = []
for role in roles:
role_list.append(f"`{role.id}` - {role.mention}")
# Build an embed
embed = Embed(
title=f"Role information (Total {len(roles)} role{'s' * (len(role_list) > 1)})",
colour=Colour.blurple()
)
await LinePaginator.paginate(role_list, ctx, embed, empty=False)
@with_role(*constants.MODERATION_ROLES)
@command(name="role")
async def role_info(self, ctx: Context, *roles: Union[Role, str]) -> None:
"""
Return information on a role or list of roles.
To specify multiple roles just add to the arguments, delimit roles with spaces in them using quotation marks.
"""
parsed_roles = []
failed_roles = []
for role_name in roles:
if isinstance(role_name, Role):
# Role conversion has already succeeded
parsed_roles.append(role_name)
continue
role = utils.find(lambda r: r.name.lower() == role_name.lower(), ctx.guild.roles)
if not role:
failed_roles.append(role_name)
continue
parsed_roles.append(role)
if failed_roles:
await ctx.send(
":x: I could not convert the following role names to a role: \n- "
"\n- ".join(failed_roles)
)
for role in parsed_roles:
h, s, v = colorsys.rgb_to_hsv(*role.colour.to_rgb())
embed = Embed(
title=f"{role.name} info",
colour=role.colour,
)
embed.add_field(name="ID", value=role.id, inline=True)
embed.add_field(name="Colour (RGB)", value=f"#{role.colour.value:0>6x}", inline=True)
embed.add_field(name="Colour (HSV)", value=f"{h:.2f} {s:.2f} {v}", inline=True)
embed.add_field(name="Member count", value=len(role.members), inline=True)
embed.add_field(name="Position", value=role.position)
embed.add_field(name="Permission code", value=role.permissions.value, inline=True)
await ctx.send(embed=embed)
@command(name="server", aliases=["server_info", "guild", "guild_info"])
async def server_info(self, ctx: Context) -> None:
"""Returns an embed full of server information."""
created = time_since(ctx.guild.created_at, precision="days")
features = ", ".join(ctx.guild.features)
region = ctx.guild.region
roles = len(ctx.guild.roles)
member_count = ctx.guild.member_count
# How many of each type of channel?
channels = Counter(c.type for c in ctx.guild.channels)
channel_counts = "".join(sorted(f"{str(ch).title()} channels: {channels[ch]}\n" for ch in channels)).strip()
# How many of each user status?
statuses = Counter(member.status for member in ctx.guild.members)
embed = Embed(colour=Colour.blurple())
# Because channel_counts lacks leading whitespace, it breaks the dedent if it's inserted directly by the
# f-string. While this is correctly formated by Discord, it makes unit testing difficult. To keep the formatting
# without joining a tuple of strings we can use a Template string to insert the already-formatted channel_counts
# after the dedent is made.
embed.description = Template(
textwrap.dedent(f"""
**Server information**
Created: {created}
Voice region: {region}
Features: {features}
**Counts**
Members: {member_count:,}
Roles: {roles}
$channel_counts
**Members**
{constants.Emojis.status_online} {statuses[Status.online]:,}
{constants.Emojis.status_idle} {statuses[Status.idle]:,}
{constants.Emojis.status_dnd} {statuses[Status.dnd]:,}
{constants.Emojis.status_offline} {statuses[Status.offline]:,}
""")
).substitute({"channel_counts": channel_counts})
embed.set_thumbnail(url=ctx.guild.icon_url)
await ctx.send(embed=embed)
@command(name="user", aliases=["user_info", "member", "member_info"])
async def user_info(self, ctx: Context, user: Member = None) -> None:
"""Returns info about a user."""
if user is None:
user = ctx.author
# Do a role check if this is being executed on someone other than the caller
elif user != ctx.author and not with_role_check(ctx, *constants.MODERATION_ROLES):
await ctx.send("You may not use this command on users other than yourself.")
return
# Non-staff may only do this in #bot-commands
if not with_role_check(ctx, *constants.STAFF_ROLES):
if not ctx.channel.id == constants.Channels.bot_commands:
raise InChannelCheckFailure(constants.Channels.bot_commands)
embed = await self.create_user_embed(ctx, user)
await ctx.send(embed=embed)
async def create_user_embed(self, ctx: Context, user: Member) -> Embed:
"""Creates an embed containing information on the `user`."""
created = time_since(user.created_at, max_units=3)
# Custom status
custom_status = ''
for activity in user.activities:
# Check activity.state for None value if user has a custom status set
# This guards against a custom status with an emoji but no text, which will cause
# escape_markdown to raise an exception
# This can be reworked after a move to d.py 1.3.0+, which adds a CustomActivity class
if activity.name == 'Custom Status' and activity.state:
state = escape_markdown(activity.state)
custom_status = f'Status: {state}\n'
name = str(user)
if user.nick:
name = f"{user.nick} ({name})"
joined = time_since(user.joined_at, precision="days")
roles = ", ".join(role.mention for role in user.roles[1:])
description = [
textwrap.dedent(f"""
**User Information**
Created: {created}
Profile: {user.mention}
ID: {user.id}
{custom_status}
**Member Information**
Joined: {joined}
Roles: {roles or None}
""").strip()
]
# Show more verbose output in moderation channels for infractions and nominations
if ctx.channel.id in constants.MODERATION_CHANNELS:
description.append(await self.expanded_user_infraction_counts(user))
description.append(await self.user_nomination_counts(user))
else:
description.append(await self.basic_user_infraction_counts(user))
# Let's build the embed now
embed = Embed(
title=name,
description="\n\n".join(description)
)
embed.set_thumbnail(url=user.avatar_url_as(format="png"))
embed.colour = user.top_role.colour if roles else Colour.blurple()
return embed
async def basic_user_infraction_counts(self, member: Member) -> str:
    """Gets the total and active infraction counts for the given `member`."""
    # Hidden infractions are excluded from this basic (non-moderator) view.
    infractions = await self.bot.api_client.get(
        'bot/infractions',
        params={
            'hidden': 'False',
            'user__id': str(member.id)
        }
    )

    active_total = sum(1 for record in infractions if record['active'])
    return f"**Infractions**\nTotal: {len(infractions)}\nActive: {active_total}"
async def expanded_user_infraction_counts(self, member: Member) -> str:
    """
    Gets expanded infraction counts for the given `member`.

    The counts are split by infraction type, and the number of active infractions
    for each type is indicated in the output as well.
    """
    infractions = await self.bot.api_client.get(
        'bot/infractions',
        params={
            'user__id': str(member.id)
        }
    )

    lines = ["**Infractions**"]
    if not infractions:
        lines.append("This user has never received an infraction.")
        return "\n".join(lines)

    # Tally (type, active?) pairs in a single pass over the records.
    tally = defaultdict(int)
    for record in infractions:
        tally[(record["type"], bool(record["active"]))] += 1

    # One output line per infraction type, alphabetically.
    for infraction_type in sorted({pair[0] for pair in tally}):
        active_count = tally[(infraction_type, True)]
        total_count = active_count + tally[(infraction_type, False)]
        summary = f"{infraction_type.capitalize()}s: {total_count}"
        if active_count:
            summary += f" ({active_count} active)"
        lines.append(summary)

    return "\n".join(lines)
async def user_nomination_counts(self, member: Member) -> str:
    """Gets the active and historical nomination counts for the given `member`."""
    nominations = await self.bot.api_client.get(
        'bot/nominations',
        params={
            'user__id': str(member.id)
        }
    )

    lines = ["**Nominations**"]
    if not nominations:
        lines.append("This user has never been nominated.")
        return "\n".join(lines)

    total = len(nominations)
    noun = "nomination" if total == 1 else "nominations"
    currently_nominated = any(entry["active"] for entry in nominations)
    if currently_nominated:
        lines.append(f"This user is **currently** nominated ({total} {noun} in total).")
    else:
        lines.append(f"This user has {total} historical {noun}, but is currently not nominated.")
    return "\n".join(lines)
def format_fields(self, mapping: Mapping[str, Any], field_width: Optional[int] = None) -> str:
    """Format a mapping to be readable to a human.

    Keys are right-aligned to `field_width` (defaults to the longest key);
    nested dicts are rendered recursively with a wider indent, long strings
    are wrapped, and the `color` field is shown as hex.
    """
    if not mapping:
        # Nothing to render; previously this raised ValueError in max() below.
        return ''

    # sorting is technically superfluous but nice if you want to look for a specific field
    fields = sorted(mapping.items(), key=lambda item: item[0])

    if field_width is None:
        field_width = len(max(mapping.keys(), key=len))

    out = ''
    for key, val in fields:
        if isinstance(val, dict):
            # if we have dicts inside dicts we want to apply the same treatment to the inner dictionaries
            inner_width = int(field_width * 1.6)
            val = '\n' + self.format_fields(val, field_width=inner_width)
        elif isinstance(val, str):
            # split up text since it might be long
            text = textwrap.fill(val, width=100, replace_whitespace=False)
            # indent it, I guess you could do this with `wrap` and `join` but this is nicer
            val = textwrap.indent(text, ' ' * (field_width + len(': ')))
            # the first line is already indented so we `str.lstrip` it
            val = val.lstrip()
        if key == 'color':
            # makes the base 10 representation of a hex number readable to humans
            val = hex(val)

        out += '{0:>{width}}: {1}\n'.format(key, val, width=field_width)
    # remove trailing whitespace
    return out.rstrip()
@cooldown_with_role_bypass(2, 60 * 3, BucketType.member, bypass_roles=constants.STAFF_ROLES)
@group(invoke_without_command=True)
@in_channel(constants.Channels.bot_commands, bypass_roles=constants.STAFF_ROLES)
async def raw(self, ctx: Context, *, message: Message, json: bool = False) -> None:
    """Shows information about the raw API response."""
    # I *guess* it could be deleted right as the command is invoked but I felt like it wasn't worth handling
    # doing this extra request is also much easier than trying to convert everything back into a dictionary again
    raw_data = await ctx.bot.http.get_message(message.channel.id, message.id)
    paginator = Paginator()

    def add_content(title: str, content: str) -> None:
        # Each titled section goes on its own paginator page.
        paginator.add_line(f'== {title} ==\n')
        # replace backticks as it breaks out of code blocks. Spaces seemed to be the most reasonable solution.
        # we hope it's not close to 2000
        paginator.add_line(content.replace('```', '`` `'))
        paginator.close_page()

    if message.content:
        add_content('Raw message', message.content)

    # json=True pretty-prints the raw dicts; otherwise use the human-readable formatter.
    transformer = pprint.pformat if json else self.format_fields
    for field_name in ('embeds', 'attachments'):
        data = raw_data[field_name]
        if not data:
            continue
        total = len(data)
        for current, item in enumerate(data, start=1):
            title = f'Raw {field_name} ({current}/{total})'
            add_content(title, transformer(item))

    for page in paginator.pages:
        await ctx.send(page)
@raw.command()
async def json(self, ctx: Context, message: Message) -> None:
    """Shows information about the raw API response in a copy-pasteable Python format."""
    # Thin alias: re-invoke `raw` with json=True so the output goes through pprint.
    await ctx.invoke(self.raw, message=message, json=True)
def setup(bot: Bot) -> None:
    """Load the Information cog."""
    # Entry point called by discord.py's extension loader.
    bot.add_cog(Information(bot))
|
#!/usr/bin/env python3
"""
Copyright 2020 Samuel Huang
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import json
import os
import sys
from datetime import datetime, timedelta
VERSION = 'v1.0.2'
JSON_FILENAME = '.tldt.json'
JSON_NAME_CONFIGURATION = 'configuration'
JSON_NAME_ACTIONS = 'actions'
JSON_NAME_KEY = 'key'
JSON_NAME_DESC = 'description'
JSON_NAME_CMD = 'command'
ACTION_KEY_QUIT = 'q'
def create_sample_file():
    """Write a starter .tldt.json with one sample action to the current directory."""
    # Build the sample document directly instead of round-tripping through a JSON string.
    tldt = {
        "configuration": {"sortByKey": True},
        "actions": [
            {"key": "1", "description": "simple command", "command": "ls"},
        ],
    }
    try:
        # open() is now inside the try, so permission/IO failures are reported
        # instead of propagating; the narrowed OSError replaces a bare `except:`
        # that also swallowed KeyboardInterrupt/SystemExit.
        with open(JSON_FILENAME, 'w', encoding='utf-8') as f:
            json.dump(obj=tldt, fp=f, indent=2)
    except OSError:
        print(f'ERROR: Creating {JSON_FILENAME} failed.')
        sys.exit(1)
def show_action_table(actions: list):
    """Print the actions as an aligned key / description / command table."""
    # Column widths: at least as wide as the header, grown to the longest cell.
    max_key_len = len(JSON_NAME_KEY)
    max_desc_len = len(JSON_NAME_DESC)
    max_cmd_len = len(JSON_NAME_CMD)
    for action in actions:
        max_key_len = max(max_key_len, len(action[JSON_NAME_KEY]))
        max_desc_len = max(max_desc_len, len(action[JSON_NAME_DESC]))
        max_cmd_len = max(max_cmd_len, len(action[JSON_NAME_CMD]))

    print(f'{JSON_NAME_KEY:^{max_key_len}} | {JSON_NAME_DESC:^{max_desc_len}} | {JSON_NAME_CMD:^{max_cmd_len}}')
    # Build the rule line with plain concatenation: nesting the same quote type
    # inside an f-string (as the original did) is a SyntaxError before
    # Python 3.12 (PEP 701).
    print('-' * max_key_len + '-+-' + '-' * max_desc_len + '-+-' + '-' * max_cmd_len)

    for action in actions:
        action_key = action[JSON_NAME_KEY]
        action_desc = action[JSON_NAME_DESC]
        action_cmd = action[JSON_NAME_CMD]
        print(f'{action_key:>{max_key_len}} | {action_desc:<{max_desc_len}} | {action_cmd:<{max_cmd_len}}')
def show_execution_time_info(start_time: datetime, end_time: datetime):
print(' Execution Time Information')
print('=================================')
datetime_format = '%Y-%m-%d %H:%M:%S'
print(f' Start Time: {start_time.strftime(datetime_format)}')
print(f' End Time: {end_time.strftime(datetime_format)}')
elapsed_time = end_time - start_time
print(f'Elapsed Time: {str(timedelta(days=elapsed_time.days, seconds=elapsed_time.seconds)):>19}')
def main():
    """CLI entry point: parse args, load .tldt.json, pick an action and run it."""
    parser = argparse.ArgumentParser(prog='tldt', description="Too Long; Didn't Type.")
    parser.add_argument('--version', '-v', action='version', version=f'TLDT {VERSION}')
    parser.add_argument('--init', action='store_true', help=f'create a sample {JSON_FILENAME} file in the current directory')
    parser.add_argument('--no-time', '-nt', action='store_true', help="don't show execution time information")
    parser.add_argument('key', type=str, nargs='?', help='a key of the action')
    args = parser.parse_args()

    # Create a sample file if the user wants
    if args.init:
        if os.path.exists(JSON_FILENAME):
            print(f'ERROR: {JSON_FILENAME} already exist.')
            sys.exit(1)
        create_sample_file()
        sys.exit(0)

    # Check file exists
    if not os.path.exists(JSON_FILENAME):
        print(f'ERROR: {JSON_FILENAME} not exist. Run "tldt --init" to create one!')
        sys.exit(1)

    # Parse file; catch only I/O and JSON errors instead of a bare `except:`.
    try:
        with open(JSON_FILENAME, 'r') as f:
            tldt = json.load(f)
    except (OSError, json.JSONDecodeError):
        print(f'ERROR: Parsing {JSON_FILENAME} failed.')
        sys.exit(1)

    # Check "actions" node exists and holds at least one action.
    # .get() + isinstance() also covers a missing key, which previously raised KeyError.
    actions = tldt.get(JSON_NAME_ACTIONS)
    if not isinstance(actions, list) or not actions:
        print('ERROR: There is no action.')
        sys.exit(1)

    # Parse "configuration"; .get() tolerates a block without the "sortByKey" entry.
    sort_by_key = False
    if JSON_NAME_CONFIGURATION in tldt:
        sort_by_key = tldt[JSON_NAME_CONFIGURATION].get('sortByKey') is True

    if args.key:
        # Take the key from argument
        key = args.key
    else:
        # Sort actions by key if requested, then show the table.
        if sort_by_key:
            actions = sorted(actions, key=lambda act: act[JSON_NAME_KEY])
        show_action_table(actions)
        # Take the key from user input
        key = input(f'\nEnter the key of action to be executed ("{ACTION_KEY_QUIT}" to quit): ')

    # Quit program if the key is the quit key
    if key.lower() == ACTION_KEY_QUIT:
        sys.exit()

    # Find the action; next() with a default replaces the bare try/except
    # (and the unused enumerate()).
    action = next((item for item in actions if item[JSON_NAME_KEY] == key), None)
    if action is None:
        print('ERROR: Action not found.')
        sys.exit(1)

    # Record start time if needed
    if not args.no_time:
        start_time = datetime.now()

    # Execute action
    print(f'Start >> [{action[JSON_NAME_KEY]}] {action[JSON_NAME_DESC]} >> {action[JSON_NAME_CMD]}')
    os.system(action[JSON_NAME_CMD])
    print(f'End >> [{action[JSON_NAME_KEY]}] {action[JSON_NAME_DESC]} >> {action[JSON_NAME_CMD]}')

    # Show execution time information if needed
    if not args.no_time:
        end_time = datetime.now()
        print('')
        show_execution_time_info(start_time, end_time)
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| #!/usr/bin/env python3
"""
Copyright 2020 Samuel Huang
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import json
import os
import sys
from datetime import datetime, timedelta
VERSION = 'v1.0.2'
JSON_FILENAME = '.tldt.json'
JSON_NAME_CONFIGURATION = 'configuration'
JSON_NAME_ACTIONS = 'actions'
JSON_NAME_KEY = 'key'
JSON_NAME_DESC = 'description'
JSON_NAME_CMD = 'command'
ACTION_KEY_QUIT = 'q'
def create_sample_file():
    """Write a starter .tldt.json with one sample action to the current directory."""
    # Build the sample document directly instead of round-tripping through a JSON string.
    tldt = {
        "configuration": {"sortByKey": True},
        "actions": [
            {"key": "1", "description": "simple command", "command": "ls"},
        ],
    }
    try:
        # open() is now inside the try, so permission/IO failures are reported
        # instead of propagating; the narrowed OSError replaces a bare `except:`
        # that also swallowed KeyboardInterrupt/SystemExit.
        with open(JSON_FILENAME, 'w', encoding='utf-8') as f:
            json.dump(obj=tldt, fp=f, indent=2)
    except OSError:
        print(f'ERROR: Creating {JSON_FILENAME} failed.')
        sys.exit(1)
def show_action_table(actions: list):
    """Render the actions as an aligned table of key / description / command."""
    # Each column is at least as wide as its header, grown to the widest cell.
    widths = {}
    for column in (JSON_NAME_KEY, JSON_NAME_DESC, JSON_NAME_CMD):
        widths[column] = max([len(column)] + [len(action[column]) for action in actions])
    key_w = widths[JSON_NAME_KEY]
    desc_w = widths[JSON_NAME_DESC]
    cmd_w = widths[JSON_NAME_CMD]

    print(f'{JSON_NAME_KEY:^{key_w}} | {JSON_NAME_DESC:^{desc_w}} | {JSON_NAME_CMD:^{cmd_w}}')
    print(f'{"-" * key_w}-+-{"-" * desc_w}-+-{"-" * cmd_w}')
    for action in actions:
        print(f'{action[JSON_NAME_KEY]:>{key_w}} | {action[JSON_NAME_DESC]:<{desc_w}} | {action[JSON_NAME_CMD]:<{cmd_w}}')
def show_execution_time_info(start_time: datetime, end_time: datetime):
print(' Execution Time Information')
print('=================================')
datetime_format = '%Y-%m-%d %H:%M:%S'
print(f' Start Time: {start_time.strftime(datetime_format)}')
print(f' End Time: {end_time.strftime(datetime_format)}')
elapsed_time = end_time - start_time
print(f'Elapsed Time: {str(timedelta(days=elapsed_time.days, seconds=elapsed_time.seconds)):>19}')
def main():
    """CLI entry point: parse args, load .tldt.json, pick an action and run it."""
    parser = argparse.ArgumentParser(prog='tldt', description="Too Long; Didn't Type.")
    parser.add_argument('--version', '-v', action='version', version=f'TLDT {VERSION}')
    parser.add_argument('--init', action='store_true', help=f'create a sample {JSON_FILENAME} file in the current directory')
    parser.add_argument('--no-time', '-nt', action='store_true', help="don't show execution time information")
    parser.add_argument('key', type=str, nargs='?', help='a key of the action')
    args = parser.parse_args()

    # Create a sample file if the user wants
    if args.init:
        if os.path.exists(JSON_FILENAME):
            print(f'ERROR: {JSON_FILENAME} already exist.')
            sys.exit(1)
        create_sample_file()
        sys.exit(0)

    # Check file exists
    if not os.path.exists(JSON_FILENAME):
        print(f'ERROR: {JSON_FILENAME} not exist. Run "tldt --init" to create one!')
        sys.exit(1)

    # Parse file; catch only I/O and JSON errors instead of a bare `except:`.
    try:
        with open(JSON_FILENAME, 'r') as f:
            tldt = json.load(f)
    except (OSError, json.JSONDecodeError):
        print(f'ERROR: Parsing {JSON_FILENAME} failed.')
        sys.exit(1)

    # Check "actions" node exists and holds at least one action.
    # .get() + isinstance() also covers a missing key, which previously raised KeyError.
    actions = tldt.get(JSON_NAME_ACTIONS)
    if not isinstance(actions, list) or not actions:
        print('ERROR: There is no action.')
        sys.exit(1)

    # Parse "configuration"; .get() tolerates a block without the "sortByKey" entry.
    sort_by_key = False
    if JSON_NAME_CONFIGURATION in tldt:
        sort_by_key = tldt[JSON_NAME_CONFIGURATION].get('sortByKey') is True

    if args.key:
        # Take the key from argument
        key = args.key
    else:
        # Sort actions by key if requested, then show the table.
        if sort_by_key:
            actions = sorted(actions, key=lambda act: act[JSON_NAME_KEY])
        show_action_table(actions)
        # Take the key from user input
        key = input(f'\nEnter the key of action to be executed ("{ACTION_KEY_QUIT}" to quit): ')

    # Quit program if the key is the quit key
    if key.lower() == ACTION_KEY_QUIT:
        sys.exit()

    # Find the action; next() with a default replaces the bare try/except
    # (and the unused enumerate()).
    action = next((item for item in actions if item[JSON_NAME_KEY] == key), None)
    if action is None:
        print('ERROR: Action not found.')
        sys.exit(1)

    # Record start time if needed
    if not args.no_time:
        start_time = datetime.now()

    # Execute action
    print(f'Start >> [{action[JSON_NAME_KEY]}] {action[JSON_NAME_DESC]} >> {action[JSON_NAME_CMD]}')
    os.system(action[JSON_NAME_CMD])
    print(f'End >> [{action[JSON_NAME_KEY]}] {action[JSON_NAME_DESC]} >> {action[JSON_NAME_CMD]}')

    # Show execution time information if needed
    if not args.no_time:
        end_time = datetime.now()
        print('')
        show_execution_time_info(start_time, end_time)
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
import json
from canvas import tk
from tkinter import Button, Entry, Label
from helpers import clean_screen
from products import render_products
def login(username, password):
    """Check `username`/`password` against the flat-file credentials db.

    On a match, persist the username as the current user and render the
    products screen; otherwise re-render the login screen with an error.
    """
    with open('db/user_credentials_db.txt', 'r') as file:
        lines = file.readlines()
    for line in lines:
        # rstrip('\n') instead of line[:-1]: the slice chopped the final
        # character of the password when the last line lacks a newline.
        stripped = line.rstrip('\n')
        if ', ' not in stripped:
            # Skip blank or malformed lines instead of crashing in split().
            continue
        user, pas = stripped.split(', ', 1)
        if user == username and pas == password:
            with open('db/current_user.txt', 'w') as current_user_file:
                current_user_file.write(username)
            render_products()
            return
    # No credentials matched: show the login form again with an error message.
    render_login(errors=True)
def register(**user):
    """Append a new user record (as JSON) plus their credentials to the db files."""
    # Every new user starts with an empty product list.
    user.update({'products': []})
    with open('db/users.txt', 'a') as file:
        file.write(json.dumps(user))
        file.write('\n')
    with open('db/user_credentials_db.txt', 'a') as file:
        # Use double quotes inside the single-quoted f-string: re-using the same
        # quote type (as the original did) is a SyntaxError before Python 3.12
        # (PEP 701).
        # NOTE(review): passwords are stored in plain text — consider hashing.
        file.write(f'{user.get("username")}, {user.get("password")}')
        file.write('\n')
def render_register():
    """Draw the registration form and wire up the Register button."""
    clean_screen()

    def _field(row, caption, **entry_opts):
        # One labelled Entry widget per grid row.
        Label(text=caption).grid(row=row, column=0)
        entry = Entry(tk, **entry_opts)
        entry.grid(row=row, column=1)
        return entry

    username = _field(0, 'Enter your username:')
    password = _field(1, 'Enter your password:', show='*')
    firstname = _field(2, 'Enter your firstname:')
    lastname = _field(3, 'Enter your lastname:')

    # The lambda defers reading the entries until the button is clicked.
    Button(
        tk, text='Register', bg='green',
        command=lambda: register(
            username=username.get(), password=password.get(),
            firstname=firstname.get(), lastname=lastname.get(),
        ),
    ).grid(row=4, column=0)
def render_login(errors=None):
    """Draw the login form; optionally show an invalid-credentials notice."""
    clean_screen()

    def _field(row, caption, **entry_opts):
        # One labelled Entry widget per grid row.
        Label(text=caption).grid(row=row, column=0)
        entry = Entry(tk, **entry_opts)
        entry.grid(row=row, column=1)
        return entry

    username = _field(0, 'Enter your username:')
    password = _field(1, 'Enter your password:', show='*')

    # The lambda defers reading the entries until the button is clicked.
    Button(
        tk, text='Enter', bg='green',
        command=lambda: login(username=username.get(), password=password.get()),
    ).grid(row=2, column=0)

    if errors:
        Label(text='Invalid username or password.').grid(row=3, column=0)
def render_main_enter_screen():
    """Entry screen: offer Login and Register buttons side by side."""
    Button(tk, text='Login', bg='green', fg='white', command=render_login).grid(row=0, column=0)
    Button(tk, text='Register', bg='yellow', command=render_register).grid(row=0, column=1)
| import json
from canvas import tk
from tkinter import Button, Entry, Label
from helpers import clean_screen
from products import render_products
def login(username, password):
    """Check `username`/`password` against the flat-file credentials db.

    On a match, persist the username as the current user and render the
    products screen; otherwise re-render the login screen with an error.
    """
    with open('db/user_credentials_db.txt', 'r') as file:
        lines = file.readlines()
    for line in lines:
        # rstrip('\n') instead of line[:-1]: the slice chopped the final
        # character of the password when the last line lacks a newline.
        stripped = line.rstrip('\n')
        if ', ' not in stripped:
            # Skip blank or malformed lines instead of crashing in split().
            continue
        user, pas = stripped.split(', ', 1)
        if user == username and pas == password:
            with open('db/current_user.txt', 'w') as current_user_file:
                current_user_file.write(username)
            render_products()
            return
    # No credentials matched: show the login form again with an error message.
    render_login(errors=True)
def register(**user):
    """Append a new user record (as JSON) plus their credentials to the db files."""
    # Every new user starts with an empty product list.
    user.update({'products': []})
    with open('db/users.txt', 'a') as file:
        file.write(json.dumps(user))
        file.write('\n')
    with open('db/user_credentials_db.txt', 'a') as file:
        # NOTE(review): passwords are stored in plain text — consider hashing.
        file.write(f'{user.get("username")}, {user.get("password")}')
        file.write('\n')
def render_register():
    """Draw the registration form (username/password/first/last) and Register button."""
    clean_screen()
    Label(text='Enter your username:').grid(row=0, column=0)
    username = Entry(tk)
    username.grid(row=0, column=1)
    Label(text='Enter your password:').grid(row=1, column=0)
    # show='*' masks the typed password characters.
    password = Entry(tk, show='*')
    password.grid(row=1, column=1)
    Label(text='Enter your firstname:').grid(row=2, column=0)
    firstname = Entry(tk)
    firstname.grid(row=2, column=1)
    Label(text='Enter your lastname:').grid(row=3, column=0)
    lastname = Entry(tk)
    lastname.grid(row=3, column=1)
    # The lambda defers reading the entries until the button is clicked.
    Button(tk, text='Register', bg='green',
           command=lambda: register(username=username.get(), password=password.get(), firstname=firstname.get(),
                                    lastname=lastname.get())).grid(row=4, column=0)
def render_login(errors=None):
    """Draw the login form; `errors` truthy shows an invalid-credentials notice."""
    clean_screen()
    Label(text='Enter your username:').grid(row=0, column=0)
    username = Entry(tk)
    username.grid(row=0, column=1)
    Label(text='Enter your password:').grid(row=1, column=0)
    # show='*' masks the typed password characters.
    password = Entry(tk, show='*')
    password.grid(row=1, column=1)
    # The lambda defers reading the entries until the button is clicked.
    Button(tk, text='Enter', bg='green', command=lambda: login(username=username.get(), password=password.get())).grid(
        row=2, column=0)
    if errors:
        Label(text='Invalid username or password.').grid(row=3, column=0)
def render_main_enter_screen():
    """Entry screen: offer Login and Register buttons side by side."""
    Button(tk, text='Login', bg='green', fg='white', command=render_login).grid(row=0, column=0)
    Button(tk, text='Register', bg='yellow', command=render_register).grid(row=0, column=1)
|
from settings import NPY_EXT, MODELS_DIR
import os
import math
import numpy as np
from datetime import datetime
def euc(vec: np.array, pC: np.array):
    """
    Return the Euclidean distance from `vec` to every row of `pC`.

    Parameters
    ----------
    vec : numpy.array
        A single point with the same number of columns as `pC`.
    pC : numpy.array
        2-D array with one candidate point per row.

    Returns
    -------
    1-D numpy array of length `pC.shape[0]` with the distance to each row.
    """
    # Broadcasting subtracts `vec` from every row directly; the original
    # materialised a tiled copy of `vec` with np.full() first.
    diff = np.subtract(pC, vec)
    # Summing over axis=1 already yields shape (pC.shape[0],).
    return np.sqrt(np.sum(np.square(diff), axis=1, dtype=float))
def eucledian_between_point(point1: tuple, point2: tuple):
    """
    Return eucledian distance between two points.

    Parameters
    ----------
    point1 : tuple
        (x,y) coordinate pair.
    point2 : tuple
        (x,y) coordinate pair.

    Returns
    -------
    Eucledian distance between both points.
    """
    # Plain 2-D Pythagorean distance.
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return math.sqrt(dx * dx + dy * dy)
def eucledian_between_vec(vec1: np.array, vec2: np.array):
    """
    Return eucledian distance between two vectors.

    Parameters
    ----------
    vec1 : numpy.array
        Array contains coordinate set of points.
    vec2 : numpy.array
        Array contains coordinate set of points.

    Returns
    -------
    Eucledian distance between both vectors.
    """
    # sqrt of the summed squared element-wise differences.
    diff = np.subtract(vec1, vec2)
    return np.sqrt((diff * diff).sum())
def get_model_path(model_name):
    """
    Returns a path with extension based on param:model_name.

    Parameters
    ----------
    model_name : str
        Name of file under which weights are saved (with or without the
        NPY extension).
    """
    # removesuffix() only strips a trailing extension; the previous replace()
    # also deleted the substring when it appeared in the middle of the name
    # (e.g. "model.npy.v2" -> "model.v2").
    model_name = model_name.removesuffix(NPY_EXT)
    return os.path.join(MODELS_DIR, f"{model_name}{NPY_EXT}")
def generate_model_name(grid_size, max_iterations, learning_rate):
    """
    Parameters
    ----------
    grid_size : tuple
        (x, y) grid dimensions used for training a model.
    max_iterations : int
        Max iterations that model training on.
    learning_rate : float
        Learning rate that model training on.

    Returns
    -------
    model_name : str
        A unique string built from a timestamp and the parameter values.
    """
    grid_x, grid_y = grid_size
    # Hoist strftime out of the f-string: nesting the same quote type inside
    # an f-string is a SyntaxError before Python 3.12 (PEP 701).
    timestamp = datetime.now().strftime('%d-%m-%Y_%Hh%Mm%Ss')
    return f"{timestamp}T_{grid_x}X{grid_y}_{max_iterations}N_{learning_rate}LR"
##############################################################################
import multiprocessing
import enum
class ParallelProcessingTargets(enum.Enum):
    """
    This enum class helps to facilitate boolean flags in code
    to isolate parallel processing code for conditional execution.
    """

    # Presumably: parallelise the best-matching-unit search — confirm at call sites.
    FIND_BMU = "pp_FIND_BMU"
    # Presumably: parallelise BMU weight-influence computation — confirm at call sites.
    INF_BMU_W = "pp_INF_BMU_W"
    # Presumably: parallelise BMU position-influence computation — confirm at call sites.
    INF_BMU_POS = "pp_INF_BMU_POS"
def apply_along_axis_wrapper(apply_along_axis_args):
    """
    Unpack a single argument tuple and forward it to numpy.apply_along_axis().

    Parameters
    ----------
    apply_along_axis_args : n-tuple
        (func1d, axis, arr, args, kwargs) as accepted by numpy.apply_along_axis.

    Returns
    -------
    A numpy array to which func1d has been applied.
    """
    # The single-tuple signature lets Pool.map() hand over all arguments at once.
    func1d, axis, arr, extra_args, extra_kwargs = apply_along_axis_args
    return np.apply_along_axis(func1d, axis, arr, *extra_args, **extra_kwargs)
def parallel_apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    A multiprocessing variant of numpy.apply_along_axis() which divides the
    numpy.array into n-chunks based on the number of CPUs. It processes these
    chunks in parallel and later concatenates the results from each chunk.

    Parameters
    ----------
    func1d : function
        A function that has to map to numpy array.
    axis : int (0,1)
        Axis along which arr is sliced.
    arr : ndarray (Ni…, M, Nk…)
        Input array
    args : any
        Additional arguments to func1d.
    kwargs : any
        Additional named arguments to func1d.

    Returns
    -------
    A numpy array to which func1D has applied.
    """
    chunks = [
        (func1d, axis, arr_chunk, args, kwargs)
        for arr_chunk in np.array_split(arr, multiprocessing.cpu_count())
    ]
    # The context manager guarantees the worker pool is torn down even when
    # map() raises; the original leaked worker processes on error.
    with multiprocessing.Pool() as pool:
        chunk_results = pool.map(apply_along_axis_wrapper, chunks)
    return np.concatenate(chunk_results)
| from settings import NPY_EXT, MODELS_DIR
import os
import math
import numpy as np
from datetime import datetime
def euc(vec: np.array, pC: np.array):
    """
    Return the Euclidean distance from `vec` to every row of `pC`.

    Parameters
    ----------
    vec : numpy.array
        A single point with the same number of columns as `pC`.
    pC : numpy.array
        2-D array with one candidate point per row.

    Returns
    -------
    1-D numpy array of length `pC.shape[0]` with the distance to each row.
    """
    # Broadcasting subtracts `vec` from every row directly; the original
    # materialised a tiled copy of `vec` with np.full() first.
    diff = np.subtract(pC, vec)
    # Summing over axis=1 already yields shape (pC.shape[0],).
    return np.sqrt(np.sum(np.square(diff), axis=1, dtype=float))
def eucledian_between_point(point1: tuple, point2: tuple):
    """
    Return eucledian distance between two points.

    Parameters
    ----------
    point1 : tuple
        (x,y) coordinate pair.
    point2 : tuple
        (x,y) coordinate pair.

    Returns
    -------
    Eucledian distance between both points.
    """
    # Plain 2-D Pythagorean distance.
    dx, dy = point1[0] - point2[0], point1[1] - point2[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def eucledian_between_vec(vec1: np.array, vec2: np.array):
    """
    Return eucledian distance between two vectors.

    Parameters
    ----------
    vec1 : numpy.array
        Array contains coordinate set of points.
    vec2 : numpy.array
        Array contains coordinate set of points.

    Returns
    -------
    Eucledian distance between both vectors.
    """
    # sqrt of the summed squared element-wise differences.
    residual = np.subtract(vec1, vec2)
    return np.sqrt((residual ** 2).sum())
def get_model_path(model_name):
    """
    Returns a path with extension based on param:model_name.

    Parameters
    ----------
    model_name : str
        Name of file under which weights are saved (with or without the
        NPY extension).
    """
    # Strip only a *trailing* extension; the previous replace() also deleted
    # the substring when it appeared in the middle of the name
    # (e.g. "model.npy.v2" -> "model.v2").
    if model_name.endswith(NPY_EXT):
        model_name = model_name[: -len(NPY_EXT)]
    return os.path.join(MODELS_DIR, f"{model_name}{NPY_EXT}")
def generate_model_name(grid_size, max_iterations, learning_rate):
    """
    Build a unique model name from a timestamp and the training parameters.

    Parameters
    ----------
    grid_size : tuple
        (x, y) grid dimensions used for training a model.
    max_iterations : int
        Max iterations that model training on.
    learning_rate : float
        Learning rate that model training on.

    Returns
    -------
    model_name : str
        A unique string built using the parameter attributes.
    """
    grid_x, grid_y = grid_size
    stamp = datetime.now().strftime('%d-%m-%Y_%Hh%Mm%Ss')
    parts = (f"{stamp}T", f"{grid_x}X{grid_y}", f"{max_iterations}N", f"{learning_rate}LR")
    return "_".join(parts)
##############################################################################
import multiprocessing
import enum
class ParallelProcessingTargets(enum.Enum):
    """
    This enum class helps to facilitate boolean flags in code
    to isolate parallel processing code for conditional execution.
    """

    # Presumably: parallelise the best-matching-unit search — confirm at call sites.
    FIND_BMU = "pp_FIND_BMU"
    # Presumably: parallelise BMU weight-influence computation — confirm at call sites.
    INF_BMU_W = "pp_INF_BMU_W"
    # Presumably: parallelise BMU position-influence computation — confirm at call sites.
    INF_BMU_POS = "pp_INF_BMU_POS"
def apply_along_axis_wrapper(apply_along_axis_args):
    """
    Wrapper around numpy.apply_along_axis().

    Parameters
    ----------
    apply_along_axis_args : n-tuple
        Tuple containing arguments to numpy.apply_along_axis arguments

    Returns
    -------
    A numpy array to which func1D has applied.
    """
    # The single-tuple signature lets Pool.map() hand over all arguments at once.
    (func1d, axis, arr, args, kwargs) = apply_along_axis_args
    return np.apply_along_axis(func1d, axis, arr, *args, **kwargs)
def parallel_apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    A multiprocessing variant of numpy.apply_along_axis() which divides the
    numpy.array into n-chunks based on the number of CPUs. It processes these
    chunks in parallel and later concatenates the results from each chunk.

    Parameters
    ----------
    func1d : function
        A function that has to map to numpy array.
    axis : int (0,1)
        Axis along which arr is sliced.
    arr : ndarray (Ni…, M, Nk…)
        Input array
    args : any
        Additional arguments to func1d.
    kwargs : any
        Additional named arguments to func1d.

    Returns
    -------
    A numpy array to which func1D has applied.
    """
    chunks = [
        (func1d, axis, arr_chunk, args, kwargs)
        for arr_chunk in np.array_split(arr, multiprocessing.cpu_count())
    ]
    # The context manager guarantees the worker pool is torn down even when
    # map() raises; the original leaked worker processes on error.
    with multiprocessing.Pool() as pool:
        chunk_results = pool.map(apply_along_axis_wrapper, chunks)
    return np.concatenate(chunk_results)
|
#!/usr/bin/env python
## -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import Q
from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import add_permission, add_to_group, grant_access, remove_from_group
from desktop.models import DefaultConfiguration, Document2
from oozie.conf import ENABLE_V2
from oozie.importlib.workflows import generate_v2_graph_nodes
from oozie.models2 import Node, Workflow, WorkflowConfiguration, find_dollar_variables, find_dollar_braced_variables, \
_create_graph_adjaceny_list, _get_hierarchy_from_adj_list
from oozie.tests import OozieMockBase, save_temp_workflow, MockOozieApi
LOG = logging.getLogger(__name__)
class TestEditor(OozieMockBase):
def setUp(self):
super(TestEditor, self).setUp()
self.wf = Workflow()
def test_parsing(self):
assert_equal(['input', 'LIMIT', 'out'], find_dollar_variables("""
data = '$input';
$out = LIMIT data $LIMIT; -- ${nah}
$output = STORE "$out";
"""))
assert_equal(['max_salary', 'limit'], find_dollar_variables("""
SELECT sample_07.description, sample_07.salary
FROM
sample_07
WHERE
( sample_07.salary > $max_salary)
ORDER BY sample_07.salary DESC
LIMIT $limit"""))
def test_hive_script_parsing(self):
assert_equal(['field', 'tablename', 'LIMIT'], find_dollar_braced_variables("""
SELECT ${field}
FROM ${hivevar:tablename}
LIMIT ${hiveconf:LIMIT}
"""))
assert_equal(['field', 'tablename', 'LIMIT'], find_dollar_braced_variables("SELECT ${field} FROM ${hivevar:tablename} LIMIT ${hiveconf:LIMIT}"))
def test_workflow_gen_xml(self):
assert_equal([
u'<workflow-app', u'name="My_Workflow"', u'xmlns="uri:oozie:workflow:0.5">', u'<start', u'to="End"/>', u'<kill', u'name="Kill">', u'<message>Action', u'failed,',
u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>', u'</kill>', u'<end', u'name="End"/>', u'</workflow-app>'],
self.wf.to_xml({'output': '/path'}).split()
)
def test_workflow_map_reduce_gen_xml(self):
    """A workflow holding a single MapReduce action (with retry-max and one job property) renders the expected action XML."""
    # NOTE(review): the `data` argument below is one large editor-serialized JSON document
    # (layout + workflow graph) captured from the UI; it is intentionally left verbatim.
    wf = Workflow(data="{\"layout\": [{\"oozieRows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"MapReduce job\", \"widgetType\": \"mapreduce-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"size\": 12}], \"id\": \"e2caca14-8afc-d7e0-287c-88accd0b4253\", \"columns\": []}], \"rows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"ff63ee3f-df54-2fa3-477b-65f5e0f0632c\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"MapReduce job\", \"widgetType\": \"mapreduce-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"size\": 12}], \"id\": \"e2caca14-8afc-d7e0-287c-88accd0b4253\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, 
\"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"6a13d869-d04c-8431-6c5c-dbe67ea33889\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"e3b56553-7a4f-43d2-b1e2-4dc433280095\", \"columns\": []}], \"oozieEndRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"6a13d869-d04c-8431-6c5c-dbe67ea33889\", \"columns\": []}, \"oozieKillRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": 
\"e3b56553-7a4f-43d2-b1e2-4dc433280095\", \"columns\": []}, \"enableOozieDropOnAfter\": true, \"oozieStartRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"ff63ee3f-df54-2fa3-477b-65f5e0f0632c\", \"columns\": []}, \"klass\": \"card card-home card-column span12\", \"enableOozieDropOnBefore\": true, \"drops\": [\"temp\"], \"id\": \"0c1908e7-0096-46e7-a16b-b17b1142a730\", \"size\": 12}], \"workflow\": {\"properties\": {\"job_xml\": \"\", \"description\": \"\", \"wf1_id\": null, \"sla_enabled\": false, \"deployment_dir\": \"/user/hue/oozie/workspaces/hue-oozie-1430228904.58\", \"schema_version\": \"uri:oozie:workflow:0.5\", \"sla\": [{\"key\": \"enabled\", \"value\": false}, {\"key\": \"nominal-time\", \"value\": \"${nominal_time}\"}, {\"key\": \"should-start\", \"value\": \"\"}, {\"key\": \"should-end\", \"value\": \"${30 * MINUTES}\"}, {\"key\": \"max-duration\", \"value\": \"\"}, {\"key\": \"alert-events\", \"value\": \"\"}, {\"key\": \"alert-contact\", \"value\": \"\"}, {\"key\": \"notification-msg\", \"value\": \"\"}, {\"key\": \"upstream-apps\", \"value\": \"\"}], \"show_arrows\": true, \"parameters\": [{\"name\": \"oozie.use.system.libpath\", \"value\": true}], \"properties\": []}, \"name\": \"My Workflow\", \"versions\": [\"uri:oozie:workflow:0.4\", \"uri:oozie:workflow:0.4.5\", \"uri:oozie:workflow:0.5\"], \"isDirty\": true, \"movedNode\": null, \"linkMapping\": {\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\": [\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"], \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": [], 
\"3f107997-04cc-8733-60a9-a4bb62cebffc\": [\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"], \"17c9c895-5a16-7443-bb81-f34b30b21548\": []}, \"nodeIds\": [\"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"], \"nodes\": [{\"properties\": {}, \"name\": \"Start\", \"children\": [{\"to\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"}], \"actionParametersFetched\": false, \"type\": \"start-widget\", \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"actionParameters\": []}, {\"properties\": {}, \"name\": \"End\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"end-widget\", \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"actionParameters\": []}, {\"properties\": {\"message\": \"Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]\"}, \"name\": \"Kill\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"kill-widget\", \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"actionParameters\": []}, {\"properties\": {\"retry_max\": [{\"value\": \"5\"}], \"files\": [], \"job_xml\": \"\", \"jar_path\": \"my_jar\", \"job_properties\": [{\"name\": \"prop_1_name\", \"value\": \"prop_1_value\"}], \"archives\": [], \"prepares\": [], \"credentials\": [], \"sla\": [{\"key\": \"enabled\", \"value\": false}, {\"key\": \"nominal-time\", \"value\": \"${nominal_time}\"}, {\"key\": \"should-start\", \"value\": \"\"}, {\"key\": \"should-end\", \"value\": \"${30 * MINUTES}\"}, {\"key\": \"max-duration\", \"value\": \"\"}, {\"key\": \"alert-events\", \"value\": \"\"}, {\"key\": \"alert-contact\", \"value\": \"\"}, {\"key\": \"notification-msg\", \"value\": \"\"}, {\"key\": \"upstream-apps\", \"value\": \"\"}]}, \"name\": \"mapreduce-0cf2\", \"children\": [{\"to\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"}, {\"error\": \"17c9c895-5a16-7443-bb81-f34b30b21548\"}], \"actionParametersFetched\": false, \"type\": \"mapreduce-widget\", \"id\": 
\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"actionParameters\": []}], \"id\": 50019, \"nodeNamesMapping\": {\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\": \"mapreduce-0cf2\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": \"End\", \"3f107997-04cc-8733-60a9-a4bb62cebffc\": \"Start\", \"17c9c895-5a16-7443-bb81-f34b30b21548\": \"Kill\"}, \"uuid\": \"084f4d4c-00f1-62d2-e27e-e153c1f9acfb\"}}")
    # Whitespace-split token comparison: the retry-max attribute and the job property
    # must both appear in the generated <map-reduce> action.
    assert_equal([
        u'<workflow-app', u'name="My_Workflow"', u'xmlns="uri:oozie:workflow:0.5">',
        u'<start', u'to="mapreduce-0cf2"/>',
        u'<kill', u'name="Kill">', u'<message>Action', u'failed,', u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>', u'</kill>',
        u'<action', u'name="mapreduce-0cf2"', 'retry-max="5">',
        u'<map-reduce>',
        u'<job-tracker>${jobTracker}</job-tracker>',
        u'<name-node>${nameNode}</name-node>',
        u'<configuration>',
        u'<property>',
        u'<name>prop_1_name</name>',
        u'<value>prop_1_value</value>',
        u'</property>',
        u'</configuration>',
        u'</map-reduce>',
        u'<ok', u'to="End"/>',
        u'<error', u'to="Kill"/>',
        u'</action>',
        u'<end', u'name="End"/>',
        u'</workflow-app>'
        ],
        wf.to_xml({'output': '/path'}).split()
    )
def test_workflow_java_gen_xml(self):
    """A workflow holding a single Java action (main class, java-opts, arg, file) renders the expected action XML."""
    # NOTE(review): the `data` argument below is one large editor-serialized JSON document
    # captured from the UI; it is intentionally left verbatim.
    wf = Workflow(data="{\"layout\": [{\"oozieRows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Java program\", \"widgetType\": \"java-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": true, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"6ddafdc4-c070-95f0-4211-328e9f31daf6\", \"size\": 12}], \"id\": \"badb3c81-78d6-8099-38fc-87a9904ba78c\", \"columns\": []}], \"rows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"adc3fe69-36eb-20f8-09ac-38fada1582b2\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Java program\", \"widgetType\": \"java-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": true, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"6ddafdc4-c070-95f0-4211-328e9f31daf6\", \"size\": 12}], \"id\": \"badb3c81-78d6-8099-38fc-87a9904ba78c\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, 
\"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"107bdacf-a37a-d69e-98dd-5801407cb57e\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"81e1869c-a2c3-66d2-c703-719335ea45cb\", \"columns\": []}], \"oozieEndRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"107bdacf-a37a-d69e-98dd-5801407cb57e\", \"columns\": []}, \"oozieKillRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": 
\"81e1869c-a2c3-66d2-c703-719335ea45cb\", \"columns\": []}, \"enableOozieDropOnAfter\": true, \"oozieStartRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"adc3fe69-36eb-20f8-09ac-38fada1582b2\", \"columns\": []}, \"klass\": \"card card-home card-column span12\", \"enableOozieDropOnBefore\": true, \"drops\": [\"temp\"], \"id\": \"8e0f37a5-2dfb-7329-be44-78e60b2cf62b\", \"size\": 12}], \"workflow\": {\"properties\": {\"job_xml\": \"\", \"description\": \"\", \"wf1_id\": null, \"sla_enabled\": false, \"deployment_dir\": \"/user/hue/oozie/workspaces/hue-oozie-1449080135.8\", \"schema_version\": \"uri:oozie:workflow:0.5\", \"properties\": [], \"show_arrows\": true, \"parameters\": [{\"name\": \"oozie.use.system.libpath\", \"value\": true}], \"sla\": [{\"value\": false, \"key\": \"enabled\"}, {\"value\": \"${nominal_time}\", \"key\": \"nominal-time\"}, {\"value\": \"\", \"key\": \"should-start\"}, {\"value\": \"${30 * MINUTES}\", \"key\": \"should-end\"}, {\"value\": \"\", \"key\": \"max-duration\"}, {\"value\": \"\", \"key\": \"alert-events\"}, {\"value\": \"\", \"key\": \"alert-contact\"}, {\"value\": \"\", \"key\": \"notification-msg\"}, {\"value\": \"\", \"key\": \"upstream-apps\"}]}, \"name\": \"My Workflow\", \"versions\": [\"uri:oozie:workflow:0.4\", \"uri:oozie:workflow:0.4.5\", \"uri:oozie:workflow:0.5\"], \"isDirty\": false, \"movedNode\": null, \"linkMapping\": {\"6ddafdc4-c070-95f0-4211-328e9f31daf6\": [\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"], \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": [], 
\"3f107997-04cc-8733-60a9-a4bb62cebffc\": [\"6ddafdc4-c070-95f0-4211-328e9f31daf6\"], \"17c9c895-5a16-7443-bb81-f34b30b21548\": []}, \"nodeIds\": [\"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"6ddafdc4-c070-95f0-4211-328e9f31daf6\"], \"nodes\": [{\"properties\": {}, \"name\": \"Start\", \"children\": [{\"to\": \"6ddafdc4-c070-95f0-4211-328e9f31daf6\"}], \"actionParametersFetched\": false, \"type\": \"start-widget\", \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"actionParameters\": []}, {\"properties\": {}, \"name\": \"End\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"end-widget\", \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"actionParameters\": []}, {\"properties\": {\"body\": \"\", \"cc\": \"\", \"to\": \"\", \"enableMail\": false, \"message\": \"Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]\", \"subject\": \"\"}, \"name\": \"Kill\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"kill-widget\", \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"actionParameters\": []}, {\"properties\": {\"files\": [{\"value\": \"/my_file\"}], \"job_xml\": [], \"jar_path\": \"/my/jar\", \"java_opts\": [{\"value\": \"-Dsun.security.jgss.debug=true\"}], \"retry_max\": [], \"retry_interval\": [], \"job_properties\": [], \"capture_output\": false, \"main_class\": \"MyClass\", \"arguments\": [{\"value\": \"my_arg\"}], \"prepares\": [], \"credentials\": [], \"sla\": [{\"value\": false, \"key\": \"enabled\"}, {\"value\": \"${nominal_time}\", \"key\": \"nominal-time\"}, {\"value\": \"\", \"key\": \"should-start\"}, {\"value\": \"${30 * MINUTES}\", \"key\": \"should-end\"}, {\"value\": \"\", \"key\": \"max-duration\"}, {\"value\": \"\", \"key\": \"alert-events\"}, {\"value\": \"\", \"key\": \"alert-contact\"}, {\"value\": \"\", \"key\": \"notification-msg\"}, {\"value\": \"\", \"key\": \"upstream-apps\"}], \"archives\": []}, 
\"name\": \"java-6dda\", \"children\": [{\"to\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"}, {\"error\": \"17c9c895-5a16-7443-bb81-f34b30b21548\"}], \"actionParametersFetched\": false, \"type\": \"java-widget\", \"id\": \"6ddafdc4-c070-95f0-4211-328e9f31daf6\", \"actionParameters\": []}], \"id\": 50247, \"nodeNamesMapping\": {\"6ddafdc4-c070-95f0-4211-328e9f31daf6\": \"java-6dda\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": \"End\", \"3f107997-04cc-8733-60a9-a4bb62cebffc\": \"Start\", \"17c9c895-5a16-7443-bb81-f34b30b21548\": \"Kill\"}, \"uuid\": \"2667d60e-d894-c27b-6e6f-0333704c0989\"}}")
    # Expect the file entry to be rendered with a fragment symlink (/my_file#my_file).
    assert_equal([
        u'<workflow-app', u'name="My_Workflow"', u'xmlns="uri:oozie:workflow:0.5">',
        u'<start', u'to="java-6dda"/>',
        u'<kill', u'name="Kill">',
        u'<message>Action', u'failed,',
        u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>',
        u'</kill>',
        u'<action', u'name="java-6dda">',
        u'<java>',
        u'<job-tracker>${jobTracker}</job-tracker>',
        u'<name-node>${nameNode}</name-node>',
        u'<main-class>MyClass</main-class>',
        u'<java-opts>-Dsun.security.jgss.debug=true</java-opts>',
        u'<arg>my_arg</arg>',
        u'<file>/my_file#my_file</file>',
        u'</java>',
        u'<ok', u'to="End"/>',
        u'<error', u'to="Kill"/>',
        u'</action>',
        u'<end', u'name="End"/>',
        u'</workflow-app>'
        ],
        wf.to_xml({'output': '/path'}).split()
    )
def test_workflow_generic_gen_xml(self):
    """A workflow holding a generic (raw-XML) action passes the user-supplied XML through verbatim."""
    # NOTE(review): editor-serialized JSON fixture captured from the UI; the generic node's
    # `xml` property carries the raw action XML that must survive into the generated document.
    workflow = """{"layout": [{"oozieRows": [{"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Generic", "widgetType": "generic-widget", "oozieMovable": true, "ooziePropertiesExpanded": true, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "e96bb09b-84d1-6864-5782-42942bab97cb", "size": 12}], "id": "ed10631a-f264-9a3b-aa09-b04cb76f5c32", "columns": []}], "rows": [{"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "68d83128-2c08-28f6-e9d1-a912d20f8af5", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Generic", "widgetType": "generic-widget", "oozieMovable": true, "ooziePropertiesExpanded": true, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "e96bb09b-84d1-6864-5782-42942bab97cb", "size": 12}], "id": "ed10631a-f264-9a3b-aa09-b04cb76f5c32", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": 
"7bf3cdc7-f79b-ff36-b152-e37217c40ccb", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "07c4f1bd-8f58-ea51-fc3d-50acf74d6747", "columns": []}], "oozieEndRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "7bf3cdc7-f79b-ff36-b152-e37217c40ccb", "columns": []}, "oozieKillRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "07c4f1bd-8f58-ea51-fc3d-50acf74d6747", "columns": []}, "enableOozieDropOnAfter": true, "oozieStartRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", 
"oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "68d83128-2c08-28f6-e9d1-a912d20f8af5", "columns": []}, "klass": "card card-home card-column span12", "enableOozieDropOnBefore": true, "drops": ["temp"], "id": "0e8b5e24-4f78-0f76-fe91-0c8e7f0d290a", "size": 12}], "workflow": {"properties": {"job_xml": "", "description": "", "wf1_id": null, "sla_enabled": false, "deployment_dir": "/user/hue/oozie/workspaces/hue-oozie-1446487280.19", "schema_version": "uri:oozie:workflow:0.5", "properties": [], "show_arrows": true, "parameters": [{"name": "oozie.use.system.libpath", "value": true}], "sla": [{"value": false, "key": "enabled"}, {"value": "${nominal_time}", "key": "nominal-time"}, {"value": "", "key": "should-start"}, {"value": "${30 * MINUTES}", "key": "should-end"}, {"value": "", "key": "max-duration"}, {"value": "", "key": "alert-events"}, {"value": "", "key": "alert-contact"}, {"value": "", "key": "notification-msg"}, {"value": "", "key": "upstream-apps"}]}, "name": "My Workflow 3", "versions": ["uri:oozie:workflow:0.4", "uri:oozie:workflow:0.4.5", "uri:oozie:workflow:0.5"], "isDirty": false, "movedNode": null, "linkMapping": {"17c9c895-5a16-7443-bb81-f34b30b21548": [], "33430f0f-ebfa-c3ec-f237-3e77efa03d0a": [], "3f107997-04cc-8733-60a9-a4bb62cebffc": ["e96bb09b-84d1-6864-5782-42942bab97cb"], "e96bb09b-84d1-6864-5782-42942bab97cb": ["33430f0f-ebfa-c3ec-f237-3e77efa03d0a"]}, "nodeIds": ["3f107997-04cc-8733-60a9-a4bb62cebffc", "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "17c9c895-5a16-7443-bb81-f34b30b21548", "e96bb09b-84d1-6864-5782-42942bab97cb"], "nodes": [{"properties": {}, "name": "Start", "children": [{"to": "e96bb09b-84d1-6864-5782-42942bab97cb"}], "actionParametersFetched": false, "type": "start-widget", "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "actionParameters": []}, {"properties": {}, "name": "End", "children": [], "actionParametersFetched": false, "type": "end-widget", "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", 
"actionParameters": []}, {"properties": {"message": "Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]"}, "name": "Kill", "children": [], "actionParametersFetched": false, "type": "kill-widget", "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "actionParameters": []}, {"properties": {"xml": "<my_action xmlns=\\"uri:oozie:my_action-action:0.1\\">\\n</my_action>", "credentials": [], "retry_max": [], "sla": [{"key": "enabled", "value": false}, {"key": "nominal-time", "value": "${nominal_time}"}, {"key": "should-start", "value": ""}, {"key": "should-end", "value": "${30 * MINUTES}"}, {"key": "max-duration", "value": ""}, {"key": "alert-events", "value": ""}, {"key": "alert-contact", "value": ""}, {"key": "notification-msg", "value": ""}, {"key": "upstream-apps", "value": ""}], "retry_interval": []}, "name": "generic-e96b", "children": [{"to": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a"}, {"error": "17c9c895-5a16-7443-bb81-f34b30b21548"}], "actionParametersFetched": false, "type": "generic-widget", "id": "e96bb09b-84d1-6864-5782-42942bab97cb", "actionParameters": []}], "id": 50027, "nodeNamesMapping": {"17c9c895-5a16-7443-bb81-f34b30b21548": "Kill", "33430f0f-ebfa-c3ec-f237-3e77efa03d0a": "End", "3f107997-04cc-8733-60a9-a4bb62cebffc": "Start", "e96bb09b-84d1-6864-5782-42942bab97cb": "generic-e96b"}, "uuid": "83fb9dc4-8687-e369-9220-c8501a93d446"}}"""
    wf = Workflow(data=workflow)
    # The <my_action> fragment from the generic node's "xml" property appears verbatim
    # inside the generated action element.
    assert_equal([
        u'<workflow-app', u'name="My_Workflow_3"', u'xmlns="uri:oozie:workflow:0.5">',
        u'<start', u'to="generic-e96b"/>',
        u'<kill', u'name="Kill">', u'<message>Action', u'failed,', u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>', u'</kill>',
        u'<action', u'name="generic-e96b">', u'<my_action', u'xmlns="uri:oozie:my_action-action:0.1">', u'</my_action>',
        u'<ok', u'to="End"/>', u'<error', u'to="Kill"/>',
        u'</action>',
        u'<end', u'name="End"/>',
        u'</workflow-app>'],
        wf.to_xml({'output': '/path'}).split()
    )
def test_workflow_email_on_kill_node_xml(self):
    """XML generation when the workflow's kill node has email enabled.

    The kill node's properties carry ``"enableMail": true`` together with
    a recipient and subject, so serialization must emit an <email> action
    (schema uri:oozie:email-action:0.2) and move the original kill
    <message> into a renamed 'Kill-kill' node that the email action
    transitions to on both ok and error.
    """
    workflow = """{"history": {"oozie_id": "0000013-151015155856463-oozie-oozi-W", "properties": {"oozie.use.system.libpath": "True", "security_enabled": false, "dryrun": false, "jobTracker": "localhost:8032", "oozie.wf.application.path": "hdfs://localhost:8020/user/hue/oozie/workspaces/hue-oozie-1445431078.26", "hue-id-w": 6, "nameNode": "hdfs://localhost:8020"}}, "layout": [{"oozieRows": [], "rows": [{"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "9cf57679-292c-d980-8053-1180a84eaa54", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "f8f22c81-a9eb-5138-64cf-014ae588d0ca", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "31f194ff-cd4f-faef-652d-0c5f66a80f97", "columns": []}], "oozieEndRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": 
false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "f8f22c81-a9eb-5138-64cf-014ae588d0ca", "columns": []}, "oozieKillRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "31f194ff-cd4f-faef-652d-0c5f66a80f97", "columns": []}, "enableOozieDropOnAfter": true, "oozieStartRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "9cf57679-292c-d980-8053-1180a84eaa54", "columns": []}, "klass": "card card-home card-column span12", "enableOozieDropOnBefore": true, "drops": ["temp"], "id": "1920900a-a735-7e66-61d4-23de384e8f62", "size": 12}], "workflow": {"properties": {"job_xml": "", "description": "", "wf1_id": null, "sla_enabled": false, "deployment_dir": "/user/hue/oozie/workspaces/hue-oozie-1445431078.26", "schema_version": "uri:oozie:workflow:0.5", "properties": [], "show_arrows": true, "parameters": [{"name": "oozie.use.system.libpath", "value": true}], "sla": [{"value": false, "key": 
"enabled"}, {"value": "${nominal_time}", "key": "nominal-time"}, {"value": "", "key": "should-start"}, {"value": "${30 * MINUTES}", "key": "should-end"}, {"value": "", "key": "max-duration"}, {"value": "", "key": "alert-events"}, {"value": "", "key": "alert-contact"}, {"value": "", "key": "notification-msg"}, {"value": "", "key": "upstream-apps"}]}, "name": "My real Workflow 1", "versions": ["uri:oozie:workflow:0.4", "uri:oozie:workflow:0.4.5", "uri:oozie:workflow:0.5"], "isDirty": false, "movedNode": null, "linkMapping": {"33430f0f-ebfa-c3ec-f237-3e77efa03d0a": [], "3f107997-04cc-8733-60a9-a4bb62cebffc": ["33430f0f-ebfa-c3ec-f237-3e77efa03d0a"], "17c9c895-5a16-7443-bb81-f34b30b21548": []}, "nodeIds": ["3f107997-04cc-8733-60a9-a4bb62cebffc", "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "17c9c895-5a16-7443-bb81-f34b30b21548"], "nodes": [{"properties": {}, "name": "Start", "children": [{"to": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a"}], "actionParametersFetched": false, "type": "start-widget", "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "actionParameters": []}, {"properties": {}, "name": "End", "children": [], "actionParametersFetched": false, "type": "end-widget", "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "actionParameters": []}, {"properties": {"body": "", "cc": "", "to": "hue@gethue.com", "enableMail": true, "message": "Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]", "subject": "Error on workflow"}, "name": "Kill", "children": [], "actionParametersFetched": false, "type": "kill-widget", "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "actionParameters": []}], "id": 50020, "nodeNamesMapping": {"33430f0f-ebfa-c3ec-f237-3e77efa03d0a": "End", "3f107997-04cc-8733-60a9-a4bb62cebffc": "Start", "17c9c895-5a16-7443-bb81-f34b30b21548": "Kill"}, "uuid": "330c70c8-33fb-16e1-68fb-c42582c7d178"}}"""
    wf = Workflow(data=workflow)
    # Compare whitespace-split tokens so the generated XML's exact
    # indentation/line-wrapping does not matter, only content and order.
    assert_equal([
        u'<workflow-app', u'name="My_real_Workflow_1"', u'xmlns="uri:oozie:workflow:0.5">',
        u'<start', u'to="End"/>',
        u'<action', u'name="Kill">',
        u'<email', u'xmlns="uri:oozie:email-action:0.2">', u'<to>hue@gethue.com</to>', u'<subject>Error', u'on', u'workflow</subject>', u'<body></body>', u'</email>',
        u'<ok', u'to="Kill-kill"/>', u'<error', u'to="Kill-kill"/>',
        u'</action>',
        u'<kill', u'name="Kill-kill">',
        u'<message>Action', u'failed,', u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>',
        u'</kill>',
        u'<end', u'name="End"/>',
        u'</workflow-app>'],
        wf.to_xml({'output': '/path'}).split()
    )
def test_workflow_email_gen_xml(self):
    """Exact XML output for an email-enabled kill node with a rich body.

    Unlike the token-level test above, this compares the full generated
    XML string, so it also pins indentation and checks that the email
    body's embedded newlines (``\\n`` sequences in the JSON) survive
    into the <body> element verbatim.
    """
    # Show full diffs if the long expected string does not match.
    self.maxDiff = None
    workflow = """{"history": {"oozie_id": "0000013-151015155856463-oozie-oozi-W", "properties": {"oozie.use.system.libpath": "True", "security_enabled": false, "dryrun": false, "jobTracker": "localhost:8032", "oozie.wf.application.path": "hdfs://localhost:8020/user/hue/oozie/workspaces/hue-oozie-1445431078.26", "hue-id-w": 6, "nameNode": "hdfs://localhost:8020"}}, "layout": [{"oozieRows": [], "rows": [{"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "9cf57679-292c-d980-8053-1180a84eaa54", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "f8f22c81-a9eb-5138-64cf-014ae588d0ca", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "31f194ff-cd4f-faef-652d-0c5f66a80f97", "columns": []}], "oozieEndRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": 
false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "f8f22c81-a9eb-5138-64cf-014ae588d0ca", "columns": []}, "oozieKillRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "31f194ff-cd4f-faef-652d-0c5f66a80f97", "columns": []}, "enableOozieDropOnAfter": true, "oozieStartRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "9cf57679-292c-d980-8053-1180a84eaa54", "columns": []}, "klass": "card card-home card-column span12", "enableOozieDropOnBefore": true, "drops": ["temp"], "id": "1920900a-a735-7e66-61d4-23de384e8f62", "size": 12}], "workflow": {"properties": {"job_xml": "", "description": "", "wf1_id": null, "sla_enabled": false, "deployment_dir": "/user/hue/oozie/workspaces/hue-oozie-1445431078.26", "schema_version": "uri:oozie:workflow:0.5", "properties": [], "show_arrows": true, "parameters": [{"name": "oozie.use.system.libpath", "value": true}], "sla": [{"value": false, "key": 
"enabled"}, {"value": "${nominal_time}", "key": "nominal-time"}, {"value": "", "key": "should-start"}, {"value": "${30 * MINUTES}", "key": "should-end"}, {"value": "", "key": "max-duration"}, {"value": "", "key": "alert-events"}, {"value": "", "key": "alert-contact"}, {"value": "", "key": "notification-msg"}, {"value": "", "key": "upstream-apps"}]}, "name": "My real Workflow 1", "versions": ["uri:oozie:workflow:0.4", "uri:oozie:workflow:0.4.5", "uri:oozie:workflow:0.5"], "isDirty": false, "movedNode": null, "linkMapping": {"33430f0f-ebfa-c3ec-f237-3e77efa03d0a": [], "3f107997-04cc-8733-60a9-a4bb62cebffc": ["33430f0f-ebfa-c3ec-f237-3e77efa03d0a"], "17c9c895-5a16-7443-bb81-f34b30b21548": []}, "nodeIds": ["3f107997-04cc-8733-60a9-a4bb62cebffc", "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "17c9c895-5a16-7443-bb81-f34b30b21548"], "nodes": [{"properties": {}, "name": "Start", "children": [{"to": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a"}], "actionParametersFetched": false, "type": "start-widget", "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "actionParameters": []}, {"properties": {}, "name": "End", "children": [], "actionParametersFetched": false, "type": "end-widget", "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "actionParameters": []}, {"properties": {"body": "This\\n\\ncontains\\n\\n\\nnew lines.", "bcc": "example@bcc.com", "content_type": "text/plain", "cc": "", "to": "hue@gethue.com", "enableMail": true, "message": "Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]", "subject": "Error on workflow"}, "name": "Kill", "children": [], "actionParametersFetched": false, "type": "kill-widget", "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "actionParameters": []}], "id": 50020, "nodeNamesMapping": {"33430f0f-ebfa-c3ec-f237-3e77efa03d0a": "End", "3f107997-04cc-8733-60a9-a4bb62cebffc": "Start", "17c9c895-5a16-7443-bb81-f34b30b21548": "Kill"}, "uuid": "330c70c8-33fb-16e1-68fb-c42582c7d178"}}"""
    wf = Workflow(data=workflow)
    assert_equal(u'<workflow-app name="My_real_Workflow_1" xmlns="uri:oozie:workflow:0.5">\n    <start to="End"/>\n    <action name="Kill">\n        <email xmlns="uri:oozie:email-action:0.2">\n            <to>hue@gethue.com</to>\n            <subject>Error on workflow</subject>\n            <body>This\n\ncontains\n\n\nnew lines.</body>\n        </email>\n        <ok to="Kill-kill"/>\n        <error to="Kill-kill"/>\n    </action>\n    <kill name="Kill-kill">\n        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>\n    </kill>\n    <end name="End"/>\n</workflow-app>', wf.to_xml({'output': '/path'}))
def test_job_validate_xml_name(self):
    """Workflow names are sanitized into valid XML identifiers.

    Valid names pass through unchanged, names starting with an invalid
    character get it replaced by '_', over-long names are truncated to
    40 characters, and '$' is kept while other special characters are
    replaced.
    """
    job = Workflow()

    # Simple cases: (raw name, expected sanitized name).
    for raw_name, expected in [
        ('a', 'a'),
        ('aa', 'aa'),
        ('%a', '_a'),  # leading '%' is not valid in an XML name
    ]:
        job.update_name(raw_name)
        assert_equal(expected, job.validated_name)

    # A 41-character name is truncated to 40 characters.
    job.update_name('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaz')
    assert_equal(40, len(job.validated_name))

    # Special characters become underscores, but '$' is preserved.
    job.update_name('My <...> 1st W$rkflow [With] (Bad) letter$')
    assert_equal('My_______1st_W$rkflow__With___Bad__lette', job.validated_name)
def test_ignore_dead_fork_link(self):
    """A fork child pointing at a node missing from the mapping is dropped.

    The fork has two children, but only 'to': 1 resolves in the node
    mapping ('to': 2 does not exist), so the generated XML must contain
    a single <path> element.
    """
    fork_data = {
        'id': 1,
        'type': 'fork',
        'children': [{'to': 1, 'id': 1}, {'to': 2, 'id': 2}],  # to --> 2 does not exist
        'properties': {},
        'name': 'my-fork',
    }
    fork_node = Node(fork_data)
    mapping = {1: fork_node}  # Point to ourself
    expected_tokens = ['<fork', 'name="my-fork">', '<path', 'start="my-fork"', '/>', '</fork>']
    assert_equal(expected_tokens, fork_node.to_xml(node_mapping=mapping).split())
def test_action_gen_xml_prepare(self):
    """<prepare> is emitted only for prepare entries with a non-empty value."""
    data = {
        u'properties': {
            u'files': [], u'job_xml': [], u'parameters': [], u'retry_interval': [], u'retry_max': [], u'job_properties': [], u'arguments': [],
            u'prepares': [{u'type': u'mkdir', u'value': u'/my_dir'}],
            u'credentials': [], u'script_path': u'my_pig.pig',
            u'sla': [{u'key': u'enabled', u'value': False}, {u'key': u'nominal-time', u'value': u'${nominal_time}'}, {u'key': u'should-start', u'value': u''}, {u'key': u'should-end', u'value': u'${30 * MINUTES}'}, {u'key': u'max-duration', u'value': u''}, {u'key': u'alert-events', u'value': u''}, {u'key': u'alert-contact', u'value': u''}, {u'key': u'notification-msg', u'value': u''}, {u'key': u'upstream-apps', u'value': u''}],
            u'archives': []
        },
        u'type': u'pig-widget',
        u'id': u'c59d1947-7ce0-ef34-22b2-d64b9fc5bf9a',
        u'name': u'pig-c59d',
        "children": [{"to": "c59d1947-7ce0-ef34-22b2-d64b9fc5bf9a"}, {"error": "c59d1947-7ce0-ef34-22b2-d64b9fc5bf9a"}]
    }
    pig_node = Node(data)
    node_mapping = {"c59d1947-7ce0-ef34-22b2-d64b9fc5bf9a": pig_node}

    def rendered_tokens():
        # Serialize the node and split into stripped whitespace tokens.
        return [row.strip() for row in pig_node.to_xml(node_mapping=node_mapping).split()]

    # A prepare entry with a value produces <prepare> with an <mkdir>.
    xml = rendered_tokens()
    assert_true(u'<prepare>' in xml, xml)
    assert_true(u'<mkdir' in xml, xml)
    assert_true(u'path="${nameNode}/my_dir"/>' in xml, xml)

    # A prepare entry with an empty value is skipped entirely.
    pig_node.data['properties']['prepares'] = [{u'type': u'mkdir', u'value': u''}]
    xml = rendered_tokens()
    assert_false(u'<prepare>' in xml, xml)
    assert_false(u'<mkdir' in xml, xml)

    # Mixed: the valued entry is kept, the empty one is dropped.
    pig_node.data['properties']['prepares'] = [{u'type': u'mkdir', u'value': u'/my_dir'}, {u'type': u'rm', u'value': u''}]
    xml = rendered_tokens()
    assert_true(u'<prepare>' in xml, xml)
    assert_true(u'<mkdir' in xml, xml)
    assert_true(u'path="${nameNode}/my_dir"/>' in xml, xml)
    assert_false(u'<rm' in xml, xml)
def test_upgrade_nodes_in_workflow(self):
wf = Workflow(data="{\"layout\": [{\"oozieRows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Sqoop 1\", \"widgetType\": \"sqoop-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"79774a62-94e3-2ddb-554f-b83640fa5b03\", \"size\": 12}], \"id\": \"0f54ae72-7122-ad7c-fb31-aa715e15a707\", \"columns\": []}], \"rows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"371cf19e-0c45-1e40-2887-5de4033c2a01\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Sqoop 1\", \"widgetType\": \"sqoop-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"79774a62-94e3-2ddb-554f-b83640fa5b03\", \"size\": 12}], \"id\": \"0f54ae72-7122-ad7c-fb31-aa715e15a707\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": 
false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"40cfacb5-0622-4305-1473-8f70e287668b\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"373c9cc8-c64a-f1ef-5486-f18ec52620e3\", \"columns\": []}], \"oozieEndRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"40cfacb5-0622-4305-1473-8f70e287668b\", \"columns\": []}, \"oozieKillRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"373c9cc8-c64a-f1ef-5486-f18ec52620e3\", \"columns\": 
[]}, \"enableOozieDropOnAfter\": true, \"oozieStartRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"371cf19e-0c45-1e40-2887-5de4033c2a01\", \"columns\": []}, \"klass\": \"card card-home card-column span12\", \"enableOozieDropOnBefore\": true, \"drops\": [\"temp\"], \"id\": \"a8549012-ec27-4686-d71a-c6ff95785ff9\", \"size\": 12}], \"workflow\": {\"properties\": {\"job_xml\": \"\", \"description\": \"\", \"wf1_id\": null, \"sla_enabled\": false, \"deployment_dir\": \"/user/hue/oozie/workspaces/hue-oozie-1438808722.99\", \"schema_version\": \"uri:oozie:workflow:0.5\", \"properties\": [], \"show_arrows\": true, \"parameters\": [{\"name\": \"oozie.use.system.libpath\", \"value\": true}], \"sla\": [{\"value\": false, \"key\": \"enabled\"}, {\"value\": \"${nominal_time}\", \"key\": \"nominal-time\"}, {\"value\": \"\", \"key\": \"should-start\"}, {\"value\": \"${30 * MINUTES}\", \"key\": \"should-end\"}, {\"value\": \"\", \"key\": \"max-duration\"}, {\"value\": \"\", \"key\": \"alert-events\"}, {\"value\": \"\", \"key\": \"alert-contact\"}, {\"value\": \"\", \"key\": \"notification-msg\"}, {\"value\": \"\", \"key\": \"upstream-apps\"}]}, \"name\": \"My Workflow\", \"versions\": [\"uri:oozie:workflow:0.4\", \"uri:oozie:workflow:0.4.5\", \"uri:oozie:workflow:0.5\"], \"isDirty\": true, \"movedNode\": null, \"linkMapping\": {\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": [], \"3f107997-04cc-8733-60a9-a4bb62cebffc\": [\"79774a62-94e3-2ddb-554f-b83640fa5b03\"], \"79774a62-94e3-2ddb-554f-b83640fa5b03\": 
[\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"], \"17c9c895-5a16-7443-bb81-f34b30b21548\": []}, \"nodeIds\": [\"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"79774a62-94e3-2ddb-554f-b83640fa5b03\"], \"nodes\": [{\"properties\": {}, \"name\": \"Start\", \"children\": [{\"to\": \"79774a62-94e3-2ddb-554f-b83640fa5b03\"}], \"actionParametersFetched\": false, \"type\": \"start-widget\", \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"actionParameters\": []}, {\"properties\": {}, \"name\": \"End\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"end-widget\", \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"actionParameters\": []}, {\"properties\": {\"message\": \"Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]\"}, \"name\": \"Kill\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"kill-widget\", \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"actionParameters\": []}, {\"name\": \"sqoop-7977\", \"actionParametersUI\": [], \"children\": [{\"to\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"}, {\"error\": \"17c9c895-5a16-7443-bb81-f34b30b21548\"}], \"properties\": {\"files\": [], \"job_xml\": \"\", \"parameters\": [], \"job_properties\": [], \"command\": \"import --connect jdbc:hsqldb:file:db.hsqldb --table TT --target-dir hdfs://localhost:8020/user/foo -m 1\", \"archives\": [], \"prepares\": [], \"credentials\": [], \"sla\": [{\"value\": false, \"key\": \"enabled\"}, {\"value\": \"${nominal_time}\", \"key\": \"nominal-time\"}, {\"value\": \"\", \"key\": \"should-start\"}, {\"value\": \"${30 * MINUTES}\", \"key\": \"should-end\"}, {\"value\": \"\", \"key\": \"max-duration\"}, {\"value\": \"\", \"key\": \"alert-events\"}, {\"value\": \"\", \"key\": \"alert-contact\"}, {\"value\": \"\", \"key\": \"notification-msg\"}, {\"value\": \"\", \"key\": \"upstream-apps\"}]}, \"actionParametersFetched\": true, \"type\": \"sqoop-widget\", \"id\": 
\"79774a62-94e3-2ddb-554f-b83640fa5b03\", \"actionParameters\": []}], \"id\": null, \"nodeNamesMapping\": {\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": \"End\", \"3f107997-04cc-8733-60a9-a4bb62cebffc\": \"Start\", \"79774a62-94e3-2ddb-554f-b83640fa5b03\": \"sqoop-7977\", \"17c9c895-5a16-7443-bb81-f34b30b21548\": \"Kill\"}, \"uuid\": \"b5511e29-c9cc-7f40-0d3a-6dd768f3b1e9\"}}")
assert_true('parameters' in json.loads(wf.data)['workflow']['nodes'][3]['properties'], wf.data)
assert_false('arguments' in json.loads(wf.data)['workflow']['nodes'][3]['properties'], wf.data) # Does not exist yet
data = wf.get_data()
assert_true('parameters' in data['workflow']['nodes'][3]['properties'], wf.data)
assert_true('arguments' in data['workflow']['nodes'][3]['properties'], wf.data) # New field transparently added
def test_action_gen_xml_java_opts(self):
    """java_opts renders as the raw option string, not the list's repr.

    The property is stored as a list of {'value': ...} dicts; the XML
    must contain the joined option values inside <java-opts>, not the
    Python representation of the list itself.
    """
    # Java action whose java_opts property holds one entry.
    data = {u'name': u'java-fc05', u'properties': {u'files': [], u'job_xml': [], u'jar_path': u'/user/romain/hadoop-mapreduce-examples.jar', u'java_opts': [{u'value': u'-debug -Da -Db=1'}], u'retry_max': [], u'retry_interval': [], u'job_properties': [], u'capture_output': False, u'main_class': u'MyClass', u'arguments': [], u'prepares': [], u'credentials': [], u'sla': [{u'value': False, u'key': u'enabled'}, {u'value': u'${nominal_time}', u'key': u'nominal-time'}, {u'value': u'', u'key': u'should-start'}, {u'value': u'${30 * MINUTES}', u'key': u'should-end'}, {u'value': u'', u'key': u'max-duration'}, {u'value': u'', u'key': u'alert-events'}, {u'value': u'', u'key': u'alert-contact'}, {u'value': u'', u'key': u'notification-msg'}, {u'value': u'', u'key': u'upstream-apps'}], u'archives': []}, u'actionParametersFetched': False, u'id': u'fc05d86f-9f07-7a8d-6256-e6abfa87cf77', u'type': u'java-widget', u'children': [{u'to': u'33430f0f-ebfa-c3ec-f237-3e77efa03d0a'}, {u'error': u'17c9c895-5a16-7443-bb81-f34b30b21548'}], u'actionParameters': []}
    java_node = Node(data)
    # The ok/error targets only need to resolve to some node, so map the
    # kill and ok ids onto the same java node.
    node_ids = ("fc05d86f-9f07-7a8d-6256-e6abfa87cf77", "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "17c9c895-5a16-7443-bb81-f34b30b21548")
    node_mapping = dict((node_id, java_node) for node_id in node_ids)
    lines = [row.strip() for row in java_node.to_xml(node_mapping=node_mapping).split('\n')]
    assert_false("<java-opts>[{u'value': u'-debug -Da -Db=1'}]</java-opts>" in lines, lines)
    assert_true("<java-opts>-debug -Da -Db=1</java-opts>" in lines, lines)
def test_workflow_create_single_action_data(self):
# Given a full workflow (Start -> mapreduce action -> End, with Kill), derive
# a single-action workflow around one node id and check the resulting DAG is
# Start -> node -> End, with the node's error edge going to Kill.
workflow = Workflow(data="{\"layout\": [{\"oozieRows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"MapReduce job\", \"widgetType\": \"mapreduce-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"size\": 12}], \"id\": \"e2caca14-8afc-d7e0-287c-88accd0b4253\", \"columns\": []}], \"rows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"ff63ee3f-df54-2fa3-477b-65f5e0f0632c\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"MapReduce job\", \"widgetType\": \"mapreduce-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"size\": 12}], \"id\": \"e2caca14-8afc-d7e0-287c-88accd0b4253\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, 
\"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"6a13d869-d04c-8431-6c5c-dbe67ea33889\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"e3b56553-7a4f-43d2-b1e2-4dc433280095\", \"columns\": []}], \"oozieEndRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"6a13d869-d04c-8431-6c5c-dbe67ea33889\", \"columns\": []}, \"oozieKillRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": 
\"e3b56553-7a4f-43d2-b1e2-4dc433280095\", \"columns\": []}, \"enableOozieDropOnAfter\": true, \"oozieStartRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"ff63ee3f-df54-2fa3-477b-65f5e0f0632c\", \"columns\": []}, \"klass\": \"card card-home card-column span12\", \"enableOozieDropOnBefore\": true, \"drops\": [\"temp\"], \"id\": \"0c1908e7-0096-46e7-a16b-b17b1142a730\", \"size\": 12}], \"workflow\": {\"properties\": {\"job_xml\": \"\", \"description\": \"\", \"wf1_id\": null, \"sla_enabled\": false, \"deployment_dir\": \"/user/hue/oozie/workspaces/hue-oozie-1430228904.58\", \"schema_version\": \"uri:oozie:workflow:0.5\", \"sla\": [{\"key\": \"enabled\", \"value\": false}, {\"key\": \"nominal-time\", \"value\": \"${nominal_time}\"}, {\"key\": \"should-start\", \"value\": \"\"}, {\"key\": \"should-end\", \"value\": \"${30 * MINUTES}\"}, {\"key\": \"max-duration\", \"value\": \"\"}, {\"key\": \"alert-events\", \"value\": \"\"}, {\"key\": \"alert-contact\", \"value\": \"\"}, {\"key\": \"notification-msg\", \"value\": \"\"}, {\"key\": \"upstream-apps\", \"value\": \"\"}], \"show_arrows\": true, \"parameters\": [{\"name\": \"oozie.use.system.libpath\", \"value\": true}], \"properties\": []}, \"name\": \"My Workflow\", \"versions\": [\"uri:oozie:workflow:0.4\", \"uri:oozie:workflow:0.4.5\", \"uri:oozie:workflow:0.5\"], \"isDirty\": true, \"movedNode\": null, \"linkMapping\": {\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\": [\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"], \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": [], 
\"3f107997-04cc-8733-60a9-a4bb62cebffc\": [\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"], \"17c9c895-5a16-7443-bb81-f34b30b21548\": []}, \"nodeIds\": [\"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"], \"nodes\": [{\"properties\": {}, \"name\": \"Start\", \"children\": [{\"to\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"}], \"actionParametersFetched\": false, \"type\": \"start-widget\", \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"actionParameters\": []}, {\"properties\": {}, \"name\": \"End\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"end-widget\", \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"actionParameters\": []}, {\"properties\": {\"message\": \"Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]\"}, \"name\": \"Kill\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"kill-widget\", \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"actionParameters\": []}, {\"properties\": {\"retry_max\": [{\"value\": \"5\"}], \"files\": [], \"job_xml\": \"\", \"jar_path\": \"my_jar\", \"job_properties\": [{\"name\": \"prop_1_name\", \"value\": \"prop_1_value\"}], \"archives\": [], \"prepares\": [], \"credentials\": [], \"sla\": [{\"key\": \"enabled\", \"value\": false}, {\"key\": \"nominal-time\", \"value\": \"${nominal_time}\"}, {\"key\": \"should-start\", \"value\": \"\"}, {\"key\": \"should-end\", \"value\": \"${30 * MINUTES}\"}, {\"key\": \"max-duration\", \"value\": \"\"}, {\"key\": \"alert-events\", \"value\": \"\"}, {\"key\": \"alert-contact\", \"value\": \"\"}, {\"key\": \"notification-msg\", \"value\": \"\"}, {\"key\": \"upstream-apps\", \"value\": \"\"}]}, \"name\": \"mapreduce-0cf2\", \"children\": [{\"to\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"}, {\"error\": \"17c9c895-5a16-7443-bb81-f34b30b21548\"}], \"actionParametersFetched\": false, \"type\": \"mapreduce-widget\", \"id\": 
\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"actionParameters\": []}], \"id\": 50019, \"nodeNamesMapping\": {\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\": \"mapreduce-0cf2\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": \"End\", \"3f107997-04cc-8733-60a9-a4bb62cebffc\": \"Start\", \"17c9c895-5a16-7443-bb81-f34b30b21548\": \"Kill\"}, \"uuid\": \"084f4d4c-00f1-62d2-e27e-e153c1f9acfb\"}}")
single_action_wf_data = workflow.create_single_action_workflow_data('0cf2d5d5-2315-0bda-bd53-0eec257e943f')
single_action_wf = Workflow(data=single_action_wf_data)
# Start, the action itself, End and Kill.
assert_true(len(single_action_wf.nodes) == 4)
# Validating DAG: Start -> node -> Kill/End
_data = json.loads(single_action_wf_data)
start_node = [node for node in _data['workflow']['nodes'] if node['name'] == 'Start'][0]
submit_node = [node for node in _data['workflow']['nodes'] if node['id'] == '0cf2d5d5-2315-0bda-bd53-0eec257e943f'][0]
end_node = [node for node in _data['workflow']['nodes'] if node['name'] == 'End'][0]
kill_node = [node for node in _data['workflow']['nodes'] if node['name'] == 'Kill'][0]
# str() comparison: children are lists of {'to'/'error': id} dicts, so a
# substring match on the id is sufficient to prove the edge exists.
assert_true(submit_node['id'] in str(start_node['children']))
assert_true(end_node['id'] in str(submit_node['children']))
assert_true(kill_node['id'] in str(submit_node['children']))
def test_submit_single_action(self):
# Requesting the submit-single-action view for one node of a saved workflow
# should prefill the parameters form with that action's variables.
wf_doc = save_temp_workflow(MockOozieApi.JSON_WORKFLOW_LIST[5], self.user)
reset = ENABLE_V2.set_for_testing(True)
try:
response = self.c.get(reverse('oozie:submit_single_action', args=[wf_doc.id, '3f107997-04cc-8733-60a9-a4bb62cebabc']))
assert_equal([{'name':'Dryrun', 'value': False}, {'name':'ls_arg', 'value': '-l'}], response.context['params_form'].initial)
# NOTE(review): this except logs and swallows every failure — including the
# assertion above — so the test can never fail in this branch. Presumably a
# deliberate best-effort check; confirm before tightening. (Python 2 syntax.)
except Exception, ex:
logging.exception(ex)
finally:
# Always restore the ENABLE_V2 flag and remove the temp workflow document.
reset()
wf_doc.delete()
def test_list_bundles_page(self):
    """The bundle editor listing view must expose 'bundles_json' in its context."""
    resp = self.c.get(reverse('oozie:list_editor_bundles'))
    assert_true('bundles_json' in resp.context, resp.context)
def test_workflow_dependencies(self):
# A workflow document can depend on three kinds of Document2 objects:
# a history snapshot of a workflow, a sub-workflow, and a coordinator.
# Verify all three survive a save and can be filtered back by type/flags.
wf_doc1 = save_temp_workflow(MockOozieApi.JSON_WORKFLOW_LIST[5], self.user)
# Add history dependency
wf_doc1.is_history = True
wf_doc1.dependencies.add(wf_doc1)
# Add sub-workflow dependency
wf_doc2 = save_temp_workflow(MockOozieApi.JSON_WORKFLOW_LIST[4], self.user)
wf_doc1.dependencies.add(wf_doc2)
# Add coordinator dependency
data = {
'id': None,
'uuid': None,
'name': 'My Coordinator',
'variables': [], # Aka workflow parameters
'properties': {
'description': '',
'deployment_dir': '',
'schema_version': 'uri:oozie:coordinator:0.2',
'frequency_number': 1,
'frequency_unit': 'days',
'cron_frequency': '0 0 * * *',
'cron_advanced': False,
'timezone': '',
'start': '${start_date}',
'end': '${end_date}',
'workflow': None,
'timeout': None,
'concurrency': None,
'execution': None,
'throttle': None,
'job_xml': '',
'credentials': [],
'parameters': [
{'name': 'oozie.use.system.libpath', 'value': True},
{'name': 'start_date', 'value': ''},
{'name': 'end_date', 'value': ''}
],
'sla': WorkflowConfiguration.SLA_DEFAULT
}
}
wf_doc3 = Document2.objects.create(name='test', type='oozie-coordinator2', owner=User.objects.get(username='test'), data=data)
wf_doc1.dependencies.add(wf_doc3)
assert_true(len(wf_doc1.dependencies.all()) == 3)
wf_doc1.save()
# Validating dependencies after saving the workflow
assert_true(len(wf_doc1.dependencies.all()) == 3)
# One of each kind must be retrievable by type and history flag.
assert_true(len(wf_doc1.dependencies.filter(type='oozie-coordinator2')) > 0)
assert_true(len(wf_doc1.dependencies.filter(Q(is_history=False) & Q(type='oozie-workflow2'))) > 0)
assert_true(len(wf_doc1.dependencies.filter(Q(is_history=True) & Q(type='oozie-workflow2'))) > 0)
wf_doc1.delete()
wf_doc2.delete()
wf_doc3.delete()
def test_editor_access_permissions(self):
    """Granting 'disable_editor_access' locks a regular user out of the editor
    (HTTP 401) while leaving the dashboard and superusers unaffected."""
    group = 'no_editor'
    try:
        # Before the permission is granted, both sections are reachable.
        assert_equal(self.c.get(reverse('oozie:list_editor_workflows')).status_code, 200)
        assert_equal(self.c.get(reverse('oozie:list_workflows')).status_code, 200)

        add_permission('test', 'no_editor', 'disable_editor_access', 'oozie')

        # Editor is now blocked for the regular user; the dashboard still works.
        assert_equal(self.c.get(reverse('oozie:list_editor_workflows')).status_code, 401)
        assert_equal(self.c.get(reverse('oozie:list_workflows')).status_code, 200)

        # Superusers bypass the permission entirely.
        admin = make_logged_in_client('admin', 'admin', is_superuser=True, recreate=True, groupname=group)
        assert_equal(admin.get(reverse('oozie:list_editor_workflows')).status_code, 200)
        assert_equal(admin.get(reverse('oozie:list_workflows')).status_code, 200)
    finally:
        remove_from_group("test", group)
def test_list_editor_workflows(self):
    """A saved workflow appears in the editor listing and disappears once trashed."""
    wf_doc = save_temp_workflow(MockOozieApi.JSON_WORKFLOW_LIST[5], self.user)
    reset = ENABLE_V2.set_for_testing(True)

    def listed_workflows():
        # Fetch the listing page and return (uuids, parsed payload) from its context.
        resp = self.c.get(reverse('oozie:list_editor_workflows'))
        assert_equal(resp.status_code, 200)
        payload = json.loads(resp.context['workflows_json'])
        return [doc['uuid'] for doc in payload], payload

    try:
        doc_uuids, payload = listed_workflows()
        assert_true(wf_doc.uuid in doc_uuids, payload)

        # Trash workflow and verify it no longer appears in list
        self.c.post('/desktop/api2/doc/delete', {'uuid': json.dumps(wf_doc.uuid)})
        doc_uuids, payload = listed_workflows()
        assert_false(wf_doc.uuid in doc_uuids, payload)
    finally:
        reset()
        wf_doc.delete()
def test_workflow_properties(self):
# A new Workflow picks up its default properties from, in order of priority:
# a Group-scoped DefaultConfiguration, a global Default configuration, or the
# built-in per-user defaults when neither saved config exists.
# Test that a new workflow will be initialized with default properties if no saved configs exist
wf = Workflow(user=self.user)
data = json.loads(wf.data)
assert_equal(data['workflow']['properties'], Workflow.get_workflow_properties_for_user(self.user))
# Setup a test Default configuration, NOTE: this is an invalid format for testing only
properties = [
{
'multiple': False,
'value': '/user/test/oozie',
'nice_name': 'Workspace',
'key': 'deployment_dir',
'help_text': 'Specify the deployment directory.',
'type': 'hdfs-files'
}, {
'multiple': True,
'value': [
{
'value': 'test',
'key': 'mapred.queue.name'
}
],
'nice_name': 'Hadoop Properties',
'key': 'properties',
'help_text': 'Hadoop configuration properties.',
'type': 'settings'
}
]
config = DefaultConfiguration(app=WorkflowConfiguration.APP_NAME, properties=json.dumps(properties), is_default=True)
config.save()
wf_props = config.properties_dict
# These two keys are always injected by the workflow itself, so the expected
# dict must carry them too.
wf_props.update({'wf1_id': None, 'description': ''})
# Test that a new workflow will be initialized with Default saved config if it exists
wf = Workflow(user=self.user)
data = json.loads(wf.data)
assert_equal(data['workflow']['properties'], wf_props)
# Test that a new workflow will be initialized with Group saved config if it exists
properties = [
{
'multiple': True,
'value': [
{
'value': 'org.myorg.WordCount.Map',
'key': 'mapred.mapper.class'
},
{
'value': 'org.myorg.WordCount.Reduce',
'key': 'mapred.reducer.class'
}
],
'nice_name': 'Hadoop Properties',
'key': 'properties',
'help_text': 'Hadoop configuration properties.',
'type': 'settings'
}
]
config = DefaultConfiguration(app=WorkflowConfiguration.APP_NAME,
properties=json.dumps(properties),
is_default=False,
group=self.user.groups.first())
config.save()
wf_props = config.properties_dict
wf_props.update({'wf1_id': None, 'description': ''})
# Test that a new workflow will be initialized with Default saved config if it exists
wf = Workflow(user=self.user)
data = json.loads(wf.data)
assert_equal(data['workflow']['properties'], wf_props)
class TestExternalWorkflowGraph(object):
def setUp(self):
    # Non-superuser client with explicit access to the oozie app.
    self.c = make_logged_in_client(is_superuser=False)
    grant_access("test", "test", "oozie")
    add_to_group("test")
    self.user = User.objects.get(username='test')
    # Fresh, empty workflow used as the fixture for the graph tests below.
    self.wf = Workflow()
def test_graph_generation_from_xml(self):
    """generate_v2_graph_nodes() turns a workflow XML definition into the
    expected flat list of graph-node dicts (start/fork/action/join/kill/end)."""
    # Use a context manager so the fixture file handle is always closed;
    # the previous version leaked it.
    with open('apps/oozie/src/oozie/test_data/xslt2/test-workflow.xml') as f:
        self.wf.definition = f.read()
    self.node_list = [{u'node_type': u'start', u'ok_to': u'fork-68d4', u'name': u''}, {u'node_type': u'kill', u'ok_to': u'', u'name': u'Kill'}, {u'path2': u'shell-0f44', u'node_type': u'fork', u'ok_to': u'', u'name': u'fork-68d4', u'path1': u'subworkflow-a13f'}, {u'node_type': u'join', u'ok_to': u'End', u'name': u'join-775e'}, {u'node_type': u'end', u'ok_to': u'', u'name': u'End'}, {u'subworkflow': {u'app-path': u'${nameNode}/user/hue/oozie/deployments/_admin_-oozie-50001-1427488969.48'}, u'node_type': u'sub-workflow', u'ok_to': u'join-775e', u'name': u'subworkflow-a13f', u'error_to': u'Kill'}, {u'shell': {u'command': u'ls'}, u'node_type': u'shell', u'ok_to': u'join-775e', u'name': u'shell-0f44', u'error_to': u'Kill'}]
    assert_equal(self.node_list, generate_v2_graph_nodes(self.wf.definition))
def test_get_graph_adjacency_list(self):
# _create_graph_adjaceny_list() should key every parsed node by name and
# preserve each node's dict payload verbatim.
self.node_list = [{u'node_type': u'start', u'ok_to': u'fork-68d4', u'name': u''}, {u'node_type': u'kill', u'ok_to': u'', u'name': u'kill'}, {u'path2': u'shell-0f44', u'node_type': u'fork', u'ok_to': u'', u'name': u'fork-68d4', u'path1': u'subworkflow-a13f'}, {u'node_type': u'join', u'ok_to': u'end', u'name': u'join-775e'}, {u'node_type': u'end', u'ok_to': u'', u'name': u'end'}, {u'node_type': u'sub-workflow', u'ok_to': u'join-775e', u'sub-workflow': {u'app-path': u'${nameNode}/user/hue/oozie/deployments/_admin_-oozie-50001-1427488969.48'}, u'name': u'subworkflow-a13f', u'error_to': u'kill'}, {u'shell': {u'command': u'ls'}, u'node_type': u'shell', u'ok_to': u'join-775e', u'name': u'shell-0f44', u'error_to': u'kill'}]
adj_list = _create_graph_adjaceny_list(self.node_list)
# One adjacency entry per input node.
assert_true(len(adj_list) == 7)
assert_true('subworkflow-a13f' in adj_list.keys())
# Nested action payloads (here the shell command) survive the conversion.
assert_true(adj_list['shell-0f44']['shell']['command'] == 'ls')
assert_equal(adj_list['fork-68d4'], {u'path2': u'shell-0f44', u'node_type': u'fork', u'ok_to': u'', u'name': u'fork-68d4', u'path1': u'subworkflow-a13f'})
def test_get_hierarchy_from_adj_list(self):
# A 4-way fork/join workflow should collapse into a nested hierarchy:
# start -> [fork, [branches...], join] -> Kill -> End.
self.wf.definition = """<workflow-app name="ls-4thread" xmlns="uri:oozie:workflow:0.5">
<start to="fork-fe93"/>
<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<action name="shell-5429">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-7f80"/>
<error to="Kill"/>
</action>
<action name="shell-bd90">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-7f80"/>
<error to="Kill"/>
</action>
<fork name="fork-fe93">
<path start="shell-5429" />
<path start="shell-bd90" />
<path start="shell-d64c" />
<path start="shell-d8cc" />
</fork>
<join name="join-7f80" to="End"/>
<action name="shell-d64c">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-7f80"/>
<error to="Kill"/>
</action>
<action name="shell-d8cc">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-7f80"/>
<error to="Kill"/>
</action>
<end name="End"/>
</workflow-app>"""
node_list = generate_v2_graph_nodes(self.wf.definition)
adj_list = _create_graph_adjaceny_list(node_list)
# _get_hierarchy_from_adj_list appends to node_hierarchy in place, walking
# from the start node's ok_to target.
node_hierarchy = ['start']
_get_hierarchy_from_adj_list(adj_list, adj_list['start']['ok_to'], node_hierarchy)
assert_equal(node_hierarchy, ['start', [u'fork-fe93', [[u'shell-bd90'], [u'shell-d64c'], [u'shell-5429'], [u'shell-d8cc']], u'join-7f80'], ['Kill'], ['End']])
def test_gen_workflow_data_from_xml(self):
# Nested forks (a fork whose branch starts with another fork) must round-trip
# through gen_workflow_data_from_xml into the editor's layout/nodes model.
self.wf.definition = """<workflow-app name="fork-fork-test" xmlns="uri:oozie:workflow:0.5">
<start to="fork-949d"/>
<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<action name="shell-eadd">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-1a0f"/>
<error to="Kill"/>
</action>
<action name="shell-f4c1">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-3bba"/>
<error to="Kill"/>
</action>
<fork name="fork-949d">
<path start="fork-e5fa" />
<path start="shell-3dd5" />
</fork>
<join name="join-ca1a" to="End"/>
<action name="shell-ef70">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-1a0f"/>
<error to="Kill"/>
</action>
<fork name="fork-37d7">
<path start="shell-eadd" />
<path start="shell-ef70" />
</fork>
<join name="join-1a0f" to="join-ca1a"/>
<action name="shell-3dd5">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="fork-37d7"/>
<error to="Kill"/>
</action>
<action name="shell-2ba8">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-3bba"/>
<error to="Kill"/>
</action>
<fork name="fork-e5fa">
<path start="shell-f4c1" />
<path start="shell-2ba8" />
</fork>
<join name="join-3bba" to="join-ca1a"/>
<end name="End"/>
</workflow-app>"""
workflow_data = Workflow.gen_workflow_data_from_xml(self.user, self.wf)
# Spot-check row/node counts and that the generated layout starts with the
# expected fork widget and synthesized start node name.
assert_true(len(workflow_data['layout'][0]['rows']) == 6)
assert_true(len(workflow_data['workflow']['nodes']) == 14)
assert_equal(workflow_data['layout'][0]['rows'][1]['widgets'][0]['widgetType'], 'fork-widget')
assert_equal(workflow_data['workflow']['nodes'][0]['name'], 'start-3f10')
def test_gen_workflow_data_for_email(self):
# An email action (with bcc/body/content_type fields) must be imported from
# XML into the editor model as an 'email-widget'.
self.wf.definition = """<workflow-app name="My_Workflow" xmlns="uri:oozie:workflow:0.5">
<start to="email-0377"/>
<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<action name="email-0377">
<email xmlns="uri:oozie:email-action:0.2">
<to>example@example.com</to>
<subject>sub</subject>
<bcc>example@bcc.com</bcc>
<body>bod</body>
<content_type>text/plain</content_type>
</email>
<ok to="End"/>
<error to="Kill"/>
</action>
<end name="End"/>
</workflow-app>"""
workflow_data = Workflow.gen_workflow_data_from_xml(self.user, self.wf)
# Start, email action, End and Kill -> 4 rows / 4 nodes.
assert_true(len(workflow_data['layout'][0]['rows']) == 4)
assert_true(len(workflow_data['workflow']['nodes']) == 4)
assert_equal(workflow_data['layout'][0]['rows'][1]['widgets'][0]['widgetType'], 'email-widget')
assert_equal(workflow_data['workflow']['nodes'][0]['name'], 'start-3f10') | #!/usr/bin/env python
## -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import Q
from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import add_permission, add_to_group, grant_access, remove_from_group
from desktop.models import DefaultConfiguration, Document2
from oozie.conf import ENABLE_V2
from oozie.importlib.workflows import generate_v2_graph_nodes
from oozie.models2 import Node, Workflow, WorkflowConfiguration, find_dollar_variables, find_dollar_braced_variables, \
_create_graph_adjaceny_list, _get_hierarchy_from_adj_list
from oozie.tests import OozieMockBase, save_temp_workflow, MockOozieApi
LOG = logging.getLogger(__name__)
class TestEditor(OozieMockBase):
def setUp(self):
# OozieMockBase wires up the mocked Oozie API and a logged-in test client.
super(TestEditor, self).setUp()
# Fresh, empty workflow fixture reused by the serialization tests below.
self.wf = Workflow()
def test_parsing(self):
# find_dollar_variables() extracts $name parameters from a script.
# Note the expectations here: ${nah} (braced form) is excluded, and
# $output does not appear in the first expected list — presumably collapsed
# against $out by the matcher; TODO confirm the dedup/prefix behavior.
assert_equal(['input', 'LIMIT', 'out'], find_dollar_variables("""
data = '$input';
$out = LIMIT data $LIMIT; -- ${nah}
$output = STORE "$out";
"""))
# Same extraction applied to a SQL-style script.
assert_equal(['max_salary', 'limit'], find_dollar_variables("""
SELECT sample_07.description, sample_07.salary
FROM
sample_07
WHERE
( sample_07.salary > $max_salary)
ORDER BY sample_07.salary DESC
LIMIT $limit"""))
def test_hive_script_parsing(self):
# find_dollar_braced_variables() extracts ${name} parameters from Hive SQL,
# stripping the hivevar:/hiveconf: namespace prefixes.
assert_equal(['field', 'tablename', 'LIMIT'], find_dollar_braced_variables("""
SELECT ${field}
FROM ${hivevar:tablename}
LIMIT ${hiveconf:LIMIT}
"""))
# Same result on a single-line script.
assert_equal(['field', 'tablename', 'LIMIT'], find_dollar_braced_variables("SELECT ${field} FROM ${hivevar:tablename} LIMIT ${hiveconf:LIMIT}"))
def test_workflow_gen_xml(self):
# An empty workflow serializes to the minimal app: start -> End plus the
# default Kill node. The comparison is over whitespace-split tokens, so the
# generated XML's indentation/line layout is irrelevant.
assert_equal([
u'<workflow-app', u'name="My_Workflow"', u'xmlns="uri:oozie:workflow:0.5">', u'<start', u'to="End"/>', u'<kill', u'name="Kill">', u'<message>Action', u'failed,',
u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>', u'</kill>', u'<end', u'name="End"/>', u'</workflow-app>'],
self.wf.to_xml({'output': '/path'}).split()
)
def test_workflow_map_reduce_gen_xml(self):
    """A workflow with one MapReduce action (retry-max=5, one job property) renders the expected action XML."""
    # Serialized editor document captured from the UI (layout + graph); kept verbatim on purpose.
    wf = Workflow(data="{\"layout\": [{\"oozieRows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"MapReduce job\", \"widgetType\": \"mapreduce-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"size\": 12}], \"id\": \"e2caca14-8afc-d7e0-287c-88accd0b4253\", \"columns\": []}], \"rows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"ff63ee3f-df54-2fa3-477b-65f5e0f0632c\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"MapReduce job\", \"widgetType\": \"mapreduce-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"size\": 12}], \"id\": \"e2caca14-8afc-d7e0-287c-88accd0b4253\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"6a13d869-d04c-8431-6c5c-dbe67ea33889\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"e3b56553-7a4f-43d2-b1e2-4dc433280095\", \"columns\": []}], \"oozieEndRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"6a13d869-d04c-8431-6c5c-dbe67ea33889\", \"columns\": []}, \"oozieKillRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"e3b56553-7a4f-43d2-b1e2-4dc433280095\", \"columns\": []}, \"enableOozieDropOnAfter\": true, \"oozieStartRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"ff63ee3f-df54-2fa3-477b-65f5e0f0632c\", \"columns\": []}, \"klass\": \"card card-home card-column span12\", \"enableOozieDropOnBefore\": true, \"drops\": [\"temp\"], \"id\": \"0c1908e7-0096-46e7-a16b-b17b1142a730\", \"size\": 12}], \"workflow\": {\"properties\": {\"job_xml\": \"\", \"description\": \"\", \"wf1_id\": null, \"sla_enabled\": false, \"deployment_dir\": \"/user/hue/oozie/workspaces/hue-oozie-1430228904.58\", \"schema_version\": \"uri:oozie:workflow:0.5\", \"sla\": [{\"key\": \"enabled\", \"value\": false}, {\"key\": \"nominal-time\", \"value\": \"${nominal_time}\"}, {\"key\": \"should-start\", \"value\": \"\"}, {\"key\": \"should-end\", \"value\": \"${30 * MINUTES}\"}, {\"key\": \"max-duration\", \"value\": \"\"}, {\"key\": \"alert-events\", \"value\": \"\"}, {\"key\": \"alert-contact\", \"value\": \"\"}, {\"key\": \"notification-msg\", \"value\": \"\"}, {\"key\": \"upstream-apps\", \"value\": \"\"}], \"show_arrows\": true, \"parameters\": [{\"name\": \"oozie.use.system.libpath\", \"value\": true}], \"properties\": []}, \"name\": \"My Workflow\", \"versions\": [\"uri:oozie:workflow:0.4\", \"uri:oozie:workflow:0.4.5\", \"uri:oozie:workflow:0.5\"], \"isDirty\": true, \"movedNode\": null, \"linkMapping\": {\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\": [\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"], \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": [], \"3f107997-04cc-8733-60a9-a4bb62cebffc\": [\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"], \"17c9c895-5a16-7443-bb81-f34b30b21548\": []}, \"nodeIds\": [\"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"], \"nodes\": [{\"properties\": {}, \"name\": \"Start\", \"children\": [{\"to\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"}], \"actionParametersFetched\": false, \"type\": \"start-widget\", \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"actionParameters\": []}, {\"properties\": {}, \"name\": \"End\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"end-widget\", \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"actionParameters\": []}, {\"properties\": {\"message\": \"Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]\"}, \"name\": \"Kill\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"kill-widget\", \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"actionParameters\": []}, {\"properties\": {\"retry_max\": [{\"value\": \"5\"}], \"files\": [], \"job_xml\": \"\", \"jar_path\": \"my_jar\", \"job_properties\": [{\"name\": \"prop_1_name\", \"value\": \"prop_1_value\"}], \"archives\": [], \"prepares\": [], \"credentials\": [], \"sla\": [{\"key\": \"enabled\", \"value\": false}, {\"key\": \"nominal-time\", \"value\": \"${nominal_time}\"}, {\"key\": \"should-start\", \"value\": \"\"}, {\"key\": \"should-end\", \"value\": \"${30 * MINUTES}\"}, {\"key\": \"max-duration\", \"value\": \"\"}, {\"key\": \"alert-events\", \"value\": \"\"}, {\"key\": \"alert-contact\", \"value\": \"\"}, {\"key\": \"notification-msg\", \"value\": \"\"}, {\"key\": \"upstream-apps\", \"value\": \"\"}]}, \"name\": \"mapreduce-0cf2\", \"children\": [{\"to\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"}, {\"error\": \"17c9c895-5a16-7443-bb81-f34b30b21548\"}], \"actionParametersFetched\": false, \"type\": \"mapreduce-widget\", \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"actionParameters\": []}], \"id\": 50019, \"nodeNamesMapping\": {\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\": \"mapreduce-0cf2\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": \"End\", \"3f107997-04cc-8733-60a9-a4bb62cebffc\": \"Start\", \"17c9c895-5a16-7443-bb81-f34b30b21548\": \"Kill\"}, \"uuid\": \"084f4d4c-00f1-62d2-e27e-e153c1f9acfb\"}}")
    # Whitespace-split comparison of the generated XML; retry-max and the job
    # property must appear on the <action>/<configuration> elements.
    assert_equal([
        u'<workflow-app', u'name="My_Workflow"', u'xmlns="uri:oozie:workflow:0.5">',
        u'<start', u'to="mapreduce-0cf2"/>',
        u'<kill', u'name="Kill">', u'<message>Action', u'failed,', u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>', u'</kill>',
        u'<action', u'name="mapreduce-0cf2"', 'retry-max="5">',
        u'<map-reduce>',
        u'<job-tracker>${jobTracker}</job-tracker>',
        u'<name-node>${nameNode}</name-node>',
        u'<configuration>',
        u'<property>',
        u'<name>prop_1_name</name>',
        u'<value>prop_1_value</value>',
        u'</property>',
        u'</configuration>',
        u'</map-reduce>',
        u'<ok', u'to="End"/>',
        u'<error', u'to="Kill"/>',
        u'</action>',
        u'<end', u'name="End"/>',
        u'</workflow-app>'
        ],
        wf.to_xml({'output': '/path'}).split()
    )
def test_workflow_java_gen_xml(self):
    """A workflow with one Java action (main class, java-opts, arg, attached file) renders the expected XML."""
    # Serialized editor document captured from the UI; kept verbatim on purpose.
    wf = Workflow(data="{\"layout\": [{\"oozieRows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Java program\", \"widgetType\": \"java-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": true, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"6ddafdc4-c070-95f0-4211-328e9f31daf6\", \"size\": 12}], \"id\": \"badb3c81-78d6-8099-38fc-87a9904ba78c\", \"columns\": []}], \"rows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"adc3fe69-36eb-20f8-09ac-38fada1582b2\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Java program\", \"widgetType\": \"java-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": true, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"6ddafdc4-c070-95f0-4211-328e9f31daf6\", \"size\": 12}], \"id\": \"badb3c81-78d6-8099-38fc-87a9904ba78c\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"107bdacf-a37a-d69e-98dd-5801407cb57e\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"81e1869c-a2c3-66d2-c703-719335ea45cb\", \"columns\": []}], \"oozieEndRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"107bdacf-a37a-d69e-98dd-5801407cb57e\", \"columns\": []}, \"oozieKillRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"81e1869c-a2c3-66d2-c703-719335ea45cb\", \"columns\": []}, \"enableOozieDropOnAfter\": true, \"oozieStartRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"adc3fe69-36eb-20f8-09ac-38fada1582b2\", \"columns\": []}, \"klass\": \"card card-home card-column span12\", \"enableOozieDropOnBefore\": true, \"drops\": [\"temp\"], \"id\": \"8e0f37a5-2dfb-7329-be44-78e60b2cf62b\", \"size\": 12}], \"workflow\": {\"properties\": {\"job_xml\": \"\", \"description\": \"\", \"wf1_id\": null, \"sla_enabled\": false, \"deployment_dir\": \"/user/hue/oozie/workspaces/hue-oozie-1449080135.8\", \"schema_version\": \"uri:oozie:workflow:0.5\", \"properties\": [], \"show_arrows\": true, \"parameters\": [{\"name\": \"oozie.use.system.libpath\", \"value\": true}], \"sla\": [{\"value\": false, \"key\": \"enabled\"}, {\"value\": \"${nominal_time}\", \"key\": \"nominal-time\"}, {\"value\": \"\", \"key\": \"should-start\"}, {\"value\": \"${30 * MINUTES}\", \"key\": \"should-end\"}, {\"value\": \"\", \"key\": \"max-duration\"}, {\"value\": \"\", \"key\": \"alert-events\"}, {\"value\": \"\", \"key\": \"alert-contact\"}, {\"value\": \"\", \"key\": \"notification-msg\"}, {\"value\": \"\", \"key\": \"upstream-apps\"}]}, \"name\": \"My Workflow\", \"versions\": [\"uri:oozie:workflow:0.4\", \"uri:oozie:workflow:0.4.5\", \"uri:oozie:workflow:0.5\"], \"isDirty\": false, \"movedNode\": null, \"linkMapping\": {\"6ddafdc4-c070-95f0-4211-328e9f31daf6\": [\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"], \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": [], \"3f107997-04cc-8733-60a9-a4bb62cebffc\": [\"6ddafdc4-c070-95f0-4211-328e9f31daf6\"], \"17c9c895-5a16-7443-bb81-f34b30b21548\": []}, \"nodeIds\": [\"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"6ddafdc4-c070-95f0-4211-328e9f31daf6\"], \"nodes\": [{\"properties\": {}, \"name\": \"Start\", \"children\": [{\"to\": \"6ddafdc4-c070-95f0-4211-328e9f31daf6\"}], \"actionParametersFetched\": false, \"type\": \"start-widget\", \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"actionParameters\": []}, {\"properties\": {}, \"name\": \"End\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"end-widget\", \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"actionParameters\": []}, {\"properties\": {\"body\": \"\", \"cc\": \"\", \"to\": \"\", \"enableMail\": false, \"message\": \"Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]\", \"subject\": \"\"}, \"name\": \"Kill\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"kill-widget\", \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"actionParameters\": []}, {\"properties\": {\"files\": [{\"value\": \"/my_file\"}], \"job_xml\": [], \"jar_path\": \"/my/jar\", \"java_opts\": [{\"value\": \"-Dsun.security.jgss.debug=true\"}], \"retry_max\": [], \"retry_interval\": [], \"job_properties\": [], \"capture_output\": false, \"main_class\": \"MyClass\", \"arguments\": [{\"value\": \"my_arg\"}], \"prepares\": [], \"credentials\": [], \"sla\": [{\"value\": false, \"key\": \"enabled\"}, {\"value\": \"${nominal_time}\", \"key\": \"nominal-time\"}, {\"value\": \"\", \"key\": \"should-start\"}, {\"value\": \"${30 * MINUTES}\", \"key\": \"should-end\"}, {\"value\": \"\", \"key\": \"max-duration\"}, {\"value\": \"\", \"key\": \"alert-events\"}, {\"value\": \"\", \"key\": \"alert-contact\"}, {\"value\": \"\", \"key\": \"notification-msg\"}, {\"value\": \"\", \"key\": \"upstream-apps\"}], \"archives\": []}, \"name\": \"java-6dda\", \"children\": [{\"to\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"}, {\"error\": \"17c9c895-5a16-7443-bb81-f34b30b21548\"}], \"actionParametersFetched\": false, \"type\": \"java-widget\", \"id\": \"6ddafdc4-c070-95f0-4211-328e9f31daf6\", \"actionParameters\": []}], \"id\": 50247, \"nodeNamesMapping\": {\"6ddafdc4-c070-95f0-4211-328e9f31daf6\": \"java-6dda\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": \"End\", \"3f107997-04cc-8733-60a9-a4bb62cebffc\": \"Start\", \"17c9c895-5a16-7443-bb81-f34b30b21548\": \"Kill\"}, \"uuid\": \"2667d60e-d894-c27b-6e6f-0333704c0989\"}}")
    # Note the '#my_file' suffix on <file>: the attached file is symlinked under its basename.
    assert_equal([
        u'<workflow-app', u'name="My_Workflow"', u'xmlns="uri:oozie:workflow:0.5">',
        u'<start', u'to="java-6dda"/>',
        u'<kill', u'name="Kill">',
        u'<message>Action', u'failed,',
        u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>',
        u'</kill>',
        u'<action', u'name="java-6dda">',
        u'<java>',
        u'<job-tracker>${jobTracker}</job-tracker>',
        u'<name-node>${nameNode}</name-node>',
        u'<main-class>MyClass</main-class>',
        u'<java-opts>-Dsun.security.jgss.debug=true</java-opts>',
        u'<arg>my_arg</arg>',
        u'<file>/my_file#my_file</file>',
        u'</java>',
        u'<ok', u'to="End"/>',
        u'<error', u'to="Kill"/>',
        u'</action>',
        u'<end', u'name="End"/>',
        u'</workflow-app>'
        ],
        wf.to_xml({'output': '/path'}).split()
    )
def test_workflow_generic_gen_xml(self):
    """A 'generic' action whose raw XML snippet is user-supplied is embedded verbatim in the workflow XML."""
    # Serialized editor document captured from the UI; the generic node carries
    # its own <my_action> XML in properties.xml. Kept verbatim on purpose.
    workflow = """{"layout": [{"oozieRows": [{"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Generic", "widgetType": "generic-widget", "oozieMovable": true, "ooziePropertiesExpanded": true, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "e96bb09b-84d1-6864-5782-42942bab97cb", "size": 12}], "id": "ed10631a-f264-9a3b-aa09-b04cb76f5c32", "columns": []}], "rows": [{"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "68d83128-2c08-28f6-e9d1-a912d20f8af5", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Generic", "widgetType": "generic-widget", "oozieMovable": true, "ooziePropertiesExpanded": true, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "e96bb09b-84d1-6864-5782-42942bab97cb", "size": 12}], "id": "ed10631a-f264-9a3b-aa09-b04cb76f5c32", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "7bf3cdc7-f79b-ff36-b152-e37217c40ccb", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "07c4f1bd-8f58-ea51-fc3d-50acf74d6747", "columns": []}], "oozieEndRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "7bf3cdc7-f79b-ff36-b152-e37217c40ccb", "columns": []}, "oozieKillRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "07c4f1bd-8f58-ea51-fc3d-50acf74d6747", "columns": []}, "enableOozieDropOnAfter": true, "oozieStartRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "68d83128-2c08-28f6-e9d1-a912d20f8af5", "columns": []}, "klass": "card card-home card-column span12", "enableOozieDropOnBefore": true, "drops": ["temp"], "id": "0e8b5e24-4f78-0f76-fe91-0c8e7f0d290a", "size": 12}], "workflow": {"properties": {"job_xml": "", "description": "", "wf1_id": null, "sla_enabled": false, "deployment_dir": "/user/hue/oozie/workspaces/hue-oozie-1446487280.19", "schema_version": "uri:oozie:workflow:0.5", "properties": [], "show_arrows": true, "parameters": [{"name": "oozie.use.system.libpath", "value": true}], "sla": [{"value": false, "key": "enabled"}, {"value": "${nominal_time}", "key": "nominal-time"}, {"value": "", "key": "should-start"}, {"value": "${30 * MINUTES}", "key": "should-end"}, {"value": "", "key": "max-duration"}, {"value": "", "key": "alert-events"}, {"value": "", "key": "alert-contact"}, {"value": "", "key": "notification-msg"}, {"value": "", "key": "upstream-apps"}]}, "name": "My Workflow 3", "versions": ["uri:oozie:workflow:0.4", "uri:oozie:workflow:0.4.5", "uri:oozie:workflow:0.5"], "isDirty": false, "movedNode": null, "linkMapping": {"17c9c895-5a16-7443-bb81-f34b30b21548": [], "33430f0f-ebfa-c3ec-f237-3e77efa03d0a": [], "3f107997-04cc-8733-60a9-a4bb62cebffc": ["e96bb09b-84d1-6864-5782-42942bab97cb"], "e96bb09b-84d1-6864-5782-42942bab97cb": ["33430f0f-ebfa-c3ec-f237-3e77efa03d0a"]}, "nodeIds": ["3f107997-04cc-8733-60a9-a4bb62cebffc", "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "17c9c895-5a16-7443-bb81-f34b30b21548", "e96bb09b-84d1-6864-5782-42942bab97cb"], "nodes": [{"properties": {}, "name": "Start", "children": [{"to": "e96bb09b-84d1-6864-5782-42942bab97cb"}], "actionParametersFetched": false, "type": "start-widget", "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "actionParameters": []}, {"properties": {}, "name": "End", "children": [], "actionParametersFetched": false, "type": "end-widget", "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "actionParameters": []}, {"properties": {"message": "Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]"}, "name": "Kill", "children": [], "actionParametersFetched": false, "type": "kill-widget", "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "actionParameters": []}, {"properties": {"xml": "<my_action xmlns=\\"uri:oozie:my_action-action:0.1\\">\\n</my_action>", "credentials": [], "retry_max": [], "sla": [{"key": "enabled", "value": false}, {"key": "nominal-time", "value": "${nominal_time}"}, {"key": "should-start", "value": ""}, {"key": "should-end", "value": "${30 * MINUTES}"}, {"key": "max-duration", "value": ""}, {"key": "alert-events", "value": ""}, {"key": "alert-contact", "value": ""}, {"key": "notification-msg", "value": ""}, {"key": "upstream-apps", "value": ""}], "retry_interval": []}, "name": "generic-e96b", "children": [{"to": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a"}, {"error": "17c9c895-5a16-7443-bb81-f34b30b21548"}], "actionParametersFetched": false, "type": "generic-widget", "id": "e96bb09b-84d1-6864-5782-42942bab97cb", "actionParameters": []}], "id": 50027, "nodeNamesMapping": {"17c9c895-5a16-7443-bb81-f34b30b21548": "Kill", "33430f0f-ebfa-c3ec-f237-3e77efa03d0a": "End", "3f107997-04cc-8733-60a9-a4bb62cebffc": "Start", "e96bb09b-84d1-6864-5782-42942bab97cb": "generic-e96b"}, "uuid": "83fb9dc4-8687-e369-9220-c8501a93d446"}}"""
    wf = Workflow(data=workflow)
    # The user-provided <my_action> element must appear verbatim inside <action>.
    assert_equal([
        u'<workflow-app', u'name="My_Workflow_3"', u'xmlns="uri:oozie:workflow:0.5">',
        u'<start', u'to="generic-e96b"/>',
        u'<kill', u'name="Kill">', u'<message>Action', u'failed,', u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>', u'</kill>',
        u'<action', u'name="generic-e96b">', u'<my_action', u'xmlns="uri:oozie:my_action-action:0.1">', u'</my_action>',
        u'<ok', u'to="End"/>', u'<error', u'to="Kill"/>',
        u'</action>',
        u'<end', u'name="End"/>',
        u'</workflow-app>'],
        wf.to_xml({'output': '/path'}).split()
    )
def test_workflow_email_on_kill_node_xml(self):
    """Kill node with mail enabled yields an <email> action plus a renamed <kill>.

    The fixture's Kill node carries "enableMail": true together with "to" and
    "subject" properties; the serializer is expected to emit an email action
    named after the kill node ("Kill") whose ok/error transitions both point
    at the real kill element, which is renamed to "Kill-kill" so the two can
    coexist in the workflow.
    """
    # Serialized editor document: Start -> End workflow whose Kill node is
    # configured to send an email ("to": hue@gethue.com, "subject": "Error on
    # workflow"). NOTE(review): fixture JSON kept byte-identical.
    workflow = """{"history": {"oozie_id": "0000013-151015155856463-oozie-oozi-W", "properties": {"oozie.use.system.libpath": "True", "security_enabled": false, "dryrun": false, "jobTracker": "localhost:8032", "oozie.wf.application.path": "hdfs://localhost:8020/user/hue/oozie/workspaces/hue-oozie-1445431078.26", "hue-id-w": 6, "nameNode": "hdfs://localhost:8020"}}, "layout": [{"oozieRows": [], "rows": [{"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "9cf57679-292c-d980-8053-1180a84eaa54", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "f8f22c81-a9eb-5138-64cf-014ae588d0ca", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "31f194ff-cd4f-faef-652d-0c5f66a80f97", "columns": []}], "oozieEndRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": 
false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "f8f22c81-a9eb-5138-64cf-014ae588d0ca", "columns": []}, "oozieKillRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "31f194ff-cd4f-faef-652d-0c5f66a80f97", "columns": []}, "enableOozieDropOnAfter": true, "oozieStartRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "9cf57679-292c-d980-8053-1180a84eaa54", "columns": []}, "klass": "card card-home card-column span12", "enableOozieDropOnBefore": true, "drops": ["temp"], "id": "1920900a-a735-7e66-61d4-23de384e8f62", "size": 12}], "workflow": {"properties": {"job_xml": "", "description": "", "wf1_id": null, "sla_enabled": false, "deployment_dir": "/user/hue/oozie/workspaces/hue-oozie-1445431078.26", "schema_version": "uri:oozie:workflow:0.5", "properties": [], "show_arrows": true, "parameters": [{"name": "oozie.use.system.libpath", "value": true}], "sla": [{"value": false, "key": 
"enabled"}, {"value": "${nominal_time}", "key": "nominal-time"}, {"value": "", "key": "should-start"}, {"value": "${30 * MINUTES}", "key": "should-end"}, {"value": "", "key": "max-duration"}, {"value": "", "key": "alert-events"}, {"value": "", "key": "alert-contact"}, {"value": "", "key": "notification-msg"}, {"value": "", "key": "upstream-apps"}]}, "name": "My real Workflow 1", "versions": ["uri:oozie:workflow:0.4", "uri:oozie:workflow:0.4.5", "uri:oozie:workflow:0.5"], "isDirty": false, "movedNode": null, "linkMapping": {"33430f0f-ebfa-c3ec-f237-3e77efa03d0a": [], "3f107997-04cc-8733-60a9-a4bb62cebffc": ["33430f0f-ebfa-c3ec-f237-3e77efa03d0a"], "17c9c895-5a16-7443-bb81-f34b30b21548": []}, "nodeIds": ["3f107997-04cc-8733-60a9-a4bb62cebffc", "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "17c9c895-5a16-7443-bb81-f34b30b21548"], "nodes": [{"properties": {}, "name": "Start", "children": [{"to": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a"}], "actionParametersFetched": false, "type": "start-widget", "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "actionParameters": []}, {"properties": {}, "name": "End", "children": [], "actionParametersFetched": false, "type": "end-widget", "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "actionParameters": []}, {"properties": {"body": "", "cc": "", "to": "hue@gethue.com", "enableMail": true, "message": "Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]", "subject": "Error on workflow"}, "name": "Kill", "children": [], "actionParametersFetched": false, "type": "kill-widget", "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "actionParameters": []}], "id": 50020, "nodeNamesMapping": {"33430f0f-ebfa-c3ec-f237-3e77efa03d0a": "End", "3f107997-04cc-8733-60a9-a4bb62cebffc": "Start", "17c9c895-5a16-7443-bb81-f34b30b21548": "Kill"}, "uuid": "330c70c8-33fb-16e1-68fb-c42582c7d178"}}"""
    wf = Workflow(data=workflow)
    # Compare token-by-token (split on whitespace) so formatting differences
    # between XML elements do not affect the assertion.
    assert_equal([
        u'<workflow-app', u'name="My_real_Workflow_1"', u'xmlns="uri:oozie:workflow:0.5">',
        u'<start', u'to="End"/>',
        u'<action', u'name="Kill">',
        u'<email', u'xmlns="uri:oozie:email-action:0.2">', u'<to>hue@gethue.com</to>', u'<subject>Error', u'on', u'workflow</subject>', u'<body></body>', u'</email>',
        u'<ok', u'to="Kill-kill"/>', u'<error', u'to="Kill-kill"/>',
        u'</action>',
        u'<kill', u'name="Kill-kill">',
        u'<message>Action', u'failed,', u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>',
        u'</kill>',
        u'<end', u'name="End"/>',
        u'</workflow-app>'],
        wf.to_xml({'output': '/path'}).split()
    )
def test_workflow_email_gen_xml(self):
    """Full-text check of the email-on-kill XML, including a multi-line body.

    Unlike the token-based test above, this compares the complete generated
    document string so that indentation and the literal newlines inside the
    email body are verified exactly.
    """
    self.maxDiff = None  # show the entire diff if the big string comparison fails
    # Same Start -> End / email-enabled Kill fixture, but the kill node's
    # email properties include a body containing embedded newlines plus
    # "bcc" and "content_type" entries.
    workflow = """{"history": {"oozie_id": "0000013-151015155856463-oozie-oozi-W", "properties": {"oozie.use.system.libpath": "True", "security_enabled": false, "dryrun": false, "jobTracker": "localhost:8032", "oozie.wf.application.path": "hdfs://localhost:8020/user/hue/oozie/workspaces/hue-oozie-1445431078.26", "hue-id-w": 6, "nameNode": "hdfs://localhost:8020"}}, "layout": [{"oozieRows": [], "rows": [{"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "9cf57679-292c-d980-8053-1180a84eaa54", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "f8f22c81-a9eb-5138-64cf-014ae588d0ca", "columns": []}, {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "31f194ff-cd4f-faef-652d-0c5f66a80f97", "columns": []}], "oozieEndRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": 
false, "widgets": [{"status": "", "logsURL": "", "name": "End", "widgetType": "end-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "size": 12}], "id": "f8f22c81-a9eb-5138-64cf-014ae588d0ca", "columns": []}, "oozieKillRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Kill", "widgetType": "kill-widget", "oozieMovable": true, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "size": 12}], "id": "31f194ff-cd4f-faef-652d-0c5f66a80f97", "columns": []}, "enableOozieDropOnAfter": true, "oozieStartRow": {"enableOozieDropOnBefore": true, "enableOozieDropOnSide": true, "enableOozieDrop": false, "widgets": [{"status": "", "logsURL": "", "name": "Start", "widgetType": "start-widget", "oozieMovable": false, "ooziePropertiesExpanded": false, "properties": {}, "isLoading": true, "offset": 0, "actionURL": "", "progress": 0, "klass": "card card-widget span12", "oozieExpanded": false, "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "size": 12}], "id": "9cf57679-292c-d980-8053-1180a84eaa54", "columns": []}, "klass": "card card-home card-column span12", "enableOozieDropOnBefore": true, "drops": ["temp"], "id": "1920900a-a735-7e66-61d4-23de384e8f62", "size": 12}], "workflow": {"properties": {"job_xml": "", "description": "", "wf1_id": null, "sla_enabled": false, "deployment_dir": "/user/hue/oozie/workspaces/hue-oozie-1445431078.26", "schema_version": "uri:oozie:workflow:0.5", "properties": [], "show_arrows": true, "parameters": [{"name": "oozie.use.system.libpath", "value": true}], "sla": [{"value": false, "key": 
"enabled"}, {"value": "${nominal_time}", "key": "nominal-time"}, {"value": "", "key": "should-start"}, {"value": "${30 * MINUTES}", "key": "should-end"}, {"value": "", "key": "max-duration"}, {"value": "", "key": "alert-events"}, {"value": "", "key": "alert-contact"}, {"value": "", "key": "notification-msg"}, {"value": "", "key": "upstream-apps"}]}, "name": "My real Workflow 1", "versions": ["uri:oozie:workflow:0.4", "uri:oozie:workflow:0.4.5", "uri:oozie:workflow:0.5"], "isDirty": false, "movedNode": null, "linkMapping": {"33430f0f-ebfa-c3ec-f237-3e77efa03d0a": [], "3f107997-04cc-8733-60a9-a4bb62cebffc": ["33430f0f-ebfa-c3ec-f237-3e77efa03d0a"], "17c9c895-5a16-7443-bb81-f34b30b21548": []}, "nodeIds": ["3f107997-04cc-8733-60a9-a4bb62cebffc", "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "17c9c895-5a16-7443-bb81-f34b30b21548"], "nodes": [{"properties": {}, "name": "Start", "children": [{"to": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a"}], "actionParametersFetched": false, "type": "start-widget", "id": "3f107997-04cc-8733-60a9-a4bb62cebffc", "actionParameters": []}, {"properties": {}, "name": "End", "children": [], "actionParametersFetched": false, "type": "end-widget", "id": "33430f0f-ebfa-c3ec-f237-3e77efa03d0a", "actionParameters": []}, {"properties": {"body": "This\\n\\ncontains\\n\\n\\nnew lines.", "bcc": "example@bcc.com", "content_type": "text/plain", "cc": "", "to": "hue@gethue.com", "enableMail": true, "message": "Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]", "subject": "Error on workflow"}, "name": "Kill", "children": [], "actionParametersFetched": false, "type": "kill-widget", "id": "17c9c895-5a16-7443-bb81-f34b30b21548", "actionParameters": []}], "id": 50020, "nodeNamesMapping": {"33430f0f-ebfa-c3ec-f237-3e77efa03d0a": "End", "3f107997-04cc-8733-60a9-a4bb62cebffc": "Start", "17c9c895-5a16-7443-bb81-f34b30b21548": "Kill"}, "uuid": "330c70c8-33fb-16e1-68fb-c42582c7d178"}}"""
    wf = Workflow(data=workflow)
    # Exact string comparison: the newlines inside the <body> element must be
    # preserved verbatim in the generated document.
    assert_equal(u'<workflow-app name="My_real_Workflow_1" xmlns="uri:oozie:workflow:0.5">\n    <start to="End"/>\n    <action name="Kill">\n        <email xmlns="uri:oozie:email-action:0.2">\n            <to>hue@gethue.com</to>\n            <subject>Error on workflow</subject>\n            <body>This\n\ncontains\n\n\nnew lines.</body>\n        </email>\n        <ok to="Kill-kill"/>\n        <error to="Kill-kill"/>\n    </action>\n    <kill name="Kill-kill">\n        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>\n    </kill>\n    <end name="End"/>\n</workflow-app>', wf.to_xml({'output': '/path'}))
def test_job_validate_xml_name(self):
    """update_name() sanitizes and truncates a title into a valid XML workflow name."""
    job = Workflow()

    # Short, already-valid names are passed through unchanged.
    for valid_name in ('a', 'aa'):
        job.update_name(valid_name)
        assert_equal(valid_name, job.validated_name)

    # An invalid leading character is replaced by an underscore.
    job.update_name('%a')
    assert_equal('_a', job.validated_name)

    # Over-long names are truncated to 40 characters.
    job.update_name('a' * 40 + 'z')
    assert_equal(40, len(job.validated_name))

    # Invalid characters anywhere in the name become underscores ('$' is allowed).
    job.update_name('My <...> 1st W$rkflow [With] (Bad) letter$')
    assert_equal('My_______1st_W$rkflow__With___Bad__lette', job.validated_name)
def test_ignore_dead_fork_link(self):
    """A fork child that points at a node missing from the mapping is dropped."""
    # The second child ("to": 2) has no entry in node_mapping and must not
    # produce a <path> element.
    fork_data = {
        'id': 1,
        'type': 'fork',
        'children': [{'to': 1, 'id': 1}, {'to': 2, 'id': 2}],
        'properties': {},
        'name': 'my-fork',
    }
    fork = Node(fork_data)
    mapping = {1: fork}  # Point to ourself; id 2 is deliberately unresolvable.
    expected_tokens = ['<fork', 'name="my-fork">', '<path', 'start="my-fork"', '/>', '</fork>']
    assert_equal(expected_tokens, fork.to_xml(node_mapping=mapping).split())
def test_action_gen_xml_prepare(self):
    """<prepare> entries are only rendered when their path value is non-empty."""
    node_id = u'c59d1947-7ce0-ef34-22b2-d64b9fc5bf9a'
    pig_data = {
        u'type': u'pig-widget',
        u'id': node_id,
        u'name': u'pig-c59d',
        u'properties': {
            u'files': [], u'job_xml': [], u'parameters': [], u'retry_interval': [],
            u'retry_max': [], u'job_properties': [], u'arguments': [],
            u'prepares': [{u'type': u'mkdir', u'value': u'/my_dir'}],
            u'credentials': [], u'script_path': u'my_pig.pig',
            u'sla': [{u'key': u'enabled', u'value': False}, {u'key': u'nominal-time', u'value': u'${nominal_time}'}, {u'key': u'should-start', u'value': u''}, {u'key': u'should-end', u'value': u'${30 * MINUTES}'}, {u'key': u'max-duration', u'value': u''}, {u'key': u'alert-events', u'value': u''}, {u'key': u'alert-contact', u'value': u''}, {u'key': u'notification-msg', u'value': u''}, {u'key': u'upstream-apps', u'value': u''}],
            u'archives': []
        },
        "children": [{"to": node_id}, {"error": node_id}]
    }
    pig_node = Node(pig_data)
    node_mapping = {node_id: pig_node}

    def rendered_tokens():
        # Regenerate the XML and split it into whitespace-stripped tokens.
        return [token.strip() for token in pig_node.to_xml(node_mapping=node_mapping).split()]

    # A mkdir prepare with a real path is rendered under <prepare>.
    tokens = rendered_tokens()
    assert_true(u'<prepare>' in tokens, tokens)
    assert_true(u'<mkdir' in tokens, tokens)
    assert_true(u'path="${nameNode}/my_dir"/>' in tokens, tokens)

    # When the only prepare has an empty path, the whole section is skipped.
    pig_node.data['properties']['prepares'] = [{u'type': u'mkdir', u'value': u''}]
    tokens = rendered_tokens()
    assert_false(u'<prepare>' in tokens, tokens)
    assert_false(u'<mkdir' in tokens, tokens)

    # Mixing a real path with an empty one keeps the former, drops the latter.
    pig_node.data['properties']['prepares'] = [{u'type': u'mkdir', u'value': u'/my_dir'}, {u'type': u'rm', u'value': u''}]
    tokens = rendered_tokens()
    assert_true(u'<prepare>' in tokens, tokens)
    assert_true(u'<mkdir' in tokens, tokens)
    assert_true(u'path="${nameNode}/my_dir"/>' in tokens, tokens)
    assert_false(u'<rm' in tokens, tokens)
def test_upgrade_nodes_in_workflow(self):
    """Loading an old document transparently adds newly-introduced node fields.

    The serialized sqoop action below predates the 'arguments' property;
    get_data() must expose it while leaving the raw stored JSON untouched.
    """
    # Old-format document: Start -> "Sqoop 1" -> End with a Kill node. The
    # sqoop node's properties contain 'parameters' but no 'arguments'.
    wf = Workflow(data="{\"layout\": [{\"oozieRows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Sqoop 1\", \"widgetType\": \"sqoop-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"79774a62-94e3-2ddb-554f-b83640fa5b03\", \"size\": 12}], \"id\": \"0f54ae72-7122-ad7c-fb31-aa715e15a707\", \"columns\": []}], \"rows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"371cf19e-0c45-1e40-2887-5de4033c2a01\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Sqoop 1\", \"widgetType\": \"sqoop-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"79774a62-94e3-2ddb-554f-b83640fa5b03\", \"size\": 12}], \"id\": \"0f54ae72-7122-ad7c-fb31-aa715e15a707\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"40cfacb5-0622-4305-1473-8f70e287668b\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"373c9cc8-c64a-f1ef-5486-f18ec52620e3\", \"columns\": []}], \"oozieEndRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"40cfacb5-0622-4305-1473-8f70e287668b\", \"columns\": []}, \"oozieKillRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"373c9cc8-c64a-f1ef-5486-f18ec52620e3\", \"columns\": []}, \"enableOozieDropOnAfter\": true, \"oozieStartRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"371cf19e-0c45-1e40-2887-5de4033c2a01\", \"columns\": []}, \"klass\": \"card card-home card-column span12\", \"enableOozieDropOnBefore\": true, \"drops\": [\"temp\"], \"id\": \"a8549012-ec27-4686-d71a-c6ff95785ff9\", \"size\": 12}], \"workflow\": {\"properties\": {\"job_xml\": \"\", \"description\": \"\", \"wf1_id\": null, \"sla_enabled\": false, \"deployment_dir\": \"/user/hue/oozie/workspaces/hue-oozie-1438808722.99\", \"schema_version\": \"uri:oozie:workflow:0.5\", \"properties\": [], \"show_arrows\": true, \"parameters\": [{\"name\": \"oozie.use.system.libpath\", \"value\": true}], \"sla\": [{\"value\": false, \"key\": \"enabled\"}, {\"value\": \"${nominal_time}\", \"key\": \"nominal-time\"}, {\"value\": \"\", \"key\": \"should-start\"}, {\"value\": \"${30 * MINUTES}\", \"key\": \"should-end\"}, {\"value\": \"\", \"key\": \"max-duration\"}, {\"value\": \"\", \"key\": \"alert-events\"}, {\"value\": \"\", \"key\": \"alert-contact\"}, {\"value\": \"\", \"key\": \"notification-msg\"}, {\"value\": \"\", \"key\": \"upstream-apps\"}]}, \"name\": \"My Workflow\", \"versions\": [\"uri:oozie:workflow:0.4\", \"uri:oozie:workflow:0.4.5\", \"uri:oozie:workflow:0.5\"], \"isDirty\": true, \"movedNode\": null, \"linkMapping\": {\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": [], \"3f107997-04cc-8733-60a9-a4bb62cebffc\": [\"79774a62-94e3-2ddb-554f-b83640fa5b03\"], \"79774a62-94e3-2ddb-554f-b83640fa5b03\": [\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"], \"17c9c895-5a16-7443-bb81-f34b30b21548\": []}, \"nodeIds\": [\"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"79774a62-94e3-2ddb-554f-b83640fa5b03\"], \"nodes\": [{\"properties\": {}, \"name\": \"Start\", \"children\": [{\"to\": \"79774a62-94e3-2ddb-554f-b83640fa5b03\"}], \"actionParametersFetched\": false, \"type\": \"start-widget\", \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"actionParameters\": []}, {\"properties\": {}, \"name\": \"End\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"end-widget\", \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"actionParameters\": []}, {\"properties\": {\"message\": \"Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]\"}, \"name\": \"Kill\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"kill-widget\", \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"actionParameters\": []}, {\"name\": \"sqoop-7977\", \"actionParametersUI\": [], \"children\": [{\"to\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"}, {\"error\": \"17c9c895-5a16-7443-bb81-f34b30b21548\"}], \"properties\": {\"files\": [], \"job_xml\": \"\", \"parameters\": [], \"job_properties\": [], \"command\": \"import --connect jdbc:hsqldb:file:db.hsqldb --table TT --target-dir hdfs://localhost:8020/user/foo -m 1\", \"archives\": [], \"prepares\": [], \"credentials\": [], \"sla\": [{\"value\": false, \"key\": \"enabled\"}, {\"value\": \"${nominal_time}\", \"key\": \"nominal-time\"}, {\"value\": \"\", \"key\": \"should-start\"}, {\"value\": \"${30 * MINUTES}\", \"key\": \"should-end\"}, {\"value\": \"\", \"key\": \"max-duration\"}, {\"value\": \"\", \"key\": \"alert-events\"}, {\"value\": \"\", \"key\": \"alert-contact\"}, {\"value\": \"\", \"key\": \"notification-msg\"}, {\"value\": \"\", \"key\": \"upstream-apps\"}]}, \"actionParametersFetched\": true, \"type\": \"sqoop-widget\", \"id\": \"79774a62-94e3-2ddb-554f-b83640fa5b03\", \"actionParameters\": []}], \"id\": null, \"nodeNamesMapping\": {\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": \"End\", \"3f107997-04cc-8733-60a9-a4bb62cebffc\": \"Start\", \"79774a62-94e3-2ddb-554f-b83640fa5b03\": \"sqoop-7977\", \"17c9c895-5a16-7443-bb81-f34b30b21548\": \"Kill\"}, \"uuid\": \"b5511e29-c9cc-7f40-0d3a-6dd768f3b1e9\"}}")
    # The raw stored JSON still reflects the old schema.
    assert_true('parameters' in json.loads(wf.data)['workflow']['nodes'][3]['properties'], wf.data)
    assert_false('arguments' in json.loads(wf.data)['workflow']['nodes'][3]['properties'], wf.data)  # Does not exist yet
    data = wf.get_data()
    assert_true('parameters' in data['workflow']['nodes'][3]['properties'], wf.data)
    assert_true('arguments' in data['workflow']['nodes'][3]['properties'], wf.data)  # New field transparently added
def test_action_gen_xml_java_opts(self):
    """java_opts must be flattened to its option string inside <java-opts>.

    Regression check: the property is stored as a list of {'value': ...}
    dicts; the serializer must emit the raw option string, not the Python
    repr of that list.
    """
    # Contains java_opts as [{'value': '-debug -Da -Db=1'}].
    data = {u'name': u'java-fc05', u'properties': {u'files': [], u'job_xml': [], u'jar_path': u'/user/romain/hadoop-mapreduce-examples.jar', u'java_opts': [{u'value': u'-debug -Da -Db=1'}], u'retry_max': [], u'retry_interval': [], u'job_properties': [], u'capture_output': False, u'main_class': u'MyClass', u'arguments': [], u'prepares': [], u'credentials': [], u'sla': [{u'value': False, u'key': u'enabled'}, {u'value': u'${nominal_time}', u'key': u'nominal-time'}, {u'value': u'', u'key': u'should-start'}, {u'value': u'${30 * MINUTES}', u'key': u'should-end'}, {u'value': u'', u'key': u'max-duration'}, {u'value': u'', u'key': u'alert-events'}, {u'value': u'', u'key': u'alert-contact'}, {u'value': u'', u'key': u'notification-msg'}, {u'value': u'', u'key': u'upstream-apps'}], u'archives': []}, u'actionParametersFetched': False, u'id': u'fc05d86f-9f07-7a8d-6256-e6abfa87cf77', u'type': u'java-widget', u'children': [{u'to': u'33430f0f-ebfa-c3ec-f237-3e77efa03d0a'}, {u'error': u'17c9c895-5a16-7443-bb81-f34b30b21548'}], u'actionParameters': []}
    java_node = Node(data)
    node_mapping = {"fc05d86f-9f07-7a8d-6256-e6abfa87cf77": java_node, "33430f0f-ebfa-c3ec-f237-3e77efa03d0a": java_node, "17c9c895-5a16-7443-bb81-f34b30b21548": java_node}  # Last 2 are actually kill and ok nodes
    xml = java_node.to_xml(node_mapping=node_mapping)
    # Compare whole stripped lines so the full <java-opts> element is checked.
    xml = [row.strip() for row in xml.split('\n')]
    assert_false("<java-opts>[{u'value': u'-debug -Da -Db=1'}]</java-opts>" in xml, xml)  # list repr must not leak
    assert_true("<java-opts>-debug -Da -Db=1</java-opts>" in xml, xml)
def test_workflow_create_single_action_data(self):
workflow = Workflow(data="{\"layout\": [{\"oozieRows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"MapReduce job\", \"widgetType\": \"mapreduce-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"size\": 12}], \"id\": \"e2caca14-8afc-d7e0-287c-88accd0b4253\", \"columns\": []}], \"rows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"ff63ee3f-df54-2fa3-477b-65f5e0f0632c\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"MapReduce job\", \"widgetType\": \"mapreduce-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"size\": 12}], \"id\": \"e2caca14-8afc-d7e0-287c-88accd0b4253\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, 
\"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"6a13d869-d04c-8431-6c5c-dbe67ea33889\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"e3b56553-7a4f-43d2-b1e2-4dc433280095\", \"columns\": []}], \"oozieEndRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"6a13d869-d04c-8431-6c5c-dbe67ea33889\", \"columns\": []}, \"oozieKillRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": 
\"e3b56553-7a4f-43d2-b1e2-4dc433280095\", \"columns\": []}, \"enableOozieDropOnAfter\": true, \"oozieStartRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"ff63ee3f-df54-2fa3-477b-65f5e0f0632c\", \"columns\": []}, \"klass\": \"card card-home card-column span12\", \"enableOozieDropOnBefore\": true, \"drops\": [\"temp\"], \"id\": \"0c1908e7-0096-46e7-a16b-b17b1142a730\", \"size\": 12}], \"workflow\": {\"properties\": {\"job_xml\": \"\", \"description\": \"\", \"wf1_id\": null, \"sla_enabled\": false, \"deployment_dir\": \"/user/hue/oozie/workspaces/hue-oozie-1430228904.58\", \"schema_version\": \"uri:oozie:workflow:0.5\", \"sla\": [{\"key\": \"enabled\", \"value\": false}, {\"key\": \"nominal-time\", \"value\": \"${nominal_time}\"}, {\"key\": \"should-start\", \"value\": \"\"}, {\"key\": \"should-end\", \"value\": \"${30 * MINUTES}\"}, {\"key\": \"max-duration\", \"value\": \"\"}, {\"key\": \"alert-events\", \"value\": \"\"}, {\"key\": \"alert-contact\", \"value\": \"\"}, {\"key\": \"notification-msg\", \"value\": \"\"}, {\"key\": \"upstream-apps\", \"value\": \"\"}], \"show_arrows\": true, \"parameters\": [{\"name\": \"oozie.use.system.libpath\", \"value\": true}], \"properties\": []}, \"name\": \"My Workflow\", \"versions\": [\"uri:oozie:workflow:0.4\", \"uri:oozie:workflow:0.4.5\", \"uri:oozie:workflow:0.5\"], \"isDirty\": true, \"movedNode\": null, \"linkMapping\": {\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\": [\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"], \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": [], 
\"3f107997-04cc-8733-60a9-a4bb62cebffc\": [\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"], \"17c9c895-5a16-7443-bb81-f34b30b21548\": []}, \"nodeIds\": [\"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"], \"nodes\": [{\"properties\": {}, \"name\": \"Start\", \"children\": [{\"to\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"}], \"actionParametersFetched\": false, \"type\": \"start-widget\", \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"actionParameters\": []}, {\"properties\": {}, \"name\": \"End\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"end-widget\", \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"actionParameters\": []}, {\"properties\": {\"message\": \"Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]\"}, \"name\": \"Kill\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"kill-widget\", \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"actionParameters\": []}, {\"properties\": {\"retry_max\": [{\"value\": \"5\"}], \"files\": [], \"job_xml\": \"\", \"jar_path\": \"my_jar\", \"job_properties\": [{\"name\": \"prop_1_name\", \"value\": \"prop_1_value\"}], \"archives\": [], \"prepares\": [], \"credentials\": [], \"sla\": [{\"key\": \"enabled\", \"value\": false}, {\"key\": \"nominal-time\", \"value\": \"${nominal_time}\"}, {\"key\": \"should-start\", \"value\": \"\"}, {\"key\": \"should-end\", \"value\": \"${30 * MINUTES}\"}, {\"key\": \"max-duration\", \"value\": \"\"}, {\"key\": \"alert-events\", \"value\": \"\"}, {\"key\": \"alert-contact\", \"value\": \"\"}, {\"key\": \"notification-msg\", \"value\": \"\"}, {\"key\": \"upstream-apps\", \"value\": \"\"}]}, \"name\": \"mapreduce-0cf2\", \"children\": [{\"to\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"}, {\"error\": \"17c9c895-5a16-7443-bb81-f34b30b21548\"}], \"actionParametersFetched\": false, \"type\": \"mapreduce-widget\", \"id\": 
\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"actionParameters\": []}], \"id\": 50019, \"nodeNamesMapping\": {\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\": \"mapreduce-0cf2\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": \"End\", \"3f107997-04cc-8733-60a9-a4bb62cebffc\": \"Start\", \"17c9c895-5a16-7443-bb81-f34b30b21548\": \"Kill\"}, \"uuid\": \"084f4d4c-00f1-62d2-e27e-e153c1f9acfb\"}}")
single_action_wf_data = workflow.create_single_action_workflow_data('0cf2d5d5-2315-0bda-bd53-0eec257e943f')
single_action_wf = Workflow(data=single_action_wf_data)
assert_true(len(single_action_wf.nodes) == 4)
# Validating DAG: Start -> node -> Kill/End
_data = json.loads(single_action_wf_data)
start_node = [node for node in _data['workflow']['nodes'] if node['name'] == 'Start'][0]
submit_node = [node for node in _data['workflow']['nodes'] if node['id'] == '0cf2d5d5-2315-0bda-bd53-0eec257e943f'][0]
end_node = [node for node in _data['workflow']['nodes'] if node['name'] == 'End'][0]
kill_node = [node for node in _data['workflow']['nodes'] if node['name'] == 'Kill'][0]
assert_true(submit_node['id'] in str(start_node['children']))
assert_true(end_node['id'] in str(submit_node['children']))
assert_true(kill_node['id'] in str(submit_node['children']))
def test_submit_single_action(self):
wf_doc = save_temp_workflow(MockOozieApi.JSON_WORKFLOW_LIST[5], self.user)
reset = ENABLE_V2.set_for_testing(True)
try:
response = self.c.get(reverse('oozie:submit_single_action', args=[wf_doc.id, '3f107997-04cc-8733-60a9-a4bb62cebabc']))
assert_equal([{'name':'Dryrun', 'value': False}, {'name':'ls_arg', 'value': '-l'}], response.context['params_form'].initial)
except Exception, ex:
logging.exception(ex)
finally:
reset()
wf_doc.delete()
def test_list_bundles_page(self):
response = self.c.get(reverse('oozie:list_editor_bundles'))
assert_true('bundles_json' in response.context, response.context)
def test_workflow_dependencies(self):
wf_doc1 = save_temp_workflow(MockOozieApi.JSON_WORKFLOW_LIST[5], self.user)
# Add history dependency
wf_doc1.is_history = True
wf_doc1.dependencies.add(wf_doc1)
# Add sub-workflow dependency
wf_doc2 = save_temp_workflow(MockOozieApi.JSON_WORKFLOW_LIST[4], self.user)
wf_doc1.dependencies.add(wf_doc2)
# Add coordinator dependency
data = {
'id': None,
'uuid': None,
'name': 'My Coordinator',
'variables': [], # Aka workflow parameters
'properties': {
'description': '',
'deployment_dir': '',
'schema_version': 'uri:oozie:coordinator:0.2',
'frequency_number': 1,
'frequency_unit': 'days',
'cron_frequency': '0 0 * * *',
'cron_advanced': False,
'timezone': '',
'start': '${start_date}',
'end': '${end_date}',
'workflow': None,
'timeout': None,
'concurrency': None,
'execution': None,
'throttle': None,
'job_xml': '',
'credentials': [],
'parameters': [
{'name': 'oozie.use.system.libpath', 'value': True},
{'name': 'start_date', 'value': ''},
{'name': 'end_date', 'value': ''}
],
'sla': WorkflowConfiguration.SLA_DEFAULT
}
}
wf_doc3 = Document2.objects.create(name='test', type='oozie-coordinator2', owner=User.objects.get(username='test'), data=data)
wf_doc1.dependencies.add(wf_doc3)
assert_true(len(wf_doc1.dependencies.all()) == 3)
wf_doc1.save()
# Validating dependencies after saving the workflow
assert_true(len(wf_doc1.dependencies.all()) == 3)
assert_true(len(wf_doc1.dependencies.filter(type='oozie-coordinator2')) > 0)
assert_true(len(wf_doc1.dependencies.filter(Q(is_history=False) & Q(type='oozie-workflow2'))) > 0)
assert_true(len(wf_doc1.dependencies.filter(Q(is_history=True) & Q(type='oozie-workflow2'))) > 0)
wf_doc1.delete()
wf_doc2.delete()
wf_doc3.delete()
def test_editor_access_permissions(self):
group = 'no_editor'
try:
# Block editor section
response = self.c.get(reverse('oozie:list_editor_workflows'))
assert_equal(response.status_code, 200)
response = self.c.get(reverse('oozie:list_workflows'))
assert_equal(response.status_code, 200)
add_permission('test', 'no_editor', 'disable_editor_access', 'oozie')
response = self.c.get(reverse('oozie:list_editor_workflows'))
assert_equal(response.status_code, 401)
response = self.c.get(reverse('oozie:list_workflows'))
assert_equal(response.status_code, 200)
# Admin are not affected
admin = make_logged_in_client('admin', 'admin', is_superuser=True, recreate=True, groupname=group)
response = admin.get(reverse('oozie:list_editor_workflows'))
assert_equal(response.status_code, 200)
response = admin.get(reverse('oozie:list_workflows'))
assert_equal(response.status_code, 200)
finally:
remove_from_group("test", group)
def test_list_editor_workflows(self):
wf_doc = save_temp_workflow(MockOozieApi.JSON_WORKFLOW_LIST[5], self.user)
reset = ENABLE_V2.set_for_testing(True)
try:
response = self.c.get(reverse('oozie:list_editor_workflows'))
assert_equal(response.status_code, 200)
data = json.loads(response.context['workflows_json'])
uuids = [doc['uuid'] for doc in data]
assert_true(wf_doc.uuid in uuids, data)
# Trash workflow and verify it no longer appears in list
response = self.c.post('/desktop/api2/doc/delete', {'uuid': json.dumps(wf_doc.uuid)})
response = self.c.get(reverse('oozie:list_editor_workflows'))
assert_equal(response.status_code, 200)
data = json.loads(response.context['workflows_json'])
uuids = [doc['uuid'] for doc in data]
assert_false(wf_doc.uuid in uuids, data)
finally:
reset()
wf_doc.delete()
def test_workflow_properties(self):
# Test that a new workflow will be initialized with default properties if no saved configs exist
wf = Workflow(user=self.user)
data = json.loads(wf.data)
assert_equal(data['workflow']['properties'], Workflow.get_workflow_properties_for_user(self.user))
# Setup a test Default configuration, NOTE: this is an invalid format for testing only
properties = [
{
'multiple': False,
'value': '/user/test/oozie',
'nice_name': 'Workspace',
'key': 'deployment_dir',
'help_text': 'Specify the deployment directory.',
'type': 'hdfs-files'
}, {
'multiple': True,
'value': [
{
'value': 'test',
'key': 'mapred.queue.name'
}
],
'nice_name': 'Hadoop Properties',
'key': 'properties',
'help_text': 'Hadoop configuration properties.',
'type': 'settings'
}
]
config = DefaultConfiguration(app=WorkflowConfiguration.APP_NAME, properties=json.dumps(properties), is_default=True)
config.save()
wf_props = config.properties_dict
wf_props.update({'wf1_id': None, 'description': ''})
# Test that a new workflow will be initialized with Default saved config if it exists
wf = Workflow(user=self.user)
data = json.loads(wf.data)
assert_equal(data['workflow']['properties'], wf_props)
# Test that a new workflow will be initialized with Group saved config if it exists
properties = [
{
'multiple': True,
'value': [
{
'value': 'org.myorg.WordCount.Map',
'key': 'mapred.mapper.class'
},
{
'value': 'org.myorg.WordCount.Reduce',
'key': 'mapred.reducer.class'
}
],
'nice_name': 'Hadoop Properties',
'key': 'properties',
'help_text': 'Hadoop configuration properties.',
'type': 'settings'
}
]
config = DefaultConfiguration(app=WorkflowConfiguration.APP_NAME,
properties=json.dumps(properties),
is_default=False,
group=self.user.groups.first())
config.save()
wf_props = config.properties_dict
wf_props.update({'wf1_id': None, 'description': ''})
# Test that a new workflow will be initialized with Default saved config if it exists
wf = Workflow(user=self.user)
data = json.loads(wf.data)
assert_equal(data['workflow']['properties'], wf_props)
class TestExternalWorkflowGraph(object):
def setUp(self):
self.wf = Workflow()
self.c = make_logged_in_client(is_superuser=False)
grant_access("test", "test", "oozie")
add_to_group("test")
self.user = User.objects.get(username='test')
def test_graph_generation_from_xml(self):
f = open('apps/oozie/src/oozie/test_data/xslt2/test-workflow.xml')
self.wf.definition = f.read()
self.node_list = [{u'node_type': u'start', u'ok_to': u'fork-68d4', u'name': u''}, {u'node_type': u'kill', u'ok_to': u'', u'name': u'Kill'}, {u'path2': u'shell-0f44', u'node_type': u'fork', u'ok_to': u'', u'name': u'fork-68d4', u'path1': u'subworkflow-a13f'}, {u'node_type': u'join', u'ok_to': u'End', u'name': u'join-775e'}, {u'node_type': u'end', u'ok_to': u'', u'name': u'End'}, {u'subworkflow': {u'app-path': u'${nameNode}/user/hue/oozie/deployments/_admin_-oozie-50001-1427488969.48'}, u'node_type': u'sub-workflow', u'ok_to': u'join-775e', u'name': u'subworkflow-a13f', u'error_to': u'Kill'}, {u'shell': {u'command': u'ls'}, u'node_type': u'shell', u'ok_to': u'join-775e', u'name': u'shell-0f44', u'error_to': u'Kill'}]
assert_equal(self.node_list, generate_v2_graph_nodes(self.wf.definition))
def test_get_graph_adjacency_list(self):
self.node_list = [{u'node_type': u'start', u'ok_to': u'fork-68d4', u'name': u''}, {u'node_type': u'kill', u'ok_to': u'', u'name': u'kill'}, {u'path2': u'shell-0f44', u'node_type': u'fork', u'ok_to': u'', u'name': u'fork-68d4', u'path1': u'subworkflow-a13f'}, {u'node_type': u'join', u'ok_to': u'end', u'name': u'join-775e'}, {u'node_type': u'end', u'ok_to': u'', u'name': u'end'}, {u'node_type': u'sub-workflow', u'ok_to': u'join-775e', u'sub-workflow': {u'app-path': u'${nameNode}/user/hue/oozie/deployments/_admin_-oozie-50001-1427488969.48'}, u'name': u'subworkflow-a13f', u'error_to': u'kill'}, {u'shell': {u'command': u'ls'}, u'node_type': u'shell', u'ok_to': u'join-775e', u'name': u'shell-0f44', u'error_to': u'kill'}]
adj_list = _create_graph_adjaceny_list(self.node_list)
assert_true(len(adj_list) == 7)
assert_true('subworkflow-a13f' in adj_list.keys())
assert_true(adj_list['shell-0f44']['shell']['command'] == 'ls')
assert_equal(adj_list['fork-68d4'], {u'path2': u'shell-0f44', u'node_type': u'fork', u'ok_to': u'', u'name': u'fork-68d4', u'path1': u'subworkflow-a13f'})
def test_get_hierarchy_from_adj_list(self):
self.wf.definition = """<workflow-app name="ls-4thread" xmlns="uri:oozie:workflow:0.5">
<start to="fork-fe93"/>
<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<action name="shell-5429">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-7f80"/>
<error to="Kill"/>
</action>
<action name="shell-bd90">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-7f80"/>
<error to="Kill"/>
</action>
<fork name="fork-fe93">
<path start="shell-5429" />
<path start="shell-bd90" />
<path start="shell-d64c" />
<path start="shell-d8cc" />
</fork>
<join name="join-7f80" to="End"/>
<action name="shell-d64c">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-7f80"/>
<error to="Kill"/>
</action>
<action name="shell-d8cc">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-7f80"/>
<error to="Kill"/>
</action>
<end name="End"/>
</workflow-app>"""
node_list = generate_v2_graph_nodes(self.wf.definition)
adj_list = _create_graph_adjaceny_list(node_list)
node_hierarchy = ['start']
_get_hierarchy_from_adj_list(adj_list, adj_list['start']['ok_to'], node_hierarchy)
assert_equal(node_hierarchy, ['start', [u'fork-fe93', [[u'shell-bd90'], [u'shell-d64c'], [u'shell-5429'], [u'shell-d8cc']], u'join-7f80'], ['Kill'], ['End']])
def test_gen_workflow_data_from_xml(self):
self.wf.definition = """<workflow-app name="fork-fork-test" xmlns="uri:oozie:workflow:0.5">
<start to="fork-949d"/>
<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<action name="shell-eadd">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-1a0f"/>
<error to="Kill"/>
</action>
<action name="shell-f4c1">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-3bba"/>
<error to="Kill"/>
</action>
<fork name="fork-949d">
<path start="fork-e5fa" />
<path start="shell-3dd5" />
</fork>
<join name="join-ca1a" to="End"/>
<action name="shell-ef70">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-1a0f"/>
<error to="Kill"/>
</action>
<fork name="fork-37d7">
<path start="shell-eadd" />
<path start="shell-ef70" />
</fork>
<join name="join-1a0f" to="join-ca1a"/>
<action name="shell-3dd5">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="fork-37d7"/>
<error to="Kill"/>
</action>
<action name="shell-2ba8">
<shell xmlns="uri:oozie:shell-action:0.1">
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<exec>ls</exec>
<capture-output/>
</shell>
<ok to="join-3bba"/>
<error to="Kill"/>
</action>
<fork name="fork-e5fa">
<path start="shell-f4c1" />
<path start="shell-2ba8" />
</fork>
<join name="join-3bba" to="join-ca1a"/>
<end name="End"/>
</workflow-app>"""
workflow_data = Workflow.gen_workflow_data_from_xml(self.user, self.wf)
assert_true(len(workflow_data['layout'][0]['rows']) == 6)
assert_true(len(workflow_data['workflow']['nodes']) == 14)
assert_equal(workflow_data['layout'][0]['rows'][1]['widgets'][0]['widgetType'], 'fork-widget')
assert_equal(workflow_data['workflow']['nodes'][0]['name'], 'start-3f10')
def test_gen_workflow_data_for_email(self):
self.wf.definition = """<workflow-app name="My_Workflow" xmlns="uri:oozie:workflow:0.5">
<start to="email-0377"/>
<kill name="Kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<action name="email-0377">
<email xmlns="uri:oozie:email-action:0.2">
<to>example@example.com</to>
<subject>sub</subject>
<bcc>example@bcc.com</bcc>
<body>bod</body>
<content_type>text/plain</content_type>
</email>
<ok to="End"/>
<error to="Kill"/>
</action>
<end name="End"/>
</workflow-app>"""
workflow_data = Workflow.gen_workflow_data_from_xml(self.user, self.wf)
assert_true(len(workflow_data['layout'][0]['rows']) == 4)
assert_true(len(workflow_data['workflow']['nodes']) == 4)
assert_equal(workflow_data['layout'][0]['rows'][1]['widgets'][0]['widgetType'], 'email-widget')
assert_equal(workflow_data['workflow']['nodes'][0]['name'], 'start-3f10') |
from error import *
from models import *
class Condition(object):
"""Represents a condition"""
def __init__(self):
pass
def __str__(self):
return "condition"
class SayCondition(Condition):
def __init__(self, phrase):
self.phrase = phrase
def eval(self, phrase):
return self.phrase == phrase
def __str__(self):
return f"'{self.phrase}'"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.phrase == other.phrase
comparison_ops = {
"greater than": ">",
"less than": "<",
"greater than or equal to": ">=",
"less than or equal to": "<="
}
class UntilStopCondition(Condition):
"""Loop-only condition that stops the loop only if user says stop"""
def eval(self):
return True
def __str__(self):
return "you say 'stop'"
def to_nl(self):
return self.__str__()
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return True
class EqualityCondition(Condition):
"""Represents an equality condition"""
def __init__(self, variable, value, negation=False):
# Variable to retrieve when evaluating
self.variable = variable
# Value to compare against
self.value = value
# Whether is is == or !=
self.negation = negation
def eval(self, variables):
"""
Evaluate the variable
Assumes that the variable to evaluate is in variables
"""
variable = variables[self.variable.variable]
value = variables[self.value.variable] if isinstance(self.value, ValueOf) else self.value
if type(value) != type(variable):
if isinstance(value, str) or isinstance(variable, str):
raise ExecutionError(f"The values {value} and {variable} cannot be compared.")
return variable != value if self.negation else variable == value
def __str__(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"{self.variable.variable} {"!" if self.negation else "="}= {value}"
def to_nl(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"variable {self.variable.variable} is {"not " if self.negation else ""}equal to {value}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.variable == other.variable and self.value == other.value and self.negation == other.negation
class ComparisonCondition(Condition):
"""Represents an comparison condition"""
def __init__(self, variable, op, value):
# Variable to retrieve when evaluating
self.variable = variable
# Value to compare against
self.value = value
# Operator to evaluate with - includes >, >=, <, <=
self.op = op
def eval(self, variables):
"""
Evaluate the variable
Assumes that the variable to evaluate is in variables
"""
variable = variables[self.variable.variable]
value = variables[self.value.variable] if isinstance(self.value, ValueOf) else self.value
if type(value) != type(variable):
if isinstance(value, str) or isinstance(variable, str):
raise ExecutionError(f"The values {value} and {variable} cannot be compared.")
if self.op == "greater than":
return variable > value
elif self.op == "less than":
return variable < value
elif self.op == "greater than or equal to":
return variable >= value
elif self.op == "less than or equal to":
return variable <= value
return False
def __str__(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"{self.variable.variable} {comparison_ops.get(self.op)} {value}"
def to_nl(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"variable {self.variable.variable} is {self.op} {value}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.variable == other.variable and self.value == other.value and self.op == other.op
| from error import *
from models import *
class Condition(object):
"""Represents a condition"""
def __init__(self):
pass
def __str__(self):
return "condition"
class SayCondition(Condition):
def __init__(self, phrase):
self.phrase = phrase
def eval(self, phrase):
return self.phrase == phrase
def __str__(self):
return f"'{self.phrase}'"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.phrase == other.phrase
comparison_ops = {
"greater than": ">",
"less than": "<",
"greater than or equal to": ">=",
"less than or equal to": "<="
}
class UntilStopCondition(Condition):
"""Loop-only condition that stops the loop only if user says stop"""
def eval(self):
return True
def __str__(self):
return "you say 'stop'"
def to_nl(self):
return self.__str__()
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return True
class EqualityCondition(Condition):
"""Represents an equality condition"""
def __init__(self, variable, value, negation=False):
# Variable to retrieve when evaluating
self.variable = variable
# Value to compare against
self.value = value
# Whether is is == or !=
self.negation = negation
def eval(self, variables):
"""
Evaluate the variable
Assumes that the variable to evaluate is in variables
"""
variable = variables[self.variable.variable]
value = variables[self.value.variable] if isinstance(self.value, ValueOf) else self.value
if type(value) != type(variable):
if isinstance(value, str) or isinstance(variable, str):
raise ExecutionError(f"The values {value} and {variable} cannot be compared.")
return variable != value if self.negation else variable == value
def __str__(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"{self.variable.variable} {'!' if self.negation else '='}= {value}"
def to_nl(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"variable {self.variable.variable} is {'not ' if self.negation else ''}equal to {value}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.variable == other.variable and self.value == other.value and self.negation == other.negation
class ComparisonCondition(Condition):
"""Represents an comparison condition"""
def __init__(self, variable, op, value):
# Variable to retrieve when evaluating
self.variable = variable
# Value to compare against
self.value = value
# Operator to evaluate with - includes >, >=, <, <=
self.op = op
def eval(self, variables):
"""
Evaluate the variable
Assumes that the variable to evaluate is in variables
"""
variable = variables[self.variable.variable]
value = variables[self.value.variable] if isinstance(self.value, ValueOf) else self.value
if type(value) != type(variable):
if isinstance(value, str) or isinstance(variable, str):
raise ExecutionError(f"The values {value} and {variable} cannot be compared.")
if self.op == "greater than":
return variable > value
elif self.op == "less than":
return variable < value
elif self.op == "greater than or equal to":
return variable >= value
elif self.op == "less than or equal to":
return variable <= value
return False
def __str__(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"{self.variable.variable} {comparison_ops.get(self.op)} {value}"
def to_nl(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"variable {self.variable.variable} is {self.op} {value}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.variable == other.variable and self.value == other.value and self.op == other.op
|
import os
from time import sleep
from slack import RTMClient
from slack.errors import SlackApiError
@RTMClient.run_on(event='message')
def say_hello(**payload):
target_channel_name = os.environ["TARGET_CHANNEL_NAME"]
target_channel_id = os.environ["TARGET_CHANNEL_ID"]
data = payload['data']
web_client = payload['web_client']
rtm_client = payload['rtm_client']
# bot自身の応答(userが未設定)でなければ代理投稿開始
if 'text' in data and 'user' in data:
channel_id = data['channel']
thread_ts = data['ts']
#01 投稿者に返信する
try:
response1 = web_client.chat_postMessage(
channel=channel_id,
text=f"{target_channel_name}に匿名で投稿しました。",
thread_ts=thread_ts,
icon_emoji=":penguin:"
)
except SlackApiError as e:
# You will get a SlackApiError if "ok" is Falseß
assert e.response["ok"] is False
assert e.response["error"] # str like 'invalid_auth', 'channel_not_found'
print(f"Got an error on reply: {e.response["error"]}")
#02 1秒待ってから名無しで投稿する
sleep(1)
try:
response2 = web_client.chat_postMessage(
channel=target_channel_id,
text=data["text"],
username="名無しさん",
icon_emoji=":penguin:"
)
except SlackApiError as e:
# You will get a SlackApiError if "ok" is False
assert e.response["ok"] is False
assert e.response["error"] # str like 'invalid_auth', 'channel_not_found'
print(f"Got an error on target post: {e.response["error"]}")
rtm_client = RTMClient(token=os.environ["SLACK_API_TOKEN"])
rtm_client.start() | import os
from time import sleep
from slack import RTMClient
from slack.errors import SlackApiError
@RTMClient.run_on(event='message')
def say_hello(**payload):
    """Anonymously forward a posted message to the target channel.

    Handles each RTM 'message' event by acknowledging the author in a
    thread, then re-posting the message text to the target channel without
    the author's identity.
    """
    target_channel_name = os.environ["TARGET_CHANNEL_NAME"]
    target_channel_id = os.environ["TARGET_CHANNEL_ID"]
    event = payload['data']
    client = payload['web_client']
    rtm = payload['rtm_client']
    # The bot's own posts carry no 'user' field — ignore those events.
    if 'text' not in event or 'user' not in event:
        return
    # Step 1: acknowledge the author inside a thread on the original message.
    try:
        client.chat_postMessage(
            channel=event['channel'],
            text=f"{target_channel_name}に匿名で投稿しました。",
            thread_ts=event['ts'],
            icon_emoji=":penguin:"
        )
    except SlackApiError as e:
        # The client raises SlackApiError whenever the API reports ok=False.
        assert e.response["ok"] is False
        assert e.response["error"]  # e.g. 'invalid_auth', 'channel_not_found'
        print(f"Got an error on reply: {e.response['error']}")
    # Step 2: pause briefly, then repost the text under an anonymous name.
    sleep(1)
    try:
        client.chat_postMessage(
            channel=target_channel_id,
            text=event["text"],
            username="名無しさん",
            icon_emoji=":penguin:"
        )
    except SlackApiError as e:
        assert e.response["ok"] is False
        assert e.response["error"]  # e.g. 'invalid_auth', 'channel_not_found'
        print(f"Got an error on target post: {e.response['error']}")
# Connect to Slack with the bot token and block on the RTM event loop.
rtm_client = RTMClient(token=os.environ["SLACK_API_TOKEN"])
rtm_client.start()
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
from functools import partial
import inspect
import itertools
import operator
from typing import cast, Iterator, Optional, List, Tuple
import unittest
from unittest import SkipTest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
try:
import numpy_dispatch
except ImportError:
numpy_dispatch = None
import jax
import jax.ops
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax._src import dtypes
from jax import tree_util
from jax.interpreters import xla
from jax.test_util import check_grads
from jax._src.util import prod
from jax._src.numpy.util import _parse_numpydoc, ParsedDoc
from jax.config import config
# Read absl/jax flags (e.g. --jax_enable_x64) from the command line.
config.parse_flags_with_absl()
FLAGS = config.FLAGS
# (major, minor, micro) of the installed numpy, for version-gated tests.
numpy_version = tuple(map(int, np.__version__.split('.')[:3]))
# Shape pools shared by the parameterized tests below.  The jtu.*_SCALAR_SHAPE
# entries are sentinels standing in for scalar (non-array) arguments.
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
one_dim_array_shapes = [(1,), (6,), (12,)]
empty_array_shapes = [(0,), (0, 4), (3, 0),]
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
# Dtype pools, grouped by category, taken from jax's test utilities.
float_dtypes = jtu.dtypes.all_floating
complex_dtypes = jtu.dtypes.complex
int_dtypes = jtu.dtypes.all_integer
unsigned_dtypes = jtu.dtypes.all_unsigned
bool_dtypes = jtu.dtypes.boolean
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes
# The single Python scalar type of each category (bool, int, float, complex).
python_scalar_dtypes = [jnp.bool_, jnp.int_, jnp.float_, jnp.complex_]
# uint64 is problematic because with any uint type it promotes to float:
int_dtypes_no_uint64 = [d for d in int_dtypes + unsigned_dtypes if d != np.uint64]
def _valid_dtypes_for_shape(shape, dtypes):
  """Restrict *dtypes* to those representable for *shape*.

  Python scalars exist for only one type per category (float, bool, ...),
  so for the Python-scalar pseudo-shape only those types are kept.
  """
  if shape is not jtu.PYTHON_SCALAR_SHAPE:
    return dtypes
  return [t for t in dtypes if t in python_scalar_dtypes]
def _shape_and_dtypes(shapes, dtypes):
  """Yield every (shape, dtype) pair where the dtype is valid for the shape."""
  for candidate_shape in shapes:
    for candidate_dtype in _valid_dtypes_for_shape(candidate_shape, dtypes):
      yield (candidate_shape, candidate_dtype)
def _compatible_shapes(shape):
  """Shapes guaranteed to broadcast against *shape*: its trailing suffixes.

  Scalar shapes (sentinels or zero-dim) are only compatible with themselves.
  """
  scalar_like = shape in scalar_shapes or np.ndim(shape) == 0
  if scalar_like:
    return [shape]
  return (shape[start:] for start in range(len(shape) + 1))
def _get_y_shapes(y_dtype, shape, rowvar):
# Helper function for testCov.
if y_dtype is None:
return [None]
if len(shape) == 1:
return [shape]
elif rowvar or shape[0] == 1:
return [(1, shape[-1]), (2, shape[-1]), (5, shape[-1])]
return [(shape[0], 1), (shape[0], 2), (shape[0], 5)]
# Specification of one op test: the numpy/jnp op name, its arity, the dtype
# and shape pools to draw from, the RNG factory, differentiation modes, and
# comparison options (dtype checking, tolerances, inexact promotion).
OpRecord = collections.namedtuple(
  "OpRecord",
  ["name", "nargs", "dtypes", "shapes", "rng_factory", "diff_modes",
   "test_name", "check_dtypes", "tolerance", "inexact"])
def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
              test_name=None, check_dtypes=True,
              tolerance=None, inexact=False):
  """Construct an OpRecord; `test_name` defaults to the op's name."""
  return OpRecord(name, nargs, dtypes, shapes, rng_factory, diff_modes,
                  test_name or name, check_dtypes, tolerance, inexact)
# Ops whose jnp implementation maps one-to-one onto the same-named numpy op;
# each record drives one family of generated testOp cases below.
JAX_ONE_TO_ONE_OP_RECORDS = [
    op_record("abs", 1, number_dtypes + unsigned_dtypes + bool_dtypes,
              all_shapes, jtu.rand_default, ["rev"]),
    op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("ceil", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("float_power", 2, inexact_dtypes, all_shapes,
              partial(jtu.rand_default, scale=1), ["rev"],
              tolerance={jnp.bfloat16: 1e-2, np.float32: 1e-3,
                         np.float64: 1e-12, np.complex64: 2e-4,
                         np.complex128: 1e-12}, check_dtypes=False),
    op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("floor", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("greater", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("greater_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("i0", 1, float_dtypes, all_shapes, jtu.rand_default, [],
              check_dtypes=False),
    op_record("ldexp", 2, int_dtypes, all_shapes, jtu.rand_default, [], check_dtypes=False),
    op_record("less", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("less_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("maximum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("minimum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("nextafter", 2, [f for f in float_dtypes if f != jnp.bfloat16],
              all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0),
    op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("array_equiv", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),
    op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("signbit", 1, default_dtypes + bool_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"]),
    op_record("trunc", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("trunc", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("tan", 1, number_dtypes, all_shapes,
              partial(jtu.rand_uniform, low=-1.5, high=1.5), ["rev"],
              inexact=True),
    op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    # TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
    # ~float32 precision.
    # TODO(b/143135720): on GPU, tanh has only ~float32 precision.
    op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              tolerance={np.float64: 1e-7, np.complex128: 1e-7},
              inexact=True),
    op_record("arcsin", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arccos", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arctan", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True, tolerance={np.complex64: 2E-4, np.complex128: 2E-14}),
    op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True, tolerance={np.complex64: 2E-2, np.complex128: 2E-12}),
    op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True, tolerance={np.float64: 1e-9}),
]
# Ops whose jnp implementation is built from several lax primitives (or whose
# numpy counterpart differs slightly); still compared numerically against numpy.
JAX_COMPOUND_OP_RECORDS = [
    # angle has inconsistent 32/64-bit return types across numpy versions.
    op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default, [],
              check_dtypes=False, inexact=True),
    op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_some_inf, ["rev"],
              inexact=True),
    op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"],
              inexact=True),
    op_record("divmod", 2, int_dtypes + float_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              tolerance={jnp.bfloat16: 4e-2, np.float16: 1e-2}, inexact=True),
    # TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
    # precision.
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
              test_name="expm1_large", tolerance={np.float64: 1e-8}, inexact=True),
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive,
              [], tolerance={np.float64: 1e-8}, inexact=True),
    op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("fix", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("floor_divide", 2, number_dtypes, all_shapes,
              jtu.rand_nonzero, ["rev"]),
    op_record("floor_divide", 2, unsigned_dtypes, all_shapes,
              jtu.rand_nonzero, ["rev"]),
    op_record("fmin", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("fmax", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("fmod", 2, default_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
              test_name="log1p_large", tolerance={np.float64: 1e-12},
              inexact=True),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],
              tolerance={np.float64: 1e-12}, inexact=True),
    op_record("logaddexp", 2, float_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"],
              tolerance={np.float64: 1e-12}, inexact=True),
    op_record("logaddexp2", 2, float_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"],
              tolerance={np.float16: 1e-2, np.float64: 2e-14}, inexact=True),
    op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes,
              jtu.rand_default, [], check_dtypes=False,
              tolerance={dtypes.bfloat16: 4e-2, np.float16: 1e-2,
                         np.float64: 1e-12}),
    op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              tolerance={np.complex128: 1e-14}, check_dtypes=False),
    op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-2}),
    op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("modf", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("modf", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("rint", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan,
              []),
    op_record("rint", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("sign", 1, number_dtypes + unsigned_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, []),
    # numpy 1.16 has trouble mixing uint and bfloat16, so we test these separately.
    op_record("copysign", 2, default_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("copysign", 2, unsigned_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("sinc", 1, [t for t in number_dtypes if t != jnp.bfloat16],
              all_shapes, jtu.rand_default, ["rev"],
              tolerance={np.complex64: 1e-5}, inexact=True,
              check_dtypes=False),
    op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"],
              check_dtypes=False),
    op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero,
              ["rev"], inexact=True),
    op_record("ediff1d", 3, [np.int32], all_shapes, jtu.rand_default, []),
    # TODO(phawkins): np.unwrap does not correctly promote its default period
    # argument under NumPy 1.21 for bfloat16 inputs. It works fine if we
    # explicitly pass a bfloat16 value that does not need promition. We should
    # probably add a custom test harness for unwrap that tests the period
    # argument anyway.
    op_record("unwrap", 1, [t for t in float_dtypes if t != dtypes.bfloat16],
              nonempty_nonscalar_array_shapes,
              jtu.rand_default, ["rev"],
              # numpy.unwrap always returns float64
              check_dtypes=False,
              # numpy cumsum is inaccurate, see issue #3517
              tolerance={dtypes.bfloat16: 1e-1, np.float16: 1e-1}),
    op_record("isclose", 2, [t for t in all_dtypes if t != jnp.bfloat16],
              all_shapes, jtu.rand_small_positive, []),
    op_record("gcd", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),
    op_record("lcm", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),
]
# Integer-only bitwise ops; exercised by testBitwiseOp below.
JAX_BITWISE_OP_RECORDS = [
    op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("invert", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
]
# Reductions that accept a `dtype` argument (tested with various out_dtypes).
JAX_REDUCER_RECORDS = [
    op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
    op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("nanmean", 1, inexact_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("nanprod", 1, all_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("nansum", 1, number_dtypes, all_shapes, jtu.rand_some_nan, []),
]
# Reductions that accept an `initial` argument.
JAX_REDUCER_INITIAL_RECORDS = [
    op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
    op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("max", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("min", 1, all_dtypes, all_shapes, jtu.rand_default, []),
]
# Reductions that accept `where=` but no `initial` argument.
JAX_REDUCER_WHERE_NO_INITIAL_RECORDS = [
    op_record("all", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("any", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("mean", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
]
# Reductions that take no `dtype` argument.
JAX_REDUCER_NO_DTYPE_RECORDS = [
    op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("nanmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanvar", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("nanstd", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []),
]
# argmin/argmax family (index-returning reductions), incl. NaN-aware variants.
JAX_ARGMINMAX_RECORDS = [
    op_record("argmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
    op_record("argmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
    op_record("nanargmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanargmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
]
# Dunder operator overloads on jnp arrays (left-hand side operand).
JAX_OPERATOR_OVERLOADS = [
    op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__le__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
              tolerance={np.float32: 2e-4, np.complex64: 2e-4, np.complex128: 1e-14}),
    op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-1}),
    op_record("__floordiv__", 2, default_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
              inexact=True),
    op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    # TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2
    op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default, []),
    # TODO(mattjj): investigate these failures
    # op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    # op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("__lshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
    op_record("__rshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
]
# Reflected dunder operator overloads (jnp array as the right-hand operand).
JAX_RIGHT_OPERATOR_OVERLOADS = [
    op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
              tolerance={np.float32: 2e-4, np.complex64: 1e-3}),
    op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-1}),
    op_record("__rfloordiv__", 2, default_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
              inexact=True),
    # op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    # op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("__rlshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
    op_record("__rrshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), [])
]
# Sentinel operand whose binary dunders all return `self`; testBinaryOperatorDefers
# uses it to check that jnp operators defer to operands that override them.
class _OverrideEverything(object):
  pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
  if rec.nargs == 2:
    setattr(_OverrideEverything, rec.name, lambda self, other: self)
# Sentinel operand whose binary dunders all return NotImplemented; used to
# check the fallback behavior (TypeError, or identity-based ==/!=).
class _OverrideNothing(object):
  pass
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
  if rec.nargs == 2:
    setattr(_OverrideNothing, rec.name, lambda self, other: NotImplemented)
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: jnp.issubdtype(dtype, np.signedinteger)
width = lambda dtype: jnp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
# The following condition seems a little ad hoc, but seems to capture what
# numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
def _shapes_are_broadcast_compatible(shapes):
accumulator = np.zeros([])
for shape in shapes:
try:
accumulator = accumulator + np.zeros(shape)
except ValueError:
return False
return True
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
def _promote_like_jnp(fun, inexact=False):
"""Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`.
jnp and np have different type promotion semantics; this decorator allows
tests make an np reference implementation act more like an jnp
implementation.
"""
def wrapper(*args, **kw):
flat_args = tree_util.tree_leaves(args)
if inexact and not any(jnp.issubdtype(jnp.result_type(x), jnp.inexact)
for x in flat_args):
dtype = jnp.result_type(jnp.float_, *flat_args)
else:
dtype = jnp.result_type(*flat_args)
args = tree_util.tree_map(lambda a: np.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper
@jtu.with_config(jax_numpy_rank_promotion="raise")
class LaxBackedNumpyTests(jtu.JaxTestCase):
"""Tests for LAX-backed Numpy implementation."""
def _GetArgsMaker(self, rng, shapes, dtypes, np_arrays=True):
def f():
out = [rng(shape, dtype or jnp.float_)
for shape, dtype in zip(shapes, dtypes)]
if np_arrays:
return out
return [jnp.asarray(a) if isinstance(a, (np.ndarray, np.generic)) else a
for a in out]
return f
def testNotImplemented(self):
for name in jnp._NOT_IMPLEMENTED:
func = getattr(jnp, name)
with self.assertRaises(NotImplementedError):
func()
  # One generated case per (op record, broadcast-compatible shape combo,
  # valid dtype combo), for both op-record tables above.
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                      dtypes),
         "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
         "check_dtypes": rec.check_dtypes, "tolerance": rec.tolerance,
         "inexact": rec.inexact}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          itertools.combinations_with_replacement(rec.shapes, rec.nargs))
        for dtypes in itertools.product(
          *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
      for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
                                 JAX_COMPOUND_OP_RECORDS)))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testOp(self, np_op, jnp_op, rng_factory, shapes, dtypes, check_dtypes,
             tolerance, inexact):
    """Compare jnp_op against np_op on random args, then compile-check jnp_op."""
    # Silence numpy RuntimeWarnings; edge-case inputs (inf/nan/0) are expected.
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="invalid value.*")(np_op)
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="divide by zero.*")(np_op)
    rng = rng_factory(self.rng())
    args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
    # Merge the record's per-dtype tolerance with the harness defaults.
    tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
    tol = functools.reduce(jtu.join_tolerance,
                           [tolerance, tol, jtu.default_tolerance()])
    self._CheckAgainstNumpy(_promote_like_jnp(np_op, inexact), jnp_op,
                            args_maker, check_dtypes=check_dtypes, tol=tol)
    self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes,
                          atol=tol, rtol=tol)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                      dtypes),
         "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
         "tol": rec.tolerance}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          itertools.combinations_with_replacement(rec.shapes, rec.nargs))
        for dtypes in itertools.product(
          *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
      for rec in JAX_OPERATOR_OVERLOADS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
    """Compile-check the dunder operator `name` (e.g. "__add__") on jnp arrays."""
    rng = rng_factory(self.rng())
    # np and jnp arrays have different type promotion rules; force the use of
    # jnp arrays.
    args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
    # "__add__" -> operator.add, etc.
    fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
    self._CompileAndCheck(fun, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                      dtypes),
         "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
         "op_tolerance": rec.tolerance}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          itertools.combinations_with_replacement(rec.shapes, rec.nargs))
        for dtypes in itertools.product(
          *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
      for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
                                op_tolerance):
    """Compile-check reflected dunder operators (e.g. "__radd__") on jnp arrays."""
    if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
      raise SkipTest("scalars not implemented")  # TODO(mattjj): clean up
    rng = rng_factory(self.rng())
    args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
    # Invoke the reflected method directly on the second operand.
    fun = lambda fst, snd: getattr(snd, name)(fst)
    tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
    self._CompileAndCheck( fun, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": rec.test_name + "_{}".format(dtype),
       "rng_factory": rec.rng_factory,
       "op_name": rec.name, "dtype": dtype}
      for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2
      for dtype in rec.dtypes))
  def testBinaryOperatorDefers(self, op_name, rng_factory, dtype):
    """Binary ops on jnp arrays must defer to operands that overload them."""
    rng = rng_factory(self.rng())
    arg = jax.device_put(rng((), dtype))
    op = getattr(operator, op_name)
    # An operand that overrides every operator wins on both sides.
    other = _OverrideEverything()
    assert op(other, arg) is other
    assert op(arg, other) is other
    # With an operand returning NotImplemented, ==/!= fall back to identity;
    # every other operator raises TypeError.
    other = _OverrideNothing()
    if op_name == "__eq__":
      assert op(other, arg) is False
      assert op(arg, other) is False
    elif op_name == "__ne__":
      assert op(other, arg) is True
      assert op(arg, other) is True
    else:
      with self.assertRaises(TypeError):
        op(other, arg)
      with self.assertRaises(TypeError):
        op(arg, other)
def testArrayEqualExamples(self):
# examples from the array_equal() docstring.
self.assertTrue(jnp.array_equal([1, 2], [1, 2]))
self.assertTrue(jnp.array_equal(np.array([1, 2]), np.array([1, 2])))
self.assertFalse(jnp.array_equal([1, 2], [1, 2, 3]))
self.assertFalse(jnp.array_equal([1, 2], [1, 4]))
a = np.array([1, np.nan])
self.assertFalse(jnp.array_equal(a, a))
self.assertTrue(jnp.array_equal(a, a, equal_nan=True))
a = np.array([1 + 1j])
b = a.copy()
a.real = np.nan
b.imag = np.nan
self.assertTrue(jnp.array_equal(a, b, equal_nan=True))
def testArrayEquivExamples(self):
# examples from the array_equiv() docstring.
self.assertTrue(jnp.array_equiv([1, 2], [1, 2]))
self.assertFalse(jnp.array_equiv([1, 2], [1, 3]))
with jax.numpy_rank_promotion('allow'):
self.assertTrue(jnp.array_equiv([1, 2], [[1, 2], [1, 2]]))
self.assertFalse(jnp.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]))
self.assertFalse(jnp.array_equiv([1, 2], [[1, 2], [1, 3]]))
def testArrayModule(self):
if numpy_dispatch is None:
raise SkipTest('requires https://github.com/seberg/numpy-dispatch')
jnp_array = jnp.array(1.0)
np_array = np.array(1.0)
module = numpy_dispatch.get_array_module(jnp_array)
self.assertIs(module, jnp)
module = numpy_dispatch.get_array_module(jnp_array, np_array)
self.assertIs(module, jnp)
def f(x):
module = numpy_dispatch.get_array_module(x)
self.assertIs(module, jnp)
return x
jax.jit(f)(jnp_array)
jax.grad(f)(jnp_array)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(
           rec.test_name, shapes, dtypes),
         "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name)}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          itertools.combinations_with_replacement(rec.shapes, rec.nargs))
        for dtypes in filter(
          _dtypes_are_compatible_for_bitwise_ops,
          itertools.combinations_with_replacement(rec.dtypes, rec.nargs)))
      for rec in JAX_BITWISE_OP_RECORDS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testBitwiseOp(self, np_op, jnp_op, rng_factory, shapes, dtypes):
    """Compare jnp bitwise ops against numpy, then compile-check them."""
    rng = rng_factory(self.rng())
    # 64-bit integer inputs are unavailable when x64 mode is off.
    if not config.x64_enabled and any(
        jnp.iinfo(dtype).bits == 64 for dtype in dtypes):
      self.skipTest("x64 types are disabled by jax_enable_x64")
    args_maker = self._GetArgsMaker(rng, shapes, dtypes)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                            check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
    self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op.__name__, shapes, dtypes),
"op": op, "dtypes": dtypes, "shapes": shapes}
for op in [jnp.left_shift, jnp.right_shift]
for shapes in filter(
_shapes_are_broadcast_compatible,
# TODO numpy always promotes to shift dtype for zero-dim shapes:
itertools.combinations_with_replacement(nonzerodim_shapes, 2))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, int_dtypes_no_uint64) for s in shapes))))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testShiftOpAgainstNumpy(self, op, dtypes, shapes):
dtype, shift_dtype = dtypes
signed_mix = np.issubdtype(dtype, np.signedinteger) != \
np.issubdtype(shift_dtype, np.signedinteger)
has_32 = any(np.iinfo(d).bits == 32 for d in dtypes)
promoting_to_64 = has_32 and signed_mix
if promoting_to_64 and not config.x64_enabled:
self.skipTest("np.right_shift/left_shift promoting to int64"
"differs from jnp in 32 bit mode.")
info, shift_info = map(np.iinfo, dtypes)
x_rng = jtu.rand_int(self.rng(), low=info.min, high=info.max + 1)
# NumPy requires shifts to be non-negative and below the bit width:
shift_rng = jtu.rand_int(self.rng(), high=max(info.bits, shift_info.bits))
args_maker = lambda: (x_rng(shapes[0], dtype), shift_rng(shapes[1], shift_dtype))
self._CompileAndCheck(op, args_maker)
np_op = getattr(np, op.__name__)
self._CheckAgainstNumpy(np_op, op, args_maker)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
          {"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
              rec.test_name.capitalize(),
              jtu.format_shape_dtype_string(shape, dtype), axis,
              "None" if out_dtype is None else np.dtype(out_dtype).name, keepdims),
           "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
           "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
           "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
          for shape in rec.shapes for dtype in rec.dtypes
          for out_dtype in [None] + rec.dtypes
          for axis in list(range(-len(shape), len(shape))) + [None]
          for keepdims in [False, True])
      for rec in JAX_REDUCER_RECORDS))
  def testReducer(self, np_op, jnp_op, rng_factory, shape, dtype, out_dtype,
                  axis, keepdims, inexact):
    """Compare jnp reducers that accept an output dtype against np, over
    all shape/axis/out_dtype/keepdims combinations."""
    rng = rng_factory(self.rng())
    @jtu.ignore_warning(category=np.ComplexWarning)
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="mean of empty slice.*")
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="overflow encountered.*")
    def np_fun(x):
      # NumPy has no bfloat16: compute the reference in float32 instead.
      x_cast = x if dtype != jnp.bfloat16 else x.astype(np.float32)
      t = out_dtype if out_dtype != jnp.bfloat16 else np.float32
      return np_op(x_cast, axis, dtype=t, keepdims=keepdims)
    np_fun = _promote_like_jnp(np_fun, inexact)
    jnp_fun = lambda x: jnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    # Per-dtype tolerances; use the looser of input and output dtype.
    tol_spec = {np.float16: 1e-2, np.int32: 1E-3, np.float32: 1e-3,
                np.complex64: 1e-3, np.float64: 1e-5, np.complex128: 1e-5}
    tol = jtu.tolerance(dtype, tol_spec)
    tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
    # dtype check is skipped when bfloat16 is involved (reference uses float32).
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=jnp.bfloat16 not in (dtype, out_dtype),
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                          rtol=tol)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
          {"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
              rec.test_name.capitalize(),
              jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
           "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
           "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
           "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
          for shape in rec.shapes for dtype in rec.dtypes
          for axis in list(range(-len(shape), len(shape))) + [None]
          for keepdims in [False, True])
      for rec in JAX_REDUCER_NO_DTYPE_RECORDS))
  def testReducerNoDtype(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                         keepdims, inexact):
    """Compare jnp reducers that take no dtype argument against np."""
    rng = rng_factory(self.rng())
    # For bfloat16 inputs from a NaN-producing rng, compute the NumPy
    # reference in float32 and cast the result back to bfloat16.
    is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.*")
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="All-NaN slice encountered.*")
    def np_fun(x):
      x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
      res = np_op(x_cast, axis, keepdims=keepdims)
      res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
      return res
    np_fun = _promote_like_jnp(np_fun, inexact)
    jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims)
    args_maker = lambda: [rng(shape, dtype)]
    tol = {np.float16: 0.002}
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=tol, atol=tol)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
          {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}".format(
              rec.test_name.capitalize(),
              jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial),
           "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
           "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
           "initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
          for shape in rec.shapes for dtype in rec.dtypes
          for axis in list(range(-len(shape), len(shape))) + [None]
          for initial in [0, 1] for keepdims in [False, True])
      for rec in JAX_REDUCER_INITIAL_RECORDS))
  def testReducerInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                         keepdims, initial, inexact):
    """Compare jnp reducers with an `initial` value against np."""
    rng = rng_factory(self.rng())
    # For bfloat16 inputs from a NaN-producing rng, compute the NumPy
    # reference in float32 and cast the result back to bfloat16.
    is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.*")
    def np_fun(x):
      x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
      res = np_op(x_cast, axis, keepdims=keepdims, initial=initial)
      res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
      return res
    np_fun = _promote_like_jnp(np_fun, inexact)
    np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
    jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
          {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}_whereshape={}".format(
              rec.test_name.capitalize(),
              jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial,
              jtu.format_shape_dtype_string(whereshape, bool)),
           "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
           "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), "whereshape": whereshape,
           "initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
          for shape in rec.shapes for dtype in rec.dtypes
          for whereshape in _compatible_shapes(shape)
          for axis in list(range(-len(shape), len(shape))) + [None]
          for initial in [0, 1] for keepdims in [False, True])
      for rec in JAX_REDUCER_INITIAL_RECORDS))
  def testReducerWhere(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                       keepdims, initial, inexact, whereshape):
    """Compare jnp reducers with `where` and `initial` arguments against np."""
    if (shape in [()] + scalar_shapes and
        dtype in [jnp.int16, jnp.uint16] and
        jnp_op in [jnp.min, jnp.max]):
      self.skipTest("Known XLA failure; see https://github.com/google/jax/issues/4971.")
    rng = rng_factory(self.rng())
    # For bfloat16 inputs from a NaN-producing rng, compute the NumPy
    # reference in float32 and cast the result back to bfloat16.
    is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
    # Do not pass where via args_maker as that is incompatible with _promote_like_jnp.
    where = jtu.rand_bool(self.rng())(whereshape, np.bool_)
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.*")
    def np_fun(x):
      x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
      res = np_op(x_cast, axis, keepdims=keepdims, initial=initial, where=where)
      res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
      return res
    np_fun = _promote_like_jnp(np_fun, inexact)
    np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
    jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial, where=where)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @unittest.skipIf(numpy_version < (1, 20), "where parameter not supported in older numpy")
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
          {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_whereshape={}".format(
              rec.test_name.capitalize(),
              jtu.format_shape_dtype_string(shape, dtype), axis, keepdims,
              jtu.format_shape_dtype_string(whereshape, bool)),
           "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
           "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), "whereshape": whereshape,
           "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
          for shape in rec.shapes for dtype in rec.dtypes
          for whereshape in _compatible_shapes(shape)
          for axis in list(range(-len(shape), len(shape))) + [None]
          for keepdims in [False, True])
      for rec in JAX_REDUCER_WHERE_NO_INITIAL_RECORDS))
  def testReducerWhereNoInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                                keepdims, inexact, whereshape):
    """Compare jnp reducers with `where` but no `initial` against np."""
    rng = rng_factory(self.rng())
    # bfloat16 inputs: compute the NumPy reference in float32, cast back.
    is_bf16_nan_test = dtype == jnp.bfloat16
    # Do not pass where via args_maker as that is incompatible with _promote_like_jnp.
    where = jtu.rand_bool(self.rng())(whereshape, np.bool_)
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.*")
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Mean of empty slice.*")
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="invalid value encountered in true_divide*")
    def np_fun(x):
      x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
      res = np_op(x_cast, axis, keepdims=keepdims, where=where)
      res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
      return res
    np_fun = _promote_like_jnp(np_fun, inexact)
    np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
    jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, where=where)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    # Only compare against NumPy where its `where` support is reliable:
    # numpy >= 1.20.2, or the all/any reducers — TODO confirm exact rationale.
    if numpy_version >= (1, 20, 2) or np_op.__name__ in ("all", "any"):
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes for dtype in all_dtypes
for axis in list(range(-len(shape), len(shape))) + [None]))
def testCountNonzero(self, shape, dtype, axis):
rng = jtu.rand_some_zero(self.rng())
np_fun = lambda x: np.count_nonzero(x, axis)
jnp_fun = lambda x: jnp.count_nonzero(x, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testNonzero(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
np_fun = lambda x: np.nonzero(x)
np_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(np_fun)
jnp_fun = lambda x: jnp.nonzero(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_size={}_fill_value={}".format(
          jtu.format_shape_dtype_string(shape, dtype), size, fill_value),
       "shape": shape, "dtype": dtype, "size": size, "fill_value": fill_value}
      for shape in nonempty_array_shapes
      for dtype in all_dtypes
      for fill_value in [None, -1]
      for size in [1, 5, 10]))
  def testNonzeroSize(self, shape, dtype, size, fill_value):
    """jnp.nonzero with a static `size`: output is truncated or padded with
    `fill_value` (0 when None) to exactly `size` entries per dimension."""
    rng = jtu.rand_some_zero(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    @jtu.ignore_warning(category=DeprecationWarning, message="Calling nonzero on 0d arrays.*")
    def np_fun(x):
      result = np.nonzero(x)
      if size <= len(result[0]):
        # Emulate truncation to the first `size` indices.
        return tuple(arg[:size] for arg in result)
      else:
        # Emulate padding with `fill_value` (or 0 when fill_value is None).
        return tuple(np.concatenate([arg, np.full(size - len(arg), fill_value or 0, arg.dtype)])
                     for arg in result)
    jnp_fun = lambda x: jnp.nonzero(x, size=size, fill_value=fill_value)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testFlatNonzero(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
np_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(np.flatnonzero)
jnp_fun = jnp.flatnonzero
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
# JIT compilation requires specifying the size statically:
jnp_fun = lambda x: jnp.flatnonzero(x, size=np.size(x) // 2)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testArgWhere(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
np_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(np.argwhere)
jnp_fun = jnp.argwhere
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
# JIT compilation requires specifying a size statically. Full test of this
# behavior is in testNonzeroSize().
jnp_fun = lambda x: jnp.argwhere(x, size=np.size(x) // 2)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}".format(
          rec.test_name.capitalize(),
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
       "axis": axis}
      for rec in JAX_ARGMINMAX_RECORDS
      for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)
      for axis in range(-len(shape), len(shape))))
  def testArgMinMax(self, np_op, jnp_op, rng_factory, shape, dtype, axis):
    """Compare jnp argmin/argmax (and nan-variants) against np."""
    rng = rng_factory(self.rng())
    if dtype == np.complex128 and jtu.device_under_test() == "gpu":
      raise unittest.SkipTest("complex128 reductions not supported on GPU")
    if "nan" in np_op.__name__ and dtype == jnp.bfloat16:
      raise unittest.SkipTest("NumPy doesn't correctly handle bfloat16 arrays")
    def np_fun(array_to_reduce):
      # Cast NumPy's index result to jnp's default integer dtype.
      return np_op(array_to_reduce, axis).astype(jnp.int_)
    def jnp_fun(array_to_reduce):
      return jnp_op(array_to_reduce, axis)
    args_maker = lambda: [rng(shape, dtype)]
    try:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    except ValueError as e:
      # np.nanargmin/nanargmax raise on all-NaN slices; jnp does not.
      if str(e) == "All-NaN slice encountered":
        self.skipTest("JAX doesn't support checking for all-NaN slices")
      else:
        raise
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": rec.test_name.capitalize(), "name": rec.name,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name)}
      for rec in JAX_ARGMINMAX_RECORDS))
  def testArgMinMaxEmpty(self, name, np_op, jnp_op):
    """argmin/argmax raise ValueError on an empty reduction axis, but work
    on an empty array when the reduced axis itself is non-empty."""
    name = name[3:] if name.startswith("nan") else name  # nanargmin -> argmin
    msg = "attempt to get {} of an empty sequence".format(name)
    with self.assertRaises(ValueError, msg=msg):
      jnp_op(np.array([]))
    with self.assertRaises(ValueError, msg=msg):
      jnp_op(np.zeros((2, 0)), axis=1)
    # Reducing over the non-empty axis of a (2, 0) array is valid.
    np_fun = partial(np_op, axis=0)
    jnp_fun = partial(jnp_op, axis=0)
    args_maker = lambda: [np.zeros((2, 0))]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_{}".format(
          jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
          jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
          axes),
       "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
       "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
       "axes": axes}
      for lhs_shape, rhs_shape, axes in [
          [(2,), (2,), (-1, -1, -1, None)], # scalar output
          [(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors
          [(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors
          [(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting
          [(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes
          [(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting
          [(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors
          [(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting
          [(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing
          [(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before
      ]
      for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):
    """Compare jnp.cross against np.cross across axis arguments and
    broadcasting combinations."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    axisa, axisb, axisc, axis = axes
    jnp_fun = lambda a, b: jnp.cross(a, b, axisa, axisb, axisc, axis)
    def np_fun(a, b):
      # NumPy has no bfloat16: compute in float32, then cast to jnp's
      # promoted result dtype.
      a = a.astype(np.float32) if lhs_dtype == jnp.bfloat16 else a
      b = b.astype(np.float32) if rhs_dtype == jnp.bfloat16 else b
      out = np.cross(a, b, axisa, axisb, axisc, axis)
      return out.astype(jnp.promote_types(lhs_dtype, rhs_dtype))
    # Loose tolerances for the low-precision dtypes.
    tol_spec = {dtypes.bfloat16: 3e-1, np.float16: 0.15}
    tol = max(jtu.tolerance(lhs_dtype, tol_spec),
              jtu.tolerance(rhs_dtype, tol_spec))
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                          rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
for name, lhs_shape, rhs_shape in [
("matrix-scalar", (3, 3), ()),
("scalar-matrix", (), (3, 3)),
("matrix-vector", (4, 5), (5,)),
("vector-matrix", (6,), (6, 4)),
("matrix-matrix", (3, 4), (4, 5)),
("tensor-vector", (4, 3, 2), (2,)),
("vector-tensor", (2,), (3, 2, 4)),
("tensor-matrix", (4, 3, 2), (2, 5)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-tensor", (2, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-14,
np.complex128: 1e-14}
if jtu.device_under_test() == "tpu":
tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1
def np_dot(x, y):
x = x.astype(np.float32) if lhs_dtype == jnp.bfloat16 else x
y = y.astype(np.float32) if rhs_dtype == jnp.bfloat16 else y
return np.dot(x, y).astype(jnp.promote_types(lhs_dtype, rhs_dtype))
self._CheckAgainstNumpy(np_dot, jnp.dot, args_maker,
tol=tol)
self._CompileAndCheck(jnp.dot, args_maker, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("matrix-vector", (3, 3), (3,)),
("vector-matrix", (3,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("vector-tensor", (3,), (5, 3, 2)),
("tensor-vector", (5, 3, 2), (2,)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-matrix", (5, 2, 3), (3, 2)),
("tensor-tensor", (5, 3, 4), (5, 4, 1)),
("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
rng = jtu.rand_default(self.rng())
def np_fun(x, y):
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return np.matmul(x, y).astype(dtype)
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {np.float16: 1e-2, np.float32: 2e-2, np.float64: 1e-12,
np.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[np.float16] = tol[np.float32] = tol[np.complex64] = 4e-2
self._CheckAgainstNumpy(np_fun, jnp.matmul, args_maker, tol=tol)
self._CompileAndCheck(jnp.matmul, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_{}".format(
          jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
          jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
          axes),
       "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
       "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
       "axes": axes}
      for lhs_shape, rhs_shape, axes in [
          [(3,), (), 0],
          [(2, 3, 4), (5, 6, 7), 0],  # from issue #740
          [(2, 3, 4), (3, 4, 5, 6), 2],
          [(2, 3, 4), (5, 4, 3, 6), [1, 2]],
          [(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
          [(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
      ]
      for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
  def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):
    """Compare jnp.tensordot against np.tensordot for int, list, and
    pair-of-lists `axes` arguments."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    jnp_fun = lambda a, b: jnp.tensordot(a, b, axes)
    def np_fun(a, b):
      # NumPy has no bfloat16: compute in float32, then cast to jnp's
      # promoted result dtype.
      a = a if lhs_dtype != jnp.bfloat16 else a.astype(np.float32)
      b = b if rhs_dtype != jnp.bfloat16 else b.astype(np.float32)
      dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
      return np.tensordot(a, b, axes).astype(dtype)
    # Per-dtype tolerances, loosened on TPU.
    tol = {np.float16: 1e-1, np.float32: 1e-3, np.float64: 1e-12,
           np.complex64: 1e-3, np.complex128: 1e-12}
    if jtu.device_under_test() == "tpu":
      tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker)
def testTensordotErrors(self):
a = np.random.random((3, 2, 2))
b = np.random.random((2,))
self.assertRaisesRegex(
TypeError, "Number of tensordot axes.*exceeds input ranks.*",
lambda: jnp.tensordot(a, b, axes=2))
self.assertRaisesRegex(
TypeError, "tensordot requires axes lists to have equal length.*",
lambda: jnp.tensordot(a, b, axes=([0], [0, 1])))
self.assertRaisesRegex(
TypeError, "tensordot requires both axes lists to be either ints, tuples or lists.*",
lambda: jnp.tensordot(a, b, axes=('bad', 'axes')))
self.assertRaisesRegex(
TypeError, "tensordot axes argument must be an int, a pair of ints, or a pair of lists.*",
lambda: jnp.tensordot(a, b, axes='badaxes'))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_invert={}".format(
jtu.format_shape_dtype_string(element_shape, dtype),
jtu.format_shape_dtype_string(test_shape, dtype), invert),
"element_shape": element_shape, "test_shape": test_shape,
"dtype": dtype, "invert": invert}
for element_shape in all_shapes
for test_shape in all_shapes
for dtype in default_dtypes
for invert in [True, False]))
def testIsin(self, element_shape, test_shape, dtype, invert):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(element_shape, dtype), rng(test_shape, dtype)]
jnp_fun = lambda e, t: jnp.isin(e, t, invert=invert)
np_fun = lambda e, t: np.isin(e, t, invert=invert)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_invert={}".format(
jtu.format_shape_dtype_string(element_shape, dtype),
jtu.format_shape_dtype_string(test_shape, dtype), invert),
"element_shape": element_shape, "test_shape": test_shape,
"dtype": dtype, "invert": invert}
for element_shape in all_shapes
for test_shape in all_shapes
for dtype in default_dtypes
for invert in [True, False]))
def testIn1d(self, element_shape, test_shape, dtype, invert):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(element_shape, dtype), rng(test_shape, dtype)]
jnp_fun = lambda e, t: jnp.in1d(e, t, invert=invert)
np_fun = lambda e, t: np.in1d(e, t, invert=invert)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2)),
"shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in all_shapes
for shape2 in all_shapes))
def testSetdiff1d(self, shape1, shape2, dtype1, dtype2):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
self._CheckAgainstNumpy(np.setdiff1d, jnp.setdiff1d, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2)),
"shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in nonempty_nonscalar_array_shapes
for shape2 in nonempty_nonscalar_array_shapes))
def testUnion1d(self, shape1, shape2, dtype1, dtype2):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
def np_fun(arg1, arg2):
dtype = jnp.promote_types(arg1.dtype, arg2.dtype)
return np.union1d(arg1, arg2).astype(dtype)
self._CheckAgainstNumpy(np_fun, jnp.union1d, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_size={}".format(
          jtu.format_shape_dtype_string(shape1, dtype1),
          jtu.format_shape_dtype_string(shape2, dtype2), size),
       "shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2, "size": size}
      for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
      for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
      for shape1 in nonempty_nonscalar_array_shapes
      for shape2 in nonempty_nonscalar_array_shapes
      for size in [1, 5, 10]))
  def testUnion1dSize(self, shape1, shape2, dtype1, dtype2, size):
    """jnp.union1d with a static `size`: result is truncated or padded
    (with its first element) to exactly `size` entries."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
    def np_fun(arg1, arg2):
      # Cast to jnp's promoted dtype, then emulate truncation/padding.
      dtype = jnp.promote_types(arg1.dtype, arg2.dtype)
      result = np.union1d(arg1, arg2).astype(dtype)
      if size <= len(result):
        return result[:size]
      else:
        return np.concatenate([result, np.full(size - len(result), result[0], result.dtype)])
    def jnp_fun(arg1, arg2):
      return jnp.union1d(arg1, arg2, size=size)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_assume_unique={}".format(
          jtu.format_shape_dtype_string(shape1, dtype1),
          jtu.format_shape_dtype_string(shape2, dtype2),
          assume_unique),
       "shape1": shape1, "dtype1": dtype1, "shape2": shape2, "dtype2": dtype2,
       "assume_unique": assume_unique}
      for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
      for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
      for shape1 in all_shapes
      for shape2 in all_shapes
      for assume_unique in [False, True]))
  def testSetxor1d(self, shape1, dtype1, shape2, dtype2, assume_unique):
    """Compare jnp.setxor1d against np.setxor1d, with/without assume_unique."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
    jnp_fun = lambda ar1, ar2: jnp.setxor1d(ar1, ar2, assume_unique=assume_unique)
    def np_fun(ar1, ar2):
      if assume_unique:
        # pre-flatten the arrays to match with jax implementation
        ar1 = np.ravel(ar1)
        ar2 = np.ravel(ar2)
      return np.setxor1d(ar1, ar2, assume_unique)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_assume_unique={}_return_indices={}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2),
assume_unique,
return_indices),
"shape1": shape1, "dtype1": dtype1, "shape2": shape2, "dtype2": dtype2,
"assume_unique": assume_unique, "return_indices": return_indices}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in all_shapes
for shape2 in all_shapes
for assume_unique in [False, True]
for return_indices in [False, True]))
def testIntersect1d(self, shape1, dtype1, shape2, dtype2, assume_unique, return_indices):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
jnp_fun = lambda ar1, ar2: jnp.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)
np_fun = lambda ar1, ar2: np.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}".format(
          jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
          jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
       "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
       "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
      # TODO(phawkins): support integer dtypes too.
      for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
      for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
      if len(jtu._dims_of_shape(lhs_shape)) == 0
      or len(jtu._dims_of_shape(rhs_shape)) == 0
      or lhs_shape[-1] == rhs_shape[-1]))
  def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
    """Compare jnp.inner against np.inner for inexact dtypes."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    def np_fun(lhs, rhs):
      # NumPy has no bfloat16: compute in float32, then cast to jnp's
      # promoted result dtype.
      lhs = lhs if lhs_dtype != jnp.bfloat16 else lhs.astype(np.float32)
      rhs = rhs if rhs_dtype != jnp.bfloat16 else rhs.astype(np.float32)
      dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
      return np.inner(lhs, rhs).astype(dtype)
    jnp_fun = lambda lhs, rhs: jnp.inner(lhs, rhs)
    # Per-dtype tolerances, loosened on TPU; use the looser of the two sides.
    tol_spec = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-13,
                np.complex64: 1e-5}
    if jtu.device_under_test() == "tpu":
      tol_spec[np.float32] = tol_spec[np.complex64] = 2e-1
    tol = max(jtu.tolerance(lhs_dtype, tol_spec),
              jtu.tolerance(rhs_dtype, tol_spec))
    # TODO(phawkins): there are float32/float64 disagreements for some inputs.
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_deg={}_rcond={}_full={}_w={}_cov={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          deg,
          rcond,
          full,
          w,
          cov),
       "shape": shape, "dtype": dtype, "deg": deg,
       "rcond": rcond, "full": full, "w": w, "cov": cov}
      for dtype in [dt for dt in float_dtypes if dt not in [jnp.float16, jnp.bfloat16]]
      for shape in [shape for shape in one_dim_array_shapes if shape != (1,)]
      for deg in [1, 2, 3]
      for rcond in [None, -1, 10e-3, 10e-5, 10e-10]
      for full in [False, True]
      for w in [False, True]
      for cov in [False, True, "unscaled"]))
  def testPolyfit(self, shape, dtype, deg, rcond, full, w, cov):
    """Compare jnp.polyfit against np.polyfit over degree/rcond/full/w/cov
    parameter combinations."""
    rng = jtu.rand_default(self.rng())
    # Per-dtype tolerances, loosened on TPU.
    tol_spec = {np.float32: 1e-3, np.float64: 1e-13, np.complex64: 1e-5}
    if jtu.device_under_test() == "tpu":
      tol_spec[np.float32] = tol_spec[np.complex64] = 2e-1
    tol = jtu.tolerance(dtype, tol_spec)
    # Weights must be positive; derive them from the third random argument.
    _w = lambda a: abs(a) if w else None
    args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), rng(shape, dtype)]
    jnp_fun = lambda x, y, a: jnp.polyfit(x, y, deg=deg, rcond=rcond, full=full, w=_w(a), cov=cov)
    np_fun = jtu.ignore_warning(
        message="Polyfit may be poorly conditioned*")(lambda x, y, a: np.polyfit(x, y, deg=deg, rcond=rcond, full=full, w=_w(a), cov=cov))
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_amin={}_amax={}".format(
jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
"shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max}
for shape in all_shapes for dtype in number_dtypes
for a_min, a_max in [(-1, None), (None, 1), (-0.9, 1),
(-np.ones(1), None),
(None, np.ones(1)),
(np.full(1, -0.9), np.ones(1))]))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testClipStaticBounds(self, shape, dtype, a_min, a_max):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.clip(x, a_min=a_min, a_max=a_max)
jnp_fun = lambda x: jnp.clip(x, a_min=a_min, a_max=a_max)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
def testClipError(self):
with self.assertRaisesRegex(ValueError, "At most one of a_min and a_max.*"):
jnp.clip(jnp.zeros((3,)))
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_decimals={}".format(
          jtu.format_shape_dtype_string(shape, dtype), decimals),
       "shape": shape, "dtype": dtype, "decimals": decimals}
      for shape, dtype in _shape_and_dtypes(all_shapes, number_dtypes)
      for decimals in [0, 1, -2]))
  def testRoundStaticDecimals(self, shape, dtype, decimals):
    """jnp.round matches np.round for static decimals (incl. negative)."""
    rng = jtu.rand_default(self.rng())
    if jnp.issubdtype(dtype, np.integer) and decimals < 0:
      self.skipTest("Integer rounding with decimals < 0 not implemented")
    np_fun = lambda x: np.round(x, decimals=decimals)
    jnp_fun = lambda x: jnp.round(x, decimals=decimals)
    args_maker = lambda: [rng(shape, dtype)]
    # Low-precision float types need looser tolerances.
    tol = {jnp.bfloat16: 5e-2, np.float16: 1e-2}
    # Python scalars are weakly typed; skip dtype checks for them.
    check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=check_dtypes, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
                          atol=tol, rtol=tol)
def testOperatorRound(self):
self.assertAllClose(round(np.float32(7.532), 1),
round(jnp.float32(7.5), 1))
self.assertAllClose(round(np.float32(1.234), 2),
round(jnp.float32(1.234), 2))
self.assertAllClose(round(np.float32(1.234)),
round(jnp.float32(1.234)), check_dtypes=False)
self.assertAllClose(round(np.float32(7.532), 1),
round(jnp.array(7.5, jnp.float32), 1))
self.assertAllClose(round(np.float32(1.234), 2),
round(jnp.array(1.234, jnp.float32), 2))
self.assertAllClose(round(np.float32(1.234)),
round(jnp.array(1.234, jnp.float32)),
check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_padwidth={}_constantvalues={}".format(
          jtu.format_shape_dtype_string(shape, dtype), mode, pad_width,
          constant_values),
       "shape": shape, "dtype": dtype, "mode": mode,
       "pad_width": pad_width, "constant_values": constant_values}
      for mode, shapes in [
          ('constant', all_shapes),
          ('wrap', nonempty_shapes),
          ('edge', nonempty_shapes),
      ]
      for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)
      for constant_values in [
          # None is used for modes other than 'constant'
          None,
          # constant
          0, 1,
          # (constant,)
          (0,), (2.718,),
          # ((before_const, after_const),)
          ((0, 2),), ((-1, 3.14),),
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i / 2, -3.14 * i) for i in range(len(shape))),
      ]
      for pad_width in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 0),),
          # (before, after)  (not in the docstring but works in numpy)
          (2, 0), (0, 0),
          # (pad,)
          (1,), (2,),
          # pad
          0, 1,
      ]
      if (pad_width != () and constant_values != () and
          ((mode == 'constant' and constant_values is not None) or
           (mode != 'constant' and constant_values is None)))))
  def testPad(self, shape, dtype, mode, pad_width, constant_values):
    """jnp.pad matches np.pad for 'constant', 'wrap' and 'edge' modes."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    if constant_values is None:
      # constant_values is only a valid keyword for mode='constant'.
      np_fun = partial(np.pad, pad_width=pad_width, mode=mode)
      jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode)
    else:
      np_fun = partial(np.pad, pad_width=pad_width, mode=mode,
                       constant_values=constant_values)
      jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode,
                        constant_values=constant_values)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_pad_width={}_stat_length={}".format(
          jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, stat_length),
       "shape": shape, "dtype": dtype, "mode": mode, "pad_width": pad_width,
       "stat_length": stat_length}
      for mode in ['maximum', 'minimum', 'mean', 'median']
      for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
      for pad_width in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 0),),
          # (before, after)  (not in the docstring but works in numpy)
          (2, 0), (0, 0),
          # (pad,)
          (1,), (2,),
          # pad
          0, 1,
      ]
      for stat_length in [
          None,
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple(((i % 3 + 1), ((i + 1) % 3) + 1) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 2),),
          # (before, after)  (not in the docstring but works in numpy)
          (1, 1), (3, 4),
          # (pad,)
          (1,), (2,),
          # pad
          1, 2
      ]
      # 'mean' of booleans is undefined, so skip that combination.
      if (pad_width != () and stat_length != () and
          not (dtype in bool_dtypes and mode == 'mean'))))
  def testPadStatValues(self, shape, dtype, mode, pad_width, stat_length):
    """jnp.pad matches np.pad for the statistic-based padding modes."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = partial(np.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)
    jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_pad_width={}_reflect_type={}".format(
          jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, reflect_type),
       "shape": shape, "dtype": dtype, "mode": mode, "pad_width": pad_width,
       "reflect_type": reflect_type}
      for mode in ['symmetric', 'reflect']
      for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
      for pad_width in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 3),),
          # (before, after)  (not in the docstring but works in numpy)
          (2, 1), (1, 2),
          # (pad,)
          (1,), (2,), (3,),
          # pad
          0, 5, 7, 10
      ]
      for reflect_type in ['even', 'odd']
      if (pad_width != () and
          # following types lack precision when calculating odd values
          (reflect_type != 'odd' or dtype not in [np.bool_, np.float16, jnp.bfloat16]))))
  def testPadSymmetricAndReflect(self, shape, dtype, mode, pad_width, reflect_type):
    """jnp.pad matches np.pad for 'symmetric'/'reflect' with both reflect types."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = partial(np.pad, pad_width=pad_width, mode=mode, reflect_type=reflect_type)
    jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, reflect_type=reflect_type)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE,
                            tol={np.float32: 1e-3, np.complex64: 1e-3})
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_pad_width={}_end_values={}".format(
          jtu.format_shape_dtype_string(shape, dtype), "linear_ramp", pad_width, end_values),
       "shape": shape, "dtype": dtype, "pad_width": pad_width,
       "end_values": end_values}
      for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
      for pad_width in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 0),),
          # (before, after)  (not in the docstring but works in numpy)
          (2, 0), (0, 0),
          # (pad,)
          (1,), (2,),
          # pad
          0, 1,
      ]
      for end_values in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2.0, 3.14),),
          # (before, after)  (not in the docstring but works in numpy)
          (0, 0), (-8.0, 2.0),
          # (end_values,)
          (1,), (2,),
          # end_values
          0, 1, 100, 10.0, 3.5, 4.2, -5, -3
      ]
      if (pad_width != () and end_values != () and
          # following types lack precision
          dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16])))
  def testPadLinearRamp(self, shape, dtype, pad_width, end_values):
    """jnp.pad matches np.pad for mode='linear_ramp' with various end values."""
    if numpy_version < (1, 20) and np.issubdtype(dtype, np.integer):
      raise unittest.SkipTest("NumPy 1.20 changed the semantics of np.linspace")
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = partial(np.pad, pad_width=pad_width, mode="linear_ramp",
                     end_values=end_values)
    jnp_fun = partial(jnp.pad, pad_width=pad_width, mode="linear_ramp",
                      end_values=end_values)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
    self._CompileAndCheck(jnp_fun, args_maker)
def testPadEmpty(self):
arr = np.arange(6).reshape(2, 3)
pad_width = ((2, 3), (3, 1))
np_res = np.pad(arr, pad_width=pad_width, mode="empty")
jnp_res = jnp.pad(arr, pad_width=pad_width, mode="empty")
np.testing.assert_equal(np_res.shape, jnp_res.shape)
np.testing.assert_equal(arr, np_res[2:-3, 3:-1])
np.testing.assert_equal(arr, jnp_res[2:-3, 3:-1])
np.testing.assert_equal(np_res[2:-3, 3:-1], jnp_res[2:-3, 3:-1])
  def testPadKwargs(self):
    """Each pad mode accepts only its own kwargs; others raise ValueError."""
    # Map each mode to the keyword arguments it is allowed to accept.
    modes = {
        'constant': {'constant_values': 0},
        'edge': {},
        'linear_ramp': {'end_values': 0},
        'maximum': {'stat_length': None},
        'mean': {'stat_length': None},
        'median': {'stat_length': None},
        'minimum': {'stat_length': None},
        'reflect': {'reflect_type': 'even'},
        'symmetric': {'reflect_type': 'even'},
        'wrap': {},
        'empty': {}
    }
    arr = jnp.array([1, 2, 3])
    pad_width = 1

    for mode in modes.keys():
      allowed = modes[mode]
      # Collect the kwargs belonging to every *other* mode.
      not_allowed = {}
      for kwargs in modes.values():
        if kwargs != allowed:
          not_allowed.update(kwargs)

      # Test if allowed keyword arguments pass
      jnp.pad(arr, pad_width, mode, **allowed)
      # Test if prohibited keyword arguments of other modes raise an error
      match = "unsupported keyword arguments for mode '{}'".format(mode)
      for key, value in not_allowed.items():
        with self.assertRaisesRegex(ValueError, match):
          jnp.pad(arr, pad_width, mode, **{key: value})

    # Test if unsupported mode raise error.
    unsupported_modes = [1, None, "foo"]
    for mode in unsupported_modes:
      match = "Unimplemented padding mode '{}' for np.pad.".format(mode)
      with self.assertRaisesRegex(NotImplementedError, match):
        jnp.pad(arr, pad_width, mode)
  def testPadFunction(self):
    """jnp.pad accepts a user callback as the mode, like np.pad does."""
    # NumPy's callback mutates the vector in place.
    def np_pad_with(vector, pad_width, iaxis, kwargs):
      pad_value = kwargs.get('padder', 10)
      vector[:pad_width[0]] = pad_value
      vector[-pad_width[1]:] = pad_value

    # JAX arrays are immutable, so the callback returns an updated copy.
    def jnp_pad_with(vector, pad_width, iaxis, kwargs):
      pad_value = kwargs.get('padder', 10)
      vector = vector.at[:pad_width[0]].set(pad_value)
      vector = vector.at[-pad_width[1]:].set(pad_value)
      return vector

    arr = np.arange(6).reshape(2, 3)
    np_res = np.pad(arr, 2, np_pad_with)
    jnp_res = jnp.pad(arr, 2, jnp_pad_with)
    np.testing.assert_equal(np_res, jnp_res)

    # Extra keyword arguments are forwarded to the callback.
    arr = np.arange(24).reshape(2, 3, 4)
    np_res = np.pad(arr, 1, np_pad_with, padder=100)
    jnp_res = jnp.pad(arr, 1, jnp_pad_with, padder=100)
    np.testing.assert_equal(np_res, jnp_res)

    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(arr.shape, arr.dtype)]
    jnp_fun = partial(jnp.pad, pad_width=1, mode=jnp_pad_with)
    self._CompileAndCheck(jnp_fun, args_maker)
def testPadWithNumpyPadWidth(self):
a = jnp.array([1, 2, 3, 4, 5])
f = jax.jit(
partial(
jnp.pad,
pad_width=np.asarray((2, 3)),
mode="constant",
constant_values=(4, 6)))
np.testing.assert_array_equal(
f(a),
np.pad(
a,
pad_width=np.asarray((2, 3)),
mode="constant",
constant_values=(4, 6)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_reps={}".format(
jtu.format_shape_dtype_string(shape, dtype), reps),
"shape": shape, "dtype": dtype, "reps": reps}
for reps in [(), (2,), (3, 4), (2, 3, 4), (1, 0, 2)]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
))
def testTile(self, shape, dtype, reps):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.tile(arg, reps)
jnp_fun = lambda arg: jnp.tile(arg, reps)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in all_dtypes))
def testExtract(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
args_maker = lambda: [rng(shape, jnp.float32), rng(shape, dtype)]
self._CheckAgainstNumpy(np.extract, jnp.extract, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_ncond={}_nfunc={}".format(
          jtu.format_shape_dtype_string(shape, dtype), ncond, nfunc),
       "shape": shape, "dtype": dtype, "ncond": ncond, "nfunc": nfunc}
      for ncond in [1, 2, 3]
      for nfunc in [ncond, ncond + 1]
      for shape in all_shapes
      for dtype in all_dtypes))
  def testPiecewise(self, shape, dtype, ncond, nfunc):
    """jnp.piecewise matches np.piecewise for callable and scalar pieces."""
    rng = jtu.rand_default(self.rng())
    rng_bool = jtu.rand_int(self.rng(), 0, 2)
    # Mix of callables and constants; truncated to the requested count.
    funclist = [lambda x: x - 1, 1, lambda x: x, 0][:nfunc]
    args_maker = lambda: (rng(shape, dtype), [rng_bool(shape, bool) for i in range(ncond)])
    np_fun = partial(np.piecewise, funclist=funclist)
    jnp_fun = partial(jnp.piecewise, funclist=funclist)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
    # This is a higher-order function, so the cache miss check will fail.
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, check_cache_misses=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "{}_perm={}_{}".format(
          jtu.format_shape_dtype_string(shape, dtype), perm, arg_type),
       "dtype": dtype, "shape": shape, "perm": perm, "arg_type": arg_type}
      for dtype in default_dtypes
      for shape in array_shapes
      for arg_type in ["splat", "value"]
      for perm in [None, tuple(np.random.RandomState(0).permutation(np.zeros(shape).ndim))]))
  def testTransposeTuple(self, shape, dtype, perm, arg_type):
    """ndarray.transpose accepts a tuple or splatted axes, matching NumPy."""
    rng = jtu.rand_some_zero(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    if arg_type == "value":
      # Pass the permutation as a single tuple (or None) argument.
      np_fun = lambda x: x.transpose(perm)
      jnp_fun = lambda x: jnp.array(x).transpose(perm)
    else:
      # Splat the permutation as positional arguments.
      np_fun = lambda x: x.transpose(*(perm or ()))
      jnp_fun = lambda x: jnp.array(x).transpose(*(perm or ()))

    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_trim={}".format(
jtu.format_shape_dtype_string(a_shape, dtype), trim),
"dtype": dtype, "a_shape": a_shape, "trim": trim}
for dtype in default_dtypes
for a_shape in one_dim_array_shapes
for trim in ["f", "b", "fb"]))
def testTrimZeros(self, a_shape, dtype, trim):
rng = jtu.rand_some_zero(self.rng())
args_maker = lambda: [rng(a_shape, dtype)]
np_fun = lambda arg1: np.trim_zeros(arg1, trim)
jnp_fun = lambda arg1: jnp.trim_zeros(arg1, trim)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_rank{}".format(
          jtu.format_shape_dtype_string(a_shape, dtype), rank),
       "dtype": dtype, "a_shape": a_shape, "rank": rank}
      for rank in (1, 2)
      for dtype in default_dtypes
      for a_shape in one_dim_array_shapes))
  def testPoly(self, a_shape, dtype, rank):
    """jnp.poly matches np.poly for 1D root vectors and 2D (matrix) inputs."""
    if dtype in (np.float16, jnp.bfloat16, np.int16):
      self.skipTest(f"{dtype} gets promoted to {np.float16}, which is not supported.")
    elif rank == 2 and jtu.device_under_test() in ("tpu", "gpu"):
      self.skipTest("Nonsymmetric eigendecomposition is only implemented on the CPU backend.")
    rng = jtu.rand_default(self.rng())
    tol = { np.int8: 1e-3, np.int32: 1e-3, np.float32: 1e-3, np.float64: 1e-6 }
    if jtu.device_under_test() == "tpu":
      tol[np.int32] = tol[np.float32] = 1e-1
    tol = jtu.tolerance(dtype, tol)
    # a_shape * rank yields (n,) for rank 1 (roots) and (n, n) for rank 2.
    args_maker = lambda: [rng(a_shape * rank, dtype)]
    self._CheckAgainstNumpy(np.poly, jnp.poly, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp.poly, args_maker, check_dtypes=True, rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "a_shape={} , b_shape={}".format(
jtu.format_shape_dtype_string(a_shape, dtype),
jtu.format_shape_dtype_string(b_shape, dtype)),
"dtype": dtype, "a_shape": a_shape, "b_shape" : b_shape}
for dtype in default_dtypes
for a_shape in one_dim_array_shapes
for b_shape in one_dim_array_shapes))
def testPolyAdd(self, a_shape, b_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1, arg2: np.polyadd(arg1, arg2)
jnp_fun = lambda arg1, arg2: jnp.polyadd(arg1, arg2)
args_maker = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "a_shape={} , b_shape={}".format(
jtu.format_shape_dtype_string(a_shape, dtype),
jtu.format_shape_dtype_string(b_shape, dtype)),
"dtype": dtype, "a_shape": a_shape, "b_shape" : b_shape}
for dtype in default_dtypes
for a_shape in one_dim_array_shapes
for b_shape in one_dim_array_shapes))
def testPolySub(self, a_shape, b_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1, arg2: np.polysub(arg1, arg2)
jnp_fun = lambda arg1, arg2: jnp.polysub(arg1, arg2)
args_maker = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_order={}_k={}".format(
          jtu.format_shape_dtype_string(a_shape, dtype),
          order, k),
       "dtype": dtype, "a_shape": a_shape, "order" : order, "k": k}
      for dtype in default_dtypes
      for a_shape in one_dim_array_shapes
      for order in range(5)
      for k in [np.arange(order, dtype=dtype), np.ones(1, dtype), None]))
  def testPolyInt(self, a_shape, order, k, dtype):
    """jnp.polyint matches np.polyint for various orders and constants k."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda arg1: np.polyint(arg1, m=order, k=k)
    jnp_fun = lambda arg1: jnp.polyint(arg1, m=order, k=k)
    args_maker = lambda: [rng(a_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_order={}".format(
jtu.format_shape_dtype_string(a_shape, dtype),
order),
"dtype": dtype, "a_shape": a_shape, "order" : order}
for dtype in default_dtypes
for a_shape in one_dim_array_shapes
for order in range(5)))
def testPolyDer(self, a_shape, order, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1: np.polyder(arg1, m=order)
jnp_fun = lambda arg1: jnp.polyder(arg1, m=order)
args_maker = lambda: [rng(a_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_ptype={}".format(ptype), "ptype": ptype}
      for ptype in ['int', 'np.int', 'jnp.int']))
  def testIntegerPower(self, ptype):
    """jnp.power with a static integer exponent lowers to integer_pow."""
    p = {'int': 2, 'np.int': np.int32(2), 'jnp.int': jnp.int32(2)}[ptype]
    jaxpr = jax.make_jaxpr(partial(jnp.power, x2=p))(1)
    eqns = jaxpr.jaxpr.eqns
    # A single integer_pow primitive, rather than a general pow.
    self.assertLen(eqns, 1)
    self.assertEqual(eqns[0].primitive, lax.integer_pow_p)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_y={}".format(x, y), "x": x, "y": y}
for x in [-1, 0, 1]
for y in [0, 32, 64, 128]))
def testIntegerPowerOverflow(self, x, y):
# Regression test for https://github.com/google/jax/issues/5987
args_maker = lambda: [x, y]
self._CheckAgainstNumpy(np.power, jnp.power, args_maker)
self._CompileAndCheck(jnp.power, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "shape": shape, "dtype": dtype, "axis": axis}
      for shape in all_shapes
      for dtype in all_dtypes
      for axis in [None] + list(range(len(shape)))))
  def testCompress(self, shape, dtype, axis):
    """jnp.compress matches np.compress over all axes and flattened input."""
    rng = jtu.rand_some_zero(self.rng())
    # The condition length must match the selected axis (or flattened size).
    if shape in scalar_shapes or len(shape) == 0:
      cond_shape = (0,)
    elif axis is None:
      cond_shape = (prod(shape),)
    else:
      cond_shape = (shape[axis],)

    args_maker = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]

    np_fun = partial(np.compress, axis=axis)
    jnp_fun = partial(jnp.compress, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_condition=array[{}]_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), len(condition), axis),
       "shape": shape, "dtype": dtype, "condition": condition, "axis": axis}
      for shape in [(2, 3)]
      for dtype in int_dtypes
      # condition entries beyond axis size must be zero.
      for condition in [[1], [1, 0, 0, 0, 0, 0, 0]]
      for axis in [None, 0, 1]))
  def testCompressMismatchedShapes(self, shape, dtype, condition, axis):
    """jnp.compress matches np.compress when condition length != axis size."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [np.array(condition), rng(shape, dtype)]
    np_fun = partial(np.compress, axis=axis)
    jnp_fun = partial(jnp.compress, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "shape": shape, "dtype": dtype, "axis": axis}
      for shape in array_shapes
      for dtype in all_dtypes
      for axis in [None] + list(range(len(shape)))))
  def testCompressMethod(self, shape, dtype, axis):
    """ndarray.compress (method form) matches np.compress."""
    rng = jtu.rand_some_zero(self.rng())
    # The condition length must match the selected axis (or flattened size).
    if shape in scalar_shapes or len(shape) == 0:
      cond_shape = (0,)
    elif axis is None:
      cond_shape = (prod(shape),)
    else:
      cond_shape = (shape[axis],)

    args_maker = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]

    np_fun = lambda condition, x: np.compress(condition, x, axis=axis)
    jnp_fun = lambda condition, x: x.compress(condition, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
          axis, ",".join(str(d) for d in base_shape),
          ",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
       "axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
      for num_arrs in [3]
      for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, num_arrs)
      for base_shape in [(4,), (3, 4), (2, 3, 4)]
      for axis in range(-len(base_shape)+1, len(base_shape))))
  def testConcatenate(self, axis, base_shape, arg_dtypes):
    """jnp.concatenate matches np.concatenate with mixed dtypes and axes."""
    rng = jtu.rand_default(self.rng())
    wrapped_axis = axis % len(base_shape)
    # Vary the concatenated dimension across the operands.
    shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
              for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
    def np_fun(*args):
      # NumPy has no bfloat16; compute in float32 and cast to JAX's
      # promoted result dtype for comparison.
      args = [x if x.dtype != jnp.bfloat16 else x.astype(np.float32)
              for x in args]
      dtype = functools.reduce(jnp.promote_types, arg_dtypes)
      return np.concatenate(args, axis=axis).astype(dtype)
    jnp_fun = lambda *args: jnp.concatenate(args, axis=axis)

    def args_maker():
      return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]

    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in [(4, 1), (4, 3), (4, 5, 6)]
for dtype in all_dtypes
for axis in [None] + list(range(1 - len(shape), len(shape) - 1))))
def testConcatenateArray(self, shape, dtype, axis):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda x: np.concatenate(x, axis=axis)
jnp_fun = lambda x: jnp.concatenate(x, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testConcatenateAxisNone(self):
# https://github.com/google/jax/issues/3419
a = jnp.array([[1, 2], [3, 4]])
b = jnp.array([[5]])
jnp.concatenate((a, b), axis=None)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
          axis, ",".join(str(d) for d in base_shape),
          ",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
       "axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
      for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, 2)
      for base_shape in [(4,), (3, 4), (2, 3, 4)]
      for axis in range(-len(base_shape)+1, len(base_shape))))
  def testAppend(self, axis, base_shape, arg_dtypes):
    """jnp.append matches np.append with mixed dtypes and axes."""
    rng = jtu.rand_default(self.rng())
    wrapped_axis = axis % len(base_shape)
    # Vary the appended dimension across the two operands.
    shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
              for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
    def np_fun(arr, values):
      # NumPy has no bfloat16; compute in float32 and cast to JAX's
      # promoted result dtype for comparison.
      arr = arr.astype(np.float32) if arr.dtype == jnp.bfloat16 else arr
      values = (values.astype(np.float32) if values.dtype == jnp.bfloat16
                else values)
      out = np.append(arr, values, axis=axis)
      return out.astype(jnp.promote_types(*arg_dtypes))
    jnp_fun = lambda arr, values: jnp.append(arr, values, axis=axis)

    def args_maker():
      return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]

    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_idx={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, idx),
       "dtype": dtype, "shape": shape, "axis": axis, "idx": idx}
      for shape in nonempty_nonscalar_array_shapes
      for dtype in all_dtypes
      for axis in [None] + list(range(-len(shape), len(shape)))
      for idx in (range(-prod(shape), prod(shape))
                  if axis is None else
                  range(-shape[axis], shape[axis]))))
  def testDeleteInteger(self, shape, dtype, idx, axis):
    """jnp.delete with a scalar index matches np.delete."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = lambda arg: np.delete(arg, idx, axis=axis)
    jnp_fun = lambda arg: jnp.delete(arg, idx, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_slc={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, slc),
"dtype": dtype, "shape": shape, "axis": axis, "slc": slc}
for shape in nonempty_nonscalar_array_shapes
for dtype in all_dtypes
for axis in [None] + list(range(-len(shape), len(shape)))
for slc in [slice(None), slice(1, 3), slice(1, 5, 2)]))
def testDeleteSlice(self, shape, dtype, axis, slc):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda arg: np.delete(arg, slc, axis=axis)
jnp_fun = lambda arg: jnp.delete(arg, slc, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_idx={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis,
          jtu.format_shape_dtype_string(idx_shape, int)),
       "dtype": dtype, "shape": shape, "axis": axis, "idx_shape": idx_shape}
      for shape in nonempty_nonscalar_array_shapes
      for dtype in all_dtypes
      for axis in [None] + list(range(-len(shape), len(shape)))
      for idx_shape in all_shapes))
  def testDeleteIndexArray(self, shape, dtype, axis, idx_shape):
    """jnp.delete with an integer index array matches np.delete."""
    rng = jtu.rand_default(self.rng())
    max_idx = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
    # Previous to numpy 1.19, negative indices were ignored so we don't test this.
    low = 0 if numpy_version < (1, 19, 0) else -max_idx
    idx = jtu.rand_int(self.rng(), low=low, high=max_idx)(idx_shape, int)
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = lambda arg: np.delete(arg, idx, axis=axis)
    jnp_fun = lambda arg: jnp.delete(arg, idx, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @unittest.skipIf(numpy_version < (1, 19), "boolean mask not supported in numpy < 1.19.0")
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "dtype": dtype, "shape": shape, "axis": axis}
      for shape in nonempty_nonscalar_array_shapes
      for dtype in all_dtypes
      for axis in [None] + list(range(-len(shape), len(shape)))))
  def testDeleteMaskArray(self, shape, dtype, axis):
    """jnp.delete with a boolean mask matches np.delete."""
    rng = jtu.rand_default(self.rng())
    # Mask length must match the selected axis (or the flattened size).
    mask_size = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
    mask = jtu.rand_int(self.rng(), low=0, high=2)(mask_size, bool)
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = lambda arg: np.delete(arg, mask, axis=axis)
    jnp_fun = lambda arg: jnp.delete(arg, mask, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_out_dims={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          axis, out_dims),
       "shape": shape, "dtype": dtype, "axis": axis, "out_dims": out_dims}
      for shape in nonempty_array_shapes
      for dtype in default_dtypes
      for axis in range(-len(shape), len(shape))
      for out_dims in [0, 1, 2]))
  def testApplyAlongAxis(self, shape, dtype, axis, out_dims):
    """jnp.apply_along_axis matches NumPy for 0-, 1-, and 2-d outputs."""
    # A 1D->ND function whose output rank is controlled by out_dims.
    def func(x, out_dims):
      if out_dims == 0:
        return x.sum()
      elif out_dims == 1:
        return x * x[0]
      elif out_dims == 2:
        return x[:, None] + x[None, :]
      else:
        raise NotImplementedError(f"out_dims={out_dims}")
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = lambda arr: np.apply_along_axis(func, axis, arr, out_dims=out_dims)
    jnp_fun = lambda arr: jnp.apply_along_axis(func, axis, arr, out_dims=out_dims)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_func={}_keepdims={}_axes={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          func, keepdims, axes),
       "shape": shape, "dtype": dtype, "func": func, "keepdims": keepdims, "axes": axes}
      for shape in nonempty_shapes
      for func in ["sum"]
      for keepdims in [True, False]
      for axes in itertools.combinations(range(len(shape)), 2)
      # Avoid low-precision types in sum()
      for dtype in default_dtypes if dtype not in [np.float16, jnp.bfloat16]))
  def testApplyOverAxes(self, shape, dtype, func, keepdims, axes):
    """jnp.apply_over_axes matches NumPy for a reduction over axis pairs."""
    f = lambda x, axis: getattr(x, func)(axis=axis, keepdims=keepdims)
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: (rng(shape, dtype),)
    np_fun = lambda a: np.apply_over_axes(f, a, axes)
    jnp_fun = lambda a: jnp.apply_over_axes(f, a, axes)
    self._CompileAndCheck(jnp_fun, args_maker)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape=[{}]_axis={}_repeats={}_fixed_size={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          axis, repeats, fixed_size),
       "axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
       'fixed_size': fixed_size}
      for repeats in [0, 1, 2]
      for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
      for axis in [None] + list(range(-len(shape), max(1, len(shape))))
      for fixed_size in [True, False]))
  def testRepeat(self, axis, shape, dtype, repeats, fixed_size):
    """jnp.repeat matches np.repeat, with and without total_repeat_length."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda arg: np.repeat(arg, repeats=repeats, axis=axis)
    np_fun = _promote_like_jnp(np_fun)
    if fixed_size:
      # With a static total_repeat_length, `repeats` may be a traced value.
      total_repeat_length = np.repeat(np.zeros(shape), repeats, axis).shape[axis or 0]
      jnp_fun = lambda arg, rep: jnp.repeat(arg, repeats=rep, axis=axis,
                                     total_repeat_length=total_repeat_length)
      jnp_args_maker = lambda: [rng(shape, dtype), repeats]
      clo_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis,
                                       total_repeat_length=total_repeat_length)
      clo_fun_args_maker = lambda: [rng(shape, dtype)]
      self._CompileAndCheck(jnp_fun, jnp_args_maker)
      self._CheckAgainstNumpy(np_fun, clo_fun, clo_fun_args_maker)
    else:
      # Now repeats is in a closure, so a constant.
      jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
      args_maker = lambda: [rng(shape, dtype)]
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
      self._CompileAndCheck(jnp_fun, args_maker)
def testRepeatScalarFastPath(self):
a = jnp.array([1,2,3,4])
f = lambda a: jnp.repeat(a, repeats=2)
jaxpr = jax.make_jaxpr(f)(a)
self.assertLessEqual(len(jaxpr.jaxpr.eqns), 6)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_ind={}_inv={}_count={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis,
return_index, return_inverse, return_counts),
"shape": shape, "dtype": dtype, "axis": axis,
"return_index": return_index, "return_inverse": return_inverse,
"return_counts": return_counts}
for dtype in number_dtypes
for shape in all_shapes
for axis in [None] + list(range(len(shape)))
for return_index in [False, True]
for return_inverse in [False, True]
for return_counts in [False, True]))
def testUnique(self, shape, dtype, axis, return_index, return_inverse, return_counts):
if axis is not None and numpy_version < (1, 19) and np.empty(shape).size == 0:
self.skipTest("zero-sized axis in unique leads to error in older numpy.")
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda x: np.unique(x, return_index, return_inverse, return_counts, axis=axis)
jnp_fun = lambda x: jnp.unique(x, return_index, return_inverse, return_counts, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_size={}".format(
jtu.format_shape_dtype_string(shape, dtype), size),
"shape": shape, "dtype": dtype, "size": size}
for dtype in number_dtypes
for size in [1, 5, 10]
for shape in nonempty_array_shapes))
def testUniqueSize(self, shape, dtype, size):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
kwds = dict(return_index=True, return_inverse=True, return_counts=True)
def np_fun(x):
u, ind, inv, counts = jnp.unique(x, **kwds)
if size <= len(u):
u, ind, counts = u[:size], ind[:size], counts[:size]
else:
extra = size - len(u)
u = np.concatenate([u, np.full(extra, u[0], u.dtype)])
ind = np.concatenate([ind, np.full(extra, ind[0], ind.dtype)])
counts = np.concatenate([counts, np.zeros(extra, counts.dtype)])
return u, ind, inv, counts
jnp_fun = lambda x: jnp.unique(x, size=size, **kwds)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_fixed_size={}".format(fixed_size),
"fixed_size": fixed_size}
for fixed_size in [True, False]))
def testNonScalarRepeats(self, fixed_size):
'''
Following numpy test suite from `test_repeat` at
https://github.com/numpy/numpy/blob/main/numpy/core/tests/test_multiarray.py
'''
tol = 1e-5
def test_single(m, args_maker, repeats, axis):
lax_ans = jnp.repeat(m, repeats, axis)
numpy_ans = np.repeat(m, repeats, axis)
self.assertAllClose(lax_ans, numpy_ans, rtol=tol, atol=tol)
if fixed_size:
# Calculate expected size of the repeated axis.
rep_length = np.repeat(np.zeros_like(m), repeats, axis).shape[axis or 0]
jnp_fun = lambda arg, rep: jnp.repeat(
arg, repeats=rep, axis=axis, total_repeat_length=rep_length)
else:
jnp_fun = lambda arg: jnp.repeat(arg, repeats = repeats, axis=axis)
self._CompileAndCheck(jnp_fun, args_maker)
m = jnp.array([1,2,3,4,5,6])
if fixed_size:
args_maker = lambda: [m, repeats]
else:
args_maker = lambda: [m]
for repeats in [2, jnp.array([1,3,0,1,1,2]), jnp.array([1,3,2,1,1,2]), jnp.array([2])]:
test_single(m, args_maker, repeats, axis=None)
test_single(m, args_maker, repeats, axis=0)
m_rect = m.reshape((2,3))
if fixed_size:
args_maker = lambda: [m_rect, repeats]
else:
args_maker = lambda: [m_rect]
for repeats in [2, jnp.array([2,1]), jnp.array([2])]:
test_single(m_rect, args_maker, repeats, axis=0)
for repeats in [2, jnp.array([1,3,2]), jnp.array([2])]:
test_single(m_rect, args_maker, repeats, axis=1)
  def testIssue2330(self):
    '''
    Make sure return value of jnp.concatenate is a jax.ndarray and is side-effect safe
    '''
    def attempt_sideeffect(x):
      # Mutating the concatenated result must not leak back into the input.
      x = [x]
      x = jnp.concatenate(x)
      x -= 1.
      return x

    np_input = np.ones((1))
    jnp_input = jnp.ones((1))
    expected_np_input_after_call = np.ones((1))
    expected_jnp_input_after_call = jnp.ones((1))

    self.assertTrue(xla.type_is_device_array(jnp.concatenate([np_input])))

    attempt_sideeffect(np_input)
    attempt_sideeffect(jnp_input)

    # Inputs must be unchanged by the in-place-looking subtraction above.
    self.assertAllClose(np_input, expected_np_input_after_call)
    self.assertAllClose(jnp_input, expected_jnp_input_after_call)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_xshape=[{}]_yshape=[{}]_mode={}".format(
op,
jtu.format_shape_dtype_string(xshape, dtype),
jtu.format_shape_dtype_string(yshape, dtype),
mode),
"xshape": xshape, "yshape": yshape, "dtype": dtype, "mode": mode,
"jnp_op": getattr(jnp, op),
"np_op": getattr(np, op)}
for mode in ['full', 'same', 'valid']
for op in ['convolve', 'correlate']
for dtype in number_dtypes
for xshape in one_dim_array_shapes
for yshape in one_dim_array_shapes))
def testConvolutions(self, xshape, yshape, dtype, mode, jnp_op, np_op):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype)]
precision = lax.Precision.HIGHEST if jtu.device_under_test() == "tpu" else None
np_fun = partial(np_op, mode=mode)
jnp_fun = partial(jnp_op, mode=mode, precision=precision)
tol = {np.float16: 2e-1, np.float32: 1e-2, np.float64: 1e-14,
np.complex128: 1e-14}
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), axis,
out_dtype.__name__),
"axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"jnp_op": getattr(jnp, op), "np_op": getattr(np, op)}
for op in ["cumsum", "cumprod"]
for dtype in all_dtypes
for out_dtype in default_dtypes
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np_op(arg, axis=axis, dtype=out_dtype)
np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
jnp_fun = lambda arg: jnp_op(arg, axis=axis, dtype=out_dtype)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol_thresholds = {dtypes.bfloat16: 4e-2}
tol = max(jtu.tolerance(dtype, tol_thresholds),
jtu.tolerance(out_dtype, tol_thresholds))
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), axis,
out_dtype.__name__),
"axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"jnp_op": getattr(jnp, op), "np_op": getattr(np, op)}
for op in ["nancumsum", "nancumprod"]
for dtype in all_dtypes
for out_dtype in default_dtypes
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testNanCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):
rng = jtu.rand_some_nan(self.rng())
np_fun = partial(np_op, axis=axis, dtype=out_dtype)
np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
jnp_fun = partial(jnp_op, axis=axis, dtype=out_dtype)
jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
args_maker = lambda: [rng(shape, dtype)]
tol_thresholds = {dtypes.bfloat16: 4e-2}
tol = max(jtu.tolerance(dtype, tol_thresholds),
jtu.tolerance(out_dtype, tol_thresholds))
if dtype != jnp.bfloat16:
# numpy functions do not properly handle bfloat16
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_yshape={}_xshape={}_dx={}_axis={}".format(
jtu.format_shape_dtype_string(yshape, dtype),
jtu.format_shape_dtype_string(xshape, dtype) if xshape is not None else None,
dx, axis),
"yshape": yshape, "xshape": xshape, "dtype": dtype, "dx": dx, "axis": axis}
for dtype in default_dtypes
for yshape, xshape, dx, axis in [
((10,), None, 1.0, -1),
((3, 10), None, 2.0, -1),
((3, 10), None, 3.0, -0),
((10, 3), (10,), 1.0, -2),
((3, 10), (10,), 1.0, -1),
((3, 10), (3, 10), 1.0, -1),
((2, 3, 10), (3, 10), 1.0, -2),
]))
@jtu.skip_on_devices("tpu") # TODO(jakevdp): fix and reenable this test.
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testTrapz(self, yshape, xshape, dtype, dx, axis):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(yshape, dtype), rng(xshape, dtype) if xshape is not None else None]
np_fun = partial(np.trapz, dx=dx, axis=axis)
jnp_fun = partial(jnp.trapz, dx=dx, axis=axis)
tol = jtu.tolerance(dtype, {np.float64: 1e-12,
dtypes.bfloat16: 4e-2})
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol,
check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol,
check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
np.dtype(dtype).name, m, n, k),
"m": m, "n": n, "k": k, "dtype": dtype}
for dtype in default_dtypes
for n in [0, 4]
for m in [None, 0, 1, 3, 4]
for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype):
np_fun = lambda: np.tri(n, M=m, k=k, dtype=dtype)
jnp_fun = lambda: jnp.tri(n, M=m, k=k, dtype=dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_shape={}_k={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "op": op, "k": k}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for op in ["tril", "triu"]
for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: getattr(np, op)(arg, k=k)
jnp_fun = lambda arg: getattr(jnp, op)(arg, k=k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "n={}_k={}_m={}".format(n, k, m),
"n": n, "k": k, "m": m}
for n in range(1, 5)
for k in [-1, 0, 1]
for m in range(1, 5)))
def testTrilIndices(self, n, k, m):
np_fun = lambda n, k, m: np.tril_indices(n, k=k, m=m)
jnp_fun = lambda n, k, m: jnp.tril_indices(n, k=k, m=m)
args_maker = lambda: [n, k, m]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "n={}_k={}_m={}".format(n, k, m),
"n": n, "k": k, "m": m}
for n in range(1, 5)
for k in [-1, 0, 1]
for m in range(1, 5)))
def testTriuIndices(self, n, k, m):
np_fun = lambda n, k, m: np.triu_indices(n, k=k, m=m)
jnp_fun = lambda n, k, m: jnp.triu_indices(n, k=k, m=m)
args_maker = lambda: [n, k, m]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
for k in [-1, 0, 1]))
def testTriuIndicesFrom(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arr, k: np.triu_indices_from(arr, k=k)
jnp_fun = lambda arr, k: jnp.triu_indices_from(arr, k=k)
args_maker = lambda: [rng(shape, dtype), k]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
for k in [-1, 0, 1]))
def testTrilIndicesFrom(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arr, k: np.tril_indices_from(arr, k=k)
jnp_fun = lambda arr, k: jnp.tril_indices_from(arr, k=k)
args_maker = lambda: [rng(shape, dtype), k]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_n={}".format(ndim, n),
"ndim": ndim, "n": n}
for ndim in [0, 1, 4]
for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
np.testing.assert_equal(np.diag_indices(n, ndim),
jnp.diag_indices(n, ndim))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "arr_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)
),
"dtype": dtype, "shape": shape}
for dtype in default_dtypes
for shape in [(1,1), (2,2), (3,3), (4,4), (5,5)]))
def testDiagIndicesFrom(self, dtype, shape):
rng = jtu.rand_default(self.rng())
np_fun = np.diag_indices_from
jnp_fun = jnp.diag_indices_from
args_maker = lambda : [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.diag(arg, k)
jnp_fun = lambda arg: jnp.diag(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in all_shapes
for k in range(-4, 4)))
def testDiagFlat(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
# numpy has inconsistencies for scalar values
# https://github.com/numpy/numpy/issues/16477
# jax differs in that it treats scalars values as length-1 arrays
np_fun = lambda arg: np.diagflat(np.atleast_1d(arg), k)
jnp_fun = lambda arg: jnp.diagflat(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a1_shape={}_a2_shape2={}".format(
jtu.format_shape_dtype_string(a1_shape, dtype),
jtu.format_shape_dtype_string(a2_shape, dtype)),
"dtype": dtype, "a1_shape": a1_shape, "a2_shape": a2_shape}
for dtype in default_dtypes
for a1_shape in one_dim_array_shapes
for a2_shape in one_dim_array_shapes))
def testPolyMul(self, a1_shape, a2_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1, arg2: np.polymul(arg1, arg2)
jnp_fun_np = lambda arg1, arg2: jnp.polymul(arg1, arg2, trim_leading_zeros=True)
jnp_fun_co = lambda arg1, arg2: jnp.polymul(arg1, arg2)
args_maker = lambda: [rng(a1_shape, dtype), rng(a2_shape, dtype)]
tol = {np.float16: 2e-1, np.float32: 5e-2, np.float64: 1e-13}
self._CheckAgainstNumpy(np_fun, jnp_fun_np, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun_co, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
"dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
"axis2": axis2}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in [a for a in range(-len(shape), len(shape))
if a % len(shape) != axis1 % len(shape)]
for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.diagonal(arg, offset, axis1, axis2)
jnp_fun = lambda arg: jnp.diagonal(arg, offset, axis1, axis2)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(np.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
np_fun = lambda: np.identity(n, dtype)
jnp_fun = lambda: jnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_period={}_left={}_right={}".format(
jtu.format_shape_dtype_string(shape, dtype), period, left, right),
"shape": shape, "dtype": dtype,
"period": period, "left": left, "right": right}
for shape in nonempty_shapes
for period in [None, 0.59]
for left in [None, 0]
for right in [None, 1]
for dtype in default_dtypes
# following types lack precision for meaningful tests
if dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16]
))
def testInterp(self, shape, dtype, period, left, right):
rng = jtu.rand_default(self.rng(), scale=10)
kwds = dict(period=period, left=left, right=right)
np_fun = partial(np.interp, **kwds)
jnp_fun = partial(jnp.interp, **kwds)
args_maker = lambda: [rng(shape, dtype), np.sort(rng((20,), dtype)), np.linspace(0, 1, 20)]
# skip numpy comparison for integer types with period specified, because numpy
# uses an unstable sort and so results differ for duplicate values.
if not (period and np.issubdtype(dtype, np.integer)):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol={np.float32: 2E-4})
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x1={}_x2={}_x1_rng={}".format(
jtu.format_shape_dtype_string(x1_shape, x1_dtype),
jtu.format_shape_dtype_string(x2_shape, np.int32),
x1_rng_factory_id),
"x1_shape": x1_shape, "x1_dtype": x1_dtype,
"x2_shape": x2_shape, "x1_rng_factory": x1_rng_factory,
"x2_rng_factory": x2_rng_factory}
for x1_rng_factory_id, x1_rng_factory in
enumerate([jtu.rand_some_inf_and_nan, jtu.rand_some_zero])
for x2_rng_factory in [partial(jtu.rand_int, low=-1075, high=1024)]
for x1_shape, x2_shape in filter(_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(array_shapes, 2))
for x1_dtype in default_dtypes))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testLdexp(self, x1_shape, x1_dtype, x2_shape, x1_rng_factory, x2_rng_factory):
# integer types are converted to float64 in numpy's implementation
if (x1_dtype not in [jnp.bfloat16, np.float16, np.float32]
and not config.x64_enabled):
self.skipTest("Only run float64 testcase when float64 is enabled.")
x1_rng = x1_rng_factory(self.rng())
x2_rng = x2_rng_factory(self.rng())
np_fun = lambda x1, x2: np.ldexp(x1, x2)
np_fun = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(np_fun)
jnp_fun = lambda x1, x2: jnp.ldexp(x1, x2)
args_maker = lambda: [x1_rng(x1_shape, x1_dtype),
x2_rng(x2_shape, np.int32)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_rng_factory={}".format(
jtu.format_shape_dtype_string(shape, dtype), rng_factory_id),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for rng_factory_id, rng_factory in enumerate([
jtu.rand_some_inf_and_nan,
jtu.rand_some_zero,
partial(jtu.rand_not_small, offset=1e8),
])
for shape in all_shapes
for dtype in default_dtypes))
def testFrexp(self, shape, dtype, rng_factory):
# integer types are converted to float64 in numpy's implementation
if (dtype not in [jnp.bfloat16, np.float16, np.float32]
and not config.x64_enabled):
self.skipTest("Only run float64 testcase when float64 is enabled.")
rng = rng_factory(self.rng())
np_fun = lambda x: np.frexp(x)
jnp_fun = lambda x: jnp.frexp(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=np.issubdtype(dtype, np.inexact))
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype),
out_dtype, offset, axis1, axis2),
"dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
"axis1": axis1, "axis2": axis2}
for dtype in default_dtypes
for out_dtype in [None] + number_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in range(-len(shape), len(shape))
if (axis1 % len(shape)) != (axis2 % len(shape))
for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2):
rng = jtu.rand_default(self.rng())
def np_fun(arg):
if out_dtype == jnp.bfloat16:
return np.trace(arg, offset, axis1, axis2, np.float32).astype(jnp.bfloat16)
else:
return np.trace(arg, offset, axis1, axis2, out_dtype)
jnp_fun = lambda arg: jnp.trace(arg, offset, axis1, axis2, out_dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}_v={}_side={}".format(
jtu.format_shape_dtype_string(ashape, dtype),
jtu.format_shape_dtype_string(vshape, dtype),
side), "ashape": ashape, "vshape": vshape, "side": side,
"dtype": dtype}
for ashape in [(15,), (16,), (17,)]
for vshape in [(), (5,), (5, 5)]
for side in ['left', 'right']
for dtype in default_dtypes
))
def testSearchsorted(self, ashape, vshape, side, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [np.sort(rng(ashape, dtype)), rng(vshape, dtype)]
np_fun = lambda a, v: np.searchsorted(a, v, side=side)
jnp_fun = lambda a, v: jnp.searchsorted(a, v, side=side)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_bins={}_right={}_reverse={}".format(
jtu.format_shape_dtype_string(xshape, dtype),
jtu.format_shape_dtype_string(binshape, dtype),
right, reverse), "xshape": xshape, "binshape": binshape,
"right": right, "reverse": reverse, "dtype": dtype}
for xshape in [(20,), (5, 4)]
for binshape in [(1,), (5,)]
for right in [True, False]
for reverse in [True, False]
for dtype in default_dtypes
))
def testDigitize(self, xshape, binshape, right, reverse, dtype):
order = jax.ops.index[::-1] if reverse else jax.ops.index[:]
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(xshape, dtype), jnp.sort(rng(binshape, dtype))[order]]
np_fun = lambda x, bins: np.digitize(x, bins, right=right)
jnp_fun = lambda x, bins: jnp.digitize(x, bins, right=right)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_array={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
"shape": shape, "dtypes": dtypes, "array_input": array_input}
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 5)]
for array_input in [True, False]))
def testColumnStack(self, shape, dtypes, array_input):
rng = jtu.rand_default(self.rng())
if array_input:
args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
else:
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(np.column_stack)
jnp_fun = jnp.column_stack
self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_array={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis, array_input),
"shape": shape, "axis": axis, "dtypes": dtypes, "array_input": array_input}
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for array_input in [True, False]))
def testStack(self, shape, axis, dtypes, array_input):
rng = jtu.rand_default(self.rng())
if array_input:
args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
else:
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(partial(np.stack, axis=axis))
jnp_fun = partial(jnp.stack, axis=axis)
self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_{}_array={}".format(
op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
"shape": shape, "op": op, "dtypes": dtypes, "array_input": array_input}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
for array_input in [True, False]))
def testHVDStack(self, shape, op, dtypes, array_input):
rng = jtu.rand_default(self.rng())
if array_input:
args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
else:
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(getattr(np, op))
jnp_fun = getattr(jnp, op)
self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outdtype={}_fillshape={}".format(
jtu.format_shape_dtype_string(shape, fill_value_dtype),
np.dtype(out_dtype).name if out_dtype else "None",
fill_value_shape),
"fill_value_dtype": fill_value_dtype, "fill_value_shape": fill_value_shape,
"shape": shape, "out_dtype": out_dtype}
for shape in array_shapes + [3, np.array(7, dtype=np.int32)]
for fill_value_dtype in default_dtypes
for fill_value_shape in _compatible_shapes(shape)
for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, fill_value_shape, out_dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda fill_value: np.full(shape, fill_value, dtype=out_dtype)
jnp_fun = lambda fill_value: jnp.full(shape, fill_value, dtype=out_dtype)
args_maker = lambda: [rng(fill_value_shape, fill_value_dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "_shape={}_n={}_axis={}_prepend={}_append={}".format(
jtu.format_shape_dtype_string(shape, dtype),
n, axis, prepend, append),
"shape": shape, "dtype": dtype, "n": n, "axis": axis,
"prepend": prepend, "append": append
} for shape, dtype in s(_shape_and_dtypes(nonempty_nonscalar_array_shapes, default_dtypes))
for n in s([0, 1, 2])
for axis in s(list(range(-len(shape), max(1, len(shape)))))
for prepend in s([None, 1, np.zeros(shape, dtype=dtype)])
for append in s([None, 1, np.zeros(shape, dtype=dtype)])
)))
def testDiff(self, shape, dtype, n, axis, prepend, append):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
def np_fun(x, n=n, axis=axis, prepend=prepend, append=append):
if prepend is None:
prepend = np._NoValue
elif not np.isscalar(prepend) and prepend.dtype == jnp.bfloat16:
prepend = prepend.astype(np.float32)
if append is None:
append = np._NoValue
elif not np.isscalar(append) and append.dtype == jnp.bfloat16:
append = append.astype(np.float32)
if x.dtype == jnp.bfloat16:
return np.diff(x.astype(np.float32), n=n, axis=axis, prepend=prepend, append=append).astype(jnp.bfloat16)
else:
return np.diff(x, n=n, axis=axis, prepend=prepend, append=append)
jnp_fun = lambda x: jnp.diff(x, n=n, axis=axis, prepend=prepend, append=append)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(
      jtu.cases_from_list(
          {"testcase_name": ("_op={}_shape={}_dtype={}").format(op, shape, dtype),
           "np_op": getattr(np, op), "jnp_op": getattr(jnp, op),
           "shape": shape, "dtype": dtype}
          for op in ["zeros", "ones"]
          for shape in [2, (), (2,), (3, 0), np.array((4, 5, 6), dtype=np.int32),
                        np.array(4, dtype=np.int32)]
          for dtype in all_dtypes))
  def testZerosOnes(self, np_op, jnp_op, shape, dtype):
    """Check jnp.zeros/ones against numpy for scalar, tuple and array shapes."""
    args_maker = lambda: []
    np_op = partial(np_op, shape, dtype)
    jnp_op = partial(jnp_op, shape, dtype)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
def testOnesWithInvalidShape(self):
with self.assertRaises(TypeError):
jnp.ones((-1, 1))
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "_inshape={}_filldtype={}_fillshape={}_outdtype={}_outshape={}".format(
jtu.format_shape_dtype_string(shape, in_dtype),
np.dtype(fill_value_dtype).name, fill_value_shape,
np.dtype(out_dtype).name, out_shape),
"shape": shape, "in_dtype": in_dtype,
"fill_value_dtype": fill_value_dtype, "fill_value_shape": fill_value_shape,
"out_dtype": out_dtype, "out_shape": out_shape
} for shape in s(array_shapes)
for out_shape in s([None] + array_shapes)
for in_dtype in s(default_dtypes)
for fill_value_dtype in s(default_dtypes)
for fill_value_shape in s(_compatible_shapes(shape if out_shape is None else out_shape))
for out_dtype in s(default_dtypes))))
def testFullLike(self, shape, in_dtype, fill_value_dtype, fill_value_shape, out_dtype, out_shape):
if numpy_version < (1, 19) and out_shape == ():
raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
rng = jtu.rand_default(self.rng())
np_fun = lambda x, fill_value: np.full_like(
x, fill_value, dtype=out_dtype, shape=out_shape)
jnp_fun = lambda x, fill_value: jnp.full_like(
x, fill_value, dtype=out_dtype, shape=out_shape)
args_maker = lambda: [rng(shape, in_dtype), rng(fill_value_shape, fill_value_dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_func={}_inshape={}_outshape={}_outdtype={}".format(
func, jtu.format_shape_dtype_string(shape, in_dtype),
out_shape, out_dtype),
"func": func, "shape": shape, "in_dtype": in_dtype,
"out_shape": out_shape, "out_dtype": out_dtype}
for shape in array_shapes
for out_shape in [None] + array_shapes
for in_dtype in default_dtypes
for func in ["ones_like", "zeros_like"]
for out_dtype in default_dtypes))
def testZerosOnesLike(self, func, shape, in_dtype, out_shape, out_dtype):
if numpy_version < (1, 19) and out_shape == ():
raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
rng = jtu.rand_default(self.rng())
np_fun = lambda x: getattr(np, func)(x, dtype=out_dtype, shape=out_shape)
jnp_fun = lambda x: getattr(jnp, func)(x, dtype=out_dtype, shape=out_shape)
args_maker = lambda: [rng(shape, in_dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  # Weak-type propagation for *_like constructors: the result is weakly typed
  # exactly when the input is weak AND no explicit out_dtype was requested.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_func={}_inshape={}_weak_type={}_outshape={}_outdtype={}".format(
          func, jtu.format_shape_dtype_string(shape, in_dtype),
          weak_type, out_shape, out_dtype),
       "func": func, "args": args,
       "shape": shape, "in_dtype": in_dtype, "weak_type": weak_type,
       "out_shape": out_shape, "out_dtype": out_dtype}
      for shape in array_shapes
      for in_dtype in [np.int32, np.float32, np.complex64]
      for weak_type in [True, False]
      for out_shape in [None, (), (10,)]
      for func, args in [("full_like", (-100,)), ("ones_like", ()), ("zeros_like", ())]
      for out_dtype in [None, float]))
  def testZerosOnesFullLikeWeakType(self, func, args, shape, in_dtype, weak_type, out_shape, out_dtype):
    if numpy_version < (1, 19) and out_shape == ():
      raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
    rng = jtu.rand_default(self.rng())
    # Force the input's weak-type flag to the desired value.
    x = lax._convert_element_type(rng(shape, in_dtype), weak_type=weak_type)
    fun = lambda x: getattr(jnp, func)(x, *args, dtype=out_dtype, shape=out_shape)
    expected_weak_type = weak_type and (out_dtype is None)
    # Check both eager and jitted execution agree on weak-typedness.
    self.assertEqual(dtypes.is_weakly_typed(fun(x)), expected_weak_type)
    self.assertEqual(dtypes.is_weakly_typed(jax.jit(fun)(x)), expected_weak_type)
  # jnp.array/asarray produce a weakly-typed result only when no dtype is
  # given and the input is a Python scalar type registered in dtypes._weak_types.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_funcname={}_input_type={}_val={}_dtype={}".format(
          funcname, input_type, val, dtype),
       "funcname": funcname, "input_type": input_type, "val": val, "dtype": dtype}
      for funcname in ["array", "asarray"]
      for dtype in [int, float, None]
      for val in [0, 1]
      for input_type in [int, float, np.int32, np.float32]))
  def testArrayWeakType(self, funcname, input_type, val, dtype):
    func = lambda x: getattr(jnp, funcname)(x, dtype=dtype)
    fjit = jax.jit(func)
    val = input_type(val)
    expected_weak_type = dtype is None and input_type in set(dtypes._weak_types)
    # Eager and jitted results must agree.
    self.assertEqual(dtypes.is_weakly_typed(func(val)), expected_weak_type)
    self.assertEqual(dtypes.is_weakly_typed(fjit(val)), expected_weak_type)
  # Indexing/slicing preserves the operand's weak-type flag unchanged.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_weak_type={}_slc={}".format(
          jtu.format_shape_dtype_string(shape, dtype), weak_type, slc),
       "shape": shape, "dtype": dtype, "weak_type": weak_type, "slc": slc}
      for shape in nonempty_nonscalar_array_shapes
      for dtype in [int, float, complex]
      for weak_type in [True, False]
      for slc in [slice(None), slice(0), slice(3), 0, ...]))
  def testSliceWeakTypes(self, shape, dtype, weak_type, slc):
    rng = jtu.rand_default(self.rng())
    # Force the input's weak-type flag to the desired value.
    x = lax._convert_element_type(rng(shape, dtype), weak_type=weak_type)
    op = lambda x: x[slc]
    self.assertEqual(op(x).aval.weak_type, weak_type)
    self.assertEqual(jax.jit(op)(x).aval.weak_type, weak_type)
  # jnp.split with a static integer section count, on axes that divide evenly.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_{}sections".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
       "shape": shape, "num_sections": num_sections, "axis": axis,
       "dtype": dtype}
      for shape, axis, num_sections in [
          ((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
          ((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
      for dtype in default_dtypes))
  def testSplitStaticInt(self, shape, num_sections, axis, dtype):
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.split(x, num_sections, axis=axis)
    jnp_fun = lambda x: jnp.split(x, num_sections, axis=axis)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  # jnp.array_split with a static integer count; unlike split, uneven
  # divisions are permitted, so every case below divides the axis unequally.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_{}sections".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
       "shape": shape, "num_sections": num_sections, "axis": axis, "dtype": dtype}
      # All testcases split the specified axis unequally
      for shape, axis, num_sections in [
          ((3,), 0, 2), ((12,), 0, 5), ((12, 4), 0, 7), ((12, 4), 1, 3),
          ((2, 3, 5), -1, 2), ((2, 4, 4), -2, 3), ((7, 2, 2), 0, 3)]
      for dtype in default_dtypes))
  def testArraySplitStaticInt(self, shape, num_sections, axis, dtype):
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.array_split(x, num_sections, axis=axis)
    jnp_fun = lambda x: jnp.array_split(x, num_sections, axis=axis)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  def testSplitTypeError(self):
    # jnp.split's indices_or_sections must be concrete: ndarrays and concrete
    # (jvp) tracers are accepted, abstract (jit) tracers raise TypeError.
    # If we pass an ndarray for indices_or_sections -> no error
    self.assertEqual(3, len(jnp.split(jnp.zeros(3), jnp.array([1, 2]))))
    CONCRETIZATION_MSG = "Abstract tracer value encountered where concrete value is expected."
    with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):
      # An abstract tracer for idx
      jax.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), idx))(2.)
    with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):
      # A list including an abstract tracer
      jax.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), [2, idx]))(2.)
    # A concrete tracer -> no error
    jax.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), idx),
            (2.,), (1.,))
    # A tuple including a concrete tracer -> no error
    jax.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), (1, idx)),
            (2.,), (1.,))
  # jnp.histogram_bin_edges vs np for int-count and explicit-edge bins,
  # with and without weights and an explicit range.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_bins={}_range={}_weights={}".format(
          jtu.format_shape_dtype_string(shape, dtype), bins, range, weights),
       "shape": shape,
       "dtype": dtype,
       "bins": bins,
       "range": range,
       "weights": weights,
      }
      for shape in [(5,), (5, 5)]
      for dtype in number_dtypes
      for bins in [10, np.arange(-5, 6), np.array([-5, 0, 3])]
      for range in [None, (0, 0), (0, 10)]
      for weights in [True, False]
  ))
  def testHistogramBinEdges(self, shape, dtype, bins, range, weights):
    rng = jtu.rand_default(self.rng())
    # abs() keeps weights non-negative; None disables weighting entirely.
    _weights = lambda w: abs(w) if weights else None
    np_fun = lambda a, w, r: np.histogram_bin_edges(a, bins=bins, range=r,
                                                    weights=_weights(w))
    jnp_fun = lambda a, w, r: jnp.histogram_bin_edges(a, bins=bins, range=r,
                                                      weights=_weights(w))
    args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), range]
    tol = {jnp.bfloat16: 2E-2, np.float16: 1E-2}
    # linspace() compares poorly to numpy when using bfloat16
    if dtype != jnp.bfloat16:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker,
                          atol=tol, rtol=tol)
  # jnp.histogram vs np with explicit integer-valued bin edges, optionally
  # weighted and/or density-normalized.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_bins={}_density={}_weights={}".format(
          jtu.format_shape_dtype_string(shape, dtype), bins, density, weights),
       "shape": shape,
       "dtype": dtype,
       "bins": bins,
       "density": density,
       "weights": weights,
      }
      for shape in [(5,), (5, 5)]
      for dtype in default_dtypes
      # We only test explicit integer-valued bin edges because in other cases
      # rounding errors lead to flaky tests.
      for bins in [np.arange(-5, 6), np.array([-5, 0, 3])]
      for density in [True, False]
      for weights in [True, False]
  ))
  def testHistogram(self, shape, dtype, bins, density, weights):
    rng = jtu.rand_default(self.rng())
    # abs() keeps weights non-negative; None disables weighting entirely.
    _weights = lambda w: abs(w) if weights else None
    np_fun = lambda a, w: np.histogram(a, bins=bins, density=density,
                                         weights=_weights(w))
    jnp_fun = lambda a, w: jnp.histogram(a, bins=bins, density=density,
                                         weights=_weights(w))
    args_maker = lambda: [rng(shape, dtype), rng(shape, dtype)]
    tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
    # np.searchsorted errors on bfloat16 with
    # "TypeError: invalid type promotion with custom data type"
    if dtype != jnp.bfloat16:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                              tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker)
  # jnp.histogram2d vs np over int inputs, exercising scalar/pair/explicit-edge
  # bins, weights, density normalization, and partial/full range overrides.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_bins={}_weights={}_density={}_range={}".format(
          jtu.format_shape_dtype_string(shape, dtype), bins, weights, density, range),
       "shape": shape, "dtype": dtype, "bins": bins, "weights": weights, "density": density, "range": range,
      }
      for shape in [(5,), (12,)]
      for dtype in int_dtypes
      for bins in [2, [2, 2], [np.array([0, 1, 3, 5]), np.array([0, 2, 3, 4, 6])]]
      for weights in [False, True]
      for density in [False, True]
      for range in [None, [(-1, 1), None], [(-1, 1), (-2, 2)]]
  ))
  def testHistogram2d(self, shape, dtype, bins, weights, density, range):
    rng = jtu.rand_default(self.rng())
    # abs() keeps weights non-negative; None disables weighting entirely.
    _weights = lambda w: abs(w) if weights else None
    # Empty bins + density can divide by zero in numpy; silence its warning.
    np_fun = jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")(
      lambda a, b, w: np.histogram2d(a, b, bins=bins, weights=_weights(w), density=density, range=range))
    jnp_fun = lambda a, b, w: jnp.histogram2d(a, b, bins=bins, weights=_weights(w), density=density, range=range)
    args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), rng(shape, dtype)]
    tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
    # np.searchsorted errors on bfloat16 with
    # "TypeError: invalid type promotion with custom data type"
    with np.errstate(divide='ignore', invalid='ignore'):
      if dtype != jnp.bfloat16:
        self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                                tol=tol)
      self._CompileAndCheck(jnp_fun, args_maker)
  # jnp.histogramdd vs np on (N, 3) samples: per-dimension bin counts or
  # explicit edges, weights, density normalization, and range overrides.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_bins={}_weights={}_density={}_range={}".format(
          jtu.format_shape_dtype_string(shape, dtype), bins, weights, density, range),
       "shape": shape, "dtype": dtype, "bins": bins, "weights": weights, "density": density, "range": range,
      }
      for shape in [(5, 3), (10, 3)]
      for dtype in int_dtypes
      for bins in [(2, 2, 2), [np.array([-5, 0, 4]), np.array([-4, -1, 2]), np.array([-6, -1, 4])]]
      for weights in [False, True]
      for density in [False, True]
      for range in [None, [(-1, 1), None, None], [(-1, 1), (-2, 2), (-3, 3)]]
  ))
  def testHistogramdd(self, shape, dtype, bins, weights, density, range):
    rng = jtu.rand_default(self.rng())
    # abs() keeps weights non-negative; None disables weighting entirely.
    _weights = lambda w: abs(w) if weights else None
    # Empty bins + density can divide by zero in numpy; silence its warning.
    np_fun = jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")(
        lambda a, w: np.histogramdd(a, bins=bins, weights=_weights(w), density=density, range=range))
    jnp_fun = lambda a, w: jnp.histogramdd(a, bins=bins, weights=_weights(w), density=density, range=range)
    # Weights are per-sample, hence shape (shape[0],).
    args_maker = lambda: [rng(shape, dtype), rng((shape[0],), dtype)]
    tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
    # np.searchsorted errors on bfloat16 with
    # "TypeError: invalid type promotion with custom data type"
    if dtype != jnp.bfloat16:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                              tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype}
for shape, axis, num_sections in [
((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
for dtype in default_dtypes))
def testHVDSplit(self, shape, num_sections, axis, dtype):
rng = jtu.rand_default(self.rng())
def fn(module, axis):
if axis == 0:
return module.vsplit
elif axis == 1:
return module.hsplit
else:
assert axis == 2
return module.dsplit
np_fun = lambda x: fn(np, axis)(x, num_sections)
jnp_fun = lambda x: fn(jnp, axis)(x, num_sections)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  # jnp.reshape vs np.reshape across C/F order, scalar/empty inputs, integer
  # target shapes, and -1 (inferred) dimensions.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outshape={}_order={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype),
          jtu.format_shape_dtype_string(out_shape, dtype),
          order),
       "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
       "order": order}
      for dtype in default_dtypes
      for order in ["C", "F"]
      for arg_shape, out_shape in [
          (jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
          ((), (1, 1, 1)),
          ((7, 0), (0, 42, 101)),
          ((3, 4), 12),
          ((3, 4), (12,)),
          ((3, 4), -1),
          ((2, 1, 4), (-1,)),
          ((2, 2, 4), (2, 8))
      ]))
  def testReshape(self, arg_shape, out_shape, dtype, order):
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.reshape(x, out_shape, order=order)
    jnp_fun = lambda x: jnp.reshape(x, out_shape, order=order)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  # The ndarray.reshape(*shape) method form (unpacked dims) vs np.reshape.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outshape={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype),
          jtu.format_shape_dtype_string(out_shape, dtype)),
       "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
      for dtype in default_dtypes
      for arg_shape, out_shape in [
          ((7, 0), (0, 42, 101)),
          ((2, 1, 4), (-1,)),
          ((2, 2, 4), (2, 8))
      ]))
  def testReshapeMethod(self, arg_shape, out_shape, dtype):
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.reshape(x, out_shape)
    # Dimensions passed as separate positional args, not a tuple.
    jnp_fun = lambda x: x.reshape(*out_shape)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  # jnp.resize vs np.resize over all shape pairs (repeats/truncates data).
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outshape={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype),
          jtu.format_shape_dtype_string(out_shape, dtype)),
       "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
      for dtype in default_dtypes
      for arg_shape, out_shape in itertools.product(all_shapes, array_shapes)))
  def testResize(self, arg_shape, out_shape, dtype):
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.resize(x, out_shape)
    jnp_fun = lambda x: jnp.resize(x, out_shape)
    args_maker = lambda: [rng(arg_shape, dtype)]
    # numpy < 1.20 disagrees for 0-d output shapes; skip the comparison there.
    if len(out_shape) > 0 or numpy_version >= (1, 20, 0):
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  # jnp.expand_dims vs np for int, np.ndarray, tuple, and list axis specs,
  # including negative axes and multi-axis insertion.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_expanddim={!r}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype), dim),
       "arg_shape": arg_shape, "dtype": dtype, "dim": dim}
      for arg_shape in [(), (3,), (3, 4)]
      for dtype in default_dtypes
      for dim in (list(range(-len(arg_shape)+1, len(arg_shape)))
                  + [np.array(0), np.array(-1), (0,), [np.array(0)],
                     (len(arg_shape), len(arg_shape) + 1)])))
  def testExpandDimsStaticDim(self, arg_shape, dtype, dim):
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.expand_dims(x, dim)
    jnp_fun = lambda x: jnp.expand_dims(x, dim)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CompileAndCheck(jnp_fun, args_maker)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
"arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2}
for arg_shape, ax1, ax2 in [
((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.swapaxes(x, ax1, ax2)
jnp_fun = lambda x: jnp.swapaxes(x, ax1, ax2)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  # jnp.squeeze vs np for None, int, negative, np.ndarray, and tuple axes.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_axis={!r}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype), ax),
       "arg_shape": arg_shape, "dtype": dtype, "ax": ax}
      for arg_shape, ax in [
          ((3, 1), None),
          ((3, 1), 1),
          ((3, 1), -1),
          ((3, 1), np.array(1)),
          ((1, 3, 1), (0, 2)),
          ((1, 3, 1), (0,)),
          ((1, 4, 1), (np.array(0),))]
      for dtype in default_dtypes))
  def testSqueeze(self, arg_shape, dtype, ax):
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.squeeze(x, ax)
    jnp_fun = lambda x: jnp.squeeze(x, ax)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  # jnp.average vs np, with optional weights (per-axis or full-shape) and the
  # `returned` flag that also yields the sum of weights.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          axis,
          (None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),
          returned),
       "shape": shape, "dtype": dtype, "axis": axis,
       "weights_shape": weights_shape, "returned": returned}
      for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)
      for axis in list(range(-len(shape), len(shape))) + [None]
      # `weights_shape` is either `None`, same as the averaged axis, or same as
      # that of the input
      for weights_shape in ([None, shape] if axis is None or len(shape) == 1
                            else [None, (shape[axis],), shape])
      for returned in [False, True]))
  def testAverage(self, shape, dtype, axis, weights_shape, returned):
    rng = jtu.rand_default(self.rng())
    if weights_shape is None:
      np_fun = lambda x: np.average(x, axis, returned=returned)
      jnp_fun = lambda x: jnp.average(x, axis, returned=returned)
      args_maker = lambda: [rng(shape, dtype)]
    else:
      np_fun = lambda x, weights: np.average(x, axis, weights, returned)
      jnp_fun = lambda x, weights: jnp.average(x, axis, weights, returned)
      args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]
    # Match jnp's input-type promotion so results are comparable.
    np_fun = _promote_like_jnp(np_fun, inexact=True)
    tol = {dtypes.bfloat16: 2e-1, np.float16: 1e-2, np.float32: 1e-5,
           np.float64: 1e-12, np.complex64: 1e-5}
    check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
    try:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                              check_dtypes=check_dtypes, tol=tol)
    except ZeroDivisionError:
      # Random weights can sum to ~0; numpy raises in that case.
      self.skipTest("don't support checking for ZeroDivisionError")
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
                          rtol=tol, atol=tol)
  # jnp.array vs np.array on nested Python/numpy inputs, checking dtype
  # canonicalization and the ndmin argument.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       f"_arg{i}_ndmin={ndmin}_dtype={np.dtype(dtype) if dtype else None}",
       "arg": arg, "ndmin": ndmin, "dtype": dtype}
      for i, (arg, dtypes) in enumerate([
          ([True, False, True], all_dtypes),
          (3., all_dtypes),
          ([1, 2, 3], all_dtypes),
          (np.array([1, 2, 3], dtype=np.int64), all_dtypes),
          ([1., 2., 3.], all_dtypes),
          ([[1, 2], [3, 4], [5, 6]], all_dtypes),
          ([[1, 2.], [3, 4], [5, 6]], all_dtypes),
          ([[1., 2j], [3., 4.], [5., 6.]], complex_dtypes),
          ([[3, np.array(2, dtype=jnp.float_), 1],
           np.arange(3., dtype=jnp.float_)], all_dtypes),
      ])
      for dtype in [None] + dtypes
      for ndmin in [None, np.ndim(arg), np.ndim(arg) + 1, np.ndim(arg) + 2]))
  def testArray(self, arg, ndmin, dtype):
    args_maker = lambda: [arg]
    # np is given the canonicalized dtype jnp is expected to produce.
    canonical_dtype = dtypes.canonicalize_dtype(dtype or np.array(arg).dtype)
    if ndmin is not None:
      np_fun = partial(np.array, ndmin=ndmin, dtype=canonical_dtype)
      jnp_fun = partial(jnp.array, ndmin=ndmin, dtype=dtype)
    else:
      np_fun = partial(np.array, dtype=canonical_dtype)
      jnp_fun = partial(jnp.array, dtype=dtype)
    # We are testing correct canonicalization behavior here, so we turn off the
    # permissive canonicalization logic in the test harness.
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            canonicalize_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker)
def testArrayUnsupportedDtypeError(self):
with self.assertRaisesRegex(TypeError,
"JAX only supports number and bool dtypes.*"):
jnp.array(3, [('a','<i4'),('b','<i4')])
  def testArrayFromInteger(self):
    # Python ints at the extremes of the canonical int dtype convert cleanly;
    # one past the max raises OverflowError unless an explicit wider/unsigned
    # dtype is requested.
    int_dtype = dtypes.canonicalize_dtype(jnp.int64)
    int_max = jnp.iinfo(int_dtype).max
    int_min = jnp.iinfo(int_dtype).min
    # Values at extremes are converted correctly.
    for val in [int_min, 0, int_max]:
      self.assertEqual(jnp.array(val).dtype, int_dtype)
    # out of bounds leads to an OverflowError
    val = int_max + 1
    with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to {int_dtype.name}"):
      jnp.array(val)
    # explicit uint64 should work
    if config.x64_enabled:
      self.assertEqual(np.uint64(val), jnp.array(val, dtype='uint64'))
  # TODO(jakevdp): fix list inputs to jnp.array and enable the following test
  # def testArrayFromList(self):
  #   int_max = jnp.iinfo(jnp.int64).max
  #   int_min = jnp.iinfo(jnp.int64).min
  #
  #   # Values at extremes are converted correctly.
  #   for val in [int_min, 0, int_max]:
  #     self.assertEqual(jnp.array([val]).dtype, dtypes.canonicalize_dtype('int64'))
  #
  #   # list of values results in promoted type.
  #   self.assertEqual(jnp.array([0, np.float16(1)]).dtype, jnp.result_type('int64', 'float16'))
  #
  #   # out of bounds leads to an OverflowError
  #   val = int_min - 1
  #   with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to int64"):
  #     jnp.array([0, val])
def testIssue121(self):
assert not np.isscalar(jnp.array(3))
  def testArrayOutputsDeviceArrays(self):
    # jnp.array always returns a device array, whether the input is empty,
    # a numpy array, or an arbitrary object implementing __array__.
    assert xla.type_is_device_array(jnp.array([]))
    assert xla.type_is_device_array(jnp.array(np.array([])))
    class NDArrayLike:
      def __array__(self, dtype=None):
        return np.array([], dtype=dtype)
    assert xla.type_is_device_array(jnp.array(NDArrayLike()))
    # NOTE(mattjj): disabled b/c __array__ must produce ndarrays
    # class DeviceArrayLike:
    #     def __array__(self, dtype=None):
    #         return jnp.array([], dtype=dtype)
    # assert xla.type_is_device_array(jnp.array(DeviceArrayLike()))
def testArrayMethod(self):
class arraylike(object):
dtype = np.float32
def __array__(self, dtype=None):
return np.array(3., dtype=dtype)
a = arraylike()
ans = jnp.array(a)
assert ans == 3.
def testMemoryView(self):
ans = jnp.array(bytearray(b'\x2a'))
self.assertAllClose(
ans,
np.array([0x2a], dtype=np.uint8))
  def testIsClose(self):
    # jnp.isclose vs np.isclose over all pairings of finite, inf, -inf, and
    # nan inputs, eagerly and under jit, with and without equal_nan.
    c_isclose = jax.jit(jnp.isclose)
    c_isclose_nan = jax.jit(partial(jnp.isclose, equal_nan=True))
    n = 2
    rng = np.random.RandomState(0)
    x = rng.randn(n, 1)
    y = rng.randn(n, 1)
    inf = np.asarray(n * [np.inf]).reshape([n, 1])
    nan = np.asarray(n * [np.nan]).reshape([n, 1])
    args = [x, y, inf, -inf, nan]
    for arg0 in args:
      for arg1 in args:
        # Default behavior (NaN never close to NaN).
        result_np = np.isclose(arg0, arg1)
        result_jax = jnp.isclose(arg0, arg1)
        result_jit = c_isclose(arg0, arg1)
        self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
        self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
        # equal_nan=True: NaN compares close to NaN.
        result_np = np.isclose(arg0, arg1, equal_nan=True)
        result_jax = jnp.isclose(arg0, arg1, equal_nan=True)
        result_jit = c_isclose_nan(arg0, arg1)
        self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
        self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
  # jnp.allclose vs np.allclose over scalar/list/NaN inputs and equal_nan.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_x={}_y={}_equal_nan={}".format(x, y, equal_nan),
       "x": x, "y": y, "equal_nan": equal_nan}
      for x, y in itertools.product([
          1, [1], [1, 1 + 1E-4], [1, np.nan]], repeat=2)
      for equal_nan in [True, False]))
  def testAllClose(self, x, y, equal_nan):
    # rtol=1E-3 makes the 1 vs 1+1E-4 pair compare "close".
    jnp_fun = partial(jnp.allclose, equal_nan=equal_nan, rtol=1E-3)
    np_fun = partial(np.allclose, equal_nan=equal_nan, rtol=1E-3)
    args_maker = lambda: [np.array(x), np.array(y)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
def testZeroStridesConstantHandler(self):
raw_const = np.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
const = np.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
def fun(x):
return x * const
fun = jax.jit(fun)
out_val = fun(3.)
self.assertAllClose(out_val, 3. * const, check_dtypes=False)
  def testIsInstanceNdarrayDuringTracing(self):
    # Inside a jit trace, arguments still pass isinstance(x, jnp.ndarray).
    arr = np.ones(3)
    @jax.jit
    def f(x):
      self.assertIsInstance(x, jnp.ndarray)
      return jnp.sum(x)
    f(arr)
def testNonArrayErrorMessage(self):
x = [1., 2.]
y = np.array([3., 4.])
def g(x, y):
return jnp.add(x, y)
def f(x, y):
return jnp.dot(x, y)
self.assertRaises(TypeError, lambda: g(x, y))
self.assertRaises(TypeError, lambda: f(x, y))
self.assertRaises(TypeError, lambda: jax.jit(g)(x, y))
self.assertRaises(TypeError, lambda: jax.jit(f)(x, y))
  def testAbstractionErrorMessage(self):
    # Using a traced value where a concrete one is required raises the
    # appropriate specific error type.
    # Traced int used as a Python loop bound.
    @jax.jit
    def f(x, n):
      for _ in range(n):
        x = x * x
      return x
    self.assertRaises(jax.errors.TracerIntegerConversionError, lambda: f(3., 3))
    # Traced value used in a Python `if` condition.
    @jax.jit
    def g(x):
      if x > 0.:
        return x * 2
      else:
        return x + 2
    self.assertRaises(jax.errors.ConcretizationTypeError, lambda: g(3.))
  def testTracingPrimitiveWithNoTranslationErrorMessage(self):
    # A _not_implemented-wrapped function works eagerly but raises
    # NotImplementedError when traced under jit. Currently skipped.
    # TODO(mattjj): update this for jax3
    self.skipTest("test needs jax3 update")
    foo = jnp._not_implemented(lambda x: x)
    # No error if there's no tracing.
    foo(np.arange(3))
    cfoo = jax.jit(foo)
    self.assertRaises(NotImplementedError, lambda: cfoo(np.arange(3)))
  # jnp.flip vs np.flip over None, single (incl. negative), and tuple axes.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "shape": shape, "dtype": dtype, "axis": axis}
      for shape in [(3,), (2, 3)]
      for dtype in default_dtypes
      for axis in list(range(-len(shape), len(shape))) + [None] + [tuple(range(len(shape)))]  # Test negative axes and tuples
      ))
  def testFlip(self, shape, dtype, axis):
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    jnp_op = lambda x: jnp.flip(x, axis)
    np_op = lambda x: np.flip(x, axis)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(3,), (2, 3), (3, 2, 4)]
for dtype in default_dtypes))
def testFlipud(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flipud(x)
np_op = lambda x: np.flipud(x)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(3, 2), (2, 3), (3, 2, 4)]
for dtype in default_dtypes))
def testFliplr(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.fliplr(x)
np_op = lambda x: np.fliplr(x)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
  # jnp.rot90 vs np.rot90 for all rotation counts k in [-3, 3] and several
  # axis-pair orderings (order of axes determines rotation direction).
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_k={}_axes={}".format(
          jtu.format_shape_dtype_string(shape, dtype), k, axes),
       "shape": shape, "dtype": dtype, "k": k, "axes": axes}
      for shape, axes in [
          [(2, 3), (0, 1)],
          [(2, 3), (1, 0)],
          [(4, 3, 2), (0, 2)],
          [(4, 3, 2), (2, 1)],
      ]
      for k in range(-3, 4)
      for dtype in default_dtypes))
  def testRot90(self, shape, dtype, k, axes):
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    jnp_op = lambda x: jnp.rot90(x, k, axes)
    np_op = lambda x: np.rot90(x, k, axes)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
rng = np.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
self._CompileAndCheck(lambda x: x.ravel(), args_maker)
  # jnp.ravel_multi_index vs np across order/mode, with some deliberately
  # out-of-bounds indices; mode='raise' must fail under jit with a clear error.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_order={}_mode={}".format(
          shape, order, mode),
       "shape": shape, "order": order, "mode": mode}
      for shape in nonempty_nonscalar_array_shapes
      for order in ['C', 'F']
      for mode in ['wrap', 'clip', 'raise']))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testRavelMultiIndex(self, shape, order, mode):
    # generate indices in each dimension with a few out of bounds.
    rngs = [jtu.rand_int(self.rng(), low=-1, high=dim + 1)
            for dim in shape]
    # generate multi_indices of different dimensions that broadcast.
    args_maker = lambda: [tuple(rng(ndim * (3,), jnp.int_)
                                for ndim, rng in enumerate(rngs))]
    def np_fun(x):
      try:
        return np.ravel_multi_index(x, shape, order=order, mode=mode)
      except ValueError as err:
        if str(err).startswith('invalid entry'):
          # sentinel indicating expected error.
          return -999
        else:
          raise
    def jnp_fun(x):
      try:
        return jnp.ravel_multi_index(x, shape, order=order, mode=mode)
      except ValueError as err:
        if str(err).startswith('invalid entry'):
          # sentinel indicating expected error.
          return -999
        else:
          raise
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    if mode == 'raise':
      # mode='raise' requires concrete index values, so it cannot be jitted.
      msg = ("The error occurred because ravel_multi_index was jit-compiled "
             "with mode='raise'. Use mode='wrap' or mode='clip' instead.")
      with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
        jax.jit(jnp_fun)(*args_maker())
    else:
      self._CompileAndCheck(jnp_fun, args_maker)
  # jnp.choose vs np.choose over broadcastable choice shapes and all modes;
  # mode='raise' must fail under jit with a clear concretization error.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_ashape={}{}_cshapes={}{}_mode={}".format(
          adtype.__name__, ashape, cdtype.__name__, cshapes, mode),
       "ashape": ashape, "adtype": adtype, "cshapes": cshapes, "cdtype": cdtype, "mode": mode}
      for ashape in ((), (4,), (3, 4))
      for cshapes in [
        [(), (4,)],
        [(3, 4), (4,), (3, 1)]
      ]
      for adtype in int_dtypes
      for cdtype in default_dtypes
      for mode in ['wrap', 'clip', 'raise']))
  def testChoose(self, ashape, adtype, cshapes, cdtype, mode):
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(ashape, adtype), [rng(s, cdtype) for s in cshapes]]
    def np_fun(a, c):
      try:
        return np.choose(a, c, mode=mode)
      except ValueError as err:
        if mode == 'raise' and str(err).startswith('invalid entry'):
          return -999  # sentinel indicating expected error.
        else:
          raise
    def jnp_fun(a, c):
      try:
        return jnp.choose(a, c, mode=mode)
      except ValueError as err:
        if mode == 'raise' and str(err).startswith('invalid entry'):
          return -999  # sentinel indicating expected error.
        else:
          raise
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    if mode == 'raise':
      # mode='raise' requires concrete index values, so it cannot be jitted.
      msg = ("The error occurred because jnp.choose was jit-compiled"
             " with mode='raise'. Use mode='wrap' or mode='clip' instead.")
      with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
        jax.jit(jnp_fun)(*args_maker())
    else:
      self._CompileAndCheck(jnp_fun, args_maker)
  # jnp.unravel_index vs np for scalar and array flat indices.
  @parameterized.parameters(
      (0, (2, 1, 3)),
      (5, (2, 1, 3)),
      (0, ()),
      (np.array([0, 1, 2]), (2, 2)),
      (np.array([[[0, 1], [2, 3]]]), (2, 2)))
  def testUnravelIndex(self, flat_index, shape):
    args_maker = lambda: (flat_index, shape)
    self._CheckAgainstNumpy(np.unravel_index, jnp.unravel_index,
                            args_maker)
    self._CompileAndCheck(jnp.unravel_index, args_maker)
def testUnravelIndexOOB(self):
self.assertEqual(jnp.unravel_index(2, (2,)), (1,))
self.assertEqual(jnp.unravel_index(-2, (2, 1, 3,)), (1, 0, 1))
self.assertEqual(jnp.unravel_index(-3, (2,)), (0,))
def testAstype(self):
rng = np.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
np_op = lambda x: np.asarray(x).astype(jnp.int32)
jnp_op = lambda x: jnp.asarray(x).astype(jnp.int32)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in array_shapes
for dtype in all_dtypes))
def testNbytes(self, shape, dtype):
rng = jtu.rand_default(self.rng())
np_op = lambda x: np.asarray(x).nbytes
jnp_op = lambda x: jnp.asarray(x).nbytes
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
  # ndarray.view(dtype) reinterprets the underlying bytes; compare with numpy
  # across all dtype pairs (last dim = 8 keeps total bytes divisible).
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_dtype={}".format(
          jtu.format_shape_dtype_string(shape, a_dtype), dtype),
       "shape": shape, "a_dtype": a_dtype, "dtype": dtype}
      for shape in [(8,), (3, 8)]  # last dim = 8 to ensure shape compatibility
      for a_dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)
      for dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)))
  def testView(self, shape, a_dtype, dtype):
    if jtu.device_under_test() == 'tpu':
      if jnp.dtype(a_dtype).itemsize in [1, 2] or jnp.dtype(dtype).itemsize in [1, 2]:
        self.skipTest("arr.view() not supported on TPU for 8- or 16-bit types.")
    if not config.x64_enabled:
      if jnp.dtype(a_dtype).itemsize == 8 or jnp.dtype(dtype).itemsize == 8:
        self.skipTest("x64 types are disabled by jax_enable_x64")
    # rand_fullrange covers the full bit patterns of the source dtype.
    rng = jtu.rand_fullrange(self.rng())
    args_maker = lambda: [rng(shape, a_dtype)]
    np_op = lambda x: np.asarray(x).view(dtype)
    jnp_op = lambda x: jnp.asarray(x).view(dtype)
    # Above may produce signaling nans; ignore warnings from invalid values.
    with np.errstate(invalid='ignore'):
      self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
      self._CompileAndCheck(jnp_op, args_maker)
  def testPathologicalFloats(self):
    # Round-tripping uint32 bit patterns through a float32 view must preserve
    # every special value: infs, quiet/signaling/nonstandard NaNs, signed zeros.
    args_maker = lambda: [np.array([
      0b_0111_1111_1000_0000_0000_0000_0000_0000, # inf
      0b_1111_1111_1000_0000_0000_0000_0000_0000, # -inf
      0b_0111_1111_1100_0000_0000_0000_0000_0000, # qnan
      0b_1111_1111_1100_0000_0000_0000_0000_0000, # -qnan
      0b_0111_1111_1000_0000_0000_0000_0000_0001, # snan
      0b_1111_1111_1000_0000_0000_0000_0000_0001, # -snan
      0b_0111_1111_1000_0000_0000_1100_0000_0000, # nonstandard nan
      0b_1111_1111_1000_0000_0000_1100_0000_0000, # -nonstandard nan
      0b_0000_0000_0000_0000_0000_0000_0000_0000, # zero
      0b_1000_0000_0000_0000_0000_0000_0000_0000, # -zero
    ], dtype='uint32')]
    np_op = lambda x: np.asarray(x).view('float32').view('uint32')
    jnp_op = lambda x: jnp.asarray(x).view('float32').view('uint32')
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test other ndarray-like method overrides
def testNpMean(self):
  """np.mean must accept a JAX array argument (regression, issue #125)."""
  # from https://github.com/google/jax/issues/125
  jax_array = lax.add(jnp.eye(3, dtype=float), 0.)
  result = np.mean(jax_array)
  self.assertAllClose(result, np.array(1./3), check_dtypes=False)
def testArangeOnFloats(self):
  """jnp.arange with float arguments matches np.arange (issues #145, #3450)."""
  # from https://github.com/google/jax/issues/145
  expected = np.arange(0.0, 1.0, 0.1, dtype=jnp.float_)
  self.assertAllClose(expected, jnp.arange(0.0, 1.0, 0.1))
  # from https://github.com/google/jax/issues/3450
  expected = np.arange(2.5, dtype=jnp.float_)
  self.assertAllClose(expected, jnp.arange(2.5))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for dtype in all_dtypes
    for shape in nonzerodim_shapes
    for axis in (None, *range(len(shape)))))
def testSort(self, dtype, shape, axis):
  """jnp.sort matches np.sort across dtypes/shapes, per-axis and flattened."""
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_fun = jnp.sort
  np_fun = np.sort
  # axis=None means "sort the flattened array"; only bind axis when given.
  if axis is not None:
    jnp_fun = partial(jnp_fun, axis=axis)
    np_fun = partial(np_fun, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for dtype in all_dtypes
    for shape in one_dim_array_shapes
    for axis in [None]))
def testSortComplex(self, dtype, shape, axis):
  """jnp.sort_complex matches np.sort_complex on 1-D inputs.

  check_dtypes=False: np.sort_complex promotes to a complex dtype whose
  width may differ from jnp's result.
  """
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np.sort_complex, jnp.sort_complex, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp.sort_complex, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_input_type={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        input_type.__name__, axis),
     "shape": shape, "dtype": dtype, "input_type": input_type, "axis": axis}
    for dtype in all_dtypes
    for shape in nonempty_nonscalar_array_shapes
    for input_type in [np.array, tuple]
    for axis in (-1, *range(len(shape) - 1))))
def testLexsort(self, dtype, shape, input_type, axis):
  """jnp.lexsort matches np.lexsort for array and tuple key sequences."""
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [input_type(rng(shape, dtype))]
  jnp_op = lambda x: jnp.lexsort(x, axis=axis)
  np_op = lambda x: np.lexsort(x, axis=axis)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for dtype in all_dtypes
    for shape in nonzerodim_shapes
    for axis in (None, *range(len(shape)))))
def testArgsort(self, dtype, shape, axis):
  """jnp.argsort matches np.argsort; inputs contain ties (rand_some_equal),
  so agreement here also exercises tie-breaking order."""
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_fun = jnp.argsort
  np_fun = np.argsort
  # axis=None argsorts the flattened array; only bind axis when given.
  if axis is not None:
    jnp_fun = partial(jnp_fun, axis=axis)
    np_fun = partial(np_fun, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for dtype in all_dtypes
    for shape in nonzerodim_shapes))
def testMsort(self, dtype, shape):
  """jnp.msort (sort along the first axis) matches np.msort."""
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np.msort, jnp.msort, args_maker)
  self._CompileAndCheck(jnp.msort, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_shifts={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        shifts, axis),
     "shape": shape, "dtype": dtype, "shifts": shifts, "axis": axis}
    for dtype in all_dtypes
    for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
    for shifts, axis in [
        (3, None),
        (1, 1),
        ((3,), (0,)),
        ((-2,), (-2,)),
        ((1, 2), (0, -1)),
        ((4, 2, 5, 5, 2, 4), None),
        (100, None),  # shift larger than the array size wraps around
    ]))
def testRoll(self, shape, dtype, shifts, axis):
  """jnp.roll matches np.roll, including tuple shifts/axes, negative shifts,
  shifts exceeding the array length, and zero-size axes."""
  rng = jtu.rand_default(self.rng())
  # The shift is passed as an array to exercise non-static shift values.
  args_maker = lambda: [rng(shape, dtype), np.array(shifts)]
  jnp_op = partial(jnp.roll, axis=axis)
  np_op = partial(np.roll, axis=axis)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_start={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        axis, start),
     "shape": shape, "dtype": dtype, "axis": axis,
     "start": start}
    for dtype in all_dtypes
    for shape in [(1, 2, 3, 4)]
    for axis in [-3, 0, 2, 3]
    for start in [-4, -1, 2, 4]))
def testRollaxis(self, shape, dtype, start, axis):
  """jnp.rollaxis matches np.rollaxis for positive and negative axis/start."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_op = partial(jnp.rollaxis, axis=axis, start=start)
  np_op = partial(np.rollaxis, axis=axis, start=start)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_bitorder={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, bitorder),
     "shape": shape, "dtype": dtype, "axis": axis,
     "bitorder": bitorder}
    for dtype in [np.uint8, np.bool_]
    for bitorder in ['big', 'little']
    for shape in [(1, 2, 3, 4)]
    for axis in [None, 0, 1, -2, -1]))
def testPackbits(self, shape, dtype, axis, bitorder):
  """jnp.packbits matches np.packbits for both bit orders and all axes."""
  # Zeros in the input exercise padding of partially-filled output bytes.
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_op = partial(jnp.packbits, axis=axis, bitorder=bitorder)
  np_op = partial(np.packbits, axis=axis, bitorder=bitorder)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_bitorder={}_count={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, bitorder, count),
     "shape": shape, "dtype": dtype, "axis": axis, "bitorder": bitorder,
     "count": count}
    for dtype in [np.uint8]
    for bitorder in ['big', 'little']
    for shape in [(1, 2, 3, 4)]
    for axis in [None, 0, 1, -2, -1]
    for count in [None, 20]))
def testUnpackbits(self, shape, dtype, axis, bitorder, count):
  """jnp.unpackbits matches np.unpackbits for both bit orders and all axes.

  NOTE(review): the parameterized `count` is never forwarded to either op —
  presumably intentional (count handling tested elsewhere) but worth
  confirming; as written the count=None and count=20 cases are identical.
  """
  rng = jtu.rand_int(self.rng(), 0, 256)  # full uint8 value range
  args_maker = lambda: [rng(shape, dtype)]
  jnp_op = partial(jnp.unpackbits, axis=axis, bitorder=bitorder)
  np_op = partial(np.unpackbits, axis=axis, bitorder=bitorder)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_index={}_axis={}_mode={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        jtu.format_shape_dtype_string(index_shape, index_dtype),
        axis, mode),
     "shape": shape, "index_shape": index_shape, "dtype": dtype,
     "index_dtype": index_dtype, "axis": axis, "mode": mode}
    for shape in [(3,), (3, 4), (3, 4, 5)]
    for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
    for axis in itertools.chain(range(-len(shape), len(shape)),
                                [cast(Optional[int], None)])
    for dtype in all_dtypes
    for index_dtype in int_dtypes
    for mode in [None, 'wrap', 'clip']))
def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode):
  """jnp.take matches np.take across axes, index dtypes, and OOB modes."""
  def args_maker():
    # Closes over rng/rng_indices assigned below, before the first call.
    x = rng(shape, dtype)
    i = rng_indices(index_shape, index_dtype)
    return x, i
  rng = jtu.rand_default(self.rng())
  if mode is None:
    # Default mode has no out-of-bounds semantics, so keep indices in range.
    rng_indices = jtu.rand_int(self.rng(), -shape[axis or 0], shape[axis or 0])
  else:
    # 'wrap'/'clip' modes: deliberately allow out-of-bounds indices.
    rng_indices = jtu.rand_int(self.rng(), -5, 5)
  jnp_op = lambda x, i: jnp.take(x, i, axis=axis, mode=mode)
  np_op = lambda x, i: np.take(x, i, axis=axis, mode=mode)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
def testTakeEmpty(self):
  """jnp.take with empty indices yields empty output; taking a non-empty
  index set from a zero-length axis raises IndexError."""
  empty_indices = jnp.array([], jnp.int32)
  np.testing.assert_array_equal(
      jnp.array([], dtype=jnp.float32),
      jnp.take(jnp.array([], jnp.float32), empty_indices))
  np.testing.assert_array_equal(
      jnp.ones((2, 0, 4), dtype=jnp.float32),
      jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32), empty_indices,
               axis=1))
  with self.assertRaisesRegex(IndexError, "non-empty jnp.take"):
    jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32),
             jnp.array([0], jnp.int32), axis=1)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_index={}_axis={}".format(
        jtu.format_shape_dtype_string(x_shape, dtype),
        jtu.format_shape_dtype_string(i_shape, index_dtype), axis),
     "x_shape": x_shape, "i_shape": i_shape, "dtype": dtype,
     "index_dtype": index_dtype, "axis": axis}
    for x_shape, i_shape in filter(
        _shapes_are_equal_length,
        filter(_shapes_are_broadcast_compatible,
               itertools.combinations_with_replacement(nonempty_nonscalar_array_shapes, 2)))
    for axis in itertools.chain(range(len(x_shape)), [-1],
                                [cast(Optional[int], None)])
    for dtype in default_dtypes
    for index_dtype in int_dtypes))
def testTakeAlongAxis(self, x_shape, i_shape, dtype, index_dtype, axis):
  """jnp.take_along_axis matches np.take_along_axis for broadcastable
  value/index shapes, signed and unsigned index dtypes, and axis=None."""
  rng = jtu.rand_default(self.rng())
  i_shape = np.array(i_shape)
  if axis is None:
    # axis=None operates on the flattened array, so flatten the index shape.
    i_shape = [np.prod(i_shape, dtype=np.int64)]
  else:
    # Test the case where the size of the axis doesn't necessarily broadcast.
    i_shape[axis] *= 3
    i_shape = list(i_shape)
  def args_maker():
    x = rng(x_shape, dtype)
    n = np.prod(x_shape, dtype=np.int32) if axis is None else x_shape[axis]
    # Unsigned index dtypes can't represent negative indices.
    if np.issubdtype(index_dtype, np.unsignedinteger):
      index_rng = jtu.rand_int(self.rng(), 0, n)
    else:
      index_rng = jtu.rand_int(self.rng(), -n, n)
    i = index_rng(i_shape, index_dtype)
    return x, i
  jnp_op = lambda x, i: jnp.take_along_axis(x, i, axis=axis)
  # Older numpy versions lack take_along_axis; skip the comparison there.
  if hasattr(np, "take_along_axis"):
    np_op = lambda x, i: np.take_along_axis(x, i, axis=axis)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
def testTakeAlongAxisWithUint8IndicesDoesNotOverflow(self):
  """uint8 indices must not overflow in take_along_axis (issue #5088)."""
  # https://github.com/google/jax/issues/5088
  values = jtu.rand_default(self.rng())((256, 256, 100), np.float32)
  indices = jtu.rand_int(self.rng(), 0, 100)((256, 256, 1), np.uint8)
  actual = jnp.take_along_axis(values, indices, axis=-1)
  expected = np.take_along_axis(values, indices, axis=-1)
  np.testing.assert_equal(actual, expected)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_n={}_increasing={}".format(
        jtu.format_shape_dtype_string([shape], dtype),
        n, increasing),
     "dtype": dtype, "shape": shape, "n": n, "increasing": increasing}
    for dtype in inexact_dtypes
    for shape in [0, 5]  # includes the empty-input case
    for n in [2, 4]
    for increasing in [False, True]))
def testVander(self, shape, dtype, n, increasing):
  """jnp.vander matches np.vander for both column orderings."""
  rng = jtu.rand_default(self.rng())
  def np_fun(arg):
    # numpy can't compute with bfloat16; use float32 for the reference.
    arg = arg.astype(np.float32) if dtype == jnp.bfloat16 else arg
    return np.vander(arg, N=n, increasing=increasing)
  jnp_fun = lambda arg: jnp.vander(arg, N=n, increasing=increasing)
  args_maker = lambda: [rng([shape], dtype)]
  # np.vander seems to return float64 for all floating types. We could obey
  # those semantics, but they seem like a bug.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                          tol={np.float32: 1e-3})
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": jtu.format_test_name_suffix(
        "nan_to_num", [shape], [dtype]),
     "shape": shape, "dtype": dtype}
    for shape in array_shapes
    for dtype in inexact_dtypes))
def testNanToNum(self, shape, dtype):
  """jnp.nan_to_num matches numpy: nan -> 0, +/-inf -> dtype max/min."""
  rng = jtu.rand_some_inf_and_nan(self.rng())
  dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type
  def np_fun(x):
    if dtype == jnp.bfloat16:
      # np.nan_to_num doesn't support bfloat16; emulate it manually.
      x = np.where(np.isnan(x), dtype(0), x)
      x = np.where(np.isposinf(x), jnp.finfo(dtype).max, x)
      x = np.where(np.isneginf(x), jnp.finfo(dtype).min, x)
      return x
    else:
      return np.nan_to_num(x).astype(dtype)
  args_maker = lambda: [rng(shape, dtype)]
  # Python-scalar inputs promote differently, so skip dtype checks for them.
  check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
  self._CheckAgainstNumpy(np_fun, jnp.nan_to_num, args_maker,
                          check_dtypes=check_dtypes)
  self._CompileAndCheck(jnp.nan_to_num, args_maker,
                        check_dtypes=check_dtypes)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
     "shapes": shapes, "dtypes": dtypes}
    for shapes, dtypes in (
        ((), ()),  # zero-argument call
        (((7,),), (np.int32,)),
        (((3,), (4,)), (np.int32, np.int32)),
        (((3,), (1,), (4,)), (np.int32, np.int32, np.int32)),
    )))
def testIx_(self, shapes, dtypes):
  """jnp.ix_ matches np.ix_ for 0..3 one-dimensional index arrays."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)
                        for shape, dtype in zip(shapes, dtypes)]
  self._CheckAgainstNumpy(np.ix_, jnp.ix_, args_maker)
  self._CompileAndCheck(jnp.ix_, args_maker)
@parameterized.named_parameters(
    jtu.cases_from_list(
        {"testcase_name": "_dimensions={}_dtype={}_sparse={}".format(
            dimensions, dtype, sparse),
         "dimensions": dimensions, "dtype": dtype, "sparse": sparse}
        for dimensions in [(), (2,), (3, 0), (4, 5, 6)]  # incl. empty/zero-size
        for dtype in number_dtypes
        for sparse in [True, False]))
def testIndices(self, dimensions, dtype, sparse):
  """jnp.indices matches np.indices in dense and sparse forms."""
  def args_maker(): return []  # all inputs are baked into the partials
  np_fun = partial(np.indices, dimensions=dimensions,
                   dtype=dtype, sparse=sparse)
  jnp_fun = partial(jnp.indices, dimensions=dimensions,
                    dtype=dtype, sparse=sparse)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
     "_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}_interpolation={}".format(
         op,
         jtu.format_shape_dtype_string(a_shape, a_dtype),
         jtu.format_shape_dtype_string(q_shape, q_dtype),
         axis, keepdims, interpolation),
     "a_rng": jtu.rand_some_nan,  # NaNs exercise the nan* variants
     "q_rng": q_rng, "op": op,
     "a_shape": a_shape, "a_dtype": a_dtype,
     "q_shape": q_shape, "q_dtype": q_dtype, "axis": axis,
     "keepdims": keepdims,
     "interpolation": interpolation}
    for (op, q_rng) in (
        ("percentile", partial(jtu.rand_uniform, low=0., high=100.)),
        ("quantile", partial(jtu.rand_uniform, low=0., high=1.)),
        ("nanpercentile", partial(jtu.rand_uniform, low=0., high=100.)),
        ("nanquantile", partial(jtu.rand_uniform, low=0., high=1.)),
    )
    for a_dtype in default_dtypes
    for a_shape, axis in (
        ((7,), None),
        ((47, 7), 0),
        ((4, 101), 1),
    )
    for q_dtype in [np.float32]
    for q_shape in scalar_shapes + [(4,)]
    for keepdims in [False, True]
    for interpolation in ['linear', 'lower', 'higher', 'nearest',
                          'midpoint']))
def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
                 axis, keepdims, interpolation):
  """jnp.{percentile,quantile,nanpercentile,nanquantile} match numpy for
  every interpolation mode, axis choice, and keepdims setting."""
  a_rng = a_rng(self.rng())
  q_rng = q_rng(self.rng())
  # "median"-style ops take no q argument; percentile/quantile take (a, q).
  if "median" in op:
    args_maker = lambda: [a_rng(a_shape, a_dtype)]
  else:
    args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]
  def np_fun(*args):
    # numpy can't compute with bfloat16; use float32 for the reference.
    args = [x if jnp.result_type(x) != jnp.bfloat16 else
            np.asarray(x, np.float32) for x in args]
    return getattr(np, op)(*args, axis=axis, keepdims=keepdims,
                           interpolation=interpolation)
  jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims,
                    interpolation=interpolation)
  # TODO(phawkins): we currently set dtype=False because we aren't as
  # aggressive about promoting to float64. It's not clear we want to mimic
  # Numpy here.
  tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
  tol = max(jtu.tolerance(a_dtype, tol_spec),
            jtu.tolerance(q_dtype, tol_spec))
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
     "_{}_a_shape={}_axis={}_keepdims={}".format(
         op, jtu.format_shape_dtype_string(a_shape, a_dtype),
         axis, keepdims),
     "op": op, "a_shape": a_shape, "a_dtype": a_dtype,
     "axis": axis,
     "keepdims": keepdims}
    for a_dtype in default_dtypes
    for a_shape, axis in (
        ((7,), None),
        ((47, 7), 0),
        ((4, 101), 1),
    )
    for keepdims in [False, True]
    for op in ["median", "nanmedian"]))
def testMedian(self, op, a_shape, a_dtype, axis, keepdims):
  """jnp.median/jnp.nanmedian match their numpy counterparts."""
  # nanmedian gets inputs containing NaNs; plain median gets clean data.
  if op == "median":
    a_rng = jtu.rand_default(self.rng())
  else:
    a_rng = jtu.rand_some_nan(self.rng())
  args_maker = lambda: [a_rng(a_shape, a_dtype)]
  def np_fun(*args):
    # numpy can't compute with bfloat16; use float32 for the reference.
    args = [x if jnp.result_type(x) != jnp.bfloat16 else
            np.asarray(x, np.float32) for x in args]
    return getattr(np, op)(*args, axis=axis, keepdims=keepdims)
  jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims)
  # TODO(phawkins): we currently set dtype=False because we aren't as
  # aggressive about promoting to float64. It's not clear we want to mimic
  # Numpy here.
  tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
  tol = jtu.tolerance(a_dtype, tol_spec)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in all_shapes for dtype in all_dtypes))
def testWhereOneArgument(self, shape, dtype):
  """One-argument jnp.where (nonzero-style) matches np.where eagerly, and
  works under jit when a static `size` is supplied."""
  rng = jtu.rand_some_zero(self.rng())
  np_fun = lambda x: np.where(x)
  # numpy warns on 0-d nonzero; the behavior itself is what we compare.
  np_fun = jtu.ignore_warning(
      category=DeprecationWarning,
      message="Calling nonzero on 0d arrays.*")(np_fun)
  jnp_fun = lambda x: jnp.where(x)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  # JIT compilation requires specifying a size statically. Full test of
  # this behavior is in testNonzeroSize().
  jnp_fun = lambda x: jnp.where(x, size=np.size(x) // 2)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
    "testcase_name": "_{}".format("_".join(
        jtu.format_shape_dtype_string(shape, dtype)
        for shape, dtype in zip(shapes, dtypes))),
    "shapes": shapes, "dtypes": dtypes
  } for shapes in s(filter(_shapes_are_broadcast_compatible,
                           itertools.combinations_with_replacement(all_shapes, 3)))
    for dtypes in s(itertools.combinations_with_replacement(all_dtypes, 3)))))
def testWhereThreeArgument(self, shapes, dtypes):
  """Three-argument jnp.where matches numpy under JAX's promotion rules."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, shapes, dtypes)
  def np_fun(cond, x, y):
    # Promote x/y the way jnp would before deferring to np.where.
    return _promote_like_jnp(partial(np.where, cond))(x, y)
  self._CheckAgainstNumpy(np_fun, jnp.where, args_maker)
  self._CompileAndCheck(jnp.where, args_maker)
def testWhereScalarPromotion(self):
  """A Python-int branch in jnp.where must not upcast the float32 branch."""
  result = jnp.where(jnp.array([True, False]), 3,
                     jnp.ones((2,), dtype=jnp.float32))
  self.assertEqual(result.dtype, np.dtype(np.float32))
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
    "testcase_name": jtu.format_test_name_suffix("", shapes, (np.bool_,) * n + dtypes),
    "shapes": shapes, "dtypes": dtypes
  } for n in s(range(1, 3))
    for shapes in s(filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(all_shapes, 2 * n + 1)))
    for dtypes in s(itertools.combinations_with_replacement(all_dtypes, n + 1)))))
def testSelect(self, shapes, dtypes):
  """jnp.select matches np.select for n conditions, n choices, and a default.

  shapes holds 2n+1 entries (n conditions, n choices, 1 default); dtypes
  holds n+1 entries (n choices, 1 default) — conditions are always bool.
  """
  rng = jtu.rand_default(self.rng())
  n = len(dtypes) - 1
  def args_maker():
    condlist = [rng(shape, np.bool_) for shape in shapes[:n]]
    choicelist = [rng(shape, dtype)
                  for shape, dtype in zip(shapes[n:-1], dtypes[:n])]
    default = rng(shapes[-1], dtypes[-1])
    return condlist, choicelist, default
  # TODO(phawkins): float32/float64 type mismatches
  def np_fun(condlist, choicelist, default):
    # numpy can't compute with bfloat16; promote choices and cast everything
    # to the common result type jnp would use.
    choicelist = [x if jnp.result_type(x) != jnp.bfloat16
                  else x.astype(np.float32) for x in choicelist]
    dtype = jnp.result_type(default, *choicelist)
    return np.select(condlist,
                     [np.asarray(x, dtype=dtype) for x in choicelist],
                     np.asarray(default, dtype=dtype))
  self._CheckAgainstNumpy(np_fun, jnp.select, args_maker,
                          check_dtypes=False)
  self._CompileAndCheck(jnp.select, args_maker,
                        rtol={np.float64: 1e-7, np.complex128: 1e-7})
def testIssue330(self):
  """jnp.full with an array-derived fill value must not crash (issue #330)."""
  filled = jnp.full((1, 1), jnp.array([1])[0])  # doesn't crash
  self.assertEqual(filled[0, 0], 1)
def testScalarDtypePromotion(self):
  """Adding a Python int to a float32 array keeps numpy's result dtype."""
  np_dtype = (1 + np.eye(1, dtype=np.float32)).dtype
  jnp_dtype = (1 + jnp.eye(1, dtype=jnp.float32)).dtype
  self.assertEqual(np_dtype, jnp_dtype)
def testSymmetrizeDtypePromotion(self):
  """(x + x.T) / 2 keeps the same dtype in jax as it does in numpy."""
  np_mat = np.eye(3, dtype=np.float32)
  np_dtype = ((np_mat + np_mat.T) / 2).dtype
  jnp_mat = jnp.eye(3, dtype=jnp.float32)
  jnp_dtype = ((jnp_mat + jnp_mat.T) / 2).dtype
  self.assertEqual(np_dtype, jnp_dtype)
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because
# introducing the convention 0 * inf = 0 leads to silently wrong results in
# some cases. See this comment for details:
# https://github.com/google/jax/issues/1052#issuecomment-514083352
# def testIssue347(self):
# # https://github.com/google/jax/issues/347
# def test_fail(x):
# x = jnp.sqrt(jnp.sum(x ** 2, axis=1))
# ones = jnp.ones_like(x)
# x = jnp.where(x > 0.5, x, ones)
# return jnp.sum(x)
# x = jnp.array([[1, 2], [3, 4], [0, 0]], dtype=jnp.float64)
# result = jax.grad(test_fail)(x)
# assert not np.any(np.isnan(result))
def testIssue453(self):
  """Fortran-order reshape agrees with numpy (issue #453)."""
  # https://github.com/google/jax/issues/453
  data = np.arange(6) + 1
  actual = jnp.reshape(data, (3, 2), order='F')
  expected = np.reshape(data, (3, 2), order='F')
  self.assertAllClose(actual, expected)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__),
     "pytype": pytype, "dtype": dtype, "op": op}
    for pytype, dtype in [(int, jnp.int_), (float, jnp.float_),
                          (bool, jnp.bool_), (complex, jnp.complex_)]
    for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
def testAtLeastNdLiterals(self, pytype, dtype, op):
  """atleast_{1,2,3}d accept bare Python scalars (int/float/bool/complex)."""
  # Fixes: https://github.com/google/jax/issues/634
  # Cast the numpy reference to jnp's canonical dtype for the scalar type.
  np_fun = lambda arg: getattr(np, op)(arg).astype(dtype)
  jnp_fun = lambda arg: getattr(jnp, op)(arg)
  args_maker = lambda: [pytype(2)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {
      "testcase_name": "_shape={}_dtype={}_weights={}_minlength={}_length={}".format(
          shape, dtype, weights, minlength, length
      ),
      "shape": shape,
      "dtype": dtype,
      "weights": weights,
      "minlength": minlength,
      "length": length}
    for shape in [(0,), (5,), (10,)]  # includes the empty-input case
    for dtype in int_dtypes
    for weights in [True, False]
    for minlength in [0, 20]
    for length in [None, 10]
))
def testBincount(self, shape, dtype, weights, minlength, length):
  """jnp.bincount matches np.bincount; the jnp-only `length` argument makes
  the output size static so the op is jittable."""
  rng = jtu.rand_positive(self.rng())
  args_maker = lambda: (rng(shape, dtype), (rng(shape, 'float32') if weights else None))
  np_fun = partial(np.bincount, minlength=minlength)
  jnp_fun = partial(jnp.bincount, minlength=minlength, length=length)
  # With a static length the result shape is fixed, so jit works.
  if length is not None:
    self._CompileAndCheck(jnp_fun, args_maker)
  # np.bincount has no `length` parameter, so only compare when it's None.
  if length is None:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
def testBincountNegative(self):
  """jnp.bincount on data containing negatives matches numpy run on the
  same data with negative values clipped to zero."""
  # Test that jnp.bincount ignores negative values.
  x_rng = jtu.rand_int(self.rng(), -100, 100)
  w_rng = jtu.rand_uniform(self.rng())
  shape = (1000,)
  x = x_rng(shape, 'int32')
  w = w_rng(shape, 'float32')
  clipped = np.array(x)
  clipped[clipped < 0] = 0
  weights = np.array(w)
  keep = clipped >= 0
  np_result = np.bincount(clipped[keep], weights[keep])
  jnp_result = jnp.bincount(x, w)
  self.assertAllClose(np_result, jnp_result, check_dtypes=False)
@parameterized.named_parameters(*jtu.cases_from_list(
    {"testcase_name": "_case={}".format(i),
     "input": input}
    for i, input in enumerate([
        3,
        [3],
        [np.array(3)],
        [np.array([3])],
        [[np.array(3)]],
        [[np.array([3])]],
        [3, 4, 5],
        [
            [np.eye(2, dtype=np.int32) * 2, np.zeros((2, 3), dtype=np.int32)],
            [np.ones((3, 2), dtype=np.int32), np.eye(3, dtype=np.int32) * 3],
        ],
        [np.array([1, 2, 3]), np.array([2, 3, 4]), 10],
        [np.ones((2, 2), dtype=np.int32), np.zeros((2, 2), dtype=np.int32)],
        [[np.array([1, 2, 3])], [np.array([2, 3, 4])]],
    ])))
def testBlock(self, input):
  """jnp.block matches np.block over scalars, nested lists, mixed
  scalar/array rows, and 2-D block matrices."""
  args_maker = lambda: [input]
  self._CheckAgainstNumpy(np.block, jnp.block, args_maker)
  self._CompileAndCheck(jnp.block, args_maker)
def testLongLong(self):
  """np.longlong arguments round-trip through jit as int64."""
  identity = jax.jit(lambda x: x)
  self.assertAllClose(np.int64(7), identity(np.longlong(7)))
@jtu.ignore_warning(category=UserWarning,
                    message="Explicitly requested dtype.*")
def testArange(self):
  """jnp.arange matches np.arange for int/float arguments, negative steps,
  and empty ranges, and produces a lazy iota rather than a materialized
  ndarray for simple integer ranges."""
  # test cases inspired by dask tests at
  # https://github.com/dask/dask/blob/main/dask/array/tests/test_creation.py#L92
  self.assertAllClose(jnp.arange(77),
                      np.arange(77, dtype=jnp.int_))
  self.assertAllClose(jnp.arange(2, 13),
                      np.arange(2, 13, dtype=jnp.int_))
  self.assertAllClose(jnp.arange(4, 21, 9),
                      np.arange(4, 21, 9, dtype=jnp.int_))
  self.assertAllClose(jnp.arange(53, 5, -3),
                      np.arange(53, 5, -3, dtype=jnp.int_))
  self.assertAllClose(jnp.arange(77, dtype=float),
                      np.arange(77, dtype=float))
  self.assertAllClose(jnp.arange(2, 13, dtype=int),
                      np.arange(2, 13, dtype=int))
  # Negative step with start < stop yields an empty range.
  self.assertAllClose(jnp.arange(0, 1, -0.5),
                      np.arange(0, 1, -0.5, dtype=jnp.float_))
  self.assertRaises(TypeError, lambda: jnp.arange())
  # test that jnp.arange(N) doesn't instantiate an ndarray
  self.assertNotEqual(type(jnp.arange(77)), type(np.arange(77)))
  self.assertEqual(type(jnp.arange(77)), type(lax.iota(np.int32, 77)))
  # test that jnp.arange(N, dtype=int32) doesn't instantiate an ndarray
  self.assertNotEqual(type(jnp.arange(77, dtype=jnp.int32)),
                      type(np.arange(77, dtype=np.int32)))
  self.assertEqual(type(jnp.arange(77, dtype=jnp.int32)),
                   type(lax.iota(np.int32, 77)))
def testArangeJit(self):
  """jnp.arange works inside a jitted function and matches numpy."""
  result = jax.jit(lambda: jnp.arange(5))()
  self.assertAllClose(result, np.arange(5))
def testIssue830(self):
  """jnp.arange honors an explicit complex64 dtype (issue #830)."""
  arr = jnp.arange(4, dtype=jnp.complex64)
  self.assertEqual(arr.dtype, jnp.complex64)
def testIssue728(self):
  """Large identity matrices are materialized correctly (issue #728)."""
  self.assertTrue(jnp.allclose(jnp.eye(5000), np.eye(5000)))
  diff = jnp.eye(1050) - np.eye(1050)
  self.assertEqual(0, np.sum(diff))
def testIssue746(self):
jnp.arange(12).reshape(3, 4) # doesn't crash
def testIssue764(self):
  """grad of sum(tanh) stays accurate deep in tanh's saturated tail
  (x in [190, 200]), where naive formulations underflow to nan/zero."""
  x = jnp.linspace(190, 200, 4)
  f = jax.grad(lambda x: jnp.sum(jnp.tanh(x)))
  # Expected values computed with autograd in float64 precision.
  expected = np.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
                       7.66067839e-174], np.float64)
  self.assertAllClose(f(x), expected, check_dtypes=False)
def testIssue776(self):
  """Tests that the scatter-add transpose rule instantiates symbolic zeros."""
  def f(u):
    y = jnp.ones(10).at[np.array([2, 4, 5])].add(u)
    # The transpose rule for lax.tie_in returns a symbolic zero for its first
    # argument.
    return lax.tie_in(y, 7.)
  # f's value is independent of u, so the gradient must be exactly zero.
  self.assertAllClose(np.zeros(3,), jax.grad(f)(np.ones(3,)))
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because this
# is a numerical stability issue that should be solved with a custom jvp rule
# of the sigmoid function being differentiated here, not by safe_mul.
# def testIssue777(self):
# x = jnp.linspace(-200, 0, 4, dtype=np.float32)
# f = jax.grad(lambda x: jnp.sum(1 / (1 + jnp.exp(-x))))
# self.assertAllClose(f(x), np.array([0., 0., 0., 0.25], dtype=np.float32))
@parameterized.named_parameters(
    jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
         "dtype": dtype, "op": op}
        for dtype in float_dtypes
        for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
                   "sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
                   "log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
  """Elementwise math ops agree with numpy at special values: nan, +/-inf,
  zero, and magnitudes near the dtype's representable maximum."""
  np_op = getattr(np, op)
  # These inputs legitimately trigger numpy runtime warnings; silence them
  # so the value comparison is what's under test.
  np_op = jtu.ignore_warning(category=RuntimeWarning,
                             message="invalid value.*")(np_op)
  np_op = jtu.ignore_warning(category=RuntimeWarning,
                             message="divide by zero.*")(np_op)
  np_op = jtu.ignore_warning(category=RuntimeWarning,
                             message="overflow.*")(np_op)
  jnp_op = getattr(jnp, op)
  dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type
  for x in (np.nan, -np.inf, -100., -2., -1., 0., 1., 2., 100., np.inf,
            jnp.finfo(dtype).max, np.sqrt(jnp.finfo(dtype).max),
            np.sqrt(jnp.finfo(dtype).max) * 2.):
    if (op in ("sin", "cos", "tan") and
        jtu.device_under_test() == "tpu"):
      continue  # TODO(b/132196789): fix and reenable.
    x = dtype(x)
    expected = np_op(x)
    actual = jnp_op(x)
    tol = jtu.tolerance(dtype, {np.float32: 1e-3, np.float64: 1e-7})
    self.assertAllClose(expected, actual, atol=tol,
                        rtol=tol)
def testIssue883(self):
  """Original repro for issue #883, kept for reference; the test is skipped
  because arrays were later disallowed as static args, so everything after
  the raise is intentionally unreachable."""
  # from https://github.com/google/jax/issues/883
  raise SkipTest("we decided to disallow arrays as static args")
  @partial(jax.jit, static_argnums=(1,))
  def f(x, v):
    return x
  x = jnp.ones((10, 10))
  v = jnp.array([1, 2, 3])
  _ = f(x, v)
  _ = f(x, v)  # doesn't crash
def testReductionOfOutOfBoundsAxis(self):  # Issue 888
  """Reducing over a nonexistent axis raises ValueError."""
  matrix = jnp.ones((3, 4))
  self.assertRaises(ValueError, lambda: jnp.sum(matrix, axis=2))
def testIssue956(self):
  """Directly instantiating jnp.ndarray is disallowed (issue #956)."""
  with self.assertRaises(TypeError):
    jnp.ndarray((1, 1))
@parameterized.named_parameters(
    jtu.cases_from_list(
        {"testcase_name":
         "_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
         .format(shape, dtype, out_dtype, axis, ddof, keepdims),
         "shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
         "ddof": ddof, "keepdims": keepdims}
        for shape in [(5,), (10, 5)]
        for dtype in all_dtypes
        for out_dtype in inexact_dtypes
        for axis in [None, 0, -1]
        for ddof in [0, 1, 2]
        for keepdims in [False, True]))
def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):
  """jnp.var matches np.var across axes/ddof/keepdims; requesting a real
  output dtype for complex input must raise ValueError."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  # ddof >= axis length produces this warning in numpy; it's expected here.
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.")
  def np_fun(x):
    # Compute in at-least-float32 precision, then cast to the target dtype.
    out = np.var(x.astype(jnp.promote_types(np.float32, dtype)),
                 axis=axis, ddof=ddof, keepdims=keepdims)
    return out.astype(out_dtype)
  jnp_fun = partial(jnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
  tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,
                                  np.float64: 1e-3, np.complex128: 1e-6})
  if (jnp.issubdtype(dtype, jnp.complexfloating) and
      not jnp.issubdtype(out_dtype, jnp.complexfloating)):
    self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))
  else:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
                          atol=tol)
@parameterized.named_parameters(
    jtu.cases_from_list(
        {"testcase_name":
         "_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
         .format(shape, dtype, out_dtype, axis, ddof, keepdims),
         "shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
         "ddof": ddof, "keepdims": keepdims}
        for shape in [(5,), (10, 5)]
        for dtype in all_dtypes
        for out_dtype in inexact_dtypes
        for axis in [None, 0, -1]
        for ddof in [0, 1, 2]
        for keepdims in [False, True]))
def testNanVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):
  """jnp.nanvar matches np.nanvar on NaN-containing inputs; requesting a
  real output dtype for complex input must raise ValueError.

  Mirrors testVar with a NaN-producing rng and the nanvar ops.
  """
  rng = jtu.rand_some_nan(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  # ddof >= count of non-NaN entries produces this warning; expected here.
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.")
  def np_fun(x):
    # Compute in at-least-float32 precision, then cast to the target dtype.
    out = np.nanvar(x.astype(jnp.promote_types(np.float32, dtype)),
                    axis=axis, ddof=ddof, keepdims=keepdims)
    return out.astype(out_dtype)
  jnp_fun = partial(jnp.nanvar, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
  tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,
                                  np.float64: 1e-3, np.complex128: 1e-6})
  if (jnp.issubdtype(dtype, jnp.complexfloating) and
      not jnp.issubdtype(out_dtype, jnp.complexfloating)):
    self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))
  else:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
                          atol=tol)
@parameterized.named_parameters(
    jtu.cases_from_list(
        {"testcase_name":
         "_shape={}_dtype={}_y_shape={}_y_dtype={}_rowvar={}_ddof={}_bias={}_fweights={}_aweights={}".format(
             shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights),
         "shape": shape, "y_shape": y_shape, "dtype": dtype, "y_dtype": y_dtype,"rowvar": rowvar, "ddof": ddof,
         "bias": bias, "fweights": fweights, "aweights": aweights}
        for shape in [(5,), (10, 5), (5, 10)]
        for dtype in all_dtypes
        for y_dtype in [None, dtype]
        for rowvar in [True, False]
        for y_shape in _get_y_shapes(y_dtype, shape, rowvar)
        for bias in [True, False]
        for ddof in [None, 2, 3]
        for fweights in [True, False]
        for aweights in [True, False]))
def testCov(self, shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights):
  """jnp.cov matches np.cov with an optional second variable set, rowvar
  orientation, ddof/bias settings, and frequency/observation weights."""
  rng = jtu.rand_default(self.rng())
  wrng = jtu.rand_positive(self.rng())  # weights must be positive
  wdtype = np.real(dtype(0)).dtype      # weights are always real-valued
  # Weight vector length follows the observation axis chosen by rowvar.
  wshape = shape[-1:] if rowvar or shape[0] == 1 else shape[:1]
  args_maker = lambda: [rng(shape, dtype),
                        rng(y_shape, y_dtype) if y_dtype else None,
                        wrng(wshape, int) if fweights else None,
                        wrng(wshape, wdtype) if aweights else None]
  kwargs = dict(rowvar=rowvar, ddof=ddof, bias=bias)
  np_fun = lambda m, y, f, a: np.cov(m, y, fweights=f, aweights=a, **kwargs)
  jnp_fun = lambda m, y, f, a: jnp.cov(m, y, fweights=f, aweights=a, **kwargs)
  tol = {jnp.bfloat16: 5E-2, np.float16: 1E-2, np.float32: 1e-5,
         np.float64: 1e-13, np.complex64: 1e-5, np.complex128: 1e-13}
  tol = 7e-2 if jtu.device_under_test() == "tpu" else tol
  tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))
  self._CheckAgainstNumpy(
      np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                        rtol=tol)
def testIssue967(self):
self.assertRaises(TypeError, lambda: jnp.zeros(1.5))
  @parameterized.named_parameters(
      jtu.cases_from_list(
          {"testcase_name": "_shape={}_dtype={}_rowvar={}".format(
              shape, dtype.__name__, rowvar),
           "shape": shape, "dtype": dtype, "rowvar": rowvar}
          for shape in [(5,), (10, 5), (3, 10)]
          for dtype in number_dtypes
          for rowvar in [True, False]))
  def testCorrCoef(self, shape, dtype, rowvar):
    """Check jnp.corrcoef against np.corrcoef."""
    rng = jtu.rand_default(self.rng())
    def args_maker():
      # Resample until no variable has ~zero standard deviation, which
      # would make the correlation coefficient ill-defined (division by ~0).
      ok = False
      while not ok:
        x = rng(shape, dtype)
        ok = not np.any(np.isclose(np.std(x), 0.0))
      return (x,)
    np_fun = partial(np.corrcoef, rowvar=rowvar)
    np_fun = jtu.ignore_warning(
      category=RuntimeWarning, message="invalid value encountered.*")(np_fun)
    jnp_fun = partial(jnp.corrcoef, rowvar=rowvar)
    tol = 1e-2 if jtu.device_under_test() == "tpu" else None  # TPU is less precise
    self._CheckAgainstNumpy(
        np_fun, jnp_fun, args_maker, check_dtypes=False,
        tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_{}".format(jtu.format_shape_dtype_string(shape, dtype),
        "None" if end_dtype is None else jtu.format_shape_dtype_string(end_shape, end_dtype),
        "None" if begin_dtype is None else jtu.format_shape_dtype_string(begin_shape, begin_dtype)),
       "shape": shape, "dtype": dtype, "end_shape": end_shape,
       "end_dtype": end_dtype, "begin_shape": begin_shape,
       "begin_dtype": begin_dtype}
      for dtype in number_dtypes
      for end_dtype in [None] + [dtype]
      for begin_dtype in [None] + [dtype]
      for shape in [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE]
      for begin_shape in (
        [None] if begin_dtype is None
        else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])
      for end_shape in (
        [None] if end_dtype is None
        else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])))
  def testEDiff1d(self, shape, dtype, end_shape, end_dtype, begin_shape,
                  begin_dtype):
    """Check jnp.ediff1d against np.ediff1d with optional to_end/to_begin."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype),
                          (None if end_dtype is None else rng(end_shape, end_dtype)),
                          (None if begin_dtype is None else rng(begin_shape, begin_dtype))]
    np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)
    jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
def testEDiff1dWithDtypeCast(self):
rng = jtu.rand_default(self.rng())
shape = jtu.NUMPY_SCALAR_SHAPE
dtype = jnp.float32
end_dtype = jnp.int32
args_maker = lambda: [rng(shape, dtype), rng(shape, end_dtype), rng(shape, dtype)]
np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)
jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": "_shapes={}_dtype={}_indexing={}_sparse={}".format(
            shapes, dtype, indexing, sparse),
         "shapes": shapes, "dtype": dtype, "indexing": indexing,
         "sparse": sparse}
        for shapes in [(), (5,), (5, 3)]
        for dtype in number_dtypes
        for indexing in ['xy', 'ij']
        for sparse in [True, False]))
  def testMeshGrid(self, shapes, dtype, indexing, sparse):
    """Check jnp.meshgrid against np.meshgrid for both indexing conventions."""
    rng = jtu.rand_default(self.rng())
    # Each entry of `shapes` is the length of one 1-D input coordinate array.
    args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],
                                    [dtype] * len(shapes))
    np_fun = partial(np.meshgrid, indexing=indexing, sparse=sparse)
    jnp_fun = partial(jnp.meshgrid, indexing=indexing, sparse=sparse)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  def testMgrid(self):
    """jnp.mgrid must match np.mgrid for integer, complex and float steps."""
    # Integer-step grids must match numpy exactly.
    assertAllEqual = partial(self.assertAllClose, atol=0, rtol=0)
    assertAllEqual(np.mgrid[:4], jnp.mgrid[:4])
    assertAllEqual(np.mgrid[:4,], jnp.mgrid[:4,])
    assertAllEqual(np.mgrid[:4], jax.jit(lambda: jnp.mgrid[:4])())
    assertAllEqual(np.mgrid[:5, :5], jnp.mgrid[:5, :5])
    assertAllEqual(np.mgrid[:3, :2], jnp.mgrid[:3, :2])
    assertAllEqual(np.mgrid[1:4:2], jnp.mgrid[1:4:2])
    assertAllEqual(np.mgrid[1:5:3, :5], jnp.mgrid[1:5:3, :5])
    assertAllEqual(np.mgrid[:3, :2, :5], jnp.mgrid[:3, :2, :5])
    assertAllEqual(np.mgrid[:3:2, :2, :5], jnp.mgrid[:3:2, :2, :5])
    # Corner cases
    assertAllEqual(np.mgrid[:], jnp.mgrid[:])
    # When the step length is a complex number, because of float calculation,
    # the values between jnp and np might be slightly different.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.mgrid[-1:1:5j],
                        jnp.mgrid[-1:1:5j],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.mgrid[3:4:7j],
                        jnp.mgrid[3:4:7j],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.mgrid[1:6:8j, 2:4],
                        jnp.mgrid[1:6:8j, 2:4],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.mgrid[0:3.5:0.5],
                        jnp.mgrid[0:3.5:0.5],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.mgrid[1.3:4.2:0.3],
                        jnp.mgrid[1.3:4.2:0.3],
                        atol=atol,
                        rtol=rtol)
    # abstract tracer value for jnp.mgrid slice: slice bounds must be
    # concrete, so a traced bound raises a ConcretizationTypeError.
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.mgrid"):
      jax.jit(lambda a, b: jnp.mgrid[a:b])(0, 2)
  def testOgrid(self):
    """jnp.ogrid must match np.ogrid, including multi-axis list results."""
    def assertListOfArraysEqual(xs, ys):
      # Multi-axis ogrid returns a Python list of arrays; compare pairwise.
      self.assertIsInstance(xs, list)
      self.assertIsInstance(ys, list)
      self.assertEqual(len(xs), len(ys))
      for x, y in zip(xs, ys):
        self.assertArraysEqual(x, y)
    self.assertArraysEqual(np.ogrid[:5], jnp.ogrid[:5])
    self.assertArraysEqual(np.ogrid[:5], jax.jit(lambda: jnp.ogrid[:5])())
    self.assertArraysEqual(np.ogrid[1:7:2], jnp.ogrid[1:7:2])
    # List of arrays
    assertListOfArraysEqual(np.ogrid[:5,], jnp.ogrid[:5,])
    assertListOfArraysEqual(np.ogrid[0:5, 1:3], jnp.ogrid[0:5, 1:3])
    assertListOfArraysEqual(np.ogrid[1:3:2, 2:9:3], jnp.ogrid[1:3:2, 2:9:3])
    assertListOfArraysEqual(np.ogrid[:5, :9, :11], jnp.ogrid[:5, :9, :11])
    # Corner cases
    self.assertArraysEqual(np.ogrid[:], jnp.ogrid[:])
    # Complex number steps: float rounding may differ slightly between
    # jnp and np, so compare with a small tolerance.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.ogrid[-1:1:5j],
                        jnp.ogrid[-1:1:5j],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.ogrid[0:3.5:0.3],
                        jnp.ogrid[0:3.5:0.3],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.ogrid[1.2:4.8:0.24],
                        jnp.ogrid[1.2:4.8:0.24],
                        atol=atol,
                        rtol=rtol)
    # abstract tracer value for ogrid slice: traced bounds must raise.
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.ogrid"):
      jax.jit(lambda a, b: jnp.ogrid[a:b])(0, 2)
  def testR_(self):
    """jnp.r_ must match np.r_: concatenation, directives, and slice steps."""
    a = np.arange(6).reshape((2,3))
    self.assertArraysEqual(np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])],
                           jnp.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])])
    self.assertArraysEqual(np.r_['-1', a, a], jnp.r_['-1', a, a])
    # String directives: 'axis[,ndim[,axis-start]]'
    self.assertArraysEqual(np.r_['0,2', [1,2,3], [4,5,6]], jnp.r_['0,2', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.r_['0,2,0', [1,2,3], [4,5,6]], jnp.r_['0,2,0', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.r_['1,2,0', [1,2,3], [4,5,6]], jnp.r_['1,2,0', [1,2,3], [4,5,6]])
    # negative 1d axis start
    self.assertArraysEqual(np.r_['0,4,-1', [1,2,3], [4,5,6]], jnp.r_['0,4,-1', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.r_['0,4,-2', [1,2,3], [4,5,6]], jnp.r_['0,4,-2', [1,2,3], [4,5,6]])
    # matrix directives ('r'/'c' produce np.matrix in numpy, which emits a
    # PendingDeprecationWarning there; silence it for the comparison)
    with warnings.catch_warnings():
      warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
      self.assertArraysEqual(np.r_['r',[1,2,3], [4,5,6]], jnp.r_['r',[1,2,3], [4,5,6]])
      self.assertArraysEqual(np.r_['c', [1, 2, 3], [4, 5, 6]], jnp.r_['c', [1, 2, 3], [4, 5, 6]])
    # bad directive
    with self.assertRaisesRegex(ValueError, "could not understand directive.*"):
      jnp.r_["asdfgh",[1,2,3]]
    # abstract tracer value for r_ slice
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.r_"):
      jax.jit(lambda a, b: jnp.r_[a:b])(0, 2)
    # Complex number steps (linspace-like): allow small float differences.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.r_[-1:1:6j],
                        jnp.r_[-1:1:6j],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.r_[-1:1:6j, [0]*3, 5, 6],
                        jnp.r_[-1:1:6j, [0]*3, 5, 6],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.r_[1.2:4.8:0.24],
                        jnp.r_[1.2:4.8:0.24],
                        atol=atol,
                        rtol=rtol)
  def testC_(self):
    """jnp.c_ must match np.c_: column stacking, directives, slice steps."""
    a = np.arange(6).reshape((2, 3))
    self.assertArraysEqual(np.c_[np.array([1,2,3]), np.array([4,5,6])],
                           jnp.c_[np.array([1,2,3]), np.array([4,5,6])])
    self.assertArraysEqual(np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])],
                           jnp.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])])
    self.assertArraysEqual(np.c_['-1', a, a], jnp.c_['-1', a, a])
    # String directives: 'axis[,ndim[,axis-start]]'
    self.assertArraysEqual(np.c_['0,2', [1,2,3], [4,5,6]], jnp.c_['0,2', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.c_['0,2,0', [1,2,3], [4,5,6]], jnp.c_['0,2,0', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.c_['1,2,0', [1,2,3], [4,5,6]], jnp.c_['1,2,0', [1,2,3], [4,5,6]])
    # negative 1d axis start
    self.assertArraysEqual(np.c_['0,4,-1', [1,2,3], [4,5,6]], jnp.c_['0,4,-1', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.c_['0,4,-2', [1,2,3], [4,5,6]], jnp.c_['0,4,-2', [1,2,3], [4,5,6]])
    # matrix directives, avoid numpy deprecation warning
    with warnings.catch_warnings():
      warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
      self.assertArraysEqual(np.c_['r',[1,2,3], [4,5,6]], jnp.c_['r',[1,2,3], [4,5,6]])
      self.assertArraysEqual(np.c_['c', [1, 2, 3], [4, 5, 6]], jnp.c_['c', [1, 2, 3], [4, 5, 6]])
    # bad directive
    with self.assertRaisesRegex(ValueError, "could not understand directive.*"):
      jnp.c_["asdfgh",[1,2,3]]
    # abstract tracer value for c_ slice
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.c_"):
      jax.jit(lambda a, b: jnp.c_[a:b])(0, 2)
    # Complex number steps (linspace-like): allow small float differences.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.c_[-1:1:6j],
                        jnp.c_[-1:1:6j],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.c_[1.2:4.8:0.24],
                        jnp.c_[1.2:4.8:0.24],
                        atol=atol,
                        rtol=rtol)
def testS_(self):
self.assertEqual(np.s_[1:2:20],jnp.s_[1:2:20])
def testIndex_exp(self):
self.assertEqual(np.index_exp[5:3:2j],jnp.index_exp[5:3:2j])
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": f"_start_shape={start_shape}_stop_shape={stop_shape}"
f"_num={num}_endpoint={endpoint}_retstep={retstep}"
f"_dtype={dtype.__name__ if dtype else "None"}",
"start_shape": start_shape, "stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "retstep": retstep,
"dtype": dtype}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for retstep in [True, False]
# floating-point compute between jitted platforms and non-jit + rounding
# cause unavoidable variation in integer truncation for some inputs, so
# we currently only test inexact 'dtype' arguments.
for dtype in inexact_dtypes + [None,]))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testLinspace(self, start_shape, stop_shape, num, endpoint, retstep, dtype):
rng = jtu.rand_default(self.rng())
# relax default tolerances slightly
tol = jtu.tolerance(dtype if dtype else np.float32) * 10
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(np.shape(start + stop))
for axis in range(-ndim, ndim):
jnp_op = lambda start, stop: jnp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
# NumPy 1.20.0 changed the semantics of linspace to floor for integer
# dtypes.
if numpy_version >= (1, 20) or not np.issubdtype(dtype, np.integer):
np_op = lambda start, stop: np.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
else:
def np_op(start, stop):
out = np.linspace(start, stop, num, endpoint=endpoint,
retstep=retstep, axis=axis)
if retstep:
return np.floor(out[0]).astype(dtype), out[1]
else:
return np.floor(out).astype(dtype)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
for dtype in number_dtypes))
def testLinspaceEndpoints(self, dtype):
"""Regression test for Issue #3014."""
rng = jtu.rand_default(self.rng())
endpoints = rng((2,), dtype)
out = jnp.linspace(*endpoints, 10, dtype=dtype)
self.assertAllClose(out[np.array([0, -1])], endpoints, rtol=0, atol=0)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
                           "_base={}_dtype={}").format(
            start_shape, stop_shape, num, endpoint, base,
            dtype.__name__ if dtype else "None"),
         "start_shape": start_shape,
         "stop_shape": stop_shape,
         "num": num, "endpoint": endpoint, "base": base,
         "dtype": dtype}
        for start_shape in [(), (2,), (2, 2)]
        for stop_shape in [(), (2,), (2, 2)]
        for num in [0, 1, 2, 5, 20]
        for endpoint in [True, False]
        for base in [10.0, 2, np.e]
        for dtype in inexact_dtypes + [None,]))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogspace(self, start_shape, stop_shape, num,
                   endpoint, base, dtype):
    """Check jnp.logspace against np.logspace over every axis of the result."""
    if (dtype in int_dtypes and
        jtu.device_under_test() in ("gpu", "tpu") and
        not config.x64_enabled):
      raise unittest.SkipTest("GPUx32 truncated exponentiation"
                              " doesn't exactly match other platforms.")
    rng = jtu.rand_default(self.rng())
    # relax default tolerances slightly
    tol = {np.float16: 2e-2, np.float32: 1e-2, np.float64: 1e-6,
           np.complex64: 1e-3, np.complex128: 1e-6}
    args_maker = self._GetArgsMaker(rng,
                                    [start_shape, stop_shape],
                                    [dtype, dtype])
    start, stop = args_maker()
    ndim = len(np.shape(start + stop))
    for axis in range(-ndim, ndim):
      jnp_op = lambda start, stop: jnp.logspace(
        start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
      @jtu.ignore_warning(category=RuntimeWarning,
                          message="overflow encountered in power")
      def np_op(start, stop):
        return np.logspace(start, stop, num, endpoint=endpoint,
                           base=base, dtype=dtype, axis=axis)
      self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                              check_dtypes=False, tol=tol)
      if dtype in (inexact_dtypes + [None,]):
        # Why do compiled and op-by-op float16 np.power numbers differ
        # slightly more than expected?
        atol = {np.float16: 1e-2}
        self._CompileAndCheck(jnp_op, args_maker,
                              check_dtypes=False, atol=atol, rtol=tol)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
                           "_dtype={}_axis={}").format(
            start_shape, stop_shape, num, endpoint,
            dtype.__name__ if dtype else "None", axis),
         "start_shape": start_shape,
         "stop_shape": stop_shape,
         "num": num, "endpoint": endpoint,
         "dtype": dtype, "axis": axis}
        for start_shape in [(), (2,), (2, 2)]
        for stop_shape in [(), (2,), (2, 2)]
        for num in [0, 1, 2, 5, 20]
        for endpoint in [True, False]
        # NB: numpy's geomspace gives nonsense results on integer types
        for dtype in inexact_dtypes + [None,]
        for axis in range(-max(len(start_shape), len(stop_shape)),
                          max(len(start_shape), len(stop_shape)))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGeomspace(self, start_shape, stop_shape, num,
                    endpoint, dtype, axis):
    """Check jnp.geomspace against np.geomspace on well-defined inputs."""
    rng = jtu.rand_default(self.rng())
    # relax default tolerances slightly
    tol = {np.float16: 4e-3, np.float32: 2e-3, np.float64: 1e-14,
           np.complex128: 1e-14}
    def args_maker():
      """Test the set of inputs np.geomspace is well-defined on."""
      start, stop = self._GetArgsMaker(rng,
                                       [start_shape, stop_shape],
                                       [dtype, dtype])()
      # np.geomspace can't handle differently ranked tensors
      # w. negative numbers!
      start, stop = jnp.broadcast_arrays(start, stop)
      if dtype in complex_dtypes:
        return start, stop
      # to avoid NaNs, non-complex start and stop cannot
      # differ in sign, elementwise
      start = start * jnp.sign(start) * jnp.sign(stop)
      return start, stop
    start, stop = args_maker()
    def jnp_op(start, stop):
      return jnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,
                           axis=axis)
    def np_op(start, stop):
      # np.geomspace lacks bfloat16 support: compute in float32, cast back.
      start = start.astype(np.float32) if dtype == jnp.bfloat16 else start
      stop = stop.astype(np.float32) if dtype == jnp.bfloat16 else stop
      return np.geomspace(
        start, stop, num, endpoint=endpoint,
        dtype=dtype if dtype != jnp.bfloat16 else np.float32,
        axis=axis).astype(dtype)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                            check_dtypes=False, tol=tol)
    if dtype in (inexact_dtypes + [None,]):
      self._CompileAndCheck(jnp_op, args_maker,
                            check_dtypes=False, atol=tol, rtol=tol)
  def testDisableNumpyRankPromotionBroadcasting(self):
    """Exercise the three jax_numpy_rank_promotion modes via FLAGS."""
    # 'allow': rank-promoting broadcasting succeeds silently.
    try:
      prev_flag = config.jax_numpy_rank_promotion
      FLAGS.jax_numpy_rank_promotion = "allow"
      jnp.ones(2) + jnp.ones((1, 2))  # works just fine
    finally:
      FLAGS.jax_numpy_rank_promotion = prev_flag
    # 'raise': the same operation is an error.
    try:
      prev_flag = config.jax_numpy_rank_promotion
      FLAGS.jax_numpy_rank_promotion = "raise"
      self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
    finally:
      FLAGS.jax_numpy_rank_promotion = prev_flag
    # 'warn': the operation succeeds but emits a warning (except for scalars).
    try:
      prev_flag = config.jax_numpy_rank_promotion
      FLAGS.jax_numpy_rank_promotion = "warn"
      with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        jnp.ones(2) + jnp.ones((1, 2))
        assert len(w) > 0
        msg = str(w[-1].message)
        expected_msg = ("Following NumPy automatic rank promotion for add on "
                        "shapes (2,) (1, 2).")
        self.assertEqual(msg[:len(expected_msg)], expected_msg)
        prev_len = len(w)
        jnp.ones(2) + 3
        self.assertEqual(len(w), prev_len)  # don't want to warn for scalars
    finally:
      FLAGS.jax_numpy_rank_promotion = prev_flag
  @unittest.skip("Test fails on CI, perhaps due to JIT caching")
  def testDisableNumpyRankPromotionBroadcastingDecorator(self):
    """Same as above, but via the jax.numpy_rank_promotion context manager."""
    with jax.numpy_rank_promotion("allow"):
      jnp.ones(2) + jnp.ones((1, 2))  # works just fine
    with jax.numpy_rank_promotion("raise"):
      self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
    with jax.numpy_rank_promotion("warn"):
      with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        jnp.ones(2) + jnp.ones((1, 2))
        assert len(w) > 0
        msg = str(w[-1].message)
        expected_msg = ("Following NumPy automatic rank promotion for add on "
                        "shapes (2,) (1, 2).")
        self.assertEqual(msg[:len(expected_msg)], expected_msg)
        prev_len = len(w)
        jnp.ones(2) + 3
        self.assertEqual(len(w), prev_len)  # don't want to warn for scalars
def testStackArrayArgument(self):
# tests https://github.com/google/jax/issues/1271
@jax.jit
def foo(x):
return jnp.stack(x)
foo(np.zeros(2)) # doesn't crash
@jax.jit
def foo(x):
return jnp.concatenate(x)
foo(np.zeros((2, 2))) # doesn't crash
  def testReluGradientConstants(self):
    """Constants in the relu gradient must not be hoisted to the top jaxpr."""
    # This is a regression test that verifies that constants associated with the
    # gradient of np.maximum (from lax._balanced_eq) aren't hoisted into the
    # outermost jaxpr. This was producing some large materialized constants for
    # every relu activation in a model.
    def body(i, xy):
      x, y = xy
      y = y + jax.grad(lambda z: jnp.sum(jnp.maximum(z, 0.)))(x)
      return x, y
    f = lambda y: lax.fori_loop(0, 5, body, (y, y))
    jaxpr = jax.make_jaxpr(f)(np.zeros((3, 4), np.float32))
    # A hoisted gradient constant would appear as a full (3, 4) array of 2s
    # among the jaxpr's constants; assert no such constant exists.
    self.assertFalse(
        any(np.array_equal(x, np.full((3, 4), 2., dtype=np.float32))
            for x in jaxpr.consts))
@parameterized.named_parameters(
{"testcase_name": "_from={}_to={}".format(from_shape, to_shape),
"from_shape": from_shape, "to_shape": to_shape}
for from_shape, to_shape in [
[(1, 3), (4, 3)],
[(3,), (2, 1, 3)],
[(3,), (3, 3)],
[(1,), (3,)],
[(1,), 3],
])
def testBroadcastTo(self, from_shape, to_shape):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [from_shape], [np.float32])
np_op = lambda x: np.broadcast_to(x, to_shape)
jnp_op = lambda x: jnp.broadcast_to(x, to_shape)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(
{"testcase_name": f"_{shapes}", "shapes": shapes, "broadcasted_shape": broadcasted_shape}
for shapes, broadcasted_shape in [
[[], ()],
[[()], ()],
[[(1, 3), (4, 3)], (4, 3)],
[[(3,), (2, 1, 3)], (2, 1, 3)],
[[(3,), (3, 3)], (3, 3)],
[[(1,), (3,)], (3,)],
[[(1,), 3], (3,)],
[[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
[[[1], [0, 1]], (0, 1)],
[[(1,), np.array([0, 1])], (0, 1)],
])
def testBroadcastShapes(self, shapes, broadcasted_shape):
# Test against np.broadcast_shapes once numpy 1.20 is minimum required version
np.testing.assert_equal(jnp.broadcast_shapes(*shapes), broadcasted_shape)
def testBroadcastToIssue1522(self):
self.assertRaisesRegex(
ValueError, "Incompatible shapes for broadcasting: .*",
lambda: jnp.broadcast_to(np.ones((2, 3)), (1, 3)))
def testBroadcastToIntIssue1548(self):
self.assertAllClose(jnp.broadcast_to(1, (3, 2)), np.ones((3, 2)),
check_dtypes=False)
def testBroadcastToOnScalar(self):
self.assertIsInstance(jnp.broadcast_to(10.0, ()), jnp.ndarray)
self.assertIsInstance(np.broadcast_to(10.0, ()), np.ndarray)
  def testPrecision(self):
    """The `precision` argument must propagate to the underlying dot ops."""
    ones_1d = np.ones((2,))
    ones_2d = np.ones((2, 2))
    ones_3d = np.ones((2, 2, 2))
    HIGHEST = lax.Precision.HIGHEST
    # Default precision is None when no argument is given.
    jtu.assert_dot_precision(None, jnp.dot, ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.dot, precision=HIGHEST),
        ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.dot, precision=HIGHEST),
        ones_3d, ones_3d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.matmul, precision=HIGHEST),
        ones_2d, ones_2d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.vdot, precision=HIGHEST),
        ones_1d, ones_1d)
    # tensordot with the various ways of specifying contraction axes.
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.tensordot, axes=2, precision=HIGHEST),
        ones_2d, ones_2d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.tensordot, axes=(0, 0), precision=HIGHEST),
        ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.tensordot, axes=((0,), (0,)), precision=HIGHEST),
        ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.einsum, 'i,i', precision=HIGHEST),
        ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.einsum, 'ij,ij', precision=HIGHEST),
        ones_2d, ones_2d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.inner, precision=HIGHEST),
        ones_1d, ones_1d)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": "_shape={}_varargs={} axis={}_dtype={}".format(
            shape, varargs, axis, dtype),
         "shape": shape, "varargs": varargs, "axis": axis, "dtype": dtype}
        for shape in [(10,), (10, 15), (10, 15, 20)]
        for _num_axes in range(len(shape))
        for varargs in itertools.combinations(range(1, len(shape) + 1), _num_axes)
        for axis in itertools.combinations(range(len(shape)), _num_axes)
        for dtype in inexact_dtypes))
  def testGradient(self, shape, varargs, axis, dtype):
    """Check jnp.gradient against np.gradient for varargs/axis combinations."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    jnp_fun = lambda y: jnp.gradient(y, *varargs, axis=axis)
    np_fun = lambda y: np.gradient(y, *varargs, axis=axis)
    self._CheckAgainstNumpy(
        np_fun, jnp_fun, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker)
  def testZerosShapeErrors(self):
    """Non-integer or traced shapes must raise informative TypeErrors."""
    # see https://github.com/google/jax/issues/1822
    self.assertRaisesRegex(
        TypeError,
        "Shapes must be 1D sequences of concrete values of integer type.*",
        lambda: jnp.zeros(1.))
    # A traced shape under jit should additionally hint at static_argnums.
    self.assertRaisesRegex(
        TypeError,
        r"Shapes must be 1D sequences of concrete values of integer type.*\n"
        "If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.",
        lambda: jax.jit(jnp.zeros)(2))
def testTraceMethod(self):
x = self.rng().randn(3, 4).astype(jnp.float_)
self.assertAllClose(x.trace(), jnp.array(x).trace())
self.assertAllClose(x.trace(), jax.jit(lambda y: y.trace())(x))
  def testIntegerPowersArePrecise(self):
    """Small integer powers of float32 integers must be numerically exact."""
    # See https://github.com/google/jax/pull/3036
    # Checks if the squares of float32 integers have no numerical errors.
    # It should be satisfied with all integers less than sqrt(2**24).
    x = jnp.arange(-2**12, 2**12, dtype=jnp.int32)
    np.testing.assert_array_equal(jnp.square(x.astype(jnp.float32)), x * x)
    np.testing.assert_array_equal(x.astype(jnp.float32) ** 2, x * x)
    # Similarly for cubes.
    x = jnp.arange(-2**8, 2**8, dtype=jnp.int32)
    np.testing.assert_array_equal(x.astype(jnp.float32) ** 3, x * x * x)
    # Small non-negative exponents of small bases should also be exact.
    x = np.arange(10, dtype=np.float32)
    for i in range(10):
      self.assertAllClose(x.astype(jnp.float32) ** i, x ** i,
                          check_dtypes=False)
def testToBytes(self):
v = np.arange(12, dtype=np.int32).reshape(3, 4)
for order in ['C', 'F']:
self.assertEqual(jnp.asarray(v).tobytes(order), v.tobytes(order))
def testToList(self):
v = np.arange(12, dtype=np.int32).reshape(3, 4)
self.assertEqual(jnp.asarray(v).tolist(), v.tolist())
def testReductionWithRepeatedAxisError(self):
with self.assertRaisesRegex(ValueError, r"duplicate value in 'axis': \(0, 0\)"):
jnp.sum(jnp.arange(3), (0, 0))
  def testArangeConcretizationError(self):
    """Traced start/stop for jnp.arange must raise ConcretizationTypeError."""
    # The error message should name the offending arange argument.
    msg = r"It arose in jax.numpy.arange argument `{}`".format
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):
      jax.jit(jnp.arange)(3)
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('start')):
      jax.jit(lambda start: jnp.arange(start, 3))(0)
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):
      jax.jit(lambda stop: jnp.arange(0, stop))(3)
  def testIssue2347(self):
    # https://github.com/google/jax/issues/2347
    # `object_list` is a typing generic alias, i.e. an arbitrary non-array
    # Python object; jnp.array must reject it (and the numpy object array
    # wrapping it) with a TypeError rather than crashing.
    object_list = List[Tuple[jnp.array, float, float, jnp.array, bool]]
    self.assertRaises(TypeError, jnp.array, object_list)
    np_object_list = np.array(object_list)
    self.assertRaises(TypeError, jnp.array, np_object_list)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
       "shapes": shapes, "dtypes": dtypes}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(all_shapes, 2))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogaddexpComplex(self, shapes, dtypes):
    """jnp.logaddexp on complex inputs vs the naive log(exp+exp) in numpy."""
    @jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")
    def np_op(x1, x2):
      return np.log(np.exp(x1) + np.exp(x2))
    # Include NaNs in the inputs to exercise NaN propagation.
    rng = jtu.rand_some_nan(self.rng())
    args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))
    if jtu.device_under_test() == 'tpu':
      tol = {np.complex64: 1e-3, np.complex128: 1e-10}
    else:
      tol = {np.complex64: 1e-5, np.complex128: 1e-14}
    self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp, args_maker, tol=tol)
    self._CompileAndCheck(jnp.logaddexp, args_maker, rtol=tol, atol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
       "shapes": shapes, "dtypes": dtypes}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(all_shapes, 2))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogaddexp2Complex(self, shapes, dtypes):
    """jnp.logaddexp2 on complex inputs vs the naive log2(exp2+exp2)."""
    @jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")
    def np_op(x1, x2):
      return np.log2(np.exp2(x1) + np.exp2(x2))
    # Include NaNs in the inputs to exercise NaN propagation.
    rng = jtu.rand_some_nan(self.rng())
    args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))
    if jtu.device_under_test() == 'tpu':
      tol = {np.complex64: 1e-3, np.complex128: 1e-10}
    else:
      tol = {np.complex64: 1e-5, np.complex128: 1e-14}
    self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp2, args_maker, tol=tol)
    self._CompileAndCheck(jnp.logaddexp2, args_maker, rtol=tol, atol=tol)
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.

# One gradient-test case: `op` is differentiated up to `order`, with `nargs`
# inputs drawn from `rng_factory` for each dtype in `dtypes`, using tolerance
# `tol` (may be None for defaults).
GradTestSpec = collections.namedtuple(
  "GradTestSpec",
  ["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
  """Build a GradTestSpec, defaulting `name` to the op's __name__."""
  return GradTestSpec(
      op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)

GRAD_TEST_RECORDS = [
  # arcsinh/arccosh need positive inputs; arctanh needs |x| < 1.
  grad_test_spec(jnp.arcsinh, nargs=1, order=2,
                 rng_factory=jtu.rand_positive,
                 dtypes=[np.float64, np.complex64],
                 tol={np.complex64: 2e-2}),
  grad_test_spec(jnp.arccosh, nargs=1, order=2,
                 rng_factory=jtu.rand_positive,
                 dtypes=[np.float64, np.complex64],
                 tol={np.complex64: 2e-2}),
  grad_test_spec(jnp.arctanh, nargs=1, order=2,
                 rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                 dtypes=[np.float64, np.complex64],
                 tol={np.complex64: 2e-2}),
  grad_test_spec(jnp.logaddexp, nargs=2, order=1,
                 rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                 dtypes=[np.float64], tol=1e-4),
  grad_test_spec(jnp.logaddexp2, nargs=2, order=2,
                 rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                 dtypes=[np.float64], tol=1e-4),
]

# (op, values, order): check derivatives of `op` up to `order` at each of the
# special input `values` where numerics are delicate (e.g. branch points).
GradSpecialValuesTestSpec = collections.namedtuple(
  "GradSpecialValuesTestSpec", ["op", "values", "order"])
GRAD_SPECIAL_VALUE_TEST_RECORDS = [
  GradSpecialValuesTestSpec(jnp.arcsinh, [0., 1000.], 2),
  GradSpecialValuesTestSpec(jnp.arccosh, [1000.], 2),
  GradSpecialValuesTestSpec(jnp.arctanh, [0.], 2),
  GradSpecialValuesTestSpec(jnp.sinc, [0.], 1),
]
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyGradTests(jtu.JaxTestCase):
  """Gradient checks for selected jax.numpy ops (see GRAD_TEST_RECORDS)."""

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(
            rec.name, shapes, itertools.repeat(dtype)),
         "op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
         "order": rec.order, "tol": rec.tol}
        for shapes in itertools.combinations_with_replacement(nonempty_shapes, rec.nargs)
        for dtype in rec.dtypes)
      for rec in GRAD_TEST_RECORDS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
    """Check fwd/rev derivatives of `op` up to `order` via check_grads."""
    rng = rng_factory(self.rng())
    tol = jtu.join_tolerance(tol, {np.float32: 1e-1, np.float64: 1e-3,
                                   np.complex64: 1e-1, np.complex128: 1e-3})
    args = tuple(rng(shape, dtype) for shape in shapes)
    check_grads(op, args, order, ["fwd", "rev"], tol, tol)

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
         "op": rec.op, "special_value": special_value, "order": rec.order}
        for special_value in rec.values)
      for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
  def testOpGradSpecialValue(self, op, special_value, order):
    """Check derivatives at special input values (e.g. branch points)."""
    check_grads(op, (special_value,), order, ["fwd", "rev"],
                atol={np.float32: 3e-3})

  def testSincAtZero(self):
    """Manual derivative checks for sinc at 0; expected values are analytic."""
    # Some manual tests for sinc at zero, since it doesn't have well-behaved
    # numerical derivatives at zero
    def deriv(f):
      # Forward-mode derivative of f via jvp with unit tangent.
      return lambda x: jax.jvp(f, (x,), (1.,))[1]
    def apply_all(fns, x):
      for f in fns:
        x = f(x)
      return x
    # Check every mix of jvp-based and grad-based differentiation at each order.
    d1 = 0.
    for ops in itertools.combinations_with_replacement([deriv, jax.grad], 1):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d1)
    d2 = -np.pi ** 2 / 3
    for ops in itertools.combinations_with_replacement([deriv, jax.grad], 2):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d2)
    d3 = 0.
    for ops in itertools.combinations_with_replacement([deriv, jax.grad], 3):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d3)
    d4 = np.pi ** 4 / 5
    for ops in itertools.combinations_with_replacement([deriv, jax.grad], 4):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d4)

  def testSincGradArrayInput(self):
    """Gradient of sinc must accept array inputs (near-bug in #5077)."""
    # tests for a bug almost introduced in #5077
    jax.grad(lambda x: jnp.sinc(x).sum())(jnp.arange(10.))  # doesn't crash

  def testTakeAlongAxisIssue1521(self):
    """take_along_axis must be differentiable (issue #1521)."""
    # https://github.com/google/jax/issues/1521
    idx = jnp.repeat(jnp.arange(3), 10).reshape((30, 1))
    def f(x):
      y = x * jnp.arange(3.).reshape((1, 3))
      return jnp.take_along_axis(y, idx, -1).sum()
    check_grads(f, (1.,), order=1)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, itertools.repeat(dtype)),
       "shapes": shapes, "dtype": dtype}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(nonempty_shapes, 2))
      for dtype in (np.complex128, )))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGradLogaddexpComplex(self, shapes, dtype):
    """First-order gradients of logaddexp for complex inputs."""
    rng = jtu.rand_default(self.rng())
    args = tuple(rng(shape, dtype) for shape in shapes)
    if jtu.device_under_test() == "tpu":
      tol = 5e-2
    else:
      tol = 3e-2
    check_grads(jnp.logaddexp, args, 1, ["fwd", "rev"], tol, tol)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, itertools.repeat(dtype)),
       "shapes": shapes, "dtype": dtype}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(nonempty_shapes, 2))
      for dtype in (np.complex128, )))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGradLogaddexp2Complex(self, shapes, dtype):
    """First-order gradients of logaddexp2 for complex inputs."""
    rng = jtu.rand_default(self.rng())
    args = tuple(rng(shape, dtype) for shape in shapes)
    if jtu.device_under_test() == "tpu":
      tol = 5e-2
    else:
      tol = 3e-2
    check_grads(jnp.logaddexp2, args, 1, ["fwd", "rev"], tol, tol)
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpySignaturesTest(jtu.JaxTestCase):

  def testWrappedSignaturesMatch(self):
    """Test that jax.numpy function signatures match numpy."""
    # Pair every jnp function with the numpy function it wraps; wrappers carry
    # the original in their `__np_wrapped__` attribute.
    jnp_funcs = {name: getattr(jnp, name) for name in dir(jnp)}
    func_pairs = {name: (fun, fun.__np_wrapped__) for name, fun in jnp_funcs.items()
                  if hasattr(fun, '__np_wrapped__')}
    assert len(func_pairs) > 0
    # TODO(jakevdp): fix some of the following signatures. Some are due to wrong argument names.
    # numpy parameters that the jnp versions do not accept.
    unsupported_params = {
      'angle': ['deg'],
      'asarray': ['like'],
      'broadcast_to': ['subok', 'array'],
      'clip': ['kwargs'],
      'corrcoef': ['ddof', 'bias', 'dtype'],
      'cov': ['dtype'],
      'empty_like': ['subok', 'order'],
      'einsum': ['kwargs'],
      'einsum_path': ['einsum_call'],
      'eye': ['order', 'like'],
      'identity': ['like'],
      'full': ['order', 'like'],
      'full_like': ['subok', 'order'],
      'histogram': ['normed'],
      'histogram2d': ['normed'],
      'histogramdd': ['normed'],
      'ones': ['order', 'like'],
      'ones_like': ['subok', 'order'],
      'tri': ['like'],
      'unwrap': ['period'],
      'zeros_like': ['subok', 'order']
    }
    # jnp-only parameters that numpy's versions do not have.
    extra_params = {
      'broadcast_to': ['arr'],
      'einsum': ['precision'],
      'einsum_path': ['subscripts'],
    }
    mismatches = {}
    for name, (jnp_fun, np_fun) in func_pairs.items():
      # broadcast_shapes is not available in numpy < 1.20
      if numpy_version < (1, 20) and name == "broadcast_shapes":
        continue
      # Some signatures have changed; skip for older numpy versions.
      if numpy_version < (1, 19) and name in ['einsum_path', 'gradient', 'isscalar']:
        continue
      # Note: can't use inspect.getfullargspec due to numpy issue
      # https://github.com/numpy/numpy/issues/12225
      try:
        np_params = inspect.signature(np_fun).parameters
      except ValueError:
        # Some functions cannot be inspected
        continue
      jnp_params = inspect.signature(jnp_fun).parameters
      extra = set(extra_params.get(name, []))
      unsupported = set(unsupported_params.get(name, []))
      # Checks to prevent tests from becoming out-of-date. If these fail,
      # it means that extra_params or unsupported_params need to be updated.
      assert extra.issubset(jnp_params), f"{name}: extra={extra} is not a subset of jnp_params={set(jnp_params)}."
      assert not unsupported.intersection(jnp_params), f"{name}: unsupported={unsupported} overlaps with jnp_params={set(jnp_params)}."
      # Skip functions that only have *args and **kwargs; we can't introspect these further.
      var_args = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
      if all(p.kind in var_args for p in jnp_params.values()):
        continue
      if all(p.kind in var_args for p in np_params.values()):
        continue
      # Remove known extra parameters.
      jnp_params = {a: p for a, p in jnp_params.items() if a not in extra}
      # Remove known unsupported parameters.
      np_params = {a: p for a, p in np_params.items() if a not in unsupported}
      # Older versions of numpy may have fewer parameters; to avoid extraneous errors on older numpy
      # versions, we allow for jnp to have more parameters.
      if list(jnp_params)[:len(np_params)] != list(np_params):
        mismatches[name] = {'np_params': list(np_params), 'jnp_params': list(jnp_params)}
    self.assertEqual(mismatches, {})
# String names of the concrete dtypes used to probe numpy ufunc signatures
# in _dtypes_for_ufunc below.
_all_dtypes: List[str] = [
  "bool_",
  "uint8", "uint16", "uint32", "uint64",
  "int8", "int16", "int32", "int64",
  "float16", "float32", "float64",
  "complex64", "complex128",
]
def _all_numpy_ufuncs() -> Iterator[str]:
"""Generate the names of all ufuncs in the top-level numpy namespace."""
for name in dir(np):
f = getattr(np, name)
if isinstance(f, np.ufunc):
yield name
def _dtypes_for_ufunc(name: str) -> Iterator[Tuple[str, ...]]:
  """Generate valid dtypes of inputs to the given numpy ufunc."""
  ufunc = getattr(np, name)
  # Probe every dtype combination with 1-element arrays; a TypeError from
  # numpy means the combination is unsupported and is silently dropped.
  for arg_dtypes in itertools.product(_all_dtypes, repeat=ufunc.nin):
    sample_args = (np.ones(1, dtype=d) for d in arg_dtypes)
    with warnings.catch_warnings():
      warnings.filterwarnings("ignore", "divide by zero", RuntimeWarning)
      try:
        ufunc(*sample_args)
      except TypeError:
        continue
    yield arg_dtypes
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyUfuncTests(jtu.JaxTestCase):
  """Checks jnp counterparts of numpy ufuncs accept the same input dtypes."""

  @parameterized.named_parameters(
      # Fix: the original f-string reused double quotes inside a double-quoted
      # f-string (f"..{",".join(..)}.."), which is a SyntaxError on every
      # Python before 3.12 (PEP 701 only legalized quote reuse in 3.12).
      # Using single quotes produces the identical test-case name.
      {"testcase_name": f"_{name}_{','.join(arg_dtypes)}",
       "name": name, "arg_dtypes": arg_dtypes}
      for name in _all_numpy_ufuncs()
      for arg_dtypes in jtu.cases_from_list(_dtypes_for_ufunc(name)))
  def testUfuncInputTypes(self, name, arg_dtypes):
    """Compare jnp.<name> against np.<name> for one input-dtype combination."""
    # TODO(jakevdp): fix following failures and remove from this exception list.
    if (name in ['divmod', 'floor_divide', 'fmod', 'gcd', 'left_shift', 'mod',
                 'power', 'remainder', 'right_shift', 'rint', 'square']
        and 'bool_' in arg_dtypes):
      self.skipTest(f"jax.numpy does not support {name}{tuple(arg_dtypes)}")
    if name == 'arctanh' and jnp.issubdtype(arg_dtypes[0], jnp.complexfloating):
      self.skipTest("np.arctanh & jnp.arctanh have mismatched NaNs for complex input.")
    for dtype in arg_dtypes:
      jtu.skip_if_unsupported_type(dtype)
    jnp_op = getattr(jnp, name)
    np_op = getattr(np, name)
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="divide by zero.*")(np_op)
    args_maker = lambda: tuple(np.ones(1, dtype=dtype) for dtype in arg_dtypes)
    try:
      jnp_op(*args_maker())
    except NotImplementedError:
      self.skipTest(f"jtu.{name} is not yet implemented.")
    # large tol comes from the fact that numpy returns float16 in places
    # that jnp returns float32. e.g. np.cos(np.uint8(0))
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker, check_dtypes=False, tol=1E-2)
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyDocTests(jtu.JaxTestCase):

  def test_lax_numpy_docstrings(self):
    """Every numpy-wrapping jnp function must carry a wrapped docstring."""
    # Test that docstring wrapping & transformation didn't fail.
    # Functions that have their own docstrings & don't wrap numpy.
    known_exceptions = {'broadcast_arrays', 'vectorize'}
    for name in dir(jnp):
      if name in known_exceptions or name.startswith('_'):
        continue
      # We only check signatures of functions.
      obj = getattr(jnp, name)
      if isinstance(obj, type) or not callable(obj):
        continue
      # Some jnp functions are imported from numpy or jax.dtypes directly.
      if any(obj is getattr(mod, obj.__name__, None) for mod in [np, dtypes]):
        continue
      wrapped_fun = obj.__np_wrapped__
      # If the wrapped function has a docstring, obj should too
      if wrapped_fun.__doc__ and not obj.__doc__:
        raise Exception(f"jnp.{name} does not contain wrapped docstring.")
      if obj.__doc__ and "*Original docstring below.*" not in obj.__doc__:
        raise Exception(f"jnp.{name} does not have a wrapped docstring.")

  def test_parse_numpydoc(self):
    """_parse_numpydoc must handle every docstring in numpy's namespace."""
    # Unit test ensuring that _parse_numpydoc correctly parses docstrings for all
    # functions in NumPy's top-level namespace.
    section_titles = {'Attributes', 'Examples', 'Notes',
                      'Parameters', 'Raises', 'References',
                      'Returns', 'See also', 'See Also', 'Warnings', 'Warns'}
    headings = [title + '\n' + '-'*len(title) for title in section_titles]
    for name in dir(np):
      if name.startswith('_'):
        continue
      obj = getattr(np, name)
      if isinstance(obj, type):
        continue
      if not callable(obj):
        continue
      if 'built-in function' in repr(obj):
        continue
      parsed = _parse_numpydoc(obj.__doc__)
      # Check that no docstring is handled gracefully.
      if not obj.__doc__:
        self.assertEqual(parsed, ParsedDoc(obj.__doc__))
        continue
      # Check that no unexpected section names are found.
      extra_keys = parsed.sections.keys() - section_titles
      if extra_keys:
        raise ValueError(f"Extra section headers found in np.{name}: {extra_keys}")
      # Check that every docstring has a summary.
      if not parsed.summary:
        raise ValueError(f"No summary found for np.{name}")
      # Check that no expected headings are missed.
      for heading in headings:
        assert heading not in parsed.front_matter
# Entry point when the file is run directly as a test script.
if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
from functools import partial
import inspect
import itertools
import operator
from typing import cast, Iterator, Optional, List, Tuple
import unittest
from unittest import SkipTest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
try:
import numpy_dispatch
except ImportError:
numpy_dispatch = None
import jax
import jax.ops
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax._src import dtypes
from jax import tree_util
from jax.interpreters import xla
from jax.test_util import check_grads
from jax._src.util import prod
from jax._src.numpy.util import _parse_numpydoc, ParsedDoc
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
# Parsed (major, minor, micro) of the installed numpy, for version gating.
numpy_version = tuple(map(int, np.__version__.split('.')[:3]))
# Shape fixtures shared by the parameterized tests below. The jtu.*_SCALAR_SHAPE
# values are sentinels standing for numpy/Python scalar "shapes".
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
one_dim_array_shapes = [(1,), (6,), (12,)]
empty_array_shapes = [(0,), (0, 4), (3, 0),]
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
# Dtype groups, taken from jtu.dtypes.
float_dtypes = jtu.dtypes.all_floating
complex_dtypes = jtu.dtypes.complex
int_dtypes = jtu.dtypes.all_integer
unsigned_dtypes = jtu.dtypes.all_unsigned
bool_dtypes = jtu.dtypes.boolean
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes
python_scalar_dtypes = [jnp.bool_, jnp.int_, jnp.float_, jnp.complex_]
# uint64 is problematic because with any uint type it promotes to float:
int_dtypes_no_uint64 = [d for d in int_dtypes + unsigned_dtypes if d != np.uint64]
def _valid_dtypes_for_shape(shape, dtypes):
  """Filter `dtypes` down to those representable for `shape`.

  Not all (shape, dtype) pairs are valid. In particular, Python scalars only
  have one type in each category (float, bool, etc.), so the Python-scalar
  sentinel shape admits only those types.
  """
  if shape is not jtu.PYTHON_SCALAR_SHAPE:
    return dtypes
  return [t for t in dtypes if t in python_scalar_dtypes]
def _shape_and_dtypes(shapes, dtypes):
  """Yield every valid (shape, dtype) pairing from the two collections."""
  yield from ((shape, dtype)
              for shape in shapes
              for dtype in _valid_dtypes_for_shape(shape, dtypes))
def _compatible_shapes(shape):
  """Return shapes broadcast-compatible with `shape` (its trailing suffixes).

  Scalar-like shapes are only compatible with themselves.
  """
  if shape in scalar_shapes or np.ndim(shape) == 0:
    return [shape]
  return (shape[i:] for i in range(len(shape) + 1))
def _get_y_shapes(y_dtype, shape, rowvar):
# Helper function for testCov.
if y_dtype is None:
return [None]
if len(shape) == 1:
return [shape]
elif rowvar or shape[0] == 1:
return [(1, shape[-1]), (2, shape[-1]), (5, shape[-1])]
return [(shape[0], 1), (shape[0], 2), (shape[0], 5)]
# Describes one jnp operation to test: numpy name, arity, supported dtypes and
# shapes, an RNG factory for inputs, and which differentiation modes to check.
OpRecord = collections.namedtuple(
  "OpRecord",
  ["name", "nargs", "dtypes", "shapes", "rng_factory", "diff_modes",
   "test_name", "check_dtypes", "tolerance", "inexact"])


def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
              test_name=None, check_dtypes=True,
              tolerance=None, inexact=False):
  """Build an OpRecord, defaulting test_name to the op name."""
  return OpRecord(name, nargs, dtypes, shapes, rng_factory, diff_modes,
                  test_name or name, check_dtypes, tolerance, inexact)
# Records for jnp functions that mirror a same-named numpy function one-to-one.
JAX_ONE_TO_ONE_OP_RECORDS = [
    op_record("abs", 1, number_dtypes + unsigned_dtypes + bool_dtypes,
              all_shapes, jtu.rand_default, ["rev"]),
    op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("ceil", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("float_power", 2, inexact_dtypes, all_shapes,
              partial(jtu.rand_default, scale=1), ["rev"],
              tolerance={jnp.bfloat16: 1e-2, np.float32: 1e-3,
                         np.float64: 1e-12, np.complex64: 2e-4,
                         np.complex128: 1e-12}, check_dtypes=False),
    op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("floor", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("greater", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("greater_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("i0", 1, float_dtypes, all_shapes, jtu.rand_default, [],
              check_dtypes=False),
    op_record("ldexp", 2, int_dtypes, all_shapes, jtu.rand_default, [], check_dtypes=False),
    op_record("less", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("less_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("maximum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("minimum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("nextafter", 2, [f for f in float_dtypes if f != jnp.bfloat16],
              all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0),
    op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("array_equiv", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),
    op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("signbit", 1, default_dtypes + bool_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"]),
    op_record("trunc", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("trunc", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("tan", 1, number_dtypes, all_shapes,
              partial(jtu.rand_uniform, low=-1.5, high=1.5), ["rev"],
              inexact=True),
    op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    # TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
    # ~float32 precision.
    # TODO(b/143135720): on GPU, tanh has only ~float32 precision.
    op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              tolerance={np.float64: 1e-7, np.complex128: 1e-7},
              inexact=True),
    op_record("arcsin", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arccos", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arctan", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True, tolerance={np.complex64: 2E-4, np.complex128: 2E-14}),
    op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True, tolerance={np.complex64: 2E-2, np.complex128: 2E-12}),
    op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True, tolerance={np.float64: 1e-9}),
]
# Records for jnp functions whose implementation is composed from several lax
# primitives (as opposed to a one-to-one numpy mirror).
JAX_COMPOUND_OP_RECORDS = [
    # angle has inconsistent 32/64-bit return types across numpy versions.
    op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default, [],
              check_dtypes=False, inexact=True),
    op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_some_inf, ["rev"],
              inexact=True),
    op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"],
              inexact=True),
    op_record("divmod", 2, int_dtypes + float_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              tolerance={jnp.bfloat16: 4e-2, np.float16: 1e-2}, inexact=True),
    # TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
    # precision.
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
              test_name="expm1_large", tolerance={np.float64: 1e-8}, inexact=True),
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive,
              [], tolerance={np.float64: 1e-8}, inexact=True),
    op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("fix", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("floor_divide", 2, number_dtypes, all_shapes,
              jtu.rand_nonzero, ["rev"]),
    op_record("floor_divide", 2, unsigned_dtypes, all_shapes,
              jtu.rand_nonzero, ["rev"]),
    op_record("fmin", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("fmax", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("fmod", 2, default_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
              test_name="log1p_large", tolerance={np.float64: 1e-12},
              inexact=True),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],
              tolerance={np.float64: 1e-12}, inexact=True),
    op_record("logaddexp", 2, float_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"],
              tolerance={np.float64: 1e-12}, inexact=True),
    op_record("logaddexp2", 2, float_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"],
              tolerance={np.float16: 1e-2, np.float64: 2e-14}, inexact=True),
    op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes,
              jtu.rand_default, [], check_dtypes=False,
              tolerance={dtypes.bfloat16: 4e-2, np.float16: 1e-2,
                         np.float64: 1e-12}),
    op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              tolerance={np.complex128: 1e-14}, check_dtypes=False),
    op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-2}),
    op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("modf", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("modf", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("rint", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan,
              []),
    op_record("rint", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("sign", 1, number_dtypes + unsigned_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, []),
    # numpy 1.16 has trouble mixing uint and bfloat16, so we test these separately.
    op_record("copysign", 2, default_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("copysign", 2, unsigned_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("sinc", 1, [t for t in number_dtypes if t != jnp.bfloat16],
              all_shapes, jtu.rand_default, ["rev"],
              tolerance={np.complex64: 1e-5}, inexact=True,
              check_dtypes=False),
    op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"],
              check_dtypes=False),
    op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero,
              ["rev"], inexact=True),
    op_record("ediff1d", 3, [np.int32], all_shapes, jtu.rand_default, []),
    # TODO(phawkins): np.unwrap does not correctly promote its default period
    # argument under NumPy 1.21 for bfloat16 inputs. It works fine if we
    # explicitly pass a bfloat16 value that does not need promition. We should
    # probably add a custom test harness for unwrap that tests the period
    # argument anyway.
    op_record("unwrap", 1, [t for t in float_dtypes if t != dtypes.bfloat16],
              nonempty_nonscalar_array_shapes,
              jtu.rand_default, ["rev"],
              # numpy.unwrap always returns float64
              check_dtypes=False,
              # numpy cumsum is inaccurate, see issue #3517
              tolerance={dtypes.bfloat16: 1e-1, np.float16: 1e-1}),
    op_record("isclose", 2, [t for t in all_dtypes if t != jnp.bfloat16],
              all_shapes, jtu.rand_small_positive, []),
    op_record("gcd", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),
    op_record("lcm", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),
]
# Records for bitwise operations; inputs are restricted to integer dtypes and
# filtered further by _dtypes_are_compatible_for_bitwise_ops below.
JAX_BITWISE_OP_RECORDS = [
    op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("invert", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
]
# Reductions that accept a `dtype` argument.
JAX_REDUCER_RECORDS = [
    op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
    op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("nanmean", 1, inexact_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("nanprod", 1, all_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("nansum", 1, number_dtypes, all_shapes, jtu.rand_some_nan, []),
]
# Reductions that accept an `initial` argument.
JAX_REDUCER_INITIAL_RECORDS = [
    op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
    op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("max", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("min", 1, all_dtypes, all_shapes, jtu.rand_default, []),
]
# Reductions that accept a `where` argument but no `initial`.
JAX_REDUCER_WHERE_NO_INITIAL_RECORDS = [
    op_record("all", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("any", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("mean", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
]
# Reductions that do not accept a `dtype` argument.
JAX_REDUCER_NO_DTYPE_RECORDS = [
    op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("nanmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanvar", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("nanstd", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []),
]
# Index-returning reductions (argmin/argmax and their nan-aware variants).
JAX_ARGMINMAX_RECORDS = [
    op_record("argmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
    op_record("argmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
    op_record("nanargmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanargmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
]
# Dunder operator overloads tested on jnp arrays (left-hand operand).
JAX_OPERATOR_OVERLOADS = [
    op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__le__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
              tolerance={np.float32: 2e-4, np.complex64: 2e-4, np.complex128: 1e-14}),
    op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-1}),
    op_record("__floordiv__", 2, default_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
              inexact=True),
    op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    # TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2
    op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default, []),
    # TODO(mattjj): investigate these failures
    # op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    # op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("__lshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
    op_record("__rshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
]
# Reflected dunder operator overloads (jnp array as right-hand operand).
JAX_RIGHT_OPERATOR_OVERLOADS = [
    op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
              tolerance={np.float32: 2e-4, np.complex64: 1e-3}),
    op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-1}),
    op_record("__rfloordiv__", 2, default_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
              inexact=True),
    # op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    # op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("__rlshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
    op_record("__rrshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), [])
]
class _OverrideEverything(object):
  """Sentinel type that claims every binary operator (each returns self)."""
  pass

for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
  if rec.nargs != 2:
    continue
  # Install the dunder; always returning `self` simulates a type that takes
  # precedence over jnp arrays in binary operations.
  setattr(_OverrideEverything, rec.name, lambda self, other: self)
class _OverrideNothing(object):
  """Sentinel type whose binary operators all return NotImplemented."""
  pass

for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
  if rec.nargs != 2:
    continue
  # Returning NotImplemented defers every binary operation to the other
  # operand, simulating a type that never takes precedence.
  setattr(_OverrideNothing, rec.name, lambda self, other: NotImplemented)
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: jnp.issubdtype(dtype, np.signedinteger)
width = lambda dtype: jnp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
# The following condition seems a little ad hoc, but seems to capture what
# numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
def _shapes_are_broadcast_compatible(shapes):
accumulator = np.zeros([])
for shape in shapes:
try:
accumulator = accumulator + np.zeros(shape)
except ValueError:
return False
return True
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
def _promote_like_jnp(fun, inexact=False):
"""Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`.
jnp and np have different type promotion semantics; this decorator allows
tests make an np reference implementation act more like an jnp
implementation.
"""
def wrapper(*args, **kw):
flat_args = tree_util.tree_leaves(args)
if inexact and not any(jnp.issubdtype(jnp.result_type(x), jnp.inexact)
for x in flat_args):
dtype = jnp.result_type(jnp.float_, *flat_args)
else:
dtype = jnp.result_type(*flat_args)
args = tree_util.tree_map(lambda a: np.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper
@jtu.with_config(jax_numpy_rank_promotion="raise")
class LaxBackedNumpyTests(jtu.JaxTestCase):
"""Tests for LAX-backed Numpy implementation."""
def _GetArgsMaker(self, rng, shapes, dtypes, np_arrays=True):
  """Return a nullary factory that draws one random argument per (shape, dtype).

  A dtype of None falls back to jnp.float_. With np_arrays=False, numpy
  array/scalar outputs are converted to jnp arrays.
  """
  def make_args():
    values = [rng(shape, dtype or jnp.float_)
              for shape, dtype in zip(shapes, dtypes)]
    if np_arrays:
      return values
    return [jnp.asarray(v) if isinstance(v, (np.ndarray, np.generic)) else v
            for v in values]
  return make_args
def testNotImplemented(self):
  """Every stub listed in jnp._NOT_IMPLEMENTED must raise NotImplementedError."""
  for api_name in jnp._NOT_IMPLEMENTED:
    unimplemented = getattr(jnp, api_name)
    with self.assertRaises(NotImplementedError):
      unimplemented()
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                    dtypes),
       "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
       "check_dtypes": rec.check_dtypes, "tolerance": rec.tolerance,
       "inexact": rec.inexact}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(rec.shapes, rec.nargs))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
    for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
                               JAX_COMPOUND_OP_RECORDS)))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testOp(self, np_op, jnp_op, rng_factory, shapes, dtypes, check_dtypes,
           tolerance, inexact):
  """Check a jnp op against its np counterpart over shape/dtype combos."""
  # Silence NumPy RuntimeWarnings (invalid value / divide by zero) raised
  # by the reference implementation on edge-case inputs.
  np_op = jtu.ignore_warning(category=RuntimeWarning,
                             message="invalid value.*")(np_op)
  np_op = jtu.ignore_warning(category=RuntimeWarning,
                             message="divide by zero.*")(np_op)

  rng = rng_factory(self.rng())
  args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
  # Join the per-dtype tolerance with the record's own and the default.
  tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
  tol = functools.reduce(jtu.join_tolerance,
                         [tolerance, tol, jtu.default_tolerance()])
  self._CheckAgainstNumpy(_promote_like_jnp(np_op, inexact), jnp_op,
                          args_maker, check_dtypes=check_dtypes, tol=tol)
  self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes,
                        atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                    dtypes),
       "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
       "tol": rec.tolerance}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(rec.shapes, rec.nargs))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
    for rec in JAX_OPERATOR_OVERLOADS))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
  """Compile-check dunder operator overloads (e.g. __add__) on jnp arrays."""
  rng = rng_factory(self.rng())
  # np and jnp arrays have different type promotion rules; force the use of
  # jnp arrays.
  args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
  # "__add__" -> operator.add, etc.
  fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
  self._CompileAndCheck(fun, args_maker, atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                    dtypes),
       "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
       "op_tolerance": rec.tolerance}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(rec.shapes, rec.nargs))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
    for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
                              op_tolerance):
  """Compile-check reflected operators (e.g. __radd__) on jnp arrays."""
  if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
    raise SkipTest("scalars not implemented")  # TODO(mattjj): clean up
  rng = rng_factory(self.rng())
  args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
  # Call the reflected method on the second operand, passing the first.
  fun = lambda fst, snd: getattr(snd, name)(fst)
  tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
  self._CompileAndCheck( fun, args_maker, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": rec.test_name + "_{}".format(dtype),
     "rng_factory": rec.rng_factory,
     "op_name": rec.name, "dtype": dtype}
    for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2
    for dtype in rec.dtypes))
def testBinaryOperatorDefers(self, op_name, rng_factory, dtype):
  """Binary ops on jnp arrays defer to operands overriding the operator."""
  rng = rng_factory(self.rng())
  arg = jax.device_put(rng((), dtype))
  op = getattr(operator, op_name)

  # An operand that overrides every dunder wins from either side.
  other = _OverrideEverything()
  assert op(other, arg) is other
  assert op(arg, other) is other

  # An operand that always returns NotImplemented falls back to Python's
  # defaults: identity-based ==/!=, TypeError for everything else.
  other = _OverrideNothing()
  if op_name == "__eq__":
    assert op(other, arg) is False
    assert op(arg, other) is False
  elif op_name == "__ne__":
    assert op(other, arg) is True
    assert op(arg, other) is True
  else:
    with self.assertRaises(TypeError):
      op(other, arg)
    with self.assertRaises(TypeError):
      op(arg, other)
def testArrayEqualExamples(self):
  """Spot-check jnp.array_equal on the cases from the np docstring."""
  for lhs, rhs, expected in [
      ([1, 2], [1, 2], True),
      (np.array([1, 2]), np.array([1, 2]), True),
      ([1, 2], [1, 2, 3], False),
      ([1, 2], [1, 4], False),
  ]:
    self.assertEqual(bool(jnp.array_equal(lhs, rhs)), expected)

  # NaNs compare unequal unless equal_nan=True is passed.
  with_nan = np.array([1, np.nan])
  self.assertFalse(jnp.array_equal(with_nan, with_nan))
  self.assertTrue(jnp.array_equal(with_nan, with_nan, equal_nan=True))

  # With equal_nan=True, a NaN real part matches a NaN imaginary part.
  complex_a = np.array([1 + 1j])
  complex_b = complex_a.copy()
  complex_a.real = np.nan
  complex_b.imag = np.nan
  self.assertTrue(jnp.array_equal(complex_a, complex_b, equal_nan=True))
def testArrayEquivExamples(self):
  """Spot-check jnp.array_equiv on the cases from the np docstring."""
  for lhs, rhs, expected in [([1, 2], [1, 2], True),
                             ([1, 2], [1, 3], False)]:
    self.assertEqual(bool(jnp.array_equiv(lhs, rhs)), expected)
  # The broadcasting cases need rank promotion to be permitted.
  with jax.numpy_rank_promotion('allow'):
    for lhs, rhs, expected in [
        ([1, 2], [[1, 2], [1, 2]], True),
        ([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]], False),
        ([1, 2], [[1, 2], [1, 3]], False),
    ]:
      self.assertEqual(bool(jnp.array_equiv(lhs, rhs)), expected)
def testArrayModule(self):
  """numpy_dispatch reports jnp as the array module for jnp arrays."""
  if numpy_dispatch is None:
    raise SkipTest('requires https://github.com/seberg/numpy-dispatch')

  jnp_array = jnp.array(1.0)
  np_array = np.array(1.0)

  # jnp arrays dispatch to jnp, including when mixed with plain np arrays.
  self.assertIs(numpy_dispatch.get_array_module(jnp_array), jnp)
  self.assertIs(numpy_dispatch.get_array_module(jnp_array, np_array), jnp)

  def check_module(x):
    self.assertIs(numpy_dispatch.get_array_module(x), jnp)
    return x

  # Dispatch also resolves to jnp under tracing transformations.
  jax.jit(check_module)(jnp_array)
  jax.grad(check_module)(jnp_array)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix(
         rec.test_name, shapes, dtypes),
       "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name)}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(rec.shapes, rec.nargs))
      for dtypes in filter(
        _dtypes_are_compatible_for_bitwise_ops,
        itertools.combinations_with_replacement(rec.dtypes, rec.nargs)))
    for rec in JAX_BITWISE_OP_RECORDS))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testBitwiseOp(self, np_op, jnp_op, rng_factory, shapes, dtypes):
  """Check jnp bitwise ops against np over promotion-compatible dtypes."""
  rng = rng_factory(self.rng())
  if not config.x64_enabled and any(
      jnp.iinfo(dtype).bits == 64 for dtype in dtypes):
    self.skipTest("x64 types are disabled by jax_enable_x64")
  args_maker = self._GetArgsMaker(rng, shapes, dtypes)
  # Result dtypes are only compared when no operand is a Python scalar.
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                          check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": jtu.format_test_name_suffix(op.__name__, shapes, dtypes),
     "op": op, "dtypes": dtypes, "shapes": shapes}
    for op in [jnp.left_shift, jnp.right_shift]
    for shapes in filter(
      _shapes_are_broadcast_compatible,
      # TODO numpy always promotes to shift dtype for zero-dim shapes:
      itertools.combinations_with_replacement(nonzerodim_shapes, 2))
    for dtypes in itertools.product(
      *(_valid_dtypes_for_shape(s, int_dtypes_no_uint64) for s in shapes))))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testShiftOpAgainstNumpy(self, op, dtypes, shapes):
  """Check jnp.left_shift/right_shift against their NumPy counterparts.

  Cases where NumPy promotes a mixed signed/unsigned pair involving a
  32-bit operand up to int64 are skipped in 32-bit mode, since jnp
  cannot reproduce that promotion there.
  """
  dtype, shift_dtype = dtypes
  signed_mix = np.issubdtype(dtype, np.signedinteger) != \
               np.issubdtype(shift_dtype, np.signedinteger)
  has_32 = any(np.iinfo(d).bits == 32 for d in dtypes)
  promoting_to_64 = has_32 and signed_mix
  if promoting_to_64 and not config.x64_enabled:
    # BUGFIX: the two adjacent string literals previously concatenated
    # without a space ("...int64differs...").
    self.skipTest("np.right_shift/left_shift promoting to int64 "
                  "differs from jnp in 32 bit mode.")
  info, shift_info = map(np.iinfo, dtypes)
  x_rng = jtu.rand_int(self.rng(), low=info.min, high=info.max + 1)
  # NumPy requires shifts to be non-negative and below the bit width:
  shift_rng = jtu.rand_int(self.rng(), high=max(info.bits, shift_info.bits))
  args_maker = lambda: (x_rng(shapes[0], dtype),
                        shift_rng(shapes[1], shift_dtype))
  self._CompileAndCheck(op, args_maker)
  np_op = getattr(np, op.__name__)
  self._CheckAgainstNumpy(np_op, op, args_maker)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
         rec.test_name.capitalize(),
         jtu.format_shape_dtype_string(shape, dtype), axis,
         "None" if out_dtype is None else np.dtype(out_dtype).name, keepdims),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
       "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
      for shape in rec.shapes for dtype in rec.dtypes
      for out_dtype in [None] + rec.dtypes
      for axis in list(range(-len(shape), len(shape))) + [None]
      for keepdims in [False, True])
    for rec in JAX_REDUCER_RECORDS))
def testReducer(self, np_op, jnp_op, rng_factory, shape, dtype, out_dtype,
                axis, keepdims, inexact):
  """Check a jnp reduction against np across axes, keepdims and out dtype."""
  rng = rng_factory(self.rng())
  @jtu.ignore_warning(category=np.ComplexWarning)
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="mean of empty slice.*")
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="overflow encountered.*")
  def np_fun(x):
    # When bfloat16 is involved, compute the np reference in float32.
    x_cast = x if dtype != jnp.bfloat16 else x.astype(np.float32)
    t = out_dtype if out_dtype != jnp.bfloat16 else np.float32
    return np_op(x_cast, axis, dtype=t, keepdims=keepdims)
  np_fun = _promote_like_jnp(np_fun, inexact)
  jnp_fun = lambda x: jnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  # Take the looser of the input-dtype and output-dtype tolerances.
  tol_spec = {np.float16: 1e-2, np.int32: 1E-3, np.float32: 1e-3,
              np.complex64: 1e-3, np.float64: 1e-5, np.complex128: 1e-5}
  tol = jtu.tolerance(dtype, tol_spec)
  tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=jnp.bfloat16 not in (dtype, out_dtype),
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                        rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
         rec.test_name.capitalize(),
         jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
       "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
      for shape in rec.shapes for dtype in rec.dtypes
      for axis in list(range(-len(shape), len(shape))) + [None]
      for keepdims in [False, True])
    for rec in JAX_REDUCER_NO_DTYPE_RECORDS))
def testReducerNoDtype(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                       keepdims, inexact):
  """Check jnp reductions that take no dtype argument against np."""
  rng = rng_factory(self.rng())
  # bfloat16-with-NaN cases compute the np reference in float32, then cast
  # the result back to bfloat16 for comparison.
  is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.*")
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="All-NaN slice encountered.*")
  def np_fun(x):
    x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
    res = np_op(x_cast, axis, keepdims=keepdims)
    res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
    return res
  np_fun = _promote_like_jnp(np_fun, inexact)
  jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims)
  args_maker = lambda: [rng(shape, dtype)]
  tol = {np.float16: 0.002}
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, rtol=tol, atol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}".format(
         rec.test_name.capitalize(),
         jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
       "initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
      for shape in rec.shapes for dtype in rec.dtypes
      for axis in list(range(-len(shape), len(shape))) + [None]
      for initial in [0, 1] for keepdims in [False, True])
    for rec in JAX_REDUCER_INITIAL_RECORDS))
def testReducerInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                       keepdims, initial, inexact):
  """Check jnp reductions with an `initial` value against np."""
  rng = rng_factory(self.rng())
  # bfloat16-with-NaN cases compute the np reference in float32, then cast
  # the result back to bfloat16 for comparison.
  is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.*")
  def np_fun(x):
    x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
    res = np_op(x_cast, axis, keepdims=keepdims, initial=initial)
    res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
    return res
  np_fun = _promote_like_jnp(np_fun, inexact)
  np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
  jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}_whereshape={}".format(
         rec.test_name.capitalize(),
         jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial,
         jtu.format_shape_dtype_string(whereshape, bool)),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), "whereshape": whereshape,
       "initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
      for shape in rec.shapes for dtype in rec.dtypes
      for whereshape in _compatible_shapes(shape)
      for axis in list(range(-len(shape), len(shape))) + [None]
      for initial in [0, 1] for keepdims in [False, True])
    for rec in JAX_REDUCER_INITIAL_RECORDS))
def testReducerWhere(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                     keepdims, initial, inexact, whereshape):
  """Check jnp reductions with both `initial` and a `where` mask against np."""
  if (shape in [()] + scalar_shapes and
      dtype in [jnp.int16, jnp.uint16] and
      jnp_op in [jnp.min, jnp.max]):
    self.skipTest("Known XLA failure; see https://github.com/google/jax/issues/4971.")
  rng = rng_factory(self.rng())
  # bfloat16-with-NaN cases compute the np reference in float32, then cast
  # the result back to bfloat16 for comparison.
  is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
  # Do not pass where via args_maker as that is incompatible with _promote_like_jnp.
  where = jtu.rand_bool(self.rng())(whereshape, np.bool_)
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.*")
  def np_fun(x):
    x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
    res = np_op(x_cast, axis, keepdims=keepdims, initial=initial, where=where)
    res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
    return res
  np_fun = _promote_like_jnp(np_fun, inexact)
  np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
  jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial, where=where)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@unittest.skipIf(numpy_version < (1, 20), "where parameter not supported in older numpy")
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_whereshape={}".format(
         rec.test_name.capitalize(),
         jtu.format_shape_dtype_string(shape, dtype), axis, keepdims,
         jtu.format_shape_dtype_string(whereshape, bool)),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), "whereshape": whereshape,
       "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
      for shape in rec.shapes for dtype in rec.dtypes
      for whereshape in _compatible_shapes(shape)
      for axis in list(range(-len(shape), len(shape))) + [None]
      for keepdims in [False, True])
    for rec in JAX_REDUCER_WHERE_NO_INITIAL_RECORDS))
def testReducerWhereNoInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                              keepdims, inexact, whereshape):
  """Check jnp reductions that accept `where` but no `initial` against np."""
  rng = rng_factory(self.rng())
  # bfloat16 cases compute the np reference in float32, then cast back.
  is_bf16_nan_test = dtype == jnp.bfloat16
  # Do not pass where via args_maker as that is incompatible with _promote_like_jnp.
  where = jtu.rand_bool(self.rng())(whereshape, np.bool_)
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.*")
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Mean of empty slice.*")
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="invalid value encountered in true_divide*")
  def np_fun(x):
    x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
    res = np_op(x_cast, axis, keepdims=keepdims, where=where)
    res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
    return res
  np_fun = _promote_like_jnp(np_fun, inexact)
  np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
  jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, where=where)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  # NOTE(review): the reference comparison is gated to numpy >= 1.20.2
  # (except all/any) — presumably a numpy where= bug in earlier point
  # releases; confirm before relaxing.
  if numpy_version >= (1, 20, 2) or np_op.__name__ in ("all", "any"):
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in all_shapes for dtype in all_dtypes
    for axis in list(range(-len(shape), len(shape))) + [None]))
def testCountNonzero(self, shape, dtype, axis):
  """jnp.count_nonzero agrees with np.count_nonzero along every axis."""
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  count_np = lambda x: np.count_nonzero(x, axis)
  count_jnp = lambda x: jnp.count_nonzero(x, axis)
  # Result dtypes are deliberately not compared.
  self._CheckAgainstNumpy(count_np, count_jnp, args_maker,
                          check_dtypes=False)
  self._CompileAndCheck(count_jnp, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in all_shapes for dtype in all_dtypes))
def testNonzero(self, shape, dtype):
  """jnp.nonzero matches np.nonzero (values only; dtypes not compared)."""
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  nonzero_np = jtu.ignore_warning(
      category=DeprecationWarning,
      message="Calling nonzero on 0d arrays.*")(lambda x: np.nonzero(x))
  nonzero_jnp = lambda x: jnp.nonzero(x)
  self._CheckAgainstNumpy(nonzero_np, nonzero_jnp, args_maker,
                          check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_size={}_fill_value={}".format(
        jtu.format_shape_dtype_string(shape, dtype), size, fill_value),
     "shape": shape, "dtype": dtype, "size": size, "fill_value": fill_value}
    for shape in nonempty_array_shapes
    for dtype in all_dtypes
    for fill_value in [None, -1]
    for size in [1, 5, 10]))
def testNonzeroSize(self, shape, dtype, size, fill_value):
  """jnp.nonzero with a static `size` truncates or pads the np result."""
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  @jtu.ignore_warning(category=DeprecationWarning, message="Calling nonzero on 0d arrays.*")
  def np_fun(x):
    result = np.nonzero(x)
    if size <= len(result[0]):
      # More hits than `size`: keep only the first `size` per dimension.
      return tuple(arg[:size] for arg in result)
    else:
      # Fewer hits than `size`: pad each index array with fill_value
      # (or 0 when fill_value is None).
      return tuple(np.concatenate([arg, np.full(size - len(arg), fill_value or 0, arg.dtype)])
                   for arg in result)
  jnp_fun = lambda x: jnp.nonzero(x, size=size, fill_value=fill_value)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in all_shapes for dtype in all_dtypes))
def testFlatNonzero(self, shape, dtype):
  """jnp.flatnonzero matches np.flatnonzero; jit requires a static size."""
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  np_ref = jtu.ignore_warning(
      category=DeprecationWarning,
      message="Calling nonzero on 0d arrays.*")(np.flatnonzero)
  self._CheckAgainstNumpy(np_ref, jnp.flatnonzero, args_maker,
                          check_dtypes=False)
  # Under jit, the output size must be specified statically.
  sized_fun = lambda x: jnp.flatnonzero(x, size=np.size(x) // 2)
  self._CompileAndCheck(sized_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in all_shapes for dtype in all_dtypes))
def testArgWhere(self, shape, dtype):
  """jnp.argwhere matches np.argwhere; jit requires a static size."""
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  np_ref = jtu.ignore_warning(
      category=DeprecationWarning,
      message="Calling nonzero on 0d arrays.*")(np.argwhere)
  self._CheckAgainstNumpy(np_ref, jnp.argwhere, args_maker,
                          check_dtypes=False)
  # Under jit a static size is required; testNonzeroSize() exercises the
  # size/fill_value semantics in full.
  sized_fun = lambda x: jnp.argwhere(x, size=np.size(x) // 2)
  self._CompileAndCheck(sized_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "{}_inshape={}_axis={}".format(
        rec.test_name.capitalize(),
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
     "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
     "axis": axis}
    for rec in JAX_ARGMINMAX_RECORDS
    for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)
    for axis in range(-len(shape), len(shape))))
def testArgMinMax(self, np_op, jnp_op, rng_factory, shape, dtype, axis):
  """Check jnp argmin/argmax (and nan variants) against np on each axis."""
  rng = rng_factory(self.rng())
  if dtype == np.complex128 and jtu.device_under_test() == "gpu":
    raise unittest.SkipTest("complex128 reductions not supported on GPU")
  if "nan" in np_op.__name__ and dtype == jnp.bfloat16:
    raise unittest.SkipTest("NumPy doesn't correctly handle bfloat16 arrays")
  def np_fun(array_to_reduce):
    # Cast to jnp's default integer dtype so result dtypes agree.
    return np_op(array_to_reduce, axis).astype(jnp.int_)
  def jnp_fun(array_to_reduce):
    return jnp_op(array_to_reduce, axis)
  args_maker = lambda: [rng(shape, dtype)]
  try:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  except ValueError as e:
    # np raises on all-NaN slices; jnp has no equivalent check to compare.
    if str(e) == "All-NaN slice encountered":
      self.skipTest("JAX doesn't support checking for all-NaN slices")
    else:
      raise
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": rec.test_name.capitalize(), "name": rec.name,
     "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name)}
    for rec in JAX_ARGMINMAX_RECORDS))
def testArgMinMaxEmpty(self, name, np_op, jnp_op):
  """argmin/argmax reject empty reductions but allow empty outputs."""
  name = name[3:] if name.startswith("nan") else name
  msg = "attempt to get {} of an empty sequence".format(name)
  # Reducing over zero elements must raise.
  for bad_args, bad_kwargs in [((np.array([]),), {}),
                               ((np.zeros((2, 0)),), {"axis": 1})]:
    with self.assertRaises(ValueError, msg=msg):
      jnp_op(*bad_args, **bad_kwargs)
  # Reducing a nonempty axis of an empty array yields an empty result.
  np_fun = partial(np_op, axis=0)
  jnp_fun = partial(jnp_op, axis=0)
  args_maker = lambda: [np.zeros((2, 0))]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_{}".format(
        jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
        jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
        axes),
     "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
     "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
     "axes": axes}
    for lhs_shape, rhs_shape, axes in [
        [(2,), (2,), (-1, -1, -1, None)], # scalar output
        [(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors
        [(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors
        [(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting
        [(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes
        [(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting
        [(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors
        [(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting
        [(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing
        [(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before
    ]
    for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):
  """Check jnp.cross against np.cross over axis choices and broadcasting."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
  axisa, axisb, axisc, axis = axes
  jnp_fun = lambda a, b: jnp.cross(a, b, axisa, axisb, axisc, axis)
  def np_fun(a, b):
    # Compute the reference in float32 for bfloat16 inputs, then cast to
    # the dtype jnp's promotion rules would produce.
    a = a.astype(np.float32) if lhs_dtype == jnp.bfloat16 else a
    b = b.astype(np.float32) if rhs_dtype == jnp.bfloat16 else b
    out = np.cross(a, b, axisa, axisb, axisc, axis)
    return out.astype(jnp.promote_types(lhs_dtype, rhs_dtype))
  tol_spec = {dtypes.bfloat16: 3e-1, np.float16: 0.15}
  tol = max(jtu.tolerance(lhs_dtype, tol_spec),
            jtu.tolerance(rhs_dtype, tol_spec))
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                        rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_{}".format(
        name,
        jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
        jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
     "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
     "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
    for name, lhs_shape, rhs_shape in [
        ("matrix-scalar", (3, 3), ()),
        ("scalar-matrix", (), (3, 3)),
        ("matrix-vector", (4, 5), (5,)),
        ("vector-matrix", (6,), (6, 4)),
        ("matrix-matrix", (3, 4), (4, 5)),
        ("tensor-vector", (4, 3, 2), (2,)),
        ("vector-tensor", (2,), (3, 2, 4)),
        ("tensor-matrix", (4, 3, 2), (2, 5)),
        ("matrix-tensor", (5, 2), (3, 2, 4)),
        ("tensor-tensor", (2, 3, 4), (5, 4, 1))]
    for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
  """Check jnp.dot against np.dot for scalar/vector/matrix/tensor operands."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
  tol = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-14,
         np.complex128: 1e-14}
  # Looser tolerances on TPU.
  if jtu.device_under_test() == "tpu":
    tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1
  def np_dot(x, y):
    # bfloat16 reference runs in float32, then casts to the promoted dtype.
    x = x.astype(np.float32) if lhs_dtype == jnp.bfloat16 else x
    y = y.astype(np.float32) if rhs_dtype == jnp.bfloat16 else y
    return np.dot(x, y).astype(jnp.promote_types(lhs_dtype, rhs_dtype))
  self._CheckAgainstNumpy(np_dot, jnp.dot, args_maker,
                          tol=tol)
  self._CompileAndCheck(jnp.dot, args_maker, atol=tol,
                        rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_{}".format(
        name,
        jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
        jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
     "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
     "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
    for name, lhs_shape, rhs_shape in [
        ("vector-vector", (3,), (3,)),
        ("matrix-vector", (3, 3), (3,)),
        ("vector-matrix", (3,), (3, 3)),
        ("matrix-matrix", (3, 3), (3, 3)),
        ("vector-tensor", (3,), (5, 3, 2)),
        ("tensor-vector", (5, 3, 2), (2,)),
        ("matrix-tensor", (5, 2), (3, 2, 4)),
        ("tensor-matrix", (5, 2, 3), (3, 2)),
        ("tensor-tensor", (5, 3, 4), (5, 4, 1)),
        ("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
    for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
  """Check jnp.matmul against np.matmul, including batched/broadcast cases."""
  rng = jtu.rand_default(self.rng())
  def np_fun(x, y):
    # Cast the np result to the dtype jnp's promotion rules would pick.
    dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
    return np.matmul(x, y).astype(dtype)
  args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
  tol = {np.float16: 1e-2, np.float32: 2e-2, np.float64: 1e-12,
         np.complex128: 1e-12}
  # Looser tolerances on TPU.
  if jtu.device_under_test() == "tpu":
    tol[np.float16] = tol[np.float32] = tol[np.complex64] = 4e-2
  self._CheckAgainstNumpy(np_fun, jnp.matmul, args_maker, tol=tol)
  self._CompileAndCheck(jnp.matmul, args_maker, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_{}".format(
        jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
        jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
        axes),
     "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
     "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
     "axes": axes}
    for lhs_shape, rhs_shape, axes in [
        [(3,), (), 0],
        [(2, 3, 4), (5, 6, 7), 0],  # from issue #740
        [(2, 3, 4), (3, 4, 5, 6), 2],
        [(2, 3, 4), (5, 4, 3, 6), [1, 2]],
        [(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
        [(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
    ]
    for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):
  """Check jnp.tensordot against np.tensordot for int and list axes specs."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
  jnp_fun = lambda a, b: jnp.tensordot(a, b, axes)
  def np_fun(a, b):
    # bfloat16 reference runs in float32, then casts to the promoted dtype.
    a = a if lhs_dtype != jnp.bfloat16 else a.astype(np.float32)
    b = b if rhs_dtype != jnp.bfloat16 else b.astype(np.float32)
    dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
    return np.tensordot(a, b, axes).astype(dtype)
  tol = {np.float16: 1e-1, np.float32: 1e-3, np.float64: 1e-12,
         np.complex64: 1e-3, np.complex128: 1e-12}
  # Looser tolerances on TPU.
  if jtu.device_under_test() == "tpu":
    tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker)
def testTensordotErrors(self):
  """jnp.tensordot raises informative TypeErrors for malformed axes."""
  a = np.random.random((3, 2, 2))
  b = np.random.random((2,))
  bad_axes_cases = [
      (2, "Number of tensordot axes.*exceeds input ranks.*"),
      (([0], [0, 1]),
       "tensordot requires axes lists to have equal length.*"),
      (('bad', 'axes'),
       "tensordot requires both axes lists to be either ints, tuples or lists.*"),
      ('badaxes',
       "tensordot axes argument must be an int, a pair of ints, or a pair of lists.*"),
  ]
  for axes, pattern in bad_axes_cases:
    self.assertRaisesRegex(TypeError, pattern,
                           lambda: jnp.tensordot(a, b, axes=axes))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_invert={}".format(
        jtu.format_shape_dtype_string(element_shape, dtype),
        jtu.format_shape_dtype_string(test_shape, dtype), invert),
     "element_shape": element_shape, "test_shape": test_shape,
     "dtype": dtype, "invert": invert}
    for element_shape in all_shapes
    for test_shape in all_shapes
    for dtype in default_dtypes
    for invert in [True, False]))
def testIsin(self, element_shape, test_shape, dtype, invert):
  """Check jnp.isin against np.isin, including invert=True."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(element_shape, dtype), rng(test_shape, dtype)]
  jnp_fun = lambda e, t: jnp.isin(e, t, invert=invert)
  np_fun = lambda e, t: np.isin(e, t, invert=invert)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_invert={}".format(
        jtu.format_shape_dtype_string(element_shape, dtype),
        jtu.format_shape_dtype_string(test_shape, dtype), invert),
     "element_shape": element_shape, "test_shape": test_shape,
     "dtype": dtype, "invert": invert}
    for element_shape in all_shapes
    for test_shape in all_shapes
    for dtype in default_dtypes
    for invert in [True, False]))
def testIn1d(self, element_shape, test_shape, dtype, invert):
  """Check jnp.in1d against np.in1d, including invert=True."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(element_shape, dtype), rng(test_shape, dtype)]
  jnp_fun = lambda e, t: jnp.in1d(e, t, invert=invert)
  np_fun = lambda e, t: np.in1d(e, t, invert=invert)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}".format(
        jtu.format_shape_dtype_string(shape1, dtype1),
        jtu.format_shape_dtype_string(shape2, dtype2)),
     "shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2}
    for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
    for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
    for shape1 in all_shapes
    for shape2 in all_shapes))
def testSetdiff1d(self, shape1, shape2, dtype1, dtype2):
  """Check jnp.setdiff1d against np.setdiff1d over mixed shapes/dtypes."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
  self._CheckAgainstNumpy(np.setdiff1d, jnp.setdiff1d, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}".format(
          jtu.format_shape_dtype_string(shape1, dtype1),
          jtu.format_shape_dtype_string(shape2, dtype2)),
       "shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2}
      # bfloat16 is excluded: NumPy cannot handle it.
      for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
      for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
      for shape1 in nonempty_nonscalar_array_shapes
      for shape2 in nonempty_nonscalar_array_shapes))
  def testUnion1d(self, shape1, shape2, dtype1, dtype2):
    """Check jnp.union1d against np.union1d for mixed shapes/dtypes."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
    def np_fun(arg1, arg2):
      # Cast the NumPy result to JAX's promoted dtype so dtypes compare equal.
      dtype = jnp.promote_types(arg1.dtype, arg2.dtype)
      return np.union1d(arg1, arg2).astype(dtype)
    self._CheckAgainstNumpy(np_fun, jnp.union1d, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_size={}".format(
          jtu.format_shape_dtype_string(shape1, dtype1),
          jtu.format_shape_dtype_string(shape2, dtype2), size),
       "shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2, "size": size}
      for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
      for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
      for shape1 in nonempty_nonscalar_array_shapes
      for shape2 in nonempty_nonscalar_array_shapes
      for size in [1, 5, 10]))
  def testUnion1dSize(self, shape1, shape2, dtype1, dtype2, size):
    """Check jnp.union1d's static `size` argument (needed under jit).

    The reference emulates JAX's semantics: the union is truncated to
    `size`, or padded with the smallest element when it is shorter.
    """
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
    def np_fun(arg1, arg2):
      dtype = jnp.promote_types(arg1.dtype, arg2.dtype)
      result = np.union1d(arg1, arg2).astype(dtype)
      if size <= len(result):
        return result[:size]
      else:
        # np.union1d is sorted, so result[0] is the minimum — JAX pads with it.
        return np.concatenate([result, np.full(size - len(result), result[0], result.dtype)])
    def jnp_fun(arg1, arg2):
      return jnp.union1d(arg1, arg2, size=size)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_assume_unique={}".format(
          jtu.format_shape_dtype_string(shape1, dtype1),
          jtu.format_shape_dtype_string(shape2, dtype2),
          assume_unique),
       "shape1": shape1, "dtype1": dtype1, "shape2": shape2, "dtype2": dtype2,
       "assume_unique": assume_unique}
      for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
      for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
      for shape1 in all_shapes
      for shape2 in all_shapes
      for assume_unique in [False, True]))
  def testSetxor1d(self, shape1, dtype1, shape2, dtype2, assume_unique):
    """Check jnp.setxor1d against np.setxor1d, incl. assume_unique=True."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
    jnp_fun = lambda ar1, ar2: jnp.setxor1d(ar1, ar2, assume_unique=assume_unique)
    def np_fun(ar1, ar2):
      if assume_unique:
        # pre-flatten the arrays to match with jax implementation
        ar1 = np.ravel(ar1)
        ar2 = np.ravel(ar2)
      return np.setxor1d(ar1, ar2, assume_unique)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_assume_unique={}_return_indices={}".format(
          jtu.format_shape_dtype_string(shape1, dtype1),
          jtu.format_shape_dtype_string(shape2, dtype2),
          assume_unique,
          return_indices),
       "shape1": shape1, "dtype1": dtype1, "shape2": shape2, "dtype2": dtype2,
       "assume_unique": assume_unique, "return_indices": return_indices}
      for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
      for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
      for shape1 in all_shapes
      for shape2 in all_shapes
      for assume_unique in [False, True]
      for return_indices in [False, True]))
  def testIntersect1d(self, shape1, dtype1, shape2, dtype2, assume_unique, return_indices):
    """Check jnp.intersect1d against np.intersect1d for all flag combos."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
    jnp_fun = lambda ar1, ar2: jnp.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)
    np_fun = lambda ar1, ar2: np.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}".format(
          jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
          jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
       "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
       "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
      # TODO(phawkins): support integer dtypes too.
      for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
      for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
      if len(jtu._dims_of_shape(lhs_shape)) == 0
      or len(jtu._dims_of_shape(rhs_shape)) == 0
      or lhs_shape[-1] == rhs_shape[-1]))
  def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
    """Check jnp.inner against np.inner with dtype-aware tolerances."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    def np_fun(lhs, rhs):
      # NumPy cannot compute in bfloat16; upcast, then cast the result back
      # to the dtype JAX would produce.
      lhs = lhs if lhs_dtype != jnp.bfloat16 else lhs.astype(np.float32)
      rhs = rhs if rhs_dtype != jnp.bfloat16 else rhs.astype(np.float32)
      dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
      return np.inner(lhs, rhs).astype(dtype)
    jnp_fun = lambda lhs, rhs: jnp.inner(lhs, rhs)
    tol_spec = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-13,
                np.complex64: 1e-5}
    if jtu.device_under_test() == "tpu":
      # TPU matmul precision is much lower by default.
      tol_spec[np.float32] = tol_spec[np.complex64] = 2e-1
    tol = max(jtu.tolerance(lhs_dtype, tol_spec),
              jtu.tolerance(rhs_dtype, tol_spec))
    # TODO(phawkins): there are float32/float64 disagreements for some inputs.
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_deg={}_rcond={}_full={}_w={}_cov={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          deg,
          rcond,
          full,
          w,
          cov),
       "shape": shape, "dtype": dtype, "deg": deg,
       "rcond": rcond, "full": full, "w":w, "cov":cov}
      for dtype in [dt for dt in float_dtypes if dt not in [jnp.float16, jnp.bfloat16]]
      for shape in [shape for shape in one_dim_array_shapes if shape != (1,)]
      for deg in [1, 2, 3]
      for rcond in [None, -1, 10e-3, 10e-5, 10e-10]
      for full in [False, True]
      for w in [False, True]
      for cov in [False, True, "unscaled"]))
  def testPolyfit(self, shape, dtype, deg, rcond, full, w, cov):
    """Check jnp.polyfit against np.polyfit over its keyword surface."""
    rng = jtu.rand_default(self.rng())
    tol_spec = {np.float32: 1e-3, np.float64: 1e-13, np.complex64: 1e-5}
    if jtu.device_under_test() == "tpu":
      tol_spec[np.float32] = tol_spec[np.complex64] = 2e-1
    tol = jtu.tolerance(dtype, tol_spec)
    # When w is enabled, derive strictly-positive weights from a third input.
    _w = lambda a: abs(a) if w else None
    args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), rng(shape, dtype)]
    jnp_fun = lambda x, y, a: jnp.polyfit(x, y, deg=deg, rcond=rcond, full=full, w=_w(a), cov=cov)
    np_fun = jtu.ignore_warning(
      message="Polyfit may be poorly conditioned*")(lambda x, y, a: np.polyfit(x, y, deg=deg, rcond=rcond, full=full, w=_w(a), cov=cov))
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_amin={}_amax={}".format(
          jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
       "shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max}
      for shape in all_shapes for dtype in number_dtypes
      for a_min, a_max in [(-1, None), (None, 1), (-0.9, 1),
                           (-np.ones(1), None),
                           (None, np.ones(1)),
                           (np.full(1, -0.9), np.ones(1))]))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testClipStaticBounds(self, shape, dtype, a_min, a_max):
    """Check jnp.clip against np.clip for scalar and array static bounds."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.clip(x, a_min=a_min, a_max=a_max)
    jnp_fun = lambda x: jnp.clip(x, a_min=a_min, a_max=a_max)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker)
def testClipError(self):
with self.assertRaisesRegex(ValueError, "At most one of a_min and a_max.*"):
jnp.clip(jnp.zeros((3,)))
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_decimals={}".format(
          jtu.format_shape_dtype_string(shape, dtype), decimals),
       "shape": shape, "dtype": dtype, "decimals": decimals}
      for shape, dtype in _shape_and_dtypes(all_shapes, number_dtypes)
      for decimals in [0, 1, -2]))
  def testRoundStaticDecimals(self, shape, dtype, decimals):
    """Check jnp.round against np.round with a static decimals argument."""
    rng = jtu.rand_default(self.rng())
    if jnp.issubdtype(dtype, np.integer) and decimals < 0:
      self.skipTest("Integer rounding with decimals < 0 not implemented")
    np_fun = lambda x: np.round(x, decimals=decimals)
    jnp_fun = lambda x: jnp.round(x, decimals=decimals)
    args_maker = lambda: [rng(shape, dtype)]
    # Low-precision float formats need looser tolerances.
    tol = {jnp.bfloat16: 5e-2, np.float16: 1e-2}
    # Python scalars promote differently, so skip the dtype check for them.
    check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=check_dtypes, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
                          atol=tol, rtol=tol)
def testOperatorRound(self):
self.assertAllClose(round(np.float32(7.532), 1),
round(jnp.float32(7.5), 1))
self.assertAllClose(round(np.float32(1.234), 2),
round(jnp.float32(1.234), 2))
self.assertAllClose(round(np.float32(1.234)),
round(jnp.float32(1.234)), check_dtypes=False)
self.assertAllClose(round(np.float32(7.532), 1),
round(jnp.array(7.5, jnp.float32), 1))
self.assertAllClose(round(np.float32(1.234), 2),
round(jnp.array(1.234, jnp.float32), 2))
self.assertAllClose(round(np.float32(1.234)),
round(jnp.array(1.234, jnp.float32)),
check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_padwidth={}_constantvalues={}".format(
          jtu.format_shape_dtype_string(shape, dtype), mode, pad_width,
          constant_values),
       "shape": shape, "dtype": dtype, "mode": mode,
       "pad_width": pad_width, "constant_values": constant_values}
      for mode, shapes in [
          ('constant', all_shapes),
          ('wrap', nonempty_shapes),
          ('edge', nonempty_shapes),
      ]
      for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)
      for constant_values in [
          # None is used for modes other than 'constant'
          None,
          # constant
          0, 1,
          # (constant,)
          (0,), (2.718,),
          # ((before_const, after_const),)
          ((0, 2),), ((-1, 3.14),),
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i / 2, -3.14 * i) for i in range(len(shape))),
      ]
      for pad_width in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 0),),
          # (before, after)  (not in the docstring but works in numpy)
          (2, 0), (0, 0),
          # (pad,)
          (1,), (2,),
          # pad
          0, 1,
      ]
      if (pad_width != () and constant_values != () and
          ((mode == 'constant' and constant_values is not None) or
           (mode != 'constant' and constant_values is None)))))
  def testPad(self, shape, dtype, mode, pad_width, constant_values):
    """Check jnp.pad for 'constant'/'wrap'/'edge' against np.pad.

    constant_values is only passed for mode='constant'; the decorator
    filter guarantees it is None otherwise.
    """
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    if constant_values is None:
      np_fun = partial(np.pad, pad_width=pad_width, mode=mode)
      jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode)
    else:
      np_fun = partial(np.pad, pad_width=pad_width, mode=mode,
                       constant_values=constant_values)
      jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode,
                        constant_values=constant_values)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_pad_width={}_stat_length={}".format(
          jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, stat_length),
       "shape": shape, "dtype": dtype, "mode": mode, "pad_width": pad_width,
       "stat_length": stat_length}
      for mode in ['maximum', 'minimum', 'mean', 'median']
      for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
      for pad_width in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 0),),
          # (before, after)  (not in the docstring but works in numpy)
          (2, 0), (0, 0),
          # (pad,)
          (1,), (2,),
          # pad
          0, 1,
      ]
      for stat_length in [
          None,
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple(((i % 3 + 1), ((i + 1) % 3) + 1) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 2),),
          # (before, after)  (not in the docstring but works in numpy)
          (1, 1), (3, 4),
          # (pad,)
          (1,), (2,),
          # pad
          1, 2
      ]
      if (pad_width != () and stat_length != () and
          not (dtype in bool_dtypes and mode == 'mean'))))
  def testPadStatValues(self, shape, dtype, mode, pad_width, stat_length):
    """Check jnp.pad statistic modes (max/min/mean/median) against np.pad."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = partial(np.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)
    jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_pad_width={}_reflect_type={}".format(
          jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, reflect_type),
       "shape": shape, "dtype": dtype, "mode": mode, "pad_width": pad_width,
       "reflect_type": reflect_type}
      for mode in ['symmetric', 'reflect']
      for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
      for pad_width in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 3),),
          # (before, after)  (not in the docstring but works in numpy)
          (2, 1), (1, 2),
          # (pad,)
          (1,), (2,), (3,),
          # pad
          0, 5, 7, 10
      ]
      for reflect_type in ['even', 'odd']
      if (pad_width != () and
          # following types lack precision when calculating odd values
          (reflect_type != 'odd' or dtype not in [np.bool_, np.float16, jnp.bfloat16]))))
  def testPadSymmetricAndReflect(self, shape, dtype, mode, pad_width, reflect_type):
    """Check jnp.pad 'symmetric'/'reflect' modes against np.pad."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = partial(np.pad, pad_width=pad_width, mode=mode, reflect_type=reflect_type)
    jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, reflect_type=reflect_type)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE,
                            tol={np.float32: 1e-3, np.complex64: 1e-3})
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_pad_width={}_end_values={}".format(
          jtu.format_shape_dtype_string(shape, dtype), "linear_ramp", pad_width, end_values),
       "shape": shape, "dtype": dtype, "pad_width": pad_width,
       "end_values": end_values}
      for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
      for pad_width in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 0),),
          # (before, after)  (not in the docstring but works in numpy)
          (2, 0), (0, 0),
          # (pad,)
          (1,), (2,),
          # pad
          0, 1,
      ]
      for end_values in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2.0, 3.14),),
          # (before, after)  (not in the docstring but works in numpy)
          (0, 0), (-8.0, 2.0),
          # (end_values,)
          (1,), (2,),
          # end_values
          0, 1, 100, 10.0, 3.5, 4.2, -5, -3
      ]
      if (pad_width != () and end_values != () and
          # following types lack precision
          dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16])))
  def testPadLinearRamp(self, shape, dtype, pad_width, end_values):
    """Check jnp.pad mode='linear_ramp' against np.pad."""
    if numpy_version < (1, 20) and np.issubdtype(dtype, np.integer):
      # Older NumPy computed the integer ramp differently.
      raise unittest.SkipTest("NumPy 1.20 changed the semantics of np.linspace")
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = partial(np.pad, pad_width=pad_width, mode="linear_ramp",
                     end_values=end_values)
    jnp_fun = partial(jnp.pad, pad_width=pad_width, mode="linear_ramp",
                      end_values=end_values)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
    self._CompileAndCheck(jnp_fun, args_maker)
def testPadEmpty(self):
arr = np.arange(6).reshape(2, 3)
pad_width = ((2, 3), (3, 1))
np_res = np.pad(arr, pad_width=pad_width, mode="empty")
jnp_res = jnp.pad(arr, pad_width=pad_width, mode="empty")
np.testing.assert_equal(np_res.shape, jnp_res.shape)
np.testing.assert_equal(arr, np_res[2:-3, 3:-1])
np.testing.assert_equal(arr, jnp_res[2:-3, 3:-1])
np.testing.assert_equal(np_res[2:-3, 3:-1], jnp_res[2:-3, 3:-1])
def testPadKwargs(self):
modes = {
'constant': {'constant_values': 0},
'edge': {},
'linear_ramp': {'end_values': 0},
'maximum': {'stat_length': None},
'mean': {'stat_length': None},
'median': {'stat_length': None},
'minimum': {'stat_length': None},
'reflect': {'reflect_type': 'even'},
'symmetric': {'reflect_type': 'even'},
'wrap': {},
'empty': {}
}
arr = jnp.array([1, 2, 3])
pad_width = 1
for mode in modes.keys():
allowed = modes[mode]
not_allowed = {}
for kwargs in modes.values():
if kwargs != allowed:
not_allowed.update(kwargs)
# Test if allowed keyword arguments pass
jnp.pad(arr, pad_width, mode, **allowed)
# Test if prohibited keyword arguments of other modes raise an error
match = "unsupported keyword arguments for mode '{}'".format(mode)
for key, value in not_allowed.items():
with self.assertRaisesRegex(ValueError, match):
jnp.pad(arr, pad_width, mode, **{key: value})
# Test if unsupported mode raise error.
unsupported_modes = [1, None, "foo"]
for mode in unsupported_modes:
match = "Unimplemented padding mode '{}' for np.pad.".format(mode)
with self.assertRaisesRegex(NotImplementedError, match):
jnp.pad(arr, pad_width, mode)
  def testPadFunction(self):
    """jnp.pad accepts a user callback as `mode`, like np.pad.

    NumPy's callback mutates its vector in place; the JAX callback must
    instead return a functionally-updated vector via .at[].set.
    """
    def np_pad_with(vector, pad_width, iaxis, kwargs):
      # np.pad callbacks write into `vector` in place; return value unused.
      pad_value = kwargs.get('padder', 10)
      vector[:pad_width[0]] = pad_value
      vector[-pad_width[1]:] = pad_value
    def jnp_pad_with(vector, pad_width, iaxis, kwargs):
      pad_value = kwargs.get('padder', 10)
      vector = vector.at[:pad_width[0]].set(pad_value)
      vector = vector.at[-pad_width[1]:].set(pad_value)
      return vector
    arr = np.arange(6).reshape(2, 3)
    np_res = np.pad(arr, 2, np_pad_with)
    jnp_res = jnp.pad(arr, 2, jnp_pad_with)
    np.testing.assert_equal(np_res, jnp_res)
    # Extra keyword arguments are forwarded through to the callback.
    arr = np.arange(24).reshape(2, 3, 4)
    np_res = np.pad(arr, 1, np_pad_with, padder=100)
    jnp_res = jnp.pad(arr, 1, jnp_pad_with, padder=100)
    np.testing.assert_equal(np_res, jnp_res)
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(arr.shape, arr.dtype)]
    jnp_fun = partial(jnp.pad, pad_width=1, mode=jnp_pad_with)
    self._CompileAndCheck(jnp_fun, args_maker)
def testPadWithNumpyPadWidth(self):
a = jnp.array([1, 2, 3, 4, 5])
f = jax.jit(
partial(
jnp.pad,
pad_width=np.asarray((2, 3)),
mode="constant",
constant_values=(4, 6)))
np.testing.assert_array_equal(
f(a),
np.pad(
a,
pad_width=np.asarray((2, 3)),
mode="constant",
constant_values=(4, 6)))
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape=[{}]_reps={}".format(
          jtu.format_shape_dtype_string(shape, dtype), reps),
       "shape": shape, "dtype": dtype, "reps": reps}
      for reps in [(), (2,), (3, 4), (2, 3, 4), (1, 0, 2)]
      for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
      ))
  def testTile(self, shape, dtype, reps):
    """Check jnp.tile against np.tile, including empty and zero reps."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda arg: np.tile(arg, reps)
    jnp_fun = lambda arg: jnp.tile(arg, reps)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}".format(
          jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype}
      for shape in all_shapes
      for dtype in all_dtypes))
  def testExtract(self, shape, dtype):
    """Check jnp.extract against np.extract with a float condition array."""
    # rand_some_zero guarantees falsy entries so the condition is non-trivial.
    rng = jtu.rand_some_zero(self.rng())
    args_maker = lambda: [rng(shape, jnp.float32), rng(shape, dtype)]
    self._CheckAgainstNumpy(np.extract, jnp.extract, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_ncond={}_nfunc={}".format(
          jtu.format_shape_dtype_string(shape, dtype), ncond, nfunc),
       "shape": shape, "dtype": dtype, "ncond": ncond, "nfunc": nfunc}
      for ncond in [1, 2, 3]
      for nfunc in [ncond, ncond + 1]
      for shape in all_shapes
      for dtype in all_dtypes))
  def testPiecewise(self, shape, dtype, ncond, nfunc):
    """Check jnp.piecewise against np.piecewise with callables and scalars.

    nfunc == ncond + 1 exercises the optional default (otherwise) function.
    """
    rng = jtu.rand_default(self.rng())
    rng_bool = jtu.rand_int(self.rng(), 0, 2)
    # Mix of callables and constants, truncated to the requested count.
    funclist = [lambda x: x - 1, 1, lambda x: x, 0][:nfunc]
    args_maker = lambda: (rng(shape, dtype), [rng_bool(shape, bool) for i in range(ncond)])
    np_fun = partial(np.piecewise, funclist=funclist)
    jnp_fun = partial(jnp.piecewise, funclist=funclist)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
    # This is a higher-order function, so the cache miss check will fail.
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, check_cache_misses=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "{}_perm={}_{}".format(
          jtu.format_shape_dtype_string(shape, dtype), perm, arg_type),
       "dtype": dtype, "shape": shape, "perm": perm, "arg_type": arg_type}
      for dtype in default_dtypes
      for shape in array_shapes
      for arg_type in ["splat", "value"]
      for perm in [None, tuple(np.random.RandomState(0).permutation(np.zeros(shape).ndim))]))
  def testTransposeTuple(self, shape, dtype, perm, arg_type):
    """.transpose() accepts a permutation both as a tuple and splatted."""
    rng = jtu.rand_some_zero(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    if arg_type == "value":
      # Permutation passed as a single tuple (or None) argument.
      np_fun = lambda x: x.transpose(perm)
      jnp_fun = lambda x: jnp.array(x).transpose(perm)
    else:
      # Permutation splatted as individual positional arguments.
      np_fun = lambda x: x.transpose(*(perm or ()))
      jnp_fun = lambda x: jnp.array(x).transpose(*(perm or ()))
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "{}_trim={}".format(
          jtu.format_shape_dtype_string(a_shape, dtype), trim),
       "dtype": dtype, "a_shape": a_shape, "trim": trim}
      for dtype in default_dtypes
      for a_shape in one_dim_array_shapes
      for trim in ["f", "b", "fb"]))
  def testTrimZeros(self, a_shape, dtype, trim):
    """Check jnp.trim_zeros against np.trim_zeros for front/back/both."""
    # rand_some_zero ensures there are actually zeros to trim.
    rng = jtu.rand_some_zero(self.rng())
    args_maker = lambda: [rng(a_shape, dtype)]
    np_fun = lambda arg1: np.trim_zeros(arg1, trim)
    jnp_fun = lambda arg1: jnp.trim_zeros(arg1, trim)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_rank{}".format(
          jtu.format_shape_dtype_string(a_shape, dtype), rank),
       "dtype": dtype, "a_shape": a_shape, "rank": rank}
      for rank in (1, 2)
      for dtype in default_dtypes
      for a_shape in one_dim_array_shapes))
  def testPoly(self, a_shape, dtype, rank):
    """Check jnp.poly against np.poly for 1-D roots and 2-D matrix input."""
    if dtype in (np.float16, jnp.bfloat16, np.int16):
      self.skipTest(f"{dtype} gets promoted to {np.float16}, which is not supported.")
    elif rank == 2 and jtu.device_under_test() in ("tpu", "gpu"):
      # Rank 2 requires eigenvalues of a general (nonsymmetric) matrix.
      self.skipTest("Nonsymmetric eigendecomposition is only implemented on the CPU backend.")
    rng = jtu.rand_default(self.rng())
    tol = { np.int8: 1e-3, np.int32: 1e-3, np.float32: 1e-3, np.float64: 1e-6 }
    if jtu.device_under_test() == "tpu":
      tol[np.int32] = tol[np.float32] = 1e-1
    tol = jtu.tolerance(dtype, tol)
    # a_shape is a 1-tuple; repeating it `rank` times yields a 1-D or square
    # 2-D input shape.
    args_maker = lambda: [rng(a_shape * rank, dtype)]
    self._CheckAgainstNumpy(np.poly, jnp.poly, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp.poly, args_maker, check_dtypes=True, rtol=tol, atol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "a_shape={} , b_shape={}".format(
          jtu.format_shape_dtype_string(a_shape, dtype),
          jtu.format_shape_dtype_string(b_shape, dtype)),
       "dtype": dtype, "a_shape": a_shape, "b_shape" : b_shape}
      for dtype in default_dtypes
      for a_shape in one_dim_array_shapes
      for b_shape in one_dim_array_shapes))
  def testPolyAdd(self, a_shape, b_shape, dtype):
    """Check jnp.polyadd against np.polyadd for unequal-length coefficients."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda arg1, arg2: np.polyadd(arg1, arg2)
    jnp_fun = lambda arg1, arg2: jnp.polyadd(arg1, arg2)
    args_maker = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "a_shape={} , b_shape={}".format(
          jtu.format_shape_dtype_string(a_shape, dtype),
          jtu.format_shape_dtype_string(b_shape, dtype)),
       "dtype": dtype, "a_shape": a_shape, "b_shape" : b_shape}
      for dtype in default_dtypes
      for a_shape in one_dim_array_shapes
      for b_shape in one_dim_array_shapes))
  def testPolySub(self, a_shape, b_shape, dtype):
    """Check jnp.polysub against np.polysub for unequal-length coefficients."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda arg1, arg2: np.polysub(arg1, arg2)
    jnp_fun = lambda arg1, arg2: jnp.polysub(arg1, arg2)
    args_maker = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_order={}_k={}".format(
          jtu.format_shape_dtype_string(a_shape, dtype),
          order, k),
       "dtype": dtype, "a_shape": a_shape, "order" : order, "k": k}
      for dtype in default_dtypes
      for a_shape in one_dim_array_shapes
      for order in range(5)
      # k: per-order constants, a single broadcast constant, or the default.
      for k in [np.arange(order, dtype=dtype), np.ones(1, dtype), None]))
  def testPolyInt(self, a_shape, order, k, dtype):
    """Check jnp.polyint against np.polyint for integration constants k."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda arg1: np.polyint(arg1, m=order, k=k)
    jnp_fun = lambda arg1: jnp.polyint(arg1, m=order, k=k)
    args_maker = lambda: [rng(a_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_order={}".format(
          jtu.format_shape_dtype_string(a_shape, dtype),
          order),
       "dtype": dtype, "a_shape": a_shape, "order" : order}
      for dtype in default_dtypes
      for a_shape in one_dim_array_shapes
      for order in range(5)))
  def testPolyDer(self, a_shape, order, dtype):
    """Check jnp.polyder against np.polyder for derivative orders 0-4."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda arg1: np.polyder(arg1, m=order)
    jnp_fun = lambda arg1: jnp.polyder(arg1, m=order)
    args_maker = lambda: [rng(a_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_ptype={}".format(ptype), "ptype": ptype}
      for ptype in ['int', 'np.int', 'jnp.int']))
  def testIntegerPower(self, ptype):
    """jnp.power with a static integer exponent lowers to integer_pow.

    Inspects the jaxpr to confirm the single-primitive lowering for all
    three integer flavors of the exponent.
    """
    p = {'int': 2, 'np.int': np.int32(2), 'jnp.int': jnp.int32(2)}[ptype]
    jaxpr = jax.make_jaxpr(partial(jnp.power, x2=p))(1)
    eqns = jaxpr.jaxpr.eqns
    self.assertLen(eqns, 1)
    self.assertEqual(eqns[0].primitive, lax.integer_pow_p)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_x={}_y={}".format(x, y), "x": x, "y": y}
      for x in [-1, 0, 1]
      for y in [0, 32, 64, 128]))
  def testIntegerPowerOverflow(self, x, y):
    """Integer power stays consistent with NumPy at overflow-prone exponents."""
    # Regression test for https://github.com/google/jax/issues/5987
    args_maker = lambda: [x, y]
    self._CheckAgainstNumpy(np.power, jnp.power, args_maker)
    self._CompileAndCheck(jnp.power, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "shape": shape, "dtype": dtype, "axis": axis}
      for shape in all_shapes
      for dtype in all_dtypes
      for axis in [None] + list(range(len(shape)))))
  def testCompress(self, shape, dtype, axis):
    """Check jnp.compress against np.compress over axes and dtypes."""
    rng = jtu.rand_some_zero(self.rng())
    # Choose a condition shape compatible with the selected axis.
    if shape in scalar_shapes or len(shape) == 0:
      cond_shape = (0,)
    elif axis is None:
      # axis=None operates on the flattened array.
      cond_shape = (prod(shape),)
    else:
      cond_shape = (shape[axis],)
    args_maker = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]
    np_fun = partial(np.compress, axis=axis)
    jnp_fun = partial(jnp.compress, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_condition=array[{}]_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), len(condition), axis),
       "shape": shape, "dtype": dtype, "condition": condition, "axis": axis}
      for shape in [(2, 3)]
      for dtype in int_dtypes
      # condition entries beyond axis size must be zero.
      for condition in [[1], [1, 0, 0, 0, 0, 0, 0]]
      for axis in [None, 0, 1]))
  def testCompressMismatchedShapes(self, shape, dtype, condition, axis):
    """jnp.compress matches np.compress when the condition length differs
    from the axis length (shorter, or longer but zero-padded)."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [np.array(condition), rng(shape, dtype)]
    np_fun = partial(np.compress, axis=axis)
    jnp_fun = partial(jnp.compress, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "shape": shape, "dtype": dtype, "axis": axis}
      for shape in array_shapes
      for dtype in all_dtypes
      for axis in [None] + list(range(len(shape)))))
  def testCompressMethod(self, shape, dtype, axis):
    """The ndarray .compress() method matches the np.compress function."""
    rng = jtu.rand_some_zero(self.rng())
    # Choose a condition shape compatible with the selected axis.
    if shape in scalar_shapes or len(shape) == 0:
      cond_shape = (0,)
    elif axis is None:
      cond_shape = (prod(shape),)
    else:
      cond_shape = (shape[axis],)
    args_maker = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]
    np_fun = lambda condition, x: np.compress(condition, x, axis=axis)
    jnp_fun = lambda condition, x: x.compress(condition, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
          axis, ",".join(str(d) for d in base_shape),
          ",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
       "axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
      for num_arrs in [3]
      for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, num_arrs)
      for base_shape in [(4,), (3, 4), (2, 3, 4)]
      for axis in range(-len(base_shape)+1, len(base_shape))))
  def testConcatenate(self, axis, base_shape, arg_dtypes):
    """Check jnp.concatenate against np.concatenate with mixed dtypes."""
    rng = jtu.rand_default(self.rng())
    wrapped_axis = axis % len(base_shape)
    # Vary the size along the concatenation axis per operand.
    shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
              for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
    def np_fun(*args):
      # Upcast bfloat16 for NumPy, then cast to JAX's promoted result dtype.
      args = [x if x.dtype != jnp.bfloat16 else x.astype(np.float32)
              for x in args]
      dtype = functools.reduce(jnp.promote_types, arg_dtypes)
      return np.concatenate(args, axis=axis).astype(dtype)
    jnp_fun = lambda *args: jnp.concatenate(args, axis=axis)
    def args_maker():
      return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "shape": shape, "dtype": dtype, "axis": axis}
      for shape in [(4, 1), (4, 3), (4, 5, 6)]
      for dtype in all_dtypes
      for axis in [None] + list(range(1 - len(shape), len(shape) - 1))))
  def testConcatenateArray(self, shape, dtype, axis):
    """jnp.concatenate accepts a single array (iterated along axis 0)."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = lambda x: np.concatenate(x, axis=axis)
    jnp_fun = lambda x: jnp.concatenate(x, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
def testConcatenateAxisNone(self):
# https://github.com/google/jax/issues/3419
a = jnp.array([[1, 2], [3, 4]])
b = jnp.array([[5]])
jnp.concatenate((a, b), axis=None)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
          axis, ",".join(str(d) for d in base_shape),
          ",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
       "axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
      for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, 2)
      for base_shape in [(4,), (3, 4), (2, 3, 4)]
      for axis in range(-len(base_shape)+1, len(base_shape))))
  def testAppend(self, axis, base_shape, arg_dtypes):
    """Check jnp.append against np.append with mixed dtypes and axes."""
    rng = jtu.rand_default(self.rng())
    wrapped_axis = axis % len(base_shape)
    # Vary the size along the append axis per operand.
    shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
              for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
    def np_fun(arr, values):
      # Upcast bfloat16 for NumPy, then cast to JAX's promoted result dtype.
      arr = arr.astype(np.float32) if arr.dtype == jnp.bfloat16 else arr
      values = (values.astype(np.float32) if values.dtype == jnp.bfloat16
                else values)
      out = np.append(arr, values, axis=axis)
      return out.astype(jnp.promote_types(*arg_dtypes))
    jnp_fun = lambda arr, values: jnp.append(arr, values, axis=axis)
    def args_maker():
      return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_idx={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, idx),
       "dtype": dtype, "shape": shape, "axis": axis, "idx": idx}
      for shape in nonempty_nonscalar_array_shapes
      for dtype in all_dtypes
      for axis in [None] + list(range(-len(shape), len(shape)))
      for idx in (range(-prod(shape), prod(shape))
                  if axis is None else
                  range(-shape[axis], shape[axis]))))
  def testDeleteInteger(self, shape, dtype, idx, axis):
    """jnp.delete with a scalar integer index must match np.delete."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = lambda arg: np.delete(arg, idx, axis=axis)
    jnp_fun = lambda arg: jnp.delete(arg, idx, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_slc={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, slc),
"dtype": dtype, "shape": shape, "axis": axis, "slc": slc}
for shape in nonempty_nonscalar_array_shapes
for dtype in all_dtypes
for axis in [None] + list(range(-len(shape), len(shape)))
for slc in [slice(None), slice(1, 3), slice(1, 5, 2)]))
def testDeleteSlice(self, shape, dtype, axis, slc):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda arg: np.delete(arg, slc, axis=axis)
jnp_fun = lambda arg: jnp.delete(arg, slc, axis=axis)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_idx={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis,
          jtu.format_shape_dtype_string(idx_shape, int)),
       "dtype": dtype, "shape": shape, "axis": axis, "idx_shape": idx_shape}
      for shape in nonempty_nonscalar_array_shapes
      for dtype in all_dtypes
      for axis in [None] + list(range(-len(shape), len(shape)))
      for idx_shape in all_shapes))
  def testDeleteIndexArray(self, shape, dtype, axis, idx_shape):
    """jnp.delete with an integer index array must match np.delete."""
    rng = jtu.rand_default(self.rng())
    # Valid index range is bounded by the flattened size (axis=None) or the
    # length of the deletion axis.
    max_idx = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
    # Previous to numpy 1.19, negative indices were ignored so we don't test this.
    low = 0 if numpy_version < (1, 19, 0) else -max_idx
    idx = jtu.rand_int(self.rng(), low=low, high=max_idx)(idx_shape, int)
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = lambda arg: np.delete(arg, idx, axis=axis)
    jnp_fun = lambda arg: jnp.delete(arg, idx, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @unittest.skipIf(numpy_version < (1, 19), "boolean mask not supported in numpy < 1.19.0")
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "dtype": dtype, "shape": shape, "axis": axis}
      for shape in nonempty_nonscalar_array_shapes
      for dtype in all_dtypes
      for axis in [None] + list(range(-len(shape), len(shape)))))
  def testDeleteMaskArray(self, shape, dtype, axis):
    """jnp.delete with a boolean mask must match np.delete (numpy >= 1.19)."""
    rng = jtu.rand_default(self.rng())
    # The mask must have the length of the deletion axis (or of the
    # flattened array when axis is None).
    mask_size = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
    mask = jtu.rand_int(self.rng(), low=0, high=2)(mask_size, bool)
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = lambda arg: np.delete(arg, mask, axis=axis)
    jnp_fun = lambda arg: jnp.delete(arg, mask, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_out_dims={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          axis, out_dims),
       "shape": shape, "dtype": dtype, "axis": axis, "out_dims": out_dims}
      for shape in nonempty_array_shapes
      for dtype in default_dtypes
      for axis in range(-len(shape), len(shape))
      for out_dims in [0, 1, 2]))
  def testApplyAlongAxis(self, shape, dtype, axis, out_dims):
    """jnp.apply_along_axis must match numpy for 0-, 1- and 2-d slice outputs."""
    def func(x, out_dims):
      # Map each 1-d slice to an output with `out_dims` dimensions.
      if out_dims == 0:
        return x.sum()
      elif out_dims == 1:
        return x * x[0]
      elif out_dims == 2:
        return x[:, None] + x[None, :]
      else:
        raise NotImplementedError(f"out_dims={out_dims}")
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = lambda arr: np.apply_along_axis(func, axis, arr, out_dims=out_dims)
    jnp_fun = lambda arr: jnp.apply_along_axis(func, axis, arr, out_dims=out_dims)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_func={}_keepdims={}_axes={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          func, keepdims, axes),
       "shape": shape, "dtype": dtype, "func": func, "keepdims": keepdims, "axes": axes}
      for shape in nonempty_shapes
      for func in ["sum"]
      for keepdims in [True, False]
      for axes in itertools.combinations(range(len(shape)), 2)
      # Avoid low-precision types in sum()
      for dtype in default_dtypes if dtype not in [np.float16, jnp.bfloat16]))
  def testApplyOverAxes(self, shape, dtype, func, keepdims, axes):
    """jnp.apply_over_axes must match np.apply_over_axes for reductions."""
    # `func` names an array method (e.g. "sum") applied per axis.
    f = lambda x, axis: getattr(x, func)(axis=axis, keepdims=keepdims)
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: (rng(shape, dtype),)
    np_fun = lambda a: np.apply_over_axes(f, a, axes)
    jnp_fun = lambda a: jnp.apply_over_axes(f, a, axes)
    self._CompileAndCheck(jnp_fun, args_maker)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape=[{}]_axis={}_repeats={}_fixed_size={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          axis, repeats, fixed_size),
       "axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
       'fixed_size': fixed_size}
      for repeats in [0, 1, 2]
      for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
      for axis in [None] + list(range(-len(shape), max(1, len(shape))))
      for fixed_size in [True, False]))
  def testRepeat(self, axis, shape, dtype, repeats, fixed_size):
    """jnp.repeat must match np.repeat, with and without total_repeat_length."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda arg: np.repeat(arg, repeats=repeats, axis=axis)
    np_fun = _promote_like_jnp(np_fun)
    if fixed_size:
      # Derive the static output length numpy would produce so we can pass
      # it as total_repeat_length (required when `repeats` is traced).
      total_repeat_length = np.repeat(np.zeros(shape), repeats, axis).shape[axis or 0]
      jnp_fun = lambda arg, rep: jnp.repeat(arg, repeats=rep, axis=axis,
                                            total_repeat_length=total_repeat_length)
      jnp_args_maker = lambda: [rng(shape, dtype), repeats]
      clo_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis,
                                       total_repeat_length=total_repeat_length)
      clo_fun_args_maker = lambda: [rng(shape, dtype)]
      self._CompileAndCheck(jnp_fun, jnp_args_maker)
      self._CheckAgainstNumpy(np_fun, clo_fun, clo_fun_args_maker)
    else:
      # Now repeats is in a closure, so a constant.
      jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
      args_maker = lambda: [rng(shape, dtype)]
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
      self._CompileAndCheck(jnp_fun, args_maker)
def testRepeatScalarFastPath(self):
a = jnp.array([1,2,3,4])
f = lambda a: jnp.repeat(a, repeats=2)
jaxpr = jax.make_jaxpr(f)(a)
self.assertLessEqual(len(jaxpr.jaxpr.eqns), 6)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_ind={}_inv={}_count={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis,
          return_index, return_inverse, return_counts),
       "shape": shape, "dtype": dtype, "axis": axis,
       "return_index": return_index, "return_inverse": return_inverse,
       "return_counts": return_counts}
      for dtype in number_dtypes
      for shape in all_shapes
      for axis in [None] + list(range(len(shape)))
      for return_index in [False, True]
      for return_inverse in [False, True]
      for return_counts in [False, True]))
  def testUnique(self, shape, dtype, axis, return_index, return_inverse, return_counts):
    """jnp.unique must match np.unique for all optional-output combinations."""
    if axis is not None and numpy_version < (1, 19) and np.empty(shape).size == 0:
      self.skipTest("zero-sized axis in unique leads to error in older numpy.")
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = lambda x: np.unique(x, return_index, return_inverse, return_counts, axis=axis)
    jnp_fun = lambda x: jnp.unique(x, return_index, return_inverse, return_counts, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_size={}".format(
          jtu.format_shape_dtype_string(shape, dtype), size),
       "shape": shape, "dtype": dtype, "size": size}
      for dtype in number_dtypes
      for size in [1, 5, 10]
      for shape in nonempty_array_shapes))
  def testUniqueSize(self, shape, dtype, size):
    """jnp.unique with a static `size` pads or truncates its outputs."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    kwds = dict(return_index=True, return_inverse=True, return_counts=True)
    def np_fun(x):
      # Reference: start from jnp.unique's own un-sized output, then apply
      # the documented `size` semantics by hand — truncate, or pad with the
      # first unique value (counts are padded with zeros).
      u, ind, inv, counts = jnp.unique(x, **kwds)
      if size <= len(u):
        u, ind, counts = u[:size], ind[:size], counts[:size]
      else:
        extra = size - len(u)
        u = np.concatenate([u, np.full(extra, u[0], u.dtype)])
        ind = np.concatenate([ind, np.full(extra, ind[0], ind.dtype)])
        counts = np.concatenate([counts, np.zeros(extra, counts.dtype)])
      return u, ind, inv, counts
    jnp_fun = lambda x: jnp.unique(x, size=size, **kwds)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_fixed_size={}".format(fixed_size),
       "fixed_size": fixed_size}
      for fixed_size in [True, False]))
  def testNonScalarRepeats(self, fixed_size):
    '''
    Following numpy test suite from `test_repeat` at
    https://github.com/numpy/numpy/blob/main/numpy/core/tests/test_multiarray.py
    '''
    tol = 1e-5
    def test_single(m, args_maker, repeats, axis):
      # Check eager results against numpy, then check compilation.
      lax_ans = jnp.repeat(m, repeats, axis)
      numpy_ans = np.repeat(m, repeats, axis)
      self.assertAllClose(lax_ans, numpy_ans, rtol=tol, atol=tol)
      if fixed_size:
        # Calculate expected size of the repeated axis.
        rep_length = np.repeat(np.zeros_like(m), repeats, axis).shape[axis or 0]
        jnp_fun = lambda arg, rep: jnp.repeat(
            arg, repeats=rep, axis=axis, total_repeat_length=rep_length)
      else:
        jnp_fun = lambda arg: jnp.repeat(arg, repeats = repeats, axis=axis)
      self._CompileAndCheck(jnp_fun, args_maker)
    # NOTE: the args_maker lambdas below close over the loop variable
    # `repeats` late-bound on purpose: by the time _CompileAndCheck calls
    # them, `repeats` holds the current iteration's value.
    m = jnp.array([1,2,3,4,5,6])
    if fixed_size:
      args_maker = lambda: [m, repeats]
    else:
      args_maker = lambda: [m]
    for repeats in [2, jnp.array([1,3,0,1,1,2]), jnp.array([1,3,2,1,1,2]), jnp.array([2])]:
      test_single(m, args_maker, repeats, axis=None)
      test_single(m, args_maker, repeats, axis=0)
    m_rect = m.reshape((2,3))
    if fixed_size:
      args_maker = lambda: [m_rect, repeats]
    else:
      args_maker = lambda: [m_rect]
    for repeats in [2, jnp.array([2,1]), jnp.array([2])]:
      test_single(m_rect, args_maker, repeats, axis=0)
    for repeats in [2, jnp.array([1,3,2]), jnp.array([2])]:
      test_single(m_rect, args_maker, repeats, axis=1)
  def testIssue2330(self):
    '''
    Ensure the return value of jnp.concatenate is a device array and that
    concatenation is side-effect safe: mutating the result must not alias
    or modify the caller's input. Regression test for
    https://github.com/google/jax/issues/2330.
    '''
    def attempt_sideeffect(x):
      # If concatenate aliased its input, the in-place subtraction below
      # would be visible to the caller.
      x = [x]
      x = jnp.concatenate(x)
      x -= 1.
      return x
    np_input = np.ones((1))
    jnp_input = jnp.ones((1))
    expected_np_input_after_call = np.ones((1))
    expected_jnp_input_after_call = jnp.ones((1))
    self.assertTrue(xla.type_is_device_array(jnp.concatenate([np_input])))
    attempt_sideeffect(np_input)
    attempt_sideeffect(jnp_input)
    self.assertAllClose(np_input, expected_np_input_after_call)
    self.assertAllClose(jnp_input, expected_jnp_input_after_call)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "op={}_xshape=[{}]_yshape=[{}]_mode={}".format(
          op,
          jtu.format_shape_dtype_string(xshape, dtype),
          jtu.format_shape_dtype_string(yshape, dtype),
          mode),
       "xshape": xshape, "yshape": yshape, "dtype": dtype, "mode": mode,
       "jnp_op": getattr(jnp, op),
       "np_op": getattr(np, op)}
      for mode in ['full', 'same', 'valid']
      for op in ['convolve', 'correlate']
      for dtype in number_dtypes
      for xshape in one_dim_array_shapes
      for yshape in one_dim_array_shapes))
  def testConvolutions(self, xshape, yshape, dtype, mode, jnp_op, np_op):
    """jnp.convolve/correlate must match numpy in all padding modes."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype)]
    # TPU matmuls default to reduced precision; request HIGHEST there so
    # the tolerances below hold.
    precision = lax.Precision.HIGHEST if jtu.device_under_test() == "tpu" else None
    np_fun = partial(np_op, mode=mode)
    jnp_fun = partial(jnp_op, mode=mode, precision=precision)
    tol = {np.float16: 2e-1, np.float32: 1e-2, np.float64: 1e-14,
           np.complex128: 1e-14}
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
          op, jtu.format_shape_dtype_string(shape, dtype), axis,
          out_dtype.__name__),
       "axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
       "jnp_op": getattr(jnp, op), "np_op": getattr(np, op)}
      for op in ["cumsum", "cumprod"]
      for dtype in all_dtypes
      for out_dtype in default_dtypes
      for shape in all_shapes
      for axis in [None] + list(range(-len(shape), len(shape)))))
  def testCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):
    """jnp.cumsum/cumprod must match numpy for all axes and output dtypes."""
    rng = jtu.rand_default(self.rng())
    # Complex->real out_dtype raises ComplexWarning on both sides; silence it.
    np_fun = lambda arg: np_op(arg, axis=axis, dtype=out_dtype)
    np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
    jnp_fun = lambda arg: jnp_op(arg, axis=axis, dtype=out_dtype)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    # bfloat16 accumulates noticeably more rounding error.
    tol_thresholds = {dtypes.bfloat16: 4e-2}
    tol = max(jtu.tolerance(dtype, tol_thresholds),
              jtu.tolerance(out_dtype, tol_thresholds))
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
          op, jtu.format_shape_dtype_string(shape, dtype), axis,
          out_dtype.__name__),
       "axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
       "jnp_op": getattr(jnp, op), "np_op": getattr(np, op)}
      for op in ["nancumsum", "nancumprod"]
      for dtype in all_dtypes
      for out_dtype in default_dtypes
      for shape in all_shapes
      for axis in [None] + list(range(-len(shape), len(shape)))))
  def testNanCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):
    """jnp.nancumsum/nancumprod must match numpy on NaN-containing inputs."""
    rng = jtu.rand_some_nan(self.rng())
    # Complex->real out_dtype raises ComplexWarning on both sides; silence it.
    np_fun = partial(np_op, axis=axis, dtype=out_dtype)
    np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
    jnp_fun = partial(jnp_op, axis=axis, dtype=out_dtype)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    tol_thresholds = {dtypes.bfloat16: 4e-2}
    tol = max(jtu.tolerance(dtype, tol_thresholds),
              jtu.tolerance(out_dtype, tol_thresholds))
    if dtype != jnp.bfloat16:
      # numpy functions do not properly handle bfloat16
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True,
                              tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_yshape={}_xshape={}_dx={}_axis={}".format(
          jtu.format_shape_dtype_string(yshape, dtype),
          jtu.format_shape_dtype_string(xshape, dtype) if xshape is not None else None,
          dx, axis),
       "yshape": yshape, "xshape": xshape, "dtype": dtype, "dx": dx, "axis": axis}
      for dtype in default_dtypes
      for yshape, xshape, dx, axis in [
        ((10,), None, 1.0, -1),
        ((3, 10), None, 2.0, -1),
        ((3, 10), None, 3.0, -0),
        ((10, 3), (10,), 1.0, -2),
        ((3, 10), (10,), 1.0, -1),
        ((3, 10), (3, 10), 1.0, -1),
        ((2, 3, 10), (3, 10), 1.0, -2),
      ]))
  @jtu.skip_on_devices("tpu")  # TODO(jakevdp): fix and reenable this test.
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testTrapz(self, yshape, xshape, dtype, dx, axis):
    """jnp.trapz must match np.trapz, with and without sample points `x`."""
    rng = jtu.rand_default(self.rng())
    # When xshape is None, trapz integrates with uniform spacing `dx`.
    args_maker = lambda: [rng(yshape, dtype), rng(xshape, dtype) if xshape is not None else None]
    np_fun = partial(np.trapz, dx=dx, axis=axis)
    jnp_fun = partial(jnp.trapz, dx=dx, axis=axis)
    tol = jtu.tolerance(dtype, {np.float64: 1e-12,
                                dtypes.bfloat16: 4e-2})
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol,
                            check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol,
                          check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
np.dtype(dtype).name, m, n, k),
"m": m, "n": n, "k": k, "dtype": dtype}
for dtype in default_dtypes
for n in [0, 4]
for m in [None, 0, 1, 3, 4]
for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype):
np_fun = lambda: np.tri(n, M=m, k=k, dtype=dtype)
jnp_fun = lambda: jnp.tri(n, M=m, k=k, dtype=dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_shape={}_k={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "op": op, "k": k}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for op in ["tril", "triu"]
for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: getattr(np, op)(arg, k=k)
jnp_fun = lambda arg: getattr(jnp, op)(arg, k=k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "n={}_k={}_m={}".format(n, k, m),
"n": n, "k": k, "m": m}
for n in range(1, 5)
for k in [-1, 0, 1]
for m in range(1, 5)))
def testTrilIndices(self, n, k, m):
np_fun = lambda n, k, m: np.tril_indices(n, k=k, m=m)
jnp_fun = lambda n, k, m: jnp.tril_indices(n, k=k, m=m)
args_maker = lambda: [n, k, m]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "n={}_k={}_m={}".format(n, k, m),
"n": n, "k": k, "m": m}
for n in range(1, 5)
for k in [-1, 0, 1]
for m in range(1, 5)))
def testTriuIndices(self, n, k, m):
np_fun = lambda n, k, m: np.triu_indices(n, k=k, m=m)
jnp_fun = lambda n, k, m: jnp.triu_indices(n, k=k, m=m)
args_maker = lambda: [n, k, m]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
for k in [-1, 0, 1]))
def testTriuIndicesFrom(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arr, k: np.triu_indices_from(arr, k=k)
jnp_fun = lambda arr, k: jnp.triu_indices_from(arr, k=k)
args_maker = lambda: [rng(shape, dtype), k]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
for k in [-1, 0, 1]))
def testTrilIndicesFrom(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arr, k: np.tril_indices_from(arr, k=k)
jnp_fun = lambda arr, k: jnp.tril_indices_from(arr, k=k)
args_maker = lambda: [rng(shape, dtype), k]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_n={}".format(ndim, n),
"ndim": ndim, "n": n}
for ndim in [0, 1, 4]
for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
np.testing.assert_equal(np.diag_indices(n, ndim),
jnp.diag_indices(n, ndim))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "arr_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)
),
"dtype": dtype, "shape": shape}
for dtype in default_dtypes
for shape in [(1,1), (2,2), (3,3), (4,4), (5,5)]))
def testDiagIndicesFrom(self, dtype, shape):
rng = jtu.rand_default(self.rng())
np_fun = np.diag_indices_from
jnp_fun = jnp.diag_indices_from
args_maker = lambda : [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.diag(arg, k)
jnp_fun = lambda arg: jnp.diag(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_k={}".format(
          jtu.format_shape_dtype_string(shape, dtype), k),
       "dtype": dtype, "shape": shape, "k": k}
      for dtype in default_dtypes
      for shape in all_shapes
      for k in range(-4, 4)))
  def testDiagFlat(self, shape, dtype, k):
    """jnp.diagflat must match np.diagflat (scalars treated as length-1)."""
    rng = jtu.rand_default(self.rng())
    # numpy has inconsistencies for scalar values
    # https://github.com/numpy/numpy/issues/16477
    # jax differs in that it treats scalars values as length-1 arrays
    np_fun = lambda arg: np.diagflat(np.atleast_1d(arg), k)
    jnp_fun = lambda arg: jnp.diagflat(arg, k)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_a1_shape={}_a2_shape2={}".format(
          jtu.format_shape_dtype_string(a1_shape, dtype),
          jtu.format_shape_dtype_string(a2_shape, dtype)),
       "dtype": dtype, "a1_shape": a1_shape, "a2_shape": a2_shape}
      for dtype in default_dtypes
      for a1_shape in one_dim_array_shapes
      for a2_shape in one_dim_array_shapes))
  def testPolyMul(self, a1_shape, a2_shape, dtype):
    """jnp.polymul must match np.polymul (leading-zero trimming enabled)."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda arg1, arg2: np.polymul(arg1, arg2)
    # numpy trims leading zero coefficients, so compare against the trimming
    # variant; compile-check the default shape-stable variant separately.
    jnp_fun_np = lambda arg1, arg2: jnp.polymul(arg1, arg2, trim_leading_zeros=True)
    jnp_fun_co = lambda arg1, arg2: jnp.polymul(arg1, arg2)
    args_maker = lambda: [rng(a1_shape, dtype), rng(a2_shape, dtype)]
    tol = {np.float16: 2e-1, np.float32: 5e-2, np.float64: 1e-13}
    self._CheckAgainstNumpy(np_fun, jnp_fun_np, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun_co, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
"dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
"axis2": axis2}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in [a for a in range(-len(shape), len(shape))
if a % len(shape) != axis1 % len(shape)]
for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg: np.diagonal(arg, offset, axis1, axis2)
jnp_fun = lambda arg: jnp.diagonal(arg, offset, axis1, axis2)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(np.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
np_fun = lambda: np.identity(n, dtype)
jnp_fun = lambda: jnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_period={}_left={}_right={}".format(
          jtu.format_shape_dtype_string(shape, dtype), period, left, right),
       "shape": shape, "dtype": dtype,
       "period": period, "left": left, "right": right}
      for shape in nonempty_shapes
      for period in [None, 0.59]
      for left in [None, 0]
      for right in [None, 1]
      for dtype in default_dtypes
      # following types lack precision for meaningful tests
      if dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16]
      ))
  def testInterp(self, shape, dtype, period, left, right):
    """jnp.interp must match np.interp, including period/left/right options."""
    rng = jtu.rand_default(self.rng(), scale=10)
    kwds = dict(period=period, left=left, right=right)
    np_fun = partial(np.interp, **kwds)
    jnp_fun = partial(jnp.interp, **kwds)
    # Arguments: query points, sorted sample x-coordinates, sample values.
    args_maker = lambda: [rng(shape, dtype), np.sort(rng((20,), dtype)), np.linspace(0, 1, 20)]
    # skip numpy comparison for integer types with period specified, because numpy
    # uses an unstable sort and so results differ for duplicate values.
    if not (period and np.issubdtype(dtype, np.integer)):
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol={np.float32: 2E-4})
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_x1={}_x2={}_x1_rng={}".format(
          jtu.format_shape_dtype_string(x1_shape, x1_dtype),
          jtu.format_shape_dtype_string(x2_shape, np.int32),
          x1_rng_factory_id),
       "x1_shape": x1_shape, "x1_dtype": x1_dtype,
       "x2_shape": x2_shape, "x1_rng_factory": x1_rng_factory,
       "x2_rng_factory": x2_rng_factory}
      for x1_rng_factory_id, x1_rng_factory in
        enumerate([jtu.rand_some_inf_and_nan, jtu.rand_some_zero])
      for x2_rng_factory in [partial(jtu.rand_int, low=-1075, high=1024)]
      for x1_shape, x2_shape in filter(_shapes_are_broadcast_compatible,
                                       itertools.combinations_with_replacement(array_shapes, 2))
      for x1_dtype in default_dtypes))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLdexp(self, x1_shape, x1_dtype, x2_shape, x1_rng_factory, x2_rng_factory):
    """jnp.ldexp must match np.ldexp over broadcast-compatible shape pairs."""
    # integer types are converted to float64 in numpy's implementation
    if (x1_dtype not in [jnp.bfloat16, np.float16, np.float32]
        and not config.x64_enabled):
      self.skipTest("Only run float64 testcase when float64 is enabled.")
    x1_rng = x1_rng_factory(self.rng())
    x2_rng = x2_rng_factory(self.rng())
    np_fun = lambda x1, x2: np.ldexp(x1, x2)
    # Large exponents intentionally overflow; silence numpy's warning.
    np_fun = jtu.ignore_warning(category=RuntimeWarning,
                                message="overflow.*")(np_fun)
    jnp_fun = lambda x1, x2: jnp.ldexp(x1, x2)
    args_maker = lambda: [x1_rng(x1_shape, x1_dtype),
                          x2_rng(x2_shape, np.int32)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_x={}_rng_factory={}".format(
          jtu.format_shape_dtype_string(shape, dtype), rng_factory_id),
       "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
      for rng_factory_id, rng_factory in enumerate([
        jtu.rand_some_inf_and_nan,
        jtu.rand_some_zero,
        partial(jtu.rand_not_small, offset=1e8),
      ])
      for shape in all_shapes
      for dtype in default_dtypes))
  def testFrexp(self, shape, dtype, rng_factory):
    """jnp.frexp must match np.frexp (mantissa and exponent outputs)."""
    # integer types are converted to float64 in numpy's implementation
    if (dtype not in [jnp.bfloat16, np.float16, np.float32]
        and not config.x64_enabled):
      self.skipTest("Only run float64 testcase when float64 is enabled.")
    rng = rng_factory(self.rng())
    np_fun = lambda x: np.frexp(x)
    jnp_fun = lambda x: jnp.frexp(x)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=np.issubdtype(dtype, np.inexact))
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          out_dtype, offset, axis1, axis2),
       "dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
       "axis1": axis1, "axis2": axis2}
      for dtype in default_dtypes
      for out_dtype in [None] + number_dtypes
      for shape in [shape for shape in all_shapes if len(shape) >= 2]
      for axis1 in range(-len(shape), len(shape))
      for axis2 in range(-len(shape), len(shape))
      if (axis1 % len(shape)) != (axis2 % len(shape))
      for offset in list(range(-4, 4))))
  def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2):
    """jnp.trace must match np.trace for all offsets, axes and out dtypes."""
    rng = jtu.rand_default(self.rng())
    def np_fun(arg):
      if out_dtype == jnp.bfloat16:
        # numpy cannot accumulate in bfloat16; accumulate in float32, then
        # cast back so dtypes compare equal.
        return np.trace(arg, offset, axis1, axis2, np.float32).astype(jnp.bfloat16)
      else:
        return np.trace(arg, offset, axis1, axis2, out_dtype)
    jnp_fun = lambda arg: jnp.trace(arg, offset, axis1, axis2, out_dtype)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}_v={}_side={}".format(
jtu.format_shape_dtype_string(ashape, dtype),
jtu.format_shape_dtype_string(vshape, dtype),
side), "ashape": ashape, "vshape": vshape, "side": side,
"dtype": dtype}
for ashape in [(15,), (16,), (17,)]
for vshape in [(), (5,), (5, 5)]
for side in ['left', 'right']
for dtype in default_dtypes
))
def testSearchsorted(self, ashape, vshape, side, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [np.sort(rng(ashape, dtype)), rng(vshape, dtype)]
np_fun = lambda a, v: np.searchsorted(a, v, side=side)
jnp_fun = lambda a, v: jnp.searchsorted(a, v, side=side)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_bins={}_right={}_reverse={}".format(
jtu.format_shape_dtype_string(xshape, dtype),
jtu.format_shape_dtype_string(binshape, dtype),
right, reverse), "xshape": xshape, "binshape": binshape,
"right": right, "reverse": reverse, "dtype": dtype}
for xshape in [(20,), (5, 4)]
for binshape in [(1,), (5,)]
for right in [True, False]
for reverse in [True, False]
for dtype in default_dtypes
))
def testDigitize(self, xshape, binshape, right, reverse, dtype):
order = jax.ops.index[::-1] if reverse else jax.ops.index[:]
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(xshape, dtype), jnp.sort(rng(binshape, dtype))[order]]
np_fun = lambda x, bins: np.digitize(x, bins, right=right)
jnp_fun = lambda x, bins: jnp.digitize(x, bins, right=right)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_array={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
"shape": shape, "dtypes": dtypes, "array_input": array_input}
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 5)]
for array_input in [True, False]))
def testColumnStack(self, shape, dtypes, array_input):
rng = jtu.rand_default(self.rng())
if array_input:
args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
else:
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(np.column_stack)
jnp_fun = jnp.column_stack
self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_array={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis, array_input),
"shape": shape, "axis": axis, "dtypes": dtypes, "array_input": array_input}
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for array_input in [True, False]))
def testStack(self, shape, axis, dtypes, array_input):
rng = jtu.rand_default(self.rng())
if array_input:
args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
else:
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(partial(np.stack, axis=axis))
jnp_fun = partial(jnp.stack, axis=axis)
self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_{}_array={}".format(
op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
"shape": shape, "op": op, "dtypes": dtypes, "array_input": array_input}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[np.float32],
[np.float32, np.float32],
[np.float32, np.int32, np.float32],
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
for array_input in [True, False]))
def testHVDStack(self, shape, op, dtypes, array_input):
rng = jtu.rand_default(self.rng())
if array_input:
args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
else:
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(getattr(np, op))
jnp_fun = getattr(jnp, op)
self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outdtype={}_fillshape={}".format(
jtu.format_shape_dtype_string(shape, fill_value_dtype),
np.dtype(out_dtype).name if out_dtype else "None",
fill_value_shape),
"fill_value_dtype": fill_value_dtype, "fill_value_shape": fill_value_shape,
"shape": shape, "out_dtype": out_dtype}
for shape in array_shapes + [3, np.array(7, dtype=np.int32)]
for fill_value_dtype in default_dtypes
for fill_value_shape in _compatible_shapes(shape)
for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, fill_value_shape, out_dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda fill_value: np.full(shape, fill_value, dtype=out_dtype)
jnp_fun = lambda fill_value: jnp.full(shape, fill_value, dtype=out_dtype)
args_maker = lambda: [rng(fill_value_shape, fill_value_dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "_shape={}_n={}_axis={}_prepend={}_append={}".format(
jtu.format_shape_dtype_string(shape, dtype),
n, axis, prepend, append),
"shape": shape, "dtype": dtype, "n": n, "axis": axis,
"prepend": prepend, "append": append
} for shape, dtype in s(_shape_and_dtypes(nonempty_nonscalar_array_shapes, default_dtypes))
for n in s([0, 1, 2])
for axis in s(list(range(-len(shape), max(1, len(shape)))))
for prepend in s([None, 1, np.zeros(shape, dtype=dtype)])
for append in s([None, 1, np.zeros(shape, dtype=dtype)])
)))
  def testDiff(self, shape, dtype, n, axis, prepend, append):
    """Compare jnp.diff to np.diff, including prepend/append and bfloat16."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    def np_fun(x, n=n, axis=axis, prepend=prepend, append=append):
      # np.diff uses np._NoValue (not None) as its "argument absent" sentinel.
      if prepend is None:
        prepend = np._NoValue
      elif not np.isscalar(prepend) and prepend.dtype == jnp.bfloat16:
        # NumPy cannot compute with bfloat16; route through float32.
        prepend = prepend.astype(np.float32)
      if append is None:
        append = np._NoValue
      elif not np.isscalar(append) and append.dtype == jnp.bfloat16:
        append = append.astype(np.float32)
      if x.dtype == jnp.bfloat16:
        # Compute in float32 and cast back so dtypes line up with jnp.diff.
        return np.diff(x.astype(np.float32), n=n, axis=axis, prepend=prepend, append=append).astype(jnp.bfloat16)
      else:
        return np.diff(x, n=n, axis=axis, prepend=prepend, append=append)
    jnp_fun = lambda x: jnp.diff(x, n=n, axis=axis, prepend=prepend, append=append)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_op={}_shape={}_dtype={}").format(op, shape, dtype),
"np_op": getattr(np, op), "jnp_op": getattr(jnp, op),
"shape": shape, "dtype": dtype}
for op in ["zeros", "ones"]
for shape in [2, (), (2,), (3, 0), np.array((4, 5, 6), dtype=np.int32),
np.array(4, dtype=np.int32)]
for dtype in all_dtypes))
def testZerosOnes(self, np_op, jnp_op, shape, dtype):
args_maker = lambda: []
np_op = partial(np_op, shape, dtype)
jnp_op = partial(jnp_op, shape, dtype)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
def testOnesWithInvalidShape(self):
with self.assertRaises(TypeError):
jnp.ones((-1, 1))
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "_inshape={}_filldtype={}_fillshape={}_outdtype={}_outshape={}".format(
jtu.format_shape_dtype_string(shape, in_dtype),
np.dtype(fill_value_dtype).name, fill_value_shape,
np.dtype(out_dtype).name, out_shape),
"shape": shape, "in_dtype": in_dtype,
"fill_value_dtype": fill_value_dtype, "fill_value_shape": fill_value_shape,
"out_dtype": out_dtype, "out_shape": out_shape
} for shape in s(array_shapes)
for out_shape in s([None] + array_shapes)
for in_dtype in s(default_dtypes)
for fill_value_dtype in s(default_dtypes)
for fill_value_shape in s(_compatible_shapes(shape if out_shape is None else out_shape))
for out_dtype in s(default_dtypes))))
def testFullLike(self, shape, in_dtype, fill_value_dtype, fill_value_shape, out_dtype, out_shape):
if numpy_version < (1, 19) and out_shape == ():
raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
rng = jtu.rand_default(self.rng())
np_fun = lambda x, fill_value: np.full_like(
x, fill_value, dtype=out_dtype, shape=out_shape)
jnp_fun = lambda x, fill_value: jnp.full_like(
x, fill_value, dtype=out_dtype, shape=out_shape)
args_maker = lambda: [rng(shape, in_dtype), rng(fill_value_shape, fill_value_dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_func={}_inshape={}_outshape={}_outdtype={}".format(
func, jtu.format_shape_dtype_string(shape, in_dtype),
out_shape, out_dtype),
"func": func, "shape": shape, "in_dtype": in_dtype,
"out_shape": out_shape, "out_dtype": out_dtype}
for shape in array_shapes
for out_shape in [None] + array_shapes
for in_dtype in default_dtypes
for func in ["ones_like", "zeros_like"]
for out_dtype in default_dtypes))
def testZerosOnesLike(self, func, shape, in_dtype, out_shape, out_dtype):
if numpy_version < (1, 19) and out_shape == ():
raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
rng = jtu.rand_default(self.rng())
np_fun = lambda x: getattr(np, func)(x, dtype=out_dtype, shape=out_shape)
jnp_fun = lambda x: getattr(jnp, func)(x, dtype=out_dtype, shape=out_shape)
args_maker = lambda: [rng(shape, in_dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_func={}_inshape={}_weak_type={}_outshape={}_outdtype={}".format(
func, jtu.format_shape_dtype_string(shape, in_dtype),
weak_type, out_shape, out_dtype),
"func": func, "args": args,
"shape": shape, "in_dtype": in_dtype, "weak_type": weak_type,
"out_shape": out_shape, "out_dtype": out_dtype}
for shape in array_shapes
for in_dtype in [np.int32, np.float32, np.complex64]
for weak_type in [True, False]
for out_shape in [None, (), (10,)]
for func, args in [("full_like", (-100,)), ("ones_like", ()), ("zeros_like", ())]
for out_dtype in [None, float]))
def testZerosOnesFullLikeWeakType(self, func, args, shape, in_dtype, weak_type, out_shape, out_dtype):
if numpy_version < (1, 19) and out_shape == ():
raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
rng = jtu.rand_default(self.rng())
x = lax._convert_element_type(rng(shape, in_dtype), weak_type=weak_type)
fun = lambda x: getattr(jnp, func)(x, *args, dtype=out_dtype, shape=out_shape)
expected_weak_type = weak_type and (out_dtype is None)
self.assertEqual(dtypes.is_weakly_typed(fun(x)), expected_weak_type)
self.assertEqual(dtypes.is_weakly_typed(jax.jit(fun)(x)), expected_weak_type)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_funcname={}_input_type={}_val={}_dtype={}".format(
funcname, input_type, val, dtype),
"funcname": funcname, "input_type": input_type, "val": val, "dtype": dtype}
for funcname in ["array", "asarray"]
for dtype in [int, float, None]
for val in [0, 1]
for input_type in [int, float, np.int32, np.float32]))
def testArrayWeakType(self, funcname, input_type, val, dtype):
func = lambda x: getattr(jnp, funcname)(x, dtype=dtype)
fjit = jax.jit(func)
val = input_type(val)
expected_weak_type = dtype is None and input_type in set(dtypes._weak_types)
self.assertEqual(dtypes.is_weakly_typed(func(val)), expected_weak_type)
self.assertEqual(dtypes.is_weakly_typed(fjit(val)), expected_weak_type)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_weak_type={}_slc={}".format(
jtu.format_shape_dtype_string(shape, dtype), weak_type, slc),
"shape": shape, "dtype": dtype, "weak_type": weak_type, "slc": slc}
for shape in nonempty_nonscalar_array_shapes
for dtype in [int, float, complex]
for weak_type in [True, False]
for slc in [slice(None), slice(0), slice(3), 0, ...]))
def testSliceWeakTypes(self, shape, dtype, weak_type, slc):
rng = jtu.rand_default(self.rng())
x = lax._convert_element_type(rng(shape, dtype), weak_type=weak_type)
op = lambda x: x[slc]
self.assertEqual(op(x).aval.weak_type, weak_type)
self.assertEqual(jax.jit(op)(x).aval.weak_type, weak_type)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype}
for shape, axis, num_sections in [
((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
for dtype in default_dtypes))
def testSplitStaticInt(self, shape, num_sections, axis, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.split(x, num_sections, axis=axis)
jnp_fun = lambda x: jnp.split(x, num_sections, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis, "dtype": dtype}
# All testcases split the specified axis unequally
for shape, axis, num_sections in [
((3,), 0, 2), ((12,), 0, 5), ((12, 4), 0, 7), ((12, 4), 1, 3),
((2, 3, 5), -1, 2), ((2, 4, 4), -2, 3), ((7, 2, 2), 0, 3)]
for dtype in default_dtypes))
def testArraySplitStaticInt(self, shape, num_sections, axis, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.array_split(x, num_sections, axis=axis)
jnp_fun = lambda x: jnp.array_split(x, num_sections, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  def testSplitTypeError(self):
    """jnp.split accepts arrays and concrete tracers as split indices, but
    rejects abstract tracers with a concretization TypeError."""
    # If we pass an ndarray for indices_or_sections -> no error
    self.assertEqual(3, len(jnp.split(jnp.zeros(3), jnp.array([1, 2]))))
    CONCRETIZATION_MSG = "Abstract tracer value encountered where concrete value is expected."
    with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):
      # An abstract tracer for idx
      jax.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), idx))(2.)
    with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):
      # A list including an abstract tracer
      jax.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), [2, idx]))(2.)
    # A concrete tracer -> no error
    jax.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), idx),
            (2.,), (1.,))
    # A tuple including a concrete tracer -> no error
    jax.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), (1, idx)),
            (2.,), (1.,))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_bins={}_range={}_weights={}".format(
jtu.format_shape_dtype_string(shape, dtype), bins, range, weights),
"shape": shape,
"dtype": dtype,
"bins": bins,
"range": range,
"weights": weights,
}
for shape in [(5,), (5, 5)]
for dtype in number_dtypes
for bins in [10, np.arange(-5, 6), np.array([-5, 0, 3])]
for range in [None, (0, 0), (0, 10)]
for weights in [True, False]
))
def testHistogramBinEdges(self, shape, dtype, bins, range, weights):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
np_fun = lambda a, w, r: np.histogram_bin_edges(a, bins=bins, range=r,
weights=_weights(w))
jnp_fun = lambda a, w, r: jnp.histogram_bin_edges(a, bins=bins, range=r,
weights=_weights(w))
args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), range]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-2}
# linspace() compares poorly to numpy when using bfloat16
if dtype != jnp.bfloat16:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker,
atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_bins={}_density={}_weights={}".format(
jtu.format_shape_dtype_string(shape, dtype), bins, density, weights),
"shape": shape,
"dtype": dtype,
"bins": bins,
"density": density,
"weights": weights,
}
for shape in [(5,), (5, 5)]
for dtype in default_dtypes
# We only test explicit integer-valued bin edges because in other cases
# rounding errors lead to flaky tests.
for bins in [np.arange(-5, 6), np.array([-5, 0, 3])]
for density in [True, False]
for weights in [True, False]
))
def testHistogram(self, shape, dtype, bins, density, weights):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
np_fun = lambda a, w: np.histogram(a, bins=bins, density=density,
weights=_weights(w))
jnp_fun = lambda a, w: jnp.histogram(a, bins=bins, density=density,
weights=_weights(w))
args_maker = lambda: [rng(shape, dtype), rng(shape, dtype)]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
# np.searchsorted errors on bfloat16 with
# "TypeError: invalid type promotion with custom data type"
if dtype != jnp.bfloat16:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_bins={}_weights={}_density={}_range={}".format(
jtu.format_shape_dtype_string(shape, dtype), bins, weights, density, range),
"shape": shape, "dtype": dtype, "bins": bins, "weights": weights, "density": density, "range": range,
}
for shape in [(5,), (12,)]
for dtype in int_dtypes
for bins in [2, [2, 2], [np.array([0, 1, 3, 5]), np.array([0, 2, 3, 4, 6])]]
for weights in [False, True]
for density in [False, True]
for range in [None, [(-1, 1), None], [(-1, 1), (-2, 2)]]
))
def testHistogram2d(self, shape, dtype, bins, weights, density, range):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
np_fun = jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")(
lambda a, b, w: np.histogram2d(a, b, bins=bins, weights=_weights(w), density=density, range=range))
jnp_fun = lambda a, b, w: jnp.histogram2d(a, b, bins=bins, weights=_weights(w), density=density, range=range)
args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), rng(shape, dtype)]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
# np.searchsorted errors on bfloat16 with
# "TypeError: invalid type promotion with custom data type"
with np.errstate(divide='ignore', invalid='ignore'):
if dtype != jnp.bfloat16:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_bins={}_weights={}_density={}_range={}".format(
jtu.format_shape_dtype_string(shape, dtype), bins, weights, density, range),
"shape": shape, "dtype": dtype, "bins": bins, "weights": weights, "density": density, "range": range,
}
for shape in [(5, 3), (10, 3)]
for dtype in int_dtypes
for bins in [(2, 2, 2), [np.array([-5, 0, 4]), np.array([-4, -1, 2]), np.array([-6, -1, 4])]]
for weights in [False, True]
for density in [False, True]
for range in [None, [(-1, 1), None, None], [(-1, 1), (-2, 2), (-3, 3)]]
))
def testHistogramdd(self, shape, dtype, bins, weights, density, range):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
np_fun = jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")(
lambda a, w: np.histogramdd(a, bins=bins, weights=_weights(w), density=density, range=range))
jnp_fun = lambda a, w: jnp.histogramdd(a, bins=bins, weights=_weights(w), density=density, range=range)
args_maker = lambda: [rng(shape, dtype), rng((shape[0],), dtype)]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
# np.searchsorted errors on bfloat16 with
# "TypeError: invalid type promotion with custom data type"
if dtype != jnp.bfloat16:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype}
for shape, axis, num_sections in [
((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
for dtype in default_dtypes))
def testHVDSplit(self, shape, num_sections, axis, dtype):
rng = jtu.rand_default(self.rng())
def fn(module, axis):
if axis == 0:
return module.vsplit
elif axis == 1:
return module.hsplit
else:
assert axis == 2
return module.dsplit
np_fun = lambda x: fn(np, axis)(x, num_sections)
jnp_fun = lambda x: fn(jnp, axis)(x, num_sections)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_order={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype),
order),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"order": order}
for dtype in default_dtypes
for order in ["C", "F"]
for arg_shape, out_shape in [
(jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
((), (1, 1, 1)),
((7, 0), (0, 42, 101)),
((3, 4), 12),
((3, 4), (12,)),
((3, 4), -1),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshape(self, arg_shape, out_shape, dtype, order):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.reshape(x, out_shape, order=order)
jnp_fun = lambda x: jnp.reshape(x, out_shape, order=order)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
for dtype in default_dtypes
for arg_shape, out_shape in [
((7, 0), (0, 42, 101)),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshapeMethod(self, arg_shape, out_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.reshape(x, out_shape)
jnp_fun = lambda x: x.reshape(*out_shape)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
for dtype in default_dtypes
for arg_shape, out_shape in itertools.product(all_shapes, array_shapes)))
def testResize(self, arg_shape, out_shape, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.resize(x, out_shape)
jnp_fun = lambda x: jnp.resize(x, out_shape)
args_maker = lambda: [rng(arg_shape, dtype)]
if len(out_shape) > 0 or numpy_version >= (1, 20, 0):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_expanddim={!r}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), dim),
"arg_shape": arg_shape, "dtype": dtype, "dim": dim}
for arg_shape in [(), (3,), (3, 4)]
for dtype in default_dtypes
for dim in (list(range(-len(arg_shape)+1, len(arg_shape)))
+ [np.array(0), np.array(-1), (0,), [np.array(0)],
(len(arg_shape), len(arg_shape) + 1)])))
def testExpandDimsStaticDim(self, arg_shape, dtype, dim):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.expand_dims(x, dim)
jnp_fun = lambda x: jnp.expand_dims(x, dim)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CompileAndCheck(jnp_fun, args_maker)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
"arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2}
for arg_shape, ax1, ax2 in [
((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.swapaxes(x, ax1, ax2)
jnp_fun = lambda x: jnp.swapaxes(x, ax1, ax2)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={!r}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax}
for arg_shape, ax in [
((3, 1), None),
((3, 1), 1),
((3, 1), -1),
((3, 1), np.array(1)),
((1, 3, 1), (0, 2)),
((1, 3, 1), (0,)),
((1, 4, 1), (np.array(0),))]
for dtype in default_dtypes))
def testSqueeze(self, arg_shape, dtype, ax):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.squeeze(x, ax)
jnp_fun = lambda x: jnp.squeeze(x, ax)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis,
(None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),
returned),
"shape": shape, "dtype": dtype, "axis": axis,
"weights_shape": weights_shape, "returned": returned}
for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)
for axis in list(range(-len(shape), len(shape))) + [None]
# `weights_shape` is either `None`, same as the averaged axis, or same as
# that of the input
for weights_shape in ([None, shape] if axis is None or len(shape) == 1
else [None, (shape[axis],), shape])
for returned in [False, True]))
  def testAverage(self, shape, dtype, axis, weights_shape, returned):
    """Compare jnp.average to np.average over axes, weights, and `returned`."""
    rng = jtu.rand_default(self.rng())
    if weights_shape is None:
      np_fun = lambda x: np.average(x, axis, returned=returned)
      jnp_fun = lambda x: jnp.average(x, axis, returned=returned)
      args_maker = lambda: [rng(shape, dtype)]
    else:
      np_fun = lambda x, weights: np.average(x, axis, weights, returned)
      jnp_fun = lambda x, weights: jnp.average(x, axis, weights, returned)
      args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]
    # Promote inputs the way jnp does so the NumPy reference agrees on dtype.
    np_fun = _promote_like_jnp(np_fun, inexact=True)
    tol = {dtypes.bfloat16: 2e-1, np.float16: 1e-2, np.float32: 1e-5,
           np.float64: 1e-12, np.complex64: 1e-5}
    check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
    try:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                              check_dtypes=check_dtypes, tol=tol)
    except ZeroDivisionError:
      # np.average raises ZeroDivisionError when the weights sum to zero.
      self.skipTest("don't support checking for ZeroDivisionError")
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
                          rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
f"_arg{i}_ndmin={ndmin}_dtype={np.dtype(dtype) if dtype else None}",
"arg": arg, "ndmin": ndmin, "dtype": dtype}
for i, (arg, dtypes) in enumerate([
([True, False, True], all_dtypes),
(3., all_dtypes),
([1, 2, 3], all_dtypes),
(np.array([1, 2, 3], dtype=np.int64), all_dtypes),
([1., 2., 3.], all_dtypes),
([[1, 2], [3, 4], [5, 6]], all_dtypes),
([[1, 2.], [3, 4], [5, 6]], all_dtypes),
([[1., 2j], [3., 4.], [5., 6.]], complex_dtypes),
([[3, np.array(2, dtype=jnp.float_), 1],
np.arange(3., dtype=jnp.float_)], all_dtypes),
])
for dtype in [None] + dtypes
for ndmin in [None, np.ndim(arg), np.ndim(arg) + 1, np.ndim(arg) + 2]))
  def testArray(self, arg, ndmin, dtype):
    """jnp.array matches np.array for nested inputs, ndmin, and dtypes."""
    args_maker = lambda: [arg]
    # NumPy keeps 64-bit dtypes; canonicalize so the reference matches JAX's
    # default (possibly x32) dtype behavior.
    canonical_dtype = dtypes.canonicalize_dtype(dtype or np.array(arg).dtype)
    if ndmin is not None:
      np_fun = partial(np.array, ndmin=ndmin, dtype=canonical_dtype)
      jnp_fun = partial(jnp.array, ndmin=ndmin, dtype=dtype)
    else:
      np_fun = partial(np.array, dtype=canonical_dtype)
      jnp_fun = partial(jnp.array, dtype=dtype)
    # We are testing correct canonicalization behavior here, so we turn off the
    # permissive canonicalization logic in the test harness.
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            canonicalize_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker)
def testArrayUnsupportedDtypeError(self):
with self.assertRaisesRegex(TypeError,
"JAX only supports number and bool dtypes.*"):
jnp.array(3, [('a','<i4'),('b','<i4')])
  def testArrayFromInteger(self):
    """Python int conversion: in-range values succeed, overflow raises."""
    int_dtype = dtypes.canonicalize_dtype(jnp.int64)
    int_max = jnp.iinfo(int_dtype).max
    int_min = jnp.iinfo(int_dtype).min
    # Values at extremes are converted correctly.
    for val in [int_min, 0, int_max]:
      self.assertEqual(jnp.array(val).dtype, int_dtype)
    # out of bounds leads to an OverflowError
    val = int_max + 1
    with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to {int_dtype.name}"):
      jnp.array(val)
    # explicit uint64 should work
    if config.x64_enabled:
      self.assertEqual(np.uint64(val), jnp.array(val, dtype='uint64'))
# TODO(jakevdp): fix list inputs to jnp.array and enable the following test
# def testArrayFromList(self):
# int_max = jnp.iinfo(jnp.int64).max
# int_min = jnp.iinfo(jnp.int64).min
#
# # Values at extremes are converted correctly.
# for val in [int_min, 0, int_max]:
# self.assertEqual(jnp.array([val]).dtype, dtypes.canonicalize_dtype('int64'))
#
# # list of values results in promoted type.
# self.assertEqual(jnp.array([0, np.float16(1)]).dtype, jnp.result_type('int64', 'float16'))
#
# # out of bounds leads to an OverflowError
# val = int_min - 1
# with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to int64"):
# jnp.array([0, val])
def testIssue121(self):
assert not np.isscalar(jnp.array(3))
  def testArrayOutputsDeviceArrays(self):
    """jnp.array returns device arrays for empty, ndarray, and __array__ inputs."""
    assert xla.type_is_device_array(jnp.array([]))
    assert xla.type_is_device_array(jnp.array(np.array([])))
    class NDArrayLike:
      # Minimal object implementing the __array__ protocol.
      def __array__(self, dtype=None):
        return np.array([], dtype=dtype)
    assert xla.type_is_device_array(jnp.array(NDArrayLike()))
    # NOTE(mattjj): disabled b/c __array__ must produce ndarrays
    # class DeviceArrayLike:
    #   def __array__(self, dtype=None):
    #     return jnp.array([], dtype=dtype)
    # assert xla.type_is_device_array(jnp.array(DeviceArrayLike()))
def testArrayMethod(self):
class arraylike(object):
dtype = np.float32
def __array__(self, dtype=None):
return np.array(3., dtype=dtype)
a = arraylike()
ans = jnp.array(a)
assert ans == 3.
def testMemoryView(self):
ans = jnp.array(bytearray(b'\x2a'))
self.assertAllClose(
ans,
np.array([0x2a], dtype=np.uint8))
def testIsClose(self):
c_isclose = jax.jit(jnp.isclose)
c_isclose_nan = jax.jit(partial(jnp.isclose, equal_nan=True))
n = 2
rng = np.random.RandomState(0)
x = rng.randn(n, 1)
y = rng.randn(n, 1)
inf = np.asarray(n * [np.inf]).reshape([n, 1])
nan = np.asarray(n * [np.nan]).reshape([n, 1])
args = [x, y, inf, -inf, nan]
for arg0 in args:
for arg1 in args:
result_np = np.isclose(arg0, arg1)
result_jax = jnp.isclose(arg0, arg1)
result_jit = c_isclose(arg0, arg1)
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
result_np = np.isclose(arg0, arg1, equal_nan=True)
result_jax = jnp.isclose(arg0, arg1, equal_nan=True)
result_jit = c_isclose_nan(arg0, arg1)
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_y={}_equal_nan={}".format(x, y, equal_nan),
"x": x, "y": y, "equal_nan": equal_nan}
for x, y in itertools.product([
1, [1], [1, 1 + 1E-4], [1, np.nan]], repeat=2)
for equal_nan in [True, False]))
def testAllClose(self, x, y, equal_nan):
jnp_fun = partial(jnp.allclose, equal_nan=equal_nan, rtol=1E-3)
np_fun = partial(np.allclose, equal_nan=equal_nan, rtol=1E-3)
args_maker = lambda: [np.array(x), np.array(y)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testZeroStridesConstantHandler(self):
raw_const = np.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
const = np.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
def fun(x):
return x * const
fun = jax.jit(fun)
out_val = fun(3.)
self.assertAllClose(out_val, 3. * const, check_dtypes=False)
  def testIsInstanceNdarrayDuringTracing(self):
    """During jit tracing, arguments still satisfy isinstance(x, jnp.ndarray)."""
    arr = np.ones(3)
    @jax.jit
    def f(x):
      # x is a tracer here; it must still register as a jnp.ndarray.
      self.assertIsInstance(x, jnp.ndarray)
      return jnp.sum(x)
    f(arr)
def testNonArrayErrorMessage(self):
x = [1., 2.]
y = np.array([3., 4.])
def g(x, y):
return jnp.add(x, y)
def f(x, y):
return jnp.dot(x, y)
self.assertRaises(TypeError, lambda: g(x, y))
self.assertRaises(TypeError, lambda: f(x, y))
self.assertRaises(TypeError, lambda: jax.jit(g)(x, y))
self.assertRaises(TypeError, lambda: jax.jit(f)(x, y))
  def testAbstractionErrorMessage(self):
    """Using traced values as Python ints/bools raises targeted jax errors."""
    @jax.jit
    def f(x, n):
      # `n` is a tracer here; range(n) needs a concrete Python int.
      for _ in range(n):
        x = x * x
      return x
    self.assertRaises(jax.errors.TracerIntegerConversionError, lambda: f(3., 3))
    @jax.jit
    def g(x):
      # Branching on a traced value requires a concrete bool.
      if x > 0.:
        return x * 2
      else:
        return x + 2
    self.assertRaises(jax.errors.ConcretizationTypeError, lambda: g(3.))
  def testTracingPrimitiveWithNoTranslationErrorMessage(self):
    """Tracing a primitive lacking a translation raises NotImplementedError.

    Currently skipped pending a jax3 update (see TODO below).
    """
    # TODO(mattjj): update this for jax3
    self.skipTest("test needs jax3 update")
    foo = jnp._not_implemented(lambda x: x)
    # No error if there's no tracing.
    foo(np.arange(3))
    cfoo = jax.jit(foo)
    self.assertRaises(NotImplementedError, lambda: cfoo(np.arange(3)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in [(3,), (2, 3)]
for dtype in default_dtypes
for axis in list(range(-len(shape), len(shape))) + [None] + [tuple(range(len(shape)))] # Test negative axes and tuples
))
def testFlip(self, shape, dtype, axis):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flip(x, axis)
np_op = lambda x: np.flip(x, axis)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(3,), (2, 3), (3, 2, 4)]
for dtype in default_dtypes))
def testFlipud(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flipud(x)
np_op = lambda x: np.flipud(x)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in [(3, 2), (2, 3), (3, 2, 4)]
    for dtype in default_dtypes))
def testFliplr(self, shape, dtype):
  """jnp.fliplr agrees with np.fliplr and works under jit."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  # The functions are unary, so pass them directly instead of wrapping in lambdas.
  self._CheckAgainstNumpy(np.fliplr, jnp.fliplr, args_maker)
  self._CompileAndCheck(jnp.fliplr, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_k={}_axes={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k, axes),
     "shape": shape, "dtype": dtype, "k": k, "axes": axes}
    for shape, axes in [
        [(2, 3), (0, 1)],
        [(2, 3), (1, 0)],
        [(4, 3, 2), (0, 2)],
        [(4, 3, 2), (2, 1)],
    ]
    # Negative, zero, and positive rotation counts.
    for k in range(-3, 4)
    for dtype in default_dtypes))
def testRot90(self, shape, dtype, k, axes):
  """jnp.rot90 agrees with np.rot90 for all rotation counts and axis pairs."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  jnp_op = lambda x: jnp.rot90(x, k, axes)
  np_op = lambda x: np.rot90(x, k, axes)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
  """The .ravel() method on jnp arrays works under jit."""
  rng = np.random.RandomState(0)
  make_args = lambda: [rng.randn(3, 4).astype("float32")]
  self._CompileAndCheck(lambda arr: arr.ravel(), make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_order={}_mode={}".format(
        shape, order, mode),
     "shape": shape, "order": order, "mode": mode}
    for shape in nonempty_nonscalar_array_shapes
    for order in ['C', 'F']
    for mode in ['wrap', 'clip', 'raise']))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testRavelMultiIndex(self, shape, order, mode):
  """jnp.ravel_multi_index matches numpy; mode='raise' cannot be jit-compiled."""
  # generate indices in each dimension with a few out of bounds.
  rngs = [jtu.rand_int(self.rng(), low=-1, high=dim + 1)
          for dim in shape]
  # generate multi_indices of different dimensions that broadcast.
  args_maker = lambda: [tuple(rng(ndim * (3,), jnp.int_)
                              for ndim, rng in enumerate(rngs))]
  def np_fun(x):
    try:
      return np.ravel_multi_index(x, shape, order=order, mode=mode)
    except ValueError as err:
      if str(err).startswith('invalid entry'):
        # sentinel indicating expected error; lets both sides be compared.
        return -999
      else:
        raise
  def jnp_fun(x):
    try:
      return jnp.ravel_multi_index(x, shape, order=order, mode=mode)
    except ValueError as err:
      if str(err).startswith('invalid entry'):
        # sentinel indicating expected error.
        return -999
      else:
        raise
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  if mode == 'raise':
    # Under jit the bounds check cannot be performed on traced values.
    msg = ("The error occurred because ravel_multi_index was jit-compiled "
           "with mode='raise'. Use mode='wrap' or mode='clip' instead.")
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
      jax.jit(jnp_fun)(*args_maker())
  else:
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_ashape={}{}_cshapes={}{}_mode={}".format(
        adtype.__name__, ashape, cdtype.__name__, cshapes, mode),
     "ashape": ashape, "adtype": adtype, "cshapes": cshapes, "cdtype": cdtype, "mode": mode}
    for ashape in ((), (4,), (3, 4))
    for cshapes in [
        [(), (4,)],
        [(3, 4), (4,), (3, 1)]
    ]
    for adtype in int_dtypes
    for cdtype in default_dtypes
    for mode in ['wrap', 'clip', 'raise']))
def testChoose(self, ashape, adtype, cshapes, cdtype, mode):
  """jnp.choose matches np.choose; mode='raise' cannot be jit-compiled."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(ashape, adtype), [rng(s, cdtype) for s in cshapes]]
  def np_fun(a, c):
    try:
      return np.choose(a, c, mode=mode)
    except ValueError as err:
      if mode == 'raise' and str(err).startswith('invalid entry'):
        return -999  # sentinel indicating expected error.
      else:
        raise
  def jnp_fun(a, c):
    try:
      return jnp.choose(a, c, mode=mode)
    except ValueError as err:
      if mode == 'raise' and str(err).startswith('invalid entry'):
        return -999  # sentinel indicating expected error.
      else:
        raise
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  if mode == 'raise':
    # Under jit the bounds check cannot be performed on traced values.
    msg = ("The error occurred because jnp.choose was jit-compiled"
           " with mode='raise'. Use mode='wrap' or mode='clip' instead.")
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
      jax.jit(jnp_fun)(*args_maker())
  else:
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.parameters(
    (0, (2, 1, 3)),
    (5, (2, 1, 3)),
    (0, ()),
    (np.array([0, 1, 2]), (2, 2)),
    (np.array([[[0, 1], [2, 3]]]), (2, 2)))
def testUnravelIndex(self, flat_index, shape):
  """jnp.unravel_index matches numpy for scalar and array flat indices."""
  args_maker = lambda: (flat_index, shape)
  self._CheckAgainstNumpy(np.unravel_index, jnp.unravel_index,
                          args_maker)
  self._CompileAndCheck(jnp.unravel_index, args_maker)
def testUnravelIndexOOB(self):
  """Out-of-range flat indices (positive or negative) are clipped into range."""
  cases = [
      (2, (2,), (1,)),
      (-2, (2, 1, 3,), (1, 0, 1)),
      (-3, (2,), (0,)),
  ]
  for flat_index, dims, expected in cases:
    self.assertEqual(jnp.unravel_index(flat_index, dims), expected)
def testAstype(self):
  """float32 -> int32 conversion via .astype agrees between np and jnp."""
  rng = np.random.RandomState(0)
  args_maker = lambda: [rng.randn(3, 4).astype("float32")]
  def np_op(x):
    return np.asarray(x).astype(jnp.int32)
  def jnp_op(x):
    return jnp.asarray(x).astype(jnp.int32)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in array_shapes
    for dtype in all_dtypes))
def testNbytes(self, shape, dtype):
  """The .nbytes attribute agrees between np and jnp arrays."""
  rng = jtu.rand_default(self.rng())
  np_op = lambda x: np.asarray(x).nbytes
  jnp_op = lambda x: jnp.asarray(x).nbytes
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_dtype={}".format(
        jtu.format_shape_dtype_string(shape, a_dtype), dtype),
     "shape": shape, "a_dtype": a_dtype, "dtype": dtype}
    for shape in [(8,), (3, 8)]  # last dim = 8 to ensure shape compatibility
    for a_dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)
    for dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)))
def testView(self, shape, a_dtype, dtype):
  """.view(dtype) reinterprets array bytes identically in np and jnp."""
  if jtu.device_under_test() == 'tpu':
    if jnp.dtype(a_dtype).itemsize in [1, 2] or jnp.dtype(dtype).itemsize in [1, 2]:
      self.skipTest("arr.view() not supported on TPU for 8- or 16-bit types.")
  if not config.x64_enabled:
    if jnp.dtype(a_dtype).itemsize == 8 or jnp.dtype(dtype).itemsize == 8:
      self.skipTest("x64 types are disabled by jax_enable_x64")
  # Full-range values so every bit pattern (including NaNs) is exercised.
  rng = jtu.rand_fullrange(self.rng())
  args_maker = lambda: [rng(shape, a_dtype)]
  np_op = lambda x: np.asarray(x).view(dtype)
  jnp_op = lambda x: jnp.asarray(x).view(dtype)
  # Above may produce signaling nans; ignore warnings from invalid values.
  with np.errstate(invalid='ignore'):
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
def testPathologicalFloats(self):
  """Special float32 bit patterns survive a view round-trip unchanged."""
  args_maker = lambda: [np.array([
      0b_0111_1111_1000_0000_0000_0000_0000_0000,  # inf
      0b_1111_1111_1000_0000_0000_0000_0000_0000,  # -inf
      0b_0111_1111_1100_0000_0000_0000_0000_0000,  # qnan
      0b_1111_1111_1100_0000_0000_0000_0000_0000,  # -qnan
      0b_0111_1111_1000_0000_0000_0000_0000_0001,  # snan
      0b_1111_1111_1000_0000_0000_0000_0000_0001,  # -snan
      0b_0111_1111_1000_0000_0000_1100_0000_0000,  # nonstandard nan
      0b_1111_1111_1000_0000_0000_1100_0000_0000,  # -nonstandard nan
      0b_0000_0000_0000_0000_0000_0000_0000_0000,  # zero
      0b_1000_0000_0000_0000_0000_0000_0000_0000,  # -zero
  ], dtype='uint32')]
  # uint32 -> float32 -> uint32 must be the identity on the raw bits.
  np_op = lambda x: np.asarray(x).view('float32').view('uint32')
  jnp_op = lambda x: jnp.asarray(x).view('float32').view('uint32')
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test other ndarray-like method overrides
def testNpMean(self):
  """np.mean works when applied to a jnp array (regression test)."""
  # from https://github.com/google/jax/issues/125
  x = lax.add(jnp.eye(3, dtype=float), 0.)
  self.assertAllClose(np.mean(x), np.array(1. / 3), check_dtypes=False)
def testArangeOnFloats(self):
  """jnp.arange matches np.arange for float start/stop/step arguments."""
  # from https://github.com/google/jax/issues/145
  expected_step = np.arange(0.0, 1.0, 0.1, dtype=jnp.float_)
  self.assertAllClose(expected_step, jnp.arange(0.0, 1.0, 0.1))
  # from https://github.com/google/jax/issues/3450
  expected_stop = np.arange(2.5, dtype=jnp.float_)
  self.assertAllClose(expected_stop, jnp.arange(2.5))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for dtype in all_dtypes
    for shape in nonzerodim_shapes
    for axis in (None, *range(len(shape)))))
def testSort(self, dtype, shape, axis):
  """jnp.sort agrees with np.sort, including ties and axis=None flattening."""
  # Equal values exercise sort stability / tie handling.
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_fun = jnp.sort
  np_fun = np.sort
  if axis is not None:
    jnp_fun = partial(jnp_fun, axis=axis)
    np_fun = partial(np_fun, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for dtype in all_dtypes
    for shape in one_dim_array_shapes
    for axis in [None]))
def testSortComplex(self, dtype, shape, axis):
  """jnp.sort_complex agrees with np.sort_complex on 1-D inputs."""
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  # dtypes differ: np.sort_complex promotes differently than jnp.
  self._CheckAgainstNumpy(np.sort_complex, jnp.sort_complex, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp.sort_complex, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_input_type={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        input_type.__name__, axis),
     "shape": shape, "dtype": dtype, "input_type": input_type, "axis": axis}
    for dtype in all_dtypes
    for shape in nonempty_nonscalar_array_shapes
    # Keys may be given as an array or as a tuple of key arrays.
    for input_type in [np.array, tuple]
    for axis in (-1, *range(len(shape) - 1))))
def testLexsort(self, dtype, shape, input_type, axis):
  """jnp.lexsort agrees with np.lexsort for array and tuple key inputs."""
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [input_type(rng(shape, dtype))]
  jnp_op = lambda x: jnp.lexsort(x, axis=axis)
  np_op = lambda x: np.lexsort(x, axis=axis)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for dtype in all_dtypes
    for shape in nonzerodim_shapes
    for axis in (None, *range(len(shape)))))
def testArgsort(self, dtype, shape, axis):
  """jnp.argsort agrees with np.argsort, including on inputs with ties."""
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_fun = jnp.argsort
  np_fun = np.argsort
  if axis is not None:
    jnp_fun = partial(jnp_fun, axis=axis)
    np_fun = partial(np_fun, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for dtype in all_dtypes
    for shape in nonzerodim_shapes))
def testMsort(self, dtype, shape):
  """jnp.msort agrees with np.msort (sort along first axis)."""
  # NOTE(review): np.msort is deprecated in newer numpy releases; this test
  # may need np.sort(a, axis=0) when the dependency floor moves — confirm.
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np.msort, jnp.msort, args_maker)
  self._CompileAndCheck(jnp.msort, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_shifts={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        shifts, axis),
     "shape": shape, "dtype": dtype, "shifts": shifts, "axis": axis}
    for dtype in all_dtypes
    for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
    for shifts, axis in [
        (3, None),
        (1, 1),
        ((3,), (0,)),
        ((-2,), (-2,)),
        ((1, 2), (0, -1)),
        ((4, 2, 5, 5, 2, 4), None),
        (100, None),  # shift larger than the array size wraps around
    ]))
def testRoll(self, shape, dtype, shifts, axis):
  """jnp.roll agrees with np.roll for scalar/tuple shifts and axes."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype), np.array(shifts)]
  jnp_op = partial(jnp.roll, axis=axis)
  np_op = partial(np.roll, axis=axis)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_start={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        axis, start),
     "shape": shape, "dtype": dtype, "axis": axis,
     "start": start}
    for dtype in all_dtypes
    for shape in [(1, 2, 3, 4)]
    for axis in [-3, 0, 2, 3]
    for start in [-4, -1, 2, 4]))
def testRollaxis(self, shape, dtype, start, axis):
  """jnp.rollaxis agrees with np.rollaxis across axis/start combinations."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_op = partial(jnp.rollaxis, axis=axis, start=start)
  np_op = partial(np.rollaxis, axis=axis, start=start)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_bitorder={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, bitorder),
     "shape": shape, "dtype": dtype, "axis": axis,
     "bitorder": bitorder}
    for dtype in [np.uint8, np.bool_]
    for bitorder in ['big', 'little']
    for shape in [(1, 2, 3, 4)]
    for axis in [None, 0, 1, -2, -1]))
def testPackbits(self, shape, dtype, axis, bitorder):
  """jnp.packbits agrees with np.packbits for both bit orders."""
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_op = partial(jnp.packbits, axis=axis, bitorder=bitorder)
  np_op = partial(np.packbits, axis=axis, bitorder=bitorder)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_bitorder={}_count={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, bitorder, count),
     "shape": shape, "dtype": dtype, "axis": axis, "bitorder": bitorder,
     "count": count}
    for dtype in [np.uint8]
    for bitorder in ['big', 'little']
    for shape in [(1, 2, 3, 4)]
    for axis in [None, 0, 1, -2, -1]
    for count in [None, 20]))
def testUnpackbits(self, shape, dtype, axis, bitorder, count):
  """jnp.unpackbits agrees with np.unpackbits for both bit orders."""
  # NOTE(review): `count` is in the test matrix but not forwarded to the ops
  # below — presumably intentional or an oversight; confirm upstream.
  rng = jtu.rand_int(self.rng(), 0, 256)
  args_maker = lambda: [rng(shape, dtype)]
  jnp_op = partial(jnp.unpackbits, axis=axis, bitorder=bitorder)
  np_op = partial(np.unpackbits, axis=axis, bitorder=bitorder)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_index={}_axis={}_mode={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        jtu.format_shape_dtype_string(index_shape, index_dtype),
        axis, mode),
     "shape": shape, "index_shape": index_shape, "dtype": dtype,
     "index_dtype": index_dtype, "axis": axis, "mode": mode}
    for shape in [(3,), (3, 4), (3, 4, 5)]
    for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
    for axis in itertools.chain(range(-len(shape), len(shape)),
                                [cast(Optional[int], None)])
    for dtype in all_dtypes
    for index_dtype in int_dtypes
    for mode in [None, 'wrap', 'clip']))
def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode):
  """jnp.take agrees with np.take for in-bounds and wrap/clip index modes."""
  def args_maker():
    # Closure over rng/rng_indices bound below; args_maker is called lazily.
    x = rng(shape, dtype)
    i = rng_indices(index_shape, index_dtype)
    return x, i
  rng = jtu.rand_default(self.rng())
  if mode is None:
    # Default mode requires in-bounds indices for the chosen axis.
    rng_indices = jtu.rand_int(self.rng(), -shape[axis or 0], shape[axis or 0])
  else:
    rng_indices = jtu.rand_int(self.rng(), -5, 5)
  jnp_op = lambda x, i: jnp.take(x, i, axis=axis, mode=mode)
  np_op = lambda x, i: np.take(x, i, axis=axis, mode=mode)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
def testTakeEmpty(self):
  """Empty index arrays produce empty results; indexing into an empty axis raises."""
  empty_indices = jnp.array([], jnp.int32)
  np.testing.assert_array_equal(
      jnp.array([], dtype=jnp.float32),
      jnp.take(jnp.array([], jnp.float32), empty_indices))
  hollow = jnp.ones((2, 0, 4), dtype=jnp.float32)
  np.testing.assert_array_equal(
      jnp.ones((2, 0, 4), dtype=jnp.float32),
      jnp.take(hollow, empty_indices, axis=1))
  # A non-empty index into a zero-length axis is always out of bounds.
  with self.assertRaisesRegex(IndexError, "non-empty jnp.take"):
    jnp.take(hollow, jnp.array([0], jnp.int32), axis=1)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_index={}_axis={}".format(
        jtu.format_shape_dtype_string(x_shape, dtype),
        jtu.format_shape_dtype_string(i_shape, index_dtype), axis),
     "x_shape": x_shape, "i_shape": i_shape, "dtype": dtype,
     "index_dtype": index_dtype, "axis": axis}
    for x_shape, i_shape in filter(
        _shapes_are_equal_length,
        filter(_shapes_are_broadcast_compatible,
               itertools.combinations_with_replacement(nonempty_nonscalar_array_shapes, 2)))
    for axis in itertools.chain(range(len(x_shape)), [-1],
                                [cast(Optional[int], None)])
    for dtype in default_dtypes
    for index_dtype in int_dtypes))
def testTakeAlongAxis(self, x_shape, i_shape, dtype, index_dtype, axis):
  """jnp.take_along_axis agrees with numpy for signed and unsigned indices."""
  rng = jtu.rand_default(self.rng())
  i_shape = np.array(i_shape)
  if axis is None:
    i_shape = [np.prod(i_shape, dtype=np.int64)]
  else:
    # Test the case where the size of the axis doesn't necessarily broadcast.
    i_shape[axis] *= 3
    i_shape = list(i_shape)
  def args_maker():
    x = rng(x_shape, dtype)
    n = np.prod(x_shape, dtype=np.int32) if axis is None else x_shape[axis]
    # Unsigned index dtypes cannot represent negative (from-the-end) indices.
    if np.issubdtype(index_dtype, np.unsignedinteger):
      index_rng = jtu.rand_int(self.rng(), 0, n)
    else:
      index_rng = jtu.rand_int(self.rng(), -n, n)
    i = index_rng(i_shape, index_dtype)
    return x, i
  jnp_op = lambda x, i: jnp.take_along_axis(x, i, axis=axis)
  if hasattr(np, "take_along_axis"):
    np_op = lambda x, i: np.take_along_axis(x, i, axis=axis)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
def testTakeAlongAxisWithUint8IndicesDoesNotOverflow(self):
  """Regression test: uint8 indices into a >255-element array must not wrap."""
  # https://github.com/google/jax/issues/5088
  values = jtu.rand_default(self.rng())((256, 256, 100), np.float32)
  indices = jtu.rand_int(self.rng(), 0, 100)((256, 256, 1), np.uint8)
  jax_result = jnp.take_along_axis(values, indices, axis=-1)
  np_result = np.take_along_axis(values, indices, axis=-1)
  np.testing.assert_equal(jax_result, np_result)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_n={}_increasing={}".format(
        jtu.format_shape_dtype_string([shape], dtype),
        n, increasing),
     "dtype": dtype, "shape": shape, "n": n, "increasing": increasing}
    for dtype in inexact_dtypes
    for shape in [0, 5]
    for n in [2, 4]
    for increasing in [False, True]))
def testVander(self, shape, dtype, n, increasing):
  """jnp.vander agrees with np.vander (values only; dtypes intentionally differ)."""
  rng = jtu.rand_default(self.rng())
  def np_fun(arg):
    # numpy has no bfloat16; compute the reference in float32 instead.
    arg = arg.astype(np.float32) if dtype == jnp.bfloat16 else arg
    return np.vander(arg, N=n, increasing=increasing)
  jnp_fun = lambda arg: jnp.vander(arg, N=n, increasing=increasing)
  args_maker = lambda: [rng([shape], dtype)]
  # np.vander seems to return float64 for all floating types. We could obey
  # those semantics, but they seem like a bug.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                          tol={np.float32: 1e-3})
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": jtu.format_test_name_suffix(
        "nan_to_num", [shape], [dtype]),
     "shape": shape, "dtype": dtype}
    for shape in array_shapes
    for dtype in inexact_dtypes))
def testNanToNum(self, shape, dtype):
  """jnp.nan_to_num agrees with numpy, with a manual bfloat16 reference."""
  rng = jtu.rand_some_inf_and_nan(self.rng())
  dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type
  def np_fun(x):
    if dtype == jnp.bfloat16:
      # numpy's nan_to_num does not handle bfloat16; emulate it manually:
      # nan -> 0, +/-inf -> finfo max/min.
      x = np.where(np.isnan(x), dtype(0), x)
      x = np.where(np.isposinf(x), jnp.finfo(dtype).max, x)
      x = np.where(np.isneginf(x), jnp.finfo(dtype).min, x)
      return x
    else:
      return np.nan_to_num(x).astype(dtype)
  args_maker = lambda: [rng(shape, dtype)]
  check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
  self._CheckAgainstNumpy(np_fun, jnp.nan_to_num, args_maker,
                          check_dtypes=check_dtypes)
  self._CompileAndCheck(jnp.nan_to_num, args_maker,
                        check_dtypes=check_dtypes)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
     "shapes": shapes, "dtypes": dtypes}
    for shapes, dtypes in (
        ((), ()),
        (((7,),), (np.int32,)),
        (((3,), (4,)), (np.int32, np.int32)),
        (((3,), (1,), (4,)), (np.int32, np.int32, np.int32)),
    )))
def testIx_(self, shapes, dtypes):
  """jnp.ix_ agrees with np.ix_ for zero or more 1-D index arrays."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)
                        for shape, dtype in zip(shapes, dtypes)]
  self._CheckAgainstNumpy(np.ix_, jnp.ix_, args_maker)
  self._CompileAndCheck(jnp.ix_, args_maker)
@parameterized.named_parameters(
    jtu.cases_from_list(
        {"testcase_name": "_dimensions={}_dtype={}_sparse={}".format(
            dimensions, dtype, sparse),
         "dimensions": dimensions, "dtype": dtype, "sparse": sparse}
        for dimensions in [(), (2,), (3, 0), (4, 5, 6)]
        for dtype in number_dtypes
        for sparse in [True, False]))
def testIndices(self, dimensions, dtype, sparse):
  """jnp.indices agrees with np.indices in dense and sparse forms."""
  def args_maker(): return []
  np_fun = partial(np.indices, dimensions=dimensions,
                   dtype=dtype, sparse=sparse)
  jnp_fun = partial(jnp.indices, dimensions=dimensions,
                    dtype=dtype, sparse=sparse)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
        "_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}_interpolation={}".format(
            op,
            jtu.format_shape_dtype_string(a_shape, a_dtype),
            jtu.format_shape_dtype_string(q_shape, q_dtype),
            axis, keepdims, interpolation),
     # rand_some_nan exercises the nan* variants' NaN handling and the plain
     # variants' NaN propagation with the same inputs.
     "a_rng": jtu.rand_some_nan,
     "q_rng": q_rng, "op": op,
     "a_shape": a_shape, "a_dtype": a_dtype,
     "q_shape": q_shape, "q_dtype": q_dtype, "axis": axis,
     "keepdims": keepdims,
     "interpolation": interpolation}
    for (op, q_rng) in (
        ("percentile", partial(jtu.rand_uniform, low=0., high=100.)),
        ("quantile", partial(jtu.rand_uniform, low=0., high=1.)),
        ("nanpercentile", partial(jtu.rand_uniform, low=0., high=100.)),
        ("nanquantile", partial(jtu.rand_uniform, low=0., high=1.)),
    )
    for a_dtype in default_dtypes
    for a_shape, axis in (
        ((7,), None),
        ((47, 7), 0),
        ((4, 101), 1),
    )
    for q_dtype in [np.float32]
    for q_shape in scalar_shapes + [(4,)]
    for keepdims in [False, True]
    for interpolation in ['linear', 'lower', 'higher', 'nearest',
                          'midpoint']))
def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
                 axis, keepdims, interpolation):
  """jnp quantile/percentile (and nan* variants) agree with numpy within tolerance."""
  a_rng = a_rng(self.rng())
  q_rng = q_rng(self.rng())
  if "median" in op:
    args_maker = lambda: [a_rng(a_shape, a_dtype)]
  else:
    args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]
  def np_fun(*args):
    # numpy has no bfloat16; compute the reference in float32.
    args = [x if jnp.result_type(x) != jnp.bfloat16 else
            np.asarray(x, np.float32) for x in args]
    return getattr(np, op)(*args, axis=axis, keepdims=keepdims,
                           interpolation=interpolation)
  jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims,
                    interpolation=interpolation)
  # TODO(phawkins): we currently set dtype=False because we aren't as
  # aggressive about promoting to float64. It's not clear we want to mimic
  # Numpy here.
  tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
  tol = max(jtu.tolerance(a_dtype, tol_spec),
            jtu.tolerance(q_dtype, tol_spec))
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
        "_{}_a_shape={}_axis={}_keepdims={}".format(
            op, jtu.format_shape_dtype_string(a_shape, a_dtype),
            axis, keepdims),
     "op": op, "a_shape": a_shape, "a_dtype": a_dtype,
     "axis": axis,
     "keepdims": keepdims}
    for a_dtype in default_dtypes
    for a_shape, axis in (
        ((7,), None),
        ((47, 7), 0),
        ((4, 101), 1),
    )
    for keepdims in [False, True]
    for op in ["median", "nanmedian"]))
def testMedian(self, op, a_shape, a_dtype, axis, keepdims):
  """jnp.median / jnp.nanmedian agree with numpy within tolerance."""
  if op == "median":
    a_rng = jtu.rand_default(self.rng())
  else:
    # nanmedian needs NaNs in the input to be meaningfully exercised.
    a_rng = jtu.rand_some_nan(self.rng())
  args_maker = lambda: [a_rng(a_shape, a_dtype)]
  def np_fun(*args):
    # numpy has no bfloat16; compute the reference in float32.
    args = [x if jnp.result_type(x) != jnp.bfloat16 else
            np.asarray(x, np.float32) for x in args]
    return getattr(np, op)(*args, axis=axis, keepdims=keepdims)
  jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims)
  # TODO(phawkins): we currently set dtype=False because we aren't as
  # aggressive about promoting to float64. It's not clear we want to mimic
  # Numpy here.
  tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
  tol = jtu.tolerance(a_dtype, tol_spec)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in all_shapes for dtype in all_dtypes))
def testWhereOneArgument(self, shape, dtype):
  """One-argument jnp.where (nonzero) matches numpy; jit requires a static size."""
  rng = jtu.rand_some_zero(self.rng())
  np_fun = lambda x: np.where(x)
  np_fun = jtu.ignore_warning(
      category=DeprecationWarning,
      message="Calling nonzero on 0d arrays.*")(np_fun)
  jnp_fun = lambda x: jnp.where(x)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  # JIT compilation requires specifying a size statically. Full test of
  # this behavior is in testNonzeroSize().
  jnp_fun = lambda x: jnp.where(x, size=np.size(x) // 2)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
    "testcase_name": "_{}".format("_".join(
        jtu.format_shape_dtype_string(shape, dtype)
        for shape, dtype in zip(shapes, dtypes))),
    "shapes": shapes, "dtypes": dtypes
} for shapes in s(filter(_shapes_are_broadcast_compatible,
                         itertools.combinations_with_replacement(all_shapes, 3)))
  for dtypes in s(itertools.combinations_with_replacement(all_dtypes, 3)))))
def testWhereThreeArgument(self, shapes, dtypes):
  """Three-argument jnp.where matches numpy under jnp's promotion rules."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, shapes, dtypes)
  def np_fun(cond, x, y):
    # Apply jnp-style dtype promotion to the numpy reference.
    return _promote_like_jnp(partial(np.where, cond))(x, y)
  self._CheckAgainstNumpy(np_fun, jnp.where, args_maker)
  self._CompileAndCheck(jnp.where, args_maker)
def testWhereScalarPromotion(self):
  """A weakly-typed Python scalar branch adopts the array branch's dtype."""
  cond = jnp.array([True, False])
  result = jnp.where(cond, 3, jnp.ones((2,), dtype=jnp.float32))
  self.assertEqual(result.dtype, np.dtype(np.float32))
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
    "testcase_name": jtu.format_test_name_suffix("", shapes, (np.bool_,) * n + dtypes),
    "shapes": shapes, "dtypes": dtypes
} for n in s(range(1, 3))
  for shapes in s(filter(
      _shapes_are_broadcast_compatible,
      itertools.combinations_with_replacement(all_shapes, 2 * n + 1)))
  for dtypes in s(itertools.combinations_with_replacement(all_dtypes, n + 1)))))
def testSelect(self, shapes, dtypes):
  """jnp.select matches np.select once inputs are promoted to a common dtype."""
  rng = jtu.rand_default(self.rng())
  # shapes holds n condition shapes, n choice shapes, then the default's shape.
  n = len(dtypes) - 1
  def args_maker():
    condlist = [rng(shape, np.bool_) for shape in shapes[:n]]
    choicelist = [rng(shape, dtype)
                  for shape, dtype in zip(shapes[n:-1], dtypes[:n])]
    default = rng(shapes[-1], dtypes[-1])
    return condlist, choicelist, default
  # TODO(phawkins): float32/float64 type mismatches
  def np_fun(condlist, choicelist, default):
    # numpy has no bfloat16; compute the reference in float32.
    choicelist = [x if jnp.result_type(x) != jnp.bfloat16
                  else x.astype(np.float32) for x in choicelist]
    dtype = jnp.result_type(default, *choicelist)
    return np.select(condlist,
                     [np.asarray(x, dtype=dtype) for x in choicelist],
                     np.asarray(default, dtype=dtype))
  self._CheckAgainstNumpy(np_fun, jnp.select, args_maker,
                          check_dtypes=False)
  self._CompileAndCheck(jnp.select, args_maker,
                        rtol={np.float64: 1e-7, np.complex128: 1e-7})
def testIssue330(self):
  """jnp.full accepts a device-array scalar as fill value (regression test)."""
  fill_value = jnp.array([1])[0]
  filled = jnp.full((1, 1), fill_value)  # doesn't crash
  self.assertEqual(filled[0, 0], 1)
def testScalarDtypePromotion(self):
  """Adding a Python int to a float32 array promotes the same way in np and jnp."""
  expected = (1 + np.eye(1, dtype=np.float32)).dtype
  actual = (1 + jnp.eye(1, dtype=jnp.float32)).dtype
  self.assertEqual(expected, actual)
def testSymmetrizeDtypePromotion(self):
  """Symmetrizing ((x + x.T) / 2) preserves dtype identically in np and jnp."""
  def symmetrized_dtype(mat):
    # Division by a Python int should not upcast float32.
    return ((mat + mat.T) / 2).dtype
  self.assertEqual(symmetrized_dtype(np.eye(3, dtype=np.float32)),
                   symmetrized_dtype(jnp.eye(3, dtype=jnp.float32)))
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because
# introducing the convention 0 * inf = 0 leads to silently wrong results in
# some cases. See this comment for details:
# https://github.com/google/jax/issues/1052#issuecomment-514083352
# def testIssue347(self):
# # https://github.com/google/jax/issues/347
# def test_fail(x):
# x = jnp.sqrt(jnp.sum(x ** 2, axis=1))
# ones = jnp.ones_like(x)
# x = jnp.where(x > 0.5, x, ones)
# return jnp.sum(x)
# x = jnp.array([[1, 2], [3, 4], [0, 0]], dtype=jnp.float64)
# result = jax.grad(test_fail)(x)
# assert not np.any(np.isnan(result))
def testIssue453(self):
  """Fortran-order reshape matches numpy (regression test)."""
  # https://github.com/google/jax/issues/453
  source = np.arange(6) + 1
  self.assertAllClose(jnp.reshape(source, (3, 2), order='F'),
                      np.reshape(source, (3, 2), order='F'))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__),
     "pytype": pytype, "dtype": dtype, "op": op}
    for pytype, dtype in [(int, jnp.int_), (float, jnp.float_),
                          (bool, jnp.bool_), (complex, jnp.complex_)]
    for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
def testAtLeastNdLiterals(self, pytype, dtype, op):
  """atleast_{1,2,3}d accept bare Python scalars (regression for #634)."""
  # Fixes: https://github.com/google/jax/issues/634
  np_fun = lambda arg: getattr(np, op)(arg).astype(dtype)
  jnp_fun = lambda arg: getattr(jnp, op)(arg)
  args_maker = lambda: [pytype(2)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {
        "testcase_name": "_shape={}_dtype={}_weights={}_minlength={}_length={}".format(
            shape, dtype, weights, minlength, length
        ),
        "shape": shape,
        "dtype": dtype,
        "weights": weights,
        "minlength": minlength,
        "length": length}
    for shape in [(0,), (5,), (10,)]
    for dtype in int_dtypes
    for weights in [True, False]
    for minlength in [0, 20]
    for length in [None, 10]
))
def testBincount(self, shape, dtype, weights, minlength, length):
  """jnp.bincount matches numpy; the static `length` argument enables jit."""
  rng = jtu.rand_positive(self.rng())
  args_maker = lambda: (rng(shape, dtype), (rng(shape, 'float32') if weights else None))
  np_fun = partial(np.bincount, minlength=minlength)
  jnp_fun = partial(jnp.bincount, minlength=minlength, length=length)
  # With a static length the output shape is known, so jit works; without it
  # only eager comparison against numpy is possible.
  if length is not None:
    self._CompileAndCheck(jnp_fun, args_maker)
  if length is None:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
def testBincountNegative(self):
  """jnp.bincount silently drops negative values (numpy would raise)."""
  x_rng = jtu.rand_int(self.rng(), -100, 100)
  w_rng = jtu.rand_uniform(self.rng())
  shape = (1000,)
  x = x_rng(shape, 'int32')
  w = w_rng(shape, 'float32')
  # Build the numpy reference with negatives zeroed, then masked out.
  xn = np.array(x)
  xn[xn < 0] = 0
  wn = np.array(w)
  keep = xn >= 0
  np_result = np.bincount(xn[keep], wn[keep])
  self.assertAllClose(np_result, jnp.bincount(x, w), check_dtypes=False)
@parameterized.named_parameters(*jtu.cases_from_list(
    {"testcase_name": "_case={}".format(i),
     "input": input}
    # NOTE: the parameter is named `input` (shadowing the builtin) because
    # parameterized keys must match the test method's argument name.
    for i, input in enumerate([
        3,
        [3],
        [np.array(3)],
        [np.array([3])],
        [[np.array(3)]],
        [[np.array([3])]],
        [3, 4, 5],
        [
            [np.eye(2, dtype=np.int32) * 2, np.zeros((2, 3), dtype=np.int32)],
            [np.ones((3, 2), dtype=np.int32), np.eye(3, dtype=np.int32) * 3],
        ],
        [np.array([1, 2, 3]), np.array([2, 3, 4]), 10],
        [np.ones((2, 2), dtype=np.int32), np.zeros((2, 2), dtype=np.int32)],
        [[np.array([1, 2, 3])], [np.array([2, 3, 4])]],
    ])))
def testBlock(self, input):
  """jnp.block matches np.block for scalars and nested list layouts."""
  args_maker = lambda: [input]
  self._CheckAgainstNumpy(np.block, jnp.block, args_maker)
  self._CompileAndCheck(jnp.block, args_maker)
def testLongLong(self):
  """np.longlong inputs round-trip through jit as int64."""
  identity = jax.jit(lambda value: value)
  self.assertAllClose(np.int64(7), identity(np.longlong(7)))
@jtu.ignore_warning(category=UserWarning,
                    message="Explicitly requested dtype.*")
def testArange(self):
  """jnp.arange matches numpy values and lowers to a lazy iota, not an ndarray."""
  # test cases inspired by dask tests at
  # https://github.com/dask/dask/blob/main/dask/array/tests/test_creation.py#L92
  self.assertAllClose(jnp.arange(77),
                      np.arange(77, dtype=jnp.int_))
  self.assertAllClose(jnp.arange(2, 13),
                      np.arange(2, 13, dtype=jnp.int_))
  self.assertAllClose(jnp.arange(4, 21, 9),
                      np.arange(4, 21, 9, dtype=jnp.int_))
  self.assertAllClose(jnp.arange(53, 5, -3),
                      np.arange(53, 5, -3, dtype=jnp.int_))
  self.assertAllClose(jnp.arange(77, dtype=float),
                      np.arange(77, dtype=float))
  self.assertAllClose(jnp.arange(2, 13, dtype=int),
                      np.arange(2, 13, dtype=int))
  self.assertAllClose(jnp.arange(0, 1, -0.5),
                      np.arange(0, 1, -0.5, dtype=jnp.float_))
  self.assertRaises(TypeError, lambda: jnp.arange())
  # test that jnp.arange(N) doesn't instantiate an ndarray
  self.assertNotEqual(type(jnp.arange(77)), type(np.arange(77)))
  self.assertEqual(type(jnp.arange(77)), type(lax.iota(np.int32, 77)))
  # test that jnp.arange(N, dtype=int32) doesn't instantiate an ndarray
  self.assertNotEqual(type(jnp.arange(77, dtype=jnp.int32)),
                      type(np.arange(77, dtype=np.int32)))
  self.assertEqual(type(jnp.arange(77, dtype=jnp.int32)),
                   type(lax.iota(np.int32, 77)))
def testArangeJit(self):
  """jnp.arange with static arguments must be usable inside jit."""
  result = jax.jit(lambda: jnp.arange(5))()
  self.assertAllClose(result, np.arange(5))
def testIssue830(self):
  """Regression test: jnp.arange must honor a complex dtype argument."""
  arr = jnp.arange(4, dtype=jnp.complex64)
  self.assertEqual(jnp.complex64, arr.dtype)
def testIssue728(self):
  """Regression test: large jnp.eye matrices must match np.eye exactly."""
  assert jnp.allclose(jnp.eye(5000), np.eye(5000))
  difference = jnp.eye(1050) - np.eye(1050)
  self.assertEqual(0, np.sum(difference))
def testIssue746(self):
  """Regression test: reshaping an arange result must not crash."""
  jnp.arange(12).reshape(3, 4)
def testIssue764(self):
  """grad(sum(tanh)) at large inputs should underflow smoothly, not NaN."""
  x = jnp.linspace(190, 200, 4)
  grad_fn = jax.grad(lambda v: jnp.sum(jnp.tanh(v)))
  # Expected values computed with autograd in float64 precision.
  expected = np.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
                       7.66067839e-174], np.float64)
  self.assertAllClose(grad_fn(x), expected, check_dtypes=False)
def testIssue776(self):
  """Tests that the scatter-add transpose rule instantiates symbolic zeros."""
  def f(u):
    y = jnp.ones(10).at[np.array([2, 4, 5])].add(u)
    # The transpose rule for lax.tie_in returns a symbolic zero for its first
    # argument.
    return lax.tie_in(y, 7.)
  # The gradient w.r.t. u must be all zeros (f's value is independent of u).
  self.assertAllClose(np.zeros(3,), jax.grad(f)(np.ones(3,)))
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because this
# is a numerical stability issue that should be solved with a custom jvp rule
# of the sigmoid function being differentiated here, not by safe_mul.
# def testIssue777(self):
# x = jnp.linspace(-200, 0, 4, dtype=np.float32)
# f = jax.grad(lambda x: jnp.sum(1 / (1 + jnp.exp(-x))))
# self.assertAllClose(f(x), np.array([0., 0., 0., 0.25], dtype=np.float32))
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
       "dtype": dtype, "op": op}
      for dtype in float_dtypes
      for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
                 "sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
                 "log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
  """Checks elementwise math ops on special values (nan, inf, extremes)."""
  np_op = getattr(np, op)
  # Silence the warnings NumPy emits for these deliberately extreme inputs.
  np_op = jtu.ignore_warning(category=RuntimeWarning,
                             message="invalid value.*")(np_op)
  np_op = jtu.ignore_warning(category=RuntimeWarning,
                             message="divide by zero.*")(np_op)
  np_op = jtu.ignore_warning(category=RuntimeWarning,
                             message="overflow.*")(np_op)
  jnp_op = getattr(jnp, op)
  dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type
  for x in (np.nan, -np.inf, -100., -2., -1., 0., 1., 2., 100., np.inf,
            jnp.finfo(dtype).max, np.sqrt(jnp.finfo(dtype).max),
            np.sqrt(jnp.finfo(dtype).max) * 2.):
    if (op in ("sin", "cos", "tan") and
        jtu.device_under_test() == "tpu"):
      continue  # TODO(b/132196789): fix and reenable.
    x = dtype(x)
    expected = np_op(x)
    actual = jnp_op(x)
    tol = jtu.tolerance(dtype, {np.float32: 1e-3, np.float64: 1e-7})
    self.assertAllClose(expected, actual, atol=tol,
                        rtol=tol)
def testIssue883(self):
  """Disabled regression test for arrays as static jit arguments."""
  # from https://github.com/google/jax/issues/883
  # The SkipTest is raised unconditionally: the code below is intentionally
  # kept (but unreachable) to document the original reproduction.
  raise SkipTest("we decided to disallow arrays as static args")

  @partial(jax.jit, static_argnums=(1,))
  def f(x, v):
    return x

  x = jnp.ones((10, 10))
  v = jnp.array([1, 2, 3])
  _ = f(x, v)
  _ = f(x, v)  # doesn't crash
def testReductionOfOutOfBoundsAxis(self):  # Issue 888
  """Summing over an axis beyond the array rank must raise ValueError."""
  arr = jnp.ones((3, 4))
  with self.assertRaises(ValueError):
    jnp.sum(arr, axis=2)
def testIssue956(self):
  """jnp.ndarray must not be directly constructible."""
  with self.assertRaises(TypeError):
    jnp.ndarray((1, 1))
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name":
        "_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
        .format(shape, dtype, out_dtype, axis, ddof, keepdims),
       "shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
       "ddof": ddof, "keepdims": keepdims}
      for shape in [(5,), (10, 5)]
      for dtype in all_dtypes
      for out_dtype in inexact_dtypes
      for axis in [None, 0, -1]
      for ddof in [0, 1, 2]
      for keepdims in [False, True]))
def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):
  """Checks jnp.var against np.var across dtype/axis/ddof/keepdims combos."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.")
  def np_fun(x):
    # Compute in at-least-float32 precision, then cast to the requested
    # output dtype, mirroring jnp.var's promotion behavior.
    out = np.var(x.astype(jnp.promote_types(np.float32, dtype)),
                 axis=axis, ddof=ddof, keepdims=keepdims)
    return out.astype(out_dtype)
  jnp_fun = partial(jnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
  tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,
                                  np.float64: 1e-3, np.complex128: 1e-6})
  if (jnp.issubdtype(dtype, jnp.complexfloating) and
      not jnp.issubdtype(out_dtype, jnp.complexfloating)):
    # Discarding the imaginary part via a real output dtype is an error.
    self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))
  else:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
                          atol=tol)
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name":
        "_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
        .format(shape, dtype, out_dtype, axis, ddof, keepdims),
       "shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
       "ddof": ddof, "keepdims": keepdims}
      for shape in [(5,), (10, 5)]
      for dtype in all_dtypes
      for out_dtype in inexact_dtypes
      for axis in [None, 0, -1]
      for ddof in [0, 1, 2]
      for keepdims in [False, True]))
def testNanVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):
  """Checks jnp.nanvar against np.nanvar on inputs containing NaNs."""
  rng = jtu.rand_some_nan(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.")
  def np_fun(x):
    # Compute in at-least-float32 precision, then cast to the requested
    # output dtype, mirroring jnp.nanvar's promotion behavior.
    out = np.nanvar(x.astype(jnp.promote_types(np.float32, dtype)),
                    axis=axis, ddof=ddof, keepdims=keepdims)
    return out.astype(out_dtype)
  jnp_fun = partial(jnp.nanvar, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
  tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,
                                  np.float64: 1e-3, np.complex128: 1e-6})
  if (jnp.issubdtype(dtype, jnp.complexfloating) and
      not jnp.issubdtype(out_dtype, jnp.complexfloating)):
    # Discarding the imaginary part via a real output dtype is an error.
    self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))
  else:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
                          atol=tol)
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name":
        "_shape={}_dtype={}_y_shape={}_y_dtype={}_rowvar={}_ddof={}_bias={}_fweights={}_aweights={}".format(
          shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights),
       "shape": shape, "y_shape": y_shape, "dtype": dtype, "y_dtype": y_dtype,"rowvar": rowvar, "ddof": ddof,
       "bias": bias, "fweights": fweights, "aweights": aweights}
      for shape in [(5,), (10, 5), (5, 10)]
      for dtype in all_dtypes
      for y_dtype in [None, dtype]
      for rowvar in [True, False]
      for y_shape in _get_y_shapes(y_dtype, shape, rowvar)
      for bias in [True, False]
      for ddof in [None, 2, 3]
      for fweights in [True, False]
      for aweights in [True, False]))
def testCov(self, shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights):
  """Checks jnp.cov against np.cov, including fweights/aweights handling."""
  rng = jtu.rand_default(self.rng())
  wrng = jtu.rand_positive(self.rng())
  # aweights use the real part's dtype; fweights must be integral.
  wdtype = np.real(dtype(0)).dtype
  # Weight vector length matches the number of observations, which depends
  # on rowvar.
  wshape = shape[-1:] if rowvar or shape[0] == 1 else shape[:1]
  args_maker = lambda: [rng(shape, dtype),
                        rng(y_shape, y_dtype) if y_dtype else None,
                        wrng(wshape, int) if fweights else None,
                        wrng(wshape, wdtype) if aweights else None]
  kwargs = dict(rowvar=rowvar, ddof=ddof, bias=bias)
  np_fun = lambda m, y, f, a: np.cov(m, y, fweights=f, aweights=a, **kwargs)
  jnp_fun = lambda m, y, f, a: jnp.cov(m, y, fweights=f, aweights=a, **kwargs)
  tol = {jnp.bfloat16: 5E-2, np.float16: 1E-2, np.float32: 1e-5,
         np.float64: 1e-13, np.complex64: 1e-5, np.complex128: 1e-13}
  tol = 7e-2 if jtu.device_under_test() == "tpu" else tol
  tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))
  self._CheckAgainstNumpy(
      np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                        rtol=tol)
def testIssue967(self):
  """A non-integer shape passed to jnp.zeros must raise TypeError."""
  with self.assertRaises(TypeError):
    jnp.zeros(1.5)
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name": "_shape={}_dtype={}_rowvar={}".format(
          shape, dtype.__name__, rowvar),
       "shape": shape, "dtype": dtype, "rowvar": rowvar}
      for shape in [(5,), (10, 5), (3, 10)]
      for dtype in number_dtypes
      for rowvar in [True, False]))
def testCorrCoef(self, shape, dtype, rowvar):
  """Checks jnp.corrcoef against np.corrcoef on nondegenerate inputs."""
  rng = jtu.rand_default(self.rng())
  def args_maker():
    # Resample until the data has nonzero variance, since corrcoef divides
    # by the standard deviation.
    ok = False
    while not ok:
      x = rng(shape, dtype)
      ok = not np.any(np.isclose(np.std(x), 0.0))
    return (x,)
  np_fun = partial(np.corrcoef, rowvar=rowvar)
  np_fun = jtu.ignore_warning(
      category=RuntimeWarning, message="invalid value encountered.*")(np_fun)
  jnp_fun = partial(jnp.corrcoef, rowvar=rowvar)
  tol = 1e-2 if jtu.device_under_test() == "tpu" else None
  self._CheckAgainstNumpy(
      np_fun, jnp_fun, args_maker, check_dtypes=False,
      tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_{}".format(jtu.format_shape_dtype_string(shape, dtype),
      "None" if end_dtype is None else jtu.format_shape_dtype_string(end_shape, end_dtype),
      "None" if begin_dtype is None else jtu.format_shape_dtype_string(begin_shape, begin_dtype)),
     "shape": shape, "dtype": dtype, "end_shape": end_shape,
     "end_dtype": end_dtype, "begin_shape": begin_shape,
     "begin_dtype": begin_dtype}
    for dtype in number_dtypes
    for end_dtype in [None] + [dtype]
    for begin_dtype in [None] + [dtype]
    for shape in [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE]
    for begin_shape in (
      [None] if begin_dtype is None
      else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])
    for end_shape in (
      [None] if end_dtype is None
      else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])))
def testEDiff1d(self, shape, dtype, end_shape, end_dtype, begin_shape,
                begin_dtype):
  """Checks jnp.ediff1d against np.ediff1d, with optional to_end/to_begin."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype),
                        (None if end_dtype is None else rng(end_shape, end_dtype)),
                        (None if begin_dtype is None else rng(begin_shape, begin_dtype))]
  np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)
  jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
def testEDiff1dWithDtypeCast(self):
  """ediff1d should accept a to_end argument of a different dtype."""
  rng = jtu.rand_default(self.rng())
  shape = jtu.NUMPY_SCALAR_SHAPE
  dtype = jnp.float32
  end_dtype = jnp.int32

  def args_maker():
    return [rng(shape, dtype), rng(shape, end_dtype), rng(shape, dtype)]

  def np_fun(x, to_end, to_begin):
    return np.ediff1d(x, to_end, to_begin)

  def jnp_fun(x, to_end, to_begin):
    return jnp.ediff1d(x, to_end, to_begin)

  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name": "_shapes={}_dtype={}_indexing={}_sparse={}".format(
          shapes, dtype, indexing, sparse),
       "shapes": shapes, "dtype": dtype, "indexing": indexing,
       "sparse": sparse}
      for shapes in [(), (5,), (5, 3)]
      for dtype in number_dtypes
      for indexing in ['xy', 'ij']
      for sparse in [True, False]))
def testMeshGrid(self, shapes, dtype, indexing, sparse):
  """Checks jnp.meshgrid against np.meshgrid for both indexing modes."""
  rng = jtu.rand_default(self.rng())
  # One 1-D argument per entry in `shapes`.
  args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],
                                  [dtype] * len(shapes))
  np_fun = partial(np.meshgrid, indexing=indexing, sparse=sparse)
  jnp_fun = partial(jnp.meshgrid, indexing=indexing, sparse=sparse)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
def testMgrid(self):
  """Checks jnp.mgrid against np.mgrid for integer, complex, and float steps."""
  # wrap self.assertAllClose in a function with the positional argument order
  # of assertAllEqual, requiring exact equality
  assertAllEqual = partial(self.assertAllClose, atol=0, rtol=0)
  assertAllEqual(np.mgrid[:4], jnp.mgrid[:4])
  assertAllEqual(np.mgrid[:4,], jnp.mgrid[:4,])
  assertAllEqual(np.mgrid[:4], jax.jit(lambda: jnp.mgrid[:4])())
  assertAllEqual(np.mgrid[:5, :5], jnp.mgrid[:5, :5])
  assertAllEqual(np.mgrid[:3, :2], jnp.mgrid[:3, :2])
  assertAllEqual(np.mgrid[1:4:2], jnp.mgrid[1:4:2])
  assertAllEqual(np.mgrid[1:5:3, :5], jnp.mgrid[1:5:3, :5])
  assertAllEqual(np.mgrid[:3, :2, :5], jnp.mgrid[:3, :2, :5])
  assertAllEqual(np.mgrid[:3:2, :2, :5], jnp.mgrid[:3:2, :2, :5])
  # Corner cases
  assertAllEqual(np.mgrid[:], jnp.mgrid[:])
  # When the step length is a complex number, because of float calculation,
  # the values from jnp and np might differ slightly.
  atol = 1e-6
  rtol = 1e-6
  self.assertAllClose(np.mgrid[-1:1:5j],
                      jnp.mgrid[-1:1:5j],
                      atol=atol,
                      rtol=rtol)
  self.assertAllClose(np.mgrid[3:4:7j],
                      jnp.mgrid[3:4:7j],
                      atol=atol,
                      rtol=rtol)
  self.assertAllClose(np.mgrid[1:6:8j, 2:4],
                      jnp.mgrid[1:6:8j, 2:4],
                      atol=atol,
                      rtol=rtol)
  # Non-integer steps
  self.assertAllClose(np.mgrid[0:3.5:0.5],
                      jnp.mgrid[0:3.5:0.5],
                      atol=atol,
                      rtol=rtol)
  self.assertAllClose(np.mgrid[1.3:4.2:0.3],
                      jnp.mgrid[1.3:4.2:0.3],
                      atol=atol,
                      rtol=rtol)
  # abstract tracer value for jnp.mgrid slice
  with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                              "slice start of jnp.mgrid"):
    jax.jit(lambda a, b: jnp.mgrid[a:b])(0, 2)
def testOgrid(self):
  """Checks jnp.ogrid against np.ogrid, including list-of-array outputs."""
  def assertListOfArraysEqual(xs, ys):
    # Element-wise array comparison for the list outputs ogrid produces
    # when given multiple slices.
    self.assertIsInstance(xs, list)
    self.assertIsInstance(ys, list)
    self.assertEqual(len(xs), len(ys))
    for x, y in zip(xs, ys):
      self.assertArraysEqual(x, y)

  self.assertArraysEqual(np.ogrid[:5], jnp.ogrid[:5])
  self.assertArraysEqual(np.ogrid[:5], jax.jit(lambda: jnp.ogrid[:5])())
  self.assertArraysEqual(np.ogrid[1:7:2], jnp.ogrid[1:7:2])
  # List of arrays
  assertListOfArraysEqual(np.ogrid[:5,], jnp.ogrid[:5,])
  assertListOfArraysEqual(np.ogrid[0:5, 1:3], jnp.ogrid[0:5, 1:3])
  assertListOfArraysEqual(np.ogrid[1:3:2, 2:9:3], jnp.ogrid[1:3:2, 2:9:3])
  assertListOfArraysEqual(np.ogrid[:5, :9, :11], jnp.ogrid[:5, :9, :11])
  # Corner cases
  self.assertArraysEqual(np.ogrid[:], jnp.ogrid[:])
  # Complex number steps
  atol = 1e-6
  rtol = 1e-6
  self.assertAllClose(np.ogrid[-1:1:5j],
                      jnp.ogrid[-1:1:5j],
                      atol=atol,
                      rtol=rtol)
  # Non-integer steps
  self.assertAllClose(np.ogrid[0:3.5:0.3],
                      jnp.ogrid[0:3.5:0.3],
                      atol=atol,
                      rtol=rtol)
  self.assertAllClose(np.ogrid[1.2:4.8:0.24],
                      jnp.ogrid[1.2:4.8:0.24],
                      atol=atol,
                      rtol=rtol)
  # abstract tracer value for ogrid slice
  with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                              "slice start of jnp.ogrid"):
    jax.jit(lambda a, b: jnp.ogrid[a:b])(0, 2)
def testR_(self):
  """Checks jnp.r_ against np.r_ for concatenation and string directives."""
  a = np.arange(6).reshape((2,3))
  self.assertArraysEqual(np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])],
                         jnp.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])])
  self.assertArraysEqual(np.r_['-1', a, a], jnp.r_['-1', a, a])

  self.assertArraysEqual(np.r_['0,2', [1,2,3], [4,5,6]], jnp.r_['0,2', [1,2,3], [4,5,6]])
  self.assertArraysEqual(np.r_['0,2,0', [1,2,3], [4,5,6]], jnp.r_['0,2,0', [1,2,3], [4,5,6]])
  self.assertArraysEqual(np.r_['1,2,0', [1,2,3], [4,5,6]], jnp.r_['1,2,0', [1,2,3], [4,5,6]])
  # negative 1d axis start
  self.assertArraysEqual(np.r_['0,4,-1', [1,2,3], [4,5,6]], jnp.r_['0,4,-1', [1,2,3], [4,5,6]])
  self.assertArraysEqual(np.r_['0,4,-2', [1,2,3], [4,5,6]], jnp.r_['0,4,-2', [1,2,3], [4,5,6]])

  # matrix directives
  with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
    self.assertArraysEqual(np.r_['r',[1,2,3], [4,5,6]], jnp.r_['r',[1,2,3], [4,5,6]])
    self.assertArraysEqual(np.r_['c', [1, 2, 3], [4, 5, 6]], jnp.r_['c', [1, 2, 3], [4, 5, 6]])

  # bad directive
  with self.assertRaisesRegex(ValueError, "could not understand directive.*"):
    jnp.r_["asdfgh",[1,2,3]]
  # abstract tracer value for r_ slice
  with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                              "slice start of jnp.r_"):
    jax.jit(lambda a, b: jnp.r_[a:b])(0, 2)

  # Complex number steps
  atol = 1e-6
  rtol = 1e-6
  self.assertAllClose(np.r_[-1:1:6j],
                      jnp.r_[-1:1:6j],
                      atol=atol,
                      rtol=rtol)
  self.assertAllClose(np.r_[-1:1:6j, [0]*3, 5, 6],
                      jnp.r_[-1:1:6j, [0]*3, 5, 6],
                      atol=atol,
                      rtol=rtol)
  # Non-integer steps
  self.assertAllClose(np.r_[1.2:4.8:0.24],
                      jnp.r_[1.2:4.8:0.24],
                      atol=atol,
                      rtol=rtol)
def testC_(self):
  """Checks jnp.c_ against np.c_ for column stacking and string directives."""
  a = np.arange(6).reshape((2, 3))
  self.assertArraysEqual(np.c_[np.array([1,2,3]), np.array([4,5,6])],
                         jnp.c_[np.array([1,2,3]), np.array([4,5,6])])
  self.assertArraysEqual(np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])],
                         jnp.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])])
  self.assertArraysEqual(np.c_['-1', a, a], jnp.c_['-1', a, a])

  self.assertArraysEqual(np.c_['0,2', [1,2,3], [4,5,6]], jnp.c_['0,2', [1,2,3], [4,5,6]])
  self.assertArraysEqual(np.c_['0,2,0', [1,2,3], [4,5,6]], jnp.c_['0,2,0', [1,2,3], [4,5,6]])
  self.assertArraysEqual(np.c_['1,2,0', [1,2,3], [4,5,6]], jnp.c_['1,2,0', [1,2,3], [4,5,6]])
  # negative 1d axis start
  self.assertArraysEqual(np.c_['0,4,-1', [1,2,3], [4,5,6]], jnp.c_['0,4,-1', [1,2,3], [4,5,6]])
  self.assertArraysEqual(np.c_['0,4,-2', [1,2,3], [4,5,6]], jnp.c_['0,4,-2', [1,2,3], [4,5,6]])

  # matrix directives, avoid numpy deprecation warning
  with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
    self.assertArraysEqual(np.c_['r',[1,2,3], [4,5,6]], jnp.c_['r',[1,2,3], [4,5,6]])
    self.assertArraysEqual(np.c_['c', [1, 2, 3], [4, 5, 6]], jnp.c_['c', [1, 2, 3], [4, 5, 6]])

  # bad directive
  with self.assertRaisesRegex(ValueError, "could not understand directive.*"):
    jnp.c_["asdfgh",[1,2,3]]
  # abstract tracer value for c_ slice
  with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                              "slice start of jnp.c_"):
    jax.jit(lambda a, b: jnp.c_[a:b])(0, 2)

  # Complex number steps
  atol = 1e-6
  rtol = 1e-6
  self.assertAllClose(np.c_[-1:1:6j],
                      jnp.c_[-1:1:6j],
                      atol=atol,
                      rtol=rtol)

  # Non-integer steps
  self.assertAllClose(np.c_[1.2:4.8:0.24],
                      jnp.c_[1.2:4.8:0.24],
                      atol=atol,
                      rtol=rtol)
def testS_(self):
  """jnp.s_ must produce the same slice objects as np.s_."""
  expected = np.s_[1:2:20]
  self.assertEqual(expected, jnp.s_[1:2:20])
def testIndex_exp(self):
  """jnp.index_exp must produce the same index tuples as np.index_exp."""
  expected = np.index_exp[5:3:2j]
  self.assertEqual(expected, jnp.index_exp[5:3:2j])
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name": f"_start_shape={start_shape}_stop_shape={stop_shape}"
                        f"_num={num}_endpoint={endpoint}_retstep={retstep}"
                        f"_dtype={dtype.__name__ if dtype else 'None'}",
       "start_shape": start_shape, "stop_shape": stop_shape,
       "num": num, "endpoint": endpoint, "retstep": retstep,
       "dtype": dtype}
      for start_shape in [(), (2,), (2, 2)]
      for stop_shape in [(), (2,), (2, 2)]
      for num in [0, 1, 2, 5, 20]
      for endpoint in [True, False]
      for retstep in [True, False]
      # floating-point compute between jitted platforms and non-jit + rounding
      # cause unavoidable variation in integer truncation for some inputs, so
      # we currently only test inexact 'dtype' arguments.
      for dtype in inexact_dtypes + [None,]))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testLinspace(self, start_shape, stop_shape, num, endpoint, retstep, dtype):
  """Checks jnp.linspace against np.linspace over shapes, axes and dtypes."""
  rng = jtu.rand_default(self.rng())
  # relax default tolerances slightly
  tol = jtu.tolerance(dtype if dtype else np.float32) * 10
  args_maker = self._GetArgsMaker(rng,
                                  [start_shape, stop_shape],
                                  [dtype, dtype])
  start, stop = args_maker()
  ndim = len(np.shape(start + stop))
  for axis in range(-ndim, ndim):
    jnp_op = lambda start, stop: jnp.linspace(
      start, stop, num,
      endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
    # NumPy 1.20.0 changed the semantics of linspace to floor for integer
    # dtypes.
    if numpy_version >= (1, 20) or not np.issubdtype(dtype, np.integer):
      np_op = lambda start, stop: np.linspace(
        start, stop, num,
        endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
    else:
      def np_op(start, stop):
        # Emulate the >=1.20 floor behavior on older NumPy versions.
        out = np.linspace(start, stop, num, endpoint=endpoint,
                          retstep=retstep, axis=axis)
        if retstep:
          return np.floor(out[0]).astype(dtype), out[1]
        else:
          return np.floor(out).astype(dtype)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                            check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_op, args_maker,
                          check_dtypes=False, atol=tol, rtol=tol)
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
      for dtype in number_dtypes))
def testLinspaceEndpoints(self, dtype):
  """Regression test for Issue #3014."""
  rng = jtu.rand_default(self.rng())
  endpoints = rng((2,), dtype)
  out = jnp.linspace(*endpoints, 10, dtype=dtype)
  # The first and last samples must equal the endpoints exactly.
  self.assertAllClose(out[np.array([0, -1])], endpoints, rtol=0, atol=0)
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
                         "_base={}_dtype={}").format(
          start_shape, stop_shape, num, endpoint, base,
          dtype.__name__ if dtype else "None"),
       "start_shape": start_shape,
       "stop_shape": stop_shape,
       "num": num, "endpoint": endpoint, "base": base,
       "dtype": dtype}
      for start_shape in [(), (2,), (2, 2)]
      for stop_shape in [(), (2,), (2, 2)]
      for num in [0, 1, 2, 5, 20]
      for endpoint in [True, False]
      for base in [10.0, 2, np.e]
      for dtype in inexact_dtypes + [None,]))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testLogspace(self, start_shape, stop_shape, num,
                 endpoint, base, dtype):
  """Checks jnp.logspace against np.logspace over shapes, bases and axes."""
  if (dtype in int_dtypes and
      jtu.device_under_test() in ("gpu", "tpu") and
      not config.x64_enabled):
    raise unittest.SkipTest("GPUx32 truncated exponentiation"
                            " doesn't exactly match other platforms.")
  rng = jtu.rand_default(self.rng())
  # relax default tolerances slightly
  tol = {np.float16: 2e-2, np.float32: 1e-2, np.float64: 1e-6,
         np.complex64: 1e-3, np.complex128: 1e-6}
  args_maker = self._GetArgsMaker(rng,
                                  [start_shape, stop_shape],
                                  [dtype, dtype])
  start, stop = args_maker()
  ndim = len(np.shape(start + stop))
  for axis in range(-ndim, ndim):
    jnp_op = lambda start, stop: jnp.logspace(
      start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="overflow encountered in power")
    def np_op(start, stop):
      return np.logspace(start, stop, num, endpoint=endpoint,
                         base=base, dtype=dtype, axis=axis)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                            check_dtypes=False, tol=tol)
    if dtype in (inexact_dtypes + [None,]):
      # Why do compiled and op-by-op float16 np.power numbers differ
      # slightly more than expected?
      atol = {np.float16: 1e-2}
      self._CompileAndCheck(jnp_op, args_maker,
                            check_dtypes=False, atol=atol, rtol=tol)
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
                         "_dtype={}_axis={}").format(
          start_shape, stop_shape, num, endpoint,
          dtype.__name__ if dtype else "None", axis),
       "start_shape": start_shape,
       "stop_shape": stop_shape,
       "num": num, "endpoint": endpoint,
       "dtype": dtype, "axis": axis}
      for start_shape in [(), (2,), (2, 2)]
      for stop_shape in [(), (2,), (2, 2)]
      for num in [0, 1, 2, 5, 20]
      for endpoint in [True, False]
      # NB: numpy's geomspace gives nonsense results on integer types
      for dtype in inexact_dtypes + [None,]
      for axis in range(-max(len(start_shape), len(stop_shape)),
                        max(len(start_shape), len(stop_shape)))))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testGeomspace(self, start_shape, stop_shape, num,
                  endpoint, dtype, axis):
  """Checks jnp.geomspace against np.geomspace on well-defined inputs."""
  rng = jtu.rand_default(self.rng())
  # relax default tolerances slightly
  tol = {np.float16: 4e-3, np.float32: 2e-3, np.float64: 1e-14,
         np.complex128: 1e-14}
  def args_maker():
    """Test the set of inputs np.geomspace is well-defined on."""
    start, stop = self._GetArgsMaker(rng,
                                     [start_shape, stop_shape],
                                     [dtype, dtype])()
    # np.geomspace can't handle differently ranked tensors
    # w. negative numbers!
    start, stop = jnp.broadcast_arrays(start, stop)
    if dtype in complex_dtypes:
      return start, stop
    # to avoid NaNs, non-complex start and stop cannot
    # differ in sign, elementwise
    start = start * jnp.sign(start) * jnp.sign(stop)
    return start, stop
  start, stop = args_maker()
  def jnp_op(start, stop):
    return jnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,
                         axis=axis)
  def np_op(start, stop):
    # np.geomspace doesn't support bfloat16; compute in float32 and cast back.
    start = start.astype(np.float32) if dtype == jnp.bfloat16 else start
    stop = stop.astype(np.float32) if dtype == jnp.bfloat16 else stop
    return np.geomspace(
      start, stop, num, endpoint=endpoint,
      dtype=dtype if dtype != jnp.bfloat16 else np.float32,
      axis=axis).astype(dtype)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                          check_dtypes=False, tol=tol)
  if dtype in (inexact_dtypes + [None,]):
    self._CompileAndCheck(jnp_op, args_maker,
                          check_dtypes=False, atol=tol, rtol=tol)
def testDisableNumpyRankPromotionBroadcasting(self):
  """Checks the jax_numpy_rank_promotion flag: allow, raise, and warn modes."""
  # Each section saves the flag, mutates it, then restores it in `finally`
  # so a failure cannot leak the setting into other tests.
  try:
    prev_flag = config.jax_numpy_rank_promotion
    FLAGS.jax_numpy_rank_promotion = "allow"
    jnp.ones(2) + jnp.ones((1, 2))  # works just fine
  finally:
    FLAGS.jax_numpy_rank_promotion = prev_flag

  try:
    prev_flag = config.jax_numpy_rank_promotion
    FLAGS.jax_numpy_rank_promotion = "raise"
    self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
  finally:
    FLAGS.jax_numpy_rank_promotion = prev_flag

  try:
    prev_flag = config.jax_numpy_rank_promotion
    FLAGS.jax_numpy_rank_promotion = "warn"
    with warnings.catch_warnings(record=True) as w:
      warnings.simplefilter("always")
      jnp.ones(2) + jnp.ones((1, 2))
      assert len(w) > 0
      msg = str(w[-1].message)
      expected_msg = ("Following NumPy automatic rank promotion for add on "
                      "shapes (2,) (1, 2).")
      self.assertEqual(msg[:len(expected_msg)], expected_msg)

      prev_len = len(w)
      jnp.ones(2) + 3
      self.assertEqual(len(w), prev_len)  # don't want to warn for scalars
  finally:
    FLAGS.jax_numpy_rank_promotion = prev_flag
@unittest.skip("Test fails on CI, perhaps due to JIT caching")
def testDisableNumpyRankPromotionBroadcastingDecorator(self):
  """Same as the flag test above, via the jax.numpy_rank_promotion context."""
  with jax.numpy_rank_promotion("allow"):
    jnp.ones(2) + jnp.ones((1, 2))  # works just fine

  with jax.numpy_rank_promotion("raise"):
    self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))

  with jax.numpy_rank_promotion("warn"):
    with warnings.catch_warnings(record=True) as w:
      warnings.simplefilter("always")
      jnp.ones(2) + jnp.ones((1, 2))
      assert len(w) > 0
      msg = str(w[-1].message)
      expected_msg = ("Following NumPy automatic rank promotion for add on "
                      "shapes (2,) (1, 2).")
      self.assertEqual(msg[:len(expected_msg)], expected_msg)

      prev_len = len(w)
      jnp.ones(2) + 3
      self.assertEqual(len(w), prev_len)  # don't want to warn for scalars
def testStackArrayArgument(self):
  # tests https://github.com/google/jax/issues/1271
  # `foo` is intentionally redefined: both stack and concatenate must accept
  # a single array argument under jit without crashing.
  @jax.jit
  def foo(x):
    return jnp.stack(x)
  foo(np.zeros(2))  # doesn't crash

  @jax.jit
  def foo(x):
    return jnp.concatenate(x)
  foo(np.zeros((2, 2)))  # doesn't crash
def testReluGradientConstants(self):
  # This is a regression test that verifies that constants associated with the
  # gradient of np.maximum (from lax._balanced_eq) aren't hoisted into the
  # outermost jaxpr. This was producing some large materialized constants for
  # every relu activation in a model.
  def body(i, xy):
    x, y = xy
    y = y + jax.grad(lambda z: jnp.sum(jnp.maximum(z, 0.)))(x)
    return x, y

  f = lambda y: lax.fori_loop(0, 5, body, (y, y))
  jaxpr = jax.make_jaxpr(f)(np.zeros((3, 4), np.float32))
  # No materialized (3, 4)-shaped constant of 2s may appear in the jaxpr.
  self.assertFalse(
    any(np.array_equal(x, np.full((3, 4), 2., dtype=np.float32))
        for x in jaxpr.consts))
@parameterized.named_parameters(
    {"testcase_name": "_from={}_to={}".format(from_shape, to_shape),
     "from_shape": from_shape, "to_shape": to_shape}
    for from_shape, to_shape in [
        [(1, 3), (4, 3)],
        [(3,), (2, 1, 3)],
        [(3,), (3, 3)],
        [(1,), (3,)],
        [(1,), 3],
    ])
def testBroadcastTo(self, from_shape, to_shape):
  """Checks jnp.broadcast_to against np.broadcast_to for valid shape pairs."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, [from_shape], [np.float32])
  np_op = lambda x: np.broadcast_to(x, to_shape)
  jnp_op = lambda x: jnp.broadcast_to(x, to_shape)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(
    {"testcase_name": f"_{shapes}", "shapes": shapes, "broadcasted_shape": broadcasted_shape}
    for shapes, broadcasted_shape in [
        [[], ()],
        [[()], ()],
        [[(1, 3), (4, 3)], (4, 3)],
        [[(3,), (2, 1, 3)], (2, 1, 3)],
        [[(3,), (3, 3)], (3, 3)],
        [[(1,), (3,)], (3,)],
        [[(1,), 3], (3,)],
        [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
        [[[1], [0, 1]], (0, 1)],
        [[(1,), np.array([0, 1])], (0, 1)],
    ])
def testBroadcastShapes(self, shapes, broadcasted_shape):
  """Checks jnp.broadcast_shapes on lists, tuples and array-valued shapes."""
  # Test against np.broadcast_shapes once numpy 1.20 is minimum required version
  np.testing.assert_equal(jnp.broadcast_shapes(*shapes), broadcasted_shape)
def testBroadcastToIssue1522(self):
  """Broadcasting to an incompatible shape must raise a ValueError."""
  with self.assertRaisesRegex(ValueError,
                              "Incompatible shapes for broadcasting: .*"):
    jnp.broadcast_to(np.ones((2, 3)), (1, 3))
def testBroadcastToIntIssue1548(self):
  """A Python scalar should broadcast to the requested shape."""
  broadcast = jnp.broadcast_to(1, (3, 2))
  self.assertAllClose(broadcast, np.ones((3, 2)), check_dtypes=False)
def testBroadcastToOnScalar(self):
  """Broadcasting a scalar to () must yield an array in jnp as in np."""
  jnp_result = jnp.broadcast_to(10.0, ())
  np_result = np.broadcast_to(10.0, ())
  self.assertIsInstance(jnp_result, jnp.ndarray)
  self.assertIsInstance(np_result, np.ndarray)
def testPrecision(self):
  """Verify that `precision=` kwargs reach the underlying dot operations."""
  ones_1d = np.ones((2,))
  ones_2d = np.ones((2, 2))
  ones_3d = np.ones((2, 2, 2))
  HIGHEST = lax.Precision.HIGHEST

  # With no precision argument, dot_general carries no precision constraint.
  jtu.assert_dot_precision(None, jnp.dot, ones_1d, ones_1d)

  # Each (function, lhs, rhs) case must propagate HIGHEST into dot_general.
  cases = [
      (partial(jnp.dot, precision=HIGHEST), ones_1d, ones_1d),
      (partial(jnp.dot, precision=HIGHEST), ones_3d, ones_3d),
      (partial(jnp.matmul, precision=HIGHEST), ones_2d, ones_2d),
      (partial(jnp.vdot, precision=HIGHEST), ones_1d, ones_1d),
      (partial(jnp.tensordot, axes=2, precision=HIGHEST), ones_2d, ones_2d),
      (partial(jnp.tensordot, axes=(0, 0), precision=HIGHEST), ones_1d, ones_1d),
      (partial(jnp.tensordot, axes=((0,), (0,)), precision=HIGHEST), ones_1d, ones_1d),
      (partial(jnp.einsum, 'i,i', precision=HIGHEST), ones_1d, ones_1d),
      (partial(jnp.einsum, 'ij,ij', precision=HIGHEST), ones_2d, ones_2d),
      (partial(jnp.inner, precision=HIGHEST), ones_1d, ones_1d),
  ]
  for fun, lhs, rhs in cases:
    jtu.assert_dot_precision(HIGHEST, fun, lhs, rhs)
@parameterized.named_parameters(
    jtu.cases_from_list(
      {"testcase_name": "_shape={}_varargs={} axis={}_dtype={}".format(
          shape, varargs, axis, dtype),
       "shape": shape, "varargs": varargs, "axis": axis, "dtype": dtype}
      for shape in [(10,), (10, 15), (10, 15, 20)]
      for _num_axes in range(len(shape))
      for varargs in itertools.combinations(range(1, len(shape) + 1), _num_axes)
      for axis in itertools.combinations(range(len(shape)), _num_axes)
      for dtype in inexact_dtypes))
def testGradient(self, shape, varargs, axis, dtype):
  """Checks jnp.gradient against np.gradient with varargs spacings/axes."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  jnp_fun = lambda y: jnp.gradient(y, *varargs, axis=axis)
  np_fun = lambda y: np.gradient(y, *varargs, axis=axis)
  self._CheckAgainstNumpy(
      np_fun, jnp_fun, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker)
def testZerosShapeErrors(self):
  """Non-integer shapes must fail with a helpful error, esp. under jit."""
  # see https://github.com/google/jax/issues/1822
  self.assertRaisesRegex(
      TypeError,
      "Shapes must be 1D sequences of concrete values of integer type.*",
      lambda: jnp.zeros(1.))
  # Under jit, the error message should additionally suggest static_argnums.
  self.assertRaisesRegex(
      TypeError,
      r"Shapes must be 1D sequences of concrete values of integer type.*\n"
      "If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.",
      lambda: jax.jit(jnp.zeros)(2))
def testTraceMethod(self):
  """The .trace() method must match NumPy, both eagerly and under jit."""
  x = self.rng().randn(3, 4).astype(jnp.float_)
  traced = jax.jit(lambda arr: arr.trace())
  self.assertAllClose(x.trace(), jnp.array(x).trace())
  self.assertAllClose(x.trace(), traced(x))
def testIntegerPowersArePrecise(self):
  # See https://github.com/google/jax/pull/3036
  # Checks if the squares of float32 integers have no numerical errors.
  # It should be satisfied with all integers less than sqrt(2**24).
  x = jnp.arange(-2**12, 2**12, dtype=jnp.int32)
  np.testing.assert_array_equal(jnp.square(x.astype(jnp.float32)), x * x)
  np.testing.assert_array_equal(x.astype(jnp.float32) ** 2, x * x)

  # Similarly for cubes.
  x = jnp.arange(-2**8, 2**8, dtype=jnp.int32)
  np.testing.assert_array_equal(x.astype(jnp.float32) ** 3, x * x * x)

  # Small integer powers up to 10 should also be exact.
  x = np.arange(10, dtype=np.float32)
  for i in range(10):
    self.assertAllClose(x.astype(jnp.float32) ** i, x ** i,
                        check_dtypes=False)
def testToBytes(self):
  """tobytes() must agree with NumPy for both C and Fortran order."""
  v = np.arange(12, dtype=np.int32).reshape(3, 4)
  jnp_v = jnp.asarray(v)
  for order in ('C', 'F'):
    self.assertEqual(jnp_v.tobytes(order), v.tobytes(order))
def testToList(self):
  """tolist() must produce the same nested Python lists as NumPy."""
  v = np.arange(12, dtype=np.int32).reshape(3, 4)
  self.assertEqual(v.tolist(), jnp.asarray(v).tolist())
def testReductionWithRepeatedAxisError(self):
  """Repeated axes in a reduction's axis tuple must be rejected."""
  values = jnp.arange(3)
  with self.assertRaisesRegex(ValueError, r"duplicate value in 'axis': \(0, 0\)"):
    jnp.sum(values, (0, 0))
def testArangeConcretizationError(self):
  """Traced arange bounds must raise errors naming the offending argument."""
  msg = r"It arose in jax.numpy.arange argument `{}`".format
  with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):
    jax.jit(jnp.arange)(3)

  with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('start')):
    jax.jit(lambda start: jnp.arange(start, 3))(0)

  with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):
    jax.jit(lambda stop: jnp.arange(0, stop))(3)
def testIssue2347(self):
# https://github.com/google/jax/issues/2347
object_list = List[Tuple[jnp.array, float, float, jnp.array, bool]]
self.assertRaises(TypeError, jnp.array, object_list)
np_object_list = np.array(object_list)
self.assertRaises(TypeError, jnp.array, np_object_list)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
       "shapes": shapes, "dtypes": dtypes}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(all_shapes, 2))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogaddexpComplex(self, shapes, dtypes):
    """Compare jnp.logaddexp against a direct numpy formulation on complex inputs."""
    @jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")
    def np_op(x1, x2):
      # Reference implementation: log(e**x1 + e**x2).
      return np.log(np.exp(x1) + np.exp(x2))
    # Inputs include NaNs to exercise NaN propagation.
    rng = jtu.rand_some_nan(self.rng())
    args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))
    # Looser tolerances on TPU.
    if jtu.device_under_test() == 'tpu':
      tol = {np.complex64: 1e-3, np.complex128: 1e-10}
    else:
      tol = {np.complex64: 1e-5, np.complex128: 1e-14}
    self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp, args_maker, tol=tol)
    self._CompileAndCheck(jnp.logaddexp, args_maker, rtol=tol, atol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
       "shapes": shapes, "dtypes": dtypes}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(all_shapes, 2))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogaddexp2Complex(self, shapes, dtypes):
    """Compare jnp.logaddexp2 against a direct numpy formulation on complex inputs."""
    @jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")
    def np_op(x1, x2):
      # Reference implementation: log2(2**x1 + 2**x2).
      return np.log2(np.exp2(x1) + np.exp2(x2))
    # Inputs include NaNs to exercise NaN propagation.
    rng = jtu.rand_some_nan(self.rng())
    args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))
    # Looser tolerances on TPU.
    if jtu.device_under_test() == 'tpu':
      tol = {np.complex64: 1e-3, np.complex128: 1e-10}
    else:
      tol = {np.complex64: 1e-5, np.complex128: 1e-14}
    self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp2, args_maker, tol=tol)
    self._CompileAndCheck(jnp.logaddexp2, args_maker, rtol=tol, atol=tol)
# Most gradient tests live at the lax level (see lax_test.py); the specs built
# here cover particular compound jax.numpy ops of interest.
GradTestSpec = collections.namedtuple(
    "GradTestSpec", ["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])


def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
  """Build a GradTestSpec; the record name defaults to the op's __name__."""
  return GradTestSpec(op=op, nargs=nargs, order=order, rng_factory=rng_factory,
                      dtypes=dtypes, name=name or op.__name__, tol=tol)
# Records consumed by NumpyGradTests.testOpGrad below: each entry fixes the op,
# its arity, the highest derivative order checked, the input sampler, the
# dtypes to test, and an optional per-dtype tolerance.
GRAD_TEST_RECORDS = [
    grad_test_spec(jnp.arcsinh, nargs=1, order=2,
                   rng_factory=jtu.rand_positive,
                   dtypes=[np.float64, np.complex64],
                   tol={np.complex64: 2e-2}),
    grad_test_spec(jnp.arccosh, nargs=1, order=2,
                   rng_factory=jtu.rand_positive,
                   dtypes=[np.float64, np.complex64],
                   tol={np.complex64: 2e-2}),
    # arctanh inputs are kept strictly inside (-1, 1), away from the poles.
    grad_test_spec(jnp.arctanh, nargs=1, order=2,
                   rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                   dtypes=[np.float64, np.complex64],
                   tol={np.complex64: 2e-2}),
    grad_test_spec(jnp.logaddexp, nargs=2, order=1,
                   rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                   dtypes=[np.float64], tol=1e-4),
    grad_test_spec(jnp.logaddexp2, nargs=2, order=2,
                   rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                   dtypes=[np.float64], tol=1e-4),
]
# (op, concrete scalar inputs, derivative order) triples used by
# NumpyGradTests.testOpGradSpecialValue to spot-check gradients at
# particular points.
GradSpecialValuesTestSpec = collections.namedtuple(
    "GradSpecialValuesTestSpec", ["op", "values", "order"])
GRAD_SPECIAL_VALUE_TEST_RECORDS = [
    GradSpecialValuesTestSpec(jnp.arcsinh, [0., 1000.], 2),
    GradSpecialValuesTestSpec(jnp.arccosh, [1000.], 2),
    GradSpecialValuesTestSpec(jnp.arctanh, [0.], 2),
    GradSpecialValuesTestSpec(jnp.sinc, [0.], 1),
]
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyGradTests(jtu.JaxTestCase):
  """Gradient checks for selected jax.numpy ops (bulk coverage is in lax_test.py)."""

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
          {"testcase_name": jtu.format_test_name_suffix(
              rec.name, shapes, itertools.repeat(dtype)),
           "op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
           "order": rec.order, "tol": rec.tol}
          for shapes in itertools.combinations_with_replacement(nonempty_shapes, rec.nargs)
          for dtype in rec.dtypes)
      for rec in GRAD_TEST_RECORDS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
    """Numerically check fwd- and rev-mode derivatives of `op` up to `order`."""
    rng = rng_factory(self.rng())
    # Combine the per-record tolerance with dtype-dependent floors.
    tol = jtu.join_tolerance(tol, {np.float32: 1e-1, np.float64: 1e-3,
                                   np.complex64: 1e-1, np.complex128: 1e-3})
    args = tuple(rng(shape, dtype) for shape in shapes)
    check_grads(op, args, order, ["fwd", "rev"], tol, tol)

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
          {"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
           "op": rec.op, "special_value": special_value, "order": rec.order}
          for special_value in rec.values)
      for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
  def testOpGradSpecialValue(self, op, special_value, order):
    """Check derivatives at specific scalar inputs from the records above."""
    check_grads(op, (special_value,), order, ["fwd", "rev"],
                atol={np.float32: 3e-3})

  def testSincAtZero(self):
    # Some manual tests for sinc at zero, since it doesn't have well-behaved
    # numerical derivatives at zero
    def deriv(f):
      # Single forward-mode (jvp) derivative of f at a scalar.
      return lambda x: jax.jvp(f, (x,), (1.,))[1]

    def apply_all(fns, x):
      # Compose the given transformations in sequence and apply them to x.
      for f in fns:
        x = f(x)
      return x

    # Expected derivative values of sinc at 0, asserted for every mix of
    # forward-mode (deriv) and reverse-mode (jax.grad) differentiation.
    d1 = 0.
    for ops in itertools.combinations_with_replacement([deriv, jax.grad], 1):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d1)

    d2 = -np.pi ** 2 / 3
    for ops in itertools.combinations_with_replacement([deriv, jax.grad], 2):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d2)

    d3 = 0.
    for ops in itertools.combinations_with_replacement([deriv, jax.grad], 3):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d3)

    d4 = np.pi ** 4 / 5
    for ops in itertools.combinations_with_replacement([deriv, jax.grad], 4):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d4)

  def testSincGradArrayInput(self):
    # tests for a bug almost introduced in #5077
    jax.grad(lambda x: jnp.sinc(x).sum())(jnp.arange(10.))  # doesn't crash

  def testTakeAlongAxisIssue1521(self):
    # https://github.com/google/jax/issues/1521
    idx = jnp.repeat(jnp.arange(3), 10).reshape((30, 1))

    def f(x):
      y = x * jnp.arange(3.).reshape((1, 3))
      return jnp.take_along_axis(y, idx, -1).sum()

    check_grads(f, (1.,), order=1)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, itertools.repeat(dtype)),
       "shapes": shapes, "dtype": dtype}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(nonempty_shapes, 2))
      for dtype in (np.complex128, )))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGradLogaddexpComplex(self, shapes, dtype):
    """First-order gradient check of logaddexp on complex inputs."""
    rng = jtu.rand_default(self.rng())
    args = tuple(rng(shape, dtype) for shape in shapes)
    # Looser tolerance on TPU.
    if jtu.device_under_test() == "tpu":
      tol = 5e-2
    else:
      tol = 3e-2
    check_grads(jnp.logaddexp, args, 1, ["fwd", "rev"], tol, tol)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, itertools.repeat(dtype)),
       "shapes": shapes, "dtype": dtype}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(nonempty_shapes, 2))
      for dtype in (np.complex128, )))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGradLogaddexp2Complex(self, shapes, dtype):
    """First-order gradient check of logaddexp2 on complex inputs."""
    rng = jtu.rand_default(self.rng())
    args = tuple(rng(shape, dtype) for shape in shapes)
    # Looser tolerance on TPU.
    if jtu.device_under_test() == "tpu":
      tol = 5e-2
    else:
      tol = 3e-2
    check_grads(jnp.logaddexp2, args, 1, ["fwd", "rev"], tol, tol)
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpySignaturesTest(jtu.JaxTestCase):
  """Checks that jax.numpy wrappers stay signature-compatible with numpy."""

  def testWrappedSignaturesMatch(self):
    """Test that jax.numpy function signatures match numpy."""
    jnp_funcs = {name: getattr(jnp, name) for name in dir(jnp)}
    # Only functions that record the numpy function they wrap are checked.
    func_pairs = {name: (fun, fun.__np_wrapped__) for name, fun in jnp_funcs.items()
                  if hasattr(fun, '__np_wrapped__')}
    assert len(func_pairs) > 0

    # TODO(jakevdp): fix some of the following signatures. Some are due to wrong argument names.
    # numpy parameters that the jnp wrapper deliberately does not accept.
    unsupported_params = {
      'angle': ['deg'],
      'asarray': ['like'],
      'broadcast_to': ['subok', 'array'],
      'clip': ['kwargs'],
      'corrcoef': ['ddof', 'bias', 'dtype'],
      'cov': ['dtype'],
      'empty_like': ['subok', 'order'],
      'einsum': ['kwargs'],
      'einsum_path': ['einsum_call'],
      'eye': ['order', 'like'],
      'identity': ['like'],
      'full': ['order', 'like'],
      'full_like': ['subok', 'order'],
      'histogram': ['normed'],
      'histogram2d': ['normed'],
      'histogramdd': ['normed'],
      'ones': ['order', 'like'],
      'ones_like': ['subok', 'order'],
      'tri': ['like'],
      'unwrap': ['period'],
      'zeros_like': ['subok', 'order']
    }

    # jnp-only parameters with no numpy counterpart.
    extra_params = {
      'broadcast_to': ['arr'],
      'einsum': ['precision'],
      'einsum_path': ['subscripts'],
    }

    mismatches = {}

    for name, (jnp_fun, np_fun) in func_pairs.items():
      # broadcast_shapes is not available in numpy < 1.20
      if numpy_version < (1, 20) and name == "broadcast_shapes":
        continue
      # Some signatures have changed; skip for older numpy versions.
      if numpy_version < (1, 19) and name in ['einsum_path', 'gradient', 'isscalar']:
        continue
      # Note: can't use inspect.getfullargspec due to numpy issue
      # https://github.com/numpy/numpy/issues/12225
      try:
        np_params = inspect.signature(np_fun).parameters
      except ValueError:
        # Some functions cannot be inspected
        continue
      jnp_params = inspect.signature(jnp_fun).parameters
      extra = set(extra_params.get(name, []))
      unsupported = set(unsupported_params.get(name, []))

      # Checks to prevent tests from becoming out-of-date. If these fail,
      # it means that extra_params or unsupported_params need to be updated.
      assert extra.issubset(jnp_params), f"{name}: extra={extra} is not a subset of jnp_params={set(jnp_params)}."
      assert not unsupported.intersection(jnp_params), f"{name}: unsupported={unsupported} overlaps with jnp_params={set(jnp_params)}."

      # Skip functions that only have *args and **kwargs; we can't introspect these further.
      var_args = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
      if all(p.kind in var_args for p in jnp_params.values()):
        continue
      if all(p.kind in var_args for p in np_params.values()):
        continue

      # Remove known extra parameters.
      jnp_params = {a: p for a, p in jnp_params.items() if a not in extra}

      # Remove known unsupported parameters.
      np_params = {a: p for a, p in np_params.items() if a not in unsupported}

      # Older versions of numpy may have fewer parameters; to avoid extraneous errors on older numpy
      # versions, we allow for jnp to have more parameters.
      if list(jnp_params)[:len(np_params)] != list(np_params):
        mismatches[name] = {'np_params': list(np_params), 'jnp_params': list(jnp_params)}

    self.assertEqual(mismatches, {})
_all_dtypes: List[str] = [
"bool_",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64",
"complex64", "complex128",
]
def _all_numpy_ufuncs() -> Iterator[str]:
"""Generate the names of all ufuncs in the top-level numpy namespace."""
for name in dir(np):
f = getattr(np, name)
if isinstance(f, np.ufunc):
yield name
def _dtypes_for_ufunc(name: str) -> Iterator[Tuple[str, ...]]:
"""Generate valid dtypes of inputs to the given numpy ufunc."""
func = getattr(np, name)
for arg_dtypes in itertools.product(_all_dtypes, repeat=func.nin):
args = (np.ones(1, dtype=dtype) for dtype in arg_dtypes)
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "divide by zero", RuntimeWarning)
_ = func(*args)
except TypeError:
pass
else:
yield arg_dtypes
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyUfuncTests(jtu.JaxTestCase):
  """Compares each numpy ufunc's jnp counterpart across all accepted dtypes."""

  @parameterized.named_parameters(
      {"testcase_name": f"_{name}_{','.join(arg_dtypes)}",
       "name": name, "arg_dtypes": arg_dtypes}
      for name in _all_numpy_ufuncs()
      for arg_dtypes in jtu.cases_from_list(_dtypes_for_ufunc(name)))
  def testUfuncInputTypes(self, name, arg_dtypes):
    """Check that jnp.<name> agrees with np.<name> for one dtype combination."""
    # TODO(jakevdp): fix following failures and remove from this exception list.
    if (name in ['divmod', 'floor_divide', 'fmod', 'gcd', 'left_shift', 'mod',
                 'power', 'remainder', 'right_shift', 'rint', 'square']
        and 'bool_' in arg_dtypes):
      self.skipTest(f"jax.numpy does not support {name}{tuple(arg_dtypes)}")
    if name == 'arctanh' and jnp.issubdtype(arg_dtypes[0], jnp.complexfloating):
      self.skipTest("np.arctanh & jnp.arctanh have mismatched NaNs for complex input.")

    for dtype in arg_dtypes:
      jtu.skip_if_unsupported_type(dtype)

    jnp_op = getattr(jnp, name)
    np_op = getattr(np, name)
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="divide by zero.*")(np_op)
    args_maker = lambda: tuple(np.ones(1, dtype=dtype) for dtype in arg_dtypes)

    try:
      jnp_op(*args_maker())
    except NotImplementedError:
      self.skipTest(f"jtu.{name} is not yet implemented.")

    # large tol comes from the fact that numpy returns float16 in places
    # that jnp returns float32. e.g. np.cos(np.uint8(0))
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker, check_dtypes=False, tol=1E-2)
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyDocTests(jtu.JaxTestCase):
  """Sanity checks for jax.numpy's docstring-wrapping machinery."""

  def test_lax_numpy_docstrings(self):
    # Test that docstring wrapping & transformation didn't fail.

    # Functions that have their own docstrings & don't wrap numpy.
    known_exceptions = {'broadcast_arrays', 'vectorize'}

    for name in dir(jnp):
      if name in known_exceptions or name.startswith('_'):
        continue

      # We only check signatures of functions.
      obj = getattr(jnp, name)
      if isinstance(obj, type) or not callable(obj):
        continue

      # Some jnp functions are imported from numpy or jax.dtypes directly.
      if any(obj is getattr(mod, obj.__name__, None) for mod in [np, dtypes]):
        continue

      wrapped_fun = obj.__np_wrapped__

      # If the wrapped function has a docstring, obj should too
      if wrapped_fun.__doc__ and not obj.__doc__:
        raise Exception(f"jnp.{name} does not contain wrapped docstring.")

      if obj.__doc__ and "*Original docstring below.*" not in obj.__doc__:
        raise Exception(f"jnp.{name} does not have a wrapped docstring.")

  def test_parse_numpydoc(self):
    # Unit test ensuring that _parse_numpydoc correctly parses docstrings for all
    # functions in NumPy's top-level namespace.
    section_titles = {'Attributes', 'Examples', 'Notes',
                      'Parameters', 'Raises', 'References',
                      'Returns', 'See also', 'See Also', 'Warnings', 'Warns'}
    # A numpydoc section heading is its title underlined with dashes.
    headings = [title + '\n' + '-'*len(title) for title in section_titles]

    for name in dir(np):
      if name.startswith('_'):
        continue
      obj = getattr(np, name)
      if isinstance(obj, type):
        continue
      if not callable(obj):
        continue
      if 'built-in function' in repr(obj):
        continue
      parsed = _parse_numpydoc(obj.__doc__)

      # Check that no docstring is handled gracefully.
      if not obj.__doc__:
        self.assertEqual(parsed, ParsedDoc(obj.__doc__))
        continue

      # Check that no unexpected section names are found.
      extra_keys = parsed.sections.keys() - section_titles
      if extra_keys:
        raise ValueError(f"Extra section headers found in np.{name}: {extra_keys}")

      # Check that every docstring has a summary.
      if not parsed.summary:
        raise ValueError(f"No summary found for np.{name}")

      # Check that no expected headings are missed.
      for heading in headings:
        assert heading not in parsed.front_matter
# Allow running this test file directly as a script.
if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for accessing Nvidia MMARs
See Also:
- https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html
"""
import json
import os
import warnings
from typing import Mapping, Union
import torch
import monai.networks.nets as monai_nets
from monai.apps.utils import download_and_extract, logger
from monai.utils.module import optional_import
from .model_desc import MODEL_DESC
from .model_desc import RemoteMMARKeys as Keys
__all__ = ["get_model_spec", "download_mmar", "load_from_mmar"]
def get_model_spec(idx: Union[int, str]):
    """Look up a model spec by integer index into MODEL_DESC or by (case-insensitive) model ID.

    Raises:
        ValueError: when no spec matches `idx`; the available specs are logged first.
    """
    if isinstance(idx, int):
        return MODEL_DESC[idx]
    if isinstance(idx, str):
        wanted = idx.strip().lower()
        for candidate in MODEL_DESC:
            if str(candidate[Keys.ID]).strip().lower() == wanted:
                return candidate
    logger.info(f"Available specs are: {MODEL_DESC}.")
    raise ValueError(f"Unknown MODEL_DESC request: {idx}")
def _get_all_ngc_models(pattern, page_index=0, page_size=50):
    """Search the NGC model catalog for models whose name matches ``pattern``.

    Returns:
        dict mapping NGC ``resourceId`` to ``{"name": ..., "latest": ...}``;
        ``latest`` is filled from the ``latestVersionIdStr`` attribute when present.
    """
    base_url = "https://api.ngc.nvidia.com/v2/search/catalog/resources/MODEL"
    query = {
        "query": "",
        "orderBy": [{"field": "score", "value": "DESC"}],
        "queryFields": ["all", "description", "displayName", "name", "resourceId"],
        "fields": [
            "isPublic",
            "attributes",
            "guestAccess",
            "name",
            "orgName",
            "teamName",
            "displayName",
            "dateModified",
            "labels",
            "description",
        ],
        "page": page_index,
        "pageSize": page_size,
        "filters": [dict(field="name", value=f"*{pattern}*")],
    }
    # The search API takes the whole JSON query as the `q` URL parameter.
    full_url = f"{base_url}?q={json.dumps(query)}"
    requests_get, has_requests = optional_import("requests", name="get")
    if not has_requests:
        raise ValueError("NGC API requires requests package. Please install it.")
    resp = requests_get(full_url)
    payload = json.loads(resp.text)
    found = {}
    for result in payload["results"]:
        for model in result["resources"]:
            res_id = model["resourceId"]
            found[res_id] = {"name": model["name"]}
            for attribute in model["attributes"]:
                if attribute["key"] == "latestVersionIdStr":
                    found[res_id]["latest"] = attribute["value"]
    return found
def _get_ngc_url(model_name: str, version: str, model_prefix=""):
return f"https://api.ngc.nvidia.com/v2/models/{model_prefix}{model_name}/versions/{version}/zip"
def _get_ngc_doc_url(model_name: str, model_prefix=""):
return f"https://ngc.nvidia.com/catalog/models/{model_prefix}{model_name}"
def download_mmar(item, mmar_dir=None, progress: bool = True, api: bool = False, version: int = -1):
    """
    Download and extract Medical Model Archive (MMAR) from Nvidia Clara Train.

    See Also:
        - https://docs.nvidia.com/clara/
        - Nvidia NGC Registry CLI
        - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html

    Args:
        item: the corresponding model item from `MODEL_DESC`.
            Or when api is True, the substring to query NGC's model name field.
        mmar_dir: target directory to store the MMAR, default is `mmars` subfolder under `torch.hub get_dir()`.
        progress: whether to display a progress bar.
        api: whether to query NGC and download via api
        version: which version of MMAR to download. -1 means the latest from ngc.

    Examples::
        >>> from monai.apps import download_mmar
        >>> download_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".")
        >>> download_mmar("prostate_mri_segmentation", mmar_dir=".", api=True)

    Returns:
        The local directory of the downloaded model.
        If api is True, a list of local directories of downloaded models.
    """
    if not mmar_dir:
        # torch.hub.get_dir only exists in PyTorch >= 1.6.
        get_dir, has_home = optional_import("torch.hub", name="get_dir")
        if has_home:
            mmar_dir = os.path.join(get_dir(), "mmars")
        else:
            raise ValueError("mmar_dir=None, but no suitable default directory computed. Upgrade Pytorch to 1.6+ ?")
    if api:
        model_dict = _get_all_ngc_models(item)
        if len(model_dict) == 0:
            raise ValueError(f"api query returns no item for pattern {item}. Please change or shorten it.")
        model_dir_list = []
        for k, v in model_dict.items():
            ver = v["latest"] if version == -1 else str(version)
            download_url = _get_ngc_url(k, ver)
            model_dir = os.path.join(mmar_dir, v["name"])
            download_and_extract(
                url=download_url,
                # fix: the f-string must not reuse the outer quote character for the
                # ['name'] subscript -- that is a SyntaxError on Python < 3.12.
                filepath=os.path.join(mmar_dir, f"{v['name']}_{ver}.zip"),
                output_dir=model_dir,
                hash_val=None,
                hash_type="md5",
                file_type="zip",
                has_base=False,
                progress=progress,
            )
            model_dir_list.append(model_dir)
        return model_dir_list

    if not isinstance(item, Mapping):
        item = get_model_spec(item)
    # spec-provided version unless the caller pinned an explicit positive one
    ver = item.get(Keys.VERSION, 1)
    if version > 0:
        ver = str(version)
    model_fullname = f"{item[Keys.NAME]}_{ver}"
    model_dir = os.path.join(mmar_dir, model_fullname)
    # prefer an explicit URL from the spec, else construct the NGC one
    model_url = item.get(Keys.URL) or _get_ngc_url(item[Keys.NAME], version=ver, model_prefix="nvidia/med/")
    download_and_extract(
        url=model_url,
        filepath=os.path.join(mmar_dir, f"{model_fullname}.{item[Keys.FILE_TYPE]}"),
        output_dir=model_dir,
        hash_val=item[Keys.HASH_VAL],
        hash_type=item[Keys.HASH_TYPE],
        file_type=item[Keys.FILE_TYPE],
        has_base=False,
        progress=progress,
    )
    return model_dir
def load_from_mmar(
    item,
    mmar_dir=None,
    progress: bool = True,
    version: int = -1,
    map_location=None,
    pretrained=True,
    weights_only=False,
    model_key: str = "model",
):
    """
    Download and extract Medical Model Archive (MMAR) model weights from Nvidia Clara Train.

    Args:
        item: the corresponding model item from `MODEL_DESC`.
        mmar_dir: target directory to store the MMAR, default is mmars subfolder under `torch.hub get_dir()`.
        progress: whether to display a progress bar when downloading the content.
        version: version number of the MMAR. Set it to `-1` to use `item[Keys.VERSION]`.
        map_location: pytorch API parameter for `torch.load` or `torch.jit.load`.
        pretrained: whether to load the pretrained weights after initializing a network module.
        weights_only: whether to load only the weights instead of initializing the network module and assign weights.
        model_key: a key to search in the model file or config file for the model dictionary.
            Currently this function assumes that the model dictionary has
            `{"[name|path]": "test.module", "args": {'kw': 'test'}}`.

    Examples::
        >>> from monai.apps import load_from_mmar
        >>> unet_model = load_from_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".", map_location="cpu")
        >>> print(unet_model)

    See Also:
        https://docs.nvidia.com/clara/
    """
    if not isinstance(item, Mapping):
        item = get_model_spec(item)
    model_dir = download_mmar(item=item, mmar_dir=mmar_dir, progress=progress, version=version)
    model_file = os.path.join(model_dir, item[Keys.MODEL_FILE])
    logger.info(f'\n*** "{item[Keys.ID]}" available at {model_dir}.')

    # loading with `torch.jit.load`
    if f"{model_file}".endswith(".ts"):
        if not pretrained:
            warnings.warn("Loading a ScriptModule, 'pretrained' option ignored.")
        if weights_only:
            warnings.warn("Loading a ScriptModule, 'weights_only' option ignored.")
        return torch.jit.load(model_file, map_location=map_location)

    # loading with `torch.load`
    model_dict = torch.load(model_file, map_location=map_location)
    if weights_only:
        return model_dict.get(model_key, model_dict)  # model_dict[model_key] or model_dict directly

    # 1. search `model_dict['train_conf']` for model config spec.
    model_config = _get_val(dict(model_dict).get("train_conf", {}), key=model_key, default={})
    if not model_config:
        # 2. search json CONFIG_FILE for model config spec.
        json_path = os.path.join(model_dir, item.get(Keys.CONFIG_FILE, "config_train.json"))
        with open(json_path) as f:
            conf_dict = json.load(f)
        conf_dict = dict(conf_dict)
        model_config = _get_val(conf_dict, key=model_key, default={})
    if not model_config:
        # 3. search `model_dict` for model config spec.
        model_config = _get_val(dict(model_dict), key=model_key, default={})

    if not (model_config and isinstance(model_config, Mapping)):
        raise ValueError(
            f"Could not load model config dictionary from config: {item.get(Keys.CONFIG_FILE)}, "
            f"or from model file: {item.get(Keys.MODEL_FILE)}."
        )

    # parse `model_config` for model class and model parameters
    if model_config.get("name"):  # model config section is a "name"
        model_name = model_config["name"]
        model_cls = monai_nets.__dict__[model_name]
    elif model_config.get("path"):  # model config section is a "path"
        # https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html
        model_module, model_name = model_config.get("path", ".").rsplit(".", 1)
        model_cls, has_cls = optional_import(module=model_module, name=model_name)
        if not has_cls:
            # fix: use single quotes inside the f-string replacement field; reusing
            # the outer double quote is a SyntaxError on Python < 3.12.
            raise ValueError(
                f"Could not load MMAR model config {model_config.get('path', '')}, "
                f"Please make sure MMAR's sub-folders in '{model_dir}' is on the PYTHONPATH."
                "See also: https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html"
            )
    else:
        raise ValueError(f"Could not load model config {model_config}.")

    logger.info(f"*** Model: {model_cls}")
    model_kwargs = model_config.get("args", None)
    if model_kwargs:
        model_inst = model_cls(**model_kwargs)
        logger.info(f"*** Model params: {model_kwargs}")
    else:
        model_inst = model_cls()
    if pretrained:
        model_inst.load_state_dict(model_dict.get(model_key, model_dict))
    logger.info("\n---")
    doc_url = item.get(Keys.DOC) or _get_ngc_doc_url(item[Keys.NAME], model_prefix="nvidia:med:")
    logger.info(f"For more information, please visit {doc_url}\n")
    return model_inst
def _get_val(input_dict: Mapping, key="model", default=None):
"""
Search for the item with `key` in `config_dict`.
Returns: the first occurrence of `key` in a breadth first search.
"""
if key in input_dict:
return input_dict[key]
for sub_dict in input_dict:
val = input_dict[sub_dict]
if isinstance(val, Mapping):
found_val = _get_val(val, key=key, default=None)
if found_val is not None:
return found_val
return default
| # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for accessing Nvidia MMARs
See Also:
- https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html
"""
import json
import os
import warnings
from typing import Mapping, Union
import torch
import monai.networks.nets as monai_nets
from monai.apps.utils import download_and_extract, logger
from monai.utils.module import optional_import
from .model_desc import MODEL_DESC
from .model_desc import RemoteMMARKeys as Keys
__all__ = ["get_model_spec", "download_mmar", "load_from_mmar"]
def get_model_spec(idx: Union[int, str]):
    """get model specification by `idx`. `idx` could be index of the constant tuple of dict or the actual model ID."""
    if isinstance(idx, int):
        return MODEL_DESC[idx]
    if isinstance(idx, str):
        key = idx.strip().lower()  # normalize for case-insensitive ID comparison
        for cand in MODEL_DESC:
            if str(cand[Keys.ID]).strip().lower() == key:
                return cand
    # no match: log the available specs to help the caller pick a valid one
    logger.info(f"Available specs are: {MODEL_DESC}.")
    raise ValueError(f"Unknown MODEL_DESC request: {idx}")
def _get_all_ngc_models(pattern, page_index=0, page_size=50):
    """Query the NGC catalog search API for models whose name matches `pattern`.

    Returns a dict keyed by NGC ``resourceId`` with ``{"name": ..., "latest": ...}``
    values (``latest`` only when the API reports a ``latestVersionIdStr`` attribute).
    """
    url = "https://api.ngc.nvidia.com/v2/search/catalog/resources/MODEL"
    query_dict = {
        "query": "",
        "orderBy": [{"field": "score", "value": "DESC"}],
        "queryFields": ["all", "description", "displayName", "name", "resourceId"],
        "fields": [
            "isPublic",
            "attributes",
            "guestAccess",
            "name",
            "orgName",
            "teamName",
            "displayName",
            "dateModified",
            "labels",
            "description",
        ],
        "page": 0,  # placeholder; overwritten with page_index below
    }
    filter = [dict(field="name", value=f"*{pattern}*")]
    query_dict["page"] = page_index
    query_dict["pageSize"] = page_size
    query_dict["filters"] = filter
    query_str = json.dumps(query_dict)
    # the search API takes the whole JSON query as the `q` URL parameter
    full_url = f"{url}?q={query_str}"
    requests_get, has_requests = optional_import("requests", name="get")
    if has_requests:
        resp = requests_get(full_url)
    else:
        raise ValueError("NGC API requires requests package. Please install it.")
    model_list = json.loads(resp.text)
    model_dict = {}
    for result in model_list["results"]:
        for model in result["resources"]:
            current_res_id = model["resourceId"]
            model_dict[current_res_id] = {"name": model["name"]}
            for attribute in model["attributes"]:
                if attribute["key"] == "latestVersionIdStr":
                    model_dict[current_res_id]["latest"] = attribute["value"]
    return model_dict
def _get_ngc_url(model_name: str, version: str, model_prefix=""):
return f"https://api.ngc.nvidia.com/v2/models/{model_prefix}{model_name}/versions/{version}/zip"
def _get_ngc_doc_url(model_name: str, model_prefix=""):
return f"https://ngc.nvidia.com/catalog/models/{model_prefix}{model_name}"
def download_mmar(item, mmar_dir=None, progress: bool = True, api: bool = False, version: int = -1):
    """
    Download and extract Medical Model Archive (MMAR) from Nvidia Clara Train.

    See Also:
        - https://docs.nvidia.com/clara/
        - Nvidia NGC Registry CLI
        - https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html

    Args:
        item: the corresponding model item from `MODEL_DESC`.
            Or when api is True, the substring to query NGC's model name field.
        mmar_dir: target directory to store the MMAR, default is `mmars` subfolder under `torch.hub get_dir()`.
        progress: whether to display a progress bar.
        api: whether to query NGC and download via api
        version: which version of MMAR to download. -1 means the latest from ngc.

    Examples::
        >>> from monai.apps import download_mmar
        >>> download_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".")
        >>> download_mmar("prostate_mri_segmentation", mmar_dir=".", api=True)

    Returns:
        The local directory of the downloaded model.
        If api is True, a list of local directories of downloaded models.
    """
    if not mmar_dir:
        # torch.hub.get_dir only exists in PyTorch >= 1.6
        get_dir, has_home = optional_import("torch.hub", name="get_dir")
        if has_home:
            mmar_dir = os.path.join(get_dir(), "mmars")
        else:
            raise ValueError("mmar_dir=None, but no suitable default directory computed. Upgrade Pytorch to 1.6+ ?")
    if api:
        # query NGC by name substring and download every matching model
        model_dict = _get_all_ngc_models(item)
        if len(model_dict) == 0:
            raise ValueError(f"api query returns no item for pattern {item}. Please change or shorten it.")
        model_dir_list = []
        for k, v in model_dict.items():
            ver = v["latest"] if version == -1 else str(version)
            download_url = _get_ngc_url(k, ver)
            model_dir = os.path.join(mmar_dir, v["name"])
            download_and_extract(
                url=download_url,
                filepath=os.path.join(mmar_dir, f'{v["name"]}_{ver}.zip'),
                output_dir=model_dir,
                hash_val=None,
                hash_type="md5",
                file_type="zip",
                has_base=False,
                progress=progress,
            )
            model_dir_list.append(model_dir)
        return model_dir_list

    if not isinstance(item, Mapping):
        item = get_model_spec(item)
    # spec-provided version unless the caller pinned an explicit positive one
    ver = item.get(Keys.VERSION, 1)
    if version > 0:
        ver = str(version)
    model_fullname = f"{item[Keys.NAME]}_{ver}"
    model_dir = os.path.join(mmar_dir, model_fullname)
    # prefer an explicit URL from the spec, else construct the NGC one
    model_url = item.get(Keys.URL) or _get_ngc_url(item[Keys.NAME], version=ver, model_prefix="nvidia/med/")
    download_and_extract(
        url=model_url,
        filepath=os.path.join(mmar_dir, f"{model_fullname}.{item[Keys.FILE_TYPE]}"),
        output_dir=model_dir,
        hash_val=item[Keys.HASH_VAL],
        hash_type=item[Keys.HASH_TYPE],
        file_type=item[Keys.FILE_TYPE],
        has_base=False,
        progress=progress,
    )
    return model_dir
def load_from_mmar(
    item,
    mmar_dir=None,
    progress: bool = True,
    version: int = -1,
    map_location=None,
    pretrained=True,
    weights_only=False,
    model_key: str = "model",
):
    """
    Download and extract Medical Model Archive (MMAR) model weights from Nvidia Clara Train.

    Args:
        item: the corresponding model item from `MODEL_DESC` (or a name resolved via `get_model_spec`).
        mmar_dir: target directory to store the MMAR, default is mmars subfolder under `torch.hub get_dir()`.
        progress: whether to display a progress bar when downloading the content.
        version: version number of the MMAR. Set it to `-1` to use `item[Keys.VERSION]`.
        map_location: pytorch API parameter for `torch.load` or `torch.jit.load`.
        pretrained: whether to load the pretrained weights after initializing a network module.
        weights_only: whether to load only the weights instead of initializing the network module and assign weights.
        model_key: a key to search in the model file or config file for the model dictionary.
            Currently this function assumes that the model dictionary has
            `{"[name|path]": "test.module", "args": {'kw': 'test'}}`.

    Examples::
        >>> from monai.apps import load_from_mmar
        >>> unet_model = load_from_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".", map_location="cpu")
        >>> print(unet_model)

    See Also:
        https://docs.nvidia.com/clara/
    """
    if not isinstance(item, Mapping):
        # resolve a model name/pattern into a full spec dictionary
        item = get_model_spec(item)
    # make sure the MMAR is available locally (downloads and extracts if needed)
    model_dir = download_mmar(item=item, mmar_dir=mmar_dir, progress=progress, version=version)
    model_file = os.path.join(model_dir, item[Keys.MODEL_FILE])
    logger.info(f'\n*** "{item[Keys.ID]}" available at {model_dir}.')

    # TorchScript archives (".ts") are self-contained: loading with `torch.jit.load`
    # makes 'pretrained'/'weights_only' meaningless, hence the warnings.
    if f"{model_file}".endswith(".ts"):
        if not pretrained:
            warnings.warn("Loading a ScriptModule, 'pretrained' option ignored.")
        if weights_only:
            warnings.warn("Loading a ScriptModule, 'weights_only' option ignored.")
        return torch.jit.load(model_file, map_location=map_location)

    # otherwise load a checkpoint dictionary with `torch.load`
    model_dict = torch.load(model_file, map_location=map_location)
    if weights_only:
        return model_dict.get(model_key, model_dict)  # model_dict[model_key] or model_dict directly

    # 1. search `model_dict["train_conf"]` for model config spec.
    model_config = _get_val(dict(model_dict).get("train_conf", {}), key=model_key, default={})
    if not model_config:
        # 2. search json CONFIG_FILE for model config spec.
        json_path = os.path.join(model_dir, item.get(Keys.CONFIG_FILE, "config_train.json"))
        with open(json_path) as f:
            conf_dict = json.load(f)
        conf_dict = dict(conf_dict)
        model_config = _get_val(conf_dict, key=model_key, default={})
    if not model_config:
        # 3. search `model_dict` for model config spec.
        model_config = _get_val(dict(model_dict), key=model_key, default={})

    if not (model_config and isinstance(model_config, Mapping)):
        raise ValueError(
            f"Could not load model config dictionary from config: {item.get(Keys.CONFIG_FILE)}, "
            f"or from model file: {item.get(Keys.MODEL_FILE)}."
        )

    # parse `model_config` for model class and model parameters
    if model_config.get("name"):  # model config section is a "name": look it up in monai.networks.nets
        model_name = model_config["name"]
        model_cls = monai_nets.__dict__[model_name]
    elif model_config.get("path"):  # model config section is a "path": dynamic import of a user module
        # https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html
        model_module, model_name = model_config.get("path", ".").rsplit(".", 1)
        model_cls, has_cls = optional_import(module=model_module, name=model_name)
        if not has_cls:
            raise ValueError(
                f"Could not load MMAR model config {model_config.get('path', '')}, "
                f"Please make sure MMAR's sub-folders in '{model_dir}' is on the PYTHONPATH."
                "See also: https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html"
            )
    else:
        raise ValueError(f"Could not load model config {model_config}.")

    # instantiate the network, optionally with the "args" kwargs from the config
    logger.info(f"*** Model: {model_cls}")
    model_kwargs = model_config.get("args", None)
    if model_kwargs:
        model_inst = model_cls(**model_kwargs)
        logger.info(f"*** Model params: {model_kwargs}")
    else:
        model_inst = model_cls()
    if pretrained:
        # the checkpoint may store the state dict under `model_key` or at top level
        model_inst.load_state_dict(model_dict.get(model_key, model_dict))
    logger.info("\n---")
    doc_url = item.get(Keys.DOC) or _get_ngc_doc_url(item[Keys.NAME], model_prefix="nvidia:med:")
    logger.info(f"For more information, please visit {doc_url}\n")
    return model_inst
def _get_val(input_dict: Mapping, key="model", default=None):
"""
Search for the item with `key` in `config_dict`.
Returns: the first occurrence of `key` in a breadth first search.
"""
if key in input_dict:
return input_dict[key]
for sub_dict in input_dict:
val = input_dict[sub_dict]
if isinstance(val, Mapping):
found_val = _get_val(val, key=key, default=None)
if found_val is not None:
return found_val
return default
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Dict, List, Optional, Any
import logging
from io import BytesIO, StringIO
from os import linesep
from urllib.parse import urlparse
from wsgiref.headers import Headers
from ._abc import Context
from ._http import HttpRequest, HttpResponse
from ._thirdparty.werkzeug._compat import string_types, wsgi_encoding_dance
class WsgiRequest:
    """Adapts an Azure Functions HttpRequest (plus optional invocation
    Context) into the pieces needed for a PEP 3333 WSGI environ dict."""

    # Lazily built by to_environ(); None until the first call.
    _environ_cache: Optional[Dict[str, Any]] = None

    def __init__(self,
                 func_req: HttpRequest,
                 func_ctx: Optional[Context] = None):
        """Capture request metadata from ``func_req`` and ``func_ctx``.

        :param func_req: the Azure Functions HTTP request to adapt.
        :param func_ctx: optional invocation context; attributes are read
            defensively with getattr, so None is acceptable.
        """
        url = urlparse(func_req.url)
        func_req_body = func_req.get_body() or b''

        # Convert function request headers to lowercase header names
        self._lowercased_headers = {
            k.lower(): v for k, v in func_req.headers.items()
        }

        # Implement interfaces for PEP 3333 environ
        self.request_method = getattr(func_req, 'method', None)
        self.script_name = ''
        self.path_info = getattr(url, 'path', None)
        self.query_string = getattr(url, 'query', None)
        self.content_type = self._lowercased_headers.get('content-type')
        self.content_length = str(len(func_req_body))
        self.server_name = getattr(url, 'hostname', None)
        self.server_port = str(self._get_port(url, self._lowercased_headers))
        self.server_protocol = 'HTTP/1.1'

        # Propagate http request headers into HTTP_ environ entries
        self._http_environ: Dict[str, str] = self._get_http_headers(
            func_req.headers
        )

        # Wsgi environ ("wsgi." keys required by PEP 3333)
        self.wsgi_version = (1, 0)
        self.wsgi_url_scheme = url.scheme
        self.wsgi_input = BytesIO(func_req_body)
        self.wsgi_multithread = False
        self.wsgi_multiprocess = False
        self.wsgi_run_once = False

        # Azure Functions context (None-safe when no context was given)
        self.af_function_directory = getattr(func_ctx,
                                             'function_directory', None)
        self.af_function_name = getattr(func_ctx, 'function_name', None)
        self.af_invocation_id = getattr(func_ctx, 'invocation_id', None)

    def to_environ(self, errors_buffer: StringIO) -> Dict[str, Any]:
        """Build the WSGI environ dict; the result is cached after the
        first call.

        :param errors_buffer: stream exposed to the app as ``wsgi.errors``.
        """
        if self._environ_cache is not None:
            return self._environ_cache

        environ = {
            'REQUEST_METHOD': self.request_method,
            'SCRIPT_NAME': self.script_name,
            'PATH_INFO': self.path_info,
            'QUERY_STRING': self.query_string,
            'CONTENT_TYPE': self.content_type,
            'CONTENT_LENGTH': self.content_length,
            'SERVER_NAME': self.server_name,
            'SERVER_PORT': self.server_port,
            'SERVER_PROTOCOL': self.server_protocol,
            'wsgi.version': self.wsgi_version,
            'wsgi.url_scheme': self.wsgi_url_scheme,
            'wsgi.input': self.wsgi_input,
            'wsgi.errors': errors_buffer,
            'wsgi.multithread': self.wsgi_multithread,
            'wsgi.multiprocess': self.wsgi_multiprocess,
            'wsgi.run_once': self.wsgi_run_once,
            'azure_functions.function_directory': self.af_function_directory,
            'azure_functions.function_name': self.af_function_name,
            'azure_functions.invocation_id': self.af_invocation_id
        }
        environ.update(self._http_environ)

        # Ensure WSGI string fits in ISO-8859-1 code points (PEP 3333
        # requires environ strings to be latin-1 representable)
        for k, v in environ.items():
            if isinstance(v, string_types):
                environ[k] = wsgi_encoding_dance(v)

        # Remove None values before caching
        self._environ_cache = {
            k: v for k, v in environ.items() if v is not None
        }
        return self._environ_cache

    def _get_port(self, parsed_url, lowercased_headers: Dict[str, str]) -> int:
        """Best-effort port: X-Forwarded-Port header, then an explicit port
        in the URL, then 443 for https, else 80."""
        port: int = 80
        if lowercased_headers.get('x-forwarded-port'):
            return int(lowercased_headers['x-forwarded-port'])
        elif getattr(parsed_url, 'port', None):
            return int(parsed_url.port)
        elif parsed_url.scheme == 'https':
            return 443
        return port

    def _get_http_headers(self,
                          func_headers: Dict[str, str]) -> Dict[str, str]:
        # Content-Type -> HTTP_CONTENT_TYPE
        # BUG FIX: the replace() arguments reused the f-string's own quote
        # character, which is a SyntaxError on Python < 3.12 (quote reuse in
        # f-strings only became legal with PEP 701).
        return {f'HTTP_{k.upper().replace("-", "_")}': v for k, v in
                func_headers.items()}
class WsgiResponse:
    """Collects the status, headers and body chunks produced by a WSGI app."""

    def __init__(self):
        self._status = ''
        self._status_code = 0
        self._headers = {}
        self._buffer: List[bytes] = []

    @classmethod
    def from_app(cls, app, environ) -> 'WsgiResponse':
        """Invoke ``app`` with ``environ`` and capture what it produces."""
        response = cls()
        chunks = app(environ, response._start_response)
        # Normalise falsy chunks (e.g. None) to empty byte strings.
        response._buffer = [chunk or b'' for chunk in chunks]
        return response

    def to_func_response(self) -> HttpResponse:
        """Convert the captured WSGI response into an Azure Functions
        HttpResponse."""
        headers_by_lower_name = {
            name.lower(): value for name, value in self._headers.items()
        }
        body = b''.join(self._buffer)
        return HttpResponse(
            body=body,
            status_code=self._status_code,
            headers=self._headers,
            mimetype=headers_by_lower_name.get('content-type'),
            charset=headers_by_lower_name.get('content-encoding')
        )

    # PEP 3333 start_response callable handed to the WSGI app
    def _start_response(self, status: str, response_headers: List[Any]):
        self._status = status
        self._headers = Headers(response_headers)  # type: ignore
        # The status line looks like "200 OK"; its first token is the code.
        self._status_code = int(status.split(' ')[0])
class WsgiMiddleware:
    """This middleware is to adapt a WSGI supported Python server
    framework into Azure Functions. It can be used by either calling the
    .handle() function or exposing the .main property in a HttpTrigger.
    """
    _logger = logging.getLogger('azure.functions.WsgiMiddleware')
    # Class-level flag so the instantiation log line appears once per process.
    _usage_reported = False

    def __init__(self, app):
        """Instantiate a WSGI middleware to convert Azure Functions HTTP
        request into WSGI Python object. Example on handling WSGI app in a HTTP
        trigger by overwriting the .main() method:

        import azure.functions as func
        from FlaskApp import app

        main = func.WsgiMiddleware(app.wsgi_app).main
        """
        if not self._usage_reported:
            self._logger.info("Instantiating Azure Functions WSGI middleware.")
            # BUG FIX: assign on the class; `self._usage_reported = True` only
            # created an instance attribute, leaving the class flag False so
            # every new instance logged again.
            WsgiMiddleware._usage_reported = True
        self._app = app
        self._wsgi_error_buffer = StringIO()
        # Allow `main = WsgiMiddleware(app).main` in a function file.
        self.main = self._handle

    def handle(self, req: HttpRequest, context: Optional[Context] = None):
        """Method to convert an Azure Functions HTTP request into a WSGI
        Python object. Example on handling WSGI app in a HTTP trigger by
        calling .handle() in .main() method:

        import azure.functions as func
        from FlaskApp import app

        def main(req, context):
            return func.WsgiMiddleware(app.wsgi_app).handle(req, context)
        """
        return self._handle(req, context)

    def _handle(self, req, context):
        # request -> WSGI environ -> app -> captured response -> func response
        wsgi_request = WsgiRequest(req, context)
        environ = wsgi_request.to_environ(self._wsgi_error_buffer)
        wsgi_response = WsgiResponse.from_app(self._app, environ)
        self._handle_errors()
        return wsgi_response.to_func_response()

    def _handle_errors(self):
        """Raise if the WSGI app wrote anything to the wsgi.errors stream."""
        if self._wsgi_error_buffer.tell() > 0:
            self._wsgi_error_buffer.seek(0)
            # BUG FIX: readline() returns a single string, and str.join over a
            # string interleaves the separator between its characters; use
            # readlines() to join the actual error lines.
            error_message = linesep.join(
                self._wsgi_error_buffer.readlines()
            )
            raise Exception(error_message)
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Dict, List, Optional, Any
import logging
from io import BytesIO, StringIO
from os import linesep
from urllib.parse import urlparse
from wsgiref.headers import Headers
from ._abc import Context
from ._http import HttpRequest, HttpResponse
from ._thirdparty.werkzeug._compat import string_types, wsgi_encoding_dance
class WsgiRequest:
    """Adapts an Azure Functions HttpRequest (plus optional invocation
    Context) into the pieces needed for a PEP 3333 WSGI environ dict."""

    # Lazily built by to_environ(); None until the first call.
    _environ_cache: Optional[Dict[str, Any]] = None

    def __init__(self,
                 func_req: HttpRequest,
                 func_ctx: Optional[Context] = None):
        """Capture request metadata from ``func_req`` and ``func_ctx``.

        :param func_req: the Azure Functions HTTP request to adapt.
        :param func_ctx: optional invocation context; attributes are read
            defensively with getattr, so None is acceptable.
        """
        url = urlparse(func_req.url)
        func_req_body = func_req.get_body() or b''

        # Convert function request headers to lowercase header names
        self._lowercased_headers = {
            k.lower(): v for k, v in func_req.headers.items()
        }

        # Implement interfaces for PEP 3333 environ
        self.request_method = getattr(func_req, 'method', None)
        self.script_name = ''
        self.path_info = getattr(url, 'path', None)
        self.query_string = getattr(url, 'query', None)
        self.content_type = self._lowercased_headers.get('content-type')
        self.content_length = str(len(func_req_body))
        self.server_name = getattr(url, 'hostname', None)
        self.server_port = str(self._get_port(url, self._lowercased_headers))
        self.server_protocol = 'HTTP/1.1'

        # Propagate http request headers into HTTP_ environ entries
        self._http_environ: Dict[str, str] = self._get_http_headers(
            func_req.headers
        )

        # Wsgi environ ("wsgi." keys required by PEP 3333)
        self.wsgi_version = (1, 0)
        self.wsgi_url_scheme = url.scheme
        self.wsgi_input = BytesIO(func_req_body)
        self.wsgi_multithread = False
        self.wsgi_multiprocess = False
        self.wsgi_run_once = False

        # Azure Functions context (None-safe when no context was given)
        self.af_function_directory = getattr(func_ctx,
                                             'function_directory', None)
        self.af_function_name = getattr(func_ctx, 'function_name', None)
        self.af_invocation_id = getattr(func_ctx, 'invocation_id', None)

    def to_environ(self, errors_buffer: StringIO) -> Dict[str, Any]:
        """Build the WSGI environ dict; the result is cached after the
        first call.

        :param errors_buffer: stream exposed to the app as ``wsgi.errors``.
        """
        if self._environ_cache is not None:
            return self._environ_cache

        environ = {
            'REQUEST_METHOD': self.request_method,
            'SCRIPT_NAME': self.script_name,
            'PATH_INFO': self.path_info,
            'QUERY_STRING': self.query_string,
            'CONTENT_TYPE': self.content_type,
            'CONTENT_LENGTH': self.content_length,
            'SERVER_NAME': self.server_name,
            'SERVER_PORT': self.server_port,
            'SERVER_PROTOCOL': self.server_protocol,
            'wsgi.version': self.wsgi_version,
            'wsgi.url_scheme': self.wsgi_url_scheme,
            'wsgi.input': self.wsgi_input,
            'wsgi.errors': errors_buffer,
            'wsgi.multithread': self.wsgi_multithread,
            'wsgi.multiprocess': self.wsgi_multiprocess,
            'wsgi.run_once': self.wsgi_run_once,
            'azure_functions.function_directory': self.af_function_directory,
            'azure_functions.function_name': self.af_function_name,
            'azure_functions.invocation_id': self.af_invocation_id
        }
        environ.update(self._http_environ)

        # Ensure WSGI string fits in ISO-8859-1 code points (PEP 3333
        # requires environ strings to be latin-1 representable)
        for k, v in environ.items():
            if isinstance(v, string_types):
                environ[k] = wsgi_encoding_dance(v)

        # Remove None values before caching
        self._environ_cache = {
            k: v for k, v in environ.items() if v is not None
        }
        return self._environ_cache

    def _get_port(self, parsed_url, lowercased_headers: Dict[str, str]) -> int:
        """Best-effort port: X-Forwarded-Port header, then an explicit port
        in the URL, then 443 for https, else 80."""
        port: int = 80
        if lowercased_headers.get('x-forwarded-port'):
            return int(lowercased_headers['x-forwarded-port'])
        elif getattr(parsed_url, 'port', None):
            return int(parsed_url.port)
        elif parsed_url.scheme == 'https':
            return 443
        return port

    def _get_http_headers(self,
                          func_headers: Dict[str, str]) -> Dict[str, str]:
        # Content-Type -> HTTP_CONTENT_TYPE
        return {f'HTTP_{k.upper().replace("-", "_")}': v for k, v in
                func_headers.items()}
class WsgiResponse:
    """Accumulates status, headers and body chunks produced by a WSGI app."""

    def __init__(self):
        # _status is the full status line (e.g. "200 OK"); _status_code is
        # the numeric code parsed from it; _headers becomes a wsgiref
        # Headers object once _start_response has been called.
        self._status = ''
        self._status_code = 0
        self._headers = {}
        self._buffer: List[bytes] = []

    @classmethod
    def from_app(cls, app, environ) -> 'WsgiResponse':
        """Run ``app`` with ``environ`` and capture its response chunks."""
        res = cls()
        # falsy chunks (e.g. None) are normalised to b''
        res._buffer = [x or b'' for x in app(environ, res._start_response)]
        return res

    def to_func_response(self) -> HttpResponse:
        """Convert the captured response into an Azure Functions
        HttpResponse."""
        lowercased_headers = {k.lower(): v for k, v in self._headers.items()}
        return HttpResponse(
            body=b''.join(self._buffer),
            status_code=self._status_code,
            headers=self._headers,
            mimetype=lowercased_headers.get('content-type'),
            charset=lowercased_headers.get('content-encoding')
        )

    # PEP 3333 start response implementation, handed to the WSGI app
    def _start_response(self, status: str, response_headers: List[Any]):
        self._status = status
        self._headers = Headers(response_headers)  # type: ignore
        self._status_code = int(self._status.split(' ')[0])  # 200 OK
class WsgiMiddleware:
    """This middleware is to adapt a WSGI supported Python server
    framework into Azure Functions. It can be used by either calling the
    .handle() function or exposing the .main property in a HttpTrigger.
    """
    _logger = logging.getLogger('azure.functions.WsgiMiddleware')
    # Class-level flag so the instantiation log line appears once per process.
    _usage_reported = False

    def __init__(self, app):
        """Instantiate a WSGI middleware to convert Azure Functions HTTP
        request into WSGI Python object. Example on handling WSGI app in a HTTP
        trigger by overwriting the .main() method:

        import azure.functions as func
        from FlaskApp import app

        main = func.WsgiMiddleware(app.wsgi_app).main
        """
        if not self._usage_reported:
            self._logger.info("Instantiating Azure Functions WSGI middleware.")
            # BUG FIX: assign on the class; `self._usage_reported = True` only
            # created an instance attribute, leaving the class flag False so
            # every new instance logged again.
            WsgiMiddleware._usage_reported = True
        self._app = app
        self._wsgi_error_buffer = StringIO()
        # Allow `main = WsgiMiddleware(app).main` in a function file.
        self.main = self._handle

    def handle(self, req: HttpRequest, context: Optional[Context] = None):
        """Method to convert an Azure Functions HTTP request into a WSGI
        Python object. Example on handling WSGI app in a HTTP trigger by
        calling .handle() in .main() method:

        import azure.functions as func
        from FlaskApp import app

        def main(req, context):
            return func.WsgiMiddleware(app.wsgi_app).handle(req, context)
        """
        return self._handle(req, context)

    def _handle(self, req, context):
        # request -> WSGI environ -> app -> captured response -> func response
        wsgi_request = WsgiRequest(req, context)
        environ = wsgi_request.to_environ(self._wsgi_error_buffer)
        wsgi_response = WsgiResponse.from_app(self._app, environ)
        self._handle_errors()
        return wsgi_response.to_func_response()

    def _handle_errors(self):
        """Raise if the WSGI app wrote anything to the wsgi.errors stream."""
        if self._wsgi_error_buffer.tell() > 0:
            self._wsgi_error_buffer.seek(0)
            # BUG FIX: readline() returns a single string, and str.join over a
            # string interleaves the separator between its characters; use
            # readlines() to join the actual error lines.
            error_message = linesep.join(
                self._wsgi_error_buffer.readlines()
            )
            raise Exception(error_message)
|
#!/usr/bin/env python3
import json
from .constants import constants, BASE_URL
from .base import Base
class Ticker(Base):
    """Request information about stock/certificate/fund/etc

    Args:
        orderbook_id (int): id of ticker
        instrument (str): Type of instrument, Defaults to 'stock'
        auth (bool): Set true for additional information, Defaults to False

    Note:
        Additional information if authenticated
    """

    def __init__(self, orderbook_id, **kwargs):
        super().__init__()
        instrument = kwargs.pop('instrument', 'stock').lower()
        auth = kwargs.pop('auth', False)
        # any leftover keyword arguments are unsupported
        assert not kwargs
        if instrument in ['fund', 'certificate', 'stock']:
            # BUG FIX: the constants subscripts reused the f-string's own
            # double quotes, which is a SyntaxError on Python < 3.12 (quote
            # reuse in f-strings only became legal with PEP 701).
            path = f"{BASE_URL}{constants['paths']['INSTRUMENT_PATH']}"
            url = path.format(instrument, orderbook_id)
            self.data = self._request(url, auth=auth)
        else:
            raise TypeError("Invalid option!")

    def __str__(self):
        return json.dumps(self.data)

    @property
    def info(self):
        """Grabs full json of ticker call

        Returns:
            dict:
        """
        return self.data

    @property
    def buy_price(self):
        """Grabs buy price of ticker

        Returns:
            float:
        """
        return self.data['buyPrice']

    @property
    def sell_price(self):
        """Grabs sell price of ticker

        Returns:
            float:
        """
        return self.data['sellPrice']

    @property
    def last_price(self):
        """Grabs last price of ticker

        Returns:
            float:
        """
        return self.data['lastPrice']

    @property
    def highest_price(self):
        """Grabs highest price of ticker

        Returns:
            float:
        """
        return self.data['highestPrice']

    @property
    def lowest_price(self):
        """Grabs lowest price of ticker

        Returns:
            float:
        """
        return self.data['lowestPrice']

    @property
    def symbol(self):
        """Grabs symbol of ticker

        Returns:
            str:
        """
        return self.data['tickerSymbol']

    @property
    def currency(self):
        """Grabs currency of ticker

        Returns:
            str:
        """
        return self.data['currency']

    @property
    def isin(self):
        """Grabs ISIN of ticker

        Returns:
            str:
        """
        return self.data['isin']

    @property
    def marketplace(self):
        """Grabs marketplace of ticker

        Returns:
            str:
        """
        return self.data['marketPlace']

    @property
    def name(self):
        """Grabs full name of ticker

        Returns:
            str:
        """
        return self.data['name']

    @property
    def change(self):
        """Grabs change price of ticker

        Returns:
            float:
        """
        return self.data['change']

    @property
    def change_percent(self):
        """Grabs change price of ticker in percent

        Returns:
            float:
        """
        return self.data['changePercent']

    @property
    def flag_code(self):
        """Grabs flag code of ticker

        Returns:
            str:
        """
        return self.data['flagCode']

    @property
    def country(self):
        """Grabs the country of ticker

        Returns:
            str:
        """
        return self.data['country']

    @property
    def id(self):
        """Grabs the id of ticker

        Returns:
            int:
        """
        return int(self.data['id'])

    @property
    def quote_updated(self):
        """Grabs last time quote was updated

        Returns:
            str: ISO 8601
        """
        return self.data['quoteUpdated']

    @property
    def last_price_updated(self):
        """Grabs last time price was updated

        Returns:
            str: ISO 8601
        """
        return self.data['lastPriceUpdated']
| #!/usr/bin/env python3
import json
from .constants import constants, BASE_URL
from .base import Base
class Ticker(Base):
    """Fetch information about a stock/certificate/fund/etc.

    Args:
        orderbook_id (int): id of ticker
        instrument (str): Type of instrument, Defaults to 'stock'
        auth (bool): Set true for additional information, Defaults to False

    Note:
        Additional information if authenticated
    """

    def __init__(self, orderbook_id, **kwargs):
        super().__init__()
        instrument = kwargs.pop('instrument', 'stock').lower()
        auth = kwargs.pop('auth', False)
        # no other keyword arguments are supported
        assert not kwargs
        if instrument not in ('fund', 'certificate', 'stock'):
            raise TypeError("Invalid option!")
        endpoint = f"{BASE_URL}{constants['paths']['INSTRUMENT_PATH']}"
        self.data = self._request(endpoint.format(instrument, orderbook_id),
                                  auth=auth)

    def __str__(self):
        return json.dumps(self.data)

    @property
    def info(self):
        """The full json payload of the ticker call.

        Returns:
            dict:
        """
        return self.data

    @property
    def buy_price(self):
        """The ticker's buy price.

        Returns:
            float:
        """
        return self.data['buyPrice']

    @property
    def sell_price(self):
        """The ticker's sell price.

        Returns:
            float:
        """
        return self.data['sellPrice']

    @property
    def last_price(self):
        """The ticker's last traded price.

        Returns:
            float:
        """
        return self.data['lastPrice']

    @property
    def highest_price(self):
        """The ticker's highest price.

        Returns:
            float:
        """
        return self.data['highestPrice']

    @property
    def lowest_price(self):
        """The ticker's lowest price.

        Returns:
            float:
        """
        return self.data['lowestPrice']

    @property
    def symbol(self):
        """The ticker symbol.

        Returns:
            str:
        """
        return self.data['tickerSymbol']

    @property
    def currency(self):
        """The ticker's trading currency.

        Returns:
            str:
        """
        return self.data['currency']

    @property
    def isin(self):
        """The ticker's ISIN code.

        Returns:
            str:
        """
        return self.data['isin']

    @property
    def marketplace(self):
        """The marketplace the ticker trades on.

        Returns:
            str:
        """
        return self.data['marketPlace']

    @property
    def name(self):
        """The ticker's full name.

        Returns:
            str:
        """
        return self.data['name']

    @property
    def change(self):
        """The ticker's price change.

        Returns:
            float:
        """
        return self.data['change']

    @property
    def change_percent(self):
        """The ticker's price change in percent.

        Returns:
            float:
        """
        return self.data['changePercent']

    @property
    def flag_code(self):
        """The ticker's flag code.

        Returns:
            str:
        """
        return self.data['flagCode']

    @property
    def country(self):
        """The ticker's country.

        Returns:
            str:
        """
        return self.data['country']

    @property
    def id(self):
        """The ticker's numeric id.

        Returns:
            int:
        """
        return int(self.data['id'])

    @property
    def quote_updated(self):
        """Timestamp of the last quote update.

        Returns:
            str: ISO 8601
        """
        return self.data['quoteUpdated']

    @property
    def last_price_updated(self):
        """Timestamp of the last price update.

        Returns:
            str: ISO 8601
        """
        return self.data['lastPriceUpdated']
|
import inspect
import logging
import os
import re
import subprocess
from typing import Dict, Any
from pyhttpd.certs import CertificateSpec
from pyhttpd.conf import HttpdConf
from pyhttpd.env import HttpdTestEnv, HttpdTestSetup
log = logging.getLogger(__name__)
class H2TestSetup(HttpdTestSetup):
    """Test setup that additionally compiles and loads mod_h2test."""

    def __init__(self, env: 'HttpdTestEnv'):
        super().__init__(env=env)

    def make(self):
        # standard setup plus the h2 modules, then our private test module
        super().make(add_modules=["http2", "proxy_http2"])
        self._add_h2test()

    def _add_h2test(self):
        """Compile mod_h2test via apxs and register it in modules.conf."""
        src_dir = os.path.join(self.env.local_dir, 'mod_h2test')
        proc = subprocess.run([self.env.apxs, '-c', 'mod_h2test.c'],
                              capture_output=True, cwd=src_dir)
        if proc.returncode != 0:
            log.error(f"compiling md_h2test failed: {proc.stderr}")
            raise Exception(f"compiling md_h2test failed: {proc.stderr}")
        modules_conf = os.path.join(self.env.server_dir, 'conf/modules.conf')
        with open(modules_conf, 'a') as fd:
            # load our test module which is not installed
            fd.write(f"LoadModule h2test_module \"{self.env.local_dir}/mod_h2test/.libs/mod_h2test.so\"\n")
class H2TestEnv(HttpdTestEnv):
    """Httpd test environment pre-configured for the HTTP/2 test suite."""

    def __init__(self, pytestconfig=None, setup_dirs=True):
        super().__init__(pytestconfig=pytestconfig,
                         local_dir=os.path.dirname(inspect.getfile(H2TestEnv)),
                         add_base_conf=[
                             "H2MinWorkers 1",
                             "H2MaxWorkers 64",
                             "Protocols h2 http/1.1 h2c",
                         ],
                         interesting_modules=["http2", "proxy_http2", "h2test"])
        # certificates for the test vhosts; "noh2" gets its own rsa2048 cert
        self.add_cert_specs([
            CertificateSpec(domains=[
                f"push.{self._http_tld}",
                f"hints.{self._http_tld}",
                f"ssl.{self._http_tld}",
                f"pad0.{self._http_tld}",
                f"pad1.{self._http_tld}",
                f"pad2.{self._http_tld}",
                f"pad3.{self._http_tld}",
                f"pad8.{self._http_tld}",
            ]),
            CertificateSpec(domains=[f"noh2.{self.http_tld}"], key_type='rsa2048'),
        ])
        # error-log entries the tests are expected to trigger
        self.httpd_error_log.set_ignored_lognos([
            'AH02032',
            'AH01276',
            'AH01630',
            'AH00135',
            'AH02261',  # Re-negotiation handshake failed (our test_101)
        ])
        self.httpd_error_log.add_ignored_patterns([
            re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'),
            re.compile(r'.*:tls_post_process_client_hello:.*'),
            re.compile(r'.*:tls_process_client_certificate:.*'),
        ])
        if setup_dirs:
            # build server dirs, certificates and the test data files
            self._setup = H2TestSetup(env=self)
            self._setup.make()
            self.issue_certs()
            self.setup_data_1k_1m()

    def setup_data_1k_1m(self):
        """Write data files of 1k, 10k, 100k and 1m bytes into gen_dir."""
        # 90 payload chars + newline; with the 9-digit index and '-' prefix
        # each written record is exactly 100 bytes
        s90 = "01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678\n"
        with open(os.path.join(self.gen_dir, "data-1k"), 'w') as f:
            for i in range(10):
                f.write(f"{i:09d}-{s90}")
        with open(os.path.join(self.gen_dir, "data-10k"), 'w') as f:
            for i in range(100):
                f.write(f"{i:09d}-{s90}")
        with open(os.path.join(self.gen_dir, "data-100k"), 'w') as f:
            for i in range(1000):
                f.write(f"{i:09d}-{s90}")
        with open(os.path.join(self.gen_dir, "data-1m"), 'w') as f:
            for i in range(10000):
                f.write(f"{i:09d}-{s90}")
class H2Conf(HttpdConf):
    """HttpdConf variant that configures HTTP/2 protocols per vhost."""

    def __init__(self, env: HttpdTestEnv, extras: Dict[str, Any] = None):
        super().__init__(env=env, extras=HttpdConf.merge_extras(extras, {
            f"cgi.{env.http_tld}": [
                "SSLOptions +StdEnvVars",
                "AddHandler cgi-script .py",
            ]
        }))

    def start_vhost(self, domains, port=None, doc_root="htdocs", with_ssl=False):
        """Open a vhost section and advertise the matching Protocols list."""
        super().start_vhost(domains=domains, port=port, doc_root=doc_root, with_ssl=with_ssl)
        # noh2 hosts stay HTTP/1.1-only; TLS hosts advertise h2, cleartext h2c
        if f"noh2.{self.env.http_tld}" in domains:
            protos = ["http/1.1"]
        elif port == self.env.https_port or with_ssl is True:
            protos = ["h2", "http/1.1"]
        else:
            protos = ["h2c", "http/1.1"]
        if f"test2.{self.env.http_tld}" in domains:
            # test2 hosts prefer HTTP/1.1 over h2/h2c
            protos = reversed(protos)
        # BUG FIX: the join expression reused the f-string's own double
        # quotes, which is a SyntaxError on Python < 3.12 (quote reuse in
        # f-strings only became legal with PEP 701).
        self.add(f"Protocols {' '.join(protos)}")
        return self

    def add_vhost_noh2(self):
        """Add https and http vhosts that explicitly do not speak HTTP/2."""
        domains = [f"noh2.{self.env.http_tld}", f"noh2-alias.{self.env.http_tld}"]
        self.start_vhost(domains=domains, port=self.env.https_port, doc_root="htdocs/noh2")
        self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
        self.end_vhost()
        self.start_vhost(domains=domains, port=self.env.http_port, doc_root="htdocs/noh2")
        self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
        self.end_vhost()
        return self

    def add_vhost_test1(self, proxy_self=False, h2proxy_self=False):
        return super().add_vhost_test1(proxy_self=proxy_self, h2proxy_self=h2proxy_self)

    def add_vhost_test2(self):
        return super().add_vhost_test2()
| import inspect
import logging
import os
import re
import subprocess
from typing import Dict, Any
from pyhttpd.certs import CertificateSpec
from pyhttpd.conf import HttpdConf
from pyhttpd.env import HttpdTestEnv, HttpdTestSetup
log = logging.getLogger(__name__)
class H2TestSetup(HttpdTestSetup):
    """HttpdTestSetup that also compiles and loads the mod_h2test module."""

    def __init__(self, env: 'HttpdTestEnv'):
        super().__init__(env=env)

    def make(self):
        # standard setup plus the h2 modules, then our private test module
        super().make(add_modules=["http2", "proxy_http2"])
        self._add_h2test()

    def _add_h2test(self):
        """Compile mod_h2test via apxs and register it in modules.conf."""
        p = subprocess.run([self.env.apxs, '-c', 'mod_h2test.c'],
                           capture_output=True,
                           cwd=os.path.join(self.env.local_dir, 'mod_h2test'))
        rv = p.returncode
        if rv != 0:
            log.error(f"compiling md_h2test failed: {p.stderr}")
            raise Exception(f"compiling md_h2test failed: {p.stderr}")
        modules_conf = os.path.join(self.env.server_dir, 'conf/modules.conf')
        with open(modules_conf, 'a') as fd:
            # load our test module which is not installed
            fd.write(f"LoadModule h2test_module \"{self.env.local_dir}/mod_h2test/.libs/mod_h2test.so\"\n")
class H2TestEnv(HttpdTestEnv):
    """Httpd test environment pre-configured for the HTTP/2 test suite."""

    def __init__(self, pytestconfig=None, setup_dirs=True):
        super().__init__(pytestconfig=pytestconfig,
                         local_dir=os.path.dirname(inspect.getfile(H2TestEnv)),
                         add_base_conf=[
                             "H2MinWorkers 1",
                             "H2MaxWorkers 64",
                             "Protocols h2 http/1.1 h2c",
                         ],
                         interesting_modules=["http2", "proxy_http2", "h2test"])
        # certificates for the test vhosts; "noh2" gets its own rsa2048 cert
        self.add_cert_specs([
            CertificateSpec(domains=[
                f"push.{self._http_tld}",
                f"hints.{self._http_tld}",
                f"ssl.{self._http_tld}",
                f"pad0.{self._http_tld}",
                f"pad1.{self._http_tld}",
                f"pad2.{self._http_tld}",
                f"pad3.{self._http_tld}",
                f"pad8.{self._http_tld}",
            ]),
            CertificateSpec(domains=[f"noh2.{self.http_tld}"], key_type='rsa2048'),
        ])
        # error-log entries the tests are expected to trigger
        self.httpd_error_log.set_ignored_lognos([
            'AH02032',
            'AH01276',
            'AH01630',
            'AH00135',
            'AH02261',  # Re-negotiation handshake failed (our test_101)
        ])
        self.httpd_error_log.add_ignored_patterns([
            re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'),
            re.compile(r'.*:tls_post_process_client_hello:.*'),
            re.compile(r'.*:tls_process_client_certificate:.*'),
        ])
        if setup_dirs:
            # build server dirs, certificates and the test data files
            self._setup = H2TestSetup(env=self)
            self._setup.make()
            self.issue_certs()
            self.setup_data_1k_1m()

    def setup_data_1k_1m(self):
        """Write data files of 1k, 10k, 100k and 1m bytes into gen_dir."""
        # 90 payload chars + newline; with the 9-digit index and '-' prefix
        # each written record is exactly 100 bytes
        s90 = "01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678\n"
        with open(os.path.join(self.gen_dir, "data-1k"), 'w') as f:
            for i in range(10):
                f.write(f"{i:09d}-{s90}")
        with open(os.path.join(self.gen_dir, "data-10k"), 'w') as f:
            for i in range(100):
                f.write(f"{i:09d}-{s90}")
        with open(os.path.join(self.gen_dir, "data-100k"), 'w') as f:
            for i in range(1000):
                f.write(f"{i:09d}-{s90}")
        with open(os.path.join(self.gen_dir, "data-1m"), 'w') as f:
            for i in range(10000):
                f.write(f"{i:09d}-{s90}")
class H2Conf(HttpdConf):
    """HttpdConf variant that configures HTTP/2 protocols per vhost."""

    def __init__(self, env: HttpdTestEnv, extras: Dict[str, Any] = None):
        super().__init__(env=env, extras=HttpdConf.merge_extras(extras, {
            f"cgi.{env.http_tld}": [
                "SSLOptions +StdEnvVars",
                "AddHandler cgi-script .py",
            ]
        }))

    def start_vhost(self, domains, port=None, doc_root="htdocs", with_ssl=False):
        """Open a vhost section and advertise the matching Protocols list."""
        super().start_vhost(domains=domains, port=port, doc_root=doc_root, with_ssl=with_ssl)
        # noh2 hosts stay HTTP/1.1-only; TLS hosts get h2, cleartext h2c
        noh2_host = f"noh2.{self.env.http_tld}" in domains
        secure = port == self.env.https_port or with_ssl is True
        if noh2_host:
            protos = ["http/1.1"]
        else:
            protos = (["h2"] if secure else ["h2c"]) + ["http/1.1"]
        if f"test2.{self.env.http_tld}" in domains:
            # test2 hosts prefer HTTP/1.1 over h2/h2c
            protos = list(reversed(protos))
        self.add("Protocols " + " ".join(protos))
        return self

    def add_vhost_noh2(self):
        """Add https and http vhosts that explicitly do not speak HTTP/2."""
        domains = [f"noh2.{self.env.http_tld}", f"noh2-alias.{self.env.http_tld}"]
        for vhost_port in (self.env.https_port, self.env.http_port):
            self.start_vhost(domains=domains, port=vhost_port, doc_root="htdocs/noh2")
            self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
            self.end_vhost()
        return self

    def add_vhost_test1(self, proxy_self=False, h2proxy_self=False):
        return super().add_vhost_test1(proxy_self=proxy_self, h2proxy_self=h2proxy_self)

    def add_vhost_test2(self):
        return super().add_vhost_test2()
|
"""This module contains the definition for a pylint HoudiniPackageRunner."""
# =============================================================================
# IMPORTS
# =============================================================================
# Future
from __future__ import annotations
# Standard Library
import pathlib
import sys
from io import StringIO
from typing import TYPE_CHECKING, List
# Third Party
from pylint import lint
from pylint.reporters.text import ColorizedTextReporter
# Houdini Package Runner
import houdini_package_runner.config
import houdini_package_runner.parser
import houdini_package_runner.runners.utils
import houdini_package_runner.utils
from houdini_package_runner.discoverers import package
from houdini_package_runner.runners.base import HoudiniPackageRunner
# Imports for type checking.
if TYPE_CHECKING:
import argparse
from houdini_package_runner.config import BaseRunnerConfig
from houdini_package_runner.discoverers.base import BaseItemDiscoverer
from houdini_package_runner.items.base import BaseItem
# =============================================================================
# CLASSES
# =============================================================================
class PyLintRunner(HoudiniPackageRunner):
"""Implementation for a pylint package runner.
:param discoverer: The item discoverer used by the runner.
:param runner_config: Optional BaseRunnerConfig object.
"""
def __init__(
self, discoverer: BaseItemDiscoverer, runner_config: BaseRunnerConfig = None
) -> None:
super().__init__(discoverer, runner_config=runner_config)
self._disabled: List[str] = []
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def name(self) -> str:
"""The runner name used for identification."""
return "pylint"
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@staticmethod
def build_parser(parser: argparse.ArgumentParser = None) -> argparse.ArgumentParser:
"""Build a parser for the runner.
:param parser: Optional parser to add arguments to, otherwise a new one will be created.
:return: The common parser for the runner.
"""
if parser is None:
parser = houdini_package_runner.parser.build_common_parser(
description="""Run pylint on Houdini package items.
Any unknown args will be passed along to the pylint command.
"""
)
parser.add_argument(
"--rcfile",
action="store",
help="Specify a configuration file",
)
parser.add_argument("--disable", action="store", help="Tests to disable.")
return parser
def init_args_options(self, namespace: argparse.Namespace, extra_args: List[str]):
"""Initialize any extra options from parser data.
:param namespace: Argument parser namespace.
:param extra_args: Optional list of extra_args to pass to isort.
"""
super().init_args_options(namespace, extra_args)
if namespace.rcfile:
extra_args.insert(0, f"--rcfile={namespace.rcfile}")
if namespace.disable:
self._disabled = namespace.disable.split(",")
if extra_args:
self._extra_args.extend(extra_args)
def process_path(self, file_path: pathlib.Path, item: BaseItem) -> int:
"""Process a file path.
:param file_path: The path to lint.
:param item: The item to lint.
:return: The process return code.
"""
flags = []
flags.extend(self.extra_args)
to_disable = []
if self._disabled:
to_disable.extend(self._disabled)
to_disable.extend(self.config.get_config_data("to_disable", item, file_path))
flags.extend(self.config.get_config_data("command", item, file_path))
known_builtins: List[str] = item.ignored_builtins
known_builtins.extend(
self.config.get_config_data("known_builtins", item, file_path)
)
if known_builtins:
houdini_package_runner.utils.add_or_append_to_flags(
flags, "--additional-builtins", known_builtins
)
if to_disable:
flags.append(f"--disable={",".join(to_disable)}")
command = flags + [str(file_path)]
if self.verbose:
houdini_package_runner.runners.utils.print_runner_command(
item, command, extra="pylint --output-format=colorized "
)
buf = StringIO()
result = lint.Run(command, reporter=ColorizedTextReporter(buf), exit=False)
output = buf.getvalue()
if output:
sys.stdout.write(output)
return result.linter.msg_status
# =============================================================================
# FUNCTIONS
# =============================================================================
def main() -> int:
"""Run 'pylint' on package files."""
parser = PyLintRunner.build_parser()
parsed_args, unknown = parser.parse_known_args()
discoverer = package.init_standard_package_discoverer(parsed_args)
run_tool = PyLintRunner(discoverer)
run_tool.init_args_options(parsed_args, unknown)
result = run_tool.run()
return result
| """This module contains the definition for a pylint HoudiniPackageRunner."""
# =============================================================================
# IMPORTS
# =============================================================================
# Future
from __future__ import annotations
# Standard Library
import pathlib
import sys
from io import StringIO
from typing import TYPE_CHECKING, List
# Third Party
from pylint import lint
from pylint.reporters.text import ColorizedTextReporter
# Houdini Package Runner
import houdini_package_runner.config
import houdini_package_runner.parser
import houdini_package_runner.runners.utils
import houdini_package_runner.utils
from houdini_package_runner.discoverers import package
from houdini_package_runner.runners.base import HoudiniPackageRunner
# Imports for type checking.
if TYPE_CHECKING:
import argparse
from houdini_package_runner.config import BaseRunnerConfig
from houdini_package_runner.discoverers.base import BaseItemDiscoverer
from houdini_package_runner.items.base import BaseItem
# =============================================================================
# CLASSES
# =============================================================================
class PyLintRunner(HoudiniPackageRunner):
"""Implementation for a pylint package runner.
:param discoverer: The item discoverer used by the runner.
:param runner_config: Optional BaseRunnerConfig object.
"""
def __init__(
self, discoverer: BaseItemDiscoverer, runner_config: BaseRunnerConfig = None
) -> None:
super().__init__(discoverer, runner_config=runner_config)
self._disabled: List[str] = []
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def name(self) -> str:
"""The runner name used for identification."""
return "pylint"
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@staticmethod
def build_parser(parser: argparse.ArgumentParser = None) -> argparse.ArgumentParser:
"""Build a parser for the runner.
:param parser: Optional parser to add arguments to, otherwise a new one will be created.
:return: The common parser for the runner.
"""
if parser is None:
parser = houdini_package_runner.parser.build_common_parser(
description="""Run pylint on Houdini package items.
Any unknown args will be passed along to the pylint command.
"""
)
parser.add_argument(
"--rcfile",
action="store",
help="Specify a configuration file",
)
parser.add_argument("--disable", action="store", help="Tests to disable.")
return parser
def init_args_options(self, namespace: argparse.Namespace, extra_args: List[str]):
"""Initialize any extra options from parser data.
:param namespace: Argument parser namespace.
:param extra_args: Optional list of extra_args to pass to isort.
"""
super().init_args_options(namespace, extra_args)
if namespace.rcfile:
extra_args.insert(0, f"--rcfile={namespace.rcfile}")
if namespace.disable:
self._disabled = namespace.disable.split(",")
if extra_args:
self._extra_args.extend(extra_args)
def process_path(self, file_path: pathlib.Path, item: BaseItem) -> int:
"""Process a file path.
:param file_path: The path to lint.
:param item: The item to lint.
:return: The process return code.
"""
flags = []
flags.extend(self.extra_args)
to_disable = []
if self._disabled:
to_disable.extend(self._disabled)
to_disable.extend(self.config.get_config_data("to_disable", item, file_path))
flags.extend(self.config.get_config_data("command", item, file_path))
known_builtins: List[str] = item.ignored_builtins
known_builtins.extend(
self.config.get_config_data("known_builtins", item, file_path)
)
if known_builtins:
houdini_package_runner.utils.add_or_append_to_flags(
flags, "--additional-builtins", known_builtins
)
if to_disable:
flags.append(f"--disable={','.join(to_disable)}")
command = flags + [str(file_path)]
if self.verbose:
houdini_package_runner.runners.utils.print_runner_command(
item, command, extra="pylint --output-format=colorized "
)
buf = StringIO()
result = lint.Run(command, reporter=ColorizedTextReporter(buf), exit=False)
output = buf.getvalue()
if output:
sys.stdout.write(output)
return result.linter.msg_status
# =============================================================================
# FUNCTIONS
# =============================================================================
def main() -> int:
"""Run 'pylint' on package files."""
parser = PyLintRunner.build_parser()
parsed_args, unknown = parser.parse_known_args()
discoverer = package.init_standard_package_discoverer(parsed_args)
run_tool = PyLintRunner(discoverer)
run_tool.init_args_options(parsed_args, unknown)
result = run_tool.run()
return result
|
# coding: utf-8
import csv
import re
from .splittingmatcher import SplittingMatcher
from .mergingmatcher import MergingMatcher
from .replacingmatcher import ReplacingMatcher
from ..utils.helpers import decomment_file
class AdjustTokens:
"""
Syntax for the .tsv adjustment rules
===================================
- each rule should be as follows: "<matchcql>\t<index>\t<operation>\t<replacecql>"
- comments with # and empty lines are allowed
- CQL rules: "<text>" can be used without specifying that there is "text_cleaned="
- Index format: either "<matching_index>" or "<matching_index>-<splitting-index>"
- Adjustment format:
- "+" for merge
- ":" for split (default: syllable mode)
- "::" for split in character mode
- "=" for replace
- Constraint: "<matching_index>-<splitting-index>" is only allowed if adjustment is ":" or "::"
"""
def __init__(self, main=None, custom=None):
self.paths = []
if custom:
self.paths.extend(custom)
elif main:
self.paths.extend(main)
self.rules = []
self.parse_rules()
def no_token_matched(self, matchcql):
matched_tokens = [token for token in re.split(r'(\[.+?\])', matchcql) if token != " " and token != ""]
return len(matched_tokens)
def adjust(self, token_list):
for rule in self.rules:
if rule["operation"] == "split":
if rule["matchidx"] <= self.no_token_matched(rule['matchcql']):
sm = SplittingMatcher(
rule["matchcql"],
rule["matchidx"],
rule["splitidx"],
token_list,
rule["replacecql"],
)
token_list = sm.split_on_matches(mode=rule["splitmode"])
else:
print(f'[ERROR]: No token to spilt with token number {rule['matchidx']} found in rule {' '.join(rule)}')
elif rule["operation"] == "merge":
if rule["matchidx"] < self.no_token_matched(rule['matchcql']):
mm = MergingMatcher(
rule["matchcql"], rule["matchidx"], token_list, rule["replacecql"]
)
token_list = mm.merge_on_matches()
else:
print(f'[ERROR]: No token to merge with token number {rule['matchidx']} found in rule {' '.join(rule)}')
elif rule["operation"] == "repl":
rm = ReplacingMatcher(
rule["matchcql"], rule["matchidx"], token_list, rule["replacecql"]
)
rm.replace_on_matches()
return token_list
def parse_rules(self):
"""
Files are sorted before being applied. Thus, filenames
:return:
"""
for rule_file in sorted(self.paths):
for rule in csv.reader(
decomment_file(rule_file.open(encoding="utf-8-sig")), delimiter="\t"
):
self.rules.append(self.parse_rule(rule))
@staticmethod
def parse_rule(rule):
idx_sep = "-"
# sanity checks
if len(rule) != 4:
raise SyntaxError("There can't be more than three columns per rule.")
if not rule[1]:
raise SyntaxError("There needs to be an index for every rule.")
if idx_sep in rule[1] and rule[2] not in [":", "::"]:
raise SyntaxError(
"The double index in only intended for split adjustments."
)
if rule[2] not in ["+", "=", ":", "::"]:
raise SyntaxError(
'The supported operations are either of ["+", "=", ":", "::"].'
)
# parse
rule_dict = {
"matchcql": None,
"matchidx": None,
"operation": None,
"splitidx": None,
"splitmode": None,
"replacecql": None,
}
rule_dict["matchcql"] = rule[0]
if idx_sep in rule[1]:
match_idx, split_idx = rule[1].split("-")
rule_dict["matchidx"] = int(match_idx)
rule_dict["splitidx"] = int(split_idx)
else:
rule_dict["matchidx"] = int(rule[1])
if rule[2] == "=":
rule_dict["operation"] = "repl"
elif rule[2] == "+":
rule_dict["operation"] = "merge"
elif rule[2] == ":":
rule_dict["operation"] = "split"
rule_dict["splitmode"] = "syl"
elif rule[2] == "::":
rule_dict["operation"] = "split"
rule_dict["splitmode"] = "char"
rule_dict["replacecql"] = rule[3]
return rule_dict
| # coding: utf-8
import csv
import re
from .splittingmatcher import SplittingMatcher
from .mergingmatcher import MergingMatcher
from .replacingmatcher import ReplacingMatcher
from ..utils.helpers import decomment_file
class AdjustTokens:
"""
Syntax for the .tsv adjustment rules
===================================
- each rule should be as follows: "<matchcql>\t<index>\t<operation>\t<replacecql>"
- comments with # and empty lines are allowed
- CQL rules: "<text>" can be used without specifying that there is "text_cleaned="
- Index format: either "<matching_index>" or "<matching_index>-<splitting-index>"
- Adjustment format:
- "+" for merge
- ":" for split (default: syllable mode)
- "::" for split in character mode
- "=" for replace
- Constraint: "<matching_index>-<splitting-index>" is only allowed if adjustment is ":" or "::"
"""
def __init__(self, main=None, custom=None):
self.paths = []
if custom:
self.paths.extend(custom)
elif main:
self.paths.extend(main)
self.rules = []
self.parse_rules()
def no_token_matched(self, matchcql):
matched_tokens = [token for token in re.split(r'(\[.+?\])', matchcql) if token != " " and token != ""]
return len(matched_tokens)
def adjust(self, token_list):
for rule in self.rules:
if rule["operation"] == "split":
if rule["matchidx"] <= self.no_token_matched(rule['matchcql']):
sm = SplittingMatcher(
rule["matchcql"],
rule["matchidx"],
rule["splitidx"],
token_list,
rule["replacecql"],
)
token_list = sm.split_on_matches(mode=rule["splitmode"])
else:
print(f'[ERROR]: No token to spilt with token number {rule["matchidx"]} found in rule {" ".join(rule)}')
elif rule["operation"] == "merge":
if rule["matchidx"] < self.no_token_matched(rule['matchcql']):
mm = MergingMatcher(
rule["matchcql"], rule["matchidx"], token_list, rule["replacecql"]
)
token_list = mm.merge_on_matches()
else:
print(f'[ERROR]: No token to merge with token number {rule["matchidx"]} found in rule {" ".join(rule)}')
elif rule["operation"] == "repl":
rm = ReplacingMatcher(
rule["matchcql"], rule["matchidx"], token_list, rule["replacecql"]
)
rm.replace_on_matches()
return token_list
def parse_rules(self):
"""
Files are sorted before being applied. Thus, filenames
:return:
"""
for rule_file in sorted(self.paths):
for rule in csv.reader(
decomment_file(rule_file.open(encoding="utf-8-sig")), delimiter="\t"
):
self.rules.append(self.parse_rule(rule))
@staticmethod
def parse_rule(rule):
idx_sep = "-"
# sanity checks
if len(rule) != 4:
raise SyntaxError("There can't be more than three columns per rule.")
if not rule[1]:
raise SyntaxError("There needs to be an index for every rule.")
if idx_sep in rule[1] and rule[2] not in [":", "::"]:
raise SyntaxError(
"The double index in only intended for split adjustments."
)
if rule[2] not in ["+", "=", ":", "::"]:
raise SyntaxError(
'The supported operations are either of ["+", "=", ":", "::"].'
)
# parse
rule_dict = {
"matchcql": None,
"matchidx": None,
"operation": None,
"splitidx": None,
"splitmode": None,
"replacecql": None,
}
rule_dict["matchcql"] = rule[0]
if idx_sep in rule[1]:
match_idx, split_idx = rule[1].split("-")
rule_dict["matchidx"] = int(match_idx)
rule_dict["splitidx"] = int(split_idx)
else:
rule_dict["matchidx"] = int(rule[1])
if rule[2] == "=":
rule_dict["operation"] = "repl"
elif rule[2] == "+":
rule_dict["operation"] = "merge"
elif rule[2] == ":":
rule_dict["operation"] = "split"
rule_dict["splitmode"] = "syl"
elif rule[2] == "::":
rule_dict["operation"] = "split"
rule_dict["splitmode"] = "char"
rule_dict["replacecql"] = rule[3]
return rule_dict
|
import pymysql.cursors
#faz a conexao com o banco de dados
conexao = pymysql.connect(host = 'localhost',
user = 'root',
password = '',
db = 'aula',
charset = 'utf8mb4',
cursorclass = pymysql.cursors.DictCursor)
with conexao.cursor() as cursor: # faz a conexao com o cursor do mysql
tabela = f'create table usuarios ({'nome varchar(50), senha varchar(50), nivel int not null, data varchar(10)'})'
cursor.execute(tabela) #execução do comando no banco de dados
conexao.commit() #gravação do comando no banco de dados
| import pymysql.cursors
#faz a conexao com o banco de dados
conexao = pymysql.connect(host = 'localhost',
user = 'root',
password = '',
db = 'aula',
charset = 'utf8mb4',
cursorclass = pymysql.cursors.DictCursor)
with conexao.cursor() as cursor: # faz a conexao com o cursor do mysql
tabela = f'create table usuarios ({"nome varchar(50), senha varchar(50), nivel int not null, data varchar(10)"})'
cursor.execute(tabela) #execução do comando no banco de dados
conexao.commit() #gravação do comando no banco de dados
|
import shutil
from typing import (
List,
Union,
)
from pathlib import Path
from datetime import datetime
from bigflow import commons
from bigflow.workflow import (
DEFAULT_EXECUTION_TIMEOUT_IN_SECONDS,
Workflow,
WorkflowJob
)
def clear_dags_output_dir(workdir: str) -> None:
dags_dir_path = get_dags_output_dir(workdir)
print("clearing dags_output_dir", str(dags_dir_path.resolve()))
shutil.rmtree(str(dags_dir_path.resolve()))
def secret_template(secret: str) -> str:
return f"secret.Secret(deploy_type='env', deploy_target='{secret}", secret="{secret.replace("_", "-")}', key='{secret}')"
def generate_dag_file(workdir: str,
image_version: str,
workflow: Workflow,
start_from: Union[datetime, str],
build_ver: str,
root_package_name: str) -> str:
start_from = _str_to_datetime(start_from)
print(f'start_from: {start_from}')
print(f'build_ver: {build_ver}')
print(f'image version: {image_version}')
dag_deployment_id = get_dag_deployment_id(workflow.workflow_id, start_from, build_ver)
dag_file_path = get_dags_output_dir(workdir) / (dag_deployment_id + '_dag.py')
start_date_as_str = repr(workflow.start_time_factory(start_from))
print(f'dag_file_path: {dag_file_path.resolve()}')
dag_chunks = []
dag_chunks.append("""
import datetime
from airflow import DAG
from airflow.contrib.operators import kubernetes_pod_operator
from airflow.contrib.kubernetes import secret
default_args = {{
'owner': 'airflow',
'depends_on_past': {depends_on_past},
'start_date': {start_date_as_str},
'email_on_failure': False,
'email_on_retry': False,
'execution_timeout': datetime.timedelta(seconds={execution_timeout_sec}),
}}
dag = DAG(
'{dag_id}',
default_args=default_args,
max_active_runs=1,
schedule_interval='{schedule_interval}'
)
""".format(dag_id=dag_deployment_id,
start_date_as_str=start_date_as_str,
schedule_interval=workflow.schedule_interval,
depends_on_past=workflow.depends_on_past,
execution_timeout_sec=DEFAULT_EXECUTION_TIMEOUT_IN_SECONDS))
def build_dag_operator(workflow_job: WorkflowJob, dependencies: List[WorkflowJob]) -> None:
job = workflow_job.job
job_var = "t" + str(job.id)
task_id = job.id.replace("_", "-")
execution_timeout_sec = commons.as_timedelta(
getattr(job, 'execution_timeout_sec', None)
or DEFAULT_EXECUTION_TIMEOUT_IN_SECONDS)
dag_chunks.append("""
{job_var} = kubernetes_pod_operator.KubernetesPodOperator(
task_id='{task_id}',
name='{task_id}',
cmds=['bf'],
arguments=['run', '--job', '{bf_job}', '--runtime', '{{{{ execution_date.strftime("%Y-%m-%d %H:%M:%S") }}}}', '--project-package', '{root_folder}', '--config', '{{{{var.value.env}}}}'],
namespace='default',
image='{docker_image}',
is_delete_operator_pod=True,
retries={retries},
retry_delay=datetime.timedelta(seconds={retry_delay}),
dag=dag,
secrets={secrets_definition},
execution_timeout={execution_timeout_sec!r})
""".format(job_var=job_var,
task_id=task_id,
docker_image=image_version,
bf_job=workflow.workflow_id+"."+job.id,
root_folder=root_package_name,
retries=job.retry_count if hasattr(job, 'retry_count') else 3,
retry_delay=job.retry_pause_sec if hasattr(job, 'retry_pause_sec') else 60,
secrets_definition=f'[{', '.join([secret_template(secret) for secret in workflow.secrets])}]',
execution_timeout_sec=execution_timeout_sec,
))
for d in dependencies:
up_job_var = "t" + str(d.job.id)
dag_chunks.append("{job_var}.set_upstream({up_job_var})".format(job_var=job_var, up_job_var=up_job_var))
workflow._call_on_graph_nodes(build_dag_operator)
dag_file_content = '\n'.join(dag_chunks) + '\n'
dag_file_path.write_text(dag_file_content)
return dag_file_path.as_posix()
def get_dag_deployment_id(workflow_name: str,
start_from: datetime,
build_ver: str) -> str:
return '{workflow_name}__v{ver}__{start_from}'.format(
workflow_name=workflow_name,
ver=build_ver.translate(str.maketrans(".-+", "___")),
start_from=_str_to_datetime(start_from).strftime('%Y_%m_%d_%H_%M_%S')
)
def get_dags_output_dir(workdir: str) -> Path:
dags_dir_path = Path(workdir) / '.dags'
if not dags_dir_path.exists():
dags_dir_path.mkdir()
return dags_dir_path
def _str_to_datetime(dt: Union[str, datetime]) -> datetime:
if isinstance(dt, datetime):
return dt
elif len(dt) <= 10:
return datetime.strptime(dt, "%Y-%m-%d")
else:
return datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
| import shutil
from typing import (
List,
Union,
)
from pathlib import Path
from datetime import datetime
from bigflow import commons
from bigflow.workflow import (
DEFAULT_EXECUTION_TIMEOUT_IN_SECONDS,
Workflow,
WorkflowJob
)
def clear_dags_output_dir(workdir: str) -> None:
dags_dir_path = get_dags_output_dir(workdir)
print("clearing dags_output_dir", str(dags_dir_path.resolve()))
shutil.rmtree(str(dags_dir_path.resolve()))
def secret_template(secret: str) -> str:
return f"secret.Secret(deploy_type='env', deploy_target='{secret}', secret='{secret.replace('_', '-')}', key='{secret}')"
def generate_dag_file(workdir: str,
image_version: str,
workflow: Workflow,
start_from: Union[datetime, str],
build_ver: str,
root_package_name: str) -> str:
start_from = _str_to_datetime(start_from)
print(f'start_from: {start_from}')
print(f'build_ver: {build_ver}')
print(f'image version: {image_version}')
dag_deployment_id = get_dag_deployment_id(workflow.workflow_id, start_from, build_ver)
dag_file_path = get_dags_output_dir(workdir) / (dag_deployment_id + '_dag.py')
start_date_as_str = repr(workflow.start_time_factory(start_from))
print(f'dag_file_path: {dag_file_path.resolve()}')
dag_chunks = []
dag_chunks.append("""
import datetime
from airflow import DAG
from airflow.contrib.operators import kubernetes_pod_operator
from airflow.contrib.kubernetes import secret
default_args = {{
'owner': 'airflow',
'depends_on_past': {depends_on_past},
'start_date': {start_date_as_str},
'email_on_failure': False,
'email_on_retry': False,
'execution_timeout': datetime.timedelta(seconds={execution_timeout_sec}),
}}
dag = DAG(
'{dag_id}',
default_args=default_args,
max_active_runs=1,
schedule_interval='{schedule_interval}'
)
""".format(dag_id=dag_deployment_id,
start_date_as_str=start_date_as_str,
schedule_interval=workflow.schedule_interval,
depends_on_past=workflow.depends_on_past,
execution_timeout_sec=DEFAULT_EXECUTION_TIMEOUT_IN_SECONDS))
def build_dag_operator(workflow_job: WorkflowJob, dependencies: List[WorkflowJob]) -> None:
job = workflow_job.job
job_var = "t" + str(job.id)
task_id = job.id.replace("_", "-")
execution_timeout_sec = commons.as_timedelta(
getattr(job, 'execution_timeout_sec', None)
or DEFAULT_EXECUTION_TIMEOUT_IN_SECONDS)
dag_chunks.append("""
{job_var} = kubernetes_pod_operator.KubernetesPodOperator(
task_id='{task_id}',
name='{task_id}',
cmds=['bf'],
arguments=['run', '--job', '{bf_job}', '--runtime', '{{{{ execution_date.strftime("%Y-%m-%d %H:%M:%S") }}}}', '--project-package', '{root_folder}', '--config', '{{{{var.value.env}}}}'],
namespace='default',
image='{docker_image}',
is_delete_operator_pod=True,
retries={retries},
retry_delay=datetime.timedelta(seconds={retry_delay}),
dag=dag,
secrets={secrets_definition},
execution_timeout={execution_timeout_sec!r})
""".format(job_var=job_var,
task_id=task_id,
docker_image=image_version,
bf_job=workflow.workflow_id+"."+job.id,
root_folder=root_package_name,
retries=job.retry_count if hasattr(job, 'retry_count') else 3,
retry_delay=job.retry_pause_sec if hasattr(job, 'retry_pause_sec') else 60,
secrets_definition=f'[{", ".join([secret_template(secret) for secret in workflow.secrets])}]',
execution_timeout_sec=execution_timeout_sec,
))
for d in dependencies:
up_job_var = "t" + str(d.job.id)
dag_chunks.append("{job_var}.set_upstream({up_job_var})".format(job_var=job_var, up_job_var=up_job_var))
workflow._call_on_graph_nodes(build_dag_operator)
dag_file_content = '\n'.join(dag_chunks) + '\n'
dag_file_path.write_text(dag_file_content)
return dag_file_path.as_posix()
def get_dag_deployment_id(workflow_name: str,
start_from: datetime,
build_ver: str) -> str:
return '{workflow_name}__v{ver}__{start_from}'.format(
workflow_name=workflow_name,
ver=build_ver.translate(str.maketrans(".-+", "___")),
start_from=_str_to_datetime(start_from).strftime('%Y_%m_%d_%H_%M_%S')
)
def get_dags_output_dir(workdir: str) -> Path:
dags_dir_path = Path(workdir) / '.dags'
if not dags_dir_path.exists():
dags_dir_path.mkdir()
return dags_dir_path
def _str_to_datetime(dt: Union[str, datetime]) -> datetime:
if isinstance(dt, datetime):
return dt
elif len(dt) <= 10:
return datetime.strptime(dt, "%Y-%m-%d")
else:
return datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
|
"""
Search endpoint for annotations
"""
from imc.endpoints import IMCEndpoint
from imc.models import AnnotationSearch, allowed_item_types, codelists
from restapi import decorators
from restapi.connectors import neo4j
from restapi.exceptions import BadRequest, NotFound, ServerError
from restapi.models import fields
from restapi.utilities.logs import log
class SearchAnnotations(IMCEndpoint):
@decorators.auth.require()
@decorators.use_kwargs(
{"filtering": fields.Nested(AnnotationSearch, data_key="filter")}
)
@decorators.endpoint(
path="/annotations/search",
summary="Search for annotations",
description="Search for annotations",
responses={200: "A list of annotation matching search criteria."},
)
def post(self, filtering=None):
self.graph = neo4j.get_instance()
filters = []
starters = []
projections = []
order_by = ""
if filtering:
anno_type = filtering.get("annotation_type")
filters.append(f"WHERE anno.annotation_type='{anno_type}'")
# add filter for processed content with COMPLETE status
filters.append(
"MATCH (creation:Creation)<-[:CREATION]-(:Item)-[:CONTENT_SOURCE]->(content:ContentStage) "
+ "WHERE content.status = 'COMPLETED' "
)
filters.append(
"MATCH (title:Title)<-[:HAS_TITLE]-(creation)<-[:CREATION]-(i:Item)<-[:SOURCE]-(anno)"
)
projections.append(
"collect(distinct creation{.*, type:i.item_type, titles }) AS creations"
)
if anno_type == "TAG":
# look for geo distance filter
geo_distance = filtering.get("geo_distance")
if geo_distance is not None:
distance = geo_distance["distance"]
location = geo_distance["location"]
starters.append(
"WITH point({{longitude: {lon}, latitude: {lat} }}) as cityPosition, "
"{dist} as distanceInMeters".format(
lon=location["longitude"],
lat=location["latitude"],
dist=distance,
)
)
filters.append(
"MATCH (anno)-[:HAS_BODY]-(body:ResourceBody) "
"WHERE body.spatial IS NOT NULL AND "
"distance(cityPosition, point({latitude:body.spatial[0], longitude:body.spatial[1]})) < distanceInMeters"
)
projections.append(
"distance(cityPosition, point({longitude:body.spatial[0],latitude:body.spatial[1]})) as distance"
)
order_by = "ORDER BY distance"
if creation := filtering.get("creation"):
if c_match := creation.get("match"):
if term := c_match.get("term"):
term = self.graph.sanitize_input(term)
multi_match = []
multi_match_where = []
multi_match_query = ""
fields = c_match.get("fields")
if term is not None and (fields is None or len(fields) == 0):
raise BadRequest("Match term fields cannot be empty")
if fields is None:
fields = []
multi_match_fields = []
multi_optional_match = []
for f in fields:
if not term:
# catch '*'
break
if f == "title":
multi_match.append(
"MATCH (creation)-[:HAS_TITLE]->(t:Title)"
)
multi_match_fields.append("t")
multi_match_where.append(f"t.text =~ '(?i).*{term}.*'")
elif f == "description":
multi_match.append(
"OPTIONAL MATCH (creation)-[:HAS_DESCRIPTION]->(d:Description)"
)
multi_match_fields.append("d")
multi_match_where.append(f"d.text =~ '(?i).*{term}.*'")
elif f == "keyword":
multi_optional_match.append(
"OPTIONAL MATCH (creation)-[:HAS_KEYWORD]->(k:Keyword)"
)
multi_match_fields.append("k")
multi_match_where.append(f"k.term =~ '(?i){term}'")
elif f == "contributor":
multi_optional_match.append(
"OPTIONAL MATCH (creation)-[:CONTRIBUTED_BY]->(a:Agent)"
)
multi_match_fields.append("a")
multi_match_where.append(
"ANY(item in a.names where item =~ '(?i).*{term}.*')".format(
term=term
)
)
else:
# should never be reached
raise ServerError("Unexpected field type")
if len(multi_match) > 0:
multi_match_query = (
" ".join(multi_match)
+ " "
+ " ".join(multi_optional_match)
+ " WITH creation, cityPosition, title, i, body, "
+ ", ".join(multi_match_fields)
+ " WHERE "
+ " OR ".join(multi_match_where)
)
filters.append(multi_match_query)
c_filter = creation.get("filtering")
# TYPE
c_types = c_filter.get("type")
if c_types and isinstance(c_types, str):
c_types = [c_types]
if "all" not in c_types or set(c_types) == {
i for i in allowed_item_types if i != "all"
}:
where_c_types = []
for c_type in c_types:
where_c_types.append(f"i.item_type =~ '(?i){c_type}'")
filters.append(f"MATCH (i) WHERE {" or ".join(where_c_types)}")
# PROVIDER
c_provider = c_filter.get("provider")
if c_provider is not None:
filters.append(
"MATCH (creation)-[:RECORD_SOURCE]->(:RecordSource)-[:PROVIDED_BY]->(p:Provider)"
" WHERE p.identifier='{provider}'".format(
provider=c_provider.strip()
)
)
# IPR STATUS
if c_iprstatus := c_filter.get("iprstatus"):
if codelists.fromCode(c_iprstatus, codelists.RIGHTS_STATUS) is None:
raise NotFound(f"Invalid IPR status code for: {c_iprstatus}")
filters.append(
"MATCH (creation) WHERE creation.rights_status = '{iprstatus}'".format(
iprstatus=c_iprstatus
)
)
# PRODUCTION YEAR
c_year_from = c_filter.get("yearfrom")
c_year_to = c_filter.get("yearto")
if c_year_from is not None or c_year_to is not None:
# set defaults if year is missing
c_year_from = "1890" if c_year_from is None else str(c_year_from)
c_year_to = "1999" if c_year_to is None else str(c_year_to)
date_clauses = []
if "video" in c_types or "all" in c_types:
date_clauses.append(
"ANY(item IN creation.production_years WHERE item >= '{yfrom}') "
"AND ANY(item IN creation.production_years WHERE item <= '{yto}')".format(
yfrom=c_year_from, yto=c_year_to
)
)
if "image" in c_types or "3d-model" in c_types or "all" in c_types:
date_clauses.append(
"ANY(item IN creation.date_created WHERE substring(item, 0, 4) >= '{yfrom}') "
"AND ANY(item IN creation.date_created WHERE substring(item, 0 , 4) <= '{yto}')".format(
yfrom=c_year_from, yto=c_year_to
)
)
filters.append(
"MATCH (creation) WHERE {clauses}".format(
clauses=" or ".join(date_clauses)
)
)
# ANNOTATED TERMS
terms = c_filter.get("terms")
if terms:
term_clauses = []
iris = [term["iri"] for term in terms if "iri" in term]
if iris:
term_clauses.append(f"term.iri IN {iris}")
free_terms = [
term["label"]
for term in terms
if "iri" not in term and "label" in term
]
if free_terms:
term_clauses.append(f"term.value IN {free_terms}")
if term_clauses:
filters.append(
"MATCH (i)<-[:SOURCE]-(anno2)-[:HAS_BODY]-(term) WHERE {clauses}".format(
clauses=" or ".join(term_clauses)
)
)
query = (
"{starters} MATCH (anno:Annotation)"
" {filters} "
"WITH body, i, cityPosition, creation, collect(distinct title) AS titles "
"RETURN DISTINCT body, {projections} {orderBy}".format(
starters=" ".join(starters),
filters=" ".join(filters),
projections=", ".join(projections),
orderBy=order_by,
)
)
# log.debug(query)
data = []
result = self.graph.cypher(query)
for row in result:
# AD-HOC implementation at the moment
body = self.graph.ResourceBody.inflate(row[0])
res = {
"iri": body.iri,
"name": body.name,
"spatial": body.spatial,
"sources": [],
}
for source in row[1]:
creation = {
"uuid": source["uuid"],
"external_ids": source["external_ids"],
"rights_status": source["rights_status"],
"type": source["type"],
}
# PRODUCTION YEAR: get the first year in the array
if "production_years" in source:
creation["year"] = source["production_years"][0]
elif "date_created" in source:
creation["year"] = source["date_created"][0]
# TITLE
if "identifying_title" in source:
creation["title"] = source["identifying_title"]
elif "titles" in source and len(source["titles"]) > 0:
# at the moment get the first always!
title_node = self.graph.Title.inflate(source["titles"][0])
creation["title"] = title_node.text
res["sources"].append(creation)
res["distance"] = row[2]
data.append(res)
return self.response(data)
| """
Search endpoint for annotations
"""
from imc.endpoints import IMCEndpoint
from imc.models import AnnotationSearch, allowed_item_types, codelists
from restapi import decorators
from restapi.connectors import neo4j
from restapi.exceptions import BadRequest, NotFound, ServerError
from restapi.models import fields
from restapi.utilities.logs import log
class SearchAnnotations(IMCEndpoint):
    """Search endpoint: builds and runs a Cypher query from the request's filter payload."""

    @decorators.auth.require()
    @decorators.use_kwargs(
        {"filtering": fields.Nested(AnnotationSearch, data_key="filter")}
    )
    @decorators.endpoint(
        path="/annotations/search",
        summary="Search for annotations",
        description="Search for annotations",
        responses={200: "A list of annotation matching search criteria."},
    )
    def post(self, filtering=None):
        """
        Search annotations and return, for each matching annotation body, its
        IRI/name/spatial data plus the creations ("sources") it annotates.

        The Cypher query is assembled from three string lists (starters,
        filters, projections) plus an optional ORDER BY clause, then executed
        and its rows inflated into plain dicts.

        NOTE(review): the query is composed via string interpolation; only the
        free-text match term is passed through sanitize_input. Presumably the
        other values are schema-validated upstream (AnnotationSearch) -- verify
        before exposing new filter fields.
        """
        self.graph = neo4j.get_instance()
        filters = []
        starters = []
        projections = []
        order_by = ""
        if filtering:
            anno_type = filtering.get("annotation_type")
            filters.append(f"WHERE anno.annotation_type='{anno_type}'")
            # add filter for processed content with COMPLETE status
            filters.append(
                "MATCH (creation:Creation)<-[:CREATION]-(:Item)-[:CONTENT_SOURCE]->(content:ContentStage) "
                + "WHERE content.status = 'COMPLETED' "
            )
            filters.append(
                "MATCH (title:Title)<-[:HAS_TITLE]-(creation)<-[:CREATION]-(i:Item)<-[:SOURCE]-(anno)"
            )
            projections.append(
                "collect(distinct creation{.*, type:i.item_type, titles }) AS creations"
            )
            if anno_type == "TAG":
                # look for geo distance filter
                geo_distance = filtering.get("geo_distance")
                if geo_distance is not None:
                    distance = geo_distance["distance"]
                    location = geo_distance["location"]
                    # Bind the reference point and radius once, up front.
                    starters.append(
                        "WITH point({{longitude: {lon}, latitude: {lat} }}) as cityPosition, "
                        "{dist} as distanceInMeters".format(
                            lon=location["longitude"],
                            lat=location["latitude"],
                            dist=distance,
                        )
                    )
                    filters.append(
                        "MATCH (anno)-[:HAS_BODY]-(body:ResourceBody) "
                        "WHERE body.spatial IS NOT NULL AND "
                        "distance(cityPosition, point({latitude:body.spatial[0], longitude:body.spatial[1]})) < distanceInMeters"
                    )
                    projections.append(
                        "distance(cityPosition, point({longitude:body.spatial[0],latitude:body.spatial[1]})) as distance"
                    )
                    order_by = "ORDER BY distance"
            if creation := filtering.get("creation"):
                if c_match := creation.get("match"):
                    if term := c_match.get("term"):
                        term = self.graph.sanitize_input(term)
                    multi_match = []
                    multi_match_where = []
                    multi_match_query = ""
                    # NOTE(review): this local shadows the module-level
                    # `fields` import (restapi.models); harmless at runtime
                    # here, but confusing to readers.
                    fields = c_match.get("fields")
                    if term is not None and (fields is None or len(fields) == 0):
                        raise BadRequest("Match term fields cannot be empty")
                    if fields is None:
                        fields = []
                    multi_match_fields = []
                    multi_optional_match = []
                    for f in fields:
                        if not term:
                            # catch '*'
                            break
                        if f == "title":
                            multi_match.append(
                                "MATCH (creation)-[:HAS_TITLE]->(t:Title)"
                            )
                            multi_match_fields.append("t")
                            multi_match_where.append(f"t.text =~ '(?i).*{term}.*'")
                        elif f == "description":
                            # NOTE(review): appended to multi_match (not
                            # multi_optional_match) despite being an OPTIONAL
                            # MATCH -- confirm this asymmetry is intentional.
                            multi_match.append(
                                "OPTIONAL MATCH (creation)-[:HAS_DESCRIPTION]->(d:Description)"
                            )
                            multi_match_fields.append("d")
                            multi_match_where.append(f"d.text =~ '(?i).*{term}.*'")
                        elif f == "keyword":
                            multi_optional_match.append(
                                "OPTIONAL MATCH (creation)-[:HAS_KEYWORD]->(k:Keyword)"
                            )
                            multi_match_fields.append("k")
                            multi_match_where.append(f"k.term =~ '(?i){term}'")
                        elif f == "contributor":
                            multi_optional_match.append(
                                "OPTIONAL MATCH (creation)-[:CONTRIBUTED_BY]->(a:Agent)"
                            )
                            multi_match_fields.append("a")
                            multi_match_where.append(
                                "ANY(item in a.names where item =~ '(?i).*{term}.*')".format(
                                    term=term
                                )
                            )
                        else:
                            # should never be reached
                            raise ServerError("Unexpected field type")
                    if len(multi_match) > 0:
                        multi_match_query = (
                            " ".join(multi_match)
                            + " "
                            + " ".join(multi_optional_match)
                            + " WITH creation, cityPosition, title, i, body, "
                            + ", ".join(multi_match_fields)
                            + " WHERE "
                            + " OR ".join(multi_match_where)
                        )
                        filters.append(multi_match_query)
                # NOTE(review): assumes "filtering" is always present under
                # creation (c_filter would be None otherwise) -- confirm the
                # AnnotationSearch schema guarantees it.
                c_filter = creation.get("filtering")
                # TYPE
                c_types = c_filter.get("type")
                if c_types and isinstance(c_types, str):
                    c_types = [c_types]
                # NOTE(review): c_types is used in membership tests here and in
                # the year filter below; a missing "type" key (None) would
                # raise TypeError -- presumably the schema provides a default.
                if "all" not in c_types or set(c_types) == {
                    i for i in allowed_item_types if i != "all"
                }:
                    where_c_types = []
                    for c_type in c_types:
                        where_c_types.append(f"i.item_type =~ '(?i){c_type}'")
                    filters.append(f"MATCH (i) WHERE {' or '.join(where_c_types)}")
                # PROVIDER
                c_provider = c_filter.get("provider")
                if c_provider is not None:
                    filters.append(
                        "MATCH (creation)-[:RECORD_SOURCE]->(:RecordSource)-[:PROVIDED_BY]->(p:Provider)"
                        " WHERE p.identifier='{provider}'".format(
                            provider=c_provider.strip()
                        )
                    )
                # IPR STATUS
                if c_iprstatus := c_filter.get("iprstatus"):
                    if codelists.fromCode(c_iprstatus, codelists.RIGHTS_STATUS) is None:
                        raise NotFound(f"Invalid IPR status code for: {c_iprstatus}")
                    filters.append(
                        "MATCH (creation) WHERE creation.rights_status = '{iprstatus}'".format(
                            iprstatus=c_iprstatus
                        )
                    )
                # PRODUCTION YEAR
                c_year_from = c_filter.get("yearfrom")
                c_year_to = c_filter.get("yearto")
                if c_year_from is not None or c_year_to is not None:
                    # set defaults if year is missing
                    c_year_from = "1890" if c_year_from is None else str(c_year_from)
                    c_year_to = "1999" if c_year_to is None else str(c_year_to)
                    date_clauses = []
                    # Videos carry production_years; images/3d-models carry
                    # date_created strings (year = first 4 characters).
                    if "video" in c_types or "all" in c_types:
                        date_clauses.append(
                            "ANY(item IN creation.production_years WHERE item >= '{yfrom}') "
                            "AND ANY(item IN creation.production_years WHERE item <= '{yto}')".format(
                                yfrom=c_year_from, yto=c_year_to
                            )
                        )
                    if "image" in c_types or "3d-model" in c_types or "all" in c_types:
                        date_clauses.append(
                            "ANY(item IN creation.date_created WHERE substring(item, 0, 4) >= '{yfrom}') "
                            "AND ANY(item IN creation.date_created WHERE substring(item, 0 , 4) <= '{yto}')".format(
                                yfrom=c_year_from, yto=c_year_to
                            )
                        )
                    filters.append(
                        "MATCH (creation) WHERE {clauses}".format(
                            clauses=" or ".join(date_clauses)
                        )
                    )
                # ANNOTATED TERMS
                terms = c_filter.get("terms")
                if terms:
                    term_clauses = []
                    iris = [term["iri"] for term in terms if "iri" in term]
                    if iris:
                        term_clauses.append(f"term.iri IN {iris}")
                    # Terms without an IRI are free-text labels.
                    free_terms = [
                        term["label"]
                        for term in terms
                        if "iri" not in term and "label" in term
                    ]
                    if free_terms:
                        term_clauses.append(f"term.value IN {free_terms}")
                    if term_clauses:
                        filters.append(
                            "MATCH (i)<-[:SOURCE]-(anno2)-[:HAS_BODY]-(term) WHERE {clauses}".format(
                                clauses=" or ".join(term_clauses)
                            )
                        )
        # NOTE(review): the WITH clause references body/cityPosition, which are
        # only bound by the TAG geo_distance branch above -- this appears to
        # assume a TAG search with a geo filter; confirm other paths are valid.
        query = (
            "{starters} MATCH (anno:Annotation)"
            " {filters} "
            "WITH body, i, cityPosition, creation, collect(distinct title) AS titles "
            "RETURN DISTINCT body, {projections} {orderBy}".format(
                starters=" ".join(starters),
                filters=" ".join(filters),
                projections=", ".join(projections),
                orderBy=order_by,
            )
        )
        # log.debug(query)
        data = []
        result = self.graph.cypher(query)
        for row in result:
            # AD-HOC implementation at the moment
            # Row layout: [0] ResourceBody node, [1] collected creations,
            # [2] distance (from the geo projection).
            body = self.graph.ResourceBody.inflate(row[0])
            res = {
                "iri": body.iri,
                "name": body.name,
                "spatial": body.spatial,
                "sources": [],
            }
            for source in row[1]:
                creation = {
                    "uuid": source["uuid"],
                    "external_ids": source["external_ids"],
                    "rights_status": source["rights_status"],
                    "type": source["type"],
                }
                # PRODUCTION YEAR: get the first year in the array
                if "production_years" in source:
                    creation["year"] = source["production_years"][0]
                elif "date_created" in source:
                    creation["year"] = source["date_created"][0]
                # TITLE
                if "identifying_title" in source:
                    creation["title"] = source["identifying_title"]
                elif "titles" in source and len(source["titles"]) > 0:
                    # at the moment get the first always!
                    title_node = self.graph.Title.inflate(source["titles"][0])
                    creation["title"] = title_node.text
                res["sources"].append(creation)
            # NOTE(review): row[2] only exists when the distance projection was
            # added (geo_distance filter present) -- TODO confirm.
            res["distance"] = row[2]
            data.append(res)
        return self.response(data)
|
import time
from typing import Any, Callable, Dict, List, Optional
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from captum._utils.models.linear_model.model import LinearModel
def l2_loss(x1, x2, weights=None):
    """Return half the (optionally weighted) squared error between x1 and x2.

    Without weights this is mean(|x1 - x2|^2) / 2; with weights, each squared
    difference is scaled by the L1-normalized weight vector and summed, then
    halved.
    """
    sq_diff = (x1 - x2) ** 2
    if weights is None:
        return sq_diff.mean() / 2.0
    normalized_weights = weights / weights.norm(p=1)
    return (normalized_weights * sq_diff).sum() / 2.0
def sgd_train_linear_model(
    model: LinearModel,
    dataloader: DataLoader,
    construct_kwargs: Dict[str, Any],
    max_epoch: int = 100,
    reduce_lr: bool = True,
    initial_lr: float = 0.01,
    alpha: float = 1.0,
    loss_fn: Callable = l2_loss,
    reg_term: Optional[int] = 1,
    patience: int = 10,
    threshold: float = 1e-4,
    running_loss_window: Optional[int] = None,
    device: Optional[str] = None,
    init_scheme: str = "zeros",
    debug: bool = False,
) -> Dict[str, float]:
    r"""
    Trains a linear model with SGD. This will continue to iterate your
    dataloader until we converged to a solution or alternatively until we have
    exhausted `max_epoch`.

    Convergence is defined by the loss not changing by `threshold` amount for
    `patience` number of iterations.

    Args:
        model
            The model to train
        dataloader
            The data to train it with. We will assume the dataloader produces
            either pairs or triples of the form (x, y) or (x, y, w). Where x and
            y are typical pairs for supervised learning and w is a weight
            vector.
            We will call `model._construct_model_params` with construct_kwargs
            and the input features set to `x.shape[1]` (`x.shape[0]` corresponds
            to the batch size). We assume that `len(x.shape) == 2`, i.e. the
            tensor is flat. The number of output features will be set to
            y.shape[1] or 1 (if `len(y.shape) == 1`); we require `len(y.shape)
            <= 2`.
        max_epoch
            The maximum number of epochs to exhaust
        reduce_lr
            Whether or not to reduce the learning rate as iterations progress.
            Halves the learning rate when the training loss does not move. This
            uses torch.optim.lr_scheduler.ReduceLROnPlateau and uses the
            parameters `patience` and `threshold`
        initial_lr
            The initial learning rate to use.
        alpha
            A constant for the regularization term.
        loss_fn
            The loss to optimise for. This must accept three parameters:
            x1 (predicted), x2 (labels) and a weight vector
        reg_term
            Regularization is defined by the `reg_term` norm of the weights.
            Please use `None` if you do not wish to use regularization.
        patience
            Defines the number of iterations in a row the loss must remain
            within `threshold` in order to be classified as converged.
        threshold
            Threshold for convergence detection.
        running_loss_window
            Used to report the training loss once we have finished training and
            to determine when we have converged (along with reducing the
            learning rate).
            The reported training loss will take the last `running_loss_window`
            iterations and average them.
            If `None` we will approximate this to be the number of examples in
            an epoch.
        init_scheme
            Initialization to use prior to training the linear model.
        device
            The device to send the model and data to. If None then no `.to` call
            will be used.
        debug
            Whether to print the loss, learning rate per iteration

    Returns:
        This will return the final training loss (averaged with
        `running_loss_window`), along with timing and iteration counters.
    """
    loss_window: List[torch.Tensor] = []
    min_avg_loss = None
    convergence_counter = 0
    converged = False

    def get_point(datapoint):
        # Unpack an (x, y) or (x, y, w) batch and move it to `device` if set.
        if len(datapoint) == 2:
            x, y = datapoint
            w = None
        else:
            x, y, w = datapoint

        if device is not None:
            x = x.to(device)
            y = y.to(device)
            if w is not None:
                w = w.to(device)

        return x, y, w

    # get a point and construct the model
    data_iter = iter(dataloader)
    x, y, w = get_point(next(data_iter))

    model._construct_model_params(
        in_features=x.shape[1],
        out_features=y.shape[1] if len(y.shape) == 2 else 1,
        **construct_kwargs,
    )
    model.train()

    assert model.linear is not None

    if init_scheme is not None:
        assert init_scheme in ["xavier", "zeros"]

        with torch.no_grad():
            if init_scheme == "xavier":
                torch.nn.init.xavier_uniform_(model.linear.weight)
            else:
                model.linear.weight.zero_()

            if model.linear.bias is not None:
                model.linear.bias.zero_()

    optim = torch.optim.SGD(model.parameters(), lr=initial_lr)

    # Fix: `scheduler` was referenced unconditionally in the training loop,
    # raising NameError when reduce_lr=False. Default it to None instead.
    scheduler = None
    if reduce_lr:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optim, factor=0.5, patience=patience, threshold=threshold
        )

    t1 = time.time()
    epoch = 0
    i = 0
    while epoch < max_epoch:
        while True:  # for x, y, w in dataloader
            if running_loss_window is None:
                running_loss_window = x.shape[0] * len(dataloader)

            y = y.view(x.shape[0], -1)
            if w is not None:
                w = w.view(x.shape[0], -1)

            i += 1

            out = model(x)

            loss = loss_fn(y, out, w)
            if reg_term is not None:
                reg = torch.norm(model.linear.weight, p=reg_term)
                loss += reg.sum() * alpha

            # Maintain a sliding window of recent losses for convergence checks.
            if len(loss_window) >= running_loss_window:
                loss_window = loss_window[1:]
            loss_window.append(loss.clone().detach())
            assert len(loss_window) <= running_loss_window

            average_loss = torch.mean(torch.stack(loss_window))
            if min_avg_loss is not None:
                # if we haven't improved by at least `threshold`
                if average_loss > min_avg_loss or torch.isclose(
                    min_avg_loss, average_loss, atol=threshold
                ):
                    convergence_counter += 1
                    if convergence_counter >= patience:
                        converged = True
                        break
                else:
                    convergence_counter = 0

            if min_avg_loss is None or min_avg_loss >= average_loss:
                min_avg_loss = average_loss.clone()

            if debug:
                # Fix: use single quotes inside the f-string (double quotes
                # here are a SyntaxError before Python 3.12) and add the
                # missing f-prefix on the second half so the placeholders are
                # actually interpolated.
                print(
                    f"lr={optim.param_groups[0]['lr']}, Loss={loss},"
                    f"Aloss={average_loss}, min_avg_loss={min_avg_loss}"
                )

            loss.backward()
            optim.step()
            model.zero_grad()
            if scheduler is not None:
                scheduler.step(average_loss)

            temp = next(data_iter, None)
            if temp is None:
                break
            x, y, w = get_point(temp)

        if converged:
            break

        epoch += 1
        data_iter = iter(dataloader)
        x, y, w = get_point(next(data_iter))

    t2 = time.time()
    return {
        "train_time": t2 - t1,
        "train_loss": torch.mean(torch.stack(loss_window)).item(),
        "train_iter": i,
        "train_epoch": epoch,
    }
class NormLayer(nn.Module):
    """Standardizes inputs: (x - mean) / (std + eps).

    `eps` guards against division by zero; `n` is accepted for interface
    compatibility but unused.
    """

    def __init__(self, mean, std, n=None, eps=1e-8) -> None:
        super().__init__()
        self.mean = mean
        self.std = std
        self.eps = eps

    def forward(self, x):
        shifted = x - self.mean
        return shifted / (self.std + self.eps)
def sklearn_train_linear_model(
    model: LinearModel,
    dataloader: DataLoader,
    construct_kwargs: Dict[str, Any],
    sklearn_trainer: str = "Lasso",
    norm_input: bool = False,
    **fit_kwargs,
):
    r"""
    Alternative method to train with sklearn. This does introduce some slight
    overhead as we convert the tensors to numpy and then convert the resulting
    trained model to a `LinearModel` object. However, this conversion
    should be negligible.

    Please note that this assumes:

    0. You have sklearn and numpy installed
    1. The dataset can fit into memory

    Args:
        model
            The model to train.
        dataloader
            The data to use. This will be exhausted and converted to numpy
            arrays. Therefore please do not feed an infinite dataloader.
        norm_input
            Whether or not to normalize the input
        sklearn_trainer
            The sklearn model to use to train the model. Please refer to
            sklearn.linear_model for a list of modules to use.
        construct_kwargs
            Additional arguments provided to the `sklearn_trainer` constructor
        fit_kwargs
            Other arguments to send to `sklearn_trainer`'s `.fit` method

    Returns:
        A dict with key "train_time" holding the sklearn fit duration.
    """
    from functools import reduce

    try:
        import numpy as np
    except ImportError:
        raise ValueError("numpy is not available. Please install numpy.")

    try:
        import sklearn
        import sklearn.linear_model
        import sklearn.svm
    except ImportError:
        raise ValueError("sklearn is not available. Please install sklearn >= 0.23")

    # Fix: the second half of this message was a dangling string literal (a
    # no-op expression statement after the assert), silently truncating the
    # assertion message. Join both parts into the assert's message argument.
    assert sklearn.__version__ >= "0.23.0", (
        "Must have sklearn version 0.23.0 or higher to use "
        "sample_weight in Lasso regression."
    )

    num_batches = 0
    xs, ys, ws = [], [], []
    for data in dataloader:
        if len(data) == 3:
            x, y, w = data
        else:
            assert len(data) == 2
            x, y = data
            w = None

        xs.append(x.cpu().numpy())
        ys.append(y.cpu().numpy())
        if w is not None:
            ws.append(w.cpu().numpy())

        num_batches += 1

    x = np.concatenate(xs, axis=0)
    y = np.concatenate(ys, axis=0)
    if len(ws) > 0:
        w = np.concatenate(ws, axis=0)
    else:
        w = None

    if norm_input:
        mean, std = x.mean(0), x.std(0)
        x -= mean
        x /= std

    t1 = time.time()
    # Resolve e.g. "linear_model.Lasso" attribute-by-attribute from `sklearn`.
    sklearn_model = reduce(
        lambda val, el: getattr(val, el), [sklearn] + sklearn_trainer.split(".")
    )(**construct_kwargs)
    sklearn_model.fit(x, y, sample_weight=w, **fit_kwargs)
    t2 = time.time()

    # Convert weights to pytorch
    num_outputs = 1 if len(y.shape) == 1 else y.shape[1]
    weight_values = torch.FloatTensor(sklearn_model.coef_)  # type: ignore
    bias_values = torch.FloatTensor([sklearn_model.intercept_])  # type: ignore
    model._construct_model_params(
        norm_type=None,
        weight_values=weight_values.view(num_outputs, -1),
        bias_value=bias_values.squeeze().unsqueeze(0),
    )

    if norm_input:
        model.norm = NormLayer(mean, std)

    return {"train_time": t2 - t1}
| import time
from typing import Any, Callable, Dict, List, Optional
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from captum._utils.models.linear_model.model import LinearModel
def l2_loss(x1, x2, weights=None):
    """Return half the (optionally weighted) squared error between x1 and x2.

    Without weights this is mean(|x1 - x2|^2) / 2; with weights, each squared
    difference is scaled by the L1-normalized weight vector and summed, then
    halved.
    """
    sq_diff = (x1 - x2) ** 2
    if weights is None:
        return sq_diff.mean() / 2.0
    normalized_weights = weights / weights.norm(p=1)
    return (normalized_weights * sq_diff).sum() / 2.0
def sgd_train_linear_model(
    model: LinearModel,
    dataloader: DataLoader,
    construct_kwargs: Dict[str, Any],
    max_epoch: int = 100,
    reduce_lr: bool = True,
    initial_lr: float = 0.01,
    alpha: float = 1.0,
    loss_fn: Callable = l2_loss,
    reg_term: Optional[int] = 1,
    patience: int = 10,
    threshold: float = 1e-4,
    running_loss_window: Optional[int] = None,
    device: Optional[str] = None,
    init_scheme: str = "zeros",
    debug: bool = False,
) -> Dict[str, float]:
    r"""
    Trains a linear model with SGD. This will continue to iterate your
    dataloader until we converged to a solution or alternatively until we have
    exhausted `max_epoch`.

    Convergence is defined by the loss not changing by `threshold` amount for
    `patience` number of iterations.

    Args:
        model
            The model to train
        dataloader
            The data to train it with. We will assume the dataloader produces
            either pairs or triples of the form (x, y) or (x, y, w). Where x and
            y are typical pairs for supervised learning and w is a weight
            vector.
            We will call `model._construct_model_params` with construct_kwargs
            and the input features set to `x.shape[1]` (`x.shape[0]` corresponds
            to the batch size). We assume that `len(x.shape) == 2`, i.e. the
            tensor is flat. The number of output features will be set to
            y.shape[1] or 1 (if `len(y.shape) == 1`); we require `len(y.shape)
            <= 2`.
        max_epoch
            The maximum number of epochs to exhaust
        reduce_lr
            Whether or not to reduce the learning rate as iterations progress.
            Halves the learning rate when the training loss does not move. This
            uses torch.optim.lr_scheduler.ReduceLROnPlateau and uses the
            parameters `patience` and `threshold`
        initial_lr
            The initial learning rate to use.
        alpha
            A constant for the regularization term.
        loss_fn
            The loss to optimise for. This must accept three parameters:
            x1 (predicted), x2 (labels) and a weight vector
        reg_term
            Regularization is defined by the `reg_term` norm of the weights.
            Please use `None` if you do not wish to use regularization.
        patience
            Defines the number of iterations in a row the loss must remain
            within `threshold` in order to be classified as converged.
        threshold
            Threshold for convergence detection.
        running_loss_window
            Used to report the training loss once we have finished training and
            to determine when we have converged (along with reducing the
            learning rate).
            The reported training loss will take the last `running_loss_window`
            iterations and average them.
            If `None` we will approximate this to be the number of examples in
            an epoch.
        init_scheme
            Initialization to use prior to training the linear model.
        device
            The device to send the model and data to. If None then no `.to` call
            will be used.
        debug
            Whether to print the loss, learning rate per iteration

    Returns:
        This will return the final training loss (averaged with
        `running_loss_window`), along with timing and iteration counters.
    """
    loss_window: List[torch.Tensor] = []
    min_avg_loss = None
    convergence_counter = 0
    converged = False

    def get_point(datapoint):
        # Unpack an (x, y) or (x, y, w) batch and move it to `device` if set.
        if len(datapoint) == 2:
            x, y = datapoint
            w = None
        else:
            x, y, w = datapoint

        if device is not None:
            x = x.to(device)
            y = y.to(device)
            if w is not None:
                w = w.to(device)

        return x, y, w

    # get a point and construct the model
    data_iter = iter(dataloader)
    x, y, w = get_point(next(data_iter))

    model._construct_model_params(
        in_features=x.shape[1],
        out_features=y.shape[1] if len(y.shape) == 2 else 1,
        **construct_kwargs,
    )
    model.train()

    assert model.linear is not None

    if init_scheme is not None:
        assert init_scheme in ["xavier", "zeros"]

        with torch.no_grad():
            if init_scheme == "xavier":
                torch.nn.init.xavier_uniform_(model.linear.weight)
            else:
                model.linear.weight.zero_()

            if model.linear.bias is not None:
                model.linear.bias.zero_()

    optim = torch.optim.SGD(model.parameters(), lr=initial_lr)

    # Fix: `scheduler` was referenced unconditionally in the training loop,
    # raising NameError when reduce_lr=False. Default it to None instead.
    scheduler = None
    if reduce_lr:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optim, factor=0.5, patience=patience, threshold=threshold
        )

    t1 = time.time()
    epoch = 0
    i = 0
    while epoch < max_epoch:
        while True:  # for x, y, w in dataloader
            if running_loss_window is None:
                running_loss_window = x.shape[0] * len(dataloader)

            y = y.view(x.shape[0], -1)
            if w is not None:
                w = w.view(x.shape[0], -1)

            i += 1

            out = model(x)

            loss = loss_fn(y, out, w)
            if reg_term is not None:
                reg = torch.norm(model.linear.weight, p=reg_term)
                loss += reg.sum() * alpha

            # Maintain a sliding window of recent losses for convergence checks.
            if len(loss_window) >= running_loss_window:
                loss_window = loss_window[1:]
            loss_window.append(loss.clone().detach())
            assert len(loss_window) <= running_loss_window

            average_loss = torch.mean(torch.stack(loss_window))
            if min_avg_loss is not None:
                # if we haven't improved by at least `threshold`
                if average_loss > min_avg_loss or torch.isclose(
                    min_avg_loss, average_loss, atol=threshold
                ):
                    convergence_counter += 1
                    if convergence_counter >= patience:
                        converged = True
                        break
                else:
                    convergence_counter = 0

            if min_avg_loss is None or min_avg_loss >= average_loss:
                min_avg_loss = average_loss.clone()

            if debug:
                # Fix: the second half of this message was missing the
                # f-prefix, so its placeholders were printed literally.
                print(
                    f"lr={optim.param_groups[0]['lr']}, Loss={loss},"
                    f"Aloss={average_loss}, min_avg_loss={min_avg_loss}"
                )

            loss.backward()
            optim.step()
            model.zero_grad()
            if scheduler is not None:
                scheduler.step(average_loss)

            temp = next(data_iter, None)
            if temp is None:
                break
            x, y, w = get_point(temp)

        if converged:
            break

        epoch += 1
        data_iter = iter(dataloader)
        x, y, w = get_point(next(data_iter))

    t2 = time.time()
    return {
        "train_time": t2 - t1,
        "train_loss": torch.mean(torch.stack(loss_window)).item(),
        "train_iter": i,
        "train_epoch": epoch,
    }
class NormLayer(nn.Module):
    """Standardizes inputs: (x - mean) / (std + eps).

    `eps` guards against division by zero; `n` is accepted for interface
    compatibility but unused.
    """

    def __init__(self, mean, std, n=None, eps=1e-8) -> None:
        super().__init__()
        self.mean = mean
        self.std = std
        self.eps = eps

    def forward(self, x):
        shifted = x - self.mean
        return shifted / (self.std + self.eps)
def sklearn_train_linear_model(
    model: LinearModel,
    dataloader: DataLoader,
    construct_kwargs: Dict[str, Any],
    sklearn_trainer: str = "Lasso",
    norm_input: bool = False,
    **fit_kwargs,
):
    r"""
    Alternative method to train with sklearn. This does introduce some slight
    overhead as we convert the tensors to numpy and then convert the resulting
    trained model to a `LinearModel` object. However, this conversion
    should be negligible.

    Please note that this assumes:

    0. You have sklearn and numpy installed
    1. The dataset can fit into memory

    Args:
        model
            The model to train.
        dataloader
            The data to use. This will be exhausted and converted to numpy
            arrays. Therefore please do not feed an infinite dataloader.
        norm_input
            Whether or not to normalize the input
        sklearn_trainer
            The sklearn model to use to train the model. Please refer to
            sklearn.linear_model for a list of modules to use.
        construct_kwargs
            Additional arguments provided to the `sklearn_trainer` constructor
        fit_kwargs
            Other arguments to send to `sklearn_trainer`'s `.fit` method

    Returns:
        A dict with key "train_time" holding the sklearn fit duration.
    """
    from functools import reduce

    try:
        import numpy as np
    except ImportError:
        raise ValueError("numpy is not available. Please install numpy.")

    try:
        import sklearn
        import sklearn.linear_model
        import sklearn.svm
    except ImportError:
        raise ValueError("sklearn is not available. Please install sklearn >= 0.23")

    # Fix: the second half of this message was a dangling string literal (a
    # no-op expression statement after the assert), silently truncating the
    # assertion message. Join both parts into the assert's message argument.
    assert sklearn.__version__ >= "0.23.0", (
        "Must have sklearn version 0.23.0 or higher to use "
        "sample_weight in Lasso regression."
    )

    num_batches = 0
    xs, ys, ws = [], [], []
    for data in dataloader:
        if len(data) == 3:
            x, y, w = data
        else:
            assert len(data) == 2
            x, y = data
            w = None

        xs.append(x.cpu().numpy())
        ys.append(y.cpu().numpy())
        if w is not None:
            ws.append(w.cpu().numpy())

        num_batches += 1

    x = np.concatenate(xs, axis=0)
    y = np.concatenate(ys, axis=0)
    if len(ws) > 0:
        w = np.concatenate(ws, axis=0)
    else:
        w = None

    if norm_input:
        mean, std = x.mean(0), x.std(0)
        x -= mean
        x /= std

    t1 = time.time()
    # Resolve e.g. "linear_model.Lasso" attribute-by-attribute from `sklearn`.
    sklearn_model = reduce(
        lambda val, el: getattr(val, el), [sklearn] + sklearn_trainer.split(".")
    )(**construct_kwargs)
    sklearn_model.fit(x, y, sample_weight=w, **fit_kwargs)
    t2 = time.time()

    # Convert weights to pytorch
    num_outputs = 1 if len(y.shape) == 1 else y.shape[1]
    weight_values = torch.FloatTensor(sklearn_model.coef_)  # type: ignore
    bias_values = torch.FloatTensor([sklearn_model.intercept_])  # type: ignore
    model._construct_model_params(
        norm_type=None,
        weight_values=weight_values.view(num_outputs, -1),
        bias_value=bias_values.squeeze().unsqueeze(0),
    )

    if norm_input:
        model.norm = NormLayer(mean, std)

    return {"train_time": t2 - t1}
|
"""
Built in Scrolls language features.
.. include:: pdoc/builtins.md
"""
import operator
import random
import typing as t
from . import ast, containers, datatypes, interpreter
__all__ = (
"StdIoCommandHandler",
"BuiltinControlHandler",
"BuiltinCommandHandler",
"RandomExpansionHandler",
"ArithmeticExpansionHandler",
"ComparisonExpansionHandler",
"LogicExpansionHandler",
"StringExpansionHandler",
"BuiltinInitializer",
"FileCommandHandler",
"FileExpansionHandler",
"base_config",
"file_config"
)
base_config: containers.DecoratorInterpreterConfig = containers.DecoratorInterpreterConfig()
"""
A configuration object containing the Scrolls base language. This currently consists of:
- `BuiltinControlHandler`
- `BuiltinCommandHandler`
- `BuiltinInitializer`
- `ArithmeticExpansionHandler`
- `ComparisonExpansionHandler`
- `LogicExpansionHandler`
- `StringExpansionHandler`
.. WARNING::
`print` and `input` are **not** defined as part of the base language, and must be added manually. See
`StdIoCommandHandler`.
"""
file_config: containers.DecoratorInterpreterConfig = containers.DecoratorInterpreterConfig()
"""
A configuration object containing base Scrolls utilities for working with files.
Consists of:
- `FileExpansionHandler`
- `FileCommandHandler`
"""
class StdIoCommandHandler(interpreter.CallbackCommandHandler):
    """
    Provides input and output commands backed by the process's stdin/stdout.
    """
    def __init__(self) -> None:
        super().__init__()
        for name, callback in (
            ("print", self.print),
            ("write", self.write),
            ("input", self.input),
        ):
            self.add_call(name, callback)

    def print(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `print` command. Joins all arguments with spaces and
        prints them, followed by a newline.

        **Usage**
        ```scrolls
        print hello world foo bar
        ```
        """
        text = " ".join(context.args)
        print(text)

    def write(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `write` command. Joins all arguments with spaces and
        prints them. Unlike `print`, no trailing newline is emitted.

        **Usage**
        ```scrolls
        write hello world foo bar
        ```
        """
        text = " ".join(context.args)
        print(text, end="")

    def input(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `input` command. Reads a line from `stdin` and stores it
        in the variable named by the first argument.

        **Usage**
        ```scrolls
        input foo
        print $foo # prints what you entered
        ```
        """
        if not context.args:
            raise interpreter.InterpreterError(
                context,
                "input: variable name is not specified"
            )

        context.set_var(context.args[0], input())
@file_config.commandhandler
class FileCommandHandler(interpreter.CallbackCommandHandler):
    """
    Commands for working with files.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("file-close", self.close)

    def close(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements `file-close`, closing a handle previously returned by
        `FileExpansionHandler.open` (`file-open`).

        **Usage**
        ```scrolls
        set f $(file-open file w)
        # do things to file...
        file-close $f
        ```
        """
        datatypes.require_arg_length(context, 1)
        raw_id, _ = datatypes.require_numeric(context, context.args[0])
        context.close_file(int(raw_id))
@file_config.expansionhandler
class FileExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Expansions for working with files.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("file-open", self.open)
        self.add_call("file-read", self.read)

    def open(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `file-open`. The matching close operation is
        `FileCommandHandler.close` (`file-close`).

        Returns an integer ID (as a string) used as a handle to the file.
        Save this ID and pass it to all other `file-*` functions.

        **Usage**
        ```scrolls
        set f $(file-open file w)
        # do things to file...
        file-close $f
        ```
        """
        datatypes.require_arg_length(context, 1)
        # Second argument is the open mode; reading is the default.
        mode = context.args[1] if len(context.args) > 1 else "r"
        return str(context.open_file(context.args[0], mode))

    def read(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `file-read`. Reads an entire file and returns a string.

        **Usage**
        ```scrolls
        set f $(file-open file w)
        print $(file-read $f)
        file-close $f
        ```
        """
        datatypes.require_arg_length(context, 1)
        raw_id, _ = datatypes.require_numeric(context, context.args[0])
        return context.get_file(int(raw_id)).read()
@base_config.initializer
class BuiltinInitializer(interpreter.Initializer):
    """
    Sets built in constants, and initializes plumbing used by
    [`def`](#scrolls.builtins.BuiltinControlHandler.def_) and
    [`return`](#scrolls.builtins.BuiltinCommandHandler.return_).

    ### Variables
    - `$true` - A true boolean.
    - `$false` - A false boolean.
    """
    def handle_call(self, context: interpreter.InterpreterContext) -> None:
        # Built-in boolean constants.
        for name, value in (("true", datatypes.TRUE), ("false", datatypes.FALSE)):
            context.set_var(name, value)

        # Runtime call tables backing user-defined commands/expansions; each
        # table gets its own fresh RuntimeCallHandler.
        for table in (context.runtime_commands, context.runtime_expansions):
            table.add(interpreter.RuntimeCallHandler(), "__def__")
@base_config.commandhandler
class BuiltinCommandHandler(interpreter.CallbackCommandHandler):
"""
Implements built-in command statements. In order for
[`return`](#scrolls.builtins.BuiltinCommandHandler.return_)
to be functional, `BuiltinControlHandler` and `BuiltinInitializer` must also be loaded.
"""
def __init__(self) -> None:
super().__init__()
self.add_call("set", self.set)
self.add_call("unset", self.unset)
self.add_call("stop", self.stop)
self.add_call("return", self.return_)
self.add_call("nonlocal", self.nonlocal_)
self.add_call("global", self.global_)
def return_(self, context: interpreter.InterpreterContext) -> None:
"""
Implements the `return` command. Returns all arguments passed to it as a single string, joined by spaces.
If this command is present in a [`def`](#scrolls.builtins.BuiltinControlHandler.def_) block, that `def` block
will define a new expansion call. Otherwise, it defines a command.
Using this command outside a `def` block will result in an error.
**Usage**
```scrolls
!def(example foo) {
return $foo
}
print $(example "hello world")
```
"""
retval = " ".join(context.args)
context.set_retval(retval)
raise interpreter.InterpreterReturn()
def set(self, context: interpreter.InterpreterContext) -> None:
"""
Implements the `set` command. Sets a variable. The first argument is the name of the variable. The rest of the
arguments are joined by spaces and stored in the named variable.
**Usage**
```scrolls
set varname arg1 arg2 arg3
print $varname # prints arg1 arg2 arg3
```
"""
if not context.args:
raise interpreter.InterpreterError(
context,
"set: variable name is not specified"
)
context.set_var(context.args[0], " ".join(context.args[1:]))
def unset(self, context: interpreter.InterpreterContext) -> None:
"""
Implements the `unset` command. Deletes a variable. The first argument is the name of the variable to delete.
**Usage**
```scrolls
set varname hello
print $varname # prints hello
unset varname
print $varname # ERROR
```
"""
if not context.args:
raise interpreter.InterpreterError(
context,
"unset: variable name is not specified"
)
try:
context.del_var(context.args[0])
except KeyError:
raise interpreter.InterpreterError(
context,
f"unset: no such variable {context.args[0]}"
)
def nonlocal_(self, context: interpreter.InterpreterContext) -> None:
    """
    Implements the `nonlocal` command. Declares a variable as nonlocal, so that
    references to it modify the variable in the directly enclosing scope.

    **Usage**
    ```scrolls
    !def(zero varname) {
        nonlocal $varname
        set $varname 0
    }

    !def(main) {
        set example 42
        zero example

        # "0" is printed, since example was declared nonlocal
        # in the zero function.
        print $example
    }

    set example 200
    main  # prints "0"

    # Prints "200", since the zero call in main only
    # modifies the DIRECTLY enclosing scope.
    print $example
    ```
    """
    datatypes.require_arg_length(context, 1)
    name = context.args[0]
    context.vars.declare_nonlocal(name)
def global_(self, context: interpreter.InterpreterContext) -> None:
    """
    Implements the `global` command. Declares a variable as global, so that
    references to it modify the variable in the global scope.

    **Usage**
    ```scrolls
    !def(set_global varname *args) {
        global $varname
        set $varname $args
    }

    !def(main) {
        set_global example arg1 arg2 arg3
    }

    main

    # prints "arg1 arg2 arg3", since main->set_global example
    # sets a variable in the global scope.
    print $example
    ```
    """
    datatypes.require_arg_length(context, 1)
    name = context.args[0]
    context.vars.declare_global(name)
def stop(self, context: interpreter.InterpreterContext) -> None:
    """
    Implements the `stop` command. Stops the script execution. Takes no arguments.
    """
    # Unwind via the dedicated control-flow exception; presumably caught at
    # the interpreter's top level (defined in `interpreter` — confirm there).
    raise interpreter.InterpreterStop()
@base_config.controlhandler
class BuiltinControlHandler(interpreter.CallbackControlHandler):
    """
    Implements built-in control statements. In order for
    [`def`](#scrolls.builtins.BuiltinControlHandler.def_)
    to be functional, `BuiltinCommandHandler` and `BuiltinInitializer` must also be loaded.
    """
    def __init__(self) -> None:
        super().__init__()
        # Register the control structures implemented by this handler.
        self.add_call("repeat", self.repeat)
        self.add_call("for", self.for_)
        self.add_call("if", self.if_)
        self.add_call("while", self.while_)
        self.add_call("def", self.def_)

    def def_(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `def` control structure. Allows the definition of new commands and expansion calls.

        The first argument is the name of the call to define. The rest of the arguments name the parameters to the
        call. The last parameter name may be prefixed with `*` to support variable arguments.

        **Usage**
        ```scrolls
        !def(example a b) {
            print "a is" $a
            print "b is" $b
        }

        # prints
        # a is foo
        # b is bar
        example foo bar

        !def(varargs_example x *args) {
            print "x is" $x
            print "the rest of the args are:"
            !for(i in $^args) print $i
        }

        # prints
        # x is 10
        # the rest of the args are:
        # foo
        # bar
        # baz
        varargs_example 10 foo bar baz
        ```
        """
        args = context.args
        datatypes.require_arg_length(context, 1)

        # Collect every non-empty command call in the body so we can look
        # for a `return` statement.
        command_calls = context.control_node.find_all(
            lambda node: (node.type == ast.ASTNodeType.COMMAND_CALL and
                          bool(node.children))
        )

        # A body that calls `return` defines an expansion call; a body
        # without one defines a command.
        has_return = False
        for node in command_calls:
            name_node = node.children[0]
            if name_node.type == ast.ASTNodeType.STRING and name_node.str_content() == "return":
                has_return = True
                break

        # The "__def__" runtime handlers are installed by BuiltinInitializer;
        # the casts only narrow the static type for mypy.
        if has_return:
            t.cast(
                interpreter.RuntimeCallHandler[str],
                context.runtime_expansions.get("__def__")
            ).define(args[0], context.control_node, args[1:])
        else:
            t.cast(
                interpreter.RuntimeCallHandler[None],
                context.runtime_commands.get("__def__")
            ).define(args[0], context.control_node, args[1:])

    def repeat(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `repeat` control structure. Takes a single integer argument, that repeats the body n times.

        **Usage**
        ```scrolls
        # prints "hello world" 4 times
        !repeat(4) {
            print "hello world"
        }
        ```
        """
        if len(context.args) != 1:
            raise interpreter.InterpreterError(
                context,
                "repeat requires exactly one argument, the number of times to repeat"
            )

        # Point the context at the argument node before converting it, so a
        # conversion error below is attributed to that node (NOTE(review):
        # presumed purpose — confirm against InterpreterError's handling).
        context.current_node = context.arg_nodes[0]
        try:
            repeat_times = int(context.args[0])
        except ValueError:
            raise interpreter.InterpreterError(
                context,
                f"'{context.args[0]}' is not a valid integer"
            )

        # A non-positive count simply runs the body zero times.
        control_node = context.control_node
        for _ in range(repeat_times):
            context.interpreter.interpret_statement(context, control_node)

    def for_(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `for` control structure. The syntax is as follows: `!for(VARNAME in VECTOR) ...`

        **Usage**
        ```scrolls
        # prints
        # 1
        # 2
        # 3
        # 4
        # 5
        !for(x in 1 2 3 4 5) {
            print $x
        }
        ```
        """
        # Need at least VARNAME, "in", and one item.
        if not context.args or len(context.args) < 3:
            raise interpreter.InterpreterError(
                context,
                "bad format in !for: expected !for(VARNAME in ARGS)"
            )

        var_name, _in, *items = context.args
        if _in != "in":
            # Attribute the error to the offending token's node.
            context.current_node = context.arg_nodes[1]
            raise interpreter.InterpreterError(
                context,
                f"unexpected token '{_in}', should be 'in'"
            )

        control_node = context.control_node
        for item in items:
            context.set_var(var_name, item)
            context.interpreter.interpret_statement(context, control_node)

        # The arg-length check guarantees at least one item, so the loop
        # variable always exists here.
        context.del_var(var_name)

    def if_(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `if` control structure. Takes one argument, a boolean. If it's `scrolls.datatypes.TRUE`,
        executes the body statement. Otherwise, the body is skipped. `else` is not supported.

        **Usage**
        ```scrolls
        !if($true) {
            print "this will print"
        }

        !if($false) {
            print "this will not print"
        }
        ```
        """
        if len(context.args) != 1:
            raise interpreter.InterpreterError(
                context,
                f"if: needs one and only one argument"
            )

        if datatypes.str_to_bool(context.args[0]):
            context.interpreter.interpret_statement(context, context.control_node)

    def while_(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `while` control structure. Takes one argument, a boolean. Repeats the body while
        the condition is `scrolls.datatypes.TRUE`.

        **Usage**
        ```scrolls
        # counting down from 10 to 1
        set i 10
        !while($(> $i 0)) {
            print $i
            set i $(- $i 1)
        }
        ```
        """
        if len(context.args) != 1:
            raise interpreter.InterpreterError(
                context,
                f"while: needs one and only one argument"
            )

        arg = context.args[0]
        while datatypes.str_to_bool(arg):
            context.interpreter.interpret_statement(context, context.control_node)

            # HACK:
            # In order for while to work right, we need to re-evaluate the argument
            # every time. Only the first result of the expansion is used.
            arg = context.interpreter.interpret_string_or_expansion(context, context.arg_nodes[0])[0]
class RandomExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Implements random expansions.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("select", self.select)
        self.add_call("shuffle", self.shuffle)
        self.add_call("uniform", self.uniform)

    def select(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `select` expansion. Randomly selects one of the arguments and returns it.

        **Usage**
        ```scrolls
        # randomly prints either foo, bar, or baz
        print $(select foo bar baz)
        ```
        """
        # Guard against zero arguments: random.choice on an empty sequence
        # raises a raw IndexError, which would escape as a Python error
        # instead of a script-level one.
        if not context.args:
            raise interpreter.InterpreterError(
                context,
                "select: must have at least one argument"
            )
        return random.choice(context.args)

    def shuffle(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `shuffle` expansion. Shuffle the arguments given and return them.

        **Usage**
        ```scrolls
        print $(shuffle 1 2 3 4 5)
        ```
        """
        # Copy first: random.shuffle works in place and context.args should
        # not be mutated.
        args = list(context.args)
        random.shuffle(args)
        return " ".join(args)

    def uniform(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `uniform` expansion. Returns a random floating point number between two bounds, inclusive.

        **Usage**
        ```scrolls
        print $(uniform 0 2) # print a random float between 0 and 2.
        ```
        """
        if len(context.args) != 2:
            # Hoist the join out of the f-string: reusing double quotes
            # inside an f-string expression is a SyntaxError before
            # Python 3.12 (PEP 701). The message text is unchanged.
            args_desc = ", ".join(context.args)
            raise interpreter.InterpreterError(
                context,
                f"uniform: must have two args. (got {args_desc})"
            )

        try:
            lower = float(context.args[0])
            upper = float(context.args[1])
        except ValueError as e:
            raise interpreter.InterpreterError(
                context,
                f"uniform: {str(e)}"
            )

        return str(random.uniform(lower, upper))
@base_config.expansionhandler
class ArithmeticExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Implements basic arithmetic expansions. These aren't very efficient, but
    if you want efficiency, you shouldn't be using an interpreted language
    with no JIT being interpreted by another interpreted language `:)`.

    Most of these are self-explanatory. Examples will be provided where appropriate.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("toint", self.toint)
        self.add_call("tofloat", self.tofloat)
        self.add_call("+", self.add)
        self.add_call("-", self.sub)
        self.add_call("*", self.mul)
        self.add_call("/", self.div)
        self.add_call("//", self.intdiv)
        self.add_call("%", self.mod)

    # Thin wrappers around the datatypes helpers; each returns the numeric
    # result rendered back to a string, since expansions return strings.
    @staticmethod
    def __unary(context: interpreter.InterpreterContext, op: datatypes.UnaryNumOpT) -> str:
        return str(datatypes.apply_unary_num_op(context, op)[0])

    @staticmethod
    def __binary(context: interpreter.InterpreterContext, op: datatypes.BinaryNumOpT) -> str:
        return str(datatypes.apply_binary_num_op(context, op)[0])

    # Based on the div/intdiv examples below, this presumably folds
    # args[1:] together with reduce_op, then applies final_op(args[0],
    # folded) — confirm against datatypes.apply_mass_binary_num_op.
    @staticmethod
    def __mass(
        context: interpreter.InterpreterContext,
        reduce_op: datatypes.BinaryNumOpT,
        final_op: datatypes.BinaryNumOpT
    ) -> str:
        return str(datatypes.apply_mass_binary_num_op(context, reduce_op, final_op)[0])

    @staticmethod
    def __reduce(
        context: interpreter.InterpreterContext,
        reduce_op: datatypes.BinaryNumOpT
    ) -> str:
        return str(datatypes.apply_reduce_binary_num_op(context, reduce_op)[0])

    def sub(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `-`.

        **Usage**
        ```scrolls
        print $(- 4) # negate a number
        print $(- 10 3) # subtract 3 from 10
        print $(- 10 1 2 3) # subtract 1, 2, and 3 from 10.
        ```
        """
        # Sub behaves a little differently. If only one arg, negate instead of subtracting.
        if len(context.args) == 1:
            return self.__unary(context, operator.neg)

        # a - b - c == a - (b + c): sum the tail, subtract from the head.
        return self.__mass(context, reduce_op=operator.add, final_op=operator.sub)

    def toint(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `toint`. Forces a number to be an integer. If the input is a float, the decimal point
        will be truncated.
        """
        return self.__unary(context, datatypes.toint)

    def tofloat(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `tofloat`. Forces a number to be a float.
        """
        return self.__unary(context, datatypes.tofloat)

    def add(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `+`. `+` will take 2 or more arguments, and sum them all together.

        **Usage**
        ```scrolls
        print $(+ 2 3)
        print $(+ 1 10 34)
        ```
        """
        return self.__reduce(context, operator.add)

    def mul(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `*`. `*` will take 2 or more arguments, and multiplies them all together.

        **Usage**
        ```scrolls
        print $(* 2 3)
        print $(* 1 10 34)
        ```
        """
        return self.__reduce(context, operator.mul)

    def div(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `/`.

        **Usage**
        ```scrolls
        print $(/ 6 2) # prints 3.0
        print $(/ 20 2 5) # divides 20 by 2, then by 5. prints 2.0
        ```
        """
        # a / b / c == a / (b * c): multiply the tail, divide the head by it.
        return self.__mass(context, reduce_op=operator.mul, final_op=operator.truediv)

    def intdiv(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `//` (integer division).

        **Usage**
        ```scrolls
        print $(// 5 2) # prints 2.
        print $(// 20 2 3) # divides 20 by 2*3 (6), (3.3333...), then truncates float part. prints 3.
        ```
        """
        return self.__mass(context, reduce_op=operator.mul, final_op=operator.floordiv)

    def mod(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `%` (modulo). Takes only two arguments.

        **Usage**
        ```scrolls
        print $(% 5 2) # prints 1.
        ```
        """
        return self.__binary(context, operator.mod)
@base_config.expansionhandler
class ComparisonExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Implements basic comparison operators.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("eq?", self.equals)
        self.add_alias("==", "eq?")
        self.add_call("neq?", self.not_equals)
        self.add_call("===", self.str_equals)
        self.add_call(">", self.gt)
        self.add_call("<", self.lt)
        self.add_call(">=", self.gte)
        self.add_call("<=", self.lte)
        self.add_call("in?", self._in)

    # Weak equality: compare numerically when both args parse as numbers,
    # otherwise fall back to string equality (EAFP on the numeric parse).
    def __equals_bool(self, context: interpreter.InterpreterContext) -> bool:
        args = context.args
        if len(args) != 2:
            raise interpreter.InterpreterError(
                context,
                f"{context.call_name}: must have exactly 2 args"
            )

        try:
            num_args, _ = datatypes.require_all_numeric(context, args)
            return num_args[0] == num_args[1]
        except interpreter.InterpreterError:
            return args[0] == args[1]

    # Shared argument validation for the ordered comparisons (>, <, >=, <=):
    # exactly two args, both numeric.
    def __get_numeric_compare_args(self, context: interpreter.InterpreterContext) -> t.Tuple[float, float]:
        args = context.args
        if len(args) != 2:
            raise interpreter.InterpreterError(
                context,
                f"{context.call_name}: must have exactly 2 args"
            )

        (a, b), _ = datatypes.require_all_numeric(context, args)
        return a, b

    def str_equals(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `===`. Takes only two arguments.

        `===` is the strong comparison operator. It only operates on strings,
        and no implicit conversion is done.

        Contrast with the behavior of `ComparisonExpansionHandler.equals`.

        **Usage**
        ```scrolls
        print $(=== 0123 123) # prints 0
        print $(=== hello hello) # prints 1
        ```
        """
        args = context.args
        if len(args) != 2:
            raise interpreter.InterpreterError(
                context,
                f"{context.call_name}: must have exactly 2 args"
            )

        return datatypes.bool_to_str(args[0] == args[1])

    def equals(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `==`, or `eq?`. Takes only two arguments.

        `==` is a weak comparison operator. If both arguments can be interpreted numerically, they will be converted
        to numbers before testing for equivalence. Otherwise, `==` just tests if the strings passed are equal.

        Contrast with the behavior of `ComparisonExpansionHandler.str_equals`.

        **Usage**
        ```scrolls
        print $(eq? 0123 123) # prints 1, numeric comparison
        print $(eq? hello hello) # prints 1, string comparison
        ```
        """
        return datatypes.bool_to_str(self.__equals_bool(context))

    def not_equals(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `neq?`. Note this is not aliased to `!=` due to `!` being a reserved character. Takes only two arguments.

        Same as with `ComparisonExpansionHandler.equals`, this operator implicitly converts to numbers when possible.

        **Usage**
        ```scrolls
        print $(neq? 0123 123) # prints 0, numeric comparison
        print $(neq? hello world) # prints 1, string comparison
        ```
        """
        return datatypes.bool_to_str(not self.__equals_bool(context))

    def gt(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `>`. Takes only two arguments, both must be numeric.
        Returns whether the first argument is greater than the second.

        **Usage**
        ```scrolls
        print $(> 3 0) # prints 1.
        ```
        """
        a, b = self.__get_numeric_compare_args(context)
        return datatypes.bool_to_str(a > b)

    def lt(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `<`. Takes only two arguments, both must be numeric.
        Returns whether the first argument is less than the second.

        **Usage**
        ```scrolls
        print $(< 4 10) # prints 1.
        ```
        """
        a, b = self.__get_numeric_compare_args(context)
        return datatypes.bool_to_str(a < b)

    def gte(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `>=`. Takes only two arguments, both must be numeric.

        **Usage**
        ```scrolls
        print $(>= 10 4) # prints 1.
        print $(>= 4 4) # prints 1.
        ```
        """
        a, b = self.__get_numeric_compare_args(context)
        return datatypes.bool_to_str(a >= b)

    def lte(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `<=`. Takes only two arguments, both must be numeric.

        **Usage**
        ```scrolls
        print $(<= 4 10) # prints 1.
        print $(<= 4 4) # prints 1.
        ```
        """
        a, b = self.__get_numeric_compare_args(context)
        return datatypes.bool_to_str(a <= b)

    def _in(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `in?`. Takes at least one argument.

        **Usage**
        ```scrolls
        # in? x args...
        # Tests if x is in the following arguments.
        print $(in? blah) # always returns '0'.
        print $(in? bar foo bar baz) # returns '1'.
        ```
        """
        if len(context.args) == 0:
            raise interpreter.InterpreterError(
                context,
                f"{context.call_name} requires at least one argument"
            )

        return datatypes.bool_to_str(context.args[0] in context.args[1:])
@base_config.expansionhandler
class LogicExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Implements basic logic operators.

    Related:
    `scrolls.datatypes.TRUE`
    `scrolls.datatypes.FALSE`
    """
    def __init__(self) -> None:
        super().__init__()
        for call_name, callback in (
            ("not", self.not_),
            ("and", self.and_),
            ("or", self.or_),
            ("xor", self.xor_),
        ):
            self.add_call(call_name, callback)

    # Apply a single-argument boolean operation and render the result.
    @staticmethod
    def _apply_unary(context: interpreter.InterpreterContext, op: datatypes.UnaryNumOpT) -> str:
        return datatypes.bool_to_str(datatypes.apply_unary_bool_op(context, op))

    # Fold a boolean operation over all arguments and render the result.
    @staticmethod
    def _apply_reduce(context: interpreter.InterpreterContext, op: datatypes.BinaryNumOpT) -> str:
        return datatypes.bool_to_str(datatypes.apply_reduce_bool_op(context, op))

    def not_(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `not` operator. Inverts its single boolean argument.

        **Usage**
        ```scrolls
        print $(not $true) # prints 0.
        ```
        """
        return self._apply_unary(context, operator.not_)

    def and_(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `and` operator. Takes 2 or more arguments, and `and`s them all together.

        **Usage**
        ```scrolls
        print $(and $true $false $true) # prints 0.
        print $(and $true $true) # prints 1.
        ```
        """
        return self._apply_reduce(context, operator.and_)

    def or_(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `or` operator. Takes 2 or more arguments, and `or`s them all together.

        **Usage**
        ```scrolls
        print $(or $true $false $true) # prints 1.
        print $(or $false $false) # prints 0.
        ```
        """
        return self._apply_reduce(context, operator.or_)

    def xor_(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `xor` operator. Takes 2 or more arguments. With 2 arguments, `xor` performs a standard XOR
        operation. With more arguments, `xor` will perform a parity check. It will return `scrolls.datatypes.TRUE`
        for an odd number of `scrolls.datatypes.TRUE` inputs, and `scrolls.datatypes.FALSE` for an even number of
        `scrolls.datatypes.TRUE` inputs.

        **Usage**
        ```scrolls
        print $(xor $true $false) # prints 1.
        print $(xor $true $false $true) # prints 0.
        ```
        """
        return self._apply_reduce(context, operator.xor)
@base_config.expansionhandler
class StringExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Implements basic string manipulation expansions.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("cat", self.concat)
        self.add_alias("concat", "cat")
        self.add_call("getc", self.getc)
        self.add_call("len", self.len)
        self.add_call("ord", self.ord)
        self.add_call("chr", self.chr)
        self.add_call("vempty?", self.vempty)
        self.add_call("vhead", self.vhead)
        self.add_call("vtail", self.vtail)
        self.add_call("rangev", self.rangev)

    def concat(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `cat`. Concatenates all arguments into one string and returns it. Commonly used to concatenate
        punctuation onto variable output.

        **Usage**
        ```scrolls
        set example "Hello world"
        print $(cat $example "!") # prints Hello World!
        ```
        """
        return "".join(context.args)

    def getc(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `getc`. Gets a single character from a string, starting at 0.

        **Usage**
        ```scrolls
        set example "Hello"
        print $(getc $example 4) # prints 'o'
        ```
        """
        datatypes.require_arg_length(context, 2)
        s = context.args[0]
        n = int(datatypes.require_numeric(context, context.args[1])[0])

        # Robustness fix: report out-of-range indices as a script-level
        # error instead of letting a raw IndexError escape. Negative
        # indices keep their original Python semantics (count from end).
        if not -len(s) <= n < len(s):
            raise interpreter.InterpreterError(
                context,
                f"getc: index {n} out of range for string of length {len(s)}"
            )

        return s[n]

    def len(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `len`. Gets the length of the passed string.

        **Usage**
        ```scrolls
        print $(len "hello") # prints 5
        ```
        """
        datatypes.require_arg_length(context, 1)
        return str(len(context.args[0]))

    def ord(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `ord`. Converts a single character to its integer equivalent.

        **Usage**
        ```scrolls
        print $(ord "h") # prints 104
        ```
        """
        datatypes.require_arg_length(context, 1)

        # Robustness fix: builtin ord() raises TypeError on anything but a
        # single character; surface that as a script-level error instead.
        if len(context.args[0]) != 1:
            raise interpreter.InterpreterError(
                context,
                f"ord: expected a single character, got '{context.args[0]}'"
            )

        return str(ord(context.args[0]))

    def chr(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `chr`. Converts a number into the character it corresponds to.

        **Usage**
        ```scrolls
        print $(chr 104) # prints h
        ```
        """
        datatypes.require_arg_length(context, 1)
        c = int(datatypes.require_numeric(context, context.args[0])[0])
        return chr(c)

    def vempty(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `vempty?`. Returns `scrolls.datatypes.TRUE` if the passed vector is empty.

        **Usage**
        ```scrolls
        set empty_vec ""
        print $(vempty? $empty_vec) # prints 1.
        ```
        """
        datatypes.require_arg_length(context, 1)
        return datatypes.bool_to_str(not bool(context.args[0]))

    def vhead(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `vhead`. Returns the first element of a vector (the leftmost element).

        **Usage**
        ```scrolls
        set vec "2 4 8 16"
        print $(vhead $vec) # prints 2.
        ```
        """
        datatypes.require_arg_length(context, 1)
        parts = context.args[0].split(maxsplit=1)

        # Robustness fix: an empty vector used to raise a raw IndexError.
        if not parts:
            raise interpreter.InterpreterError(
                context,
                "vhead: vector is empty"
            )

        return parts[0]

    def vtail(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `vtail`. Returns every element of a vector except the first.
        Returns an empty string for vectors with fewer than two elements.

        **Usage**
        ```scrolls
        set vec "2 4 8 16"
        print $(vtail $vec) # prints 4 8 16.
        ```
        """
        datatypes.require_arg_length(context, 1)
        # split(maxsplit=1) yields at most [head, rest]; the join collapses
        # the zero-or-one element tail into a single string.
        return "".join(context.args[0].split(maxsplit=1)[1:])

    def rangev(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `rangev`. Returns a vector consisting of a range of numbers.
        The upper bound is exclusive.

        **Usage**
        ```scrolls
        set min 0
        set max 4
        print $(rangev $min $max) # prints 0 1 2 3
        ```
        """
        datatypes.require_arg_length(context, 2)
        (a, b, *rest), _ = datatypes.require_all_numeric(context, context.args)
        a = int(a)
        b = int(b)
        return " ".join([str(x) for x in range(a, b)])
| """
Built in Scrolls language features.
.. include:: pdoc/builtins.md
"""
import operator
import random
import typing as t
from . import ast, containers, datatypes, interpreter
__all__ = (
"StdIoCommandHandler",
"BuiltinControlHandler",
"BuiltinCommandHandler",
"RandomExpansionHandler",
"ArithmeticExpansionHandler",
"ComparisonExpansionHandler",
"LogicExpansionHandler",
"StringExpansionHandler",
"BuiltinInitializer",
"FileCommandHandler",
"FileExpansionHandler",
"base_config",
"file_config"
)
base_config: containers.DecoratorInterpreterConfig = containers.DecoratorInterpreterConfig()
"""
A configuration object containing the Scrolls base language. This currently consists of:
- `BuiltinControlHandler`
- `BuiltinCommandHandler`
- `BuiltinInitializer`
- `ArithmeticExpansionHandler`
- `ComparisonExpansionHandler`
- `LogicExpansionHandler`
- `StringExpansionHandler`
.. WARNING::
`print` and `input` are **not** defined as part of the base language, and must be added manually. See
`StdIoCommandHandler`.
"""
file_config: containers.DecoratorInterpreterConfig = containers.DecoratorInterpreterConfig()
"""
A configuration object containing base Scrolls utilities for working with files.
Consists of:
- `FileExpansionHandler`
- `FileCommandHandler`
"""
class StdIoCommandHandler(interpreter.CallbackCommandHandler):
    """
    Implements input and output commands using stdout/stdin.
    """
    def __init__(self) -> None:
        super().__init__()
        for call_name, callback in (
            ("print", self.print),
            ("write", self.write),
            ("input", self.input),
        ):
            self.add_call(call_name, callback)

    def print(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `print` command. Prints all arguments passed to it, joined by spaces,
        followed by a newline.

        **Usage**
        ```scrolls
        print hello world foo bar
        ```
        """
        # print() already joins its positional arguments with spaces.
        print(*context.args)

    def write(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `write` command. Prints all arguments passed to it, joined
        by spaces. The difference vs. `print` is that `print` appends a newline,
        while `write` does not.

        **Usage**
        ```scrolls
        write hello world foo bar
        ```
        """
        print(*context.args, end="")

    def input(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `input` command. Reads a line from `stdin` and stores it in
        the variable named by the first argument.

        **Usage**
        ```scrolls
        input foo
        print $foo # prints what you entered
        ```
        """
        if not context.args:
            raise interpreter.InterpreterError(
                context,
                "input: variable name is not specified"
            )

        context.set_var(context.args[0], input())
@file_config.commandhandler
class FileCommandHandler(interpreter.CallbackCommandHandler):
    """
    Defines commands for working with files.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("file-close", self.close)

    def close(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements `file-close`. Takes the numeric file handle returned by
        `file-open` and closes the associated file. If you're looking for
        `file-open`, see `FileExpansionHandler.open`.

        **Usage**
        ```scrolls
        set f $(file-open file w)
        # do things to file...
        file-close $f
        ```
        """
        datatypes.require_arg_length(context, 1)
        handle, _ = datatypes.require_numeric(context, context.args[0])
        context.close_file(int(handle))
@file_config.expansionhandler
class FileExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Defines expansions for working with files.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("file-open", self.open)
        self.add_call("file-read", self.read)

    def open(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `file-open`. If you're looking for `file-close`, see
        `FileCommandHandler.close`.

        The first argument is the file to open; the optional second argument
        is the mode (defaults to read).

        `file-open` returns an integer ID used as a handle to the file.
        This ID should be saved and used for all `file-*` functions.

        **Usage**
        ```scrolls
        set f $(file-open file w)
        # do things to file...
        file-close $f
        ```
        """
        datatypes.require_arg_length(context, 1)
        # Read mode is the default when no mode argument is supplied.
        mode = context.args[1] if len(context.args) > 1 else "r"
        return str(context.open_file(context.args[0], mode))

    def read(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `file-read`. Reads an entire file and returns a string.

        **Usage**
        ```scrolls
        set f $(file-open file)
        print $(file-read $f)
        file-close $f
        ```
        """
        datatypes.require_arg_length(context, 1)
        handle, _ = datatypes.require_numeric(context, context.args[0])
        return context.get_file(int(handle)).read()
@base_config.initializer
class BuiltinInitializer(interpreter.Initializer):
    """
    Sets built in constants, and initializes plumbing used by
    [`def`](#scrolls.builtins.BuiltinControlHandler.def_) and
    [`return`](#scrolls.builtins.BuiltinCommandHandler.return_).

    ### Variables
    - `$true` - A true boolean.
    - `$false` - A false boolean.
    """
    def handle_call(self, context: interpreter.InterpreterContext) -> None:
        # Built-in boolean constants.
        context.set_var("true", datatypes.TRUE)
        context.set_var("false", datatypes.FALSE)

        # Install the runtime call handlers that `def` fills with
        # user-defined commands and expansions; BuiltinControlHandler.def_
        # looks these up under the "__def__" key.
        context.runtime_commands.add(interpreter.RuntimeCallHandler(), "__def__")
        context.runtime_expansions.add(interpreter.RuntimeCallHandler(), "__def__")
@base_config.commandhandler
class BuiltinCommandHandler(interpreter.CallbackCommandHandler):
    """
    Implements built-in command statements. In order for
    [`return`](#scrolls.builtins.BuiltinCommandHandler.return_)
    to be functional, `BuiltinControlHandler` and `BuiltinInitializer` must also be loaded.
    """
    def __init__(self) -> None:
        super().__init__()
        # Register the built-in commands implemented by this handler.
        self.add_call("set", self.set)
        self.add_call("unset", self.unset)
        self.add_call("stop", self.stop)
        self.add_call("return", self.return_)
        self.add_call("nonlocal", self.nonlocal_)
        self.add_call("global", self.global_)

    def return_(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `return` command. Returns all arguments passed to it as a single string, joined by spaces.

        If this command is present in a [`def`](#scrolls.builtins.BuiltinControlHandler.def_) block, that `def` block
        will define a new expansion call. Otherwise, it defines a command.

        Using this command outside a `def` block will result in an error.

        **Usage**
        ```scrolls
        !def(example foo) {
            return $foo
        }

        print $(example "hello world")
        ```
        """
        retval = " ".join(context.args)
        # Stash the return value, then unwind the current call via the
        # dedicated control-flow exception.
        context.set_retval(retval)
        raise interpreter.InterpreterReturn()

    def set(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `set` command. Sets a variable. The first argument is the name of the variable. The rest of the
        arguments are joined by spaces and stored in the named variable.

        **Usage**
        ```scrolls
        set varname arg1 arg2 arg3
        print $varname # prints arg1 arg2 arg3
        ```
        """
        if not context.args:
            raise interpreter.InterpreterError(
                context,
                "set: variable name is not specified"
            )

        # args[0] is the variable name; the remaining args form its value.
        context.set_var(context.args[0], " ".join(context.args[1:]))

    def unset(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `unset` command. Deletes a variable. The first argument is the name of the variable to delete.

        **Usage**
        ```scrolls
        set varname hello
        print $varname # prints hello
        unset varname
        print $varname # ERROR
        ```
        """
        if not context.args:
            raise interpreter.InterpreterError(
                context,
                "unset: variable name is not specified"
            )

        try:
            context.del_var(context.args[0])
        except KeyError:
            # Translate the raw KeyError into a script-level error.
            raise interpreter.InterpreterError(
                context,
                f"unset: no such variable {context.args[0]}"
            )

    def nonlocal_(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `nonlocal` command. Declares a variable as nonlocal, which allows variable references to modify
        variables in the enclosing scope.

        **Usage**
        ```scrolls
        !def(zero varname) {
            nonlocal $varname
            set $varname 0
        }

        !def(main) {
            set example 42
            zero example

            # "0" is printed, since example was declared nonlocal
            # in the zero function.
            print $example
        }

        set example 200
        main # prints "0"

        # Prints "200", since the zero call in main only
        # modifies the DIRECTLY enclosing scope.
        print $example
        ```
        """
        datatypes.require_arg_length(context, 1)
        context.vars.declare_nonlocal(context.args[0])

    def global_(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `global` command. Declares a variable as global, which allows variable references to modify
        variables in the global scope.

        **Usage**
        ```scrolls
        !def(set_global varname *args) {
            global $varname
            set $varname $args
        }

        !def(main) {
            set_global example arg1 arg2 arg3
        }

        main

        # prints "arg1 arg2 arg3", since main->set_global example
        # sets a variable in the global scope.
        print $example
        ```
        """
        datatypes.require_arg_length(context, 1)
        context.vars.declare_global(context.args[0])

    def stop(self, context: interpreter.InterpreterContext) -> None:
        """
        Implements the `stop` command. Stops the script execution. Takes no arguments.
        """
        # Unwind via the dedicated control-flow exception; presumably caught
        # at the interpreter's top level (confirm in `interpreter`).
        raise interpreter.InterpreterStop()
@base_config.controlhandler
class BuiltinControlHandler(interpreter.CallbackControlHandler):
"""
Implements built-in command statements. In order for
[`def`](#scrolls.builtins.BuiltinControlHandler.def_)
to be functional, `BuiltinCommandHandler` and `BuiltinInitializer` must also be loaded.
"""
def __init__(self) -> None:
    """Register the built-in control-structure calls on this handler."""
    super().__init__()
    # Table-driven registration; order matches the original declarations.
    for call_name, callback in (
        ("repeat", self.repeat),
        ("for", self.for_),
        ("if", self.if_),
        ("while", self.while_),
        ("def", self.def_),
    ):
        self.add_call(call_name, callback)
def def_(self, context: interpreter.InterpreterContext) -> None:
    """
    Implements the `def` control structure. Allows the definition of new commands and expansion calls.

    The first argument is the name of the call to define. The rest of the arguments name the parameters to the
    call. The last parameter name may be prefixed with `*` to support variable arguments.

    **Usage**
    ```scrolls
    !def(example a b) {
        print "a is" $a
        print "b is" $b
    }

    # prints
    # a is foo
    # b is bar
    example foo bar

    !def(varargs_example x *args) {
        print "x is" $x
        print "the rest of the args are:"
        !for(i in $^args) print $i
    }

    # prints
    # x is 10
    # the rest of the args are:
    # foo
    # bar
    # baz
    varargs_example 10 foo bar baz
    ```
    """
    args = context.args
    datatypes.require_arg_length(context, 1)

    # Collect every non-empty command call in the body so we can look for
    # a `return` statement.
    command_calls = context.control_node.find_all(
        lambda node: (node.type == ast.ASTNodeType.COMMAND_CALL and
                      bool(node.children))
    )

    # A body that calls `return` defines an expansion call; a body without
    # one defines a command.
    has_return = False
    for node in command_calls:
        name_node = node.children[0]
        if name_node.type == ast.ASTNodeType.STRING and name_node.str_content() == "return":
            has_return = True
            break

    # The "__def__" runtime handlers are installed by BuiltinInitializer;
    # the casts only narrow the static type for mypy.
    if has_return:
        t.cast(
            interpreter.RuntimeCallHandler[str],
            context.runtime_expansions.get("__def__")
        ).define(args[0], context.control_node, args[1:])
    else:
        t.cast(
            interpreter.RuntimeCallHandler[None],
            context.runtime_commands.get("__def__")
        ).define(args[0], context.control_node, args[1:])
def repeat(self, context: interpreter.InterpreterContext) -> None:
"""
Implements the `repeat` control structure. Takes a single integer argument, that repeats the body n times.
**Usage**
```scrolls
# prints "hello world" 4 times
!repeat(4) {
print "hello world"
}
```
"""
if len(context.args) != 1:
raise interpreter.InterpreterError(
context,
"repeat requires exactly one argument, the number of times to repeat"
)
context.current_node = context.arg_nodes[0]
try:
repeat_times = int(context.args[0])
except ValueError:
raise interpreter.InterpreterError(
context,
f"'{context.args[0]}' is not a valid integer"
)
control_node = context.control_node
for _ in range(repeat_times):
context.interpreter.interpret_statement(context, control_node)
def for_(self, context: interpreter.InterpreterContext) -> None:
"""
Implements the `for` control structure. The syntax is as follows: `!for(VARNAME in VECTOR) ...`
**Usage**
```scrolls
# prints
# 1
# 2
# 3
# 4
# 5
!for(x in 1 2 3 4 5) {
print $x
}
```
"""
if not context.args or len(context.args) < 3:
raise interpreter.InterpreterError(
context,
"bad format in !for: expected !for(VARNAME in ARGS)"
)
var_name, _in, *items = context.args
if _in != "in":
context.current_node = context.arg_nodes[1]
raise interpreter.InterpreterError(
context,
f"unexpected token '{_in}', should be 'in'"
)
control_node = context.control_node
for item in items:
context.set_var(var_name, item)
context.interpreter.interpret_statement(context, control_node)
context.del_var(var_name)
def if_(self, context: interpreter.InterpreterContext) -> None:
"""
Implements the `if` control structure. Takes one argument, a boolean. If it's `scrolls.datatypes.TRUE`,
executes the body statement. Otherwise, the body is skipped. `else` is not supported.
**Usage**
```scrolls
!if($true) {
print "this will print"
}
!if($false) {
print "this will not print"
}
```
"""
if len(context.args) != 1:
raise interpreter.InterpreterError(
context,
f"if: needs one and only one argument"
)
if datatypes.str_to_bool(context.args[0]):
context.interpreter.interpret_statement(context, context.control_node)
def while_(self, context: interpreter.InterpreterContext) -> None:
"""
Implements the `while` control structure. Takes one argument, a boolean. Repeats the body while
the condition is `scrolls.datatypes.TRUE`.
**Usage**
```scrolls
# counting down from 10 to 1
set i 10
!while($(> 0 $i)) {
print $i
set i $(- $i 1)
}
```
"""
if len(context.args) != 1:
raise interpreter.InterpreterError(
context,
f"while: needs one and only one argument"
)
arg = context.args[0]
while datatypes.str_to_bool(arg):
context.interpreter.interpret_statement(context, context.control_node)
# HACK:
# In order for while to work right, we need to re-evaluate the argument
# every time.
arg = context.interpreter.interpret_string_or_expansion(context, context.arg_nodes[0])[0]
class RandomExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Provides expansions driven by pseudorandom number generation.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("select", self.select)
        self.add_call("shuffle", self.shuffle)
        self.add_call("uniform", self.uniform)

    def select(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `select` expansion: returns one of its arguments,
        chosen uniformly at random.
        **Usage**
        ```scrolls
        # randomly prints either foo, bar, or baz
        print $(select foo bar baz)
        ```
        """
        return random.choice(context.args)

    def shuffle(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `shuffle` expansion: returns the arguments in a random
        order, joined by single spaces.
        **Usage**
        ```scrolls
        print $(shuffle 1 2 3 4 5)
        ```
        """
        # Copy first: context.args must not be mutated in place.
        shuffled = list(context.args)
        random.shuffle(shuffled)
        return " ".join(shuffled)

    def uniform(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `uniform` expansion: a random floating point number
        between two bounds, inclusive.
        **Usage**
        ```scrolls
        print $(uniform 0 2) # print a random float between 0 and 2.
        ```
        """
        if len(context.args) != 2:
            raise interpreter.InterpreterError(
                context,
                f"uniform: must have two args. (got {', '.join(context.args)})"
            )
        # Convert both bounds, translating conversion failures into
        # interpreter errors.
        bounds = []
        for raw in context.args:
            try:
                bounds.append(float(raw))
            except ValueError as e:
                raise interpreter.InterpreterError(
                    context,
                    f"uniform: {str(e)}"
                )
        return str(random.uniform(bounds[0], bounds[1]))
@base_config.expansionhandler
class ArithmeticExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Provides the standard arithmetic expansion calls.

    Performance is explicitly not a goal: Scrolls is an interpreted language
    running on top of another interpreted language with no JIT, so these
    favor clarity over speed.
    Most calls are self-explanatory; usage examples are given where helpful.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("toint", self.toint)
        self.add_call("tofloat", self.tofloat)
        self.add_call("+", self.add)
        self.add_call("-", self.sub)
        self.add_call("*", self.mul)
        self.add_call("/", self.div)
        self.add_call("//", self.intdiv)
        self.add_call("%", self.mod)

    @staticmethod
    def __apply_unary(context: interpreter.InterpreterContext, op: datatypes.UnaryNumOpT) -> str:
        # Apply a single-argument numeric op and render the result as a string.
        result, _ = datatypes.apply_unary_num_op(context, op)
        return str(result)

    @staticmethod
    def __apply_binary(context: interpreter.InterpreterContext, op: datatypes.BinaryNumOpT) -> str:
        # Apply a strictly two-argument numeric op.
        result, _ = datatypes.apply_binary_num_op(context, op)
        return str(result)

    @staticmethod
    def __apply_mass(
        context: interpreter.InterpreterContext,
        reduce_op: datatypes.BinaryNumOpT,
        final_op: datatypes.BinaryNumOpT
    ) -> str:
        # Collapse the trailing args with reduce_op, then combine with the
        # first arg using final_op (used for `-`, `/`, `//`).
        result, _ = datatypes.apply_mass_binary_num_op(context, reduce_op, final_op)
        return str(result)

    @staticmethod
    def __apply_reduce(
        context: interpreter.InterpreterContext,
        reduce_op: datatypes.BinaryNumOpT
    ) -> str:
        # Fold all arguments together with reduce_op (used for `+`, `*`).
        result, _ = datatypes.apply_reduce_binary_num_op(context, reduce_op)
        return str(result)

    def toint(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `toint`. Coerces a number to an integer; a float input has
        its fractional part truncated.
        """
        return self.__apply_unary(context, datatypes.toint)

    def tofloat(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `tofloat`. Coerces a number to a float.
        """
        return self.__apply_unary(context, datatypes.tofloat)

    def add(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `+`. Takes 2 or more arguments and sums them all.
        **Usage**
        ```scrolls
        print $(+ 2 3)
        print $(+ 1 10 34)
        ```
        """
        return self.__apply_reduce(context, operator.add)

    def sub(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `-`.
        **Usage**
        ```scrolls
        print $(- 4) # negate a number
        print $(- 10 3) # subtract 3 from 10
        print $(- 10 1 2 3) # subtract 1, 2, and 3 from 10.
        ```
        """
        # Special case: a single argument means negation, not subtraction.
        if len(context.args) == 1:
            return self.__apply_unary(context, operator.neg)
        return self.__apply_mass(context, reduce_op=operator.add, final_op=operator.sub)

    def mul(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `*`. Takes 2 or more arguments and multiplies them all.
        **Usage**
        ```scrolls
        print $(* 2 3)
        print $(* 1 10 34)
        ```
        """
        return self.__apply_reduce(context, operator.mul)

    def div(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `/` (true division).
        **Usage**
        ```scrolls
        print $(/ 6 2) # prints 3.0
        print $(/ 20 2 5) # divides 20 by 2, then by 5. prints 2.0
        ```
        """
        return self.__apply_mass(context, reduce_op=operator.mul, final_op=operator.truediv)

    def intdiv(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `//` (integer division).
        **Usage**
        ```scrolls
        print $(// 5 2) # prints 2.
        print $(// 20 2 3) # divides 20 by 2*3 (6), (3.3333...), then truncates float part. prints 3.
        ```
        """
        return self.__apply_mass(context, reduce_op=operator.mul, final_op=operator.floordiv)

    def mod(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `%` (modulo). Takes exactly two arguments.
        **Usage**
        ```scrolls
        print $(% 5 2) # prints 1.
        ```
        """
        return self.__apply_binary(context, operator.mod)
@base_config.expansionhandler
class ComparisonExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Implements basic comparison operators.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("eq?", self.equals)
        self.add_alias("==", "eq?")
        self.add_call("neq?", self.not_equals)
        self.add_call("===", self.str_equals)
        self.add_call(">", self.gt)
        self.add_call("<", self.lt)
        self.add_call(">=", self.gte)
        self.add_call("<=", self.lte)
        self.add_call("in?", self._in)
    def __equals_bool(self, context: interpreter.InterpreterContext) -> bool:
        # Weak equality check shared by eq?/neq?: compare numerically when
        # both args parse as numbers, otherwise fall back to string equality.
        args = context.args
        if len(args) != 2:
            raise interpreter.InterpreterError(
                context,
                f"{context.call_name}: must have exactly 2 args"
            )
        try:
            num_args, _ = datatypes.require_all_numeric(context, args)
            return num_args[0] == num_args[1]
        except interpreter.InterpreterError:
            return args[0] == args[1]
    def __get_numeric_compare_args(self, context: interpreter.InterpreterContext) -> t.Tuple[float, float]:
        # Shared validation for the ordering operators: exactly two args,
        # both numeric.
        args = context.args
        if len(args) != 2:
            raise interpreter.InterpreterError(
                context,
                f"{context.call_name}: must have exactly 2 args"
            )
        (a, b), _ = datatypes.require_all_numeric(context, args)
        return a, b
    def str_equals(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `===`. Takes only two arguments.
        `===` is the strong comparison operator. It only operates on strings,
        and no implicit conversion is done.
        Contrast with the behavior of `ComparisonExpansionHandler.equals`.
        **Usage**
        ```scrolls
        print $(=== 0123 123) # prints 0
        print $(=== hello hello) # prints 1
        ```
        """
        args = context.args
        if len(args) != 2:
            raise interpreter.InterpreterError(
                context,
                f"{context.call_name}: must have exactly 2 args"
            )
        return datatypes.bool_to_str(args[0] == args[1])
    def equals(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `==`, or `eq?`. Takes only two arguments.
        `==` is a weak comparison operator. If both arguments can be interpreted numerically, they will be converted
        to numbers before testing for equivalence. Otherwise, `==` just tests if the strings passed are equal.
        Contrast with the behavior of `ComparisonExpansionHandler.str_equals`.
        **Usage**
        ```scrolls
        print $(eq? 0123 123) # prints 1, numeric comparison
        print $(eq? hello hello) # prints 1, string comparison
        ```
        """
        return datatypes.bool_to_str(self.__equals_bool(context))
    def not_equals(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `neq?`. Note this is not aliased to `!=` due to `!` being a reserved character. Takes only two
        arguments.
        Same as with `ComparisonExpansionHandler.equals`, this operator implicitly converts to numbers when possible.
        **Usage**
        ```scrolls
        print $(neq? 0123 123) # prints 0, numeric comparison
        print $(neq? hello world) # prints 1, string comparison
        ```
        """
        return datatypes.bool_to_str(not self.__equals_bool(context))
    def gt(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `>`. Takes only two arguments, both must be numeric.
        **Usage**
        ```scrolls
        print $(> 0 3) # prints 1.
        ```
        """
        a, b = self.__get_numeric_compare_args(context)
        return datatypes.bool_to_str(a > b)
    def lt(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `<`. Takes only two arguments, both must be numeric.
        **Usage**
        ```scrolls
        print $(< 4 10) # prints 1.
        ```
        """
        a, b = self.__get_numeric_compare_args(context)
        return datatypes.bool_to_str(a < b)
    def gte(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `>=`. Takes only two arguments, both must be numeric.
        **Usage**
        ```scrolls
        print $(>= 10 4) # prints 1.
        print $(>= 4 4) # prints 1.
        ```
        """
        a, b = self.__get_numeric_compare_args(context)
        return datatypes.bool_to_str(a >= b)
    def lte(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `<=`. Takes only two arguments, both must be numeric.
        **Usage**
        ```scrolls
        print $(<= 4 10) # prints 1.
        print $(<= 4 4) # prints 1.
        ```
        """
        a, b = self.__get_numeric_compare_args(context)
        return datatypes.bool_to_str(a <= b)
    def _in(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `in?`. Takes at least one argument.
        **Usage**
        ```scrolls
        # in? x args...
        # Tests if x is in the following arguments.
        print $(in? blah) # always returns '0'.
        print $(in? bar foo bar baz) # returns '1'.
        ```
        """
        if len(context.args) == 0:
            raise interpreter.InterpreterError(
                context,
                f"{context.call_name} requires at least one argument"
            )
        return datatypes.bool_to_str(context.args[0] in context.args[1:])
@base_config.expansionhandler
class LogicExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Provides the boolean logic expansion calls.
    Related:
    `scrolls.datatypes.TRUE`
    `scrolls.datatypes.FALSE`
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("not", self.not_)
        self.add_call("and", self.and_)
        self.add_call("or", self.or_)
        self.add_call("xor", self.xor_)

    @staticmethod
    def __eval_unary(context: interpreter.InterpreterContext, op: datatypes.UnaryNumOpT) -> str:
        # Apply a one-argument boolean op and render the result as a string.
        return datatypes.bool_to_str(datatypes.apply_unary_bool_op(context, op))

    @staticmethod
    def __eval_reduce(context: interpreter.InterpreterContext, op: datatypes.BinaryNumOpT) -> str:
        # Fold every argument together with a binary boolean op.
        return datatypes.bool_to_str(datatypes.apply_reduce_bool_op(context, op))

    def not_(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `not` operator, which inverts its single argument.
        **Usage**
        ```scrolls
        print $(not $true) # prints 0.
        ```
        """
        return self.__eval_unary(context, operator.not_)

    def and_(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `and` operator. Takes 2 or more arguments and `and`s
        them all together.
        **Usage**
        ```scrolls
        print $(and $true $false $true) # prints 0.
        print $(and $true $true) # prints 1.
        ```
        """
        return self.__eval_reduce(context, operator.and_)

    def or_(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `or` operator. Takes 2 or more arguments and `or`s
        them all together.
        **Usage**
        ```scrolls
        print $(or $true $false $true) # prints 1.
        print $(or $false $false) # prints 0.
        ```
        """
        return self.__eval_reduce(context, operator.or_)

    def xor_(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements the `xor` operator. Takes 2 or more arguments. With 2
        arguments this is a standard XOR; with more it acts as a parity
        check, returning `scrolls.datatypes.TRUE` for an odd number of
        `scrolls.datatypes.TRUE` inputs and `scrolls.datatypes.FALSE` for an
        even number.
        **Usage**
        ```scrolls
        print $(xor $true $false) # prints 1.
        print $(xor $true $false $true) # prints 0.
        ```
        """
        return self.__eval_reduce(context, operator.xor)
@base_config.expansionhandler
class StringExpansionHandler(interpreter.CallbackExpansionHandler):
    """
    Provides basic string and vector manipulation expansions.
    """
    def __init__(self) -> None:
        super().__init__()
        self.add_call("cat", self.concat)
        self.add_alias("concat", "cat")
        self.add_call("getc", self.getc)
        self.add_call("len", self.len)
        self.add_call("ord", self.ord)
        self.add_call("chr", self.chr)
        self.add_call("vempty?", self.vempty)
        self.add_call("vhead", self.vhead)
        self.add_call("vtail", self.vtail)
        self.add_call("rangev", self.rangev)

    def concat(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `cat`: joins all arguments into a single string. Commonly
        used to attach punctuation to variable output.
        **Usage**
        ```scrolls
        set example "Hello world"
        print $(cat $example "!") # prints Hello World!
        ```
        """
        return "".join(context.args)

    def getc(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `getc`: gets the character at a 0-based index of a string.
        **Usage**
        ```scrolls
        set example "Hello"
        print $(getc $example 4) # prints 'o'
        ```
        """
        datatypes.require_arg_length(context, 2)
        index = int(datatypes.require_numeric(context, context.args[1])[0])
        return context.args[0][index]

    def len(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `len`: the length of the passed string.
        **Usage**
        ```scrolls
        print $(len "hello") # prints 5
        ```
        """
        datatypes.require_arg_length(context, 1)
        return str(len(context.args[0]))

    def ord(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `ord`: converts a single character to its integer code.
        **Usage**
        ```scrolls
        print $(ord "h") # prints 104
        ```
        """
        datatypes.require_arg_length(context, 1)
        return str(ord(context.args[0]))

    def chr(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `chr`: converts an integer code to its character.
        **Usage**
        ```scrolls
        print $(chr 104) # prints h
        ```
        """
        datatypes.require_arg_length(context, 1)
        code_point = int(datatypes.require_numeric(context, context.args[0])[0])
        return chr(code_point)

    def vempty(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `vempty?`: `scrolls.datatypes.TRUE` when the passed vector
        is empty.
        **Usage**
        ```scrolls
        set empty_vec ""
        print $(vempty? $empty_vec) # prints 1.
        ```
        """
        datatypes.require_arg_length(context, 1)
        return datatypes.bool_to_str(not context.args[0])

    def vhead(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `vhead`: the first (leftmost) element of a vector.
        **Usage**
        ```scrolls
        set vec "2 4 8 16"
        print $(vhead $vec) # prints 2.
        ```
        """
        datatypes.require_arg_length(context, 1)
        parts = context.args[0].split(maxsplit=1)
        return parts[0]

    def vtail(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `vtail`: everything in a vector except the first element.
        **Usage**
        ```scrolls
        set vec "2 4 8 16"
        print $(vtail $vec) # prints 4 8 16.
        ```
        """
        datatypes.require_arg_length(context, 1)
        parts = context.args[0].split(maxsplit=1)
        # A one-element (or empty) vector has an empty tail.
        return parts[1] if len(parts) > 1 else ""

    def rangev(self, context: interpreter.InterpreterContext) -> str:
        """
        Implements `rangev`: a vector consisting of a half-open range of
        integers `[min, max)`.
        **Usage**
        ```scrolls
        set min 0
        set max 4
        print $(rangev $min $max) # prints 0 1 2 3
        ```
        """
        datatypes.require_arg_length(context, 2)
        nums, _ = datatypes.require_all_numeric(context, context.args)
        start = int(nums[0])
        stop = int(nums[1])
        return " ".join(str(n) for n in range(start, stop))
|
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2020, 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""`reana-dev`'s wiki commands."""
import click
from reana.config import (
CODECOV_REANAHUB_URL,
GIT_SUPPORTED_MAINT_BRANCHES,
GITHUB_REANAHUB_URL,
REANA_LIST_DEMO_ALL,
)
@click.group()
def wiki_commands():
    """Wiki commands group."""
    # Click group entry point: subcommands attach via @wiki_commands.command().
@wiki_commands.command(name="wiki-create-build-status-page")
def create_build_status_page():
    """Generate Markdown for the Build Status wiki page.

    Prints a complete Markdown document to stdout with one badge table per
    section, suitable for pasting into the GitHub wiki.
    """
    # Section layout. "simple" sections get only Package/Build/Version
    # columns; full sections additionally get maintenance-branch, Docs and
    # Coverage badge columns. Per-package options may opt out of the docs
    # and coverage badges.
    sections = {
        "researchers": {
            "title": "For researchers",
            "description": "Find out how you can use REANA to describe, run, preserve and reuse your analyses.",
            "packages": {
                "reana-client": {},
                "blog.reana.io": {"coverage": False, "docs": False},
                "docs.reana.io": {"coverage": False, "docs": False},
                "www.reana.io": {"coverage": False, "docs": False},
            },
        },
        "administrators": {
            "title": "For administrators",
            "description": "Install and manage the REANA reusable analysis platform on your own compute cloud.",
            "packages": {
                "reana-commons": {},
                "reana-db": {},
                "reana-job-controller": {},
                "reana-message-broker": {},
                "reana-server": {},
                "reana-ui": {},
                "reana-workflow-controller": {},
                "reana-workflow-engine-cwl": {},
                "reana-workflow-engine-serial": {},
                "reana-workflow-engine-yadage": {},
                "reana-workflow-engine-snakemake": {},
            },
        },
        "developers": {
            "title": "For developers",
            "description": "Understand REANA source code, adapt it to your needs, contribute changes back.",
            "packages": {"reana": {}, "pytest-reana": {}},
        },
        "environments": {
            "simple": True,
            "title": "Environments",
            "description": "Selected containerised environments.",
            "packages": {
                "reana-env-aliphysics": {},
                "reana-env-jupyter": {},
                "reana-env-root6": {},
            },
        },
        "authentication": {
            "simple": True,
            "title": "Authentication",
            "description": "Selected authentication environments.",
            "packages": {"reana-auth-krb5": {}, "reana-auth-vomsproxy": {}},
        },
        "examples": {
            "simple": True,
            "title": "Examples",
            "description": "Selected reusable analysis examples.",
            "packages": {demo: {} for demo in sorted(REANA_LIST_DEMO_ALL)},
        },
    }

    def _print_section(data):
        # Emit one "### <title>" section with its badge table.
        # NOTE: replacement fields use single quotes — reusing the f-string's
        # own double quotes is a SyntaxError before Python 3.12 (PEP 701).
        click.echo(f"### {data['title']}\n")
        click.echo(f"{data['description']}\n")
        _print_table(data["packages"], simple=data.get("simple"))
        click.echo()

    def _print_header(hs):
        # Emit the Markdown table header row and the |---|---| separator row.
        header = separator = "|"
        for h in hs:
            header += f" {h} |"
            separator += f" {'-' * len(h)} |"
        click.echo(header)
        click.echo(separator)

    def _print_table(components, simple=False):
        if simple:
            headers = ["Package", "Build", "Version"]
        else:
            headers = [
                "Package",
                "`master`",
                "Docs",
                "Coverage",
                "Version",
            ]
            # Insert one column per supported maintenance branch after "Package".
            headers[1:1] = [f"`{branch}`" for branch in GIT_SUPPORTED_MAINT_BRANCHES]
        _print_header(headers)
        for c, options in components.items():
            table_row = f"| [{c}]({GITHUB_REANAHUB_URL}/{c}) "
            if not simple:
                for branch in GIT_SUPPORTED_MAINT_BRANCHES:
                    table_row += f"| [![Build]({GITHUB_REANAHUB_URL}/{c}/workflows/CI/badge.svg?branch={branch})]({GITHUB_REANAHUB_URL}/{c}/actions?query=branch:{branch}) " if False else f"| []({GITHUB_REANAHUB_URL}/{c}/actions?query=branch:{branch}) "
            table_row += f"| []({GITHUB_REANAHUB_URL}/{c}/actions?query=branch:master) "
            if not simple:
                table_row += (
                    f"| [](https://{c}.readthedocs.io/en/latest/?badge=latest) "
                    if options.get("docs", True)
                    else "| N/A "
                ) + (
                    f"| []({CODECOV_REANAHUB_URL}/{c}) "
                    if options.get("coverage", True)
                    else "| N/A "
                )
            table_row += f"| []({GITHUB_REANAHUB_URL}/{c}/releases) |"
            click.echo(table_row)
        click.echo()

    click.echo("# REANA build status\n")
    # Only the section payloads are needed; the keys are organizational.
    for data in sections.values():
        _print_section(data)
# All wiki subcommands, collected for registration with the top-level reana-dev CLI.
wiki_commands_list = list(wiki_commands.commands.values())
| # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2020, 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""`reana-dev`'s wiki commands."""
import click
from reana.config import (
CODECOV_REANAHUB_URL,
GIT_SUPPORTED_MAINT_BRANCHES,
GITHUB_REANAHUB_URL,
REANA_LIST_DEMO_ALL,
)
@click.group()
def wiki_commands():
    """Wiki commands group."""
    # Click group entry point: subcommands attach via @wiki_commands.command().
@wiki_commands.command(name="wiki-create-build-status-page")
def create_build_status_page():
    """Generate Markdown for the Build Status wiki page."""
    # Section layout. "simple" sections get only Package/Build/Version
    # columns; full sections additionally get maintenance-branch, Docs and
    # Coverage badge columns. Per-package options may opt out of the docs
    # and coverage badges.
    sections = {
        "researchers": {
            "title": "For researchers",
            "description": "Find out how you can use REANA to describe, run, preserve and reuse your analyses.",
            "packages": {
                "reana-client": {},
                "blog.reana.io": {"coverage": False, "docs": False},
                "docs.reana.io": {"coverage": False, "docs": False},
                "www.reana.io": {"coverage": False, "docs": False},
            },
        },
        "administrators": {
            "title": "For administrators",
            "description": "Install and manage the REANA reusable analysis platform on your own compute cloud.",
            "packages": {
                "reana-commons": {},
                "reana-db": {},
                "reana-job-controller": {},
                "reana-message-broker": {},
                "reana-server": {},
                "reana-ui": {},
                "reana-workflow-controller": {},
                "reana-workflow-engine-cwl": {},
                "reana-workflow-engine-serial": {},
                "reana-workflow-engine-yadage": {},
                "reana-workflow-engine-snakemake": {},
            },
        },
        "developers": {
            "title": "For developers",
            "description": "Understand REANA source code, adapt it to your needs, contribute changes back.",
            "packages": {"reana": {}, "pytest-reana": {}},
        },
        "environments": {
            "simple": True,
            "title": "Environments",
            "description": "Selected containerised environments.",
            "packages": {
                "reana-env-aliphysics": {},
                "reana-env-jupyter": {},
                "reana-env-root6": {},
            },
        },
        "authentication": {
            "simple": True,
            "title": "Authentication",
            "description": "Selected authentication environments.",
            "packages": {"reana-auth-krb5": {}, "reana-auth-vomsproxy": {},},
        },
        "examples": {
            "simple": True,
            "title": "Examples",
            "description": "Selected reusable analysis examples.",
            "packages": {demo: {} for demo in sorted(REANA_LIST_DEMO_ALL)},
        },
    }
    def _print_section(data):
        # Emit one "### <title>" section with its badge table.
        click.echo(f"### {data['title']}\n")
        click.echo(f"{data['description']}\n")
        _print_table(data["packages"], simple=data.get("simple"))
        click.echo()
    def _print_header(hs):
        # Emit the Markdown table header row and the |---|---| separator row.
        header = separator = "|"
        for h in hs:
            header += f" {h} |"
            separator += f" {'-' * len(h)} |"
        click.echo(header)
        click.echo(separator)
    def _print_table(components, simple=False):
        if simple:
            headers = ["Package", "Build", "Version"]
        else:
            headers = [
                "Package",
                "`master`",
                "Docs",
                "Coverage",
                "Version",
            ]
            # Insert one column per supported maintenance branch after "Package".
            headers[1:1] = [f"`{branch}`" for branch in GIT_SUPPORTED_MAINT_BRANCHES]
        _print_header(headers)
        for c, options in components.items():
            table_row = f"| [{c}]({GITHUB_REANAHUB_URL}/{c}) "
            if not simple:
                # One CI badge per maintenance branch.
                for branch in GIT_SUPPORTED_MAINT_BRANCHES:
                    table_row += f"| []({GITHUB_REANAHUB_URL}/{c}/actions?query=branch:{branch}) "
            table_row += f"| []({GITHUB_REANAHUB_URL}/{c}/actions?query=branch:master) "
            if not simple:
                # Docs and coverage badges, unless the package opted out.
                table_row += (
                    f"| [](https://{c}.readthedocs.io/en/latest/?badge=latest) "
                    if options.get("docs", True)
                    else "| N/A "
                ) + (
                    f"| []({CODECOV_REANAHUB_URL}/{c}) "
                    if options.get("coverage", True)
                    else "| N/A "
                )
            table_row += f"| []({GITHUB_REANAHUB_URL}/{c}/releases) |"
            click.echo(table_row)
        click.echo()
    click.echo("# REANA build status\n")
    # NOTE(review): `section` (the key) is unused; only the payload is needed.
    for section, data in sections.items():
        _print_section(data)
# All wiki subcommands, collected for registration with the top-level reana-dev CLI.
wiki_commands_list = list(wiki_commands.commands.values())
|
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QCheckBox, QFrame, QHBoxLayout, QPushButton
from ...utils.interactions import KEY_SYMBOLS
class QtLayerButtons(QFrame):
    """Row of buttons for creating and deleting napari layers.

    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.

    Attributes
    ----------
    deleteButton : QtDeleteButton
        Button to delete selected layers.
    newLabelsButton : QtViewerPushButton
        Button to add new Label layer.
    newPointsButton : QtViewerPushButton
        Button to add new Points layer.
    newShapesButton : QtViewerPushButton
        Button to add new Shapes layer.
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """

    def __init__(self, viewer):
        super().__init__()
        self.viewer = viewer
        self.deleteButton = QtDeleteButton(self.viewer)

        def _new_layer_ndim():
            # New layers should have at least 2 dimensions, matching the
            # viewer's current dimensionality otherwise.
            return max(self.viewer.dims.ndim, 2)

        self.newPointsButton = QtViewerPushButton(
            self.viewer,
            'new_points',
            'New points layer',
            lambda: self.viewer.add_points(
                ndim=_new_layer_ndim(),
                scale=self.viewer.layers.extent.step,
            ),
        )
        self.newShapesButton = QtViewerPushButton(
            self.viewer,
            'new_shapes',
            'New shapes layer',
            lambda: self.viewer.add_shapes(
                ndim=_new_layer_ndim(),
                scale=self.viewer.layers.extent.step,
            ),
        )
        self.newLabelsButton = QtViewerPushButton(
            self.viewer,
            'new_labels',
            'New labels layer',
            lambda: self.viewer._new_labels(),
        )

        # Creation buttons on the left, delete button pushed to the right.
        row = QHBoxLayout()
        row.setContentsMargins(0, 0, 0, 0)
        for button in (
            self.newPointsButton,
            self.newShapesButton,
            self.newLabelsButton,
        ):
            row.addWidget(button)
        row.addStretch(0)
        row.addWidget(self.deleteButton)
        self.setLayout(row)
class QtViewerButtons(QFrame):
    """Button controls for the napari viewer.

    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.

    Attributes
    ----------
    consoleButton : QtViewerPushButton
        Button to open iPython console within napari.
    rollDimsButton : QtViewerPushButton
        Button to roll orientation of spatial dimensions in the napari viewer.
    transposeDimsButton : QtViewerPushButton
        Button to transpose dimensions in the napari viewer.
    resetViewButton : QtViewerPushButton
        Button resetting the view of the rendered scene.
    gridViewButton : QtGridViewButton
        Button to toggle grid view mode of layers on and off.
    ndisplayButton : QtNDisplayButton
        Button to toggle number of displayed dimensions.
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """

    def __init__(self, viewer):
        super().__init__()
        self.viewer = viewer
        # NOTE: KEY_SYMBOLS keys below use single quotes — reusing the
        # f-string's own double quotes inside a replacement field is a
        # SyntaxError on Python < 3.12 (PEP 701).
        self.consoleButton = QtViewerPushButton(
            self.viewer,
            'console',
            f"Open IPython terminal ({KEY_SYMBOLS['Control']}-{KEY_SYMBOLS['Shift']}-C)",
        )
        self.consoleButton.setProperty('expanded', False)
        self.rollDimsButton = QtViewerPushButton(
            self.viewer,
            'roll',
            f"Roll dimensions order for display ({KEY_SYMBOLS['Control']}-E)",
            lambda: self.viewer.dims._roll(),
        )
        self.transposeDimsButton = QtViewerPushButton(
            self.viewer,
            'transpose',
            f"Transpose displayed dimensions ({KEY_SYMBOLS['Control']}-T)",
            lambda: self.viewer.dims._transpose(),
        )
        self.resetViewButton = QtViewerPushButton(
            self.viewer,
            'home',
            f"Reset view ({KEY_SYMBOLS['Control']}-R)",
            lambda: self.viewer.reset_view(),
        )
        self.gridViewButton = QtGridViewButton(self.viewer)
        self.ndisplayButton = QtNDisplayButton(self.viewer)

        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self.consoleButton)
        layout.addWidget(self.ndisplayButton)
        layout.addWidget(self.rollDimsButton)
        layout.addWidget(self.transposeDimsButton)
        layout.addWidget(self.gridViewButton)
        layout.addWidget(self.resetViewButton)
        layout.addStretch(0)
        self.setLayout(layout)
class QtDeleteButton(QPushButton):
    """Delete button to remove selected layers.

    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.

    Attributes
    ----------
    hover : bool
        Hover is true while mouse cursor is on the button widget.
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """

    def __init__(self, viewer):
        super().__init__()
        self.viewer = viewer
        # Initialize the documented attribute so it exists before any
        # drag event sets it.
        self.hover = False
        # Single-quoted keys: reusing the enclosing double quote inside an
        # f-string is a SyntaxError on Python < 3.12 (PEP 701).
        self.setToolTip(
            f"Delete selected layers ({KEY_SYMBOLS['Control']}-{KEY_SYMBOLS['Backspace']})"
        )
        self.setAcceptDrops(True)
        self.clicked.connect(lambda: self.viewer.layers.remove_selected())

    def dragEnterEvent(self, event):
        """The cursor enters the widget during a drag and drop operation.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        event.accept()
        self.hover = True
        self.update()

    def dragLeaveEvent(self, event):
        """The cursor leaves the widget during a drag and drop operation.

        Using event.ignore() here allows the event to pass through the
        parent widget to its child widget, otherwise the parent widget
        would catch the event and not pass it on to the child widget.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        event.ignore()
        self.hover = False
        self.update()

    def dropEvent(self, event):
        """The drag and drop mouse event is completed.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        event.accept()
        layer_name = event.mimeData().text()
        layer = self.viewer.layers[layer_name]
        # Dropping an unselected layer deletes just that layer; dropping a
        # selected layer deletes the whole selection.
        if not layer.selected:
            self.viewer.layers.remove(layer)
        else:
            self.viewer.layers.remove_selected()
class QtViewerPushButton(QPushButton):
    """Generic styled push button used throughout the viewer chrome.

    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    button_name : str
        Value stored in the widget's 'mode' property (presumably consumed by
        the stylesheet — confirm against napari QSS); also the fallback
        tooltip text.
    tooltip : str, optional
        Tooltip to show; defaults to ``button_name`` when not given.
    slot : callable, optional
        Callback connected to the button's ``clicked`` signal.

    Attributes
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """

    def __init__(self, viewer, button_name, tooltip=None, slot=None):
        super().__init__()
        self.viewer = viewer
        self.setToolTip(tooltip if tooltip else button_name)
        self.setProperty('mode', button_name)
        if slot is not None:
            self.clicked.connect(slot)
class QtGridViewButton(QCheckBox):
    """Button to toggle grid view mode of layers on and off.

    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.

    Attributes
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """

    def __init__(self, viewer):
        super().__init__()
        self.viewer = viewer
        # Single-quoted key: reusing the enclosing double quote inside an
        # f-string is a SyntaxError on Python < 3.12 (PEP 701).
        self.setToolTip(f"Toggle grid view ({KEY_SYMBOLS['Control']}-G)")
        self.viewer.grid.events.connect(self._on_grid_change)
        self.stateChanged.connect(self.change_grid)
        self._on_grid_change()

    def change_grid(self, state):
        """Toggle between grid view mode and (the ordinary) stack view mode.

        Parameters
        ----------
        state : qtpy.QtCore.Qt.CheckState
            State of the checkbox.
        """
        # Checked means grid view OFF (stack view) — mirrored by
        # _on_grid_change below.
        self.viewer.grid.enabled = state != Qt.Checked

    def _on_grid_change(self, event=None):
        """Update grid layout size.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        # Block the model's events while syncing so we don't re-enter
        # change_grid.
        with self.viewer.grid.events.blocker():
            self.setChecked(not self.viewer.grid.enabled)
class QtNDisplayButton(QCheckBox):
    """Button to toggle number of displayed dimensions (2D vs 3D).

    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """

    def __init__(self, viewer):
        super().__init__()
        self.viewer = viewer
        # Use single quotes for the inner subscript: reusing the f-string's
        # outer quote character is only valid on Python >= 3.12 (PEP 701).
        self.setToolTip(
            f"Toggle number of displayed dimensions ({KEY_SYMBOLS['Control']}-Y)"
        )
        # Keep checkbox state and dims model in sync in both directions.
        self.viewer.dims.events.ndisplay.connect(self._on_ndisplay_change)
        self.setChecked(self.viewer.dims.ndisplay == 3)
        self.stateChanged.connect(self.change_ndisplay)

    def change_ndisplay(self, state):
        """Toggle between 2D and 3D display.

        Parameters
        ----------
        state : qtpy.QtCore.Qt.CheckState
            State of the checkbox; checked selects 3D, unchecked 2D.
        """
        if state == Qt.Checked:
            self.viewer.dims.ndisplay = 3
        else:
            self.viewer.dims.ndisplay = 2

    def _on_ndisplay_change(self, event=None):
        """Sync the checkbox with ``viewer.dims.ndisplay``, blocking events.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent, optional
            Event from the Qt context.
        """
        # Blocker prevents this programmatic sync from re-emitting ndisplay.
        with self.viewer.dims.events.ndisplay.blocker():
            self.setChecked(self.viewer.dims.ndisplay == 3)
| from qtpy.QtCore import Qt
from qtpy.QtWidgets import QCheckBox, QFrame, QHBoxLayout, QPushButton
from ...utils.interactions import KEY_SYMBOLS
class QtLayerButtons(QFrame):
    """Button controls for napari layers.
    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    Attributes
    ----------
    deleteButton : QtDeleteButton
        Button to delete selected layers.
    newLabelsButton : QtViewerPushButton
        Button to add new Label layer.
    newPointsButton : QtViewerPushButton
        Button to add new Points layer.
    newShapesButton : QtViewerPushButton
        Button to add new Shapes layer.
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """
    def __init__(self, viewer):
        super().__init__()
        self.viewer = viewer
        self.deleteButton = QtDeleteButton(self.viewer)
        # New layers get at least 2 dimensions and take their scale from the
        # current layers' extent step so they line up with existing data.
        self.newPointsButton = QtViewerPushButton(
            self.viewer,
            'new_points',
            'New points layer',
            lambda: self.viewer.add_points(
                ndim=max(self.viewer.dims.ndim, 2),
                scale=self.viewer.layers.extent.step,
            ),
        )
        self.newShapesButton = QtViewerPushButton(
            self.viewer,
            'new_shapes',
            'New shapes layer',
            lambda: self.viewer.add_shapes(
                ndim=max(self.viewer.dims.ndim, 2),
                scale=self.viewer.layers.extent.step,
            ),
        )
        self.newLabelsButton = QtViewerPushButton(
            self.viewer,
            'new_labels',
            'New labels layer',
            lambda: self.viewer._new_labels(),
        )
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self.newPointsButton)
        layout.addWidget(self.newShapesButton)
        layout.addWidget(self.newLabelsButton)
        # The stretch pushes the delete button to the far end of the row.
        layout.addStretch(0)
        layout.addWidget(self.deleteButton)
        self.setLayout(layout)
class QtViewerButtons(QFrame):
    """Button controls for the napari viewer.
    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    Attributes
    ----------
    consoleButton : QtViewerPushButton
        Button to open iPython console within napari.
    rollDimsButton : QtViewerPushButton
        Button to roll orientation of spatial dimensions in the napari viewer.
    transposeDimsButton : QtViewerPushButton
        Button to transpose dimensions in the napari viewer.
    resetViewButton : QtViewerPushButton
        Button resetting the view of the rendered scene.
    gridViewButton : QtGridViewButton
        Button to toggle grid view mode of layers on and off.
    ndisplayButton : QtNDisplayButton
        Button to toggle number of displayed dimensions.
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """
    def __init__(self, viewer):
        super().__init__()
        self.viewer = viewer
        self.consoleButton = QtViewerPushButton(
            self.viewer,
            'console',
            f"Open IPython terminal ({KEY_SYMBOLS['Control']}-{KEY_SYMBOLS['Shift']}-C)",
        )
        # NOTE(review): 'expanded' is presumably consumed by the stylesheet /
        # console toggle logic elsewhere — confirm before relying on it.
        self.consoleButton.setProperty('expanded', False)
        self.rollDimsButton = QtViewerPushButton(
            self.viewer,
            'roll',
            f"Roll dimensions order for display ({KEY_SYMBOLS['Control']}-E)",
            lambda: self.viewer.dims._roll(),
        )
        self.transposeDimsButton = QtViewerPushButton(
            self.viewer,
            'transpose',
            f"Transpose displayed dimensions ({KEY_SYMBOLS['Control']}-T)",
            lambda: self.viewer.dims._transpose(),
        )
        self.resetViewButton = QtViewerPushButton(
            self.viewer,
            'home',
            f"Reset view ({KEY_SYMBOLS['Control']}-R)",
            lambda: self.viewer.reset_view(),
        )
        self.gridViewButton = QtGridViewButton(self.viewer)
        self.ndisplayButton = QtNDisplayButton(self.viewer)
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self.consoleButton)
        layout.addWidget(self.ndisplayButton)
        layout.addWidget(self.rollDimsButton)
        layout.addWidget(self.transposeDimsButton)
        layout.addWidget(self.gridViewButton)
        layout.addWidget(self.resetViewButton)
        # Stretch at the end keeps the buttons packed to the left.
        layout.addStretch(0)
        self.setLayout(layout)
class QtDeleteButton(QPushButton):
    """Push button that deletes selected layers, with drag-and-drop support.

    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.

    Attributes
    ----------
    hover : bool
        Hover is true while mouse cursor is on the button widget.
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """

    def __init__(self, viewer):
        super().__init__()
        self.viewer = viewer
        self.setToolTip(
            f"Delete selected layers ({KEY_SYMBOLS['Control']}-{KEY_SYMBOLS['Backspace']})"
        )
        self.setAcceptDrops(True)
        self.clicked.connect(lambda: self.viewer.layers.remove_selected())

    def dragEnterEvent(self, event):
        """Accept an incoming drag and mark the button as hovered.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        event.accept()
        self.hover = True
        self.update()

    def dragLeaveEvent(self, event):
        """Clear the hover highlight when a drag leaves the widget.

        The event is ignored (not accepted) so it can pass through the
        parent widget down to its child widget instead of being swallowed
        by the parent.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        event.ignore()
        self.hover = False
        self.update()

    def dropEvent(self, event):
        """Delete the dropped layer, or the whole selection if it is selected.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context; its mime text carries the layer name.
        """
        event.accept()
        dropped = self.viewer.layers[event.mimeData().text()]
        if dropped.selected:
            self.viewer.layers.remove_selected()
        else:
            self.viewer.layers.remove(dropped)
class QtViewerPushButton(QPushButton):
    """Push button.
    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    Attributes
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """
    def __init__(self, viewer, button_name, tooltip=None, slot=None):
        super().__init__()
        self.viewer = viewer
        # Tooltip falls back to the button name when none is supplied.
        self.setToolTip(tooltip or button_name)
        # NOTE(review): 'mode' is presumably matched by the Qt stylesheet to
        # select the button icon — confirm against the styles.
        self.setProperty('mode', button_name)
        if slot is not None:
            self.clicked.connect(slot)
class QtGridViewButton(QCheckBox):
    """Button to toggle grid view mode of layers on and off.
    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    Attributes
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """
    def __init__(self, viewer):
        super().__init__()
        self.viewer = viewer
        self.setToolTip(f"Toggle grid view ({KEY_SYMBOLS['Control']}-G)")
        # Keep checkbox state and grid model in sync in both directions.
        self.viewer.grid.events.connect(self._on_grid_change)
        self.stateChanged.connect(self.change_grid)
        self._on_grid_change()
    def change_grid(self, state):
        """Toggle between grid view mode and (the ordinary) stack view mode.
        Parameters
        ----------
        state : qtpy.QtCore.Qt.CheckState
            State of the checkbox.
        """
        # Inverted mapping: a checked box turns grid mode OFF.
        self.viewer.grid.enabled = not state == Qt.Checked
    def _on_grid_change(self, event=None):
        """Update grid layout size.
        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        # Block grid events while syncing so this update does not feed back.
        with self.viewer.grid.events.blocker():
            self.setChecked(not self.viewer.grid.enabled)
class QtNDisplayButton(QCheckBox):
    """Button to toggle number of displayed dimensions.
    Parameters
    ----------
    viewer : napari.components.ViewerModel
        Napari viewer containing the rendered scene, layers, and controls.
    """
    def __init__(self, viewer):
        super().__init__()
        self.viewer = viewer
        self.setToolTip(
            f"Toggle number of displayed dimensions ({KEY_SYMBOLS['Control']}-Y)"
        )
        # Keep checkbox state and dims model in sync in both directions.
        self.viewer.dims.events.ndisplay.connect(self._on_ndisplay_change)
        self.setChecked(self.viewer.dims.ndisplay == 3)
        self.stateChanged.connect(self.change_ndisplay)
    def change_ndisplay(self, state):
        """Toggle between 2D and 3D display.
        Parameters
        ----------
        state : bool
            If state is True the display view is 3D, if False display is 2D.
        """
        # Checked == 3D, unchecked == 2D.
        if state == Qt.Checked:
            self.viewer.dims.ndisplay = 3
        else:
            self.viewer.dims.ndisplay = 2
    def _on_ndisplay_change(self, event=None):
        """Update number of displayed dimensions, while blocking events.
        Parameters
        ----------
        event : qtpy.QtCore.QEvent, optional
            Event from the Qt context.
        """
        # Blocker prevents this programmatic sync from re-emitting ndisplay.
        with self.viewer.dims.events.ndisplay.blocker():
            self.setChecked(self.viewer.dims.ndisplay == 3)
|
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""
software_update.py
This is the module that holds the APIs and utility functions required to manage
the software udpates system. It contains APIs to fetch the current software
update status, execute software update, check if the device has rebooted and webserver
is back up online.
"""
import itertools
import time
import os
import json
from flask import (Blueprint,
jsonify,
request,
Response)
from deepracer_interfaces_pkg.srv import (SoftwareUpdateCheckSrv,
BeginSoftwareUpdateSrv,
SoftwareUpdateStateSrv)
from webserver_pkg.constants import (SOFTWARE_UPDATE_STATUS_PATH,
SOFTWARE_UPDATE_FETCH_FREQUENCY,
SLEEP_TIME_BEFORE_REBOOT)
from webserver_pkg.utility import (execute,
call_service_sync)
from webserver_pkg import webserver_publisher_node
SOFTWARE_UPDATE_BLUEPRINT = Blueprint("software_update", __name__)
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/get_mandatory_update_status", methods=["GET"])
def get_mandatory_update_status():
    """API to read the saved mandatory update status from the software update status file
    and return the value to the client.

    Returns:
        dict: Execution status if the API call was successful with mandatory update status
        and error reason if failed.
    """
    webserver_node = webserver_publisher_node.get_webserver_node()
    webserver_node.get_logger().info("Getting mandatory software update status.")
    # Default value used to seed the status file on first run.
    status = {"update_completed": False}
    try:
        if os.path.isfile(SOFTWARE_UPDATE_STATUS_PATH):
            with open(SOFTWARE_UPDATE_STATUS_PATH, "r") as status_file:
                status = json.load(status_file)
        else:
            # First run: create the file holding the default status.
            with open(SOFTWARE_UPDATE_STATUS_PATH, "w") as status_file:
                json.dump(status, status_file)
        # Guard against a malformed or hand-edited status file.
        if "update_completed" not in status:
            return jsonify({"success": False,
                            "reason": "Incorrect software update status object"
                            })
        return jsonify({"success": True,
                        "status": status["update_completed"]
                        })
    except Exception as ex:
        webserver_node.get_logger().error(f"Unable to read from software update status file {ex}")
        return jsonify({"success": False,
                        "reason": "Unable to read from software update status file"})
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/set_mandatory_update_status", methods=["PUT", "POST"])
def set_mandatory_update_status():
    """API to save the mandatory update status to the software update status file.

    Returns:
        dict: Execution status if the API call was successful.
    """
    webserver_node = webserver_publisher_node.get_webserver_node()
    update_completed_value = request.json.get("update_completed")
    # Enforce the boolean contract stated in the error message: previously any
    # non-None value (e.g. a string) was accepted and persisted verbatim.
    if not isinstance(update_completed_value, bool):
        return jsonify({"success": False, "reason": "Status must be a boolean"})
    webserver_node.get_logger().info("Setting mandatory software update status to "
                                     f"{update_completed_value}")
    software_update_status = {"update_completed": update_completed_value}
    try:
        with open(SOFTWARE_UPDATE_STATUS_PATH, "w") as software_update_status_file:
            json.dump(software_update_status, software_update_status_file)
        return jsonify({"success": True})
    except IOError:
        webserver_node.get_logger().error("Unable to set software update status: "
                                          f"{software_update_status}")
        return jsonify({"success": False})
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/is_software_update_available", methods=["GET"])
def is_software_update_available():
    """API to call the service to check if the software update is available.

    Returns:
        dict: Execution status if the API call was successful with the software update
        status and error reason if failed.
    """
    webserver_node = webserver_publisher_node.get_webserver_node()
    failure = {"success": False,
               "reason": "Unable to reach software update state server"}
    try:
        request_msg = SoftwareUpdateCheckSrv.Request()
        request_msg.force_update_check = False
        # Explicit 180 s timeout for the state-check service call.
        response = call_service_sync(webserver_node.sw_update_state_cli,
                                     request_msg,
                                     timeout=180)
        if not response:
            return jsonify(failure)
        webserver_node.get_logger().info("Status returned from software_update_get_state: "
                                         f"{response.software_update_state}")
        # State encoding: 0 -> up to date, 1 -> update available,
        # anything else -> error/pending/progress state.
        return jsonify({"success": True,
                        "status": response.software_update_state == 1})
    except Exception as ex:
        webserver_node.get_logger().error(f"Unable to reach software update state server: {ex}")
        return jsonify(failure)
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/begin_software_update", methods=["GET", "POST"])
def begin_software_update():
    """API to call the service to begin the software update process.

    Returns:
        dict: Execution status if the API call was successful and error reason if failed.
    """
    webserver_node = webserver_publisher_node.get_webserver_node()
    try:
        webserver_node.get_logger().info("Started software update.")
        request_msg = BeginSoftwareUpdateSrv.Request()
        request_msg.sleep_time_before_reboot = SLEEP_TIME_BEFORE_REBOOT
        response = call_service_sync(webserver_node.begin_sw_update_cli, request_msg)
        if response and response.response_status:
            return jsonify({"success": True})
        webserver_node.get_logger().error("Begin software update service call failed")
        return jsonify({"success": False,
                        "reason": "Update service call failed"
                        })
    except Exception as ex:
        webserver_node.get_logger().error(f"Unable to reach begin update server: {ex}")
        return jsonify({"success": False,
                        "reason": "Unable to reach begin update server"
                        })
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/update_status", methods=["GET"])
def get_software_update_status():
    """API to stream the software update progress percentage and the current state.

    Returns:
        flask.Response: Flask response object with the content_type set to text/event-stream.
    """
    webserver_node = webserver_publisher_node.get_webserver_node()
    webserver_node.get_logger().info("Inside the update status function")

    def events():
        webserver_node.get_logger().info("Running software update event source")
        # Raw string: "\|" is an invalid escape sequence in a plain literal.
        for i, c in enumerate(itertools.cycle(r"\|/-")):
            try:
                pct_dict = webserver_node.pct_dict_db.get_nowait()
                percentage_completion = pct_dict["update_pct"]
                # Single quotes for the inner subscript: reusing the outer
                # double quote in an f-string requires Python >= 3.12.
                result = f"status:{pct_dict['status']}|update_pct:{percentage_completion}"
                yield "data: %s %d\n\n" % (result, i)
                # The sleep is introduced here so as to fetch the next message from
                # the software_update_status service. This is the rate at which the
                # UI sees status changes, so it controls how often the browser is
                # refreshed with software update information.
                time.sleep(SOFTWARE_UPDATE_FETCH_FREQUENCY)
            except Exception as ex:
                webserver_node.get_logger().error(f"Unable to reach update status service: {ex}")
                result = "status:checking|update_pct:0"
                yield f"data: {result} {1}\n\n"
                break
    return Response(events(), content_type="text/event-stream")
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/server_ready", methods=["GET"])
def isServerReady():
    """API to check if the server is back up after reboot.

    Returns:
        dict: A successful call to this API will return a success status.
    """
    # Reaching this handler at all proves the webserver is serving again.
    return jsonify({"success": True, "status": True})
| #################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""
software_update.py
This is the module that holds the APIs and utility functions required to manage
the software udpates system. It contains APIs to fetch the current software
update status, execute software update, check if the device has rebooted and webserver
is back up online.
"""
import itertools
import time
import os
import json
from flask import (Blueprint,
jsonify,
request,
Response)
from deepracer_interfaces_pkg.srv import (SoftwareUpdateCheckSrv,
BeginSoftwareUpdateSrv,
SoftwareUpdateStateSrv)
from webserver_pkg.constants import (SOFTWARE_UPDATE_STATUS_PATH,
SOFTWARE_UPDATE_FETCH_FREQUENCY,
SLEEP_TIME_BEFORE_REBOOT)
from webserver_pkg.utility import (execute,
call_service_sync)
from webserver_pkg import webserver_publisher_node
SOFTWARE_UPDATE_BLUEPRINT = Blueprint("software_update", __name__)
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/get_mandatory_update_status", methods=["GET"])
def get_mandatory_update_status():
    """API to read the saved mandatory update status from the software update status file
    and return the value to the client.
    Returns:
        dict: Execution status if the API call was successful with mandatory update status
        and error reason if failed.
    """
    webserver_node = webserver_publisher_node.get_webserver_node()
    webserver_node.get_logger().info("Getting mandatory software update status.")
    # Default value used to seed the status file on first run.
    software_update_status = {"update_completed": False}
    try:
        # if software_update_status file not exists, create one.
        if not os.path.isfile(SOFTWARE_UPDATE_STATUS_PATH):
            with open(SOFTWARE_UPDATE_STATUS_PATH, "w") as software_update_status_file:
                json.dump(software_update_status, software_update_status_file)
        else:
            with open(SOFTWARE_UPDATE_STATUS_PATH, "r") as software_update_status_file:
                software_update_status = json.load(software_update_status_file)
        # Guard against a malformed or hand-edited status file.
        if "update_completed" not in software_update_status:
            return jsonify({"success": False,
                            "reason": "Incorrect software update status object"
                            })
        return jsonify({"success": True,
                        "status": software_update_status["update_completed"]
                        })
    except Exception as ex:
        webserver_node.get_logger().error(f"Unable to read from software update status file {ex}")
        return jsonify({"success": False,
                        "reason": "Unable to read from software update status file"})
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/set_mandatory_update_status", methods=["PUT", "POST"])
def set_mandatory_update_status():
    """API to save the mandatory update status to the software update status file.
    Returns:
        dict: Execution status if the API call was successful.
    """
    webserver_node = webserver_publisher_node.get_webserver_node()
    update_completed_value = request.json.get("update_completed")
    # NOTE(review): only None is rejected — non-bool values slip through
    # despite the "must be a boolean" message below; confirm intended contract.
    if update_completed_value is None:
        return jsonify({"success": False, "reason": "Status must be a boolean"})
    webserver_node.get_logger().info("Setting mandatory software update status to "
                                     f"{update_completed_value}")
    software_update_status = {"update_completed": update_completed_value}
    try:
        with open(SOFTWARE_UPDATE_STATUS_PATH, "w") as software_update_status_file:
            json.dump(software_update_status, software_update_status_file)
        return jsonify({"success": True})
    except IOError:
        webserver_node.get_logger().error("Unable to set software update status: "
                                          f"{software_update_status}")
        return jsonify({"success": False})
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/is_software_update_available", methods=["GET"])
def is_software_update_available():
    """API to call the service to check if the software update is available.
    Returns:
        dict: Execution status if the API call was successful with the software update
        status and error reason if failed.
    """
    webserver_node = webserver_publisher_node.get_webserver_node()
    try:
        sw_update_state_req = SoftwareUpdateCheckSrv.Request()
        sw_update_state_req.force_update_check = False
        # Explicit 180 s timeout for the state-check service call.
        sw_update_state_res = call_service_sync(webserver_node.sw_update_state_cli,
                                                sw_update_state_req,
                                                timeout=180)
        if sw_update_state_res:
            webserver_node.get_logger().info("Status returned from software_update_get_state: "
                                             f"{sw_update_state_res.software_update_state}")
            # Software update status == 0 -> up to date,
            # 1 -> update available,
            # others -> error/pending/progress state
            return jsonify({"success": True,
                            "status": sw_update_state_res.software_update_state == 1
                            })
        else:
            return jsonify({"success": False,
                            "reason": "Unable to reach software update state server"
                            })
    except Exception as ex:
        webserver_node.get_logger().error(f"Unable to reach software update state server: {ex}")
        return jsonify({"success": False,
                        "reason": "Unable to reach software update state server"})
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/begin_software_update", methods=["GET", "POST"])
def begin_software_update():
    """API to call the service to begin the software update process.
    Returns:
        dict: Execution status if the API call was successful and error reason if failed.
    """
    webserver_node = webserver_publisher_node.get_webserver_node()
    try:
        webserver_node.get_logger().info("Started software update.")
        begin_sw_update_req = BeginSoftwareUpdateSrv.Request()
        # NOTE(review): presumably delays the device reboot after the update —
        # confirm semantics against the BeginSoftwareUpdateSrv definition.
        begin_sw_update_req.sleep_time_before_reboot = SLEEP_TIME_BEFORE_REBOOT
        begin_sw_update_res = call_service_sync(webserver_node.begin_sw_update_cli,
                                                begin_sw_update_req)
        if begin_sw_update_res and begin_sw_update_res.response_status:
            return jsonify({"success": True})
        else:
            webserver_node.get_logger().error("Begin software update service call failed")
            return jsonify({"success": False,
                            "reason": "Update service call failed"
                            })
    except Exception as ex:
        webserver_node.get_logger().error(f"Unable to reach begin update server: {ex}")
        return jsonify({"success": False,
                        "reason": "Unable to reach begin update server"
                        })
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/update_status", methods=["GET"])
def get_software_update_status():
    """API to stream the software update progress percentage and the current state.

    Returns:
        flask.Response: Flask response object with the content_type set to text/event-stream.
    """
    webserver_node = webserver_publisher_node.get_webserver_node()
    webserver_node.get_logger().info("Inside the update status function")

    def events():
        webserver_node.get_logger().info("Running software update event source")
        # Raw string: "\|" is an invalid escape sequence in a plain literal.
        for i, c in enumerate(itertools.cycle(r"\|/-")):
            try:
                pct_dict = webserver_node.pct_dict_db.get_nowait()
                percentage_completion = pct_dict["update_pct"]
                result = f"status:{pct_dict['status']}|update_pct:{percentage_completion}"
                yield "data: %s %d\n\n" % (result, i)
                # The sleep is introduced here so as to fetch the next message from
                # the software_update_status service. This is the rate at which the
                # UI sees status changes, so it controls how often the browser is
                # refreshed with software update information.
                time.sleep(SOFTWARE_UPDATE_FETCH_FREQUENCY)
            except Exception as ex:
                webserver_node.get_logger().error(f"Unable to reach update status service: {ex}")
                result = "status:checking|update_pct:0"
                yield f"data: {result} {1}\n\n"
                break
    return Response(events(), content_type="text/event-stream")
@SOFTWARE_UPDATE_BLUEPRINT.route("/api/server_ready", methods=["GET"])
def isServerReady():
    """API to check if the server is back up after reboot.
    Returns:
        dict: A successful call to this API will return a success status.
    """
    # Reaching this handler at all proves the webserver is serving again.
    # Name kept non-PEP8: Flask registers the route by this function.
    return jsonify({"success": True, "status": True})
|
#!/usr/bin/env nix-shell
#!nix-shell -p python3Packages.requests python3Packages.tabulate -i python3
"""
Jormungandr Analysis Tools
"""
__version__ = "0.1.0"
import argparse, requests, os, json, sys
from argparse import RawTextHelpFormatter
from requests.exceptions import HTTPError
from tabulate import tabulate
globalAggregate = None  # per-epoch block aggregate; populated by aggregate()
globalEpochBlocks = None  # NOTE(review): declared but never assigned in this file
globalPools = None  # stake distribution keyed by pool id; populated by distribution()
api_url_base = None  # REST base URL; resolved from --restapi or env in main()
api_url = None  # versioned endpoint, f"{api_url_base}/v0"; set in main()
def get_api(path):
    """GET a v0 REST *path* and return the response body as text."""
    response = endpoint(f'{api_url}/{path}')
    return response.text
def get_tip():
    """Return the node's current chain tip (used downstream as a block id)."""
    return get_api("tip")
def get_block(block_id):
    """Fetch the raw block *block_id* and return its bytes hex-encoded."""
    response = endpoint(f'{api_url}/block/{block_id}')
    return response.content.hex()
def parse_block(block):
    """Decode the fixed-offset header fields of a hex-encoded block.

    Offsets are hex-character positions into *block*: epoch and slot are
    parsed as big-endian hex integers; parent and pool are 64-char hex ids.
    """
    epoch_hex = block[16:24]
    slot_hex = block[24:32]
    return {
        "epoch": int(epoch_hex, 16),
        "slot": int(slot_hex, 16),
        "parent": block[104:168],
        "pool": block[168:232],
    }
def aggregateall():
    """Count total block production per pool, walking back from the tip.

    Walks the chain until genesis (parent == 64 zeros) or, when
    ``args.aggregateall`` > 0, until that many epochs have been covered,
    then prints a per-pool table of block counts and percentages.
    """
    tip = get_tip()
    block = parse_block(get_block(tip))
    currentEpoch = block['epoch']
    poolTotal = {}
    blockTotal = 0
    while block["parent"] != ("0" * 64):
        # 0 means "all epochs"; otherwise stop after the requested window.
        if args.aggregateall > 0:
            if (currentEpoch - args.aggregateall + 1) > block['epoch']:
                break
        pool = block["pool"]
        if pool not in poolTotal:
            poolTotal[pool] = {'blocks': 1}
        else:
            poolTotal[pool]['blocks'] = poolTotal[pool]['blocks'] + 1
        block = parse_block(get_block(block['parent']))
        blockTotal += 1
    lowestEpoch = block['epoch']
    if args.aggregateall > 0:
        print(f'\nJormungandr Block Aggregate for epochs {lowestEpoch + 1} - {currentEpoch}:\n')
    else:
        print('\nJormungandr Overall Block Aggregate:\n')
    headers = ["Pool (Node ID)", "Blocks (#)", "Block Percent (%)"]
    table = []
    for pool, data in poolTotal.items():
        table.append([pool, data['blocks'], data['blocks'] / blockTotal * 100])
    # tabulate() is called outside the f-string: embedding a lambda (and
    # re-using quote characters) in an f-string needs Python >= 3.12.
    print(tabulate(sorted(table, key=lambda x: x[0]), headers, tablefmt="psql"))
    print(f'TotalBlocks: {blockTotal} \n')
def aggregate(silent=False):
    """Aggregate block production per pool, per epoch, from the tip backwards.

    Walks the chain from the tip toward genesis (parent == 64 zeros); unless
    ``args.full`` is set, only the most recent ``args.aggregate`` epochs are
    covered.  The result is stored in ``globalAggregate`` and, unless
    *silent*, printed as JSON or tabulated text.

    Parameters
    ----------
    silent : bool
        When True, only build ``globalAggregate`` without printing.
    """
    global globalAggregate
    tip = get_tip()
    block = parse_block(get_block(tip))
    currentEpoch = block['epoch']
    epochBlockTotal = {}
    epochs = {}
    while block["parent"] != ("0" * 64):
        # Stop once we have walked past the requested number of epochs.
        if args.full == False:
            if (currentEpoch - args.aggregate + 1) > block['epoch']:
                break
        epoch = block['epoch']
        pool = block['pool']
        if epoch not in epochs:
            epochs[epoch] = {}
            epochBlockTotal[epoch] = 0
        if pool not in epochs[epoch]:
            epochs[epoch][pool] = {'blocks': 1}
        else:
            epochs[epoch][pool]['blocks'] = epochs[epoch][pool]['blocks'] + 1
        epochBlockTotal[epoch] = epochBlockTotal[epoch] + 1
        block = parse_block(get_block(block['parent']))
    # Attach per-epoch totals and per-pool percentages.
    for epoch, epochData in epochs.items():
        epochs[epoch]['stats'] = {'blocksum': epochBlockTotal[epoch]}
        for pool, poolData in epochData.items():
            if pool != 'stats':
                epochs[epoch][pool]['percent'] = poolData['blocks'] / epochBlockTotal[epoch] * 100
    if silent == False:
        if args.json == True:
            print(json.dumps(epochs, sort_keys=True))
        else:
            print('\nJormungandr Epoch Block Aggregate:\n')
            for epoch, epochData in epochs.items():
                headers = [f'EPOCH {epoch}, Pool (Node ID)', "Blocks (#)", "Block Percent (%)"]
                table = []
                for pool, data in epochData.items():
                    if pool != 'stats':
                        table.append([pool, data['blocks'], data['percent']])
                # Sorting/tabulating outside the f-string: lambdas and quote
                # reuse inside f-strings require Python >= 3.12 (PEP 701).
                if args.bigvaluesort == True:
                    rows = sorted(table, key=lambda x: x[1], reverse=True)
                else:
                    rows = sorted(table, key=lambda x: x[0])
                print(tabulate(rows, headers, tablefmt="psql"))
                print(f"{'Totalblocks:':<21}{epochData['stats']['blocksum']}\n\n")
    globalAggregate = epochs
def distribution(silent=False):
    """Fetch and report the current stake distribution across pools.

    Queries the REST ``stake`` endpoint, computes each pool's share of the
    total staked amount, stores the result in ``globalPools`` and, unless
    *silent*, prints it as JSON or tabulated text.

    Parameters
    ----------
    silent : bool
        When True, only build ``globalPools`` without printing.
    """
    global globalPools
    pools = {}
    r = endpoint(f'{api_url}/stake')
    raw = r.json()
    epoch = raw['epoch']
    dangling = raw['stake']['dangling']
    unassigned = raw['stake']['unassigned']
    if args.bigvaluesort == True:
        sortedRaw = sorted(raw['stake']['pools'], key=lambda x: x[1], reverse=True)
    else:
        sortedRaw = sorted(raw['stake']['pools'])
    stakeSum = 0
    for [pool, stake] in sortedRaw:
        pools[pool] = {'stake': stake, 'percent': 0}
        stakeSum = stakeSum + stake
    total = stakeSum + unassigned + dangling
    totalPercentStaked = stakeSum / total
    # Calculate percentage stake delegation of total staked ADA
    for pool in pools.keys():
        pools[pool]['percent'] = pools[pool]['stake'] / stakeSum * 100
    pools['stats'] = {}
    pools['stats']['epoch'] = epoch
    pools['stats']['dangling'] = dangling
    pools['stats']['unassigned'] = unassigned
    pools['stats']['total'] = total
    pools['stats']['stakesum'] = stakeSum
    pools['stats']['totalpercentstaked'] = totalPercentStaked
    if silent == False:
        if args.json == True:
            print(json.dumps(pools, sort_keys=True))
        else:
            # Distinct inner/outer quotes: reusing the f-string's quote
            # character requires Python >= 3.12 (PEP 701).
            print('\nJormungandr Stake Pool Distribution:\n')
            print(f"{'Epoch:':<21}{epoch}")
            print(f"{'Dangling:':<21}{dangling / 1e6:,.6f} ADA")
            print(f"{'Unassigned:':<21}{unassigned / 1e6:,.6f} ADA")
            print(f"{'Total:':<21}{total / 1e6:,.6f} ADA")
            print(f"{'TotalStaked:':<21}{stakeSum / 1e6:,.6f} ADA")
            print(f"{'TotalPercentStaked:':<21}{totalPercentStaked * 100:.2f}%\n")
            headers = [f'EPOCH {epoch}, Pool (Node ID)', "Stake (ADA)", "Percent (%)"]
            table = []
            for pool, poolData in pools.items():
                if pool != 'stats':
                    if args.nozero == False or poolData['stake'] != 0:
                        table.append([pool, poolData['stake'] / 1e6, poolData['percent']])
            if args.bigvaluesort == True:
                rows = sorted(table, key=lambda x: x[1], reverse=True)
            else:
                rows = sorted(table, key=lambda x: x[0])
            body = tabulate(rows, headers, tablefmt="psql", floatfmt=("%s", "0.6f"))
            print(f"{body}\n\n")
    globalPools = pools
def crossref():
    """Cross-reference stake distribution with tip-epoch block production.

    Runs :func:`aggregate` and :func:`distribution` silently if their cached
    results are missing, joins them on pool id for the distribution's epoch,
    and prints the combined table (or JSON with ``--json``).
    """
    if globalAggregate is None:
        args.aggregate = 1
        aggregate(silent=True)
    if globalPools is None:
        distribution(silent=True)
    # Renamed local (was `crossref`): shadowing the function name made
    # recursive/readability reasoning harder.
    xref = globalPools
    epoch = xref['stats']['epoch']
    for pool, poolData in xref.items():
        if pool != 'stats':
            if pool in globalAggregate[epoch]:
                xref[pool]['blocks'] = globalAggregate[epoch][pool]['blocks']
                xref[pool]['percentBlocks'] = globalAggregate[epoch][pool]['percent']
            else:
                # Pool holds stake but produced no blocks this epoch.
                xref[pool]['blocks'] = None
                xref[pool]['percentBlocks'] = None
    if args.json == True:
        print(json.dumps(xref, sort_keys=True))
    else:
        # Distinct inner/outer quotes: reusing the f-string's quote
        # character requires Python >= 3.12 (PEP 701).
        print('\nJormungandr Stake and Block Distribution Cross Reference:\n')
        print(f"{'Epoch:':<21}{epoch}")
        print(f"{'Dangling:':<21}{xref['stats']['dangling'] / 1e6:,.6f} ADA")
        print(f"{'Unassigned:':<21}{xref['stats']['unassigned'] / 1e6:,.6f} ADA")
        print(f"{'TotalADA:':<21}{xref['stats']['total'] / 1e6:,.6f} ADA")
        print(f"{'TotalBlocks:':<21}{globalAggregate[epoch]['stats']['blocksum']}")
        print(f"{'TotalStaked:':<21}{xref['stats']['stakesum'] / 1e6:,.6f} ADA")
        print(f"{'TotalPercentStaked:':<21}{xref['stats']['totalpercentstaked'] * 100:.2f}%\n")
        headers = [f'EPOCH {epoch}, Pool (Node ID)', "Stake (ADA)", "Blocks (#)", "PercentStaked (%)", "PercentBlocks (%)"]
        table = []
        for pool, poolData in xref.items():
            if pool != 'stats':
                if args.nozero == False or (not (poolData['stake'] == 0 and poolData['blocks'] is None)):
                    table.append([pool, poolData['stake'] / 1e6, poolData['blocks'],
                                  poolData['percent'], poolData['percentBlocks']])
        if args.bigvaluesort == True:
            rows = sorted(table, key=lambda x: x[1], reverse=True)
        else:
            rows = sorted(table, key=lambda x: x[0])
        body = tabulate(rows, headers, tablefmt="psql", floatfmt=("%s", "0.6f", "g", "g", "g"))
        print(f"{body}\n\n")
def stats():
r = endpoint(f'{api_url}/node/stats')
if args.json == True:
print(json.dumps(r.json(), sort_keys=True))
else:
print('Current node stats:\n')
print(json.dumps(r.json(), sort_keys=True, indent=2))
def endpoint(url):
try:
r = requests.get(url)
r.raise_for_status()
except HTTPError as http_err:
print("\nWeb API unavailable.\nError Details:\n")
print(f"HTTP error occurred: {http_err}")
exit(1)
except Exception as err:
print("\nWeb API unavailable.\nError Details:\n")
print(f"Other error occurred: {err}")
exit(1)
else:
return(r)
def check_int(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
return ivalue
def main():
global api_url_base
global api_url
if args.restapi is not None:
api_url_base = args.restapi
else:
api_url_base = os.environ.get("JORMUNGANDR_RESTAPI_URL", "http://localhost:3001/api")
api_url = f"{api_url_base}/v0"
if args.stats == True:
stats()
if args.aggregateall is not None:
aggregateall()
if args.aggregate is not None:
aggregate()
if args.distribution == True:
distribution()
if args.crossref == True:
crossref()
exit(0)
if __name__ == "__main__":
if len(sys.argv) == 1:
print(f'\nRun `{sys.argv[0]} -h` for help and usage information\n')
exit(0)
parser = argparse.ArgumentParser(description=(
"Jormungandr analysis tools\n\n"),
formatter_class=RawTextHelpFormatter)
parser.add_argument("-aa", "--aggregateall", nargs="?", metavar="X", type=check_int, const=0,
help="Calculate total block creation per pool for X epochs time starting with the tip or leave blank for all")
parser.add_argument("-a", "--aggregate", nargs="?", metavar="X", type=check_int, const=1,
help="Calculate aggregate block creation per pool for X epochs starting with the tip epoch (default = 1)")
parser.add_argument("-b", "--bigvaluesort", action="store_true",
help="Show non <-j|--json> output sorted by big to small value rather than keys where possible")
parser.add_argument("-d", "--distribution", action="store_true",
help="Calculate the stake distribution for the current epoch only")
parser.add_argument("-f", "--full", action="store_true",
help="Calculate the full epoch history where possible")
parser.add_argument("-j", "--json", action="store_true",
help="Output raw json only")
parser.add_argument("-n", "--nozero", action="store_true",
help="Don't show zero value staking pools (blocks minted or stake valued)")
parser.add_argument("-s", "--stats", action="store_true",
help="Show the current node stats")
parser.add_argument("-v", "--version", action="store_true",
help="Show the program version and exit")
parser.add_argument("-x", "--crossref", action="store_true",
help="Analyse the current epoch, cross referencing both block aggregate and stake distributions")
parser.add_argument("-r", "--restapi", nargs="?", metavar="RESTAPI", type=str, const="http://127.0.0.1:3001/api",
help="Set the rest api to utilize; by default: \"http://127.0.0.1:3001/api\". An env var of JORMUNGANDR_RESTAPI_URL can also be seperately set. ")
args = parser.parse_args()
if args.version:
print(f'Version: {__version__}\n')
exit(0)
main()
| #!/usr/bin/env nix-shell
#!nix-shell -p python3Packages.requests python3Packages.tabulate -i python3
"""
Jormungandr Analysis Tools
"""
__version__ = "0.1.0"
import argparse, requests, os, json, sys
from argparse import RawTextHelpFormatter
from requests.exceptions import HTTPError
from tabulate import tabulate
globalAggregate = None
globalEpochBlocks = None
globalPools = None
api_url_base = None
api_url = None
def get_api(path):
r = endpoint(f'{api_url}/{path}')
return r.text
def get_tip():
return get_api("tip")
def get_block(block_id):
r = endpoint(f'{api_url}/block/{block_id}')
hex_block = r.content.hex()
return hex_block
def parse_block(block):
return {
"epoch": int(block[16:24], 16),
"slot": int(block[24:32], 16),
"parent": block[104:168],
"pool": block[168:232],
}
def aggregateall():
tip = get_tip()
block = parse_block(get_block(tip))
currentEpoch = block['epoch']
poolTotal = {}
blockTotal = 0
while block["parent"] != ("0" * 64):
if args.aggregateall > 0:
if (currentEpoch - args.aggregateall + 1) > block['epoch']:
break
pool = block["pool"]
if pool not in poolTotal:
poolTotal[pool] = {}
poolTotal[pool]['blocks'] = 1
else:
poolTotal[pool]['blocks'] = poolTotal[pool]['blocks'] + 1
block = parse_block(get_block(block['parent']))
blockTotal += 1
lowestEpoch = block['epoch']
if args.aggregateall > 0:
print(f'\nJormungandr Block Aggregate for epochs {lowestEpoch + 1} - {currentEpoch}:\n')
else:
print('\nJormungandr Overall Block Aggregate:\n')
headers = [f'Pool (Node ID)', "Blocks (#)", "Block Percent (%)"]
table = []
for pool, data in poolTotal.items():
record = [ pool, data['blocks'], data['blocks'] / blockTotal * 100 ]
table.append(record)
print(f'{tabulate(sorted(table, key=lambda x: x[0]), headers, tablefmt="psql")}')
print(f'TotalBlocks: {blockTotal} \n')
def aggregate(silent=False):
global globalAggregate
global globalEpochBlocks
tip = get_tip()
block = parse_block(get_block(tip))
epochBlockTotal = {}
currentEpoch = block['epoch']
epochs = {}
pools = {}
while block["parent"] != ("0" * 64):
if args.full == False:
if (currentEpoch - args.aggregate + 1) > block['epoch']:
break
epoch = block['epoch']
parent = block['parent']
pool = block['pool']
if epoch not in epochs:
epochs[epoch] = {}
epochBlockTotal[epoch] = 0
if pool not in epochs[epoch]:
epochs[epoch][pool] = {}
epochs[epoch][pool]['blocks'] = 1
epochBlockTotal[epoch] = epochBlockTotal[epoch] + 1
else:
epochs[epoch][pool]['blocks'] = epochs[epoch][pool]['blocks'] + 1
epochBlockTotal[epoch] = epochBlockTotal[epoch] + 1
block = parse_block(get_block(block['parent']))
for epoch, epochData in epochs.items():
epochs[epoch]['stats'] = {}
epochs[epoch]['stats']['blocksum'] = epochBlockTotal[epoch]
for pool, poolData in epochData.items():
if pool != 'stats':
epochs[epoch][pool]['percent'] = poolData['blocks'] / epochBlockTotal[epoch] * 100
if silent == False:
if args.json == True:
print(json.dumps(epochs, sort_keys=True))
else:
print('\nJormungandr Epoch Block Aggregate:\n')
for epoch, epochData in epochs.items():
headers = [f'EPOCH {epoch}, Pool (Node ID)', "Blocks (#)", "Block Percent (%)"]
table = []
for pool, data in epochData.items():
if pool != 'stats':
record = [ pool, data['blocks'], data['percent'] ]
table.append(record)
if args.bigvaluesort == True:
print(f'{tabulate(sorted(table, key=lambda x: x[1], reverse=True), headers, tablefmt="psql")}')
else:
print(f'{tabulate(sorted(table, key=lambda x: x[0]), headers, tablefmt="psql")}')
print(f'{"Totalblocks:":<21}{epochData["stats"]["blocksum"]}\n\n')
globalAggregate = epochs
def distribution(silent=False):
global globalPools
epoch = 0
unassigned = 0
dangling = 0
stakeSum = 0
totalPercentStaked = 0
total = 0
pools = {}
r = endpoint(f'{api_url}/stake')
raw = r.json()
epoch = raw['epoch']
dangling = raw['stake']['dangling']
unassigned = raw['stake']['unassigned']
if args.bigvaluesort == True:
sortedRaw = sorted(raw['stake']['pools'], key = lambda x: x[1], reverse=True)
else:
sortedRaw = sorted(raw['stake']['pools'])
for [pool, stake] in sortedRaw:
pools[pool] = {}
pools[pool]['stake'] = stake
pools[pool]['percent'] = 0
stakeSum = stakeSum + stake
total = stakeSum + unassigned + dangling
totalPercentStaked = stakeSum / total
# Calculate percentage stake delegation of total staked ADA
for pool in pools.keys():
pools[pool]['percent'] = pools[pool]['stake'] / stakeSum * 100
pools['stats'] = {}
pools['stats']['epoch'] = epoch
pools['stats']['dangling'] = dangling
pools['stats']['unassigned'] = unassigned
pools['stats']['total'] = total
pools['stats']['stakesum'] = stakeSum
pools['stats']['totalpercentstaked'] = totalPercentStaked
if silent == False:
if args.json == True:
print(json.dumps(pools, sort_keys=True))
else:
print('\nJormungandr Stake Pool Distribution:\n')
print(f'{"Epoch:":<21}{epoch}')
print(f'{"Dangling:":<21}{dangling / 1e6:,.6f} ADA')
print(f'{"Unassigned:":<21}{unassigned / 1e6:,.6f} ADA')
print(f'{"Total:":<21}{total / 1e6:,.6f} ADA')
print(f'{"TotalStaked:":<21}{stakeSum / 1e6:,.6f} ADA')
print(f'{"TotalPercentStaked:":<21}{totalPercentStaked * 100:.2f}%\n')
headers = [f'EPOCH {epoch}, Pool (Node ID)', "Stake (ADA)", "Percent (%)"]
table = []
for pool, poolData in pools.items():
if pool != 'stats':
if args.nozero == False or poolData['stake'] != 0:
record = [ pool, poolData['stake'] / 1e6, poolData['percent'] ]
table.append(record)
if args.bigvaluesort == True:
print(f'{tabulate(sorted(table, key=lambda x: x[1], reverse=True), headers, tablefmt="psql", floatfmt=("%s", "0.6f"))}\n\n')
else:
print(f'{tabulate(sorted(table, key=lambda x: x[0]), headers, tablefmt="psql", floatfmt=("%s", "0.6f"))}\n\n')
globalPools = pools
def crossref():
if globalAggregate == None:
args.aggregate = 1
aggregate(silent=True)
if globalPools == None:
distribution(silent=True)
crossref = globalPools
epoch = crossref['stats']['epoch']
for pool, poolData in crossref.items():
if pool != 'stats':
if pool in globalAggregate[epoch]:
crossref[pool]['blocks'] = globalAggregate[epoch][pool]['blocks']
crossref[pool]['percentBlocks'] = globalAggregate[epoch][pool]['percent']
else:
crossref[pool]['blocks'] = None
crossref[pool]['percentBlocks'] = None
if args.json == True:
print(json.dumps(crossref, sort_keys=True))
else:
print('\nJormungandr Stake and Block Distribution Cross Reference:\n')
print(f'{"Epoch:":<21}{epoch}')
print(f'{"Dangling:":<21}{crossref["stats"]["dangling"] / 1e6:,.6f} ADA')
print(f'{"Unassigned:":<21}{crossref["stats"]["unassigned"] / 1e6:,.6f} ADA')
print(f'{"TotalADA:":<21}{crossref["stats"]["total"] / 1e6:,.6f} ADA')
print(f'{"TotalBlocks:":<21}{globalAggregate[epoch]["stats"]["blocksum"]}')
print(f'{"TotalStaked:":<21}{crossref["stats"]["stakesum"] / 1e6:,.6f} ADA')
print(f'{"TotalPercentStaked:":<21}{crossref["stats"]["totalpercentstaked"] * 100:.2f}%\n')
headers = [f'EPOCH {epoch}, Pool (Node ID)', "Stake (ADA)", "Blocks (#)", "PercentStaked (%)", "PercentBlocks (%)"]
table = []
for pool, poolData in crossref.items():
if pool != 'stats':
if args.nozero == False or (not (poolData['stake'] == 0 and poolData['blocks'] == None)):
record = [ pool, poolData['stake'] / 1e6, poolData['blocks'], poolData['percent'], poolData['percentBlocks'] ]
table.append(record)
if args.bigvaluesort == True:
print(f'{tabulate(sorted(table, key=lambda x: x[1], reverse=True), headers, tablefmt="psql", floatfmt=("%s", "0.6f", "g", "g", "g"))}\n\n')
else:
print(f'{tabulate(sorted(table, key=lambda x: x[0]), headers, tablefmt="psql", floatfmt=("%s", "0.6f", "g", "g", "g"))}\n\n')
def stats():
r = endpoint(f'{api_url}/node/stats')
if args.json == True:
print(json.dumps(r.json(), sort_keys=True))
else:
print('Current node stats:\n')
print(json.dumps(r.json(), sort_keys=True, indent=2))
def endpoint(url):
try:
r = requests.get(url)
r.raise_for_status()
except HTTPError as http_err:
print("\nWeb API unavailable.\nError Details:\n")
print(f"HTTP error occurred: {http_err}")
exit(1)
except Exception as err:
print("\nWeb API unavailable.\nError Details:\n")
print(f"Other error occurred: {err}")
exit(1)
else:
return(r)
def check_int(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
return ivalue
def main():
global api_url_base
global api_url
if args.restapi is not None:
api_url_base = args.restapi
else:
api_url_base = os.environ.get("JORMUNGANDR_RESTAPI_URL", "http://localhost:3001/api")
api_url = f"{api_url_base}/v0"
if args.stats == True:
stats()
if args.aggregateall is not None:
aggregateall()
if args.aggregate is not None:
aggregate()
if args.distribution == True:
distribution()
if args.crossref == True:
crossref()
exit(0)
if __name__ == "__main__":
if len(sys.argv) == 1:
print(f'\nRun `{sys.argv[0]} -h` for help and usage information\n')
exit(0)
parser = argparse.ArgumentParser(description=(
"Jormungandr analysis tools\n\n"),
formatter_class=RawTextHelpFormatter)
parser.add_argument("-aa", "--aggregateall", nargs="?", metavar="X", type=check_int, const=0,
help="Calculate total block creation per pool for X epochs time starting with the tip or leave blank for all")
parser.add_argument("-a", "--aggregate", nargs="?", metavar="X", type=check_int, const=1,
help="Calculate aggregate block creation per pool for X epochs starting with the tip epoch (default = 1)")
parser.add_argument("-b", "--bigvaluesort", action="store_true",
help="Show non <-j|--json> output sorted by big to small value rather than keys where possible")
parser.add_argument("-d", "--distribution", action="store_true",
help="Calculate the stake distribution for the current epoch only")
parser.add_argument("-f", "--full", action="store_true",
help="Calculate the full epoch history where possible")
parser.add_argument("-j", "--json", action="store_true",
help="Output raw json only")
parser.add_argument("-n", "--nozero", action="store_true",
help="Don't show zero value staking pools (blocks minted or stake valued)")
parser.add_argument("-s", "--stats", action="store_true",
help="Show the current node stats")
parser.add_argument("-v", "--version", action="store_true",
help="Show the program version and exit")
parser.add_argument("-x", "--crossref", action="store_true",
help="Analyse the current epoch, cross referencing both block aggregate and stake distributions")
parser.add_argument("-r", "--restapi", nargs="?", metavar="RESTAPI", type=str, const="http://127.0.0.1:3001/api",
help="Set the rest api to utilize; by default: \"http://127.0.0.1:3001/api\". An env var of JORMUNGANDR_RESTAPI_URL can also be seperately set. ")
args = parser.parse_args()
if args.version:
print(f'Version: {__version__}\n')
exit(0)
main()
|
import unittest
from collections import OrderedDict
from impromptu.query import Query
class QueryTests(unittest.TestCase):
def test_initialization(self):
"""Tests the post-initialization variables.
"""
cases = [
{
'a': 0,
'b': {'$lt': 0},
},
{
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {'c': 0}},
},
{
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {
'c': 0,
'cCheckPosB': {'$assign': {'b': {'$gt': 0}}}
}},
},
{
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {'c': 0}},
'dCheck': {'$assign': {'d': 0}},
},
{
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {
'c': 0,
'cCheckPosB': {'$assign': {'b': {'$gt': 0}}}
}},
'dCheck': {'$assign': {'d': 0}},
},
]
# case[0] - Basic Query
# case[1] - Query with child Query
# case[2] - Query with child Query within child Query (a.k.a. nested child Query)
# case[3] - Query with multiple children Queries
# case[4] - Query with multiple children Queries and a nested child Query
########
# CASE 0
########
q: Query = Query(cases[0])
self.assertIsNone(q.parent, msg=f"Expected no parent, produced parent {q.parent}")
expected = "#root"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
########
# CASE 1
########
q: Query = Query(cases[1])
self.assertIsNone(q.parent, msg=f"Expected no parent, produced parent {q.parent}")
expected = "#root"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheck']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
########
# CASE 2
########
q: Query = Query(cases[2])
self.assertIsNone(q.parent, msg=f"Expected no parent, produced parent {q.parent}")
expected = "#root"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheck']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheckPosB']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheckPosB')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheckPosB"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$gt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
########
# CASE 3
########
q: Query = Query(cases[3])
self.assertIsNone(q.parent, msg=f"Expected no parent, produced parent {q.parent}")
expected = "#root"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheck', 'dCheck']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q.parent
q = q.parent.children.get('dCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "dCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'd': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
########
# CASE 4
########
q: Query = Query(cases[4])
self.assertIsNone(q.parent, msg=f"Expected no parent, produced parent {q.parent}")
expected = "#root"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheck', 'dCheck']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheckPosB']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheckPosB')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheckPosB"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$gt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q.parent.parent
q = q.parent.parent.children.get('dCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "dCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'd': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
def test_get(self):
"""Tests Query.get method and functionality.
"""
case = {
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {
'c': 0,
'cCheckPosB': {'$assign': {'b': {'$gt': 0}}}
}},
'dCheck': {'$assign': {'d': 0}},
}
q: Query = Query(case)
##########
# Get self
##########
expected = q
self.assertEqual(q.get(''), expected, msg=f"Expected Query {expected}, produced query {q.get("")}")
self.assertEqual(q.get('#root'), expected, msg=f"Expected Query {expected}, produced query {q.get("")}")
####################
# Get without nested
####################
expected = {'a': 0, 'b': {'$lt': 0}, 'd': 0}
self.assertDictEqual(q.get('dCheck').definition, expected, msg=f"Expected definition {expected}, produced definition {q.get("dCheck").definition}")
#################
# Get with nested
#################
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0}
self.assertDictEqual(q.get('cCheck').definition, expected, msg=f"Expected definition {expected}, produced definition {q.get("cCheck").definition}")
############
# Get nested
############
expected = {'a': 0, 'b': {'$gt': 0}, 'c': 0}
self.assertDictEqual(q.get('cCheck.cCheckPosB').definition, expected, msg=f"Expected definition {expected}, produced definition {q.get("cCheck.cCheckPosB").definition}")
self.assertDictEqual(q.get('cCheck').get('cCheckPosB').definition, expected, msg=f"Expected definition {expected}, produced definition {q.get("cCheck").get("cCheckPosB").definition}")
def test_search(self):
"""Tests Query.search method and functionality.
"""
case = OrderedDict({
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {
'c': 0,
'dCheck': {'$assign': {'d': 0}}
}},
'dCheck': {'$assign': {'d': 0}},
})
q: Query = Query(case)
expected = {'a': 0, 'b': {'$lt': 0}}
result = q.search('')
self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
#############
# Depth-first
#############
# basic
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0, 'd': 0}
result = q.search('dCheck', method='depth')
self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
# default
expected = {}
result = q.search('defaultCheck', default=Query({}), method='depth')
self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
# begin
expected = {'a': 0, 'b': {'$lt': 0}, 'd': 0}
result = q.search('dCheck', begin='dCheck', method='depth')
self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
###############
# Breadth-first
###############
# basic
expected = {'a': 0, 'b': {'$lt': 0}, 'd': 0}
result = q.search('dCheck', method='breadth')
self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
# default
expected = {}
result = q.search('defaultCheck', default=Query({}), method='breadth')
self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
# begin
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0, 'd': 0}
result = q.search('dCheck', begin='cCheck', method='breadth')
self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
def test_jsonify(self):
"""Tests Query.jsonify method and functionality.
"""
case = {
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {
'c': 0,
'cCheckPosB': {'$assign': {'b': {'$gt': 0}}}
}},
'dCheck': {'$assign': {'d': 0}},
}
q: Query = Query(case)
expected = {
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {
'a': 0,
'b': {'$lt': 0},
'c': 0,
'cCheckPosB': {'$assign': {
'a': 0,
'c': 0,
'b': {'$gt': 0}}
}
}},
'dCheck': {'$assign': {
'a': 0,
'b': {'$lt': 0},
'd': 0
}},
}
result = q.jsonify(deduplicate=False)
self.assertDictEqual(result, expected, msg=f"Expected JSON {expected}, produced JSON {result}")
expected = case
result = q.jsonify(deduplicate=True)
self.assertDictEqual(result, expected, msg=f"Expected JSON {expected}, produced JSON {result}")
def test_match(self):
"""Tests Query.match method.
"""
case = {
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {
'c': 0,
'dCheck': {'$assign': {'d': 0}}
}},
'dCheck': {'$assign': {'d': 0}},
}
q: Query = Query(case)
######
# Root
######
node = q
entry = {'a': 0, 'b': -1}
self.assertTrue(node.match(entry), msg=f"Expected {node.definition} to match entry {entry}")
entry = {'a': 0, 'b': 1}
self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
#############
# With nested
#############
node = q.get('cCheck')
entry = {'a': 0, 'b': -1, 'c': 0, 'd': 100} # true case
self.assertTrue(node.match(entry), msg=f"Expected {node.definition} to match entry {entry}")
entry = {'a': 0, 'b': 1, 'c': 1, 'd': 100} # one false case
self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
entry = {'a': 0, 'b': -1, 'c': -1, 'd': 100} # two false case
self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
entry = {'a': 0, 'b': -1, 'd': 100} # missing case
self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
########
# Nested
########
node = q.get('cCheck.dCheck')
entry = {'a': 0, 'b': -1, 'c': 0, 'd': 0} # true case
self.assertTrue(node.match(entry), msg=f"Expected {node.definition} to match entry {entry}")
entry = {'a': 0, 'b': -1, 'c': 0, 'd': 100} # one false case
self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
entry = {'a': 0, 'b': -1, 'c': -1, 'd': 100} # two false case
self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
entry = {'a': 0, 'c': -1, 'd': 100} # missing case
self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
| import unittest
from collections import OrderedDict
from impromptu.query import Query
class QueryTests(unittest.TestCase):
def test_initialization(self):
"""Tests the post-initialization variables.
"""
cases = [
{
'a': 0,
'b': {'$lt': 0},
},
{
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {'c': 0}},
},
{
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {
'c': 0,
'cCheckPosB': {'$assign': {'b': {'$gt': 0}}}
}},
},
{
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {'c': 0}},
'dCheck': {'$assign': {'d': 0}},
},
{
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {
'c': 0,
'cCheckPosB': {'$assign': {'b': {'$gt': 0}}}
}},
'dCheck': {'$assign': {'d': 0}},
},
]
# case[0] - Basic Query
# case[1] - Query with child Query
# case[2] - Query with child Query within child Query (a.k.a. nested child Query)
# case[3] - Query with multiple children Queries
# case[4] - Query with multiple children Queries and a nested child Query
########
# CASE 0
########
q: Query = Query(cases[0])
self.assertIsNone(q.parent, msg=f"Expected no parent, produced parent {q.parent}")
expected = "#root"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
########
# CASE 1
########
q: Query = Query(cases[1])
self.assertIsNone(q.parent, msg=f"Expected no parent, produced parent {q.parent}")
expected = "#root"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheck']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
########
# CASE 2
########
q: Query = Query(cases[2])
self.assertIsNone(q.parent, msg=f"Expected no parent, produced parent {q.parent}")
expected = "#root"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheck']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheckPosB']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheckPosB')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheckPosB"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$gt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
########
# CASE 3
########
q: Query = Query(cases[3])
self.assertIsNone(q.parent, msg=f"Expected no parent, produced parent {q.parent}")
expected = "#root"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheck', 'dCheck']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q.parent
q = q.parent.children.get('dCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "dCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'd': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
########
# CASE 4
########
q: Query = Query(cases[4])
self.assertIsNone(q.parent, msg=f"Expected no parent, produced parent {q.parent}")
expected = "#root"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheck', 'dCheck']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = ['cCheckPosB']
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q
q = q.children.get('cCheckPosB')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "cCheckPosB"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$gt': 0}, 'c': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
expected = q.parent.parent
q = q.parent.parent.children.get('dCheck')
self.assertEqual(q.parent, expected, msg=f"Expected parent {expected}, produced parent {q.parent}")
expected = "dCheck"
self.assertEqual(q.label, expected, msg=f"Expected label {expected}, produced label {q.label}")
expected = {'a': 0, 'b': {'$lt': 0}, 'd': 0}
self.assertDictEqual(q.definition, expected, msg=f"Expected definition {expected}, produced definition {q.definition}")
expected = []
self.assertListEqual(list(q.children.keys()), expected, msg=f"Expected children keys {expected}, produced children {q.children}")
def test_get(self):
"""Tests Query.get method and functionality.
"""
case = {
'a': 0,
'b': {'$lt': 0},
'cCheck': {'$assign': {
'c': 0,
'cCheckPosB': {'$assign': {'b': {'$gt': 0}}}
}},
'dCheck': {'$assign': {'d': 0}},
}
q: Query = Query(case)
##########
# Get self
##########
expected = q
self.assertEqual(q.get(''), expected, msg=f"Expected Query {expected}, produced query {q.get('')}")
self.assertEqual(q.get('#root'), expected, msg=f"Expected Query {expected}, produced query {q.get('')}")
####################
# Get without nested
####################
expected = {'a': 0, 'b': {'$lt': 0}, 'd': 0}
self.assertDictEqual(q.get('dCheck').definition, expected, msg=f"Expected definition {expected}, produced definition {q.get('dCheck').definition}")
#################
# Get with nested
#################
expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0}
self.assertDictEqual(q.get('cCheck').definition, expected, msg=f"Expected definition {expected}, produced definition {q.get('cCheck').definition}")
############
# Get nested
############
expected = {'a': 0, 'b': {'$gt': 0}, 'c': 0}
self.assertDictEqual(q.get('cCheck.cCheckPosB').definition, expected, msg=f"Expected definition {expected}, produced definition {q.get('cCheck.cCheckPosB').definition}")
self.assertDictEqual(q.get('cCheck').get('cCheckPosB').definition, expected, msg=f"Expected definition {expected}, produced definition {q.get('cCheck').get('cCheckPosB').definition}")
    def test_search(self):
        """Tests Query.search method and functionality.

        Covers depth-first vs breadth-first traversal order, the ``default``
        result for labels that do not exist, and the ``begin`` parameter
        that restricts where the traversal starts.
        """
        # OrderedDict keeps sibling order deterministic: the nested 'dCheck'
        # (inside 'cCheck') precedes the top-level 'dCheck' in depth-first
        # order and follows it in breadth-first order.
        case = OrderedDict({
            'a': 0,
            'b': {'$lt': 0},
            'cCheck': {'$assign': {
                'c': 0,
                'dCheck': {'$assign': {'d': 0}}
            }},
            'dCheck': {'$assign': {'d': 0}},
        })
        q: Query = Query(case)
        # Searching the empty label yields the root query itself.
        expected = {'a': 0, 'b': {'$lt': 0}}
        result = q.search('')
        self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
        #############
        # Depth-first
        #############
        # basic: finds the nested dCheck first (definition inherits 'c' from cCheck)
        expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0, 'd': 0}
        result = q.search('dCheck', method='depth')
        self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
        # default: returned as-is when the label is absent from the tree
        expected = {}
        result = q.search('defaultCheck', default=Query({}), method='depth')
        self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
        # begin: starting at the top-level dCheck skips the nested occurrence
        expected = {'a': 0, 'b': {'$lt': 0}, 'd': 0}
        result = q.search('dCheck', begin='dCheck', method='depth')
        self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
        ###############
        # Breadth-first
        ###############
        # basic: the shallow top-level dCheck is found before the nested one
        expected = {'a': 0, 'b': {'$lt': 0}, 'd': 0}
        result = q.search('dCheck', method='breadth')
        self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
        # default: same fallback behaviour as the depth-first variant
        expected = {}
        result = q.search('defaultCheck', default=Query({}), method='breadth')
        self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
        # begin: starting inside cCheck, only the nested dCheck is reachable
        expected = {'a': 0, 'b': {'$lt': 0}, 'c': 0, 'd': 0}
        result = q.search('dCheck', begin='cCheck', method='breadth')
        self.assertDictEqual(result.definition, expected, msg=f"Expected definition {expected}, produced definition {result.definition}")
    def test_jsonify(self):
        """Tests Query.jsonify method and functionality.

        With ``deduplicate=False`` each nested ``$assign`` node carries its
        fully-resolved definition (keys inherited from its ancestors
        included); with ``deduplicate=True`` the inherited keys are removed
        again, round-tripping the original input dict.
        """
        case = {
            'a': 0,
            'b': {'$lt': 0},
            'cCheck': {'$assign': {
                'c': 0,
                'cCheckPosB': {'$assign': {'b': {'$gt': 0}}}
            }},
            'dCheck': {'$assign': {'d': 0}},
        }
        q: Query = Query(case)
        # Fully expanded form: every child repeats the keys of its ancestors.
        expected = {
            'a': 0,
            'b': {'$lt': 0},
            'cCheck': {'$assign': {
                'a': 0,
                'b': {'$lt': 0},
                'c': 0,
                'cCheckPosB': {'$assign': {
                    'a': 0,
                    'c': 0,
                    'b': {'$gt': 0}}
                }
            }},
            'dCheck': {'$assign': {
                'a': 0,
                'b': {'$lt': 0},
                'd': 0
            }},
        }
        result = q.jsonify(deduplicate=False)
        self.assertDictEqual(result, expected, msg=f"Expected JSON {expected}, produced JSON {result}")
        # Deduplicated form reproduces the original definition verbatim.
        expected = case
        result = q.jsonify(deduplicate=True)
        self.assertDictEqual(result, expected, msg=f"Expected JSON {expected}, produced JSON {result}")
    def test_match(self):
        """Tests Query.match method.

        Exercises matching at the root, at a first-level child and at a
        nested grandchild, including entries with wrong values and with
        missing keys.
        """
        case = {
            'a': 0,
            'b': {'$lt': 0},
            'cCheck': {'$assign': {
                'c': 0,
                'dCheck': {'$assign': {'d': 0}}
            }},
            'dCheck': {'$assign': {'d': 0}},
        }
        q: Query = Query(case)
        ######
        # Root
        ######
        # Root only constrains 'a' == 0 and 'b' < 0.
        node = q
        entry = {'a': 0, 'b': -1}
        self.assertTrue(node.match(entry), msg=f"Expected {node.definition} to match entry {entry}")
        entry = {'a': 0, 'b': 1}
        self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
        #############
        # With nested
        #############
        # cCheck adds 'c' == 0 on top of the root constraints; 'd' is ignored here.
        node = q.get('cCheck')
        entry = {'a': 0, 'b': -1, 'c': 0, 'd': 100}  # true case
        self.assertTrue(node.match(entry), msg=f"Expected {node.definition} to match entry {entry}")
        entry = {'a': 0, 'b': 1, 'c': 1, 'd': 100}  # one false case
        self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
        entry = {'a': 0, 'b': -1, 'c': -1, 'd': 100}  # two false case
        self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
        entry = {'a': 0, 'b': -1, 'd': 100}  # missing case
        self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
        ########
        # Nested
        ########
        # The grandchild additionally requires 'd' == 0.
        node = q.get('cCheck.dCheck')
        entry = {'a': 0, 'b': -1, 'c': 0, 'd': 0}  # true case
        self.assertTrue(node.match(entry), msg=f"Expected {node.definition} to match entry {entry}")
        entry = {'a': 0, 'b': -1, 'c': 0, 'd': 100}  # one false case
        self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
        entry = {'a': 0, 'b': -1, 'c': -1, 'd': 100}  # two false case
        self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
        entry = {'a': 0, 'c': -1, 'd': 100}  # missing case
        self.assertFalse(node.match(entry), msg=f"Expected {node.definition} to not match entry {entry}")
|
import random
from tkinter import *
from tkinter import messagebox
import pyperclip
import json
# -------------------------- PASSWORD GENERATOR ----------------------------- #
# Character pools the generator draws from.
letters = [
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
    'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D',
    'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
    'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
]
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
def gen_password():
    """Generate a random password, insert it into the password field and copy it to the clipboard."""
    # 7-12 letters, 5-10 digits and 5-10 symbols, shuffled together.
    chars = (
        random.choices(letters, k=random.randint(7, 12))
        + random.choices(numbers, k=random.randint(5, 10))
        + random.choices(symbols, k=random.randint(5, 10))
    )
    random.shuffle(chars)
    new_password = ''.join(chars)
    password_entry.insert(END, new_password)
    # Put it on the system clipboard for immediate pasting.
    pyperclip.copy(new_password)
# ---------------------------- SAVE PASSWORD ------------------------------- #
def save_password_to_file():
    """Validate the form, ask for confirmation, then persist the entry to passwords.json."""
    site = site_entry.get()
    email = email_entry.get()
    password = password_entry.get()
    # Refuse to save while any field is still empty.
    if len(site) < 1 or len(email) < 1 or len(password) < 1:
        messagebox.showinfo(title='Error',
                            message='Please fill out all the fields.')
        return
    # New entry keyed by website name.
    data = {
        site: {
            'email': email,
            'password': password,
        },
    }
    save = messagebox.askokcancel(
        title=site,
        message=
        f'These are the details entered.\nSite: {site}\nEmail: {email}\nPassword: {password}\nDo you want to save these?'
    )
    if not save:
        return
    try:
        with open('passwords.json', 'r') as fl:
            stored = json.load(fl)
    except FileNotFoundError:
        # No store yet: create the file with just this entry.
        with open('passwords.json', 'w') as fl:
            json.dump(data, fl, indent=4)
    else:
        # Merge the new entry into the existing store and rewrite it.
        stored.update(data)
        with open('passwords.json', 'w') as fl:
            json.dump(stored, fl, indent=4)
    finally:
        # Clear site and password fields (email is left untouched).
        site_entry.delete(0, END)
        password_entry.delete(0, END)
# ---------------------------- Search -----------------------------------#
def search_json_file():
    """Look up the current website field in passwords.json and show its credentials."""
    try:
        with open('passwords.json', 'r') as fl:
            new_data = json.load(fl)
    except FileNotFoundError:
        messagebox.showerror(title='Error', message='No data file found.')
    else:
        try:
            site_data = new_data[site_entry.get()]
        except KeyError:
            messagebox.showerror(
                title='Error',
                message=f'No details for {site_entry.get()} found.')
        else:
            # FIX: the inner subscripts must use double quotes — reusing
            # single quotes inside a single-quoted f-string is a SyntaxError
            # before Python 3.12 (PEP 701).
            messagebox.showinfo(
                title=site_entry.get(),
                message=
                f'Email: {site_data["email"]}\nPassword: {site_data["password"]}'
            )
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title('Password Manager')
window.config(padx=20, pady=40)
# Logo image at the top of the grid.
canvas = Canvas(width=200, height=190, highlightthickness=0)
logo = PhotoImage(file='logo.png')
canvas.create_image(100, 85, image=logo)
canvas.grid(column=1, row=0)
# Field labels in column 0.
site_label = Label(text='Website:')
site_label.grid(column=0, row=1)
email_label = Label(text='Email/Username:')
email_label.grid(column=0, row=2)
pass_label = Label(text='Password:')
pass_label.grid(column=0, row=3)
# Entry widgets; the site field gets initial focus.
site_entry = Entry(width=32)
site_entry.grid(column=1, row=1)
site_entry.focus()
email_entry = Entry(width=51)
email_entry.grid(column=1, row=2, columnspan=2)
# Pre-fill a default email address.
email_entry.insert(END, 'not-lucky@email.domain')
password_entry = Entry(width=32)
password_entry.grid(column=1, row=3)
# Action buttons wired to the handlers above.
search = Button(text='Search', width=14, command=search_json_file)
search.grid(column=2, row=1)
gen_pass = Button(text='Generate Password', command=gen_password)
gen_pass.grid(column=2, row=3)
add = Button(text='Add', width=43, command=save_password_to_file)
add.grid(column=1, row=4, columnspan=2)
window.mainloop()
| import random
from tkinter import *
from tkinter import messagebox
import pyperclip
import json
# -------------------------- PASSWORD GENERATOR ----------------------------- #
# Character pools the generator draws from.
letters = [
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
    'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D',
    'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
    'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
]
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']


def gen_password():
    """Generate a random password, insert it into the password field and copy it to the clipboard."""
    # nr_letters = random.randint(7, 12)
    # nr_symbols = random.randint(5, 10)
    # nr_numbers = random.randint(5, 10)
    # 7-12 letters, 5-10 digits and 5-10 symbols, shuffled together.
    my_password = random.choices(letters, k=random.randint(
        7, 12)) + random.choices(numbers, k=random.randint(
        5, 10)) + random.choices(symbols, k=random.randint(5, 10))
    random.shuffle(my_password)
    joined_password = ''.join(my_password)
    password_entry.insert(END, joined_password)
    # Also place it on the system clipboard for immediate pasting.
    pyperclip.copy(joined_password)
# ---------------------------- SAVE PASSWORD ------------------------------- #
def save_password_to_file():
    """Validate the form, ask for confirmation, then persist the entry to passwords.json."""
    site = site_entry.get()
    email = email_entry.get()
    password = password_entry.get()
    # New entry keyed by website name.
    data = {
        site: {
            'email': email,
            'password': password,
        },
    }
    # Refuse to save while any field is still empty.
    if len(site) < 1 or len(email) < 1 or len(password) < 1:
        messagebox.showinfo(title='Error',
                            message='Please fill out all the fields.')
        return
    save = messagebox.askokcancel(
        title=site,
        message=
        f'These are the details entered.\nSite: {site}\nEmail: {email}\nPassword: {password}\nDo you want to save these?'
    )
    if save:
        try:
            with open('passwords.json', 'r') as fl:
                new_data = json.load(fl)
        except FileNotFoundError:
            # No store yet: create the file with just this entry.
            with open('passwords.json', 'w') as fl:
                json.dump(data, fl, indent=4)
        else:
            # Merge the new entry into the existing store and rewrite it.
            new_data.update(data)
            with open('passwords.json', 'w') as fl:
                json.dump(new_data, fl, indent=4)
        finally:
            # Clear site and password fields (email is left untouched).
            site_entry.delete(0, END)
            password_entry.delete(0, END)
# ---------------------------- Search -----------------------------------#
def search_json_file():
    """Look up the current website field in passwords.json and show its credentials."""
    try:
        with open('passwords.json', 'r') as fl:
            new_data = json.load(fl)
    except FileNotFoundError:
        messagebox.showerror(title='Error', message='No data file found.')
    else:
        try:
            site_data = new_data[site_entry.get()]
        except KeyError:
            messagebox.showerror(
                title='Error',
                message=f'No datails for {site_entry.get()} found.')
        else:
            messagebox.showinfo(
                title=site_entry.get(),
                message=
                f'Email: {site_data["email"]}\nPassword: {site_data["password"]}'
            )
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title('Password Manager')
window.config(padx=20, pady=40)
# Logo image at the top of the grid.
canvas = Canvas(width=200, height=190, highlightthickness=0)
logo = PhotoImage(file='logo.png')
canvas.create_image(100, 85, image=logo)
canvas.grid(column=1, row=0)
# Field labels in column 0.
site_label = Label(text='Website:')
site_label.grid(column=0, row=1)
email_label = Label(text='Email/Username:')
email_label.grid(column=0, row=2)
pass_label = Label(text='Password:')
pass_label.grid(column=0, row=3)
# Entry widgets; the site field gets initial focus.
site_entry = Entry(width=32)
site_entry.grid(column=1, row=1)
site_entry.focus()
email_entry = Entry(width=51)
email_entry.grid(column=1, row=2, columnspan=2)
# Pre-fill a default email address.
email_entry.insert(END, 'not-lucky@email.domain')
password_entry = Entry(width=32)
password_entry.grid(column=1, row=3)
# Action buttons wired to the handlers above.
search = Button(text='Search', width=14, command=search_json_file)
search.grid(column=2, row=1)
gen_pass = Button(text='Generate Password', command=gen_password)
gen_pass.grid(column=2, row=3)
add = Button(text='Add', width=43, command=save_password_to_file)
add.grid(column=1, row=4, columnspan=2)
window.mainloop()
|
from dataclasses import dataclass
import os
import logging
import json
from functools import lru_cache
import cv2
import numpy as np
import app
from util import cvimage as Image
logger = logging.getLogger(__name__)
net_file = app.cache_path / 'ark_material.onnx'
index_file = app.cache_path / 'index_itemid_relation.json'
model_timestamp = 0
@dataclass
class DnnItemRecord:
    """One row of the DNN class index: maps a network output class to an item."""
    class_id: int   # output-class index of the ONNX model
    item_id: str    # game-internal item id
    item_name: str  # human-readable item name
    item_type: str  # item category string from the index file
# Lookup registries keyed three ways; rebuilt by _update_index_info().
dnn_items_by_class : dict[int, DnnItemRecord] = {}
dnn_items_by_item_id : dict[str, DnnItemRecord] = {}
dnn_items_by_item_name : dict[str, DnnItemRecord] = {}
@lru_cache(1)
def load_net():
    """Load (once) the ONNX item-classification network from the local cache.

    Refreshes the cached model/index first via update_index_info().
    """
    update_index_info()
    with open(net_file, 'rb') as f:
        raw = f.read()
    return cv2.dnn.readNetFromONNX(raw)
@lru_cache(1)
def _update_index_info():
    """Parse the cached index file and rebuild the three item registries.

    lru_cache(1) makes repeat calls free; update_net() clears the cache
    after downloading a newer index.
    """
    with open(index_file, 'r', encoding='utf-8') as f:
        data = json.load(f)
    global model_timestamp
    # Generation time of the model; update_net() interprets it as milliseconds.
    model_timestamp = data['time']
    idx2id, id2idx, idx2name, idx2type = data['idx2id'], data['id2idx'], data['idx2name'], data['idx2type']
    dnn_items_by_class.clear()
    dnn_items_by_item_id.clear()
    dnn_items_by_item_name.clear()
    # One record per network class, indexed by class id, item id and item name.
    for index, item_id in enumerate(idx2id):
        record = DnnItemRecord(index, item_id, idx2name[index], idx2type[index])
        dnn_items_by_class[index] = record
        dnn_items_by_item_id[item_id] = record
        dnn_items_by_item_name[idx2name[index]] = record
def update_index_info():
    """Ensure the on-disk model/index are fresh, then (re)load the registries."""
    update_net()
    return _update_index_info()
def retry_get(url, max_retry=5, timeout=3):
    """GET ``url``, retrying up to ``max_retry`` times; re-raises the last failure."""
    import requests
    last_error = None
    for _ in range(max_retry):
        try:
            return requests.get(url, timeout=timeout)
        except Exception as e:
            last_error = e
    raise last_error
def update_net():
    """Download/refresh the ONNX model and its index file when the cache is stale.

    Skips the network round-trip entirely when the cached index is newer than
    the model generation time it records and was checked within the last 8 h.
    """
    local_cache_time = 0
    import time
    os.makedirs(os.path.dirname(index_file), exist_ok=True)
    try:
        stat = os.stat(index_file)
        cache_mtime = stat.st_mtime
        with open(index_file, 'r', encoding='utf-8') as f:
            local_rel = json.load(f)
        model_gen_time = local_rel['time'] / 1000  # index stores milliseconds
        # FIX: remember the local index time so the `remote newer?` comparison
        # below is meaningful — it previously always compared against 0 and
        # therefore re-downloaded whenever the freshness check failed.
        local_cache_time = local_rel['time']
        now = time.time()
        logger.debug(f'{cache_mtime=} {now=} {model_gen_time=}')
        if cache_mtime > model_gen_time and now - cache_mtime < 60 * 60 * 8:
            return
    except Exception:
        # Best effort: a missing/unreadable cache simply forces a re-check.
        pass
    logger.info('检查物品识别模型更新')
    resp = retry_get('https://cdn.jsdelivr.net/gh/triwinds/arknights-ml@latest/inventory/index_itemid_relation.json')
    remote_relation = resp.json()
    if remote_relation['time'] > local_cache_time:
        from datetime import datetime
        # FIX: inner subscripts/format strings use double quotes — reusing the
        # f-string's own quote is a SyntaxError before Python 3.12 (PEP 701).
        logger.info(f'更新物品识别模型, 模型生成时间: {datetime.fromtimestamp(remote_relation["time"]/1000).strftime("%Y-%m-%d %H:%M:%S")}')
        with open(index_file, 'w', encoding='utf-8') as f:
            json.dump(remote_relation, f, ensure_ascii=False)
        resp = retry_get('https://cdn.jsdelivr.net/gh/triwinds/arknights-ml@latest/inventory/ark_material.onnx')
        with open(net_file, 'wb') as f:
            f.write(resp.content)
        # New index on disk: drop the memoised parse so it is re-read.
        _update_index_info.cache_clear()
    else:
        # Already up to date: touch the file so the 8 h window restarts.
        os.utime(index_file, None)
def _update_mat_collection(collection, name, img):
    """Normalize ``img`` to 48x48, blank masked pixels and store it in ``collection``."""
    global itemmask
    if img.size != (48, 48):
        img = img.resize((48, 48), Image.BILINEAR)
    mat = np.array(img)
    # Zero the pixels selected by the shared itemmask (set up in load()).
    mat[itemmask] = 0
    collection[name] = mat
resources_known_items = {}
def load():
    """Initialise recognition resources: bundled item templates, the digit
    recogniser model and the known-item name index; ends by merging in any
    user-supplied extra items."""
    from . import resources
    from . import minireco
    # (name, resource-index) pairs for every bundled .png under items/.
    resource_files = [(x[:-4], resources.resolve('items/' + x)) for x in resources.get_entries('items')[1] if x.endswith('.png')]
    global resources_itemmats, num_recognizer, itemmask, resources_known_items
    resources_itemmats = {}
    # Shared mask applied to every template in _update_mat_collection().
    itemmask = np.asarray(resources.load_image('common/itemmask.png', '1'))
    for name, index in resource_files:
        img = resources.load_image(index, 'RGB')
        _update_mat_collection(resources_itemmats, name, img)
    # MiniRecognizer over a digit-glyph model — presumably used to read item
    # counts; confirm at call sites.
    model = resources.load_pickle('minireco/NotoSansCJKsc-DemiLight-nums.dat')
    reco = minireco.MiniRecognizer(model, minireco.compare_ccoeff)
    num_recognizer=reco
    # Index every known item name (including archived / not-loot) to its resource.
    for prefix in ['items', 'items/archive', 'items/not-loot']:
        _, files = resources.get_entries(prefix)
        for filename in files:
            itemname = filename[:-4] if filename.endswith('.png') else filename
            path = prefix + '/' + filename
            resources_known_items[itemname] = resources.resolve(path)
    update_extra_items()
def update_extra_items():
    """Merge user-supplied item images from ``app.extra_items_path`` into the
    global template (``itemmats``) and name (``all_known_items``) registries.

    Skips all work when the directory's mtime has not changed since the
    previous call (cached on the function object).
    """
    import app
    new_mtime = os.path.getmtime(app.extra_items_path)
    if new_mtime <= update_extra_items.old_mtime:
        return
    from . import resources
    from glob import glob
    extra_files = [(os.path.basename(x)[:-4], resources.FileSystemIndex(x)) for x in glob(os.path.join(
        app.extra_items_path, '*.png'))]
    extra_known_items = {}
    extra_itemmats = {}
    # FIX: single pass — the previous nested loop over extra_files re-loaded
    # every image once per file (O(n^2)) while producing the same dicts.
    for name, index in extra_files:
        img = resources.load_image(index, 'RGB')
        _update_mat_collection(extra_itemmats, name, img)
        extra_known_items[name] = index
    # Extra entries override the bundled ones on name collision.
    global itemmats
    itemmats = {}
    itemmats.update(resources_itemmats)
    itemmats.update(extra_itemmats)
    global all_known_items
    all_known_items = {}
    all_known_items.update(resources_known_items)
    all_known_items.update(extra_known_items)
    update_extra_items.old_mtime = new_mtime
update_extra_items.old_mtime = 0
def add_item(image) -> str:
    """Save an unrecognised item image under a fresh '未知物品-<date>-<n>' name.

    :param image: image object exposing a ``save(path)`` method
    :return: the generated item name (without the .png extension)
    """
    import os
    import time
    import app
    date = time.strftime('%Y-%m-%d')
    # Probe forward from the last used counter until an unused filename is found.
    index = add_item.last_index + 1
    while True:
        name = '未知物品-%s-%d' % (date, index)
        filename = app.extra_items_path.joinpath(name + '.png')
        if not os.path.exists(filename):
            break
        index += 1
    add_item.last_index = index  # counter persists across calls
    image.save(filename)
    # Make the new image immediately available to the recognisers.
    update_extra_items()
    return name
add_item.last_index = 0
load()
| from dataclasses import dataclass
import os
import logging
import json
from functools import lru_cache
import cv2
import numpy as np
import app
from util import cvimage as Image
logger = logging.getLogger(__name__)
net_file = app.cache_path / 'ark_material.onnx'
index_file = app.cache_path / 'index_itemid_relation.json'
model_timestamp = 0
@dataclass
class DnnItemRecord:
    """One row of the DNN class index: maps a network output class to an item."""
    class_id: int   # output-class index of the ONNX model
    item_id: str    # game-internal item id
    item_name: str  # human-readable item name
    item_type: str  # item category string from the index file
# Lookup registries keyed three ways; rebuilt by _update_index_info().
dnn_items_by_class : dict[int, DnnItemRecord] = {}
dnn_items_by_item_id : dict[str, DnnItemRecord] = {}
dnn_items_by_item_name : dict[str, DnnItemRecord] = {}
@lru_cache(1)
def load_net():
    """Load (once) the ONNX item-classification network from the local cache.

    Refreshes the cached model/index first via update_index_info().
    """
    update_index_info()
    with open(net_file, 'rb') as f:
        data = f.read()
    net = cv2.dnn.readNetFromONNX(data)
    return net
@lru_cache(1)
def _update_index_info():
    """Parse the cached index file and rebuild the three item registries.

    lru_cache(1) makes repeat calls free; update_net() clears the cache
    after downloading a newer index.
    """
    with open(index_file, 'r', encoding='utf-8') as f:
        data = json.load(f)
    global model_timestamp
    # Generation time of the model; update_net() interprets it as milliseconds.
    model_timestamp = data['time']
    idx2id, id2idx, idx2name, idx2type = data['idx2id'], data['id2idx'], data['idx2name'], data['idx2type']
    dnn_items_by_class.clear()
    dnn_items_by_item_id.clear()
    dnn_items_by_item_name.clear()
    # One record per network class, indexed by class id, item id and item name.
    for index, item_id in enumerate(idx2id):
        record = DnnItemRecord(index, item_id, idx2name[index], idx2type[index])
        dnn_items_by_class[index] = record
        dnn_items_by_item_id[item_id] = record
        dnn_items_by_item_name[idx2name[index]] = record
def update_index_info():
    """Ensure the on-disk model/index are fresh, then (re)load the registries."""
    update_net()
    return _update_index_info()
def retry_get(url, max_retry=5, timeout=3):
    """GET ``url``, retrying up to ``max_retry`` times; re-raises the last failure."""
    import requests
    c = 0
    ex = None
    while c < max_retry:
        try:
            return requests.get(url, timeout=timeout)
        except Exception as e:
            c += 1
            ex = e
    # All attempts failed: surface the most recent exception.
    raise ex
def update_net():
    """Download/refresh the ONNX model and its index file when the cache is stale.

    The download is skipped when the cached index is newer than the model
    generation time it records and was checked within the last 8 hours.
    """
    # NOTE(review): local_cache_time is never updated from the local file, so
    # once the freshness check below fails the download branch always runs —
    # confirm whether it was meant to be set from local_rel['time'].
    local_cache_time = 0
    import time
    os.makedirs(os.path.dirname(index_file), exist_ok=True)
    try:
        stat = os.stat(index_file)
        cache_mtime = stat.st_mtime
        with open(index_file, 'r', encoding='utf-8') as f:
            local_rel = json.load(f)
        model_gen_time = local_rel['time'] / 1000  # index stores milliseconds
        now = time.time()
        logger.debug(f'{cache_mtime=} {now=} {model_gen_time=}')
        if cache_mtime > model_gen_time and now - cache_mtime < 60 * 60 * 8:
            return
    except:
        # Best effort: a missing/unreadable cache simply forces a re-check.
        pass
    logger.info('检查物品识别模型更新')
    resp = retry_get('https://cdn.jsdelivr.net/gh/triwinds/arknights-ml@latest/inventory/index_itemid_relation.json')
    remote_relation = resp.json()
    if remote_relation['time'] > local_cache_time:
        from datetime import datetime
        logger.info(f'更新物品识别模型, 模型生成时间: {datetime.fromtimestamp(remote_relation["time"]/1000).strftime("%Y-%m-%d %H:%M:%S")}')
        with open(index_file, 'w', encoding='utf-8') as f:
            json.dump(remote_relation, f, ensure_ascii=False)
        resp = retry_get('https://cdn.jsdelivr.net/gh/triwinds/arknights-ml@latest/inventory/ark_material.onnx')
        with open(net_file, 'wb') as f:
            f.write(resp.content)
        # New index on disk: drop the memoised parse so it is re-read.
        _update_index_info.cache_clear()
    else:
        # Already up to date: touch the file so the 8 h window restarts.
        os.utime(index_file, None)
def _update_mat_collection(collection, name, img):
    """Normalize ``img`` to 48x48, blank masked pixels and store it in ``collection``."""
    global itemmask
    if img.size != (48, 48):
        img = img.resize((48, 48), Image.BILINEAR)
    mat = np.array(img)
    # Zero the pixels selected by the shared itemmask (set up in load()).
    mat[itemmask] = 0
    collection[name] = mat
resources_known_items = {}
def load():
    """Initialise recognition resources: bundled item templates, the digit
    recogniser model and the known-item name index; ends by merging in any
    user-supplied extra items."""
    from . import resources
    from . import minireco
    # (name, resource-index) pairs for every bundled .png under items/.
    resource_files = [(x[:-4], resources.resolve('items/' + x)) for x in resources.get_entries('items')[1] if x.endswith('.png')]
    global resources_itemmats, num_recognizer, itemmask, resources_known_items
    resources_itemmats = {}
    # Shared mask applied to every template in _update_mat_collection().
    itemmask = np.asarray(resources.load_image('common/itemmask.png', '1'))
    for name, index in resource_files:
        img = resources.load_image(index, 'RGB')
        _update_mat_collection(resources_itemmats, name, img)
    # MiniRecognizer over a digit-glyph model — presumably used to read item
    # counts; confirm at call sites.
    model = resources.load_pickle('minireco/NotoSansCJKsc-DemiLight-nums.dat')
    reco = minireco.MiniRecognizer(model, minireco.compare_ccoeff)
    num_recognizer=reco
    # Index every known item name (including archived / not-loot) to its resource.
    for prefix in ['items', 'items/archive', 'items/not-loot']:
        _, files = resources.get_entries(prefix)
        for filename in files:
            itemname = filename[:-4] if filename.endswith('.png') else filename
            path = prefix + '/' + filename
            resources_known_items[itemname] = resources.resolve(path)
    update_extra_items()
def update_extra_items():
    """Merge user-supplied item images from ``app.extra_items_path`` into the
    global template (``itemmats``) and name (``all_known_items``) registries.

    Skips all work when the directory's mtime has not changed since the
    previous call (cached on the function object).
    """
    import app
    new_mtime = os.path.getmtime(app.extra_items_path)
    if new_mtime <= update_extra_items.old_mtime:
        return
    from . import resources
    from glob import glob
    extra_files = [(os.path.basename(x)[:-4], resources.FileSystemIndex(x)) for x in glob(os.path.join(
        app.extra_items_path, '*.png'))]
    extra_known_items = {}
    extra_itemmats = {}
    # FIX: single pass — the previous nested loop over extra_files re-loaded
    # every image once per file (O(n^2)) while producing the same dicts.
    for name, index in extra_files:
        img = resources.load_image(index, 'RGB')
        _update_mat_collection(extra_itemmats, name, img)
        extra_known_items[name] = index
    # Extra entries override the bundled ones on name collision.
    global itemmats
    itemmats = {}
    itemmats.update(resources_itemmats)
    itemmats.update(extra_itemmats)
    global all_known_items
    all_known_items = {}
    all_known_items.update(resources_known_items)
    all_known_items.update(extra_known_items)
    update_extra_items.old_mtime = new_mtime
update_extra_items.old_mtime = 0
def add_item(image) -> str:
    """Save an unrecognised item image under a fresh '未知物品-<date>-<n>' name.

    :param image: image object exposing a ``save(path)`` method
    :return: the generated item name (without the .png extension)
    """
    import os
    import time
    import app
    date = time.strftime('%Y-%m-%d')
    # Probe forward from the last used counter until an unused filename is found.
    index = add_item.last_index + 1
    while True:
        name = '未知物品-%s-%d' % (date, index)
        filename = app.extra_items_path.joinpath(name + '.png')
        if not os.path.exists(filename):
            break
        index += 1
    add_item.last_index = index  # counter persists across calls
    image.save(filename)
    # Make the new image immediately available to the recognisers.
    update_extra_items()
    return name
add_item.last_index = 0
load()
|
import os
import json
import asyncio
import inspect
from pathlib import Path
from http import HTTPStatus
from shutil import make_archive
from tempfile import TemporaryDirectory
from contextlib import AsyncExitStack, ExitStack
from typing import Any, List, Optional, Union, TYPE_CHECKING
import aiohttp
from aiohttp import FormData as aiohttpFormData
from rich.console import Console
from jina.helper import colored
from jina.jaml.helper import complete_path
from jina.enums import RemoteWorkspaceState
from daemon.models.id import daemonize
from daemon.clients.base import AsyncBaseClient
from daemon.clients.mixin import AsyncToSyncMixin
from daemon.models.workspaces import WorkspaceItem
from daemon.helper import error_msg_from, if_alive, change_cwd
if TYPE_CHECKING:
from daemon.models import DaemonID
from rich.status import Status
from jina.logging.logger import JinaLogger
class FormData(aiohttpFormData):
    """FormData used to upload files to remote.

    Context manager that collects ``files`` fields from ``paths``;
    directories are zipped inside a temporary directory before being added.
    All opened file handles are owned by an ExitStack and closed on exit.
    """

    def __init__(
        self,
        paths: Optional[List[Path]] = None,
        logger: 'JinaLogger' = None,
        complete: bool = False,
    ) -> None:
        super().__init__()
        self._logger = logger
        self._complete = complete
        # Remember the caller's cwd before __enter__ chdirs into the tempdir,
        # so complete_path() can still resolve relative paths.
        self._cur_dir = os.getcwd()
        self.paths = paths
        self._stack = ExitStack()

    def add(self, path: Path):
        """add a field to Form

        :param path: filepath
        """
        # Open via the ExitStack so the handle stays alive for the upload and
        # is closed on __exit__.
        self.add_field(
            name='files',
            value=self._stack.enter_context(
                open(
                    complete_path(path, extra_search_paths=[self._cur_dir])
                    if self._complete
                    else path,
                    'rb',
                )
            ),
            filename=path.name,
        )

    @property
    def fields(self) -> List[Any]:
        """all fields in current Form

        :return: list of fields
        """
        return self._fields

    @property
    def filenames(self) -> List[str]:
        """all filenames in current Form

        :return: list of filenames
        """
        return [os.path.basename(f[-1].name) for f in self.fields]

    def __len__(self):
        return len(self._fields)

    def __enter__(self):
        self._stack.__enter__()
        if not self.paths:
            return self
        # Work inside a tempdir so directory archives don't litter the cwd.
        tmpdir = self._stack.enter_context(TemporaryDirectory())
        self._stack.enter_context(change_cwd(tmpdir))
        for path in map(Path, self.paths):
            try:
                filename = path.name
                if path.is_file():
                    self.add(path)
                elif path.is_dir():
                    # FIX: make_archive(base_name=filename, ...) writes
                    # '<filename>.zip' into the tempdir (cwd) — the previous
                    # code added a literal '(unknown).zip' placeholder instead.
                    make_archive(base_name=filename, format='zip', root_dir=path)
                    self.add(Path(tmpdir) / f'{filename}.zip')
            except TypeError:
                self._logger.error(f'invalid path passed {path}')
                continue
        # FIX: the join separator uses double quotes — reusing the f-string's
        # own quote is a SyntaxError before Python 3.12 (PEP 701).
        self._logger.info(
            (
                f'{len(self)} file(s) ready to be uploaded: {", ".join(self.filenames)}'
                if len(self) > 0
                else 'No file to be uploaded'
            )
        )
        return self

    def __exit__(self, *args, **kwargs):
        self._stack.__exit__(*args, **kwargs)
class AsyncWorkspaceClient(AsyncBaseClient):
"""Async Client to create/update/delete Workspaces on remote JinaD"""
_kind = 'workspace'
_endpoint = '/workspaces'
_item_model_cls = WorkspaceItem
    async def _get_helper(self, id: 'DaemonID', status: 'Status') -> bool:
        """Helper get with known workspace_id

        :param id: workspace id
        :param status: rich.console.status object to be updated
        :return: True if workspace creation is successful.
        """
        status.update('Workspace: Checking if already exists..')
        # self.get may be sync or async depending on the mixin in use.
        response = (
            await self.get(id=id)
            if inspect.iscoroutinefunction(self.get)
            else self.get(id=id)
        )
        state = self._item_model_cls(**response).state
        if state == RemoteWorkspaceState.ACTIVE:
            return True
        elif state == RemoteWorkspaceState.FAILED:
            return False
        else:
            # Still pending on the daemon: poll until it settles.
            return await self.wait(
                id=id, status=status, logs=False
            )  # NOTE: we don't emit logs here
    @if_alive
    async def create(
        self,
        paths: Optional[List[Path]] = None,
        id: Optional[Union[str, 'DaemonID']] = None,
        complete: bool = False,
        *args,
        **kwargs,
    ) -> Optional['DaemonID']:
        """Create a workspace

        :param paths: local file/directory paths to be uploaded to workspace, defaults to None
        :param id: workspace id (if already known), defaults to None
        :param complete: True if complete_path is used (used by JinadRuntime), defaults to False
        :param args: additional positional args
        :param kwargs: keyword args
        :return: workspace id
        """
        # One stack owns the console status, the FormData file handles and the
        # aiohttp request, so everything is released together on any exit path.
        async with AsyncExitStack() as stack:
            console = Console()
            status = stack.enter_context(
                console.status('Workspace: ...', spinner='earth')
            )
            workspace_id = None
            if id:
                """When creating `Peas` with `shards > 1`, `JinadRuntime` knows the workspace_id already.
                For shards > 1:
                - shard 0 throws TypeError & we create a workspace
                - shard N (all other shards) wait for workspace creation & don't emit logs
                For shards = 0:
                - Throws a TypeError & we create a workspace
                """
                workspace_id = daemonize(id)
                try:
                    # Reuse the existing workspace if the daemon already has it.
                    return (
                        workspace_id
                        if await self._get_helper(id=workspace_id, status=status)
                        else None
                    )
                except (TypeError, ValueError):
                    self._logger.debug('workspace doesn\'t exist, creating..')
            status.update('Workspace: Getting files to upload...')
            data = stack.enter_context(
                FormData(paths=paths, logger=self._logger, complete=complete)
            )
            status.update('Workspace: Sending request...')
            response = await stack.enter_async_context(
                aiohttp.request(
                    method='POST',
                    url=self.store_api,
                    params={'id': workspace_id} if workspace_id else None,
                    data=data,
                )
            )
            response_json = await response.json()
            # The response body is keyed by the (possibly daemon-assigned) id.
            workspace_id = next(iter(response_json))
            if response.status == HTTPStatus.CREATED:
                status.update(f'Workspace: {workspace_id} added...')
                # Block until the daemon reports the workspace as ready.
                return (
                    workspace_id
                    if await self.wait(id=workspace_id, status=status, logs=True)
                    else None
                )
            else:
                self._logger.error(
                    f'{self._kind.title()} creation failed as: {error_msg_from(response_json)}'
                )
                return None
async def wait(
self,
id: 'DaemonID',
status: 'Status',
logs: bool = True,
sleep: int = 2,
) -> bool:
"""Wait until workspace creation completes
:param id: workspace id
:param status: rich.console.status object to update
:param logs: True if logs need to be streamed, defaults to True
:param sleep: sleep time between each check, defaults to 2
:return: True if workspace creation succeeds
"""
logstream = asyncio.create_task(self.logstream(id=id)) if logs else None
while True:
try:
response = (
await self.get(id=id)
if inspect.iscoroutinefunction(self.get)
else self.get(id=id)
)
state = self._item_model_cls(**response).state
status.update(f'Workspace: {state.value.title()}...')
if state in [
RemoteWorkspaceState.PENDING,
RemoteWorkspaceState.CREATING,
RemoteWorkspaceState.UPDATING,
]:
await asyncio.sleep(sleep)
continue
elif state == RemoteWorkspaceState.ACTIVE:
if logstream:
self._logger.info(f'{colored(id, 'cyan')} created successfully')
logstream.cancel()
return True
elif state == RemoteWorkspaceState.FAILED:
if logstream:
self._logger.critical(
f'{colored(id, 'red')} creation failed. please check logs'
)
logstream.cancel()
return False
except ValueError as e:
if logstream:
self._logger.error(f'invalid response from remote: {e!r}')
logstream.cancel()
return False
@if_alive
async def update(
self,
id: Union[str, 'DaemonID'],
paths: Optional[List[str]] = None,
complete: bool = False,
*args,
**kwargs,
) -> 'DaemonID':
"""Update a workspace
:param id: workspace id
:param paths: local file/directory paths to be uploaded to workspace, defaults to None
:param complete: True if complete_path is used (used by JinadRuntime), defaults to False
:param args: additional positional args
:param kwargs: keyword args
:return: workspace id
"""
async with AsyncExitStack() as stack:
console = Console()
status = stack.enter_context(
console.status('Workspace update: ...', spinner='earth')
)
status.update('Workspace: Getting files to upload...')
data = stack.enter_context(
FormData(paths=paths, logger=self._logger, complete=complete)
)
status.update('Workspace: Sending request for update...')
response = await stack.enter_async_context(
aiohttp.request(
method='PUT',
url=f'{self.store_api}/{id}',
data=data,
)
)
response_json = await response.json()
workspace_id = next(iter(response_json))
if response.status == HTTPStatus.OK:
status.update(f'Workspace: {workspace_id} added...')
return (
workspace_id
if await self.wait(id=workspace_id, status=status, logs=True)
else None
)
else:
return None
@if_alive
async def delete(
self,
id: Union[str, 'DaemonID'],
container: bool = True,
network: bool = True,
files: bool = True,
everything: bool = False,
**kwargs,
) -> bool:
"""Delete a remote workspace
:param id: the identity of that workspace
:param container: True if workspace container needs to be removed, defaults to True
:param network: True if network needs to be removed, defaults to True
:param files: True if files in the workspace needs to be removed, defaults to True
:param everything: True if everything needs to be removed, defaults to False
:param kwargs: keyword arguments
:return: True if the deletion is successful
"""
async with aiohttp.request(
method='DELETE',
url=f'{self.store_api}/{daemonize(id)}',
params={
'container': json.dumps(container), # aiohttp doesn't suppport bool
'network': json.dumps(network),
'files': json.dumps(files),
'everything': json.dumps(everything),
},
) as response:
response_json = await response.json()
if response.status != HTTPStatus.OK:
self._logger.error(
f'deletion of {self._kind.title()} failed as {error_msg_from(response_json)}'
)
return response.status == HTTPStatus.OK
class WorkspaceClient(AsyncToSyncMixin, AsyncWorkspaceClient):
    """Client to create/update/delete workspaces on remote JinaD.

    ``AsyncToSyncMixin`` wraps the coroutine methods inherited from
    :class:`AsyncWorkspaceClient` so callers can invoke them synchronously;
    no additional behavior is defined here.
    """
| import os
import json
import asyncio
import inspect
from pathlib import Path
from http import HTTPStatus
from shutil import make_archive
from tempfile import TemporaryDirectory
from contextlib import AsyncExitStack, ExitStack
from typing import Any, List, Optional, Union, TYPE_CHECKING
import aiohttp
from aiohttp import FormData as aiohttpFormData
from rich.console import Console
from jina.helper import colored
from jina.jaml.helper import complete_path
from jina.enums import RemoteWorkspaceState
from daemon.models.id import daemonize
from daemon.clients.base import AsyncBaseClient
from daemon.clients.mixin import AsyncToSyncMixin
from daemon.models.workspaces import WorkspaceItem
from daemon.helper import error_msg_from, if_alive, change_cwd
if TYPE_CHECKING:
from daemon.models import DaemonID
from rich.status import Status
from jina.logging.logger import JinaLogger
class FormData(aiohttpFormData):
    """Multipart form used to upload local files/directories to remote JinaD.

    Used as a context manager: ``__enter__`` stages the given paths (zipping
    directories inside a temporary directory) and ``__exit__`` cleans
    everything up again.
    """

    def __init__(
        self,
        paths: Optional[List[Path]] = None,
        logger: 'JinaLogger' = None,
        complete: bool = False,
    ) -> None:
        super().__init__()
        self._logger = logger
        self._complete = complete
        # remember the caller's cwd: __enter__ changes into a tempdir, so
        # relative paths must still resolve against the original directory
        self._cur_dir = os.getcwd()
        self.paths = paths
        # owns every opened file handle plus the tempdir/cwd contexts
        self._stack = ExitStack()

    def add(self, path: Path):
        """add a field to Form

        :param path: filepath
        """
        self.add_field(
            name='files',
            value=self._stack.enter_context(
                open(
                    complete_path(path, extra_search_paths=[self._cur_dir])
                    if self._complete
                    else path,
                    'rb',
                )
            ),
            filename=path.name,
        )

    @property
    def fields(self) -> List[Any]:
        """all fields in current Form

        :return: list of fields
        """
        return self._fields

    @property
    def filenames(self) -> List[str]:
        """all filenames in current Form

        :return: list of filenames
        """
        return [os.path.basename(f[-1].name) for f in self.fields]

    def __len__(self):
        return len(self._fields)

    def __enter__(self):
        self._stack.__enter__()
        if not self.paths:
            return self
        tmpdir = self._stack.enter_context(TemporaryDirectory())
        # archives created below land in the tempdir (make_archive writes
        # relative to the cwd), and get cleaned up on __exit__
        self._stack.enter_context(change_cwd(tmpdir))
        for path in map(Path, self.paths):
            try:
                filename = path.name
                if path.is_file():
                    self.add(path)
                elif path.is_dir():
                    # make_archive(base_name=filename, format='zip') creates
                    # '<filename>.zip' in the cwd (= tmpdir); attach that
                    # archive instead of the directory itself
                    make_archive(base_name=filename, format='zip', root_dir=path)
                    self.add(Path(tmpdir) / f'{filename}.zip')
            except TypeError:
                self._logger.error(f'invalid path passed {path}')
                continue
        self._logger.info(
            (
                f'{len(self)} file(s) ready to be uploaded: {", ".join(self.filenames)}'
                if len(self) > 0
                else 'No file to be uploaded'
            )
        )
        return self

    def __exit__(self, *args, **kwargs):
        """Close opened files and remove the temporary directory."""
        self._stack.__exit__(*args, **kwargs)
class AsyncWorkspaceClient(AsyncBaseClient):
    """Async Client to create/update/delete Workspaces on remote JinaD"""

    # Store-specific constants consumed by the AsyncBaseClient machinery.
    _kind = 'workspace'
    _endpoint = '/workspaces'
    _item_model_cls = WorkspaceItem

    async def _get_helper(self, id: 'DaemonID', status: 'Status') -> bool:
        """Helper get with known workspace_id

        :param id: workspace id
        :param status: rich.console.status object to be updated
        :return: True if workspace creation is successful.
        """
        status.update('Workspace: Checking if already exists..')
        # `self.get` may be a coroutine (async client) or a plain function
        # (sync wrapper), so dispatch accordingly.
        response = (
            await self.get(id=id)
            if inspect.iscoroutinefunction(self.get)
            else self.get(id=id)
        )
        state = self._item_model_cls(**response).state
        if state == RemoteWorkspaceState.ACTIVE:
            return True
        elif state == RemoteWorkspaceState.FAILED:
            return False
        else:
            # Workspace exists but is still being built: block until it
            # reaches a terminal state.
            return await self.wait(
                id=id, status=status, logs=False
            )  # NOTE: we don't emit logs here

    @if_alive
    async def create(
        self,
        paths: Optional[List[Path]] = None,
        id: Optional[Union[str, 'DaemonID']] = None,
        complete: bool = False,
        *args,
        **kwargs,
    ) -> Optional['DaemonID']:
        """Create a workspace

        :param paths: local file/directory paths to be uploaded to workspace, defaults to None
        :param id: workspace id (if already known), defaults to None
        :param complete: True if complete_path is used (used by JinadRuntime), defaults to False
        :param args: additional positional args
        :param kwargs: keyword args
        :return: workspace id
        """
        async with AsyncExitStack() as stack:
            console = Console()
            status = stack.enter_context(
                console.status('Workspace: ...', spinner='earth')
            )
            workspace_id = None
            if id:
                """When creating `Peas` with `shards > 1`, `JinadRuntime` knows the workspace_id already.
                For shards > 1:
                - shard 0 throws TypeError & we create a workspace
                - shard N (all other shards) wait for workspace creation & don't emit logs
                For shards = 0:
                - Throws a TypeError & we create a workspace
                """
                workspace_id = daemonize(id)
                try:
                    return (
                        workspace_id
                        if await self._get_helper(id=workspace_id, status=status)
                        else None
                    )
                except (TypeError, ValueError):
                    # id was passed but the workspace does not exist yet:
                    # fall through to the creation request below.
                    self._logger.debug('workspace doesn\'t exist, creating..')
            status.update('Workspace: Getting files to upload...')
            data = stack.enter_context(
                FormData(paths=paths, logger=self._logger, complete=complete)
            )
            status.update('Workspace: Sending request...')
            response = await stack.enter_async_context(
                aiohttp.request(
                    method='POST',
                    url=self.store_api,
                    params={'id': workspace_id} if workspace_id else None,
                    data=data,
                )
            )
            response_json = await response.json()
            # JinaD keys the response payload by the workspace id.
            workspace_id = next(iter(response_json))
            if response.status == HTTPStatus.CREATED:
                status.update(f'Workspace: {workspace_id} added...')
                return (
                    workspace_id
                    if await self.wait(id=workspace_id, status=status, logs=True)
                    else None
                )
            else:
                self._logger.error(
                    f'{self._kind.title()} creation failed as: {error_msg_from(response_json)}'
                )
                return None

    async def wait(
        self,
        id: 'DaemonID',
        status: 'Status',
        logs: bool = True,
        sleep: int = 2,
    ) -> bool:
        """Wait until workspace creation completes

        :param id: workspace id
        :param status: rich.console.status object to update
        :param logs: True if logs need to be streamed, defaults to True
        :param sleep: sleep time between each check, defaults to 2
        :return: True if workspace creation succeeds
        """
        logstream = asyncio.create_task(self.logstream(id=id)) if logs else None
        # Poll the store every `sleep` seconds until a terminal state
        # (ACTIVE / FAILED) or an invalid response is seen.
        while True:
            try:
                response = (
                    await self.get(id=id)
                    if inspect.iscoroutinefunction(self.get)
                    else self.get(id=id)
                )
                state = self._item_model_cls(**response).state
                status.update(f'Workspace: {state.value.title()}...')
                if state in [
                    RemoteWorkspaceState.PENDING,
                    RemoteWorkspaceState.CREATING,
                    RemoteWorkspaceState.UPDATING,
                ]:
                    await asyncio.sleep(sleep)
                    continue
                elif state == RemoteWorkspaceState.ACTIVE:
                    if logstream:
                        self._logger.info(f'{colored(id, "cyan")} created successfully')
                        logstream.cancel()
                    return True
                elif state == RemoteWorkspaceState.FAILED:
                    if logstream:
                        self._logger.critical(
                            f'{colored(id, "red")} creation failed. please check logs'
                        )
                        logstream.cancel()
                    return False
            except ValueError as e:
                if logstream:
                    self._logger.error(f'invalid response from remote: {e!r}')
                    logstream.cancel()
                return False

    @if_alive
    async def update(
        self,
        id: Union[str, 'DaemonID'],
        paths: Optional[List[str]] = None,
        complete: bool = False,
        *args,
        **kwargs,
    ) -> 'DaemonID':
        """Update a workspace

        :param id: workspace id
        :param paths: local file/directory paths to be uploaded to workspace, defaults to None
        :param complete: True if complete_path is used (used by JinadRuntime), defaults to False
        :param args: additional positional args
        :param kwargs: keyword args
        :return: workspace id
        """
        async with AsyncExitStack() as stack:
            console = Console()
            status = stack.enter_context(
                console.status('Workspace update: ...', spinner='earth')
            )
            status.update('Workspace: Getting files to upload...')
            data = stack.enter_context(
                FormData(paths=paths, logger=self._logger, complete=complete)
            )
            status.update('Workspace: Sending request for update...')
            response = await stack.enter_async_context(
                aiohttp.request(
                    method='PUT',
                    url=f'{self.store_api}/{id}',
                    data=data,
                )
            )
            response_json = await response.json()
            workspace_id = next(iter(response_json))
            if response.status == HTTPStatus.OK:
                status.update(f'Workspace: {workspace_id} added...')
                return (
                    workspace_id
                    if await self.wait(id=workspace_id, status=status, logs=True)
                    else None
                )
            else:
                return None

    @if_alive
    async def delete(
        self,
        id: Union[str, 'DaemonID'],
        container: bool = True,
        network: bool = True,
        files: bool = True,
        everything: bool = False,
        **kwargs,
    ) -> bool:
        """Delete a remote workspace

        :param id: the identity of that workspace
        :param container: True if workspace container needs to be removed, defaults to True
        :param network: True if network needs to be removed, defaults to True
        :param files: True if files in the workspace needs to be removed, defaults to True
        :param everything: True if everything needs to be removed, defaults to False
        :param kwargs: keyword arguments
        :return: True if the deletion is successful
        """
        async with aiohttp.request(
            method='DELETE',
            url=f'{self.store_api}/{daemonize(id)}',
            params={
                'container': json.dumps(container),  # aiohttp doesn't support bool
                'network': json.dumps(network),
                'files': json.dumps(files),
                'everything': json.dumps(everything),
            },
        ) as response:
            response_json = await response.json()
            if response.status != HTTPStatus.OK:
                self._logger.error(
                    f'deletion of {self._kind.title()} failed as {error_msg_from(response_json)}'
                )
            return response.status == HTTPStatus.OK
class WorkspaceClient(AsyncToSyncMixin, AsyncWorkspaceClient):
    """Client to create/update/delete workspaces on remote JinaD.

    ``AsyncToSyncMixin`` wraps the coroutine methods inherited from
    :class:`AsyncWorkspaceClient` so callers can invoke them synchronously;
    no additional behavior is defined here.
    """
|
import argparse
import logging
from typing import Tuple
from deepdiff import DeepDiff
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from urllib.parse import quote
import pprint
from concurrent.futures import ThreadPoolExecutor
import os
import random
import math
import json
import shutil
import time
def diff_response(args: Tuple[str, str, str, str, str]):
    """Compare one endpoint between the old (port 1325) and new (port 1326)
    servers; on a difference, log it and dump the two JSON bodies to
    ``<diff_path>.old`` / ``<diff_path>.new``.

    :param args: tuple of (mode, ostype, arch, release, url-quoted cveid/pack)
    """
    # Endpoint
    # /cves/:family/:release/:id
    # /packs/:family/:release/:pack
    path = ''
    if args[0] == 'cveid':
        path = f'cves/{args[1]}/{args[3]}/{args[4]}'
    if args[0] == 'package':
        path = f'packs/{args[1]}/{args[3]}/{args[4]}'
    if args[2] != "":
        path = f'{path}/{args[2]}'

    # Retry transient 503/504 answers with exponential backoff.
    session = requests.Session()
    retries = Retry(total=5,
                    backoff_factor=1,
                    status_forcelist=[503, 504])
    session.mount("http://", HTTPAdapter(max_retries=retries))

    try:
        # use the retry-mounted session (a bare requests.get would bypass
        # the Retry adapter configured above)
        response_old = session.get(
            f'http://127.0.0.1:1325/{path}', timeout=(3.0, 10.0)).json()
        response_new = session.get(
            f'http://127.0.0.1:1326/{path}', timeout=(3.0, 10.0)).json()
    except requests.ConnectionError as e:
        logger.error(
            f'Failed to Connection..., err: {e}, {pprint.pformat({"args": args, "path": path}, indent=2)}')
        exit(1)
    except requests.ReadTimeout as e:
        logger.warning(
            f'Failed to ReadTimeout..., err: {e}, {pprint.pformat({"args": args, "path": path}, indent=2)}')
    except Exception as e:
        logger.error(
            f'Failed to GET request..., err: {e}, {pprint.pformat({"args": args, "path": path}, indent=2)}')
        exit(1)

    diff = DeepDiff(response_old, response_new, ignore_order=True)
    if diff != {}:
        logger.warning(
            f'There is a difference between old and new(or RDB and Redis):\n {pprint.pformat({"args": args, "path": path}, indent=2)}')
        diff_path = f'integration/diff/{args[1]}/{args[3]}/{args[0]}/{args[4]}'
        if args[2] != "":
            diff_path = f'integration/diff/{args[1]}/{args[3]}({args[2]})/{args[0]}/{args[4]}'
        with open(f'{diff_path}.old', 'w') as w:
            w.write(json.dumps(response_old, indent=4))
        with open(f'{diff_path}.new', 'w') as w:
            w.write(json.dumps(response_new, indent=4))
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=['cveid', 'package'],
                    help='Specify the mode to test.')
parser.add_argument('ostype', choices=['alpine', 'amazon', 'debian', 'oracle', 'redhat', 'suse', 'ubuntu', 'fedora'],
                    help='Specify the OS to be started in server mode when testing.')
parser.add_argument('--arch', default="", choices=['x86_64', 'i386', 'ia64', 'i686', 'sparc64', 'aarch64', 'noarch'],
                    help='Specify the Architecture to be started in server mode when testing.')
parser.add_argument('release', nargs='+',
                    help='Specify the Release Version to be started in server mode when testing.')
parser.add_argument('--suse-type', default="", choices=['opensuse', 'opensuse.leap', 'suse.linux.enterprise.server', 'suse.linux.enterprise.desktop'],
                    help='Specify the SUSE type to be started in server mode when testing.')
parser.add_argument("--sample-rate", type=float, default=0.01,
                    help="Adjust the rate of data used for testing (len(test_data) * sample_rate)")
parser.add_argument(
    '--debug', action=argparse.BooleanOptionalAction, help='print debug message')
args = parser.parse_args()

logger = logging.getLogger(__name__)
stream_handler = logging.StreamHandler()
if args.debug:
    logger.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
    stream_handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(levelname)s[%(asctime)s] %(message)s', "%m-%d|%H:%M:%S")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

if args.ostype == "suse":
    logger.info(
        f'start server mode test(mode: {args.mode}, os: {args.suse_type}, arch: {args.arch}, release: {args.release})')
else:
    logger.info(
        f'start server mode test(mode: {args.mode}, os: {args.ostype}, arch: {args.arch}, release: {args.release})')

# Wait (up to 5 s) for both the old and the new server to answer /health.
logger.info('check the communication with the server')
for _ in range(5):
    try:
        if requests.get('http://127.0.0.1:1325/health').status_code == requests.codes.ok and requests.get('http://127.0.0.1:1326/health').status_code == requests.codes.ok:
            logger.info('communication with the server has been confirmed')
            break
    except Exception:
        pass
    time.sleep(1)
else:
    logger.error('Failed to communicate with server')
    exit(1)


def _check_releases(supported):
    """Abort when any requested release version is not in `supported`."""
    if set(args.release) - supported:
        logger.error(
            f'Failed to diff_response..., err: This Release Version({args.release}) does not support test mode')
        raise NotImplementedError


if args.ostype == 'debian':
    _check_releases({'7', '8', '9', '10', '11'})
elif args.ostype == 'ubuntu':
    _check_releases({'14', '16', '18', '19', '20', '21', '22'})
elif args.ostype == 'redhat':
    _check_releases({'5', '6', '7', '8', '9'})
elif args.ostype == 'oracle':
    _check_releases({'5', '6', '7', '8'})
elif args.ostype == 'amazon':
    _check_releases({'1', '2', '2022'})
elif args.ostype == 'alpine':
    _check_releases({'3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9',
                     '3.10', '3.11', '3.12', '3.13', '3.14', '3.15', '3.16'})
elif args.ostype == "suse":
    if args.suse_type == 'opensuse':
        _check_releases({'10.2', '10.3', '11.0', '11.1', '11.2', '11.3', '11.4',
                         '12.1', '12.2', '12.3', '13.1', '13.2', 'tumbleweed'})
    elif args.suse_type == 'opensuse.leap':
        _check_releases({'42.1', '42.2', '42.3', '15.0', '15.1', '15.2', '15.3'})
    elif args.suse_type == 'suse.linux.enterprise.server':
        _check_releases({'9', '10', '11', '12', '15'})
    elif args.suse_type == 'suse.linux.enterprise.desktop':
        _check_releases({'10', '11', '12', '15'})
elif args.ostype == 'fedora':
    _check_releases({'32', '33', '34', '35'})
else:
    # args is an argparse.Namespace, not a sequence: the original `args[1]`
    # raised TypeError here instead of printing the OS type
    logger.error(
        f'Failed to diff_response..., err: This OS type({args.ostype}) does not support test mode(cveid)')
    raise NotImplementedError

ostype = args.ostype
if args.ostype == "suse":
    ostype = args.suse_type

for relVer in args.release:
    # `ostype` equals args.suse_type for suse and args.ostype otherwise,
    # which is exactly the filename prefix both branches used before
    list_path = f"integration/{args.mode}/{args.ostype}/{ostype}_{relVer}.txt"
    if not os.path.isfile(list_path):
        logger.error(f'Failed to find list path..., list_path: {list_path}')
        exit(1)

    diff_path = f'integration/diff/{ostype}/{relVer}/{args.mode}'
    if args.arch != "":
        diff_path = f'integration/diff/{ostype}/{relVer}({args.arch})/{args.mode}'
    if os.path.exists(diff_path):
        shutil.rmtree(diff_path)
    os.makedirs(diff_path, exist_ok=True)

    with open(list_path) as f:
        # renamed from `list`: don't shadow the builtin
        entries = [s.strip() for s in f.readlines()]
    entries = random.sample(entries, math.ceil(len(entries) * args.sample_rate))

    with ThreadPoolExecutor() as executor:
        ins = ((args.mode, ostype, args.arch, relVer, quote(e))
               for e in entries)
        executor.map(diff_response, ins)
| import argparse
import logging
from typing import Tuple
from deepdiff import DeepDiff
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from urllib.parse import quote
import pprint
from concurrent.futures import ThreadPoolExecutor
import os
import random
import math
import json
import shutil
import time
def diff_response(args: Tuple[str, str, str, str, str]):
    """Compare one endpoint between the old (port 1325) and new (port 1326)
    servers; on a difference, log it and dump the two JSON bodies to
    ``<diff_path>.old`` / ``<diff_path>.new``.

    :param args: tuple of (mode, ostype, arch, release, url-quoted cveid/pack)
    """
    # Endpoint
    # /cves/:family/:release/:id
    # /packs/:family/:release/:pack
    path = ''
    if args[0] == 'cveid':
        path = f'cves/{args[1]}/{args[3]}/{args[4]}'
    if args[0] == 'package':
        path = f'packs/{args[1]}/{args[3]}/{args[4]}'
    if args[2] != "":
        path = f'{path}/{args[2]}'

    # Retry transient 503/504 answers with exponential backoff.
    session = requests.Session()
    retries = Retry(total=5,
                    backoff_factor=1,
                    status_forcelist=[503, 504])
    session.mount("http://", HTTPAdapter(max_retries=retries))

    try:
        # use the retry-mounted session (a bare requests.get would bypass
        # the Retry adapter configured above)
        response_old = session.get(
            f'http://127.0.0.1:1325/{path}', timeout=(3.0, 10.0)).json()
        response_new = session.get(
            f'http://127.0.0.1:1326/{path}', timeout=(3.0, 10.0)).json()
    except requests.ConnectionError as e:
        logger.error(
            f'Failed to Connection..., err: {e}, {pprint.pformat({"args": args, "path": path}, indent=2)}')
        exit(1)
    except requests.ReadTimeout as e:
        logger.warning(
            f'Failed to ReadTimeout..., err: {e}, {pprint.pformat({"args": args, "path": path}, indent=2)}')
    except Exception as e:
        logger.error(
            f'Failed to GET request..., err: {e}, {pprint.pformat({"args": args, "path": path}, indent=2)}')
        exit(1)

    diff = DeepDiff(response_old, response_new, ignore_order=True)
    if diff != {}:
        logger.warning(
            f'There is a difference between old and new(or RDB and Redis):\n {pprint.pformat({"args": args, "path": path}, indent=2)}')
        diff_path = f'integration/diff/{args[1]}/{args[3]}/{args[0]}/{args[4]}'
        if args[2] != "":
            diff_path = f'integration/diff/{args[1]}/{args[3]}({args[2]})/{args[0]}/{args[4]}'
        with open(f'{diff_path}.old', 'w') as w:
            w.write(json.dumps(response_old, indent=4))
        with open(f'{diff_path}.new', 'w') as w:
            w.write(json.dumps(response_new, indent=4))
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=['cveid', 'package'],
                    help='Specify the mode to test.')
parser.add_argument('ostype', choices=['alpine', 'amazon', 'debian', 'oracle', 'redhat', 'suse', 'ubuntu', 'fedora'],
                    help='Specify the OS to be started in server mode when testing.')
parser.add_argument('--arch', default="", choices=['x86_64', 'i386', 'ia64', 'i686', 'sparc64', 'aarch64', 'noarch'],
                    help='Specify the Architecture to be started in server mode when testing.')
parser.add_argument('release', nargs='+',
                    help='Specify the Release Version to be started in server mode when testing.')
parser.add_argument('--suse-type', default="", choices=['opensuse', 'opensuse.leap', 'suse.linux.enterprise.server', 'suse.linux.enterprise.desktop'],
                    help='Specify the SUSE type to be started in server mode when testing.')
parser.add_argument("--sample-rate", type=float, default=0.01,
                    help="Adjust the rate of data used for testing (len(test_data) * sample_rate)")
parser.add_argument(
    '--debug', action=argparse.BooleanOptionalAction, help='print debug message')
args = parser.parse_args()

logger = logging.getLogger(__name__)
stream_handler = logging.StreamHandler()
if args.debug:
    logger.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
    stream_handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(levelname)s[%(asctime)s] %(message)s', "%m-%d|%H:%M:%S")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

if args.ostype == "suse":
    logger.info(
        f'start server mode test(mode: {args.mode}, os: {args.suse_type}, arch: {args.arch}, release: {args.release})')
else:
    logger.info(
        f'start server mode test(mode: {args.mode}, os: {args.ostype}, arch: {args.arch}, release: {args.release})')

# Wait (up to 5 s) for both the old and the new server to answer /health.
logger.info('check the communication with the server')
for _ in range(5):
    try:
        if requests.get('http://127.0.0.1:1325/health').status_code == requests.codes.ok and requests.get('http://127.0.0.1:1326/health').status_code == requests.codes.ok:
            logger.info('communication with the server has been confirmed')
            break
    except Exception:
        pass
    time.sleep(1)
else:
    logger.error('Failed to communicate with server')
    exit(1)


def _check_releases(supported):
    """Abort when any requested release version is not in `supported`."""
    if set(args.release) - supported:
        logger.error(
            f'Failed to diff_response..., err: This Release Version({args.release}) does not support test mode')
        raise NotImplementedError


if args.ostype == 'debian':
    _check_releases({'7', '8', '9', '10', '11'})
elif args.ostype == 'ubuntu':
    _check_releases({'14', '16', '18', '19', '20', '21', '22'})
elif args.ostype == 'redhat':
    _check_releases({'5', '6', '7', '8', '9'})
elif args.ostype == 'oracle':
    _check_releases({'5', '6', '7', '8'})
elif args.ostype == 'amazon':
    _check_releases({'1', '2', '2022'})
elif args.ostype == 'alpine':
    _check_releases({'3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9',
                     '3.10', '3.11', '3.12', '3.13', '3.14', '3.15', '3.16'})
elif args.ostype == "suse":
    if args.suse_type == 'opensuse':
        _check_releases({'10.2', '10.3', '11.0', '11.1', '11.2', '11.3', '11.4',
                         '12.1', '12.2', '12.3', '13.1', '13.2', 'tumbleweed'})
    elif args.suse_type == 'opensuse.leap':
        _check_releases({'42.1', '42.2', '42.3', '15.0', '15.1', '15.2', '15.3'})
    elif args.suse_type == 'suse.linux.enterprise.server':
        _check_releases({'9', '10', '11', '12', '15'})
    elif args.suse_type == 'suse.linux.enterprise.desktop':
        _check_releases({'10', '11', '12', '15'})
elif args.ostype == 'fedora':
    _check_releases({'32', '33', '34', '35'})
else:
    # args is an argparse.Namespace, not a sequence: the original `args[1]`
    # raised TypeError here instead of printing the OS type
    logger.error(
        f'Failed to diff_response..., err: This OS type({args.ostype}) does not support test mode(cveid)')
    raise NotImplementedError

ostype = args.ostype
if args.ostype == "suse":
    ostype = args.suse_type

for relVer in args.release:
    # `ostype` equals args.suse_type for suse and args.ostype otherwise,
    # which is exactly the filename prefix both branches used before
    list_path = f"integration/{args.mode}/{args.ostype}/{ostype}_{relVer}.txt"
    if not os.path.isfile(list_path):
        logger.error(f'Failed to find list path..., list_path: {list_path}')
        exit(1)

    diff_path = f'integration/diff/{ostype}/{relVer}/{args.mode}'
    if args.arch != "":
        diff_path = f'integration/diff/{ostype}/{relVer}({args.arch})/{args.mode}'
    if os.path.exists(diff_path):
        shutil.rmtree(diff_path)
    os.makedirs(diff_path, exist_ok=True)

    with open(list_path) as f:
        # renamed from `list`: don't shadow the builtin
        entries = [s.strip() for s in f.readlines()]
    entries = random.sample(entries, math.ceil(len(entries) * args.sample_rate))

    with ThreadPoolExecutor() as executor:
        ins = ((args.mode, ostype, args.arch, relVer, quote(e))
               for e in entries)
        executor.map(diff_response, ins)
|
from marshmallow.exceptions import ValidationError
from baselayer.app.access import permissions, auth_or_token
from ..base import BaseHandler
from ...models import DBSession, Source, Group, Classification, Taxonomy
from .internal.recent_sources import RecentSourcesHandler
from .internal.source_views import SourceViewsHandler
class ClassificationHandler(BaseHandler):
    """CRUD API handler for source classifications.

    GET only requires an authenticated user or token; POST, PUT and
    DELETE additionally require the 'Classify' permission.
    """

    @auth_or_token
    def get(self, classification_id):
        """
        ---
        description: Retrieve a classification
        parameters:
          - in: path
            name: classification_id
            required: true
            schema:
              type: integer
        responses:
          200:
            content:
              application/json:
                schema: SingleClassification
          400:
            content:
              application/json:
                schema: Error
        """
        classification = Classification.get_if_owned_by(
            classification_id, self.current_user
        )
        if classification is None:
            return self.error('Invalid classification ID.')
        return self.success(data=classification)

    @permissions(['Classify'])
    def post(self):
        """
        ---
        description: Post a classification
        requestBody:
          content:
            application/json:
              schema:
                type: object
                properties:
                  obj_id:
                    type: string
                  classification:
                    type: string
                  taxonomy_id:
                    type: integer
                  probability:
                    type: float
                    nullable: true
                    minimum: 0.0
                    maximum: 1.0
                    description: |
                      User-assigned probability of this classification on this
                      taxonomy. If multiple classifications are given for the
                      same source by the same user, the sum of the
                      classifications ought to equal unity. Only individual
                      probabilities are checked.
                  group_ids:
                    type: array
                    items:
                      type: integer
                    description: |
                      List of group IDs corresponding to which groups should be
                      able to view classification. Defaults to all of
                      requesting user's groups.
                required:
                  - obj_id
                  - classification
                  - taxonomy_id
        responses:
          200:
            content:
              application/json:
                schema:
                  allOf:
                    - $ref: '#/components/schemas/Success'
                    - type: object
                      properties:
                        data:
                          type: object
                          properties:
                            classification_id:
                              type: integer
                              description: New classification ID
        """
        data = self.get_json()
        obj_id = data['obj_id']
        # Ensure user/token has access to parent source
        source = Source.get_obj_if_owned_by(obj_id, self.current_user)
        if source is None:
            return self.error("Invalid source.")

        # Default to the requester's own groups, then drop any requested
        # group the requester cannot actually access.
        user_group_ids = [g.id for g in self.current_user.groups]
        user_accessible_group_ids = [g.id for g in self.current_user.accessible_groups]
        group_ids = data.pop("group_ids", user_group_ids)
        group_ids = [gid for gid in group_ids if gid in user_accessible_group_ids]
        if not group_ids:
            return self.error(
                f"Invalid group IDs field ({group_ids}): "
                "You must provide one or more valid group IDs."
            )
        groups = Group.query.filter(Group.id.in_(group_ids)).all()
        author = self.associated_user_object

        # check the taxonomy
        taxonomy_id = data["taxonomy_id"]
        taxonomy = Taxonomy.get_taxonomy_usable_by_user(taxonomy_id, self.current_user)
        if len(taxonomy) == 0:
            return self.error(
                'That taxonomy does not exist or is not available to user.'
            )
        if not isinstance(taxonomy, list):
            return self.error('Problem retrieving taxonomy')

        def allowed_classes(hierarchy):
            # Depth-first walk over the taxonomy hierarchy, yielding every
            # class name it contains.
            if "class" in hierarchy:
                yield hierarchy["class"]
            if "subclasses" in hierarchy:
                for item in hierarchy.get("subclasses", []):
                    yield from allowed_classes(item)

        # Single quotes around the dict key keep this f-string valid on
        # Python < 3.12 (quote reuse inside f-strings is PEP 701 / 3.12+).
        if data['classification'] not in allowed_classes(taxonomy[0].hierarchy):
            return self.error(
                f"That classification ({data['classification']}) "
                'is not in the allowed classes for the chosen '
                f'taxonomy (id={taxonomy_id})'
            )

        probability = data.get('probability')
        if probability is not None:
            if probability < 0 or probability > 1:
                return self.error(
                    f"That probability ({probability}) is outside "
                    "the allowable range (0-1)."
                )

        classification = Classification(
            classification=data['classification'],
            obj_id=obj_id,
            probability=probability,
            taxonomy_id=data["taxonomy_id"],
            author=author,
            author_name=author.username,
            groups=groups,
        )
        DBSession().add(classification)
        DBSession().commit()

        # Notify connected clients so the relevant frontend views refresh.
        self.push_all(
            action='skyportal/REFRESH_SOURCE',
            payload={'obj_key': classification.obj.internal_key},
        )
        if classification.obj_id in RecentSourcesHandler.get_recent_source_ids(
            self.current_user
        ):
            self.push_all(action='skyportal/FETCH_RECENT_SOURCES')
        if classification.obj_id in map(
            lambda view_obj_tuple: view_obj_tuple[1],
            SourceViewsHandler.get_top_source_views_and_ids(self.current_user),
        ):
            self.push_all(action='skyportal/FETCH_TOP_SOURCES')
        self.push_all(
            action='skyportal/REFRESH_CANDIDATE',
            payload={'id': classification.obj.internal_key},
        )
        return self.success(data={'classification_id': classification.id})

    @permissions(['Classify'])
    def put(self, classification_id):
        """
        ---
        description: Update a classification
        parameters:
          - in: path
            name: classification_id
            required: true
            schema:
              type: integer
        requestBody:
          content:
            application/json:
              schema:
                allOf:
                  - $ref: '#/components/schemas/ClassificationNoID'
                  - type: object
                    properties:
                      group_ids:
                        type: array
                        items:
                          type: integer
                        description: |
                          List of group IDs corresponding to which groups should be
                          able to view classification.
        responses:
          200:
            content:
              application/json:
                schema: Success
          400:
            content:
              application/json:
                schema: Error
        """
        c = Classification.get_if_owned_by(classification_id, self.current_user)
        if c is None:
            return self.error('Invalid classification ID.')

        data = self.get_json()
        group_ids = data.pop("group_ids", None)
        data['id'] = classification_id
        schema = Classification.__schema__()
        try:
            schema.load(data, partial=True)
        except ValidationError as e:
            return self.error(
                'Invalid/missing parameters: ' f'{e.normalized_messages()}'
            )
        DBSession().flush()

        if group_ids is not None:
            # Re-fetch after the flush, then re-point the classification at
            # the requested groups (all of which must be accessible).
            c = Classification.get_if_owned_by(classification_id, self.current_user)
            groups = Group.query.filter(Group.id.in_(group_ids)).all()
            if not groups:
                return self.error(
                    "Invalid group_ids field. " "Specify at least one valid group ID."
                )
            if not all(
                [group in self.current_user.accessible_groups for group in groups]
            ):
                return self.error(
                    "Cannot associate classification with groups you are "
                    "not a member of."
                )
            c.groups = groups
        DBSession().commit()

        self.push_all(
            action='skyportal/REFRESH_SOURCE', payload={'obj_key': c.obj.internal_key},
        )
        self.push_all(
            action='skyportal/REFRESH_CANDIDATE', payload={'id': c.obj.internal_key},
        )
        if c.obj_id in RecentSourcesHandler.get_recent_source_ids(self.current_user):
            self.push_all(action='skyportal/FETCH_RECENT_SOURCES')
        return self.success()

    @permissions(['Classify'])
    def delete(self, classification_id):
        """
        ---
        description: Delete a classification
        parameters:
          - in: path
            name: classification_id
            required: true
            schema:
              type: integer
        responses:
          200:
            content:
              application/json:
                schema: Success
        """
        user = self.associated_user_object
        roles = self.current_user.roles if hasattr(self.current_user, 'roles') else []
        c = Classification.query.get(classification_id)
        if c is None:
            return self.error("Invalid classification ID")

        # Capture these before the row is deleted.
        obj_id = c.obj_id
        obj_key = c.obj.internal_key
        author = c.author
        # Only a super admin or the classification's author may delete it.
        if ("Super admin" in [role.id for role in roles]) or (user.id == author.id):
            Classification.query.filter_by(id=classification_id).delete()
            DBSession().commit()
        else:
            return self.error('Insufficient user permissions.')

        self.push_all(
            action='skyportal/REFRESH_SOURCE', payload={'obj_key': obj_key},
        )
        self.push_all(
            action='skyportal/REFRESH_CANDIDATE', payload={'id': obj_key},
        )
        if obj_id in RecentSourcesHandler.get_recent_source_ids(self.current_user):
            self.push_all(action='skyportal/FETCH_RECENT_SOURCES')
        return self.success()
| from marshmallow.exceptions import ValidationError
from baselayer.app.access import permissions, auth_or_token
from ..base import BaseHandler
from ...models import DBSession, Source, Group, Classification, Taxonomy
from .internal.recent_sources import RecentSourcesHandler
from .internal.source_views import SourceViewsHandler
class ClassificationHandler(BaseHandler):
@auth_or_token
def get(self, classification_id):
"""
---
description: Retrieve a classification
parameters:
- in: path
name: classification_id
required: true
schema:
type: integer
responses:
200:
content:
application/json:
schema: SingleClassification
400:
content:
application/json:
schema: Error
"""
classification = Classification.get_if_owned_by(
classification_id, self.current_user
)
if classification is None:
return self.error('Invalid classification ID.')
return self.success(data=classification)
@permissions(['Classify'])
def post(self):
"""
---
description: Post a classification
requestBody:
content:
application/json:
schema:
type: object
properties:
obj_id:
type: string
classification:
type: string
taxonomy_id:
type: integer
probability:
type: float
nullable: true
minimum: 0.0
maximum: 1.0
description: |
User-assigned probability of this classification on this
taxonomy. If multiple classifications are given for the
same source by the same user, the sum of the
classifications ought to equal unity. Only individual
probabilities are checked.
group_ids:
type: array
items:
type: integer
description: |
List of group IDs corresponding to which groups should be
able to view classification. Defaults to all of
requesting user's groups.
required:
- obj_id
- classification
- taxonomy_id
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
classification_id:
type: integer
description: New classification ID
"""
data = self.get_json()
obj_id = data['obj_id']
# Ensure user/token has access to parent source
source = Source.get_obj_if_owned_by(obj_id, self.current_user)
if source is None:
return self.error("Invalid source.")
user_group_ids = [g.id for g in self.current_user.groups]
user_accessible_group_ids = [g.id for g in self.current_user.accessible_groups]
group_ids = data.pop("group_ids", user_group_ids)
group_ids = [gid for gid in group_ids if gid in user_accessible_group_ids]
if not group_ids:
return self.error(
f"Invalid group IDs field ({group_ids}): "
"You must provide one or more valid group IDs."
)
groups = Group.query.filter(Group.id.in_(group_ids)).all()
author = self.associated_user_object
# check the taxonomy
taxonomy_id = data["taxonomy_id"]
taxonomy = Taxonomy.get_taxonomy_usable_by_user(taxonomy_id, self.current_user)
if len(taxonomy) == 0:
return self.error(
'That taxonomy does not exist or is not available to user.'
)
if not isinstance(taxonomy, list):
return self.error('Problem retrieving taxonomy')
def allowed_classes(hierarchy):
if "class" in hierarchy:
yield hierarchy["class"]
if "subclasses" in hierarchy:
for item in hierarchy.get("subclasses", []):
yield from allowed_classes(item)
if data['classification'] not in allowed_classes(taxonomy[0].hierarchy):
return self.error(
f"That classification ({data['classification']}) "
'is not in the allowed classes for the chosen '
f'taxonomy (id={taxonomy_id}'
)
probability = data.get('probability')
if probability is not None:
if probability < 0 or probability > 1:
return self.error(
f"That probability ({probability}) is outside "
"the allowable range (0-1)."
)
classification = Classification(
classification=data['classification'],
obj_id=obj_id,
probability=probability,
taxonomy_id=data["taxonomy_id"],
author=author,
author_name=author.username,
groups=groups,
)
DBSession().add(classification)
DBSession().commit()
self.push_all(
action='skyportal/REFRESH_SOURCE',
payload={'obj_key': classification.obj.internal_key},
)
if classification.obj_id in RecentSourcesHandler.get_recent_source_ids(
self.current_user
):
self.push_all(action='skyportal/FETCH_RECENT_SOURCES')
if classification.obj_id in map(
lambda view_obj_tuple: view_obj_tuple[1],
SourceViewsHandler.get_top_source_views_and_ids(self.current_user),
):
self.push_all(action='skyportal/FETCH_TOP_SOURCES')
self.push_all(
action='skyportal/REFRESH_CANDIDATE',
payload={'id': classification.obj.internal_key},
)
return self.success(data={'classification_id': classification.id})
@permissions(['Classify'])
def put(self, classification_id):
"""
---
description: Update a classification
parameters:
- in: path
name: classification
required: true
schema:
type: integer
requestBody:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/ClassificationNoID'
- type: object
properties:
group_ids:
type: array
items:
type: integer
description: |
List of group IDs corresponding to which groups should be
able to view classification.
responses:
200:
content:
application/json:
schema: Success
400:
content:
application/json:
schema: Error
"""
c = Classification.get_if_owned_by(classification_id, self.current_user)
if c is None:
return self.error('Invalid classification ID.')
data = self.get_json()
group_ids = data.pop("group_ids", None)
data['id'] = classification_id
schema = Classification.__schema__()
try:
schema.load(data, partial=True)
except ValidationError as e:
return self.error(
'Invalid/missing parameters: ' f'{e.normalized_messages()}'
)
DBSession().flush()
if group_ids is not None:
c = Classification.get_if_owned_by(classification_id, self.current_user)
groups = Group.query.filter(Group.id.in_(group_ids)).all()
if not groups:
return self.error(
"Invalid group_ids field. " "Specify at least one valid group ID."
)
if not all(
[group in self.current_user.accessible_groups for group in groups]
):
return self.error(
"Cannot associate classification with groups you are "
"not a member of."
)
c.groups = groups
DBSession().commit()
self.push_all(
action='skyportal/REFRESH_SOURCE', payload={'obj_key': c.obj.internal_key},
)
self.push_all(
action='skyportal/REFRESH_CANDIDATE', payload={'id': c.obj.internal_key},
)
if c.obj_id in RecentSourcesHandler.get_recent_source_ids(self.current_user):
self.push_all(action='skyportal/FETCH_RECENT_SOURCES')
return self.success()
@permissions(['Classify'])
def delete(self, classification_id):
"""
---
description: Delete a classification
parameters:
- in: path
name: classification_id
required: true
schema:
type: integer
responses:
200:
content:
application/json:
schema: Success
"""
user = self.associated_user_object
roles = self.current_user.roles if hasattr(self.current_user, 'roles') else []
c = Classification.query.get(classification_id)
if c is None:
return self.error("Invalid classification ID")
obj_id = c.obj_id
obj_key = c.obj.internal_key
author = c.author
if ("Super admin" in [role.id for role in roles]) or (user.id == author.id):
Classification.query.filter_by(id=classification_id).delete()
DBSession().commit()
else:
return self.error('Insufficient user permissions.')
self.push_all(
action='skyportal/REFRESH_SOURCE', payload={'obj_key': obj_key},
)
self.push_all(
action='skyportal/REFRESH_CANDIDATE', payload={'id': obj_key},
)
if obj_id in RecentSourcesHandler.get_recent_source_ids(self.current_user):
self.push_all(action='skyportal/FETCH_RECENT_SOURCES')
return self.success()
|
import glob
import json
import logging
import os
from collections import Counter
logging.basicConfig(level=logging.INFO)
def load_database():
    """Collect field "2" of every FormSubmission in the local DynamoDB dump.

    Each ../dynamodb/*.json file holds one JSON export record per line;
    the FormSubmission attribute is itself a JSON-encoded string.
    """
    entries = []
    for path in glob.glob("../dynamodb/*.json"):
        with open(path) as dump:
            for raw_line in dump:
                record = json.loads(raw_line)
                submission = json.loads(record["Item"]["FormSubmission"]["S"])
                entries.append(submission["2"])
    return entries
def load_file(file):
    """Load and return the JSON content of *file*.

    Uses a context manager so the handle is closed even if json.load
    raises (the previous version leaked the handle on a parse error).
    """
    with open(file, "r") as input_file:
        return json.load(input_file)
def checkIfDuplicates(listOfElems):
    '''Check if given list contains any duplicates'''
    # A set keeps one copy of each element, so any shrinkage means a dupe.
    return len(set(listOfElems)) != len(listOfElems)
### Main
if __name__ == "__main__":
    form_submissions = load_database()
    thread_results = load_file("../threads_output.json")
    agg_results = load_file("../aggregated_results.json")

    # Flatten the per-thread submission lists into one success/failed pool.
    form_input = {"success": [], "failed": []}
    for thread in thread_results["threads"]:
        form_input["success"].extend(thread["form_input"]["success"])
        form_input["failed"].extend(thread["form_input"]["failed"])

    # Single quotes inside the f-strings keep this valid on Python < 3.12.
    if len(thread_results["threads"]) != agg_results["lambda_invocations"]:
        logging.warning(f"Only {len(thread_results['threads'])} reported back out of {agg_results['lambda_invocations']}")
        logging.warning("Results are not accurate")
    else:
        logging.info(f"All {agg_results['lambda_invocations']} lambda invocations reported back successfully.")

    forms_succeeded = 0
    forms_multiple = 0
    forms_failed = 0
    forms_incognito = 0
    forms_dropped = 0

    logging.info("Starting Analysis")
    completion = 0
    total_entries = len(form_input["failed"]) + len(form_input["success"])
    processed_entries = 0
    logging.info(f"Total entries in dynamodb: {len(form_submissions)}")
    logging.info(f"Total submissions sent: {total_entries}")
    if total_entries == 0:
        # Avoid ZeroDivisionError in the progress and percentage reports.
        logging.error("No submissions to analyse; aborting.")
        raise SystemExit(1)

    def checking_completion(comp):
        """Log progress in whole percent; return the highest percentage seen."""
        completed = int((processed_entries / total_entries) * 100)
        if completed > comp:
            logging.info(f"{completed}%")
            return completed
        return comp

    # Count every vault entry once up front instead of rescanning the whole
    # vault for each submission (O(n+m) rather than O(n*m)).
    vault_counts = Counter(form_submissions)

    for submission in form_input["success"]:
        processed_entries += 1
        completion = checking_completion(completion)
        instances = vault_counts[submission]
        if instances < 1:
            forms_dropped += 1       # sent OK but never reached the vault
        elif instances > 1:
            forms_multiple += 1      # stored more than once
        else:
            forms_succeeded += 1

    for submission in form_input["failed"]:
        processed_entries += 1
        completion = checking_completion(completion)
        if vault_counts[submission] > 0:
            # Bug fix: this counter was previously incremented by 0, so
            # "stored without warning" submissions were never counted.
            forms_incognito += 1
        else:
            forms_failed += 1

    if checkIfDuplicates(form_submissions):
        logging.info("Duplicate submissions found in vault")
    else:
        logging.info("No duplicate submissions found in vault")

    print("=========================================================================")
    print(f"Successful: {forms_succeeded}, Failed with Client aware: {forms_failed}")
    print(f"Dropped without warning: {forms_dropped}, Stored without warning: {forms_incognito}")
    print(f"Forms with multiple submissions: {forms_multiple}")
    print(f"Total Failure %: {int(sum([forms_failed, forms_dropped])/total_entries*100)}%")
    print(f"Good (client notified) Failure %: {int(forms_failed/total_entries*100)}%")
    print(f"Bad (lost in ether) Failure %: {int(forms_dropped/total_entries*100)}%")
    print("=========================================================================")
import os
import glob
import logging
logging.basicConfig(level=logging.INFO)
def load_database():
db_entries = []
files = glob.glob("../dynamodb/*.json")
for file_name in files:
with open(file_name) as file:
for line in file:
entry = json.loads(line)
form_submission = json.loads(entry["Item"]["FormSubmission"]["S"])
db_entries.append(form_submission["2"])
return db_entries
def load_file(file):
input_file = open(file, "r")
input_content= json.load(input_file)
input_file.close()
return input_content
def checkIfDuplicates(listOfElems):
''' Check if given list contains any duplicates '''
if len(listOfElems) == len(set(listOfElems)):
return False
else:
return True
### Main
if __name__ == "__main__":
form_submissions = load_database()
thread_results = load_file("../threads_output.json")
agg_results = load_file("../aggregated_results.json")
form_input = {"success":[], "failed":[]}
for thread in thread_results["threads"]:
form_input["success"].extend(thread["form_input"]["success"])
form_input["failed"].extend(thread["form_input"]["failed"])
if len(thread_results["threads"]) != agg_results["lambda_invocations"]:
logging.warning(f"Only {len(thread_results['threads'])} reported back out of {agg_results['lambda_invocations']}")
logging.warning("Results are not accurate")
else:
logging.info(f"All {agg_results['lambda_invocations']} lambda invocations reported back sucessfully.")
forms_succeeded = 0
forms_multiple = 0
forms_failed = 0
forms_incognito = 0
forms_dropped = 0
logging.info("Starting Analysis")
completion = 0
total_entries = len(form_input["failed"]) + len(form_input["success"])
processed_entries = 0
logging.info(f"Total entries in dynamodb: {len(form_submissions)}")
logging.info(f"Total submissions sent: {total_entries}")
def checking_completion(comp):
completed = int((processed_entries/total_entries)*100)
if completed > comp:
logging.info(f"{completed}%")
return completed
else:
return comp
for input in form_input["success"]:
processed_entries += 1
completion = checking_completion(completion)
instances = 0
for form in form_submissions:
if input == form:
instances += 1
if instances < 1:
# print(f"Submission with uuid: {input} was not saved in the vault")
forms_dropped += 1
elif instances > 1:
# print(f"Submission with uuid: {input} was found multiple times in the vault")
forms_multiple += 1
else:
forms_succeeded += 1
for input in form_input["failed"]:
processed_entries += 1
completion = checking_completion(completion)
instances = 0
for form in form_submissions:
if input == form:
instances += 1
if instances > 0:
forms_incognito +=0
else:
forms_failed += 1
if checkIfDuplicates(form_submissions):
logging.info("Duplicate submissions found in vault")
else:
logging.info("No duplicate submissisons found in vault")
print("=========================================================================")
print(f"Successfull: {forms_succeeded}, Failed with Client aware: {forms_failed}")
print(f"Dropped without warning: {forms_dropped}, Stored without warning: {forms_incognito}")
print(f"Forms with multiple submissions: {forms_multiple}")
print(f"Total Failure %: {int(sum([forms_failed, forms_dropped])/total_entries*100)}%")
print(f"Good (client notified) Failure %: {int(forms_failed/total_entries*100)}%")
print(f"Bad (lost in ether) Failure %: {int(forms_dropped/total_entries*100)}%")
print("=========================================================================") |
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from django.utils.html import format_html
from squest_survey.models import Template, Category, Question, Calculation, Operation, Response, Answer
from squest_survey.models import MenuItem, VendorItem
from squest_survey.models import EDB_Template, EDB_Category, EDB_Question
from squest_survey.models import SO_Template, SO_Category, SO_Question
from squest_survey.models import SubscriptionTemplate, SubscriptionPanel, SubscriptionTab, SubscriptionConfig
from squest_survey.models import Hardware
from squest_survey.models import LCA_Operator, LCA_Config
from squest_survey.admin_forms import *
from service_catalog.models import Service as SquestService
from service_catalog.models import Operation as SquestOperation
@admin.register(SquestService)
class SquestServiceAdmin(admin.ModelAdmin):
list_display = ('name', 'description')
fields = ('name', 'description')
@admin.register(SquestOperation)
class SquestOperationAdmin(admin.ModelAdmin):
list_display = ('name', 'description', 'type', 'service')
list_display_links = ('name',)
exclude = ('enabled_survey_fields', 'auto_accept')
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'description', 'order', 'shown')
list_display_links = ('name',)
@admin.register(Operation)
class OperationAdmin(admin.ModelAdmin):
    # Admin for survey Operations; columns show which question each
    # operation acts on, the operator, and its ordering.
    list_display = ('question', 'operator', 'order')

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Restrict the selectable questions to numeric/choice types that are
        # not cascading — presumably because only those can feed a
        # Calculation; confirm against Calculation usage.
        if db_field.name == 'question':
            kwargs['queryset'] = Question.objects.filter(type__in=[Question.INTEGER, Question.SELECT, Question.RADIO]).filter(cascade_templates__isnull=True)
        return super().formfield_for_foreignkey(db_field, request, **kwargs)
@admin.register(Calculation)
class CalculationAdmin(admin.ModelAdmin):
form = CalculationForm
list_display = ('calculation', 'unit')
filter_horizontal = ('operation',)
@admin.register(Question)
class QuestionAdmin(admin.ModelAdmin):
form = QuestionForm
list_display = ('get_templates', 'display_text', 'category', 'order', 'is_cascading', 'awx_variable_name')
list_display_links = ('display_text',)
# list_filter = ('template__name',)
filter_horizontal = ('templates', 'cascade_templates')
ordering = ('category', 'order')
search_fields = ['templates__name']
# def formfield_for_foreignkey(self, db_field, request, **kwargs):
# if db_field.name == 'templates':
# kwargs['queryset'] = Template.objects.exclude(job_template_id=None)
# return super().formfield_for_foreignkey(db_field, request, **kwargs)
def get_templates(self, obj):
return ', '.join([t.name for t in obj.templates.all()])
get_templates.short_description = _('Templates')
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == 'cascade_templates':
kwargs['queryset'] = Template.objects.filter(type=Template.CASCADE)
return super().formfield_for_manytomany(db_field, request, **kwargs)
class IsCascadingFilter(admin.SimpleListFilter):
    # Changelist sidebar filter: a Template is treated as "cascading" here
    # when it has no job template (job_template_id is NULL).
    title = 'Is Cascading'
    parameter_name = 'is_cascading'
    YES = 'Yes'
    NO = 'No'

    def lookups(self, request, model_admin):
        # The two options offered in the admin sidebar (labels translatable).
        return (
            (self.YES, _(self.YES)),
            (self.NO, _(self.NO)),
        )

    def queryset(self, request, queryset):
        # Narrow by the chosen option; no selection (value() is None)
        # leaves the queryset unfiltered.
        value = self.value()
        if value == self.YES:
            return queryset.filter(job_template_id=None)
        elif value == self.NO:
            return queryset.exclude(job_template_id=None)
        return queryset
class IsLCA_Filter(admin.SimpleListFilter):
    # Changelist sidebar filter on whether a Template's type is LCA.
    title = 'Is LCA'
    parameter_name = 'is_lca'
    YES = 'Yes'
    NO = 'No'

    def lookups(self, request, model_admin):
        # The two options offered in the admin sidebar (labels translatable).
        return (
            (self.YES, _(self.YES)),
            (self.NO, _(self.NO)),
        )

    def queryset(self, request, queryset):
        # Narrow by the chosen option; no selection (value() is None)
        # leaves the queryset unfiltered.
        value = self.value()
        if value == self.YES:
            return queryset.filter(type=Template.LCA)
        elif value == self.NO:
            return queryset.exclude(type=Template.LCA)
        return queryset
@admin.register(Template)
class TemplateAdmin(admin.ModelAdmin):
form = TemplateForm
list_display = ('get_job_template', 'name', 'vendor_item', 'type', 'description', 'preview_image')
list_display_links = ('name',)
list_filter = (IsCascadingFilter, IsLCA_Filter)
ordering = ('-operation__job_template', 'name')
def get_job_template(self, obj):
if not obj.operation:
return None
return obj.operation.job_template
get_job_template.short_description = _('Job Template')
def get_inline_instances(self, request, obj=None):
return obj and super(TemplateAdmin, self).get_inline_instances(request, obj) or []
def preview_image(self, obj):
if obj.is_cascading() or obj.is_lca():
return 'N.A.'
if obj.image:
image_id = 'image_' + str(obj.id)
return format_html(
'''
<img id="{}" style="display:none; position:absolute; top:0px; left:0px; height:calc(200px); width:calc(400px); border:5px solid; z-index:1000;" src="{}" />
<div style="font-size:18pt; position:relative; cursor:pointer;" onmouseover="document.getElementById('{}').style.display='block'; console.log(this.getBoundingClientRect(), this.parentNode.parentNode.getBoundingClientRect()); document.getElementById('{}').style.left=(this.getBoundingClientRect().left-this.parentNode.parentNode.getBoundingClientRect().x+35)+'px'; var scrollHeight = window.pageYOffset || document.documentElement.scrollTop; console.log(window.pageYOffset, document.documentElement.scrollTop, screen.height, scrollHeight); if (this.getBoundingClientRect().top+200<screen.height-200) document.getElementById('{}').style.top=(scrollHeight+this.getBoundingClientRect().top-298)+'px'; document.getElementById('{}').style.cursor='pointer';" onmouseout="document.getElementById('{}').style.display='none';">📷</div>
'''.format(image_id, obj.image.url, image_id, image_id, image_id, image_id, image_id, image_id, image_id))
else:
return 'No image available'
def get_form(self, request, obj=None, **kwargs):
form = super(TemplateAdmin, self).get_form(request, obj, **kwargs)
form.base_fields['operation'].widget.can_add_related = False
form.base_fields['operation'].widget.can_delete_related = False
form.base_fields['operation'].widget.can_change_related = False
return form
class AnswerBaseInline(admin.StackedInline):
fields = ('question', 'body')
readonly_fields = ('question',)
extra = 0
model = Answer
@admin.register(Response)
class ResponseAdmin(admin.ModelAdmin):
list_display = ('uuid', 'template', 'type', 'user', 'created')
list_filter = ('template', 'type', 'created')
date_hierarchy = 'created'
inlines = [AnswerBaseInline]
readonly_fields = ('template', 'created', 'updated', 'uuid', 'user')
@admin.register(Answer)
class AnswerAdmin(admin.ModelAdmin):
list_display = ('response', 'question', 'body', 'created', 'updated')
list_filter = ('response',)
date_hierarchy = 'created'
readonly_fields = ('response', 'question', 'created', 'updated')
@admin.register(MenuItem)
class MenuItemAdmin(admin.ModelAdmin):
list_display = ('name', 'order')
@admin.register(VendorItem)
class VendorItemAdmin(admin.ModelAdmin):
list_display = ('name', 'menu_item', 'order')
filter_horizontal = ('services',)
@admin.register(EDB_Template)
class EDB_TemplateAdmin(admin.ModelAdmin):
list_display = ('operation', 'description')
ordering = ('operation',)
@admin.register(EDB_Category)
class EDB_CategoryAdmin(admin.ModelAdmin):
list_display = ('get_templates', 'name', 'order', 'description')
list_display_links = ('name',)
ordering = ('order',)
def get_templates(self, obj):
return ', '.join([t.description for t in obj.edb_template.all()])
get_templates.short_description = _('Extend Database Templates')
@admin.register(EDB_Question)
class EDB_QuestionAdmin(admin.ModelAdmin):
list_display = ('edb_category', 'display_text', 'order', 'type', 'awx_variable_name')
list_display_links = ('display_text',)
ordering = ('edb_category', 'order')
@admin.register(SO_Template)
class SO_TemplateAdmin(admin.ModelAdmin):
list_display = ('operation', 'description')
ordering = ('operation',)
@admin.register(SO_Category)
class SO_CategoryAdmin(admin.ModelAdmin):
list_display = ('get_templates', 'name', 'order', 'description')
list_display_links = ('name',)
ordering = ('order',)
def get_templates(self, obj):
return ', '.join([t.description for t in obj.so_template.all()])
get_templates.short_description = _('Scale Out Templates')
@admin.register(SO_Question)
class SO_QuestionAdmin(admin.ModelAdmin):
list_display = ('so_category', 'display_text', 'order', 'awx_variable_name')
list_display_links = ('display_text',)
ordering = ('so_category', 'order')
@admin.register(SubscriptionTemplate)
class SubscriptionTemplateAdmin(admin.ModelAdmin):
form = SubscriptionTemplateForm
list_display = ('get_services', 'description')
def get_services(self, obj):
return ', '.join([str(s) for s in obj.services.all()])
get_services.short_description = _('Services')
@admin.register(SubscriptionPanel)
class SubscriptionPanelAdmin(admin.ModelAdmin):
form = SubscriptionPanelForm
list_display = ('get_templates', 'name', 'fields')
list_display_links = ('name',)
ordering = ('name',)
def get_templates(self, obj):
return ', '.join([t.description for t in obj.sub_templates.all()])
get_templates.short_description = _('Subscription Templates')
@admin.register(SubscriptionTab)
class SubscriptionTabAdmin(admin.ModelAdmin):
list_display = ('get_templates', 'name', 'order', 'fields')
list_display_links = ('name',)
ordering = ('order',)
def get_templates(self, obj):
return ', '.join([t.description for t in obj.sub_templates.all()])
get_templates.short_description = _('Subscription Templates')
@admin.register(SubscriptionConfig)
class SubscriptionConfigAdmin(admin.ModelAdmin):
list_display = ('mapping_value', 'colored_field')
def colored_field(self, obj):
return format_html('<span style="color: {}; background-color: {};">{} - {}</span>', obj.text_color, obj.field_color, obj.text_color, obj.field_color)
colored_field.short_description = _('Font-Background Color')
@admin.register(Hardware)
class HardwareAdmin(admin.ModelAdmin):
list_display = ('configuration', 'server_name', 'is_available')
list_filter = ('configuration', 'server_name')
ordering = ('configuration', 'server_name')
@admin.register(LCA_Operator)
class LCA_OperatorAdmin(admin.ModelAdmin):
list_display = ('field', 'value', 'operator', 'order')
@admin.register(LCA_Config)
class LCA_ConfigAdmin(admin.ModelAdmin):
    # Admin for LCA configurations with a computed operator-summary column.
    form = LCA_ConfigForm
    list_display = ('operation', 'get_operators')
    filter_horizontal = ('operators',)
    ordering = ('operation',)

    def get_operators(self, obj):
        # Render each operator as "field:value operator", space separated.
        # The previous version wrapped this in f"{" ".join(...)}", reusing
        # double quotes inside a double-quoted f-string — a SyntaxError
        # before Python 3.12 — and the f-string wrapper was redundant.
        return ' '.join(
            op.field + ':' + op.value + ' ' + str(op.operator)
            for op in obj.operators.all()
        )
    get_operators.short_description = _('LCA Operators')
| # -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from django.utils.html import format_html
from squest_survey.models import Template, Category, Question, Calculation, Operation, Response, Answer
from squest_survey.models import MenuItem, VendorItem
from squest_survey.models import EDB_Template, EDB_Category, EDB_Question
from squest_survey.models import SO_Template, SO_Category, SO_Question
from squest_survey.models import SubscriptionTemplate, SubscriptionPanel, SubscriptionTab, SubscriptionConfig
from squest_survey.models import Hardware
from squest_survey.models import LCA_Operator, LCA_Config
from squest_survey.admin_forms import *
from service_catalog.models import Service as SquestService
from service_catalog.models import Operation as SquestOperation
@admin.register(SquestService)
class SquestServiceAdmin(admin.ModelAdmin):
    """Minimal admin for service_catalog Service: only name and description."""
    list_display = ('name', 'description')
    fields = ('name', 'description')
@admin.register(SquestOperation)
class SquestOperationAdmin(admin.ModelAdmin):
    """Admin for service_catalog Operation."""
    list_display = ('name', 'description', 'type', 'service')
    list_display_links = ('name',)
    # NOTE(review): presumably these fields are managed by the survey app and
    # must not be edited here — confirm before exposing them.
    exclude = ('enabled_survey_fields', 'auto_accept')
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Admin for survey categories."""
    list_display = ('name', 'description', 'order', 'shown')
    list_display_links = ('name',)
@admin.register(Operation)
class OperationAdmin(admin.ModelAdmin):
    """Admin for Operation rows; restricts which questions can be attached."""
    list_display = ('question', 'operator', 'order')

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        """Limit the 'question' FK to non-cascading INTEGER/SELECT/RADIO questions."""
        if db_field.name == 'question':
            eligible = Question.objects.filter(
                type__in=[Question.INTEGER, Question.SELECT, Question.RADIO]
            ).filter(cascade_templates__isnull=True)
            kwargs['queryset'] = eligible
        return super().formfield_for_foreignkey(db_field, request, **kwargs)
@admin.register(Calculation)
class CalculationAdmin(admin.ModelAdmin):
    """Admin for Calculation; operations are picked via a dual-list widget."""
    form = CalculationForm
    list_display = ('calculation', 'unit')
    filter_horizontal = ('operation',)
@admin.register(Question)
class QuestionAdmin(admin.ModelAdmin):
    """Admin for survey questions, linked to templates and cascade templates."""
    form = QuestionForm
    list_display = ('get_templates', 'display_text', 'category', 'order', 'is_cascading', 'awx_variable_name')
    list_display_links = ('display_text',)
    filter_horizontal = ('templates', 'cascade_templates')
    ordering = ('category', 'order')
    search_fields = ['templates__name']

    def get_templates(self, obj):
        """Comma-separated names of every template this question belongs to."""
        return ', '.join(template.name for template in obj.templates.all())

    get_templates.short_description = _('Templates')

    def formfield_for_manytomany(self, db_field, request, **kwargs):
        """Only CASCADE-type templates are selectable as cascade targets."""
        if db_field.name == 'cascade_templates':
            kwargs['queryset'] = Template.objects.filter(type=Template.CASCADE)
        return super().formfield_for_manytomany(db_field, request, **kwargs)
class IsCascadingFilter(admin.SimpleListFilter):
    """Sidebar filter: cascading templates are those without a job template."""
    title = 'Is Cascading'
    parameter_name = 'is_cascading'
    YES = 'Yes'
    NO = 'No'

    def lookups(self, request, model_admin):
        """Expose the two Yes/No choices, labels wrapped for translation."""
        return tuple((choice, _(choice)) for choice in (self.YES, self.NO))

    def queryset(self, request, queryset):
        """Narrow the queryset per the selected choice; untouched when unset."""
        selected = self.value()
        if selected == self.YES:
            return queryset.filter(job_template_id=None)
        if selected == self.NO:
            return queryset.exclude(job_template_id=None)
        return queryset
class IsLCA_Filter(admin.SimpleListFilter):
    """Sidebar filter on Template.type == Template.LCA."""
    title = 'Is LCA'
    parameter_name = 'is_lca'
    YES = 'Yes'
    NO = 'No'

    def lookups(self, request, model_admin):
        """Expose the two Yes/No choices, labels wrapped for translation."""
        return tuple((choice, _(choice)) for choice in (self.YES, self.NO))

    def queryset(self, request, queryset):
        """Filter to (or exclude) LCA-type templates; untouched when unset."""
        selected = self.value()
        if selected == self.YES:
            return queryset.filter(type=Template.LCA)
        if selected == self.NO:
            return queryset.exclude(type=Template.LCA)
        return queryset
@admin.register(Template)
class TemplateAdmin(admin.ModelAdmin):
    """Admin for survey templates: job-template link, filters, image preview."""
    form = TemplateForm
    list_display = ('get_job_template', 'name', 'vendor_item', 'type', 'description', 'preview_image')
    list_display_links = ('name',)
    list_filter = (IsCascadingFilter, IsLCA_Filter)
    ordering = ('-operation__job_template', 'name')
    def get_job_template(self, obj):
        # Safe navigation: templates without an operation show an empty cell.
        if not obj.operation:
            return None
        return obj.operation.job_template
    get_job_template.short_description = _('Job Template')
    def get_inline_instances(self, request, obj=None):
        # No inlines on the "add" view (obj is None); defer to default otherwise.
        return obj and super(TemplateAdmin, self).get_inline_instances(request, obj) or []
    def preview_image(self, obj):
        # Cascading/LCA templates never carry a preview image.
        if obj.is_cascading() or obj.is_lca():
            return 'N.A.'
        if obj.image:
            image_id = 'image_' + str(obj.id)
            # Hover popup: the hidden <img> is shown/positioned by inline JS.
            # NOTE(review): the template has 7 "{}" placeholders but 9 format
            # arguments — the two surplus image_id args are silently ignored.
            # NOTE(review): the string is fully formatted BEFORE format_html(),
            # so obj.image.url is not auto-escaped — confirm uploads are trusted.
            return format_html(
                '''
                <img id="{}" style="display:none; position:absolute; top:0px; left:0px; height:calc(200px); width:calc(400px); border:5px solid; z-index:1000;" src="{}" />
                <div style="font-size:18pt; position:relative; cursor:pointer;" onmouseover="document.getElementById('{}').style.display='block'; console.log(this.getBoundingClientRect(), this.parentNode.parentNode.getBoundingClientRect()); document.getElementById('{}').style.left=(this.getBoundingClientRect().left-this.parentNode.parentNode.getBoundingClientRect().x+35)+'px'; var scrollHeight = window.pageYOffset || document.documentElement.scrollTop; console.log(window.pageYOffset, document.documentElement.scrollTop, screen.height, scrollHeight); if (this.getBoundingClientRect().top+200<screen.height-200) document.getElementById('{}').style.top=(scrollHeight+this.getBoundingClientRect().top-298)+'px'; document.getElementById('{}').style.cursor='pointer';" onmouseout="document.getElementById('{}').style.display='none';">📷</div>
                '''.format(image_id, obj.image.url, image_id, image_id, image_id, image_id, image_id, image_id, image_id))
        else:
            return 'No image available'
    def get_form(self, request, obj=None, **kwargs):
        # Strip the add/change/delete shortcut icons next to the operation FK.
        form = super(TemplateAdmin, self).get_form(request, obj, **kwargs)
        form.base_fields['operation'].widget.can_add_related = False
        form.base_fields['operation'].widget.can_delete_related = False
        form.base_fields['operation'].widget.can_change_related = False
        return form
class AnswerBaseInline(admin.StackedInline):
    """Inline showing a response's answers; the question itself is read-only."""
    fields = ('question', 'body')
    readonly_fields = ('question',)
    extra = 0
    model = Answer
@admin.register(Response)
class ResponseAdmin(admin.ModelAdmin):
    """Admin for survey responses, with the answers shown inline."""
    list_display = ('uuid', 'template', 'type', 'user', 'created')
    list_filter = ('template', 'type', 'created')
    date_hierarchy = 'created'
    inlines = [AnswerBaseInline]
    # Identity/audit fields stay immutable in the admin.
    readonly_fields = ('template', 'created', 'updated', 'uuid', 'user')
@admin.register(Answer)
class AnswerAdmin(admin.ModelAdmin):
    """Admin for individual answers; all linkage/audit fields are read-only."""
    list_display = ('response', 'question', 'body', 'created', 'updated')
    list_filter = ('response',)
    date_hierarchy = 'created'
    readonly_fields = ('response', 'question', 'created', 'updated')
@admin.register(MenuItem)
class MenuItemAdmin(admin.ModelAdmin):
    """Admin for top-level menu items."""
    list_display = ('name', 'order')
@admin.register(VendorItem)
class VendorItemAdmin(admin.ModelAdmin):
    """Admin for vendor items; services are picked via a dual-list widget."""
    list_display = ('name', 'menu_item', 'order')
    filter_horizontal = ('services',)
@admin.register(EDB_Template)
class EDB_TemplateAdmin(admin.ModelAdmin):
    """Admin for Extend-Database templates."""
    list_display = ('operation', 'description')
    ordering = ('operation',)
@admin.register(EDB_Category)
class EDB_CategoryAdmin(admin.ModelAdmin):
    """Admin for Extend-Database categories."""
    list_display = ('get_templates', 'name', 'order', 'description')
    list_display_links = ('name',)
    ordering = ('order',)

    def get_templates(self, obj):
        """Comma-separated descriptions of the linked EDB templates."""
        return ', '.join(template.description for template in obj.edb_template.all())

    get_templates.short_description = _('Extend Database Templates')
@admin.register(EDB_Question)
class EDB_QuestionAdmin(admin.ModelAdmin):
    """Admin for Extend-Database questions."""
    list_display = ('edb_category', 'display_text', 'order', 'type', 'awx_variable_name')
    list_display_links = ('display_text',)
    ordering = ('edb_category', 'order')
@admin.register(SO_Template)
class SO_TemplateAdmin(admin.ModelAdmin):
    """Admin for Scale-Out templates."""
    list_display = ('operation', 'description')
    ordering = ('operation',)
@admin.register(SO_Category)
class SO_CategoryAdmin(admin.ModelAdmin):
    """Admin for Scale-Out categories."""
    list_display = ('get_templates', 'name', 'order', 'description')
    list_display_links = ('name',)
    ordering = ('order',)

    def get_templates(self, obj):
        """Comma-separated descriptions of the linked Scale-Out templates."""
        return ', '.join(template.description for template in obj.so_template.all())

    get_templates.short_description = _('Scale Out Templates')
@admin.register(SO_Question)
class SO_QuestionAdmin(admin.ModelAdmin):
    """Admin for Scale-Out questions."""
    list_display = ('so_category', 'display_text', 'order', 'awx_variable_name')
    list_display_links = ('display_text',)
    ordering = ('so_category', 'order')
@admin.register(SubscriptionTemplate)
class SubscriptionTemplateAdmin(admin.ModelAdmin):
    """Admin for subscription templates; lists the attached services."""
    form = SubscriptionTemplateForm
    list_display = ('get_services', 'description')

    def get_services(self, obj):
        """Comma-separated string form of every attached service."""
        return ', '.join(str(service) for service in obj.services.all())

    get_services.short_description = _('Services')
@admin.register(SubscriptionPanel)
class SubscriptionPanelAdmin(admin.ModelAdmin):
    """Admin for subscription panels."""
    form = SubscriptionPanelForm
    list_display = ('get_templates', 'name', 'fields')
    list_display_links = ('name',)
    ordering = ('name',)

    def get_templates(self, obj):
        """Comma-separated descriptions of the linked subscription templates."""
        return ', '.join(template.description for template in obj.sub_templates.all())

    get_templates.short_description = _('Subscription Templates')
@admin.register(SubscriptionTab)
class SubscriptionTabAdmin(admin.ModelAdmin):
    """Admin for subscription tabs."""
    list_display = ('get_templates', 'name', 'order', 'fields')
    list_display_links = ('name',)
    ordering = ('order',)

    def get_templates(self, obj):
        """Comma-separated descriptions of the linked subscription templates."""
        return ', '.join(template.description for template in obj.sub_templates.all())

    get_templates.short_description = _('Subscription Templates')
@admin.register(SubscriptionConfig)
class SubscriptionConfigAdmin(admin.ModelAdmin):
    """Admin for SubscriptionConfig with a colour-preview column."""
    list_display = ('mapping_value', 'colored_field')
    def colored_field(self, obj):
        # Escaped via format_html; shows the colour values in their own colours.
        return format_html('<span style="color: {}; background-color: {};">{} - {}</span>', obj.text_color, obj.field_color, obj.text_color, obj.field_color)
    colored_field.short_description = _('Font-Background Color')
@admin.register(Hardware)
class HardwareAdmin(admin.ModelAdmin):
    """Admin for hardware inventory entries (configuration + server name)."""
    list_display = ('configuration', 'server_name', 'is_available')
    list_filter = ('configuration', 'server_name')
    ordering = ('configuration', 'server_name')
@admin.register(LCA_Operator)
class LCA_OperatorAdmin(admin.ModelAdmin):
    """Admin for individual LCA operators (field / value / operator / order)."""
    list_display = ('field', 'value', 'operator', 'order')
@admin.register(LCA_Config)
class LCA_ConfigAdmin(admin.ModelAdmin):
    """Admin for LCA_Config; lists each config with a summary of its operators."""
    form = LCA_ConfigForm
    list_display = ('operation', 'get_operators')
    filter_horizontal = ('operators',)
    ordering = ('operation',)

    def get_operators(self, obj):
        """Return a space-separated "field:value operator" summary string.

        Fix: dropped the redundant f-string wrapper around join() — it added
        nothing (join() already yields a str) and obscured the expression.
        """
        return ' '.join(op.field + ':' + op.value + ' ' + str(op.operator)
                        for op in obj.operators.all())

    get_operators.short_description = _('LCA Operators')
|
# -*- coding: UTF-8 -*-
"""
@author: hhyo、yyukai
@license: Apache Licence
@file: pgsql.py
@time: 2019/03/29
"""
import re
import psycopg2
import logging
import traceback
import sqlparse
from common.config import SysConfig
from common.utils.timer import FuncTimer
from sql.utils.sql_utils import get_syntax_type
from . import EngineBase
from .models import ResultSet, ReviewSet, ReviewResult
from sql.utils.data_masking import simple_column_mask
__author__ = 'hhyo、yyukai'
logger = logging.getLogger('default')
class PgSQLEngine(EngineBase):
    """PostgreSQL engine backed by psycopg2: metadata lookups, ad-hoc query
    execution (with limit/masking) and workflow (change ticket) execution."""

    def get_connection(self, db_name=None):
        """Return the cached connection, creating it on first use.

        Falls back to self.db_name and finally the 'postgres' maintenance DB.
        """
        db_name = db_name or self.db_name or 'postgres'
        if self.conn:
            return self.conn
        self.conn = psycopg2.connect(host=self.host, port=self.port, user=self.user,
                                     password=self.password, dbname=db_name)
        return self.conn

    @property
    def name(self):
        # Engine identifier shown in the platform UI.
        return 'PgSQL'

    @property
    def info(self):
        # Human-readable engine description.
        return 'PgSQL engine'

    def get_all_databases(self):
        """Fetch the database list, excluding system/template databases.

        :return: ResultSet whose rows are plain database names
        """
        # Fix: dropped the needless f prefix (no placeholders).
        result = self.query(sql="SELECT datname FROM pg_database;")
        db_list = [row[0] for row in result.rows if row[0] not in ['postgres', 'template0', 'template1']]
        result.rows = db_list
        return result

    def get_all_schemas(self, db_name, **kwargs):
        """Fetch the schema list of *db_name*, excluding system schemas.

        :return: ResultSet whose rows are plain schema names
        """
        result = self.query(db_name=db_name, sql="select schema_name from information_schema.schemata;")
        schema_list = [row[0] for row in result.rows if row[0] not in ['information_schema',
                                                                       'pg_catalog', 'pg_toast_temp_1',
                                                                       'pg_temp_1', 'pg_toast']]
        result.rows = schema_list
        return result

    def get_all_tables(self, db_name, **kwargs):
        """Fetch the table list of kwargs['schema_name'] inside *db_name*.

        NOTE(review): schema_name is interpolated into the SQL unescaped;
        assumed to come from trusted instance metadata — confirm upstream.
        """
        schema_name = kwargs.get('schema_name')
        sql = f"""SELECT table_name
        FROM information_schema.tables
        where table_schema ='{schema_name}';"""
        result = self.query(db_name=db_name, sql=sql)
        tb_list = [row[0] for row in result.rows if row[0] not in ['test']]
        result.rows = tb_list
        return result

    def get_all_columns_by_tb(self, db_name, tb_name, **kwargs):
        """Fetch the column names of *tb_name* within kwargs['schema_name']."""
        schema_name = kwargs.get('schema_name')
        sql = f"""SELECT column_name
        FROM information_schema.columns
        where table_name='{tb_name}'
        and table_schema ='{schema_name}';"""
        result = self.query(db_name=db_name, sql=sql)
        column_list = [row[0] for row in result.rows]
        result.rows = column_list
        return result

    def describe_table(self, db_name, tb_name, **kwargs):
        """Fetch the table structure (columns, types, nullability, defaults,
        comments) of *tb_name*."""
        schema_name = kwargs.get('schema_name')
        sql = f"""select
        col.column_name,
        col.data_type,
        col.character_maximum_length,
        col.numeric_precision,
        col.numeric_scale,
        col.is_nullable,
        col.column_default,
        des.description
        from
        information_schema.columns col left join pg_description des on
        col.table_name::regclass = des.objoid
        and col.ordinal_position = des.objsubid
        where table_name = '{tb_name}'
        order by ordinal_position;"""
        result = self.query(db_name=db_name, schema_name=schema_name, sql=sql)
        return result

    def query_check(self, db_name=None, sql=''):
        """Pre-check an ad-hoc query: strip comments, keep only the first
        statement and reject anything that is not a SELECT.

        Returns a dict with msg / bad_query / filtered_sql / has_star.
        """
        result = {'msg': '', 'bad_query': False, 'filtered_sql': sql, 'has_star': False}
        # Strip comments and keep only the first statement.
        try:
            sql = sqlparse.format(sql, strip_comments=True)
            sql = sqlparse.split(sql)[0]
            result['filtered_sql'] = sql.strip()
        except IndexError:
            # Nothing left after stripping comments.
            result['bad_query'] = True
            result['msg'] = '没有有效的SQL语句'
        if re.match(r"^select", sql, re.I) is None:
            result['bad_query'] = True
            result['msg'] = '不支持的查询语法类型!'
        if '*' in sql:
            result['has_star'] = True
            result['msg'] = 'SQL语句中含有 * '
        return result

    def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
        """Execute *sql* and return a ResultSet.

        kwargs: schema_name sets search_path; max_execution_time sets the
        server statement_timeout. limit_num > 0 caps the fetched row count.
        """
        schema_name = kwargs.get('schema_name')
        result_set = ResultSet(full_sql=sql)
        try:
            conn = self.get_connection(db_name=db_name)
            max_execution_time = kwargs.get('max_execution_time', 0)
            cursor = conn.cursor()
            # Best effort; fix: narrowed the bare except so KeyboardInterrupt
            # and SystemExit are no longer swallowed.
            try:
                cursor.execute(f"SET statement_timeout TO {max_execution_time};")
            except Exception:
                pass
            if schema_name:
                cursor.execute(f"SET search_path TO {schema_name};")
            cursor.execute(sql)
            effect_row = cursor.rowcount
            if int(limit_num) > 0:
                rows = cursor.fetchmany(size=int(limit_num))
            else:
                rows = cursor.fetchall()
            fields = cursor.description
            result_set.column_list = [i[0] for i in fields] if fields else []
            result_set.rows = rows
            result_set.affected_rows = effect_row
        except Exception as e:
            logger.warning(f"PgSQL命令执行报错,语句:{sql}, 错误信息:{traceback.format_exc()}")
            result_set.error = str(e)
        finally:
            if close_conn:
                self.close()
        return result_set

    def filter_sql(self, sql='', limit_num=0):
        """Append a LIMIT clause to bare SELECT statements.

        Fix: the f-strings previously reused double quotes inside their
        replacement fields (`f"{sql.rstrip(";")}..."`), a SyntaxError before
        Python 3.12 (PEP 701). TODO: the limit rewrite is naive.
        """
        sql_lower = sql.lower().rstrip(';').strip()
        if re.match(r"^select", sql_lower):
            if re.search(r"limit\s+(\d+)$", sql_lower) is None:
                if re.search(r"limit\s+\d+\s*,\s*(\d+)$", sql_lower) is None:
                    return f"{sql.rstrip(';')} limit {limit_num};"
        return f"{sql.rstrip(';')};"

    def query_masking(self, db_name=None, sql='', resultset=None):
        """Apply simple column-masking rules; effective for SELECT only."""
        if re.match(r"^select", sql, re.I):
            filtered_result = simple_column_mask(self.instance, resultset)
            filtered_result.is_masked = True
        else:
            filtered_result = resultset
        return filtered_result

    def execute_check(self, db_name=None, sql=''):
        """Pre-execution review of a change ticket: reject SELECTs and any
        statement matching the configured critical-DDL regex; classify the
        ticket as DDL or DML. Returns a ReviewSet.
        """
        config = SysConfig()
        check_result = ReviewSet(full_sql=sql)
        # Forbidden / high-risk statement checks.
        line = 1
        critical_ddl_regex = config.get('critical_ddl_regex', '')
        p = re.compile(critical_ddl_regex)
        check_result.syntax_type = 2  # TODO ticket type: 0 other, 1 DDL, 2 DML
        for statement in sqlparse.split(sql):
            statement = sqlparse.format(statement, strip_comments=True)
            # Forbidden: queries are not allowed in change tickets.
            if re.match(r"^select", statement.lower()):
                check_result.is_critical = True
                result = ReviewResult(id=line, errlevel=2,
                                      stagestatus='驳回不支持语句',
                                      errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
                                      sql=statement)
            # High-risk statement per the configured regex.
            elif critical_ddl_regex and p.match(statement.strip().lower()):
                check_result.is_critical = True
                result = ReviewResult(id=line, errlevel=2,
                                      stagestatus='驳回高危SQL',
                                      errormessage='禁止提交匹配' + critical_ddl_regex + '条件的语句!',
                                      sql=statement)
            # Normal statement.
            else:
                result = ReviewResult(id=line, errlevel=0,
                                      stagestatus='Audit completed',
                                      errormessage='None',
                                      sql=statement,
                                      affected_rows=0,
                                      execute_time=0, )
            # Any DDL statement marks the whole ticket as DDL.
            if get_syntax_type(statement) == 'DDL':
                check_result.syntax_type = 1
            check_result.rows += [result]
            # Bail out early on forbidden/high-risk statements.
            if check_result.is_critical:
                check_result.error_count += 1
                return check_result
            line += 1
        return check_result

    def execute_workflow(self, workflow, close_conn=True):
        """Execute an approved change ticket statement by statement.

        On the first failure the remaining statements are appended as
        'audited but not executed'. Returns a ReviewSet.
        """
        sql = workflow.sqlworkflowcontent.sql_content
        execute_result = ReviewSet(full_sql=sql)
        # Strip comments, then split into individual statements.
        sql = sqlparse.format(sql, strip_comments=True)
        split_sql = sqlparse.split(sql)
        line = 1
        statement = None
        db_name = workflow.db_name
        try:
            conn = self.get_connection(db_name=db_name)
            cursor = conn.cursor()
            # Execute each statement and append its outcome.
            for statement in split_sql:
                statement = statement.rstrip(';')
                with FuncTimer() as t:
                    cursor.execute(statement)
                    conn.commit()
                execute_result.rows.append(ReviewResult(
                    id=line,
                    errlevel=0,
                    stagestatus='Execute Successfully',
                    errormessage='None',
                    sql=statement,
                    affected_rows=cursor.rowcount,
                    execute_time=t.cost,
                ))
                line += 1
        except Exception as e:
            logger.warning(f"PGSQL命令执行报错,语句:{statement or sql}, 错误信息:{traceback.format_exc()}")
            execute_result.error = str(e)
            # Record the failing statement.
            execute_result.rows.append(ReviewResult(
                id=line,
                errlevel=2,
                stagestatus='Execute Failed',
                errormessage=f'异常信息:{e}',
                sql=statement or sql,
                affected_rows=0,
                execute_time=0,
            ))
            line += 1
            # Remaining statements: audited but not executed.
            for statement in split_sql[line - 1:]:
                execute_result.rows.append(ReviewResult(
                    id=line,
                    errlevel=0,
                    stagestatus='Audit completed',
                    errormessage=f'前序语句失败, 未执行',
                    sql=statement,
                    affected_rows=0,
                    execute_time=0,
                ))
                line += 1
        finally:
            if close_conn:
                self.close()
        return execute_result

    def close(self):
        """Close and drop the cached connection, if any."""
        if self.conn:
            self.conn.close()
            self.conn = None
| # -*- coding: UTF-8 -*-
"""
@author: hhyo、yyukai
@license: Apache Licence
@file: pgsql.py
@time: 2019/03/29
"""
import re
import psycopg2
import logging
import traceback
import sqlparse
from common.config import SysConfig
from common.utils.timer import FuncTimer
from sql.utils.sql_utils import get_syntax_type
from . import EngineBase
from .models import ResultSet, ReviewSet, ReviewResult
from sql.utils.data_masking import simple_column_mask
__author__ = 'hhyo、yyukai'
logger = logging.getLogger('default')
class PgSQLEngine(EngineBase):
    """PostgreSQL engine backed by psycopg2: metadata lookups, ad-hoc query
    execution (with limit/masking) and workflow (change ticket) execution."""
    def get_connection(self, db_name=None):
        # Cached connection; falls back to self.db_name, then the 'postgres'
        # maintenance database.
        db_name = db_name or self.db_name or 'postgres'
        if self.conn:
            return self.conn
        self.conn = psycopg2.connect(host=self.host, port=self.port, user=self.user,
                                     password=self.password, dbname=db_name)
        return self.conn
    @property
    def name(self):
        # Engine identifier shown in the platform UI.
        return 'PgSQL'
    @property
    def info(self):
        # Human-readable engine description.
        return 'PgSQL engine'
    def get_all_databases(self):
        """Fetch the database list, excluding system/template databases.

        :return: ResultSet whose rows are plain database names
        """
        result = self.query(sql=f"SELECT datname FROM pg_database;")
        db_list = [row[0] for row in result.rows if row[0] not in ['postgres', 'template0', 'template1']]
        result.rows = db_list
        return result
    def get_all_schemas(self, db_name, **kwargs):
        """Fetch the schema list of *db_name*, excluding system schemas.

        :return: ResultSet whose rows are plain schema names
        """
        result = self.query(db_name=db_name, sql=f"select schema_name from information_schema.schemata;")
        schema_list = [row[0] for row in result.rows if row[0] not in ['information_schema',
                                                                       'pg_catalog', 'pg_toast_temp_1',
                                                                       'pg_temp_1', 'pg_toast']]
        result.rows = schema_list
        return result
    def get_all_tables(self, db_name, **kwargs):
        """Fetch the table list of kwargs['schema_name'] inside *db_name*.

        NOTE(review): schema_name is interpolated into the SQL unescaped;
        assumed to come from trusted instance metadata — confirm upstream.

        :param db_name:
        :param schema_name:
        :return: ResultSet whose rows are plain table names
        """
        schema_name = kwargs.get('schema_name')
        sql = f"""SELECT table_name
        FROM information_schema.tables
        where table_schema ='{schema_name}';"""
        result = self.query(db_name=db_name, sql=sql)
        tb_list = [row[0] for row in result.rows if row[0] not in ['test']]
        result.rows = tb_list
        return result
    def get_all_columns_by_tb(self, db_name, tb_name, **kwargs):
        """Fetch the column names of *tb_name* within kwargs['schema_name'].

        :param db_name:
        :param tb_name:
        :param schema_name:
        :return: ResultSet whose rows are plain column names
        """
        schema_name = kwargs.get('schema_name')
        sql = f"""SELECT column_name
        FROM information_schema.columns
        where table_name='{tb_name}'
        and table_schema ='{schema_name}';"""
        result = self.query(db_name=db_name, sql=sql)
        column_list = [row[0] for row in result.rows]
        result.rows = column_list
        return result
    def describe_table(self, db_name, tb_name, **kwargs):
        """Fetch table structure (columns, types, nullability, defaults,
        comments) for *tb_name*.

        :param db_name:
        :param tb_name:
        :param schema_name:
        :return: ResultSet
        """
        schema_name = kwargs.get('schema_name')
        sql = f"""select
        col.column_name,
        col.data_type,
        col.character_maximum_length,
        col.numeric_precision,
        col.numeric_scale,
        col.is_nullable,
        col.column_default,
        des.description
        from
        information_schema.columns col left join pg_description des on
        col.table_name::regclass = des.objoid
        and col.ordinal_position = des.objsubid
        where table_name = '{tb_name}'
        order by ordinal_position;"""
        result = self.query(db_name=db_name, schema_name=schema_name, sql=sql)
        return result
    def query_check(self, db_name=None, sql=''):
        # Pre-check an ad-hoc query: strip comments, keep only the first
        # statement and reject anything that is not a SELECT.
        result = {'msg': '', 'bad_query': False, 'filtered_sql': sql, 'has_star': False}
        # Strip comments and keep only the first statement.
        try:
            sql = sqlparse.format(sql, strip_comments=True)
            sql = sqlparse.split(sql)[0]
            result['filtered_sql'] = sql.strip()
        except IndexError:
            # Nothing left after stripping comments.
            result['bad_query'] = True
            result['msg'] = '没有有效的SQL语句'
        if re.match(r"^select", sql, re.I) is None:
            result['bad_query'] = True
            result['msg'] = '不支持的查询语法类型!'
        if '*' in sql:
            result['has_star'] = True
            result['msg'] = 'SQL语句中含有 * '
        return result
    def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):
        """Execute *sql* and return a ResultSet.

        kwargs: schema_name sets search_path; max_execution_time sets the
        server statement_timeout. limit_num > 0 caps the fetched row count.
        """
        schema_name = kwargs.get('schema_name')
        result_set = ResultSet(full_sql=sql)
        try:
            conn = self.get_connection(db_name=db_name)
            max_execution_time = kwargs.get('max_execution_time', 0)
            cursor = conn.cursor()
            # Best-effort timeout; NOTE(review): bare except also swallows
            # KeyboardInterrupt/SystemExit — consider `except Exception`.
            try:
                cursor.execute(f"SET statement_timeout TO {max_execution_time};")
            except:
                pass
            if schema_name:
                cursor.execute(f"SET search_path TO {schema_name};")
            cursor.execute(sql)
            effect_row = cursor.rowcount
            if int(limit_num) > 0:
                rows = cursor.fetchmany(size=int(limit_num))
            else:
                rows = cursor.fetchall()
            fields = cursor.description
            result_set.column_list = [i[0] for i in fields] if fields else []
            result_set.rows = rows
            result_set.affected_rows = effect_row
        except Exception as e:
            logger.warning(f"PgSQL命令执行报错,语句:{sql}, 错误信息:{traceback.format_exc()}")
            result_set.error = str(e)
        finally:
            if close_conn:
                self.close()
        return result_set
    def filter_sql(self, sql='', limit_num=0):
        # Append a LIMIT clause to bare SELECTs. TODO: the rewrite is naive.
        sql_lower = sql.lower().rstrip(';').strip()
        if re.match(r"^select", sql_lower):
            if re.search(r"limit\s+(\d+)$", sql_lower) is None:
                if re.search(r"limit\s+\d+\s*,\s*(\d+)$", sql_lower) is None:
                    return f"{sql.rstrip(';')} limit {limit_num};"
        return f"{sql.rstrip(';')};"
    def query_masking(self, db_name=None, sql='', resultset=None):
        """Apply simple column-masking rules; effective for SELECT only."""
        if re.match(r"^select", sql, re.I):
            filtered_result = simple_column_mask(self.instance, resultset)
            filtered_result.is_masked = True
        else:
            filtered_result = resultset
        return filtered_result
    def execute_check(self, db_name=None, sql=''):
        """Pre-execution review of a change ticket; returns a ReviewSet."""
        config = SysConfig()
        check_result = ReviewSet(full_sql=sql)
        # Forbidden / high-risk statement checks.
        line = 1
        critical_ddl_regex = config.get('critical_ddl_regex', '')
        p = re.compile(critical_ddl_regex)
        check_result.syntax_type = 2  # TODO ticket type: 0 other, 1 DDL, 2 DML
        for statement in sqlparse.split(sql):
            statement = sqlparse.format(statement, strip_comments=True)
            # Forbidden: queries are not allowed in change tickets.
            if re.match(r"^select", statement.lower()):
                check_result.is_critical = True
                result = ReviewResult(id=line, errlevel=2,
                                      stagestatus='驳回不支持语句',
                                      errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
                                      sql=statement)
            # High-risk statement per the configured regex.
            elif critical_ddl_regex and p.match(statement.strip().lower()):
                check_result.is_critical = True
                result = ReviewResult(id=line, errlevel=2,
                                      stagestatus='驳回高危SQL',
                                      errormessage='禁止提交匹配' + critical_ddl_regex + '条件的语句!',
                                      sql=statement)
            # Normal statement.
            else:
                result = ReviewResult(id=line, errlevel=0,
                                      stagestatus='Audit completed',
                                      errormessage='None',
                                      sql=statement,
                                      affected_rows=0,
                                      execute_time=0, )
            # Any DDL statement marks the whole ticket as DDL.
            if get_syntax_type(statement) == 'DDL':
                check_result.syntax_type = 1
            check_result.rows += [result]
            # Bail out early on forbidden/high-risk statements.
            if check_result.is_critical:
                check_result.error_count += 1
                return check_result
            line += 1
        return check_result
    def execute_workflow(self, workflow, close_conn=True):
        """Execute an approved change ticket; returns a ReviewSet."""
        sql = workflow.sqlworkflowcontent.sql_content
        execute_result = ReviewSet(full_sql=sql)
        # Strip comments, then split into individual statements.
        sql = sqlparse.format(sql, strip_comments=True)
        split_sql = sqlparse.split(sql)
        line = 1
        statement = None
        db_name = workflow.db_name
        try:
            conn = self.get_connection(db_name=db_name)
            cursor = conn.cursor()
            # Execute each statement and append its outcome.
            for statement in split_sql:
                statement = statement.rstrip(';')
                with FuncTimer() as t:
                    cursor.execute(statement)
                    conn.commit()
                execute_result.rows.append(ReviewResult(
                    id=line,
                    errlevel=0,
                    stagestatus='Execute Successfully',
                    errormessage='None',
                    sql=statement,
                    affected_rows=cursor.rowcount,
                    execute_time=t.cost,
                ))
                line += 1
        except Exception as e:
            logger.warning(f"PGSQL命令执行报错,语句:{statement or sql}, 错误信息:{traceback.format_exc()}")
            execute_result.error = str(e)
            # Record the failing statement.
            execute_result.rows.append(ReviewResult(
                id=line,
                errlevel=2,
                stagestatus='Execute Failed',
                errormessage=f'异常信息:{e}',
                sql=statement or sql,
                affected_rows=0,
                execute_time=0,
            ))
            line += 1
            # Remaining statements: audited but not executed.
            for statement in split_sql[line - 1:]:
                execute_result.rows.append(ReviewResult(
                    id=line,
                    errlevel=0,
                    stagestatus='Audit completed',
                    errormessage=f'前序语句失败, 未执行',
                    sql=statement,
                    affected_rows=0,
                    execute_time=0,
                ))
                line += 1
        finally:
            if close_conn:
                self.close()
        return execute_result
    def close(self):
        # Close and drop the cached connection, if any.
        if self.conn:
            self.conn.close()
            self.conn = None
|
from text_classification import generate_model, model_validation
from modify_dataset import modify_dataset_and_raw_data_with_percentage_size_to_keep
from modify_dataset import modify_dataset_select_features
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import ComplementNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier
from sklearn.linear_model import LogisticRegression
from joblib import load
from text_preprocessing import _load_data
from codecarbon import EmissionsTracker
import csv
import time
import random
from click import progressbar
import click
# Destination CSV for all experiment results.
RESULTS_FILE = 'results.csv'
# Column order for every row appended to RESULTS_FILE.
RESULTS_HEADER = [
    'algorithm',
    'RQ',
    'experiment_id',
    'iteration',
    'no_datapoints',
    'no_features',
    'preprocessing_energy(J)',
    'preprocessing_time(s)',
    'train_energy(J)',
    'train_time(s)',
    'predict_energy(J)',
    'predict_time(s)',
    'datatype',
    'accuracy',
    'precision',
    'recall',
    'f1',
]
results = []
# Import-time side effects: the corpus and its preprocessed form are loaded
# once here and shared by every experiment run.
raw_data = _load_data()
preprocessed_data = load('output/preprocessed_data.joblib')
# Repetitions of each experimental cell.
NUMBER_OF_EXPERIMENTAL_RUNS = 30
# Idle pause (seconds) before each measured run — NOTE(review): presumably to
# let the machine settle before energy tracking starts; confirm.
SLEEP_TIME = 5
# Candidate classifiers under study (SVC/RandomForest use balanced weights).
CLASSIFIERS = {
    'SVM': SVC(class_weight="balanced"),
    'Decision Tree': DecisionTreeClassifier(),
    'Naive Bayes': ComplementNB(),
    'KNN': KNeighborsClassifier(),
    'Random Forest': RandomForestClassifier(class_weight="balanced"),
    'AdaBoost': AdaBoostClassifier(),
    'Bagging Classifier': BaggingClassifier()
}
# Percentages of the dataset (RQ2.1) and feature set (RQ2.2) to explore.
dataset_size_percentages = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
featureset_size_percentages = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
def energy_stats(energy_consumption_kwh, energy_tracker):
    """Convert a codecarbon kWh reading to Joules and derive the duration.

    IMPORTANT: call this right after stopping the tracker — it reads the
    tracker's private _start_time/_last_measured_time attributes.
    """
    # 1 kWh = 1000 W * 3600 s.
    joules = energy_consumption_kwh * 1000 * 3600
    elapsed = energy_tracker._last_measured_time - energy_tracker._start_time
    return joules, elapsed
def write_header(filename):
    """Create (or truncate) *filename* and write the RESULTS_HEADER row."""
    with open(filename, mode='w') as results_file:
        writer = csv.writer(results_file, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(RESULTS_HEADER)
def write_result(result, filename):
    """Append a single result row to *filename* as CSV."""
    with open(filename, mode='a') as results_file:
        writer = csv.writer(results_file, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(result)
def run_experiment(RQ, iteration, experiment_id, classifier_name, dataset_percentage, featureset_percentage):
    """Run one measured experiment and append its row to RESULTS_FILE.

    Three separately tracked phases — preprocessing (dataset/feature
    reduction), training, and prediction/validation — each wrapped in its own
    codecarbon EmissionsTracker. The tracker start/stop ordering around each
    phase is measurement-critical; do not reorder these statements.
    """
    classifier = CLASSIFIERS[classifier_name]
    print(f"Starting Experiment: {experiment_id},"
          f"\n research question {RQ},"
          f"\n iteration {iteration},"
          f"\n classifier {classifier},"
          f"\n dataset_percentage {dataset_percentage},"
          f"\n featureset_percentage {featureset_percentage}"
          )
    # Idle pause before measuring — NOTE(review): presumably lets the machine
    # quiesce so prior work does not bleed into the energy reading; confirm.
    time.sleep(SLEEP_TIME)
    preprocessing_tracker = EmissionsTracker(save_to_file=False)
    #### START TIMED PREPROCESSING SECTION ####
    preprocessing_tracker.start()
    # Danger: dataset_percentage
    modified_preprocessed_data, modified_raw_data = modify_dataset_and_raw_data_with_percentage_size_to_keep(
        preprocessed_data, raw_data, dataset_percentage)
    # feature selection
    modified_preprocessed_data, modified_raw_data = modify_dataset_select_features(
        modified_preprocessed_data, modified_raw_data, featureset_percentage
    )
    preprocessing_energy_consumption_kwh = preprocessing_tracker.stop()
    #### STOP TIMED PREPROCESSING SECTION ####
    preprocessing_energy_consumption, preprocessing_duration = energy_stats(preprocessing_energy_consumption_kwh,
                                                                            preprocessing_tracker)
    training_tracker = EmissionsTracker(save_to_file=False)
    #### START TIMED TRAINING SECTION ####
    training_tracker.start()
    classifier, X_train, X_test, y_train, y_test, test_messages = generate_model(classifier, modified_raw_data,
                                                                                 modified_preprocessed_data)
    training_energy_consumption_kwh = training_tracker.stop()
    #### STOP TIMED TRAINING SECTION ####
    training_energy_consumption, training_duration = energy_stats(training_energy_consumption_kwh, training_tracker)
    predict_tracker = EmissionsTracker(save_to_file=False)
    #### START TIMED PREDICTION SECTION ####
    predict_tracker.start()
    _, scores, report = model_validation(classifier, X_test, y_test)
    print (scores)
    print (report)
    predict_energy_consumption_kwh = predict_tracker.stop()
    #### STOP TIMED PREDICTION SECTION ####
    predict_energy_consumption, predict_duration = energy_stats(predict_energy_consumption_kwh, predict_tracker)
    number_of_datapoints = len(y_train)
    number_of_features = X_train.shape[1]
    print(f"Experiment ID {experiment_id}")
    print(f"Run {iteration}")
    print(f"    Energy Consumption: {training_energy_consumption} Joules")
    print(f"    Duration: {training_duration} seconds")
    # One row per run, in RESULTS_HEADER column order.
    result_row = [
        classifier_name,
        RQ,
        experiment_id,
        iteration,
        number_of_datapoints,
        number_of_features,
        preprocessing_energy_consumption,
        preprocessing_duration,
        training_energy_consumption,
        training_duration,
        predict_energy_consumption,
        predict_duration,
        "float64", #datatype
        scores['accuracy'],
        scores['precision'],
        scores['recall'],
        scores['f1'],
    ]
    results.append(result_row)
    # Persist immediately so an interrupted batch can resume.
    write_result(result_row, RESULTS_FILE)
def collect_previous_experiments(filename):
    """Load prior result rows as dicts; None when no results file exists."""
    try:
        handle = open(filename, mode='r')
    except FileNotFoundError:
        return None
    with handle:
        reader = csv.DictReader(handle, delimiter=',', quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
        return list(reader)
def _compute_experiment_hash(exp):
return f"{exp["experiment_id"]}_{exp["iteration"]}"
def run_experiment_batch(experiments):
    """Run every pending experiment, resuming from RESULTS_FILE if present.

    (experiment_id, iteration) pairs already recorded in RESULTS_FILE are
    skipped so an interrupted batch can be restarted without redoing work.
    """
    previous_experiments = collect_previous_experiments(RESULTS_FILE)
    if previous_experiments:
        previous_ids = [_compute_experiment_hash(exp) for exp in previous_experiments]
        print(f"There were {len(previous_ids)} experiments in {RESULTS_FILE}."
              f" Skipping ids {previous_ids}.")
        # Set membership instead of an O(n) list scan per experiment.
        already_done = set(previous_ids)
        experiments = [exp for exp in experiments if _compute_experiment_hash(exp) not in already_done]
    else:
        # Fix: was write_header('results.csv') — a hard-coded duplicate of
        # RESULTS_FILE; use the constant so the header always goes to the
        # same file the results are appended to.
        write_header(RESULTS_FILE)
    print(f"Remaining experiments: {len(experiments)}.")
    # Randomize execution order — presumably to decorrelate measurements from
    # slow machine drift.
    random.shuffle(experiments)
    with progressbar(experiments) as bar:
        for experiment in bar:
            print("\n")
            run_experiment(**experiment)
def fibonacci(n):
    """Return the n-th Fibonacci number, 1-indexed: F(1)=0, F(2)=1.

    Deliberately naive exponential recursion — the fibonacci(35) call in this
    script appears to act as a CPU warm-up before the measured batch, so the
    inefficiency is presumably intentional; confirm before optimizing.
    Prints a message and returns None for non-positive n.
    """
    if n <= 0:
        print("Incorrect input")
        return None
    if n == 1:
        return 0
    if n == 2:
        return 1
    return fibonacci(n - 1) + fibonacci(n - 2)
# default values
featureset_percentage = 100
dataset_percentage = 100
# initial values
experiment_id=0
experiments = []
# run classification experiment
# Build the full grid: for each classifier, RQ2.1 varies the dataset size and
# RQ2.2 varies the feature-set size (the other dimension held at 100%); every
# cell is repeated NUMBER_OF_EXPERIMENTAL_RUNS times under one experiment_id.
for classifier_name in CLASSIFIERS.keys():
    RQ="2.1"
    for dataset_percentage in dataset_size_percentages:
        experiment_id += 1
        for iteration in range(NUMBER_OF_EXPERIMENTAL_RUNS):
            experiments.append({
                "RQ": RQ,
                "iteration": iteration,
                "experiment_id": experiment_id,
                "classifier_name": classifier_name,
                "dataset_percentage": dataset_percentage,
                "featureset_percentage": featureset_percentage,
            })
    # Reset so RQ2.2 runs against the full dataset.
    dataset_percentage = 100
    RQ="2.2"
    for featureset_percentage in featureset_size_percentages:
        experiment_id += 1
        for iteration in range(NUMBER_OF_EXPERIMENTAL_RUNS):
            experiments.append({
                "RQ": RQ,
                "iteration": iteration,
                "experiment_id": experiment_id,
                "classifier_name": classifier_name,
                "dataset_percentage": dataset_percentage,
                "featureset_percentage": featureset_percentage,
            })
    # Reset so the next classifier's RQ2.1 uses the full feature set.
    featureset_percentage = 100
# NOTE(review): fibonacci(35) looks like a deliberate CPU warm-up before the
# measured batch — confirm before removing or optimizing it.
fibonacci(35)
run_experiment_batch(experiments)
| from text_classification import generate_model, model_validation
from modify_dataset import modify_dataset_and_raw_data_with_percentage_size_to_keep
from modify_dataset import modify_dataset_select_features
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import ComplementNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier
from sklearn.linear_model import LogisticRegression
from joblib import load
from text_preprocessing import _load_data
from codecarbon import EmissionsTracker
import csv
import time
import random
from click import progressbar
import click
# Output CSV accumulating one row per experimental run.
RESULTS_FILE = 'results.csv'
# Column order must match the rows built in run_experiment().
RESULTS_HEADER = [
    'algorithm',
    'RQ',
    'experiment_id',
    'iteration',
    'no_datapoints',
    'no_features',
    'preprocessing_energy(J)',
    'preprocessing_time(s)',
    'train_energy(J)',
    'train_time(s)',
    'predict_energy(J)',
    'predict_time(s)',
    'datatype',
    'accuracy',
    'precision',
    'recall',
    'f1',
]
# In-memory copy of the rows written during this session.
results = []
# Loaded once at import time; sub-sampled copies are made per experiment.
raw_data = _load_data()
preprocessed_data = load('output/preprocessed_data.joblib')
NUMBER_OF_EXPERIMENTAL_RUNS = 30
# Idle pause (seconds) before each run so energy readings settle.
SLEEP_TIME = 5
# Algorithms under test, keyed by the name reported in the results file.
CLASSIFIERS = {
    'SVM': SVC(class_weight="balanced"),
    'Decision Tree': DecisionTreeClassifier(),
    'Naive Bayes': ComplementNB(),
    'KNN': KNeighborsClassifier(),
    'Random Forest': RandomForestClassifier(class_weight="balanced"),
    'AdaBoost': AdaBoostClassifier(),
    'Bagging Classifier': BaggingClassifier()
}
# Percentage grids explored by RQ 2.1 (dataset size) and RQ 2.2 (featureset size).
dataset_size_percentages = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
featureset_size_percentages = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
def energy_stats(energy_consumption_kwh, energy_tracker):
    """Derive (joules, duration_seconds) from a codecarbon tracker reading.

    IMPORTANT: this function should be called right after stopping the tracker,
    because the duration is taken from the tracker's internal timestamps.
    """
    # kWh -> J: 1 kW = 1000 W, 1 h = 3600 s.
    joules = energy_consumption_kwh * 1000 * 3600
    elapsed = energy_tracker._last_measured_time - energy_tracker._start_time
    return joules, elapsed
def write_header(filename):
    """Create (or truncate) ``filename`` and write the results header row."""
    with open(filename, mode='w') as out_file:
        header_writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        header_writer.writerow(RESULTS_HEADER)
def write_result(result, filename):
    """Append one result row to the CSV file ``filename``."""
    with open(filename, mode='a') as out_file:
        csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL).writerow(result)
def run_experiment(RQ, iteration, experiment_id, classifier_name, dataset_percentage, featureset_percentage):
    """Run one measured experiment and append its row to the results CSV.

    Three phases (preprocessing, training, prediction) are each wrapped in
    their own codecarbon EmissionsTracker so energy and duration can be
    reported per phase.
    """
    classifier = CLASSIFIERS[classifier_name]
    print(f"Starting Experiment: {experiment_id},"
          f"\n research question {RQ},"
          f"\n iteration {iteration},"
          f"\n classifier {classifier},"
          f"\n dataset_percentage {dataset_percentage},"
          f"\n featureset_percentage {featureset_percentage}"
          )
    # Let the machine settle before measuring.
    time.sleep(SLEEP_TIME)
    preprocessing_tracker = EmissionsTracker(save_to_file=False)
    #### START TIMED PREPROCESSING SECTION ####
    preprocessing_tracker.start()
    # Sub-sample the dataset to the requested size (RQ 2.1 variable).
    modified_preprocessed_data, modified_raw_data = modify_dataset_and_raw_data_with_percentage_size_to_keep(
        preprocessed_data, raw_data, dataset_percentage)
    # Feature selection to the requested size (RQ 2.2 variable).
    modified_preprocessed_data, modified_raw_data = modify_dataset_select_features(
        modified_preprocessed_data, modified_raw_data, featureset_percentage
    )
    preprocessing_energy_consumption_kwh = preprocessing_tracker.stop()
    #### STOP TIMED PREPROCESSING SECTION ####
    preprocessing_energy_consumption, preprocessing_duration = energy_stats(preprocessing_energy_consumption_kwh,
                                                                            preprocessing_tracker)
    training_tracker = EmissionsTracker(save_to_file=False)
    #### START TIMED TRAINING SECTION ####
    training_tracker.start()
    classifier, X_train, X_test, y_train, y_test, test_messages = generate_model(classifier, modified_raw_data,
                                                                                modified_preprocessed_data)
    training_energy_consumption_kwh = training_tracker.stop()
    #### STOP TIMED TRAINING SECTION ####
    training_energy_consumption, training_duration = energy_stats(training_energy_consumption_kwh, training_tracker)
    predict_tracker = EmissionsTracker(save_to_file=False)
    #### START TIMED PREDICTION SECTION ####
    predict_tracker.start()
    _, scores, report = model_validation(classifier, X_test, y_test)
    print (scores)
    print (report)
    predict_energy_consumption_kwh = predict_tracker.stop()
    #### STOP TIMED PREDICTION SECTION ####
    predict_energy_consumption, predict_duration = energy_stats(predict_energy_consumption_kwh, predict_tracker)
    number_of_datapoints = len(y_train)
    number_of_features = X_train.shape[1]
    print(f"Experiment ID {experiment_id}")
    print(f"Run {iteration}")
    print(f" Energy Consumption: {training_energy_consumption} Joules")
    print(f" Duration: {training_duration} seconds")
    # Row order must match RESULTS_HEADER.
    result_row = [
        classifier_name,
        RQ,
        experiment_id,
        iteration,
        number_of_datapoints,
        number_of_features,
        preprocessing_energy_consumption,
        preprocessing_duration,
        training_energy_consumption,
        training_duration,
        predict_energy_consumption,
        predict_duration,
        "float64", # datatype column is constant for these models
        scores['accuracy'],
        scores['precision'],
        scores['recall'],
        scores['f1'],
    ]
    results.append(result_row)
    write_result(result_row, RESULTS_FILE)
def collect_previous_experiments(filename):
    """Load previously recorded experiment rows from ``filename``.

    Returns a list of dicts (one per CSV row), or None when the results
    file does not exist yet.
    """
    try:
        handle = open(filename, mode='r')
    except FileNotFoundError:
        return None
    with handle:
        reader = csv.DictReader(handle, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        return [row for row in reader]
def _compute_experiment_hash(exp):
return f"{exp['experiment_id']}_{exp['iteration']}"
def run_experiment_batch(experiments):
    """Run every experiment not already recorded in RESULTS_FILE.

    Previously completed runs (identified by "<experiment_id>_<iteration>")
    are skipped so an interrupted batch can be resumed.
    """
    previous_experiments = collect_previous_experiments(RESULTS_FILE)
    if previous_experiments:
        previous_ids = [_compute_experiment_hash(exp) for exp in previous_experiments]
        print(f"There were {len(previous_ids)} experiments in {RESULTS_FILE}."
              f" Skipping ids {previous_ids}.")
        # Set lookup avoids an O(n^2) scan when many runs are already recorded.
        completed = set(previous_ids)
        experiments = [exp for exp in experiments if _compute_experiment_hash(exp) not in completed]
    else:
        # Fix: use RESULTS_FILE instead of a duplicated hard-coded filename.
        write_header(RESULTS_FILE)
    print(f"Remaining experiments: {len(experiments)}.")
    # Shuffle so classifiers/sizes are spread over time (reduces ordering bias).
    random.shuffle(experiments)
    with progressbar(experiments) as bar:
        for experiment in bar:
            print("\n")
            run_experiment(**experiment)
def fibonacci(n):
    """Return the n-th Fibonacci number, 1-indexed (fibonacci(1) == 0).

    Prints an error and returns None for n <= 0 (unchanged contract).
    Rewritten iteratively: the previous recursion was O(2^n).
    NOTE(review): if the module-level fibonacci(35) call was a deliberate
    CPU warm-up before energy measurement, use an explicit warm-up instead.
    """
    if n <= 0:
        print("Incorrect input")
        return None
    previous, current = 0, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    return previous
# default values
featureset_percentage = 100
dataset_percentage = 100
# initial values
experiment_id=0
experiments = []
# Build the full grid of experiment configurations per classifier:
#   RQ 2.1 varies the dataset size (featureset fixed at 100%),
#   RQ 2.2 varies the featureset size (dataset fixed at 100%),
# each repeated NUMBER_OF_EXPERIMENTAL_RUNS times.
for classifier_name in CLASSIFIERS.keys():
    RQ="2.1"
    for dataset_percentage in dataset_size_percentages:
        experiment_id += 1
        for iteration in range(NUMBER_OF_EXPERIMENTAL_RUNS):
            experiments.append({
                "RQ": RQ,
                "iteration": iteration,
                "experiment_id": experiment_id,
                "classifier_name": classifier_name,
                "dataset_percentage": dataset_percentage,
                "featureset_percentage": featureset_percentage,
            })
    # reset so RQ 2.2 runs on the full dataset
    dataset_percentage = 100
    RQ="2.2"
    for featureset_percentage in featureset_size_percentages:
        experiment_id += 1
        for iteration in range(NUMBER_OF_EXPERIMENTAL_RUNS):
            experiments.append({
                "RQ": RQ,
                "iteration": iteration,
                "experiment_id": experiment_id,
                "classifier_name": classifier_name,
                "dataset_percentage": dataset_percentage,
                "featureset_percentage": featureset_percentage,
            })
    # reset so the next classifier starts from the full featureset
    featureset_percentage = 100
# NOTE(review): result discarded — presumably a CPU warm-up before the
# measured runs start; confirm intent.
fibonacci(35)
run_experiment_batch(experiments)
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
import os
import pathlib
import re
import shutil
import tempfile
from typing import Any, Dict
import pytest
from integration_tests.dbt_integration_test import DbtIntegrationTest
from normalization.destination_type import DestinationType
from normalization.transform_catalog.catalog_processor import CatalogProcessor
# Folders created by tests; removed in the module-level teardown fixture.
temporary_folders = set()
# Shared helper driving the dbt/destination containers for every test.
dbt_test_utils = DbtIntegrationTest()
@pytest.fixture(scope="module", autouse=True)
def before_all_tests(request):
    """Module-scoped setup/teardown: prepare the DB, then remove temp folders."""
    dbt_test_utils.change_current_test_dir(request)
    dbt_test_utils.setup_db()
    # Prefer the project's virtualenv binaries (e.g. dbt) over system ones.
    os.environ["PATH"] = os.path.abspath("../.venv/bin/") + ":" + os.environ["PATH"]
    yield
    dbt_test_utils.tear_down_db()
    for folder in temporary_folders:
        print(f"Deleting temporary test folder {folder}")
        shutil.rmtree(folder, ignore_errors=True)
@pytest.fixture
def setup_test_path(request):
    """Per-test fixture: run from the test's own directory, restore cwd after."""
    dbt_test_utils.change_current_test_dir(request)
    print(f"Running from: {pathlib.Path().absolute()}")
    # Fix: os.environ["PATH"] inside a double-quoted f-string is a
    # SyntaxError on Python < 3.12 (PEP 701) — use single quotes inside.
    print(f"Current PATH is: {os.environ['PATH']}")
    yield
    os.chdir(request.config.invocation_dir)
@pytest.mark.parametrize("column_count", [1500])
@pytest.mark.parametrize("integration_type", list(DestinationType))
def test_destination_supported_limits(integration_type: DestinationType, column_count: int):
    """Each destination should normalize a stream with a large column count."""
    if integration_type == DestinationType.MYSQL:
        # In MySQL, the max number of columns is limited by row size (8KB),
        # not by absolute column count. It is way fewer than 1500.
        return
    if integration_type == DestinationType.ORACLE:
        # Oracle caps tables/views at 1000 columns (ORA-01792); stay below it.
        column_count = 998
    run_test(integration_type, column_count)
@pytest.mark.parametrize(
    "integration_type, column_count, expected_exception_message",
    [
        ("Postgres", 1665, "target lists can have at most 1664 entries"),
        (
            "BigQuery",
            2500,
            "The view is too large.",
        ),
        (
            "Snowflake",
            2000,
            "Operation failed because soft limit on objects of type 'Column' per table was exceeded.",
        ),
        ("Redshift", 1665, "target lists can have at most 1664 entries"),
        ("MySQL", 250, "Row size too large"),
        ("Oracle", 1001, "ORA-01792: maximum number of columns in a table or view is 1000"),
    ],
)
def test_destination_failure_over_limits(integration_type: str, column_count: int, expected_exception_message: str, setup_test_path):
    """Exceeding a destination's column limit must fail with the documented error."""
    run_test(DestinationType.from_string(integration_type), column_count, expected_exception_message)
def test_empty_streams(setup_test_path):
    """Normalization should handle a stream declaring zero columns."""
    run_test(DestinationType.POSTGRES, 0)
def test_stream_with_1_airbyte_column(setup_test_path):
    """Normalization should handle a stream with a single column."""
    run_test(DestinationType.POSTGRES, 1)
def run_test(destination_type: DestinationType, column_count: int, expected_exception_message: str = ""):
    """Generate a catalog with ``column_count`` columns and run dbt normalization.

    When ``expected_exception_message`` is set, the dbt run is expected to
    fail and its output log must contain that message.
    """
    print("Testing ephemeral")
    integration_type = destination_type.value
    # Create the test folder with dbt project and appropriate destination settings to run integration tests from
    test_root_dir = setup_test_dir(integration_type)
    destination_config = dbt_test_utils.generate_profile_yaml_file(destination_type, test_root_dir)
    dbt_test_utils.generate_project_yaml_file(destination_type, test_root_dir)
    # generate a catalog and associated dbt models files
    generate_dbt_models(destination_type, test_root_dir, column_count)
    # Use destination connector to create empty _airbyte_raw_* tables to use as input for the test
    assert setup_input_raw_data(integration_type, test_root_dir, destination_config)
    if expected_exception_message:
        with pytest.raises(AssertionError):
            dbt_test_utils.dbt_run(test_root_dir)
        assert search_logs_for_pattern(test_root_dir + "/dbt_output.log", expected_exception_message)
    else:
        dbt_test_utils.dbt_run(test_root_dir)
def search_logs_for_pattern(log_file: str, pattern: str):
    """Return True when any line of ``log_file`` matches ``pattern`` (regex)."""
    matcher = re.compile(pattern)
    with open(log_file, "r") as handle:
        return any(matcher.search(line) for line in handle)
def setup_test_dir(integration_type: str) -> str:
    """
    We prepare a clean folder to run the tests from.

    Returns the path of a fresh per-run directory seeded with a copy of the
    dbt project template.
    """
    # Fix: the joinpath arguments used double quotes inside a double-quoted
    # f-string — a SyntaxError on Python < 3.12 (PEP 701).
    test_root_dir = f"{pathlib.Path().joinpath('..', 'build', 'normalization_test_output', integration_type.lower()).resolve()}"
    os.makedirs(test_root_dir, exist_ok=True)
    # mkdtemp yields a unique sub-folder name; it is removed right away
    # because shutil.copytree requires the destination not to exist.
    test_root_dir = tempfile.mkdtemp(dir=test_root_dir)
    temporary_folders.add(test_root_dir)
    shutil.rmtree(test_root_dir, ignore_errors=True)
    print(f"Setting up test folder {test_root_dir}")
    shutil.copytree("../dbt-project-template", test_root_dir)
    dbt_test_utils.copy_replace("../dbt-project-template/dbt_project.yml", os.path.join(test_root_dir, "dbt_project.yml"))
    return test_root_dir
def setup_input_raw_data(integration_type: str, test_root_dir: str, destination_config: Dict[str, Any]) -> bool:
    """
    This should populate the associated "raw" tables from which normalization is reading from when running dbt CLI.
    """
    config_file = os.path.join(test_root_dir, "destination_config.json")
    with open(config_file, "w") as f:
        f.write(json.dumps(destination_config))
    # Run the destination connector container, mounting the test dir as /data.
    connector_image = f"airbyte/destination-{integration_type.lower()}:dev"
    commands = [
        "docker", "run", "--rm", "--init",
        "-v", f"{test_root_dir}:/data",
        "--network", "host",
        "-i", connector_image,
        "write",
        "--config", "/data/destination_config.json",
        "--catalog", "/data/catalog.json",
    ]
    # Force a reset in destination raw tables
    return dbt_test_utils.run_destination_process("", test_root_dir, commands)
def generate_dbt_models(destination_type: DestinationType, test_root_dir: str, column_count: int):
    """
    This is the normalization step generating dbt models files from the destination_catalog.json taken as input.
    """
    output_directory = os.path.join(test_root_dir, "models", "generated")
    shutil.rmtree(output_directory, ignore_errors=True)
    catalog_processor = CatalogProcessor(output_directory, destination_type)
    # Minimal single-stream catalog; column properties are filled in below.
    catalog_config = {
        "streams": [
            {
                "stream": {
                    "name": f"stream_with_{column_count}_columns",
                    "json_schema": {
                        "type": ["null", "object"],
                        "properties": {},
                    },
                    "supported_sync_modes": ["incremental"],
                    "source_defined_cursor": True,
                    "default_cursor_field": [],
                },
                "sync_mode": "incremental",
                "cursor_field": [],
                "destination_sync_mode": "overwrite",
            }
        ]
    }
    if column_count == 1:
        catalog_config["streams"][0]["stream"]["json_schema"]["properties"]["_airbyte_id"] = {"type": "integer"}
    else:
        # Random names avoid collisions with reserved/airbyte column names.
        for column in [dbt_test_utils.random_string(5) for _ in range(column_count)]:
            catalog_config["streams"][0]["stream"]["json_schema"]["properties"][column] = {"type": "string"}
    catalog = os.path.join(test_root_dir, "catalog.json")
    with open(catalog, "w") as fh:
        fh.write(json.dumps(catalog_config))
    catalog_processor.process(catalog, "_airbyte_data", dbt_test_utils.target_schema)
| #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
import os
import pathlib
import re
import shutil
import tempfile
from typing import Any, Dict
import pytest
from integration_tests.dbt_integration_test import DbtIntegrationTest
from normalization.destination_type import DestinationType
from normalization.transform_catalog.catalog_processor import CatalogProcessor
# Folders created by tests; removed in the module-level teardown fixture.
temporary_folders = set()
# Shared helper driving the dbt/destination containers for every test.
dbt_test_utils = DbtIntegrationTest()
@pytest.fixture(scope="module", autouse=True)
def before_all_tests(request):
    """Module-scoped setup/teardown: prepare the DB, then remove temp folders."""
    dbt_test_utils.change_current_test_dir(request)
    dbt_test_utils.setup_db()
    # Prefer the project's virtualenv binaries (e.g. dbt) over system ones.
    os.environ["PATH"] = os.path.abspath("../.venv/bin/") + ":" + os.environ["PATH"]
    yield
    dbt_test_utils.tear_down_db()
    for folder in temporary_folders:
        print(f"Deleting temporary test folder {folder}")
        shutil.rmtree(folder, ignore_errors=True)
@pytest.fixture
def setup_test_path(request):
    """Per-test fixture: run from the test's own directory, restore cwd after."""
    dbt_test_utils.change_current_test_dir(request)
    print(f"Running from: {pathlib.Path().absolute()}")
    print(f"Current PATH is: {os.environ['PATH']}")
    yield
    os.chdir(request.config.invocation_dir)
@pytest.mark.parametrize("column_count", [1500])
@pytest.mark.parametrize("integration_type", list(DestinationType))
def test_destination_supported_limits(integration_type: DestinationType, column_count: int):
    """Each destination should normalize a stream with a large column count."""
    if integration_type == DestinationType.MYSQL:
        # In MySQL, the max number of columns is limited by row size (8KB),
        # not by absolute column count. It is way fewer than 1500.
        return
    if integration_type == DestinationType.ORACLE:
        # Oracle caps tables/views at 1000 columns (ORA-01792); stay below it.
        column_count = 998
    run_test(integration_type, column_count)
@pytest.mark.parametrize(
    "integration_type, column_count, expected_exception_message",
    [
        ("Postgres", 1665, "target lists can have at most 1664 entries"),
        (
            "BigQuery",
            2500,
            "The view is too large.",
        ),
        (
            "Snowflake",
            2000,
            "Operation failed because soft limit on objects of type 'Column' per table was exceeded.",
        ),
        ("Redshift", 1665, "target lists can have at most 1664 entries"),
        ("MySQL", 250, "Row size too large"),
        ("Oracle", 1001, "ORA-01792: maximum number of columns in a table or view is 1000"),
    ],
)
def test_destination_failure_over_limits(integration_type: str, column_count: int, expected_exception_message: str, setup_test_path):
    """Exceeding a destination's column limit must fail with the documented error."""
    run_test(DestinationType.from_string(integration_type), column_count, expected_exception_message)
def test_empty_streams(setup_test_path):
    """Normalization should handle a stream declaring zero columns."""
    run_test(DestinationType.POSTGRES, 0)
def test_stream_with_1_airbyte_column(setup_test_path):
    """Normalization should handle a stream with a single column."""
    run_test(DestinationType.POSTGRES, 1)
def run_test(destination_type: DestinationType, column_count: int, expected_exception_message: str = ""):
    """Generate a catalog with ``column_count`` columns and run dbt normalization.

    When ``expected_exception_message`` is set, the dbt run is expected to
    fail and its output log must contain that message.
    """
    print("Testing ephemeral")
    integration_type = destination_type.value
    # Create the test folder with dbt project and appropriate destination settings to run integration tests from
    test_root_dir = setup_test_dir(integration_type)
    destination_config = dbt_test_utils.generate_profile_yaml_file(destination_type, test_root_dir)
    dbt_test_utils.generate_project_yaml_file(destination_type, test_root_dir)
    # generate a catalog and associated dbt models files
    generate_dbt_models(destination_type, test_root_dir, column_count)
    # Use destination connector to create empty _airbyte_raw_* tables to use as input for the test
    assert setup_input_raw_data(integration_type, test_root_dir, destination_config)
    if expected_exception_message:
        with pytest.raises(AssertionError):
            dbt_test_utils.dbt_run(test_root_dir)
        assert search_logs_for_pattern(test_root_dir + "/dbt_output.log", expected_exception_message)
    else:
        dbt_test_utils.dbt_run(test_root_dir)
def search_logs_for_pattern(log_file: str, pattern: str):
    """Scan ``log_file`` line by line; True if ``pattern`` matches anywhere."""
    compiled = re.compile(pattern)
    with open(log_file, "r") as fh:
        for current_line in fh:
            if compiled.search(current_line):
                return True
    return False
def setup_test_dir(integration_type: str) -> str:
    """
    We prepare a clean folder to run the tests from.

    Returns the path of a fresh per-run directory seeded with a copy of the
    dbt project template.
    """
    test_root_dir = f"{pathlib.Path().joinpath('..', 'build', 'normalization_test_output', integration_type.lower()).resolve()}"
    os.makedirs(test_root_dir, exist_ok=True)
    # mkdtemp yields a unique sub-folder name; it is removed right away
    # because shutil.copytree requires the destination not to exist.
    test_root_dir = tempfile.mkdtemp(dir=test_root_dir)
    temporary_folders.add(test_root_dir)
    shutil.rmtree(test_root_dir, ignore_errors=True)
    print(f"Setting up test folder {test_root_dir}")
    shutil.copytree("../dbt-project-template", test_root_dir)
    dbt_test_utils.copy_replace("../dbt-project-template/dbt_project.yml", os.path.join(test_root_dir, "dbt_project.yml"))
    return test_root_dir
def setup_input_raw_data(integration_type: str, test_root_dir: str, destination_config: Dict[str, Any]) -> bool:
    """
    This should populate the associated "raw" tables from which normalization is reading from when running dbt CLI.
    """
    config_file = os.path.join(test_root_dir, "destination_config.json")
    with open(config_file, "w") as f:
        f.write(json.dumps(destination_config))
    # Run the destination connector container, mounting the test dir as /data.
    commands = [
        "docker",
        "run",
        "--rm",
        "--init",
        "-v",
        f"{test_root_dir}:/data",
        "--network",
        "host",
        "-i",
        f"airbyte/destination-{integration_type.lower()}:dev",
        "write",
        "--config",
        "/data/destination_config.json",
        "--catalog",
        "/data/catalog.json",
    ]
    # Force a reset in destination raw tables
    return dbt_test_utils.run_destination_process("", test_root_dir, commands)
def generate_dbt_models(destination_type: DestinationType, test_root_dir: str, column_count: int):
    """
    This is the normalization step generating dbt models files from the destination_catalog.json taken as input.
    """
    output_directory = os.path.join(test_root_dir, "models", "generated")
    shutil.rmtree(output_directory, ignore_errors=True)
    catalog_processor = CatalogProcessor(output_directory, destination_type)
    # Minimal single-stream catalog; column properties are filled in below.
    catalog_config = {
        "streams": [
            {
                "stream": {
                    "name": f"stream_with_{column_count}_columns",
                    "json_schema": {
                        "type": ["null", "object"],
                        "properties": {},
                    },
                    "supported_sync_modes": ["incremental"],
                    "source_defined_cursor": True,
                    "default_cursor_field": [],
                },
                "sync_mode": "incremental",
                "cursor_field": [],
                "destination_sync_mode": "overwrite",
            }
        ]
    }
    if column_count == 1:
        catalog_config["streams"][0]["stream"]["json_schema"]["properties"]["_airbyte_id"] = {"type": "integer"}
    else:
        # Random names avoid collisions with reserved/airbyte column names.
        for column in [dbt_test_utils.random_string(5) for _ in range(column_count)]:
            catalog_config["streams"][0]["stream"]["json_schema"]["properties"][column] = {"type": "string"}
    catalog = os.path.join(test_root_dir, "catalog.json")
    with open(catalog, "w") as fh:
        fh.write(json.dumps(catalog_config))
    catalog_processor.process(catalog, "_airbyte_data", dbt_test_utils.target_schema)
|
import math
import time
import torch
from copy import deepcopy
from tensornet.engine.ops.regularizer import l1
from tensornet.engine.ops.checkpoint import ModelCheckpoint
from tensornet.engine.ops.tensorboard import TensorBoard
from tensornet.data.processing import InfiniteDataLoader
from tensornet.utils.progress_bar import ProgressBar
class Learner:
"""Model Trainer and Validator.
Args:
train_loader (torch.utils.data.DataLoader): Training data loader.
optimizer (torch.optim): Optimizer for the model.
criterion (torch.nn): Loss Function.
device (:obj:`str` or :obj:`torch.device`, optional): Device where the data
will be loaded. (default='cpu')
epochs (:obj:`int`, optional): Numbers of epochs/iterations to train the model for.
(default: 1)
l1_factor (:obj:`float`, optional): L1 regularization factor. (default: 0)
val_loader (:obj:`torch.utils.data.DataLoader`, optional): Validation data loader.
callbacks (:obj:`list`, optional): List of callbacks to be used during training.
metrics (:obj:`list`, optional): List of names of the metrics for model
evaluation.
*Note*: If the model has multiple outputs, then this will be a nested list
where each individual sub-list will specify the metrics which are to be used for
evaluating each output respectively. In such cases, the model checkpoint will
consider only the metric of the first output for saving checkpoints.
activate_loss_logits (:obj:`bool`, optional): If True, the logits will first pass
through the `activate_logits` function before going to the criterion.
(default: False)
record_train (:obj:`bool`, optional): If False, metrics will be calculated only
during validation. (default: True)
"""
    def __init__(
        self, train_loader, optimizer, criterion, device='cpu',
        epochs=1, l1_factor=0.0, val_loader=None, callbacks=None, metrics=None,
        activate_loss_logits=False, record_train=True
    ):
        # Model is attached later via set_model().
        self.model = None
        self.optimizer = optimizer
        self.criterion = criterion
        self.train_loader = train_loader
        self.device = device
        self.epochs = epochs
        self.val_loader = val_loader
        self.l1_factor = l1_factor
        self.activate_loss_logits = activate_loss_logits
        self.record_train = record_train
        # Recognized LR-scheduler callbacks, filled by _setup_callbacks().
        self.lr_schedulers = {
            'step_lr': None,
            'lr_plateau': None,
            'one_cycle_policy': None,
            'cyclic_lr': None,
        }
        self.checkpoint = None
        self.summary_writer = None
        if callbacks is not None:
            self._setup_callbacks(callbacks)
        # Training history, one entry per epoch/iteration.
        self.train_losses = []  # Change in loss
        self.train_metrics = []  # Change in evaluation metric
        self.val_losses = []  # Change in loss
        self.val_metrics = []  # Change in evaluation metric
        # Set evaluation metrics (per-output accumulator dicts).
        self.metrics = []
        if metrics:
            self._setup_metrics(metrics)
    def _setup_callbacks(self, callbacks):
        """Extract callbacks passed to the class.

        Args:
            callbacks (list): List of callbacks. Recognized: torch LR
                schedulers (StepLR, ReduceLROnPlateau, OneCycleLR, CyclicLR),
                ModelCheckpoint and TensorBoard; anything else is ignored.

        Raises:
            ValueError: If a checkpoint monitors a training metric while
                record_train is False (the metric would never be computed).
        """
        for callback in callbacks:
            if isinstance(callback, torch.optim.lr_scheduler.StepLR):
                self.lr_schedulers['step_lr'] = callback
            elif isinstance(callback, torch.optim.lr_scheduler.ReduceLROnPlateau):
                self.lr_schedulers['lr_plateau'] = callback
            elif isinstance(callback, torch.optim.lr_scheduler.OneCycleLR):
                self.lr_schedulers['one_cycle_policy'] = callback
            elif isinstance(callback, ModelCheckpoint):
                if callback.monitor.startswith('train_'):
                    if self.record_train:
                        self.checkpoint = callback
                    else:
                        raise ValueError(
                            'Cannot use checkpoint for a training metric if record_train is set to False'
                        )
                else:
                    self.checkpoint = callback
            elif isinstance(callback, TensorBoard):
                self.summary_writer = callback
            elif isinstance(callback, torch.optim.lr_scheduler.CyclicLR):
                self.lr_schedulers['cyclic_lr'] = callback
def set_model(self, model):
"""Assign model to learner.
Args:
model (torch.nn.Module): Model Instance.
"""
self.model = model
if self.summary_writer is not None:
self.summary_writer.write_model(self.model)
def _accuracy(self, label, prediction, idx=0):
"""Calculate accuracy.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
self.metrics[idx]['accuracy']['sum'] += prediction.eq(
label.view_as(prediction)
).sum().item()
self.metrics[idx]['accuracy']['num_steps'] += len(label)
self.metrics[idx]['accuracy']['value'] = round(
100 * self.metrics[idx]['accuracy']['sum'] / self.metrics[idx]['accuracy']['num_steps'], 2
)
    def _iou(self, label, prediction, idx=0):
        """Calculate Intersection over Union.

        Args:
            label (torch.Tensor): Ground truth.
            prediction (torch.Tensor): Prediction.
            idx (int, optional): Output head index. (default: 0)
        """
        # Remove 1 channel dimension
        # assumes (batch, 1, H, W) mask tensors — TODO confirm with callers
        label = label.squeeze(1)
        prediction = prediction.squeeze(1)
        # Per-sample intersection/union reduced over both spatial dims.
        intersection = (prediction * label).sum(2).sum(1)
        union = (prediction + label).sum(2).sum(1) - intersection
        # epsilon is added to avoid 0/0
        epsilon = 1e-6
        iou = (intersection + epsilon) / (union + epsilon)
        self.metrics[idx]['iou']['sum'] += iou.sum().item()
        self.metrics[idx]['iou']['num_steps'] += label.size(0)
        self.metrics[idx]['iou']['value'] = round(
            self.metrics[idx]['iou']['sum'] / self.metrics[idx]['iou']['num_steps'], 3
        )
    def _pred_label_diff(self, label, prediction, rel=False):
        """Calculate the difference between label and prediction.

        Args:
            label (torch.Tensor): Ground truth.
            prediction (torch.Tensor): Prediction.
            rel (:obj:`bool`, optional): If True, return the relative
                difference. (default: False)

        Returns:
            Tuple of (element-wise absolute difference, valid element count),
            or implicitly None when no label exceeds the validity threshold —
            callers must check for None.
        """
        # For numerical stability
        valid_labels = label > 0.0001
        _label = label[valid_labels]
        _prediction = prediction[valid_labels]
        valid_element_count = _label.size(0)
        if valid_element_count > 0:
            diff = torch.abs(_label - _prediction)
            if rel:
                # Relative error is only well-defined on the valid (nonzero) labels.
                diff = torch.div(diff, _label)
            return diff, valid_element_count
    def _rmse(self, label, prediction, idx=0):
        """Calculate Root Mean Square Error.

        Args:
            label (torch.Tensor): Ground truth.
            prediction (torch.Tensor): Prediction.
            idx (int, optional): Output head index. (default: 0)
        """
        diff = self._pred_label_diff(label, prediction)
        # diff is None when the batch has no valid labels; counted as 0.
        rmse = 0
        if diff is not None:
            rmse = math.sqrt(torch.sum(torch.pow(diff[0], 2)) / diff[1])
        self.metrics[idx]['rmse']['num_steps'] += label.size(0)
        # Weight by batch size so 'value' is a running per-sample mean.
        self.metrics[idx]['rmse']['sum'] += rmse * label.size(0)
        self.metrics[idx]['rmse']['value'] = round(
            self.metrics[idx]['rmse']['sum'] / self.metrics[idx]['rmse']['num_steps'], 3
        )
    def _mae(self, label, prediction, idx=0):
        """Calculate Mean Average Error.

        Args:
            label (torch.Tensor): Ground truth.
            prediction (torch.Tensor): Prediction.
            idx (int, optional): Output head index. (default: 0)
        """
        diff = self._pred_label_diff(label, prediction)
        # diff is None when the batch has no valid labels; counted as 0.
        mae = 0
        if diff is not None:
            mae = torch.sum(diff[0]).item() / diff[1]
        self.metrics[idx]['mae']['num_steps'] += label.size(0)
        # Weight by batch size so 'value' is a running per-sample mean.
        self.metrics[idx]['mae']['sum'] += mae * label.size(0)
        self.metrics[idx]['mae']['value'] = round(
            self.metrics[idx]['mae']['sum'] / self.metrics[idx]['mae']['num_steps'], 3
        )
    def _abs_rel(self, label, prediction, idx=0):
        """Calculate Absolute Relative Error.

        Args:
            label (torch.Tensor): Ground truth.
            prediction (torch.Tensor): Prediction.
            idx (int, optional): Output head index. (default: 0)
        """
        diff = self._pred_label_diff(label, prediction, rel=True)
        # diff is None when the batch has no valid labels; counted as 0.
        abs_rel = 0
        if diff is not None:
            abs_rel = torch.sum(diff[0]).item() / diff[1]
        self.metrics[idx]['abs_rel']['num_steps'] += label.size(0)
        # Weight by batch size so 'value' is a running per-sample mean.
        self.metrics[idx]['abs_rel']['sum'] += abs_rel * label.size(0)
        self.metrics[idx]['abs_rel']['value'] = round(
            self.metrics[idx]['abs_rel']['sum'] / self.metrics[idx]['abs_rel']['num_steps'], 3
        )
def _setup_metrics(self, metrics):
"""Validate the evaluation metrics passed to the class.
Args:
metrics (:obj:`list` or :obj:`dict`): Metrics.
"""
if not isinstance(metrics[0], (list, tuple)):
metrics = [metrics]
for idx, metric_list in enumerate(metrics):
metric_dict = {}
for metric in metric_list:
metric_info = {'value': 0, 'sum': 0, 'num_steps': 0}
if metric == 'accuracy':
metric_info['func'] = self._accuracy
elif metric == 'rmse':
metric_info['func'] = self._rmse
elif metric == 'mae':
metric_info['func'] = self._mae
elif metric == 'abs_rel':
metric_info['func'] = self._abs_rel
elif metric == 'iou':
metric_info['func'] = self._iou
if 'func' in metric_info:
metric_dict[metric] = metric_info
if metric_dict:
self.metrics.append(metric_dict)
self.train_metrics.append({
x: [] for x in metric_dict.keys()
})
self.val_metrics.append({
x: [] for x in metric_dict.keys()
})
    def _calculate_metrics(self, labels, predictions):
        """Update evaluation metric values.

        Args:
            labels (:obj:`torch.Tensor` or :obj:`list`): Ground truth.
            predictions (:obj:`torch.Tensor` or :obj:`list`): Prediction.
        """
        predictions = self.activate_logits(predictions)
        # Normalize single-output models to the multi-output (list) shape.
        if not isinstance(labels, (list, tuple)):
            labels = [labels]
            predictions = [predictions]
        for idx, (label, prediction) in enumerate(zip(labels, predictions)):
            # If predictions are one-hot encoded
            if label.size() != prediction.size():
                # * 1.0 casts the argmax indices to float to match the label dtype.
                prediction = prediction.argmax(dim=1, keepdim=True) * 1.0
            # Outputs beyond the configured metric groups are ignored.
            if idx < len(self.metrics):
                for metric in self.metrics[idx]:
                    self.metrics[idx][metric]['func'](
                        label, prediction, idx=idx
                    )
def _reset_metrics(self):
"""Reset metric params."""
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
self.metrics[idx][metric]['value'] = 0
self.metrics[idx][metric]['sum'] = 0
self.metrics[idx][metric]['num_steps'] = 0
def _get_pbar_values(self, loss):
"""Create progress bar description.
Args:
loss (float): Loss value.
"""
pbar_values = [('loss', round(loss, 2))]
if self.metrics and self.record_train:
for idx in range(len(self.metrics)):
for metric, info in self.metrics[idx].items():
metric_name = metric
if len(self.metrics) > 1:
metric_name = f'{idx} - {metric}'
pbar_values.append((metric_name, info['value']))
return pbar_values
def update_training_history(self, loss):
"""Update the training history.
Args:
loss (float): Loss value.
"""
self.train_losses.append(loss)
if self.record_train:
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
self.train_metrics[idx][metric].append(
self.metrics[idx][metric]['value']
)
    def reset_history(self):
        """Reset the training history (losses, per-metric series, accumulators)."""
        self.train_losses = []
        self.val_losses = []
        for idx in range(len(self.metrics)):
            for metric in self.metrics[idx]:
                self.train_metrics[idx][metric] = []
                self.val_metrics[idx][metric] = []
        # Also zero the running metric accumulators.
        self._reset_metrics()
def activate_logits(self, logits):
"""Apply activation function to the logits if needed.
After this the logits will be sent for calculation of
loss or evaluation metrics.
Args:
logits (torch.Tensor): Model output
Returns:
(*torch.Tensor*): activated logits
"""
return logits
    def calculate_criterion(self, logits, targets, train=True):
        """Calculate loss.

        Args:
            logits (torch.Tensor): Prediction.
            targets (torch.Tensor): Ground truth.
            train (:obj:`bool`, optional): If True, loss is sent to the
                L1 regularization function. (default: True)

        Returns:
            (*torch.Tensor*): loss value
        """
        if self.activate_loss_logits:
            logits = self.activate_logits(logits)
        if train:
            # L1 regularization is applied only during training.
            return l1(self.model, self.criterion(logits, targets), self.l1_factor)
        return self.criterion(logits, targets)
def fetch_data(self, data):
"""Fetch data from loader and load it to GPU.
Args:
data (:obj:`tuple` or :obj:`list`): List containing inputs and targets.
Returns:
inputs and targets loaded to GPU.
"""
return data[0].to(self.device), data[1].to(self.device)
    def train_batch(self, data):
        """Train the model on a batch of data.

        Args:
            data (:obj:`tuple` or :obj:`list`): Input and target data for the model.

        Returns:
            (*float*): Batch loss.
        """
        inputs, targets = self.fetch_data(data)
        self.optimizer.zero_grad()  # Set gradients to zero before starting backpropagation
        y_pred = self.model(inputs)  # Predict output
        loss = self.calculate_criterion(y_pred, targets, train=True)  # Calculate loss
        # Perform backpropagation
        loss.backward()
        self.optimizer.step()
        if self.record_train:
            self._calculate_metrics(targets, y_pred)
        # Per-batch LR schedulers step here; epoch-level ones step elsewhere.
        # One Cycle Policy for learning rate
        if self.lr_schedulers['one_cycle_policy'] is not None:
            self.lr_schedulers['one_cycle_policy'].step()
        # Cyclic LR policy
        if self.lr_schedulers['cyclic_lr'] is not None:
            self.lr_schedulers['cyclic_lr'].step()
        return loss.item()
    def train_epoch(self, verbose=True):
        """Run an epoch of model training.

        Iterates once over ``self.train_loader``, training batch by batch
        and appending each batch loss (and metrics, if recorded) to the
        training history. Metric accumulators are reset at the end.

        Args:
            verbose (:obj:`bool`, optional): Print logs. (default: True)
        """
        self.model.train()
        if verbose:
            pbar = ProgressBar(target=len(self.train_loader), width=8)
        for batch_idx, data in enumerate(self.train_loader, 0):
            # Train a batch
            loss = self.train_batch(data)
            # Update Progress Bar
            if verbose:
                pbar_values = self._get_pbar_values(loss)
                pbar.update(batch_idx, values=pbar_values)
            # Update training history
            self.update_training_history(loss)
        # Close out the progress bar with the last batch's values
        if verbose:
            pbar_values = self._get_pbar_values(loss)
            pbar.add(1, values=pbar_values)
        # Metric accumulators are per-epoch; clear them for the next epoch
        self._reset_metrics()
def train_iterations(self, verbose=True):
"""Train model for the 'self.epochs' number of batches."""
self.model.train()
if verbose:
pbar = ProgressBar(target=self.epochs, width=8)
iterator = InfiniteDataLoader(self.train_loader)
for iteration in range(self.epochs):
# Train a batch
loss = self.train_batch(iterator.get_batch())
# Update Progress Bar
if verbose:
pbar_values = self._get_pbar_values(loss)
pbar.update(iteration, values=pbar_values)
# Update training history
self.update_training_history(loss)
if verbose:
pbar.add(1, values=pbar_values)
def evaluate(self, loader, verbose=True, log_message='Evaluation'):
"""Evaluate the model on a custom data loader.
Args:
loader (torch.utils.data.DataLoader): Data loader.
verbose (:obj:`bool`, optional): Print loss and metrics. (default: True)
log_message (str): Prefix for the logs which are printed at the end.
Returns:
loss and metric values
"""
start_time = time.time()
self.model.eval()
eval_loss = 0
with torch.no_grad():
for data in loader:
inputs, targets = self.fetch_data(data)
output = self.model(inputs) # Get trained model output
eval_loss += self.calculate_criterion(
output, targets, train=False
).item() # Sum up batch loss
self._calculate_metrics(targets, output) # Calculate evaluation metrics
eval_loss /= len(loader.dataset)
eval_metrics = deepcopy(self.metrics)
end_time = time.time()
# Time spent during validation
duration = int(end_time - start_time)
minutes = duration // 60
seconds = duration % 60
if verbose:
log = f'{log_message} (took {minutes} minutes, {seconds} seconds): Average loss: {eval_loss:.4f}'
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
log += f', {metric}: {self.metrics[idx][metric]['value']}'
log += '\n'
print(log)
self._reset_metrics()
return eval_loss, eval_metrics
def validate(self, verbose=True):
"""Validate an epoch of model training.
Args:
verbose (:obj:`bool`, optional): Print validation loss and metrics.
(default: True)
"""
eval_loss, eval_metrics = self.evaluate(
self.val_loader, verbose=verbose, log_message='Validation set'
)
# Update validation logs
self.val_losses.append(eval_loss)
for idx in range(len(eval_metrics)):
for metric in eval_metrics[idx]:
self.val_metrics[idx][metric].append(
eval_metrics[idx][metric]['value']
)
    def save_checkpoint(self, epoch=None):
        """Save model checkpoint.

        The monitor name on ``self.checkpoint`` decides which history
        series supplies the monitored value: 'train_loss' / 'val_loss'
        read the loss histories; any other name is looked up in the metric
        histories of the first output head.

        Args:
            epoch (:obj:`int`, optional): Current epoch number.
        """
        if self.checkpoint is not None:
            metric = None
            if self.checkpoint.monitor == 'train_loss':
                metric = self.train_losses[-1]
            elif self.checkpoint.monitor == 'val_loss':
                metric = self.val_losses[-1]
            elif self.metrics:
                if self.checkpoint.monitor.startswith('train_'):
                    if self.record_train:
                        # e.g. monitor 'train_accuracy' -> key 'accuracy'
                        metric = self.train_metrics[0][
                            self.checkpoint.monitor.split('train_')[-1]
                        ][-1]
                else:
                    # Assumed to be a 'val_<metric>' monitor; for any other
                    # prefix the split is a no-op and the lookup would
                    # raise KeyError — confirm monitors are validated upstream.
                    metric = self.val_metrics[0][
                        self.checkpoint.monitor.split('val_')[-1]
                    ][-1]
            else:
                # No metrics configured and monitor is not a loss
                print('Invalid metric function, can\'t save checkpoint.')
                return
            self.checkpoint(self.model, metric, epoch)
    def write_summary(self, epoch, train):
        """Write training summary in tensorboard.

        Writes the latest loss (and metric values, where recorded) for the
        given epoch via ``self.summary_writer``; no-op when no TensorBoard
        callback was registered.

        Args:
            epoch (int): Current epoch number.
            train (bool): If True, summary will be
                written for model training else it
                will be written for model validation.
        """
        if self.summary_writer is not None:
            if train:
                mode = 'train'
                # Write Images
                self.summary_writer.write_images(
                    self.model, self.activate_logits, f'prediction_epoch_{epoch}'
                )
                loss = self.train_losses[-1]
            else:
                mode = 'val'
                loss = self.val_losses[-1]
            # Write Loss
            self.summary_writer.write_scalar(
                f'Loss/{mode}', loss, epoch
            )
            # Training metrics are only meaningful when record_train is set
            if not train or self.record_train:
                for idx in range(len(self.metrics)):
                    for metric, info in self.metrics[idx].items():
                        self.summary_writer.write_scalar(
                            f'{idx}/{metric.title()}/{mode}',
                            info['value'], epoch
                        )
def fit(self, start_epoch=1, epochs=None, reset=True, verbose=True):
"""Perform model training.
Args:
start_epoch (:obj:`int`, optional): Start epoch for training.
(default: 1)
epochs (:obj:`int`, optional): Numbers of epochs/iterations to
train the model for. If no value is given, the original
value given during initialization of learner will be used.
reset (:obj:`bool`, optional): Flag to indicate that training
is starting from scratch. (default: True)
verbose (:obj:`bool`, optional): Print logs. (default: True)
"""
if reset:
self.reset_history()
if epochs is not None:
self.epochs = epochs
for epoch in range(start_epoch, start_epoch + self.epochs):
if verbose:
print(f'Epoch {epoch}:')
# Train an epoch
self.train_epoch(verbose=verbose)
self.write_summary(epoch, True)
# Validate the model
if self.val_loader is not None:
self.validate(verbose=verbose)
self.write_summary(epoch, False)
# Save model checkpoint
self.save_checkpoint(epoch)
# Call Step LR
if not self.lr_schedulers['step_lr'] is None:
self.lr_schedulers['step_lr'].step()
# Call Reduce LR on Plateau
if not self.lr_schedulers['lr_plateau'] is None:
self.lr_schedulers['lr_plateau'].step(self.val_losses[-1])
| import math
import time
import torch
from copy import deepcopy
from tensornet.engine.ops.regularizer import l1
from tensornet.engine.ops.checkpoint import ModelCheckpoint
from tensornet.engine.ops.tensorboard import TensorBoard
from tensornet.data.processing import InfiniteDataLoader
from tensornet.utils.progress_bar import ProgressBar
class Learner:
"""Model Trainer and Validator.
Args:
train_loader (torch.utils.data.DataLoader): Training data loader.
optimizer (torch.optim): Optimizer for the model.
criterion (torch.nn): Loss Function.
device (:obj:`str` or :obj:`torch.device`, optional): Device where the data
will be loaded. (default='cpu')
epochs (:obj:`int`, optional): Numbers of epochs/iterations to train the model for.
(default: 1)
l1_factor (:obj:`float`, optional): L1 regularization factor. (default: 0)
val_loader (:obj:`torch.utils.data.DataLoader`, optional): Validation data loader.
callbacks (:obj:`list`, optional): List of callbacks to be used during training.
metrics (:obj:`list`, optional): List of names of the metrics for model
evaluation.
*Note*: If the model has multiple outputs, then this will be a nested list
where each individual sub-list will specify the metrics which are to be used for
evaluating each output respectively. In such cases, the model checkpoint will
consider only the metric of the first output for saving checkpoints.
activate_loss_logits (:obj:`bool`, optional): If True, the logits will first pass
through the `activate_logits` function before going to the criterion.
(default: False)
record_train (:obj:`bool`, optional): If False, metrics will be calculated only
during validation. (default: True)
"""
def __init__(
self, train_loader, optimizer, criterion, device='cpu',
epochs=1, l1_factor=0.0, val_loader=None, callbacks=None, metrics=None,
activate_loss_logits=False, record_train=True
):
self.model = None
self.optimizer = optimizer
self.criterion = criterion
self.train_loader = train_loader
self.device = device
self.epochs = epochs
self.val_loader = val_loader
self.l1_factor = l1_factor
self.activate_loss_logits = activate_loss_logits
self.record_train = record_train
self.lr_schedulers = {
'step_lr': None,
'lr_plateau': None,
'one_cycle_policy': None,
'cyclic_lr': None,
}
self.checkpoint = None
self.summary_writer = None
if callbacks is not None:
self._setup_callbacks(callbacks)
# Training
self.train_losses = [] # Change in loss
self.train_metrics = [] # Change in evaluation metric
self.val_losses = [] # Change in loss
self.val_metrics = [] # Change in evaluation metric
# Set evaluation metrics
self.metrics = []
if metrics:
self._setup_metrics(metrics)
def _setup_callbacks(self, callbacks):
"""Extract callbacks passed to the class.
Args:
callbacks (list): List of callbacks.
"""
for callback in callbacks:
if isinstance(callback, torch.optim.lr_scheduler.StepLR):
self.lr_schedulers['step_lr'] = callback
elif isinstance(callback, torch.optim.lr_scheduler.ReduceLROnPlateau):
self.lr_schedulers['lr_plateau'] = callback
elif isinstance(callback, torch.optim.lr_scheduler.OneCycleLR):
self.lr_schedulers['one_cycle_policy'] = callback
elif isinstance(callback, ModelCheckpoint):
if callback.monitor.startswith('train_'):
if self.record_train:
self.checkpoint = callback
else:
raise ValueError(
'Cannot use checkpoint for a training metric if record_train is set to False'
)
else:
self.checkpoint = callback
elif isinstance(callback, TensorBoard):
self.summary_writer = callback
elif isinstance(callback, torch.optim.lr_scheduler.CyclicLR):
self.lr_schedulers['cyclic_lr'] = callback
def set_model(self, model):
"""Assign model to learner.
Args:
model (torch.nn.Module): Model Instance.
"""
self.model = model
if self.summary_writer is not None:
self.summary_writer.write_model(self.model)
def _accuracy(self, label, prediction, idx=0):
"""Calculate accuracy.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
self.metrics[idx]['accuracy']['sum'] += prediction.eq(
label.view_as(prediction)
).sum().item()
self.metrics[idx]['accuracy']['num_steps'] += len(label)
self.metrics[idx]['accuracy']['value'] = round(
100 * self.metrics[idx]['accuracy']['sum'] / self.metrics[idx]['accuracy']['num_steps'], 2
)
def _iou(self, label, prediction, idx=0):
"""Calculate Intersection over Union.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
# Remove 1 channel dimension
label = label.squeeze(1)
prediction = prediction.squeeze(1)
intersection = (prediction * label).sum(2).sum(1)
union = (prediction + label).sum(2).sum(1) - intersection
# epsilon is added to avoid 0/0
epsilon = 1e-6
iou = (intersection + epsilon) / (union + epsilon)
self.metrics[idx]['iou']['sum'] += iou.sum().item()
self.metrics[idx]['iou']['num_steps'] += label.size(0)
self.metrics[idx]['iou']['value'] = round(
self.metrics[idx]['iou']['sum'] / self.metrics[idx]['iou']['num_steps'], 3
)
def _pred_label_diff(self, label, prediction, rel=False):
"""Calculate the difference between label and prediction.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
rel (:obj:`bool`, optional): If True, return the relative
difference. (default: False)
Returns:
Difference between label and prediction
"""
# For numerical stability
valid_labels = label > 0.0001
_label = label[valid_labels]
_prediction = prediction[valid_labels]
valid_element_count = _label.size(0)
if valid_element_count > 0:
diff = torch.abs(_label - _prediction)
if rel:
diff = torch.div(diff, _label)
return diff, valid_element_count
def _rmse(self, label, prediction, idx=0):
"""Calculate Root Mean Square Error.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
diff = self._pred_label_diff(label, prediction)
rmse = 0
if diff is not None:
rmse = math.sqrt(torch.sum(torch.pow(diff[0], 2)) / diff[1])
self.metrics[idx]['rmse']['num_steps'] += label.size(0)
self.metrics[idx]['rmse']['sum'] += rmse * label.size(0)
self.metrics[idx]['rmse']['value'] = round(
self.metrics[idx]['rmse']['sum'] / self.metrics[idx]['rmse']['num_steps'], 3
)
def _mae(self, label, prediction, idx=0):
"""Calculate Mean Average Error.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
diff = self._pred_label_diff(label, prediction)
mae = 0
if diff is not None:
mae = torch.sum(diff[0]).item() / diff[1]
self.metrics[idx]['mae']['num_steps'] += label.size(0)
self.metrics[idx]['mae']['sum'] += mae * label.size(0)
self.metrics[idx]['mae']['value'] = round(
self.metrics[idx]['mae']['sum'] / self.metrics[idx]['mae']['num_steps'], 3
)
def _abs_rel(self, label, prediction, idx=0):
"""Calculate Absolute Relative Error.
Args:
label (torch.Tensor): Ground truth.
prediction (torch.Tensor): Prediction.
"""
diff = self._pred_label_diff(label, prediction, rel=True)
abs_rel = 0
if diff is not None:
abs_rel = torch.sum(diff[0]).item() / diff[1]
self.metrics[idx]['abs_rel']['num_steps'] += label.size(0)
self.metrics[idx]['abs_rel']['sum'] += abs_rel * label.size(0)
self.metrics[idx]['abs_rel']['value'] = round(
self.metrics[idx]['abs_rel']['sum'] / self.metrics[idx]['abs_rel']['num_steps'], 3
)
def _setup_metrics(self, metrics):
"""Validate the evaluation metrics passed to the class.
Args:
metrics (:obj:`list` or :obj:`dict`): Metrics.
"""
if not isinstance(metrics[0], (list, tuple)):
metrics = [metrics]
for idx, metric_list in enumerate(metrics):
metric_dict = {}
for metric in metric_list:
metric_info = {'value': 0, 'sum': 0, 'num_steps': 0}
if metric == 'accuracy':
metric_info['func'] = self._accuracy
elif metric == 'rmse':
metric_info['func'] = self._rmse
elif metric == 'mae':
metric_info['func'] = self._mae
elif metric == 'abs_rel':
metric_info['func'] = self._abs_rel
elif metric == 'iou':
metric_info['func'] = self._iou
if 'func' in metric_info:
metric_dict[metric] = metric_info
if metric_dict:
self.metrics.append(metric_dict)
self.train_metrics.append({
x: [] for x in metric_dict.keys()
})
self.val_metrics.append({
x: [] for x in metric_dict.keys()
})
def _calculate_metrics(self, labels, predictions):
"""Update evaluation metric values.
Args:
label (:obj:`torch.Tensor` or :obj:`dict`): Ground truth.
prediction (:obj:`torch.Tensor` or :obj:`dict`): Prediction.
"""
predictions = self.activate_logits(predictions)
if not isinstance(labels, (list, tuple)):
labels = [labels]
predictions = [predictions]
for idx, (label, prediction) in enumerate(zip(labels, predictions)):
# If predictions are one-hot encoded
if label.size() != prediction.size():
prediction = prediction.argmax(dim=1, keepdim=True) * 1.0
if idx < len(self.metrics):
for metric in self.metrics[idx]:
self.metrics[idx][metric]['func'](
label, prediction, idx=idx
)
def _reset_metrics(self):
"""Reset metric params."""
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
self.metrics[idx][metric]['value'] = 0
self.metrics[idx][metric]['sum'] = 0
self.metrics[idx][metric]['num_steps'] = 0
def _get_pbar_values(self, loss):
"""Create progress bar description.
Args:
loss (float): Loss value.
"""
pbar_values = [('loss', round(loss, 2))]
if self.metrics and self.record_train:
for idx in range(len(self.metrics)):
for metric, info in self.metrics[idx].items():
metric_name = metric
if len(self.metrics) > 1:
metric_name = f'{idx} - {metric}'
pbar_values.append((metric_name, info['value']))
return pbar_values
def update_training_history(self, loss):
"""Update the training history.
Args:
loss (float): Loss value.
"""
self.train_losses.append(loss)
if self.record_train:
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
self.train_metrics[idx][metric].append(
self.metrics[idx][metric]['value']
)
def reset_history(self):
"""Reset the training history"""
self.train_losses = []
self.val_losses = []
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
self.train_metrics[idx][metric] = []
self.val_metrics[idx][metric] = []
self._reset_metrics()
def activate_logits(self, logits):
"""Apply activation function to the logits if needed.
After this the logits will be sent for calculation of
loss or evaluation metrics.
Args:
logits (torch.Tensor): Model output
Returns:
(*torch.Tensor*): activated logits
"""
return logits
def calculate_criterion(self, logits, targets, train=True):
"""Calculate loss.
Args:
logits (torch.Tensor): Prediction.
targets (torch.Tensor): Ground truth.
train (:obj:`bool`, optional): If True, loss is sent to the
L1 regularization function. (default: True)
Returns:
(*torch.Tensor*): loss value
"""
if self.activate_loss_logits:
logits = self.activate_logits(logits)
if train:
return l1(self.model, self.criterion(logits, targets), self.l1_factor)
return self.criterion(logits, targets)
def fetch_data(self, data):
"""Fetch data from loader and load it to GPU.
Args:
data (:obj:`tuple` or :obj:`list`): List containing inputs and targets.
Returns:
inputs and targets loaded to GPU.
"""
return data[0].to(self.device), data[1].to(self.device)
def train_batch(self, data):
"""Train the model on a batch of data.
Args:
data (:obj:`tuple` or :obj:`list`): Input and target data for the model.
Returns:
(*float*): Batch loss.
"""
inputs, targets = self.fetch_data(data)
self.optimizer.zero_grad() # Set gradients to zero before starting backpropagation
y_pred = self.model(inputs) # Predict output
loss = self.calculate_criterion(y_pred, targets, train=True) # Calculate loss
# Perform backpropagation
loss.backward()
self.optimizer.step()
if self.record_train:
self._calculate_metrics(targets, y_pred)
# One Cycle Policy for learning rate
if self.lr_schedulers['one_cycle_policy'] is not None:
self.lr_schedulers['one_cycle_policy'].step()
# Cyclic LR policy
if self.lr_schedulers['cyclic_lr'] is not None:
self.lr_schedulers['cyclic_lr'].step()
return loss.item()
def train_epoch(self, verbose=True):
"""Run an epoch of model training.
Args:
verbose (:obj:`bool`, optional): Print logs. (default: True)
"""
self.model.train()
if verbose:
pbar = ProgressBar(target=len(self.train_loader), width=8)
for batch_idx, data in enumerate(self.train_loader, 0):
# Train a batch
loss = self.train_batch(data)
# Update Progress Bar
if verbose:
pbar_values = self._get_pbar_values(loss)
pbar.update(batch_idx, values=pbar_values)
# Update training history
self.update_training_history(loss)
if verbose:
pbar_values = self._get_pbar_values(loss)
pbar.add(1, values=pbar_values)
self._reset_metrics()
def train_iterations(self, verbose=True):
"""Train model for the 'self.epochs' number of batches."""
self.model.train()
if verbose:
pbar = ProgressBar(target=self.epochs, width=8)
iterator = InfiniteDataLoader(self.train_loader)
for iteration in range(self.epochs):
# Train a batch
loss = self.train_batch(iterator.get_batch())
# Update Progress Bar
if verbose:
pbar_values = self._get_pbar_values(loss)
pbar.update(iteration, values=pbar_values)
# Update training history
self.update_training_history(loss)
if verbose:
pbar.add(1, values=pbar_values)
def evaluate(self, loader, verbose=True, log_message='Evaluation'):
"""Evaluate the model on a custom data loader.
Args:
loader (torch.utils.data.DataLoader): Data loader.
verbose (:obj:`bool`, optional): Print loss and metrics. (default: True)
log_message (str): Prefix for the logs which are printed at the end.
Returns:
loss and metric values
"""
start_time = time.time()
self.model.eval()
eval_loss = 0
with torch.no_grad():
for data in loader:
inputs, targets = self.fetch_data(data)
output = self.model(inputs) # Get trained model output
eval_loss += self.calculate_criterion(
output, targets, train=False
).item() # Sum up batch loss
self._calculate_metrics(targets, output) # Calculate evaluation metrics
eval_loss /= len(loader.dataset)
eval_metrics = deepcopy(self.metrics)
end_time = time.time()
# Time spent during validation
duration = int(end_time - start_time)
minutes = duration // 60
seconds = duration % 60
if verbose:
log = f'{log_message} (took {minutes} minutes, {seconds} seconds): Average loss: {eval_loss:.4f}'
for idx in range(len(self.metrics)):
for metric in self.metrics[idx]:
log += f', {metric}: {self.metrics[idx][metric]["value"]}'
log += '\n'
print(log)
self._reset_metrics()
return eval_loss, eval_metrics
def validate(self, verbose=True):
"""Validate an epoch of model training.
Args:
verbose (:obj:`bool`, optional): Print validation loss and metrics.
(default: True)
"""
eval_loss, eval_metrics = self.evaluate(
self.val_loader, verbose=verbose, log_message='Validation set'
)
# Update validation logs
self.val_losses.append(eval_loss)
for idx in range(len(eval_metrics)):
for metric in eval_metrics[idx]:
self.val_metrics[idx][metric].append(
eval_metrics[idx][metric]['value']
)
def save_checkpoint(self, epoch=None):
"""Save model checkpoint.
Args:
epoch (:obj:`int`, optional): Current epoch number.
"""
if self.checkpoint is not None:
metric = None
if self.checkpoint.monitor == 'train_loss':
metric = self.train_losses[-1]
elif self.checkpoint.monitor == 'val_loss':
metric = self.val_losses[-1]
elif self.metrics:
if self.checkpoint.monitor.startswith('train_'):
if self.record_train:
metric = self.train_metrics[0][
self.checkpoint.monitor.split('train_')[-1]
][-1]
else:
metric = self.val_metrics[0][
self.checkpoint.monitor.split('val_')[-1]
][-1]
else:
print('Invalid metric function, can\'t save checkpoint.')
return
self.checkpoint(self.model, metric, epoch)
def write_summary(self, epoch, train):
"""Write training summary in tensorboard.
Args:
epoch (int): Current epoch number.
train (bool): If True, summary will be
written for model training else it
will be writtern for model validation.
"""
if self.summary_writer is not None:
if train:
mode = 'train'
# Write Images
self.summary_writer.write_images(
self.model, self.activate_logits, f'prediction_epoch_{epoch}'
)
loss = self.train_losses[-1]
else:
mode = 'val'
loss = self.val_losses[-1]
# Write Loss
self.summary_writer.write_scalar(
f'Loss/{mode}', loss, epoch
)
if not train or self.record_train:
for idx in range(len(self.metrics)):
for metric, info in self.metrics[idx].items():
self.summary_writer.write_scalar(
f'{idx}/{metric.title()}/{mode}',
info['value'], epoch
)
def fit(self, start_epoch=1, epochs=None, reset=True, verbose=True):
"""Perform model training.
Args:
start_epoch (:obj:`int`, optional): Start epoch for training.
(default: 1)
epochs (:obj:`int`, optional): Numbers of epochs/iterations to
train the model for. If no value is given, the original
value given during initialization of learner will be used.
reset (:obj:`bool`, optional): Flag to indicate that training
is starting from scratch. (default: True)
verbose (:obj:`bool`, optional): Print logs. (default: True)
"""
if reset:
self.reset_history()
if epochs is not None:
self.epochs = epochs
for epoch in range(start_epoch, start_epoch + self.epochs):
if verbose:
print(f'Epoch {epoch}:')
# Train an epoch
self.train_epoch(verbose=verbose)
self.write_summary(epoch, True)
# Validate the model
if self.val_loader is not None:
self.validate(verbose=verbose)
self.write_summary(epoch, False)
# Save model checkpoint
self.save_checkpoint(epoch)
# Call Step LR
if not self.lr_schedulers['step_lr'] is None:
self.lr_schedulers['step_lr'].step()
# Call Reduce LR on Plateau
if not self.lr_schedulers['lr_plateau'] is None:
self.lr_schedulers['lr_plateau'].step(self.val_losses[-1])
|
from __future__ import print_function, unicode_literals
import sys
from typing import List
from art import tprint
from PyInquirer import style_from_dict, Token, prompt
from squirrel_maze.resources.actor import Actor
from squirrel_maze.resources import action as sm_action
from squirrel_maze.resources import combat as sm_combat
from squirrel_maze.resources import helpers as sm_helpers
from squirrel_maze.resources import db_helpers as sm_db_helpers
def main_menu() -> None:
    """Show the top-level menu and dispatch on the player's selection.

    'combat' routes to the location picker; 'exit' asks for confirmation
    before quitting.
    """
    tprint("squirrel_maze")
    # NOTE(review): the returned style is discarded here and in every other
    # menu — presumably prompt() should receive it; confirm.
    _get_default_style()
    choices = [{"name": "Combat", "value": "combat"}, {"name": "Exit", "value": "exit"}]
    questions = [
        {
            "type": "list",
            "message": "Main Menu",
            "name": "selection",
            "choices": choices,
        }
    ]
    answers = prompt(questions)
    # Placeholder branch: matches any selection value containing the
    # literal substring "not implemented"
    if "not implemented" in answers["selection"]:
        print("Feature not implemented")
        sys.exit()
    elif answers["selection"] == "exit":
        _exit_game_menu("main")
    else:
        if answers["selection"] == "combat":
            _location_menu()
            # combat_menu()
def format_location_item(location: dict, enemy: dict) -> dict:
    """Build a menu choice entry for a location/enemy pairing.

    Args:
        location: Location record with at least 'name' and 'id' keys.
        enemy: Actor record with at least 'name' and 'id' keys.

    Returns:
        Choice dict with a display name, the location id as value, and
        the enemy id.
    """
    # Use distinct inner quotes: reusing the f-string's own double quotes
    # is a SyntaxError on every Python version before 3.12.
    return {
        "name": f"{location['name']} - {enemy['name']}",
        "value": location["id"],
        "enemy_id": enemy["id"],
    }
def combat_menu() -> None:
    """Legacy opponent picker offering a single hard-coded goblin fight.

    NOTE(review): appears superseded by ``_location_menu`` (see the
    "is this even used?" comment below) — confirm before removing.
    """
    _get_default_style()
    choices = [
        {"name": "Big Goblin", "value": "goblin"},
        {"name": "Return", "value": "return"},
    ]
    questions = [
        {
            "type": "list",
            "name": "selection",
            "message": "Choose an opponent",
            "choices": choices,
        }
    ]
    answers = prompt(questions)
    if "not implemented" in answers["selection"]:
        print("Feature not implemented")
        sys.exit()
    elif answers["selection"] == "return":
        go_to_menu("main")
    else:
        # is this even used?
        if answers["selection"] == "goblin":
            actors = []
            # Actor ids 0 and 1 are assumed to be the player and the
            # goblin in the JSON db — TODO confirm
            db = sm_db_helpers.Database("squirrel_maze/data/db.json")
            actors.append(db.get_actor(0, pc_type="pc", affiliation="friendly"))
            actors.append(db.get_actor(1, pc_type="npc", affiliation="unfriendly"))
            db.close()
            cur_battle = sm_combat.Combat(actors)
            _print_battle_header(cur_battle)
            cur_battle.battle()
def battle_menu(active_actor: Actor, actors: List[Actor]) -> None:
    """Prompt the active actor for an action, then pick a target.

    Only "Fight" is currently offered, so the prompt result is discarded
    and control always proceeds to target selection.

    Args:
        active_actor: Actor whose turn it is.
        actors: All combatants in the battle.
    """
    _get_default_style()
    choices = [{"key": "0", "name": "Fight", "value": "fight"}]
    questions = [
        {
            "type": "list",
            "name": "selection",
            "message": "{} - Choose an action".format(active_actor.name),
            "choices": choices,
        }
    ]
    prompt(questions)
    _unfriendly_target_select_menu(active_actor, actors)
def go_to_menu(menu_name: str) -> None:
    """Hand control back to the named top-level menu.

    Unrecognized names are silently ignored.
    """
    if menu_name == "main":
        return main_menu()
    if menu_name == "combat":
        return combat_menu()
def victory() -> None:
    """Announce the player's win, then return to the main menu."""
    print("Congratulations, you win!")
    go_to_menu("main")
def defeat() -> None:
    """Announce the player's loss, then return to the main menu."""
    print("You have lost...")
    go_to_menu("main")
def _get_default_style():  # type: ignore
    """Build the shared PyInquirer prompt style.

    NOTE(review): every caller discards the return value — the style is
    presumably meant to be passed to ``prompt``; confirm.
    """
    palette = {
        Token.Separator: "#cc5454",
        Token.QuestionMark: "#673ab7 bold",
        Token.Selected: "#cc5454",  # default
        Token.Pointer: "#673ab7 bold",
        Token.Instruction: "",  # default
        Token.Answer: "#f44336 bold",
        Token.Question: "",
    }
    return style_from_dict(palette)
def _unfriendly_target_select_menu(active_actor: Actor, actors: List[Actor]):  # type: ignore
    """List unfriendly actors and run a fight against the chosen one.

    Args:
        active_actor: The attacking actor.
        actors: All combatants; only "unfriendly" ones are offered as targets.
    """
    _get_default_style()
    choices = []
    for actor in sm_helpers.get_affiliated_actors("unfriendly", actors):
        choices.append(
            {
                "key": actor.actor_id,
                "name": "{}) {}".format(actor.actor_id, actor.name),
                "value": actor.actor_id,
            }
        )
    questions = [
        {
            "type": "list",
            "name": "selection",
            "message": "Fight -> Choose a target",
            "choices": choices,
        }
    ]
    answers = prompt(questions)
    # NOTE(review): if no actor matches the selection, target_actor stays
    # unbound and the fight call raises NameError — relies on the prompt
    # only offering valid actor_ids; confirm.
    for actor in actors:
        if actor.actor_id == answers["selection"]:
            target_actor = actor
    sm_action.fight(active_actor, target_actor)
def _exit_game_menu(prev_menu: str) -> None:
    """Ask for exit confirmation; quit on Yes, otherwise return to the
    previous menu."""
    _get_default_style()
    question = {
        "type": "list",
        "name": "selection",
        "message": "Exit?",
        "choices": ["Yes", "No"],
    }
    answers = prompt([question])
    if answers["selection"] != "Yes":
        go_to_menu(prev_menu)
        return
    sys.exit()
def _location_menu() -> None:
    """Pick a location (with its resident enemy) and start the battle.

    The sentinel value 9999 marks the "Return" entry.
    """
    _get_default_style()
    choices = _get_location_menu_list()
    choices.append({"name": "Return", "value": 9999})
    questions = [
        {
            "type": "list",
            "name": "selection",
            "message": "Choose an opponent",
            "choices": choices,
        }
    ]
    answers = prompt(questions)
    if answers["selection"] == 9999:
        go_to_menu("main")
    else:
        actors = []
        db = sm_db_helpers.Database("squirrel_maze/data/db.json")
        # Actor id 2 is assumed to be the player character — TODO confirm
        actors.append(db.get_actor(2, pc_type="pc", affiliation="friendly"))
        # NOTE(review): indexes the choices list by the selected location
        # id; only correct while location ids equal list positions — confirm.
        actors.append(
            db.get_actor(
                choices[answers["selection"]]["enemy_id"],
                pc_type="npc",
                affiliation="unfriendly",
            )
        )
        db.close()
        cur_battle = sm_combat.Combat(actors)
        _print_battle_header(cur_battle)
        cur_battle.battle()
def _get_location_menu_list() -> List:
    """Build the location-menu entries, one per location with its enemy.

    Returns:
        List of choice dicts produced by ``format_location_item``.
    """
    db = sm_db_helpers.Database("squirrel_maze/data/db.json")
    try:
        _location_menu_list = []
        for location in db.get_table_contents("locations"):
            enemy = db.get_actor_by_id(location["npcs"])
            _location_menu_list.append(format_location_item(location, enemy))
    finally:
        # Previously the handle leaked if a lookup raised; always close it.
        db.close()
    return _location_menu_list
def _print_battle_header(battle) -> None: # type: ignore
print("Battle between:")
for team in battle.teams:
print("Team: {}: {}".format(team, battle.teams[team]))
| from __future__ import print_function, unicode_literals
import sys
from typing import List
from art import tprint
from PyInquirer import style_from_dict, Token, prompt
from squirrel_maze.resources.actor import Actor
from squirrel_maze.resources import action as sm_action
from squirrel_maze.resources import combat as sm_combat
from squirrel_maze.resources import helpers as sm_helpers
from squirrel_maze.resources import db_helpers as sm_db_helpers
def main_menu() -> None:
tprint("squirrel_maze")
_get_default_style()
choices = [{"name": "Combat", "value": "combat"}, {"name": "Exit", "value": "exit"}]
questions = [
{
"type": "list",
"message": "Main Menu",
"name": "selection",
"choices": choices,
}
]
answers = prompt(questions)
if "not implemented" in answers["selection"]:
print("Feature not implemented")
sys.exit()
elif answers["selection"] == "exit":
_exit_game_menu("main")
else:
if answers["selection"] == "combat":
_location_menu()
# combat_menu()
def format_location_item(location: dict, enemy: dict) -> dict:
return {
"name": f"{location['name']} - {enemy['name']}",
"value": location["id"],
"enemy_id": enemy["id"],
}
def combat_menu() -> None:
_get_default_style()
choices = [
{"name": "Big Goblin", "value": "goblin"},
{"name": "Return", "value": "return"},
]
questions = [
{
"type": "list",
"name": "selection",
"message": "Choose an opponent",
"choices": choices,
}
]
answers = prompt(questions)
if "not implemented" in answers["selection"]:
print("Feature not implemented")
sys.exit()
elif answers["selection"] == "return":
go_to_menu("main")
else:
# is this even used?
if answers["selection"] == "goblin":
actors = []
db = sm_db_helpers.Database("squirrel_maze/data/db.json")
actors.append(db.get_actor(0, pc_type="pc", affiliation="friendly"))
actors.append(db.get_actor(1, pc_type="npc", affiliation="unfriendly"))
db.close()
cur_battle = sm_combat.Combat(actors)
_print_battle_header(cur_battle)
cur_battle.battle()
def battle_menu(active_actor: Actor, actors: List[Actor]) -> None:
_get_default_style()
choices = [{"key": "0", "name": "Fight", "value": "fight"}]
questions = [
{
"type": "list",
"name": "selection",
"message": "{} - Choose an action".format(active_actor.name),
"choices": choices,
}
]
prompt(questions)
_unfriendly_target_select_menu(active_actor, actors)
def go_to_menu(menu_name: str) -> None:
if menu_name == "main":
main_menu()
elif menu_name == "combat":
combat_menu()
def victory() -> None:
print("Congratulations, you win!")
go_to_menu("main")
def defeat() -> None:
print("You have lost...")
go_to_menu("main")
def _get_default_style(): # type: ignore
style = style_from_dict(
{
Token.Separator: "#cc5454",
Token.QuestionMark: "#673ab7 bold",
Token.Selected: "#cc5454", # default
Token.Pointer: "#673ab7 bold",
Token.Instruction: "", # default
Token.Answer: "#f44336 bold",
Token.Question: "",
}
)
return style
def _unfriendly_target_select_menu(active_actor: Actor, actors: List[Actor]): # type: ignore
_get_default_style()
choices = []
for actor in sm_helpers.get_affiliated_actors("unfriendly", actors):
choices.append(
{
"key": actor.actor_id,
"name": "{}) {}".format(actor.actor_id, actor.name),
"value": actor.actor_id,
}
)
questions = [
{
"type": "list",
"name": "selection",
"message": "Fight -> Choose a target",
"choices": choices,
}
]
answers = prompt(questions)
for actor in actors:
if actor.actor_id == answers["selection"]:
target_actor = actor
sm_action.fight(active_actor, target_actor)
def _exit_game_menu(prev_menu: str) -> None:
_get_default_style()
choices = ["Yes", "No"]
questions = [
{"type": "list", "name": "selection", "message": "Exit?", "choices": choices}
]
answers = prompt(questions)
if answers["selection"] == "Yes":
sys.exit()
else:
go_to_menu(prev_menu)
def _location_menu() -> None:
_get_default_style()
choices = _get_location_menu_list()
choices.append({"name": "Return", "value": 9999})
questions = [
{
"type": "list",
"name": "selection",
"message": "Choose an opponent",
"choices": choices,
}
]
answers = prompt(questions)
if answers["selection"] == 9999:
go_to_menu("main")
else:
actors = []
db = sm_db_helpers.Database("squirrel_maze/data/db.json")
actors.append(db.get_actor(2, pc_type="pc", affiliation="friendly"))
actors.append(
db.get_actor(
choices[answers["selection"]]["enemy_id"],
pc_type="npc",
affiliation="unfriendly",
)
)
db.close()
cur_battle = sm_combat.Combat(actors)
_print_battle_header(cur_battle)
cur_battle.battle()
def _get_location_menu_list() -> List:
    """Build one menu entry per location, paired with its npc enemy record."""
    db = sm_db_helpers.Database("squirrel_maze/data/db.json")
    entries = [
        format_location_item(location, db.get_actor_by_id(location["npcs"]))
        for location in db.get_table_contents("locations")
    ]
    db.close()
    return entries
def _print_battle_header(battle) -> None: # type: ignore
print("Battle between:")
for team in battle.teams:
print("Team: {}: {}".format(team, battle.teams[team]))
|
#!/usr/bin/env python3
import multiprocessing
import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import replace
from functools import partial
import inspect
from io import BytesIO, TextIOWrapper
import os
from pathlib import Path
from platform import system
import regex as re
import sys
from tempfile import TemporaryDirectory
import types
from typing import (
Any,
BinaryIO,
Callable,
Dict,
Generator,
List,
Iterator,
TypeVar,
)
import unittest
from unittest.mock import patch, MagicMock
import click
from click import unstyle
from click.testing import CliRunner
import black
from black import Feature, TargetVersion
from pathspec import PathSpec
# Import other test classes
from tests.util import THIS_DIR, read_data, DETERMINISTIC_HEADER
from .test_primer import PrimerCLITests # noqa: F401
# Shared mode/helpers for the whole suite; experimental string processing is
# exercised by default.
DEFAULT_MODE = black.FileMode(experimental_string_processing=True)
ff = partial(black.format_file_in_place, mode=DEFAULT_MODE, fast=True)  # in-place file format
fs = partial(black.format_str, mode=DEFAULT_MODE)  # format a source string
THIS_FILE = Path(__file__)
# Every target version from 3.6 up (all support f-strings, underscores, etc.).
PY36_VERSIONS = {
    TargetVersion.PY36,
    TargetVersion.PY37,
    TargetVersion.PY38,
    TargetVersion.PY39,
}
PY36_ARGS = [f"--target-version={version.name.lower()}" for version in PY36_VERSIONS]
T = TypeVar("T")
R = TypeVar("R")
def dump_to_stderr(*output: str) -> str:
    """Join *output* with newlines and surround the blob with newlines.

    Used as a patch target for black.dump_to_file in tests.
    """
    joined = "\n".join(output)
    return f"\n{joined}\n"
@contextmanager
def cache_dir(exists: bool = True) -> Iterator[Path]:
    """Temporarily point black.CACHE_DIR at a scratch directory.

    With exists=False the yielded path is a not-yet-created subdirectory.
    """
    with TemporaryDirectory() as workspace:
        scratch = Path(workspace)
        if not exists:
            scratch = scratch / "new"
        with patch("black.CACHE_DIR", scratch):
            yield scratch
@contextmanager
def event_loop() -> Iterator[None]:
    """Install a fresh asyncio event loop for the duration of the block."""
    policy = asyncio.get_event_loop_policy()
    fresh = policy.new_event_loop()
    asyncio.set_event_loop(fresh)
    try:
        yield
    finally:
        # Close the loop we created; the original loop is not restored,
        # matching the previous behavior.
        fresh.close()
class FakeContext(click.Context):
    """A fake click Context for when calling functions that need it."""
    def __init__(self) -> None:
        # Deliberately skips click.Context.__init__ (a real Context requires a
        # Command); only default_map is provided for the code under test.
        self.default_map: Dict[str, Any] = {}
class FakeParameter(click.Parameter):
    """A fake click Parameter for when calling functions that need it."""
    def __init__(self) -> None:
        # Intentionally empty: skips click.Parameter's required setup.
        pass
class BlackRunner(CliRunner):
    """Modify CliRunner so that stderr is not merged with stdout.
    This is a hack that can be removed once we depend on Click 7.x"""
    def __init__(self) -> None:
        # Byte buffers backing each stream, plus the bytes harvested from them
        # after every isolation block (read by invokeBlack for diagnostics).
        self.stderrbuf = BytesIO()
        self.stdoutbuf = BytesIO()
        self.stdout_bytes = b""
        self.stderr_bytes = b""
        super().__init__()
    @contextmanager
    def isolation(self, *args: Any, **kwargs: Any) -> Generator[BinaryIO, None, None]:
        with super().isolation(*args, **kwargs) as output:
            try:
                # Redirect stderr into our own buffer; Click's isolation only
                # captures/merges stdout by itself.
                hold_stderr = sys.stderr
                sys.stderr = TextIOWrapper(self.stderrbuf, encoding=self.charset)
                yield output
            finally:
                # Harvest both streams (sys.stdout here is Click's capture
                # wrapper), then restore the real stderr.
                self.stdout_bytes = sys.stdout.buffer.getvalue()  # type: ignore
                self.stderr_bytes = sys.stderr.buffer.getvalue()  # type: ignore
                sys.stderr = hold_stderr
class BlackTestCase(unittest.TestCase):
maxDiff = None
_diffThreshold = 2 ** 20
def assertFormatEqual(self, expected: str, actual: str) -> None:
if actual != expected and not os.environ.get("SKIP_AST_PRINT"):
bdv: black.DebugVisitor[Any]
black.out("Expected tree:", fg="green")
try:
exp_node = black.lib2to3_parse(expected)
bdv = black.DebugVisitor()
list(bdv.visit(exp_node))
except Exception as ve:
black.err(str(ve))
black.out("Actual tree:", fg="red")
try:
exp_node = black.lib2to3_parse(actual)
bdv = black.DebugVisitor()
list(bdv.visit(exp_node))
except Exception as ve:
black.err(str(ve))
self.assertMultiLineEqual(expected, actual)
def invokeBlack(
self, args: List[str], exit_code: int = 0, ignore_config: bool = True
) -> None:
runner = BlackRunner()
if ignore_config:
args = ["--verbose", "--config", str(THIS_DIR / "empty.toml"), *args]
result = runner.invoke(black.main, args)
self.assertEqual(
result.exit_code,
exit_code,
msg=(
f"Failed with args: {args}\n"
f"stdout: {runner.stdout_bytes.decode()!r}\n"
f"stderr: {runner.stderr_bytes.decode()!r}\n"
f"exception: {result.exception}"
),
)
@patch("black.dump_to_file", dump_to_stderr)
def checkSourceFile(self, name: str, mode: black.FileMode = DEFAULT_MODE) -> None:
path = THIS_DIR.parent / name
source, expected = read_data(str(path), data=False)
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, mode)
self.assertFalse(ff(path))
@patch("black.dump_to_file", dump_to_stderr)
def test_empty(self) -> None:
source = expected = ""
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
def test_empty_ff(self) -> None:
expected = ""
tmp_file = Path(black.dump_to_file())
try:
self.assertFalse(ff(tmp_file, write_back=black.WriteBack.YES))
with open(tmp_file, encoding="utf8") as f:
actual = f.read()
finally:
os.unlink(tmp_file)
self.assertFormatEqual(expected, actual)
def test_run_on_test_black(self) -> None:
self.checkSourceFile("tests/test_black.py")
def test_run_on_test_blackd(self) -> None:
self.checkSourceFile("tests/test_blackd.py")
def test_black(self) -> None:
self.checkSourceFile("src/black/__init__.py")
def test_pygram(self) -> None:
self.checkSourceFile("src/blib2to3/pygram.py")
def test_pytree(self) -> None:
self.checkSourceFile("src/blib2to3/pytree.py")
def test_conv(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/conv.py")
def test_driver(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/driver.py")
def test_grammar(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/grammar.py")
def test_literals(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/literals.py")
def test_parse(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/parse.py")
def test_pgen(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/pgen.py")
def test_tokenize(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/tokenize.py")
def test_token(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/token.py")
def test_setup(self) -> None:
self.checkSourceFile("setup.py")
def test_piping(self) -> None:
source, expected = read_data("src/black/__init__", data=False)
result = BlackRunner().invoke(
black.main,
["-", "--fast", f"--line-length={black.DEFAULT_LINE_LENGTH}"],
input=BytesIO(source.encode("utf8")),
)
self.assertEqual(result.exit_code, 0)
self.assertFormatEqual(expected, result.output)
black.assert_equivalent(source, result.output)
black.assert_stable(source, result.output, DEFAULT_MODE)
def test_piping_diff(self) -> None:
diff_header = re.compile(
r"(STDIN|STDOUT)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d "
r"\+\d\d\d\d"
)
source, _ = read_data("expression.py")
expected, _ = read_data("expression.diff")
config = THIS_DIR / "data" / "empty_pyproject.toml"
args = [
"-",
"--fast",
f"--line-length={black.DEFAULT_LINE_LENGTH}",
"--diff",
f"--config={config}",
]
result = BlackRunner().invoke(
black.main, args, input=BytesIO(source.encode("utf8"))
)
self.assertEqual(result.exit_code, 0)
actual = diff_header.sub(DETERMINISTIC_HEADER, result.output)
actual = actual.rstrip() + "\n" # the diff output has a trailing space
self.assertEqual(expected, actual)
def test_piping_diff_with_color(self) -> None:
source, _ = read_data("expression.py")
config = THIS_DIR / "data" / "empty_pyproject.toml"
args = [
"-",
"--fast",
f"--line-length={black.DEFAULT_LINE_LENGTH}",
"--diff",
"--color",
f"--config={config}",
]
result = BlackRunner().invoke(
black.main, args, input=BytesIO(source.encode("utf8"))
)
actual = result.output
# Again, the contents are checked in a different test, so only look for colors.
self.assertIn("\033[1;37m", actual)
self.assertIn("\033[36m", actual)
self.assertIn("\033[32m", actual)
self.assertIn("\033[31m", actual)
self.assertIn("\033[0m", actual)
@patch("black.dump_to_file", dump_to_stderr)
def test_function(self) -> None:
source, expected = read_data("function")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_function2(self) -> None:
source, expected = read_data("function2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def _test_wip(self) -> None:
source, expected = read_data("wip")
sys.settrace(tracefunc)
mode = replace(
DEFAULT_MODE,
experimental_string_processing=False,
target_versions={black.TargetVersion.PY38},
)
actual = fs(source, mode=mode)
sys.settrace(None)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, black.FileMode())
@patch("black.dump_to_file", dump_to_stderr)
def test_function_trailing_comma(self) -> None:
source, expected = read_data("function_trailing_comma")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@unittest.expectedFailure
@patch("black.dump_to_file", dump_to_stderr)
def test_trailing_comma_optional_parens_stability1(self) -> None:
source, _expected = read_data("trailing_comma_optional_parens1")
actual = fs(source)
black.assert_stable(source, actual, DEFAULT_MODE)
@unittest.expectedFailure
@patch("black.dump_to_file", dump_to_stderr)
def test_trailing_comma_optional_parens_stability2(self) -> None:
source, _expected = read_data("trailing_comma_optional_parens2")
actual = fs(source)
black.assert_stable(source, actual, DEFAULT_MODE)
@unittest.expectedFailure
@patch("black.dump_to_file", dump_to_stderr)
def test_trailing_comma_optional_parens_stability3(self) -> None:
source, _expected = read_data("trailing_comma_optional_parens3")
actual = fs(source)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_expression(self) -> None:
source, expected = read_data("expression")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_pep_572(self) -> None:
source, expected = read_data("pep_572")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
if sys.version_info >= (3, 8):
black.assert_equivalent(source, actual)
def test_pep_572_version_detection(self) -> None:
source, _ = read_data("pep_572")
root = black.lib2to3_parse(source)
features = black.get_features_used(root)
self.assertIn(black.Feature.ASSIGNMENT_EXPRESSIONS, features)
versions = black.detect_target_versions(root)
self.assertIn(black.TargetVersion.PY38, versions)
def test_expression_ff(self) -> None:
source, expected = read_data("expression")
tmp_file = Path(black.dump_to_file(source))
try:
self.assertTrue(ff(tmp_file, write_back=black.WriteBack.YES))
with open(tmp_file, encoding="utf8") as f:
actual = f.read()
finally:
os.unlink(tmp_file)
self.assertFormatEqual(expected, actual)
with patch("black.dump_to_file", dump_to_stderr):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
def test_expression_diff(self) -> None:
source, _ = read_data("expression.py")
expected, _ = read_data("expression.diff")
tmp_file = Path(black.dump_to_file(source))
diff_header = re.compile(
rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d "
r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d"
)
try:
result = BlackRunner().invoke(black.main, ["--diff", str(tmp_file)])
self.assertEqual(result.exit_code, 0)
finally:
os.unlink(tmp_file)
actual = result.output
actual = diff_header.sub(DETERMINISTIC_HEADER, actual)
actual = actual.rstrip() + "\n" # the diff output has a trailing space
if expected != actual:
dump = black.dump_to_file(actual)
msg = (
"Expected diff isn't equal to the actual. If you made changes to"
" expression.py and this is an anticipated difference, overwrite"
f" tests/data/expression.diff with {dump}"
)
self.assertEqual(expected, actual, msg)
def test_expression_diff_with_color(self) -> None:
source, _ = read_data("expression.py")
expected, _ = read_data("expression.diff")
tmp_file = Path(black.dump_to_file(source))
try:
result = BlackRunner().invoke(
black.main, ["--diff", "--color", str(tmp_file)]
)
finally:
os.unlink(tmp_file)
actual = result.output
# We check the contents of the diff in `test_expression_diff`. All
# we need to check here is that color codes exist in the result.
self.assertIn("\033[1;37m", actual)
self.assertIn("\033[36m", actual)
self.assertIn("\033[32m", actual)
self.assertIn("\033[31m", actual)
self.assertIn("\033[0m", actual)
@patch("black.dump_to_file", dump_to_stderr)
def test_fstring(self) -> None:
source, expected = read_data("fstring")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_pep_570(self) -> None:
source, expected = read_data("pep_570")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
if sys.version_info >= (3, 8):
black.assert_equivalent(source, actual)
def test_detect_pos_only_arguments(self) -> None:
source, _ = read_data("pep_570")
root = black.lib2to3_parse(source)
features = black.get_features_used(root)
self.assertIn(black.Feature.POS_ONLY_ARGUMENTS, features)
versions = black.detect_target_versions(root)
self.assertIn(black.TargetVersion.PY38, versions)
@patch("black.dump_to_file", dump_to_stderr)
def test_string_quotes(self) -> None:
source, expected = read_data("string_quotes")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
mode = replace(DEFAULT_MODE, string_normalization=False)
not_normalized = fs(source, mode=mode)
self.assertFormatEqual(source.replace("\\\n", ""), not_normalized)
black.assert_equivalent(source, not_normalized)
black.assert_stable(source, not_normalized, mode=mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_docstring(self) -> None:
source, expected = read_data("docstring")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_docstring_no_string_normalization(self) -> None:
"""Like test_docstring but with string normalization off."""
source, expected = read_data("docstring_no_string_normalization")
mode = replace(DEFAULT_MODE, string_normalization=False)
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, mode)
def test_long_strings(self) -> None:
"""Tests for splitting long strings."""
source, expected = read_data("long_strings")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
def test_long_strings_flag_disabled(self) -> None:
"""Tests for turning off the string processing logic."""
source, expected = read_data("long_strings_flag_disabled")
mode = replace(DEFAULT_MODE, experimental_string_processing=False)
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_stable(expected, actual, mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_long_strings__edge_case(self) -> None:
"""Edge-case tests for splitting long strings."""
source, expected = read_data("long_strings__edge_case")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_long_strings__regression(self) -> None:
"""Regression tests for splitting long strings."""
source, expected = read_data("long_strings__regression")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_slices(self) -> None:
source, expected = read_data("slices")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_percent_precedence(self) -> None:
source, expected = read_data("percent_precedence")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments(self) -> None:
source, expected = read_data("comments")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments2(self) -> None:
source, expected = read_data("comments2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments3(self) -> None:
source, expected = read_data("comments3")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments4(self) -> None:
source, expected = read_data("comments4")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments5(self) -> None:
source, expected = read_data("comments5")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments6(self) -> None:
source, expected = read_data("comments6")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments7(self) -> None:
source, expected = read_data("comments7")
mode = replace(DEFAULT_MODE, target_versions={black.TargetVersion.PY38})
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comment_after_escaped_newline(self) -> None:
source, expected = read_data("comment_after_escaped_newline")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_cantfit(self) -> None:
source, expected = read_data("cantfit")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_import_spacing(self) -> None:
source, expected = read_data("import_spacing")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_composition(self) -> None:
source, expected = read_data("composition")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_composition_no_trailing_comma(self) -> None:
source, expected = read_data("composition_no_trailing_comma")
mode = replace(DEFAULT_MODE, target_versions={black.TargetVersion.PY38})
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_empty_lines(self) -> None:
source, expected = read_data("empty_lines")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_remove_parens(self) -> None:
source, expected = read_data("remove_parens")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_string_prefixes(self) -> None:
source, expected = read_data("string_prefixes")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_numeric_literals(self) -> None:
source, expected = read_data("numeric_literals")
mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_numeric_literals_ignoring_underscores(self) -> None:
source, expected = read_data("numeric_literals_skip_underscores")
mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_numeric_literals_py2(self) -> None:
source, expected = read_data("numeric_literals_py2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_python2(self) -> None:
source, expected = read_data("python2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_python2_print_function(self) -> None:
source, expected = read_data("python2_print_function")
mode = replace(DEFAULT_MODE, target_versions={TargetVersion.PY27})
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_python2_unicode_literals(self) -> None:
source, expected = read_data("python2_unicode_literals")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_stub(self) -> None:
mode = replace(DEFAULT_MODE, is_pyi=True)
source, expected = read_data("stub.pyi")
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_stable(source, actual, mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_async_as_identifier(self) -> None:
source_path = (THIS_DIR / "data" / "async_as_identifier.py").resolve()
source, expected = read_data("async_as_identifier")
actual = fs(source)
self.assertFormatEqual(expected, actual)
major, minor = sys.version_info[:2]
if major < 3 or (major <= 3 and minor < 7):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
# ensure black can parse this when the target is 3.6
self.invokeBlack([str(source_path), "--target-version", "py36"])
# but not on 3.7, because async/await is no longer an identifier
self.invokeBlack([str(source_path), "--target-version", "py37"], exit_code=123)
@patch("black.dump_to_file", dump_to_stderr)
def test_python37(self) -> None:
source_path = (THIS_DIR / "data" / "python37.py").resolve()
source, expected = read_data("python37")
actual = fs(source)
self.assertFormatEqual(expected, actual)
major, minor = sys.version_info[:2]
if major > 3 or (major == 3 and minor >= 7):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
# ensure black can parse this when the target is 3.7
self.invokeBlack([str(source_path), "--target-version", "py37"])
# but not on 3.6, because we use async as a reserved keyword
self.invokeBlack([str(source_path), "--target-version", "py36"], exit_code=123)
@patch("black.dump_to_file", dump_to_stderr)
def test_python38(self) -> None:
source, expected = read_data("python38")
actual = fs(source)
self.assertFormatEqual(expected, actual)
major, minor = sys.version_info[:2]
if major > 3 or (major == 3 and minor >= 8):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_python39(self) -> None:
source, expected = read_data("python39")
actual = fs(source)
self.assertFormatEqual(expected, actual)
major, minor = sys.version_info[:2]
if major > 3 or (major == 3 and minor >= 9):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff(self) -> None:
source, expected = read_data("fmtonoff")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff2(self) -> None:
source, expected = read_data("fmtonoff2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff3(self) -> None:
source, expected = read_data("fmtonoff3")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff4(self) -> None:
source, expected = read_data("fmtonoff4")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_remove_empty_parentheses_after_class(self) -> None:
source, expected = read_data("class_blank_parentheses")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_new_line_between_class_and_code(self) -> None:
source, expected = read_data("class_methods_new_line")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_bracket_match(self) -> None:
source, expected = read_data("bracketmatch")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_tuple_assign(self) -> None:
source, expected = read_data("tupleassign")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_beginning_backslash(self) -> None:
source, expected = read_data("beginning_backslash")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
    def test_tab_comment_indentation(self) -> None:
        """Tab-indented comments end up aligned with their (space-indented)
        block at every nesting depth, including mixed tab/space input."""
        # comment at outer-block depth
        contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t# comment\n\tpass\n"
        contents_spc = "if 1:\n    if 2:\n        pass\n    # comment\n    pass\n"
        self.assertFormatEqual(contents_spc, fs(contents_spc))
        self.assertFormatEqual(contents_spc, fs(contents_tab))
        # comment at inner-block depth
        contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t\t# comment\n\tpass\n"
        contents_spc = "if 1:\n    if 2:\n        pass\n        # comment\n    pass\n"
        self.assertFormatEqual(contents_spc, fs(contents_spc))
        self.assertFormatEqual(contents_spc, fs(contents_tab))
        # mixed tabs and spaces (valid Python 2 code)
        contents_tab = "if 1:\n    if 2:\n\t\tpass\n\t# comment\n    pass\n"
        contents_spc = "if 1:\n    if 2:\n        pass\n    # comment\n    pass\n"
        self.assertFormatEqual(contents_spc, fs(contents_spc))
        self.assertFormatEqual(contents_spc, fs(contents_tab))
        contents_tab = "if 1:\n    if 2:\n\t\tpass\n\t\t# comment\n    pass\n"
        contents_spc = "if 1:\n    if 2:\n        pass\n        # comment\n    pass\n"
        self.assertFormatEqual(contents_spc, fs(contents_spc))
        self.assertFormatEqual(contents_spc, fs(contents_tab))
    def test_report_verbose(self) -> None:
        """Drive a verbose Report through done/failed/ignored events, checking
        the echoed console lines, summary string and return code after each."""
        report = black.Report(verbose=True)
        out_lines = []
        err_lines = []
        def out(msg: str, **kwargs: Any) -> None:
            out_lines.append(msg)
        def err(msg: str, **kwargs: Any) -> None:
            err_lines.append(msg)
        # Route black's console helpers into the local lists for inspection.
        with patch("black.out", out), patch("black.err", err):
            report.done(Path("f1"), black.Changed.NO)
            self.assertEqual(len(out_lines), 1)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(out_lines[-1], "f1 already well formatted, good job.")
            self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
            self.assertEqual(report.return_code, 0)
            report.done(Path("f2"), black.Changed.YES)
            self.assertEqual(len(out_lines), 2)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(out_lines[-1], "reformatted f2")
            self.assertEqual(
                unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
            )
            report.done(Path("f3"), black.Changed.CACHED)
            self.assertEqual(len(out_lines), 3)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(
                out_lines[-1], "f3 wasn't modified on disk since last run."
            )
            self.assertEqual(
                unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
            )
            self.assertEqual(report.return_code, 0)
            # check mode alone flips the exit code when files would change
            report.check = True
            self.assertEqual(report.return_code, 1)
            report.check = False
            # a failure switches the return code to 123
            report.failed(Path("e1"), "boom")
            self.assertEqual(len(out_lines), 3)
            self.assertEqual(len(err_lines), 1)
            self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
            self.assertEqual(
                unstyle(str(report)),
                "1 file reformatted, 2 files left unchanged, 1 file failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.done(Path("f3"), black.Changed.YES)
            self.assertEqual(len(out_lines), 4)
            self.assertEqual(len(err_lines), 1)
            self.assertEqual(out_lines[-1], "reformatted f3")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 1 file failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.failed(Path("e2"), "boom")
            self.assertEqual(len(out_lines), 4)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            # ignored paths are echoed (verbose) but do not affect the summary
            report.path_ignored(Path("wat"), "no match")
            self.assertEqual(len(out_lines), 5)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(out_lines[-1], "wat ignored: no match")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.done(Path("f4"), black.Changed.NO)
            self.assertEqual(len(out_lines), 6)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(out_lines[-1], "f4 already well formatted, good job.")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 3 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            # check/diff modes only reword the summary to the conditional form
            report.check = True
            self.assertEqual(
                unstyle(str(report)),
                "2 files would be reformatted, 3 files would be left unchanged, 2 files"
                " would fail to reformat.",
            )
            report.check = False
            report.diff = True
            self.assertEqual(
                unstyle(str(report)),
                "2 files would be reformatted, 3 files would be left unchanged, 2 files"
                " would fail to reformat.",
            )
def test_report_quiet(self) -> None:
    """Report(quiet=True) emits nothing on stdout and only errors on stderr."""
    report = black.Report(quiet=True)
    out_lines: List[str] = []
    err_lines: List[str] = []

    def out(msg: str, **kwargs: Any) -> None:
        out_lines.append(msg)

    def err(msg: str, **kwargs: Any) -> None:
        err_lines.append(msg)

    with patch("black.out", out), patch("black.err", err):
        report.done(Path("f1"), black.Changed.NO)
        self.assertEqual(len(out_lines), 0)
        self.assertEqual(len(err_lines), 0)
        self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
        self.assertEqual(report.return_code, 0)
        report.done(Path("f2"), black.Changed.YES)
        self.assertEqual(len(out_lines), 0)
        self.assertEqual(len(err_lines), 0)
        self.assertEqual(
            unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
        )
        report.done(Path("f3"), black.Changed.CACHED)
        self.assertEqual(len(out_lines), 0)
        self.assertEqual(len(err_lines), 0)
        self.assertEqual(
            unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
        )
        self.assertEqual(report.return_code, 0)
        report.check = True
        # In --check mode a would-be reformat flips the exit code to 1.
        self.assertEqual(report.return_code, 1)
        report.check = False
        report.failed(Path("e1"), "boom")
        self.assertEqual(len(out_lines), 0)
        self.assertEqual(len(err_lines), 1)
        self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
        self.assertEqual(
            unstyle(str(report)),
            "1 file reformatted, 2 files left unchanged, 1 file failed to"
            " reformat.",
        )
        self.assertEqual(report.return_code, 123)
        report.done(Path("f3"), black.Changed.YES)
        self.assertEqual(len(out_lines), 0)
        self.assertEqual(len(err_lines), 1)
        self.assertEqual(
            unstyle(str(report)),
            "2 files reformatted, 2 files left unchanged, 1 file failed to"
            " reformat.",
        )
        self.assertEqual(report.return_code, 123)
        report.failed(Path("e2"), "boom")
        self.assertEqual(len(out_lines), 0)
        self.assertEqual(len(err_lines), 2)
        self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
        self.assertEqual(
            unstyle(str(report)),
            "2 files reformatted, 2 files left unchanged, 2 files failed to"
            " reformat.",
        )
        self.assertEqual(report.return_code, 123)
        # Ignored paths are not printed in quiet mode and don't change counts.
        report.path_ignored(Path("wat"), "no match")
        self.assertEqual(len(out_lines), 0)
        self.assertEqual(len(err_lines), 2)
        self.assertEqual(
            unstyle(str(report)),
            "2 files reformatted, 2 files left unchanged, 2 files failed to"
            " reformat.",
        )
        self.assertEqual(report.return_code, 123)
        report.done(Path("f4"), black.Changed.NO)
        self.assertEqual(len(out_lines), 0)
        self.assertEqual(len(err_lines), 2)
        self.assertEqual(
            unstyle(str(report)),
            "2 files reformatted, 3 files left unchanged, 2 files failed to"
            " reformat.",
        )
        self.assertEqual(report.return_code, 123)
        report.check = True
        # check/diff switch the summary to conditional ("would be") phrasing.
        self.assertEqual(
            unstyle(str(report)),
            "2 files would be reformatted, 3 files would be left unchanged, 2 files"
            " would fail to reformat.",
        )
        report.check = False
        report.diff = True
        self.assertEqual(
            unstyle(str(report)),
            "2 files would be reformatted, 3 files would be left unchanged, 2 files"
            " would fail to reformat.",
        )
def test_report_normal(self) -> None:
    """Default Report prints reformat notices to stdout and errors to stderr."""
    report = black.Report()
    out_lines: List[str] = []
    err_lines: List[str] = []

    def out(msg: str, **kwargs: Any) -> None:
        out_lines.append(msg)

    def err(msg: str, **kwargs: Any) -> None:
        err_lines.append(msg)

    with patch("black.out", out), patch("black.err", err):
        report.done(Path("f1"), black.Changed.NO)
        self.assertEqual(len(out_lines), 0)
        self.assertEqual(len(err_lines), 0)
        self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
        self.assertEqual(report.return_code, 0)
        report.done(Path("f2"), black.Changed.YES)
        self.assertEqual(len(out_lines), 1)
        self.assertEqual(len(err_lines), 0)
        self.assertEqual(out_lines[-1], "reformatted f2")
        self.assertEqual(
            unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
        )
        report.done(Path("f3"), black.Changed.CACHED)
        self.assertEqual(len(out_lines), 1)
        self.assertEqual(len(err_lines), 0)
        # A cache hit prints nothing; the last stdout line is still f2's.
        self.assertEqual(out_lines[-1], "reformatted f2")
        self.assertEqual(
            unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
        )
        self.assertEqual(report.return_code, 0)
        report.check = True
        # In --check mode a would-be reformat flips the exit code to 1.
        self.assertEqual(report.return_code, 1)
        report.check = False
        report.failed(Path("e1"), "boom")
        self.assertEqual(len(out_lines), 1)
        self.assertEqual(len(err_lines), 1)
        self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
        self.assertEqual(
            unstyle(str(report)),
            "1 file reformatted, 2 files left unchanged, 1 file failed to"
            " reformat.",
        )
        self.assertEqual(report.return_code, 123)
        report.done(Path("f3"), black.Changed.YES)
        self.assertEqual(len(out_lines), 2)
        self.assertEqual(len(err_lines), 1)
        self.assertEqual(out_lines[-1], "reformatted f3")
        self.assertEqual(
            unstyle(str(report)),
            "2 files reformatted, 2 files left unchanged, 1 file failed to"
            " reformat.",
        )
        self.assertEqual(report.return_code, 123)
        report.failed(Path("e2"), "boom")
        self.assertEqual(len(out_lines), 2)
        self.assertEqual(len(err_lines), 2)
        self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
        self.assertEqual(
            unstyle(str(report)),
            "2 files reformatted, 2 files left unchanged, 2 files failed to"
            " reformat.",
        )
        self.assertEqual(report.return_code, 123)
        # Ignored paths are not printed by a non-verbose report.
        report.path_ignored(Path("wat"), "no match")
        self.assertEqual(len(out_lines), 2)
        self.assertEqual(len(err_lines), 2)
        self.assertEqual(
            unstyle(str(report)),
            "2 files reformatted, 2 files left unchanged, 2 files failed to"
            " reformat.",
        )
        self.assertEqual(report.return_code, 123)
        report.done(Path("f4"), black.Changed.NO)
        self.assertEqual(len(out_lines), 2)
        self.assertEqual(len(err_lines), 2)
        self.assertEqual(
            unstyle(str(report)),
            "2 files reformatted, 3 files left unchanged, 2 files failed to"
            " reformat.",
        )
        self.assertEqual(report.return_code, 123)
        report.check = True
        # check/diff switch the summary to conditional ("would be") phrasing.
        self.assertEqual(
            unstyle(str(report)),
            "2 files would be reformatted, 3 files would be left unchanged, 2 files"
            " would fail to reformat.",
        )
        report.check = False
        report.diff = True
        self.assertEqual(
            unstyle(str(report)),
            "2 files would be reformatted, 3 files would be left unchanged, 2 files"
            " would fail to reformat.",
        )
def test_lib2to3_parse(self) -> None:
    """lib2to3_parse() accepts/rejects code based on the target-version set."""
    with self.assertRaises(black.InvalidInput):
        black.lib2to3_parse("invalid syntax")
    # Valid in both Python 2 and 3: parses under every target set.
    straddling = "x + y"
    black.lib2to3_parse(straddling)
    black.lib2to3_parse(straddling, {TargetVersion.PY27})
    black.lib2to3_parse(straddling, {TargetVersion.PY36})
    black.lib2to3_parse(straddling, {TargetVersion.PY27, TargetVersion.PY36})
    # Python 2-only print statement: rejected whenever PY36 alone is required.
    py2_only = "print x"
    black.lib2to3_parse(py2_only)
    black.lib2to3_parse(py2_only, {TargetVersion.PY27})
    with self.assertRaises(black.InvalidInput):
        black.lib2to3_parse(py2_only, {TargetVersion.PY36})
    with self.assertRaises(black.InvalidInput):
        black.lib2to3_parse(py2_only, {TargetVersion.PY27, TargetVersion.PY36})
    # Python 3-only call syntax: rejected for a pure-PY27 target, accepted
    # when PY36 is in the set.
    py3_only = "exec(x, end=y)"
    black.lib2to3_parse(py3_only)
    with self.assertRaises(black.InvalidInput):
        black.lib2to3_parse(py3_only, {TargetVersion.PY27})
    black.lib2to3_parse(py3_only, {TargetVersion.PY36})
    black.lib2to3_parse(py3_only, {TargetVersion.PY27, TargetVersion.PY36})
def test_get_features_used_decorator(self) -> None:
    """Feature detection for PEP 614 relaxed decorator syntax.

    Since broken relaxed-decorator detection makes some cases of
    test_get_features_used() fail, this is tested separately so the
    offending decorator is identified directly.

    Fix: the implicit string concatenations in both `msg=` arguments were
    missing a separating space, producing "…syntaxbut is detected…".
    """
    simples, relaxed = read_data("decorators")
    # skip explanation comments at the top of the file
    for simple_test in simples.split("##")[1:]:
        node = black.lib2to3_parse(simple_test)
        decorator = str(node.children[0].children[0]).strip()
        self.assertNotIn(
            Feature.RELAXED_DECORATORS,
            black.get_features_used(node),
            msg=(
                f"decorator '{decorator}' follows python<=3.8 syntax"
                " but is detected as 3.9+"
                # f"The full node is\n{node!r}"
            ),
        )
    # skip the '# output' comment at the top of the output part
    for relaxed_test in relaxed.split("##")[1:]:
        node = black.lib2to3_parse(relaxed_test)
        decorator = str(node.children[0].children[0]).strip()
        self.assertIn(
            Feature.RELAXED_DECORATORS,
            black.get_features_used(node),
            msg=(
                f"decorator '{decorator}' uses python3.9+ syntax"
                " but is detected as python<=3.8"
                # f"The full node is\n{node!r}"
            ),
        )
def test_get_features_used(self) -> None:
    """get_features_used() reports exactly the syntax features a tree uses."""
    node = black.lib2to3_parse("def f(*, arg): ...\n")
    self.assertEqual(black.get_features_used(node), set())
    node = black.lib2to3_parse("def f(*, arg,): ...\n")
    self.assertEqual(black.get_features_used(node), {Feature.TRAILING_COMMA_IN_DEF})
    node = black.lib2to3_parse("f(*arg,)\n")
    self.assertEqual(
        black.get_features_used(node), {Feature.TRAILING_COMMA_IN_CALL}
    )
    node = black.lib2to3_parse("def f(*, arg): f'string'\n")
    self.assertEqual(black.get_features_used(node), {Feature.F_STRINGS})
    node = black.lib2to3_parse("123_456\n")
    self.assertEqual(black.get_features_used(node), {Feature.NUMERIC_UNDERSCORES})
    node = black.lib2to3_parse("123456\n")
    self.assertEqual(black.get_features_used(node), set())
    source, expected = read_data("function")
    node = black.lib2to3_parse(source)
    expected_features = {
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.F_STRINGS,
    }
    self.assertEqual(black.get_features_used(node), expected_features)
    # Formatting must not add or remove used features.
    node = black.lib2to3_parse(expected)
    self.assertEqual(black.get_features_used(node), expected_features)
    source, expected = read_data("expression")
    node = black.lib2to3_parse(source)
    self.assertEqual(black.get_features_used(node), set())
    node = black.lib2to3_parse(expected)
    self.assertEqual(black.get_features_used(node), set())
def test_get_future_imports(self) -> None:
    """get_future_imports() collects names only from leading __future__ imports."""
    node = black.lib2to3_parse("\n")
    self.assertEqual(set(), black.get_future_imports(node))
    node = black.lib2to3_parse("from __future__ import black\n")
    self.assertEqual({"black"}, black.get_future_imports(node))
    node = black.lib2to3_parse("from __future__ import multiple, imports\n")
    self.assertEqual({"multiple", "imports"}, black.get_future_imports(node))
    node = black.lib2to3_parse("from __future__ import (parenthesized, imports)\n")
    self.assertEqual({"parenthesized", "imports"}, black.get_future_imports(node))
    node = black.lib2to3_parse(
        "from __future__ import multiple\nfrom __future__ import imports\n"
    )
    self.assertEqual({"multiple", "imports"}, black.get_future_imports(node))
    # Comments and docstrings before the import are still allowed...
    node = black.lib2to3_parse("# comment\nfrom __future__ import black\n")
    self.assertEqual({"black"}, black.get_future_imports(node))
    node = black.lib2to3_parse('"""docstring"""\nfrom __future__ import black\n')
    self.assertEqual({"black"}, black.get_future_imports(node))
    # ...but any code before the import disqualifies it.
    node = black.lib2to3_parse("some(other, code)\nfrom __future__ import black\n")
    self.assertEqual(set(), black.get_future_imports(node))
    # Imports from other modules don't count.
    node = black.lib2to3_parse("from some.module import black\n")
    self.assertEqual(set(), black.get_future_imports(node))
    # Aliased imports report the original feature name, not the alias.
    node = black.lib2to3_parse(
        "from __future__ import unicode_literals as _unicode_literals"
    )
    self.assertEqual({"unicode_literals"}, black.get_future_imports(node))
    node = black.lib2to3_parse(
        "from __future__ import unicode_literals as _lol, print"
    )
    self.assertEqual({"unicode_literals", "print"}, black.get_future_imports(node))
def test_debug_visitor(self) -> None:
    """DebugVisitor.show() output matches the golden file for debug_visitor.py."""
    source, _ = read_data("debug_visitor.py")
    expected, _ = read_data("debug_visitor.out")
    out_lines: List[str] = []
    err_lines: List[str] = []

    def out(msg: str, **kwargs: Any) -> None:
        out_lines.append(msg)

    def err(msg: str, **kwargs: Any) -> None:
        err_lines.append(msg)

    with patch("black.out", out), patch("black.err", err):
        black.DebugVisitor.show(source)
    actual = "\n".join(out_lines) + "\n"
    log_name = ""
    if expected != actual:
        # Dump the mismatching output to a temp file to ease debugging.
        log_name = black.dump_to_file(*out_lines)
    self.assertEqual(
        expected,
        actual,
        f"AST print out is different. Actual version dumped to {log_name}",
    )
def test_format_file_contents(self) -> None:
    """format_file_contents(): NothingChanged for no-ops, InvalidInput for junk."""
    empty = ""
    mode = DEFAULT_MODE
    with self.assertRaises(black.NothingChanged):
        black.format_file_contents(empty, mode=mode, fast=False)
    just_nl = "\n"
    with self.assertRaises(black.NothingChanged):
        black.format_file_contents(just_nl, mode=mode, fast=False)
    # Already-formatted input also raises NothingChanged rather than returning.
    same = "j = [1, 2, 3]\n"
    with self.assertRaises(black.NothingChanged):
        black.format_file_contents(same, mode=mode, fast=False)
    different = "j = [1,2,3]"
    expected = same
    actual = black.format_file_contents(different, mode=mode, fast=False)
    self.assertEqual(expected, actual)
    invalid = "return if you can"
    with self.assertRaises(black.InvalidInput) as e:
        black.format_file_contents(invalid, mode=mode, fast=False)
    self.assertEqual(str(e.exception), "Cannot parse: 1:7: return if you can")
def test_endmarker(self) -> None:
    """An empty source still parses to a file_input node holding only ENDMARKER."""
    tree = black.lib2to3_parse("\n")
    self.assertEqual(tree.type, black.syms.file_input)
    self.assertEqual(len(tree.children), 1)
    only_child = tree.children[0]
    self.assertEqual(only_child.type, black.token.ENDMARKER)
@unittest.skipIf(os.environ.get("SKIP_AST_PRINT"), "user set SKIP_AST_PRINT")
def test_assertFormatEqual(self) -> None:
    """A failing assertFormatEqual prints both trees to stdout, not stderr."""
    out_lines: List[str] = []
    err_lines: List[str] = []

    def out(msg: str, **kwargs: Any) -> None:
        out_lines.append(msg)

    def err(msg: str, **kwargs: Any) -> None:
        err_lines.append(msg)

    with patch("black.out", out), patch("black.err", err):
        with self.assertRaises(AssertionError):
            self.assertFormatEqual("j = [1, 2, 3]", "j = [1, 2, 3,]")

    out_str = "".join(out_lines)
    self.assertTrue("Expected tree:" in out_str)
    self.assertTrue("Actual tree:" in out_str)
    self.assertEqual("".join(err_lines), "")
def test_cache_broken_file(self) -> None:
    """An unreadable cache file is treated as empty and rewritten on next run."""
    mode = DEFAULT_MODE
    with cache_dir() as workspace:
        cache_file = black.get_cache_file(mode)
        with cache_file.open("w") as fobj:
            fobj.write("this is not a pickle")
        # Corrupt pickle data must not crash; it reads back as an empty cache.
        self.assertEqual(black.read_cache(mode), {})
        src = (workspace / "test.py").resolve()
        with src.open("w") as fobj:
            fobj.write("print('hello')")
        self.invokeBlack([str(src)])
        # After a successful run the file is cached again.
        cache = black.read_cache(mode)
        self.assertIn(src, cache)
def test_cache_single_file_already_cached(self) -> None:
    """A file already present in the cache is not reformatted."""
    mode = DEFAULT_MODE
    with cache_dir() as workspace:
        src = (workspace / "test.py").resolve()
        with src.open("w") as fobj:
            fobj.write("print('hello')")
        black.write_cache({}, [src], mode)
        self.invokeBlack([str(src)])
        with src.open("r") as fobj:
            # Untouched: the single quotes would have been rewritten otherwise.
            self.assertEqual(fobj.read(), "print('hello')")
@event_loop()
def test_cache_multiple_files(self) -> None:
    """Only files missing from the cache get reformatted; all end up cached."""
    mode = DEFAULT_MODE
    # Threads instead of processes so the in-process cache_dir patch holds.
    with cache_dir() as workspace, patch(
        "black.ProcessPoolExecutor", new=ThreadPoolExecutor
    ):
        one = (workspace / "one.py").resolve()
        with one.open("w") as fobj:
            fobj.write("print('hello')")
        two = (workspace / "two.py").resolve()
        with two.open("w") as fobj:
            fobj.write("print('hello')")
        black.write_cache({}, [one], mode)
        self.invokeBlack([str(workspace)])
        with one.open("r") as fobj:
            # `one` was pre-cached, so it keeps its single quotes.
            self.assertEqual(fobj.read(), "print('hello')")
        with two.open("r") as fobj:
            # `two` was not cached and gets reformatted.
            self.assertEqual(fobj.read(), 'print("hello")\n')
        cache = black.read_cache(mode)
        self.assertIn(one, cache)
        self.assertIn(two, cache)
def test_no_cache_when_writeback_diff(self) -> None:
    """--diff must neither read nor write the formatting cache."""
    mode = DEFAULT_MODE
    with cache_dir() as workspace:
        src = (workspace / "test.py").resolve()
        with src.open("w") as fobj:
            fobj.write("print('hello')")
        with patch("black.read_cache") as read_cache, patch(
            "black.write_cache"
        ) as write_cache:
            self.invokeBlack([str(src), "--diff"])
            cache_file = black.get_cache_file(mode)
            self.assertFalse(cache_file.exists())
        write_cache.assert_not_called()
        read_cache.assert_not_called()
def test_no_cache_when_writeback_color_diff(self) -> None:
    """--diff --color must neither read nor write the formatting cache."""
    mode = DEFAULT_MODE
    with cache_dir() as workspace:
        src = (workspace / "test.py").resolve()
        with src.open("w") as fobj:
            fobj.write("print('hello')")
        with patch("black.read_cache") as read_cache, patch(
            "black.write_cache"
        ) as write_cache:
            self.invokeBlack([str(src), "--diff", "--color"])
            cache_file = black.get_cache_file(mode)
            self.assertFalse(cache_file.exists())
        write_cache.assert_not_called()
        read_cache.assert_not_called()
@event_loop()
def test_output_locking_when_writeback_diff(self) -> None:
    """Diff output across multiple files goes through a Manager-provided lock."""
    with cache_dir() as workspace:
        for tag in range(0, 4):
            src = (workspace / f"test{tag}.py").resolve()
            with src.open("w") as fobj:
                fobj.write("print('hello')")
        with patch("black.Manager", wraps=multiprocessing.Manager) as mgr:
            self.invokeBlack(["--diff", str(workspace)], exit_code=0)
            # this isn't quite doing what we want, but if it _isn't_
            # called then we cannot be using the lock it provides
            mgr.assert_called()
@event_loop()
def test_output_locking_when_writeback_color_diff(self) -> None:
    """Colored diff output likewise goes through a Manager-provided lock."""
    with cache_dir() as workspace:
        for tag in range(0, 4):
            src = (workspace / f"test{tag}.py").resolve()
            with src.open("w") as fobj:
                fobj.write("print('hello')")
        with patch("black.Manager", wraps=multiprocessing.Manager) as mgr:
            self.invokeBlack(["--diff", "--color", str(workspace)], exit_code=0)
            # this isn't quite doing what we want, but if it _isn't_
            # called then we cannot be using the lock it provides
            mgr.assert_called()
def test_no_cache_when_stdin(self) -> None:
    """Formatting stdin ("-") must not create a cache file."""
    mode = DEFAULT_MODE
    with cache_dir():
        result = CliRunner().invoke(
            black.main, ["-"], input=BytesIO(b"print('hello')")
        )
        self.assertEqual(result.exit_code, 0)
        cache_file = black.get_cache_file(mode)
        self.assertFalse(cache_file.exists())
def test_read_cache_no_cachefile(self) -> None:
    """Reading a cache that was never written yields an empty mapping."""
    with cache_dir():
        cache = black.read_cache(DEFAULT_MODE)
        self.assertEqual(cache, {})
def test_write_cache_read_cache(self) -> None:
    """The cache round-trips: written entries read back with matching file info."""
    mode = DEFAULT_MODE
    with cache_dir() as workspace:
        src = (workspace / "test.py").resolve()
        src.touch()
        black.write_cache({}, [src], mode)
        cache = black.read_cache(mode)
        self.assertIn(src, cache)
        self.assertEqual(cache[src], black.get_cache_info(src))
def test_filter_cached(self) -> None:
    """filter_cached() splits sources into to-do and done by cache freshness."""
    with TemporaryDirectory() as workspace:
        path = Path(workspace)
        uncached = (path / "uncached").resolve()
        cached = (path / "cached").resolve()
        cached_but_changed = (path / "changed").resolve()
        uncached.touch()
        cached.touch()
        cached_but_changed.touch()
        # A stale (0.0, 0) entry must be treated like a missing entry.
        cache = {cached: black.get_cache_info(cached), cached_but_changed: (0.0, 0)}
        todo, done = black.filter_cached(
            cache, {uncached, cached, cached_but_changed}
        )
        self.assertEqual(todo, {uncached, cached_but_changed})
        self.assertEqual(done, {cached})
def test_write_cache_creates_directory_if_needed(self) -> None:
    """write_cache() creates the cache directory when it doesn't exist yet."""
    mode = DEFAULT_MODE
    with cache_dir(exists=False) as workspace:
        self.assertFalse(workspace.exists())
        black.write_cache({}, [], mode)
        self.assertTrue(workspace.exists())
@event_loop()
def test_failed_formatting_does_not_get_cached(self) -> None:
    """Files that fail to format stay out of the cache; clean files get in."""
    mode = DEFAULT_MODE
    # Threads instead of processes so the in-process cache_dir patch holds.
    with cache_dir() as workspace, patch(
        "black.ProcessPoolExecutor", new=ThreadPoolExecutor
    ):
        failing = (workspace / "failing.py").resolve()
        with failing.open("w") as fobj:
            fobj.write("not actually python")
        clean = (workspace / "clean.py").resolve()
        with clean.open("w") as fobj:
            fobj.write('print("hello")\n')
        # Exit code 123 signals an internal error on at least one file.
        self.invokeBlack([str(workspace)], exit_code=123)
        cache = black.read_cache(mode)
        self.assertNotIn(failing, cache)
        self.assertIn(clean, cache)
def test_write_cache_write_fail(self) -> None:
    """A failing cache write (OSError) is swallowed, not propagated."""
    mode = DEFAULT_MODE
    with cache_dir(), patch.object(Path, "open") as mock:
        mock.side_effect = OSError
        black.write_cache({}, [], mode)
@event_loop()
@patch("black.ProcessPoolExecutor", MagicMock(side_effect=OSError))
def test_works_in_mono_process_only_environment(self) -> None:
    """black still succeeds when a process pool cannot be created (OSError)."""
    with cache_dir() as workspace:
        for f in [
            (workspace / "one.py").resolve(),
            (workspace / "two.py").resolve(),
        ]:
            f.write_text('print("hello")\n')
        self.invokeBlack([str(workspace)])
@event_loop()
def test_check_diff_use_together(self) -> None:
    """--check with --diff exits 1 iff any given file would be reformatted."""
    with cache_dir():
        # Files which will be reformatted.
        src1 = (THIS_DIR / "data" / "string_quotes.py").resolve()
        self.invokeBlack([str(src1), "--diff", "--check"], exit_code=1)
        # Files which will not be reformatted.
        src2 = (THIS_DIR / "data" / "composition.py").resolve()
        self.invokeBlack([str(src2), "--diff", "--check"])
        # Multi file command.
        self.invokeBlack([str(src1), str(src2), "--diff", "--check"], exit_code=1)
def test_no_files(self) -> None:
    """Running black without any path arguments exits cleanly (code 0)."""
    no_sources: List[str] = []
    with cache_dir():
        self.invokeBlack(no_sources)
def test_broken_symlink(self) -> None:
    """A dangling symlink in the tree must not crash file discovery."""
    with cache_dir() as workspace:
        symlink = workspace / "broken_link.py"
        try:
            symlink.symlink_to("nonexistent.py")
        except OSError as e:
            # Some platforms/users cannot create symlinks at all.
            self.skipTest(f"Can't create symlinks: {e}")
        self.invokeBlack([str(workspace.resolve())])
def test_read_cache_line_lengths(self) -> None:
    """Caches are keyed by mode: a different line length sees a different cache."""
    mode = DEFAULT_MODE
    short_mode = replace(DEFAULT_MODE, line_length=1)
    with cache_dir() as workspace:
        path = (workspace / "file.py").resolve()
        path.touch()
        black.write_cache({}, [path], mode)
        one = black.read_cache(mode)
        self.assertIn(path, one)
        two = black.read_cache(short_mode)
        self.assertNotIn(path, two)
def test_tricky_unicode_symbols(self) -> None:
    """Formatting code with unusual Unicode symbols is equivalent and stable."""
    before, after = read_data("tricky_unicode_symbols")
    formatted = fs(before)
    self.assertFormatEqual(after, formatted)
    black.assert_equivalent(before, formatted)
    black.assert_stable(before, formatted, DEFAULT_MODE)
def test_single_file_force_pyi(self) -> None:
    """--pyi formats a file in stub style and uses the pyi-specific cache."""
    pyi_mode = replace(DEFAULT_MODE, is_pyi=True)
    contents, expected = read_data("force_pyi")
    with cache_dir() as workspace:
        path = (workspace / "file.py").resolve()
        with open(path, "w") as fh:
            fh.write(contents)
        self.invokeBlack([str(path), "--pyi"])
        with open(path, "r") as fh:
            actual = fh.read()
        # verify cache with --pyi is separate
        pyi_cache = black.read_cache(pyi_mode)
        self.assertIn(path, pyi_cache)
        normal_cache = black.read_cache(DEFAULT_MODE)
        self.assertNotIn(path, normal_cache)
    self.assertFormatEqual(expected, actual)
    black.assert_equivalent(contents, actual)
    black.assert_stable(contents, actual, pyi_mode)
@event_loop()
def test_multi_file_force_pyi(self) -> None:
    """--pyi applies to all given files; cache entries land in the pyi cache."""
    reg_mode = DEFAULT_MODE
    pyi_mode = replace(DEFAULT_MODE, is_pyi=True)
    contents, expected = read_data("force_pyi")
    with cache_dir() as workspace:
        paths = [
            (workspace / "file1.py").resolve(),
            (workspace / "file2.py").resolve(),
        ]
        for path in paths:
            with open(path, "w") as fh:
                fh.write(contents)
        self.invokeBlack([str(p) for p in paths] + ["--pyi"])
        for path in paths:
            with open(path, "r") as fh:
                actual = fh.read()
            self.assertEqual(actual, expected)
        # verify cache with --pyi is separate
        pyi_cache = black.read_cache(pyi_mode)
        normal_cache = black.read_cache(reg_mode)
        for path in paths:
            self.assertIn(path, pyi_cache)
            self.assertNotIn(path, normal_cache)
def test_pipe_force_pyi(self) -> None:
    """--pyi also applies when formatting stdin."""
    source, expected = read_data("force_pyi")
    result = CliRunner().invoke(
        black.main, ["-", "-q", "--pyi"], input=BytesIO(source.encode("utf8"))
    )
    self.assertEqual(result.exit_code, 0)
    actual = result.output
    self.assertFormatEqual(actual, expected)
def test_single_file_force_py36(self) -> None:
    """--target-version=py36 formats the file and uses a mode-specific cache."""
    reg_mode = DEFAULT_MODE
    py36_mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
    source, expected = read_data("force_py36")
    with cache_dir() as workspace:
        path = (workspace / "file.py").resolve()
        with open(path, "w") as fh:
            fh.write(source)
        self.invokeBlack([str(path), *PY36_ARGS])
        with open(path, "r") as fh:
            actual = fh.read()
        # verify cache with --target-version is separate
        py36_cache = black.read_cache(py36_mode)
        self.assertIn(path, py36_cache)
        normal_cache = black.read_cache(reg_mode)
        self.assertNotIn(path, normal_cache)
    self.assertEqual(actual, expected)
@event_loop()
def test_multi_file_force_py36(self) -> None:
    """--target-version=py36 applies to every listed file, cached separately.

    Fix: the cache read with `py36_mode` was bound to a misleading
    `pyi_cache` local (copy-paste from the pyi variant of this test);
    renamed to `py36_cache`.
    """
    reg_mode = DEFAULT_MODE
    py36_mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
    source, expected = read_data("force_py36")
    with cache_dir() as workspace:
        paths = [
            (workspace / "file1.py").resolve(),
            (workspace / "file2.py").resolve(),
        ]
        for path in paths:
            with open(path, "w") as fh:
                fh.write(source)
        self.invokeBlack([str(p) for p in paths] + PY36_ARGS)
        for path in paths:
            with open(path, "r") as fh:
                actual = fh.read()
            self.assertEqual(actual, expected)
        # verify cache with --target-version is separate
        py36_cache = black.read_cache(py36_mode)
        normal_cache = black.read_cache(reg_mode)
        for path in paths:
            self.assertIn(path, py36_cache)
            self.assertNotIn(path, normal_cache)
def test_collections(self) -> None:
    """Formatting the `collections` fixture is equivalent and stable."""
    before, after = read_data("collections")
    formatted = fs(before)
    self.assertFormatEqual(after, formatted)
    black.assert_equivalent(before, formatted)
    black.assert_stable(before, formatted, DEFAULT_MODE)
def test_pipe_force_py36(self) -> None:
    """--target-version=py36 also applies when formatting stdin."""
    source, expected = read_data("force_py36")
    result = CliRunner().invoke(
        black.main,
        ["-", "-q", "--target-version=py36"],
        input=BytesIO(source.encode("utf8")),
    )
    self.assertEqual(result.exit_code, 0)
    actual = result.output
    self.assertFormatEqual(actual, expected)
def test_include_exclude(self) -> None:
    """gen_python_files() honors both the include and the exclude regexes."""
    path = THIS_DIR / "data" / "include_exclude_tests"
    include = re.compile(r"\.pyi?$")
    exclude = re.compile(r"/exclude/|/\.definitely_exclude/")
    report = black.Report()
    gitignore = PathSpec.from_lines("gitwildmatch", [])
    sources: List[Path] = []
    # Only .py/.pyi files outside the excluded directories survive.
    expected = [
        Path(path / "b/dont_exclude/a.py"),
        Path(path / "b/dont_exclude/a.pyi"),
    ]
    this_abs = THIS_DIR.resolve()
    sources.extend(
        black.gen_python_files(
            path.iterdir(), this_abs, include, exclude, None, report, gitignore
        )
    )
    self.assertEqual(sorted(expected), sorted(sources))
@patch("black.find_project_root", lambda *args: THIS_DIR.resolve())
def test_exclude_for_issue_1572(self) -> None:
# Exclude shouldn't touch files that were explicitly given to Black through the
# CLI. Exclude is supposed to only apply to the recursive discovery of files.
# https://github.com/psf/black/issues/1572
path = THIS_DIR / "data" / "include_exclude_tests"
include = ""
exclude = r"/exclude/|a\.py"
src = str(path / "b/exclude/a.py")
report = black.Report()
expected = [Path(path / "b/exclude/a.py")]
sources = list(
black.get_sources(
ctx=FakeContext(),
src=(src,),
quiet=True,
verbose=False,
include=include,
exclude=exclude,
force_exclude=None,
report=report,
)
)
self.assertEqual(sorted(expected), sorted(sources))
def test_gitignore_exclude(self) -> None:
    """.gitignore patterns exclude files from discovery like --exclude does."""
    path = THIS_DIR / "data" / "include_exclude_tests"
    include = re.compile(r"\.pyi?$")
    exclude = re.compile(r"")
    report = black.Report()
    gitignore = PathSpec.from_lines(
        "gitwildmatch", ["exclude/", ".definitely_exclude"]
    )
    sources: List[Path] = []
    expected = [
        Path(path / "b/dont_exclude/a.py"),
        Path(path / "b/dont_exclude/a.pyi"),
    ]
    this_abs = THIS_DIR.resolve()
    sources.extend(
        black.gen_python_files(
            path.iterdir(), this_abs, include, exclude, None, report, gitignore
        )
    )
    self.assertEqual(sorted(expected), sorted(sources))
def test_empty_include(self) -> None:
    """An empty include regex matches every file, not none of them."""
    path = THIS_DIR / "data" / "include_exclude_tests"
    report = black.Report()
    gitignore = PathSpec.from_lines("gitwildmatch", [])
    empty = re.compile(r"")
    sources: List[Path] = []
    # Every file in the tree is expected, regardless of extension or dir name.
    expected = [
        Path(path / "b/exclude/a.pie"),
        Path(path / "b/exclude/a.py"),
        Path(path / "b/exclude/a.pyi"),
        Path(path / "b/dont_exclude/a.pie"),
        Path(path / "b/dont_exclude/a.py"),
        Path(path / "b/dont_exclude/a.pyi"),
        Path(path / "b/.definitely_exclude/a.pie"),
        Path(path / "b/.definitely_exclude/a.py"),
        Path(path / "b/.definitely_exclude/a.pyi"),
    ]
    this_abs = THIS_DIR.resolve()
    sources.extend(
        black.gen_python_files(
            path.iterdir(),
            this_abs,
            empty,
            re.compile(black.DEFAULT_EXCLUDES),
            None,
            report,
            gitignore,
        )
    )
    self.assertEqual(sorted(expected), sorted(sources))
def test_empty_exclude(self) -> None:
    """An empty exclude regex excludes nothing: all .py/.pyi files are found."""
    path = THIS_DIR / "data" / "include_exclude_tests"
    report = black.Report()
    gitignore = PathSpec.from_lines("gitwildmatch", [])
    empty = re.compile(r"")
    sources: List[Path] = []
    expected = [
        Path(path / "b/dont_exclude/a.py"),
        Path(path / "b/dont_exclude/a.pyi"),
        Path(path / "b/exclude/a.py"),
        Path(path / "b/exclude/a.pyi"),
        Path(path / "b/.definitely_exclude/a.py"),
        Path(path / "b/.definitely_exclude/a.pyi"),
    ]
    this_abs = THIS_DIR.resolve()
    sources.extend(
        black.gen_python_files(
            path.iterdir(),
            this_abs,
            re.compile(black.DEFAULT_INCLUDES),
            empty,
            None,
            report,
            gitignore,
        )
    )
    self.assertEqual(sorted(expected), sorted(sources))
def test_invalid_include_exclude(self) -> None:
    """A malformed regex for --include/--exclude is a usage error (exit 2)."""
    bad_regex = "**()(!!*)"
    for option in ("--include", "--exclude"):
        self.invokeBlack(["-", option, bad_regex], exit_code=2)
def test_preserves_line_endings(self) -> None:
    """Writing back preserves the file's original newline style (LF or CRLF)."""
    with TemporaryDirectory() as workspace:
        test_file = Path(workspace) / "test.py"
        for nl in ["\n", "\r\n"]:
            contents = nl.join(["def f( ):", " pass"])
            test_file.write_bytes(contents.encode())
            ff(test_file, write_back=black.WriteBack.YES)
            updated_contents: bytes = test_file.read_bytes()
            self.assertIn(nl.encode(), updated_contents)
            if nl == "\n":
                # An LF file must not gain any CRLF endings.
                self.assertNotIn(b"\r\n", updated_contents)
def test_preserves_line_endings_via_stdin(self) -> None:
    """Formatting via stdin preserves the input's newline style too."""
    for nl in ["\n", "\r\n"]:
        contents = nl.join(["def f( ):", " pass"])
        runner = BlackRunner()
        result = runner.invoke(
            black.main, ["-", "--fast"], input=BytesIO(contents.encode("utf8"))
        )
        self.assertEqual(result.exit_code, 0)
        output = runner.stdout_bytes
        self.assertIn(nl.encode("utf8"), output)
        if nl == "\n":
            # LF input must not gain any CRLF endings.
            self.assertNotIn(b"\r\n", output)
def test_assert_equivalent_different_asts(self) -> None:
    """assert_equivalent must raise when the two sources parse to different ASTs."""
    empty_dict, none_literal = "{}", "None"
    with self.assertRaises(AssertionError):
        black.assert_equivalent(empty_dict, none_literal)
def test_symlink_out_of_root_directory(self) -> None:
    """Symlinks resolving outside root are skipped; regular files there raise."""
    path = MagicMock()
    root = THIS_DIR.resolve()
    child = MagicMock()
    include = re.compile(black.DEFAULT_INCLUDES)
    exclude = re.compile(black.DEFAULT_EXCLUDES)
    report = black.Report()
    gitignore = PathSpec.from_lines("gitwildmatch", [])
    # `child` should behave like a symlink which resolved path is clearly
    # outside of the `root` directory.
    path.iterdir.return_value = [child]
    child.resolve.return_value = Path("/a/b/c")
    child.as_posix.return_value = "/a/b/c"
    child.is_symlink.return_value = True
    try:
        list(
            black.gen_python_files(
                path.iterdir(), root, include, exclude, None, report, gitignore
            )
        )
    except ValueError as ve:
        self.fail(f"`get_python_files_in_dir()` failed: {ve}")
    path.iterdir.assert_called_once()
    child.resolve.assert_called_once()
    child.is_symlink.assert_called_once()
    # `child` should behave like a strange file which resolved path is clearly
    # outside of the `root` directory.
    child.is_symlink.return_value = False
    with self.assertRaises(ValueError):
        list(
            black.gen_python_files(
                path.iterdir(), root, include, exclude, None, report, gitignore
            )
        )
    # Each mocked call site must have fired once per gen_python_files() pass.
    path.iterdir.assert_called()
    self.assertEqual(path.iterdir.call_count, 2)
    child.resolve.assert_called()
    self.assertEqual(child.resolve.call_count, 2)
    child.is_symlink.assert_called()
    self.assertEqual(child.is_symlink.call_count, 2)
def test_shhh_click(self) -> None:
    """black.patch_click() silences Click's ASCII-preferred-encoding crash.

    Fix: the exception was bound as `except RuntimeError as re:`, shadowing
    the imported `re` module inside the handler; renamed to `exc`.
    """
    try:
        from click import _unicodefun  # type: ignore
    except ModuleNotFoundError:
        self.skipTest("Incompatible Click version")
    if not hasattr(_unicodefun, "_verify_python3_env"):
        self.skipTest("Incompatible Click version")
    # First, let's see if Click is crashing with a preferred ASCII charset.
    with patch("locale.getpreferredencoding") as gpe:
        gpe.return_value = "ASCII"
        with self.assertRaises(RuntimeError):
            _unicodefun._verify_python3_env()
    # Now, let's silence Click...
    black.patch_click()
    # ...and confirm it's silent.
    with patch("locale.getpreferredencoding") as gpe:
        gpe.return_value = "ASCII"
        try:
            _unicodefun._verify_python3_env()
        except RuntimeError as exc:
            self.fail(f"`patch_click()` failed, exception still raised: {exc}")
def test_root_logger_not_used_directly(self) -> None:
    """Formatting a file must not emit records through the root logger."""

    def fail(*args: Any, **kwargs: Any) -> None:
        self.fail("Record created with root logger")

    # Replace every root-logger method with an immediate test failure.
    with patch.multiple(
        logging.root,
        debug=fail,
        info=fail,
        warning=fail,
        error=fail,
        critical=fail,
        log=fail,
    ):
        ff(THIS_FILE)
def test_invalid_config_return_code(self) -> None:
    """Pointing --config at a nonexistent file is a usage error (exit 2)."""
    tmp_file = Path(black.dump_to_file())
    try:
        tmp_config = Path(black.dump_to_file())
        tmp_config.unlink()  # now --config points at a missing file
        args = ["--config", str(tmp_config), str(tmp_file)]
        self.invokeBlack(args, exit_code=2, ignore_config=False)
    finally:
        tmp_file.unlink()
def test_parse_pyproject_toml(self) -> None:
    """parse_pyproject_toml() returns typed values straight from the TOML."""
    test_toml_file = THIS_DIR / "test.toml"
    config = black.parse_pyproject_toml(str(test_toml_file))
    self.assertEqual(config["verbose"], 1)
    self.assertEqual(config["check"], "no")
    self.assertEqual(config["diff"], "y")
    self.assertEqual(config["color"], True)
    self.assertEqual(config["line_length"], 79)
    self.assertEqual(config["target_version"], ["py36", "py37", "py38"])
    self.assertEqual(config["exclude"], r"\.pyi?$")
    self.assertEqual(config["include"], r"\.py?$")
def test_read_pyproject_toml(self) -> None:
    """read_pyproject_toml() fills the click context's default_map."""
    test_toml_file = THIS_DIR / "test.toml"
    fake_ctx = FakeContext()
    black.read_pyproject_toml(fake_ctx, FakeParameter(), str(test_toml_file))
    config = fake_ctx.default_map
    # Scalars arrive stringified here, unlike parse_pyproject_toml() above.
    self.assertEqual(config["verbose"], "1")
    self.assertEqual(config["check"], "no")
    self.assertEqual(config["diff"], "y")
    self.assertEqual(config["color"], "True")
    self.assertEqual(config["line_length"], "79")
    self.assertEqual(config["target_version"], ["py36", "py37", "py38"])
    self.assertEqual(config["exclude"], r"\.pyi?$")
    self.assertEqual(config["include"], r"\.py?$")
def test_find_project_root(self) -> None:
with TemporaryDirectory() as workspace:
root = Path(workspace)
test_dir = root / "test"
test_dir.mkdir()
src_dir = root / "src"
src_dir.mkdir()
root_pyproject = root / "pyproject.toml"
root_pyproject.touch()
src_pyproject = src_dir / "pyproject.toml"
src_pyproject.touch()
src_python = src_dir / "foo.py"
src_python.touch()
self.assertEqual(
black.find_project_root((src_dir, test_dir)), root.resolve()
)
self.assertEqual(black.find_project_root((src_dir,)), src_dir.resolve())
self.assertEqual(black.find_project_root((src_python,)), src_dir.resolve())
    def test_bpo_33660_workaround(self) -> None:
        # Path("/") is not a meaningful filesystem root on Windows; skip there.
        if system() == "Windows":
            return
        # https://bugs.python.org/issue33660
        old_cwd = Path.cwd()
        try:
            root = Path("/")
            os.chdir(str(root))
            path = Path("workspace") / "project"
            report = black.Report(verbose=True)
            # Normalizing a relative path against "/" used to crash in pathlib
            # (bpo-33660); black's helper must return the posix string instead.
            normalized_path = black.normalize_path_maybe_ignore(path, root, report)
            self.assertEqual(normalized_path, "workspace/project")
        finally:
            # Always restore the process-wide cwd for subsequent tests.
            os.chdir(str(old_cwd))
# Cache black's own source so tracefunc can resolve line numbers to def lines.
with open(black.__file__, "r", encoding="utf-8") as _bf:
    black_source_lines = _bf.readlines()


def tracefunc(frame: types.FrameType, event: str, arg: Any) -> Callable:
    """Show function calls `from black/__init__.py` as they happen.

    Register this with `sys.settrace()` in a test you're debugging.
    """
    if event != "call":
        return tracefunc

    # Indent proportionally to call depth (offset by the test harness frames).
    stack = len(inspect.stack()) - 19
    stack *= 2
    filename = frame.f_code.co_filename
    lineno = frame.f_lineno
    func_sig_lineno = lineno - 1
    funcname = black_source_lines[func_sig_lineno].strip()
    while funcname.startswith("@"):
        # Skip decorator lines until the actual `def`/`class` signature line.
        func_sig_lineno += 1
        funcname = black_source_lines[func_sig_lineno].strip()
    if "black/__init__.py" in filename:
        # BUG FIX: the original wrote f"{" " * stack}..." — reusing double
        # quotes inside a double-quoted f-string is a SyntaxError before
        # Python 3.12 (PEP 701). Use single quotes for the inner literal.
        print(f"{' ' * stack}{lineno}:{funcname}")
    return tracefunc
# Allow running this file directly; name the module explicitly so unittest
# discovery works regardless of how the interpreter was invoked.
if __name__ == "__main__":
    unittest.main(module="test_black")
| #!/usr/bin/env python3
import multiprocessing
import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import replace
from functools import partial
import inspect
from io import BytesIO, TextIOWrapper
import os
from pathlib import Path
from platform import system
import regex as re
import sys
from tempfile import TemporaryDirectory
import types
from typing import (
Any,
BinaryIO,
Callable,
Dict,
Generator,
List,
Iterator,
TypeVar,
)
import unittest
from unittest.mock import patch, MagicMock
import click
from click import unstyle
from click.testing import CliRunner
import black
from black import Feature, TargetVersion
from pathspec import PathSpec
# Import other test classes
from tests.util import THIS_DIR, read_data, DETERMINISTIC_HEADER
from .test_primer import PrimerCLITests # noqa: F401
# Default mode for the whole suite; opts in to experimental long-string
# splitting so those code paths get exercised.
DEFAULT_MODE = black.FileMode(experimental_string_processing=True)
# Shorthands: `ff` formats a file in place, `fs` formats a source string.
ff = partial(black.format_file_in_place, mode=DEFAULT_MODE, fast=True)
fs = partial(black.format_str, mode=DEFAULT_MODE)
THIS_FILE = Path(__file__)
# Target versions from 3.6 upward (all support f-strings and underscores
# in numeric literals).
PY36_VERSIONS = {
    TargetVersion.PY36,
    TargetVersion.PY37,
    TargetVersion.PY38,
    TargetVersion.PY39,
}
PY36_ARGS = [f"--target-version={version.name.lower()}" for version in PY36_VERSIONS]
T = TypeVar("T")
R = TypeVar("R")
def dump_to_stderr(*output: str) -> str:
    """Join *output* chunks with newlines, framed by leading/trailing newlines."""
    joined = "\n".join(output)
    return f"\n{joined}\n"
@contextmanager
def cache_dir(exists: bool = True) -> Iterator[Path]:
    """Patch black.CACHE_DIR to a throwaway directory for the duration.

    With ``exists=False`` the yielded path names a not-yet-created
    subdirectory, exercising the cache-creation code path.
    """
    with TemporaryDirectory() as workspace:
        target = Path(workspace)
        if not exists:
            target = target / "new"
        with patch("black.CACHE_DIR", target):
            yield target
@contextmanager
def event_loop() -> Iterator[None]:
    """Install a fresh asyncio event loop for the block, closing it on exit."""
    policy = asyncio.get_event_loop_policy()
    fresh_loop = policy.new_event_loop()
    asyncio.set_event_loop(fresh_loop)
    try:
        yield
    finally:
        fresh_loop.close()
class FakeContext(click.Context):
    """A fake click Context for when calling functions that need it."""
    def __init__(self) -> None:
        # Deliberately skip click.Context.__init__ (it requires a Command);
        # only default_map is read by the code under test.
        self.default_map: Dict[str, Any] = {}
class FakeParameter(click.Parameter):
    """A fake click Parameter for when calling functions that need it."""
    def __init__(self) -> None:
        # Deliberately skip click.Parameter.__init__; no attributes are needed.
        pass
class BlackRunner(CliRunner):
    """Modify CliRunner so that stderr is not merged with stdout.
    This is a hack that can be removed once we depend on Click 7.x"""
    def __init__(self) -> None:
        # Separate byte buffers so tests can assert on stdout and stderr
        # independently.
        self.stderrbuf = BytesIO()
        self.stdoutbuf = BytesIO()
        self.stdout_bytes = b""
        self.stderr_bytes = b""
        super().__init__()
    @contextmanager
    def isolation(self, *args: Any, **kwargs: Any) -> Generator[BinaryIO, None, None]:
        with super().isolation(*args, **kwargs) as output:
            try:
                # Redirect stderr into our own buffer for the duration of the
                # isolated invocation; CliRunner only captures stdout itself.
                hold_stderr = sys.stderr
                sys.stderr = TextIOWrapper(self.stderrbuf, encoding=self.charset)
                yield output
            finally:
                # Snapshot both streams before restoring stderr so callers can
                # read .stdout_bytes / .stderr_bytes after invoke() returns.
                self.stdout_bytes = sys.stdout.buffer.getvalue() # type: ignore
                self.stderr_bytes = sys.stderr.buffer.getvalue() # type: ignore
                sys.stderr = hold_stderr
class BlackTestCase(unittest.TestCase):
    maxDiff = None  # always show full diffs on assertion failure
    _diffThreshold = 2 ** 20  # presumably raises unittest's diff-size cutoff — verify
def assertFormatEqual(self, expected: str, actual: str) -> None:
if actual != expected and not os.environ.get("SKIP_AST_PRINT"):
bdv: black.DebugVisitor[Any]
black.out("Expected tree:", fg="green")
try:
exp_node = black.lib2to3_parse(expected)
bdv = black.DebugVisitor()
list(bdv.visit(exp_node))
except Exception as ve:
black.err(str(ve))
black.out("Actual tree:", fg="red")
try:
exp_node = black.lib2to3_parse(actual)
bdv = black.DebugVisitor()
list(bdv.visit(exp_node))
except Exception as ve:
black.err(str(ve))
self.assertMultiLineEqual(expected, actual)
    def invokeBlack(
        self, args: List[str], exit_code: int = 0, ignore_config: bool = True
    ) -> None:
        """Run black's CLI with *args* and assert it exits with *exit_code*.

        With ignore_config (the default) an empty config file is injected so
        the developer's own pyproject.toml cannot influence the test.
        """
        runner = BlackRunner()
        if ignore_config:
            args = ["--verbose", "--config", str(THIS_DIR / "empty.toml"), *args]
        result = runner.invoke(black.main, args)
        self.assertEqual(
            result.exit_code,
            exit_code,
            msg=(
                f"Failed with args: {args}\n"
                f"stdout: {runner.stdout_bytes.decode()!r}\n"
                f"stderr: {runner.stderr_bytes.decode()!r}\n"
                f"exception: {result.exception}"
            ),
        )
@patch("black.dump_to_file", dump_to_stderr)
def checkSourceFile(self, name: str, mode: black.FileMode = DEFAULT_MODE) -> None:
path = THIS_DIR.parent / name
source, expected = read_data(str(path), data=False)
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, mode)
self.assertFalse(ff(path))
@patch("black.dump_to_file", dump_to_stderr)
def test_empty(self) -> None:
source = expected = ""
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
def test_empty_ff(self) -> None:
expected = ""
tmp_file = Path(black.dump_to_file())
try:
self.assertFalse(ff(tmp_file, write_back=black.WriteBack.YES))
with open(tmp_file, encoding="utf8") as f:
actual = f.read()
finally:
os.unlink(tmp_file)
self.assertFormatEqual(expected, actual)
def test_run_on_test_black(self) -> None:
self.checkSourceFile("tests/test_black.py")
def test_run_on_test_blackd(self) -> None:
self.checkSourceFile("tests/test_blackd.py")
def test_black(self) -> None:
self.checkSourceFile("src/black/__init__.py")
def test_pygram(self) -> None:
self.checkSourceFile("src/blib2to3/pygram.py")
def test_pytree(self) -> None:
self.checkSourceFile("src/blib2to3/pytree.py")
def test_conv(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/conv.py")
def test_driver(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/driver.py")
def test_grammar(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/grammar.py")
def test_literals(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/literals.py")
def test_parse(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/parse.py")
def test_pgen(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/pgen.py")
def test_tokenize(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/tokenize.py")
def test_token(self) -> None:
self.checkSourceFile("src/blib2to3/pgen2/token.py")
def test_setup(self) -> None:
self.checkSourceFile("setup.py")
    def test_piping(self) -> None:
        # "-" as the path formats stdin and writes the result to stdout.
        source, expected = read_data("src/black/__init__", data=False)
        result = BlackRunner().invoke(
            black.main,
            ["-", "--fast", f"--line-length={black.DEFAULT_LINE_LENGTH}"],
            input=BytesIO(source.encode("utf8")),
        )
        self.assertEqual(result.exit_code, 0)
        self.assertFormatEqual(expected, result.output)
        black.assert_equivalent(source, result.output)
        black.assert_stable(source, result.output, DEFAULT_MODE)
    def test_piping_diff(self) -> None:
        # Timestamps in the diff header vary per run; normalize them before
        # comparing against the stored .diff fixture.
        diff_header = re.compile(
            r"(STDIN|STDOUT)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d "
            r"\+\d\d\d\d"
        )
        source, _ = read_data("expression.py")
        expected, _ = read_data("expression.diff")
        config = THIS_DIR / "data" / "empty_pyproject.toml"
        args = [
            "-",
            "--fast",
            f"--line-length={black.DEFAULT_LINE_LENGTH}",
            "--diff",
            f"--config={config}",
        ]
        result = BlackRunner().invoke(
            black.main, args, input=BytesIO(source.encode("utf8"))
        )
        self.assertEqual(result.exit_code, 0)
        actual = diff_header.sub(DETERMINISTIC_HEADER, result.output)
        actual = actual.rstrip() + "\n" # the diff output has a trailing space
        self.assertEqual(expected, actual)
    def test_piping_diff_with_color(self) -> None:
        # Content is covered by test_piping_diff; here we only verify that
        # --color injects ANSI escape sequences.
        source, _ = read_data("expression.py")
        config = THIS_DIR / "data" / "empty_pyproject.toml"
        args = [
            "-",
            "--fast",
            f"--line-length={black.DEFAULT_LINE_LENGTH}",
            "--diff",
            "--color",
            f"--config={config}",
        ]
        result = BlackRunner().invoke(
            black.main, args, input=BytesIO(source.encode("utf8"))
        )
        actual = result.output
        # Again, the contents are checked in a different test, so only look for colors.
        self.assertIn("\033[1;37m", actual)
        self.assertIn("\033[36m", actual)
        self.assertIn("\033[32m", actual)
        self.assertIn("\033[31m", actual)
        self.assertIn("\033[0m", actual)
@patch("black.dump_to_file", dump_to_stderr)
def test_function(self) -> None:
source, expected = read_data("function")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_function2(self) -> None:
source, expected = read_data("function2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def _test_wip(self) -> None:
source, expected = read_data("wip")
sys.settrace(tracefunc)
mode = replace(
DEFAULT_MODE,
experimental_string_processing=False,
target_versions={black.TargetVersion.PY38},
)
actual = fs(source, mode=mode)
sys.settrace(None)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, black.FileMode())
@patch("black.dump_to_file", dump_to_stderr)
def test_function_trailing_comma(self) -> None:
source, expected = read_data("function_trailing_comma")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@unittest.expectedFailure
@patch("black.dump_to_file", dump_to_stderr)
def test_trailing_comma_optional_parens_stability1(self) -> None:
source, _expected = read_data("trailing_comma_optional_parens1")
actual = fs(source)
black.assert_stable(source, actual, DEFAULT_MODE)
@unittest.expectedFailure
@patch("black.dump_to_file", dump_to_stderr)
def test_trailing_comma_optional_parens_stability2(self) -> None:
source, _expected = read_data("trailing_comma_optional_parens2")
actual = fs(source)
black.assert_stable(source, actual, DEFAULT_MODE)
@unittest.expectedFailure
@patch("black.dump_to_file", dump_to_stderr)
def test_trailing_comma_optional_parens_stability3(self) -> None:
source, _expected = read_data("trailing_comma_optional_parens3")
actual = fs(source)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_expression(self) -> None:
source, expected = read_data("expression")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_pep_572(self) -> None:
source, expected = read_data("pep_572")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
if sys.version_info >= (3, 8):
black.assert_equivalent(source, actual)
def test_pep_572_version_detection(self) -> None:
source, _ = read_data("pep_572")
root = black.lib2to3_parse(source)
features = black.get_features_used(root)
self.assertIn(black.Feature.ASSIGNMENT_EXPRESSIONS, features)
versions = black.detect_target_versions(root)
self.assertIn(black.TargetVersion.PY38, versions)
def test_expression_ff(self) -> None:
source, expected = read_data("expression")
tmp_file = Path(black.dump_to_file(source))
try:
self.assertTrue(ff(tmp_file, write_back=black.WriteBack.YES))
with open(tmp_file, encoding="utf8") as f:
actual = f.read()
finally:
os.unlink(tmp_file)
self.assertFormatEqual(expected, actual)
with patch("black.dump_to_file", dump_to_stderr):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
    def test_expression_diff(self) -> None:
        source, _ = read_data("expression.py")
        expected, _ = read_data("expression.diff")
        tmp_file = Path(black.dump_to_file(source))
        # The real temp-file path and a timestamp appear in the diff header;
        # substitute a deterministic header before comparing to the fixture.
        diff_header = re.compile(
            rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d "
            r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d"
        )
        try:
            result = BlackRunner().invoke(black.main, ["--diff", str(tmp_file)])
            self.assertEqual(result.exit_code, 0)
        finally:
            os.unlink(tmp_file)
        actual = result.output
        actual = diff_header.sub(DETERMINISTIC_HEADER, actual)
        actual = actual.rstrip() + "\n" # the diff output has a trailing space
        if expected != actual:
            # Dump the actual diff so a legitimate fixture update is easy.
            dump = black.dump_to_file(actual)
            msg = (
                "Expected diff isn't equal to the actual. If you made changes to"
                " expression.py and this is an anticipated difference, overwrite"
                f" tests/data/expression.diff with {dump}"
            )
            self.assertEqual(expected, actual, msg)
    def test_expression_diff_with_color(self) -> None:
        source, _ = read_data("expression.py")
        expected, _ = read_data("expression.diff")
        tmp_file = Path(black.dump_to_file(source))
        try:
            result = BlackRunner().invoke(
                black.main, ["--diff", "--color", str(tmp_file)]
            )
        finally:
            os.unlink(tmp_file)
        actual = result.output
        # We check the contents of the diff in `test_expression_diff`. All
        # we need to check here is that color codes exist in the result.
        self.assertIn("\033[1;37m", actual)
        self.assertIn("\033[36m", actual)
        self.assertIn("\033[32m", actual)
        self.assertIn("\033[31m", actual)
        self.assertIn("\033[0m", actual)
@patch("black.dump_to_file", dump_to_stderr)
def test_fstring(self) -> None:
source, expected = read_data("fstring")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_pep_570(self) -> None:
source, expected = read_data("pep_570")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
if sys.version_info >= (3, 8):
black.assert_equivalent(source, actual)
def test_detect_pos_only_arguments(self) -> None:
source, _ = read_data("pep_570")
root = black.lib2to3_parse(source)
features = black.get_features_used(root)
self.assertIn(black.Feature.POS_ONLY_ARGUMENTS, features)
versions = black.detect_target_versions(root)
self.assertIn(black.TargetVersion.PY38, versions)
@patch("black.dump_to_file", dump_to_stderr)
def test_string_quotes(self) -> None:
source, expected = read_data("string_quotes")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
mode = replace(DEFAULT_MODE, string_normalization=False)
not_normalized = fs(source, mode=mode)
self.assertFormatEqual(source.replace("\\\n", ""), not_normalized)
black.assert_equivalent(source, not_normalized)
black.assert_stable(source, not_normalized, mode=mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_docstring(self) -> None:
source, expected = read_data("docstring")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_docstring_no_string_normalization(self) -> None:
"""Like test_docstring but with string normalization off."""
source, expected = read_data("docstring_no_string_normalization")
mode = replace(DEFAULT_MODE, string_normalization=False)
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, mode)
def test_long_strings(self) -> None:
"""Tests for splitting long strings."""
source, expected = read_data("long_strings")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
    def test_long_strings_flag_disabled(self) -> None:
        """Tests for turning off the string processing logic."""
        source, expected = read_data("long_strings_flag_disabled")
        mode = replace(DEFAULT_MODE, experimental_string_processing=False)
        actual = fs(source, mode=mode)
        self.assertFormatEqual(expected, actual)
        # NOTE(review): stability is asserted from `expected`, not `source`,
        # unlike the sibling tests — presumably intentional, but confirm it
        # shouldn't be assert_stable(source, actual, mode).
        black.assert_stable(expected, actual, mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_long_strings__edge_case(self) -> None:
"""Edge-case tests for splitting long strings."""
source, expected = read_data("long_strings__edge_case")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_long_strings__regression(self) -> None:
"""Regression tests for splitting long strings."""
source, expected = read_data("long_strings__regression")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_slices(self) -> None:
source, expected = read_data("slices")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_percent_precedence(self) -> None:
source, expected = read_data("percent_precedence")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments(self) -> None:
source, expected = read_data("comments")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments2(self) -> None:
source, expected = read_data("comments2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments3(self) -> None:
source, expected = read_data("comments3")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments4(self) -> None:
source, expected = read_data("comments4")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments5(self) -> None:
source, expected = read_data("comments5")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments6(self) -> None:
source, expected = read_data("comments6")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments7(self) -> None:
source, expected = read_data("comments7")
mode = replace(DEFAULT_MODE, target_versions={black.TargetVersion.PY38})
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_comment_after_escaped_newline(self) -> None:
source, expected = read_data("comment_after_escaped_newline")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_cantfit(self) -> None:
source, expected = read_data("cantfit")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_import_spacing(self) -> None:
source, expected = read_data("import_spacing")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_composition(self) -> None:
source, expected = read_data("composition")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_composition_no_trailing_comma(self) -> None:
source, expected = read_data("composition_no_trailing_comma")
mode = replace(DEFAULT_MODE, target_versions={black.TargetVersion.PY38})
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_empty_lines(self) -> None:
source, expected = read_data("empty_lines")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_remove_parens(self) -> None:
source, expected = read_data("remove_parens")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_string_prefixes(self) -> None:
source, expected = read_data("string_prefixes")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_numeric_literals(self) -> None:
source, expected = read_data("numeric_literals")
mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_numeric_literals_ignoring_underscores(self) -> None:
source, expected = read_data("numeric_literals_skip_underscores")
mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_numeric_literals_py2(self) -> None:
source, expected = read_data("numeric_literals_py2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_python2(self) -> None:
source, expected = read_data("python2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_python2_print_function(self) -> None:
source, expected = read_data("python2_print_function")
mode = replace(DEFAULT_MODE, target_versions={TargetVersion.PY27})
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_python2_unicode_literals(self) -> None:
source, expected = read_data("python2_unicode_literals")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_stub(self) -> None:
mode = replace(DEFAULT_MODE, is_pyi=True)
source, expected = read_data("stub.pyi")
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_stable(source, actual, mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_async_as_identifier(self) -> None:
source_path = (THIS_DIR / "data" / "async_as_identifier.py").resolve()
source, expected = read_data("async_as_identifier")
actual = fs(source)
self.assertFormatEqual(expected, actual)
major, minor = sys.version_info[:2]
if major < 3 or (major <= 3 and minor < 7):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
# ensure black can parse this when the target is 3.6
self.invokeBlack([str(source_path), "--target-version", "py36"])
# but not on 3.7, because async/await is no longer an identifier
self.invokeBlack([str(source_path), "--target-version", "py37"], exit_code=123)
@patch("black.dump_to_file", dump_to_stderr)
def test_python37(self) -> None:
source_path = (THIS_DIR / "data" / "python37.py").resolve()
source, expected = read_data("python37")
actual = fs(source)
self.assertFormatEqual(expected, actual)
major, minor = sys.version_info[:2]
if major > 3 or (major == 3 and minor >= 7):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
# ensure black can parse this when the target is 3.7
self.invokeBlack([str(source_path), "--target-version", "py37"])
# but not on 3.6, because we use async as a reserved keyword
self.invokeBlack([str(source_path), "--target-version", "py36"], exit_code=123)
@patch("black.dump_to_file", dump_to_stderr)
def test_python38(self) -> None:
source, expected = read_data("python38")
actual = fs(source)
self.assertFormatEqual(expected, actual)
major, minor = sys.version_info[:2]
if major > 3 or (major == 3 and minor >= 8):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_python39(self) -> None:
source, expected = read_data("python39")
actual = fs(source)
self.assertFormatEqual(expected, actual)
major, minor = sys.version_info[:2]
if major > 3 or (major == 3 and minor >= 9):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff(self) -> None:
source, expected = read_data("fmtonoff")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff2(self) -> None:
source, expected = read_data("fmtonoff2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff3(self) -> None:
source, expected = read_data("fmtonoff3")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff4(self) -> None:
source, expected = read_data("fmtonoff4")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_remove_empty_parentheses_after_class(self) -> None:
source, expected = read_data("class_blank_parentheses")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_new_line_between_class_and_code(self) -> None:
source, expected = read_data("class_methods_new_line")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_bracket_match(self) -> None:
source, expected = read_data("bracketmatch")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_tuple_assign(self) -> None:
source, expected = read_data("tupleassign")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
@patch("black.dump_to_file", dump_to_stderr)
def test_beginning_backslash(self) -> None:
source, expected = read_data("beginning_backslash")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
    def test_tab_comment_indentation(self) -> None:
        # Comments attached to tab-indented blocks must land at the same
        # (space-converted) indentation level as the surrounding statements.
        contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t# comment\n\tpass\n"
        contents_spc = "if 1:\n    if 2:\n        pass\n    # comment\n    pass\n"
        self.assertFormatEqual(contents_spc, fs(contents_spc))
        self.assertFormatEqual(contents_spc, fs(contents_tab))
        contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t\t# comment\n\tpass\n"
        contents_spc = "if 1:\n    if 2:\n        pass\n        # comment\n    pass\n"
        self.assertFormatEqual(contents_spc, fs(contents_spc))
        self.assertFormatEqual(contents_spc, fs(contents_tab))
        # mixed tabs and spaces (valid Python 2 code)
        contents_tab = "if 1:\n        if 2:\n\t\tpass\n\t# comment\n        pass\n"
        contents_spc = "if 1:\n        if 2:\n                pass\n        # comment\n        pass\n"
        self.assertFormatEqual(contents_spc, fs(contents_spc))
        self.assertFormatEqual(contents_spc, fs(contents_tab))
        contents_tab = "if 1:\n        if 2:\n\t\tpass\n\t\t# comment\n        pass\n"
        contents_spc = "if 1:\n        if 2:\n                pass\n                # comment\n        pass\n"
        self.assertFormatEqual(contents_spc, fs(contents_spc))
        self.assertFormatEqual(contents_spc, fs(contents_tab))
    def test_report_verbose(self) -> None:
        """Report(verbose=True) logs one line per file event and tallies results.

        Walks the report through done/failed/path_ignored events, asserting
        after each step the exact console line emitted, the human-readable
        summary, and the process return code.
        """
        report = black.Report(verbose=True)
        out_lines = []
        err_lines = []
        # Capture black's console writes instead of printing them.
        def out(msg: str, **kwargs: Any) -> None:
            out_lines.append(msg)
        def err(msg: str, **kwargs: Any) -> None:
            err_lines.append(msg)
        with patch("black.out", out), patch("black.err", err):
            report.done(Path("f1"), black.Changed.NO)
            self.assertEqual(len(out_lines), 1)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(out_lines[-1], "f1 already well formatted, good job.")
            self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
            self.assertEqual(report.return_code, 0)
            report.done(Path("f2"), black.Changed.YES)
            self.assertEqual(len(out_lines), 2)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(out_lines[-1], "reformatted f2")
            self.assertEqual(
                unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
            )
            # CACHED counts as unchanged but gets its own verbose message.
            report.done(Path("f3"), black.Changed.CACHED)
            self.assertEqual(len(out_lines), 3)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(
                out_lines[-1], "f3 wasn't modified on disk since last run."
            )
            self.assertEqual(
                unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
            )
            self.assertEqual(report.return_code, 0)
            # In --check mode, any reformatted file means exit code 1.
            report.check = True
            self.assertEqual(report.return_code, 1)
            report.check = False
            # A failure goes to stderr and forces exit code 123.
            report.failed(Path("e1"), "boom")
            self.assertEqual(len(out_lines), 3)
            self.assertEqual(len(err_lines), 1)
            self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
            self.assertEqual(
                unstyle(str(report)),
                "1 file reformatted, 2 files left unchanged, 1 file failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.done(Path("f3"), black.Changed.YES)
            self.assertEqual(len(out_lines), 4)
            self.assertEqual(len(err_lines), 1)
            self.assertEqual(out_lines[-1], "reformatted f3")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 1 file failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.failed(Path("e2"), "boom")
            self.assertEqual(len(out_lines), 4)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            # Ignored paths are logged verbosely but never counted in the summary.
            report.path_ignored(Path("wat"), "no match")
            self.assertEqual(len(out_lines), 5)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(out_lines[-1], "wat ignored: no match")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.done(Path("f4"), black.Changed.NO)
            self.assertEqual(len(out_lines), 6)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(out_lines[-1], "f4 already well formatted, good job.")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 3 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            # --check and --diff both switch the summary to conditional wording.
            report.check = True
            self.assertEqual(
                unstyle(str(report)),
                "2 files would be reformatted, 3 files would be left unchanged, 2 files"
                " would fail to reformat.",
            )
            report.check = False
            report.diff = True
            self.assertEqual(
                unstyle(str(report)),
                "2 files would be reformatted, 3 files would be left unchanged, 2 files"
                " would fail to reformat.",
            )
    def test_report_quiet(self) -> None:
        """Report(quiet=True) emits only errors, yet the summary stays complete.

        Mirrors test_report_verbose's event sequence, but asserts that stdout
        stays empty throughout while stderr, summary, and return code behave
        the same.
        """
        report = black.Report(quiet=True)
        out_lines = []
        err_lines = []
        # Capture black's console writes instead of printing them.
        def out(msg: str, **kwargs: Any) -> None:
            out_lines.append(msg)
        def err(msg: str, **kwargs: Any) -> None:
            err_lines.append(msg)
        with patch("black.out", out), patch("black.err", err):
            report.done(Path("f1"), black.Changed.NO)
            self.assertEqual(len(out_lines), 0)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
            self.assertEqual(report.return_code, 0)
            report.done(Path("f2"), black.Changed.YES)
            self.assertEqual(len(out_lines), 0)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(
                unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
            )
            report.done(Path("f3"), black.Changed.CACHED)
            self.assertEqual(len(out_lines), 0)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(
                unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
            )
            self.assertEqual(report.return_code, 0)
            # In --check mode, any reformatted file means exit code 1.
            report.check = True
            self.assertEqual(report.return_code, 1)
            report.check = False
            # Failures still reach stderr even in quiet mode.
            report.failed(Path("e1"), "boom")
            self.assertEqual(len(out_lines), 0)
            self.assertEqual(len(err_lines), 1)
            self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
            self.assertEqual(
                unstyle(str(report)),
                "1 file reformatted, 2 files left unchanged, 1 file failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.done(Path("f3"), black.Changed.YES)
            self.assertEqual(len(out_lines), 0)
            self.assertEqual(len(err_lines), 1)
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 1 file failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.failed(Path("e2"), "boom")
            self.assertEqual(len(out_lines), 0)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            # Ignored paths are silent in quiet mode and never counted.
            report.path_ignored(Path("wat"), "no match")
            self.assertEqual(len(out_lines), 0)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.done(Path("f4"), black.Changed.NO)
            self.assertEqual(len(out_lines), 0)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 3 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            # --check and --diff both switch the summary to conditional wording.
            report.check = True
            self.assertEqual(
                unstyle(str(report)),
                "2 files would be reformatted, 3 files would be left unchanged, 2 files"
                " would fail to reformat.",
            )
            report.check = False
            report.diff = True
            self.assertEqual(
                unstyle(str(report)),
                "2 files would be reformatted, 3 files would be left unchanged, 2 files"
                " would fail to reformat.",
            )
    def test_report_normal(self) -> None:
        """A default Report logs reformats and errors but not unchanged files.

        Mirrors test_report_verbose's event sequence; only Changed.YES events
        and failures produce console output at the default verbosity.
        """
        report = black.Report()
        out_lines = []
        err_lines = []
        # Capture black's console writes instead of printing them.
        def out(msg: str, **kwargs: Any) -> None:
            out_lines.append(msg)
        def err(msg: str, **kwargs: Any) -> None:
            err_lines.append(msg)
        with patch("black.out", out), patch("black.err", err):
            # Unchanged files produce no output at normal verbosity.
            report.done(Path("f1"), black.Changed.NO)
            self.assertEqual(len(out_lines), 0)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
            self.assertEqual(report.return_code, 0)
            report.done(Path("f2"), black.Changed.YES)
            self.assertEqual(len(out_lines), 1)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(out_lines[-1], "reformatted f2")
            self.assertEqual(
                unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
            )
            # CACHED is silent too; the last logged line is still f2's.
            report.done(Path("f3"), black.Changed.CACHED)
            self.assertEqual(len(out_lines), 1)
            self.assertEqual(len(err_lines), 0)
            self.assertEqual(out_lines[-1], "reformatted f2")
            self.assertEqual(
                unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
            )
            self.assertEqual(report.return_code, 0)
            # In --check mode, any reformatted file means exit code 1.
            report.check = True
            self.assertEqual(report.return_code, 1)
            report.check = False
            report.failed(Path("e1"), "boom")
            self.assertEqual(len(out_lines), 1)
            self.assertEqual(len(err_lines), 1)
            self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
            self.assertEqual(
                unstyle(str(report)),
                "1 file reformatted, 2 files left unchanged, 1 file failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.done(Path("f3"), black.Changed.YES)
            self.assertEqual(len(out_lines), 2)
            self.assertEqual(len(err_lines), 1)
            self.assertEqual(out_lines[-1], "reformatted f3")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 1 file failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.failed(Path("e2"), "boom")
            self.assertEqual(len(out_lines), 2)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            # Ignored paths are silent at normal verbosity and never counted.
            report.path_ignored(Path("wat"), "no match")
            self.assertEqual(len(out_lines), 2)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 2 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            report.done(Path("f4"), black.Changed.NO)
            self.assertEqual(len(out_lines), 2)
            self.assertEqual(len(err_lines), 2)
            self.assertEqual(
                unstyle(str(report)),
                "2 files reformatted, 3 files left unchanged, 2 files failed to"
                " reformat.",
            )
            self.assertEqual(report.return_code, 123)
            # --check and --diff both switch the summary to conditional wording.
            report.check = True
            self.assertEqual(
                unstyle(str(report)),
                "2 files would be reformatted, 3 files would be left unchanged, 2 files"
                " would fail to reformat.",
            )
            report.check = False
            report.diff = True
            self.assertEqual(
                unstyle(str(report)),
                "2 files would be reformatted, 3 files would be left unchanged, 2 files"
                " would fail to reformat.",
            )
    def test_lib2to3_parse(self) -> None:
        """lib2to3_parse accepts or rejects code based on the target versions.

        Note: calls deliberately vary between omitting the target_versions
        argument (default grammar selection) and passing explicit sets; that
        distinction is part of what is being tested.
        """
        with self.assertRaises(black.InvalidInput):
            black.lib2to3_parse("invalid syntax")
        # Valid under both Python 2 and Python 3 grammars.
        straddling = "x + y"
        black.lib2to3_parse(straddling)
        black.lib2to3_parse(straddling, {TargetVersion.PY27})
        black.lib2to3_parse(straddling, {TargetVersion.PY36})
        black.lib2to3_parse(straddling, {TargetVersion.PY27, TargetVersion.PY36})
        # Python 2-only syntax is rejected whenever PY36 is among the targets.
        py2_only = "print x"
        black.lib2to3_parse(py2_only)
        black.lib2to3_parse(py2_only, {TargetVersion.PY27})
        with self.assertRaises(black.InvalidInput):
            black.lib2to3_parse(py2_only, {TargetVersion.PY36})
        with self.assertRaises(black.InvalidInput):
            black.lib2to3_parse(py2_only, {TargetVersion.PY27, TargetVersion.PY36})
        # Python 3-only syntax is rejected when restricted to PY27 alone.
        py3_only = "exec(x, end=y)"
        black.lib2to3_parse(py3_only)
        with self.assertRaises(black.InvalidInput):
            black.lib2to3_parse(py3_only, {TargetVersion.PY27})
        black.lib2to3_parse(py3_only, {TargetVersion.PY36})
        black.lib2to3_parse(py3_only, {TargetVersion.PY27, TargetVersion.PY36})
def test_get_features_used_decorator(self) -> None:
# Test the feature detection of new decorator syntax
# since this makes some test cases of test_get_features_used()
# fails if it fails, this is tested first so that a useful case
# is identified
simples, relaxed = read_data("decorators")
# skip explanation comments at the top of the file
for simple_test in simples.split("##")[1:]:
node = black.lib2to3_parse(simple_test)
decorator = str(node.children[0].children[0]).strip()
self.assertNotIn(
Feature.RELAXED_DECORATORS,
black.get_features_used(node),
msg=(
f"decorator '{decorator}' follows python<=3.8 syntax"
"but is detected as 3.9+"
# f"The full node is\n{node!r}"
),
)
# skip the '# output' comment at the top of the output part
for relaxed_test in relaxed.split("##")[1:]:
node = black.lib2to3_parse(relaxed_test)
decorator = str(node.children[0].children[0]).strip()
self.assertIn(
Feature.RELAXED_DECORATORS,
black.get_features_used(node),
msg=(
f"decorator '{decorator}' uses python3.9+ syntax"
"but is detected as python<=3.8"
# f"The full node is\n{node!r}"
),
)
def test_get_features_used(self) -> None:
node = black.lib2to3_parse("def f(*, arg): ...\n")
self.assertEqual(black.get_features_used(node), set())
node = black.lib2to3_parse("def f(*, arg,): ...\n")
self.assertEqual(black.get_features_used(node), {Feature.TRAILING_COMMA_IN_DEF})
node = black.lib2to3_parse("f(*arg,)\n")
self.assertEqual(
black.get_features_used(node), {Feature.TRAILING_COMMA_IN_CALL}
)
node = black.lib2to3_parse("def f(*, arg): f'string'\n")
self.assertEqual(black.get_features_used(node), {Feature.F_STRINGS})
node = black.lib2to3_parse("123_456\n")
self.assertEqual(black.get_features_used(node), {Feature.NUMERIC_UNDERSCORES})
node = black.lib2to3_parse("123456\n")
self.assertEqual(black.get_features_used(node), set())
source, expected = read_data("function")
node = black.lib2to3_parse(source)
expected_features = {
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.F_STRINGS,
}
self.assertEqual(black.get_features_used(node), expected_features)
node = black.lib2to3_parse(expected)
self.assertEqual(black.get_features_used(node), expected_features)
source, expected = read_data("expression")
node = black.lib2to3_parse(source)
self.assertEqual(black.get_features_used(node), set())
node = black.lib2to3_parse(expected)
self.assertEqual(black.get_features_used(node), set())
def test_get_future_imports(self) -> None:
node = black.lib2to3_parse("\n")
self.assertEqual(set(), black.get_future_imports(node))
node = black.lib2to3_parse("from __future__ import black\n")
self.assertEqual({"black"}, black.get_future_imports(node))
node = black.lib2to3_parse("from __future__ import multiple, imports\n")
self.assertEqual({"multiple", "imports"}, black.get_future_imports(node))
node = black.lib2to3_parse("from __future__ import (parenthesized, imports)\n")
self.assertEqual({"parenthesized", "imports"}, black.get_future_imports(node))
node = black.lib2to3_parse(
"from __future__ import multiple\nfrom __future__ import imports\n"
)
self.assertEqual({"multiple", "imports"}, black.get_future_imports(node))
node = black.lib2to3_parse("# comment\nfrom __future__ import black\n")
self.assertEqual({"black"}, black.get_future_imports(node))
node = black.lib2to3_parse('"""docstring"""\nfrom __future__ import black\n')
self.assertEqual({"black"}, black.get_future_imports(node))
node = black.lib2to3_parse("some(other, code)\nfrom __future__ import black\n")
self.assertEqual(set(), black.get_future_imports(node))
node = black.lib2to3_parse("from some.module import black\n")
self.assertEqual(set(), black.get_future_imports(node))
node = black.lib2to3_parse(
"from __future__ import unicode_literals as _unicode_literals"
)
self.assertEqual({"unicode_literals"}, black.get_future_imports(node))
node = black.lib2to3_parse(
"from __future__ import unicode_literals as _lol, print"
)
self.assertEqual({"unicode_literals", "print"}, black.get_future_imports(node))
    def test_debug_visitor(self) -> None:
        """DebugVisitor.show() must print the AST exactly as recorded in the fixture."""
        source, _ = read_data("debug_visitor.py")
        expected, _ = read_data("debug_visitor.out")
        out_lines = []
        err_lines = []
        # Capture black's console writes instead of printing them.
        def out(msg: str, **kwargs: Any) -> None:
            out_lines.append(msg)
        def err(msg: str, **kwargs: Any) -> None:
            err_lines.append(msg)
        with patch("black.out", out), patch("black.err", err):
            black.DebugVisitor.show(source)
        actual = "\n".join(out_lines) + "\n"
        log_name = ""
        # On mismatch, dump the actual output to a log file so the failure
        # message can point at it for inspection.
        if expected != actual:
            log_name = black.dump_to_file(*out_lines)
        self.assertEqual(
            expected,
            actual,
            f"AST print out is different. Actual version dumped to {log_name}",
        )
def test_format_file_contents(self) -> None:
empty = ""
mode = DEFAULT_MODE
with self.assertRaises(black.NothingChanged):
black.format_file_contents(empty, mode=mode, fast=False)
just_nl = "\n"
with self.assertRaises(black.NothingChanged):
black.format_file_contents(just_nl, mode=mode, fast=False)
same = "j = [1, 2, 3]\n"
with self.assertRaises(black.NothingChanged):
black.format_file_contents(same, mode=mode, fast=False)
different = "j = [1,2,3]"
expected = same
actual = black.format_file_contents(different, mode=mode, fast=False)
self.assertEqual(expected, actual)
invalid = "return if you can"
with self.assertRaises(black.InvalidInput) as e:
black.format_file_contents(invalid, mode=mode, fast=False)
self.assertEqual(str(e.exception), "Cannot parse: 1:7: return if you can")
def test_endmarker(self) -> None:
n = black.lib2to3_parse("\n")
self.assertEqual(n.type, black.syms.file_input)
self.assertEqual(len(n.children), 1)
self.assertEqual(n.children[0].type, black.token.ENDMARKER)
@unittest.skipIf(os.environ.get("SKIP_AST_PRINT"), "user set SKIP_AST_PRINT")
def test_assertFormatEqual(self) -> None:
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.out", out), patch("black.err", err):
with self.assertRaises(AssertionError):
self.assertFormatEqual("j = [1, 2, 3]", "j = [1, 2, 3,]")
out_str = "".join(out_lines)
self.assertTrue("Expected tree:" in out_str)
self.assertTrue("Actual tree:" in out_str)
self.assertEqual("".join(err_lines), "")
def test_cache_broken_file(self) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace:
cache_file = black.get_cache_file(mode)
with cache_file.open("w") as fobj:
fobj.write("this is not a pickle")
self.assertEqual(black.read_cache(mode), {})
src = (workspace / "test.py").resolve()
with src.open("w") as fobj:
fobj.write("print('hello')")
self.invokeBlack([str(src)])
cache = black.read_cache(mode)
self.assertIn(src, cache)
def test_cache_single_file_already_cached(self) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
with src.open("w") as fobj:
fobj.write("print('hello')")
black.write_cache({}, [src], mode)
self.invokeBlack([str(src)])
with src.open("r") as fobj:
self.assertEqual(fobj.read(), "print('hello')")
@event_loop()
def test_cache_multiple_files(self) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace, patch(
"black.ProcessPoolExecutor", new=ThreadPoolExecutor
):
one = (workspace / "one.py").resolve()
with one.open("w") as fobj:
fobj.write("print('hello')")
two = (workspace / "two.py").resolve()
with two.open("w") as fobj:
fobj.write("print('hello')")
black.write_cache({}, [one], mode)
self.invokeBlack([str(workspace)])
with one.open("r") as fobj:
self.assertEqual(fobj.read(), "print('hello')")
with two.open("r") as fobj:
self.assertEqual(fobj.read(), 'print("hello")\n')
cache = black.read_cache(mode)
self.assertIn(one, cache)
self.assertIn(two, cache)
def test_no_cache_when_writeback_diff(self) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
with src.open("w") as fobj:
fobj.write("print('hello')")
with patch("black.read_cache") as read_cache, patch(
"black.write_cache"
) as write_cache:
self.invokeBlack([str(src), "--diff"])
cache_file = black.get_cache_file(mode)
self.assertFalse(cache_file.exists())
write_cache.assert_not_called()
read_cache.assert_not_called()
def test_no_cache_when_writeback_color_diff(self) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
with src.open("w") as fobj:
fobj.write("print('hello')")
with patch("black.read_cache") as read_cache, patch(
"black.write_cache"
) as write_cache:
self.invokeBlack([str(src), "--diff", "--color"])
cache_file = black.get_cache_file(mode)
self.assertFalse(cache_file.exists())
write_cache.assert_not_called()
read_cache.assert_not_called()
@event_loop()
def test_output_locking_when_writeback_diff(self) -> None:
with cache_dir() as workspace:
for tag in range(0, 4):
src = (workspace / f"test{tag}.py").resolve()
with src.open("w") as fobj:
fobj.write("print('hello')")
with patch("black.Manager", wraps=multiprocessing.Manager) as mgr:
self.invokeBlack(["--diff", str(workspace)], exit_code=0)
# this isn't quite doing what we want, but if it _isn't_
# called then we cannot be using the lock it provides
mgr.assert_called()
@event_loop()
def test_output_locking_when_writeback_color_diff(self) -> None:
with cache_dir() as workspace:
for tag in range(0, 4):
src = (workspace / f"test{tag}.py").resolve()
with src.open("w") as fobj:
fobj.write("print('hello')")
with patch("black.Manager", wraps=multiprocessing.Manager) as mgr:
self.invokeBlack(["--diff", "--color", str(workspace)], exit_code=0)
# this isn't quite doing what we want, but if it _isn't_
# called then we cannot be using the lock it provides
mgr.assert_called()
def test_no_cache_when_stdin(self) -> None:
mode = DEFAULT_MODE
with cache_dir():
result = CliRunner().invoke(
black.main, ["-"], input=BytesIO(b"print('hello')")
)
self.assertEqual(result.exit_code, 0)
cache_file = black.get_cache_file(mode)
self.assertFalse(cache_file.exists())
def test_read_cache_no_cachefile(self) -> None:
mode = DEFAULT_MODE
with cache_dir():
self.assertEqual(black.read_cache(mode), {})
def test_write_cache_read_cache(self) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
src.touch()
black.write_cache({}, [src], mode)
cache = black.read_cache(mode)
self.assertIn(src, cache)
self.assertEqual(cache[src], black.get_cache_info(src))
def test_filter_cached(self) -> None:
with TemporaryDirectory() as workspace:
path = Path(workspace)
uncached = (path / "uncached").resolve()
cached = (path / "cached").resolve()
cached_but_changed = (path / "changed").resolve()
uncached.touch()
cached.touch()
cached_but_changed.touch()
cache = {cached: black.get_cache_info(cached), cached_but_changed: (0.0, 0)}
todo, done = black.filter_cached(
cache, {uncached, cached, cached_but_changed}
)
self.assertEqual(todo, {uncached, cached_but_changed})
self.assertEqual(done, {cached})
def test_write_cache_creates_directory_if_needed(self) -> None:
mode = DEFAULT_MODE
with cache_dir(exists=False) as workspace:
self.assertFalse(workspace.exists())
black.write_cache({}, [], mode)
self.assertTrue(workspace.exists())
@event_loop()
def test_failed_formatting_does_not_get_cached(self) -> None:
mode = DEFAULT_MODE
with cache_dir() as workspace, patch(
"black.ProcessPoolExecutor", new=ThreadPoolExecutor
):
failing = (workspace / "failing.py").resolve()
with failing.open("w") as fobj:
fobj.write("not actually python")
clean = (workspace / "clean.py").resolve()
with clean.open("w") as fobj:
fobj.write('print("hello")\n')
self.invokeBlack([str(workspace)], exit_code=123)
cache = black.read_cache(mode)
self.assertNotIn(failing, cache)
self.assertIn(clean, cache)
def test_write_cache_write_fail(self) -> None:
mode = DEFAULT_MODE
with cache_dir(), patch.object(Path, "open") as mock:
mock.side_effect = OSError
black.write_cache({}, [], mode)
@event_loop()
@patch("black.ProcessPoolExecutor", MagicMock(side_effect=OSError))
def test_works_in_mono_process_only_environment(self) -> None:
with cache_dir() as workspace:
for f in [
(workspace / "one.py").resolve(),
(workspace / "two.py").resolve(),
]:
f.write_text('print("hello")\n')
self.invokeBlack([str(workspace)])
@event_loop()
def test_check_diff_use_together(self) -> None:
with cache_dir():
# Files which will be reformatted.
src1 = (THIS_DIR / "data" / "string_quotes.py").resolve()
self.invokeBlack([str(src1), "--diff", "--check"], exit_code=1)
# Files which will not be reformatted.
src2 = (THIS_DIR / "data" / "composition.py").resolve()
self.invokeBlack([str(src2), "--diff", "--check"])
# Multi file command.
self.invokeBlack([str(src1), str(src2), "--diff", "--check"], exit_code=1)
def test_no_files(self) -> None:
with cache_dir():
# Without an argument, black exits with error code 0.
self.invokeBlack([])
def test_broken_symlink(self) -> None:
with cache_dir() as workspace:
symlink = workspace / "broken_link.py"
try:
symlink.symlink_to("nonexistent.py")
except OSError as e:
self.skipTest(f"Can't create symlinks: {e}")
self.invokeBlack([str(workspace.resolve())])
def test_read_cache_line_lengths(self) -> None:
mode = DEFAULT_MODE
short_mode = replace(DEFAULT_MODE, line_length=1)
with cache_dir() as workspace:
path = (workspace / "file.py").resolve()
path.touch()
black.write_cache({}, [path], mode)
one = black.read_cache(mode)
self.assertIn(path, one)
two = black.read_cache(short_mode)
self.assertNotIn(path, two)
def test_tricky_unicode_symbols(self) -> None:
source, expected = read_data("tricky_unicode_symbols")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
def test_single_file_force_pyi(self) -> None:
pyi_mode = replace(DEFAULT_MODE, is_pyi=True)
contents, expected = read_data("force_pyi")
with cache_dir() as workspace:
path = (workspace / "file.py").resolve()
with open(path, "w") as fh:
fh.write(contents)
self.invokeBlack([str(path), "--pyi"])
with open(path, "r") as fh:
actual = fh.read()
# verify cache with --pyi is separate
pyi_cache = black.read_cache(pyi_mode)
self.assertIn(path, pyi_cache)
normal_cache = black.read_cache(DEFAULT_MODE)
self.assertNotIn(path, normal_cache)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(contents, actual)
black.assert_stable(contents, actual, pyi_mode)
@event_loop()
def test_multi_file_force_pyi(self) -> None:
reg_mode = DEFAULT_MODE
pyi_mode = replace(DEFAULT_MODE, is_pyi=True)
contents, expected = read_data("force_pyi")
with cache_dir() as workspace:
paths = [
(workspace / "file1.py").resolve(),
(workspace / "file2.py").resolve(),
]
for path in paths:
with open(path, "w") as fh:
fh.write(contents)
self.invokeBlack([str(p) for p in paths] + ["--pyi"])
for path in paths:
with open(path, "r") as fh:
actual = fh.read()
self.assertEqual(actual, expected)
# verify cache with --pyi is separate
pyi_cache = black.read_cache(pyi_mode)
normal_cache = black.read_cache(reg_mode)
for path in paths:
self.assertIn(path, pyi_cache)
self.assertNotIn(path, normal_cache)
def test_pipe_force_pyi(self) -> None:
source, expected = read_data("force_pyi")
result = CliRunner().invoke(
black.main, ["-", "-q", "--pyi"], input=BytesIO(source.encode("utf8"))
)
self.assertEqual(result.exit_code, 0)
actual = result.output
self.assertFormatEqual(actual, expected)
def test_single_file_force_py36(self) -> None:
reg_mode = DEFAULT_MODE
py36_mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
source, expected = read_data("force_py36")
with cache_dir() as workspace:
path = (workspace / "file.py").resolve()
with open(path, "w") as fh:
fh.write(source)
self.invokeBlack([str(path), *PY36_ARGS])
with open(path, "r") as fh:
actual = fh.read()
# verify cache with --target-version is separate
py36_cache = black.read_cache(py36_mode)
self.assertIn(path, py36_cache)
normal_cache = black.read_cache(reg_mode)
self.assertNotIn(path, normal_cache)
self.assertEqual(actual, expected)
@event_loop()
def test_multi_file_force_py36(self) -> None:
reg_mode = DEFAULT_MODE
py36_mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
source, expected = read_data("force_py36")
with cache_dir() as workspace:
paths = [
(workspace / "file1.py").resolve(),
(workspace / "file2.py").resolve(),
]
for path in paths:
with open(path, "w") as fh:
fh.write(source)
self.invokeBlack([str(p) for p in paths] + PY36_ARGS)
for path in paths:
with open(path, "r") as fh:
actual = fh.read()
self.assertEqual(actual, expected)
# verify cache with --target-version is separate
pyi_cache = black.read_cache(py36_mode)
normal_cache = black.read_cache(reg_mode)
for path in paths:
self.assertIn(path, pyi_cache)
self.assertNotIn(path, normal_cache)
def test_collections(self) -> None:
source, expected = read_data("collections")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
def test_pipe_force_py36(self) -> None:
source, expected = read_data("force_py36")
result = CliRunner().invoke(
black.main,
["-", "-q", "--target-version=py36"],
input=BytesIO(source.encode("utf8")),
)
self.assertEqual(result.exit_code, 0)
actual = result.output
self.assertFormatEqual(actual, expected)
def test_include_exclude(self) -> None:
path = THIS_DIR / "data" / "include_exclude_tests"
include = re.compile(r"\.pyi?$")
exclude = re.compile(r"/exclude/|/\.definitely_exclude/")
report = black.Report()
gitignore = PathSpec.from_lines("gitwildmatch", [])
sources: List[Path] = []
expected = [
Path(path / "b/dont_exclude/a.py"),
Path(path / "b/dont_exclude/a.pyi"),
]
this_abs = THIS_DIR.resolve()
sources.extend(
black.gen_python_files(
path.iterdir(), this_abs, include, exclude, None, report, gitignore
)
)
self.assertEqual(sorted(expected), sorted(sources))
    @patch("black.find_project_root", lambda *args: THIS_DIR.resolve())
    def test_exclude_for_issue_1572(self) -> None:
        """Explicitly listed files bypass --exclude (regression for #1572)."""
        # Exclude shouldn't touch files that were explicitly given to Black through the
        # CLI. Exclude is supposed to only apply to the recursive discovery of files.
        # https://github.com/psf/black/issues/1572
        path = THIS_DIR / "data" / "include_exclude_tests"
        include = ""
        # Deliberately matches the explicitly-given file; it must survive anyway.
        exclude = r"/exclude/|a\.py"
        src = str(path / "b/exclude/a.py")
        report = black.Report()
        expected = [Path(path / "b/exclude/a.py")]
        sources = list(
            black.get_sources(
                ctx=FakeContext(),
                src=(src,),
                quiet=True,
                verbose=False,
                include=include,
                exclude=exclude,
                force_exclude=None,
                report=report,
            )
        )
        self.assertEqual(sorted(expected), sorted(sources))
def test_gitignore_exclude(self) -> None:
path = THIS_DIR / "data" / "include_exclude_tests"
include = re.compile(r"\.pyi?$")
exclude = re.compile(r"")
report = black.Report()
gitignore = PathSpec.from_lines(
"gitwildmatch", ["exclude/", ".definitely_exclude"]
)
sources: List[Path] = []
expected = [
Path(path / "b/dont_exclude/a.py"),
Path(path / "b/dont_exclude/a.pyi"),
]
this_abs = THIS_DIR.resolve()
sources.extend(
black.gen_python_files(
path.iterdir(), this_abs, include, exclude, None, report, gitignore
)
)
self.assertEqual(sorted(expected), sorted(sources))
def test_empty_include(self) -> None:
path = THIS_DIR / "data" / "include_exclude_tests"
report = black.Report()
gitignore = PathSpec.from_lines("gitwildmatch", [])
empty = re.compile(r"")
sources: List[Path] = []
expected = [
Path(path / "b/exclude/a.pie"),
Path(path / "b/exclude/a.py"),
Path(path / "b/exclude/a.pyi"),
Path(path / "b/dont_exclude/a.pie"),
Path(path / "b/dont_exclude/a.py"),
Path(path / "b/dont_exclude/a.pyi"),
Path(path / "b/.definitely_exclude/a.pie"),
Path(path / "b/.definitely_exclude/a.py"),
Path(path / "b/.definitely_exclude/a.pyi"),
]
this_abs = THIS_DIR.resolve()
sources.extend(
black.gen_python_files(
path.iterdir(),
this_abs,
empty,
re.compile(black.DEFAULT_EXCLUDES),
None,
report,
gitignore,
)
)
self.assertEqual(sorted(expected), sorted(sources))
def test_empty_exclude(self) -> None:
path = THIS_DIR / "data" / "include_exclude_tests"
report = black.Report()
gitignore = PathSpec.from_lines("gitwildmatch", [])
empty = re.compile(r"")
sources: List[Path] = []
expected = [
Path(path / "b/dont_exclude/a.py"),
Path(path / "b/dont_exclude/a.pyi"),
Path(path / "b/exclude/a.py"),
Path(path / "b/exclude/a.pyi"),
Path(path / "b/.definitely_exclude/a.py"),
Path(path / "b/.definitely_exclude/a.pyi"),
]
this_abs = THIS_DIR.resolve()
sources.extend(
black.gen_python_files(
path.iterdir(),
this_abs,
re.compile(black.DEFAULT_INCLUDES),
empty,
None,
report,
gitignore,
)
)
self.assertEqual(sorted(expected), sorted(sources))
def test_invalid_include_exclude(self) -> None:
for option in ["--include", "--exclude"]:
self.invokeBlack(["-", option, "**()(!!*)"], exit_code=2)
def test_preserves_line_endings(self) -> None:
with TemporaryDirectory() as workspace:
test_file = Path(workspace) / "test.py"
for nl in ["\n", "\r\n"]:
contents = nl.join(["def f( ):", " pass"])
test_file.write_bytes(contents.encode())
ff(test_file, write_back=black.WriteBack.YES)
updated_contents: bytes = test_file.read_bytes()
self.assertIn(nl.encode(), updated_contents)
if nl == "\n":
self.assertNotIn(b"\r\n", updated_contents)
def test_preserves_line_endings_via_stdin(self) -> None:
for nl in ["\n", "\r\n"]:
contents = nl.join(["def f( ):", " pass"])
runner = BlackRunner()
result = runner.invoke(
black.main, ["-", "--fast"], input=BytesIO(contents.encode("utf8"))
)
self.assertEqual(result.exit_code, 0)
output = runner.stdout_bytes
self.assertIn(nl.encode("utf8"), output)
if nl == "\n":
self.assertNotIn(b"\r\n", output)
def test_assert_equivalent_different_asts(self) -> None:
with self.assertRaises(AssertionError):
black.assert_equivalent("{}", "None")
def test_symlink_out_of_root_directory(self) -> None:
    """gen_python_files should skip (not crash on) a symlink that resolves
    outside `root`, but raise ValueError for a regular file doing the same."""
    path = MagicMock()
    root = THIS_DIR.resolve()
    child = MagicMock()
    include = re.compile(black.DEFAULT_INCLUDES)
    exclude = re.compile(black.DEFAULT_EXCLUDES)
    report = black.Report()
    gitignore = PathSpec.from_lines("gitwildmatch", [])
    # `child` should behave like a symlink which resolved path is clearly
    # outside of the `root` directory.
    path.iterdir.return_value = [child]
    child.resolve.return_value = Path("/a/b/c")
    child.as_posix.return_value = "/a/b/c"
    child.is_symlink.return_value = True
    try:
        # The symlink case must be silently skipped, never raise.
        list(
            black.gen_python_files(
                path.iterdir(), root, include, exclude, None, report, gitignore
            )
        )
    except ValueError as ve:
        self.fail(f"`get_python_files_in_dir()` failed: {ve}")
    path.iterdir.assert_called_once()
    child.resolve.assert_called_once()
    child.is_symlink.assert_called_once()
    # `child` should behave like a strange file which resolved path is clearly
    # outside of the `root` directory.
    child.is_symlink.return_value = False
    with self.assertRaises(ValueError):
        list(
            black.gen_python_files(
                path.iterdir(), root, include, exclude, None, report, gitignore
            )
        )
    # After both passes each mock must have been consulted exactly twice.
    path.iterdir.assert_called()
    self.assertEqual(path.iterdir.call_count, 2)
    child.resolve.assert_called()
    self.assertEqual(child.resolve.call_count, 2)
    child.is_symlink.assert_called()
    self.assertEqual(child.is_symlink.call_count, 2)
def test_shhh_click(self) -> None:
    """black.patch_click() should stop Click's `_verify_python3_env` check
    from raising under an ASCII preferred encoding."""
    try:
        from click import _unicodefun  # type: ignore
    except ModuleNotFoundError:
        self.skipTest("Incompatible Click version")
    if not hasattr(_unicodefun, "_verify_python3_env"):
        self.skipTest("Incompatible Click version")
    # First, let's see if Click is crashing with a preferred ASCII charset.
    with patch("locale.getpreferredencoding") as gpe:
        gpe.return_value = "ASCII"
        with self.assertRaises(RuntimeError):
            _unicodefun._verify_python3_env()
    # Now, let's silence Click...
    black.patch_click()
    # ...and confirm it's silent.
    with patch("locale.getpreferredencoding") as gpe:
        gpe.return_value = "ASCII"
        try:
            _unicodefun._verify_python3_env()
        except RuntimeError as re:
            # NOTE(review): this `re` locally shadows the `re` regex module.
            self.fail(f"`patch_click()` failed, exception still raised: {re}")
def test_root_logger_not_used_directly(self) -> None:
    """Formatting this file must never emit a record via the root logger."""
    def fail(*args: Any, **kwargs: Any) -> None:
        self.fail("Record created with root logger")

    overrides = {
        name: fail
        for name in ("debug", "info", "warning", "error", "critical", "log")
    }
    with patch.multiple(logging.root, **overrides):
        ff(THIS_FILE)
def test_invalid_config_return_code(self) -> None:
    """Pointing --config at a nonexistent file must exit with code 2."""
    tmp_file = Path(black.dump_to_file())
    try:
        missing_config = Path(black.dump_to_file())
        missing_config.unlink()
        self.invokeBlack(
            ["--config", str(missing_config), str(tmp_file)],
            exit_code=2,
            ignore_config=False,
        )
    finally:
        tmp_file.unlink()
def test_parse_pyproject_toml(self) -> None:
    """parse_pyproject_toml must surface test.toml's values as native types."""
    config = black.parse_pyproject_toml(str(THIS_DIR / "test.toml"))
    expected = {
        "verbose": 1,
        "check": "no",
        "diff": "y",
        "color": True,
        "line_length": 79,
        "target_version": ["py36", "py37", "py38"],
        "exclude": r"\.pyi?$",
        "include": r"\.py?$",
    }
    for key, value in expected.items():
        self.assertEqual(config[key], value)
def test_read_pyproject_toml(self) -> None:
    """read_pyproject_toml must load test.toml into the context default map
    (note: scalar values arrive as strings here, unlike parse_pyproject_toml)."""
    fake_ctx = FakeContext()
    black.read_pyproject_toml(fake_ctx, FakeParameter(), str(THIS_DIR / "test.toml"))
    loaded = fake_ctx.default_map
    expected = {
        "verbose": "1",
        "check": "no",
        "diff": "y",
        "color": "True",
        "line_length": "79",
        "target_version": ["py36", "py37", "py38"],
        "exclude": r"\.pyi?$",
        "include": r"\.py?$",
    }
    for key, value in expected.items():
        self.assertEqual(loaded[key], value)
def test_find_project_root(self) -> None:
    """find_project_root must pick the closest pyproject.toml common to all srcs."""
    with TemporaryDirectory() as workspace:
        root = Path(workspace)
        test_dir = root / "test"
        test_dir.mkdir()
        src_dir = root / "src"
        src_dir.mkdir()
        # pyproject.toml at the workspace root and inside src/.
        (root / "pyproject.toml").touch()
        (src_dir / "pyproject.toml").touch()
        src_python = src_dir / "foo.py"
        src_python.touch()

        # Two sources from different subtrees -> the shared workspace root.
        self.assertEqual(
            black.find_project_root((src_dir, test_dir)), root.resolve()
        )
        # A single source -> its own nearest pyproject.toml directory.
        self.assertEqual(black.find_project_root((src_dir,)), src_dir.resolve())
        self.assertEqual(black.find_project_root((src_python,)), src_dir.resolve())
def test_bpo_33660_workaround(self) -> None:
    """normalize_path_maybe_ignore must survive cwd == '/' (bpo-33660)."""
    if system() == "Windows":
        return

    # https://bugs.python.org/issue33660
    previous_cwd = Path.cwd()
    root = Path("/")
    try:
        os.chdir(str(root))
        rel_path = Path("workspace") / "project"
        report = black.Report(verbose=True)
        normalized = black.normalize_path_maybe_ignore(rel_path, root, report)
        self.assertEqual(normalized, "workspace/project")
    finally:
        # Always restore the working directory for the rest of the suite.
        os.chdir(str(previous_cwd))
# Cache black's own source so tracefunc can map line numbers to signatures.
with open(black.__file__, "r", encoding="utf-8") as _bf:
    black_source_lines = _bf.readlines()


def tracefunc(frame: types.FrameType, event: str, arg: Any) -> Callable:
    """Show function calls from `black/__init__.py` as they happen.

    Register this with `sys.settrace()` in a test you're debugging.
    """
    if event != "call":
        return tracefunc
    # Indent output proportionally to the current call depth (the constant 19
    # offsets the test-runner frames; presumably tuned by hand -- TODO confirm).
    stack = len(inspect.stack()) - 19
    stack *= 2
    filename = frame.f_code.co_filename
    lineno = frame.f_lineno
    func_sig_lineno = lineno - 1
    funcname = black_source_lines[func_sig_lineno].strip()
    while funcname.startswith("@"):
        # Skip decorator lines until the actual `def ...` signature.
        func_sig_lineno += 1
        funcname = black_source_lines[func_sig_lineno].strip()
    if "black/__init__.py" in filename:
        print(f"{' ' * stack}{lineno}:{funcname}")
    return tracefunc


if __name__ == "__main__":
    unittest.main(module="test_black")
|
from utils.flops_and_params import get_model_complexity_info
from models import *
from utils.prune_utils import *

if __name__ == '__main__':
    # Hard-coded paths: pretrained darknet weights and the network definition.
    model = '/Users/ioneliabuzatu/PycharmProjects/all-prunings/yolov3.weights'
    model_def = "/Users/ioneliabuzatu/weight-pruning/config/yolov3-custom.cfg"

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_origin = Darknet(model_def).to(device)
    try:
        # A torch checkpoint loads as a state dict...
        model_origin.load_state_dict(torch.load(model))
    except Exception:
        # ...otherwise fall back to raw darknet-format weights.
        # Narrowed from a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit.
        model_origin.load_darknet_weights(model)

    # Per-layer FLOPs and parameter counts for the unpruned model.
    output_original = get_model_complexity_info(
        model_origin, (3, 416, 416), as_strings=True, print_per_layer_stat=True
    )
    for line in output_original:
        # Split once instead of three times; building the pieces outside the
        # f-string also avoids reusing the outer quote inside a replacement
        # field, which is a SyntaxError before Python 3.12 (PEP 701).
        fields = line.split("|")
        print(f"{fields[0]} | {fields[1]}({fields[2]})")
| from utils.flops_and_params import get_model_complexity_info
from models import *
from utils.prune_utils import *

if __name__ == '__main__':
    # Hard-coded paths: pretrained darknet weights and the network definition.
    model = '/Users/ioneliabuzatu/PycharmProjects/all-prunings/yolov3.weights'
    model_def = "/Users/ioneliabuzatu/weight-pruning/config/yolov3-custom.cfg"
    #
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_origin = Darknet(model_def).to(device)
    try:
        # A torch checkpoint loads as a state dict...
        model_origin.load_state_dict(torch.load(model))
    except:
        # ...otherwise fall back to raw darknet-format weights.
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit.
        model_origin.load_darknet_weights(model)
    # flops, params = get_model_complexity_info(model_origin, (3, 416,416), as_strings=True, print_per_layer_stat=True)
    # Print "name | flops(params)" for each reported layer line.
    output_origianl = get_model_complexity_info(model_origin, (3, 416, 416), as_strings=True, print_per_layer_stat=True)
    for original in output_origianl:
        out = f"{original.split('|')[0]} | {original.split('|')[1]}({original.split('|')[2]})"
        print(out)
|
#!/usr/bin/env python3
''' Decorates organism taxon nodes with the organism types
Usage: decorate_organisms.py [-u <neo4j user>] [-p <neo4j password>] [-b <neo4j bolt address>]
'''
import argparse
import neo4j
import getpass
import sys
import os
import json
import ontobio
__author__ = 'Finn Womack'
__copyright__ = 'Oregon State University'
__credits__ = ['Stephen Ramsey', 'Erica Wood', 'Finn Womack']
__license__ = 'MIT'
__version__ = '0.1.0'
__maintainer__ = ''
__email__ = ''
__status__ = 'Prototype'
# Map top-level eukaryote clade labels (as returned by the taxslim ontology)
# to the layman `organism_type` value applied to their taxon nodes.
laymen_names = {
    "Cryptophyceae":"microbial",
    "Sar":"microbial",
    "Amoebozoa":"microbial",
    "Metamonada":"microbial",
    "Glaucocystophyceae":"microbial",
    "Discoba":"microbial",
    "Viridiplantae":"plant",
    "Fungi":"fungi",
    "Choanoflagellata":"microbial",
    "Filasterea":"microbial",
    "Metazoa":"animal",
    "Opisthokonta incertae sedis":"microbial",
    "Ichthyosporea":"microbial",
    "Haptista":"microbial",
    "Rhodophyta":"microbial"
}
def query_partition(node_id_list, batch_size, organism):
    """Yield Cypher statements tagging batches of taxon nodes with *organism*.

    NOTE(review): the ids and the organism label are interpolated straight
    into the query text (no driver parameters), so this is only safe for
    trusted ontology-derived input.
    """
    for start in range(0, len(node_id_list), batch_size):
        chunk = node_id_list[start:start + batch_size]
        yield (
            f"MATCH (n:`biolink:OrganismTaxon`) where n.id in {chunk} "
            f"SET n.organism_type = '{organism}' RETURN n.organism_type"
        )
class DecorateOTNodes:
    """Set an `organism_type` property on `biolink:OrganismTaxon` nodes in a
    KG2 neo4j database, derived from the NCBI taxonomy ("taxslim") ontology.

    ("ansestor" below is a pre-existing spelling of "ancestor".)
    """
    def __init__(self, neo4j_user, neo4j_password, neo4j_bolt, taxslim):
        # Fail fast if credentials were neither passed in nor found in config.
        if neo4j_user is None:
            print('Include the RTXConfig file or pass in a username. Exiting.')
            exit(1)
        else:
            self.neo4j_user = neo4j_user
        if neo4j_password is None:
            print('Include the RTXConfig file or pass in a password. Exiting.')
            exit(1)
        else:
            self.neo4j_password = neo4j_password
        self.neo4j_bolt = neo4j_bolt
        self.driver = neo4j.GraphDatabase.driver(self.neo4j_bolt, auth=(self.neo4j_user, self.neo4j_password))
        # Ontology handle used to walk taxonomy children/descendants.
        self.ont = ontobio.ontol_factory.OntologyFactory().create(taxslim)
    def run_query(self,query):
        """
        :param query: a cypher statement as a string to run
        """
        # Start a neo4j session, run a query, then close the session.
        # NOTE(review): the Result is returned from inside the session scope;
        # callers consume it immediately (see test_read_only) -- confirm the
        # driver version buffers results if this is ever deferred.
        with self.driver.session() as session:
            res = session.run(query)
            return res
    def test_read_only(self):
        # True unless the server reports dbms.read_only == 'false'.
        query = 'call dbms.listConfig() yield name, value where name = "dbms.read_only" return value'
        res = self.run_query(query)
        data = res.data()
        return data[0]['value'] != 'false'
    def label_microbes(self, batch):
        """Tag bacteria, archaea and viruses (plus all descendants)."""
        # Create a list of dictionaries where each key is "labels(n)"
        # and each value is a list containing a node label
        microbe_node_ansestors = {
            "bacteria":"NCBITaxon:2",
            "archaea":"NCBITaxon:2157",
            "virus":"NCBITaxon:10239",
        }
        for ansestor, ansestor_id in microbe_node_ansestors.items():
            # The ancestor itself plus every descendant gets the label.
            node_ids = set()
            node_ids = node_ids.union(set(self.ont.descendants(ansestor_id)))
            node_ids.add(ansestor_id)
            node_ids = list(node_ids)
            for query in query_partition(node_ids, batch, ansestor):
                self.run_query(query)
    def label_vertibrates(self, batch):
        """Tag all vertebrates. Currently disabled in decorate_organisms()."""
        # Create a list of dictionaries where each key is "labels(n)"
        # and each value is a list containing a node label
        node_ids = set()
        vertibrate_node_ansestors = {
            "Vertebrata":"NCBITaxon:7742"
        }
        for ansestor_id in vertibrate_node_ansestors.values():
            node_ids = node_ids.union(set(self.ont.descendants(ansestor_id)))
            node_ids.add(ansestor_id)
            node_ids = list(node_ids)
            for query in query_partition(node_ids, batch, 'vertebrate'):
                self.run_query(query)
    def label_eukaryotes(self, batch):
        """Tag eukaryote subtrees using the laymen_names mapping."""
        eukaryote_node_ansestors = {}
        for child in self.ont.children("NCBITaxon:2759"):
            if child not in ["NCBITaxon:33154"]:
                eukaryote_node_ansestors[self.ont.label(child)]=child
            else:
                # For NCBITaxon:33154 descend one extra level so its subtrees
                # (e.g. Fungi vs Metazoa) get distinct labels.
                for grandchild in self.ont.children(child):
                    eukaryote_node_ansestors[self.ont.label(grandchild)]=grandchild
        for ansestor, ansestor_id in eukaryote_node_ansestors.items():
            node_ids = set()
            node_ids = node_ids.union(set(self.ont.descendants(ansestor_id)))
            node_ids.add(ansestor_id)
            node_ids = list(node_ids)
            for query in query_partition(node_ids, batch, laymen_names[ansestor]):
                self.run_query(query)
    def decorate_organisms(self, batch):
        """Run all enabled labelling passes against the connected database."""
        self.label_eukaryotes(batch)
        self.label_microbes(batch)
        #self.label_vertibrates(batch)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-u", "--user", type=str, help="Neo4j Username", default=None, required=False)
    parser.add_argument("-p", "--password", help="Neo4j Password", type=str, default=None, required=False)
    parser.add_argument("-b", "--bolt", help="Neo4j bolt address", type=str, default=None, required=True)
    parser.add_argument("-t", "--taxslim", type=str, help="The path to the taxslim owl file", default=None, required=True)
    parser.add_argument("-c", "--config", type=str, help="config.json file location.", default=None, required=False)
    parser.add_argument("--batch", type=int, help="The batch size for neo4j set querries", default=500, required=False)
    arguments = parser.parse_args()
    if arguments.config is not None:
        with open(arguments.config, 'r') as fid:
            config_data = json.load(fid)
        # Two known config.json layouts: newer files nest KG2 under 'Contextual'.
        if 'Contextual' in config_data:
            config_data_kg2_neo4j = config_data['Contextual']['KG2']['neo4j']
            neo4j_user = config_data_kg2_neo4j['username']
            neo4j_password = config_data_kg2_neo4j['password']
        else:
            config_data_kg2_neo4j = config_data['KG2']['neo4j']
            neo4j_user = config_data_kg2_neo4j['username']
            neo4j_password = config_data_kg2_neo4j['password']
        # CLI flags win; config values only fill in what was not provided.
        # NOTE(review): this fallback is placed inside the config branch --
        # outside it, neo4j_user/neo4j_password would be undefined names.
        if arguments.password is None:
            arguments.password = neo4j_password
        if arguments.user is None:
            arguments.user = neo4j_user
    decorator = DecorateOTNodes(arguments.user, arguments.password, arguments.bolt, arguments.taxslim)
    if decorator.test_read_only():
        print("WARNING: neo4j database is set to read-only and thus nodes will not update", file=sys.stderr)
    else:
        decorator.decorate_organisms(arguments.batch)
decorator.decorate_organisms(arguments.batch) | #!/usr/bin/env python3
''' Decorates organism taxon nodes with the organism types
Usage: decorate_organisms.py [-u <neo4j user>] [-p <neo4j password>] [-b <neo4j bolt address>]
'''
import argparse
import neo4j
import getpass
import sys
import os
import json
import ontobio
__author__ = 'Finn Womack'
__copyright__ = 'Oregon State University'
__credits__ = ['Stephen Ramsey', 'Erica Wood', 'Finn Womack']
__license__ = 'MIT'
__version__ = '0.1.0'
__maintainer__ = ''
__email__ = ''
__status__ = 'Prototype'
laymen_names = {
"Cryptophyceae":"microbial",
"Sar":"microbial",
"Amoebozoa":"microbial",
"Metamonada":"microbial",
"Glaucocystophyceae":"microbial",
"Discoba":"microbial",
"Viridiplantae":"plant",
"Fungi":"fungi",
"Choanoflagellata":"microbial",
"Filasterea":"microbial",
"Metazoa":"animal",
"Opisthokonta incertae sedis":"microbial",
"Ichthyosporea":"microbial",
"Haptista":"microbial",
"Rhodophyta":"microbial"
}
def query_partition(node_id_list, batch_size, organism):
    """Yield Cypher statements tagging batches of taxon nodes with *organism*.

    NOTE(review): ids and label are interpolated into the query text (no
    driver parameters) -- only safe for trusted ontology-derived input.
    """
    for i in range(0, len(node_id_list), batch_size):
        yield f"MATCH (n:`biolink:OrganismTaxon`) where n.id in {node_id_list[i:i + batch_size]} SET n.organism_type = '"+organism+"' RETURN n.organism_type"
        #yield " union ".join([f"MATCH (n:organism_taxon {{ id: '{node_id}' }}) SET n.organism_type = '"+organism+"' RETURN n.organism_type" for node_id in node_id_list[i:i + batch_size]])
class DecorateOTNodes:
def __init__(self, neo4j_user, neo4j_password, neo4j_bolt, taxslim):
if neo4j_user is None:
print('Include the RTXConfig file or pass in a username. Exiting.')
exit(1)
else:
self.neo4j_user = neo4j_user
if neo4j_password is None:
print('Include the RTXConfig file or pass in a password. Exiting.')
exit(1)
else:
self.neo4j_password = neo4j_password
self.neo4j_bolt = neo4j_bolt
self.driver = neo4j.GraphDatabase.driver(self.neo4j_bolt, auth=(self.neo4j_user, self.neo4j_password))
self.ont = ontobio.ontol_factory.OntologyFactory().create(taxslim)
def run_query(self,query):
"""
:param query: a cypher statement as a string to run
"""
# Start a neo4j session, run a query, then close the session
with self.driver.session() as session:
res = session.run(query)
return res
def test_read_only(self):
query = 'call dbms.listConfig() yield name, value where name = "dbms.read_only" return value'
res = self.run_query(query)
data = res.data()
return data[0]['value'] != 'false'
def label_microbes(self, batch):
# Create a list of dictionaries where each key is "labels(n)"
# and each value is a list containing a node label
microbe_node_ansestors = {
"bacteria":"NCBITaxon:2",
"archaea":"NCBITaxon:2157",
"virus":"NCBITaxon:10239",
}
for ansestor, ansestor_id in microbe_node_ansestors.items():
node_ids = set()
node_ids = node_ids.union(set(self.ont.descendants(ansestor_id)))
node_ids.add(ansestor_id)
node_ids = list(node_ids)
for query in query_partition(node_ids, batch, ansestor):
self.run_query(query)
def label_vertibrates(self, batch):
# Create a list of dictionaries where each key is "labels(n)"
# and each value is a list containing a node label
node_ids = set()
vertibrate_node_ansestors = {
"Vertebrata":"NCBITaxon:7742"
}
for ansestor_id in vertibrate_node_ansestors.values():
node_ids = node_ids.union(set(self.ont.descendants(ansestor_id)))
node_ids.add(ansestor_id)
node_ids = list(node_ids)
for query in query_partition(node_ids, batch, 'vertebrate'):
self.run_query(query)
def label_eukaryotes(self, batch):
eukaryote_node_ansestors = {}
for child in self.ont.children("NCBITaxon:2759"):
if child not in ["NCBITaxon:33154"]:
eukaryote_node_ansestors[self.ont.label(child)]=child
else:
for grandchild in self.ont.children(child):
eukaryote_node_ansestors[self.ont.label(grandchild)]=grandchild
for ansestor, ansestor_id in eukaryote_node_ansestors.items():
node_ids = set()
node_ids = node_ids.union(set(self.ont.descendants(ansestor_id)))
node_ids.add(ansestor_id)
node_ids = list(node_ids)
for query in query_partition(node_ids, batch, laymen_names[ansestor]):
self.run_query(query)
def decorate_organisms(self, batch):
self.label_eukaryotes(batch)
self.label_microbes(batch)
#self.label_vertibrates(batch)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--user", type=str, help="Neo4j Username", default=None, required=False)
parser.add_argument("-p", "--password", help="Neo4j Password", type=str, default=None, required=False)
parser.add_argument("-b", "--bolt", help="Neo4j bolt address", type=str, default=None, required=True)
parser.add_argument("-t", "--taxslim", type=str, help="The path to the taxslim owl file", default=None, required=True)
parser.add_argument("-c", "--config", type=str, help="config.json file location.", default=None, required=False)
parser.add_argument("--batch", type=int, help="The batch size for neo4j set querries", default=500, required=False)
arguments = parser.parse_args()
if arguments.config is not None:
with open(arguments.config, 'r') as fid:
config_data = json.load(fid)
if 'Contextual' in config_data:
config_data_kg2_neo4j = config_data['Contextual']['KG2']['neo4j']
neo4j_user = config_data_kg2_neo4j['username']
neo4j_password = config_data_kg2_neo4j['password']
else:
config_data_kg2_neo4j = config_data['KG2']['neo4j']
neo4j_user = config_data_kg2_neo4j['username']
neo4j_password = config_data_kg2_neo4j['password']
if arguments.password is None:
arguments.password = neo4j_password
if arguments.user is None:
arguments.user = neo4j_user
decorator = DecorateOTNodes(arguments.user, arguments.password, arguments.bolt, arguments.taxslim)
if decorator.test_read_only():
print("WARNING: neo4j database is set to read-only and thus nodes will not update", file=sys.stderr)
else:
decorator.decorate_organisms(arguments.batch) |
import asyncio
import json
import logging
import random
from pathlib import Path
import discord
from discord.ext import commands
from bot.bot import Bot
from bot.constants import Colours
log = logging.getLogger(__name__)

# Seconds players get to submit answers in one anagram round.
TIME_LIMIT = 60

# anagram.json file contains all the anagrams
with open(Path("bot/resources/fun/anagram.json"), "r") as f:
    ANAGRAMS_ALL = json.load(f)
class AnagramGame:
    """State for a single in-flight anagram game.

    Holds the scrambled prompt, the set of answers still unclaimed, and the
    mentions of everyone who has answered correctly so far. Kept as its own
    class so several games can eventually run side by side.
    """

    def __init__(self, scrambled: str, correct: list[str]) -> None:
        self.scrambled = scrambled
        self.correct = set(correct)
        self.winners = set()

    async def message_creation(self, message: discord.Message) -> None:
        """Check if the message is a correct answer and remove it from the list of answers."""
        guess = message.content.lower()
        if guess in self.correct:
            self.winners.add(message.author.mention)
            self.correct.remove(guess)
class Anagram(commands.Cog):
    """Cog for the Anagram game command."""

    def __init__(self, bot: Bot):
        self.bot = bot
        # At most one in-flight game per channel id.
        self.games: dict[int, AnagramGame] = {}

    @commands.command(name="anagram", aliases=("anag", "gram", "ag"))
    async def anagram_command(self, ctx: commands.Context) -> None:
        """
        Given shuffled letters, rearrange them into anagrams.

        Show an embed with scrambled letters which if rearranged can form words.
        After a specific amount of time, list the correct answers and whether someone provided a
        correct answer.
        """
        if self.games.get(ctx.channel.id):
            await ctx.send("An anagram is already being solved in this channel!")
            return

        scrambled_letters, correct = random.choice(list(ANAGRAMS_ALL.items()))

        game = AnagramGame(scrambled_letters, correct)
        self.games[ctx.channel.id] = game

        anagram_embed = discord.Embed(
            title=f"Find anagrams from these letters: '{scrambled_letters.upper()}'",
            description=f"You have {TIME_LIMIT} seconds to find correct words.",
            colour=Colours.purple,
        )

        await ctx.send(embed=anagram_embed)
        # on_message feeds answers into `game` while we sleep.
        await asyncio.sleep(TIME_LIMIT)

        if game.winners:
            win_list = ", ".join(game.winners)
            content = f"Well done {win_list} for getting it right!"
        else:
            content = "Nobody got it right."

        # Join the answers outside the f-string: reusing the enclosing quote
        # type inside a replacement field is a SyntaxError before Python 3.12.
        answer_words = "`, `".join(ANAGRAMS_ALL[game.scrambled])
        answer_embed = discord.Embed(
            title=f"The words were: `{answer_words}`!",
            colour=Colours.pink,
        )

        await ctx.send(content, embed=answer_embed)

        # Game is finished, let's remove it from the dict
        self.games.pop(ctx.channel.id)

    @commands.Cog.listener()
    async def on_message(self, message: discord.Message) -> None:
        """Check a message for an anagram attempt and pass to an ongoing game."""
        if message.author.bot or not message.guild:
            return

        game = self.games.get(message.channel.id)
        if not game:
            return

        await game.message_creation(message)
def setup(bot: Bot) -> None:
    """Register the Anagram cog on *bot*."""
    bot.add_cog(Anagram(bot))
| import asyncio
import json
import logging
import random
from pathlib import Path
import discord
from discord.ext import commands
from bot.bot import Bot
from bot.constants import Colours
log = logging.getLogger(__name__)
TIME_LIMIT = 60
# anagram.json file contains all the anagrams
with open(Path("bot/resources/fun/anagram.json"), "r") as f:
ANAGRAMS_ALL = json.load(f)
class AnagramGame:
"""
Used for creating instances of anagram games.
Once multiple games can be run at the same time, this class' instances
can be used for keeping track of each anagram game.
"""
def __init__(self, scrambled: str, correct: list[str]) -> None:
self.scrambled = scrambled
self.correct = set(correct)
self.winners = set()
async def message_creation(self, message: discord.Message) -> None:
"""Check if the message is a correct answer and remove it from the list of answers."""
if message.content.lower() in self.correct:
self.winners.add(message.author.mention)
self.correct.remove(message.content.lower())
class Anagram(commands.Cog):
"""Cog for the Anagram game command."""
def __init__(self, bot: Bot):
self.bot = bot
self.games: dict[int, AnagramGame] = {}
@commands.command(name="anagram", aliases=("anag", "gram", "ag"))
async def anagram_command(self, ctx: commands.Context) -> None:
"""
Given shuffled letters, rearrange them into anagrams.
Show an embed with scrambled letters which if rearranged can form words.
After a specific amount of time, list the correct answers and whether someone provided a
correct answer.
"""
if self.games.get(ctx.channel.id):
await ctx.send("An anagram is already being solved in this channel!")
return
scrambled_letters, correct = random.choice(list(ANAGRAMS_ALL.items()))
game = AnagramGame(scrambled_letters, correct)
self.games[ctx.channel.id] = game
anagram_embed = discord.Embed(
title=f"Find anagrams from these letters: '{scrambled_letters.upper()}'",
description=f"You have {TIME_LIMIT} seconds to find correct words.",
colour=Colours.purple,
)
await ctx.send(embed=anagram_embed)
await asyncio.sleep(TIME_LIMIT)
if game.winners:
win_list = ", ".join(game.winners)
content = f"Well done {win_list} for getting it right!"
else:
content = "Nobody got it right."
answer_embed = discord.Embed(
title=f"The words were: `{'`, `'.join(ANAGRAMS_ALL[game.scrambled])}`!",
colour=Colours.pink,
)
await ctx.send(content, embed=answer_embed)
# Game is finished, let's remove it from the dict
self.games.pop(ctx.channel.id)
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
"""Check a message for an anagram attempt and pass to an ongoing game."""
if message.author.bot or not message.guild:
return
game = self.games.get(message.channel.id)
if not game:
return
await game.message_creation(message)
def setup(bot: Bot) -> None:
"""Load the Anagram cog."""
bot.add_cog(Anagram(bot))
|
#!/usr/bin/python3
__author__ = 'Przemek Decewicz'
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from glob import glob
from os import makedirs, path
from sys import argv
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import numpy as np
from io import TextIOWrapper
import math
def plot_stats(infile, outfile):
    """Read one test/training-set table and plot every statistic as a line.

    Selected columns are rescaled (same factors as before) so all curves stay
    on a comparable visual scale.

    Parameters
    ----------
    infile: str
        Path to input file.
    outfile: str
        Path to resulting PNG file with plots.
    """
    # First line of the tab-separated file holds the column names.
    with open(infile) as handle:
        colnames = handle.readline().strip().split('\t')
    data = np.genfromtxt(infile, delimiter="\t", filling_values=1, dtype=np.float64, skip_header=1)

    # Per-column rescaling, dispatched by column name.
    rescale = {
        'orf_length_med': lambda col: col / 50,
        'shannon_slope': lambda col: col * 200,
        'at_skew': lambda col: col * 2,
        'gc_skew': lambda col: col * 2,
        'max_direction': lambda col: col / 3,
        'phmms': lambda col: col * 2,
        'status': lambda col: col * 20,
    }
    for idx, name in enumerate(colnames):
        transform = rescale.get(name)
        if transform is not None:
            data[:, idx] = transform(data[:, idx])

    # One wide figure with a legend row underneath the axes.
    fig, ax = plt.subplots(figsize=(18, 4.5), dpi=150)
    plt.plot(data, '-', linewidth=.8, alpha=0.9)
    plt.legend(colnames, loc='lower center', bbox_to_anchor=(0.5, -0.17), ncol=len(colnames))
    plt.margins(x=0.01)
    plt.subplots_adjust(left=0.03, right=0.99, top=0.9, bottom=0.15)
    plt.title(path.basename(infile))
    plt.savefig(outfile)
    plt.close()
def main():
    """Parse CLI options and render a stats plot for each input file."""
    args = ArgumentParser(prog = 'plot_trainSets_stats.py',
                          description = 'Plots PhiSpy\'s training/test sets statistics.',
                          epilog = 'Example usage:\npython3 scripts/plot_trainSets_stats.py -d PhiSpyModules/data -o PhiSpyModules/data/trainSets_stats ',
                          formatter_class = RawDescriptionHelpFormatter)

    args.add_argument('-i', '--infile',
                      type = str,
                      help = 'Path to input GenBank file.')
    args.add_argument('-d', '--indir',
                      type = str,
                      help = 'Path to input directory with multiple GenBank files.')
    args.add_argument('-s', '--suffix',
                      type = str,
                      help = 'Suffix that will be added to input file name.')
    args.add_argument('-o', '--outdir',
                      type = str,
                      help = 'Path to output directory.',
                      required = True)

    # No CLI arguments at all: show usage and bail out.
    if len(argv[1:]) == 0:
        args.print_help()
        args.exit()
    try:
        args = args.parse_args()
    except SystemExit:
        # Narrowed from a bare `except:`; argparse reports errors via SystemExit.
        args.exit()

    if not args.infile and not args.indir:
        print('You have to provide input data by either --infile or --indir.')
        exit(1)
    elif args.indir:
        infiles = glob(path.join(args.indir, '*.txt'))
    else:
        infiles = [args.infile]

    # Create output directory
    if not path.isdir(args.outdir): makedirs(args.outdir)

    # Process all input files
    for infile in infiles:
        # Build the stem outside the f-string: reusing the enclosing quote
        # inside a replacement field is a SyntaxError before Python 3.12.
        stem = path.basename(infile).rsplit('.', 1)[0]
        plot_file = path.join(args.outdir, f'{stem}.{args.suffix}.png')
        plot_stats(infile, plot_file)
        print(f'Done with plot: {plot_file}')


if __name__ == '__main__':
    main()
| #!/usr/bin/python3
__author__ = 'Przemek Decewicz'
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from glob import glob
from os import makedirs, path
from sys import argv
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import numpy as np
from io import TextIOWrapper
import math
def plot_stats(infile, outfile):
"""Reads test/training set and plots all identified stats.
Stats are slightly transformed to retained a visible scale.
Two types of plots are provided:
- transformed stats
Parameters
----------
infile: str
Path to input file.
outfile: str
Path to resulting PNG file with plots.
"""
# read input file
with open(infile) as inf:
colnames = inf.readline().strip().split('\t')
data = np.genfromtxt(infile, delimiter="\t", filling_values=1, dtype=np.float64, skip_header=1)
for i, name in enumerate(colnames):
if name == 'orf_length_med':
data[:, i] = data[:, i] / 50
elif name == 'shannon_slope':
data[:, i] = data[:, i] * 200
elif name == 'at_skew':
data[:, i] = data[:, i] * 2
elif name == 'gc_skew':
data[:, i] = data[:, i] * 2
elif name == 'max_direction':
data[:, i] = data[:, i] / 3
elif name == 'phmms':
data[:, i] = data[:, i] * 2
elif name == 'status':
data[:, i] = data[:, i] * 20
# make a plot
fig, ax = plt.subplots(figsize=(18, 4.5), dpi = 150)
plt.plot(data, '-', linewidth=.8, alpha = 0.9)
plt.legend(colnames, loc='lower center', bbox_to_anchor=(0.5,-0.17), ncol = len(colnames))
plt.margins(x=0.01)
plt.subplots_adjust(left=0.03, right=0.99, top=0.9, bottom=0.15)
plt.title(path.basename(infile))
plt.savefig(outfile)
plt.close()
def main():
args = ArgumentParser(prog = 'plot_trainSets_stats.py',
description = 'Plots PhiSpy\'s training/test sets statistics.',
epilog = 'Example usage:\npython3 scripts/plot_trainSets_stats.py -d PhiSpyModules/data -o PhiSpyModules/data/trainSets_stats ',
formatter_class = RawDescriptionHelpFormatter)
args.add_argument('-i', '--infile',
type = str,
help = 'Path to input GenBank file.')
args.add_argument('-d', '--indir',
type = str,
help = 'Path to input directory with multiple GenBank files.')
args.add_argument('-s', '--suffix',
type = str,
help = 'Suffix that will be added to input file name.')
args.add_argument('-o', '--outdir',
type = str,
help = 'Path to output directory.',
required = True)
if len(argv[1:]) == 0:
args.print_help()
args.exit()
try:
args = args.parse_args()
except:
args.exit()
if not args.infile and not args.indir:
print('You have to provide input data by either --infile or --indir.')
exit(1)
elif args.indir:
infiles = glob(path.join(args.indir, '*.txt'))
else:
infiles = [args.infile]
# Create output directory
if not path.isdir(args.outdir): makedirs(args.outdir)
# Process all input files
for infile in infiles:
plot_file_name = f'{path.basename(infile).rsplit(".", 1)[0]}.{args.suffix}.png'
plot_file = path.join(args.outdir, plot_file_name)
plot_stats(infile, plot_file)
print(f'Done with plot: {plot_file}')
if __name__ == '__main__':
main()
|
import columbo

# The interview script: echoes, questions and confirmations, asked in order.
interactions = [
    columbo.Echo("Welcome to the Columbo example"),
    columbo.Acknowledge("Press enter to start"),
    columbo.BasicQuestion(
        "user",
        "What is your name?",
        default="Patrick",
    ),
    columbo.BasicQuestion(
        "user_email",
        # Prompt depends on an earlier answer, hence the callable.
        lambda answers: f"""What email address should be used to contact {answers["user"]}?""",
        default="me@example.com",
    ),
    columbo.Choice(
        "mood",
        "How are you feeling today?",
        options=["happy", "sad", "sleepy", "confused"],
        default="happy",
    ),
    columbo.Confirm("likes_dogs", "Do you like dogs?", default=True),
]

# Resolve answers from a canned argv instead of an interactive session.
cli_args = [
    "--user-email",
    "patrick@example.com",
    "--likes-dogs",
]
answers = columbo.parse_args(interactions, args=cli_args)
print(answers)
| import columbo
interactions = [
columbo.Echo("Welcome to the Columbo example"),
columbo.Acknowledge("Press enter to start"),
columbo.BasicQuestion(
"user",
"What is your name?",
default="Patrick",
),
columbo.BasicQuestion(
"user_email",
lambda answers: f"""What email address should be used to contact {answers["user"]}?""",
default="me@example.com",
),
columbo.Choice(
"mood",
"How are you feeling today?",
options=["happy", "sad", "sleepy", "confused"],
default="happy",
),
columbo.Confirm("likes_dogs", "Do you like dogs?", default=True),
]
answers = columbo.parse_args(
interactions,
args=[
"--user-email",
"patrick@example.com",
"--likes-dogs",
],
)
print(answers)
|
# -*- coding: utf-8 -*-
import os
import click
import logging
import json
import requests
from requests.exceptions import Timeout
import random
from datetime import datetime
from pathlib import Path, PurePath
from dotenv import find_dotenv, load_dotenv
"""
This script aims to collect all meeting documents found at:
https://www.ats.aq/devAS/Meetings/DocDatabase
We do so by scraping iteratively querying the underlying doc database to collect
paper metadata, then resolving that paper metadata into document links. Queries
to ats.aq's Doc Database are slow, so even the metadata portion of this scrape
currently takes over an hour.
Note that this script does not currently collect 'Final Reports', which are at
https://www.ats.aq/devAS/Info/FinalReports
Note also that this script does not currently collect 'attachments'.
We'll get there.
"""
def load_all_metadata(data_dir, logger) -> list:
    """Load and deduplicate all paper metadata stored as JSON in *data_dir*.

    Metadata files are recognised by a ``papers_metadata`` substring in the
    filename; each must contain a JSON list of paper dicts.

    :data_dir: Directory that may contain ``*papers_metadata*.json`` files.
    :logger: Logger used for progress reporting.
    :returns: A list of unique paper-metadata dicts (possibly empty).
    """
    papers = []
    files = [os.path.join(data_dir, f) for f in os.listdir(data_dir)]
    logger.info(f'{len(files)} total files found in {data_dir}')
    all_json = [f for f in files if f.endswith('.json')]
    logger.info(f'{len(all_json)} json files found in {data_dir}')
    meta = [f for f in all_json if 'papers_metadata' in f]
    logger.info(f'{len(meta)} existing metadata files found in {data_dir}')
    for meta_path in meta:
        with open(meta_path, 'r') as f:
            logger.info(f'reading metadata from {meta_path}')
            metadata = json.load(f)
            logger.info(f'{len(metadata)} papers found in {meta_path}')
            papers += metadata
    # Filter for unique entries (serialize first because dicts aren't hashable).
    dedupe = [json.loads(i) for i in set(json.dumps(p, sort_keys=True) for p in papers)]
    logger.info(f'metadata for {len(dedupe)} unique papers found in {data_dir}')
    # Bug fix: the deduplicated list was computed and logged but the raw,
    # possibly-duplicated list was returned.
    return dedupe
def construct_download_fname(wp_info: dict) -> list:
    """Build a download filename from paper metadata.

    Not yet implemented; placeholder kept for interface planning.

    :wp_info: A dict of metadata about a working paper.
    :returns: None (stub).
    """
    pass
def construct_document_links(wp_info: dict) -> list:
    """Take a dict of paper metadata; return a list of associated document urls.

    Note: this excludes 'attachment' documents, which are resolved separately.
    Ideally, this will be a set of four links (E/S/F/R), but may be less.

    :wp_info: A dict of some metadata about a working paper.
    :returns: A list of all resolved document links
    """
    document_links = []
    meeting = wp_info['Meeting_type'] + wp_info['Meeting_number']
    base = 'https://documents.ats.aq/' + meeting + '/' + wp_info['Abbreviation']
    pnum = wp_info['Abbreviation'] + str(wp_info['Number']).zfill(3)  # zero pad
    if wp_info['Revision'] > 0:  # a 'rev#' included in filename iff revisions
        # Fix: nested double quotes inside a double-quoted f-string are only
        # legal on Python 3.12+ (PEP 701); use single quotes for portability.
        revision = f"rev{wp_info['Revision']}"
    else:
        revision = None
    for country in ['e', 's', 'f', 'r']:
        fname = '_'.join([x for x in [meeting, pnum, revision, country] if x])
        fname += '.' + wp_info['Type']
        full_path = base + '/' + fname
        document_links.append(full_path)
    return document_links
def scrape_document_from_link(doc_link, logger, timeout=(2,5)):
    """Take a link and download the associated file as bytes.

    :doc_link: URL of the document to download.
    :logger: Logger used for progress reporting.
    :timeout: (connect, read) timeout in seconds, passed to requests.get.
    :returns: Response body as bytes on a 2xx response; None on a non-ok
        status or timeout.
    """
    logger.info(f"requesting {doc_link}")
    try:
        r = requests.get(doc_link, timeout=timeout)
        logger.info(f"{doc_link} returned status {r.status_code}")
        if r.ok:
            return r.content
    except Timeout as e:
        # Timeouts are expected for missing documents; log and return None.
        logger.info(f"{doc_link} scrape attempt timed-out after {timeout} with exception: {e}")
def scrape_documents(wp_info: dict, out_dir, logger, ignore_existing=True):
    """Take raw info about a working paper, generate doc urls, scrape the files.

    :wp_info: A dict of metadata about a working paper.
    :out_dir: The base directory for saving scraped documents.
    :logger: Logger used for progress reporting.
    :ignore_existing: When True, skip links whose output file already exists
        in out_dir.
    :returns: None; successful downloads are written into out_dir.
    """
    # Fix: nested double quotes inside a double-quoted f-string are only
    # legal on Python 3.12+ (PEP 701); use single quotes for portability.
    logger.info(f"constructing document links for {wp_info['Paper_id']}")
    doc_links = construct_document_links(wp_info)
    if ignore_existing:
        logger.info("filtering document links for already-scraped papers")
        logger.info(f"constructed document links before filter: {len(doc_links)}")
        outpaths = [(doc_link, construct_document_outpath(out_dir, wp_info, doc_link)) for doc_link in doc_links]
        logger.info(f"target outpaths: {[str(x[1]) for x in outpaths]}")
        existing = [os.path.join(out_dir, f) for f in os.listdir(out_dir)]
        logger.info(f"already collected outpaths: {[str(x[1]) for x in outpaths if str(x[1]) in existing]}")
        doc_links = [x[0] for x in outpaths if str(x[1]) not in existing]
        logger.info(f"constructed document links after filter: {len(doc_links)}")
    for doc_link in doc_links:
        logger.info(f"attempting to scrape file at {doc_link}")
        raw_doc = scrape_document_from_link(doc_link, logger)
        if raw_doc:
            logger.info(f"successful scrape of {doc_link}")
            outpath = construct_document_outpath(out_dir, wp_info, doc_link)
            logger.info(f"writing file scraped from {doc_link} to {outpath}")
            with open(outpath, 'wb+') as f:
                f.write(raw_doc)
        else:
            logger.info(f"failed scrape of {doc_link}")
def construct_document_outpath(out_dir, wp_info, doc_link):
    """Build the output path for saving a scraped document.

    :out_dir: Base directory for scraped files.
    :wp_info: Paper metadata (unused; retained for interface stability).
    :doc_link: Document URL; its final path segment becomes the filename.
    :returns: A PurePath of out_dir joined with the derived filename.
    """
    filename = doc_link.rsplit('/', 1)[-1]
    return PurePath(out_dir) / filename
def construct_metadata_scrape_path(base_data_path):
    """Build a timestamped filepath for a fresh metadata scrape.

    :base_data_path: Directory under which the metadata file should live.
    :returns: PurePath '<base>/<YYYY-mm-dd-HH-MM-SS>_papers_metadata.json'.
    """
    stamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    return PurePath(base_data_path) / f"{stamp}_papers_metadata.json"
def construct_wp_url(page: int):
    """Build the ats.aq doc-database query URL for one page of results.

    :page: Page number of results to request.
    :returns: The fully-qualified query URL as a string.
    """
    endpoint = "https://www.ats.aq/devAS/Meetings/SearchDocDatabase"
    return f"{endpoint}?page={page}"
def scrape_working_papers_listing(starting_page, logger):
    """Use the ats.aq doc database endpoint to collect all paper listings.

    Pages through the SearchDocDatabase endpoint starting at *starting_page*
    and accumulates listings until the pager stops advancing.

    :starting_page: Page number of the results page to start with.
    :logger: A logger object for reporting on results.
    :returns: A list of paper-listing dicts aggregated across all pages.
    """
    logger.info(f'starting with page {starting_page}')
    current_page = starting_page  # set the starting page as our 'current'
    url = construct_wp_url(current_page)  # construct the first url
    papers = []
    while url is not None:
        logger.info(f"attempting to scrape {url}")
        r = requests.get(url)
        logger.info(f"got {r.status_code} for {url}")
        data = json.loads(r.text)
        papers += data['payload']
        logger.info(f"total {len(papers)} listings collected so far")
        next_page = data['pager']['next']
        # The endpoint signals the last page by not advancing 'next'.
        if next_page > current_page:
            current_page = next_page
            url = construct_wp_url(current_page)
        else:
            url = None
    return papers
@click.command()
@click.argument('output_dir', type=click.Path(exists=True))
def main(output_dir):
    """ Runs data scraping scripts to populate raw data (../raw).

    :output_dir: Directory (resolved relative to the project root) that
        receives the metadata JSON and the downloaded documents.
    """
    logger = logging.getLogger(__name__)
    logger.info('using environment variables to generate scrape outpath')
    # project_dir is a module-level global set in the __main__ block.
    absolute_output_dir = PurePath(project_dir).joinpath(output_dir)
    papers = load_all_metadata(absolute_output_dir, logger)
    if not papers:  # if no metadata found, scrape it all from web
        logger.info('no existing metadata scrape; beginning')
        metadata_outpath = construct_metadata_scrape_path(absolute_output_dir)
        logger.info(f'best metadata outpath: {metadata_outpath}')
        logger.info('beginning scrape of working papers listing')
        papers = scrape_working_papers_listing(starting_page=1, logger=logger)
        logger.info(f'saving papers metadata to file at {metadata_outpath}')
        with open(metadata_outpath, 'w+') as f:
            json.dump(papers, f, indent=2)
        if os.path.exists(str(metadata_outpath)):
            logger.info(f'metadata file now exists at {metadata_outpath}')
    logger.info('beginning collection of underlying paper documents')
    random.shuffle(papers)  # shuffle in place to spread load across meetings
    for i, paper in enumerate(papers):
        try:
            logger.info(f"attempting scrape of paper ({i+1} of {len(papers)})")
            # Fix: nested double quotes inside a double-quoted f-string are
            # only legal on Python 3.12+ (PEP 701); use single quotes.
            logger.info(f"paper id: {paper['Paper_id']}")
            scrape_documents(paper, absolute_output_dir, logger=logger)
        except Exception as e:
            # Best-effort: one failed paper must not abort the whole run.
            logger.info(f"attempted scrape of {paper['Paper_id']} failed: {e}")
if __name__ == '__main__':
    # The base directory from which we'll resolve the 'data/raw' path etc.
    project_dir = Path(__file__).resolve().parents[2]
    # Log to logs/scrape_documents.log under the project root.
    os.makedirs(PurePath(project_dir).joinpath('logs'), exist_ok=True)
    log_path = PurePath(project_dir).joinpath('logs/scrape_documents.log')
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(filename=log_path, level=logging.INFO, format=log_fmt)
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    main()
| # -*- coding: utf-8 -*-
import os
import click
import logging
import json
import requests
from requests.exceptions import Timeout
import random
from datetime import datetime
from pathlib import Path, PurePath
from dotenv import find_dotenv, load_dotenv
"""
This script aims to collect all meeting documents found at:
https://www.ats.aq/devAS/Meetings/DocDatabase
We do so by scraping iteratively querying the underlying doc database to collect
paper metadata, then resolving that paper metadata into document links. Queries
to ats.aq's Doc Database are slow, so even the metadata portion of this scrape
currently takes over an hour.
Note that this script does not currently collect 'Final Reports', which are at
https://www.ats.aq/devAS/Info/FinalReports
Note also that this script does not currently collect 'attachments'.
We'll get there.
"""
def load_all_metadata(data_dir, logger) -> list:
    """Load and deduplicate all paper metadata stored as JSON in *data_dir*.

    Metadata files are recognised by a ``papers_metadata`` substring in the
    filename; each must contain a JSON list of paper dicts.

    :data_dir: Directory that may contain ``*papers_metadata*.json`` files.
    :logger: Logger used for progress reporting.
    :returns: A list of unique paper-metadata dicts (possibly empty).
    """
    papers = []
    files = [os.path.join(data_dir, f) for f in os.listdir(data_dir)]
    logger.info(f'{len(files)} total files found in {data_dir}')
    all_json = [f for f in files if f.endswith('.json')]
    logger.info(f'{len(all_json)} json files found in {data_dir}')
    meta = [f for f in all_json if 'papers_metadata' in f]
    logger.info(f'{len(meta)} existing metadata files found in {data_dir}')
    for meta_path in meta:
        with open(meta_path, 'r') as f:
            logger.info(f'reading metadata from {meta_path}')
            metadata = json.load(f)
            logger.info(f'{len(metadata)} papers found in {meta_path}')
            papers += metadata
    # Filter for unique entries (serialize first because dicts aren't hashable).
    dedupe = [json.loads(i) for i in set(json.dumps(p, sort_keys=True) for p in papers)]
    logger.info(f'metadata for {len(dedupe)} unique papers found in {data_dir}')
    # Bug fix: the deduplicated list was computed and logged but the raw,
    # possibly-duplicated list was returned.
    return dedupe
def construct_download_fname(wp_info: dict) -> list:
    """Build a download filename from paper metadata.

    Not yet implemented; placeholder kept for interface planning.

    :wp_info: A dict of metadata about a working paper.
    :returns: None (stub).
    """
    pass
def construct_document_links(wp_info: dict) -> list:
    """Resolve a paper-metadata dict into its candidate document URLs.

    Attachments are excluded; they are resolved separately.  Ideally this
    yields four language variants (E/S/F/R), but fewer may exist.

    :wp_info: Metadata dict describing one working paper.
    :returns: List of resolved document URLs.
    """
    meeting = '{}{}'.format(wp_info['Meeting_type'], wp_info['Meeting_number'])
    base = 'https://documents.ats.aq/{}/{}'.format(meeting, wp_info['Abbreviation'])
    pnum = wp_info['Abbreviation'] + str(wp_info['Number']).zfill(3)  # zero-padded
    # A 'rev#' component appears in the filename only when revisions exist.
    revision = 'rev{}'.format(wp_info['Revision']) if wp_info['Revision'] > 0 else None
    links = []
    for lang in ('e', 's', 'f', 'r'):
        parts = [p for p in (meeting, pnum, revision, lang) if p]
        links.append('{}/{}.{}'.format(base, '_'.join(parts), wp_info['Type']))
    return links
def scrape_document_from_link(doc_link, logger, timeout=(2,5)):
    """Take a link and download the associated file as bytes.

    :doc_link: URL of the document to download.
    :logger: Logger used for progress reporting.
    :timeout: (connect, read) timeout in seconds, passed to requests.get.
    :returns: Response body as bytes on a 2xx response; None on a non-ok
        status or timeout.
    """
    logger.info(f"requesting {doc_link}")
    try:
        r = requests.get(doc_link, timeout=timeout)
        logger.info(f"{doc_link} returned status {r.status_code}")
        if r.ok:
            return r.content
    except Timeout as e:
        # Timeouts are expected for missing documents; log and return None.
        logger.info(f"{doc_link} scrape attempt timed-out after {timeout} with exception: {e}")
def scrape_documents(wp_info: dict, out_dir, logger, ignore_existing=True):
    """Take raw info about a working paper, generate doc urls, scrape the files.

    :wp_info: A dict of metadata about a working paper.
    :out_dir: The base directory for saving scraped documents.
    :logger: Logger used for progress reporting.
    :ignore_existing: When True, links whose output file already exists in
        out_dir are skipped.
    :returns: None; successful downloads are written into out_dir.
    """
    logger.info(f"constructing document links for {wp_info['Paper_id']}")
    doc_links = construct_document_links(wp_info)
    if ignore_existing:
        logger.info(f"filtering document links for already-scraped papers")
        logger.info(f"constructed document links before filter: {len(doc_links)}")
        outpaths = [(doc_link, construct_document_outpath(out_dir, wp_info, doc_link)) for doc_link in doc_links]
        logger.info(f"target outpaths: {[str(x[1]) for x in outpaths]}")
        existing = [os.path.join(out_dir, f) for f in os.listdir(out_dir)]
        logger.info(f"already collected outpaths: {[str(x[1]) for x in outpaths if str(x[1]) in existing]}")
        doc_links = [x[0] for x in outpaths if str(x[1]) not in existing]
        logger.info(f"constructed document links after filter: {len(doc_links)}")
    for doc_link in doc_links:
        logger.info(f"attempting to scrape file at {doc_link}")
        raw_doc = scrape_document_from_link(doc_link, logger)
        if raw_doc:
            logger.info(f"successful scrape of {doc_link}")
            outpath = construct_document_outpath(out_dir, wp_info, doc_link)
            logger.info(f"writing file scraped from {doc_link} to {outpath}")
            with open(outpath, 'wb+') as f:
                f.write(raw_doc)
        else:
            logger.info(f"failed scrape of {doc_link}")
def construct_document_outpath(out_dir, wp_info, doc_link):
    """Return the output path for saving the file scraped from *doc_link*.

    :out_dir: Base directory for scraped files.
    :wp_info: Paper metadata (unused; retained for interface stability).
    :doc_link: Document URL; its last path segment becomes the filename.
    :returns: PurePath of out_dir joined with the derived filename.
    """
    _, _, filename = doc_link.rpartition('/')
    return PurePath(out_dir).joinpath(filename)
def construct_metadata_scrape_path(base_data_path):
    """Return a timestamped metadata filepath under *base_data_path*.

    :base_data_path: Directory in which the metadata file should live.
    :returns: PurePath '<base>/<timestamp>_papers_metadata.json'.
    """
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    return PurePath(base_data_path) / (timestamp + '_papers_metadata.json')
def construct_wp_url(page: int):
    """Return the SearchDocDatabase query URL for results page *page*.

    :page: Page number of results to request.
    :returns: The fully-qualified query URL.
    """
    return "https://www.ats.aq/devAS/Meetings/SearchDocDatabase?page=" + str(page)
def scrape_working_papers_listing(starting_page, logger):
    """Use the ats.aq doc database endpoint to collect all paper listings.

    Pages through the SearchDocDatabase endpoint starting at *starting_page*
    and accumulates listings until the pager stops advancing.

    :starting_page: Page number of the results page to start with.
    :logger: A logger object for reporting on results.
    :returns: A list of paper-listing dicts aggregated across all pages.
    """
    logger.info(f'starting with page {starting_page}')
    current_page = starting_page  # set the starting page as our 'current'
    url = construct_wp_url(current_page)  # construct the first url
    papers = []
    while url is not None:
        logger.info(f"attempting to scrape {url}")
        r = requests.get(url)
        logger.info(f"got {r.status_code} for {url}")
        data = json.loads(r.text)
        papers += data['payload']
        logger.info(f"total {len(papers)} listings collected so far")
        next_page = data['pager']['next']
        # The endpoint signals the last page by not advancing 'next'.
        if next_page > current_page:
            current_page = next_page
            url = construct_wp_url(current_page)
        else:
            url = None
    return papers
@click.command()
@click.argument('output_dir', type=click.Path(exists=True))
def main(output_dir):
    """ Runs data scraping scripts to populate raw data (../raw).

    :output_dir: Directory (resolved relative to the project root) that
        receives the metadata JSON and the downloaded documents.
    """
    logger = logging.getLogger(__name__)
    logger.info('using environment variables to generate scrape outpath')
    # project_dir is a module-level global set in the __main__ block.
    absolute_output_dir = PurePath(project_dir).joinpath(output_dir)
    papers = load_all_metadata(absolute_output_dir, logger)
    if not papers:  # if no metadata found, scrape it all from web
        logger.info(f'no existing metadata scrape; beginning')
        metadata_outpath = construct_metadata_scrape_path(absolute_output_dir)
        logger.info(f'best metadata outpath: {metadata_outpath}')
        logger.info('beginning scrape of working papers listing')
        papers = scrape_working_papers_listing(starting_page=1, logger=logger)
        logger.info(f'saving papers metadata to file at {metadata_outpath}')
        with open(metadata_outpath, 'w+') as f:
            json.dump(papers, f, indent=2)
        if os.path.exists(str(metadata_outpath)):
            logger.info(f'metadata file now exists at {metadata_outpath}')
    logger.info(f'beginning collection of underlying paper documents')
    random.shuffle(papers)  # shuffle in place
    for i, paper in enumerate(papers):
        try:
            logger.info(f"attempting scrape of paper ({i+1} of {len(papers)})")
            logger.info(f"paper id: {paper['Paper_id']}")
            scrape_documents(paper, absolute_output_dir, logger=logger)
        except Exception as e:
            # Best-effort: one failed paper must not abort the whole run.
            logger.info(f"attempted scrape of {paper['Paper_id']} failed: {e}")
if __name__ == '__main__':
    # The base directory from which we'll resolve the 'data/raw' path etc.
    project_dir = Path(__file__).resolve().parents[2]
    # Log to logs/scrape_documents.log under the project root.
    os.makedirs(PurePath(project_dir).joinpath('logs'), exist_ok=True)
    log_path = PurePath(project_dir).joinpath('logs/scrape_documents.log')
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(filename=log_path, level=logging.INFO, format=log_fmt)
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    main()
|
#!/usr/bin/env python3
"""This is a wrapper script around the Saxon command line interface.
It attempts to make sure that the classpath is correct and that third
party and other libraries are available."""
import os
import sys
import json
import shutil
import subprocess
from pathlib import Path
from xml.dom.minidom import parse, Node
class JavaClassRunnerException(Exception):
    """Raised for configuration and environment errors in the runner."""
class JavaClassRunner:
"""Executes a java process based on a set of parameters read from a
configuration file. It constructs a class path that contains the
transitive closure of all dependencies rooted at the modules listed
in `maven-packages`. You must download the dependencies first,
see https://xsltng.docbook.org/guide/ch02.html#python-script
"""
# Yes, I have a lot of instance attributes. I'm also using camelCase names,
# which aren't the Pythonic way, but they are the POM way. And I know some
# methods could be functions.
# pylint: disable=R0902,C0103,R0201
    def __init__(self, args):
        """Parse wrapper arguments and load (or create) the configuration file.

        :args: Command-line arguments, excluding the program name.
        :raises JavaClassRunnerException: if no 'java' executable is found.
        """
        self.version = "@@VERSION@@"
        # The obvious thing to do here would be to make the seed
        # org.docbook:docbook-xslTNG:@@VERSION@@, but if we do that
        # then it's very hard to run the script in the build
        # environment because that version may not be published yet.
        # Instead, we rely on the fact that we can get the
        # docbook-xslTNG package from the distribution environment and
        # we seed with its dependencies.
        self.seeds = set(["@@PACKAGE_LIST@@"])
        # Default configuration, persisted to disk on first run.
        self.config = {
            "maven-local": str(Path.home()) + "/.m2/repository",
            "maven-packages": [],
            "pinned-packages": ["xml-apis:xml-apis:1.4.01"],
            "excluded-packages": ["xml-resolver:xml-resolver:1.2"],
            "args": [],
            "classpath": [],
            "class": "net.sf.saxon.Transform",
        }
        self.depends = {}  # group -> artifact -> version -> {jar, dependencies}
        self._cp = {}      # classpath map: group -> artifact -> version -> jar
        self._seen = {}    # packages already visited during the classpath walk
        # This script assumes it's in /path/to/somewhere/docbook/bin/python
        # where "docbook" is the root of the distribution. If you move
        # this script into /usr/local/bin or something, it won't work.
        self.root = os.path.abspath(__file__)
        self.root = self.root[0:self.root.rfind(os.sep)]  # strip /docbook
        self.root = self.root[0:self.root.rfind(os.sep)]  # strip /bin
        self.config_file = str(Path.home()) + "/.docbook-xsltng.json"
        self.stylesheet = None
        self.output = None
        self.catalogs = []
        self.verbose = False
        self.debug = False
        self._java = None
        self._app_args = []
        self._parse_args(args)
        self.catalogs.append(f"{self.root}/xslt/catalog.xml")
        if not self.stylesheet:
            # Default to the DocBook stylesheet shipped with the distribution.
            self.stylesheet = f"-xsl:{self.root}/xslt/docbook.xsl"
            self._app_args.append(self.stylesheet)
        try:
            with open(self.config_file, "r") as depfile:
                self.config = json.load(depfile)
        except FileNotFoundError:
            # First run: write the default configuration out.
            with open(self.config_file, "w") as depfile:
                depfile.write(json.dumps(self.config, indent=2, sort_keys=True))
        self.verbose = self.verbose or self.config.get("verbose", False)
        if not self._java:
            self._java = self.config.get("java", shutil.which("java"))
        if not self._java:
            raise JavaClassRunnerException(
                "The 'java' command is not on your path."
            )
        for pkg in self.config.get("maven-packages", []):
            self.seeds.add(pkg)
        if "class" not in self.config:
            raise JavaClassRunnerException("Configuration must specify 'class'")
        if "verbose" not in self.config:
            self.config["verbose"] = False
# No, pylint, there aren't too many branches in this method.
# pylint: disable=R0912
    def _parse_args(self, args):
        """Split wrapper options (``--...``) from Saxon options.

        Everything after a literal ``--`` is passed straight to Saxon via
        _check_arg.  A trailing --resources request is handled last, once
        the -o: output (if any) is known.
        """
        resources = None
        # Can't use any of the nice arg parsers here because these
        # are mostly args for Saxon.
        done = False
        for arg in args:
            if done:
                self._check_arg(arg)
            else:
                if arg == "--":
                    done = True
                elif arg.startswith("--config:"):
                    self.config_file = arg[9:]
                elif arg.startswith("--java:"):
                    self._java = arg[7:]
                elif arg.startswith("--root:"):
                    self.root = arg[7:]
                elif arg.startswith("--resources"):
                    if arg.startswith("--resources:"):
                        resources = arg[12:]
                    else:
                        resources = ""
                elif arg == "--help":
                    self._help()
                    sys.exit(0)
                elif arg == "--verbose":
                    self.verbose = True
                elif arg == "--debug":
                    self.debug = True
                else:
                    self._check_arg(arg)
        if resources is not None:
            if resources == "" and not self.output:
                # --resources with no dir needs -o: to infer the target.
                self._message(f"Cannot determine output directory; ignoring --resources")
            else:
                if resources == "":
                    resources = os.path.abspath(self.output)
                    resources = os.path.dirname(resources)
                else:
                    resources = os.path.abspath(resources)
                self._configure_resources(resources)
    def _help(self):
        """Print usage information for the wrapper options."""
        print(f"""DocBook xslTNG version @@VERSION@@
Usage: {sys.argv[0]} [options]
This helper script is a convenience wrapper around Saxon. It sets up
the Java classpath and automatically configures a catalog resolver and
the DocBook extension functions.
The initial options, all introduced by two hyphens, are interpreted by
this script. All the remaining options are passed directly to Saxon.
Options:
  --help            Print this message
  --config:file     Use 'file' as the configuration file. The default
                    configuration file is .docbook-xsltng.json in your
                    home directory.
  --java:file       Use 'file' as the Java executable. The default
                    java executable is the first one on your PATH.
  --root:dir        Use 'dir' as the "DocBook xslTNG" root directory.
                    This should be the location where you unzipped the
                    distribution.
  --resources[:dir] Copy stylesheet resources (CSS and JS files) to the
                    output 'dir'. If 'dir' is not specified, the output
                    directory is determined from the -o: option to Saxon.
  --help            Print this help message
  --verbose         Enable 'verbose' mode. This prints more messages.
  --debug           Enable 'debug' mode. Instead of running the
                    transformation, print out the command that would
                    have been run.
  --                Immediately stop interpreting options.
The Saxon options -x, -y, -r, and -init may not be specified as the
wrapper sets these automatically.
""")
def _check_arg(self, arg):
if ":" in arg:
pos = arg.index(":")
name = arg[0:pos]
value = arg[(pos + 1):]
if name in ("-x", "-y", "-r", "-init"):
raise JavaClassRunnerException(
f"The {arg} option cannot be specified")
if name == "-catalog":
self.catalogs.append(value)
return
if name == "-xsl":
self.stylesheet = arg
elif name == "-o":
self.output = value
self._app_args.append(arg)
def _message(self, message):
if self.verbose:
print(message)
    def _configure_resources(self, path):
        """Copy the distribution's CSS/JS resources into output directory *path*."""
        if os.path.isdir(f"{self.root}/resources/css"):
            rsrcroot = f"{self.root}/resources"
        elif os.path.isdir(f"{self.root}/stage/zip/resources/css"):
            # Fallback for running from inside the build tree.
            rsrcroot = f"{self.root}/stage/zip/resources"
        else:
            self._message(f"Failed to find CSS under {self.root}: ignoring --resources")
            return
        # Make sure this is likely to succeed
        for fdir in (path, f"{path}/css"): ##, f"{path}/js"):
            try:
                if not os.path.isdir(fdir):
                    os.makedirs(fdir)
            except:
                self._message(f"Failed to create output directory: {path}")
                return
        pos = len(rsrcroot)
        # Mirror the resource tree rooted at rsrcroot into path.
        for (root, dirs, files) in os.walk(rsrcroot):
            targetdir = path + root[pos:]
            for name in dirs:
                target = f"{targetdir}/{name}"
                if not os.path.isdir(target):
                    os.makedirs(target)
            for name in files:
                source = f"{root}/{name}"
                target = f"{targetdir}/{name}"
                shutil.copy2(source, target)
def _pom(self, groupId, artifactId, version):
groupPath = groupId.replace(".", "/")
pom = f"{self.config["maven-local"]}/"
pom += f"{groupPath}/{artifactId}/{version}/{artifactId}-{version}.pom"
try:
with open(pom, "r") as pomfile:
return parse(pomfile)
except IOError:
return None
def _get_value(self, node, tag):
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE \
and child.tagName == tag \
and child.childNodes.length == 1:
return child.childNodes[0].nodeValue
return None
def _jar(self, groupId, artifactId, version):
mavenLocal = self.config["maven-local"]
jardir = groupId.replace(".", "/")
jardir = f"{mavenLocal}/{jardir}/{artifactId}/{version}"
jarfile = f"{jardir}/{artifactId}-{version}.jar"
if os.path.exists(jarfile):
return jarfile
return None
def _skip(self, groupId, artifactId, version):
return (groupId is None or "${" in groupId
or artifactId is None or "${" in artifactId
or version is None or "${" in version)
    def _save_config(self):
        """Write the current configuration to the config file (sorted, indented)."""
        with open(self.config_file, "w") as depfile:
            depfile.write(json.dumps(self.config, indent=2, sort_keys=True))
    def _update_dependencies(self, groupId, artifactId, version):
        """Record *group:artifact:version* and, recursively, its POM dependencies.

        Skips excluded packages, packages with no locally-cached jar, and
        coordinates containing unexpanded ${...} properties.
        """
        #self._message(f"Update {groupId}:{artifactId}:{version}")
        if groupId not in self.depends:
            self.depends[groupId] = {}
        if artifactId not in self.depends[groupId]:
            self.depends[groupId][artifactId] = {}
        if version in self.depends[groupId][artifactId]:
            return  # already processed
        if f"{groupId}:{artifactId}:{version}" in self.config["excluded-packages"] \
           or f"{groupId}:{artifactId}:*" in self.config["excluded-packages"]:
            return
        jar = self._jar(groupId, artifactId, version)
        if not jar:
            self._message(f"No jar: {groupId}:{artifactId}:{version}")
            return
        pkgconfig = {}
        pkgconfig["jar"] = jar
        pkgconfig["dependencies"] = []
        self.depends[groupId][artifactId][version] = pkgconfig
        if self._skip(groupId, artifactId, version):
            self._message(f"Skipping: {groupId}:{artifactId}:{version}")
            return
        # Get the dependencies from the POM
        if not self._pom(groupId, artifactId, version):
            self._message(f"No pom: {groupId}:{artifactId}:{version}")
            return
        #self._message(f"Checking {groupId}:{artifactId}:{version}")
        pkgconfig["dependencies"] = self._artifact_dependencies(groupId, artifactId, version)
        self.depends[groupId][artifactId][version] = pkgconfig
        self._save_config()
    def _artifact_dependencies(self, groupId, artifactId, version):
        """Read the POM for a package and return its non-test dependency keys.

        Also recurses into each dependency via _update_dependencies.
        """
        # Note: we blindly assume the POM will be formatted the way we expect
        pom = self._pom(groupId, artifactId, version)
        project = pom.documentElement
        # Seed the property map so ${project.*} references expand.
        properties = {"project.groupId": groupId,
                      "project.artifactId": artifactId,
                      "project.version": version}
        for node in project.childNodes:
            if node.nodeType == Node.ELEMENT_NODE and node.tagName == "properties":
                for child in node.childNodes:
                    if child.nodeType == Node.ELEMENT_NODE and child.childNodes.length == 1:
                        properties[child.tagName] = child.childNodes[0].nodeValue
        deps = []
        for node in project.childNodes:
            if node.nodeType == Node.ELEMENT_NODE and node.tagName == "dependencies":
                for depnode in node.getElementsByTagName("dependency"):
                    depGroupId = self._get_value(depnode, "groupId")
                    depArtifactId = self._get_value(depnode, "artifactId")
                    depVersion = self._get_value(depnode, "version")
                    if depGroupId is None or depArtifactId is None or depVersion is None:
                        pass  # incomplete coordinates; nothing to record
                    else:
                        depGroupId = self._expandProperties(depGroupId, properties)
                        depArtifactId = self._expandProperties(depArtifactId, properties)
                        depVersion = self._expandProperties(depVersion, properties)
                        scope = self._get_value(depnode, "scope")
                        if not scope or scope != "test":
                            depkey = f"{depGroupId}:{depArtifactId}:{depVersion}"
                            deps.append(depkey)
                            self._update_dependencies(depGroupId, depArtifactId, depVersion)
        return deps
def _expandProperties(self, value, properties):
for prop in properties:
repl = "${" + prop + "}"
if repl in value:
value = value.replace(repl, properties[prop])
return value
    def compute_dependencies(self):
        """Find all the (transitive closure) of available dependencies
        among the packages that we're going to use.
        """
        for package in self.seeds:
            group, artifact, version = package.split(":")
            self._update_dependencies(group, artifact, version)
        # Pinned packages must be resolvable as well.
        for package in self.config["pinned-packages"]:
            group, artifact, version = package.split(":")
            self._update_dependencies(group, artifact, version)
def _higher_version(self, curver, newver):
if curver == newver:
return False
curlist = curver.split(".")
newlist = newver.split(".")
while curlist and newlist:
cur = curlist[0]
curlist = curlist[1:]
new = newlist[0]
newlist = newlist[1:]
if cur != new:
if cur.isdigit() and new.isdigit():
# print(f"{curver}/{newver}: {cur}/{new}: {int(new)>int(cur)}")
return int(new) > int(cur)
# Meh. We could try to do better, but...
# print(f"{curver}/{newver}: {cur}/{new}: {new>cur}")
return new > cur
# If there are more pieces in the new version, call it newer
return len(newlist) > 0
    def _add_to_classpath(self, package):
        """Add *package* (group:artifact:version) and its dependencies to the
        classpath map, honoring pinned versions and keeping only the highest
        version per artifact.
        """
        # I don't think reorganizing this method into smaller pieces
        # would make it easier to understand. It's just messy.
        if package in self._seen:
            return
        self._seen[package] = True
        group, artifact, version = package.split(":")
        try:
            if group not in self._cp:
                self._cp[group] = {}
            if artifact not in self._cp[group]:
                self._cp[group][artifact] = {}
            if version in self._cp[group][artifact]:
                # We already have this version of this package
                return
            # [expletive] xml-apis:xml-apis:2.x is not
            # compatible with xml-apis:xml-apis:1.x so we
            # need a provision for pinning versions. Sigh.
            basepkg = f"{group}:{artifact}"
            usever = None
            for pkg in self.config.get("pinned-packages", []):
                if pkg.startswith(basepkg):
                    usever = pkg[len(basepkg) + 1 :]
            if usever:
                pass
            elif not self._cp[group][artifact]:
                usever = version
            else:
                # Sigh again. We've already got a jar for this artifact
                # but we're being asked to add another. Pick the one
                # with the higher version number. N.B. There should
                # only ever be one key in the artifact dict
                curver = list(self._cp[group][artifact].keys())[0]
                if self._higher_version(curver, version):
                    usever = version
                else:
                    usever = curver
            if usever in self._cp[group][artifact]:
                # We already have this version of this package
                return
            jar = self.depends[group][artifact][usever]["jar"]
            if jar not in ("SKIPPED", "NOTFOUND"):
                self._cp[group][artifact] = {}
                self._cp[group][artifact][usever] = jar
        except KeyError:
            # I guess we don't have one
            pass
        # Note that we could end up with more things on the classpath
        # than we need. If, for example, we add x:y for version 1.4 of
        # some package and then later we replace 1.4 with 1.5.2 which
        # no longer has a dependency on x:y. I'm assuming that'll be
        # harmless.
        try:
            depends = self.depends[group][artifact][version]
            for dep in depends.get("dependencies", []):
                self._add_to_classpath(dep)
        except KeyError:
            pass
    def classpath(self):
        """Compute the class path for this run."""
        for package in self.seeds:
            self._add_to_classpath(package)
        # Work out what jar files are included in the distribution.
        # We don't want to put them on the class path because then
        # there are two copies of them and SLF4J screams bloody
        # murder about that.
        distlibs = []
        for (_, _, filenames) in os.walk(self.root + "/libs/lib"):
            distlibs += filenames
        cplist = []
        # The user's CLASSPATH environment entries come first.
        if "CLASSPATH" in os.environ:
            for path in os.environ["CLASSPATH"].split(os.pathsep):
                cplist.append(path)
        for group in self._cp:
            for archive in self._cp[group]:
                for version in self._cp[group][archive]:
                    cpjar = self._cp[group][archive][version]
                    if os.path.basename(cpjar) not in distlibs:
                        cplist.append(cpjar)
        # Where is the distribution jar file?
        libpath = os.sep.join([self.root, f"libs/docbook-xslTNG-{self.version}.jar"])
        cplist.append(libpath)
        for path in self.config.get("classpath", []):
            cplist.append(path)
        return os.pathsep.join(cplist)
def args(self):
"""Compute the java arguments."""
args = []
argset = set()
for arg in self._app_args:
args.append(arg)
if ":" in arg:
argset.add(arg[0:arg.index(":")])
else:
argset.add(arg)
for arg in self.config.get("args", []):
if ":" in arg:
key = arg[0:arg.index(":")]
else:
key = arg
if key not in argset:
args.append(arg)
for arg in ["-x:org.xmlresolver.tools.ResolvingXMLReader",
"-y:org.xmlresolver.tools.ResolvingXMLReader",
"-r:org.xmlresolver.Resolver",
"-init:org.docbook.xsltng.extensions.Register"]:
if ":" in arg:
key = arg[0:arg.index(":")]
else:
key = arg
if key not in argset:
args.append(arg)
return args
def run(self):
    """Run the process.

    In debug mode, print the java command that would have been run
    (executable, JVM options, class path, main class, arguments)
    instead of executing it. Otherwise spawn the JVM with
    subprocess.call and wait for it to finish.
    """
    cp = self.classpath()
    args = self.args()
    # ";" is the separator the XML resolver expects in the
    # xml.catalog.files property (independent of os.pathsep).
    jopt = ["-Dxml.catalog.files=" + ";".join(self.catalogs)]
    jopt = jopt + self.config.get("java-options", [])
    if self.debug:
        # Pretty-print the command, one component per indented line.
        print(self._java)
        for item in jopt:
            print(f"\t{item}")
        print("-cp")
        for item in cp.split(os.pathsep):
            print(f"\t{item}")
        print(self.config["class"])
        for item in args:
            print(f"\t{item}")
    else:
        cmd = [self._java] + jopt + ["-cp", cp] + [self.config["class"]] + args
        subprocess.call(cmd)
if __name__ == "__main__":
    # I'm perfectly happy with the name 'docbook'
    # pylint: disable=C0103
    try:
        # Parse the command line, resolve the maven dependency closure,
        # then launch (or, with --debug, print) the java command.
        docbook = JavaClassRunner(sys.argv[1:])
        docbook.compute_dependencies()
        docbook.run()
    except JavaClassRunnerException as err:
        # Runner errors are user-facing: print the message, exit non-zero.
        print(str(err))
        sys.exit(1)
| #!/usr/bin/env python3
"""This is a wrapper script around the Saxon command line interface.
It attempts to make sure that the classpath is correct and that third
party and other libraries are available."""
import os
import sys
import json
import shutil
import subprocess
from pathlib import Path
from xml.dom.minidom import parse, Node
class JavaClassRunnerException(Exception):
    """Subclass of Exception for errors raised by the runner.

    Raised for user-correctable problems (missing java executable,
    forbidden Saxon options, missing 'class' in the configuration).
    """
class JavaClassRunner:
    """Executes a java process based on a set of parameters read from a
    configuration file. It constructs a class path that contains the
    transitive closure of all dependencies rooted at the modules listed
    in `maven-packages`. You must download the dependencies first,
    see https://xsltng.docbook.org/guide/ch02.html#python-script
    """
    # Yes, I have a lot of instance attributes. I'm also using camelCase names,
    # which aren't the Pythonic way, but they are the POM way. And I know some
    # methods could be functions.
    # pylint: disable=R0902,C0103,R0201

    def __init__(self, args):
        """Parse `args`, load (or create) the user configuration file,
        and locate the java executable.

        Raises JavaClassRunnerException when java cannot be found or
        the configuration lacks a 'class' entry.
        """
        self.version = "@@VERSION@@"
        # The obvious thing to do here would be to make the seed
        # org.docbook:docbook-xslTNG:@@VERSION@@, but if we do that
        # then it's very hard to run the script in the build
        # environment because that version may not be published yet.
        # Instead, we rely on the fact that we can get the
        # docbook-xslTNG package from the distribution environment and
        # we seed with its dependencies.
        self.seeds = set(["@@PACKAGE_LIST@@"])
        # Defaults; replaced wholesale if the config file exists.
        self.config = {
            "maven-local": str(Path.home()) + "/.m2/repository",
            "maven-packages": [],
            "pinned-packages": ["xml-apis:xml-apis:1.4.01"],
            "excluded-packages": ["xml-resolver:xml-resolver:1.2"],
            "args": [],
            "classpath": [],
            "class": "net.sf.saxon.Transform",
        }
        self.depends = {}
        self._cp = {}
        self._seen = {}
        # This script assumes it's in /path/to/somewhere/docbook/bin/python
        # where "docbook" is the root of the distribution. If you move
        # this script into /usr/local/bin or something, it won't work.
        self.root = os.path.abspath(__file__)
        self.root = self.root[0:self.root.rfind(os.sep)] # strip /docbook
        self.root = self.root[0:self.root.rfind(os.sep)] # strip /bin
        self.config_file = str(Path.home()) + "/.docbook-xsltng.json"
        self.stylesheet = None
        self.output = None
        self.catalogs = []
        self.verbose = False
        self.debug = False
        self._java = None
        self._app_args = []
        self._parse_args(args)
        self.catalogs.append(f"{self.root}/xslt/catalog.xml")
        # Default stylesheet if the caller didn't pass -xsl: (a caller
        # -xsl: is already in _app_args via _check_arg).
        if not self.stylesheet:
            self.stylesheet = f"-xsl:{self.root}/xslt/docbook.xsl"
            self._app_args.append(self.stylesheet)
        try:
            with open(self.config_file, "r") as depfile:
                self.config = json.load(depfile)
        except FileNotFoundError:
            # First run: persist the defaults so the user has a file to edit.
            with open(self.config_file, "w") as depfile:
                depfile.write(json.dumps(self.config, indent=2, sort_keys=True))
        self.verbose = self.verbose or self.config.get("verbose", False)
        if not self._java:
            self._java = self.config.get("java", shutil.which("java"))
        if not self._java:
            raise JavaClassRunnerException(
                "The 'java' command is not on your path."
            )
        for pkg in self.config.get("maven-packages", []):
            self.seeds.add(pkg)
        if "class" not in self.config:
            raise JavaClassRunnerException("Configuration must specify 'class'")
        if "verbose" not in self.config:
            self.config["verbose"] = False

    # No, pylint, there aren't too many branches in this method.
    # pylint: disable=R0912
    def _parse_args(self, args):
        """Split command-line args into wrapper options (--xxx) and
        pass-through Saxon args; everything after "--" goes to Saxon.
        """
        resources = None
        # Can't use any of the nice arg parsers here because these
        # are mostly args for Saxon.
        done = False
        for arg in args:
            if done:
                self._check_arg(arg)
            else:
                if arg == "--":
                    done = True
                elif arg.startswith("--config:"):
                    self.config_file = arg[9:]
                elif arg.startswith("--java:"):
                    self._java = arg[7:]
                elif arg.startswith("--root:"):
                    self.root = arg[7:]
                elif arg.startswith("--resources"):
                    # "--resources" alone means "derive the directory
                    # from the Saxon -o: option later".
                    if arg.startswith("--resources:"):
                        resources = arg[12:]
                    else:
                        resources = ""
                elif arg == "--help":
                    self._help()
                    sys.exit(0)
                elif arg == "--verbose":
                    self.verbose = True
                elif arg == "--debug":
                    self.debug = True
                else:
                    self._check_arg(arg)
        if resources is not None:
            if resources == "" and not self.output:
                self._message(f"Cannot determine output directory; ignoring --resources")
            else:
                if resources == "":
                    # Use the directory containing the -o: output file.
                    resources = os.path.abspath(self.output)
                    resources = os.path.dirname(resources)
                else:
                    resources = os.path.abspath(resources)
                self._configure_resources(resources)

    def _help(self):
        """Print the command-line usage text to stdout."""
        print(f"""DocBook xslTNG version @@VERSION@@

Usage: {sys.argv[0]} [options]

This helper script is a convenience wrapper around Saxon. It sets up
the Java classpath and automatically configures a catalog resolver and
the DocBook extension functions.

The initial options, all introduced by two hyphens, are interpreted by
this script. All the remaining options are passed directly to Saxon.

Options:

  --help             Print this message
  --config:file      Use 'file' as the configuration file. The default
                     configuration file is .docbook-xsltng.json in your
                     home directory.
  --java:file        Use 'file' as the Java executable. The default
                     java executable is the first one on your PATH.
  --root:dir         Use 'dir' as the "DocBook xslTNG" root directory.
                     This should be the location where you unzipped the
                     distribution.
  --resources[:dir]  Copy stylesheet resources (CSS and JS files) to the
                     output 'dir'. If 'dir' is not specified, the output
                     directory is determined from the -o: option to Saxon.
  --help             Print this help message
  --verbose          Enable 'verbose' mode. This prints more messages.
  --debug            Enable 'debug' mode. Instead of running the
                     transformation, print out the command that would
                     have been run.
  --                 Immediately stop interpreting options.

The Saxon options -x, -y, -r, and -init may not be specified as the
wrapper sets these automatically.
""")

    def _check_arg(self, arg):
        """Vet a pass-through Saxon argument.

        Rejects options the wrapper reserves (-x, -y, -r, -init),
        diverts -catalog: values into self.catalogs, and remembers
        -xsl: and -o: for later; everything else is forwarded as-is.
        """
        if ":" in arg:
            pos = arg.index(":")
            name = arg[0:pos]
            value = arg[(pos + 1):]
            if name in ("-x", "-y", "-r", "-init"):
                raise JavaClassRunnerException(
                    f"The {arg} option cannot be specified")
            if name == "-catalog":
                # Catalogs become a JVM property, not a Saxon argument.
                self.catalogs.append(value)
                return
            if name == "-xsl":
                self.stylesheet = arg
            elif name == "-o":
                self.output = value
        self._app_args.append(arg)

    def _message(self, message):
        """Print `message` only in verbose mode."""
        if self.verbose:
            print(message)

    def _configure_resources(self, path):
        """Copy the distribution's resource tree (CSS/JS) under `path`."""
        if os.path.isdir(f"{self.root}/resources/css"):
            rsrcroot = f"{self.root}/resources"
        elif os.path.isdir(f"{self.root}/stage/zip/resources/css"):
            # Build-tree layout used before the zip is assembled.
            rsrcroot = f"{self.root}/stage/zip/resources"
        else:
            self._message(f"Failed to find CSS under {self.root}: ignoring --resources")
            return
        # Make sure this is likely to succeed
        for fdir in (path, f"{path}/css"): ##, f"{path}/js"):
            try:
                if not os.path.isdir(fdir):
                    os.makedirs(fdir)
            # NOTE(review): bare except — any failure (even KeyboardInterrupt)
            # is treated as "couldn't create the directory".
            except:
                self._message(f"Failed to create output directory: {path}")
                return
        # Mirror the whole resource tree below `path`.
        pos = len(rsrcroot)
        for (root, dirs, files) in os.walk(rsrcroot):
            targetdir = path + root[pos:]
            for name in dirs:
                target = f"{targetdir}/{name}"
                if not os.path.isdir(target):
                    os.makedirs(target)
            for name in files:
                source = f"{root}/{name}"
                target = f"{targetdir}/{name}"
                shutil.copy2(source, target)

    def _pom(self, groupId, artifactId, version):
        """Parse and return the POM DOM from the local maven cache,
        or None if it cannot be opened.
        """
        groupPath = groupId.replace(".", "/")
        pom = f"{self.config['maven-local']}/"
        pom += f"{groupPath}/{artifactId}/{version}/{artifactId}-{version}.pom"
        try:
            with open(pom, "r") as pomfile:
                return parse(pomfile)
        except IOError:
            return None

    def _get_value(self, node, tag):
        """Return the text content of `node`'s first `tag` child element
        that has exactly one (text) child, or None.
        """
        for child in node.childNodes:
            if child.nodeType == Node.ELEMENT_NODE \
               and child.tagName == tag \
               and child.childNodes.length == 1:
                return child.childNodes[0].nodeValue
        return None

    def _jar(self, groupId, artifactId, version):
        """Return the path of the artifact's jar in the local maven
        cache, or None if it doesn't exist on disk.
        """
        mavenLocal = self.config["maven-local"]
        jardir = groupId.replace(".", "/")
        jardir = f"{mavenLocal}/{jardir}/{artifactId}/{version}"
        jarfile = f"{jardir}/{artifactId}-{version}.jar"
        if os.path.exists(jarfile):
            return jarfile
        return None

    def _skip(self, groupId, artifactId, version):
        """True when any coordinate is missing or still contains an
        unexpanded ${...} maven property.
        """
        return (groupId is None or "${" in groupId
                or artifactId is None or "${" in artifactId
                or version is None or "${" in version)

    def _save_config(self):
        """Write the current configuration back to the config file."""
        with open(self.config_file, "w") as depfile:
            depfile.write(json.dumps(self.config, indent=2, sort_keys=True))

    def _update_dependencies(self, groupId, artifactId, version):
        """Record the artifact's jar and (recursively, via
        _artifact_dependencies) its POM dependencies in self.depends.
        Excluded packages and already-seen versions are skipped.
        """
        #self._message(f"Update {groupId}:{artifactId}:{version}")
        if groupId not in self.depends:
            self.depends[groupId] = {}
        if artifactId not in self.depends[groupId]:
            self.depends[groupId][artifactId] = {}
        if version in self.depends[groupId][artifactId]:
            return
        if f"{groupId}:{artifactId}:{version}" in self.config["excluded-packages"] \
           or f"{groupId}:{artifactId}:*" in self.config["excluded-packages"]:
            return
        jar = self._jar(groupId, artifactId, version)
        if not jar:
            self._message(f"No jar: {groupId}:{artifactId}:{version}")
            return
        pkgconfig = {}
        pkgconfig["jar"] = jar
        pkgconfig["dependencies"] = []
        self.depends[groupId][artifactId][version] = pkgconfig
        if self._skip(groupId, artifactId, version):
            self._message(f"Skipping: {groupId}:{artifactId}:{version}")
            return
        # Get the dependencies from the POM
        if not self._pom(groupId, artifactId, version):
            self._message(f"No pom: {groupId}:{artifactId}:{version}")
            return
        #self._message(f"Checking {groupId}:{artifactId}:{version}")
        pkgconfig["dependencies"] = self._artifact_dependencies(groupId, artifactId, version)
        self.depends[groupId][artifactId][version] = pkgconfig
        self._save_config()

    def _artifact_dependencies(self, groupId, artifactId, version):
        """Return the artifact's non-test dependency keys
        ("group:artifact:version") from its POM, recursing into each.
        """
        # Note: we blindly assume the POM will be formatted the way we expect
        # I don't care that this method has 16 local variables.
        # pylint: disable=R0914
        pom = self._pom(groupId, artifactId, version)
        project = pom.documentElement
        # Seed the property table with the implicit project.* properties.
        properties = {"project.groupId": groupId,
                      "project.artifactId": artifactId,
                      "project.version": version}
        for node in project.childNodes:
            if node.nodeType == Node.ELEMENT_NODE and node.tagName == "properties":
                for child in node.childNodes:
                    if child.nodeType == Node.ELEMENT_NODE and child.childNodes.length == 1:
                        properties[child.tagName] = child.childNodes[0].nodeValue
        deps = []
        for node in project.childNodes:
            if node.nodeType == Node.ELEMENT_NODE and node.tagName == "dependencies":
                for depnode in node.getElementsByTagName("dependency"):
                    depGroupId = self._get_value(depnode, "groupId")
                    depArtifactId = self._get_value(depnode, "artifactId")
                    depVersion = self._get_value(depnode, "version")
                    if depGroupId is None or depArtifactId is None or depVersion is None:
                        # Incomplete coordinates (e.g. version managed
                        # elsewhere): ignore this dependency.
                        pass
                    else:
                        depGroupId = self._expandProperties(depGroupId, properties)
                        depArtifactId = self._expandProperties(depArtifactId, properties)
                        depVersion = self._expandProperties(depVersion, properties)
                        scope = self._get_value(depnode, "scope")
                        if not scope or scope != "test":
                            depkey = f"{depGroupId}:{depArtifactId}:{depVersion}"
                            deps.append(depkey)
                            self._update_dependencies(depGroupId, depArtifactId, depVersion)
        return deps

    def _expandProperties(self, value, properties):
        """Replace every ${prop} occurrence in `value` with its value
        from `properties` (one pass, no recursion).
        """
        for prop in properties:
            repl = "${" + prop + "}"
            if repl in value:
                value = value.replace(repl, properties[prop])
        return value

    def compute_dependencies(self):
        """Find all the (transitive closure) of available dependencies
        among the packages that we're going to use.
        """
        for package in self.seeds:
            group, artifact, version = package.split(":")
            self._update_dependencies(group, artifact, version)
        for package in self.config["pinned-packages"]:
            group, artifact, version = package.split(":")
            self._update_dependencies(group, artifact, version)

    def _higher_version(self, curver, newver):
        """True if dotted version `newver` compares greater than
        `curver` (numeric where possible, lexicographic otherwise).
        """
        if curver == newver:
            return False
        curlist = curver.split(".")
        newlist = newver.split(".")
        while curlist and newlist:
            cur = curlist[0]
            curlist = curlist[1:]
            new = newlist[0]
            newlist = newlist[1:]
            if cur != new:
                if cur.isdigit() and new.isdigit():
                    # print(f"{curver}/{newver}: {cur}/{new}: {int(new)>int(cur)}")
                    return int(new) > int(cur)
                # Meh. We could try to do better, but...
                # print(f"{curver}/{newver}: {cur}/{new}: {new>cur}")
                return new > cur
        # If there are more pieces in the new version, call it newer
        return len(newlist) > 0

    def _add_to_classpath(self, package):
        """Add `package`'s jar (honoring pins and preferring the higher
        version per artifact) to self._cp, then recurse into its
        recorded dependencies.
        """
        # I don't think reorganizing this method into smaller pieces
        # would make it easier to understand. It's just messy.
        # pylint: disable=R0912
        if package in self._seen:
            return
        self._seen[package] = True
        group, artifact, version = package.split(":")
        try:
            if group not in self._cp:
                self._cp[group] = {}
            if artifact not in self._cp[group]:
                self._cp[group][artifact] = {}
            if version in self._cp[group][artifact]:
                # We already have this version of this package
                return
            # [expletive] xml-apis:xml-apis:2.x is not
            # compatible with xml-apis:xml-apis:1.x so we
            # need a provision for pinning versions. Sigh.
            basepkg = f"{group}:{artifact}"
            usever = None
            for pkg in self.config.get("pinned-packages", []):
                if pkg.startswith(basepkg):
                    usever = pkg[len(basepkg) + 1 :]
            if usever:
                pass
            elif not self._cp[group][artifact]:
                usever = version
            else:
                # Sigh again. We've already got a jar for this artifact
                # but we're being asked to add another. Pick the one
                # with the higher version number. N.B. There should
                # only ever be one key in the artifact dict
                curver = list(self._cp[group][artifact].keys())[0]
                if self._higher_version(curver, version):
                    usever = version
                else:
                    usever = curver
            if usever in self._cp[group][artifact]:
                # We already have this version of this package
                return
            jar = self.depends[group][artifact][usever]["jar"]
            if jar not in ("SKIPPED", "NOTFOUND"):
                # Replace any previous version: one jar per artifact.
                self._cp[group][artifact] = {}
                self._cp[group][artifact][usever] = jar
        except KeyError:
            # I guess we don't have one
            pass
        # Note that we could end up with more things on the classpath
        # than we need. If, for example, we add x:y for version 1.4 of
        # some package and then later we replace 1.4 with 1.5.2 which
        # no longer has a dependency on x:y. I'm assuming that'll be
        # harmless.
        try:
            depends = self.depends[group][artifact][version]
            for dep in depends.get("dependencies", []):
                self._add_to_classpath(dep)
        except KeyError:
            pass

    def classpath(self):
        """Compute the class path for this run."""
        for package in self.seeds:
            self._add_to_classpath(package)
        # Work out what jar files are included in the distribution.
        # We don't want to put them on the class path because then
        # there are two copies of them and SLF4J screams bloody
        # murder about that.
        distlibs = []
        for (_, _, filenames) in os.walk(self.root + "/libs/lib"):
            distlibs += filenames
        cplist = []
        if "CLASSPATH" in os.environ:
            for path in os.environ["CLASSPATH"].split(os.pathsep):
                cplist.append(path)
        for group in self._cp:
            for archive in self._cp[group]:
                for version in self._cp[group][archive]:
                    cpjar = self._cp[group][archive][version]
                    if os.path.basename(cpjar) not in distlibs:
                        cplist.append(cpjar)
        # Where is the distribution jar file?
        libpath = os.sep.join([self.root, f"libs/docbook-xslTNG-{self.version}.jar"])
        cplist.append(libpath)
        for path in self.config.get("classpath", []):
            cplist.append(path)
        return os.pathsep.join(cplist)

    def args(self):
        """Compute the java arguments."""
        args = []
        argset = set()
        for arg in self._app_args:
            args.append(arg)
            if ":" in arg:
                argset.add(arg[0:arg.index(":")])
            else:
                argset.add(arg)
        # NOTE(review): the keys of config-supplied args are never added
        # to argset, so a config arg (e.g. a custom -x:...) can be
        # duplicated by the forced defaults below — confirm intended.
        for arg in self.config.get("args", []):
            if ":" in arg:
                key = arg[0:arg.index(":")]
            else:
                key = arg
            if key not in argset:
                args.append(arg)
        # Forced wrapper defaults: resolver + extension registration.
        for arg in ["-x:org.xmlresolver.tools.ResolvingXMLReader",
                    "-y:org.xmlresolver.tools.ResolvingXMLReader",
                    "-r:org.xmlresolver.Resolver",
                    "-init:org.docbook.xsltng.extensions.Register"]:
            if ":" in arg:
                key = arg[0:arg.index(":")]
            else:
                key = arg
            if key not in argset:
                args.append(arg)
        return args

    def run(self):
        """Run the process."""
        cp = self.classpath()
        args = self.args()
        # ";" is the separator the XML resolver expects in the
        # xml.catalog.files property (independent of os.pathsep).
        jopt = ["-Dxml.catalog.files=" + ";".join(self.catalogs)]
        jopt = jopt + self.config.get("java-options", [])
        if self.debug:
            # Print the command instead of executing it.
            print(self._java)
            for item in jopt:
                print(f"\t{item}")
            print("-cp")
            for item in cp.split(os.pathsep):
                print(f"\t{item}")
            print(self.config["class"])
            for item in args:
                print(f"\t{item}")
        else:
            cmd = [self._java] + jopt + ["-cp", cp] + [self.config["class"]] + args
            subprocess.call(cmd)
if __name__ == "__main__":
    # I'm perfectly happy with the name 'docbook'
    # pylint: disable=C0103
    try:
        # Parse the command line, resolve the maven dependency closure,
        # then launch (or, with --debug, print) the java command.
        docbook = JavaClassRunner(sys.argv[1:])
        docbook.compute_dependencies()
        docbook.run()
    except JavaClassRunnerException as err:
        # Runner errors are user-facing: print the message, exit non-zero.
        print(str(err))
        sys.exit(1)
|
from discord import Embed
from redbot.core import Config, commands
from .single import Single
from .speedevent import Speedevent
class TypeRacer(commands.Cog):
    """A Typing Speed test cog, to give test your typing skills"""

    def __init__(self, bot):
        self.bot = bot
        # Per-guild settings store; the identifier is an arbitrary
        # unique number for this cog's Config namespace.
        self.config = Config.get_conf(self, identifier=29834829369)
        default_guild = {
            "time_start": 60,
            "text_size": (10, 20),
            "type": "gibberish",
            "dm": True,
        }
        self.config.register_guild(**default_guild)
        # Running tests: at most one per user ("personal") and one per
        # guild ("guilds") at a time.
        self.jobs = {"guilds": {}, "personal": {}}

    @commands.group()
    async def typer(self, ctx):
        """Commands to start and stop personal typing speed test"""

    @typer.command()
    async def settings(self, ctx):
        """Shows the current setting in the guild"""
        settings = await self.config.guild_from_id(ctx.guild.id).all()
        emb = Embed(color=await ctx.embed_color())
        # Bug fix: use single quotes inside the f-string expressions —
        # nested double quotes are a SyntaxError on Python < 3.12.
        val = (
            f"`Type `:{settings['type']}\n"
            + f"`Send dms `:{settings['dm']}\n"
            + f"`Start timer`:{settings['time_start']}\n"
            + f"`No of Words`:{settings['text_size'][0]} - {settings['text_size'][1]}\n"
        )
        emb.add_field(name="TyperRacer settings", value=val)
        await ctx.send(embed=emb)

    @typer.command(name="start")
    async def start_personal(self, ctx):
        """Start a personal typing speed test"""
        if ctx.author.id in self.jobs["personal"]:
            await ctx.send("You already are running a speedtest")
        else:
            test = Single(ctx, await self.config.guild(ctx.guild).all())
            self.jobs["personal"][ctx.author.id] = test
            await test.start()
            # The job entry is removed once the test finishes or is cancelled.
            self.jobs["personal"].pop(ctx.author.id)

    @typer.command()
    async def stop(self, ctx):
        """Stop/Cancel taking the personal typing test"""
        if ctx.author.id in self.jobs["personal"]:
            await self.jobs["personal"][ctx.author.id].cancel()
        else:
            await ctx.send("You need to start the test.")

    @commands.guild_only()
    @commands.group()
    async def speedevent(self, ctx):
        """Play a speed test event with multiple players"""

    @speedevent.command(name="start")
    async def start_event(self, ctx, countdown: int = None):
        """Start a typing speed test event \n Takes an optional countdown argument to start the test\n(Be warned that cheating gets you disqualified)\nThis lasts for 3 minutes at max, and stops if everyone completed"""
        if ctx.guild.id in self.jobs["guilds"]:
            await ctx.send("There's already a speedtest event running in this guild")
        else:
            test = Speedevent(
                ctx,
                countdown or await self.config.guild(ctx.guild).time_start(),
                await self.config.guild(ctx.guild).all(),
            )
            self.jobs["guilds"][ctx.guild.id] = test
            await test.start()
            self.jobs["guilds"].pop(ctx.guild.id)

    @speedevent.command()
    async def join(self, ctx):
        """Join the typing test speed event"""
        if ctx.guild.id in self.jobs["guilds"]:
            await self.jobs["guilds"][ctx.guild.id].join(ctx.author.id, ctx.author.display_name)
        else:
            await ctx.send("Event has not started yet")

    @commands.mod_or_permissions(administrator=True)
    @commands.group()
    async def typerset(self, ctx):
        """Settings for the typing speed test"""

    @typerset.command()
    async def time(self, ctx, num: int):
        """Sets the time delay (in seconds) to start a speedtest event (max limit = 1000 seconds)"""
        if num <= 1000 and num >= 10:
            await self.config.guild_from_id(ctx.guild.id).time_start.set(num)
            await ctx.send(f"Changed delay to {num}")
        else:
            await ctx.send("The Min limit is 10 seconds\nThe Max limit is 1000 seconds")

    @typerset.command()
    async def words(self, ctx, min: int, max: int):
        """Sets the number of minimum and maximum number of words
        Range: min>0 and max<=100"""
        if min > 0 and max <= 100:
            await self.config.guild_from_id(ctx.guild.id).text_size.set((min, max))
            await ctx.send(f"The number of words are changed to\nMinimum:{min}\nMaximum:{max}")
        else:
            await ctx.send(
                "The minimum number of words must be greater than 0\nThe maxiumum number of words must be less than or equal to 100 "
            )

    @typerset.command()
    async def dm(self, ctx, toggle: bool):
        """Toggle whether the bot should send analytics in the dm or not"""
        await self.config.guild_from_id(ctx.guild.id).dm.set(toggle)
        # Bug fix: single quotes inside the f-string expression (see above).
        await ctx.send(f"I will {'' if toggle else 'not'} send the speedevent analytics in dms")

    @typerset.command(name="type")
    async def type_of_text(self, ctx, type_txt: str):
        """Set the type of text to generate.
        Types available: lorem,gibberish"""
        check = ("lorem", "gibberish")
        if type_txt in check:
            await self.config.guild_from_id(ctx.guild.id).type.set(type_txt)
            await ctx.send(f"Changed type to {type_txt}")
        else:
            await ctx.send("Only two valid types available: gibberish,lorem")

    async def red_get_data_for_user(self, *, user_id: int):
        # this cog does not store any data
        return {}

    async def red_delete_data_for_user(self, *, requester, user_id: int) -> None:
        # this cog does not store any data
        pass
| from discord import Embed
from redbot.core import Config, commands
from .single import Single
from .speedevent import Speedevent
class TypeRacer(commands.Cog):
    """A Typing Speed test cog, to give test your typing skills"""

    def __init__(self, bot):
        self.bot = bot
        # Per-guild settings store; the identifier is an arbitrary
        # unique number for this cog's Config namespace.
        self.config = Config.get_conf(self, identifier=29834829369)
        default_guild = {
            "time_start": 60,
            "text_size": (10, 20),
            "type": "gibberish",
            "dm": True,
        }
        self.config.register_guild(**default_guild)
        # Running tests: at most one per user ("personal") and one per
        # guild ("guilds") at a time.
        self.jobs = {"guilds": {}, "personal": {}}

    @commands.group()
    async def typer(self, ctx):
        """Commands to start and stop personal typing speed test"""

    @typer.command()
    async def settings(self, ctx):
        """Shows the current setting in the guild"""
        settings = await self.config.guild_from_id(ctx.guild.id).all()
        emb = Embed(color=await ctx.embed_color())
        val = (
            f"`Type `:{settings['type']}\n"
            + f"`Send dms `:{settings['dm']}\n"
            + f"`Start timer`:{settings['time_start']}\n"
            + f"`No of Words`:{settings['text_size'][0]} - {settings['text_size'][1]}\n"
        )
        emb.add_field(name="TyperRacer settings", value=val)
        await ctx.send(embed=emb)

    @typer.command(name="start")
    async def start_personal(self, ctx):
        """Start a personal typing speed test"""
        if ctx.author.id in self.jobs["personal"]:
            await ctx.send("You already are running a speedtest")
        else:
            test = Single(ctx, await self.config.guild(ctx.guild).all())
            self.jobs["personal"][ctx.author.id] = test
            await test.start()
            # The job entry is removed once the test finishes or is cancelled.
            self.jobs["personal"].pop(ctx.author.id)

    @typer.command()
    async def stop(self, ctx):
        """Stop/Cancel taking the personal typing test"""
        if ctx.author.id in self.jobs["personal"]:
            await self.jobs["personal"][ctx.author.id].cancel()
        else:
            await ctx.send("You need to start the test.")

    @commands.guild_only()
    @commands.group()
    async def speedevent(self, ctx):
        """Play a speed test event with multiple players"""

    @speedevent.command(name="start")
    async def start_event(self, ctx, countdown: int = None):
        """Start a typing speed test event \n Takes an optional countdown argument to start the test\n(Be warned that cheating gets you disqualified)\nThis lasts for 3 minutes at max, and stops if everyone completed"""
        if ctx.guild.id in self.jobs["guilds"]:
            await ctx.send("There's already a speedtest event running in this guild")
        else:
            test = Speedevent(
                ctx,
                countdown or await self.config.guild(ctx.guild).time_start(),
                await self.config.guild(ctx.guild).all(),
            )
            self.jobs["guilds"][ctx.guild.id] = test
            await test.start()
            self.jobs["guilds"].pop(ctx.guild.id)

    @speedevent.command()
    async def join(self, ctx):
        """Join the typing test speed event"""
        if ctx.guild.id in self.jobs["guilds"]:
            await self.jobs["guilds"][ctx.guild.id].join(ctx.author.id, ctx.author.display_name)
        else:
            await ctx.send("Event has not started yet")

    @commands.mod_or_permissions(administrator=True)
    @commands.group()
    async def typerset(self, ctx):
        """Settings for the typing speed test"""

    @typerset.command()
    async def time(self, ctx, num: int):
        """Sets the time delay (in seconds) to start a speedtest event (max limit = 1000 seconds)"""
        if num <= 1000 and num >= 10:
            await self.config.guild_from_id(ctx.guild.id).time_start.set(num)
            await ctx.send(f"Changed delay to {num}")
        else:
            await ctx.send("The Min limit is 10 seconds\nThe Max limit is 1000 seconds")

    @typerset.command()
    async def words(self, ctx, min: int, max: int):
        """Sets the number of minimum and maximum number of words
        Range: min>0 and max<=100"""
        if min > 0 and max <= 100:
            await self.config.guild_from_id(ctx.guild.id).text_size.set((min, max))
            await ctx.send(f"The number of words are changed to\nMinimum:{min}\nMaximum:{max}")
        else:
            await ctx.send(
                "The minimum number of words must be greater than 0\nThe maxiumum number of words must be less than or equal to 100 "
            )

    @typerset.command()
    async def dm(self, ctx, toggle: bool):
        """Toggle whether the bot should send analytics in the dm or not"""
        await self.config.guild_from_id(ctx.guild.id).dm.set(toggle)
        await ctx.send(f"I will {'' if toggle else 'not'} send the speedevent analytics in dms")

    @typerset.command(name="type")
    async def type_of_text(self, ctx, type_txt: str):
        """Set the type of text to generate.
        Types available: lorem,gibberish"""
        check = ("lorem", "gibberish")
        if type_txt in check:
            await self.config.guild_from_id(ctx.guild.id).type.set(type_txt)
            await ctx.send(f"Changed type to {type_txt}")
        else:
            await ctx.send("Only two valid types available: gibberish,lorem")

    async def red_get_data_for_user(self, *, user_id: int):
        # this cog does not store any data
        return {}

    async def red_delete_data_for_user(self, *, requester, user_id: int) -> None:
        # this cog does not store any data
        pass
|
class Organizations(object):
def __init__(self, session):
super(Organizations, self).__init__()
self._session = session
def getOrganizations(self):
"""
**List the organizations that the user has privileges on**
https://developer.cisco.com/meraki/api-v1/#!get-organizations
"""
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'getOrganizations'
}
resource = f'/organizations'
return self._session.get(metadata, resource)
def createOrganization(self, name: str):
"""
**Create a new organization**
https://developer.cisco.com/meraki/api-v1/#!create-organization
- name (string): The name of the organization
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'createOrganization'
}
resource = f'/organizations'
body_params = ['name', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationSaml(self, orgId: str):
"""
**Returns the SAML SSO enabled settings for an organization.**
https://developer.cisco.com/meraki/api-v1/#!get-organization-saml
- orgId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'saml'],
'operation': 'getOrganizationSaml'
}
resource = f'/organizations/{orgId}/saml'
return self._session.get(metadata, resource)
def updateOrganizationSaml(self, orgId: str, **kwargs):
"""
**Updates the SAML SSO enabled settings for an organization.**
https://developer.cisco.com/meraki/api-v1/#!update-organization-saml
- orgId (string): (required)
- enabled (boolean): Boolean for updating SAML SSO enabled settings.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'saml'],
'operation': 'updateOrganizationSaml'
}
resource = f'/organizations/{orgId}/saml'
body_params = ['enabled', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganization(self, organizationId: str):
"""
**Return an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'getOrganization'
}
resource = f'/organizations/{organizationId}'
return self._session.get(metadata, resource)
def updateOrganization(self, organizationId: str, **kwargs):
"""
**Update an organization**
https://developer.cisco.com/meraki/api-v1/#!update-organization
- organizationId (string): (required)
- name (string): The name of the organization
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'updateOrganization'
}
resource = f'/organizations/{organizationId}'
body_params = ['name', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganization(self, organizationId: str):
"""
**Delete an organization**
https://developer.cisco.com/meraki/api-v1/#!delete-organization
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'deleteOrganization'
}
resource = f'/organizations/{organizationId}'
return self._session.delete(metadata, resource)
def createOrganizationActionBatch(self, organizationId: str, actions: list, **kwargs):
"""
**Create an action batch**
https://developer.cisco.com/meraki/api-v1/#!create-organization-action-batch
- organizationId (string): (required)
- actions (array): A set of changes to make as part of this action (<a href='https://developer.cisco.com/meraki/api/#/rest/guides/action-batches/'>more details</a>)
- confirmed (boolean): Set to true for immediate execution. Set to false if the action should be previewed before executing. This property cannot be unset once it is true. Defaults to false.
- synchronous (boolean): Set to true to force the batch to run synchronous. There can be at most 20 actions in synchronous batch. Defaults to false.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'actionBatches'],
'operation': 'createOrganizationActionBatch'
}
resource = f'/organizations/{organizationId}/actionBatches'
body_params = ['confirmed', 'synchronous', 'actions', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationActionBatches(self, organizationId: str, **kwargs):
"""
**Return the list of action batches in the organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-action-batches
- organizationId (string): (required)
- status (string): Filter batches by status. Valid types are pending, completed, and failed.
"""
kwargs.update(locals())
if 'status' in kwargs:
options = ['pending', 'completed', 'failed']
assert kwargs['status'] in options, f'''"status" cannot be "{kwargs['status']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'actionBatches'],
'operation': 'getOrganizationActionBatches'
}
resource = f'/organizations/{organizationId}/actionBatches'
query_params = ['status', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getOrganizationActionBatch(self, organizationId: str, actionBatchId: str):
"""
**Return an action batch**
https://developer.cisco.com/meraki/api-v1/#!get-organization-action-batch
- organizationId (string): (required)
- actionBatchId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'actionBatches'],
'operation': 'getOrganizationActionBatch'
}
resource = f'/organizations/{organizationId}/actionBatches/{actionBatchId}'
return self._session.get(metadata, resource)
def deleteOrganizationActionBatch(self, organizationId: str, actionBatchId: str):
"""
**Delete an action batch**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-action-batch
- organizationId (string): (required)
- actionBatchId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'actionBatches'],
'operation': 'deleteOrganizationActionBatch'
}
resource = f'/organizations/{organizationId}/actionBatches/{actionBatchId}'
return self._session.delete(metadata, resource)
def updateOrganizationActionBatch(self, organizationId: str, actionBatchId: str, **kwargs):
"""
**Update an action batch**
https://developer.cisco.com/meraki/api-v1/#!update-organization-action-batch
- organizationId (string): (required)
- actionBatchId (string): (required)
- confirmed (boolean): A boolean representing whether or not the batch has been confirmed. This property cannot be unset once it is true.
- synchronous (boolean): Set to true to force the batch to run synchronous. There can be at most 20 actions in synchronous batch.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'actionBatches'],
'operation': 'updateOrganizationActionBatch'
}
resource = f'/organizations/{organizationId}/actionBatches/{actionBatchId}'
body_params = ['confirmed', 'synchronous', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganizationAdmins(self, organizationId: str):
"""
**List the dashboard administrators in this organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-admins
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'admins'],
'operation': 'getOrganizationAdmins'
}
resource = f'/organizations/{organizationId}/admins'
return self._session.get(metadata, resource)
def createOrganizationAdmin(self, organizationId: str, email: str, name: str, orgAccess: str, **kwargs):
"""
**Create a new dashboard administrator**
https://developer.cisco.com/meraki/api-v1/#!create-organization-admin
- organizationId (string): (required)
- email (string): The email of the dashboard administrator. This attribute can not be updated.
- name (string): The name of the dashboard administrator
- orgAccess (string): The privilege of the dashboard administrator on the organization. Can be one of 'full', 'read-only', 'enterprise' or 'none'
- tags (array): The list of tags that the dashboard administrator has privileges on
- networks (array): The list of networks that the dashboard administrator has privileges on
- authenticationMethod (string): The method of authentication the user will use to sign in to the Meraki dashboard. Can be one of 'Email' or 'Cisco SecureX Sign-On'. The default is Email authentication
"""
kwargs.update(locals())
if 'orgAccess' in kwargs:
options = ['full', 'read-only', 'enterprise', 'none']
assert kwargs['orgAccess'] in options, f'''"orgAccess" cannot be "{kwargs['orgAccess']}", & must be set to one of: {options}'''
if 'authenticationMethod' in kwargs:
options = ['Email', 'Cisco SecureX Sign-On']
assert kwargs['authenticationMethod'] in options, f'''"authenticationMethod" cannot be "{kwargs['authenticationMethod']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'admins'],
'operation': 'createOrganizationAdmin'
}
resource = f'/organizations/{organizationId}/admins'
body_params = ['email', 'name', 'orgAccess', 'tags', 'networks', 'authenticationMethod', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def updateOrganizationAdmin(self, organizationId: str, id: str, **kwargs):
"""
**Update an administrator**
https://developer.cisco.com/meraki/api-v1/#!update-organization-admin
- organizationId (string): (required)
- id (string): (required)
- name (string): The name of the dashboard administrator
- orgAccess (string): The privilege of the dashboard administrator on the organization. Can be one of 'full', 'read-only', 'enterprise' or 'none'
- tags (array): The list of tags that the dashboard administrator has privileges on
- networks (array): The list of networks that the dashboard administrator has privileges on
"""
kwargs.update(locals())
if 'orgAccess' in kwargs:
options = ['full', 'read-only', 'enterprise', 'none']
assert kwargs['orgAccess'] in options, f'''"orgAccess" cannot be "{kwargs['orgAccess']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'admins'],
'operation': 'updateOrganizationAdmin'
}
resource = f'/organizations/{organizationId}/admins/{id}'
body_params = ['name', 'orgAccess', 'tags', 'networks', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganizationAdmin(self, organizationId: str, id: str):
"""
**Revoke all access for a dashboard administrator within this organization**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-admin
- organizationId (string): (required)
- id (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'admins'],
'operation': 'deleteOrganizationAdmin'
}
resource = f'/organizations/{organizationId}/admins/{id}'
return self._session.delete(metadata, resource)
def getOrganizationApiRequests(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**List the API requests made by an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-api-requests
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 31 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 31 days after t0.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 31 days. The default is 31 days.
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 50.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- adminId (string): Filter the results by the ID of the admin who made the API requests
- path (string): Filter the results by the path of the API requests
- method (string): Filter the results by the method of the API requests (must be 'GET', 'PUT', 'POST' or 'DELETE')
- responseCode (integer): Filter the results by the response code of the API requests
- sourceIp (string): Filter the results by the IP address of the originating API request
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'monitor', 'apiRequests'],
'operation': 'getOrganizationApiRequests'
}
resource = f'/organizations/{organizationId}/apiRequests'
query_params = ['t0', 't1', 'timespan', 'perPage', 'startingAfter', 'endingBefore', 'adminId', 'path', 'method', 'responseCode', 'sourceIp', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getOrganizationApiRequestsOverview(self, organizationId: str, **kwargs):
"""
**Return an aggregated overview of API requests data**
https://developer.cisco.com/meraki/api-v1/#!get-organization-api-requests-overview
- organizationId (string): (required)
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 31 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 31 days after t0.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 31 days. The default is 31 days.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'monitor', 'apiRequests', 'overview'],
'operation': 'getOrganizationApiRequestsOverview'
}
resource = f'/organizations/{organizationId}/apiRequests/overview'
query_params = ['t0', 't1', 'timespan', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getOrganizationBrandingPolicies(self, organizationId: str):
"""
**List the branding policies of an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-branding-policies
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies'],
'operation': 'getOrganizationBrandingPolicies'
}
resource = f'/organizations/{organizationId}/brandingPolicies'
return self._session.get(metadata, resource)
def createOrganizationBrandingPolicy(self, organizationId: str, name: str, enabled: bool, adminSettings: dict, **kwargs):
"""
**Add a new branding policy to an organization**
https://developer.cisco.com/meraki/api-v1/#!create-organization-branding-policy
- organizationId (string): (required)
- name (string): Name of the Dashboard branding policy.
- enabled (boolean): Boolean indicating whether this policy is enabled.
- adminSettings (object): Settings for describing which kinds of admins this policy applies to.
- helpSettings (object): Settings for describing the modifications to various Help page features. Each property in this object accepts one of
'default or inherit' (do not modify functionality), 'hide' (remove the section from Dashboard), or 'show' (always show
the section on Dashboard). Some properties in this object also accept custom HTML used to replace the section on
Dashboard; see the documentation for each property to see the allowed values.
Each property defaults to 'default or inherit' when not provided.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies'],
'operation': 'createOrganizationBrandingPolicy'
}
resource = f'/organizations/{organizationId}/brandingPolicies'
body_params = ['name', 'enabled', 'adminSettings', 'helpSettings', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationBrandingPoliciesPriorities(self, organizationId: str):
"""
**Return the branding policy IDs of an organization in priority order. IDs are ordered in ascending order of priority (IDs later in the array have higher priority).**
https://developer.cisco.com/meraki/api-v1/#!get-organization-branding-policies-priorities
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies', 'priorities'],
'operation': 'getOrganizationBrandingPoliciesPriorities'
}
resource = f'/organizations/{organizationId}/brandingPolicies/priorities'
return self._session.get(metadata, resource)
def updateOrganizationBrandingPoliciesPriorities(self, organizationId: str, brandingPolicyIds: list):
"""
**Update the priority ordering of an organization's branding policies.**
https://developer.cisco.com/meraki/api-v1/#!update-organization-branding-policies-priorities
- organizationId (string): (required)
- brandingPolicyIds (array): A list of branding policy IDs arranged in ascending priority order (IDs later in the array have higher priority).
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies', 'priorities'],
'operation': 'updateOrganizationBrandingPoliciesPriorities'
}
resource = f'/organizations/{organizationId}/brandingPolicies/priorities'
body_params = ['brandingPolicyIds', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganizationBrandingPolicy(self, organizationId: str, brandingPolicyId: str):
"""
**Return a branding policy**
https://developer.cisco.com/meraki/api-v1/#!get-organization-branding-policy
- organizationId (string): (required)
- brandingPolicyId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies'],
'operation': 'getOrganizationBrandingPolicy'
}
resource = f'/organizations/{organizationId}/brandingPolicies/{brandingPolicyId}'
return self._session.get(metadata, resource)
def updateOrganizationBrandingPolicy(self, organizationId: str, brandingPolicyId: str, **kwargs):
"""
**Update a branding policy**
https://developer.cisco.com/meraki/api-v1/#!update-organization-branding-policy
- organizationId (string): (required)
- brandingPolicyId (string): (required)
- name (string): Name of the Dashboard branding policy.
- enabled (boolean): Boolean indicating whether this policy is enabled.
- adminSettings (object): Settings for describing which kinds of admins this policy applies to.
- helpSettings (object): Settings for describing the modifications to various Help page features. Each property in this object accepts one of
'default or inherit' (do not modify functionality), 'hide' (remove the section from Dashboard), or 'show' (always show
the section on Dashboard). Some properties in this object also accept custom HTML used to replace the section on
Dashboard; see the documentation for each property to see the allowed values.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies'],
'operation': 'updateOrganizationBrandingPolicy'
}
resource = f'/organizations/{organizationId}/brandingPolicies/{brandingPolicyId}'
body_params = ['name', 'enabled', 'adminSettings', 'helpSettings', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganizationBrandingPolicy(self, organizationId: str, brandingPolicyId: str):
"""
**Delete a branding policy**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-branding-policy
- organizationId (string): (required)
- brandingPolicyId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies'],
'operation': 'deleteOrganizationBrandingPolicy'
}
resource = f'/organizations/{organizationId}/brandingPolicies/{brandingPolicyId}'
return self._session.delete(metadata, resource)
def claimIntoOrganization(self, organizationId: str, **kwargs):
"""
**Claim a list of devices, licenses, and/or orders into an organization. When claiming by order, all devices and licenses in the order will be claimed; licenses will be added to the organization and devices will be placed in the organization's inventory.**
https://developer.cisco.com/meraki/api-v1/#!claim-into-organization
- organizationId (string): (required)
- orders (array): The numbers of the orders that should be claimed
- serials (array): The serials of the devices that should be claimed
- licenses (array): The licenses that should be claimed
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'claimIntoOrganization'
}
resource = f'/organizations/{organizationId}/claim'
body_params = ['orders', 'serials', 'licenses', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def cloneOrganization(self, organizationId: str, name: str):
"""
**Create a new organization by cloning the addressed organization**
https://developer.cisco.com/meraki/api-v1/#!clone-organization
- organizationId (string): (required)
- name (string): The name of the new organization
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'cloneOrganization'
}
resource = f'/organizations/{organizationId}/clone'
body_params = ['name', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationConfigTemplates(self, organizationId: str):
"""
**List the configuration templates for this organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-config-templates
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'configTemplates'],
'operation': 'getOrganizationConfigTemplates'
}
resource = f'/organizations/{organizationId}/configTemplates'
return self._session.get(metadata, resource)
def createOrganizationConfigTemplate(self, organizationId: str, name: str, **kwargs):
"""
**Create a new configuration template**
https://developer.cisco.com/meraki/api-v1/#!create-organization-config-template
- organizationId (string): (required)
- name (string): The name of the configuration template
- timeZone (string): The timezone of the configuration template. For a list of allowed timezones, please see the 'TZ' column in the table in <a target='_blank' href='https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'>this article</a>. Not applicable if copying from existing network or template
- copyFromNetworkId (string): The ID of the network or config template to copy configuration from
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'configTemplates'],
'operation': 'createOrganizationConfigTemplate'
}
resource = f'/organizations/{organizationId}/configTemplates'
body_params = ['name', 'timeZone', 'copyFromNetworkId', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def updateOrganizationConfigTemplate(self, organizationId: str, configTemplateId: str, **kwargs):
"""
**Update a configuration template**
https://developer.cisco.com/meraki/api-v1/#!update-organization-config-template
- organizationId (string): (required)
- configTemplateId (string): (required)
- name (string): The name of the configuration template
- timeZone (string): The timezone of the configuration template. For a list of allowed timezones, please see the 'TZ' column in the table in <a target='_blank' href='https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'>this article.</a>
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'configTemplates'],
'operation': 'updateOrganizationConfigTemplate'
}
resource = f'/organizations/{organizationId}/configTemplates/{configTemplateId}'
body_params = ['name', 'timeZone', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganizationConfigTemplate(self, organizationId: str, configTemplateId: str):
"""
**Remove a configuration template**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-config-template
- organizationId (string): (required)
- configTemplateId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'configTemplates'],
'operation': 'deleteOrganizationConfigTemplate'
}
resource = f'/organizations/{organizationId}/configTemplates/{configTemplateId}'
return self._session.delete(metadata, resource)
def getOrganizationConfigTemplate(self, organizationId: str, configTemplateId: str):
"""
**Return a single configuration template**
https://developer.cisco.com/meraki/api-v1/#!get-organization-config-template
- organizationId (string): (required)
- configTemplateId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'configTemplates'],
'operation': 'getOrganizationConfigTemplate'
}
resource = f'/organizations/{organizationId}/configTemplates/{configTemplateId}'
return self._session.get(metadata, resource)
def getOrganizationConfigurationChanges(self, organizationId: str, total_pages=1, direction='prev', **kwargs):
"""
**View the Change Log for your organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-configuration-changes
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" or "prev" (default) page
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 365 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 365 days after t0.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 365 days. The default is 365 days.
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 5000. Default is 5000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- networkId (string): Filters on the given network
- adminId (string): Filters on the given Admin
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'monitor', 'configurationChanges'],
'operation': 'getOrganizationConfigurationChanges'
}
resource = f'/organizations/{organizationId}/configurationChanges'
query_params = ['t0', 't1', 'timespan', 'perPage', 'startingAfter', 'endingBefore', 'networkId', 'adminId', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getOrganizationDevices(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**List the devices in an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-devices
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- configurationUpdatedAfter (string): Filter results by whether or not the device's configuration has been updated after the given timestamp
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'devices'],
'operation': 'getOrganizationDevices'
}
resource = f'/organizations/{organizationId}/devices'
query_params = ['perPage', 'startingAfter', 'endingBefore', 'configurationUpdatedAfter', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getOrganizationDevicesStatuses(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**List the status of every Meraki device in the organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-devices-statuses
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'monitor', 'devices', 'statuses'],
'operation': 'getOrganizationDevicesStatuses'
}
resource = f'/organizations/{organizationId}/devices/statuses'
query_params = ['perPage', 'startingAfter', 'endingBefore', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getOrganizationDevicesUplinksLossAndLatency(self, organizationId: str, **kwargs):
"""
**Return the uplink loss and latency for every MX in the organization from at latest 2 minutes ago**
https://developer.cisco.com/meraki/api-v1/#!get-organization-devices-uplinks-loss-and-latency
- organizationId (string): (required)
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 365 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 5 minutes after t0. The latest possible time that t1 can be is 2 minutes into the past.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 5 minutes. The default is 5 minutes.
- uplink (string): Optional filter for a specific WAN uplink. Valid uplinks are wan1, wan2, cellular. Default will return all uplinks.
- ip (string): Optional filter for a specific destination IP. Default will return all destination IPs.
"""
kwargs.update(locals())
if 'uplink' in kwargs:
options = ['wan1', 'wan2', 'cellular']
assert kwargs['uplink'] in options, f'''"uplink" cannot be "{kwargs['uplink']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'monitor', 'devices', 'uplinksLossAndLatency'],
'operation': 'getOrganizationDevicesUplinksLossAndLatency'
}
resource = f'/organizations/{organizationId}/devices/uplinksLossAndLatency'
query_params = ['t0', 't1', 'timespan', 'uplink', 'ip', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getOrganizationInventory(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**Return the device inventory for an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-inventory
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- usedState (string): Filter results by used or unused inventory. Accepted values are "used" or "unused".
"""
kwargs.update(locals())
if 'usedState' in kwargs:
options = ['used', 'unused']
assert kwargs['usedState'] in options, f'''"usedState" cannot be "{kwargs['usedState']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'inventory'],
'operation': 'getOrganizationInventory'
}
resource = f'/organizations/{organizationId}/inventory'
query_params = ['perPage', 'startingAfter', 'endingBefore', 'usedState', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getOrganizationLicenses(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**List the licenses for an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-licenses
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- deviceSerial (string): Filter the licenses to those assigned to a particular device
- networkId (string): Filter the licenses to those assigned in a particular network
- state (string): Filter the licenses to those in a particular state. Can be one of 'active', 'expired', 'expiring', 'unused', 'unusedActive' or 'recentlyQueued'
"""
kwargs.update(locals())
if 'state' in kwargs:
options = ['active', 'expired', 'expiring', 'unused', 'unusedActive', 'recentlyQueued']
assert kwargs['state'] in options, f'''"state" cannot be "{kwargs['state']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'getOrganizationLicenses'
}
resource = f'/organizations/{organizationId}/licenses'
query_params = ['perPage', 'startingAfter', 'endingBefore', 'deviceSerial', 'networkId', 'state', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def assignOrganizationLicensesSeats(self, organizationId: str, licenseId: str, networkId: str, seatCount: int):
"""
**Assign SM seats to a network. This will increase the managed SM device limit of the network**
https://developer.cisco.com/meraki/api-v1/#!assign-organization-licenses-seats
- organizationId (string): (required)
- licenseId (string): The ID of the SM license to assign seats from
- networkId (string): The ID of the SM network to assign the seats to
- seatCount (integer): The number of seats to assign to the SM network. Must be less than or equal to the total number of seats of the license
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'assignOrganizationLicensesSeats'
}
resource = f'/organizations/{organizationId}/licenses/assignSeats'
body_params = ['licenseId', 'networkId', 'seatCount', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def moveOrganizationLicenses(self, organizationId: str, destOrganizationId: str, licenseIds: list):
"""
**Move licenses to another organization. This will also move any devices that the licenses are assigned to**
https://developer.cisco.com/meraki/api-v1/#!move-organization-licenses
- organizationId (string): (required)
- destOrganizationId (string): The ID of the organization to move the licenses to
- licenseIds (array): A list of IDs of licenses to move to the new organization
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'moveOrganizationLicenses'
}
resource = f'/organizations/{organizationId}/licenses/move'
body_params = ['destOrganizationId', 'licenseIds', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def moveOrganizationLicensesSeats(self, organizationId: str, destOrganizationId: str, licenseId: str, seatCount: int):
"""
**Move SM seats to another organization**
https://developer.cisco.com/meraki/api-v1/#!move-organization-licenses-seats
- organizationId (string): (required)
- destOrganizationId (string): The ID of the organization to move the SM seats to
- licenseId (string): The ID of the SM license to move the seats from
- seatCount (integer): The number of seats to move to the new organization. Must be less than or equal to the total number of seats of the license
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'moveOrganizationLicensesSeats'
}
resource = f'/organizations/{organizationId}/licenses/moveSeats'
body_params = ['destOrganizationId', 'licenseId', 'seatCount', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationLicensesOverview(self, organizationId: str):
"""
**Return an overview of the license state for an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-licenses-overview
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'monitor', 'licenses', 'overview'],
'operation': 'getOrganizationLicensesOverview'
}
resource = f'/organizations/{organizationId}/licenses/overview'
return self._session.get(metadata, resource)
def renewOrganizationLicensesSeats(self, organizationId: str, licenseIdToRenew: str, unusedLicenseId: str):
"""
**Renew SM seats of a license. This will extend the license expiration date of managed SM devices covered by this license**
https://developer.cisco.com/meraki/api-v1/#!renew-organization-licenses-seats
- organizationId (string): (required)
- licenseIdToRenew (string): The ID of the SM license to renew. This license must already be assigned to an SM network
- unusedLicenseId (string): The SM license to use to renew the seats on 'licenseIdToRenew'. This license must have at least as many seats available as there are seats on 'licenseIdToRenew'
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'renewOrganizationLicensesSeats'
}
resource = f'/organizations/{organizationId}/licenses/renewSeats'
body_params = ['licenseIdToRenew', 'unusedLicenseId', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationLicense(self, organizationId: str, licenseId: str):
"""
**Display a license**
https://developer.cisco.com/meraki/api-v1/#!get-organization-license
- organizationId (string): (required)
- licenseId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'getOrganizationLicense'
}
resource = f'/organizations/{organizationId}/licenses/{licenseId}'
return self._session.get(metadata, resource)
def updateOrganizationLicense(self, organizationId: str, licenseId: str, **kwargs):
"""
**Update a license**
https://developer.cisco.com/meraki/api-v1/#!update-organization-license
- organizationId (string): (required)
- licenseId (string): (required)
- deviceSerial (string): The serial number of the device to assign this license to. Set this to null to unassign the license. If a different license is already active on the device, this parameter will control queueing/dequeuing this license.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'updateOrganizationLicense'
}
resource = f'/organizations/{organizationId}/licenses/{licenseId}'
body_params = ['deviceSerial', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganizationNetworks(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**List the networks that the user has privileges on in an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-networks
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- configTemplateId (string): An optional parameter that is the ID of a config template. Will return all networks bound to that template.
- tags (array): An optional parameter to filter networks by tags. The filtering is case-sensitive. If tags are included, 'tagsFilterType' should also be included (see below).
- tagsFilterType (string): An optional parameter of value 'withAnyTags' or 'withAllTags' to indicate whether to return networks which contain ANY or ALL of the included tags. If no type is included, 'withAnyTags' will be selected.
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 100000. Default is 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
"""
kwargs.update(locals())
if 'tagsFilterType' in kwargs:
options = ['withAnyTags', 'withAllTags']
assert kwargs['tagsFilterType'] in options, f'''"tagsFilterType" cannot be "{kwargs['tagsFilterType']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'networks'],
'operation': 'getOrganizationNetworks'
}
resource = f'/organizations/{organizationId}/networks'
query_params = ['configTemplateId', 'tags', 'tagsFilterType', 'perPage', 'startingAfter', 'endingBefore', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
array_params = ['tags', ]
for k, v in kwargs.items():
if k.strip() in array_params:
params[f'{k.strip()}[]'] = kwargs[f'{k}']
params.pop(k.strip())
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def createOrganizationNetwork(self, organizationId: str, name: str, productTypes: list, **kwargs):
"""
**Create a network**
https://developer.cisco.com/meraki/api-v1/#!create-organization-network
- organizationId (string): (required)
- name (string): The name of the new network
- productTypes (array): The product type(s) of the new network. Valid types are wireless, appliance, switch, systemsManager, camera, cellularGateway. If more than one type is included, the network will be a combined network.
- tags (array): A list of tags to be applied to the network
- timeZone (string): The timezone of the network. For a list of allowed timezones, please see the 'TZ' column in the table in <a target='_blank' href='https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'>this article.</a>
- copyFromNetworkId (string): The ID of the network to copy configuration from. Other provided parameters will override the copied configuration, except type which must match this network's type exactly.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'networks'],
'operation': 'createOrganizationNetwork'
}
resource = f'/organizations/{organizationId}/networks'
body_params = ['name', 'productTypes', 'tags', 'timeZone', 'copyFromNetworkId', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def combineOrganizationNetworks(self, organizationId: str, name: str, networkIds: list, **kwargs):
"""
**Combine multiple networks into a single network**
https://developer.cisco.com/meraki/api-v1/#!combine-organization-networks
- organizationId (string): (required)
- name (string): The name of the combined network
- networkIds (array): A list of the network IDs that will be combined. If an ID of a combined network is included in this list, the other networks in the list will be grouped into that network
- enrollmentString (string): A unique identifier which can be used for device enrollment or easy access through the Meraki SM Registration page or the Self Service Portal. Please note that changing this field may cause existing bookmarks to break. All networks that are part of this combined network will have their enrollment string appended by '-network_type'. If left empty, all exisitng enrollment strings will be deleted.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'networks'],
'operation': 'combineOrganizationNetworks'
}
resource = f'/organizations/{organizationId}/networks/combine'
body_params = ['name', 'networkIds', 'enrollmentString', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationOpenapiSpec(self, organizationId: str):
"""
**Return the OpenAPI 2.0 Specification of the organization's API documentation in JSON**
https://developer.cisco.com/meraki/api-v1/#!get-organization-openapi-spec
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'monitor', 'openapiSpec'],
'operation': 'getOrganizationOpenapiSpec'
}
resource = f'/organizations/{organizationId}/openapiSpec'
return self._session.get(metadata, resource)
def getOrganizationSamlIdps(self, organizationId: str):
"""
**List the SAML IdPs in your organization.**
https://developer.cisco.com/meraki/api-v1/#!get-organization-saml-idps
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'saml', 'idps'],
'operation': 'getOrganizationSamlIdps'
}
resource = f'/organizations/{organizationId}/saml/idps'
return self._session.get(metadata, resource)
def createOrganizationSamlIdp(self, organizationId: str, x509certSha1Fingerprint: str, **kwargs):
"""
**Create a SAML IdP for your organization.**
https://developer.cisco.com/meraki/api-v1/#!create-organization-saml-idp
- organizationId (string): (required)
- x509certSha1Fingerprint (string): Fingerprint (SHA1) of the SAML certificate provided by your Identity Provider (IdP). This will be used for encryption / validation.
- sloLogoutUrl (string): Dashboard will redirect users to this URL when they sign out.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'saml', 'idps'],
'operation': 'createOrganizationSamlIdp'
}
resource = f'/organizations/{organizationId}/saml/idps'
body_params = ['x509certSha1Fingerprint', 'sloLogoutUrl', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def updateOrganizationSamlIdp(self, organizationId: str, idpId: str, **kwargs):
"""
**Update a SAML IdP in your organization**
https://developer.cisco.com/meraki/api-v1/#!update-organization-saml-idp
- organizationId (string): (required)
- idpId (string): (required)
- x509certSha1Fingerprint (string): Fingerprint (SHA1) of the SAML certificate provided by your Identity Provider (IdP). This will be used for encryption / validation.
- sloLogoutUrl (string): Dashboard will redirect users to this URL when they sign out.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'saml', 'idps'],
'operation': 'updateOrganizationSamlIdp'
}
resource = f'/organizations/{organizationId}/saml/idps/{idpId}'
body_params = ['x509certSha1Fingerprint', 'sloLogoutUrl', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganizationSamlIdp(self, organizationId: str, idpId: str):
"""
**Get a SAML IdP from your organization.**
https://developer.cisco.com/meraki/api-v1/#!get-organization-saml-idp
- organizationId (string): (required)
- idpId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'saml', 'idps'],
'operation': 'getOrganizationSamlIdp'
}
resource = f'/organizations/{organizationId}/saml/idps/{idpId}'
return self._session.get(metadata, resource)
def deleteOrganizationSamlIdp(self, organizationId: str, idpId: str):
"""
**Remove a SAML IdP in your organization.**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-saml-idp
- organizationId (string): (required)
- idpId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'saml', 'idps'],
'operation': 'deleteOrganizationSamlIdp'
}
resource = f'/organizations/{organizationId}/saml/idps/{idpId}'
return self._session.delete(metadata, resource)
def getOrganizationSamlRoles(self, organizationId: str):
"""
**List the SAML roles for this organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-saml-roles
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'samlRoles'],
'operation': 'getOrganizationSamlRoles'
}
resource = f'/organizations/{organizationId}/samlRoles'
return self._session.get(metadata, resource)
def createOrganizationSamlRole(self, organizationId: str, **kwargs):
"""
**Create a SAML role**
https://developer.cisco.com/meraki/api-v1/#!create-organization-saml-role
- organizationId (string): (required)
- role (string): The role of the SAML administrator
- orgAccess (string): The privilege of the SAML administrator on the organization
- tags (array): The list of tags that the SAML administrator has privleges on
- networks (array): The list of networks that the SAML administrator has privileges on
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'samlRoles'],
'operation': 'createOrganizationSamlRole'
}
resource = f'/organizations/{organizationId}/samlRoles'
body_params = ['role', 'orgAccess', 'tags', 'networks', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationSamlRole(self, organizationId: str, samlRoleId: str):
"""
**Return a SAML role**
https://developer.cisco.com/meraki/api-v1/#!get-organization-saml-role
- organizationId (string): (required)
- samlRoleId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'samlRoles'],
'operation': 'getOrganizationSamlRole'
}
resource = f'/organizations/{organizationId}/samlRoles/{samlRoleId}'
return self._session.get(metadata, resource)
def updateOrganizationSamlRole(self, organizationId: str, samlRoleId: str, **kwargs):
"""
**Update a SAML role**
https://developer.cisco.com/meraki/api-v1/#!update-organization-saml-role
- organizationId (string): (required)
- samlRoleId (string): (required)
- role (string): The role of the SAML administrator
- orgAccess (string): The privilege of the SAML administrator on the organization
- tags (array): The list of tags that the SAML administrator has privleges on
- networks (array): The list of networks that the SAML administrator has privileges on
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'samlRoles'],
'operation': 'updateOrganizationSamlRole'
}
resource = f'/organizations/{organizationId}/samlRoles/{samlRoleId}'
body_params = ['role', 'orgAccess', 'tags', 'networks', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganizationSamlRole(self, organizationId: str, samlRoleId: str):
"""
**Remove a SAML role**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-saml-role
- organizationId (string): (required)
- samlRoleId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'samlRoles'],
'operation': 'deleteOrganizationSamlRole'
}
resource = f'/organizations/{organizationId}/samlRoles/{samlRoleId}'
return self._session.delete(metadata, resource)
def getOrganizationSnmp(self, organizationId: str):
"""
**Return the SNMP settings for an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-snmp
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'snmp'],
'operation': 'getOrganizationSnmp'
}
resource = f'/organizations/{organizationId}/snmp'
return self._session.get(metadata, resource)
def updateOrganizationSnmp(self, organizationId: str, **kwargs):
"""
**Update the SNMP settings for an organization**
https://developer.cisco.com/meraki/api-v1/#!update-organization-snmp
- organizationId (string): (required)
- v2cEnabled (boolean): Boolean indicating whether SNMP version 2c is enabled for the organization.
- v3Enabled (boolean): Boolean indicating whether SNMP version 3 is enabled for the organization.
- v3AuthMode (string): The SNMP version 3 authentication mode. Can be either 'MD5' or 'SHA'.
- v3AuthPass (string): The SNMP version 3 authentication password. Must be at least 8 characters if specified.
- v3PrivMode (string): The SNMP version 3 privacy mode. Can be either 'DES' or 'AES128'.
- v3PrivPass (string): The SNMP version 3 privacy password. Must be at least 8 characters if specified.
- peerIps (array): The list of IPv4 addresses that are allowed to access the SNMP server.
"""
kwargs.update(locals())
if 'v3AuthMode' in kwargs:
options = ['MD5', 'SHA']
assert kwargs['v3AuthMode'] in options, f'''"v3AuthMode" cannot be "{kwargs['v3AuthMode']}", & must be set to one of: {options}'''
if 'v3PrivMode' in kwargs:
options = ['DES', 'AES128']
assert kwargs['v3PrivMode'] in options, f'''"v3PrivMode" cannot be "{kwargs['v3PrivMode']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'snmp'],
'operation': 'updateOrganizationSnmp'
}
resource = f'/organizations/{organizationId}/snmp'
body_params = ['v2cEnabled', 'v3Enabled', 'v3AuthMode', 'v3AuthPass', 'v3PrivMode', 'v3PrivPass', 'peerIps', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganizationWebhookLogs(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**Return the log of webhook POSTs sent**
https://developer.cisco.com/meraki/api-v1/#!get-organization-webhook-logs
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 90 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 31 days after t0.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 31 days. The default is 1 day.
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 50.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- url (string): The URL the webhook was sent to
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'monitor', 'webhookLogs'],
'operation': 'getOrganizationWebhookLogs'
}
resource = f'/organizations/{organizationId}/webhookLogs'
query_params = ['t0', 't1', 'timespan', 'perPage', 'startingAfter', 'endingBefore', 'url', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction) | class Organizations(object):
def __init__(self, session):
super(Organizations, self).__init__()
self._session = session
def getOrganizations(self):
"""
**List the organizations that the user has privileges on**
https://developer.cisco.com/meraki/api-v1/#!get-organizations
"""
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'getOrganizations'
}
resource = f'/organizations'
return self._session.get(metadata, resource)
def createOrganization(self, name: str):
"""
**Create a new organization**
https://developer.cisco.com/meraki/api-v1/#!create-organization
- name (string): The name of the organization
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'createOrganization'
}
resource = f'/organizations'
body_params = ['name', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationSaml(self, orgId: str):
"""
**Returns the SAML SSO enabled settings for an organization.**
https://developer.cisco.com/meraki/api-v1/#!get-organization-saml
- orgId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'saml'],
'operation': 'getOrganizationSaml'
}
resource = f'/organizations/{orgId}/saml'
return self._session.get(metadata, resource)
def updateOrganizationSaml(self, orgId: str, **kwargs):
"""
**Updates the SAML SSO enabled settings for an organization.**
https://developer.cisco.com/meraki/api-v1/#!update-organization-saml
- orgId (string): (required)
- enabled (boolean): Boolean for updating SAML SSO enabled settings.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'saml'],
'operation': 'updateOrganizationSaml'
}
resource = f'/organizations/{orgId}/saml'
body_params = ['enabled', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganization(self, organizationId: str):
"""
**Return an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'getOrganization'
}
resource = f'/organizations/{organizationId}'
return self._session.get(metadata, resource)
def updateOrganization(self, organizationId: str, **kwargs):
"""
**Update an organization**
https://developer.cisco.com/meraki/api-v1/#!update-organization
- organizationId (string): (required)
- name (string): The name of the organization
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'updateOrganization'
}
resource = f'/organizations/{organizationId}'
body_params = ['name', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganization(self, organizationId: str):
"""
**Delete an organization**
https://developer.cisco.com/meraki/api-v1/#!delete-organization
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'deleteOrganization'
}
resource = f'/organizations/{organizationId}'
return self._session.delete(metadata, resource)
def createOrganizationActionBatch(self, organizationId: str, actions: list, **kwargs):
"""
**Create an action batch**
https://developer.cisco.com/meraki/api-v1/#!create-organization-action-batch
- organizationId (string): (required)
- actions (array): A set of changes to make as part of this action (<a href='https://developer.cisco.com/meraki/api/#/rest/guides/action-batches/'>more details</a>)
- confirmed (boolean): Set to true for immediate execution. Set to false if the action should be previewed before executing. This property cannot be unset once it is true. Defaults to false.
- synchronous (boolean): Set to true to force the batch to run synchronous. There can be at most 20 actions in synchronous batch. Defaults to false.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'actionBatches'],
'operation': 'createOrganizationActionBatch'
}
resource = f'/organizations/{organizationId}/actionBatches'
body_params = ['confirmed', 'synchronous', 'actions', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationActionBatches(self, organizationId: str, **kwargs):
"""
**Return the list of action batches in the organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-action-batches
- organizationId (string): (required)
- status (string): Filter batches by status. Valid types are pending, completed, and failed.
"""
kwargs.update(locals())
if 'status' in kwargs:
options = ['pending', 'completed', 'failed']
assert kwargs['status'] in options, f'''"status" cannot be "{kwargs['status']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'actionBatches'],
'operation': 'getOrganizationActionBatches'
}
resource = f'/organizations/{organizationId}/actionBatches'
query_params = ['status', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getOrganizationActionBatch(self, organizationId: str, actionBatchId: str):
"""
**Return an action batch**
https://developer.cisco.com/meraki/api-v1/#!get-organization-action-batch
- organizationId (string): (required)
- actionBatchId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'actionBatches'],
'operation': 'getOrganizationActionBatch'
}
resource = f'/organizations/{organizationId}/actionBatches/{actionBatchId}'
return self._session.get(metadata, resource)
def deleteOrganizationActionBatch(self, organizationId: str, actionBatchId: str):
"""
**Delete an action batch**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-action-batch
- organizationId (string): (required)
- actionBatchId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'actionBatches'],
'operation': 'deleteOrganizationActionBatch'
}
resource = f'/organizations/{organizationId}/actionBatches/{actionBatchId}'
return self._session.delete(metadata, resource)
def updateOrganizationActionBatch(self, organizationId: str, actionBatchId: str, **kwargs):
"""
**Update an action batch**
https://developer.cisco.com/meraki/api-v1/#!update-organization-action-batch
- organizationId (string): (required)
- actionBatchId (string): (required)
- confirmed (boolean): A boolean representing whether or not the batch has been confirmed. This property cannot be unset once it is true.
- synchronous (boolean): Set to true to force the batch to run synchronous. There can be at most 20 actions in synchronous batch.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'actionBatches'],
'operation': 'updateOrganizationActionBatch'
}
resource = f'/organizations/{organizationId}/actionBatches/{actionBatchId}'
body_params = ['confirmed', 'synchronous', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganizationAdmins(self, organizationId: str):
"""
**List the dashboard administrators in this organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-admins
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'admins'],
'operation': 'getOrganizationAdmins'
}
resource = f'/organizations/{organizationId}/admins'
return self._session.get(metadata, resource)
def createOrganizationAdmin(self, organizationId: str, email: str, name: str, orgAccess: str, **kwargs):
"""
**Create a new dashboard administrator**
https://developer.cisco.com/meraki/api-v1/#!create-organization-admin
- organizationId (string): (required)
- email (string): The email of the dashboard administrator. This attribute can not be updated.
- name (string): The name of the dashboard administrator
- orgAccess (string): The privilege of the dashboard administrator on the organization. Can be one of 'full', 'read-only', 'enterprise' or 'none'
- tags (array): The list of tags that the dashboard administrator has privileges on
- networks (array): The list of networks that the dashboard administrator has privileges on
- authenticationMethod (string): The method of authentication the user will use to sign in to the Meraki dashboard. Can be one of 'Email' or 'Cisco SecureX Sign-On'. The default is Email authentication
"""
kwargs.update(locals())
if 'orgAccess' in kwargs:
options = ['full', 'read-only', 'enterprise', 'none']
assert kwargs['orgAccess'] in options, f'''"orgAccess" cannot be "{kwargs['orgAccess']}", & must be set to one of: {options}'''
if 'authenticationMethod' in kwargs:
options = ['Email', 'Cisco SecureX Sign-On']
assert kwargs['authenticationMethod'] in options, f'''"authenticationMethod" cannot be "{kwargs['authenticationMethod']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'admins'],
'operation': 'createOrganizationAdmin'
}
resource = f'/organizations/{organizationId}/admins'
body_params = ['email', 'name', 'orgAccess', 'tags', 'networks', 'authenticationMethod', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def updateOrganizationAdmin(self, organizationId: str, id: str, **kwargs):
"""
**Update an administrator**
https://developer.cisco.com/meraki/api-v1/#!update-organization-admin
- organizationId (string): (required)
- id (string): (required)
- name (string): The name of the dashboard administrator
- orgAccess (string): The privilege of the dashboard administrator on the organization. Can be one of 'full', 'read-only', 'enterprise' or 'none'
- tags (array): The list of tags that the dashboard administrator has privileges on
- networks (array): The list of networks that the dashboard administrator has privileges on
"""
kwargs.update(locals())
if 'orgAccess' in kwargs:
options = ['full', 'read-only', 'enterprise', 'none']
assert kwargs['orgAccess'] in options, f'''"orgAccess" cannot be "{kwargs['orgAccess']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'admins'],
'operation': 'updateOrganizationAdmin'
}
resource = f'/organizations/{organizationId}/admins/{id}'
body_params = ['name', 'orgAccess', 'tags', 'networks', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganizationAdmin(self, organizationId: str, id: str):
"""
**Revoke all access for a dashboard administrator within this organization**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-admin
- organizationId (string): (required)
- id (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'admins'],
'operation': 'deleteOrganizationAdmin'
}
resource = f'/organizations/{organizationId}/admins/{id}'
return self._session.delete(metadata, resource)
def getOrganizationApiRequests(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**List the API requests made by an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-api-requests
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 31 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 31 days after t0.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 31 days. The default is 31 days.
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 50.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- adminId (string): Filter the results by the ID of the admin who made the API requests
- path (string): Filter the results by the path of the API requests
- method (string): Filter the results by the method of the API requests (must be 'GET', 'PUT', 'POST' or 'DELETE')
- responseCode (integer): Filter the results by the response code of the API requests
- sourceIp (string): Filter the results by the IP address of the originating API request
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'monitor', 'apiRequests'],
'operation': 'getOrganizationApiRequests'
}
resource = f'/organizations/{organizationId}/apiRequests'
query_params = ['t0', 't1', 'timespan', 'perPage', 'startingAfter', 'endingBefore', 'adminId', 'path', 'method', 'responseCode', 'sourceIp', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getOrganizationApiRequestsOverview(self, organizationId: str, **kwargs):
"""
**Return an aggregated overview of API requests data**
https://developer.cisco.com/meraki/api-v1/#!get-organization-api-requests-overview
- organizationId (string): (required)
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 31 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 31 days after t0.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 31 days. The default is 31 days.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'monitor', 'apiRequests', 'overview'],
'operation': 'getOrganizationApiRequestsOverview'
}
resource = f'/organizations/{organizationId}/apiRequests/overview'
query_params = ['t0', 't1', 'timespan', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getOrganizationBrandingPolicies(self, organizationId: str):
"""
**List the branding policies of an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-branding-policies
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies'],
'operation': 'getOrganizationBrandingPolicies'
}
resource = f'/organizations/{organizationId}/brandingPolicies'
return self._session.get(metadata, resource)
def createOrganizationBrandingPolicy(self, organizationId: str, name: str, enabled: bool, adminSettings: dict, **kwargs):
"""
**Add a new branding policy to an organization**
https://developer.cisco.com/meraki/api-v1/#!create-organization-branding-policy
- organizationId (string): (required)
- name (string): Name of the Dashboard branding policy.
- enabled (boolean): Boolean indicating whether this policy is enabled.
- adminSettings (object): Settings for describing which kinds of admins this policy applies to.
- helpSettings (object): Settings for describing the modifications to various Help page features. Each property in this object accepts one of
'default or inherit' (do not modify functionality), 'hide' (remove the section from Dashboard), or 'show' (always show
the section on Dashboard). Some properties in this object also accept custom HTML used to replace the section on
Dashboard; see the documentation for each property to see the allowed values.
Each property defaults to 'default or inherit' when not provided.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies'],
'operation': 'createOrganizationBrandingPolicy'
}
resource = f'/organizations/{organizationId}/brandingPolicies'
body_params = ['name', 'enabled', 'adminSettings', 'helpSettings', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationBrandingPoliciesPriorities(self, organizationId: str):
"""
**Return the branding policy IDs of an organization in priority order. IDs are ordered in ascending order of priority (IDs later in the array have higher priority).**
https://developer.cisco.com/meraki/api-v1/#!get-organization-branding-policies-priorities
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies', 'priorities'],
'operation': 'getOrganizationBrandingPoliciesPriorities'
}
resource = f'/organizations/{organizationId}/brandingPolicies/priorities'
return self._session.get(metadata, resource)
def updateOrganizationBrandingPoliciesPriorities(self, organizationId: str, brandingPolicyIds: list):
"""
**Update the priority ordering of an organization's branding policies.**
https://developer.cisco.com/meraki/api-v1/#!update-organization-branding-policies-priorities
- organizationId (string): (required)
- brandingPolicyIds (array): A list of branding policy IDs arranged in ascending priority order (IDs later in the array have higher priority).
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies', 'priorities'],
'operation': 'updateOrganizationBrandingPoliciesPriorities'
}
resource = f'/organizations/{organizationId}/brandingPolicies/priorities'
body_params = ['brandingPolicyIds', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganizationBrandingPolicy(self, organizationId: str, brandingPolicyId: str):
"""
**Return a branding policy**
https://developer.cisco.com/meraki/api-v1/#!get-organization-branding-policy
- organizationId (string): (required)
- brandingPolicyId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies'],
'operation': 'getOrganizationBrandingPolicy'
}
resource = f'/organizations/{organizationId}/brandingPolicies/{brandingPolicyId}'
return self._session.get(metadata, resource)
def updateOrganizationBrandingPolicy(self, organizationId: str, brandingPolicyId: str, **kwargs):
"""
**Update a branding policy**
https://developer.cisco.com/meraki/api-v1/#!update-organization-branding-policy
- organizationId (string): (required)
- brandingPolicyId (string): (required)
- name (string): Name of the Dashboard branding policy.
- enabled (boolean): Boolean indicating whether this policy is enabled.
- adminSettings (object): Settings for describing which kinds of admins this policy applies to.
- helpSettings (object): Settings for describing the modifications to various Help page features. Each property in this object accepts one of
'default or inherit' (do not modify functionality), 'hide' (remove the section from Dashboard), or 'show' (always show
the section on Dashboard). Some properties in this object also accept custom HTML used to replace the section on
Dashboard; see the documentation for each property to see the allowed values.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies'],
'operation': 'updateOrganizationBrandingPolicy'
}
resource = f'/organizations/{organizationId}/brandingPolicies/{brandingPolicyId}'
body_params = ['name', 'enabled', 'adminSettings', 'helpSettings', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganizationBrandingPolicy(self, organizationId: str, brandingPolicyId: str):
"""
**Delete a branding policy**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-branding-policy
- organizationId (string): (required)
- brandingPolicyId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'brandingPolicies'],
'operation': 'deleteOrganizationBrandingPolicy'
}
resource = f'/organizations/{organizationId}/brandingPolicies/{brandingPolicyId}'
return self._session.delete(metadata, resource)
def claimIntoOrganization(self, organizationId: str, **kwargs):
"""
**Claim a list of devices, licenses, and/or orders into an organization. When claiming by order, all devices and licenses in the order will be claimed; licenses will be added to the organization and devices will be placed in the organization's inventory.**
https://developer.cisco.com/meraki/api-v1/#!claim-into-organization
- organizationId (string): (required)
- orders (array): The numbers of the orders that should be claimed
- serials (array): The serials of the devices that should be claimed
- licenses (array): The licenses that should be claimed
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'claimIntoOrganization'
}
resource = f'/organizations/{organizationId}/claim'
body_params = ['orders', 'serials', 'licenses', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def cloneOrganization(self, organizationId: str, name: str):
"""
**Create a new organization by cloning the addressed organization**
https://developer.cisco.com/meraki/api-v1/#!clone-organization
- organizationId (string): (required)
- name (string): The name of the new organization
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure'],
'operation': 'cloneOrganization'
}
resource = f'/organizations/{organizationId}/clone'
body_params = ['name', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationConfigTemplates(self, organizationId: str):
"""
**List the configuration templates for this organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-config-templates
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'configTemplates'],
'operation': 'getOrganizationConfigTemplates'
}
resource = f'/organizations/{organizationId}/configTemplates'
return self._session.get(metadata, resource)
def createOrganizationConfigTemplate(self, organizationId: str, name: str, **kwargs):
"""
**Create a new configuration template**
https://developer.cisco.com/meraki/api-v1/#!create-organization-config-template
- organizationId (string): (required)
- name (string): The name of the configuration template
- timeZone (string): The timezone of the configuration template. For a list of allowed timezones, please see the 'TZ' column in the table in <a target='_blank' href='https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'>this article</a>. Not applicable if copying from existing network or template
- copyFromNetworkId (string): The ID of the network or config template to copy configuration from
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'configTemplates'],
'operation': 'createOrganizationConfigTemplate'
}
resource = f'/organizations/{organizationId}/configTemplates'
body_params = ['name', 'timeZone', 'copyFromNetworkId', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def updateOrganizationConfigTemplate(self, organizationId: str, configTemplateId: str, **kwargs):
"""
**Update a configuration template**
https://developer.cisco.com/meraki/api-v1/#!update-organization-config-template
- organizationId (string): (required)
- configTemplateId (string): (required)
- name (string): The name of the configuration template
- timeZone (string): The timezone of the configuration template. For a list of allowed timezones, please see the 'TZ' column in the table in <a target='_blank' href='https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'>this article.</a>
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'configTemplates'],
'operation': 'updateOrganizationConfigTemplate'
}
resource = f'/organizations/{organizationId}/configTemplates/{configTemplateId}'
body_params = ['name', 'timeZone', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganizationConfigTemplate(self, organizationId: str, configTemplateId: str):
"""
**Remove a configuration template**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-config-template
- organizationId (string): (required)
- configTemplateId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'configTemplates'],
'operation': 'deleteOrganizationConfigTemplate'
}
resource = f'/organizations/{organizationId}/configTemplates/{configTemplateId}'
return self._session.delete(metadata, resource)
def getOrganizationConfigTemplate(self, organizationId: str, configTemplateId: str):
"""
**Return a single configuration template**
https://developer.cisco.com/meraki/api-v1/#!get-organization-config-template
- organizationId (string): (required)
- configTemplateId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'configTemplates'],
'operation': 'getOrganizationConfigTemplate'
}
resource = f'/organizations/{organizationId}/configTemplates/{configTemplateId}'
return self._session.get(metadata, resource)
def getOrganizationConfigurationChanges(self, organizationId: str, total_pages=1, direction='prev', **kwargs):
"""
**View the Change Log for your organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-configuration-changes
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" or "prev" (default) page
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 365 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 365 days after t0.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 365 days. The default is 365 days.
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 5000. Default is 5000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- networkId (string): Filters on the given network
- adminId (string): Filters on the given Admin
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'monitor', 'configurationChanges'],
'operation': 'getOrganizationConfigurationChanges'
}
resource = f'/organizations/{organizationId}/configurationChanges'
query_params = ['t0', 't1', 'timespan', 'perPage', 'startingAfter', 'endingBefore', 'networkId', 'adminId', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getOrganizationDevices(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**List the devices in an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-devices
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- configurationUpdatedAfter (string): Filter results by whether or not the device's configuration has been updated after the given timestamp
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'devices'],
'operation': 'getOrganizationDevices'
}
resource = f'/organizations/{organizationId}/devices'
query_params = ['perPage', 'startingAfter', 'endingBefore', 'configurationUpdatedAfter', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getOrganizationDevicesStatuses(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**List the status of every Meraki device in the organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-devices-statuses
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'monitor', 'devices', 'statuses'],
'operation': 'getOrganizationDevicesStatuses'
}
resource = f'/organizations/{organizationId}/devices/statuses'
query_params = ['perPage', 'startingAfter', 'endingBefore', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getOrganizationDevicesUplinksLossAndLatency(self, organizationId: str, **kwargs):
"""
**Return the uplink loss and latency for every MX in the organization from at latest 2 minutes ago**
https://developer.cisco.com/meraki/api-v1/#!get-organization-devices-uplinks-loss-and-latency
- organizationId (string): (required)
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 365 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 5 minutes after t0. The latest possible time that t1 can be is 2 minutes into the past.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 5 minutes. The default is 5 minutes.
- uplink (string): Optional filter for a specific WAN uplink. Valid uplinks are wan1, wan2, cellular. Default will return all uplinks.
- ip (string): Optional filter for a specific destination IP. Default will return all destination IPs.
"""
kwargs.update(locals())
if 'uplink' in kwargs:
options = ['wan1', 'wan2', 'cellular']
assert kwargs['uplink'] in options, f'''"uplink" cannot be "{kwargs['uplink']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'monitor', 'devices', 'uplinksLossAndLatency'],
'operation': 'getOrganizationDevicesUplinksLossAndLatency'
}
resource = f'/organizations/{organizationId}/devices/uplinksLossAndLatency'
query_params = ['t0', 't1', 'timespan', 'uplink', 'ip', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get(metadata, resource, params)
def getOrganizationInventory(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**Return the device inventory for an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-inventory
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- usedState (string): Filter results by used or unused inventory. Accepted values are "used" or "unused".
"""
kwargs.update(locals())
if 'usedState' in kwargs:
options = ['used', 'unused']
assert kwargs['usedState'] in options, f'''"usedState" cannot be "{kwargs['usedState']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'inventory'],
'operation': 'getOrganizationInventory'
}
resource = f'/organizations/{organizationId}/inventory'
query_params = ['perPage', 'startingAfter', 'endingBefore', 'usedState', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def getOrganizationLicenses(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**List the licenses for an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-licenses
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- deviceSerial (string): Filter the licenses to those assigned to a particular device
- networkId (string): Filter the licenses to those assigned in a particular network
- state (string): Filter the licenses to those in a particular state. Can be one of 'active', 'expired', 'expiring', 'unused', 'unusedActive' or 'recentlyQueued'
"""
kwargs.update(locals())
if 'state' in kwargs:
options = ['active', 'expired', 'expiring', 'unused', 'unusedActive', 'recentlyQueued']
assert kwargs['state'] in options, f'''"state" cannot be "{kwargs['state']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'getOrganizationLicenses'
}
resource = f'/organizations/{organizationId}/licenses'
query_params = ['perPage', 'startingAfter', 'endingBefore', 'deviceSerial', 'networkId', 'state', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def assignOrganizationLicensesSeats(self, organizationId: str, licenseId: str, networkId: str, seatCount: int):
"""
**Assign SM seats to a network. This will increase the managed SM device limit of the network**
https://developer.cisco.com/meraki/api-v1/#!assign-organization-licenses-seats
- organizationId (string): (required)
- licenseId (string): The ID of the SM license to assign seats from
- networkId (string): The ID of the SM network to assign the seats to
- seatCount (integer): The number of seats to assign to the SM network. Must be less than or equal to the total number of seats of the license
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'assignOrganizationLicensesSeats'
}
resource = f'/organizations/{organizationId}/licenses/assignSeats'
body_params = ['licenseId', 'networkId', 'seatCount', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def moveOrganizationLicenses(self, organizationId: str, destOrganizationId: str, licenseIds: list):
"""
**Move licenses to another organization. This will also move any devices that the licenses are assigned to**
https://developer.cisco.com/meraki/api-v1/#!move-organization-licenses
- organizationId (string): (required)
- destOrganizationId (string): The ID of the organization to move the licenses to
- licenseIds (array): A list of IDs of licenses to move to the new organization
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'moveOrganizationLicenses'
}
resource = f'/organizations/{organizationId}/licenses/move'
body_params = ['destOrganizationId', 'licenseIds', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def moveOrganizationLicensesSeats(self, organizationId: str, destOrganizationId: str, licenseId: str, seatCount: int):
"""
**Move SM seats to another organization**
https://developer.cisco.com/meraki/api-v1/#!move-organization-licenses-seats
- organizationId (string): (required)
- destOrganizationId (string): The ID of the organization to move the SM seats to
- licenseId (string): The ID of the SM license to move the seats from
- seatCount (integer): The number of seats to move to the new organization. Must be less than or equal to the total number of seats of the license
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'moveOrganizationLicensesSeats'
}
resource = f'/organizations/{organizationId}/licenses/moveSeats'
body_params = ['destOrganizationId', 'licenseId', 'seatCount', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationLicensesOverview(self, organizationId: str):
"""
**Return an overview of the license state for an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-licenses-overview
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'monitor', 'licenses', 'overview'],
'operation': 'getOrganizationLicensesOverview'
}
resource = f'/organizations/{organizationId}/licenses/overview'
return self._session.get(metadata, resource)
def renewOrganizationLicensesSeats(self, organizationId: str, licenseIdToRenew: str, unusedLicenseId: str):
"""
**Renew SM seats of a license. This will extend the license expiration date of managed SM devices covered by this license**
https://developer.cisco.com/meraki/api-v1/#!renew-organization-licenses-seats
- organizationId (string): (required)
- licenseIdToRenew (string): The ID of the SM license to renew. This license must already be assigned to an SM network
- unusedLicenseId (string): The SM license to use to renew the seats on 'licenseIdToRenew'. This license must have at least as many seats available as there are seats on 'licenseIdToRenew'
"""
kwargs = locals()
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'renewOrganizationLicensesSeats'
}
resource = f'/organizations/{organizationId}/licenses/renewSeats'
body_params = ['licenseIdToRenew', 'unusedLicenseId', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationLicense(self, organizationId: str, licenseId: str):
"""
**Display a license**
https://developer.cisco.com/meraki/api-v1/#!get-organization-license
- organizationId (string): (required)
- licenseId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'getOrganizationLicense'
}
resource = f'/organizations/{organizationId}/licenses/{licenseId}'
return self._session.get(metadata, resource)
def updateOrganizationLicense(self, organizationId: str, licenseId: str, **kwargs):
"""
**Update a license**
https://developer.cisco.com/meraki/api-v1/#!update-organization-license
- organizationId (string): (required)
- licenseId (string): (required)
- deviceSerial (string): The serial number of the device to assign this license to. Set this to null to unassign the license. If a different license is already active on the device, this parameter will control queueing/dequeuing this license.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'licenses'],
'operation': 'updateOrganizationLicense'
}
resource = f'/organizations/{organizationId}/licenses/{licenseId}'
body_params = ['deviceSerial', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganizationNetworks(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**List the networks that the user has privileges on in an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-networks
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- configTemplateId (string): An optional parameter that is the ID of a config template. Will return all networks bound to that template.
- tags (array): An optional parameter to filter networks by tags. The filtering is case-sensitive. If tags are included, 'tagsFilterType' should also be included (see below).
- tagsFilterType (string): An optional parameter of value 'withAnyTags' or 'withAllTags' to indicate whether to return networks which contain ANY or ALL of the included tags. If no type is included, 'withAnyTags' will be selected.
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 100000. Default is 1000.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
"""
kwargs.update(locals())
if 'tagsFilterType' in kwargs:
options = ['withAnyTags', 'withAllTags']
assert kwargs['tagsFilterType'] in options, f'''"tagsFilterType" cannot be "{kwargs['tagsFilterType']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'networks'],
'operation': 'getOrganizationNetworks'
}
resource = f'/organizations/{organizationId}/networks'
query_params = ['configTemplateId', 'tags', 'tagsFilterType', 'perPage', 'startingAfter', 'endingBefore', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
array_params = ['tags', ]
for k, v in kwargs.items():
if k.strip() in array_params:
params[f'{k.strip()}[]'] = kwargs[f'{k}']
params.pop(k.strip())
return self._session.get_pages(metadata, resource, params, total_pages, direction)
def createOrganizationNetwork(self, organizationId: str, name: str, productTypes: list, **kwargs):
"""
**Create a network**
https://developer.cisco.com/meraki/api-v1/#!create-organization-network
- organizationId (string): (required)
- name (string): The name of the new network
- productTypes (array): The product type(s) of the new network. Valid types are wireless, appliance, switch, systemsManager, camera, cellularGateway. If more than one type is included, the network will be a combined network.
- tags (array): A list of tags to be applied to the network
- timeZone (string): The timezone of the network. For a list of allowed timezones, please see the 'TZ' column in the table in <a target='_blank' href='https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'>this article.</a>
- copyFromNetworkId (string): The ID of the network to copy configuration from. Other provided parameters will override the copied configuration, except type which must match this network's type exactly.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'networks'],
'operation': 'createOrganizationNetwork'
}
resource = f'/organizations/{organizationId}/networks'
body_params = ['name', 'productTypes', 'tags', 'timeZone', 'copyFromNetworkId', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def combineOrganizationNetworks(self, organizationId: str, name: str, networkIds: list, **kwargs):
"""
**Combine multiple networks into a single network**
https://developer.cisco.com/meraki/api-v1/#!combine-organization-networks
- organizationId (string): (required)
- name (string): The name of the combined network
- networkIds (array): A list of the network IDs that will be combined. If an ID of a combined network is included in this list, the other networks in the list will be grouped into that network
- enrollmentString (string): A unique identifier which can be used for device enrollment or easy access through the Meraki SM Registration page or the Self Service Portal. Please note that changing this field may cause existing bookmarks to break. All networks that are part of this combined network will have their enrollment string appended by '-network_type'. If left empty, all exisitng enrollment strings will be deleted.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'networks'],
'operation': 'combineOrganizationNetworks'
}
resource = f'/organizations/{organizationId}/networks/combine'
body_params = ['name', 'networkIds', 'enrollmentString', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationOpenapiSpec(self, organizationId: str):
"""
**Return the OpenAPI 2.0 Specification of the organization's API documentation in JSON**
https://developer.cisco.com/meraki/api-v1/#!get-organization-openapi-spec
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'monitor', 'openapiSpec'],
'operation': 'getOrganizationOpenapiSpec'
}
resource = f'/organizations/{organizationId}/openapiSpec'
return self._session.get(metadata, resource)
def getOrganizationSamlIdps(self, organizationId: str):
"""
**List the SAML IdPs in your organization.**
https://developer.cisco.com/meraki/api-v1/#!get-organization-saml-idps
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'saml', 'idps'],
'operation': 'getOrganizationSamlIdps'
}
resource = f'/organizations/{organizationId}/saml/idps'
return self._session.get(metadata, resource)
def createOrganizationSamlIdp(self, organizationId: str, x509certSha1Fingerprint: str, **kwargs):
"""
**Create a SAML IdP for your organization.**
https://developer.cisco.com/meraki/api-v1/#!create-organization-saml-idp
- organizationId (string): (required)
- x509certSha1Fingerprint (string): Fingerprint (SHA1) of the SAML certificate provided by your Identity Provider (IdP). This will be used for encryption / validation.
- sloLogoutUrl (string): Dashboard will redirect users to this URL when they sign out.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'saml', 'idps'],
'operation': 'createOrganizationSamlIdp'
}
resource = f'/organizations/{organizationId}/saml/idps'
body_params = ['x509certSha1Fingerprint', 'sloLogoutUrl', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def updateOrganizationSamlIdp(self, organizationId: str, idpId: str, **kwargs):
"""
**Update a SAML IdP in your organization**
https://developer.cisco.com/meraki/api-v1/#!update-organization-saml-idp
- organizationId (string): (required)
- idpId (string): (required)
- x509certSha1Fingerprint (string): Fingerprint (SHA1) of the SAML certificate provided by your Identity Provider (IdP). This will be used for encryption / validation.
- sloLogoutUrl (string): Dashboard will redirect users to this URL when they sign out.
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'saml', 'idps'],
'operation': 'updateOrganizationSamlIdp'
}
resource = f'/organizations/{organizationId}/saml/idps/{idpId}'
body_params = ['x509certSha1Fingerprint', 'sloLogoutUrl', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganizationSamlIdp(self, organizationId: str, idpId: str):
"""
**Get a SAML IdP from your organization.**
https://developer.cisco.com/meraki/api-v1/#!get-organization-saml-idp
- organizationId (string): (required)
- idpId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'saml', 'idps'],
'operation': 'getOrganizationSamlIdp'
}
resource = f'/organizations/{organizationId}/saml/idps/{idpId}'
return self._session.get(metadata, resource)
def deleteOrganizationSamlIdp(self, organizationId: str, idpId: str):
"""
**Remove a SAML IdP in your organization.**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-saml-idp
- organizationId (string): (required)
- idpId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'saml', 'idps'],
'operation': 'deleteOrganizationSamlIdp'
}
resource = f'/organizations/{organizationId}/saml/idps/{idpId}'
return self._session.delete(metadata, resource)
def getOrganizationSamlRoles(self, organizationId: str):
"""
**List the SAML roles for this organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-saml-roles
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'samlRoles'],
'operation': 'getOrganizationSamlRoles'
}
resource = f'/organizations/{organizationId}/samlRoles'
return self._session.get(metadata, resource)
def createOrganizationSamlRole(self, organizationId: str, **kwargs):
"""
**Create a SAML role**
https://developer.cisco.com/meraki/api-v1/#!create-organization-saml-role
- organizationId (string): (required)
- role (string): The role of the SAML administrator
- orgAccess (string): The privilege of the SAML administrator on the organization
- tags (array): The list of tags that the SAML administrator has privleges on
- networks (array): The list of networks that the SAML administrator has privileges on
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'samlRoles'],
'operation': 'createOrganizationSamlRole'
}
resource = f'/organizations/{organizationId}/samlRoles'
body_params = ['role', 'orgAccess', 'tags', 'networks', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getOrganizationSamlRole(self, organizationId: str, samlRoleId: str):
"""
**Return a SAML role**
https://developer.cisco.com/meraki/api-v1/#!get-organization-saml-role
- organizationId (string): (required)
- samlRoleId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'samlRoles'],
'operation': 'getOrganizationSamlRole'
}
resource = f'/organizations/{organizationId}/samlRoles/{samlRoleId}'
return self._session.get(metadata, resource)
def updateOrganizationSamlRole(self, organizationId: str, samlRoleId: str, **kwargs):
"""
**Update a SAML role**
https://developer.cisco.com/meraki/api-v1/#!update-organization-saml-role
- organizationId (string): (required)
- samlRoleId (string): (required)
- role (string): The role of the SAML administrator
- orgAccess (string): The privilege of the SAML administrator on the organization
- tags (array): The list of tags that the SAML administrator has privleges on
- networks (array): The list of networks that the SAML administrator has privileges on
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'configure', 'samlRoles'],
'operation': 'updateOrganizationSamlRole'
}
resource = f'/organizations/{organizationId}/samlRoles/{samlRoleId}'
body_params = ['role', 'orgAccess', 'tags', 'networks', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteOrganizationSamlRole(self, organizationId: str, samlRoleId: str):
"""
**Remove a SAML role**
https://developer.cisco.com/meraki/api-v1/#!delete-organization-saml-role
- organizationId (string): (required)
- samlRoleId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'samlRoles'],
'operation': 'deleteOrganizationSamlRole'
}
resource = f'/organizations/{organizationId}/samlRoles/{samlRoleId}'
return self._session.delete(metadata, resource)
def getOrganizationSnmp(self, organizationId: str):
"""
**Return the SNMP settings for an organization**
https://developer.cisco.com/meraki/api-v1/#!get-organization-snmp
- organizationId (string): (required)
"""
metadata = {
'tags': ['organizations', 'configure', 'snmp'],
'operation': 'getOrganizationSnmp'
}
resource = f'/organizations/{organizationId}/snmp'
return self._session.get(metadata, resource)
def updateOrganizationSnmp(self, organizationId: str, **kwargs):
"""
**Update the SNMP settings for an organization**
https://developer.cisco.com/meraki/api-v1/#!update-organization-snmp
- organizationId (string): (required)
- v2cEnabled (boolean): Boolean indicating whether SNMP version 2c is enabled for the organization.
- v3Enabled (boolean): Boolean indicating whether SNMP version 3 is enabled for the organization.
- v3AuthMode (string): The SNMP version 3 authentication mode. Can be either 'MD5' or 'SHA'.
- v3AuthPass (string): The SNMP version 3 authentication password. Must be at least 8 characters if specified.
- v3PrivMode (string): The SNMP version 3 privacy mode. Can be either 'DES' or 'AES128'.
- v3PrivPass (string): The SNMP version 3 privacy password. Must be at least 8 characters if specified.
- peerIps (array): The list of IPv4 addresses that are allowed to access the SNMP server.
"""
kwargs.update(locals())
if 'v3AuthMode' in kwargs:
options = ['MD5', 'SHA']
assert kwargs['v3AuthMode'] in options, f'''"v3AuthMode" cannot be "{kwargs['v3AuthMode']}", & must be set to one of: {options}'''
if 'v3PrivMode' in kwargs:
options = ['DES', 'AES128']
assert kwargs['v3PrivMode'] in options, f'''"v3PrivMode" cannot be "{kwargs['v3PrivMode']}", & must be set to one of: {options}'''
metadata = {
'tags': ['organizations', 'configure', 'snmp'],
'operation': 'updateOrganizationSnmp'
}
resource = f'/organizations/{organizationId}/snmp'
body_params = ['v2cEnabled', 'v3Enabled', 'v3AuthMode', 'v3AuthPass', 'v3PrivMode', 'v3PrivPass', 'peerIps', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getOrganizationWebhookLogs(self, organizationId: str, total_pages=1, direction='next', **kwargs):
"""
**Return the log of webhook POSTs sent**
https://developer.cisco.com/meraki/api-v1/#!get-organization-webhook-logs
- organizationId (string): (required)
- total_pages (integer or string): use with perPage to get total results up to total_pages*perPage; -1 or "all" for all pages
- direction (string): direction to paginate, either "next" (default) or "prev" page
- t0 (string): The beginning of the timespan for the data. The maximum lookback period is 90 days from today.
- t1 (string): The end of the timespan for the data. t1 can be a maximum of 31 days after t0.
- timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameters t0 and t1. The value must be in seconds and be less than or equal to 31 days. The default is 1 day.
- perPage (integer): The number of entries per page returned. Acceptable range is 3 - 1000. Default is 50.
- startingAfter (string): A token used by the server to indicate the start of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- endingBefore (string): A token used by the server to indicate the end of the page. Often this is a timestamp or an ID but it is not limited to those. This parameter should not be defined by client applications. The link for the first, last, prev, or next page in the HTTP Link header should define it.
- url (string): The URL the webhook was sent to
"""
kwargs.update(locals())
metadata = {
'tags': ['organizations', 'monitor', 'webhookLogs'],
'operation': 'getOrganizationWebhookLogs'
}
resource = f'/organizations/{organizationId}/webhookLogs'
query_params = ['t0', 't1', 'timespan', 'perPage', 'startingAfter', 'endingBefore', 'url', ]
params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
return self._session.get_pages(metadata, resource, params, total_pages, direction) |
"""
Exercício Python 36: Escreva um programa para aprovar o empréstimo bancário para a compra de uma casa.
Pergunte o valor da casa, o salário do comprador e em quantos anos ele vai pagar.
A prestação mensal não pode exceder 30% do salário ou então o empréstimo será negado.
"""
print('-' * 30)
print(f'{'Aprovando Emprestímo':^30}')
print('-' * 30)
# solicita o valor do imóvel e salário
valor_imovel = float(input('Digite o valor do imóvel R$ '))
salario = float(input('Digite o valor do seu salário atual R$ '))
parcelamento = int(input('Deseja pagar o imóvel em quanto anos: '))
valor_parcela = valor_imovel / (parcelamento * 12)
condicao = 0.30
if valor_parcela > (salario * condicao):
print(f'''Emprestímo Negago!\nO Valor das parcelas execedem 30% do seu salário.
''')
else:
print(f'''
Emprestímo autorizazdo!
Valor do imóvel R$ {valor_imovel:.2f}
Prazo de pagamento {parcelamento * 12} meses
Valor da parcelas R$ {valor_parcela:.2f}
''')
| """
Exercício Python 36: Escreva um programa para aprovar o empréstimo bancário para a compra de uma casa.
Pergunte o valor da casa, o salário do comprador e em quantos anos ele vai pagar.
A prestação mensal não pode exceder 30% do salário ou então o empréstimo será negado.
"""
print('-' * 30)
print(f'{"Aprovando Emprestímo":^30}')
print('-' * 30)
# solicita o valor do imóvel e salário
valor_imovel = float(input('Digite o valor do imóvel R$ '))
salario = float(input('Digite o valor do seu salário atual R$ '))
parcelamento = int(input('Deseja pagar o imóvel em quanto anos: '))
valor_parcela = valor_imovel / (parcelamento * 12)
condicao = 0.30
if valor_parcela > (salario * condicao):
print(f'''Emprestímo Negago!\nO Valor das parcelas execedem 30% do seu salário.
''')
else:
print(f'''
Emprestímo autorizazdo!
Valor do imóvel R$ {valor_imovel:.2f}
Prazo de pagamento {parcelamento * 12} meses
Valor da parcelas R$ {valor_parcela:.2f}
''')
|
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2019 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
@click.command()
@click.option('-i', '--input', 'opt_fp_cfg', required=True,
help='Path YAML job config')
@click.option('--skip-images', 'opt_skip_images', is_flag=True)
@click.option('--skip-labels', 'opt_skip_labels', is_flag=True)
@click.pass_context
def cli(ctx, opt_fp_cfg, opt_skip_images, opt_skip_labels):
"""YOLO PyTorch project"""
from os.path import join
from pathlib import Path
import shutil
from dataclasses import asdict
from tqdm import tqdm
import pandas as pd
from vframe.settings import app_cfg
from vframe.utils.file_utils import ensure_dir, load_yaml, write_yaml
from vframe.utils.file_utils import write_txt, replace_ext, chmod_exec
from vframe.utils.dataset_utils import split_train_val_test
from vframe.models.annotation import Annotation
from vframe.models.training_dataset import YoloPyTorch
log = app_cfg.LOG
# load config
cfg = load_yaml(opt_fp_cfg, data_class=YoloPyTorch)
# provision output
ensure_dir(cfg.fp_output)
dir_images = join(cfg.fp_output, cfg.fn_images)
dir_labels = join(cfg.fp_output, cfg.fn_labels)
ensure_dir(dir_images)
ensure_dir(dir_labels)
# write to yaml
fp_out = join(cfg.fp_output, cfg.fn_hyp)
comment = '\n'.join([app_cfg.LICENSE_HEADER,'# Hyperparameter'])
write_yaml(asdict(cfg.hyperparameters), fp_out, comment=comment)
# load annos
df = pd.read_csv(cfg.fp_annotations)
df_pos = df[df.label_index != -1]
# df_neg = df[df.label_enum == app_cfg.LABEL_BACKGROUND or df.label_index == -1]
df_neg = df[df.label_index == -1]
# count
log.info(f'positive annotations: {len(df_pos):,}')
log.info(f'background annotations: {len(df_neg):,}')
log.info(f'total annotations: {len(df):,}')
log.info(f'positive images: {len(df_pos.groupby('filename')):,}')
log.info(f'negative images: {len(df_neg.groupby('filename')):,}')
log.info(f'total images: {len(df.groupby('filename')):,}')
# get class-label list sorted by class index
df_sorted = df_pos.sort_values(by='label_index', ascending=True)
df_sorted.drop_duplicates(['label_enum'], keep='first', inplace=True)
class_labels = df_sorted.label_enum.values.tolist()
# write to txt
write_txt(class_labels, join(cfg.fp_output, app_cfg.FN_LABELS))
# update config
cfg.classes = class_labels
# Generate one label per file with all bboxes and classes
# <object-class> <x_center> <y_center> <width> <height>
labels_data = {}
file_list = []
df_groups = df_pos.groupby('filename')
for fn, df_group in df_groups:
annos = []
file_list.append(join(dir_images, fn))
for row_idx, row in df_group.iterrows():
annos.append(Annotation.from_anno_series_row(row).to_darknet_str())
labels_data.update({fn: annos})
# write txt files for train, val
splits = split_train_val_test(file_list, splits=cfg.splits, seed=1)
write_txt(splits['train'], join(cfg.fp_output, cfg.fn_train))
write_txt(splits['val'], join(cfg.fp_output, cfg.fn_val))
write_txt(splits['test'], join(cfg.fp_output, cfg.fn_test))
# write metadata
fp_out = join(cfg.fp_output, cfg.fn_metadata)
comment = '\n'.join([app_cfg.LICENSE_HEADER, '# Metadata'])
write_yaml(cfg.to_metadata(), fp_out, comment=comment)
# copy postive images
if not opt_skip_labels:
for fn, annos in tqdm(labels_data.items()):
# write all annos for this image to txt file
fp_label = join(dir_labels, replace_ext(fn, 'txt'))
write_txt(annos, fp_label)
# symlink/copy images
if not opt_skip_images:
df_groups = df.groupby('filename')
for fn, df_group in tqdm(df_groups):
fpp_im_dst = Path(join(dir_images, fn))
fpp_im_src = Path(join(cfg.fp_images, fn))
if not fpp_im_src.is_file():
app_cfg.LOG.error(f'{fpp_im_dst} missing')
continue
if cfg.symlink:
if fpp_im_dst.is_symlink():
fpp_im_dst.unlink()
fpp_im_dst.symlink_to(fpp_im_src)
else:
shutil.copy(fpp_im_src, fpp_im_dst)
# write model yaml, but print k:v pairs instead of dump
model_cfg = load_yaml(cfg.fp_model_cfg)
fp_out = join(cfg.fp_output, cfg.fn_model_cfg)
model_cfg['nc'] = len(cfg.classes)
with open(fp_out, 'w') as f:
for k,v in model_cfg.items():
f.write(f'{k}: {v}\n')
# shell scripts
args = cfg.arguments
py_cmds = ['python','train.py','']
cli_opts = cfg.to_cli_args()
# join strings
sh_header_str = '\n'.join(['#!/bin/bash','','# training', ''])
py_cmds_str = list(map(str, py_cmds))
cli_opts_str = list(map(str, cli_opts))
sh_script = sh_header_str + ' '.join(py_cmds_str) + ' '.join(cli_opts_str)
# write
fp_sh = join(cfg.fp_output, app_cfg.FN_TRAIN_INIT)
write_txt(sh_script, fp_sh)
# make executable
chmod_exec(fp_sh)
# TODO: add tensorboard script
# tensorboard --logdir runs/exp0 --bind_all
if args.device and len(args.device) > 1:
n_gpus = len(args.device)
# multi GPU cmd
py_cmds = ['python', '-m', 'torch.distributed.launch', '--nproc_per_node', f'{n_gpus}', 'train.py', '']
# join strings
sh_header_str = '\n'.join(['#!/bin/bash','','# multi gpu training', ''])
py_cmds_str = list(map(str, py_cmds))
cfg.arguments.batch_size *= 2
cli_opts = cfg.to_cli_args()
cli_opts_str = list(map(str, cli_opts))
sh_script = sh_header_str + ' '.join(py_cmds_str) + ' '.join(cli_opts_str)
# write
fp_sh = join(cfg.fp_output, app_cfg.FN_TRAIN_MULTI)
write_txt(sh_script, fp_sh)
# make executable
chmod_exec(fp_sh)
| #############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2019 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
@click.command()
@click.option('-i', '--input', 'opt_fp_cfg', required=True,
help='Path YAML job config')
@click.option('--skip-images', 'opt_skip_images', is_flag=True)
@click.option('--skip-labels', 'opt_skip_labels', is_flag=True)
@click.pass_context
def cli(ctx, opt_fp_cfg, opt_skip_images, opt_skip_labels):
"""YOLO PyTorch project"""
from os.path import join
from pathlib import Path
import shutil
from dataclasses import asdict
from tqdm import tqdm
import pandas as pd
from vframe.settings import app_cfg
from vframe.utils.file_utils import ensure_dir, load_yaml, write_yaml
from vframe.utils.file_utils import write_txt, replace_ext, chmod_exec
from vframe.utils.dataset_utils import split_train_val_test
from vframe.models.annotation import Annotation
from vframe.models.training_dataset import YoloPyTorch
log = app_cfg.LOG
# load config
cfg = load_yaml(opt_fp_cfg, data_class=YoloPyTorch)
# provision output
ensure_dir(cfg.fp_output)
dir_images = join(cfg.fp_output, cfg.fn_images)
dir_labels = join(cfg.fp_output, cfg.fn_labels)
ensure_dir(dir_images)
ensure_dir(dir_labels)
# write to yaml
fp_out = join(cfg.fp_output, cfg.fn_hyp)
comment = '\n'.join([app_cfg.LICENSE_HEADER,'# Hyperparameter'])
write_yaml(asdict(cfg.hyperparameters), fp_out, comment=comment)
# load annos
df = pd.read_csv(cfg.fp_annotations)
df_pos = df[df.label_index != -1]
# df_neg = df[df.label_enum == app_cfg.LABEL_BACKGROUND or df.label_index == -1]
df_neg = df[df.label_index == -1]
# count
log.info(f'positive annotations: {len(df_pos):,}')
log.info(f'background annotations: {len(df_neg):,}')
log.info(f'total annotations: {len(df):,}')
log.info(f'positive images: {len(df_pos.groupby("filename")):,}')
log.info(f'negative images: {len(df_neg.groupby("filename")):,}')
log.info(f'total images: {len(df.groupby("filename")):,}')
# get class-label list sorted by class index
df_sorted = df_pos.sort_values(by='label_index', ascending=True)
df_sorted.drop_duplicates(['label_enum'], keep='first', inplace=True)
class_labels = df_sorted.label_enum.values.tolist()
# write to txt
write_txt(class_labels, join(cfg.fp_output, app_cfg.FN_LABELS))
# update config
cfg.classes = class_labels
# Generate one label per file with all bboxes and classes
# <object-class> <x_center> <y_center> <width> <height>
labels_data = {}
file_list = []
df_groups = df_pos.groupby('filename')
for fn, df_group in df_groups:
annos = []
file_list.append(join(dir_images, fn))
for row_idx, row in df_group.iterrows():
annos.append(Annotation.from_anno_series_row(row).to_darknet_str())
labels_data.update({fn: annos})
# write txt files for train, val
splits = split_train_val_test(file_list, splits=cfg.splits, seed=1)
write_txt(splits['train'], join(cfg.fp_output, cfg.fn_train))
write_txt(splits['val'], join(cfg.fp_output, cfg.fn_val))
write_txt(splits['test'], join(cfg.fp_output, cfg.fn_test))
# write metadata
fp_out = join(cfg.fp_output, cfg.fn_metadata)
comment = '\n'.join([app_cfg.LICENSE_HEADER, '# Metadata'])
write_yaml(cfg.to_metadata(), fp_out, comment=comment)
# copy postive images
if not opt_skip_labels:
for fn, annos in tqdm(labels_data.items()):
# write all annos for this image to txt file
fp_label = join(dir_labels, replace_ext(fn, 'txt'))
write_txt(annos, fp_label)
# symlink/copy images
if not opt_skip_images:
df_groups = df.groupby('filename')
for fn, df_group in tqdm(df_groups):
fpp_im_dst = Path(join(dir_images, fn))
fpp_im_src = Path(join(cfg.fp_images, fn))
if not fpp_im_src.is_file():
app_cfg.LOG.error(f'{fpp_im_dst} missing')
continue
if cfg.symlink:
if fpp_im_dst.is_symlink():
fpp_im_dst.unlink()
fpp_im_dst.symlink_to(fpp_im_src)
else:
shutil.copy(fpp_im_src, fpp_im_dst)
# write model yaml, but print k:v pairs instead of dump
model_cfg = load_yaml(cfg.fp_model_cfg)
fp_out = join(cfg.fp_output, cfg.fn_model_cfg)
model_cfg['nc'] = len(cfg.classes)
with open(fp_out, 'w') as f:
for k,v in model_cfg.items():
f.write(f'{k}: {v}\n')
# shell scripts
args = cfg.arguments
py_cmds = ['python','train.py','']
cli_opts = cfg.to_cli_args()
# join strings
sh_header_str = '\n'.join(['#!/bin/bash','','# training', ''])
py_cmds_str = list(map(str, py_cmds))
cli_opts_str = list(map(str, cli_opts))
sh_script = sh_header_str + ' '.join(py_cmds_str) + ' '.join(cli_opts_str)
# write
fp_sh = join(cfg.fp_output, app_cfg.FN_TRAIN_INIT)
write_txt(sh_script, fp_sh)
# make executable
chmod_exec(fp_sh)
# TODO: add tensorboard script
# tensorboard --logdir runs/exp0 --bind_all
if args.device and len(args.device) > 1:
n_gpus = len(args.device)
# multi GPU cmd
py_cmds = ['python', '-m', 'torch.distributed.launch', '--nproc_per_node', f'{n_gpus}', 'train.py', '']
# join strings
sh_header_str = '\n'.join(['#!/bin/bash','','# multi gpu training', ''])
py_cmds_str = list(map(str, py_cmds))
cfg.arguments.batch_size *= 2
cli_opts = cfg.to_cli_args()
cli_opts_str = list(map(str, cli_opts))
sh_script = sh_header_str + ' '.join(py_cmds_str) + ' '.join(cli_opts_str)
# write
fp_sh = join(cfg.fp_output, app_cfg.FN_TRAIN_MULTI)
write_txt(sh_script, fp_sh)
# make executable
chmod_exec(fp_sh)
|
"""
This script loads the corresponding to repot text for certain patient IDs
"""
import json
import os
import pydicom
from PIL import Image
import pandas as pd
DATA_DIR = "./data/tags"
OUTPUT_DIR = "./manual_classification"
groups = pd.read_csv(f"{DATA_DIR}/groups.csv", sep=";")
tags = pd.read_csv(f"{DATA_DIR}/iu_xray_all_test.tsv", sep="\t")
result = pd.DataFrame(columns=["pat_id", "sex", "normal"])
group_pat_id_list = groups.groupby("group")
for _, relevant_group in group_pat_id_list:
relevant_group_name = f"group_{int(relevant_group["group"].iloc[0])}"
for _, row in relevant_group.iterrows():
pat_id = row["pat_id"]
pat_tags = tags[tags["reports"] == pat_id]["mti_tags"]
is_normal = "Normal" if (pat_tags == "none").iloc[0] else "Abnormal"
result = result.append(
{'pat_id': pat_id, 'sex': row['sex'], 'normal': is_normal},
ignore_index=True,
)
result.to_csv(f"{OUTPUT_DIR}/sex_matched_normality.csv") | """
This script loads the corresponding to repot text for certain patient IDs
"""
import json
import os
import pydicom
from PIL import Image
import pandas as pd
DATA_DIR = "./data/tags"
OUTPUT_DIR = "./manual_classification"
groups = pd.read_csv(f"{DATA_DIR}/groups.csv", sep=";")
tags = pd.read_csv(f"{DATA_DIR}/iu_xray_all_test.tsv", sep="\t")
result = pd.DataFrame(columns=["pat_id", "sex", "normal"])
group_pat_id_list = groups.groupby("group")
for _, relevant_group in group_pat_id_list:
relevant_group_name = f"group_{int(relevant_group['group'].iloc[0])}"
for _, row in relevant_group.iterrows():
pat_id = row["pat_id"]
pat_tags = tags[tags["reports"] == pat_id]["mti_tags"]
is_normal = "Normal" if (pat_tags == "none").iloc[0] else "Abnormal"
result = result.append(
{'pat_id': pat_id, 'sex': row['sex'], 'normal': is_normal},
ignore_index=True,
)
result.to_csv(f"{OUTPUT_DIR}/sex_matched_normality.csv") |
"""
BloomTech Labs DS Data Engineer Role
- Database Interface
- Visualization Interface
"""
import os
import re
import string
from random import randint
from typing import Iterable, Dict, List
import pandas as pd
import psycopg2
import plotly.express as px
import plotly.graph_objects as go
from plotly.graph_objs import Figure
from psycopg2 import sql
from dotenv import load_dotenv
class Data:
    """Interface to a PostgreSQL `features` table plus Plotly visualizations.

    The connection string is read once, at class-creation time, from the
    DB_URL environment variable (a local .env file is honoured via
    python-dotenv).
    """

    load_dotenv()
    # Shared connection string; None if DB_URL is not configured.
    db_url = os.getenv("DB_URL")

    def _setup(self, table_name: str, columns: Iterable[str]):
        """Create `table_name` with the given column DDL snippets if missing.

        NOTE(review): identifiers are interpolated directly into the SQL
        text, so only call this with trusted, hard-coded arguments.
        """
        self._action(f"""CREATE TABLE IF NOT EXISTS {table_name}
        ({', '.join(columns)});""")

    def _action(self, sql_action):
        """Execute a single write statement and commit it.

        The connection is closed even if execution raises, so a failing
        statement no longer leaks the connection.
        """
        conn = psycopg2.connect(self.db_url)
        try:
            with conn.cursor() as curs:
                curs.execute(sql_action)
            conn.commit()
        finally:
            conn.close()

    def _query(self, sql_query) -> list:
        """Execute a read statement and return all result rows."""
        conn = psycopg2.connect(self.db_url)
        try:
            with conn.cursor() as curs:
                curs.execute(sql_query)
                return curs.fetchall()
        finally:
            conn.close()

    def count(self) -> int:
        """Number of rows currently in the features table."""
        return self._query("SELECT COUNT(*) FROM features")[0][0]

    def columns(self) -> List[str]:
        """Column names of the features table, in table order.

        Selecting column_name with ORDER BY ordinal_position guarantees the
        order matches SELECT * output; information_schema rows are otherwise
        unordered, and the old `col[3]` magic index was fragile.
        """
        return [col[0] for col in self._query(
            """SELECT column_name FROM information_schema.columns
            WHERE table_name = 'features'
            ORDER BY ordinal_position;"""
        )]

    def rows(self) -> List[List]:
        """All rows of the features table."""
        return self._query("SELECT * FROM features;")

    def df(self):
        """The whole features table as a pandas DataFrame."""
        return pd.DataFrame(data=self.rows(), columns=self.columns())

    def row(self, idx: int) -> Dict:
        """The single row with primary key `idx`, as a column->value dict."""
        df = self.df()
        return df[df["idx"] == idx].to_dict(orient="records")[0]

    def format_target(self, target):
        """Normalize a target value to the canonical "Class NN" label."""
        # Single quotes inside the f-string: nested double quotes are a
        # SyntaxError before Python 3.12.
        return f"Class {str(target).rjust(2, '0')}"

    def random_row(self, n_features=3):
        """One random feature tuple plus its derived target label."""
        features = tuple(randint(1, 6) for _ in range(n_features))
        return *features, self.format_target(sum(features))

    def joined_rows(self, n_rows):
        """`n_rows` random rows rendered as a SQL VALUES-list fragment."""
        return ",".join(str(self.random_row()) for _ in range(n_rows))

    def seed(self, n_rows: int):
        """Insert `n_rows` randomly generated rows."""
        self._action(f"""INSERT INTO
        features (feature_1, feature_2, feature_3, target)
        VALUES {self.joined_rows(n_rows)};""")

    @staticmethod
    def cleaner(text: str) -> str:
        """Strip punctuation and collapse runs of whitespace to one space."""
        return re.sub(r"\s+", " ", text.translate(
            str.maketrans("", "", string.punctuation)
        ).strip())

    def insert(self, feature_1, feature_2, feature_3, target):
        """Insert one row (target is cleaned/normalized); return its new idx.

        NOTE(review): the returned idx is read back as "latest row" —
        assumes no concurrent writers; confirm before multi-client use.
        """
        self._action(sql.SQL("""INSERT INTO features
        (feature_1, feature_2, feature_3, target)
        VALUES ({},{},{},{});""").format(
            sql.Literal(feature_1),
            sql.Literal(feature_2),
            sql.Literal(feature_3),
            sql.Literal(self.format_target(self.cleaner(target))),
        ))
        return int(self._query(sql.SQL("""SELECT idx FROM features
        ORDER BY idx DESC LIMIT 1;"""))[0][0])

    def reset(self):
        """Delete all rows and restart the idx sequence."""
        self._action("TRUNCATE TABLE features RESTART IDENTITY;")

    def crosstab_vis(self, feature_id) -> Figure:
        """Stacked-bar crosstab of target vs. feature_{1..3}.

        Returns an empty Figure for an out-of-range feature_id.
        """
        if feature_id not in range(1, 4):
            return Figure()
        feature_name = f"feature_{feature_id}"
        feature_title = feature_name.replace('_', ' ').title()
        df = self.df()
        cross_tab = pd.crosstab(
            df["target"],
            df[feature_name],
        )
        data = [
            go.Bar(name=col, x=cross_tab.index, y=cross_tab[col])
            for col in cross_tab.columns
        ]
        title = f"Target by {feature_title} Crosstab"
        layout = go.Layout(
            title=title,
            barmode="stack",
            colorway=px.colors.qualitative.Antique,
        )
        return go.Figure(data=data, layout=layout)

    def target_percent_vis(self):
        """Donut chart of the target class distribution."""
        # Use the Series from value_counts() directly: the column name that
        # .to_frame() produces changed in pandas 2.0 ("target" -> "count"),
        # so selecting it by name is version-fragile.
        counts = self.df()["target"].value_counts()
        data = go.Pie(
            labels=counts.index.values,
            values=counts.values,
            textinfo='label+percent',
            showlegend=False,
            hole=0.5,
        )
        layout = go.Layout(
            title="Target Percentage",
            colorway=px.colors.qualitative.Antique,
        )
        return go.Figure(data=data, layout=layout)
if __name__ == '__main__':
    interface = Data()
    # One-time schema management / seeding helpers, kept for reference:
    # interface._action("DROP TABLE features")
    # interface._setup("features", [
    #     "idx SERIAL PRIMARY KEY NOT NULL",
    #     "feature_1 INT8 NOT NULL",
    #     "feature_2 INT8 NOT NULL",
    #     "feature_3 INT8 NOT NULL",
    #     "target varchar(10) NOT NULL"
    # ])
    # interface.reset()
    # interface.seed(1024)
    # Render the crosstab for feature 1 in the browser.
    interface.crosstab_vis(1).show()
    # interface.target_percent_vis().show()
| """
BloomTech Labs DS Data Engineer Role
- Database Interface
- Visualization Interface
"""
import os
import re
import string
from random import randint
from typing import Iterable, Dict, List
import pandas as pd
import psycopg2
import plotly.express as px
import plotly.graph_objects as go
from plotly.graph_objs import Figure
from psycopg2 import sql
from dotenv import load_dotenv
class Data:
    """Interface to a PostgreSQL `features` table plus Plotly visualizations.

    The connection string is read once, at class-creation time, from the
    DB_URL environment variable (a local .env file is honoured via
    python-dotenv).
    """

    load_dotenv()
    # Shared connection string; None if DB_URL is not configured.
    db_url = os.getenv("DB_URL")

    def _setup(self, table_name: str, columns: Iterable[str]):
        """Create `table_name` with the given column DDL snippets if missing.

        NOTE(review): identifiers are interpolated directly into the SQL
        text, so only call this with trusted, hard-coded arguments.
        """
        self._action(f"""CREATE TABLE IF NOT EXISTS {table_name}
        ({', '.join(columns)});""")

    def _action(self, sql_action):
        """Execute a single write statement and commit it.

        The connection is closed even if execution raises, so a failing
        statement no longer leaks the connection.
        """
        conn = psycopg2.connect(self.db_url)
        try:
            with conn.cursor() as curs:
                curs.execute(sql_action)
            conn.commit()
        finally:
            conn.close()

    def _query(self, sql_query) -> list:
        """Execute a read statement and return all result rows."""
        conn = psycopg2.connect(self.db_url)
        try:
            with conn.cursor() as curs:
                curs.execute(sql_query)
                return curs.fetchall()
        finally:
            conn.close()

    def count(self) -> int:
        """Number of rows currently in the features table."""
        return self._query("SELECT COUNT(*) FROM features")[0][0]

    def columns(self) -> List[str]:
        """Column names of the features table, in table order.

        Selecting column_name with ORDER BY ordinal_position guarantees the
        order matches SELECT * output; information_schema rows are otherwise
        unordered, and the old `col[3]` magic index was fragile.
        """
        return [col[0] for col in self._query(
            """SELECT column_name FROM information_schema.columns
            WHERE table_name = 'features'
            ORDER BY ordinal_position;"""
        )]

    def rows(self) -> List[List]:
        """All rows of the features table."""
        return self._query("SELECT * FROM features;")

    def df(self):
        """The whole features table as a pandas DataFrame."""
        return pd.DataFrame(data=self.rows(), columns=self.columns())

    def row(self, idx: int) -> Dict:
        """The single row with primary key `idx`, as a column->value dict."""
        df = self.df()
        return df[df["idx"] == idx].to_dict(orient="records")[0]

    def format_target(self, target):
        """Normalize a target value to the canonical "Class NN" label."""
        return f"Class {str(target).rjust(2, '0')}"

    def random_row(self, n_features=3):
        """One random feature tuple plus its derived target label."""
        features = tuple(randint(1, 6) for _ in range(n_features))
        return *features, self.format_target(sum(features))

    def joined_rows(self, n_rows):
        """`n_rows` random rows rendered as a SQL VALUES-list fragment."""
        return ",".join(str(self.random_row()) for _ in range(n_rows))

    def seed(self, n_rows: int):
        """Insert `n_rows` randomly generated rows."""
        self._action(f"""INSERT INTO
        features (feature_1, feature_2, feature_3, target)
        VALUES {self.joined_rows(n_rows)};""")

    @staticmethod
    def cleaner(text: str) -> str:
        """Strip punctuation and collapse runs of whitespace to one space."""
        return re.sub(r"\s+", " ", text.translate(
            str.maketrans("", "", string.punctuation)
        ).strip())

    def insert(self, feature_1, feature_2, feature_3, target):
        """Insert one row (target is cleaned/normalized); return its new idx.

        NOTE(review): the returned idx is read back as "latest row" —
        assumes no concurrent writers; confirm before multi-client use.
        """
        self._action(sql.SQL("""INSERT INTO features
        (feature_1, feature_2, feature_3, target)
        VALUES ({},{},{},{});""").format(
            sql.Literal(feature_1),
            sql.Literal(feature_2),
            sql.Literal(feature_3),
            sql.Literal(self.format_target(self.cleaner(target))),
        ))
        return int(self._query(sql.SQL("""SELECT idx FROM features
        ORDER BY idx DESC LIMIT 1;"""))[0][0])

    def reset(self):
        """Delete all rows and restart the idx sequence."""
        self._action("TRUNCATE TABLE features RESTART IDENTITY;")

    def crosstab_vis(self, feature_id) -> Figure:
        """Stacked-bar crosstab of target vs. feature_{1..3}.

        Returns an empty Figure for an out-of-range feature_id.
        """
        if feature_id not in range(1, 4):
            return Figure()
        feature_name = f"feature_{feature_id}"
        feature_title = feature_name.replace('_', ' ').title()
        df = self.df()
        cross_tab = pd.crosstab(
            df["target"],
            df[feature_name],
        )
        data = [
            go.Bar(name=col, x=cross_tab.index, y=cross_tab[col])
            for col in cross_tab.columns
        ]
        title = f"Target by {feature_title} Crosstab"
        layout = go.Layout(
            title=title,
            barmode="stack",
            colorway=px.colors.qualitative.Antique,
        )
        return go.Figure(data=data, layout=layout)

    def target_percent_vis(self):
        """Donut chart of the target class distribution."""
        # Use the Series from value_counts() directly: the column name that
        # .to_frame() produces changed in pandas 2.0 ("target" -> "count"),
        # so selecting it by name is version-fragile.
        counts = self.df()["target"].value_counts()
        data = go.Pie(
            labels=counts.index.values,
            values=counts.values,
            textinfo='label+percent',
            showlegend=False,
            hole=0.5,
        )
        layout = go.Layout(
            title="Target Percentage",
            colorway=px.colors.qualitative.Antique,
        )
        return go.Figure(data=data, layout=layout)
if __name__ == '__main__':
    interface = Data()
    # One-time schema management / seeding helpers, kept for reference:
    # interface._action("DROP TABLE features")
    # interface._setup("features", [
    #     "idx SERIAL PRIMARY KEY NOT NULL",
    #     "feature_1 INT8 NOT NULL",
    #     "feature_2 INT8 NOT NULL",
    #     "feature_3 INT8 NOT NULL",
    #     "target varchar(10) NOT NULL"
    # ])
    # interface.reset()
    # interface.seed(1024)
    # Render the crosstab for feature 1 in the browser.
    interface.crosstab_vis(1).show()
    # interface.target_percent_vis().show()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import contextlib
import numpy as np
import itertools as it
from collections import OrderedDict, abc, namedtuple
from typing import (Callable, Iterable, Tuple, Optional, Dict, Any, Set,
NamedTuple, Union, Sequence)
from warnings import warn
from functools import wraps, partial, partialmethod
from enum import Enum
from jax import numpy as jnp
from jax import core
from jax import linear_util as lu
from jax._src.api import Lowered, _check_callable, _check_arg
from jax._src import dispatch
from jax.tree_util import (tree_flatten, tree_unflatten, all_leaves, tree_map,
tree_leaves)
from jax._src.tree_util import _replace_nones
from jax._src.api_util import (flatten_fun_nokwargs, flatten_axes,
_ensure_index_tuple, donation_vector,
shaped_abstractify)
from jax._src import source_info_util
from jax._src.config import config
from jax.errors import JAXTypeError
from jax.interpreters import mlir
from jax.interpreters import partial_eval as pe
from jax.interpreters import pxla
from jax.interpreters import xla
from jax.interpreters import batching
from jax.interpreters import ad
from jax._src.lib import xla_bridge as xb
from jax._src.lib import xla_client as xc
from jax._src.util import (safe_map, safe_zip, HashableFunction,
as_hashable_function, unzip2, distributed_debug_log,
tuple_insert, moveaxis, split_list, wrap_name)
from jax import lax
map, unsafe_map = safe_map, map
zip = safe_zip
xops = xc.ops
class _PositionalSemantics(Enum):
  """Indicates whether the positional shapes of inputs should be interpreted as
  global or local with respect to the multi-host mesh.
  While named axes are always associated with global sizes, the outermost pjit
  is the boundary between the local shapes in the outer scope and global
  positional shapes in its inner scope. pjits nested inside that one should not
  attempt to increase the sizes of avals again, and xmap has to take this into
  account when inferring the global size of a named axis.
  """
  LOCAL = 0   # positional shapes describe only this host's shard
  GLOBAL = 1  # positional shapes describe the full cross-host array
class _PSThreadLocalState(threading.local):
  """Per-thread holder for the current positional-semantics mode."""
  def __init__(self):
    # Default to LOCAL semantics until something switches the mode.
    self.val = _PositionalSemantics.LOCAL
# Module-wide (but thread-local) positional-semantics state.
_positional_semantics = _PSThreadLocalState()
class FrozenDict(abc.Mapping):
  """An immutable, hashable mapping.

  Used wherever xmap needs a dict-shaped value that can participate in
  hashing/caching (e.g. normalized axis-resource assignments).
  """

  def __init__(self, *args, **kwargs):
    self.contents = dict(*args, **kwargs)

  def __iter__(self):
    return iter(self.contents)

  def __len__(self):
    return len(self.contents)

  def __getitem__(self, name):
    return self.contents[name]

  def __eq__(self, other):
    return isinstance(other, FrozenDict) and self.contents == other.contents

  def __hash__(self):
    # Hash must agree with __eq__: dict equality ignores insertion order,
    # so hashing tuple(items()) (order-sensitive) would let two equal
    # FrozenDicts hash differently. frozenset of items is order-insensitive.
    return hash(frozenset(self.contents.items()))

  def __repr__(self):
    return f"FrozenDict({self.contents})"
# Multi-dimensional generalized map
AxisName = core.AxisName
ResourceAxisName = AxisName  # Different name just for documentation purposes
Mesh = pxla.Mesh
class _Loop(NamedTuple):
  """A serial-loop resource: axis ``name`` executed as ``length`` sequential steps."""
  name: ResourceAxisName
  length: int
class ResourceEnv(NamedTuple):
  """The resources xmap may partition over: a device mesh plus serial loops."""
  physical_mesh: Mesh
  loops: Tuple[_Loop, ...]
  def with_mesh(self, mesh: Mesh):
    """Return a copy with ``mesh`` installed as the physical mesh.

    Replacing axes of the previous mesh itself is allowed; only collisions
    with non-mesh resources (loops) are rejected.
    """
    overlap = set(mesh.axis_names) & (self.resource_axes - set(self.physical_mesh.axis_names))
    if overlap:
      raise ValueError(f"Cannot update the mesh of the current resource "
                       f"environment. The new mesh shadows already defined axes "
                       f"{show_axes(overlap)}")
    return self._replace(physical_mesh=mesh)
  def with_extra_loop(self, loop: _Loop):
    """Return a copy with ``loop`` appended; loop names must be fresh."""
    if loop.name in self.resource_axes:
      raise ValueError(f"Cannot extend the resource environment with loop named "
                       f"`{loop.name}`. An axis of this name is already defined!")
    return self._replace(loops=self.loops + (loop,))
  @property
  def physical_resource_axes(self) -> Set[ResourceAxisName]:
    # Names of the hardware-mesh axes only.
    return set(self.physical_mesh.axis_names)
  @property
  def loop_resource_axes(self) -> Set[ResourceAxisName]:
    # Names of the serial-loop axes only.
    return set(loop.name for loop in self.loops)
  @property
  def resource_axes(self) -> Set[ResourceAxisName]:
    # All resource names, mesh and loop alike.
    return self.physical_resource_axes | self.loop_resource_axes
  @property
  def shape(self):
    # Combined name -> size mapping (mesh axes plus loops). Updating in
    # place assumes Mesh.shape returns a fresh mapping each call — TODO
    # confirm against pxla.Mesh.
    shape = self.physical_mesh.shape
    shape.update(self.loops)  # each _Loop is a (name, length) pair
    return shape
  @property
  def local_shape(self):
    # Same as `shape`, but sized for this host's slice of the mesh.
    shape = self.physical_mesh.local_mesh.shape
    shape.update(self.loops)
    return shape
  def __repr__(self):
    return f"ResourceEnv({self.physical_mesh!r}, {self.loops!r})"
# Environment with no devices (0-d object mesh) and no serial loops.
EMPTY_ENV = ResourceEnv(Mesh(np.empty((), dtype=object), ()), ())
class _ThreadResourcesLocalState(threading.local):
  """Per-thread resource environment, swapped by `mesh` / `serial_loop`."""
  def __init__(self):
    self.env = EMPTY_ENV
thread_resources = _ThreadResourcesLocalState()
class SerialLoop:
  """Create an anonymous serial loop resource for use in a single xmap axis.
  A use of :py:class:`SerialLoop` in :py:func:`xmap`'s ``axis_resources``
  extends the resource environment with a new serial loop with a unique
  unspecified name, that will only be used to partition the axis that
  used a given instance.
  This is unlike :py:func:`serial_loop`, which makes it possible to iterate
  jointly over chunks of multiple axes (with the usual requirement that they
  do not coincide in a named shape of any value in the program).
  Example::
    # Processes `x` in a vectorized way, but in 20 micro-batches.
    xmap(f, in_axes=['i'], out_axes=['i'], axis_resources={'i': SerialLoop(20)})(x)
    # Computes the result in a vectorized way, but in 400 micro-batches,
    # once for each coordinate (0, 0) <= (i, j) < (20, 20). Each `SerialLoop`
    # creates a fresh anonymous loop.
    xmap(h, in_axes=(['i'], ['j']), out_axes=['i', 'j'],
         axis_resources={'i': SerialLoop(20), 'j': SerialLoop(20)})(x, y)
  """
  # Number of sequential iterations the mapped axis is split into.
  length: int

  def __init__(self, length):
    self.length = length

  def __eq__(self, other):
    # Guard the type: comparing against a non-SerialLoop must return
    # NotImplemented instead of raising AttributeError on `other.length`.
    if not isinstance(other, SerialLoop):
      return NotImplemented
    return self.length == other.length

  def __hash__(self):
    return hash(self.length)
@contextlib.contextmanager
def serial_loop(name: ResourceAxisName, length: int):
  """Make a serial-loop resource called ``name`` available within the block.

  Like :py:func:`mesh`, this extends the resource environment with a
  resource named ``name``. Any xmap axis assigned to this resource via
  ``axis_resources`` causes the xmapped body to be executed ``length``
  times, each execution processing only a slice of the inputs mapped along
  the logical axes assigned to this resource. Compared to :py:func:`vmap`
  this lowers memory usage, because intermediate values are not
  materialized for every point in the batch at once. Collectives over loop
  axes are not supported, so loops are less versatile than physical mesh
  axes.

  Args:
    name: Name of the loop in the resource environment.
    length: Number of iterations.

  Example::

    with loop('l', 4):
      out = xmap(
        lambda x: jnp.sin(x) * 5,  # This will be called 4 times with different
                                   # slices of x.
        in_axes=['i'], out_axes=['i'],
        axis_resources={'i': 'l'})(x)
  """
  saved_env: ResourceEnv = getattr(thread_resources, "env", EMPTY_ENV)
  thread_resources.env = saved_env.with_extra_loop(_Loop(name, length))
  try:
    yield
  finally:
    # Restore the previous environment even if the body raised.
    thread_resources.env = saved_env
@contextlib.contextmanager
def mesh(devices: np.ndarray, axis_names: Sequence[ResourceAxisName]):
  """Declare the hardware resources available in the scope of this manager.

  Every name in ``axis_names`` becomes a valid resource name inside the
  managed block, usable e.g. in the ``axis_resources`` argument of
  :py:func:`xmap`. When compiling from multiple threads, enter the
  ``with mesh`` block inside the function each thread executes.

  Args:
    devices: A NumPy ndarray object containing JAX device objects (as
      obtained e.g. from :py:func:`jax.devices`).
    axis_names: A sequence of resource axis names to be assigned to the
      dimensions of the ``devices`` argument. Its length should match the
      rank of ``devices``.

  Example::

    devices = np.array(jax.devices())[:4].reshape((2, 2))
    with mesh(devices, ('x', 'y')):  # declare a 2D mesh with axes 'x' and 'y'
      distributed_out = xmap(
        jnp.vdot,
        in_axes=({0: 'left', 1: 'right'}),
        out_axes=['left', 'right', ...],
        axis_resources={'left': 'x', 'right': 'y'})(x, x.T)
  """
  saved_env: ResourceEnv = getattr(thread_resources, "env", EMPTY_ENV)
  thread_resources.env = saved_env.with_mesh(
      Mesh(np.asarray(devices, dtype=object), axis_names))
  try:
    yield
  finally:
    # Restore the previous environment even if the body raised.
    thread_resources.env = saved_env
_next_resource_id = 0
class _UniqueResourceName:
def __init__(self, uid, tag=None):
self.uid = uid
self.tag = tag
def __eq__(self, other):
return type(other) is _UniqueResourceName and self.uid == other.uid
def __hash__(self):
return hash(self.uid)
def __repr__(self):
return f"<UniqueResource {self.tag} {self.uid}>"
def fresh_resource_name(tag=None):
  """Return a new anonymous resource name, unique for this process."""
  global _next_resource_id
  name = _UniqueResourceName(_next_resource_id, tag)
  _next_resource_id += 1
  return name
# This is really a Dict[AxisName, int], but we don't define a
# pytree instance for it, so that it is treated as a leaf.
class AxisNamePos(FrozenDict):
  # How the user originally wrote this axes spec (used in error messages).
  user_repr: str
  # Rank implied by the spec, when it was given without a trailing ellipsis.
  expected_rank: Optional[int] = None
  def __init__(self, *args, user_repr, **kwargs):
    super().__init__(*args, **kwargs)
    self.user_repr = user_repr
class AxisNamePosWithRank(AxisNamePos):
  # Variant used when the spec was a full list/tuple without a trailing
  # ellipsis, so the rank of the mapped value is known up front.
  def __init__(self, *args, expected_rank, **kwargs):
    super().__init__(*args, **kwargs)
    self.expected_rank = expected_rank
# str(...) == 'Ellipsis' which is really annoying
class DotDotDotRepr:
  """Stand-in for the Ellipsis object that prints as '...' in messages."""

  def __repr__(self):
    return "..."
def _parse_entry(arg_name, entry):
  """Parse one in_axes/out_axes leaf spec into an AxisNamePos mapping.

  Accepts either a dict mapping positional axes (ints) to axis names, or a
  list/tuple of axis names optionally terminated by ``...``. Raises
  TypeError for any other shape and ValueError for duplicate names or
  negative axes.
  """
  # Dictionaries mapping axis names to positional axes
  if isinstance(entry, dict) and all(isinstance(v, int) for v in entry.keys()):
    result = AxisNamePos(((name, axis) for axis, name in entry.items()),
                         user_repr=str(entry))
    num_mapped_dims = len(entry)
  # Non-empty lists or tuples that optionally terminate with an ellipsis
  elif isinstance(entry, (tuple, list)):
    if entry and entry[-1] == ...:
      constr = AxisNamePos
      entry = entry[:-1]
      # Keep '...' (not 'Ellipsis') in the user-facing repr of the spec.
      tail = [DotDotDotRepr()] if isinstance(entry, list) else (DotDotDotRepr(),)
      user_repr = str(entry + tail)
    else:
      # No ellipsis: the spec pins the exact rank of the mapped value.
      constr = partial(AxisNamePosWithRank, expected_rank=len(entry))
      user_repr = str(entry)
    result = constr(((name, axis) for axis, name in enumerate(entry)
                     if name is not None),
                    user_repr=user_repr)
    num_mapped_dims = sum(name is not None for name in entry)
  else:
    # The example dict below must be brace-escaped ({{...}}): a bare
    # {2: 'name'} inside an f-string is parsed as a replacement field with
    # an invalid format spec and raises ValueError while building the
    # message, masking the intended TypeError.
    raise TypeError(f"""\
Value mapping specification in xmap {arg_name} pytree can be either:
- lists of axis names (possibly ending with the ellipsis object: ...)
- dictionaries that map positional axes (integers) to axis names (e.g. {{2: 'name'}})
but got: {entry}""")
  # A duplicate name collapses into one dict key, shrinking the mapping.
  if len(result) != num_mapped_dims:
    raise ValueError(f"Named axes should be unique within each {arg_name} argument "
                     f"specification, but one of them is: {entry}")
  for axis in result.values():
    if axis < 0:
      raise ValueError(f"xmap doesn't support negative axes in {arg_name}")
  return result
def _is_axes_leaf(entry):
  """Whether ``entry`` is a single axes spec and should be a pytree leaf."""
  if isinstance(entry, dict):
    return all_leaves(entry.values())
  # NOTE: `None`s are not considered leaves by `all_leaves`
  if isinstance(entry, (tuple, list)):
    return all_leaves(v for v in entry if v is not None)
  return False
def _prepare_axes(axes, arg_name):
  """Flatten an in_axes/out_axes pytree and parse each leaf spec."""
  leaves, treedef = tree_flatten(axes, is_leaf=_is_axes_leaf)
  parsed = [_parse_entry(arg_name, leaf) for leaf in leaves]
  return tree_unflatten(treedef, parsed), parsed, treedef
Resource = Union[ResourceAxisName, SerialLoop]
ResourceSet = Union[Resource, Tuple[Resource, ...]]
# TODO: Some syntactic sugar to make the API more usable in a single-axis case?
# TODO: Are the resource axes scoped lexically or dynamically? Dynamically for now!
def xmap(fun: Callable,
in_axes,
out_axes,
*,
axis_sizes: Dict[AxisName, int] = {},
axis_resources: Dict[AxisName, ResourceSet] = {},
donate_argnums: Union[int, Sequence[int]] = (),
backend: Optional[str] = None):
"""Assign a positional signature to a program that uses named array axes.
.. warning::
This is an experimental feature and the details can change at
any time. Use at your own risk!
.. warning::
This docstring is aspirational. Not all features of the named axis
programming model have been implemented just yet.
The usual programming model of JAX (or really NumPy) associates each array
with two pieces of metadata describing its type: the element type (``dtype``)
and the ``shape``. :py:func:`xmap` extends this model by adding support for
*named axes*. In particular, each array used in a function wrapped by
:py:func:`xmap` can additionally have a non-empty ``named_shape`` attribute,
which can be used to query the set of named axes (introduced by
:py:func:`xmap`) appearing in that value along with their shapes.
Furthermore, in most places where positional axis indices are allowed (for
example the `axes` arguments in :py:func:`sum`), bound axis names are also
accepted. The :py:func:`einsum` language is extended inside :py:func:`xmap`
to additionally allow contractions that involve named axes. Broadcasting of
named axes happens *by name*, i.e. all axes with equal names are expected to
have equal shapes in all arguments of a broadcasting operation, while the
result has a (set) union of all named axes. The positional semantics of the
program remain unchanged, and broadcasting still implicitly right-aligns
positional axes for unification. For an extended description of the
:py:func:`xmap` programming model, please refer to the :py:func:`xmap`
tutorial notebook in main JAX documentation.
Note that since all top-level JAX expressions are interpreted in the NumPy
programming model, :py:func:`xmap` can also be seen as an adapter that
converts a function that uses named axes (including in arguments and returned
values) into one that takes and returns values that only have positional
axes.
The default lowering strategy of :py:func:`xmap` converts all named axes into
positional axes, working similarly to multiple applications of
:py:func:`vmap`. However, this behavior can be further customized by the
``axis_resources`` argument. When specified, each axis introduced by
:py:func:`xmap` can be assigned to one or more *resource axes*. Those include
the axes of the hardware mesh, as defined by the :py:func:`mesh` context
manager. Each value that has a named axis in its ``named_shape`` will be
partitioned over all mesh axes that axis is assigned to. Hence,
:py:func:`xmap` can be seen as an alternative to :py:func:`pmap` that also
exposes a way to automatically partition the computation over multiple
devices.
.. warning::
While it is possible to assign multiple axis names to a single resource axis,
care has to be taken to ensure that none of those named axes co-occur in a
``named_shape`` of any value in the named program. At the moment this is
**completely unchecked** and will result in **undefined behavior**. The
final release of :py:func:`xmap` will enforce this invariant, but it is a
work in progress.
Note that you do not have to worry about any of this for as long as no
resource axis is repeated in ``axis_resources.values()``.
Note that any assignment of ``axis_resources`` doesn't ever change the
results of the computation, but only how it is carried out (e.g. how many
devices are used). This makes it easy to try out various ways of
partitioning a single program in many distributed scenarios (both small- and
large-scale), to maximize the performance. As such, :py:func:`xmap` can be
seen as a way to seamlessly interpolate between :py:func:`vmap` and
:py:func:`pmap`-style execution.
Args:
fun: Function that uses named axes. Its arguments and return
value should be arrays, scalars, or (nested) standard Python containers
(tuple/list/dict) thereof (in general: valid pytrees).
in_axes: A Python object with the same container (pytree) structure as the
signature of arguments to ``fun``, but with a positional-to-named axis
mapping in place of every array argument. The valid positional-to-named
mappings are: (1) a ``Dict[int, AxisName]`` specifying that a positional
dimensions given by dictionary keys are to be converted to named axes
of given names (2) a list of axis names that ends with the Ellipsis object
(``...``) in which case a number of leading positional axes of the argument
will be converted into named axes inside the function. Note that ``in_axes``
can also be a prefix of the argument container structure, in which case the
mapping is repeated for all arrays in the collapsed subtree.
out_axes: A Python object with the same container (pytree) structure as the
returns of ``fun``, but with a positional-to-named axis mapping in place
of every returned array. The valid positional-to-named mappings are the same
as in ``in_axes``. Note that ``out_axes`` can also be a prefix of the return
container structure, in which case the mapping is repeated for all arrays
in the collapsed subtree.
axis_sizes: A dict mapping axis names to their sizes. All axes defined by xmap
have to appear either in ``in_axes`` or ``axis_sizes``. Sizes of axes
that appear in ``in_axes`` are inferred from arguments whenever possible.
In multi-host scenarios, the user-specified sizes are expected to be the
global axis sizes (and might not match the expected size of local inputs).
axis_resources: A dictionary mapping the axes introduced in this
:py:func:`xmap` to one or more resource axes. Any array that has in its
shape an axis with some resources assigned will be partitioned over the
resources associated with the respective resource axes.
donate_argnums: Specify which argument buffers are "donated" to the computation.
It is safe to donate argument buffers if you no longer need them once the
computation has finished. In some cases XLA can make use of donated
buffers to reduce the amount of memory needed to perform a computation,
for example recycling one of your input buffers to store a result. You
should not reuse buffers that you donate to a computation, JAX will raise
an error if you try to.
For more details on buffer donation see the [FAQ](https://jax.readthedocs.io/en/latest/faq.html#buffer-donation).
backend: This is an experimental feature and the API is likely to change.
Optional, a string representing the XLA backend. 'cpu', 'gpu', or 'tpu'.
Returns:
A version of ``fun`` that takes in arrays with positional axes in place of
named axes bound in this :py:func:`xmap` call, and results with all named
axes converted to positional axes. If ``axis_resources`` is specified,
``fun`` can additionally execute in parallel on multiple devices.
For example, :py:func:`xmap` makes it very easy to convert a function that
computes the vector inner product (such as :py:func:`jax.numpy.vdot`) into
one that computes a matrix multiplication:
>>> import jax.numpy as jnp
>>> x = jnp.arange(10).reshape((2, 5))
>>> xmap(jnp.vdot,
... in_axes=({0: 'left'}, {1: 'right'}),
... out_axes=['left', 'right', ...])(x, x.T)
DeviceArray([[ 30, 80],
[ 80, 255]], dtype=int32)
Note that the contraction in the program is performed over the positional axes,
while named axes are just a convenient way to achieve batching. While this
might seem like a silly example at first, it might turn out to be useful in
practice, since with conjuction with ``axis_resources`` this makes it possible
to implement a distributed matrix-multiplication in just a few lines of code::
devices = np.array(jax.devices())[:4].reshape((2, 2))
with mesh(devices, ('x', 'y')): # declare a 2D mesh with axes 'x' and 'y'
distributed_out = xmap(
jnp.vdot,
in_axes=({0: 'left'}, {1: 'right'}),
out_axes=['left', 'right', ...],
axis_resources={'left': 'x', 'right': 'y'})(x, x.T)
Still, the above examples are quite simple. After all, the xmapped
computation was a simple NumPy function that didn't use the axis names at all!
So, let's explore a slightly larger example which is linear regression::
def regression_loss(x, y, w, b):
# Contract over in_features. Batch and out_features are present in
# both inputs and output, so they don't need to be mentioned
y_pred = jnp.einsum('{in_features},{in_features}->{}', x, w) + b
error = jnp.sum((y - y_pred) ** 2, axis='out_features')
return jnp.mean(error, axis='batch')
xmap(regression_loss,
in_axes=(['batch', 'in_features', ...],
['batch', 'out_features', ...],
['in_features', 'out_features', ...],
['out_features', ...]),
out_axes={}) # Loss is reduced over all axes, including batch!
.. note::
When using ``axis_resources`` along with a mesh that is controlled by
multiple JAX hosts, keep in mind that in any given process :py:func:`xmap`
only expects the data slice that corresponds to its local devices to be
specified. This is in line with the current multi-host :py:func:`pmap`
programming model.
"""
warn("xmap is an experimental feature and probably has bugs!")
_check_callable(fun)
if isinstance(in_axes, list) and not _is_axes_leaf(in_axes):
# To be a tree prefix of the positional args tuple, in_axes can never be a
# list: if in_axes is not a leaf, it must be a tuple of trees. However,
# in cases like these users expect tuples and lists to be treated
# essentially interchangeably, so we canonicalize lists to tuples here
# rather than raising an error. https://github.com/google/jax/issues/2367
in_axes = tuple(in_axes)
if in_axes == (): # Allow empty argument lists
in_axes, in_axes_entries = (), []
else:
in_axes, in_axes_entries, _ = _prepare_axes(in_axes, "in_axes")
if out_axes == ():
raise ValueError("xmapped functions cannot have no return values")
else:
out_axes, out_axes_entries, out_axes_treedef = _prepare_axes(out_axes, "out_axes")
out_axes_entries = tuple(out_axes_entries) # Make entries hashable
axis_sizes_names = set(axis_sizes.keys())
in_axes_names = set(it.chain(*(spec.keys() for spec in in_axes_entries)))
defined_names = axis_sizes_names | in_axes_names
out_axes_names = set(it.chain(*(spec.keys() for spec in out_axes_entries)))
anon_serial_loops = []
def normalize_resource(r) -> ResourceAxisName:
if isinstance(r, SerialLoop):
name = fresh_resource_name()
anon_serial_loops.append((name, r.length))
return name
return r
normalized_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]] = {}
for axis in defined_names:
resources = axis_resources.get(axis, ())
if not isinstance(resources, tuple):
resources = (resources,)
normalized_axis_resources[axis] = tuple(unsafe_map(normalize_resource, resources))
frozen_axis_resources = FrozenDict(normalized_axis_resources)
necessary_resources = set(it.chain(*frozen_axis_resources.values()))
axes_with_resources = set(frozen_axis_resources.keys())
if axes_with_resources > defined_names:
raise ValueError(f"All axes that were assigned resources have to appear in "
f"in_axes or axis_sizes, but the following are missing: "
f"{axes_with_resources - defined_names}")
if out_axes_names > defined_names:
raise ValueError(f"All axis names appearing in out_axes must also appear in "
f"in_axes or axis_sizes, but the following are missing: "
f"{out_axes_names - defined_names}")
for axis, resources in frozen_axis_resources.items():
if len(set(resources)) != len(resources): # type: ignore
raise ValueError(f"Resource assignment of a single axis must be a tuple of "
f"distinct resources, but specified {resources} for axis {axis}")
donate_argnums = _ensure_index_tuple(donate_argnums)
# A little performance optimization to avoid iterating over all args unnecessarily
has_input_rank_assertions = any(spec.expected_rank is not None for spec in in_axes_entries)
has_output_rank_assertions = any(spec.expected_rank is not None for spec in out_axes_entries)
def infer_params(*args):
  """Flatten ``args`` and compute the parameter dict for ``xmap_p.bind``.

  Returns ``(fun_flat, args_flat, params, in_tree, out_tree)``. Raises
  ``ValueError`` when required resources are out of scope, axis sizes are
  not divisible by their resource counts, axis sizes cannot be inferred,
  or input rank assertions fail.
  """
  # Putting this outside of fun_mapped would make resources lexically scoped
  resource_env = thread_resources.env
  available_resources = set(resource_env.shape.keys())
  if necessary_resources - available_resources:
    raise ValueError(f"In-scope resources are insufficient to execute the "
                     f"xmapped function. The missing resources are: "
                     f"{necessary_resources - available_resources}")
  args_flat, in_tree = tree_flatten(args)
  fun_flat, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
  if donate_argnums:
    donated_invars = donation_vector(donate_argnums, args, ())
  else:
    donated_invars = (False,) * len(args_flat)
  in_axes_flat = _flatten_axes("xmap in_axes", in_tree, in_axes, tupled_args=True)
  # Some pytree containers might be unhashable, so we flatten the out_axes
  # pytree into a treedef and entries which are guaranteed to be hashable.
  out_axes_thunk = HashableFunction(
      lambda: tuple(_flatten_axes("xmap out_axes", out_tree(), out_axes, tupled_args=False)),
      closure=(out_axes_entries, out_axes_treedef))
  axis_resource_count = _get_axis_resource_count(
      _positional_semantics.val, frozen_axis_resources, resource_env)
  for axis, size in axis_sizes.items():
    resources = axis_resource_count[axis]
    if size % resources.nglobal != 0:
      global_size = "Global size" if resources.distributed else "Size"
      raise ValueError(f"{global_size} of axis {axis} ({size}) is not divisible "
                       f"by the total number of resources assigned to this axis "
                       f"({frozen_axis_resources[axis]}, {resources.nglobal} in total)")
  frozen_global_axis_sizes = _get_axis_sizes(args_flat, in_axes_flat,
                                             axis_sizes, axis_resource_count)
  missing_sizes = defined_names - set(frozen_global_axis_sizes.keys())
  if missing_sizes:
    # BUGFIX: the f-string expression must use single quotes for the join
    # separator — reusing double quotes inside a double-quoted f-string is a
    # syntax error before Python 3.12.
    raise ValueError(f"Failed to infer size of axes: {', '.join(unsafe_map(str, missing_sizes))}. "
                     f"You've probably passed in empty containers in place of arguments that had "
                     f"those axes in their in_axes. Provide the sizes of missing axes explicitly "
                     f"via axis_sizes to fix this error.")
  if has_input_rank_assertions:
    for arg, spec in zip(args_flat, in_axes_flat):
      if spec.expected_rank is not None and spec.expected_rank != arg.ndim:
        raise ValueError(f"xmap argument has an in_axes specification of {spec.user_repr}, "
                         f"which asserts that it should be of rank {spec.expected_rank}, "
                         f"but the argument has rank {arg.ndim} (and shape {arg.shape})")
  params = dict(
    name=getattr(fun, '__name__', '<unnamed function>'),
    in_axes=tuple(in_axes_flat),
    out_axes_thunk=out_axes_thunk,
    donated_invars=donated_invars,
    global_axis_sizes=frozen_global_axis_sizes,
    axis_resources=frozen_axis_resources,
    resource_env=resource_env,
    backend=backend,
    spmd_in_axes=None,
    spmd_out_axes_thunk=None,
    positional_semantics=_positional_semantics.val)
  return fun_flat, args_flat, params, in_tree, out_tree
def verify_outputs(out_flat, out_tree, params):
  # Enforce any rank assertions encoded in the out_axes specs, then
  # reassemble the flat outputs into the user's pytree structure.
  if has_output_rank_assertions:
    specs = params['out_axes_thunk']()
    for result, spec in zip(out_flat, specs):
      expected = spec.expected_rank
      if expected is not None and expected != result.ndim:
        raise ValueError(f"xmap output has an out_axes specification of {spec.user_repr}, "
                         f"which asserts that it should be of rank {expected}, "
                         f"but the output has rank {result.ndim} (and shape {result.shape})")
  return tree_unflatten(out_tree(), out_flat)
def fun_mapped(*args):
  # Validate the arguments, bind the xmap primitive over the flattened
  # inputs, and check/unflatten the results.
  tree_map(_check_arg, args)
  flat_fun, flat_args, bind_params, _, out_tree = infer_params(*args)
  flat_outs = xmap_p.bind(flat_fun, *flat_args, **bind_params)
  return verify_outputs(flat_outs, out_tree, bind_params)
def decorate_serial(f):
  # Wrap f in one serial_loop context per anonymous loop collected during
  # resource normalization (innermost loop applied last).
  wrapped = f
  for loop_name_and_length in reversed(anon_serial_loops):
    wrapped = serial_loop(*loop_name_and_length)(wrapped)
  return wrapped
def lower(*args):
  """Trace and lower the xmapped function for the given example arguments,
  returning a ``Lowered`` object without executing it."""
  flat_fun, flat_args, params, in_tree, out_tree = infer_params(*args)
  in_avals = tuple(shaped_abstractify(a) for a in flat_args)
  computation = make_xmap_callable(
      flat_fun, params['name'], params['in_axes'], params['out_axes_thunk'],
      params['donated_invars'], params['global_axis_sizes'],
      params['axis_resources'], params['resource_env'], params['backend'],
      params['spmd_in_axes'], params['spmd_out_axes_thunk'],
      params['positional_semantics'], *in_avals)
  return Lowered(
      computation, in_tree, out_tree(), donate_argnums, no_kwargs=True)
fun_mapped = wraps(fun)(decorate_serial(fun_mapped))
fun_mapped.lower = decorate_serial(lower)
return fun_mapped
def xmap_impl(fun: lu.WrappedFun, *args, name, in_axes, out_axes_thunk, donated_invars,
              global_axis_sizes, axis_resources, resource_env, backend,
              spmd_in_axes, spmd_out_axes_thunk, positional_semantics):
  """Eager evaluation rule for the xmap primitive: compile, log, and call."""
  abstract_args = [core.raise_to_shaped(core.get_aval(a)) for a in args]
  compiled_fn = make_xmap_callable(
      fun, name, in_axes, out_axes_thunk, donated_invars, global_axis_sizes,
      axis_resources, resource_env, backend,
      spmd_in_axes, spmd_out_axes_thunk, positional_semantics,
      *abstract_args).compile().unsafe_call
  distributed_debug_log(("Running xmapped function", name),
                        ("python function", fun.f),
                        ("mesh", resource_env.physical_mesh),
                        ("abstract args", abstract_args))
  return compiled_fn(*args)
@lu.cache
def make_xmap_callable(fun: lu.WrappedFun,
                       name,
                       in_axes, out_axes_thunk, donated_invars,
                       global_axis_sizes, axis_resources, resource_env, backend,
                       spmd_in_axes, spmd_out_axes_thunk, positional_semantics,
                       *in_avals):
  """Trace ``fun`` to a jaxpr and lower it to an executable computation.

  Picks between the mesh lowering path (when any physical mesh resources
  are used) and the plain XLA-callable path otherwise. Cached via
  ``lu.cache`` on the wrapped function and the hashable parameters.
  """
  assert positional_semantics == _PositionalSemantics.LOCAL
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)
  # TODO: Making axis substitution final style would allow us to avoid
  # tracing to jaxpr here
  # Trace with the named axes removed from the input avals (they become
  # implicit named shapes inside the body).
  mapped_in_avals = [_delete_aval_axes(aval, in_axes, global_axis_sizes)
                     for aval, in_axes in zip(in_avals, in_axes)]
  with core.extend_axis_env_nd(global_axis_sizes.items()):
    # NOTE: "{elapsed_time}" is intentionally a literal placeholder — it is
    # formatted by log_elapsed_time itself, not by this f-string.
    with dispatch.log_elapsed_time(f"Finished tracing + transforming {fun.__name__} "
                                   "for xmap in {elapsed_time} sec"):
      jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(fun, mapped_in_avals)
  out_axes = out_axes_thunk()
  _check_out_avals_vs_out_axes(out_avals, out_axes, global_axis_sizes)
  # NOTE: We don't use avals and all params, so only pass in the relevant parts (too lazy...)
  _resource_typing_xmap([], dict(axis_resources=axis_resources,
                                 out_axes=out_axes,
                                 call_jaxpr=jaxpr,
                                 resource_env=resource_env,
                                 name=name),
                        source_info_util.new_source_info(), resource_env, {})
  # Replace logical axis names with their resource axis names inside the body.
  jaxpr = plan.subst_axes_with_resources(jaxpr)
  use_spmd_lowering = config.experimental_xmap_spmd_lowering
  ensure_fixed_sharding = config.experimental_xmap_ensure_fixed_sharding
  if use_spmd_lowering and ensure_fixed_sharding:
    jaxpr = _fix_inferred_spmd_sharding(jaxpr, resource_env)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(jaxpr, consts)))
  f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  # Decide between mesh lowering and plain XLA lowering based on whether any
  # physical mesh resources are actually used by the computation.
  used_resources = _jaxpr_resources(jaxpr, resource_env) | set(it.chain(*axis_resources.values()))
  used_mesh_axes = used_resources & resource_env.physical_resource_axes
  if used_mesh_axes:
    assert spmd_in_axes is None and spmd_out_axes_thunk is None  # No outer xmaps, so should be None
    mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
    mesh = resource_env.physical_mesh
    global_in_avals = [mesh.local_to_global(ax, av)
                       for ax, av in safe_zip(mesh_in_axes, in_avals)]
    if config.experimental_xmap_spmd_lowering_manual:
      tiling_method = pxla.TilingMethod.MANUAL
    else:
      tiling_method = pxla.TilingMethod.VECTORIZE
    return pxla.lower_mesh_computation(
        f, name, mesh,
        mesh_in_axes, mesh_out_axes, donated_invars,
        use_spmd_lowering, global_in_avals,
        tiling_method=tiling_method, in_is_gda=[False] * len(global_in_avals))
  else:
    return dispatch.lower_xla_callable(
        f, None, backend, name, donated_invars, *((a, None) for a in in_avals))
class EvaluationPlan(NamedTuple):
  """Encapsulates preprocessing common to top-level xmap invocations and its translation rule."""
  # The resource environment this plan was built against.
  resource_env: ResourceEnv
  # Logical axis -> physical (mesh) resource axes it is partitioned over.
  physical_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]]
  # Logical axis -> serial-loop resource axes it is partitioned over.
  loop_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]]
  # Full substitution for each logical axis: its resource axes, with a fresh
  # vmapped axis appended when vmapping is needed (see axis_vmap_size).
  axis_subst_dict: Dict[AxisName, Tuple[ResourceAxisName, ...]]
  # Tile size handled by vmap for each axis, or None when no vmap is needed.
  axis_vmap_size: Dict[AxisName, Optional[int]]

  @property
  def axis_subst(self) -> core.AxisSubst:
    """Substitution function mapping a logical axis to its resource axes."""
    return lambda name: self.axis_subst_dict.get(name, (name,))

  @property
  def resource_axis_env(self):
    """Resource-axis sizes: the mesh/loop shape plus any vmapped axis sizes."""
    env = dict(self.resource_env.shape)
    for axis, size in self.axis_vmap_size.items():
      if size is None:
        continue
      # The vmapped axis is always the last entry appended in axis_subst_dict.
      vmap_axis = self.axis_subst_dict[axis][-1]
      env[vmap_axis] = size
    return env

  @classmethod
  def from_axis_resources(cls,
                          axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]],
                          resource_env: ResourceEnv,
                          global_axis_sizes: Dict[AxisName, int]):
    """Build a plan by splitting resources into physical/loop parts and
    deciding, per axis, whether a residual vmap is required."""
    physical_axis_resources, loop_axis_resources = _unzip_axis_resources(
        axis_resources, resource_env)
    axis_resource_count = _get_axis_resource_count(None, axis_resources, resource_env)
    axis_subst_dict = dict(axis_resources)
    axis_vmap_size: Dict[AxisName, Optional[int]] = {}
    # Sorted for a deterministic order of fresh resource names.
    for naxis, raxes in sorted(axis_resources.items(), key=lambda x: str(x[0])):
      num_resources = axis_resource_count[naxis]
      assert global_axis_sizes[naxis] % num_resources.nglobal == 0
      local_tile_size = global_axis_sizes[naxis] // num_resources.nglobal
      # We have to vmap when there are no resources (to handle the axis name!) or
      # when every resource gets chunks of values.
      if not raxes or local_tile_size > 1:
        axis_vmap_size[naxis] = local_tile_size
        axis_subst_dict[naxis] += (fresh_resource_name(naxis),)
      else:
        axis_vmap_size[naxis] = None
    return cls(resource_env,
               physical_axis_resources, loop_axis_resources,
               axis_subst_dict, axis_vmap_size)

  def subst_axes_with_resources(self, jaxpr):
    """Rewrite logical axis names in ``jaxpr`` into their resource axes."""
    try:
      if any(self.loop_axis_resources.values()):
        _check_no_loop_collectives(jaxpr, self.loop_axis_resources)
      with core.extend_axis_env_nd(self.resource_axis_env.items()):
        return core.subst_axis_names_jaxpr(jaxpr, self.axis_subst)
    except core.DuplicateAxisNameError:
      raise AssertionError("Incomplete resource type-checking? Please open a bug report!")

  def vectorize_and_loop(self, f: lu.WrappedFun, in_axes, out_axes) -> lu.WrappedFun:
    """Wrap ``f`` in tiled vmaps for vmapped axes and (at most one) serial loop."""
    vmap_axes = {
        naxis: raxes[-1]
        for naxis, raxes in self.axis_subst_dict.items()
        if self.axis_vmap_size[naxis] is not None
    }
    # Apply vtile per axis; ordering by uid keeps it deterministic.
    for naxis, vaxis in sorted(vmap_axes.items(), key=lambda x: x[1].uid):
      local_tile_size = self.axis_vmap_size[naxis]
      map_in_axes = tuple(unsafe_map(lambda spec: spec.get(naxis, None), in_axes))
      map_out_axes = tuple(unsafe_map(lambda spec: spec.get(naxis, None), out_axes))
      f = batching.vtile(f, map_in_axes, map_out_axes, tile_size=local_tile_size, axis_name=vaxis)
    used_loops = set(it.chain.from_iterable(self.loop_axis_resources.values()))
    if not used_loops:
      return f
    if len(used_loops) > 1:
      # TODO: Support multiple loops
      raise NotImplementedError("Only one loop per xmap is supported")
    loop_in_axes = _to_resource_axes(in_axes, self.loop_axis_resources)
    loop_out_axes = _to_resource_axes(out_axes, self.loop_axis_resources)
    loop_name, = used_loops
    loop_length = self.resource_env.shape[loop_name]
    def looped_f(*args):
      def body(i, _):
        # XXX: This call_wrapped is only valid under the assumption that scan
        # only ever traces the body once (which it does at the moment).
        result = f.call_wrapped(
          *(_slice_tile(arg, spec.get(loop_name, None), i, loop_length)
            for arg, spec in zip(args, loop_in_axes)))
        return i + 1, result
      _, stacked_results = lax.scan(body, 0, (), length=loop_length)
      return [_merge_leading_axis(sresult, spec.get(loop_name, None))
              for sresult, spec in zip(stacked_results, loop_out_axes)]
    return lu.wrap_init(looped_f)

  def to_mesh_axes(self, in_axes, out_axes):
    """
    Convert in/out_axes parameters ranging over logical dimensions to
    in/out_axes that range over the mesh dimensions.
    """
    return (_to_resource_axes(in_axes, self.physical_axis_resources),
            _to_resource_axes(out_axes, self.physical_axis_resources))
# -------- xmap primitive and its transforms --------
# xmap has a different set of parameters than pmap, so we make it its own primitive type
class XMapPrimitive(core.MapPrimitive):
  """The xmap map-primitive.

  xmap takes a different parameter set than pmap, so it is its own primitive
  type rather than reusing the pmap machinery.
  """

  def __init__(self):
    super().__init__('xmap')
    self.def_impl(xmap_impl)
    self.def_custom_bind(self.bind)

  def bind(self, fun, *args, in_axes, **params):
    # One in_axes spec per positional argument is required.
    assert len(in_axes) == len(args), (in_axes, args)
    return core.map_bind(self, fun, *args, in_axes=in_axes, **params)

  def process(self, trace, fun, tracers, params):
    # Delegate to the trace-specific hook installed on each Trace subclass.
    return trace.process_xmap(self, fun, tracers, params)

  def post_process(self, trace, out_tracers, params):
    raise NotImplementedError

  def get_bind_params(self, params):
    # Invert eqn params back into bind params: the stored jaxpr becomes the
    # callable, and out_axes/spmd_out_axes are re-wrapped as hashable thunks.
    new_params = dict(params)
    subfun = lu.wrap_init(partial(core.eval_jaxpr, new_params.pop('call_jaxpr'), ()))
    axes = new_params.pop('out_axes')
    new_params['out_axes_thunk'] = HashableFunction(lambda: axes, closure=axes)
    spmd_axes = new_params.pop('spmd_out_axes')
    if spmd_axes is None:
      new_params['spmd_out_axes_thunk'] = None
    else:
      new_params['spmd_out_axes_thunk'] = \
          HashableFunction(lambda: spmd_axes, closure=spmd_axes)
    return [subfun], new_params
xmap_p = XMapPrimitive()
core.EvalTrace.process_xmap = core.EvalTrace.process_call # type: ignore
def _process_xmap_default(self, call_primitive, f, tracers, params):
  # Default process_xmap installed on core.Trace: any trace that has not
  # explicitly opted in to xmap support fails loudly.
  raise NotImplementedError(f"{type(self)} must override process_xmap to handle xmap")
core.Trace.process_xmap = _process_xmap_default # type: ignore
def _xmap_axis_subst(params, subst, traverse):
if 'call_jaxpr' not in params: # TODO(apaszke): This feels sketchy, but I'm not sure why
return params
if not traverse:
return params
def shadowed_subst(name):
return (name,) if name in params['global_axis_sizes'] else subst(name)
with core.extend_axis_env_nd(params['global_axis_sizes'].items()):
new_jaxpr = core.subst_axis_names_jaxpr(params['call_jaxpr'], shadowed_subst)
return dict(params, call_jaxpr=new_jaxpr)
core.axis_substitution_rules[xmap_p] = _xmap_axis_subst
# NOTE: We don't have to handle spmd_{in|out}_axes here, because
# SPMD batching always gets involved as the last transform before XLA translation
ad.JVPTrace.process_xmap = ad.JVPTrace.process_call # type: ignore
ad.call_param_updaters[xmap_p] = ad.call_param_updaters[xla.xla_call_p]
def _xmap_transpose(params, call_jaxpr, args, cts_in, cts_in_avals, reduce_axes):
  """Transpose rule for xmap: runs the backward pass inside a new xmap."""
  all_args, in_tree_def = tree_flatten(((), args, cts_in))  # empty consts
  # The transposed function is the backward pass of the body jaxpr; the axes
  # bound by this xmap also need to be reduced over during the backward pass.
  fun = lu.hashable_partial(
      lu.wrap_init(ad.backward_pass),
      call_jaxpr, reduce_axes + tuple(params['global_axis_sizes'].keys()))
  fun, nz_arg_cts = ad.nonzero_outputs(fun)
  fun, out_tree = flatten_fun_nokwargs(fun, in_tree_def)
  # Preserve axis for primal arguments, skip tangents (represented as undefined primals).
  in_axes, out_axes = params['in_axes'], params['out_axes']
  new_in_axes = (*(axis for axis, x in zip(in_axes, args) if not ad.is_undefined_primal(x)),
                 *(axis for axis, x in zip(out_axes, cts_in) if type(x) is not ad.Zero))
  # NOTE: This assumes that the output cotangents being zero is a deterministic
  # function of which input cotangents were zero.
  @as_hashable_function(closure=(in_axes, tuple(type(c) is ad.Zero for c in cts_in)))
  def out_axes_thunk():
    return tuple(axis for axis, nz in zip(in_axes, nz_arg_cts()) if nz)
  new_params = dict(params,
                    name=wrap_name(params['name'], 'transpose'),
                    in_axes=new_in_axes,
                    out_axes_thunk=out_axes_thunk,
                    donated_invars=(False,) * len(new_in_axes),
                    spmd_out_axes_thunk=None)
  # Bind params use thunks; remove the eqn-param equivalents.
  del new_params['out_axes']
  del new_params['spmd_out_axes']
  out_flat = xmap_p.bind(fun, *all_args, **new_params)
  arg_cts = tree_unflatten(out_tree(), out_flat)

  # Symbolically-zero input cotangents need avals with the mapped axes
  # reinserted at their local (per-host) sizes.
  axis_resource_count = _get_axis_resource_count(
      params['positional_semantics'], params['axis_resources'], params['resource_env'])
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in params['global_axis_sizes'].items()}
  def unmap_zero(zero, axes):
    return ad.Zero(_insert_aval_axes(zero.aval, axes, local_axis_sizes))
  return tuple(unmap_zero(arg_ct, in_axis) if type(arg_ct) is ad.Zero else arg_ct
               for arg_ct, in_axis in zip(arg_cts, in_axes))
ad.primitive_transposes[xmap_p] = _xmap_transpose
def _typecheck_xmap(
    *in_avals, call_jaxpr, name, in_axes, out_axes, donated_invars,
    global_axis_sizes, axis_resources, resource_env, backend,
    spmd_in_axes, spmd_out_axes, positional_semantics):
  """Custom jaxpr type-checking rule for the xmap primitive.

  Verifies that the operand avals match what the body jaxpr expects (after
  reinserting mapped axes at local sizes), type-checks the body with the
  named axes in scope, and returns the output avals with mapped axes
  reinserted.
  """
  axis_resource_count = _get_axis_resource_count(
      positional_semantics, axis_resources, resource_env)
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in global_axis_sizes.items()}
  # The binder avals are the body's input avals with the mapped axes added
  # back; they must be compatible with the actual operand avals.
  binder_in_avals = [_insert_aval_axes(v.aval, a_in_axes, local_axis_sizes)
                     for v, a_in_axes in zip(call_jaxpr.invars, in_axes)]
  for binder_in_aval, in_aval in zip(binder_in_avals, in_avals):
    if not core.typecompat(binder_in_aval, in_aval):
      raise core.JaxprTypeError(
        f"xmap passes operand {in_aval} to jaxpr expecting {binder_in_aval}")

  # Type-check the body against operand avals with the mapped axes removed.
  mapped_in_avals = [_delete_aval_axes(a, a_in_axes, global_axis_sizes)
                     for a, a_in_axes in zip(in_avals, in_axes)]
  with core.extend_axis_env_nd(global_axis_sizes.items()):
    core._check_jaxpr(lambda: core.JaxprPpContext(), call_jaxpr,
                      mapped_in_avals)

  mapped_out_avals = [v.aval for v in call_jaxpr.outvars]
  out_avals = [_insert_aval_axes(a, a_out_axes, local_axis_sizes)
               for a, a_out_axes in zip(mapped_out_avals, out_axes)]
  return out_avals
core.custom_typechecks[xmap_p] = _typecheck_xmap
def show_axes(axes):
  """Render axis names as a sorted, comma-separated list of backquoted names."""
  quoted = (f"`{axis}`" for axis in axes)
  return ", ".join(sorted(quoted))
def _resource_typing_xmap(avals,
                          params,
                          source_info: source_info_util.SourceInfo,
                          resource_env,
                          outer_axis_resources):
  """Resource type-checking rule for xmap.

  Checks that this xmap's axis names don't shadow outer axis names, that the
  physical mesh is unchanged, recursively resource-typechecks the body, and
  rejects outputs that broadcast along an axis whose resources are already
  used to partition the output's named shape.
  """
  axis_resources = params['axis_resources']
  inner_axis_resources = dict(outer_axis_resources)
  inner_axis_resources.update(axis_resources)
  # If the merged dict is smaller than the sum, some names collided.
  if len(inner_axis_resources) < len(outer_axis_resources) + len(axis_resources):
    overlap = set(outer_axis_resources) & set(axis_resources)
    raise JAXTypeError(
        f"Detected disallowed xmap axis name shadowing at "
        f"{source_info_util.summarize(source_info)} "
        f"(shadowed axes: {show_axes(overlap)})")

  if resource_env.physical_mesh != params['resource_env'].physical_mesh:
    raise RuntimeError("Changing the physical mesh is not allowed inside xmap.")

  call_jaxpr = params['call_jaxpr']
  # BUGFIX: the f-string expressions below must use single quotes around
  # 'name' — reusing double quotes inside a double-quoted f-string is a
  # syntax error before Python 3.12.
  pxla.resource_typecheck(
      params['call_jaxpr'], resource_env, inner_axis_resources,
      lambda: (f"an xmapped function {params['name']} " +
               (f"(xmap called at {source_info_util.summarize(source_info)})"
                if source_info else "")))

  for v, axes in zip(call_jaxpr.outvars, params['out_axes']):
    broadcast_axes = set(axes) - set(v.aval.named_shape)
    used_resources = set(it.chain.from_iterable(
        inner_axis_resources[a] for a in v.aval.named_shape))
    for baxis in broadcast_axes:
      baxis_resources = set(inner_axis_resources[baxis])
      overlap = baxis_resources & used_resources
      if overlap:
        resource_to_axis = {}
        for axis in v.aval.named_shape:
          for raxis in inner_axis_resources[axis]:
            resource_to_axis[raxis] = axis
        partitioning_axes = set(resource_to_axis[raxis] for raxis in overlap)
        raise JAXTypeError(
            f"One of xmapped function ({params['name']}) outputs is broadcast "
            f"along axis `{baxis}` which is assigned to resources "
            f"{show_axes(baxis_resources)}, but the output is already "
            f"partitioned along {show_axes(overlap)}, because its "
            f"named shape contains {show_axes(partitioning_axes)}")
pxla.custom_resource_typing_rules[xmap_p] = _resource_typing_xmap
# This is DynamicJaxprTrace.process_map with some very minor modifications
# This is DynamicJaxprTrace.process_map with some very minor modifications
def _dynamic_jaxpr_process_xmap(self, primitive, f, tracers, params):
  """process_xmap for DynamicJaxprTrace: trace the body and stage an eqn."""
  from jax.interpreters.partial_eval import (
    trace_to_subjaxpr_dynamic, DynamicJaxprTracer,
    convert_constvars_jaxpr, new_jaxpr_eqn)
  assert primitive is xmap_p
  in_avals = [t.aval for t in tracers]
  global_axis_sizes = params['global_axis_sizes']
  # Trace the body with the mapped axes removed from the input avals.
  mapped_in_avals = [_delete_aval_axes(a, a_in_axes, global_axis_sizes)
                     for a, a_in_axes in zip(in_avals, params['in_axes'])]
  with core.extend_axis_env_nd(global_axis_sizes.items()):
    jaxpr, mapped_out_avals, consts = trace_to_subjaxpr_dynamic(
        f, self.main, mapped_in_avals)
  out_axes = params['out_axes_thunk']()
  if params['spmd_out_axes_thunk'] is not None:
    spmd_out_axes = params['spmd_out_axes_thunk']()
  else:
    spmd_out_axes = None
  # Output avals get the mapped axes reinserted at their local sizes.
  axis_resource_count = _get_axis_resource_count(
      params['positional_semantics'], params['axis_resources'], params['resource_env'])
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in global_axis_sizes.items()}
  out_avals = [_insert_aval_axes(a, a_out_axes, local_axis_sizes)
               for a, a_out_axes in zip(mapped_out_avals, out_axes)]
  _check_out_avals_vs_out_axes(out_avals, out_axes, params['global_axis_sizes'])
  source_info = source_info_util.current()
  out_tracers = [DynamicJaxprTracer(self, a, source_info) for a in out_avals]
  invars = map(self.getvar, tracers)
  constvars = map(self.getvar, map(self.instantiate_const, consts))
  outvars = map(self.makevar, out_tracers)
  # Lifted consts become extra leading inputs with empty in_axes specs.
  new_in_axes = (AxisNamePos(user_repr='{}'),) * len(consts) + params['in_axes']
  if params['spmd_in_axes'] is None:
    new_spmd_in_axes = None
  else:
    new_spmd_in_axes = (None,) * len(consts) + params['spmd_in_axes']
  new_donated_invars = (False,) * len(consts) + params['donated_invars']
  with core.extend_axis_env_nd(global_axis_sizes.items()):
    call_jaxpr = convert_constvars_jaxpr(jaxpr)
  # Eqn params store out_axes directly; the thunks are bind-time only.
  new_params = dict(params, in_axes=new_in_axes, out_axes=out_axes,
                    donated_invars=new_donated_invars,
                    spmd_in_axes=new_spmd_in_axes,
                    spmd_out_axes=spmd_out_axes,
                    call_jaxpr=call_jaxpr)
  del new_params['out_axes_thunk']
  del new_params['spmd_out_axes_thunk']
  eqn = new_jaxpr_eqn([*constvars, *invars], outvars, primitive,
                      new_params, source_info)
  self.frame.eqns.append(eqn)
  return out_tracers
pe.DynamicJaxprTrace.process_xmap = _dynamic_jaxpr_process_xmap # type: ignore
def _xmap_partial_eval_custom_params_updater(
    unks_in: Sequence[bool],
    kept_outs_known: Sequence[bool], kept_outs_staged: Sequence[bool],
    num_res: int, params_known: dict, params_staged: dict
  ) -> Tuple[dict, dict]:
  """Update xmap eqn params for partial_eval_jaxpr_custom.

  The known jaxpr gains ``num_res`` trailing residual outputs and the staged
  jaxpr gains ``num_res`` leading residual inputs; the axis specs for those
  residuals are derived from the residual outvars' named shapes.
  """
  assert params_known['spmd_in_axes'] is None and params_known['spmd_out_axes'] is None
  assert params_staged['spmd_in_axes'] is None and params_staged['spmd_out_axes'] is None

  # pruned inputs to jaxpr_known according to unks_in
  donated_invars_known, _ = pe.partition_list(unks_in, params_known['donated_invars'])
  in_axes_known, _ = pe.partition_list(unks_in, params_known['in_axes'])
  if num_res == 0:
    residual_axes = []
  else:
    # BUGFIX: the residuals are the LAST num_res outputs of the known jaxpr
    # (they are appended after out_axes_known below), so their named shapes
    # must come from outvars[-num_res:], not outvars[:-num_res]. The latter
    # also produced the wrong number of axis specs, violating the length
    # assertion on out_axes below.
    residual_axes = [
      AxisNamePos(zip(sort_named_shape, range(len(sort_named_shape))),
                  user_repr=f'<internal: {sort_named_shape}>')
      for named_shape in (v.aval.named_shape for v in params_known['call_jaxpr'].outvars[-num_res:])
      # We sort here to make the iteration order deterministic
      for sort_named_shape in [sorted(named_shape, key=str)]
    ]
  _, out_axes_known = pe.partition_list(kept_outs_known, params_known['out_axes'])
  new_params_known = dict(params_known,
                          in_axes=tuple(in_axes_known),
                          out_axes=(*out_axes_known, *residual_axes),
                          donated_invars=tuple(donated_invars_known))
  assert len(new_params_known['in_axes']) == len(params_known['call_jaxpr'].invars)
  assert len(new_params_known['out_axes']) == len(params_known['call_jaxpr'].outvars)

  # added num_res new inputs to jaxpr_staged
  donated_invars_staged = (*(False for _ in range(num_res)), *params_staged['donated_invars'])
  _, out_axes_staged = pe.partition_list(kept_outs_staged, params_staged['out_axes'])
  new_params_staged = dict(params_staged,
                           in_axes=(*residual_axes, *params_staged['in_axes']),
                           out_axes=tuple(out_axes_staged),
                           donated_invars=donated_invars_staged)
  assert len(new_params_staged['in_axes']) == len(params_staged['call_jaxpr'].invars)
  assert len(new_params_staged['out_axes']) == len(params_staged['call_jaxpr'].outvars)
  return new_params_known, new_params_staged
pe.partial_eval_jaxpr_custom_rules[xmap_p] = \
partial(pe.call_partial_eval_custom_rule, 'call_jaxpr',
_xmap_partial_eval_custom_params_updater)
@lu.transformation_with_aux
def out_local_named_shapes(local_axes, *args, **kwargs):
  # Linear-util transformation: passes args/results through unchanged and,
  # as aux output, reports for each result which of ``local_axes`` appear in
  # its named shape.
  ans = yield args, kwargs
  ans_axes = [frozenset(a.aval.named_shape) & local_axes for a in ans]
  yield ans, ans_axes
@lu.transformation_with_aux
def hide_units(unit_args, *args, **kwargs):
  # Linear-util transformation: reinserts units into the arguments before
  # calling the wrapped function and strips units from the results, reporting
  # the output unit mask as aux.
  ans = yield restore_units(unit_args, args), kwargs
  yield filter_units(ans)
def filter_units(vals):
  """Split ``vals`` into its non-unit values and a parallel is-unit mask."""
  mask = [v is core.unit for v in vals]
  kept = [v for v, is_unit in zip(vals, mask) if not is_unit]
  return kept, mask
def restore_units(is_unit, vals):
  """Inverse of filter_units: reinsert ``core.unit`` at the masked positions.

  Raises RuntimeError if ``vals`` has more entries than unmasked positions.
  """
  remaining = iter(vals)
  restored = [core.unit if flag else next(remaining) for flag in is_unit]
  # Every value must have been consumed; leftovers indicate a length mismatch
  # between the mask and the values.
  sentinel = object()
  if next(remaining, sentinel) is not sentinel:
    raise RuntimeError("Expected the iterator to be exhausted")
  return restored
def _jaxpr_trace_process_xmap(self, primitive, f: lu.WrappedFun, tracers, params):
  """process_xmap for JaxprTrace (classic partial evaluation).

  Partially evaluates the xmapped function, staging out the unknown part as
  a new xmap eqn whose inputs include lifted constants (with axis specs
  derived from their named shapes) and whose known outputs are propagated
  as constants.
  """
  from jax.interpreters.partial_eval import (
    PartialVal, JaxprTracer, _drop_vars, _dce_open_jaxpr,
    convert_constvars_jaxpr, new_eqn_recipe)
  assert primitive is xmap_p
  in_axes = params['in_axes']
  donated_invars = params['donated_invars']
  global_axis_sizes = params['global_axis_sizes']

  in_pvals = [t.pval for t in tracers]
  # Unknown inputs are abstracted with their mapped axes removed.
  in_pvals = [pval if pval.is_known()
              else PartialVal.unknown(_delete_aval_axes(pval[0], axes, global_axis_sizes))
              for pval, axes in zip(in_pvals, in_axes)]

  # Filled in by new_out_axes_thunk with axis specs for lifted constants.
  const_axes_s = lu.Store()
  def app(f, *args):
    # Units cannot flow through xmap, so hide them from the bind and track
    # local named shapes of outputs to derive axis specs for constants.
    args_no_units, in_units = filter_units(args)
    f, out_units = hide_units(f, tuple(in_units))
    f, out_named_shapes = out_local_named_shapes(f, frozenset(global_axis_sizes))
    out_axes_thunk = params['out_axes_thunk']
    @as_hashable_function(closure=out_axes_thunk)
    def new_out_axes_thunk():
      out_axes = out_axes_thunk()
      axes_units, const_units = split_list(out_units(), [len(out_axes)])
      assert not any(const_units)
      num_consts = len(const_units)
      out_axes_no_units = [a for a, u in zip(out_axes, axes_units) if not u]
      const_axes: Sequence[AxisNamePos]
      if num_consts == 0:
        const_axes = ()
      else:
        const_axes = [
          AxisNamePos(zip(sort_named_shape, range(len(sort_named_shape))),
                      user_repr=f'<internal: {sort_named_shape}>')
          for named_shape in out_named_shapes()[-num_consts:]
          # We sort here to make the iteration order deterministic
          for sort_named_shape in [sorted(named_shape, key=str)]
        ]
      if not const_axes_s:  # NOTE: This can be called multiple times
        const_axes_s.store(const_axes)
      assert const_axes_s.val == const_axes
      return (*out_axes_no_units, *const_axes)
    pe_params = dict(
      params,
      in_axes=tuple(a for a, u in zip(in_axes, in_units) if not u),
      donated_invars=tuple(a for a, u in zip(donated_invars, in_units) if not u),
      out_axes_thunk=new_out_axes_thunk)
    outs_no_units = primitive.bind(f, *args_no_units, **pe_params)
    new_out_axes_thunk()  # Make sure it is called at least once to compute const_axes
    return restore_units(out_units(), outs_no_units)

  jaxpr, out_pvals, consts, env_tracers = self.partial_eval(
      f, in_pvals, app, instantiate=False)
  out_axes = params['out_axes_thunk']()
  const_axes = const_axes_s.val
  # Known output pvals get their mapped axes reinserted at local sizes.
  axis_resource_count = _get_axis_resource_count(
      params['positional_semantics'], params['axis_resources'], params['resource_env'])
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in global_axis_sizes.items()}
  out_pvals = [pval if pval.is_known() else
               PartialVal.unknown(_insert_aval_axes(pval[0], axes, local_axis_sizes))
               for pval, axes in zip(out_pvals, out_axes)]

  with core.extend_axis_env_nd(global_axis_sizes.items()):
    # Skip known invars and outvars, and lift constants as regular invars
    in_knowns = tuple(t.pval.is_known() for t in it.chain(env_tracers, tracers))
    out_unknowns = tuple(not pval.is_known() for pval in out_pvals)
    jaxpr = _drop_vars(jaxpr, in_knowns, (False,) * len(jaxpr.outvars))
    jaxpr = _dce_open_jaxpr(jaxpr, out_unknowns, drop_outputs=True)
    jaxpr = convert_constvars_jaxpr(jaxpr)

  # Known tracers get propagated as if they were constants
  known_tracers_out = [self.new_const(pval.get_known()) for pval in out_pvals
                       if pval.is_known()]

  # I'm not 100% if that's correct, but it is an assumption that
  # JaxprTrace.process_call already makes.
  if any(t.pval.is_known() for t in env_tracers):
    raise AssertionError("Please open a bug report!")
  # Unknown tracers need to have the jaxpr set up as their recipe
  unknown_tracers_in = (*env_tracers, *(t for t in tracers if not t.pval.is_known()))
  unknown_tracers_out = [JaxprTracer(self, pval, None) for pval in out_pvals
                         if not pval.is_known()]
  const_tracers = map(self.new_instantiated_const, consts)

  # Set up new params
  new_in_axes = (*const_axes,
                 *(None for _ in env_tracers),
                 *(axis for axis, t in zip(in_axes, tracers)
                   if not t.pval.is_known()))
  new_out_axes = tuple(axis for axis, pval in zip(out_axes, out_pvals)
                       if not pval.is_known())
  assert params['spmd_in_axes'] is None and params['spmd_out_axes_thunk'] is None
  new_params = dict(
    params,
    call_jaxpr=jaxpr,
    donated_invars=(*(False for _ in const_tracers),
                    *(d for d, t in zip(donated_invars, tracers) if not t.pval.is_known())),
    in_axes=new_in_axes,
    out_axes=new_out_axes,
    spmd_out_axes=None)
  del new_params['out_axes_thunk']
  del new_params['spmd_out_axes_thunk']

  eqn = new_eqn_recipe((*const_tracers, *unknown_tracers_in),
                       unknown_tracers_out,
                       primitive, new_params, source_info_util.current())
  for t in unknown_tracers_out: t.recipe = eqn
  return pe._zip_knowns(known_tracers_out, unknown_tracers_out, out_unknowns)
pe.JaxprTrace.process_xmap = _jaxpr_trace_process_xmap
def _batch_trace_update_spmd_axes(
    spmd_in_axes, spmd_out_axes_thunk,
    axis_name, dims, dims_out_thunk):
  """Extends spmd in and out axes with the position of the trace's batch dimension."""
  not_mapped = batching.not_mapped
  def insert_spmd_axis(axes, nd):
    # Pad with Nones so position nd exists, then record axis_name there.
    too_short = nd - len(axes)
    if too_short > 0:
      axes += (None,) * too_short
    return tuple_insert(axes, nd, axis_name)

  if spmd_in_axes is None:
    spmd_in_axes = ((),) * len(dims)
  new_spmd_in_axes = tuple(
    spmd_axes if d is not_mapped else insert_spmd_axis(spmd_axes, d)
    for spmd_axes, d in zip(spmd_in_axes, dims))

  @as_hashable_function(closure=spmd_out_axes_thunk)
  def new_spmd_out_axes_thunk():
    dims_out = dims_out_thunk()
    if spmd_out_axes_thunk is None:
      spmd_out_axes = ((),) * len(dims_out)
    else:
      spmd_out_axes = spmd_out_axes_thunk()
    return tuple(
      spmd_out_axes if nd is not_mapped else insert_spmd_axis(spmd_out_axes, nd)
      for spmd_out_axes, nd in zip(spmd_out_axes, dims_out))

  return new_spmd_in_axes, new_spmd_out_axes_thunk
def _batch_trace_process_xmap(self, is_spmd, primitive, f: lu.WrappedFun, tracers, params):
  """process_xmap for BatchTrace (and, with is_spmd=True, SPMDBatchTrace).

  Pushes the batch dimension through the xmap by shifting the positional
  indices in in_axes/out_axes specs to account for the inserted dimension.
  """
  not_mapped = batching.not_mapped
  vals, dims = unzip2((t.val, t.batch_dim) for t in tracers)
  assert primitive is xmap_p
  if not is_spmd and all(dim is not_mapped for dim in dims):
    return primitive.bind(f, *vals, **params)
  else:
    assert len({x.shape[d] for x, d in zip(vals, dims) if d is not not_mapped}) == 1
    def fmap_dims(axes, f):
      # Apply f to every positional index in an AxisNamePos spec.
      return AxisNamePos(((name, f(axis)) for name, axis in axes.items()),
                         user_repr=axes.user_repr)
    # Named-axis positions at or after the batch dim shift right by one.
    new_in_axes = tuple(
      fmap_dims(in_axes, lambda a: a + (d is not not_mapped and d <= a))
      for d, in_axes in zip(dims, params['in_axes']))
    # Batch dims as seen by the body: with this xmap's named axes removed.
    mapped_dims_in = tuple(
      d if d is not_mapped else d - sum(a < d for a in in_axis.values())
      for d, in_axis in zip(dims, params['in_axes']))
    f, mapped_dims_out = batching.batch_subtrace(f, self.main, mapped_dims_in)
    out_axes_thunk: Callable[[], Sequence[AxisNamePos]] = params['out_axes_thunk']
    # Outer batch dims of outputs: body batch dims with named axes reinserted.
    dims_out_thunk = lambda: tuple(d if d is not_mapped else axis_after_insertion(d, out_axes)
                                   for d, out_axes in zip(mapped_dims_out(), out_axes_thunk()))
    def axis_after_insertion(axis, inserted_named_axes):
      # Account for the named-axis positions inserted before ``axis``.
      for inserted_axis in sorted(inserted_named_axes.values()):
        if inserted_axis >= axis:
          break
        axis += 1
      return axis
    # NOTE: This assumes that the choice of the dimensions over which outputs
    # are batched is entirely dependent on the function and not e.g. on the
    # data or its shapes.
    @as_hashable_function(closure=out_axes_thunk)
    def new_out_axes_thunk():
      return tuple(
        out_axes if d is not_mapped else
        fmap_dims(out_axes, lambda a, nd=axis_after_insertion(d, out_axes): a + (nd <= a))
        for out_axes, d in zip(out_axes_thunk(), mapped_dims_out()))

    if not is_spmd:
      assert params['spmd_in_axes'] is None and params['spmd_out_axes_thunk'] is None
      new_spmd_in_axes = None
      new_spmd_out_axes_thunk = None
    else:
      new_spmd_in_axes, new_spmd_out_axes_thunk = _batch_trace_update_spmd_axes(
        params['spmd_in_axes'], params['spmd_out_axes_thunk'],
        self.axis_name, dims, dims_out_thunk)

    new_params = dict(params,
                      in_axes=new_in_axes, out_axes_thunk=new_out_axes_thunk,
                      spmd_in_axes=new_spmd_in_axes,
                      spmd_out_axes_thunk=new_spmd_out_axes_thunk)
    vals_out = primitive.bind(f, *vals, **new_params)
    dims_out = dims_out_thunk()
    return [batching.BatchTracer(self, v, d) for v, d in zip(vals_out, dims_out)]
batching.BatchTrace.process_xmap = partialmethod(_batch_trace_process_xmap, False) # type: ignore
pxla.SPMDBatchTrace.process_xmap = partialmethod(_batch_trace_process_xmap, True) # type: ignore
# -------- nested xmap handling --------
def _xmap_lowering_rule(ctx, *args, **kwargs):
  """Dispatch xmap lowering to the rule matching the current axis context."""
  axis_context = ctx.module_context.axis_context
  if isinstance(axis_context, mlir.SPMDAxisContext):
    # SPMD partitioning: the MANUAL variant is selected by a config flag.
    rule = (_xmap_lowering_rule_spmd_manual
            if config.experimental_xmap_spmd_lowering_manual
            else _xmap_lowering_rule_spmd)
    return rule(ctx, *args, **kwargs)
  if isinstance(axis_context, mlir.ReplicaAxisContext):
    return _xmap_lowering_rule_replica(ctx, *args, **kwargs)
  raise AssertionError("Unrecognized axis context type!")
mlir.register_lowering(xmap_p, _xmap_lowering_rule)
def _xmap_lowering_rule_replica(ctx, *in_nodes,
                                call_jaxpr, name,
                                in_axes, out_axes, donated_invars,
                                global_axis_sizes,
                                spmd_in_axes, spmd_out_axes,
                                positional_semantics,
                                axis_resources, resource_env, backend):
  """MLIR lowering of xmap on the replica (non-SPMD) path.

  Each input is explicitly tiled across the local mesh with `_tile`, the
  vectorized jaxpr is inlined, and outputs are reassembled with `_untile`
  (which pads tiles and psums them).
  """
  xla.check_backend_matches(backend, ctx.module_context.platform)
  # The only way for any of those two assertions to be violated is when xmap
  # is using the SPMD lowering, but then this rule shouldn't even trigger.
  assert positional_semantics == _PositionalSemantics.LOCAL
  assert spmd_in_axes is None and spmd_out_axes is None
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)

  axis_resource_count = _get_axis_resource_count(positional_semantics, axis_resources, resource_env)
  # Multi-host (distributed) resources are not supported on this path.
  if any(resource_count.distributed for resource_count in axis_resource_count.values()):
    raise NotImplementedError
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in global_axis_sizes.items()}

  local_mesh = resource_env.physical_mesh.local_mesh
  local_mesh_shape = local_mesh.shape
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)

  # Avals as seen inside the xmapped function: named axes inserted as
  # explicit dimensions, then tiled over the local mesh.
  local_avals = [pxla.tile_aval_nd(
                    local_mesh_shape, aval_mesh_in_axes,
                    _insert_aval_axes(v.aval, aval_in_axes, local_axis_sizes))
                 for v, aval_in_axes, aval_mesh_in_axes
                 in zip(call_jaxpr.invars, in_axes, mesh_in_axes)]
  # We have to substitute before tracing, because we want the vectorized
  # axes to be used in the jaxpr.
  resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  # NOTE: We don't extend the resource env with the mesh shape, because those
  # resources are already in scope! It's the outermost xmap that introduces
  # them!
  vectorized_jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(f, local_avals)
  _check_out_avals_vs_out_axes(out_avals, out_axes, global_axis_sizes)
  assert not consts

  tiled_ins = (
    mlir.lower_fun(partial(_tile, in_axes=arg_in_axes,
                           axis_sizes=local_mesh_shape),
                   multiple_results=False)(
        mlir.LoweringRuleContext(module_context=ctx.module_context,
                                 primitive=None,
                                 avals_in=[aval], avals_out=None),
        in_node)[0]
    if v.aval is not core.abstract_unit else in_node
    for v, aval, in_node, arg_in_axes
    in zip(call_jaxpr.invars, ctx.avals_in, in_nodes, mesh_in_axes))

  # NOTE: We don't extend the resource env with the mesh shape, because those
  # resources are already in scope! It's the outermost xmap that introduces
  # them!
  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  sub_ctx = ctx.module_context.replace(
      name_stack=xla.extend_name_stack(ctx.module_context.name_stack,
                                       xla.wrap_name(name, 'xmap')))
  tiled_outs = mlir.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (), *tiled_ins)

  outs = [
      mlir.lower_fun(
          partial(_untile, out_axes=ans_out_axes, axis_sizes=local_mesh_shape,
                  platform=ctx.module_context.platform),
          multiple_results=False)(
              mlir.LoweringRuleContext(module_context=ctx.module_context,
                                       primitive=None,
                                       avals_in=[vectorized_outvar.aval],
                                       avals_out=None), tiled_out)[0]
      if v.aval is not core.abstract_unit else tiled_out
      for v, vectorized_outvar, tiled_out, ans_out_axes
      in zip(call_jaxpr.outvars, vectorized_jaxpr.outvars, tiled_outs,
             mesh_out_axes)]
  return outs
def _xmap_lowering_rule_spmd(ctx, *global_in_nodes,
                             call_jaxpr, name, in_axes, out_axes,
                             donated_invars, global_axis_sizes, spmd_in_axes,
                             spmd_out_axes, positional_semantics,
                             axis_resources, resource_env, backend):
  """MLIR lowering of xmap through the XLA SPMD partitioner.

  Traces the vectorized computation at global shapes and wraps the inputs
  and outputs in sharding ops; the actual partitioning is left to XLA.
  """
  xla.check_backend_matches(backend, ctx.module_context.platform)
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)

  resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, in_axes, out_axes)
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
  mesh = resource_env.physical_mesh
  f = pxla.vtile_by_mesh(f, mesh, mesh_in_axes, mesh_out_axes)

  # XXX: We modify mesh_in_axes and mesh_out_axes here
  def add_spmd_axes(flat_mesh_axes: Sequence[pxla.ArrayMapping],
                    flat_extra_axes: Optional[Sequence[Sequence[Sequence[pxla.MeshAxisName]]]]):
    # Merge the extra per-dimension SPMD axes (if any) into the mesh axis
    # maps in place, asserting they don't collide with existing assignments.
    if flat_extra_axes is None:
      return
    for axes, extra in zip(flat_mesh_axes, flat_extra_axes):
      for dim, dim_extra_axis in enumerate(extra):
        if dim_extra_axis is None: continue
        assert dim_extra_axis not in axes
        assert not config.jax_enable_checks or all(v != dim for v in axes.values())
        axes[dim_extra_axis] = dim
  add_spmd_axes(mesh_in_axes, spmd_in_axes)
  add_spmd_axes(mesh_out_axes, spmd_out_axes)

  global_in_avals = ctx.avals_in
  vectorized_jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(f, global_in_avals)
  assert not consts

  global_sharding_spec = pxla.mesh_sharding_specs(mesh.shape, mesh.axis_names)
  # Only annotate inputs that are actually mapped over some mesh axis.
  sharded_global_in_nodes = [
    [mlir.wrap_with_sharding_op(node, global_sharding_spec(aval, aval_axes).sharding_proto())]
    if aval_axes else [node]
    for node, aval, aval_axes in zip(global_in_nodes, global_in_avals, mesh_in_axes)
  ]

  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  sub_ctx = ctx.module_context.replace(
      name_stack=xla.extend_name_stack(ctx.module_context.name_stack,
                                       xla.wrap_name(name, 'xmap')))
  global_out_nodes = mlir.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (),
                                        *sharded_global_in_nodes)

  sharded_global_out_nodes = [
    mlir.wrap_with_sharding_op(node, global_sharding_spec(aval, aval_axes).sharding_proto())
    if aval_axes else node
    for (node,), aval, aval_axes in zip(global_out_nodes, global_out_avals, mesh_out_axes)
  ]

  return sharded_global_out_nodes
def _xmap_lowering_rule_spmd_manual(ctx, *global_in_nodes,
                                    call_jaxpr, name, in_axes, out_axes,
                                    donated_invars, global_axis_sizes, spmd_in_axes,
                                    spmd_out_axes, positional_semantics,
                                    axis_resources, resource_env, backend):
  """MLIR lowering of xmap using the MANUAL mode of the XLA SPMD partitioner.

  Unlike the regular SPMD rule, no sharding-constraint ops are emitted;
  `vtile_manual` handles sharding and the mesh axes used by this xmap are
  marked as manually partitioned in the axis context.
  """
  assert spmd_in_axes is None and spmd_out_axes is None
  # This first part (up to vtile_manual) is shared with non-MANUAL SPMD rule.
  xla.check_backend_matches(backend, ctx.module_context.platform)
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)

  resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, in_axes, out_axes)
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  # NOTE: Sharding constraints are handled entirely by vtile_manual!
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
  mesh = resource_env.physical_mesh
  f = pxla.vtile_manual(f, mesh, mesh_in_axes, mesh_out_axes)

  # NOTE: We don't extend the resource env with the mesh shape, because those
  # resources are already in scope! It's the outermost xmap that introduces
  # them!
  global_in_avals = ctx.avals_in
  vectorized_jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(f, global_in_avals)
  assert not consts

  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  manual_mesh_axes = frozenset(it.chain.from_iterable(plan.physical_axis_resources.values()))
  assert isinstance(ctx.module_context.axis_context, mlir.SPMDAxisContext)
  sub_ctx = ctx.module_context.replace(
      name_stack=xla.extend_name_stack(ctx.module_context.name_stack,
                                       xla.wrap_name(name, 'xmap')),
      axis_context=ctx.module_context.axis_context.extend_manual(manual_mesh_axes))
  global_out_nodes = mlir.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (),
                                        *([n] for n in global_in_nodes))

  return global_out_nodes
def _xmap_translation_rule(*args, **kwargs):
  """Dispatch the legacy XLA translation of xmap based on the lowering flags."""
  if config.experimental_xmap_spmd_lowering_manual:
    raise NotImplementedError("Manual lowering only supported in MLIR lowering")
  rule = (_xmap_translation_rule_spmd if config.experimental_xmap_spmd_lowering
          else _xmap_translation_rule_replica)
  return rule(*args, **kwargs)
xla.register_translation(xmap_p, _xmap_translation_rule)
def _xmap_translation_rule_replica(ctx, avals_in, avals_out, *in_nodes,
                                   call_jaxpr, name,
                                   in_axes, out_axes, donated_invars,
                                   global_axis_sizes,
                                   spmd_in_axes, spmd_out_axes,
                                   positional_semantics,
                                   axis_resources, resource_env, backend):
  """XLA translation of xmap on the replica (non-SPMD) path.

  Mirrors `_xmap_lowering_rule_replica`, but targets the legacy XLA builder
  API instead of MLIR: tile inputs, inline the vectorized jaxpr, untile
  outputs.
  """
  xla.check_backend_matches(backend, ctx.platform)
  # The only way for any of those two assertions to be violated is when xmap
  # is using the SPMD lowering, but then this rule shouldn't even trigger.
  assert positional_semantics == _PositionalSemantics.LOCAL
  assert spmd_in_axes is None and spmd_out_axes is None
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)

  axis_resource_count = _get_axis_resource_count(positional_semantics, axis_resources, resource_env)
  # Multi-host (distributed) resources are not supported on this path.
  if any(resource_count.distributed for resource_count in axis_resource_count.values()):
    raise NotImplementedError
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in global_axis_sizes.items()}

  local_mesh = resource_env.physical_mesh.local_mesh
  local_mesh_shape = local_mesh.shape
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)

  # Avals as seen inside the xmapped function: named axes inserted as
  # explicit dimensions, then tiled over the local mesh.
  local_avals = [pxla.tile_aval_nd(
                    local_mesh_shape, aval_mesh_in_axes,
                    _insert_aval_axes(v.aval, aval_in_axes, local_axis_sizes))
                 for v, aval_in_axes, aval_mesh_in_axes
                 in zip(call_jaxpr.invars, in_axes, mesh_in_axes)]
  # We have to substitute before tracing, because we want the vectorized
  # axes to be used in the jaxpr.
  resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  # NOTE: We don't extend the resource env with the mesh shape, because those
  # resources are already in scope! It's the outermost xmap that introduces
  # them!
  vectorized_jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(f, local_avals)
  _check_out_avals_vs_out_axes(out_avals, out_axes, global_axis_sizes)
  assert not consts

  tiled_ins = (
    xla.lower_fun(
        partial(_tile, in_axes=arg_in_axes, axis_sizes=local_mesh_shape),
        new_style=True, multiple_results=False)(ctx, [aval], None, in_node)[0]
    if aval is not core.abstract_unit else in_node
    for aval, in_node, arg_in_axes
    in zip(avals_in, in_nodes, mesh_in_axes))

  # NOTE: We don't extend the resource env with the mesh shape, because those
  # resources are already in scope! It's the outermost xmap that introduces
  # them!
  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  sub_ctx = ctx.replace(
      name_stack=xla.extend_name_stack(ctx.name_stack,
                                       xla.wrap_name(name, 'xmap')))
  tiled_outs = xla.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (), *tiled_ins)

  outs = [
      xla.lower_fun(
          partial(_untile, out_axes=ans_out_axes, axis_sizes=local_mesh_shape,
                  platform=ctx.platform),
          new_style=True, multiple_results=False)(
              ctx, [v.aval], None, tiled_out
          )[0]
      if v.aval is not core.abstract_unit else tiled_out
      for v, tiled_out, ans_out_axes
      in zip(vectorized_jaxpr.outvars, tiled_outs, mesh_out_axes)]
  return outs
def _tile_base_indices(tile_shape, axes, axis_sizes):
  """Compute the start offset of this device's tile within the full array.

  Returns one int32 scalar per dimension of `tile_shape`. For each positional
  dimension the named axes assigned to it (in `axes`) are combined into a
  mixed-radix linear index using `lax.axis_index`.
  """
  zero = np.zeros((), dtype=np.int32)
  linear_idxs = [zero] * len(tile_shape)
  strides = [1] * len(tile_shape)
  # Walk the axes in reverse so later entries get the smaller strides.
  for name, axis in reversed(axes.items()):
    axis_index = lax.axis_index(name)
    stride_c = np.array(strides[axis], np.int32)
    if linear_idxs[axis] is zero and strides[axis] == 1:
      # Fast path: the first unit-stride contribution needs no arithmetic.
      linear_idxs[axis] = axis_index
    else:
      linear_idxs[axis] = lax.add(linear_idxs[axis],
                                  lax.mul(axis_index, stride_c))
    strides[axis] *= axis_sizes[name]
  # Scale each linear index by its tile extent to get element offsets.
  # NOTE: identity comparison with `zero` distinguishes "never written".
  return [zero if linear_idx is zero else
          lax.mul(linear_idx, np.array(tile_dim_size, np.int32))
          for linear_idx, tile_dim_size in zip(linear_idxs, tile_shape)]
def _tile(x, in_axes, axis_sizes):
  """Slice out this device's tile of `x` along the dimensions in `in_axes`."""
  if not in_axes:
    return x
  slice_shape = list(x.shape)
  for axis_name, dim in in_axes.items():
    num_tiles = axis_sizes[axis_name]
    assert slice_shape[dim] % num_tiles == 0
    slice_shape[dim] //= num_tiles
  start_indices = _tile_base_indices(slice_shape, in_axes, axis_sizes)
  return lax.dynamic_slice(x, start_indices, slice_shape)
# TODO(b/110096942): more efficient gather
def _untile(x, out_axes, axis_sizes, platform):
  """Reassemble the full array from per-device tiles.

  Each device scatters its tile into a zero-padded full-size array at the
  offsets from `_tile_base_indices`, then a psum over the tiling axes merges
  all tiles.
  """
  # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU
  convert_bool = (np.issubdtype(x.dtype, np.bool_)
                  and platform in ('cpu', 'gpu'))
  if convert_bool:
    x = lax.convert_element_type(x, np.dtype(np.float32))

  tile_shape = list(x.shape)
  shape = list(tile_shape)
  for name, axis in out_axes.items():
    shape[axis] *= axis_sizes[name]
  base_idxs = _tile_base_indices(tile_shape, out_axes, axis_sizes)

  padded = lax.broadcast(np.array(0, x.dtype), shape)
  padded = lax.dynamic_update_slice(padded, x, base_idxs)
  out = lax.psum(padded, tuple(out_axes.keys()))

  # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU
  if convert_bool:
    nonzero = lax.ne(out, np.array(0, dtype=np.float32))
    out = lax.convert_element_type(nonzero, np.dtype(np.bool_))
  return out
def _xmap_translation_rule_spmd(ctx, avals_in, avals_out, *global_in_nodes,
                                call_jaxpr, name,
                                in_axes, out_axes, donated_invars,
                                global_axis_sizes,
                                spmd_in_axes, spmd_out_axes,
                                positional_semantics,
                                axis_resources, resource_env, backend):
  """XLA translation of xmap through the SPMD partitioner.

  Legacy XLA-builder counterpart of `_xmap_lowering_rule_spmd`: traces the
  vectorized computation at global shapes and sets sharding protos on the
  inputs and outputs.
  """
  xla.check_backend_matches(backend, ctx.platform)
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)

  resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, in_axes, out_axes)
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
  mesh = resource_env.physical_mesh
  f = pxla.vtile_by_mesh(f, mesh, mesh_in_axes, mesh_out_axes)

  # XXX: We modify mesh_in_axes and mesh_out_axes here
  def add_spmd_axes(flat_mesh_axes: Sequence[pxla.ArrayMapping],
                    flat_extra_axes: Optional[Sequence[Sequence[Sequence[pxla.MeshAxisName]]]]):
    # Merge the extra per-dimension SPMD axes (if any) into the mesh axis
    # maps in place, asserting they don't collide with existing assignments.
    if flat_extra_axes is None:
      return
    for axes, extra in zip(flat_mesh_axes, flat_extra_axes):
      for dim, dim_extra_axis in enumerate(extra):
        if dim_extra_axis is None: continue
        assert dim_extra_axis not in axes
        assert not config.jax_enable_checks or all(v != dim for v in axes.values())
        axes[dim_extra_axis] = dim
  add_spmd_axes(mesh_in_axes, spmd_in_axes)
  add_spmd_axes(mesh_out_axes, spmd_out_axes)

  # NOTE: We don't extend the resource env with the mesh shape, because those
  # resources are already in scope! It's the outermost xmap that introduces
  # them!
  # Recover global input avals from the XLA shapes of the input nodes.
  global_in_avals = [
      core.ShapedArray(xla_type.dimensions(), xla_type.numpy_dtype())
      for in_node in global_in_nodes
      for xla_type in (ctx.builder.get_shape(in_node),)
  ]
  vectorized_jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(
      f, global_in_avals)
  assert not consts

  global_sharding_spec = pxla.mesh_sharding_specs(mesh.shape, mesh.axis_names)

  def set_sharding(node, aval, aval_axes):
    # Attach the sharding proto to `node`; unless fixed sharding was
    # requested, leave the unmapped dimensions unconstrained.
    sharding_proto = global_sharding_spec(aval, aval_axes).sharding_proto()
    if not config.experimental_xmap_ensure_fixed_sharding:
      # Do not specify sharding on other dimensions.
      unspecified_dims = set(range(aval.ndim))
      for axis in set(aval_axes.values()):
        unspecified_dims.remove(axis)
      return xla.set_sharding_proto(ctx.builder, node, sharding_proto,
                                    unspecified_dims)
    else:
      return xla.set_sharding_proto(ctx.builder, node, sharding_proto)

  sharded_global_in_nodes = [
      set_sharding(node, aval, aval_axes) if aval_axes else node for node, aval,
      aval_axes in zip(global_in_nodes, global_in_avals, mesh_in_axes)
  ]

  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  sub_ctx = ctx.replace(
      name_stack=xla.extend_name_stack(ctx.name_stack,
                                       xla.wrap_name(name, 'xmap')))
  global_out_nodes = xla.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (),
                                       *sharded_global_in_nodes)

  sharded_global_out_nodes = [
      set_sharding(node, aval, aval_axes) if aval_axes else node for node, aval,
      aval_axes in zip(global_out_nodes, global_out_avals, mesh_out_axes)
  ]

  return sharded_global_out_nodes
# -------- helper functions --------
def _delete_aval_axes(aval, axes: AxisNamePos, global_axis_sizes):
  """Move the mapped positional dimensions of `aval` into its named shape."""
  assert isinstance(aval, core.ShapedArray)
  new_shape = list(aval.shape)
  new_named_shape = dict(aval.named_shape)
  # Delete higher dimensions first so earlier positions stay valid.
  for name, dim in sorted(axes.items(), key=lambda item: item[1], reverse=True):
    new_named_shape[name] = global_axis_sizes[name]
    del new_shape[dim]
  return aval.update(shape=tuple(new_shape), named_shape=new_named_shape)
def _insert_aval_axes(aval, axes: AxisNamePos, local_axis_sizes):
  """Materialize the named axes of `aval` as explicit positional dimensions."""
  assert isinstance(aval, core.ShapedArray)
  new_shape = list(aval.shape)
  new_named_shape = dict(aval.named_shape)
  # Insert lower positions first so later positions refer to the grown shape.
  for name, dim in sorted(axes.items(), key=lambda item: item[1]):
    new_shape.insert(dim, local_axis_sizes[name])
    new_named_shape.pop(name, None)  # The name might be missing --- it's a broadcast.
  return aval.update(shape=tuple(new_shape), named_shape=new_named_shape)
class ResourceCount(namedtuple('ResourceCount', ['semantics', 'nglobal', 'nlocal'])):
  """Global/local tallies of the resources assigned to one named axis.

  Fields:
    semantics: the _PositionalSemantics governing size conversions.
    nglobal: total number of assigned resources (product of global resource sizes).
    nlocal: number of assigned resources on this host (product of local sizes).
  """

  def to_local(self, global_size):
    """Convert a global axis size to this host's local size."""
    if self.semantics == _PositionalSemantics.GLOBAL:
      return global_size
    elif self.semantics == _PositionalSemantics.LOCAL:
      assert global_size % self.nglobal == 0, "Please report this issue!"
      return (global_size // self.nglobal) * self.nlocal
    else:
      # BUG FIX: this message was missing its f-prefix (it printed the literal
      # braces), and it referenced the module-level thread-local holder
      # instead of the value actually being switched on.
      raise AssertionError(f"Unhandled case {self.semantics}")

  def to_global(self, local_size):
    """Convert this host's local axis size to the global size."""
    if self.semantics == _PositionalSemantics.GLOBAL:
      return local_size
    elif self.semantics == _PositionalSemantics.LOCAL:
      assert local_size % self.nlocal == 0, "Please report this issue!"
      return (local_size // self.nlocal) * self.nglobal
    else:
      raise AssertionError(f"Unhandled case {self.semantics}")

  @property
  def distributed(self):
    # True when some of the assigned resources live on other hosts.
    return self.nglobal != self.nlocal
def _get_axis_resource_count(semantics, axis_resources, resource_env) -> Dict[ResourceAxisName, ResourceCount]:
  """Build a ResourceCount (global and local resource totals) per named axis.

  Args:
    semantics: the _PositionalSemantics under which sizes are interpreted.
    axis_resources: mapping from named axes to their assigned resource axes.
    resource_env: provides the global and local resource shapes.
  """
  global_res_shape = resource_env.shape
  local_res_shape = resource_env.local_shape
  # BUG FIX: np.prod cannot consume a lazy `map` object (it is converted to a
  # 0-d object array rather than a sequence of sizes), so materialize the
  # sizes into lists before taking the product.
  return {axis: ResourceCount(semantics,
                              int(np.prod(list(map(global_res_shape.get, resources)),
                                          dtype=np.int64)),
                              int(np.prod(list(map(local_res_shape.get, resources)),
                                          dtype=np.int64)))
          for axis, resources in axis_resources.items()}
def _get_axis_sizes(args_flat: Iterable[Any],
                    in_axes_flat: Iterable[AxisNamePos],
                    global_axis_sizes: Dict[AxisName, int],
                    axis_resource_count: Dict[AxisName, ResourceCount]):
  """Infer the global size of every named axis from the argument shapes.

  Checks that each mapped dimension exists, is divisible by the local
  resource count of its axis, and that sizes inferred from different
  arguments agree.

  Returns:
    A FrozenDict extending `global_axis_sizes` with the inferred sizes.
  """
  global_axis_sizes = dict(global_axis_sizes)
  for arg, in_axes in zip(args_flat, in_axes_flat):
    for name, dim in in_axes.items():
      resources = axis_resource_count[name]
      # Only qualify sizes as "local" in messages when hosts actually differ.
      local_ = "local " if resources.distributed else ""
      try:
        local_dim_size = arg.shape[dim]
      except IndexError:
        # TODO(apaszke): Handle negative indices. Check for overlap too!
        raise ValueError(f"One of xmap arguments has an in_axes specification of "
                         f"{in_axes.user_repr}, which implies that it has at least "
                         f"{max(in_axes.values()) + 1} dimensions, but the argument "
                         f"has rank {arg.ndim}")
      if local_dim_size % resources.nlocal != 0:
        raise ValueError(f"One of xmap arguments has an in_axes specification of "
                         f"{in_axes.user_repr}, which implies that its size in dimension "
                         f"{dim} ({local_dim_size}) should be divisible by the number of "
                         f"{local_}resources assigned to axis {name} ({resources.nlocal})")
      global_dim_size = resources.to_global(local_dim_size)
      if name in global_axis_sizes:
        # The axis size was already inferred (or given); they must agree.
        expected_local_dim_size = resources.to_local(global_axis_sizes[name])
        if local_dim_size != expected_local_dim_size:
          raise ValueError(f"The {local_}size of axis {name} was previously inferred to be "
                           f"{expected_local_dim_size}, but found an argument of shape {arg.shape} "
                           f"with in_axes specification {in_axes.user_repr}. Shape mismatch "
                           f"occurs in dimension {dim}: {local_dim_size} != {expected_local_dim_size}")
      global_axis_sizes[name] = global_dim_size
  return FrozenDict(global_axis_sizes)
def lookup_exactly_one_of(d: AxisNamePos, names: Set[AxisName]) -> Optional[int]:
  """Return the value for the unique member of `names` present in `d`.

  Returns None when none of `names` occurs in `d`; raises ValueError when
  more than one does.
  """
  hits = [name for name in names if name in d]
  if len(hits) > 1:
    raise ValueError("An input was mapped to the same resource twice")
  return d[hits[0]] if hits else None
@lu.transformation
def hide_mapped_axes(flat_in_axes, flat_out_axes, *flat_args):
  """Linear-util transformation that squeezes the mapped (size-1) positional
  dimensions off the inputs before calling the wrapped function, and
  unsqueezes them back onto the outputs afterwards."""
  def _squeeze_mapped_axes(arg, axes: AxisNamePos):
    # Squeeze from the back so earlier dimension indices stay valid.
    for dim in sorted(axes.values(), reverse=True):
      arg = arg.squeeze(dim)
    return arg

  def _unsqueeze_mapped_axes(out, axes: AxisNamePos):
    try:
      return jnp.expand_dims(out, tuple(axes.values()))
    except ValueError as e:
      # Improve the axis out of bounds errors
      # TODO(apaszke): Handle negative indices. Check for overlap too!
      if e.args[0].startswith('axis') and 'out of bounds' in e.args[0]:
        raise ValueError(f"One of xmap outputs has an out_axes specification of "
                         f"{axes.user_repr}, which requires the result of the xmapped "
                         f"function to have at least {max(axes.values()) - len(axes) + 1} "
                         f"positional dimensions, but it only has {out.ndim}")
      raise

  squeezed_args = map(_squeeze_mapped_axes, flat_args, flat_in_axes)
  # Generator protocol of lu.transformation: yield the args to call the
  # wrapped function with, receive its outputs, yield the final results.
  flat_outputs = yield squeezed_args, {}
  yield map(_unsqueeze_mapped_axes, flat_outputs, flat_out_axes)
def _jaxpr_resources(jaxpr, resource_env) -> Set[ResourceAxisName]:
  """Return the set of resource axes used by any xmap nested inside `jaxpr`.

  Raises:
    RuntimeError: if a nested xmap uses a different physical mesh.
  """
  if isinstance(jaxpr, core.ClosedJaxpr):
    jaxpr = jaxpr.jaxpr
  assert isinstance(jaxpr, core.Jaxpr)
  used_resources = set()
  for eqn in jaxpr.eqns:
    if eqn.primitive is xmap_p:
      if eqn.params['resource_env'].physical_mesh != resource_env.physical_mesh:
        raise RuntimeError("Changing the physical mesh is not allowed inside xmap.")
      used_resources |= set(it.chain(*eqn.params['axis_resources'].values()))
    # Recurse into any sub-jaxprs stored in the equation's params.
    updates = core.traverse_jaxpr_params(
        partial(_jaxpr_resources, resource_env=resource_env), eqn.params).values()
    for update in updates:
      used_resources |= update
  return used_resources
def _to_resource_axes(axes_specs: Sequence[AxisNamePos],
                      axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]]):
  """
  Convert in/out_axes parameters ranging over logical dimensions to
  ones that range over resource dimensions.

  Note that values no longer have to be distinct, as multiple resource
  axes can tile a single positional axes. This is why the result is
  an OrderedDict with an implicit major-to-minor ordering.
  """
  def resource_axes_for(axes: AxisNamePos) -> OrderedDict:
    entries = OrderedDict()
    for logical_axis, pos_axis in axes.items():
      for resource_axis in axis_resources[logical_axis]:
        entries[resource_axis] = pos_axis
    return entries
  return tuple(resource_axes_for(axes) for axes in axes_specs)
def _merge_leading_axis(x, axis: Optional[int]):
  """Fold the leading axis of `x` into dimension `axis` (or drop it if None)."""
  if axis is None:
    # We assume that the output does not vary along the leading axis
    return lax.index_in_dim(x, 0, axis=0, keepdims=False)
  moved = moveaxis(x, 0, axis)
  merged_shape = list(moved.shape)
  merged_shape[axis:axis + 2] = [merged_shape[axis] * merged_shape[axis + 1]]
  return moved.reshape(merged_shape)
def _slice_tile(x, dim: Optional[int], i, n: int):
  """Selects an `i`th (out of `n`) tiles of `x` along `dim`."""
  if dim is None:
    return x
  tile_size, remainder = divmod(x.shape[dim], n)
  assert remainder == 0, "Please open a bug report!"
  return lax.dynamic_slice_in_dim(x, i * tile_size, slice_size=tile_size, axis=dim)
def _unzip_axis_resources(axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]],
                          resource_env: ResourceEnv):
  """Splits axis_resources into separate dicts for physical and loop resources."""
  physical_axis_resources = {}
  loop_axis_resources = {}
  loop_resource_axes = resource_env.loop_resource_axes
  for axis, raxes in axis_resources.items():
    # Physical (mesh) resources must form a prefix of the assignment; the
    # suffix starting at the first loop resource must be all loop resources.
    split = next((i for i, raxis in enumerate(raxes)
                  if raxis in loop_resource_axes), len(raxes))
    physical_axis_resources[axis] = raxes[:split]
    loop_axis_resources[axis] = raxes[split:]
    if any(name not in loop_resource_axes for name in raxes[split:]):
      raise NotImplementedError("Loop resources cannot appear before mesh axes "
                                "in the resource_axis argument")
  return physical_axis_resources, loop_axis_resources
def _check_out_avals_vs_out_axes(out_avals: Sequence[core.AbstractValue],
                                 out_axes: Sequence[AxisNamePos],
                                 global_axis_sizes: Dict[AxisName, int]):
  """Verify every output's named shape is fully covered by its out_axes.

  Raises:
    TypeError: when an output is mapped along a named axis introduced by this
      xmap call that its out_axes specification does not mention.
    AssertionError: when a non-array abstract value has non-empty out_axes.
  """
  defined_axes = set(global_axis_sizes)
  for aval, axes in zip(out_avals, out_axes):
    if not isinstance(aval, core.ShapedArray):
      if axes:
        raise AssertionError(f"Only array abstract values can have non-empty "
                             f"out_axes, but {aval} has {axes}")
      continue
    undeclared_axes = (set(aval.named_shape) - set(axes)) & defined_axes
    if undeclared_axes:
      undeclared_axes_str = sorted(str(axis) for axis in undeclared_axes)
      # BUG FIX: the join expression used double quotes inside a
      # double-quoted f-string, which is a SyntaxError on Python < 3.12.
      raise TypeError(f"One of xmap results has an out_axes specification of "
                      f"{axes.user_repr}, but is actually mapped along more axes "
                      f"defined by this xmap call: {', '.join(undeclared_axes_str)}")
# TODO: We should relax this at least for "constructor primitives"
#       such as axis_index or zeros.
def _check_no_loop_collectives(jaxpr, loop_axis_resources):
  """Raise if any equation in `jaxpr` (recursively) references a named axis
  that has loop resources assigned to it."""
  if isinstance(jaxpr, core.ClosedJaxpr):
    jaxpr = jaxpr.jaxpr
  def subst_no_loop(name):
    # Substitution callback used purely for its side effect: it is invoked
    # for every axis name an equation refers to.
    if loop_axis_resources.get(name, ()):
      raise RuntimeError(f"Named axes with loop resources assigned to them cannot "
                         f"be referenced inside the xmapped computation (e.g. in "
                         f"collectives), but `{name}` violates that rule")
    return (name,)
  for eqn in jaxpr.eqns:
    core.subst_axis_names(eqn.primitive, eqn.params, subst_no_loop, traverse=False)
    # Recurse into sub-jaxprs held in the equation's params.
    rec = partial(_check_no_loop_collectives, loop_axis_resources=loop_axis_resources)
    core.traverse_jaxpr_params(rec, eqn.params)
def _fix_inferred_spmd_sharding(jaxpr, resource_env, gen_fresh_name = None):
  """Rewrite `jaxpr` so every equation output passes through a trivial
  sharding_constraint, adding sharding annotations for intermediates
  (see the `experimental_xmap_ensure_fixed_sharding` flag)."""
  from jax.experimental.pjit import sharding_constraint_p, ParsedPartitionSpec
  rec = lambda jaxpr: _fix_inferred_spmd_sharding(jaxpr, resource_env, gen_fresh_name)
  if isinstance(jaxpr, core.ClosedJaxpr):
    return jaxpr.map_jaxpr(rec)
  assert isinstance(jaxpr, core.Jaxpr)
  if gen_fresh_name is None:
    gen_fresh_name = core.gensym([jaxpr])
  new_eqns = []
  for eqn in jaxpr.eqns:
    # Recursively rewrite any nested jaxprs stored in the params.
    new_jaxpr_params = core.traverse_jaxpr_params(rec, eqn.params)
    # Route each original output through a fresh temporary variable...
    tmp_outvars = [gen_fresh_name(v.aval) for v in eqn.outvars]
    new_eqns.append(core.JaxprEqn(eqn.invars, tmp_outvars, eqn.primitive,
                                  dict(eqn.params, **new_jaxpr_params), eqn.source_info))
    # ...and bind it to the original variable via a sharding constraint with
    # an empty partition spec.
    for outvar, tmpvar in zip(eqn.outvars, tmp_outvars):
      new_eqns.append(core.JaxprEqn([tmpvar], [outvar], sharding_constraint_p,
                                    dict(resource_env=resource_env, axis_resources=ParsedPartitionSpec((), ())),
                                    eqn.source_info))
  return core.Jaxpr(jaxpr.constvars, jaxpr.invars, jaxpr.outvars, new_eqns)
def _flatten_axes(what, tree, axes, tupled_args):
  """Flatten `axes` over `tree`, re-raising mismatch errors in terms of the
  user-level axis specifications."""
  try:
    return tuple(flatten_axes(what, tree, axes, tupled_args=tupled_args))
  except ValueError:
    pass
  # Replace the parsed axis specs with their unparsed user representations so
  # the error message doesn't reveal internal details; this call is expected
  # to raise the same mismatch again.
  unparsed = tree_map(lambda parsed: NoQuotesStr(parsed.user_repr), axes)
  flatten_axes(what, tree, unparsed, tupled_args=tupled_args)
  raise AssertionError("Please open a bug request!")  # This should be unreachable
class NoQuotesStr(str):
  """A str whose repr is the bare text, without surrounding quotes."""
  __repr__ = str.__str__
# -------- soft_pmap --------
def soft_pmap(fun: Callable, axis_name: Optional[AxisName] = None, in_axes=0
              ) -> Callable:
  """Experimental pmap-like wrapper implemented via xmap over local devices.

  Maps ``fun`` over axis 0 of its arguments, assigning the mapped axis to a
  one-dimensional 'devices' mesh built from all local devices.

  Args:
    fun: the function to map.
    axis_name: optional name for the mapped axis; a fresh temporary name is
      generated when omitted.
    in_axes: per-argument mapped axis; leaves must be 0 (mapped) or None
      (broadcast).
  """
  warn("soft_pmap is an experimental feature and probably has bugs!")
  _check_callable(fun)
  axis_name = core._TempAxisName(fun) if axis_name is None else axis_name

  if any(axis != 0 for axis in tree_leaves(in_axes)):
    raise ValueError(f"soft_pmap in_axes leaves must be 0 or None, got {in_axes}")
  # Translate 0/None leaves into xmap-style axis dicts ({0: name} / {}),
  # using a proxy object to stand in for the Nones during tree mapping.
  proxy = object()
  in_axes = _replace_nones(proxy, in_axes)
  in_axes = tree_map(lambda i: {i: axis_name} if i is not proxy else {}, in_axes)

  @wraps(fun)
  def f_pmapped(*args, **kwargs):
    mesh_devices = np.array(xb.local_devices())
    with mesh(mesh_devices, ['devices']):
      return xmap(fun, in_axes=in_axes, out_axes={0: axis_name},
                  axis_resources={axis_name: 'devices'})(*args, **kwargs)
  return f_pmapped
# -------- config flags --------
def _thread_local_flag_unsupported(_):
raise RuntimeError("thread-local xmap flags not supported!")
def _clear_compilation_cache(_):
  # Flag-update hook: invalidate cached xmap executables, since the lowering
  # flags change the code that gets compiled.
  make_xmap_callable.cache_clear()  # type: ignore
def _ensure_spmd_and(f):
def update(v):
if v and not config.experimental_xmap_spmd_lowering:
raise RuntimeError("This flag requires enabling the experimental_xmap_spmd_lowering flag")
return f(v)
return update
def _ensure_supports_manual_and(f):
def update(v):
if v and not hasattr(xc.OpSharding.Type, "MANUAL"):
raise RuntimeError("This flag requires a version of jaxlib that supports MANUAL sharding type")
return f(v)
return update
# Register the xmap configuration flags. define_bool_state fails once JAX's
# flags have been parsed, which is surfaced below as an import-order error.
try:
  config.define_bool_state(
      name="experimental_xmap_spmd_lowering",
      default=False,
      help=("When set, multi-device xmap computations will be compiled through "
            "the XLA SPMD partitioner instead of explicit cross-replica collectives. "
            "Not supported on CPU!"),
      update_global_hook=_clear_compilation_cache,
      update_thread_local_hook=_thread_local_flag_unsupported)
  config.define_bool_state(
      name="experimental_xmap_spmd_lowering_manual",
      default=False,
      help=("When set, multi-device xmap computations will be compiled using "
            "the MANUAL partitioning feature of the XLA SPMD partitioner instead of "
            "sharding constraints on vectorized code. "
            "Requires experimental_xmap_spmd_lowering!"),
      update_global_hook=_ensure_supports_manual_and(_ensure_spmd_and(_clear_compilation_cache)),
      update_thread_local_hook=_thread_local_flag_unsupported)
  config.define_bool_state(
      name="experimental_xmap_ensure_fixed_sharding",
      default=False,
      help=("When set and `experimental_xmap_spmd_lowering` is enabled, the lowering will "
            "try to limit the flexibility of the automated SPMD partitioner heuristics "
            "by emitting additional sharding annotations for program intermediates."),
      update_global_hook=_ensure_spmd_and(_clear_compilation_cache),
      update_thread_local_hook=_thread_local_flag_unsupported)
except Exception:
  raise ImportError("jax.experimental.maps has to be imported before JAX flags "
                    "are parsed")
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import contextlib
import numpy as np
import itertools as it
from collections import OrderedDict, abc, namedtuple
from typing import (Callable, Iterable, Tuple, Optional, Dict, Any, Set,
NamedTuple, Union, Sequence)
from warnings import warn
from functools import wraps, partial, partialmethod
from enum import Enum
from jax import numpy as jnp
from jax import core
from jax import linear_util as lu
from jax._src.api import Lowered, _check_callable, _check_arg
from jax._src import dispatch
from jax.tree_util import (tree_flatten, tree_unflatten, all_leaves, tree_map,
tree_leaves)
from jax._src.tree_util import _replace_nones
from jax._src.api_util import (flatten_fun_nokwargs, flatten_axes,
_ensure_index_tuple, donation_vector,
shaped_abstractify)
from jax._src import source_info_util
from jax._src.config import config
from jax.errors import JAXTypeError
from jax.interpreters import mlir
from jax.interpreters import partial_eval as pe
from jax.interpreters import pxla
from jax.interpreters import xla
from jax.interpreters import batching
from jax.interpreters import ad
from jax._src.lib import xla_bridge as xb
from jax._src.lib import xla_client as xc
from jax._src.util import (safe_map, safe_zip, HashableFunction,
as_hashable_function, unzip2, distributed_debug_log,
tuple_insert, moveaxis, split_list, wrap_name)
from jax import lax
# Shadow the builtins with the length-checking variants from jax._src.util;
# keep the original builtin around as `unsafe_map` for the few places where
# zipping sequences of different lengths is intentional.
map, unsafe_map = safe_map, map
zip = safe_zip

# Short alias for the XLA client op set.
xops = xc.ops
class _PositionalSemantics(Enum):
  """Indicates whether the positional shapes of inputs should be interpreted as
  global or local with respect to the multi-host mesh.

  While named axes are always associated with global sizes, the outermost pjit
  is the boundary between the local shapes in the outer scope and global
  positional shapes in its inner scope. pjits nested inside that one should not
  attempt to increase the sizes of avals again, and xmap has to take this into
  account when inferring the global size of a named axis.
  """
  LOCAL = 0   # Positional shapes are per-host (local) shapes.
  GLOBAL = 1  # Positional shapes already describe the whole (global) array.
class _PSThreadLocalState(threading.local):
  """Thread-local holder for the current positional-semantics mode.

  Every thread starts out in LOCAL mode; outer transforms flip `val` to
  GLOBAL when positional shapes should be treated as global.
  """
  def __init__(self):
    self.val = _PositionalSemantics.LOCAL

# Module-level, per-thread positional-semantics state consulted by xmap.
_positional_semantics = _PSThreadLocalState()
class FrozenDict(abc.Mapping):
  """An immutable, hashable read-only view over a dict.

  Used so that axis-name mappings can serve as (hashable) primitive
  parameters and cache keys. Hashing requires all keys and values to be
  hashable.
  """
  def __init__(self, *args, **kwargs):
    self.contents = dict(*args, **kwargs)

  def __iter__(self):
    return iter(self.contents)

  def __len__(self):
    return len(self.contents)

  def __getitem__(self, name):
    return self.contents[name]

  def __eq__(self, other):
    return isinstance(other, FrozenDict) and self.contents == other.contents

  def __hash__(self):
    # Hash a frozenset of the items rather than a tuple: dict equality (and
    # hence __eq__ above) ignores insertion order, so hashing
    # tuple(self.contents.items()) would let two equal FrozenDicts hash
    # differently, violating the eq/hash contract and defeating caching.
    return hash(frozenset(self.contents.items()))

  def __repr__(self):
    return f"FrozenDict({self.contents})"
# Multi-dimensional generalized map
AxisName = core.AxisName
ResourceAxisName = AxisName  # Different name just for documentation purposes
Mesh = pxla.Mesh

class _Loop(NamedTuple):
  # A serial loop resource: a (name, iteration count) pair. Because this is a
  # 2-tuple, dict.update() can consume a sequence of _Loops directly (see
  # ResourceEnv.shape).
  name: ResourceAxisName
  length: int
class ResourceEnv(NamedTuple):
  """The set of resources available to xmap: a physical device mesh plus any
  serial loop resources registered via serial_loop/SerialLoop.

  Immutable; with_mesh/with_extra_loop return updated copies.
  """
  physical_mesh: Mesh
  loops: Tuple[_Loop, ...]
  def with_mesh(self, mesh: Mesh):
    """Return a copy with the physical mesh replaced by ``mesh``.

    Raises ValueError if any new mesh axis name collides with a resource
    axis other than the axes of the mesh being replaced (i.e. a loop axis).
    """
    overlap = set(mesh.axis_names) & (self.resource_axes - set(self.physical_mesh.axis_names))
    if overlap:
      raise ValueError(f"Cannot update the mesh of the current resource "
                       f"environment. The new mesh shadows already defined axes "
                       f"{show_axes(overlap)}")
    return self._replace(physical_mesh=mesh)
  def with_extra_loop(self, loop: _Loop):
    """Return a copy with ``loop`` appended; its name must be unused."""
    if loop.name in self.resource_axes:
      raise ValueError(f"Cannot extend the resource environment with loop named "
                       f"`{loop.name}`. An axis of this name is already defined!")
    return self._replace(loops=self.loops + (loop,))
  @property
  def physical_resource_axes(self) -> Set[ResourceAxisName]:
    return set(self.physical_mesh.axis_names)
  @property
  def loop_resource_axes(self) -> Set[ResourceAxisName]:
    return set(loop.name for loop in self.loops)
  @property
  def resource_axes(self) -> Set[ResourceAxisName]:
    # All resource axis names: mesh axes and loop axes combined.
    return self.physical_resource_axes | self.loop_resource_axes
  @property
  def shape(self):
    # Global axis-name -> size mapping. NOTE(review): this mutates the mapping
    # returned by Mesh.shape — assumes that property returns a fresh dict each
    # call; confirm against pxla.Mesh. _Loop being a (name, length) 2-tuple
    # makes it directly consumable by dict.update.
    shape = self.physical_mesh.shape
    shape.update(self.loops)
    return shape
  @property
  def local_shape(self):
    # Same as `shape`, but using only this host's slice of the mesh.
    shape = self.physical_mesh.local_mesh.shape
    shape.update(self.loops)
    return shape
  def __repr__(self):
    return f"ResourceEnv({self.physical_mesh!r}, {self.loops!r})"
# The default environment: a 0-d (empty) mesh and no loop resources.
EMPTY_ENV = ResourceEnv(Mesh(np.empty((), dtype=object), ()), ())

class _ThreadResourcesLocalState(threading.local):
  """Thread-local holder for the active ResourceEnv (see mesh/serial_loop)."""
  def __init__(self):
    self.env = EMPTY_ENV

# Per-thread resource environment consulted when an xmapped function is called.
thread_resources = _ThreadResourcesLocalState()
class SerialLoop:
  """Create an anonymous serial loop resource for use in a single xmap axis.

  A use of :py:class:`SerialLoop` in :py:func:`xmap`'s ``axis_resources``
  extends the resource environment with a new serial loop with a unique
  unspecified name, that will only be used to partition the axis that
  used a given instance.

  This is unlike :py:func:`serial_loop`, which makes it possible to iterate
  jointly over chunks of multiple axes (with the usual requirement that they
  do not coincide in a named shape of any value in the program).

  Example::

    # Processes `x` in a vectorized way, but in 20 micro-batches.
    xmap(f, in_axes=['i'], out_axes=['i'], axis_resources={'i': SerialLoop(20)})(x)

    # Computes the result in a vectorized way, but in 400 micro-batches,
    # once for each coordinate (0, 0) <= (i, j) < (20, 20). Each `SerialLoop`
    # creates a fresh anonymous loop.
    xmap(h, in_axes=(['i'], ['j']), out_axes=['i', 'j'],
         axis_resources={'i': SerialLoop(20), 'j': SerialLoop(20)})(x, y)
  """
  # Number of sequential iterations this loop performs.
  length: int

  def __init__(self, length):
    self.length = length

  def __eq__(self, other):
    # Guard on the type so comparing against unrelated objects returns False
    # instead of raising AttributeError on a missing `.length`.
    return isinstance(other, SerialLoop) and self.length == other.length

  def __hash__(self):
    return hash(self.length)
@contextlib.contextmanager
def serial_loop(name: ResourceAxisName, length: int):
  """Register a serial loop resource called ``name`` within this context.

  Like :py:func:`mesh`, this extends the resource environment with a new
  resource axis, but one backed by sequential execution rather than hardware:
  assigning an :py:func:`xmap` axis to ``name`` via ``axis_resources`` makes
  the mapped body execute ``length`` times, with each step processing only
  the corresponding slice of the inputs mapped along that logical axis.

  Compared to :py:func:`vmap` this can substantially reduce peak memory use,
  since intermediate values are not materialized for the whole batch at once.
  Note that collectives over loop axes are not supported, so they are less
  versatile than physical mesh axes.

  Args:
    name: Name of the loop in the resource environment.
    length: Number of iterations.

  Example::

    with serial_loop('l', 4):
      out = xmap(
        lambda x: jnp.sin(x) * 5,  # This will be called 4 times with different
                                   # slices of x.
        in_axes=['i'], out_axes=['i'],
        axis_resources={'i': 'l'})(x)
  """
  prev_env: ResourceEnv = getattr(thread_resources, "env", EMPTY_ENV)
  thread_resources.env = prev_env.with_extra_loop(_Loop(name, length))
  try:
    yield
  finally:
    # Always restore the previous environment, even if the body raised.
    thread_resources.env = prev_env
@contextlib.contextmanager
def mesh(devices: np.ndarray, axis_names: Sequence[ResourceAxisName]):
  """Declare the hardware resources available in the scope of this manager.

  Every name in ``axis_names`` becomes a valid resource name inside the
  managed block, usable e.g. in the ``axis_resources`` argument of
  :py:func:`xmap`.

  If you are compiling in multiple threads, make sure that the
  ``with mesh`` context manager is inside the function that the threads will
  execute.

  Args:
    devices: A NumPy ndarray object containing JAX device objects (as
      obtained e.g. from :py:func:`jax.devices`).
    axis_names: A sequence of resource axis names to be assigned to the
      dimensions of the ``devices`` argument. Its length should match the
      rank of ``devices``.

  Example::

    devices = np.array(jax.devices())[:4].reshape((2, 2))
    with mesh(devices, ('x', 'y')):  # declare a 2D mesh with axes 'x' and 'y'
      distributed_out = xmap(
        jnp.vdot,
        in_axes=({0: 'left', 1: 'right'}),
        out_axes=['left', 'right', ...],
        axis_resources={'left': 'x', 'right': 'y'})(x, x.T)
  """
  prev_env: ResourceEnv = getattr(thread_resources, "env", EMPTY_ENV)
  new_mesh = Mesh(np.asarray(devices, dtype=object), axis_names)
  thread_resources.env = prev_env.with_mesh(new_mesh)
  try:
    yield
  finally:
    # Always restore the previous environment, even if the body raised.
    thread_resources.env = prev_env
# Monotonically increasing counter backing fresh_resource_name.
_next_resource_id = 0

class _UniqueResourceName:
  """An opaque, process-unique resource axis name.

  Identity is determined solely by ``uid``; ``tag`` is kept only to make
  reprs readable.
  """
  def __init__(self, uid, tag=None):
    self.uid = uid
    self.tag = tag

  def __eq__(self, other):
    if type(other) is not _UniqueResourceName:
      return False
    return self.uid == other.uid

  def __hash__(self):
    return hash(self.uid)

  def __repr__(self):
    return f"<UniqueResource {self.tag} {self.uid}>"

def fresh_resource_name(tag=None):
  """Mint a new resource name that is guaranteed not to equal any other."""
  global _next_resource_id
  name = _UniqueResourceName(_next_resource_id, tag)
  _next_resource_id += 1
  return name
# This is really a Dict[AxisName, int], but we don't define a
# pytree instance for it, so that it is treated as a leaf.
class AxisNamePos(FrozenDict):
  """A parsed in_axes/out_axes entry: maps axis names to positional dims.

  `user_repr` preserves the original user-facing spelling of the spec for
  error messages.
  """
  user_repr: str
  # Class-level default; overridden per-instance by AxisNamePosWithRank when
  # the spec pins an exact rank (i.e. it did not end with an ellipsis).
  expected_rank: Optional[int] = None

  def __init__(self, *args, user_repr, **kwargs):
    super().__init__(*args, **kwargs)
    self.user_repr = user_repr

class AxisNamePosWithRank(AxisNamePos):
  """An AxisNamePos that additionally asserts the exact rank of its array."""
  def __init__(self, *args, expected_rank, **kwargs):
    super().__init__(*args, **kwargs)
    self.expected_rank = expected_rank
# str(Ellipsis) == 'Ellipsis', which is really annoying, so in error messages
# we substitute this stand-in whose repr is the three dots users actually wrote.
class DotDotDotRepr:
  """Placeholder object that renders as ``...`` in user-facing axis specs."""
  def __repr__(self):
    return '...'
def _parse_entry(arg_name, entry):
# Dictionaries mapping axis names to positional axes
if isinstance(entry, dict) and all(isinstance(v, int) for v in entry.keys()):
result = AxisNamePos(((name, axis) for axis, name in entry.items()),
user_repr=str(entry))
num_mapped_dims = len(entry)
# Non-empty lists or tuples that optionally terminate with an ellipsis
elif isinstance(entry, (tuple, list)):
if entry and entry[-1] == ...:
constr = AxisNamePos
entry = entry[:-1]
tail = [DotDotDotRepr()] if isinstance(entry, list) else (DotDotDotRepr(),)
user_repr = str(entry + tail)
else:
constr = partial(AxisNamePosWithRank, expected_rank=len(entry))
user_repr = str(entry)
result = constr(((name, axis) for axis, name in enumerate(entry)
if name is not None),
user_repr=user_repr)
num_mapped_dims = sum(name is not None for name in entry)
else:
raise TypeError(f"""\
Value mapping specification in xmap {arg_name} pytree can be either:
- lists of axis names (possibly ending with the ellipsis object: ...)
- dictionaries that map positional axes (integers) to axis names (e.g. {2: 'name'})
but got: {entry}""")
if len(result) != num_mapped_dims:
raise ValueError(f"Named axes should be unique within each {arg_name} argument "
f"specification, but one them is: {entry}")
for axis in result.values():
if axis < 0:
raise ValueError(f"xmap doesn't support negative axes in {arg_name}")
return result
def _is_axes_leaf(entry):
if isinstance(entry, dict) and all_leaves(entry.values()):
return True
# NOTE: `None`s are not considered leaves by `all_leaves`
if isinstance(entry, (tuple, list)) and all_leaves(v for v in entry if v is not None):
return True
return False
def _prepare_axes(axes, arg_name):
  """Parse a user-provided axes pytree into AxisNamePos leaves.

  Returns a triple ``(pytree, flat_entries, treedef)`` where ``pytree``
  mirrors ``axes`` with every leaf replaced by its parsed form,
  ``flat_entries`` is the flat list of parsed leaves, and ``treedef`` is the
  structure of ``axes``.
  """
  raw_entries, treedef = tree_flatten(axes, is_leaf=_is_axes_leaf)
  parsed = [_parse_entry(arg_name, e) for e in raw_entries]
  return tree_unflatten(treedef, parsed), parsed, treedef
# A resource is either a named mesh/loop axis or an anonymous SerialLoop;
# each logical axis may be assigned one resource or a tuple of them.
Resource = Union[ResourceAxisName, SerialLoop]
ResourceSet = Union[Resource, Tuple[Resource, ...]]

# TODO: Some syntactic sugar to make the API more usable in a single-axis case?
# TODO: Are the resource axes scoped lexically or dynamically? Dynamically for now!
def xmap(fun: Callable,
         in_axes,
         out_axes,
         *,
         axis_sizes: Dict[AxisName, int] = {},
         axis_resources: Dict[AxisName, ResourceSet] = {},
         donate_argnums: Union[int, Sequence[int]] = (),
         backend: Optional[str] = None):
  """Assign a positional signature to a program that uses named array axes.

  .. warning::
    This is an experimental feature and the details can change at
    any time. Use at your own risk!

  .. warning::
    This docstring is aspirational. Not all features of the named axis
    programming model have been implemented just yet.

  The usual programming model of JAX (or really NumPy) associates each array
  with two pieces of metadata describing its type: the element type (``dtype``)
  and the ``shape``. :py:func:`xmap` extends this model by adding support for
  *named axes*. In particular, each array used in a function wrapped by
  :py:func:`xmap` can additionally have a non-empty ``named_shape`` attribute,
  which can be used to query the set of named axes (introduced by
  :py:func:`xmap`) appearing in that value along with their shapes.
  Furthermore, in most places where positional axis indices are allowed (for
  example the `axes` arguments in :py:func:`sum`), bound axis names are also
  accepted. The :py:func:`einsum` language is extended inside :py:func:`xmap`
  to additionally allow contractions that involve named axes. Broadcasting of
  named axes happens *by name*, i.e. all axes with equal names are expected to
  have equal shapes in all arguments of a broadcasting operation, while the
  result has a (set) union of all named axes. The positional semantics of the
  program remain unchanged, and broadcasting still implicitly right-aligns
  positional axes for unification. For an extended description of the
  :py:func:`xmap` programming model, please refer to the :py:func:`xmap`
  tutorial notebook in main JAX documentation.

  Note that since all top-level JAX expressions are interpreted in the NumPy
  programming model, :py:func:`xmap` can also be seen as an adapter that
  converts a function that uses named axes (including in arguments and returned
  values) into one that takes and returns values that only have positional
  axes.

  The default lowering strategy of :py:func:`xmap` converts all named axes into
  positional axes, working similarly to multiple applications of
  :py:func:`vmap`. However, this behavior can be further customized by the
  ``axis_resources`` argument. When specified, each axis introduced by
  :py:func:`xmap` can be assigned to one or more *resource axes*. Those include
  the axes of the hardware mesh, as defined by the :py:func:`mesh` context
  manager. Each value that has a named axis in its ``named_shape`` will be
  partitioned over all mesh axes that axis is assigned to. Hence,
  :py:func:`xmap` can be seen as an alternative to :py:func:`pmap` that also
  exposes a way to automatically partition the computation over multiple
  devices.

  .. warning::
    While it is possible to assign multiple axis names to a single resource axis,
    care has to be taken to ensure that none of those named axes co-occur in a
    ``named_shape`` of any value in the named program. At the moment this is
    **completely unchecked** and will result in **undefined behavior**. The
    final release of :py:func:`xmap` will enforce this invariant, but it is a
    work in progress.

    Note that you do not have to worry about any of this for as long as no
    resource axis is repeated in ``axis_resources.values()``.

  Note that any assignment of ``axis_resources`` doesn't ever change the
  results of the computation, but only how it is carried out (e.g. how many
  devices are used). This makes it easy to try out various ways of
  partitioning a single program in many distributed scenarios (both small- and
  large-scale), to maximize the performance. As such, :py:func:`xmap` can be
  seen as a way to seamlessly interpolate between :py:func:`vmap` and
  :py:func:`pmap`-style execution.

  Args:
    fun: Function that uses named axes. Its arguments and return
      value should be arrays, scalars, or (nested) standard Python containers
      (tuple/list/dict) thereof (in general: valid pytrees).
    in_axes: A Python object with the same container (pytree) structure as the
      signature of arguments to ``fun``, but with a positional-to-named axis
      mapping in place of every array argument. The valid positional-to-named
      mappings are: (1) a ``Dict[int, AxisName]`` specifying that a positional
      dimensions given by dictionary keys are to be converted to named axes
      of given names (2) a list of axis names that ends with the Ellipsis object
      (``...``) in which case a number of leading positional axes of the argument
      will be converted into named axes inside the function. Note that ``in_axes``
      can also be a prefix of the argument container structure, in which case the
      mapping is repeated for all arrays in the collapsed subtree.
    out_axes: A Python object with the same container (pytree) structure as the
      returns of ``fun``, but with a positional-to-named axis mapping in place
      of every returned array. The valid positional-to-named mappings are the same
      as in ``in_axes``. Note that ``out_axes`` can also be a prefix of the return
      container structure, in which case the mapping is repeated for all arrays
      in the collapsed subtree.
    axis_sizes: A dict mapping axis names to their sizes. All axes defined by xmap
      have to appear either in ``in_axes`` or ``axis_sizes``. Sizes of axes
      that appear in ``in_axes`` are inferred from arguments whenever possible.
      In multi-host scenarios, the user-specified sizes are expected to be the
      global axis sizes (and might not match the expected size of local inputs).
    axis_resources: A dictionary mapping the axes introduced in this
      :py:func:`xmap` to one or more resource axes. Any array that has in its
      shape an axis with some resources assigned will be partitioned over the
      resources associated with the respective resource axes.
    donate_argnums: Specify which argument buffers are "donated" to the computation.
      It is safe to donate argument buffers if you no longer need them once the
      computation has finished. In some cases XLA can make use of donated
      buffers to reduce the amount of memory needed to perform a computation,
      for example recycling one of your input buffers to store a result. You
      should not reuse buffers that you donate to a computation, JAX will raise
      an error if you try to.

      For more details on buffer donation see the [FAQ](https://jax.readthedocs.io/en/latest/faq.html#buffer-donation).
    backend: This is an experimental feature and the API is likely to change.
      Optional, a string representing the XLA backend. 'cpu', 'gpu', or 'tpu'.

  Returns:
    A version of ``fun`` that takes in arrays with positional axes in place of
    named axes bound in this :py:func:`xmap` call, and results with all named
    axes converted to positional axes. If ``axis_resources`` is specified,
    ``fun`` can additionally execute in parallel on multiple devices.

  For example, :py:func:`xmap` makes it very easy to convert a function that
  computes the vector inner product (such as :py:func:`jax.numpy.vdot`) into
  one that computes a matrix multiplication:

  >>> import jax.numpy as jnp
  >>> x = jnp.arange(10).reshape((2, 5))
  >>> xmap(jnp.vdot,
  ...      in_axes=({0: 'left'}, {1: 'right'}),
  ...      out_axes=['left', 'right', ...])(x, x.T)
  DeviceArray([[ 30,  80],
               [ 80, 255]], dtype=int32)

  Note that the contraction in the program is performed over the positional axes,
  while named axes are just a convenient way to achieve batching. While this
  might seem like a silly example at first, it might turn out to be useful in
  practice, since in conjunction with ``axis_resources`` this makes it possible
  to implement a distributed matrix-multiplication in just a few lines of code::

    devices = np.array(jax.devices())[:4].reshape((2, 2))
    with mesh(devices, ('x', 'y')):  # declare a 2D mesh with axes 'x' and 'y'
      distributed_out = xmap(
        jnp.vdot,
        in_axes=({0: 'left'}, {1: 'right'}),
        out_axes=['left', 'right', ...],
        axis_resources={'left': 'x', 'right': 'y'})(x, x.T)

  Still, the above examples are quite simple. After all, the xmapped
  computation was a simple NumPy function that didn't use the axis names at all!
  So, let's explore a slightly larger example which is linear regression::

    def regression_loss(x, y, w, b):
      # Contract over in_features. Batch and out_features are present in
      # both inputs and output, so they don't need to be mentioned
      y_pred = jnp.einsum('{in_features},{in_features}->{}', x, w) + b
      error = jnp.sum((y - y_pred) ** 2, axis='out_features')
      return jnp.mean(error, axis='batch')

    xmap(regression_loss,
         in_axes=(['batch', 'in_features', ...],
                  ['batch', 'out_features', ...],
                  ['in_features', 'out_features', ...],
                  ['out_features', ...]),
         out_axes={})  # Loss is reduced over all axes, including batch!

  .. note::
    When using ``axis_resources`` along with a mesh that is controlled by
    multiple JAX hosts, keep in mind that in any given process :py:func:`xmap`
    only expects the data slice that corresponds to its local devices to be
    specified. This is in line with the current multi-host :py:func:`pmap`
    programming model.
  """
  warn("xmap is an experimental feature and probably has bugs!")
  _check_callable(fun)

  if isinstance(in_axes, list) and not _is_axes_leaf(in_axes):
    # To be a tree prefix of the positional args tuple, in_axes can never be a
    # list: if in_axes is not a leaf, it must be a tuple of trees. However,
    # in cases like these users expect tuples and lists to be treated
    # essentially interchangeably, so we canonicalize lists to tuples here
    # rather than raising an error. https://github.com/google/jax/issues/2367
    in_axes = tuple(in_axes)

  if in_axes == ():  # Allow empty argument lists
    in_axes, in_axes_entries = (), []
  else:
    in_axes, in_axes_entries, _ = _prepare_axes(in_axes, "in_axes")
  if out_axes == ():
    raise ValueError("xmapped functions cannot have no return values")
  else:
    out_axes, out_axes_entries, out_axes_treedef = _prepare_axes(out_axes, "out_axes")
    out_axes_entries = tuple(out_axes_entries)  # Make entries hashable

  # Every axis an xmap binds comes either from an input or from axis_sizes.
  axis_sizes_names = set(axis_sizes.keys())
  in_axes_names = set(it.chain(*(spec.keys() for spec in in_axes_entries)))
  defined_names = axis_sizes_names | in_axes_names
  out_axes_names = set(it.chain(*(spec.keys() for spec in out_axes_entries)))

  anon_serial_loops = []
  def normalize_resource(r) -> ResourceAxisName:
    # Anonymous SerialLoops get a fresh unique name and are registered so
    # that the wrapper can open the corresponding serial_loop contexts.
    if isinstance(r, SerialLoop):
      name = fresh_resource_name()
      anon_serial_loops.append((name, r.length))
      return name
    return r

  normalized_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]] = {}
  for axis in defined_names:
    resources = axis_resources.get(axis, ())
    if not isinstance(resources, tuple):
      resources = (resources,)
    normalized_axis_resources[axis] = tuple(unsafe_map(normalize_resource, resources))
  frozen_axis_resources = FrozenDict(normalized_axis_resources)
  necessary_resources = set(it.chain(*frozen_axis_resources.values()))

  # NOTE: frozen_axis_resources only has keys drawn from defined_names (see
  # the loop above), so we must validate the user-provided dict here, and a
  # set-difference test is required: a (proper-)superset comparison would
  # silently accept incomparable sets and drop resource assignments for
  # axes that were never defined.
  axes_with_resources = set(axis_resources.keys())
  if axes_with_resources - defined_names:
    raise ValueError(f"All axes that were assigned resources have to appear in "
                     f"in_axes or axis_sizes, but the following are missing: "
                     f"{axes_with_resources - defined_names}")
  if out_axes_names - defined_names:
    raise ValueError(f"All axis names appearing in out_axes must also appear in "
                     f"in_axes or axis_sizes, but the following are missing: "
                     f"{out_axes_names - defined_names}")

  for axis, resources in frozen_axis_resources.items():
    if len(set(resources)) != len(resources):  # type: ignore
      raise ValueError(f"Resource assignment of a single axis must be a tuple of "
                       f"distinct resources, but specified {resources} for axis {axis}")

  donate_argnums = _ensure_index_tuple(donate_argnums)

  # A little performance optimization to avoid iterating over all args unnecessarily
  has_input_rank_assertions = any(spec.expected_rank is not None for spec in in_axes_entries)
  has_output_rank_assertions = any(spec.expected_rank is not None for spec in out_axes_entries)

  def infer_params(*args):
    # Putting this outside of fun_mapped would make resources lexically scoped
    resource_env = thread_resources.env
    available_resources = set(resource_env.shape.keys())

    if necessary_resources - available_resources:
      raise ValueError(f"In-scope resources are insufficient to execute the "
                       f"xmapped function. The missing resources are: "
                       f"{necessary_resources - available_resources}")

    args_flat, in_tree = tree_flatten(args)
    fun_flat, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
    if donate_argnums:
      donated_invars = donation_vector(donate_argnums, args, ())
    else:
      donated_invars = (False,) * len(args_flat)
    in_axes_flat = _flatten_axes("xmap in_axes", in_tree, in_axes, tupled_args=True)

    # Some pytree containers might be unhashable, so we flatten the out_axes
    # pytree into a treedef and entries which are guaranteed to be hashable.
    out_axes_thunk = HashableFunction(
      lambda: tuple(_flatten_axes("xmap out_axes", out_tree(), out_axes, tupled_args=False)),
      closure=(out_axes_entries, out_axes_treedef))

    axis_resource_count = _get_axis_resource_count(
        _positional_semantics.val, frozen_axis_resources, resource_env)
    for axis, size in axis_sizes.items():
      resources = axis_resource_count[axis]
      if size % resources.nglobal != 0:
        global_size = "Global size" if resources.distributed else "Size"
        raise ValueError(f"{global_size} of axis {axis} ({size}) is not divisible "
                         f"by the total number of resources assigned to this axis "
                         f"({frozen_axis_resources[axis]}, {resources.nglobal} in total)")
    frozen_global_axis_sizes = _get_axis_sizes(args_flat, in_axes_flat,
                                               axis_sizes, axis_resource_count)

    missing_sizes = defined_names - set(frozen_global_axis_sizes.keys())
    if missing_sizes:
      raise ValueError(f"Failed to infer size of axes: {', '.join(unsafe_map(str, missing_sizes))}. "
                       f"You've probably passed in empty containers in place of arguments that had "
                       f"those axes in their in_axes. Provide the sizes of missing axes explicitly "
                       f"via axis_sizes to fix this error.")

    if has_input_rank_assertions:
      for arg, spec in zip(args_flat, in_axes_flat):
        if spec.expected_rank is not None and spec.expected_rank != arg.ndim:
          raise ValueError(f"xmap argument has an in_axes specification of {spec.user_repr}, "
                           f"which asserts that it should be of rank {spec.expected_rank}, "
                           f"but the argument has rank {arg.ndim} (and shape {arg.shape})")
    params = dict(
      name=getattr(fun, '__name__', '<unnamed function>'),
      in_axes=tuple(in_axes_flat),
      out_axes_thunk=out_axes_thunk,
      donated_invars=donated_invars,
      global_axis_sizes=frozen_global_axis_sizes,
      axis_resources=frozen_axis_resources,
      resource_env=resource_env,
      backend=backend,
      spmd_in_axes=None,
      spmd_out_axes_thunk=None,
      positional_semantics=_positional_semantics.val)
    return fun_flat, args_flat, params, in_tree, out_tree

  def verify_outputs(out_flat, out_tree, params):
    if has_output_rank_assertions:
      for out, spec in zip(out_flat, params['out_axes_thunk']()):
        if spec.expected_rank is not None and spec.expected_rank != out.ndim:
          raise ValueError(f"xmap output has an out_axes specification of {spec.user_repr}, "
                           f"which asserts that it should be of rank {spec.expected_rank}, "
                           f"but the output has rank {out.ndim} (and shape {out.shape})")
    return tree_unflatten(out_tree(), out_flat)

  def fun_mapped(*args):
    tree_map(_check_arg, args)
    fun_flat, args_flat, params, _, out_tree = infer_params(*args)
    out_flat = xmap_p.bind(fun_flat, *args_flat, **params)
    return verify_outputs(out_flat, out_tree, params)

  def decorate_serial(f):
    # Wrap f so that anonymous SerialLoop resources are in scope when it runs.
    for loop_params in reversed(anon_serial_loops):
      f = serial_loop(*loop_params)(f)
    return f

  def lower(*args):
    fun_flat, args_flat, params, in_tree, out_tree = infer_params(*args)
    avals_flat = [shaped_abstractify(arg) for arg in args_flat]
    computation = make_xmap_callable(
        fun_flat, params['name'], params['in_axes'], params['out_axes_thunk'],
        params['donated_invars'], params['global_axis_sizes'], params['axis_resources'],
        params['resource_env'], params['backend'], params['spmd_in_axes'],
        params['spmd_out_axes_thunk'], params['positional_semantics'], *avals_flat)
    return Lowered(
        computation, in_tree, out_tree(), donate_argnums, no_kwargs=True)

  fun_mapped = wraps(fun)(decorate_serial(fun_mapped))
  fun_mapped.lower = decorate_serial(lower)

  return fun_mapped
def xmap_impl(fun: lu.WrappedFun, *args, name, in_axes, out_axes_thunk, donated_invars,
              global_axis_sizes, axis_resources, resource_env, backend,
              spmd_in_axes, spmd_out_axes_thunk, positional_semantics):
  """Eager evaluation rule for the xmap primitive: lower, compile, run."""
  abstract_args = [core.raise_to_shaped(core.get_aval(x)) for x in args]
  # make_xmap_callable is lu.cache-d, so repeated calls with the same
  # abstract signature reuse the compiled executable.
  compiled = make_xmap_callable(
      fun, name, in_axes, out_axes_thunk, donated_invars, global_axis_sizes,
      axis_resources, resource_env, backend,
      spmd_in_axes, spmd_out_axes_thunk, positional_semantics,
      *abstract_args).compile().unsafe_call
  distributed_debug_log(("Running xmapped function", name),
                        ("python function", fun.f),
                        ("mesh", resource_env.physical_mesh),
                        ("abstract args", abstract_args))
  return compiled(*args)
@lu.cache
def make_xmap_callable(fun: lu.WrappedFun,
                       name,
                       in_axes, out_axes_thunk, donated_invars,
                       global_axis_sizes, axis_resources, resource_env, backend,
                       spmd_in_axes, spmd_out_axes_thunk, positional_semantics,
                       *in_avals):
  """Trace ``fun`` to a jaxpr and lower it, either onto the device mesh (when
  any mesh resources are used) or as a plain XLA computation.

  Cached on all arguments via lu.cache, so in_avals must be hashable.
  """
  assert positional_semantics == _PositionalSemantics.LOCAL
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)

  # TODO: Making axis substitution final style would allow us to avoid
  # tracing to jaxpr here
  # Trace with the named axes stripped from the positional shapes.
  mapped_in_avals = [_delete_aval_axes(aval, in_axes, global_axis_sizes)
                     for aval, in_axes in zip(in_avals, in_axes)]
  with core.extend_axis_env_nd(global_axis_sizes.items()):
    # NOTE(review): the second string is intentionally NOT an f-string —
    # {elapsed_time} is presumably substituted later by log_elapsed_time;
    # confirm against jax._src.dispatch.
    with dispatch.log_elapsed_time(f"Finished tracing + transforming {fun.__name__} "
                                   "for xmap in {elapsed_time} sec"):
      jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(fun, mapped_in_avals)
  out_axes = out_axes_thunk()
  _check_out_avals_vs_out_axes(out_avals, out_axes, global_axis_sizes)
  # NOTE: We don't use avals and all params, so only pass in the relevant parts (too lazy...)
  _resource_typing_xmap([], dict(axis_resources=axis_resources,
                                 out_axes=out_axes,
                                 call_jaxpr=jaxpr,
                                 resource_env=resource_env,
                                 name=name),
                        source_info_util.new_source_info(), resource_env, {})
  jaxpr = plan.subst_axes_with_resources(jaxpr)
  use_spmd_lowering = config.experimental_xmap_spmd_lowering
  ensure_fixed_sharding = config.experimental_xmap_ensure_fixed_sharding
  if use_spmd_lowering and ensure_fixed_sharding:
    jaxpr = _fix_inferred_spmd_sharding(jaxpr, resource_env)

  # Rebuild a callable from the substituted jaxpr, re-insert the mapped axes,
  # and apply the vmap/loop evaluation strategy chosen by the plan.
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(jaxpr, consts)))
  f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))
  f = plan.vectorize_and_loop(f, in_axes, out_axes)

  used_resources = _jaxpr_resources(jaxpr, resource_env) | set(it.chain(*axis_resources.values()))
  used_mesh_axes = used_resources & resource_env.physical_resource_axes
  if used_mesh_axes:
    assert spmd_in_axes is None and spmd_out_axes_thunk is None  # No outer xmaps, so should be None
    mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
    mesh = resource_env.physical_mesh
    global_in_avals = [mesh.local_to_global(ax, av)
                       for ax, av in safe_zip(mesh_in_axes, in_avals)]
    if config.experimental_xmap_spmd_lowering_manual:
      tiling_method = pxla.TilingMethod.MANUAL
    else:
      tiling_method = pxla.TilingMethod.VECTORIZE
    return pxla.lower_mesh_computation(
        f, name, mesh,
        mesh_in_axes, mesh_out_axes, donated_invars,
        use_spmd_lowering, global_in_avals,
        tiling_method=tiling_method, in_is_gda=[False] * len(global_in_avals))
  else:
    # No mesh resources used: lower as an ordinary (single-device) callable.
    return dispatch.lower_xla_callable(
        f, None, backend, name, donated_invars, *((a, None) for a in in_avals))
class EvaluationPlan(NamedTuple):
  """Encapsulates preprocessing common to top-level xmap invocations and its translation rule."""
  resource_env: ResourceEnv
  # Logical axis -> mesh resource axes it is partitioned over.
  physical_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]]
  # Logical axis -> serial loop resource axes it is partitioned over.
  loop_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]]
  # Logical axis -> full substitution (resources, plus a trailing fresh vmap
  # axis when axis_vmap_size is not None for that axis).
  axis_subst_dict: Dict[AxisName, Tuple[ResourceAxisName, ...]]
  # Logical axis -> per-resource tile size to vmap over, or None if no vmap
  # axis is needed (every resource gets exactly one element).
  axis_vmap_size: Dict[AxisName, Optional[int]]

  @property
  def axis_subst(self) -> core.AxisSubst:
    # Substitution function: unknown axes map to themselves.
    return lambda name: self.axis_subst_dict.get(name, (name,))

  @property
  def resource_axis_env(self):
    # Axis environment covering all resource axes plus the fresh vmap axes
    # (the last entry of each substitution) with their tile sizes.
    env = dict(self.resource_env.shape)
    for axis, size in self.axis_vmap_size.items():
      if size is None:
        continue
      vmap_axis = self.axis_subst_dict[axis][-1]
      env[vmap_axis] = size
    return env

  @classmethod
  def from_axis_resources(cls,
                          axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]],
                          resource_env: ResourceEnv,
                          global_axis_sizes: Dict[AxisName, int]):
    """Build a plan from the normalized axis_resources mapping."""
    physical_axis_resources, loop_axis_resources = _unzip_axis_resources(
            axis_resources, resource_env)
    axis_resource_count = _get_axis_resource_count(None, axis_resources, resource_env)
    axis_subst_dict = dict(axis_resources)
    axis_vmap_size: Dict[AxisName, Optional[int]] = {}
    # Sorted for determinism of the fresh names minted below.
    for naxis, raxes in sorted(axis_resources.items(), key=lambda x: str(x[0])):
      num_resources = axis_resource_count[naxis]
      assert global_axis_sizes[naxis] % num_resources.nglobal == 0
      local_tile_size = global_axis_sizes[naxis] // num_resources.nglobal
      # We have to vmap when there are no resources (to handle the axis name!) or
      # when every resource gets chunks of values.
      if not raxes or local_tile_size > 1:
        axis_vmap_size[naxis] = local_tile_size
        axis_subst_dict[naxis] += (fresh_resource_name(naxis),)
      else:
        axis_vmap_size[naxis] = None
    return cls(resource_env,
               physical_axis_resources, loop_axis_resources,
               axis_subst_dict, axis_vmap_size)

  def subst_axes_with_resources(self, jaxpr):
    """Rewrite named axes in ``jaxpr`` into their resource axes."""
    try:
      if any(self.loop_axis_resources.values()):
        _check_no_loop_collectives(jaxpr, self.loop_axis_resources)
      with core.extend_axis_env_nd(self.resource_axis_env.items()):
        return core.subst_axis_names_jaxpr(jaxpr, self.axis_subst)
    except core.DuplicateAxisNameError:
      raise AssertionError("Incomplete resource type-checking? Please open a bug report!")

  def vectorize_and_loop(self, f: lu.WrappedFun, in_axes, out_axes) -> lu.WrappedFun:
    """Wrap ``f`` in the vmaps (one per vmapped axis) and the serial loop,
    realizing the non-mesh part of the evaluation strategy."""
    vmap_axes = {
        naxis: raxes[-1]
        for naxis, raxes in self.axis_subst_dict.items()
        if self.axis_vmap_size[naxis] is not None
    }
    # Sorted by fresh-name uid so nesting order is deterministic.
    for naxis, vaxis in sorted(vmap_axes.items(), key=lambda x: x[1].uid):
      local_tile_size = self.axis_vmap_size[naxis]
      map_in_axes = tuple(unsafe_map(lambda spec: spec.get(naxis, None), in_axes))
      map_out_axes = tuple(unsafe_map(lambda spec: spec.get(naxis, None), out_axes))
      f = batching.vtile(f, map_in_axes, map_out_axes, tile_size=local_tile_size, axis_name=vaxis)

    used_loops = set(it.chain.from_iterable(self.loop_axis_resources.values()))
    if not used_loops:
      return f

    if len(used_loops) > 1:
      # TODO: Support multiple loops
      raise NotImplementedError("Only one loop per xmap is supported")

    loop_in_axes = _to_resource_axes(in_axes, self.loop_axis_resources)
    loop_out_axes = _to_resource_axes(out_axes, self.loop_axis_resources)
    loop_name, = used_loops
    loop_length = self.resource_env.shape[loop_name]
    def looped_f(*args):
      def body(i, _):
        # XXX: This call_wrapped is only valid under the assumption that scan
        #      only ever traces the body once (which it does at the moment).
        result = f.call_wrapped(
          *(_slice_tile(arg, spec.get(loop_name, None), i, loop_length)
            for arg, spec in zip(args, loop_in_axes)))
        return i + 1, result
      _, stacked_results = lax.scan(body, 0, (), length=loop_length)
      return [_merge_leading_axis(sresult, spec.get(loop_name, None))
              for sresult, spec in zip(stacked_results, loop_out_axes)]
    return lu.wrap_init(looped_f)

  def to_mesh_axes(self, in_axes, out_axes):
    """
    Convert in/out_axes parameters ranging over logical dimensions to
    in/out_axes that range over the mesh dimensions.
    """
    return (_to_resource_axes(in_axes, self.physical_axis_resources),
            _to_resource_axes(out_axes, self.physical_axis_resources))
# -------- xmap primitive and its transforms --------
# xmap has a different set of parameters than pmap, so we make it its own primitive type
# xmap has a different set of parameters than pmap, so we make it its own primitive type
class XMapPrimitive(core.MapPrimitive):
  """The ``xmap`` map primitive.

  Dispatches to per-trace ``process_xmap`` handlers (installed on the trace
  classes below) instead of the generic call/map machinery.
  """
  def __init__(self):
    super().__init__('xmap')
    self.def_impl(xmap_impl)
    self.def_custom_bind(self.bind)
  def bind(self, fun, *args, in_axes, **params):
    # One in_axes entry per positional argument.
    assert len(in_axes) == len(args), (in_axes, args)
    return core.map_bind(self, fun, *args, in_axes=in_axes, **params)
  def process(self, trace, fun, tracers, params):
    # Delegate to the trace-specific xmap rule.
    return trace.process_xmap(self, fun, tracers, params)
  def post_process(self, trace, out_tracers, params):
    raise NotImplementedError
  def get_bind_params(self, params):
    """Recover bind-time params (thunks) from jaxpr-equation params."""
    new_params = dict(params)
    subfun = lu.wrap_init(partial(core.eval_jaxpr, new_params.pop('call_jaxpr'), ()))
    # out_axes is stored concretely on the eqn; bind expects a thunk.
    axes = new_params.pop('out_axes')
    new_params['out_axes_thunk'] = HashableFunction(lambda: axes, closure=axes)
    spmd_axes = new_params.pop('spmd_out_axes')
    if spmd_axes is not None:
      new_params['spmd_out_axes_thunk'] = \
          HashableFunction(lambda: spmd_axes, closure=spmd_axes)
    else:
      new_params['spmd_out_axes_thunk'] = None
    return [subfun], new_params
xmap_p = XMapPrimitive()
# The eval trace treats xmap like an ordinary call.
core.EvalTrace.process_xmap = core.EvalTrace.process_call  # type: ignore
def _process_xmap_default(self, call_primitive, f, tracers, params):
  # Fallback: every trace that may encounter xmap must install its own rule.
  raise NotImplementedError(f"{type(self)} must override process_xmap to handle xmap")
core.Trace.process_xmap = _process_xmap_default  # type: ignore
def _xmap_axis_subst(params, subst, traverse):
  """Axis-substitution rule for the xmap primitive's equation params."""
  # TODO(apaszke): This feels sketchy, but I'm not sure why
  if 'call_jaxpr' not in params or not traverse:
    return params
  def shadowed_subst(name):
    # Axes bound by this xmap shadow any outer binding of the same name.
    if name in params['global_axis_sizes']:
      return (name,)
    return subst(name)
  with core.extend_axis_env_nd(params['global_axis_sizes'].items()):
    new_jaxpr = core.subst_axis_names_jaxpr(params['call_jaxpr'],
                                            shadowed_subst)
  return dict(params, call_jaxpr=new_jaxpr)
core.axis_substitution_rules[xmap_p] = _xmap_axis_subst
# NOTE: We don't have to handle spmd_{in|out}_axes here, because
# SPMD batching always gets involved as the last transform before XLA translation
ad.JVPTrace.process_xmap = ad.JVPTrace.process_call  # type: ignore
# JVP-time parameter updating is identical to that of xla_call.
ad.call_param_updaters[xmap_p] = ad.call_param_updaters[xla.xla_call_p]
def _xmap_transpose(params, call_jaxpr, args, cts_in, cts_in_avals, reduce_axes):
  """Transpose rule for xmap: runs the backward pass inside a new xmap.

  Primal arguments keep their axes; tangents (undefined primals) and
  symbolically-zero cotangents are skipped. Returns one cotangent per
  original input, with ``ad.Zero`` entries reconstructed at the right aval.
  """
  all_args, in_tree_def = tree_flatten(((), args, cts_in))  # empty consts
  fun = lu.hashable_partial(
      lu.wrap_init(ad.backward_pass),
      call_jaxpr, reduce_axes + tuple(params['global_axis_sizes'].keys()))
  fun, nz_arg_cts = ad.nonzero_outputs(fun)
  fun, out_tree = flatten_fun_nokwargs(fun, in_tree_def)
  # Preserve axis for primal arguments, skip tangents (represented as undefined primals).
  in_axes, out_axes = params['in_axes'], params['out_axes']
  new_in_axes = (*(axis for axis, x in zip(in_axes, args) if not ad.is_undefined_primal(x)),
                 *(axis for axis, x in zip(out_axes, cts_in) if type(x) is not ad.Zero))
  # NOTE: This assumes that the output cotangents being zero is a deterministic
  #       function of which input cotangents were zero.
  @as_hashable_function(closure=(in_axes, tuple(type(c) is ad.Zero for c in cts_in)))
  def out_axes_thunk():
    return tuple(axis for axis, nz in zip(in_axes, nz_arg_cts()) if nz)
  new_params = dict(params,
                    name=wrap_name(params['name'], 'transpose'),
                    in_axes=new_in_axes,
                    out_axes_thunk=out_axes_thunk,
                    donated_invars=(False,) * len(new_in_axes),
                    spmd_out_axes_thunk=None)
  del new_params['out_axes']
  del new_params['spmd_out_axes']
  out_flat = xmap_p.bind(fun, *all_args, **new_params)
  arg_cts = tree_unflatten(out_tree(), out_flat)
  # Rebuild Zero cotangents with the positional axes reinserted so they match
  # the original (mapped) input avals.
  axis_resource_count = _get_axis_resource_count(
      params['positional_semantics'], params['axis_resources'], params['resource_env'])
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in params['global_axis_sizes'].items()}
  def unmap_zero(zero, axes):
    return ad.Zero(_insert_aval_axes(zero.aval, axes, local_axis_sizes))
  return tuple(unmap_zero(arg_ct, in_axis) if type(arg_ct) is ad.Zero else arg_ct
               for arg_ct, in_axis in zip(arg_cts, in_axes))
ad.primitive_transposes[xmap_p] = _xmap_transpose
def _typecheck_xmap(
    *in_avals, call_jaxpr, name, in_axes, out_axes, donated_invars,
    global_axis_sizes, axis_resources, resource_env, backend,
    spmd_in_axes, spmd_out_axes, positional_semantics):
  """Custom jaxpr type-checking rule for xmap equations.

  Checks that the operand avals match the (axis-reinserted) binder avals,
  type-checks the inner jaxpr under the extended axis env, and returns the
  equation's output avals with the mapped axes reinserted.
  """
  axis_resource_count = _get_axis_resource_count(
      positional_semantics, axis_resources, resource_env)
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in global_axis_sizes.items()}
  # What the caller must pass: inner binder avals with mapped axes reinserted.
  binder_in_avals = [_insert_aval_axes(v.aval, a_in_axes, local_axis_sizes)
                     for v, a_in_axes in zip(call_jaxpr.invars, in_axes)]
  for binder_in_aval, in_aval in zip(binder_in_avals, in_avals):
    if not core.typecompat(binder_in_aval, in_aval):
      raise core.JaxprTypeError(
        f"xmap passes operand {in_aval} to jaxpr expecting {binder_in_aval}")
  mapped_in_avals = [_delete_aval_axes(a, a_in_axes, global_axis_sizes)
                     for a, a_in_axes in zip(in_avals, in_axes)]
  with core.extend_axis_env_nd(global_axis_sizes.items()):
    core._check_jaxpr(lambda: core.JaxprPpContext(), call_jaxpr,
                      mapped_in_avals)
  mapped_out_avals = [v.aval for v in call_jaxpr.outvars]
  out_avals = [_insert_aval_axes(a, a_out_axes, local_axis_sizes)
               for a, a_out_axes in zip(mapped_out_avals, out_axes)]
  return out_avals
core.custom_typechecks[xmap_p] = _typecheck_xmap
def show_axes(axes):
  """Render axis names as a sorted, comma-separated list of backquoted names."""
  quoted = [f"`{axis}`" for axis in axes]
  quoted.sort()
  return ", ".join(quoted)
def _resource_typing_xmap(avals,
                          params,
                          source_info: source_info_util.SourceInfo,
                          resource_env,
                          outer_axis_resources):
  """Resource type-checking rule for xmap equations.

  Rejects axis-name shadowing, physical-mesh changes inside xmap, and
  outputs that are broadcast along an axis whose resources are already used
  to partition the output's named shape.
  """
  axis_resources = params['axis_resources']
  inner_axis_resources = dict(outer_axis_resources)
  inner_axis_resources.update(axis_resources)
  # If the merged dict is smaller than the sum of parts, some names collided.
  if len(inner_axis_resources) < len(outer_axis_resources) + len(axis_resources):
    overlap = set(outer_axis_resources) & set(axis_resources)
    raise JAXTypeError(
        f"Detected disallowed xmap axis name shadowing at "
        f"{source_info_util.summarize(source_info)} "
        f"(shadowed axes: {show_axes(overlap)})")
  if resource_env.physical_mesh != params['resource_env'].physical_mesh:
    raise RuntimeError("Changing the physical mesh is not allowed inside xmap.")
  call_jaxpr = params['call_jaxpr']
  # Recursively resource-type-check the inner jaxpr.
  pxla.resource_typecheck(
      params['call_jaxpr'], resource_env, inner_axis_resources,
      lambda: (f"an xmapped function {params['name']} " +
               (f"(xmap called at {source_info_util.summarize(source_info)})"
                if source_info else "")))
  for v, axes in zip(call_jaxpr.outvars, params['out_axes']):
    # Axes present in out_axes but absent from the output's named shape are
    # pure broadcasts; their resources must not overlap the resources that
    # already partition the value.
    broadcast_axes = set(axes) - set(v.aval.named_shape)
    used_resources = set(it.chain.from_iterable(
        inner_axis_resources[a] for a in v.aval.named_shape))
    for baxis in broadcast_axes:
      baxis_resources = set(inner_axis_resources[baxis])
      overlap = baxis_resources & used_resources
      if overlap:
        resource_to_axis = {}
        for axis in v.aval.named_shape:
          for raxis in inner_axis_resources[axis]:
            resource_to_axis[raxis] = axis
        partitioning_axes = set(resource_to_axis[raxis] for raxis in overlap)
        raise JAXTypeError(
            f"One of xmapped function ({params['name']}) outputs is broadcast "
            f"along axis `{baxis}` which is assigned to resources "
            f"{show_axes(baxis_resources)}, but the output is already "
            f"partitioned along {show_axes(overlap)}, because its "
            f"named shape contains {show_axes(partitioning_axes)}")
pxla.custom_resource_typing_rules[xmap_p] = _resource_typing_xmap
# This is DynamicJaxprTrace.process_map with some very minor modifications
def _dynamic_jaxpr_process_xmap(self, primitive, f, tracers, params):
  """Trace an xmap into the current dynamic jaxpr frame as a single equation."""
  from jax.interpreters.partial_eval import (
    trace_to_subjaxpr_dynamic, DynamicJaxprTracer,
    convert_constvars_jaxpr, new_jaxpr_eqn)
  assert primitive is xmap_p
  in_avals = [t.aval for t in tracers]
  global_axis_sizes = params['global_axis_sizes']
  # Trace the body with the mapped (named) axes removed from the avals.
  mapped_in_avals = [_delete_aval_axes(a, a_in_axes, global_axis_sizes)
                     for a, a_in_axes in zip(in_avals, params['in_axes'])]
  with core.extend_axis_env_nd(global_axis_sizes.items()):
    jaxpr, mapped_out_avals, consts = trace_to_subjaxpr_dynamic(
        f, self.main, mapped_in_avals)
  out_axes = params['out_axes_thunk']()
  if params['spmd_out_axes_thunk'] is not None:
    spmd_out_axes = params['spmd_out_axes_thunk']()
  else:
    spmd_out_axes = None
  axis_resource_count = _get_axis_resource_count(
      params['positional_semantics'], params['axis_resources'], params['resource_env'])
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in global_axis_sizes.items()}
  # Reinsert the mapped axes into the output avals seen by the outer trace.
  out_avals = [_insert_aval_axes(a, a_out_axes, local_axis_sizes)
               for a, a_out_axes in zip(mapped_out_avals, out_axes)]
  _check_out_avals_vs_out_axes(out_avals, out_axes, params['global_axis_sizes'])
  source_info = source_info_util.current()
  out_tracers = [DynamicJaxprTracer(self, a, source_info) for a in out_avals]
  invars = map(self.getvar, tracers)
  constvars = map(self.getvar, map(self.instantiate_const, consts))
  outvars = map(self.makevar, out_tracers)
  # Lifted consts become extra inputs mapped over no axes.
  new_in_axes = (AxisNamePos(user_repr='{}'),) * len(consts) + params['in_axes']
  if params['spmd_in_axes'] is None:
    new_spmd_in_axes = None
  else:
    new_spmd_in_axes = (None,) * len(consts) + params['spmd_in_axes']
  new_donated_invars = (False,) * len(consts) + params['donated_invars']
  with core.extend_axis_env_nd(global_axis_sizes.items()):
    call_jaxpr = convert_constvars_jaxpr(jaxpr)
  new_params = dict(params, in_axes=new_in_axes, out_axes=out_axes,
                    donated_invars=new_donated_invars,
                    spmd_in_axes=new_spmd_in_axes,
                    spmd_out_axes=spmd_out_axes,
                    call_jaxpr=call_jaxpr)
  # Thunks were forced above; the equation stores their values instead.
  del new_params['out_axes_thunk']
  del new_params['spmd_out_axes_thunk']
  eqn = new_jaxpr_eqn([*constvars, *invars], outvars, primitive,
                      new_params, source_info)
  self.frame.eqns.append(eqn)
  return out_tracers
pe.DynamicJaxprTrace.process_xmap = _dynamic_jaxpr_process_xmap  # type: ignore
def _xmap_partial_eval_custom_params_updater(
    unks_in: Sequence[bool],
    kept_outs_known: Sequence[bool], kept_outs_staged: Sequence[bool],
    num_res: int, params_known: dict, params_staged: dict
  ) -> Tuple[dict, dict]:
  """Adjust xmap equation params for the known/staged jaxprs of partial eval.

  The known jaxpr keeps only the known inputs and gains ``num_res`` residual
  outputs at the end; the staged jaxpr gains ``num_res`` residual inputs at
  the front. Residual axes are derived from the residual outputs' named shapes.
  """
  assert params_known['spmd_in_axes'] is None and params_known['spmd_out_axes'] is None
  assert params_staged['spmd_in_axes'] is None and params_staged['spmd_out_axes'] is None
  # pruned inputs to jaxpr_known according to unks_in
  donated_invars_known, _ = pe.partition_list(unks_in, params_known['donated_invars'])
  in_axes_known, _ = pe.partition_list(unks_in, params_known['in_axes'])
  if num_res == 0:
    residual_axes = []
  else:
    residual_axes = [
      AxisNamePos(zip(sort_named_shape, range(len(sort_named_shape))),
                  user_repr=f'<internal: {sort_named_shape}>')
      # BUGFIX: residuals are the *last* num_res outputs of the known jaxpr
      # (pe.call_partial_eval_custom_rule appends them), so slice from the
      # end. The previous `[:-num_res]` selected the non-residual outputs,
      # producing the wrong number of axes and tripping the asserts below.
      for named_shape in (v.aval.named_shape for v in params_known['call_jaxpr'].outvars[-num_res:])
      # We sort here to make the iteration order deterministic
      for sort_named_shape in [sorted(named_shape, key=str)]
    ]
  _, out_axes_known = pe.partition_list(kept_outs_known, params_known['out_axes'])
  new_params_known = dict(params_known,
                          in_axes=tuple(in_axes_known),
                          out_axes=(*out_axes_known, *residual_axes),
                          donated_invars=tuple(donated_invars_known))
  assert len(new_params_known['in_axes']) == len(params_known['call_jaxpr'].invars)
  assert len(new_params_known['out_axes']) == len(params_known['call_jaxpr'].outvars)
  # added num_res new inputs to jaxpr_staged
  donated_invars_staged = (*(False for _ in range(num_res)), *params_staged['donated_invars'])
  _, out_axes_staged = pe.partition_list(kept_outs_staged, params_staged['out_axes'])
  new_params_staged = dict(params_staged,
                           in_axes=(*residual_axes, *params_staged['in_axes']),
                           out_axes=tuple(out_axes_staged),
                           donated_invars=donated_invars_staged)
  assert len(new_params_staged['in_axes']) == len(params_staged['call_jaxpr'].invars)
  assert len(new_params_staged['out_axes']) == len(params_staged['call_jaxpr'].outvars)
  return new_params_known, new_params_staged
pe.partial_eval_jaxpr_custom_rules[xmap_p] = \
    partial(pe.call_partial_eval_custom_rule, 'call_jaxpr',
            _xmap_partial_eval_custom_params_updater)
@lu.transformation_with_aux
def out_local_named_shapes(local_axes, *args, **kwargs):
  """Aux transformation: records, for each output, which of ``local_axes``
  appear in its aval's named shape."""
  ans = yield args, kwargs
  ans_axes = [frozenset(a.aval.named_shape) & local_axes for a in ans]
  yield ans, ans_axes
@lu.transformation_with_aux
def hide_units(unit_args, *args, **kwargs):
  """Aux transformation: reinserts units into the inputs (per ``unit_args``)
  and strips units from the outputs, reporting the output unit mask as aux."""
  ans = yield restore_units(unit_args, args), kwargs
  yield filter_units(ans)
def filter_units(vals):
  """Split ``vals`` into (non-unit values, boolean is-unit mask)."""
  unit_mask = [v is core.unit for v in vals]
  kept = [v for v, is_u in zip(vals, unit_mask) if not is_u]
  return kept, unit_mask
def restore_units(is_unit, vals):
  """Inverse of ``filter_units``: reinsert units at positions flagged in
  ``is_unit``, drawing the remaining values from ``vals`` in order."""
  vals_it = iter(vals)
  restored = [core.unit if flag else next(vals_it) for flag in is_unit]
  sentinel = object()
  # Every value must have been consumed, or the mask and vals disagree.
  if next(vals_it, sentinel) is not sentinel:
    raise RuntimeError("Expected the iterator to be exhausted")
  return restored
def _jaxpr_trace_process_xmap(self, primitive, f: lu.WrappedFun, tracers, params):
  """Partial-evaluation rule for xmap on JaxprTrace.

  Evaluates the known part eagerly (via ``self.partial_eval``) and stages the
  unknown part out as a new xmap equation, threading units and residual
  constants through extra inputs/outputs.
  """
  from jax.interpreters.partial_eval import (
    PartialVal, JaxprTracer, _drop_vars, _dce_open_jaxpr,
    convert_constvars_jaxpr, new_eqn_recipe)
  assert primitive is xmap_p
  in_axes = params['in_axes']
  donated_invars = params['donated_invars']
  global_axis_sizes = params['global_axis_sizes']
  # Unknown inputs are presented to the body with their named axes deleted.
  in_pvals = [t.pval for t in tracers]
  in_pvals = [pval if pval.is_known()
              else PartialVal.unknown(_delete_aval_axes(pval[0], axes, global_axis_sizes))
              for pval, axes in zip(in_pvals, in_axes)]
  const_axes_s = lu.Store()
  def app(f, *args):
    # Evaluate the known subcomputation by binding an inner xmap, with unit
    # values filtered out and axes for lifted consts computed on the fly.
    args_no_units, in_units = filter_units(args)
    f, out_units = hide_units(f, tuple(in_units))
    f, out_named_shapes = out_local_named_shapes(f, frozenset(global_axis_sizes))
    out_axes_thunk = params['out_axes_thunk']
    @as_hashable_function(closure=out_axes_thunk)
    def new_out_axes_thunk():
      out_axes = out_axes_thunk()
      axes_units, const_units = split_list(out_units(), [len(out_axes)])
      assert not any(const_units)
      num_consts = len(const_units)
      out_axes_no_units = [a for a, u in zip(out_axes, axes_units) if not u]
      const_axes: Sequence[AxisNamePos]
      if num_consts == 0:
        const_axes = ()
      else:
        const_axes = [
          AxisNamePos(zip(sort_named_shape, range(len(sort_named_shape))),
                      user_repr=f'<internal: {sort_named_shape}>')
          for named_shape in out_named_shapes()[-num_consts:]
          # We sort here to make the iteration order deterministic
          for sort_named_shape in [sorted(named_shape, key=str)]
        ]
      if not const_axes_s:  # NOTE: This can be called multiple times
        const_axes_s.store(const_axes)
      assert const_axes_s.val == const_axes
      return (*out_axes_no_units, *const_axes)
    pe_params = dict(
        params,
        in_axes=tuple(a for a, u in zip(in_axes, in_units) if not u),
        donated_invars=tuple(a for a, u in zip(donated_invars, in_units) if not u),
        out_axes_thunk=new_out_axes_thunk)
    outs_no_units = primitive.bind(f, *args_no_units, **pe_params)
    new_out_axes_thunk()  # Make sure it is called at least once to compute const_axes
    return restore_units(out_units(), outs_no_units)
  jaxpr, out_pvals, consts, env_tracers = self.partial_eval(
      f, in_pvals, app, instantiate=False)
  out_axes = params['out_axes_thunk']()
  const_axes = const_axes_s.val
  axis_resource_count = _get_axis_resource_count(
      params['positional_semantics'], params['axis_resources'], params['resource_env'])
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in global_axis_sizes.items()}
  # Unknown outputs get their named axes reinserted for the outer trace.
  out_pvals = [pval if pval.is_known() else
               PartialVal.unknown(_insert_aval_axes(pval[0], axes, local_axis_sizes))
               for pval, axes in zip(out_pvals, out_axes)]
  with core.extend_axis_env_nd(global_axis_sizes.items()):
    # Skip known invars and outvars, and lift constants as regular invars
    in_knowns = tuple(t.pval.is_known() for t in it.chain(env_tracers, tracers))
    out_unknowns = tuple(not pval.is_known() for pval in out_pvals)
    jaxpr = _drop_vars(jaxpr, in_knowns, (False,) * len(jaxpr.outvars))
    jaxpr = _dce_open_jaxpr(jaxpr, out_unknowns, drop_outputs=True)
    jaxpr = convert_constvars_jaxpr(jaxpr)
  # Known tracers get propagated as if they were constants
  known_tracers_out = [self.new_const(pval.get_known()) for pval in out_pvals
                       if pval.is_known()]
  # I'm not 100% if that's correct, but it is an assumption that
  # JaxprTrace.process_call already makes.
  if any(t.pval.is_known() for t in env_tracers):
    raise AssertionError("Please open a bug report!")
  # Unknown tracers need to have the jaxpr set up as their recipe
  unknown_tracers_in = (*env_tracers, *(t for t in tracers if not t.pval.is_known()))
  unknown_tracers_out = [JaxprTracer(self, pval, None) for pval in out_pvals
                         if not pval.is_known()]
  # NOTE(review): `map` here looks like jax's safe_map (cf. `unsafe_map`
  # above), which returns a list -- const_tracers is iterated twice below.
  const_tracers = map(self.new_instantiated_const, consts)
  # Set up new params
  new_in_axes = (*const_axes,
                 *(None for _ in env_tracers),
                 *(axis for axis, t in zip(in_axes, tracers)
                   if not t.pval.is_known()))
  new_out_axes = tuple(axis for axis, pval in zip(out_axes, out_pvals)
                       if not pval.is_known())
  assert params['spmd_in_axes'] is None and params['spmd_out_axes_thunk'] is None
  new_params = dict(
      params,
      call_jaxpr=jaxpr,
      donated_invars=(*(False for _ in const_tracers),
                      *(d for d, t in zip(donated_invars, tracers) if not t.pval.is_known())),
      in_axes=new_in_axes,
      out_axes=new_out_axes,
      spmd_out_axes=None)
  del new_params['out_axes_thunk']
  del new_params['spmd_out_axes_thunk']
  eqn = new_eqn_recipe((*const_tracers, *unknown_tracers_in),
                       unknown_tracers_out,
                       primitive, new_params, source_info_util.current())
  for t in unknown_tracers_out: t.recipe = eqn
  return pe._zip_knowns(known_tracers_out, unknown_tracers_out, out_unknowns)
pe.JaxprTrace.process_xmap = _jaxpr_trace_process_xmap
def _batch_trace_update_spmd_axes(
    spmd_in_axes, spmd_out_axes_thunk,
    axis_name, dims, dims_out_thunk):
  """Extends spmd in and out axes with the position of the trace's batch dimension."""
  not_mapped = batching.not_mapped
  def insert_spmd_axis(axes, nd):
    # Pad with None so position nd exists, then insert axis_name there.
    too_short = nd - len(axes)
    if too_short > 0:
      axes += (None,) * too_short
    return tuple_insert(axes, nd, axis_name)
  if spmd_in_axes is None:
    spmd_in_axes = ((),) * len(dims)
  new_spmd_in_axes = tuple(
    spmd_axes if d is not_mapped else insert_spmd_axis(spmd_axes, d)
    for spmd_axes, d in zip(spmd_in_axes, dims))
  @as_hashable_function(closure=spmd_out_axes_thunk)
  def new_spmd_out_axes_thunk():
    # Output batch dims are only known once the primal thunk has run.
    dims_out = dims_out_thunk()
    if spmd_out_axes_thunk is None:
      spmd_out_axes = ((),) * len(dims_out)
    else:
      spmd_out_axes = spmd_out_axes_thunk()
    return tuple(
      spmd_out_axes if nd is not_mapped else insert_spmd_axis(spmd_out_axes, nd)
      for spmd_out_axes, nd in zip(spmd_out_axes, dims_out))
  return new_spmd_in_axes, new_spmd_out_axes_thunk
def _batch_trace_process_xmap(self, is_spmd, primitive, f: lu.WrappedFun, tracers, params):
  """Batching (vmap/SPMD-batching) rule for xmap.

  Shifts positional in/out axis maps to account for the inserted batch
  dimension, and (for SPMD batching) also extends the spmd axes.
  """
  not_mapped = batching.not_mapped
  vals, dims = unzip2((t.val, t.batch_dim) for t in tracers)
  assert primitive is xmap_p
  if not is_spmd and all(dim is not_mapped for dim in dims):
    return primitive.bind(f, *vals, **params)
  else:
    assert len({x.shape[d] for x, d in zip(vals, dims) if d is not not_mapped}) == 1
    def fmap_dims(axes, f):
      # Apply f to every positional dimension in an AxisNamePos mapping.
      return AxisNamePos(((name, f(axis)) for name, axis in axes.items()),
                         user_repr=axes.user_repr)
    # Named-axis positions at or after the batch dim shift right by one.
    new_in_axes = tuple(
      fmap_dims(in_axes, lambda a: a + (d is not not_mapped and d <= a))
      for d, in_axes in zip(dims, params['in_axes']))
    # Batch dim position as seen by the body, after named axes are removed.
    mapped_dims_in = tuple(
      d if d is not_mapped else d - sum(a < d for a in in_axis.values())
      for d, in_axis in zip(dims, params['in_axes']))
    f, mapped_dims_out = batching.batch_subtrace(f, self.main, mapped_dims_in)
    out_axes_thunk: Callable[[], Sequence[AxisNamePos]] = params['out_axes_thunk']
    # NOTE: This assumes that the choice of the dimensions over which outputs
    #       are batched is entirely dependent on the function and not e.g. on the
    #       data or its shapes.
    dims_out_thunk = lambda: tuple(d if d is not_mapped else axis_after_insertion(d, out_axes)
                                   for d, out_axes in zip(mapped_dims_out(), out_axes_thunk()))
    def axis_after_insertion(axis, inserted_named_axes):
      # Where does a body-relative batch dim land once named axes come back?
      for inserted_axis in sorted(inserted_named_axes.values()):
        if inserted_axis >= axis:
          break
        axis += 1
      return axis
    @as_hashable_function(closure=out_axes_thunk)
    def new_out_axes_thunk():
      return tuple(
        out_axes if d is not_mapped else
        fmap_dims(out_axes, lambda a, nd=axis_after_insertion(d, out_axes): a + (nd <= a))
        for out_axes, d in zip(out_axes_thunk(), mapped_dims_out()))
    if not is_spmd:
      assert params['spmd_in_axes'] is None and params['spmd_out_axes_thunk'] is None
      new_spmd_in_axes = None
      new_spmd_out_axes_thunk = None
    else:
      new_spmd_in_axes, new_spmd_out_axes_thunk = _batch_trace_update_spmd_axes(
        params['spmd_in_axes'], params['spmd_out_axes_thunk'],
        self.axis_name, dims, dims_out_thunk)
    new_params = dict(params,
                      in_axes=new_in_axes, out_axes_thunk=new_out_axes_thunk,
                      spmd_in_axes=new_spmd_in_axes,
                      spmd_out_axes_thunk=new_spmd_out_axes_thunk)
    vals_out = primitive.bind(f, *vals, **new_params)
    dims_out = dims_out_thunk()
    return [batching.BatchTracer(self, v, d) for v, d in zip(vals_out, dims_out)]
batching.BatchTrace.process_xmap = partialmethod(_batch_trace_process_xmap, False)  # type: ignore
pxla.SPMDBatchTrace.process_xmap = partialmethod(_batch_trace_process_xmap, True)  # type: ignore
# -------- nested xmap handling --------
def _xmap_lowering_rule(ctx, *args, **kwargs):
  """Dispatch xmap MLIR lowering based on the active axis context."""
  axis_ctx = ctx.module_context.axis_context
  if isinstance(axis_ctx, mlir.SPMDAxisContext):
    spmd_rule = (_xmap_lowering_rule_spmd_manual
                 if config.experimental_xmap_spmd_lowering_manual
                 else _xmap_lowering_rule_spmd)
    return spmd_rule(ctx, *args, **kwargs)
  if isinstance(axis_ctx, mlir.ReplicaAxisContext):
    return _xmap_lowering_rule_replica(ctx, *args, **kwargs)
  raise AssertionError("Unrecognized axis context type!")
mlir.register_lowering(xmap_p, _xmap_lowering_rule)
def _xmap_lowering_rule_replica(ctx, *in_nodes,
                                call_jaxpr, name,
                                in_axes, out_axes, donated_invars,
                                global_axis_sizes,
                                spmd_in_axes, spmd_out_axes,
                                positional_semantics,
                                axis_resources, resource_env, backend):
  """MLIR lowering of xmap for the replicated (non-SPMD) path.

  Inputs are dynamically sliced into per-device tiles, the vectorized body is
  inlined, and outputs are reassembled via psum of padded tiles (_untile).
  """
  xla.check_backend_matches(backend, ctx.module_context.platform)
  # The only way for any of those two assertions to be violated is when xmap
  # is using the SPMD lowering, but then this rule shouldn't even trigger.
  assert positional_semantics == _PositionalSemantics.LOCAL
  assert spmd_in_axes is None and spmd_out_axes is None
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)
  axis_resource_count = _get_axis_resource_count(positional_semantics, axis_resources, resource_env)
  if any(resource_count.distributed for resource_count in axis_resource_count.values()):
    raise NotImplementedError
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in global_axis_sizes.items()}
  local_mesh = resource_env.physical_mesh.local_mesh
  local_mesh_shape = local_mesh.shape
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
  # Per-device avals: reinsert positional axes, then tile over the mesh.
  local_avals = [pxla.tile_aval_nd(
                    local_mesh_shape, aval_mesh_in_axes,
                    _insert_aval_axes(v.aval, aval_in_axes, local_axis_sizes))
                 for v, aval_in_axes, aval_mesh_in_axes
                 in zip(call_jaxpr.invars, in_axes, mesh_in_axes)]
  # We have to substitute before tracing, because we want the vectorized
  # axes to be used in the jaxpr.
  resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  # NOTE: We don't extend the resource env with the mesh shape, because those
  #       resources are already in scope! It's the outermost xmap that introduces
  #       them!
  vectorized_jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(f, local_avals)
  _check_out_avals_vs_out_axes(out_avals, out_axes, global_axis_sizes)
  assert not consts
  # Slice each input down to this replica's tile (units pass through).
  tiled_ins = (
    mlir.lower_fun(partial(_tile, in_axes=arg_in_axes,
                           axis_sizes=local_mesh_shape),
                   multiple_results=False)(
        mlir.LoweringRuleContext(module_context=ctx.module_context,
                                 primitive=None,
                                 avals_in=[aval], avals_out=None),
        in_node)[0]
    if v.aval is not core.abstract_unit else in_node
    for v, aval, in_node, arg_in_axes
    in zip(call_jaxpr.invars, ctx.avals_in, in_nodes, mesh_in_axes))
  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  sub_ctx = ctx.module_context.replace(
      name_stack=xla.extend_name_stack(ctx.module_context.name_stack,
                                       xla.wrap_name(name, 'xmap')))
  tiled_outs = mlir.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (), *tiled_ins)
  # Reassemble full outputs from the per-replica tiles.
  outs = [
    mlir.lower_fun(
        partial(_untile, out_axes=ans_out_axes, axis_sizes=local_mesh_shape,
                platform=ctx.module_context.platform),
        multiple_results=False)(
        mlir.LoweringRuleContext(module_context=ctx.module_context,
                                 primitive=None,
                                 avals_in=[vectorized_outvar.aval],
                                 avals_out=None), tiled_out)[0]
    if v.aval is not core.abstract_unit else tiled_out
    for v, vectorized_outvar, tiled_out, ans_out_axes
    in zip(call_jaxpr.outvars, vectorized_jaxpr.outvars, tiled_outs,
           mesh_out_axes)]
  return outs
def _xmap_lowering_rule_spmd(ctx, *global_in_nodes,
                             call_jaxpr, name, in_axes, out_axes,
                             donated_invars, global_axis_sizes, spmd_in_axes,
                             spmd_out_axes, positional_semantics,
                             axis_resources, resource_env, backend):
  """MLIR lowering of xmap for the SPMD path: wraps inputs/outputs in
  sharding-constraint ops and lets the partitioner split the computation."""
  xla.check_backend_matches(backend, ctx.module_context.platform)
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)
  resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, in_axes, out_axes)
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
  mesh = resource_env.physical_mesh
  f = pxla.vtile_by_mesh(f, mesh, mesh_in_axes, mesh_out_axes)
  # XXX: We modify mesh_in_axes and mesh_out_axes here
  def add_spmd_axes(flat_mesh_axes: Sequence[pxla.ArrayMapping],
                    flat_extra_axes: Optional[Sequence[Sequence[Sequence[pxla.MeshAxisName]]]]):
    # Merge SPMD-batching axes (one entry per positional dim) into the
    # mesh-axis mappings, in place.
    if flat_extra_axes is None:
      return
    for axes, extra in zip(flat_mesh_axes, flat_extra_axes):
      for dim, dim_extra_axis in enumerate(extra):
        if dim_extra_axis is None: continue
        assert dim_extra_axis not in axes
        assert not config.jax_enable_checks or all(v != dim for v in axes.values())
        axes[dim_extra_axis] = dim
  add_spmd_axes(mesh_in_axes, spmd_in_axes)
  add_spmd_axes(mesh_out_axes, spmd_out_axes)
  global_in_avals = ctx.avals_in
  vectorized_jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(f, global_in_avals)
  assert not consts
  global_sharding_spec = pxla.mesh_sharding_specs(mesh.shape, mesh.axis_names)
  # Attach sharding constraints to inputs that are mapped over mesh axes.
  sharded_global_in_nodes = [
    [mlir.wrap_with_sharding_op(node, global_sharding_spec(aval, aval_axes).sharding_proto())]
    if aval_axes else [node]
    for node, aval, aval_axes in zip(global_in_nodes, global_in_avals, mesh_in_axes)
  ]
  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  sub_ctx = ctx.module_context.replace(
      name_stack=xla.extend_name_stack(ctx.module_context.name_stack,
                                       xla.wrap_name(name, 'xmap')))
  global_out_nodes = mlir.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (),
                                        *sharded_global_in_nodes)
  # And the symmetric constraints on the outputs.
  sharded_global_out_nodes = [
    mlir.wrap_with_sharding_op(node, global_sharding_spec(aval, aval_axes).sharding_proto())
    if aval_axes else node
    for (node,), aval, aval_axes in zip(global_out_nodes, global_out_avals, mesh_out_axes)
  ]
  return sharded_global_out_nodes
def _xmap_lowering_rule_spmd_manual(ctx, *global_in_nodes,
                                    call_jaxpr, name, in_axes, out_axes,
                                    donated_invars, global_axis_sizes, spmd_in_axes,
                                    spmd_out_axes, positional_semantics,
                                    axis_resources, resource_env, backend):
  """MLIR lowering of xmap with manual SPMD partitioning: marks the used mesh
  axes as manual in the axis context instead of emitting sharding ops."""
  assert spmd_in_axes is None and spmd_out_axes is None
  # This first part (up to vtile_manual) is shared with non-MANUAL SPMD rule.
  xla.check_backend_matches(backend, ctx.module_context.platform)
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)
  resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, in_axes, out_axes)
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  # NOTE: Sharding constraints are handled entirely by vtile_manual!
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
  mesh = resource_env.physical_mesh
  f = pxla.vtile_manual(f, mesh, mesh_in_axes, mesh_out_axes)
  # NOTE: We don't extend the resource env with the mesh shape, because those
  #       resources are already in scope! It's the outermost xmap that introduces
  #       them!
  global_in_avals = ctx.avals_in
  vectorized_jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(f, global_in_avals)
  assert not consts
  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  manual_mesh_axes = frozenset(it.chain.from_iterable(plan.physical_axis_resources.values()))
  assert isinstance(ctx.module_context.axis_context, mlir.SPMDAxisContext)
  sub_ctx = ctx.module_context.replace(
      name_stack=xla.extend_name_stack(ctx.module_context.name_stack,
                                       xla.wrap_name(name, 'xmap')),
      axis_context=ctx.module_context.axis_context.extend_manual(manual_mesh_axes))
  global_out_nodes = mlir.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (),
                                        *([n] for n in global_in_nodes))
  return global_out_nodes
def _xmap_translation_rule(*args, **kwargs):
  """Dispatch xmap XLA translation based on the configured lowering mode."""
  if config.experimental_xmap_spmd_lowering_manual:
    raise NotImplementedError("Manual lowering only supported in MLIR lowering")
  if config.experimental_xmap_spmd_lowering:
    return _xmap_translation_rule_spmd(*args, **kwargs)
  return _xmap_translation_rule_replica(*args, **kwargs)
xla.register_translation(xmap_p, _xmap_translation_rule)
def _xmap_translation_rule_replica(ctx, avals_in, avals_out, *in_nodes,
                                   call_jaxpr, name,
                                   in_axes, out_axes, donated_invars,
                                   global_axis_sizes,
                                   spmd_in_axes, spmd_out_axes,
                                   positional_semantics,
                                   axis_resources, resource_env, backend):
  """XLA (pre-MLIR) translation of xmap for the replicated path.

  Mirrors ``_xmap_lowering_rule_replica``: slice per-replica tiles, inline
  the vectorized body, then reassemble outputs via ``_untile``.
  """
  xla.check_backend_matches(backend, ctx.platform)
  # The only way for any of those two assertions to be violated is when xmap
  # is using the SPMD lowering, but then this rule shouldn't even trigger.
  assert positional_semantics == _PositionalSemantics.LOCAL
  assert spmd_in_axes is None and spmd_out_axes is None
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)
  axis_resource_count = _get_axis_resource_count(positional_semantics, axis_resources, resource_env)
  if any(resource_count.distributed for resource_count in axis_resource_count.values()):
    raise NotImplementedError
  local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)
                      for axis, global_size in global_axis_sizes.items()}
  local_mesh = resource_env.physical_mesh.local_mesh
  local_mesh_shape = local_mesh.shape
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
  # Per-device avals: reinsert positional axes, then tile over the mesh.
  local_avals = [pxla.tile_aval_nd(
                    local_mesh_shape, aval_mesh_in_axes,
                    _insert_aval_axes(v.aval, aval_in_axes, local_axis_sizes))
                 for v, aval_in_axes, aval_mesh_in_axes
                 in zip(call_jaxpr.invars, in_axes, mesh_in_axes)]
  # We have to substitute before tracing, because we want the vectorized
  # axes to be used in the jaxpr.
  resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  # NOTE: We don't extend the resource env with the mesh shape, because those
  #       resources are already in scope! It's the outermost xmap that introduces
  #       them!
  vectorized_jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(f, local_avals)
  _check_out_avals_vs_out_axes(out_avals, out_axes, global_axis_sizes)
  assert not consts
  # Slice each input down to this replica's tile (units pass through).
  tiled_ins = (
    xla.lower_fun(
        partial(_tile, in_axes=arg_in_axes, axis_sizes=local_mesh_shape),
        new_style=True, multiple_results=False)(ctx, [aval], None, in_node)[0]
    if aval is not core.abstract_unit else in_node
    for aval, in_node, arg_in_axes
    in zip(avals_in, in_nodes, mesh_in_axes))
  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  sub_ctx = ctx.replace(
      name_stack=xla.extend_name_stack(ctx.name_stack,
                                       xla.wrap_name(name, 'xmap')))
  tiled_outs = xla.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (), *tiled_ins)
  # Reassemble full outputs from the per-replica tiles.
  outs = [
    xla.lower_fun(
        partial(_untile, out_axes=ans_out_axes, axis_sizes=local_mesh_shape,
                platform=ctx.platform),
        new_style=True, multiple_results=False)(
        ctx, [v.aval], None, tiled_out
    )[0]
    if v.aval is not core.abstract_unit else tiled_out
    for v, tiled_out, ans_out_axes
    in zip(vectorized_jaxpr.outvars, tiled_outs, mesh_out_axes)]
  return outs
def _tile_base_indices(tile_shape, axes, axis_sizes):
  """Compute this device's start indices for its tile of a tiled array.

  Args:
    tile_shape: shape of a single tile (global shape already divided by the
      tile counts along each tiled dimension).
    axes: ordered mapping from mesh axis name to the positional dimension it
      tiles; ordering is significant when several axes tile one dimension.
    axis_sizes: mapping from mesh axis name to its size.

  Returns:
    One start index per dimension of ``tile_shape``: the literal scalar
    ``zero`` for untiled dims, or a traced index expression otherwise.
  """
  zero = np.zeros((), dtype=np.int32)
  # linear_idxs[d] accumulates the linear tile index along dimension d as a
  # mixed-radix number over all mesh axes tiling d (minor-most axis first).
  linear_idxs = [zero] * len(tile_shape)
  strides = [1] * len(tile_shape)
  for name, axis in reversed(axes.items()):
    axis_index = lax.axis_index(name)
    stride_c = np.array(strides[axis], np.int32)
    # Identity check against `zero` avoids emitting add/mul ops for the first
    # (minor-most) axis that contributes to a dimension.
    if linear_idxs[axis] is zero and strides[axis] == 1:
      linear_idxs[axis] = axis_index
    else:
      linear_idxs[axis] = lax.add(linear_idxs[axis],
                                  lax.mul(axis_index, stride_c))
    strides[axis] *= axis_sizes[name]
  # Scale tile indices by tile extents to get element offsets, preserving the
  # literal `zero` for untiled dimensions so no op is emitted for them.
  return [zero if linear_idx is zero else
          lax.mul(linear_idx, np.array(tile_dim_size, np.int32))
          for linear_idx, tile_dim_size in zip(linear_idxs, tile_shape)]
def _tile(x, in_axes, axis_sizes):
if not in_axes:
return x
tile_shape = list(x.shape)
for name, axis in in_axes.items():
axis_size = axis_sizes[name]
assert tile_shape[axis] % axis_size == 0
tile_shape[axis] //= axis_size
base_idxs = _tile_base_indices(tile_shape, in_axes, axis_sizes)
return lax.dynamic_slice(x, base_idxs, tile_shape)
# TODO(b/110096942): more efficient gather
def _untile(x, out_axes, axis_sizes, platform):
  """Reassemble the full array from per-device tiles via a padded psum.

  Each device scatters its tile into a zero-padded array of the full shape at
  its own offset; a psum over the tiling axes then sums the disjoint tiles
  into the complete result on every device.
  """
  # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU
  convert_bool = (np.issubdtype(x.dtype, np.bool_)
                  and platform in ('cpu', 'gpu'))
  if convert_bool:
    x = lax.convert_element_type(x, np.dtype(np.float32))
  tile_shape = list(x.shape)
  shape = list(tile_shape)
  for name, axis in out_axes.items():
    shape[axis] *= axis_sizes[name]
  base_idxs = _tile_base_indices(tile_shape, out_axes, axis_sizes)
  padded = lax.broadcast(np.array(0, x.dtype), shape)
  padded = lax.dynamic_update_slice(padded, x, base_idxs)
  out = lax.psum(padded, tuple(out_axes.keys()))
  # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU
  if convert_bool:
    # Any nonzero sum means the boolean was set on the owning device.
    nonzero = lax.ne(out, np.array(0, dtype=np.float32))
    out = lax.convert_element_type(nonzero, np.dtype(np.bool_))
  return out
def _xmap_translation_rule_spmd(ctx, avals_in, avals_out, *global_in_nodes,
                                call_jaxpr, name,
                                in_axes, out_axes, donated_invars,
                                global_axis_sizes,
                                spmd_in_axes, spmd_out_axes,
                                positional_semantics,
                                axis_resources, resource_env, backend):
  """XLA translation rule for xmap under SPMD lowering.

  The body is vectorized and tiled over the mesh, traced at *global* shapes,
  and inlined into the enclosing computation; mesh-derived sharding protos
  are attached to inputs and outputs so the XLA SPMD partitioner distributes
  the program across devices.
  """
  xla.check_backend_matches(backend, ctx.platform)
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)
  resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, in_axes, out_axes)
  f = plan.vectorize_and_loop(f, in_axes, out_axes)
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
  mesh = resource_env.physical_mesh
  f = pxla.vtile_by_mesh(f, mesh, mesh_in_axes, mesh_out_axes)
  # XXX: We modify mesh_in_axes and mesh_out_axes here
  def add_spmd_axes(flat_mesh_axes: Sequence[pxla.ArrayMapping],
                    flat_extra_axes: Optional[Sequence[Sequence[Sequence[pxla.MeshAxisName]]]]):
    # Merge manually specified extra SPMD mesh axes into the array mappings in
    # place; each entry maps a mesh axis name to the array dim it shards.
    if flat_extra_axes is None:
      return
    for axes, extra in zip(flat_mesh_axes, flat_extra_axes):
      for dim, dim_extra_axis in enumerate(extra):
        if dim_extra_axis is None: continue
        assert dim_extra_axis not in axes
        assert not config.jax_enable_checks or all(v != dim for v in axes.values())
        axes[dim_extra_axis] = dim
  add_spmd_axes(mesh_in_axes, spmd_in_axes)
  add_spmd_axes(mesh_out_axes, spmd_out_axes)
  # NOTE: We don't extend the resource env with the mesh shape, because those
  # resources are already in scope! It's the outermost xmap that introduces
  # them!
  # Recover the global input avals from the XLA shapes of the input nodes.
  global_in_avals = [
      core.ShapedArray(xla_type.dimensions(), xla_type.numpy_dtype())
      for in_node in global_in_nodes
      for xla_type in (ctx.builder.get_shape(in_node),)
  ]
  vectorized_jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(
      f, global_in_avals)
  assert not consts
  global_sharding_spec = pxla.mesh_sharding_specs(mesh.shape, mesh.axis_names)
  def set_sharding(node, aval, aval_axes):
    # Attach a mesh-derived sharding proto to `node`; dims not mapped to any
    # mesh axis may be left unspecified so the partitioner can choose.
    sharding_proto = global_sharding_spec(aval, aval_axes).sharding_proto()
    if not config.experimental_xmap_ensure_fixed_sharding:
      # Do not specify sharding on other dimensions.
      unspecified_dims = set(range(aval.ndim))
      for axis in set(aval_axes.values()):
        unspecified_dims.remove(axis)
      return xla.set_sharding_proto(ctx.builder, node, sharding_proto,
                                    unspecified_dims)
    else:
      return xla.set_sharding_proto(ctx.builder, node, sharding_proto)
  sharded_global_in_nodes = [
      set_sharding(node, aval, aval_axes) if aval_axes else node for node, aval,
      aval_axes in zip(global_in_nodes, global_in_avals, mesh_in_axes)
  ]
  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  sub_ctx = ctx.replace(
      name_stack=xla.extend_name_stack(ctx.name_stack,
                                       xla.wrap_name(name, 'xmap')))
  global_out_nodes = xla.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (),
                                       *sharded_global_in_nodes)
  sharded_global_out_nodes = [
      set_sharding(node, aval, aval_axes) if aval_axes else node for node, aval,
      aval_axes in zip(global_out_nodes, global_out_avals, mesh_out_axes)
  ]
  return sharded_global_out_nodes
# -------- helper functions --------
def _delete_aval_axes(aval, axes: AxisNamePos, global_axis_sizes):
  """Remove mapped positional dims from ``aval``, moving them into its named shape."""
  assert isinstance(aval, core.ShapedArray)
  new_shape = list(aval.shape)
  new_named_shape = dict(aval.named_shape)
  # Delete from the highest dimension down so earlier deletions don't shift
  # the positions of dimensions still to be removed.
  for axis_name, dim in sorted(axes.items(), key=lambda item: item[1],
                               reverse=True):
    new_named_shape[axis_name] = global_axis_sizes[axis_name]
    del new_shape[dim]
  return aval.update(shape=tuple(new_shape), named_shape=new_named_shape)
def _insert_aval_axes(aval, axes: AxisNamePos, local_axis_sizes):
  """Reinsert mapped dims into ``aval``'s positional shape, dropping them from its named shape."""
  assert isinstance(aval, core.ShapedArray)
  new_shape = list(aval.shape)
  new_named_shape = dict(aval.named_shape)
  # Insert from the lowest dimension up so each insertion lands exactly at
  # the position the axes spec names.
  for axis_name, dim in sorted(axes.items(), key=lambda item: item[1]):
    new_shape.insert(dim, local_axis_sizes[axis_name])
    new_named_shape.pop(axis_name, None)  # The name might be missing --- it's a broadcast.
  return aval.update(shape=tuple(new_shape), named_shape=new_named_shape)
class ResourceCount(namedtuple('ResourceCount', ['semantics', 'nglobal', 'nlocal'])):
  """Global/local resource multiplicity assigned to a logical axis.

  Attributes:
    semantics: a _PositionalSemantics value selecting whether positional
      sizes are interpreted as global or per-host (local).
    nglobal: number of resources across the whole mesh.
    nlocal: number of resources on this host.
  """

  def to_local(self, global_size):
    """Convert a global dimension size to this host's local size."""
    if self.semantics == _PositionalSemantics.GLOBAL:
      return global_size
    elif self.semantics == _PositionalSemantics.LOCAL:
      assert global_size % self.nglobal == 0, "Please report this issue!"
      return (global_size // self.nglobal) * self.nlocal
    else:
      # BUG FIX: this message was missing its f-prefix (unlike the one in
      # to_global), so it printed the literal placeholder text.
      raise AssertionError(f"Unhandled case {_positional_semantics}")

  def to_global(self, local_size):
    """Convert this host's local dimension size to the global size."""
    if self.semantics == _PositionalSemantics.GLOBAL:
      return local_size
    elif self.semantics == _PositionalSemantics.LOCAL:
      assert local_size % self.nlocal == 0, "Please report this issue!"
      return (local_size // self.nlocal) * self.nglobal
    else:
      raise AssertionError(f"Unhandled case {_positional_semantics}")

  @property
  def distributed(self):
    """True when global and local counts differ (i.e. a multi-host mesh)."""
    return self.nglobal != self.nlocal
def _get_axis_resource_count(semantics, axis_resources, resource_env) -> Dict[ResourceAxisName, ResourceCount]:
  """Count the global and local resources assigned to each logical axis.

  Args:
    semantics: the _PositionalSemantics value to record on each count.
    axis_resources: mapping from logical axis name to its tuple of resource
      axis names.
    resource_env: provides the global (`shape`) and per-host (`local_shape`)
      resource shapes.

  Returns:
    A dict mapping each logical axis name to its ResourceCount.
  """
  global_res_shape = resource_env.shape
  local_res_shape = resource_env.local_shape
  # BUG FIX: np.prod over a bare `map` object does not iterate it (numpy wraps
  # the iterator in a 0-d object array), so materialize the sizes in a list.
  # Indexing (instead of .get) also makes a missing resource fail loudly with
  # a KeyError rather than a confusing None-multiplication TypeError.
  return {axis: ResourceCount(
              semantics,
              int(np.prod([global_res_shape[r] for r in resources], dtype=np.int64)),
              int(np.prod([local_res_shape[r] for r in resources], dtype=np.int64)))
          for axis, resources in axis_resources.items()}
def _get_axis_sizes(args_flat: Iterable[Any],
                    in_axes_flat: Iterable[AxisNamePos],
                    global_axis_sizes: Dict[AxisName, int],
                    axis_resource_count: Dict[AxisName, ResourceCount]):
  """Infer the global size of every named axis from the argument shapes.

  Starts from any explicitly provided ``global_axis_sizes``, then for each
  mapped argument dimension computes the implied global size (converting via
  the axis's resource count when positional semantics are local) and checks
  consistency across arguments.

  Returns:
    A FrozenDict mapping each axis name to its inferred global size.

  Raises:
    ValueError: if an in_axes entry is out of range for its argument, a
      dimension size is not divisible by the local resource count, or two
      arguments imply different sizes for the same axis.
  """
  global_axis_sizes = dict(global_axis_sizes)
  for arg, in_axes in zip(args_flat, in_axes_flat):
    for name, dim in in_axes.items():
      resources = axis_resource_count[name]
      local_ = "local " if resources.distributed else ""
      try:
        local_dim_size = arg.shape[dim]
      except IndexError:
        # TODO(apaszke): Handle negative indices. Check for overlap too!
        raise ValueError(f"One of xmap arguments has an in_axes specification of "
                         f"{in_axes.user_repr}, which implies that it has at least "
                         f"{max(in_axes.values()) + 1} dimensions, but the argument "
                         f"has rank {arg.ndim}")
      if local_dim_size % resources.nlocal != 0:
        raise ValueError(f"One of xmap arguments has an in_axes specification of "
                         f"{in_axes.user_repr}, which implies that its size in dimension "
                         f"{dim} ({local_dim_size}) should be divisible by the number of "
                         f"{local_}resources assigned to axis {name} ({resources.nlocal})")
      global_dim_size = resources.to_global(local_dim_size)
      if name in global_axis_sizes:
        expected_local_dim_size = resources.to_local(global_axis_sizes[name])
        if local_dim_size != expected_local_dim_size:
          raise ValueError(f"The {local_}size of axis {name} was previously inferred to be "
                           f"{expected_local_dim_size}, but found an argument of shape {arg.shape} "
                           f"with in_axes specification {in_axes.user_repr}. Shape mismatch "
                           f"occurs in dimension {dim}: {local_dim_size} != {expected_local_dim_size}")
      global_axis_sizes[name] = global_dim_size
  return FrozenDict(global_axis_sizes)
def lookup_exactly_one_of(d: AxisNamePos, names: Set[AxisName]) -> Optional[int]:
  """Return ``d[name]`` for the single member of ``names`` present in ``d``.

  Returns None when none of the names is present.

  Raises:
    ValueError: if more than one of ``names`` is a key of ``d``.
  """
  hits = [d[name] for name in names if name in d]
  if len(hits) > 1:
    raise ValueError("An input was mapped to the same resource twice")
  return hits[0] if hits else None
@lu.transformation
def hide_mapped_axes(flat_in_axes, flat_out_axes, *flat_args):
  """Linear-util transformation hiding mapped positional axes from the body.

  Squeezes the (size-1, already-tiled) mapped dimensions off every input
  before invoking the wrapped function and expands them back onto every
  output afterwards, so the inner function never sees the mapped axes.
  """
  def _squeeze_mapped_axes(arg, axes: AxisNamePos):
    # Squeeze from the highest dim down so indices remain valid.
    for dim in sorted(axes.values(), reverse=True):
      arg = arg.squeeze(dim)
    return arg
  def _unsqueeze_mapped_axes(out, axes: AxisNamePos):
    try:
      return jnp.expand_dims(out, tuple(axes.values()))
    except ValueError as e:
      # Improve the axis out of bounds errors
      # TODO(apaszke): Handle negative indices. Check for overlap too!
      if e.args[0].startswith('axis') and 'out of bounds' in e.args[0]:
        raise ValueError(f"One of xmap outputs has an out_axes specification of "
                         f"{axes.user_repr}, which requires the result of the xmapped "
                         f"function to have at least {max(axes.values()) - len(axes) + 1} "
                         f"positional dimensions, but it only has {out.ndim}")
      raise
  squeezed_args = map(_squeeze_mapped_axes, flat_args, flat_in_axes)
  # `yield` hands the transformed args to the wrapped function and receives
  # its flat outputs (lu.transformation generator protocol).
  flat_outputs = yield squeezed_args, {}
  yield map(_unsqueeze_mapped_axes, flat_outputs, flat_out_axes)
def _jaxpr_resources(jaxpr, resource_env) -> Set[ResourceAxisName]:
  """Collect all resource axis names used by xmaps nested inside ``jaxpr``.

  Raises:
    RuntimeError: if a nested xmap tries to use a different physical mesh.
  """
  if isinstance(jaxpr, core.ClosedJaxpr):
    jaxpr = jaxpr.jaxpr
  assert isinstance(jaxpr, core.Jaxpr)
  used_resources = set()
  for eqn in jaxpr.eqns:
    if eqn.primitive is xmap_p:
      if eqn.params['resource_env'].physical_mesh != resource_env.physical_mesh:
        raise RuntimeError("Changing the physical mesh is not allowed inside xmap.")
      used_resources |= set(it.chain(*eqn.params['axis_resources'].values()))
    # Recurse into any sub-jaxprs stored in the equation's params.
    updates = core.traverse_jaxpr_params(
        partial(_jaxpr_resources, resource_env=resource_env), eqn.params).values()
    for update in updates:
      used_resources |= update
  return used_resources
def _to_resource_axes(axes_specs: Sequence[AxisNamePos],
                      axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]]):
  """
  Convert in/out_axes parameters ranging over logical dimensions to
  ones that range over resource dimensions.

  Note that values no longer have to be distinct, as multiple resource
  axes can tile a single positional axes. This is why the result is
  an OrderedDict with an implicit major-to-minor ordering.
  """
  converted = []
  for axes in axes_specs:
    resource_axes = OrderedDict()
    for logical_axis, pos_axis in axes.items():
      for resource_axis in axis_resources[logical_axis]:
        resource_axes[resource_axis] = pos_axis
    converted.append(resource_axes)
  return tuple(converted)
def _merge_leading_axis(x, axis: Optional[int]):
if axis is None:
# We assume that the output does not vary along the leading axis
return lax.index_in_dim(x, 0, axis=0, keepdims=False)
else:
x_moved = moveaxis(x, 0, axis)
shape = list(x_moved.shape)
shape[axis:axis + 2] = [shape[axis] * shape[axis + 1]]
return x_moved.reshape(shape)
def _slice_tile(x, dim: Optional[int], i, n: int):
"""Selects an `i`th (out of `n`) tiles of `x` along `dim`."""
if dim is None: return x
(tile_size, rem) = divmod(x.shape[dim], n)
assert rem == 0, "Please open a bug report!"
return lax.dynamic_slice_in_dim(x, i * tile_size, slice_size=tile_size, axis=dim)
def _unzip_axis_resources(axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]],
                          resource_env: ResourceEnv):
  """Splits axis_resources into separate dicts for physical and loop resources."""
  physical_axis_resources = {}
  loop_axis_resources = {}
  loop_resource_axes = resource_env.loop_resource_axes
  for axis, raxes in axis_resources.items():
    # Physical (mesh) resources must form a prefix of the assignment; the
    # suffix starting at the first loop resource must be all loop resources.
    first_loop = next((idx for idx, raxis in enumerate(raxes)
                       if raxis in loop_resource_axes), len(raxes))
    physical_axis_resources[axis] = raxes[:first_loop]
    loop_part = loop_axis_resources[axis] = raxes[first_loop:]
    if any(name not in loop_resource_axes for name in loop_part):
      raise NotImplementedError("Loop resources cannot appear before mesh axes "
                                "in the resource_axis argument")
  return physical_axis_resources, loop_axis_resources
def _check_out_avals_vs_out_axes(out_avals: Sequence[core.AbstractValue],
                                 out_axes: Sequence[AxisNamePos],
                                 global_axis_sizes: Dict[AxisName, int]):
  """Verify each output's out_axes covers every axis of this xmap it is mapped along."""
  defined_axes = set(global_axis_sizes)
  for aval, axes in zip(out_avals, out_axes):
    if not isinstance(aval, core.ShapedArray):
      if axes:
        raise AssertionError(f"Only array abstract values can have non-empty "
                             f"out_axes, but {aval} has {axes}")
      continue
    # Only axes introduced by *this* xmap (defined_axes) must appear in
    # out_axes; axes from outer xmaps may legitimately stay in named_shape.
    undeclared_axes = (set(aval.named_shape) - set(axes)) & defined_axes
    if undeclared_axes:
      undeclared_axes_str = sorted([str(axis) for axis in undeclared_axes])
      raise TypeError(f"One of xmap results has an out_axes specification of "
                      f"{axes.user_repr}, but is actually mapped along more axes "
                      f"defined by this xmap call: {', '.join(undeclared_axes_str)}")
# TODO: We should relax this at least for "constructor primitives"
# such as axis_index or zeros.
def _check_no_loop_collectives(jaxpr, loop_axis_resources):
  """Reject references to axes with loop resources inside the computation.

  Loop-resourced axes execute sequentially, so cross-iteration communication
  (e.g. collectives) over them cannot be implemented.
  """
  if isinstance(jaxpr, core.ClosedJaxpr):
    jaxpr = jaxpr.jaxpr
  def subst_no_loop(name):
    if loop_axis_resources.get(name, ()):
      raise RuntimeError(f"Named axes with loop resources assigned to them cannot "
                         f"be referenced inside the xmapped computation (e.g. in "
                         f"collectives), but `{name}` violates that rule")
    return (name,)
  for eqn in jaxpr.eqns:
    # Axis-name substitution visits every axis the primitive references; the
    # substitution function here only validates and returns the name as-is.
    core.subst_axis_names(eqn.primitive, eqn.params, subst_no_loop, traverse=False)
    rec = partial(_check_no_loop_collectives, loop_axis_resources=loop_axis_resources)
    core.traverse_jaxpr_params(rec, eqn.params)
def _fix_inferred_spmd_sharding(jaxpr, resource_env, gen_fresh_name = None):
  """Append an unconstrained sharding_constraint after every equation.

  Routes each original output through a fresh temporary re-bound via
  ``sharding_constraint_p`` with an empty (unconstrained) partition spec,
  which keeps the SPMD partitioner from propagating shardings inferred inside
  one equation out to its consumers.
  """
  from jax.experimental.pjit import sharding_constraint_p, ParsedPartitionSpec
  rec = lambda jaxpr: _fix_inferred_spmd_sharding(jaxpr, resource_env, gen_fresh_name)
  if isinstance(jaxpr, core.ClosedJaxpr):
    return jaxpr.map_jaxpr(rec)
  assert isinstance(jaxpr, core.Jaxpr)
  if gen_fresh_name is None:
    gen_fresh_name = core.gensym([jaxpr])
  new_eqns = []
  for eqn in jaxpr.eqns:
    # Recursively rewrite any sub-jaxprs held in the equation's params.
    new_jaxpr_params = core.traverse_jaxpr_params(rec, eqn.params)
    tmp_outvars = [gen_fresh_name(v.aval) for v in eqn.outvars]
    new_eqns.append(core.JaxprEqn(eqn.invars, tmp_outvars, eqn.primitive,
                                  dict(eqn.params, **new_jaxpr_params), eqn.source_info))
    for outvar, tmpvar in zip(eqn.outvars, tmp_outvars):
      new_eqns.append(core.JaxprEqn([tmpvar], [outvar], sharding_constraint_p,
                                    dict(resource_env=resource_env, axis_resources=ParsedPartitionSpec((), ())),
                                    eqn.source_info))
  return core.Jaxpr(jaxpr.constvars, jaxpr.invars, jaxpr.outvars, new_eqns)
def _flatten_axes(what, tree, axes, tupled_args):
  """Flatten ``axes`` against ``tree``, re-raising mismatch errors with user-facing specs."""
  try:
    return tuple(flatten_axes(what, tree, axes, tupled_args=tupled_args))
  except ValueError:
    pass
  # Replace axis_resources with unparsed versions to avoid revealing internal details
  unparsed = tree_map(lambda parsed: NoQuotesStr(parsed.user_repr), axes)
  flatten_axes(what, tree, unparsed, tupled_args=tupled_args)
  raise AssertionError("Please open a bug request!")  # This should be unreachable
class NoQuotesStr(str):
  """A str whose repr omits the surrounding quotes (for error messages)."""

  def __repr__(self) -> str:
    return str.__str__(self)
# -------- soft_pmap --------
def soft_pmap(fun: Callable, axis_name: Optional[AxisName] = None, in_axes=0
              ) -> Callable:
  """pmap-like API implemented via xmap over all local devices.

  Maps ``fun`` over axis 0 of its arguments, assigning the mapped axis to a
  one-dimensional mesh containing every local device.
  """
  warn("soft_pmap is an experimental feature and probably has bugs!")
  _check_callable(fun)
  axis_name = core._TempAxisName(fun) if axis_name is None else axis_name

  if any(axis != 0 for axis in tree_leaves(in_axes)):
    raise ValueError(f"soft_pmap in_axes leaves must be 0 or None, got {in_axes}")
  # Swap None leaves for a sentinel so tree_map visits them too, then turn
  # each leaf into an xmap-style {positional_dim: axis_name} spec.
  proxy = object()
  in_axes = _replace_nones(proxy, in_axes)
  in_axes = tree_map(lambda i: {i: axis_name} if i is not proxy else {}, in_axes)

  @wraps(fun)
  def f_pmapped(*args, **kwargs):
    mesh_devices = np.array(xb.local_devices())
    with mesh(mesh_devices, ['devices']):
      return xmap(fun, in_axes=in_axes, out_axes={0: axis_name},
                  axis_resources={axis_name: 'devices'})(*args, **kwargs)
  return f_pmapped
# -------- config flags --------
def _thread_local_flag_unsupported(_):
raise RuntimeError("thread-local xmap flags not supported!")
def _clear_compilation_cache(_):
  """Flag-update hook: drop every cached xmap lowering."""
  make_xmap_callable.cache_clear()  # type: ignore
def _ensure_spmd_and(f):
  """Wrap flag hook ``f`` so it refuses to enable without SPMD lowering on."""
  def guarded_update(v):
    if v and not config.experimental_xmap_spmd_lowering:
      raise RuntimeError("This flag requires enabling the experimental_xmap_spmd_lowering flag")
    return f(v)
  return guarded_update
def _ensure_supports_manual_and(f):
  """Wrap flag hook ``f`` so it refuses to enable on jaxlibs lacking MANUAL sharding."""
  def guarded_update(v):
    if v and not hasattr(xc.OpSharding.Type, "MANUAL"):
      raise RuntimeError("This flag requires a version of jaxlib that supports MANUAL sharding type")
    return f(v)
  return guarded_update
# Register xmap's experimental flags. They are global-only (the thread-local
# hook always raises), and toggling any of them clears the lowering cache.
try:
  config.define_bool_state(
      name="experimental_xmap_spmd_lowering",
      default=False,
      help=("When set, multi-device xmap computations will be compiled through "
            "the XLA SPMD partitioner instead of explicit cross-replica collectives. "
            "Not supported on CPU!"),
      update_global_hook=_clear_compilation_cache,
      update_thread_local_hook=_thread_local_flag_unsupported)
  config.define_bool_state(
      name="experimental_xmap_spmd_lowering_manual",
      default=False,
      help=("When set, multi-device xmap computations will be compiled using "
            "the MANUAL partitioning feature of the XLA SPMD partitioner instead of "
            "sharding constraints on vectorized code. "
            "Requires experimental_xmap_spmd_lowering!"),
      update_global_hook=_ensure_supports_manual_and(_ensure_spmd_and(_clear_compilation_cache)),
      update_thread_local_hook=_thread_local_flag_unsupported)
  config.define_bool_state(
      name="experimental_xmap_ensure_fixed_sharding",
      default=False,
      help=("When set and `experimental_xmap_spmd_lowering` is enabled, the lowering will "
            "try to limit the flexibility of the automated SPMD partitioner heuristics "
            "by emitting additional sharding annotations for program intermediates."),
      update_global_hook=_ensure_spmd_and(_clear_compilation_cache),
      update_thread_local_hook=_thread_local_flag_unsupported)
except Exception:
  # Flag registration fails once JAX's flags have been parsed, so this module
  # must be imported early; surface that as an ImportError.
  raise ImportError("jax.experimental.maps has to be imported before JAX flags "
                    "are parsed")
|
import logging
from decimal import Decimal
import asyncio
import aiohttp
from typing import Dict, Any, List, Optional
import json
import time
import ssl
import copy
from hummingbot.logger.struct_logger import METRICS_LOG_LEVEL
from hummingbot.core.utils import async_ttl_cache
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather
from hummingbot.logger import HummingbotLogger
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.event.events import (
MarketEvent,
BuyOrderCreatedEvent,
SellOrderCreatedEvent,
BuyOrderCompletedEvent,
SellOrderCompletedEvent,
MarketOrderFailureEvent,
OrderFilledEvent,
OrderType,
TradeType,
TradeFee
)
from hummingbot.connector.connector_base import ConnectorBase
from hummingbot.connector.connector.balancer.balancer_in_flight_order import BalancerInFlightOrder
from hummingbot.client.settings import GATEAWAY_CA_CERT_PATH, GATEAWAY_CLIENT_CERT_PATH, GATEAWAY_CLIENT_KEY_PATH
from hummingbot.core.utils.eth_gas_station_lookup import get_gas_price
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.config.config_helpers import get_erc20_token_addresses
# Module-level logger singleton, lazily created by BalancerConnector.logger().
s_logger = None
# Commonly used Decimal constants.
s_decimal_0 = Decimal("0")
s_decimal_NaN = Decimal("nan")
# NOTE(review): basicConfig at import time configures the root logger as a
# side effect — presumably intentional for hummingbot metrics; confirm.
logging.basicConfig(level=METRICS_LOG_LEVEL)
class BalancerConnector(ConnectorBase):
    """
    BalancerConnector connects with balancer gateway APIs and provides pricing, user account tracking and trading
    functionality.
    """
    # Timeout (seconds) for gateway API calls.
    API_CALL_TIMEOUT = 10.0
    # Minimum interval (seconds) between balance polling cycles (see tick()).
    POLL_INTERVAL = 60.0
    @classmethod
    def logger(cls) -> HummingbotLogger:
        """Return the module-level logger singleton, creating it on first use."""
        global s_logger
        if s_logger is None:
            s_logger = logging.getLogger(__name__)
        return s_logger
    def __init__(self,
                 trading_pairs: List[str],
                 wallet_private_key: str,
                 ethereum_rpc_url: str,
                 trading_required: bool = True
                 ):
        """
        :param trading_pairs: a list of trading pairs
        :param wallet_private_key: a private key for eth wallet
        :param ethereum_rpc_url: this is usually infura RPC URL
        :param trading_required: Whether actual trading is needed.
        """
        super().__init__()
        self._trading_pairs = trading_pairs
        # Collect unique token symbols across all pairs so ERC-20 addresses
        # are resolved once, up front.
        tokens = set()
        for trading_pair in trading_pairs:
            tokens.update(set(trading_pair.split("-")))
        self._token_addresses = get_erc20_token_addresses(tokens)
        self._wallet_private_key = wallet_private_key
        self._ethereum_rpc_url = ethereum_rpc_url
        self._trading_required = trading_required
        self._ev_loop = asyncio.get_event_loop()
        self._shared_client = None  # lazily-created aiohttp session (_http_client)
        self._last_poll_timestamp = 0.0
        self._in_flight_orders = {}  # client_order_id -> BalancerInFlightOrder
        self._allowances = {}  # token symbol -> approved Decimal amount
        self._status_polling_task = None
        self._auto_approve_task = None
        self._real_time_balance_update = False
@property
def name(self):
return "balancer"
@property
def limit_orders(self) -> List[LimitOrder]:
return [
in_flight_order.to_limit_order()
for in_flight_order in self._in_flight_orders.values()
]
    async def auto_approve(self):
        """
        Automatically approves Balancer contract as a spender for token in trading pairs.
        It first checks if there are any already approved amount (allowance)
        """
        self.logger().info("Checking for allowances...")
        self._allowances = await self.get_allowances()
        for token, amount in self._allowances.items():
            if amount <= s_decimal_0:
                amount_approved = await self.approve_balancer_spender(token)
                if amount_approved > 0:
                    self._allowances[token] = amount_approved
                    # Brief pause between approval transactions.
                    await asyncio.sleep(2)
                else:
                    # Stop on the first failed approval; unapproved tokens
                    # gate readiness through status_dict/has_allowances.
                    break
async def approve_balancer_spender(self, token_symbol: str) -> Decimal:
"""
Approves Balancer contract as a spender for a token.
:param token_symbol: token to approve.
"""
resp = await self._api_request("post",
"eth/approve",
{"tokenAddress": self._token_addresses[token_symbol],
"gasPrice": str(get_gas_price())})
amount_approved = Decimal(str(resp["amount"]))
if amount_approved > 0:
self.logger().info(f"Approved Balancer spender contract for {token_symbol}.")
else:
self.logger().info(f"Balancer spender contract approval failed on {token_symbol}.")
return amount_approved
async def get_allowances(self) -> Dict[str, Decimal]:
"""
Retrieves allowances for token in trading_pairs
:return: A dictionary of token and its allowance (how much Balancer can spend).
"""
ret_val = {}
resp = await self._api_request("post", "eth/allowances",
{"tokenAddressList": ",".join(self._token_addresses.values())})
for address, amount in resp["approvals"].items():
ret_val[self.get_token(address)] = Decimal(str(amount))
return ret_val
@async_ttl_cache(ttl=5, maxsize=10)
async def get_quote_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Optional[Decimal]:
"""
Retrieves a quote price.
:param trading_pair: The market trading pair
:param is_buy: True for an intention to buy, False for an intention to sell
:param amount: The amount required (in base token unit)
:return: The quote price.
"""
try:
base, quote = trading_pair.split("-")
side = "buy" if is_buy else "sell"
resp = await self._api_request("post",
f"balancer/{side}-price",
{"base": self._token_addresses[base],
"quote": self._token_addresses[quote],
"amount": amount})
if resp["price"] is not None:
return Decimal(str(resp["price"]))
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().network(
f"Error getting quote price for {trading_pair} {side} order for {amount} amount.",
exc_info=True,
app_warning_msg=str(e)
)
    async def get_order_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Decimal:
        """
        This is simply the quote price
        """
        # Balancer swap quotes are executable, so order price == quote price.
        return await self.get_quote_price(trading_pair, is_buy, amount)
def buy(self, trading_pair: str, amount: Decimal, order_type: OrderType, price: Decimal) -> str:
"""
Buys an amount of base token for a given price (or cheaper).
:param trading_pair: The market trading pair
:param amount: The order amount (in base token unit)
:param order_type: Any order type is fine, not needed for this.
:param price: The maximum price for the order.
:return: A newly created order id (internal).
"""
return self.place_order(True, trading_pair, amount, price)
def sell(self, trading_pair: str, amount: Decimal, order_type: OrderType, price: Decimal) -> str:
"""
Sells an amount of base token for a given price (or at a higher price).
:param trading_pair: The market trading pair
:param amount: The order amount (in base token unit)
:param order_type: Any order type is fine, not needed for this.
:param price: The minimum price for the order.
:return: A newly created order id (internal).
"""
return self.place_order(False, trading_pair, amount, price)
def place_order(self, is_buy: bool, trading_pair: str, amount: Decimal, price: Decimal) -> str:
"""
Places an order.
:param is_buy: True for buy order
:param trading_pair: The market trading pair
:param amount: The order amount (in base token unit)
:param price: The minimum price for the order.
:return: A newly created order id (internal).
"""
side = TradeType.BUY if is_buy else TradeType.SELL
order_id = f"{side.name.lower()}-{trading_pair}-{get_tracking_nonce()}"
safe_ensure_future(self._create_order(side, order_id, trading_pair, amount, price))
return order_id
async def _create_order(self,
trade_type: TradeType,
order_id: str,
trading_pair: str,
amount: Decimal,
price: Decimal):
"""
Calls buy or sell API end point to place an order, starts tracking the order and triggers relevant order events.
:param trade_type: BUY or SELL
:param order_id: Internal order id (also called client_order_id)
:param trading_pair: The market to place order
:param amount: The order amount (in base token value)
:param price: The order price
"""
amount = self.quantize_order_amount(trading_pair, amount)
price = self.quantize_order_price(trading_pair, price)
base, quote = trading_pair.split("-")
gas_price = get_gas_price()
api_params = {"base": self._token_addresses[base],
"quote": self._token_addresses[quote],
"amount": str(amount),
"maxPrice": str(price),
"gasPrice": str(gas_price),
}
self.start_tracking_order(order_id, None, trading_pair, trade_type, price, amount)
try:
order_result = await self._api_request("post", f"balancer/{trade_type.name.lower()}", api_params)
hash = order_result["txHash"]
status = order_result["status"]
tracked_order = self._in_flight_orders.get(order_id)
if tracked_order is not None:
self.logger().info(f"Created {trade_type.name} order {order_id} txHash: {hash} "
f"for {amount} {trading_pair}.")
tracked_order.exchange_order_id = hash
if int(status) == 1:
tracked_order.fee_asset = "ETH"
tracked_order.executed_amount_base = amount
tracked_order.executed_amount_quote = amount * price
tracked_order.fee_paid = Decimal(str(order_result["gasUsed"])) * gas_price / Decimal(str(1e9))
event_tag = MarketEvent.BuyOrderCreated if trade_type is TradeType.BUY else MarketEvent.SellOrderCreated
event_class = BuyOrderCreatedEvent if trade_type is TradeType.BUY else SellOrderCreatedEvent
self.trigger_event(event_tag, event_class(self.current_timestamp, OrderType.LIMIT, trading_pair, amount,
price, order_id, hash))
self.trigger_event(MarketEvent.OrderFilled,
OrderFilledEvent(
self.current_timestamp,
tracked_order.client_order_id,
tracked_order.trading_pair,
tracked_order.trade_type,
tracked_order.order_type,
price,
amount,
TradeFee(0.0, [("ETH", tracked_order.fee_paid)]),
hash
))
event_tag = MarketEvent.BuyOrderCompleted if tracked_order.trade_type is TradeType.BUY \
else MarketEvent.SellOrderCompleted
event_class = BuyOrderCompletedEvent if tracked_order.trade_type is TradeType.BUY \
else SellOrderCompletedEvent
self.trigger_event(event_tag,
event_class(self.current_timestamp,
tracked_order.client_order_id,
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.fee_asset,
tracked_order.executed_amount_base,
tracked_order.executed_amount_quote,
tracked_order.fee_paid,
tracked_order.order_type))
self.stop_tracking_order(tracked_order.client_order_id)
else:
self.trigger_event(MarketEvent.OrderFailure,
MarketOrderFailureEvent(self.current_timestamp, order_id, OrderType.LIMIT))
except asyncio.CancelledError:
raise
except Exception as e:
self.stop_tracking_order(order_id)
self.logger().network(
f"Error submitting {trade_type.name} order to Balancer for "
f"{amount} {trading_pair} "
f"{price}.",
exc_info=True,
app_warning_msg=str(e)
)
self.trigger_event(MarketEvent.OrderFailure,
MarketOrderFailureEvent(self.current_timestamp, order_id, OrderType.LIMIT))
def start_tracking_order(self,
order_id: str,
exchange_order_id: str,
trading_pair: str,
trade_type: TradeType,
price: Decimal,
amount: Decimal):
"""
Starts tracking an order by simply adding it into _in_flight_orders dictionary.
"""
self._in_flight_orders[order_id] = BalancerInFlightOrder(
client_order_id=order_id,
exchange_order_id=exchange_order_id,
trading_pair=trading_pair,
order_type=OrderType.LIMIT,
trade_type=trade_type,
price=price,
amount=amount
)
def stop_tracking_order(self, order_id: str):
"""
Stops tracking an order by simply removing it from _in_flight_orders dictionary.
"""
if order_id in self._in_flight_orders:
del self._in_flight_orders[order_id]
def get_taker_order_type(self):
return OrderType.LIMIT
def get_order_price_quantum(self, trading_pair: str, price: Decimal) -> Decimal:
return Decimal("1e-15")
def get_order_size_quantum(self, trading_pair: str, order_size: Decimal) -> Decimal:
return Decimal("1e-15")
@property
def ready(self):
return all(self.status_dict.values())
def has_allowances(self) -> bool:
"""
Checks if all tokens have allowance (an amount approved)
"""
return len(self._allowances.values()) == len(self._token_addresses.values()) and \
all(amount > s_decimal_0 for amount in self._allowances.values())
@property
def status_dict(self) -> Dict[str, bool]:
return {
"account_balance": len(self._account_balances) > 0 if self._trading_required else True,
"allowances": self.has_allowances() if self._trading_required else True
}
async def start_network(self):
if self._trading_required:
self._status_polling_task = safe_ensure_future(self._status_polling_loop())
self._auto_approve_task = safe_ensure_future(self.auto_approve())
async def stop_network(self):
if self._status_polling_task is not None:
self._status_polling_task.cancel()
self._status_polling_task = None
if self._auto_approve_task is not None:
self._auto_approve_task.cancel()
self._auto_approve_task = None
async def check_network(self) -> NetworkStatus:
try:
response = await self._api_request("get", "api")
if response["status"] != "ok":
raise Exception(f"Error connecting to Gateway API. HTTP status is {response.status}.")
except asyncio.CancelledError:
raise
except Exception:
return NetworkStatus.NOT_CONNECTED
return NetworkStatus.CONNECTED
def tick(self, timestamp: float):
"""
Is called automatically by the clock for each clock's tick (1 second by default).
It checks if status polling task is due for execution.
"""
if time.time() - self._last_poll_timestamp > self.POLL_INTERVAL:
if not self._poll_notifier.is_set():
self._poll_notifier.set()
    async def _status_polling_loop(self):
        """Background task: refresh balances each time tick() sets the notifier."""
        while True:
            try:
                # A fresh Event per cycle; tick() sets it once POLL_INTERVAL
                # has elapsed since the last successful poll.
                self._poll_notifier = asyncio.Event()
                await self._poll_notifier.wait()
                await safe_gather(
                    self._update_balances(),
                )
                self._last_poll_timestamp = self.current_timestamp
            except asyncio.CancelledError:
                raise
            except Exception as e:
                self.logger().error(str(e), exc_info=True)
                self.logger().network("Unexpected error while fetching account updates.",
                                      exc_info=True,
                                      app_warning_msg="Could not fetch balances from Gateway API.")
                # Back off briefly before retrying after an error.
                await asyncio.sleep(0.5)
def get_token(self, token_address: str) -> str:
return [k for k, v in self._token_addresses.items() if v == token_address][0]
async def _update_balances(self):
    """
    Calls Eth API to update total and available balances.
    """
    local_asset_names = set(self._account_balances.keys())
    remote_asset_names = set()
    resp_json = await self._api_request("post",
                                        "eth/balances",
                                        {"tokenAddressList": ",".join(self._token_addresses.values())})
    for token, bal in resp_json["balances"].items():
        # Keys may be addresses rather than symbols; map back to the symbol when
        # the key is too long to be a ticker — TODO confirm the 4-char heuristic.
        if len(token) > 4:
            token = self.get_token(token)
        # On-chain balances have no "on hold" notion, so available == total.
        self._account_available_balances[token] = Decimal(str(bal))
        self._account_balances[token] = Decimal(str(bal))
        remote_asset_names.add(token)
    # Drop assets no longer present in the gateway response.
    asset_names_to_remove = local_asset_names.difference(remote_asset_names)
    for asset_name in asset_names_to_remove:
        del self._account_available_balances[asset_name]
        del self._account_balances[asset_name]
    self._in_flight_orders_snapshot = {k: copy.copy(v) for k, v in self._in_flight_orders.items()}
    self._in_flight_orders_snapshot_timestamp = self.current_timestamp
async def _http_client(self) -> aiohttp.ClientSession:
    """
    :returns Shared client session instance
    """
    if self._shared_client is None:
        # Mutual TLS: the gateway requires a client certificate signed by its CA.
        ssl_ctx = ssl.create_default_context(cafile=GATEAWAY_CA_CERT_PATH)
        ssl_ctx.load_cert_chain(GATEAWAY_CLIENT_CERT_PATH, GATEAWAY_CLIENT_KEY_PATH)
        # NOTE(review): `ssl_context=` is deprecated in newer aiohttp in favor of
        # `ssl=` — confirm against the pinned aiohttp version before changing.
        conn = aiohttp.TCPConnector(ssl_context=ssl_ctx)
        self._shared_client = aiohttp.ClientSession(connector=conn)
    return self._shared_client
async def _api_request(self,
                       method: str,
                       path_url: str,
                       params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    Sends an aiohttp request and waits for a response.
    :param method: The HTTP method, e.g. get or post
    :param path_url: The path url or the API end point
    :param params: A dictionary of required params for the end point
    :returns A response in json format.
    :raises IOError: when the gateway responds with a non-200 HTTP status.
    """
    # BUG FIX: the default was a mutable `{}` that the post branch mutated
    # (adding "privateKey"), leaking the key into the shared default and into
    # callers' dicts. Copy instead of mutating in place.
    params = dict(params or {})
    # BUG FIX: the original f-strings reused the enclosing double quote inside
    # the braces (f"...{global_config_map["..."]}...") — a SyntaxError before
    # Python 3.12.
    host = global_config_map['gateway_api_host'].value
    port = global_config_map['gateway_api_port'].value
    url = f"https://{host}:{port}/{path_url}"
    client = await self._http_client()
    if method == "get":
        if len(params) > 0:
            response = await client.get(url, params=params)
        else:
            response = await client.get(url)
    elif method == "post":
        private_key = self._wallet_private_key
        if not private_key.startswith("0x"):
            private_key = "0x" + private_key
        params["privateKey"] = private_key
        response = await client.post(url, data=params)
    parsed_response = json.loads(await response.text())
    if response.status != 200:
        err_msg = ""
        if "error" in parsed_response:
            err_msg = f" Message: {parsed_response['error']}"
        raise IOError(f"Error fetching data from {url}. HTTP status is {response.status}.{err_msg}")
    if "error" in parsed_response:
        raise Exception(f"Error: {parsed_response['error']}")
    return parsed_response
async def cancel_all(self, timeout_seconds: float) -> List[CancellationResult]:
    """On-chain swaps cannot be cancelled; always reports an empty result list."""
    return []
@property
def in_flight_orders(self) -> Dict[str, BalancerInFlightOrder]:
    """Currently tracked orders, keyed by client order id."""
    return self._in_flight_orders
| import logging
from decimal import Decimal
import asyncio
import aiohttp
from typing import Dict, Any, List, Optional
import json
import time
import ssl
import copy
from hummingbot.logger.struct_logger import METRICS_LOG_LEVEL
from hummingbot.core.utils import async_ttl_cache
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather
from hummingbot.logger import HummingbotLogger
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.event.events import (
MarketEvent,
BuyOrderCreatedEvent,
SellOrderCreatedEvent,
BuyOrderCompletedEvent,
SellOrderCompletedEvent,
MarketOrderFailureEvent,
OrderFilledEvent,
OrderType,
TradeType,
TradeFee
)
from hummingbot.connector.connector_base import ConnectorBase
from hummingbot.connector.connector.balancer.balancer_in_flight_order import BalancerInFlightOrder
from hummingbot.client.settings import GATEAWAY_CA_CERT_PATH, GATEAWAY_CLIENT_CERT_PATH, GATEAWAY_CLIENT_KEY_PATH
from hummingbot.core.utils.eth_gas_station_lookup import get_gas_price
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.config.config_helpers import get_erc20_token_addresses
s_logger = None
s_decimal_0 = Decimal("0")
s_decimal_NaN = Decimal("nan")
logging.basicConfig(level=METRICS_LOG_LEVEL)
class BalancerConnector(ConnectorBase):
"""
BalancerConnector connects with balancer gateway APIs and provides pricing, user account tracking and trading
functionality.
"""
API_CALL_TIMEOUT = 10.0
POLL_INTERVAL = 60.0
@classmethod
def logger(cls) -> HummingbotLogger:
    """Lazily creates and caches the shared module-level logger."""
    global s_logger
    if s_logger is None:
        s_logger = logging.getLogger(__name__)
    return s_logger
def __init__(self,
             trading_pairs: List[str],
             wallet_private_key: str,
             ethereum_rpc_url: str,
             trading_required: bool = True
             ):
    """
    :param trading_pairs: a list of trading pairs
    :param wallet_private_key: a private key for eth wallet
    :param ethereum_rpc_url: this is usually infura RPC URL
    :param trading_required: Whether actual trading is needed.
    """
    super().__init__()
    self._trading_pairs = trading_pairs
    # Collect distinct tokens across all pairs, then resolve ERC-20 addresses.
    tokens = set()
    for trading_pair in trading_pairs:
        tokens.update(set(trading_pair.split("-")))
    self._token_addresses = get_erc20_token_addresses(tokens)
    self._wallet_private_key = wallet_private_key
    self._ethereum_rpc_url = ethereum_rpc_url
    self._trading_required = trading_required
    self._ev_loop = asyncio.get_event_loop()
    self._shared_client = None  # lazily created aiohttp session (see _http_client)
    self._last_poll_timestamp = 0.0
    self._in_flight_orders = {}  # client_order_id -> BalancerInFlightOrder
    self._allowances = {}  # token symbol -> approved Decimal amount
    self._status_polling_task = None
    self._auto_approve_task = None
    self._real_time_balance_update = False
@property
def name(self):
    """Connector identifier used throughout the client."""
    return "balancer"
@property
def limit_orders(self) -> List[LimitOrder]:
    """All tracked in-flight orders converted to LimitOrder objects."""
    tracked = self._in_flight_orders.values()
    return [order.to_limit_order() for order in tracked]
async def auto_approve(self):
    """
    Automatically approves Balancer contract as a spender for token in trading pairs.
    It first checks if there are any already approved amount (allowance)
    """
    self.logger().info("Checking for allowances...")
    self._allowances = await self.get_allowances()
    for token, amount in self._allowances.items():
        if amount <= s_decimal_0:
            amount_approved = await self.approve_balancer_spender(token)
            if amount_approved > 0:
                self._allowances[token] = amount_approved
                # Pause between approval transactions.
                await asyncio.sleep(2)
            else:
                # A failed approval aborts the remaining tokens.
                break
async def approve_balancer_spender(self, token_symbol: str) -> Decimal:
    """
    Approves Balancer contract as a spender for a token.
    :param token_symbol: token to approve.
    :return: the approved amount; 0 indicates the approval failed.
    """
    resp = await self._api_request("post",
                                   "eth/approve",
                                   {"tokenAddress": self._token_addresses[token_symbol],
                                    "gasPrice": str(get_gas_price())})
    amount_approved = Decimal(str(resp["amount"]))
    if amount_approved > 0:
        self.logger().info(f"Approved Balancer spender contract for {token_symbol}.")
    else:
        self.logger().info(f"Balancer spender contract approval failed on {token_symbol}.")
    return amount_approved
async def get_allowances(self) -> Dict[str, Decimal]:
    """
    Retrieves allowances for token in trading_pairs
    :return: A dictionary of token and its allowance (how much Balancer can spend).
    """
    payload = {"tokenAddressList": ",".join(self._token_addresses.values())}
    resp = await self._api_request("post", "eth/allowances", payload)
    return {
        self.get_token(address): Decimal(str(amount))
        for address, amount in resp["approvals"].items()
    }
@async_ttl_cache(ttl=5, maxsize=10)
async def get_quote_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Optional[Decimal]:
    """
    Retrieves a quote price.
    :param trading_pair: The market trading pair
    :param is_buy: True for an intention to buy, False for an intention to sell
    :param amount: The amount required (in base token unit)
    :return: The quote price, or None when the request fails.
    """
    # BUG FIX: parse outside the try block. If splitting the pair raised inside,
    # the except handler's log message referenced `side` before assignment and
    # itself failed with NameError, masking the real error.
    base, quote = trading_pair.split("-")
    side = "buy" if is_buy else "sell"
    try:
        resp = await self._api_request("post",
                                       f"balancer/{side}-price",
                                       {"base": self._token_addresses[base],
                                        "quote": self._token_addresses[quote],
                                        "amount": amount})
        if resp["price"] is not None:
            return Decimal(str(resp["price"]))
    except asyncio.CancelledError:
        raise
    except Exception as e:
        self.logger().network(
            f"Error getting quote price for {trading_pair} {side} order for {amount} amount.",
            exc_info=True,
            app_warning_msg=str(e)
        )
async def get_order_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Decimal:
    """The order price is simply the (TTL-cached) quote price."""
    quote_price = await self.get_quote_price(trading_pair, is_buy, amount)
    return quote_price
def buy(self, trading_pair: str, amount: Decimal, order_type: OrderType, price: Decimal) -> str:
    """
    Buys an amount of base token for a given price (or cheaper).
    :param trading_pair: The market trading pair
    :param amount: The order amount (in base token unit)
    :param order_type: Any order type is fine, not needed for this.
    :param price: The maximum price for the order.
    :return: A newly created order id (internal).
    """
    is_buy = True
    return self.place_order(is_buy, trading_pair, amount, price)
def sell(self, trading_pair: str, amount: Decimal, order_type: OrderType, price: Decimal) -> str:
    """
    Sells an amount of base token for a given price (or at a higher price).
    :param trading_pair: The market trading pair
    :param amount: The order amount (in base token unit)
    :param order_type: Any order type is fine, not needed for this.
    :param price: The minimum price for the order.
    :return: A newly created order id (internal).
    """
    is_buy = False
    return self.place_order(is_buy, trading_pair, amount, price)
def place_order(self, is_buy: bool, trading_pair: str, amount: Decimal, price: Decimal) -> str:
    """
    Places an order asynchronously and returns its internal id immediately.
    :param is_buy: True for buy order
    :param trading_pair: The market trading pair
    :param amount: The order amount (in base token unit)
    :param price: The limit price for the order.
    :return: A newly created order id (internal).
    """
    side = TradeType.BUY if is_buy else TradeType.SELL
    nonce = get_tracking_nonce()
    order_id = f"{side.name.lower()}-{trading_pair}-{nonce}"
    safe_ensure_future(self._create_order(side, order_id, trading_pair, amount, price))
    return order_id
async def _create_order(self,
                        trade_type: TradeType,
                        order_id: str,
                        trading_pair: str,
                        amount: Decimal,
                        price: Decimal):
    """
    Calls buy or sell API end point to place an order, starts tracking the order and triggers relevant order events.
    :param trade_type: BUY or SELL
    :param order_id: Internal order id (also called client_order_id)
    :param trading_pair: The market to place order
    :param amount: The order amount (in base token value)
    :param price: The order price
    """
    amount = self.quantize_order_amount(trading_pair, amount)
    price = self.quantize_order_price(trading_pair, price)
    base, quote = trading_pair.split("-")
    gas_price = get_gas_price()
    api_params = {"base": self._token_addresses[base],
                  "quote": self._token_addresses[quote],
                  "amount": str(amount),
                  "maxPrice": str(price),
                  "gasPrice": str(gas_price),
                  }
    self.start_tracking_order(order_id, None, trading_pair, trade_type, price, amount)
    try:
        order_result = await self._api_request("post", f"balancer/{trade_type.name.lower()}", api_params)
        # NOTE: `hash` shadows the builtin; it holds the transaction hash string.
        hash = order_result["txHash"]
        status = order_result["status"]
        tracked_order = self._in_flight_orders.get(order_id)
        if tracked_order is not None:
            self.logger().info(f"Created {trade_type.name} order {order_id} txHash: {hash} "
                               f"for {amount} {trading_pair}.")
            tracked_order.exchange_order_id = hash
            if int(status) == 1:
                tracked_order.fee_asset = "ETH"
                tracked_order.executed_amount_base = amount
                tracked_order.executed_amount_quote = amount * price
                # gasUsed * gas price (gwei) / 1e9 -> fee in ETH
                tracked_order.fee_paid = Decimal(str(order_result["gasUsed"])) * gas_price / Decimal(str(1e9))
                event_tag = MarketEvent.BuyOrderCreated if trade_type is TradeType.BUY else MarketEvent.SellOrderCreated
                event_class = BuyOrderCreatedEvent if trade_type is TradeType.BUY else SellOrderCreatedEvent
                self.trigger_event(event_tag, event_class(self.current_timestamp, OrderType.LIMIT, trading_pair, amount,
                                                          price, order_id, hash))
                # An AMM swap settles atomically, so created, filled and completed
                # events are all emitted in one pass.
                self.trigger_event(MarketEvent.OrderFilled,
                                   OrderFilledEvent(
                                       self.current_timestamp,
                                       tracked_order.client_order_id,
                                       tracked_order.trading_pair,
                                       tracked_order.trade_type,
                                       tracked_order.order_type,
                                       price,
                                       amount,
                                       TradeFee(0.0, [("ETH", tracked_order.fee_paid)]),
                                       hash
                                   ))
                event_tag = MarketEvent.BuyOrderCompleted if tracked_order.trade_type is TradeType.BUY \
                    else MarketEvent.SellOrderCompleted
                event_class = BuyOrderCompletedEvent if tracked_order.trade_type is TradeType.BUY \
                    else SellOrderCompletedEvent
                self.trigger_event(event_tag,
                                   event_class(self.current_timestamp,
                                               tracked_order.client_order_id,
                                               tracked_order.base_asset,
                                               tracked_order.quote_asset,
                                               tracked_order.fee_asset,
                                               tracked_order.executed_amount_base,
                                               tracked_order.executed_amount_quote,
                                               tracked_order.fee_paid,
                                               tracked_order.order_type))
                self.stop_tracking_order(tracked_order.client_order_id)
            else:
                # status != 1 -> the transaction failed on-chain.
                self.trigger_event(MarketEvent.OrderFailure,
                                   MarketOrderFailureEvent(self.current_timestamp, order_id, OrderType.LIMIT))
    except asyncio.CancelledError:
        raise
    except Exception as e:
        self.stop_tracking_order(order_id)
        self.logger().network(
            f"Error submitting {trade_type.name} order to Balancer for "
            f"{amount} {trading_pair} "
            f"{price}.",
            exc_info=True,
            app_warning_msg=str(e)
        )
        self.trigger_event(MarketEvent.OrderFailure,
                           MarketOrderFailureEvent(self.current_timestamp, order_id, OrderType.LIMIT))
def start_tracking_order(self,
                         order_id: str,
                         exchange_order_id: str,
                         trading_pair: str,
                         trade_type: TradeType,
                         price: Decimal,
                         amount: Decimal):
    """Registers a new in-flight order under its client order id."""
    order = BalancerInFlightOrder(
        client_order_id=order_id,
        exchange_order_id=exchange_order_id,
        trading_pair=trading_pair,
        order_type=OrderType.LIMIT,
        trade_type=trade_type,
        price=price,
        amount=amount,
    )
    self._in_flight_orders[order_id] = order
def stop_tracking_order(self, order_id: str):
    """Forgets the order if tracked; no-op otherwise."""
    self._in_flight_orders.pop(order_id, None)
def get_taker_order_type(self):
    """Taker orders are represented as LIMIT orders on this connector."""
    return OrderType.LIMIT
def get_order_price_quantum(self, trading_pair: str, price: Decimal) -> Decimal:
    """Smallest allowed price increment (fixed for all Balancer pairs)."""
    return Decimal("1e-15")
def get_order_size_quantum(self, trading_pair: str, order_size: Decimal) -> Decimal:
    """Smallest allowed order-size increment (fixed for all Balancer pairs)."""
    return Decimal("1e-15")
@property
def ready(self):
    """True once every readiness check in status_dict passes."""
    return all(self.status_dict.values())
def has_allowances(self) -> bool:
    """
    Checks if all tokens have allowance (an amount approved)
    """
    allowances = self._allowances
    if len(allowances) != len(self._token_addresses):
        return False
    return all(amount > s_decimal_0 for amount in allowances.values())
@property
def status_dict(self) -> Dict[str, bool]:
    """Readiness checks; balance and allowance checks only apply when trading."""
    if not self._trading_required:
        return {"account_balance": True, "allowances": True}
    return {
        "account_balance": len(self._account_balances) > 0,
        "allowances": self.has_allowances(),
    }
async def start_network(self):
    """Starts background tasks; they are only needed when real trading is required."""
    if self._trading_required:
        # Balance polling and token-allowance approval both run until cancelled.
        self._status_polling_task = safe_ensure_future(self._status_polling_loop())
        self._auto_approve_task = safe_ensure_future(self.auto_approve())
async def stop_network(self):
    """Cancels and clears any running background tasks."""
    for attr_name in ("_status_polling_task", "_auto_approve_task"):
        task = getattr(self, attr_name)
        if task is not None:
            task.cancel()
            setattr(self, attr_name, None)
async def check_network(self) -> NetworkStatus:
    """Pings the gateway's 'api' endpoint to determine connectivity.

    :return: NetworkStatus.CONNECTED when the gateway reports status "ok",
             NetworkStatus.NOT_CONNECTED on any failure.
    """
    try:
        response = await self._api_request("get", "api")
        if response["status"] != "ok":
            # BUG FIX: `response` is a parsed-JSON dict (see _api_request), not an
            # aiohttp response — `response.status` raised AttributeError here.
            raise Exception(f"Error connecting to Gateway API. Gateway status is {response['status']}.")
    except asyncio.CancelledError:
        raise
    except Exception:
        return NetworkStatus.NOT_CONNECTED
    return NetworkStatus.CONNECTED
def tick(self, timestamp: float):
    """
    Is called automatically by the clock for each clock's tick (1 second by default).
    It checks if status polling task is due for execution.
    """
    # NOTE(review): `_poll_notifier` is only assigned inside `_status_polling_loop`;
    # a tick arriving before that loop starts would raise AttributeError — confirm.
    if time.time() - self._last_poll_timestamp > self.POLL_INTERVAL:
        if not self._poll_notifier.is_set():
            self._poll_notifier.set()
async def _status_polling_loop(self):
    """Background loop: waits for `tick` to signal, then refreshes balances."""
    while True:
        try:
            # Fresh Event each cycle; `tick()` sets it once POLL_INTERVAL elapses.
            self._poll_notifier = asyncio.Event()
            await self._poll_notifier.wait()
            await safe_gather(
                self._update_balances(),
            )
            self._last_poll_timestamp = self.current_timestamp
        except asyncio.CancelledError:
            raise
        except Exception as e:
            self.logger().error(str(e), exc_info=True)
            self.logger().network("Unexpected error while fetching account updates.",
                                  exc_info=True,
                                  app_warning_msg="Could not fetch balances from Gateway API.")
            # Brief back-off so a persistent failure does not spin the loop.
            await asyncio.sleep(0.5)
def get_token(self, token_address: str) -> str:
    """Reverse lookup of a token symbol from its ERC-20 address.

    Raises IndexError when the address is unknown (unchanged behavior).
    """
    symbols = [symbol for symbol, address in self._token_addresses.items()
               if address == token_address]
    return symbols[0]
async def _update_balances(self):
    """
    Calls Eth API to update total and available balances.
    """
    local_asset_names = set(self._account_balances.keys())
    remote_asset_names = set()
    resp_json = await self._api_request("post",
                                        "eth/balances",
                                        {"tokenAddressList": ",".join(self._token_addresses.values())})
    for token, bal in resp_json["balances"].items():
        # Keys may be addresses rather than symbols; map back to the symbol when
        # the key is too long to be a ticker — TODO confirm the 4-char heuristic.
        if len(token) > 4:
            token = self.get_token(token)
        # On-chain balances have no "on hold" notion, so available == total.
        self._account_available_balances[token] = Decimal(str(bal))
        self._account_balances[token] = Decimal(str(bal))
        remote_asset_names.add(token)
    # Drop assets no longer present in the gateway response.
    asset_names_to_remove = local_asset_names.difference(remote_asset_names)
    for asset_name in asset_names_to_remove:
        del self._account_available_balances[asset_name]
        del self._account_balances[asset_name]
    self._in_flight_orders_snapshot = {k: copy.copy(v) for k, v in self._in_flight_orders.items()}
    self._in_flight_orders_snapshot_timestamp = self.current_timestamp
async def _http_client(self) -> aiohttp.ClientSession:
    """
    :returns Shared client session instance
    """
    if self._shared_client is None:
        # Mutual TLS: the gateway requires a client certificate signed by its CA.
        ssl_ctx = ssl.create_default_context(cafile=GATEAWAY_CA_CERT_PATH)
        ssl_ctx.load_cert_chain(GATEAWAY_CLIENT_CERT_PATH, GATEAWAY_CLIENT_KEY_PATH)
        # NOTE(review): `ssl_context=` is deprecated in newer aiohttp in favor of
        # `ssl=` — confirm against the pinned aiohttp version before changing.
        conn = aiohttp.TCPConnector(ssl_context=ssl_ctx)
        self._shared_client = aiohttp.ClientSession(connector=conn)
    return self._shared_client
async def _api_request(self,
                       method: str,
                       path_url: str,
                       params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    Sends an aiohttp request and waits for a response.
    :param method: The HTTP method, e.g. get or post
    :param path_url: The path url or the API end point
    :param params: A dictionary of required params for the end point
    :returns A response in json format.
    :raises IOError: when the gateway responds with a non-200 HTTP status.
    """
    # BUG FIX: the default was a mutable `{}` that the post branch mutated
    # (adding "privateKey"), leaking the key into the shared default and into
    # callers' dicts. Copy instead of mutating in place.
    params = dict(params or {})
    host = global_config_map['gateway_api_host'].value
    port = global_config_map['gateway_api_port'].value
    url = f"https://{host}:{port}/{path_url}"
    client = await self._http_client()
    if method == "get":
        if len(params) > 0:
            response = await client.get(url, params=params)
        else:
            response = await client.get(url)
    elif method == "post":
        private_key = self._wallet_private_key
        if not private_key.startswith("0x"):
            private_key = "0x" + private_key
        params["privateKey"] = private_key
        response = await client.post(url, data=params)
    parsed_response = json.loads(await response.text())
    if response.status != 200:
        err_msg = ""
        if "error" in parsed_response:
            err_msg = f" Message: {parsed_response['error']}"
        raise IOError(f"Error fetching data from {url}. HTTP status is {response.status}.{err_msg}")
    if "error" in parsed_response:
        raise Exception(f"Error: {parsed_response['error']}")
    return parsed_response
async def cancel_all(self, timeout_seconds: float) -> List[CancellationResult]:
    """On-chain swaps cannot be cancelled; always reports an empty result list."""
    return []
@property
def in_flight_orders(self) -> Dict[str, BalancerInFlightOrder]:
    """Currently tracked orders, keyed by client order id."""
    return self._in_flight_orders
|
from datetime import datetime, timedelta
from data_manager import DataManager
from flight_search import FlightSearch
from notification_manager import NotificationManager
ORIGIN_CITY_IATA = 'LON'


def main():
    """Check flight prices for each sheet destination and email alerts on deals."""
    data_manager = DataManager()
    sheet_data = data_manager.read_data()
    flight_search = FlightSearch()
    notification_manager = NotificationManager()

    # Back-fill missing IATA codes in the sheet before searching.
    if len(sheet_data) > 0:
        for row in sheet_data:
            if row['iataCode'] == '':
                row['iataCode'] = flight_search.search_iata(row['city'])
        data_manager.destination_data = sheet_data
        data_manager.update_data()
        sheet_data = data_manager.read_data()

    destinations = {
        data["iataCode"]: {
            "id": data["id"],
            "city": data["city"],
            "price": data["lowestPrice"]
        } for data in sheet_data}
    tomorrow = datetime.now() + timedelta(days=1)
    six_months_from_today = datetime.now() + timedelta(days=180)
    # BUG FIX: iterating a dict yields its keys (IATA strings), so the old
    # `destination['iataCode']` indexed a string and could never work.
    for iata_code, info in destinations.items():
        flight = flight_search.search_flights(
            ORIGIN_CITY_IATA,
            iata_code,
            from_time=tomorrow,
            to_time=six_months_from_today
        )
        if flight is None:
            continue
        # BUG FIX: `sheet_data` is a list; the lowest price lives in `destinations`.
        if flight.price < info['price']:
            users = data_manager.get_customer_emails()
            emails = [row['email'] for row in users]
            # BUG FIX: the message continuation lines were standalone expression
            # statements, so only the first line ever reached the email; the
            # stop-over line also nested single quotes inside a single-quoted
            # f-string (a SyntaxError before Python 3.12).
            msg = (
                f'Low price alert! Only £{flight.price} to fly from '
                f'{flight.origin_city}-{flight.origin_airport} '
                f'to {flight.destination_city}-{flight.destination_airport}, '
                f'from {flight.out_date} to {flight.return_date}.'
            )
            # Flight data is read via attributes elsewhere (flight.price), so
            # stop-over fields are read the same way — TODO confirm FlightData API.
            if flight.stop_overs > 0:
                msg += f'\nFlight has {flight.stop_overs} stop over, via {flight.via_city}'
            link = (
                f'https://www.google.co.uk/flights?hl=en#flt='
                f'{flight.origin_airport}.{flight.destination_airport}'
                f'.{flight.out_date}*{flight.destination_airport}.'
                f'{flight.origin_airport}.{flight.return_date}'
            )
            notification_manager.send_emails(emails, msg, link)


if __name__ == '__main__':
    main()
| from datetime import datetime, timedelta
from data_manager import DataManager
from flight_search import FlightSearch
from notification_manager import NotificationManager
ORIGIN_CITY_IATA = 'LON'


def main():
    """Check flight prices for each sheet destination and email alerts on deals."""
    data_manager = DataManager()
    sheet_data = data_manager.read_data()
    flight_search = FlightSearch()
    notification_manager = NotificationManager()

    # Back-fill missing IATA codes in the sheet before searching.
    if len(sheet_data) > 0:
        for row in sheet_data:
            if row['iataCode'] == '':
                row['iataCode'] = flight_search.search_iata(row['city'])
        data_manager.destination_data = sheet_data
        data_manager.update_data()
        sheet_data = data_manager.read_data()

    destinations = {
        data["iataCode"]: {
            "id": data["id"],
            "city": data["city"],
            "price": data["lowestPrice"]
        } for data in sheet_data}
    tomorrow = datetime.now() + timedelta(days=1)
    six_months_from_today = datetime.now() + timedelta(days=180)
    # BUG FIX: iterating a dict yields its keys (IATA strings), so the old
    # `destination['iataCode']` indexed a string and could never work.
    for iata_code, info in destinations.items():
        flight = flight_search.search_flights(
            ORIGIN_CITY_IATA,
            iata_code,
            from_time=tomorrow,
            to_time=six_months_from_today
        )
        if flight is None:
            continue
        # BUG FIX: `sheet_data` is a list; the lowest price lives in `destinations`.
        if flight.price < info['price']:
            users = data_manager.get_customer_emails()
            emails = [row['email'] for row in users]
            # BUG FIX: the message continuation lines were standalone expression
            # statements, so only the first line ever reached the email.
            msg = (
                f'Low price alert! Only £{flight.price} to fly from '
                f'{flight.origin_city}-{flight.origin_airport} '
                f'to {flight.destination_city}-{flight.destination_airport}, '
                f'from {flight.out_date} to {flight.return_date}.'
            )
            # Flight data is read via attributes elsewhere (flight.price), so
            # stop-over fields are read the same way — TODO confirm FlightData API.
            if flight.stop_overs > 0:
                msg += f'\nFlight has {flight.stop_overs} stop over, via {flight.via_city}'
            link = (
                f'https://www.google.co.uk/flights?hl=en#flt='
                f'{flight.origin_airport}.{flight.destination_airport}'
                f'.{flight.out_date}*{flight.destination_airport}.'
                f'{flight.origin_airport}.{flight.return_date}'
            )
            notification_manager.send_emails(emails, msg, link)


if __name__ == '__main__':
    main()
|
import os
import apsw
from typing import Union, Tuple, Set, List
from itertools import chain
from decimal import Decimal
from collections import namedtuple
from multiprocessing import Manager
from binascii import unhexlify
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.util import class_logger
from lbry.wallet.database import query, constraints_to_sql
from lbry.schema.tags import clean_tags
from lbry.schema.mime_types import guess_stream_type
from lbry.wallet import Ledger, RegTestLedger
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.server.db.canonical import register_canonical_functions
from lbry.wallet.server.db.full_text_search import update_full_text_search, CREATE_FULL_TEXT_SEARCH, first_sync_finished
from lbry.wallet.server.db.trending import TRENDING_ALGORITHMS
from .common import CLAIM_TYPES, STREAM_TYPES, COMMON_TAGS, INDEXED_LANGUAGES
ATTRIBUTE_ARRAY_MAX_LENGTH = 100
class SQLDB:
PRAGMAS = """
pragma journal_mode=WAL;
"""
CREATE_CLAIM_TABLE = """
create table if not exists claim (
claim_hash bytes primary key,
claim_id text not null,
claim_name text not null,
normalized text not null,
txo_hash bytes not null,
tx_position integer not null,
amount integer not null,
timestamp integer not null, -- last updated timestamp
creation_timestamp integer not null,
height integer not null, -- last updated height
creation_height integer not null,
activation_height integer,
expiration_height integer not null,
release_time integer not null,
short_url text not null, -- normalized#shortest-unique-claim_id
canonical_url text, -- channel's-short_url/normalized#shortest-unique-claim_id-within-channel
title text,
author text,
description text,
claim_type integer,
reposted integer default 0,
-- streams
stream_type text,
media_type text,
fee_amount integer default 0,
fee_currency text,
duration integer,
-- reposts
reposted_claim_hash bytes,
-- claims which are channels
public_key_bytes bytes,
public_key_hash bytes,
claims_in_channel integer,
-- claims which are inside channels
channel_hash bytes,
channel_join integer, -- height at which claim got valid signature / joined channel
signature bytes,
signature_digest bytes,
signature_valid bool,
effective_amount integer not null default 0,
support_amount integer not null default 0,
trending_group integer not null default 0,
trending_mixed integer not null default 0,
trending_local integer not null default 0,
trending_global integer not null default 0
);
create index if not exists claim_normalized_idx on claim (normalized, activation_height);
create index if not exists claim_channel_hash_idx on claim (channel_hash, signature, claim_hash);
create index if not exists claim_claims_in_channel_idx on claim (signature_valid, channel_hash, normalized);
create index if not exists claim_txo_hash_idx on claim (txo_hash);
create index if not exists claim_activation_height_idx on claim (activation_height, claim_hash);
create index if not exists claim_expiration_height_idx on claim (expiration_height);
create index if not exists claim_reposted_claim_hash_idx on claim (reposted_claim_hash);
"""
CREATE_SUPPORT_TABLE = """
create table if not exists support (
txo_hash bytes primary key,
tx_position integer not null,
height integer not null,
claim_hash bytes not null,
amount integer not null
);
create index if not exists support_claim_hash_idx on support (claim_hash, height);
"""
CREATE_TAG_TABLE = """
create table if not exists tag (
tag text not null,
claim_hash bytes not null,
height integer not null
);
create unique index if not exists tag_claim_hash_tag_idx on tag (claim_hash, tag);
"""
CREATE_LANGUAGE_TABLE = """
create table if not exists language (
language text not null,
claim_hash bytes not null,
height integer not null
);
create unique index if not exists language_claim_hash_language_idx on language (claim_hash, language);
"""
CREATE_CLAIMTRIE_TABLE = """
create table if not exists claimtrie (
normalized text primary key,
claim_hash bytes not null,
last_take_over_height integer not null
);
create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash);
"""
SEARCH_INDEXES = """
-- used by any tag clouds
create index if not exists tag_tag_idx on tag (tag, claim_hash);
-- naked order bys (no filters)
create unique index if not exists claim_release_idx on claim (release_time, claim_hash);
create unique index if not exists claim_trending_idx on claim (trending_group, trending_mixed, claim_hash);
create unique index if not exists claim_effective_amount_idx on claim (effective_amount, claim_hash);
-- claim_type filter + order by
create unique index if not exists claim_type_release_idx on claim (release_time, claim_type, claim_hash);
create unique index if not exists claim_type_trending_idx on claim (trending_group, trending_mixed, claim_type, claim_hash);
create unique index if not exists claim_type_effective_amount_idx on claim (effective_amount, claim_type, claim_hash);
-- stream_type filter + order by
create unique index if not exists stream_type_release_idx on claim (stream_type, release_time, claim_hash);
create unique index if not exists stream_type_trending_idx on claim (stream_type, trending_group, trending_mixed, claim_hash);
create unique index if not exists stream_type_effective_amount_idx on claim (stream_type, effective_amount, claim_hash);
-- channel_hash filter + order by
create unique index if not exists channel_hash_release_idx on claim (channel_hash, release_time, claim_hash);
create unique index if not exists channel_hash_trending_idx on claim (channel_hash, trending_group, trending_mixed, claim_hash);
create unique index if not exists channel_hash_effective_amount_idx on claim (channel_hash, effective_amount, claim_hash);
-- duration filter + order by
create unique index if not exists duration_release_idx on claim (duration, release_time, claim_hash);
create unique index if not exists duration_trending_idx on claim (duration, trending_group, trending_mixed, claim_hash);
create unique index if not exists duration_effective_amount_idx on claim (duration, effective_amount, claim_hash);
-- fee_amount + order by
create unique index if not exists fee_amount_release_idx on claim (fee_amount, release_time, claim_hash);
create unique index if not exists fee_amount_trending_idx on claim (fee_amount, trending_group, trending_mixed, claim_hash);
create unique index if not exists fee_amount_effective_amount_idx on claim (fee_amount, effective_amount, claim_hash);
-- TODO: verify that all indexes below are used
create index if not exists claim_height_normalized_idx on claim (height, normalized asc);
create index if not exists claim_resolve_idx on claim (normalized, claim_id);
create index if not exists claim_id_idx on claim (claim_id, claim_hash);
create index if not exists claim_timestamp_idx on claim (timestamp);
create index if not exists claim_public_key_hash_idx on claim (public_key_hash);
create index if not exists claim_signature_valid_idx on claim (signature_valid);
"""
TAG_INDEXES = '\n'.join(
f"create unique index if not exists tag_{tag_key}_idx on tag (tag, claim_hash) WHERE tag='{tag_value}';"
for tag_value, tag_key in COMMON_TAGS.items()
)
LANGUAGE_INDEXES = '\n'.join(
f"create unique index if not exists language_{language}_idx on language (language, claim_hash) WHERE language='{language}';"
for language in INDEXED_LANGUAGES
)
CREATE_TABLES_QUERY = (
CREATE_CLAIM_TABLE +
CREATE_FULL_TEXT_SEARCH +
CREATE_SUPPORT_TABLE +
CREATE_CLAIMTRIE_TABLE +
CREATE_TAG_TABLE +
CREATE_LANGUAGE_TABLE
)
def __init__(
        self, main, path: str, blocking_channels: list, filtering_channels: list, trending: list):
    """Wires up the claims DB; the actual connection is deferred to `open()`.

    :param main: owning server object; `main.coin.NET` selects the ledger class.
    :param path: SQLite database path (apsw URI rules apply).
    :param blocking_channels: hex claim ids whose reposts are blocked.
    :param filtering_channels: hex claim ids whose reposts are filtered from search.
    :param trending: trending algorithm modules, installed on `open()`.
    """
    self.main = main
    self._db_path = path
    self.db = None
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.ledger = Ledger if main.coin.NET == 'mainnet' else RegTestLedger
    self._fts_synced = False
    self.state_manager = None
    self.blocked_streams = None
    self.blocked_channels = None
    # Claim ids arrive as hex strings; stored internally as reversed raw bytes.
    self.blocking_channel_hashes = {
        unhexlify(channel_id)[::-1] for channel_id in blocking_channels if channel_id
    }
    self.filtered_streams = None
    self.filtered_channels = None
    self.filtering_channel_hashes = {
        unhexlify(channel_id)[::-1] for channel_id in filtering_channels if channel_id
    }
    self.trending = trending
def open(self):
    """Opens the SQLite connection, creates the schema and the shared state."""
    self.db = apsw.Connection(
        self._db_path,
        flags=(
            apsw.SQLITE_OPEN_READWRITE |
            apsw.SQLITE_OPEN_CREATE |
            apsw.SQLITE_OPEN_URI
        )
    )

    def exec_factory(cursor, statement, bindings):
        # Wrap every result row in a namedtuple matching the cursor description.
        tpl = namedtuple('row', (d[0] for d in cursor.getdescription()))
        cursor.setrowtrace(lambda cursor, row: tpl(*row))
        return True
    self.db.setexectrace(exec_factory)
    self.execute(self.PRAGMAS)
    self.execute(self.CREATE_TABLES_QUERY)
    register_canonical_functions(self.db)
    # Multiprocessing-shared dicts so reader processes see block/filter updates.
    self.state_manager = Manager()
    self.blocked_streams = self.state_manager.dict()
    self.blocked_channels = self.state_manager.dict()
    self.filtered_streams = self.state_manager.dict()
    self.filtered_channels = self.state_manager.dict()
    self.update_blocked_and_filtered_claims()
    for algorithm in self.trending:
        algorithm.install(self.db)
def close(self):
    """Closes the DB connection and shuts down the multiprocessing manager."""
    if self.db is not None:
        self.db.close()
    if self.state_manager is not None:
        self.state_manager.shutdown()
def update_blocked_and_filtered_claims(self):
self.update_claims_from_channel_hashes(
self.blocked_streams, self.blocked_channels, self.blocking_channel_hashes
)
self.update_claims_from_channel_hashes(
self.filtered_streams, self.filtered_channels, self.filtering_channel_hashes
)
self.filtered_streams.update(self.blocked_streams)
self.filtered_channels.update(self.blocked_channels)
    def update_claims_from_channel_hashes(self, shared_streams, shared_channels, channel_hashes):
        """Refill *shared_streams*/*shared_channels* with the claims reposted
        by any channel in *channel_hashes*.

        Reposted streams go to *shared_streams*, reposted channels to
        *shared_channels*; both map reposted_claim_hash -> reposting
        channel_hash. The shared dicts are cleared and refilled at the end.
        """
        streams, channels = {}, {}
        if channel_hashes:
            sql = query(
                "SELECT repost.channel_hash, repost.reposted_claim_hash, target.claim_type "
                "FROM claim as repost JOIN claim AS target ON (target.claim_hash=repost.reposted_claim_hash)", **{
                    'repost.reposted_claim_hash__is_not_null': 1,
                    'repost.channel_hash__in': channel_hashes
                }
            )
            for blocked_claim in self.execute(*sql):
                if blocked_claim.claim_type == CLAIM_TYPES['stream']:
                    streams[blocked_claim.reposted_claim_hash] = blocked_claim.channel_hash
                elif blocked_claim.claim_type == CLAIM_TYPES['channel']:
                    channels[blocked_claim.reposted_claim_hash] = blocked_claim.channel_hash
        shared_streams.clear()
        shared_streams.update(streams)
        shared_channels.clear()
        shared_channels.update(channels)
@staticmethod
def _insert_sql(table: str, data: dict) -> Tuple[str, list]:
columns, values = [], []
for column, value in data.items():
columns.append(column)
values.append(value)
sql = (
f"INSERT INTO {table} ({", ".join(columns)}) "
f"VALUES ({", ".join(["?"] * len(values))})"
)
return sql, values
@staticmethod
def _update_sql(table: str, data: dict, where: str,
constraints: Union[list, tuple]) -> Tuple[str, list]:
columns, values = [], []
for column, value in data.items():
columns.append(f"{column} = ?")
values.append(value)
values.extend(constraints)
return f"UPDATE {table} SET {", ".join(columns)} WHERE {where}", values
@staticmethod
def _delete_sql(table: str, constraints: dict) -> Tuple[str, dict]:
where, values = constraints_to_sql(constraints)
return f"DELETE FROM {table} WHERE {where}", values
    def execute(self, *args):
        # Fresh cursor per statement; rows come back as namedtuples (see open()).
        return self.db.cursor().execute(*args)
    def executemany(self, *args):
        # Bulk variant of execute(); one cursor per batch.
        return self.db.cursor().executemany(*args)
    def begin(self):
        # Begin an explicit transaction.
        self.execute('begin;')
    def commit(self):
        # Commit the explicit transaction started by begin().
        self.execute('commit;')
def _upsertable_claims(self, txos: List[Output], header, clear_first=False):
claim_hashes, claims, tags, languages = set(), [], {}, {}
for txo in txos:
tx = txo.tx_ref.tx
try:
assert txo.claim_name
assert txo.normalized_name
except:
#self.logger.exception(f"Could not decode claim name for {tx.id}:{txo.position}.")
continue
language = 'none'
try:
if txo.claim.is_stream and txo.claim.stream.languages:
language = txo.claim.stream.languages[0].language
except:
pass
claim_hash = txo.claim_hash
claim_hashes.add(claim_hash)
claim_record = {
'claim_hash': claim_hash,
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'normalized': txo.normalized_name,
'txo_hash': txo.ref.hash,
'tx_position': tx.position,
'amount': txo.amount,
'timestamp': header['timestamp'],
'height': tx.height,
'title': None,
'description': None,
'author': None,
'duration': None,
'claim_type': None,
'stream_type': None,
'media_type': None,
'release_time': None,
'fee_currency': None,
'fee_amount': 0,
'reposted_claim_hash': None
}
claims.append(claim_record)
try:
claim = txo.claim
except:
#self.logger.exception(f"Could not parse claim protobuf for {tx.id}:{txo.position}.")
continue
if claim.is_stream:
claim_record['claim_type'] = CLAIM_TYPES['stream']
claim_record['media_type'] = claim.stream.source.media_type
claim_record['stream_type'] = STREAM_TYPES[guess_stream_type(claim_record['media_type'])]
claim_record['title'] = claim.stream.title
claim_record['description'] = claim.stream.description
claim_record['author'] = claim.stream.author
if claim.stream.video and claim.stream.video.duration:
claim_record['duration'] = claim.stream.video.duration
if claim.stream.audio and claim.stream.audio.duration:
claim_record['duration'] = claim.stream.audio.duration
if claim.stream.release_time:
claim_record['release_time'] = claim.stream.release_time
if claim.stream.has_fee:
fee = claim.stream.fee
if isinstance(fee.currency, str):
claim_record['fee_currency'] = fee.currency.lower()
if isinstance(fee.amount, Decimal):
claim_record['fee_amount'] = int(fee.amount*1000)
elif claim.is_repost:
claim_record['claim_type'] = CLAIM_TYPES['repost']
claim_record['reposted_claim_hash'] = claim.repost.reference.claim_hash
elif claim.is_channel:
claim_record['claim_type'] = CLAIM_TYPES['channel']
elif claim.is_collection:
claim_record['claim_type'] = CLAIM_TYPES['collection']
languages[(language, claim_hash)] = (language, claim_hash, tx.height)
for tag in clean_tags(claim.message.tags):
tags[(tag, claim_hash)] = (tag, claim_hash, tx.height)
if clear_first:
self._clear_claim_metadata(claim_hashes)
if tags:
self.executemany(
"INSERT OR IGNORE INTO tag (tag, claim_hash, height) VALUES (?, ?, ?)", tags.values()
)
if languages:
self.executemany(
"INSERT OR IGNORE INTO language (language, claim_hash, height) VALUES (?, ?, ?)", languages.values()
)
return claims
    def insert_claims(self, txos: List[Output], header):
        """Insert newly created claims.

        Notable computed columns in the SQL below:
        - creation_timestamp/creation_height are seeded from the current
          timestamp/height.
        - release_time falls back to the block timestamp when unset.
        - activation_height is only set when the name is not yet in the
          claimtrie; otherwise it stays NULL until takeover logic assigns it.
        - expiration_height uses +2102400 from height 137181 onward and
          +262974 before it.
        - short_url is built with the custom shortest_id() SQL function
          (registered in open()) to find the shortest unique claim_id prefix.
        """
        claims = self._upsertable_claims(txos, header)
        if claims:
            self.executemany("""
                INSERT OR IGNORE INTO claim (
                    claim_hash, claim_id, claim_name, normalized, txo_hash, tx_position, amount,
                    claim_type, media_type, stream_type, timestamp, creation_timestamp,
                    fee_currency, fee_amount, title, description, author, duration, height, reposted_claim_hash,
                    creation_height, release_time, activation_height, expiration_height, short_url)
                VALUES (
                    :claim_hash, :claim_id, :claim_name, :normalized, :txo_hash, :tx_position, :amount,
                    :claim_type, :media_type, :stream_type, :timestamp, :timestamp,
                    :fee_currency, :fee_amount, :title, :description, :author, :duration, :height, :reposted_claim_hash, :height,
                    CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE :timestamp END,
                    CASE WHEN :normalized NOT IN (SELECT normalized FROM claimtrie) THEN :height END,
                    CASE WHEN :height >= 137181 THEN :height+2102400 ELSE :height+262974 END,
                    :claim_name||COALESCE(
                        (SELECT shortest_id(claim_id, :claim_id) FROM claim WHERE normalized = :normalized),
                        '#'||substr(:claim_id, 1, 1)
                    )
                )""", claims)
    def update_claims(self, txos: List[Output], header):
        """Update existing claims in place (claim-update transactions).

        Metadata side tables are cleared and re-inserted via
        ``_upsertable_claims(..., clear_first=True)``. creation_* columns are
        deliberately untouched, and release_time only changes when the update
        supplies a non-NULL value.
        """
        claims = self._upsertable_claims(txos, header, clear_first=True)
        if claims:
            self.executemany("""
                UPDATE claim SET
                    txo_hash=:txo_hash, tx_position=:tx_position, amount=:amount, height=:height,
                    claim_type=:claim_type, media_type=:media_type, stream_type=:stream_type,
                    timestamp=:timestamp, fee_amount=:fee_amount, fee_currency=:fee_currency,
                    title=:title, duration=:duration, description=:description, author=:author, reposted_claim_hash=:reposted_claim_hash,
                    release_time=CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE release_time END
                WHERE claim_hash=:claim_hash;
                """, claims)
    def delete_claims(self, claim_hashes: Set[bytes]):
        """Delete abandoned claims plus their supports and claimtrie rows.

        :returns: set of channel hashes that signed the deleted claims, so the
            caller can re-validate those channels' counts.
        """
        if claim_hashes:
            # Capture owning channels before the rows disappear.
            affected_channels = self.execute(*query(
                "SELECT channel_hash FROM claim", channel_hash__is_not_null=1, claim_hash__in=claim_hashes
            )).fetchall()
            for table in ('claim', 'support', 'claimtrie'):
                self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes}))
            self._clear_claim_metadata(claim_hashes)
            return {r.channel_hash for r in affected_channels}
        return set()
def delete_claims_above_height(self, height: int):
claim_hashes = [x[0] for x in self.execute(
"SELECT claim_hash FROM claim WHERE height>?", (height, )
).fetchall()]
while claim_hashes:
batch = set(claim_hashes[:500])
claim_hashes = claim_hashes[500:]
self.delete_claims(batch)
def _clear_claim_metadata(self, claim_hashes: Set[bytes]):
if claim_hashes:
for table in ('tag',): # 'language', 'location', etc
self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes}))
    def split_inputs_into_claims_supports_and_other(self, txis):
        """Partition spent inputs into claims, supports and everything else.

        :param txis: transaction inputs being spent.
        :returns: ``(claims, supports, txo_hashes)`` -- rows (namedtuples)
            from the claim table, rows from the support table, and the txo
            hashes matching neither.
        """
        txo_hashes = {txi.txo_ref.hash for txi in txis}
        claims = self.execute(*query(
            "SELECT txo_hash, claim_hash, normalized FROM claim", txo_hash__in=txo_hashes
        )).fetchall()
        txo_hashes -= {r.txo_hash for r in claims}
        supports = {}
        if txo_hashes:
            supports = self.execute(*query(
                "SELECT txo_hash, claim_hash FROM support", txo_hash__in=txo_hashes
            )).fetchall()
            txo_hashes -= {r.txo_hash for r in supports}
        return claims, supports, txo_hashes
def insert_supports(self, txos: List[Output]):
supports = []
for txo in txos:
tx = txo.tx_ref.tx
supports.append((
txo.ref.hash, tx.position, tx.height,
txo.claim_hash, txo.amount
))
if supports:
self.executemany(
"INSERT OR IGNORE INTO support ("
" txo_hash, tx_position, height, claim_hash, amount"
") "
"VALUES (?, ?, ?, ?, ?)", supports
)
def delete_supports(self, txo_hashes: Set[bytes]):
if txo_hashes:
self.execute(*self._delete_sql('support', {'txo_hash__in': txo_hashes}))
def calculate_reposts(self, txos: List[Output]):
targets = set()
for txo in txos:
try:
claim = txo.claim
except:
continue
if claim.is_repost:
targets.add((claim.repost.reference.claim_hash,))
if targets:
self.executemany(
"""
UPDATE claim SET reposted = (
SELECT count(*) FROM claim AS repost WHERE repost.reposted_claim_hash = claim.claim_hash
)
WHERE claim_hash = ?
""", targets
)
    def validate_channel_signatures(self, height, new_claims, updated_claims, spent_claims, affected_channels, timer):
        """Re-validate claim signatures and channel-derived columns for one block.

        Covers: new/updated channels (public key changes), new/updated signed
        claims, claims invalidated by spent channels, per-channel
        claims_in_channel counts, and finally refreshes the shared
        blocked/filtered maps if a blocking/filtering channel was touched.
        """
        if not new_claims and not updated_claims and not spent_claims:
            return
        sub_timer = timer.add_timer('segregate channels and signables')
        sub_timer.start()
        # channels: channel txos by claim_hash; new_channel_keys: their public
        # keys; signables: every non-channel claim txo from this block.
        channels, new_channel_keys, signables = {}, {}, {}
        for txo in chain(new_claims, updated_claims):
            try:
                claim = txo.claim
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                continue
            if claim.is_channel:
                channels[txo.claim_hash] = txo
                new_channel_keys[txo.claim_hash] = claim.channel.public_key_bytes
            else:
                signables[txo.claim_hash] = txo
        sub_timer.stop()
        sub_timer = timer.add_timer('make list of channels we need to lookup')
        sub_timer.start()
        missing_channel_keys = set()
        for txo in signables.values():
            claim = txo.claim
            if claim.is_signed and claim.signing_channel_hash not in new_channel_keys:
                missing_channel_keys.add(claim.signing_channel_hash)
        sub_timer.stop()
        sub_timer = timer.add_timer('lookup missing channels')
        sub_timer.start()
        all_channel_keys = {}
        if new_channel_keys or missing_channel_keys or affected_channels:
            all_channel_keys = dict(self.execute(*query(
                "SELECT claim_hash, public_key_bytes FROM claim",
                claim_hash__in=set(new_channel_keys) | missing_channel_keys | affected_channels
            )))
        sub_timer.stop()
        sub_timer = timer.add_timer('prepare for updating claims')
        sub_timer.start()
        changed_channel_keys = {}
        for claim_hash, new_key in new_channel_keys.items():
            if claim_hash not in all_channel_keys or all_channel_keys[claim_hash] != new_key:
                all_channel_keys[claim_hash] = new_key
                changed_channel_keys[claim_hash] = new_key
        claim_updates = []
        for claim_hash, txo in signables.items():
            claim = txo.claim
            # Default wipes signature columns; overwritten below when signed.
            update = {
                'claim_hash': claim_hash,
                'channel_hash': None,
                'signature': None,
                'signature_digest': None,
                'signature_valid': None
            }
            if claim.is_signed:
                update.update({
                    'channel_hash': claim.signing_channel_hash,
                    'signature': txo.get_encoded_signature(),
                    'signature_digest': txo.get_signature_digest(self.ledger),
                    'signature_valid': 0  # pending verification below
                })
            claim_updates.append(update)
        sub_timer.stop()
        sub_timer = timer.add_timer('find claims affected by a change in channel key')
        sub_timer.start()
        if changed_channel_keys:
            sql = f"""
            SELECT * FROM claim WHERE
                channel_hash IN ({','.join('?' for _ in changed_channel_keys)}) AND
                signature IS NOT NULL
            """
            # NOTE(review): a dict keys view is passed as bindings here; apsw
            # expects a sequence -- confirm it accepts a keys view.
            for affected_claim in self.execute(sql, changed_channel_keys.keys()):
                if affected_claim.claim_hash not in signables:
                    claim_updates.append({
                        'claim_hash': affected_claim.claim_hash,
                        'channel_hash': affected_claim.channel_hash,
                        'signature': affected_claim.signature,
                        'signature_digest': affected_claim.signature_digest,
                        'signature_valid': 0
                    })
        sub_timer.stop()
        sub_timer = timer.add_timer('verify signatures')
        sub_timer.start()
        for update in claim_updates:
            channel_pub_key = all_channel_keys.get(update['channel_hash'])
            if channel_pub_key and update['signature']:
                update['signature_valid'] = Output.is_signature_valid(
                    bytes(update['signature']), bytes(update['signature_digest']), channel_pub_key
                )
        sub_timer.stop()
        sub_timer = timer.add_timer('update claims')
        sub_timer.start()
        if claim_updates:
            # channel_join/canonical_url are kept when the claim remains
            # validly signed by the same channel, recomputed when it becomes
            # valid, and set to NULL otherwise.
            self.executemany(f"""
                UPDATE claim SET
                    channel_hash=:channel_hash, signature=:signature, signature_digest=:signature_digest,
                    signature_valid=:signature_valid,
                    channel_join=CASE
                        WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN channel_join
                        WHEN :signature_valid=1 THEN {height}
                    END,
                    canonical_url=CASE
                        WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN canonical_url
                        WHEN :signature_valid=1 THEN
                            (SELECT short_url FROM claim WHERE claim_hash=:channel_hash)||'/'||
                            claim_name||COALESCE(
                                (SELECT shortest_id(other_claim.claim_id, claim.claim_id) FROM claim AS other_claim
                                 WHERE other_claim.signature_valid = 1 AND
                                       other_claim.channel_hash = :channel_hash AND
                                       other_claim.normalized = claim.normalized),
                                '#'||substr(claim_id, 1, 1)
                            )
                    END
                WHERE claim_hash=:claim_hash;
                """, claim_updates)
        sub_timer.stop()
        sub_timer = timer.add_timer('update claims affected by spent channels')
        sub_timer.start()
        if spent_claims:
            self.execute(
                f"""
                UPDATE claim SET
                    signature_valid=CASE WHEN signature IS NOT NULL THEN 0 END,
                    channel_join=NULL, canonical_url=NULL
                WHERE channel_hash IN ({','.join('?' for _ in spent_claims)})
                """, spent_claims
            )
        sub_timer.stop()
        sub_timer = timer.add_timer('update channels')
        sub_timer.start()
        if channels:
            self.executemany(
                """
                UPDATE claim SET
                    public_key_bytes=:public_key_bytes,
                    public_key_hash=:public_key_hash
                WHERE claim_hash=:claim_hash""", [{
                    'claim_hash': claim_hash,
                    'public_key_bytes': txo.claim.channel.public_key_bytes,
                    'public_key_hash': self.ledger.address_to_hash160(
                        self.ledger.public_key_to_address(txo.claim.channel.public_key_bytes)
                    )
                } for claim_hash, txo in channels.items()]
            )
        sub_timer.stop()
        sub_timer = timer.add_timer('update claims_in_channel counts')
        sub_timer.start()
        if all_channel_keys:
            self.executemany(f"""
                UPDATE claim SET
                    claims_in_channel=(
                        SELECT COUNT(*) FROM claim AS claim_in_channel
                        WHERE claim_in_channel.signature_valid=1 AND
                              claim_in_channel.channel_hash=claim.claim_hash
                    )
                WHERE claim_hash = ?
                """, [(channel_hash,) for channel_hash in all_channel_keys.keys()])
        sub_timer.stop()
        sub_timer = timer.add_timer('update blocked claims list')
        sub_timer.start()
        if (self.blocking_channel_hashes.intersection(all_channel_keys) or
                self.filtering_channel_hashes.intersection(all_channel_keys)):
            self.update_blocked_and_filtered_claims()
        sub_timer.stop()
def _update_support_amount(self, claim_hashes):
if claim_hashes:
self.execute(f"""
UPDATE claim SET
support_amount = COALESCE(
(SELECT SUM(amount) FROM support WHERE support.claim_hash=claim.claim_hash), 0
)
WHERE claim_hash IN ({','.join('?' for _ in claim_hashes)})
""", claim_hashes)
def _update_effective_amount(self, height, claim_hashes=None):
self.execute(
f"UPDATE claim SET effective_amount = amount + support_amount "
f"WHERE activation_height = {height}"
)
if claim_hashes:
self.execute(
f"UPDATE claim SET effective_amount = amount + support_amount "
f"WHERE activation_height < {height} "
f" AND claim_hash IN ({",".join("?" for _ in claim_hashes)})",
claim_hashes
)
    def _calculate_activation_height(self, height):
        """Assign activation heights to claims that have none yet.

        The delay grows with the time since the name's last takeover
        ((height - last_take_over_height) / 32), capped at 4032 blocks;
        names without a claimtrie entry get a zero delay.
        """
        last_take_over_height = f"""COALESCE(
            (SELECT last_take_over_height FROM claimtrie
            WHERE claimtrie.normalized=claim.normalized),
            {height}
        )
        """
        self.execute(f"""
            UPDATE claim SET activation_height = 
                {height} + min(4032, cast(({height} - {last_take_over_height}) / 32 AS INT))
            WHERE activation_height IS NULL
        """)
def _perform_overtake(self, height, changed_claim_hashes, deleted_names):
deleted_names_sql = claim_hashes_sql = ""
if changed_claim_hashes:
claim_hashes_sql = f"OR claim_hash IN ({",".join("?" for _ in changed_claim_hashes)})"
if deleted_names:
deleted_names_sql = f"OR normalized IN ({",".join("?" for _ in deleted_names)})"
overtakes = self.execute(f"""
SELECT winner.normalized, winner.claim_hash,
claimtrie.claim_hash AS current_winner,
MAX(winner.effective_amount) AS max_winner_effective_amount
FROM (
SELECT normalized, claim_hash, effective_amount FROM claim
WHERE normalized IN (
SELECT normalized FROM claim WHERE activation_height={height} {claim_hashes_sql}
) {deleted_names_sql}
ORDER BY effective_amount DESC, height ASC, tx_position ASC
) AS winner LEFT JOIN claimtrie USING (normalized)
GROUP BY winner.normalized
HAVING current_winner IS NULL OR current_winner <> winner.claim_hash
""", list(changed_claim_hashes)+deleted_names)
for overtake in overtakes:
if overtake.current_winner:
self.execute(
f"UPDATE claimtrie SET claim_hash = ?, last_take_over_height = {height} "
f"WHERE normalized = ?",
(overtake.claim_hash, overtake.normalized)
)
else:
self.execute(
f"INSERT INTO claimtrie (claim_hash, normalized, last_take_over_height) "
f"VALUES (?, ?, {height})",
(overtake.claim_hash, overtake.normalized)
)
self.execute(
f"UPDATE claim SET activation_height = {height} WHERE normalized = ? "
f"AND (activation_height IS NULL OR activation_height > {height})",
(overtake.normalized,)
)
    def _copy(self, height):
        # Snapshot the claimtrie into a claimtrie{height} table, dropping the
        # snapshot taken 50 blocks earlier to bound disk usage.
        # NOTE(review): not called from this chunk -- presumably a debugging
        # aid; confirm before removing.
        if height > 50:
            self.execute(f"DROP TABLE claimtrie{height-50}")
        self.execute(f"CREATE TABLE claimtrie{height} AS SELECT * FROM claimtrie")
    def update_claimtrie(self, height, changed_claim_hashes, deleted_names, timer):
        """Run one block's claimtrie maintenance, in order: activation
        heights, support sums, effective amounts, then takeovers.

        A second effective-amount/overtake pass runs with no explicit targets
        to pick up claims whose activation height was assigned by the first
        overtake pass.
        """
        r = timer.run
        r(self._calculate_activation_height, height)
        r(self._update_support_amount, changed_claim_hashes)
        r(self._update_effective_amount, height, changed_claim_hashes)
        r(self._perform_overtake, height, changed_claim_hashes, list(deleted_names))
        r(self._update_effective_amount, height)
        r(self._perform_overtake, height, [], [])
def get_expiring(self, height):
return self.execute(
f"SELECT claim_hash, normalized FROM claim WHERE expiration_height = {height}"
)
    def advance_txs(self, height, all_txs, header, daemon_height, timer):
        """Apply one block's transactions to the claims database.

        Order matters: deletions run before insertions so same-block updates
        and abandons resolve correctly; signature validation runs after claims
        are written, before supports and claimtrie/trending maintenance.
        """
        insert_claims = []
        update_claims = []
        update_claim_hashes = set()
        delete_claim_hashes = set()
        insert_supports = []
        delete_support_txo_hashes = set()
        recalculate_claim_hashes = set()  # added/deleted supports, added/updated claim
        deleted_claim_names = set()
        delete_others = set()
        body_timer = timer.add_timer('body')
        for position, (etx, txid) in enumerate(all_txs):
            tx = timer.run(
                Transaction, etx.raw, height=height, position=position
            )
            # Inputs
            spent_claims, spent_supports, spent_others = timer.run(
                self.split_inputs_into_claims_supports_and_other, tx.inputs
            )
            body_timer.start()
            delete_claim_hashes.update({r.claim_hash for r in spent_claims})
            deleted_claim_names.update({r.normalized for r in spent_claims})
            delete_support_txo_hashes.update({r.txo_hash for r in spent_supports})
            recalculate_claim_hashes.update({r.claim_hash for r in spent_supports})
            delete_others.update(spent_others)
            # Outputs
            for output in tx.outputs:
                if output.is_support:
                    insert_supports.append(output)
                    recalculate_claim_hashes.add(output.claim_hash)
                elif output.script.is_claim_name:
                    insert_claims.append(output)
                    recalculate_claim_hashes.add(output.claim_hash)
                elif output.script.is_update_claim:
                    claim_hash = output.claim_hash
                    update_claims.append(output)
                    recalculate_claim_hashes.add(claim_hash)
            body_timer.stop()
        skip_update_claim_timer = timer.add_timer('skip update of abandoned claims')
        skip_update_claim_timer.start()
        # Updates whose own new txo is also spent within the block are
        # treated as abandons, not updates.
        for updated_claim in list(update_claims):
            if updated_claim.ref.hash in delete_others:
                update_claims.remove(updated_claim)
        for updated_claim in update_claims:
            claim_hash = updated_claim.claim_hash
            delete_claim_hashes.discard(claim_hash)
            update_claim_hashes.add(claim_hash)
        skip_update_claim_timer.stop()
        skip_insert_claim_timer = timer.add_timer('skip insertion of abandoned claims')
        skip_insert_claim_timer.start()
        for new_claim in list(insert_claims):
            if new_claim.ref.hash in delete_others:
                if new_claim.claim_hash not in update_claim_hashes:
                    insert_claims.remove(new_claim)
        skip_insert_claim_timer.stop()
        skip_insert_support_timer = timer.add_timer('skip insertion of abandoned supports')
        skip_insert_support_timer.start()
        for new_support in list(insert_supports):
            if new_support.ref.hash in delete_others:
                insert_supports.remove(new_support)
        skip_insert_support_timer.stop()
        expire_timer = timer.add_timer('recording expired claims')
        expire_timer.start()
        for expired in self.get_expiring(height):
            delete_claim_hashes.add(expired.claim_hash)
            deleted_claim_names.add(expired.normalized)
        expire_timer.stop()
        r = timer.run
        r(update_full_text_search, 'before-delete',
          delete_claim_hashes, self.db.cursor(), self.main.first_sync)
        affected_channels = r(self.delete_claims, delete_claim_hashes)
        r(self.delete_supports, delete_support_txo_hashes)
        r(self.insert_claims, insert_claims, header)
        r(self.calculate_reposts, insert_claims)
        r(update_full_text_search, 'after-insert',
          [txo.claim_hash for txo in insert_claims], self.db.cursor(), self.main.first_sync)
        r(update_full_text_search, 'before-update',
          [txo.claim_hash for txo in update_claims], self.db.cursor(), self.main.first_sync)
        r(self.update_claims, update_claims, header)
        r(update_full_text_search, 'after-update',
          [txo.claim_hash for txo in update_claims], self.db.cursor(), self.main.first_sync)
        r(self.validate_channel_signatures, height, insert_claims,
          update_claims, delete_claim_hashes, affected_channels, forward_timer=True)
        r(self.insert_supports, insert_supports)
        r(self.update_claimtrie, height, recalculate_claim_hashes, deleted_claim_names, forward_timer=True)
        for algorithm in self.trending:
            r(algorithm.run, self.db.cursor(), height, daemon_height, recalculate_claim_hashes)
        # Mark full-text search as synced the first time initial sync reaches
        # the daemon's tip.
        if not self._fts_synced and self.main.first_sync and height == daemon_height:
            r(first_sync_finished, self.db.cursor())
            self._fts_synced = True
class LBRYLevelDB(LevelDB):
    """LevelDB subclass that owns the companion claims SQLite database."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        db_path = os.path.join(self.env.db_dir, 'claims.db')
        # Only keep the trending algorithms the environment names and we know.
        enabled_trending = [
            TRENDING_ALGORITHMS[name]
            for name in self.env.trending_algorithms
            if name in TRENDING_ALGORITHMS
        ]
        self.sql = SQLDB(
            self, db_path,
            self.env.default('BLOCKING_CHANNEL_IDS', '').split(' '),
            self.env.default('FILTERING_CHANNEL_IDS', '').split(' '),
            enabled_trending
        )

    def close(self):
        super().close()
        self.sql.close()

    async def _open_dbs(self, *args, **kwargs):
        await super()._open_dbs(*args, **kwargs)
        self.sql.open()
| import os
import apsw
from typing import Union, Tuple, Set, List
from itertools import chain
from decimal import Decimal
from collections import namedtuple
from multiprocessing import Manager
from binascii import unhexlify
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.util import class_logger
from lbry.wallet.database import query, constraints_to_sql
from lbry.schema.tags import clean_tags
from lbry.schema.mime_types import guess_stream_type
from lbry.wallet import Ledger, RegTestLedger
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.server.db.canonical import register_canonical_functions
from lbry.wallet.server.db.full_text_search import update_full_text_search, CREATE_FULL_TEXT_SEARCH, first_sync_finished
from lbry.wallet.server.db.trending import TRENDING_ALGORITHMS
from .common import CLAIM_TYPES, STREAM_TYPES, COMMON_TAGS, INDEXED_LANGUAGES
# NOTE(review): not referenced in this chunk -- presumably caps per-claim
# attribute lists (tags/languages); confirm at usage sites.
ATTRIBUTE_ARRAY_MAX_LENGTH = 100
class SQLDB:
PRAGMAS = """
pragma journal_mode=WAL;
"""
CREATE_CLAIM_TABLE = """
create table if not exists claim (
claim_hash bytes primary key,
claim_id text not null,
claim_name text not null,
normalized text not null,
txo_hash bytes not null,
tx_position integer not null,
amount integer not null,
timestamp integer not null, -- last updated timestamp
creation_timestamp integer not null,
height integer not null, -- last updated height
creation_height integer not null,
activation_height integer,
expiration_height integer not null,
release_time integer not null,
short_url text not null, -- normalized#shortest-unique-claim_id
canonical_url text, -- channel's-short_url/normalized#shortest-unique-claim_id-within-channel
title text,
author text,
description text,
claim_type integer,
reposted integer default 0,
-- streams
stream_type text,
media_type text,
fee_amount integer default 0,
fee_currency text,
duration integer,
-- reposts
reposted_claim_hash bytes,
-- claims which are channels
public_key_bytes bytes,
public_key_hash bytes,
claims_in_channel integer,
-- claims which are inside channels
channel_hash bytes,
channel_join integer, -- height at which claim got valid signature / joined channel
signature bytes,
signature_digest bytes,
signature_valid bool,
effective_amount integer not null default 0,
support_amount integer not null default 0,
trending_group integer not null default 0,
trending_mixed integer not null default 0,
trending_local integer not null default 0,
trending_global integer not null default 0
);
create index if not exists claim_normalized_idx on claim (normalized, activation_height);
create index if not exists claim_channel_hash_idx on claim (channel_hash, signature, claim_hash);
create index if not exists claim_claims_in_channel_idx on claim (signature_valid, channel_hash, normalized);
create index if not exists claim_txo_hash_idx on claim (txo_hash);
create index if not exists claim_activation_height_idx on claim (activation_height, claim_hash);
create index if not exists claim_expiration_height_idx on claim (expiration_height);
create index if not exists claim_reposted_claim_hash_idx on claim (reposted_claim_hash);
"""
CREATE_SUPPORT_TABLE = """
create table if not exists support (
txo_hash bytes primary key,
tx_position integer not null,
height integer not null,
claim_hash bytes not null,
amount integer not null
);
create index if not exists support_claim_hash_idx on support (claim_hash, height);
"""
CREATE_TAG_TABLE = """
create table if not exists tag (
tag text not null,
claim_hash bytes not null,
height integer not null
);
create unique index if not exists tag_claim_hash_tag_idx on tag (claim_hash, tag);
"""
CREATE_LANGUAGE_TABLE = """
create table if not exists language (
language text not null,
claim_hash bytes not null,
height integer not null
);
create unique index if not exists language_claim_hash_language_idx on language (claim_hash, language);
"""
CREATE_CLAIMTRIE_TABLE = """
create table if not exists claimtrie (
normalized text primary key,
claim_hash bytes not null,
last_take_over_height integer not null
);
create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash);
"""
SEARCH_INDEXES = """
-- used by any tag clouds
create index if not exists tag_tag_idx on tag (tag, claim_hash);
-- naked order bys (no filters)
create unique index if not exists claim_release_idx on claim (release_time, claim_hash);
create unique index if not exists claim_trending_idx on claim (trending_group, trending_mixed, claim_hash);
create unique index if not exists claim_effective_amount_idx on claim (effective_amount, claim_hash);
-- claim_type filter + order by
create unique index if not exists claim_type_release_idx on claim (release_time, claim_type, claim_hash);
create unique index if not exists claim_type_trending_idx on claim (trending_group, trending_mixed, claim_type, claim_hash);
create unique index if not exists claim_type_effective_amount_idx on claim (effective_amount, claim_type, claim_hash);
-- stream_type filter + order by
create unique index if not exists stream_type_release_idx on claim (stream_type, release_time, claim_hash);
create unique index if not exists stream_type_trending_idx on claim (stream_type, trending_group, trending_mixed, claim_hash);
create unique index if not exists stream_type_effective_amount_idx on claim (stream_type, effective_amount, claim_hash);
-- channel_hash filter + order by
create unique index if not exists channel_hash_release_idx on claim (channel_hash, release_time, claim_hash);
create unique index if not exists channel_hash_trending_idx on claim (channel_hash, trending_group, trending_mixed, claim_hash);
create unique index if not exists channel_hash_effective_amount_idx on claim (channel_hash, effective_amount, claim_hash);
-- duration filter + order by
create unique index if not exists duration_release_idx on claim (duration, release_time, claim_hash);
create unique index if not exists duration_trending_idx on claim (duration, trending_group, trending_mixed, claim_hash);
create unique index if not exists duration_effective_amount_idx on claim (duration, effective_amount, claim_hash);
-- fee_amount + order by
create unique index if not exists fee_amount_release_idx on claim (fee_amount, release_time, claim_hash);
create unique index if not exists fee_amount_trending_idx on claim (fee_amount, trending_group, trending_mixed, claim_hash);
create unique index if not exists fee_amount_effective_amount_idx on claim (fee_amount, effective_amount, claim_hash);
-- TODO: verify that all indexes below are used
create index if not exists claim_height_normalized_idx on claim (height, normalized asc);
create index if not exists claim_resolve_idx on claim (normalized, claim_id);
create index if not exists claim_id_idx on claim (claim_id, claim_hash);
create index if not exists claim_timestamp_idx on claim (timestamp);
create index if not exists claim_public_key_hash_idx on claim (public_key_hash);
create index if not exists claim_signature_valid_idx on claim (signature_valid);
"""
TAG_INDEXES = '\n'.join(
f"create unique index if not exists tag_{tag_key}_idx on tag (tag, claim_hash) WHERE tag='{tag_value}';"
for tag_value, tag_key in COMMON_TAGS.items()
)
LANGUAGE_INDEXES = '\n'.join(
f"create unique index if not exists language_{language}_idx on language (language, claim_hash) WHERE language='{language}';"
for language in INDEXED_LANGUAGES
)
CREATE_TABLES_QUERY = (
CREATE_CLAIM_TABLE +
CREATE_FULL_TEXT_SEARCH +
CREATE_SUPPORT_TABLE +
CREATE_CLAIMTRIE_TABLE +
CREATE_TAG_TABLE +
CREATE_LANGUAGE_TABLE
)
    def __init__(
            self, main, path: str, blocking_channels: list, filtering_channels: list, trending: list):
        """Configure the claims SQLite wrapper (no I/O until open()).

        :param main: owning database object; ``main.coin.NET`` selects the
            ledger and ``main.first_sync`` is consulted during sync.
        :param path: filesystem path of the SQLite database file.
        :param blocking_channels: hex channel ids whose reposts get blocked.
        :param filtering_channels: hex channel ids whose reposts get filtered.
        :param trending: trending algorithm modules to install and run.
        """
        self.main = main
        self._db_path = path
        self.db = None  # apsw.Connection, created in open()
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.ledger = Ledger if main.coin.NET == 'mainnet' else RegTestLedger
        self._fts_synced = False  # flips True once FTS catches up to the daemon tip
        self.state_manager = None  # multiprocessing.Manager, created in open()
        self.blocked_streams = None
        self.blocked_channels = None
        # Channel hashes are stored byte-reversed relative to their hex ids.
        self.blocking_channel_hashes = {
            unhexlify(channel_id)[::-1] for channel_id in blocking_channels if channel_id
        }
        self.filtered_streams = None
        self.filtered_channels = None
        self.filtering_channel_hashes = {
            unhexlify(channel_id)[::-1] for channel_id in filtering_channels if channel_id
        }
        self.trending = trending
    def open(self):
        """Open (creating if necessary) the database and shared block/filter state."""
        self.db = apsw.Connection(
            self._db_path,
            flags=(
                apsw.SQLITE_OPEN_READWRITE |
                apsw.SQLITE_OPEN_CREATE |
                apsw.SQLITE_OPEN_URI
            )
        )
        def exec_factory(cursor, statement, bindings):
            # Make every statement return rows as namedtuples keyed by column name.
            tpl = namedtuple('row', (d[0] for d in cursor.getdescription()))
            cursor.setrowtrace(lambda cursor, row: tpl(*row))
            return True
        self.db.setexectrace(exec_factory)
        self.execute(self.PRAGMAS)
        self.execute(self.CREATE_TABLES_QUERY)
        register_canonical_functions(self.db)
        # Manager-backed dicts so other processes can read the block/filter
        # maps -- presumably the query reader processes; confirm at call sites.
        self.state_manager = Manager()
        self.blocked_streams = self.state_manager.dict()
        self.blocked_channels = self.state_manager.dict()
        self.filtered_streams = self.state_manager.dict()
        self.filtered_channels = self.state_manager.dict()
        self.update_blocked_and_filtered_claims()
        for algorithm in self.trending:
            algorithm.install(self.db)
def close(self):
if self.db is not None:
self.db.close()
if self.state_manager is not None:
self.state_manager.shutdown()
def update_blocked_and_filtered_claims(self):
self.update_claims_from_channel_hashes(
self.blocked_streams, self.blocked_channels, self.blocking_channel_hashes
)
self.update_claims_from_channel_hashes(
self.filtered_streams, self.filtered_channels, self.filtering_channel_hashes
)
self.filtered_streams.update(self.blocked_streams)
self.filtered_channels.update(self.blocked_channels)
def update_claims_from_channel_hashes(self, shared_streams, shared_channels, channel_hashes):
    """Refresh *shared_streams*/*shared_channels* with the claims reposted
    by any channel in *channel_hashes*, keyed by reposted claim hash."""
    stream_map, channel_map = {}, {}
    if channel_hashes:
        sql = query(
            "SELECT repost.channel_hash, repost.reposted_claim_hash, target.claim_type "
            "FROM claim as repost JOIN claim AS target ON (target.claim_hash=repost.reposted_claim_hash)", **{
                'repost.reposted_claim_hash__is_not_null': 1,
                'repost.channel_hash__in': channel_hashes
            }
        )
        for row in self.execute(*sql):
            if row.claim_type == CLAIM_TYPES['stream']:
                stream_map[row.reposted_claim_hash] = row.channel_hash
            elif row.claim_type == CLAIM_TYPES['channel']:
                channel_map[row.reposted_claim_hash] = row.channel_hash
    # Replace the shared contents atomically-ish: clear, then bulk update.
    shared_streams.clear()
    shared_streams.update(stream_map)
    shared_channels.clear()
    shared_channels.update(channel_map)
@staticmethod
def _insert_sql(table: str, data: dict) -> Tuple[str, list]:
columns, values = [], []
for column, value in data.items():
columns.append(column)
values.append(value)
sql = (
f"INSERT INTO {table} ({', '.join(columns)}) "
f"VALUES ({', '.join(['?'] * len(values))})"
)
return sql, values
@staticmethod
def _update_sql(table: str, data: dict, where: str,
constraints: Union[list, tuple]) -> Tuple[str, list]:
columns, values = [], []
for column, value in data.items():
columns.append(f"{column} = ?")
values.append(value)
values.extend(constraints)
return f"UPDATE {table} SET {', '.join(columns)} WHERE {where}", values
@staticmethod
def _delete_sql(table: str, constraints: dict) -> Tuple[str, dict]:
    """Build a parameterized DELETE for *table* from query-style *constraints*."""
    clause, bindings = constraints_to_sql(constraints)
    return f"DELETE FROM {table} WHERE {clause}", bindings
def execute(self, *args):
    """Run a single SQL statement on a fresh cursor and return the cursor."""
    cursor = self.db.cursor()
    return cursor.execute(*args)
def executemany(self, *args):
    """Run a statement once per bindings row, on a fresh cursor."""
    cursor = self.db.cursor()
    return cursor.executemany(*args)
def begin(self):
    """Open an explicit SQLite transaction."""
    self.execute('begin;')
def commit(self):
    """Commit the current explicit transaction."""
    self.execute('commit;')
def _upsertable_claims(self, txos: List[Output], header, clear_first=False):
    """Convert claim txos into row dicts for insert_claims/update_claims.

    Also records tag and language rows for each decodable claim and, when
    *clear_first* is set (the update path), clears existing per-claim
    metadata first.

    Args:
        txos: claim outputs (new or updated) from the block being processed.
        header: block header dict; its 'timestamp' stamps each claim row.
        clear_first: clear existing tag/etc. rows before re-inserting.

    Returns:
        List of dicts keyed for the named-parameter SQL in insert/update.
    """
    claim_hashes, claims, tags, languages = set(), [], {}, {}
    for txo in txos:
        tx = txo.tx_ref.tx

        # Claims whose name cannot be decoded are skipped entirely.
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        try:
            assert txo.claim_name
            assert txo.normalized_name
        except Exception:
            #self.logger.exception(f"Could not decode claim name for {tx.id}:{txo.position}.")
            continue

        # Best-effort language extraction; fall back to 'none' on any error.
        language = 'none'
        try:
            if txo.claim.is_stream and txo.claim.stream.languages:
                language = txo.claim.stream.languages[0].language
        except Exception:
            pass

        claim_hash = txo.claim_hash
        claim_hashes.add(claim_hash)
        claim_record = {
            'claim_hash': claim_hash,
            'claim_id': txo.claim_id,
            'claim_name': txo.claim_name,
            'normalized': txo.normalized_name,
            'txo_hash': txo.ref.hash,
            'tx_position': tx.position,
            'amount': txo.amount,
            'timestamp': header['timestamp'],
            'height': tx.height,
            'title': None,
            'description': None,
            'author': None,
            'duration': None,
            'claim_type': None,
            'stream_type': None,
            'media_type': None,
            'release_time': None,
            'fee_currency': None,
            'fee_amount': 0,
            'reposted_claim_hash': None
        }
        claims.append(claim_record)

        # A row is still emitted for claims whose protobuf fails to parse,
        # just with the type-specific metadata left at its defaults above.
        try:
            claim = txo.claim
        except Exception:
            #self.logger.exception(f"Could not parse claim protobuf for {tx.id}:{txo.position}.")
            continue

        if claim.is_stream:
            claim_record['claim_type'] = CLAIM_TYPES['stream']
            claim_record['media_type'] = claim.stream.source.media_type
            claim_record['stream_type'] = STREAM_TYPES[guess_stream_type(claim_record['media_type'])]
            claim_record['title'] = claim.stream.title
            claim_record['description'] = claim.stream.description
            claim_record['author'] = claim.stream.author
            if claim.stream.video and claim.stream.video.duration:
                claim_record['duration'] = claim.stream.video.duration
            if claim.stream.audio and claim.stream.audio.duration:
                claim_record['duration'] = claim.stream.audio.duration
            if claim.stream.release_time:
                claim_record['release_time'] = claim.stream.release_time
            if claim.stream.has_fee:
                fee = claim.stream.fee
                if isinstance(fee.currency, str):
                    claim_record['fee_currency'] = fee.currency.lower()
                if isinstance(fee.amount, Decimal):
                    # Fee amounts are stored as integer milli-units.
                    claim_record['fee_amount'] = int(fee.amount*1000)
        elif claim.is_repost:
            claim_record['claim_type'] = CLAIM_TYPES['repost']
            claim_record['reposted_claim_hash'] = claim.repost.reference.claim_hash
        elif claim.is_channel:
            claim_record['claim_type'] = CLAIM_TYPES['channel']
        elif claim.is_collection:
            claim_record['claim_type'] = CLAIM_TYPES['collection']

        # Keyed by (value, claim_hash) so re-processing the same claim in one
        # batch keeps a single row per tag/language.
        languages[(language, claim_hash)] = (language, claim_hash, tx.height)
        for tag in clean_tags(claim.message.tags):
            tags[(tag, claim_hash)] = (tag, claim_hash, tx.height)

    if clear_first:
        self._clear_claim_metadata(claim_hashes)

    if tags:
        self.executemany(
            "INSERT OR IGNORE INTO tag (tag, claim_hash, height) VALUES (?, ?, ?)", tags.values()
        )
    if languages:
        self.executemany(
            "INSERT OR IGNORE INTO language (language, claim_hash, height) VALUES (?, ?, ?)", languages.values()
        )

    return claims
def insert_claims(self, txos: List[Output], header):
    """Insert brand-new claim rows for the claim-name txos of a block.

    Derived columns are computed inside the SQL:
    - creation_timestamp/creation_height mirror timestamp/height on insert;
    - release_time falls back to the block timestamp when not set;
    - activation_height is set immediately only when the name has no current
      winner in claimtrie (otherwise left NULL for later calculation);
    - expiration_height switches formula at height 137181 — presumably the
      claim-expiration hard fork; verify against consensus rules;
    - short_url appends the shortest unambiguous claim-id prefix via the
      custom shortest_id() SQL function.
    """
    claims = self._upsertable_claims(txos, header)
    if claims:
        self.executemany("""
            INSERT OR IGNORE INTO claim (
                claim_hash, claim_id, claim_name, normalized, txo_hash, tx_position, amount,
                claim_type, media_type, stream_type, timestamp, creation_timestamp,
                fee_currency, fee_amount, title, description, author, duration, height, reposted_claim_hash,
                creation_height, release_time, activation_height, expiration_height, short_url)
            VALUES (
                :claim_hash, :claim_id, :claim_name, :normalized, :txo_hash, :tx_position, :amount,
                :claim_type, :media_type, :stream_type, :timestamp, :timestamp,
                :fee_currency, :fee_amount, :title, :description, :author, :duration, :height, :reposted_claim_hash, :height,
                CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE :timestamp END,
                CASE WHEN :normalized NOT IN (SELECT normalized FROM claimtrie) THEN :height END,
                CASE WHEN :height >= 137181 THEN :height+2102400 ELSE :height+262974 END,
                :claim_name||COALESCE(
                    (SELECT shortest_id(claim_id, :claim_id) FROM claim WHERE normalized = :normalized),
                    '#'||substr(:claim_id, 1, 1)
                )
            )""", claims)
def update_claims(self, txos: List[Output], header):
    """Apply claim-update txos: clear stale metadata, then rewrite claim rows.

    release_time is only overwritten when the update supplies one; other
    creation-time columns (creation_height, short_url, ...) are untouched.
    """
    claims = self._upsertable_claims(txos, header, clear_first=True)
    if not claims:
        return
    self.executemany("""
        UPDATE claim SET
            txo_hash=:txo_hash, tx_position=:tx_position, amount=:amount, height=:height,
            claim_type=:claim_type, media_type=:media_type, stream_type=:stream_type,
            timestamp=:timestamp, fee_amount=:fee_amount, fee_currency=:fee_currency,
            title=:title, duration=:duration, description=:description, author=:author, reposted_claim_hash=:reposted_claim_hash,
            release_time=CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE release_time END
        WHERE claim_hash=:claim_hash;
        """, claims)
def delete_claims(self, claim_hashes: Set[bytes]):
    """Remove abandoned claims (and their supports / claimtrie entries).

    Returns the set of channel hashes whose signed claims were affected, so
    callers can re-validate those channels.
    """
    if not claim_hashes:
        return set()
    affected_channels = self.execute(*query(
        "SELECT channel_hash FROM claim", channel_hash__is_not_null=1, claim_hash__in=claim_hashes
    )).fetchall()
    for table in ('claim', 'support', 'claimtrie'):
        self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes}))
    self._clear_claim_metadata(claim_hashes)
    return {row.channel_hash for row in affected_channels}
def delete_claims_above_height(self, height: int):
    """Delete every claim accepted above *height*, in batches of 500.

    Used when rolling back past a reorg point.
    """
    rows = self.execute(
        "SELECT claim_hash FROM claim WHERE height>?", (height, )
    ).fetchall()
    remaining = [row[0] for row in rows]
    while remaining:
        batch, remaining = set(remaining[:500]), remaining[500:]
        self.delete_claims(batch)
def _clear_claim_metadata(self, claim_hashes: Set[bytes]):
    """Drop per-claim metadata rows (currently only tags) for *claim_hashes*."""
    if not claim_hashes:
        return
    for table in ('tag',):  # 'language', 'location', etc
        self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes}))
def split_inputs_into_claims_supports_and_other(self, txis):
    """Partition spent txo hashes into claim rows, support rows and the rest.

    Returns (claim_rows, support_rows, leftover_txo_hashes).
    """
    remaining = {txi.txo_ref.hash for txi in txis}
    claims = self.execute(*query(
        "SELECT txo_hash, claim_hash, normalized FROM claim", txo_hash__in=remaining
    )).fetchall()
    remaining -= {row.txo_hash for row in claims}
    supports = {}
    if remaining:
        supports = self.execute(*query(
            "SELECT txo_hash, claim_hash FROM support", txo_hash__in=remaining
        )).fetchall()
        remaining -= {row.txo_hash for row in supports}
    return claims, supports, remaining
def insert_supports(self, txos: List[Output]):
    """Insert a support row for each support txo in *txos*."""
    rows = [
        (txo.ref.hash, txo.tx_ref.tx.position, txo.tx_ref.tx.height,
         txo.claim_hash, txo.amount)
        for txo in txos
    ]
    if rows:
        self.executemany(
            "INSERT OR IGNORE INTO support ("
            " txo_hash, tx_position, height, claim_hash, amount"
            ") "
            "VALUES (?, ?, ?, ?, ?)", rows
        )
def delete_supports(self, txo_hashes: Set[bytes]):
    """Delete support rows whose txos were spent."""
    if not txo_hashes:
        return
    self.execute(*self._delete_sql('support', {'txo_hash__in': txo_hashes}))
def calculate_reposts(self, txos: List[Output]):
    """Recount `claim.reposted` for every claim referenced by a repost txo.

    Txos whose claim protobuf fails to parse are skipped silently (they
    cannot be reposts). Fix: the bare `except:` also swallowed
    KeyboardInterrupt/SystemExit; narrowed to Exception.
    """
    targets = set()
    for txo in txos:
        try:
            claim = txo.claim
        except Exception:
            continue
        if claim.is_repost:
            targets.add((claim.repost.reference.claim_hash,))
    if targets:
        self.executemany(
            """
            UPDATE claim SET reposted = (
                SELECT count(*) FROM claim AS repost WHERE repost.reposted_claim_hash = claim.claim_hash
            )
            WHERE claim_hash = ?
            """, targets
        )
def validate_channel_signatures(self, height, new_claims, updated_claims, spent_claims, affected_channels, timer):
    """(Re)validate claim signatures against their channels' public keys.

    Covers new/updated claims, claims inside channels whose key changed,
    claims orphaned by spent channels, and updates per-channel claim counts
    plus canonical URLs and channel_join heights. Each phase is wrapped in a
    sub-timer for profiling.

    Args:
        height: block height being processed (used for channel_join).
        new_claims / updated_claims: claim txos from this block.
        spent_claims: claim hashes abandoned in this block.
        affected_channels: channel hashes returned by delete_claims.
        timer: hierarchical profiling timer.
    """
    if not new_claims and not updated_claims and not spent_claims:
        return

    sub_timer = timer.add_timer('segregate channels and signables')
    sub_timer.start()
    # Channels carry the public keys; everything else may need validating.
    channels, new_channel_keys, signables = {}, {}, {}
    for txo in chain(new_claims, updated_claims):
        # NOTE(review): bare except also swallows KeyboardInterrupt —
        # consider `except Exception`.
        try:
            claim = txo.claim
        except:
            continue
        if claim.is_channel:
            channels[txo.claim_hash] = txo
            new_channel_keys[txo.claim_hash] = claim.channel.public_key_bytes
        else:
            signables[txo.claim_hash] = txo
    sub_timer.stop()

    sub_timer = timer.add_timer('make list of channels we need to lookup')
    sub_timer.start()
    # Signed claims referencing channels not present in this block need a
    # database lookup to fetch the channel key.
    missing_channel_keys = set()
    for txo in signables.values():
        claim = txo.claim
        if claim.is_signed and claim.signing_channel_hash not in new_channel_keys:
            missing_channel_keys.add(claim.signing_channel_hash)
    sub_timer.stop()

    sub_timer = timer.add_timer('lookup missing channels')
    sub_timer.start()
    all_channel_keys = {}
    if new_channel_keys or missing_channel_keys or affected_channels:
        all_channel_keys = dict(self.execute(*query(
            "SELECT claim_hash, public_key_bytes FROM claim",
            claim_hash__in=set(new_channel_keys) | missing_channel_keys | affected_channels
        )))
    sub_timer.stop()

    sub_timer = timer.add_timer('prepare for updating claims')
    sub_timer.start()
    # Track channels whose key actually changed: all their previously signed
    # claims must be re-validated against the new key.
    changed_channel_keys = {}
    for claim_hash, new_key in new_channel_keys.items():
        if claim_hash not in all_channel_keys or all_channel_keys[claim_hash] != new_key:
            all_channel_keys[claim_hash] = new_key
            changed_channel_keys[claim_hash] = new_key
    claim_updates = []
    for claim_hash, txo in signables.items():
        claim = txo.claim
        # Default: unsigned claim — clear any previous signature columns.
        update = {
            'claim_hash': claim_hash,
            'channel_hash': None,
            'signature': None,
            'signature_digest': None,
            'signature_valid': None
        }
        if claim.is_signed:
            # signature_valid starts at 0 and is flipped below if it checks out.
            update.update({
                'channel_hash': claim.signing_channel_hash,
                'signature': txo.get_encoded_signature(),
                'signature_digest': txo.get_signature_digest(self.ledger),
                'signature_valid': 0
            })
        claim_updates.append(update)
    sub_timer.stop()

    sub_timer = timer.add_timer('find claims affected by a change in channel key')
    sub_timer.start()
    if changed_channel_keys:
        sql = f"""
        SELECT * FROM claim WHERE
            channel_hash IN ({','.join('?' for _ in changed_channel_keys)}) AND
            signature IS NOT NULL
        """
        for affected_claim in self.execute(sql, changed_channel_keys.keys()):
            # Claims already queued (in signables) will be re-validated anyway.
            if affected_claim.claim_hash not in signables:
                claim_updates.append({
                    'claim_hash': affected_claim.claim_hash,
                    'channel_hash': affected_claim.channel_hash,
                    'signature': affected_claim.signature,
                    'signature_digest': affected_claim.signature_digest,
                    'signature_valid': 0
                })
    sub_timer.stop()

    sub_timer = timer.add_timer('verify signatures')
    sub_timer.start()
    for update in claim_updates:
        channel_pub_key = all_channel_keys.get(update['channel_hash'])
        if channel_pub_key and update['signature']:
            update['signature_valid'] = Output.is_signature_valid(
                bytes(update['signature']), bytes(update['signature_digest']), channel_pub_key
            )
    sub_timer.stop()

    sub_timer = timer.add_timer('update claims')
    sub_timer.start()
    if claim_updates:
        # channel_join/canonical_url are preserved when the claim stays
        # validly signed in the same channel; recomputed on (re)join.
        self.executemany(f"""
            UPDATE claim SET
                channel_hash=:channel_hash, signature=:signature, signature_digest=:signature_digest,
                signature_valid=:signature_valid,
                channel_join=CASE
                    WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN channel_join
                    WHEN :signature_valid=1 THEN {height}
                END,
                canonical_url=CASE
                    WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN canonical_url
                    WHEN :signature_valid=1 THEN
                        (SELECT short_url FROM claim WHERE claim_hash=:channel_hash)||'/'||
                        claim_name||COALESCE(
                            (SELECT shortest_id(other_claim.claim_id, claim.claim_id) FROM claim AS other_claim
                             WHERE other_claim.signature_valid = 1 AND
                                   other_claim.channel_hash = :channel_hash AND
                                   other_claim.normalized = claim.normalized),
                            '#'||substr(claim_id, 1, 1)
                        )
                END
            WHERE claim_hash=:claim_hash;
            """, claim_updates)
    sub_timer.stop()

    sub_timer = timer.add_timer('update claims affected by spent channels')
    sub_timer.start()
    if spent_claims:
        # Claims signed by a now-abandoned channel become invalid and lose
        # their canonical URL.
        self.execute(
            f"""
            UPDATE claim SET
                signature_valid=CASE WHEN signature IS NOT NULL THEN 0 END,
                channel_join=NULL, canonical_url=NULL
            WHERE channel_hash IN ({','.join('?' for _ in spent_claims)})
            """, spent_claims
        )
    sub_timer.stop()

    sub_timer = timer.add_timer('update channels')
    sub_timer.start()
    if channels:
        self.executemany(
            """
            UPDATE claim SET
                public_key_bytes=:public_key_bytes,
                public_key_hash=:public_key_hash
            WHERE claim_hash=:claim_hash""", [{
                'claim_hash': claim_hash,
                'public_key_bytes': txo.claim.channel.public_key_bytes,
                'public_key_hash': self.ledger.address_to_hash160(
                    self.ledger.public_key_to_address(txo.claim.channel.public_key_bytes)
                )
            } for claim_hash, txo in channels.items()]
        )
    sub_timer.stop()

    sub_timer = timer.add_timer('update claims_in_channel counts')
    sub_timer.start()
    if all_channel_keys:
        self.executemany(f"""
            UPDATE claim SET
                claims_in_channel=(
                    SELECT COUNT(*) FROM claim AS claim_in_channel
                    WHERE claim_in_channel.signature_valid=1 AND
                          claim_in_channel.channel_hash=claim.claim_hash
                )
            WHERE claim_hash = ?
            """, [(channel_hash,) for channel_hash in all_channel_keys.keys()])
    sub_timer.stop()

    sub_timer = timer.add_timer('update blocked claims list')
    sub_timer.start()
    if (self.blocking_channel_hashes.intersection(all_channel_keys) or
            self.filtering_channel_hashes.intersection(all_channel_keys)):
        self.update_blocked_and_filtered_claims()
    sub_timer.stop()
def _update_support_amount(self, claim_hashes):
    """Recompute claim.support_amount as the sum of live supports."""
    if not claim_hashes:
        return
    placeholders = ','.join('?' for _ in claim_hashes)
    self.execute(f"""
        UPDATE claim SET
            support_amount = COALESCE(
                (SELECT SUM(amount) FROM support WHERE support.claim_hash=claim.claim_hash), 0
            )
        WHERE claim_hash IN ({placeholders})
    """, claim_hashes)
def _update_effective_amount(self, height, claim_hashes=None):
    """Refresh effective_amount (= amount + support_amount).

    Always done for claims activating exactly at *height*; additionally for
    any already-active claims in *claim_hashes* (whose supports changed).
    """
    self.execute(
        f"UPDATE claim SET effective_amount = amount + support_amount "
        f"WHERE activation_height = {height}"
    )
    if not claim_hashes:
        return
    placeholders = ','.join('?' for _ in claim_hashes)
    self.execute(
        f"UPDATE claim SET effective_amount = amount + support_amount "
        f"WHERE activation_height < {height} "
        f"  AND claim_hash IN ({placeholders})",
        claim_hashes
    )
def _calculate_activation_height(self, height):
    """Assign pending activation heights using the takeover-delay formula.

    Delay grows with time since the name's last takeover, capped at 4032
    blocks; names with no claimtrie entry use *height* as the baseline.
    """
    takeover_height_sql = f"""COALESCE(
        (SELECT last_take_over_height FROM claimtrie
        WHERE claimtrie.normalized=claim.normalized),
        {height}
    )
    """
    self.execute(f"""
        UPDATE claim SET activation_height =
            {height} + min(4032, cast(({height} - {takeover_height_sql}) / 32 AS INT))
        WHERE activation_height IS NULL
    """)
def _perform_overtake(self, height, changed_claim_hashes, deleted_names):
    """Recompute the winning claim for every name that may have changed hands.

    Candidate names come from claims activating at *height*, from
    *changed_claim_hashes*, and from *deleted_names*. For each name whose
    current claimtrie winner differs from the top claim, the claimtrie row is
    replaced (or inserted) and the winner's activation is pulled forward.
    """
    deleted_names_sql = claim_hashes_sql = ""
    if changed_claim_hashes:
        claim_hashes_sql = f"OR claim_hash IN ({','.join('?' for _ in changed_claim_hashes)})"
    if deleted_names:
        deleted_names_sql = f"OR normalized IN ({','.join('?' for _ in deleted_names)})"
    # NOTE: the inner SELECT is ordered so that, per name, the best claim
    # (highest effective_amount, oldest height, lowest tx_position) comes
    # first; GROUP BY then keeps that row per normalized name. This relies
    # on SQLite's bare-column GROUP BY behavior combined with
    # MAX(effective_amount) — verify against the SQLite version in use.
    overtakes = self.execute(f"""
        SELECT winner.normalized, winner.claim_hash,
               claimtrie.claim_hash AS current_winner,
               MAX(winner.effective_amount) AS max_winner_effective_amount
        FROM (
            SELECT normalized, claim_hash, effective_amount FROM claim
            WHERE normalized IN (
                SELECT normalized FROM claim WHERE activation_height={height} {claim_hashes_sql}
            ) {deleted_names_sql}
            ORDER BY effective_amount DESC, height ASC, tx_position ASC
        ) AS winner LEFT JOIN claimtrie USING (normalized)
        GROUP BY winner.normalized
        HAVING current_winner IS NULL OR current_winner <> winner.claim_hash
        """, list(changed_claim_hashes)+deleted_names)
    for overtake in overtakes:
        if overtake.current_winner:
            # Name already had a winner: record the takeover.
            self.execute(
                f"UPDATE claimtrie SET claim_hash = ?, last_take_over_height = {height} "
                f"WHERE normalized = ?",
                (overtake.claim_hash, overtake.normalized)
            )
        else:
            # First winner for this name.
            self.execute(
                f"INSERT INTO claimtrie (claim_hash, normalized, last_take_over_height) "
                f"VALUES (?, ?, {height})",
                (overtake.claim_hash, overtake.normalized)
            )
        # A takeover immediately activates all pending claims for the name.
        self.execute(
            f"UPDATE claim SET activation_height = {height} WHERE normalized = ? "
            f"AND (activation_height IS NULL OR activation_height > {height})",
            (overtake.normalized,)
        )
def _copy(self, height):
    """Snapshot claimtrie into claimtrie{height}, keeping the last 50 snapshots."""
    if height > 50:
        stale_table = f"claimtrie{height - 50}"
        self.execute(f"DROP TABLE {stale_table}")
    snapshot_table = f"claimtrie{height}"
    self.execute(f"CREATE TABLE {snapshot_table} AS SELECT * FROM claimtrie")
def update_claimtrie(self, height, changed_claim_hashes, deleted_names, timer):
    """Run one full winner-recalculation round for *height* under *timer*."""
    run = timer.run
    run(self._calculate_activation_height, height)
    run(self._update_support_amount, changed_claim_hashes)
    run(self._update_effective_amount, height, changed_claim_hashes)
    run(self._perform_overtake, height, changed_claim_hashes, list(deleted_names))
    # A takeover can activate more claims, so settle amounts and winners once more.
    run(self._update_effective_amount, height)
    run(self._perform_overtake, height, [], [])
def get_expiring(self, height):
    """Return (claim_hash, normalized) rows for claims expiring exactly at *height*."""
    sql = f"SELECT claim_hash, normalized FROM claim WHERE expiration_height = {height}"
    return self.execute(sql)
def advance_txs(self, height, all_txs, header, daemon_height, timer):
    """Apply one block's transactions to the claims database.

    Classifies all spends and outputs, cancels out claims/supports that are
    created and abandoned within the same block, folds in expirations, then
    performs deletes, inserts, updates, signature validation, claimtrie
    recalculation and trending, all instrumented through *timer*.

    Args:
        height: height of the block being applied.
        all_txs: iterable of (raw_tx, txid) pairs in block order.
        header: block header dict (provides the timestamp for claim rows).
        daemon_height: current tip height reported by the daemon.
        timer: hierarchical profiling timer.
    """
    insert_claims = []
    update_claims = []
    update_claim_hashes = set()
    delete_claim_hashes = set()
    insert_supports = []
    delete_support_txo_hashes = set()
    recalculate_claim_hashes = set()  # added/deleted supports, added/updated claim
    deleted_claim_names = set()
    delete_others = set()
    body_timer = timer.add_timer('body')
    for position, (etx, txid) in enumerate(all_txs):
        # NOTE(review): txid is unused in this loop body.
        tx = timer.run(
            Transaction, etx.raw, height=height, position=position
        )
        # Inputs: classify what this tx spends.
        spent_claims, spent_supports, spent_others = timer.run(
            self.split_inputs_into_claims_supports_and_other, tx.inputs
        )
        body_timer.start()
        delete_claim_hashes.update({r.claim_hash for r in spent_claims})
        deleted_claim_names.update({r.normalized for r in spent_claims})
        delete_support_txo_hashes.update({r.txo_hash for r in spent_supports})
        recalculate_claim_hashes.update({r.claim_hash for r in spent_supports})
        delete_others.update(spent_others)
        # Outputs: classify what this tx creates.
        for output in tx.outputs:
            if output.is_support:
                insert_supports.append(output)
                recalculate_claim_hashes.add(output.claim_hash)
            elif output.script.is_claim_name:
                insert_claims.append(output)
                recalculate_claim_hashes.add(output.claim_hash)
            elif output.script.is_update_claim:
                claim_hash = output.claim_hash
                update_claims.append(output)
                recalculate_claim_hashes.add(claim_hash)
        body_timer.stop()

    # A claim updated and abandoned within the same block is a no-op.
    skip_update_claim_timer = timer.add_timer('skip update of abandoned claims')
    skip_update_claim_timer.start()
    for updated_claim in list(update_claims):
        if updated_claim.ref.hash in delete_others:
            update_claims.remove(updated_claim)
    for updated_claim in update_claims:
        claim_hash = updated_claim.claim_hash
        # An update supersedes a same-block abandon of the old txo.
        delete_claim_hashes.discard(claim_hash)
        update_claim_hashes.add(claim_hash)
    skip_update_claim_timer.stop()

    # Likewise a claim created and spent within the same block — unless a
    # later update in this block keeps it alive.
    skip_insert_claim_timer = timer.add_timer('skip insertion of abandoned claims')
    skip_insert_claim_timer.start()
    for new_claim in list(insert_claims):
        if new_claim.ref.hash in delete_others:
            if new_claim.claim_hash not in update_claim_hashes:
                insert_claims.remove(new_claim)
    skip_insert_claim_timer.stop()

    skip_insert_support_timer = timer.add_timer('skip insertion of abandoned supports')
    skip_insert_support_timer.start()
    for new_support in list(insert_supports):
        if new_support.ref.hash in delete_others:
            insert_supports.remove(new_support)
    skip_insert_support_timer.stop()

    # Claims expiring at this height are treated like abandons.
    expire_timer = timer.add_timer('recording expired claims')
    expire_timer.start()
    for expired in self.get_expiring(height):
        delete_claim_hashes.add(expired.claim_hash)
        deleted_claim_names.add(expired.normalized)
    expire_timer.stop()

    r = timer.run
    # Full-text-search index is kept in step around each mutation phase.
    r(update_full_text_search, 'before-delete',
      delete_claim_hashes, self.db.cursor(), self.main.first_sync)
    affected_channels = r(self.delete_claims, delete_claim_hashes)
    r(self.delete_supports, delete_support_txo_hashes)
    r(self.insert_claims, insert_claims, header)
    r(self.calculate_reposts, insert_claims)
    r(update_full_text_search, 'after-insert',
      [txo.claim_hash for txo in insert_claims], self.db.cursor(), self.main.first_sync)
    r(update_full_text_search, 'before-update',
      [txo.claim_hash for txo in update_claims], self.db.cursor(), self.main.first_sync)
    r(self.update_claims, update_claims, header)
    r(update_full_text_search, 'after-update',
      [txo.claim_hash for txo in update_claims], self.db.cursor(), self.main.first_sync)
    r(self.validate_channel_signatures, height, insert_claims,
      update_claims, delete_claim_hashes, affected_channels, forward_timer=True)
    r(self.insert_supports, insert_supports)
    r(self.update_claimtrie, height, recalculate_claim_hashes, deleted_claim_names, forward_timer=True)
    for algorithm in self.trending:
        r(algorithm.run, self.db.cursor(), height, daemon_height, recalculate_claim_hashes)
    # Once the initial sync reaches the daemon tip, finalize the FTS index.
    if not self._fts_synced and self.main.first_sync and height == daemon_height:
        r(first_sync_finished, self.db.cursor())
        self._fts_synced = True
class LBRYLevelDB(LevelDB):
    """LevelDB history store that also maintains the claims SQLite database."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        path = os.path.join(self.env.db_dir, 'claims.db')
        # Keep only trending algorithms that are actually registered.
        trending = [
            TRENDING_ALGORITHMS[name]
            for name in self.env.trending_algorithms
            if name in TRENDING_ALGORITHMS
        ]
        self.sql = SQLDB(
            self, path,
            self.env.default('BLOCKING_CHANNEL_IDS', '').split(' '),
            self.env.default('FILTERING_CHANNEL_IDS', '').split(' '),
            trending
        )

    def close(self):
        super().close()
        self.sql.close()

    async def _open_dbs(self, *args, **kwargs):
        await super()._open_dbs(*args, **kwargs)
        self.sql.open()
|
from ._tksheet_vars import *
from ._tksheet_other_classes import *
from collections import defaultdict, deque
from itertools import islice, repeat, accumulate, chain, product, cycle
from math import floor, ceil
from tkinter import TclError
import bisect
import csv as csv_module
import io
import pickle
import tkinter as tk
import zlib
class MainTable(tk.Canvas):
def __init__(self,
             parentframe = None,
             enable_edit_cell_auto_resize = True,
             page_up_down_select_row = False,
             expand_sheet_if_paste_too_big = False,
             paste_insert_column_limit = None,
             paste_insert_row_limit = None,
             arrow_key_down_right_scroll_page = False,
             ctrl_keys_over_dropdowns_enabled = False,
             column_width = None,
             column_headers_canvas = None,
             row_index_canvas = None,
             headers = None,
             header_height = None,
             row_height = None,
             data_reference = None,
             total_cols = None,
             total_rows = None,
             row_index = None,
             font = None,
             header_font = None,
             popup_menu_font = get_font(),
             popup_menu_fg = "gray10",
             popup_menu_bg = "white",
             popup_menu_highlight_bg = "#f1f3f4",
             popup_menu_highlight_fg = "gray10",
             align = None,
             width = None,
             height = None,
             table_bg = "white",
             table_grid_fg = "gray15",
             table_fg = "black",
             show_selected_cells_border = True,
             table_selected_cells_border_fg = "#1a73e8",
             table_selected_cells_bg = "#e7f0fd",
             display_selected_fg_over_highlights = False,
             table_selected_cells_fg = "black",
             table_selected_rows_border_fg = "#1a73e8",
             table_selected_rows_bg = "#e7f0fd",
             table_selected_rows_fg = "black",
             table_selected_columns_border_fg = "#1a73e8",
             table_selected_columns_bg = "#e7f0fd",
             table_selected_columns_fg = "black",
             displayed_columns = None,
             all_columns_displayed = True,
             show_vertical_grid = True,
             show_horizontal_grid = True,
             show_index = True,
             show_header = True,
             selected_rows_to_end_of_window = False,
             horizontal_grid_to_end_of_window = False,
             vertical_grid_to_end_of_window = False,
             empty_horizontal = 150,
             empty_vertical = 100,
             max_undos = 20):
    """Main spreadsheet canvas: holds the data, selection state, bindings
    and drawing caches, and wires itself to the header/index canvases.

    Fix: `displayed_columns` previously defaulted to a shared mutable `[]`;
    it now defaults to None and a fresh list is made per instance.
    NOTE(review): `popup_menu_font = get_font()` is still evaluated once at
    class-definition time — confirm get_font() is side-effect free.
    """
    if displayed_columns is None:
        displayed_columns = []
    tk.Canvas.__init__(self,
                       parentframe,
                       width = width,
                       height = height,
                       background = table_bg,
                       highlightthickness = 0)
    self.parentframe = parentframe
    # --- mouse / dropdown interaction state ---
    self.b1_pressed_loc = None
    self.existing_dropdown_canvas_id = None
    self.existing_dropdown_window = None
    self.closed_dropdown = None
    # --- canvas item caches: displayed vs hidden (recycled) items ---
    self.disp_text = {}
    self.disp_high = {}
    self.disp_grid = {}
    self.disp_fill_sels = {}
    self.disp_bord_sels = {}
    self.disp_resize_lines = {}
    self.disp_ctrl_outline = {}
    self.disp_dropdown = {}
    self.disp_checkbox = {}
    self.hidd_ctrl_outline = {}
    self.hidd_text = {}
    self.hidd_high = {}
    self.hidd_grid = {}
    self.hidd_fill_sels = {}
    self.hidd_bord_sels = {}
    self.hidd_resize_lines = {}
    self.hidd_dropdown = {}
    self.hidd_checkbox = {}
    # --- per-cell / per-column / per-row option stores ---
    self.cell_options = {}
    self.col_options = {}
    self.row_options = {}
    """
    cell options dict looks like:
    {(row int, column int): {'dropdown': {'values': values,
                                          'window': "no dropdown open",
                                          'select_function': selection_function,
                                          'keypress_function': keypress_function,
                                          'state': state},
                             'highlight: (bg, fg),
                             'align': "e",
                             'readonly': True}
    """
    self.extra_table_rc_menu_funcs = {}
    self.extra_index_rc_menu_funcs = {}
    self.extra_header_rc_menu_funcs = {}
    self.max_undos = max_undos
    self.undo_storage = deque(maxlen = max_undos)
    # --- behavior flags taken from keyword arguments ---
    self.page_up_down_select_row = page_up_down_select_row
    self.expand_sheet_if_paste_too_big = expand_sheet_if_paste_too_big
    self.paste_insert_column_limit = paste_insert_column_limit
    self.paste_insert_row_limit = paste_insert_row_limit
    self.arrow_key_down_right_scroll_page = arrow_key_down_right_scroll_page
    self.cell_auto_resize_enabled = enable_edit_cell_auto_resize
    self.display_selected_fg_over_highlights = display_selected_fg_over_highlights
    self.centre_alignment_text_mod_indexes = (slice(1, None), slice(None, -1))
    self.c_align_cyc = cycle(self.centre_alignment_text_mod_indexes)
    self.show_index = show_index
    self.show_header = show_header
    self.selected_rows_to_end_of_window = selected_rows_to_end_of_window
    self.horizontal_grid_to_end_of_window = horizontal_grid_to_end_of_window
    self.vertical_grid_to_end_of_window = vertical_grid_to_end_of_window
    self.empty_horizontal = empty_horizontal
    self.empty_vertical = empty_vertical
    self.show_vertical_grid = show_vertical_grid
    self.show_horizontal_grid = show_horizontal_grid
    self.min_rh = 0
    self.hdr_min_rh = 0
    self.being_drawn_rect = None
    # --- user-supplied callbacks (all optional) ---
    self.extra_motion_func = None
    self.extra_b1_press_func = None
    self.extra_b1_motion_func = None
    self.extra_b1_release_func = None
    self.extra_double_b1_func = None
    self.extra_rc_func = None
    self.extra_begin_ctrl_c_func = None
    self.extra_end_ctrl_c_func = None
    self.extra_begin_ctrl_x_func = None
    self.extra_end_ctrl_x_func = None
    self.extra_begin_ctrl_v_func = None
    self.extra_end_ctrl_v_func = None
    self.extra_begin_ctrl_z_func = None
    self.extra_end_ctrl_z_func = None
    self.extra_begin_delete_key_func = None
    self.extra_end_delete_key_func = None
    self.extra_begin_edit_cell_func = None
    self.extra_end_edit_cell_func = None
    self.extra_begin_del_rows_rc_func = None
    self.extra_end_del_rows_rc_func = None
    self.extra_begin_del_cols_rc_func = None
    self.extra_end_del_cols_rc_func = None
    self.extra_begin_insert_cols_rc_func = None
    self.extra_end_insert_cols_rc_func = None
    self.extra_begin_insert_rows_rc_func = None
    self.extra_end_insert_rows_rc_func = None
    self.text_editor_user_bound_keys = {}
    self.selection_binding_func = None
    self.deselection_binding_func = None
    self.drag_selection_binding_func = None
    self.shift_selection_binding_func = None
    self.select_all_binding_func = None
    # --- capability toggles (all off until enabled by the wrapper) ---
    self.single_selection_enabled = False
    self.toggle_selection_enabled = False  # with this mode every left click adds the cell to selected cells
    self.ctrl_keys_over_dropdowns_enabled = ctrl_keys_over_dropdowns_enabled
    self.drag_selection_enabled = False
    self.select_all_enabled = False
    self.arrowkeys_enabled = False
    self.undo_enabled = False
    self.cut_enabled = False
    self.copy_enabled = False
    self.paste_enabled = False
    self.delete_key_enabled = False
    self.rc_select_enabled = False
    self.rc_delete_column_enabled = False
    self.rc_insert_column_enabled = False
    self.rc_delete_row_enabled = False
    self.rc_insert_row_enabled = False
    self.rc_popup_menus_enabled = False
    self.edit_cell_enabled = False
    self.text_editor_loc = None
    self.show_selected_cells_border = show_selected_cells_border
    self.new_row_width = 0
    self.new_header_height = 0
    # NOTE(review): duplicate assignment — parentframe was already set above.
    self.parentframe = parentframe
    self.row_width_resize_bbox = tuple()
    self.header_height_resize_bbox = tuple()
    # Cross-wire the three canvases (table, column header, row index).
    self.CH = column_headers_canvas
    self.CH.MT = self
    self.CH.RI = row_index_canvas
    self.RI = row_index_canvas
    self.RI.MT = self
    self.RI.CH = column_headers_canvas
    self.TL = None  # is set from within TopLeftRectangle() __init__
    self.all_columns_displayed = True
    self.align = align
    # --- fonts: (family, size, weight) triples for table and header ---
    self.my_font = font
    self.fnt_fam = font[0]
    self.fnt_sze = font[1]
    self.fnt_wgt = font[2]
    self.my_hdr_font = header_font
    self.hdr_fnt_fam = header_font[0]
    self.hdr_fnt_sze = header_font[1]
    self.hdr_fnt_wgt = header_font[2]
    # Off-screen canvas used purely for measuring rendered text size.
    self.txt_measure_canvas = tk.Canvas(self)
    self.txt_measure_canvas_text = self.txt_measure_canvas.create_text(0, 0, text = "", font = self.my_font)
    self.text_editor = None
    self.text_editor_id = None
    self.default_cw = column_width
    # Heights are (unit-name-or-str, pixel-value) pairs; a str argument is
    # treated as a unit name, an int as a literal pixel height.
    self.default_rh = (row_height if isinstance(row_height, str) else "pixels",
                       row_height if isinstance(row_height, int) else self.GetLinesHeight(int(row_height)))
    self.default_hh = (header_height if isinstance(header_height, str) else "pixels",
                       header_height if isinstance(header_height, int) else self.GetHdrLinesHeight(int(header_height)))
    self.set_fnt_help()
    self.set_hdr_fnt_help()
    # --- data model ---
    self.data_ref = data_reference
    if isinstance(self.data_ref, (list, tuple)):
        self.data_ref = data_reference
    else:
        self.data_ref = []
    if not self.data_ref:
        # Build an empty grid when only dimensions were supplied.
        if isinstance(total_rows, int) and isinstance(total_cols, int) and total_rows > 0 and total_cols > 0:
            self.data_ref = [list(repeat("", total_cols)) for i in range(total_rows)]
    # headers/row_index may be an int (use that data row/column) or a list.
    if isinstance(headers, int):
        self.my_hdrs = headers
    else:
        if headers:
            self.my_hdrs = headers
        else:
            self.my_hdrs = []
    if isinstance(row_index, int):
        self.my_row_index = row_index
    else:
        if row_index:
            self.my_row_index = row_index
        else:
            self.my_row_index = []
    # --- geometry: cumulative pixel positions of column/row boundaries ---
    self.displayed_columns = []
    self.col_positions = [0]
    self.row_positions = [0]
    self.reset_row_positions()
    self.display_columns(indexes = displayed_columns,
                         enable = not all_columns_displayed,
                         reset_col_positions = False,
                         set_col_positions = False,
                         deselect_all = False)
    self.reset_col_positions()
    # --- colors and popup-menu appearance ---
    self.table_grid_fg = table_grid_fg
    self.table_fg = table_fg
    self.table_selected_cells_border_fg = table_selected_cells_border_fg
    self.table_selected_cells_bg = table_selected_cells_bg
    self.table_selected_cells_fg = table_selected_cells_fg
    self.table_selected_rows_border_fg = table_selected_rows_border_fg
    self.table_selected_rows_bg = table_selected_rows_bg
    self.table_selected_rows_fg = table_selected_rows_fg
    self.table_selected_columns_border_fg = table_selected_columns_border_fg
    self.table_selected_columns_bg = table_selected_columns_bg
    self.table_selected_columns_fg = table_selected_columns_fg
    self.table_bg = table_bg
    self.popup_menu_font = popup_menu_font
    self.popup_menu_fg = popup_menu_fg
    self.popup_menu_bg = popup_menu_bg
    self.popup_menu_highlight_bg = popup_menu_highlight_bg
    self.popup_menu_highlight_fg = popup_menu_highlight_fg
    self.rc_popup_menu = None
    self.empty_rc_popup_menu = None
    self.basic_bindings()
    self.create_rc_menus()
def refresh(self, event = None):
    """Redraw the full table (grid and text); usable directly as a Tk event
    callback — *event* is ignored."""
    self.main_table_redraw_grid_and_text(True, True)
def basic_bindings(self, enable = True):
if enable:
self.bind("<Configure>", self.refresh)
self.bind("<Motion>", self.mouse_motion)
self.bind("<ButtonPress-1>", self.b1_press)
self.bind("<B1-Motion>", self.b1_motion)
self.bind("<ButtonRelease-1>", self.b1_release)
self.bind("<Double-Button-1>", self.double_b1)
self.bind("<MouseWheel>", self.mousewheel)
if USER_OS == "Linux":
for canvas in (self, self.RI):
canvas.bind("<Button-4>", self.mousewheel)
canvas.bind("<Button-5>", self.mousewheel)
for canvas in (self, self.CH):
canvas.bind("<Shift-Button-4>", self.shift_mousewheel)
canvas.bind("<Shift-Button-5>", self.shift_mousewheel)
self.bind("<Shift-MouseWheel>", self.shift_mousewheel)
self.bind("<Shift-ButtonPress-1>", self.shift_b1_press)
self.CH.bind("<Shift-ButtonPress-1>", self.CH.shift_b1_press)
self.RI.bind("<Shift-ButtonPress-1>", self.RI.shift_b1_press)
self.CH.bind("<Shift-MouseWheel>", self.shift_mousewheel)
self.RI.bind("<MouseWheel>", self.mousewheel)
self.bind(get_rc_binding(), self.rc)
else:
self.unbind("<Configure>")
self.unbind("<Motion>")
self.unbind("<ButtonPress-1>")
self.unbind("<B1-Motion>")
self.unbind("<ButtonRelease-1>")
self.unbind("<Double-Button-1>")
self.unbind("<MouseWheel>")
if USER_OS == "Linux":
for canvas in (self, self.RI):
canvas.unbind("<Button-4>")
canvas.unbind("<Button-5>")
for canvas in (self, self.CH):
canvas.unbind("<Shift-Button-4>")
canvas.unbind("<Shift-Button-5>")
self.unbind("<Shift-ButtonPress-1>")
self.CH.unbind("<Shift-ButtonPress-1>")
self.RI.unbind("<Shift-ButtonPress-1>")
self.unbind("<Shift-MouseWheel>")
self.CH.unbind("<Shift-MouseWheel>")
self.RI.unbind("<MouseWheel>")
self.unbind(get_rc_binding())
def show_ctrl_outline(self, canvas = "table", start_cell = (0, 0), end_cell = (0, 0)):
self.create_ctrl_outline(self.col_positions[start_cell[0]] + 2,
self.row_positions[start_cell[1]] + 2,
self.col_positions[end_cell[0]] - 2,
self.row_positions[end_cell[1]] - 2,
fill = "",
dash = (10, 15),
width = 3 if end_cell[0] - start_cell[0] == 1 and end_cell[1] - start_cell[1] == 1 else 2,
outline = self.table_selected_cells_border_fg,
tag = "ctrl")
self.after(1500, self.delete_ctrl_outlines)
def create_ctrl_outline(self, x1, y1, x2, y2, fill, dash, width, outline, tag):
if self.hidd_ctrl_outline:
t, sh = self.hidd_ctrl_outline.popitem()
self.coords(t, x1, y1, x2, y2)
if sh:
self.itemconfig(t, fill = fill, dash = dash, width = width, outline = outline, tag = tag)
else:
self.itemconfig(t, fill = fill, dash = dash, width = width, outline = outline, tag = tag, state = "normal")
self.lift(t)
else:
t = self.create_rectangle(x1, y1, x2, y2, fill = fill, dash = dash, width = width, outline = outline, tag = tag)
self.disp_ctrl_outline[t] = True
def delete_ctrl_outlines(self):
self.hidd_ctrl_outline.update(self.disp_ctrl_outline)
self.disp_ctrl_outline = {}
for t, sh in self.hidd_ctrl_outline.items():
if sh:
self.itemconfig(t, state = "hidden")
self.hidd_ctrl_outline[t] = False
    def get_ctrl_x_c_boxes(self):
        """Collect the selection boxes relevant for a ctrl-x / ctrl-c action.

        Boxes are parsed from canvas item tags of the form
        ("<FillType>", "r1_c1_r2_c2", ...).

        NOTE(review): the return type is intentionally branch-dependent and
        callers rely on it - for cell/column selections it returns a 2-tuple
        ``(boxes, maxrows)``; for row selections it returns just the ``boxes``
        dict; if neither branch matches it falls through and returns ``None``.
        """
        currently_selected = self.currently_selected()
        boxes = {}
        if isinstance(currently_selected[0], int) or currently_selected[0] == "column":
            for item in chain(self.find_withtag("CellSelectFill"), self.find_withtag("Current_Outside"), self.find_withtag("ColSelectFill")):
                alltags = self.gettags(item)
                if alltags[0] == "CellSelectFill" or alltags[0] == "Current_Outside":
                    boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "cells"
                elif alltags[0] == "ColSelectFill":
                    boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "cols"
            maxrows = 0
            for r1, c1, r2, c2 in boxes:
                if r2 - r1 > maxrows:
                    maxrows = r2 - r1
            # Keep only the boxes spanning the tallest row count so the copied
            # clipboard rows stay rectangular.
            for r1, c1, r2, c2 in tuple(boxes):
                if r2 - r1 < maxrows:
                    del boxes[(r1, c1, r2, c2)]
            return boxes, maxrows
        elif currently_selected[0] == "row":
            for item in self.find_withtag("RowSelectFill"):
                boxes[tuple(int(e) for e in self.gettags(item)[1].split("_") if e)] = "rows"
            return boxes
def ctrl_c(self, event = None):
currently_selected = self.currently_selected()
if currently_selected:
s = io.StringIO()
writer = csv_module.writer(s, dialect = csv_module.excel_tab, lineterminator = "\n")
rows = []
if isinstance(currently_selected[0], int) or currently_selected[0] == "column":
boxes, maxrows = self.get_ctrl_x_c_boxes()
if self.extra_begin_ctrl_c_func is not None:
try:
self.extra_begin_ctrl_c_func(CtrlKeyEvent("begin_ctrl_c", boxes, currently_selected, tuple()))
except:
return
for rn in range(maxrows):
row = []
for r1, c1, r2, c2 in boxes:
if r2 - r1 < maxrows:
continue
data_ref_rn = r1 + rn
for c in range(c1, c2):
dcol = c if self.all_columns_displayed else self.displayed_columns[c]
try:
row.append(self.data_ref[data_ref_rn][dcol])
except:
row.append("")
writer.writerow(row)
rows.append(row)
elif currently_selected[0] == "row":
boxes = self.get_ctrl_x_c_boxes()
if self.extra_begin_ctrl_c_func is not None:
try:
self.extra_begin_ctrl_c_func(CtrlKeyEvent("begin_ctrl_c", boxes, currently_selected, tuple()))
except:
return
for r1, c1, r2, c2 in boxes:
for rn in range(r2 - r1):
row = []
data_ref_rn = r1 + rn
for c in range(c1, c2):
dcol = c if self.all_columns_displayed else self.displayed_columns[c]
try:
row.append(self.data_ref[data_ref_rn][dcol])
except:
row.append("")
writer.writerow(row)
rows.append(row)
for r1, c1, r2, c2 in boxes:
self.show_ctrl_outline(canvas = "table", start_cell = (c1, r1), end_cell = (c2, r2))
self.clipboard_clear()
self.clipboard_append(s.getvalue())
self.update()
if self.extra_end_ctrl_c_func is not None:
self.extra_end_ctrl_c_func(CtrlKeyEvent("end_ctrl_c", boxes, currently_selected, rows))
    def ctrl_x(self, event = None):
        """Cut the selection: copy it to the clipboard, then blank the cells.

        Two passes per selection kind: the first copies values into the csv
        writer (and into undo storage when undo is enabled); the second blanks
        every cell that may be edited. Read-only rows/columns/cells and
        checkbox cells are never blanked; dropdown cells are blanked only when
        "" is an allowed value or ``ctrl_keys_over_dropdowns_enabled`` is set.
        A raising ``extra_begin_ctrl_x_func`` cancels the cut.
        """
        if self.anything_selected():
            if self.undo_enabled:
                undo_storage = {}
            s = io.StringIO()
            writer = csv_module.writer(s, dialect = csv_module.excel_tab, lineterminator = "\n")
            currently_selected = self.currently_selected()
            rows = []
            if isinstance(currently_selected[0], int) or currently_selected[0] == "column":
                boxes, maxrows = self.get_ctrl_x_c_boxes()
                if self.extra_begin_ctrl_x_func is not None:
                    try:
                        self.extra_begin_ctrl_x_func(CtrlKeyEvent("begin_ctrl_x", boxes, currently_selected, tuple()))
                    except:
                        return
                # Pass 1: copy values to clipboard buffer and undo storage.
                for rn in range(maxrows):
                    row = []
                    for r1, c1, r2, c2 in boxes:
                        if r2 - r1 < maxrows:
                            continue
                        data_ref_rn = r1 + rn
                        for c in range(c1, c2):
                            # Map displayed column -> data column when columns are hidden.
                            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                            try:
                                sx = f"{self.data_ref[data_ref_rn][dcol]}"
                                row.append(sx)
                                if self.undo_enabled:
                                    undo_storage[(data_ref_rn, dcol)] = sx
                            except:
                                row.append("")
                    writer.writerow(row)
                    rows.append(row)
                # Pass 2: blank every editable cell in the cut region.
                for rn in range(maxrows):
                    for r1, c1, r2, c2 in boxes:
                        if r2 - r1 < maxrows:
                            continue
                        data_ref_rn = r1 + rn
                        if data_ref_rn in self.row_options and 'readonly' in self.row_options[data_ref_rn]:
                            continue
                        for c in range(c1, c2):
                            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                            if (
                                ((data_ref_rn, dcol) in self.cell_options and ('readonly' in self.cell_options[(data_ref_rn, dcol)] or 'checkbox' in self.cell_options[(data_ref_rn, dcol)])) or
                                (dcol in self.col_options and 'readonly' in self.col_options[dcol]) or
                                (not self.ctrl_keys_over_dropdowns_enabled and
                                 (data_ref_rn, dcol) in self.cell_options and
                                 'dropdown' in self.cell_options[(data_ref_rn, dcol)] and
                                 "" not in self.cell_options[(data_ref_rn, dcol)]['dropdown']['values'])
                                ):
                                continue
                            try:
                                self.data_ref[data_ref_rn][dcol] = ""
                            except:
                                continue
            elif currently_selected[0] == "row":
                boxes = self.get_ctrl_x_c_boxes()
                if self.extra_begin_ctrl_x_func is not None:
                    try:
                        self.extra_begin_ctrl_x_func(CtrlKeyEvent("begin_ctrl_x", boxes, currently_selected, tuple()))
                    except:
                        return
                # Pass 1: copy row-selection values.
                for r1, c1, r2, c2 in boxes:
                    for rn in range(r2 - r1):
                        row = []
                        data_ref_rn = r1 + rn
                        for c in range(c1, c2):
                            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                            try:
                                sx = f"{self.data_ref[data_ref_rn][dcol]}"
                                row.append(sx)
                                if self.undo_enabled:
                                    undo_storage[(data_ref_rn, dcol)] = sx
                            except:
                                row.append("")
                        writer.writerow(row)
                        rows.append(row)
                # Pass 2: blank the editable cells of the selected rows.
                for r1, c1, r2, c2 in boxes:
                    for rn in range(r2 - r1):
                        data_ref_rn = r1 + rn
                        if data_ref_rn in self.row_options and 'readonly' in self.row_options[data_ref_rn]:
                            continue
                        for c in range(c1, c2):
                            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                            if (
                                ((data_ref_rn, dcol) in self.cell_options and ('readonly' in self.cell_options[(data_ref_rn, dcol)] or 'checkbox' in self.cell_options[(data_ref_rn, dcol)])) or
                                (dcol in self.col_options and 'readonly' in self.col_options[dcol]) or
                                (not self.ctrl_keys_over_dropdowns_enabled and
                                 (data_ref_rn, dcol) in self.cell_options and
                                 'dropdown' in self.cell_options[(data_ref_rn, dcol)] and
                                 "" not in self.cell_options[(data_ref_rn, dcol)]['dropdown']['values'])
                                ):
                                continue
                            try:
                                self.data_ref[data_ref_rn][dcol] = ""
                            except:
                                continue
            if self.undo_enabled:
                self.undo_storage.append(zlib.compress(pickle.dumps(("edit_cells", undo_storage, tuple(boxes.items()), currently_selected))))
            self.clipboard_clear()
            self.clipboard_append(s.getvalue())
            self.update()
            self.refresh()
            for r1, c1, r2, c2 in boxes:
                self.show_ctrl_outline(canvas = "table", start_cell = (c1, r1), end_cell = (c2, r2))
            if self.extra_end_ctrl_x_func is not None:
                self.extra_end_ctrl_x_func(CtrlKeyEvent("end_ctrl_x", boxes, currently_selected, rows))
    def ctrl_v(self, event = None):
        """Paste tab-delimited clipboard text into the sheet.

        The paste anchor (x1, y1) is derived from the current selection (or
        the end of existing data when nothing is selected and expansion is
        enabled). When ``expand_sheet_if_paste_too_big`` is set, rows/columns
        are appended (subject to the paste insert limits); otherwise the paste
        is clipped to the sheet. Read-only/checkbox cells and restrictive
        dropdowns are skipped. Undo information is recorded when enabled.
        """
        if not self.expand_sheet_if_paste_too_big and (len(self.col_positions) == 1 or len(self.row_positions) == 1):
            return
        currently_selected = self.currently_selected()
        if currently_selected:
            if currently_selected[0] == "column":
                x1 = currently_selected[1]
                y1 = 0
            elif currently_selected[0] == "row":
                y1 = currently_selected[1]
                x1 = 0
            elif isinstance(currently_selected[0], int):
                y1 = currently_selected[0]
                x1 = currently_selected[1]
            # NOTE(review): this elif can never fire - it is nested inside
            # ``if currently_selected:`` where currently_selected is truthy.
            elif not currently_selected and not self.expand_sheet_if_paste_too_big:
                return
            else:
                # No usable anchor in the selection: paste after existing data.
                if not self.data_ref:
                    x1, y1 = 0, 0
                else:
                    if len(self.col_positions) == 1 and len(self.row_positions) > 1:
                        x1, y1 = 0, len(self.row_positions) - 1
                    elif len(self.row_positions) == 1 and len(self.col_positions) > 1:
                        x1, y1 = len(self.col_positions) - 1, 0
                    elif len(self.row_positions) > 1 and len(self.col_positions) > 1:
                        x1, y1 = 0, len(self.row_positions) - 1
        try:
            data = self.clipboard_get()
        except:
            return
        data = list(csv_module.reader(io.StringIO(data), delimiter = "\t", quotechar = '"', skipinitialspace = True))
        if not data:
            return
        numcols = len(max(data, key = len))
        numrows = len(data)
        # Pad ragged clipboard rows to a rectangle.
        for rn, r in enumerate(data):
            if len(r) < numcols:
                data[rn].extend(list(repeat("", numcols - len(r))))
        if self.undo_enabled:
            undo_storage = {}
        if self.expand_sheet_if_paste_too_big:
            added_rows = 0
            added_cols = 0
            if x1 + numcols > len(self.col_positions) - 1:
                added_cols = x1 + numcols - len(self.col_positions) + 1
                # NOTE(review): the clamp below computes
                # ``limit - len(col_positions) - 1``; it looks like it should
                # be ``limit - (len(col_positions) - 1)`` - as written it
                # under-expands by 2. Confirm intended semantics before
                # changing (same pattern for rows below).
                if isinstance(self.paste_insert_column_limit, int) and self.paste_insert_column_limit < len(self.col_positions) - 1 + added_cols:
                    added_cols = self.paste_insert_column_limit - len(self.col_positions) - 1
                if added_cols > 0:
                    self.insert_col_positions(widths = int(added_cols))
                if not self.all_columns_displayed:
                    total_data_cols = self.total_data_cols()
                    self.displayed_columns.extend(list(range(total_data_cols, total_data_cols + added_cols)))
            if y1 + numrows > len(self.row_positions) - 1:
                added_rows = y1 + numrows - len(self.row_positions) + 1
                if isinstance(self.paste_insert_row_limit, int) and self.paste_insert_row_limit < len(self.row_positions) - 1 + added_rows:
                    added_rows = self.paste_insert_row_limit - len(self.row_positions) - 1
                if added_rows > 0:
                    self.insert_row_positions(heights = int(added_rows))
            added_rows_cols = (added_rows, added_cols)
        else:
            added_rows_cols = (0, 0)
        # Clip the paste to the (possibly expanded) sheet dimensions.
        if x1 + numcols > len(self.col_positions) - 1:
            numcols = len(self.col_positions) - 1 - x1
        if y1 + numrows > len(self.row_positions) - 1:
            numrows = len(self.row_positions) - 1 - y1
        if self.extra_begin_ctrl_v_func is not None or self.extra_end_ctrl_v_func is not None:
            rows = [[data[ndr][ndc] for ndc, c in enumerate(range(x1, x1 + numcols))] for ndr, r in enumerate(range(y1, y1 + numrows))]
            if self.extra_begin_ctrl_v_func is not None:
                try:
                    self.extra_begin_ctrl_v_func(PasteEvent("begin_ctrl_v", currently_selected, rows))
                except:
                    return
        for ndr, r in enumerate(range(y1, y1 + numrows)):
            for ndc, c in enumerate(range(x1, x1 + numcols)):
                dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                # Grow the underlying data to fit the target cell if needed.
                if r > len(self.data_ref) - 1:
                    self.data_ref.extend([list(repeat("", c + 1)) for r in range((r + 1) - len(self.data_ref))])
                elif c > len(self.data_ref[r]) - 1:
                    self.data_ref[r].extend(list(repeat("", (c + 1) - len(self.data_ref[r]))))
                if (
                    ((r, dcol) in self.cell_options and 'readonly' in self.cell_options[(r, dcol)]) or
                    ((r, dcol) in self.cell_options and 'checkbox' in self.cell_options[(r, dcol)]) or
                    (dcol in self.col_options and 'readonly' in self.col_options[dcol]) or
                    (r in self.row_options and 'readonly' in self.row_options[r]) or
                    # if pasting not allowed in dropdowns and paste value isn't in dropdown values
                    (not self.ctrl_keys_over_dropdowns_enabled and
                     (r, dcol) in self.cell_options and
                     'dropdown' in self.cell_options[(r, dcol)] and
                     data[ndr][ndc] not in self.cell_options[(r, dcol)]['dropdown']['values'])
                    ):
                    continue
                if self.undo_enabled:
                    undo_storage[(r, dcol)] = f"{self.data_ref[r][dcol]}"
                self.data_ref[r][dcol] = data[ndr][ndc]
        if self.expand_sheet_if_paste_too_big and self.undo_enabled:
            self.equalize_data_row_lengths()
        self.deselect("all")
        if self.undo_enabled:
            self.undo_storage.append(zlib.compress(pickle.dumps(("edit_cells_paste",
                                                                 undo_storage,
                                                                 (((y1, x1, y1 + numrows, x1 + numcols), "cells"), ), # boxes
                                                                 currently_selected,
                                                                 added_rows_cols))))
        self.create_selected(y1, x1, y1 + numrows, x1 + numcols, "cells")
        self.create_current(y1, x1, type_ = "cell", inside = True if numrows > 1 or numcols > 1 else False)
        self.see(r = y1, c = x1, keep_yscroll = False, keep_xscroll = False, bottom_right_corner = False, check_cell_visibility = True, redraw = False)
        self.refresh()
        if self.extra_end_ctrl_v_func is not None:
            self.extra_end_ctrl_v_func(PasteEvent("end_ctrl_v", currently_selected, rows))
    def delete_key(self, event = None):
        """Blank all editable cells inside the current selection (Delete key).

        Selection boxes are parsed from the canvas selection-fill item tags.
        Read-only/checkbox cells and dropdowns that do not allow "" (unless
        ``ctrl_keys_over_dropdowns_enabled``) are skipped. Undo information is
        recorded when enabled; a raising ``extra_begin_delete_key_func``
        cancels the operation.
        """
        if self.anything_selected():
            currently_selected = self.currently_selected()
            if self.undo_enabled:
                undo_storage = {}
            boxes = []
            for item in chain(self.find_withtag("CellSelectFill"), self.find_withtag("RowSelectFill"), self.find_withtag("ColSelectFill"), self.find_withtag("Current_Outside")):
                alltags = self.gettags(item)
                # Tag format is ("<FillType>", "r1_c1_r2_c2", ...).
                box = tuple(int(e) for e in alltags[1].split("_") if e)
                if alltags[0] in ("CellSelectFill", "Current_Outside"):
                    boxes.append((box, "cells"))
                elif alltags[0] == "ColSelectFill":
                    boxes.append((box, "cols"))
                elif alltags[0] == "RowSelectFill":
                    boxes.append((box, "rows"))
            if self.extra_begin_delete_key_func is not None:
                try:
                    self.extra_begin_delete_key_func(CtrlKeyEvent("begin_delete_key", boxes, currently_selected, tuple()))
                except:
                    return
            for (r1, c1, r2, c2), _ in boxes:
                for r in range(r1, r2):
                    for c in range(c1, c2):
                        dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                        if (
                            ((r, dcol) in self.cell_options and ('readonly' in self.cell_options[(r, dcol)] or 'checkbox' in self.cell_options[(r, dcol)])) or
                            # if del key not allowed in dropdowns and empty string isn't in dropdown values
                            (not self.ctrl_keys_over_dropdowns_enabled and
                             (r, dcol) in self.cell_options and
                             'dropdown' in self.cell_options[(r, dcol)] and
                             "" not in self.cell_options[(r, dcol)]['dropdown']['values']) or
                            (dcol in self.col_options and 'readonly' in self.col_options[dcol]) or
                            (r in self.row_options and 'readonly' in self.row_options[r])
                            ):
                            continue
                        try:
                            if self.undo_enabled:
                                undo_storage[(r, dcol)] = f"{self.data_ref[r][dcol]}"
                            self.data_ref[r][dcol] = ""
                        except:
                            continue
            if self.extra_end_delete_key_func is not None:
                self.extra_end_delete_key_func(CtrlKeyEvent("end_delete_key", boxes, currently_selected, undo_storage))
            if self.undo_enabled:
                self.undo_storage.append(zlib.compress(pickle.dumps(("edit_cells", undo_storage, boxes, currently_selected))))
            self.refresh()
    def move_columns_adjust_options_dict(self, col, remove_start, num_cols, move_data = True):
        """Move a block of ``num_cols`` columns (starting at ``remove_start``)
        to position ``col``, shifting widths, data, headers and the various
        per-cell/column option dicts to match.

        When not all columns are displayed, only the underlying data is
        permuted - the displayed column indexes stay sorted and unchanged.

        Returns
        -------
        tuple
            ``(new_selected, dispset)`` where new_selected is the tuple of
            newly selected displayed column indexes and dispset is the old->new
            data-column mapping used (empty when all columns are displayed).
        """
        c = int(col)
        rm1start = int(remove_start)
        rm1end = rm1start + num_cols
        totalcols = int(num_cols)
        # Range occupied by the moved block's old position after insertion.
        rm2start = rm1start + (rm1end - rm1start)
        rm2end = rm1end + (rm1end - rm1start)
        orig_selected = list(range(rm1start, rm1start + totalcols))
        self.deselect("all")
        # Column widths as deltas between successive positions.
        cws = [int(b - a) for a, b in zip(self.col_positions, islice(self.col_positions, 1, len(self.col_positions)))]
        if rm1start > c:
            cws[c:c] = cws[rm1start:rm1end]
            cws[rm2start:rm2end] = []
        else:
            cws[c + 1:c + 1] = cws[rm1start:rm1end]
            cws[rm1start:rm1end] = []
        self.col_positions = list(accumulate(chain([0], (width for width in cws))))
        if c + totalcols > len(self.col_positions):
            new_selected = tuple(range(len(self.col_positions) - 1 - totalcols, len(self.col_positions) - 1))
            self.create_selected(0, len(self.col_positions) - 1 - totalcols, len(self.row_positions) - 1, len(self.col_positions) - 1, "cols")
        else:
            if rm1start > c:
                new_selected = tuple(range(c, c + totalcols))
                self.create_selected(0, c, len(self.row_positions) - 1, c + totalcols, "cols")
            else:
                new_selected = tuple(range(c + 1 - totalcols, c + 1))
                self.create_selected(0, c + 1 - totalcols, len(self.row_positions) - 1, c + 1, "cols")
        self.create_current(0, int(new_selected[0]), type_ = "col", inside = True)
        newcolsdct = {t1: t2 for t1, t2 in zip(orig_selected, new_selected)}
        if self.all_columns_displayed:
            dispset = {}
            if rm1start > c:
                # Moving the block leftwards.
                if move_data:
                    for rn in range(len(self.data_ref)):
                        if len(self.data_ref[rn]) < rm1end:
                            self.data_ref[rn].extend(list(repeat("", rm1end - len(self.data_ref[rn]) + 1)))
                        self.data_ref[rn][c:c] = self.data_ref[rn][rm1start:rm1end]
                        self.data_ref[rn][rm2start:rm2end] = []
                    if isinstance(self.my_hdrs, list) and self.my_hdrs:
                        if len(self.my_hdrs) < rm1end:
                            self.my_hdrs.extend(list(repeat("", rm1end - len(self.my_hdrs) + 1)))
                        self.my_hdrs[c:c] = self.my_hdrs[rm1start:rm1end]
                        self.my_hdrs[rm2start:rm2end] = []
                # Remap header/cell/column option keys: moved columns map via
                # newcolsdct, columns between the destination and the old block
                # shift right by totalcols.
                new_ch = {}
                for k, v in self.CH.cell_options.items():
                    if k in newcolsdct:
                        new_ch[newcolsdct[k]] = v
                    elif k < rm1start and k >= c:
                        new_ch[k + totalcols] = v
                    else:
                        new_ch[k] = v
                self.CH.cell_options = new_ch
                new_cell = {}
                for k, v in self.cell_options.items():
                    if k[1] in newcolsdct:
                        new_cell[(k[0], newcolsdct[k[1]])] = v
                    elif k[1] < rm1start and k[1] >= c:
                        new_cell[(k[0], k[1] + totalcols)] = v
                    else:
                        new_cell[k] = v
                self.cell_options = new_cell
                new_col = {}
                for k, v in self.col_options.items():
                    if k in newcolsdct:
                        new_col[newcolsdct[k]] = v
                    elif k < rm1start and k >= c:
                        new_col[k + totalcols] = v
                    else:
                        new_col[k] = v
                self.col_options = new_col
            else:
                # Moving the block rightwards; insert after the destination.
                c += 1
                if move_data:
                    for rn in range(len(self.data_ref)):
                        if len(self.data_ref[rn]) < c - 1:
                            self.data_ref[rn].extend(list(repeat("", c - len(self.data_ref[rn]))))
                        self.data_ref[rn][c:c] = self.data_ref[rn][rm1start:rm1end]
                        self.data_ref[rn][rm1start:rm1end] = []
                    if isinstance(self.my_hdrs, list) and self.my_hdrs:
                        if len(self.my_hdrs) < c:
                            self.my_hdrs.extend(list(repeat("", c - len(self.my_hdrs))))
                        self.my_hdrs[c:c] = self.my_hdrs[rm1start:rm1end]
                        self.my_hdrs[rm1start:rm1end] = []
                new_ch = {}
                for k, v in self.CH.cell_options.items():
                    if k in newcolsdct:
                        new_ch[newcolsdct[k]] = v
                    elif k < c and k > rm1start:
                        new_ch[k - totalcols] = v
                    else:
                        new_ch[k] = v
                self.CH.cell_options = new_ch
                new_cell = {}
                for k, v in self.cell_options.items():
                    if k[1] in newcolsdct:
                        new_cell[(k[0], newcolsdct[k[1]])] = v
                    elif k[1] < c and k[1] > rm1start:
                        new_cell[(k[0], k[1] - totalcols)] = v
                    else:
                        new_cell[k] = v
                self.cell_options = new_cell
                new_col = {}
                for k, v in self.col_options.items():
                    if k in newcolsdct:
                        new_col[newcolsdct[k]] = v
                    elif k < c and k > rm1start:
                        new_col[k - totalcols] = v
                    else:
                        new_col[k] = v
                self.col_options = new_col
        else:
            # moves data around, not displayed columns indexes
            # which remain sorted and the same after drop and drop
            if rm1start > c:
                dispset = {a: b for a, b in zip(self.displayed_columns, (self.displayed_columns[:c] +
                                                                         self.displayed_columns[rm1start:rm1start + totalcols] +
                                                                         self.displayed_columns[c:rm1start] +
                                                                         self.displayed_columns[rm1start + totalcols:]))}
            else:
                dispset = {a: b for a, b in zip(self.displayed_columns, (self.displayed_columns[:rm1start] +
                                                                         self.displayed_columns[rm1start + totalcols:c + 1] +
                                                                         self.displayed_columns[rm1start:rm1start + totalcols] +
                                                                         self.displayed_columns[c + 1:]))}
            # has to pick up elements from all over the place in the original row
            # building an entirely new row is best due to permutations of hidden columns
            if move_data:
                max_idx = max(chain(dispset, dispset.values())) + 1
                for rn in range(len(self.data_ref)):
                    if len(self.data_ref[rn]) < max_idx:
                        self.data_ref[rn][:] = self.data_ref[rn] + list(repeat("", max_idx - len(self.data_ref[rn])))
                    new = []
                    idx = 0
                    done = set()
                    while len(new) < len(self.data_ref[rn]):
                        if idx in dispset and idx not in done:
                            new.append(self.data_ref[rn][dispset[idx]])
                            done.add(idx)
                        elif idx not in done:
                            new.append(self.data_ref[rn][idx])
                            idx += 1
                        else:
                            idx += 1
                    self.data_ref[rn] = new
                if isinstance(self.my_hdrs, list) and self.my_hdrs:
                    if len(self.my_hdrs) < max_idx:
                        self.my_hdrs[:] = self.my_hdrs + list(repeat("", max_idx - len(self.my_hdrs)))
                    new = []
                    idx = 0
                    done = set()
                    while len(new) < len(self.my_hdrs):
                        if idx in dispset and idx not in done:
                            new.append(self.my_hdrs[dispset[idx]])
                            done.add(idx)
                        elif idx not in done:
                            new.append(self.my_hdrs[idx])
                            idx += 1
                        else:
                            idx += 1
                    self.my_hdrs = new
            # Invert the mapping to remap option-dict keys.
            dispset = {b: a for a, b in dispset.items()}
            self.CH.cell_options = {dispset[k] if k in dispset else k: v for k, v in self.CH.cell_options.items()}
            self.cell_options = {(k[0], dispset[k[1]]) if k[1] in dispset else k: v for k, v in self.cell_options.items()}
            self.col_options = {dispset[k] if k in dispset else k: v for k, v in self.col_options.items()}
        return new_selected, dispset
def ctrl_z(self, event = None):
if self.undo_storage:
if not isinstance(self.undo_storage[-1], (tuple, dict)):
undo_storage = pickle.loads(zlib.decompress(self.undo_storage[-1]))
else:
undo_storage = self.undo_storage[-1]
self.deselect("all")
if self.extra_begin_ctrl_z_func is not None:
try:
self.extra_begin_ctrl_z_func(UndoEvent("begin_ctrl_z", undo_storage[0], undo_storage))
except:
return
self.undo_storage.pop()
if undo_storage[0] in ("edit_cells", "edit_cells_paste"):
for (r, c), v in undo_storage[1].items():
self.data_ref[r][c] = v
#if (r, c) in self.cell_options and 'dropdown' in self.cell_options[(r, c)]:
#self.cell_options[(r, c)]['dropdown'][0].set_displayed(v)
start_row = float("inf")
start_col = float("inf")
for box in undo_storage[2]:
r1, c1, r2, c2 = box[0]
if not self.expand_sheet_if_paste_too_big:
self.create_selected(r1, c1, r2, c2, box[1])
if r1 < start_row:
start_row = r1
if c1 < start_col:
start_col = c1
if undo_storage[0] == "edit_cells_paste" and self.expand_sheet_if_paste_too_big:
if undo_storage[4][0] > 0:
self.del_row_positions(len(self.row_positions) - 1 - undo_storage[4][0], undo_storage[4][0])
self.data_ref[:] = self.data_ref[:-undo_storage[4][0]]
if undo_storage[4][1] > 0:
quick_added_cols = undo_storage[4][1]
self.del_col_positions(len(self.col_positions) - 1 - quick_added_cols, quick_added_cols)
for rn in range(len(self.data_ref)):
self.data_ref[rn][:] = self.data_ref[rn][:-quick_added_cols]
if not self.all_columns_displayed:
self.displayed_columns[:] = self.displayed_columns[:-quick_added_cols]
if undo_storage[3]:
if isinstance(undo_storage[3][0], int):
self.create_current(undo_storage[3][0], undo_storage[3][1], type_ = "cell", inside = True if self.cell_selected(undo_storage[3][0], undo_storage[3][1]) else False)
elif undo_storage[3][0] == "column":
self.create_current(0, undo_storage[3][1], type_ = "col", inside = True)
elif undo_storage[3][0] == "row":
self.create_current(undo_storage[3][1], 0, type_ = "row", inside = True)
elif start_row < len(self.row_positions) - 1 and start_col < len(self.col_positions) - 1:
self.create_current(start_row, start_col, type_ = "cell", inside = True if self.cell_selected(start_row, start_col) else False)
if start_row < len(self.row_positions) - 1 and start_col < len(self.col_positions) - 1:
self.see(r = start_row, c = start_col, keep_yscroll = False, keep_xscroll = False, bottom_right_corner = False, check_cell_visibility = True, redraw = False)
elif undo_storage[0] == "move_cols":
c = undo_storage[1]
rm1start = undo_storage[2]
totalcols = len(undo_storage[4])
self.move_columns_adjust_options_dict(c, rm1start, totalcols)
elif undo_storage[0] == "move_rows":
rhs = [int(b - a) for a, b in zip(self.row_positions, islice(self.row_positions, 1, len(self.row_positions)))]
ins_row = undo_storage[1]
orig_ins_row = int(ins_row)
rm1start = undo_storage[2]
rm1end = undo_storage[3] + 1
new_selected = undo_storage[4]
rm2start = rm1start + (rm1end - rm1start)
rm2end = rm1end + (rm1end - rm1start)
totalrows = rm1end - rm1start
if rm1start < ins_row:
ins_row += totalrows
if rm1start > ins_row:
try:
self.data_ref[ins_row:ins_row] = self.data_ref[rm1start:rm1end]
self.data_ref[rm2start:rm2end] = []
except:
pass
if self.my_row_index:
try:
self.my_row_index[ins_row:ins_row] = self.my_row_index[rm1start:rm1end]
self.my_row_index[rm2start:rm2end] = []
except:
pass
else:
try:
self.data_ref[ins_row:ins_row] = self.data_ref[rm1start:rm1end]
self.data_ref[rm1start:rm1end] = []
except:
pass
if self.my_row_index:
try:
self.my_row_index[ins_row:ins_row] = self.my_row_index[rm1start:rm1end]
self.my_row_index[rm1start:rm1end] = []
except:
pass
if rm1start > ins_row:
rhs[ins_row:ins_row] = rhs[rm1start:rm1end]
rhs[rm2start:rm2end] = []
self.row_positions = list(accumulate(chain([0], (height for height in rhs))))
self.create_current(ins_row, 0, type_ = "row", inside = True)
self.create_selected(ins_row, 0, ins_row + totalrows, len(self.col_positions) - 1, "rows")
else:
rhs[ins_row:ins_row] = rhs[rm1start:rm1end]
rhs[rm1start:rm1end] = []
self.row_positions = list(accumulate(chain([0], (height for height in rhs))))
self.create_current(ins_row - totalrows, 0, type_ = "row", inside = True)
self.create_selected(ins_row - totalrows, 0, ins_row, len(self.col_positions) - 1, "rows")
self.see(r = orig_ins_row, c = 0, keep_yscroll = False, keep_xscroll = True, bottom_right_corner = False, check_cell_visibility = True, redraw = False)
rowsiter = tuple(range(rm1start, rm1end))
rowset = set(rowsiter)
popped_ri = {t1: t2 for t1, t2 in self.RI.cell_options.items() if t1 in rowset}
popped_cell = {t1: t2 for t1, t2 in self.cell_options.items() if t1[0] in rowset}
popped_row = {t1: t2 for t1, t2 in self.row_options.items() if t1 in rowset}
popped_ri = {t1: self.RI.cell_options.pop(t1) for t1 in popped_ri}
popped_cell = {t1: self.cell_options.pop(t1) for t1 in popped_cell}
popped_row = {t1: self.row_options.pop(t1) for t1 in popped_row}
self.RI.cell_options = {t1 if t1 < rm1start else t1 - totalrows: t2 for t1, t2 in self.RI.cell_options.items()}
self.RI.cell_options = {t1 if t1 < ins_row else t1 + totalrows: t2 for t1, t2 in self.RI.cell_options.items()}
self.row_options = {t1 if t1 < rm1start else t1 - totalrows: t2 for t1, t2 in self.row_options.items()}
self.row_options = {t1 if t1 < ins_row else t1 + totalrows: t2 for t1, t2 in self.row_options.items()}
self.cell_options = {(t10 if t10 < rm1start else t10 - totalrows, t11): t2 for (t10, t11), t2 in self.cell_options.items()}
self.cell_options = {(t10 if t10 < ins_row else t10 + totalrows, t11): t2 for (t10, t11), t2 in self.cell_options.items()}
newrowsdct = {t1: t2 for t1, t2 in zip(rowsiter, new_selected)}
for t1, t2 in popped_ri.items():
self.RI.cell_options[newrowsdct[t1]] = t2
for t1, t2 in popped_row.items():
self.row_options[newrowsdct[t1]] = t2
for (t10, t11), t2 in popped_cell.items():
self.cell_options[(newrowsdct[t10], t11)] = t2
elif undo_storage[0] == "insert_row":
self.data_ref[undo_storage[1]['data_row_num']:undo_storage[1]['data_row_num'] + undo_storage[1]['numrows']] = []
try:
self.my_row_index[undo_storage[1]['data_row_num']:undo_storage[1]['data_row_num'] + undo_storage[1]['numrows']] = []
except:
pass
self.del_row_positions(undo_storage[1]['sheet_row_num'],
undo_storage[1]['numrows'],
deselect_all = False)
for r in range(undo_storage[1]['sheet_row_num'],
undo_storage[1]['sheet_row_num'] + undo_storage[1]['numrows']):
if r in self.row_options:
del self.row_options[r]
if r in self.RI.cell_options:
del self.RI.cell_options[r]
numrows = undo_storage[1]['numrows']
idx = undo_storage[1]['sheet_row_num'] + undo_storage[1]['numrows']
self.cell_options = {(rn if rn < idx else rn - numrows, cn): t2 for (rn, cn), t2 in self.cell_options.items()}
self.row_options = {rn if rn < idx else rn - numrows: t for rn, t in self.row_options.items()}
self.RI.cell_options = {rn if rn < idx else rn - numrows: t for rn, t in self.RI.cell_options.items()}
if len(self.row_positions) > 1:
start_row = undo_storage[1]['sheet_row_num'] if undo_storage[1]['sheet_row_num'] < len(self.row_positions) - 1 else undo_storage[1]['sheet_row_num'] - 1
self.RI.select_row(start_row)
self.see(r = start_row, c = 0, keep_yscroll = False, keep_xscroll = False, bottom_right_corner = False, check_cell_visibility = True, redraw = False)
elif undo_storage[0] == "insert_col":
self.displayed_columns = undo_storage[1]['displayed_columns']
qx = undo_storage[1]['data_col_num']
qnum = undo_storage[1]['numcols']
for rn in range(len(self.data_ref)):
self.data_ref[rn][qx:qx + qnum] = []
try:
self.my_hdrs[qx:qx + qnum] = []
except:
pass
self.del_col_positions(undo_storage[1]['sheet_col_num'],
undo_storage[1]['numcols'],
deselect_all = False)
for c in range(undo_storage[1]['sheet_col_num'],
undo_storage[1]['sheet_col_num'] + undo_storage[1]['numcols']):
if c in self.col_options:
del self.col_options[c]
if c in self.CH.cell_options:
del self.CH.cell_options[c]
numcols = undo_storage[1]['numcols']
idx = undo_storage[1]['sheet_col_num'] + undo_storage[1]['numcols']
self.cell_options = {(rn, cn if cn < idx else cn - numcols): t2 for (rn, cn), t2 in self.cell_options.items()}
self.col_options = {cn if cn < idx else cn - numcols: t for cn, t in self.col_options.items()}
self.CH.cell_options = {cn if cn < idx else cn - numcols: t for cn, t in self.CH.cell_options.items()}
if len(self.col_positions) > 1:
start_col = undo_storage[1]['sheet_col_num'] if undo_storage[1]['sheet_col_num'] < len(self.col_positions) - 1 else undo_storage[1]['sheet_col_num'] - 1
self.CH.select_col(start_col)
self.see(r = 0, c = start_col, keep_yscroll = False, keep_xscroll = False, bottom_right_corner = False, check_cell_visibility = True, redraw = False)
elif undo_storage[0] == "delete_rows":
for rn, r, h in reversed(undo_storage[1]['deleted_rows']):
self.data_ref.insert(rn, r)
self.insert_row_position(idx = rn, height = h)
self.cell_options = undo_storage[1]['cell_options']
self.row_options = undo_storage[1]['row_options']
self.RI.cell_options = undo_storage[1]['RI_cell_options']
for rn, r in reversed(undo_storage[1]['deleted_index_values']):
try:
self.my_row_index.insert(rn, r)
except:
continue
self.reselect_from_get_boxes(undo_storage[1]['selection_boxes'])
elif undo_storage[0] == "delete_cols":
self.displayed_columns = undo_storage[1]['displayed_columns']
self.cell_options = undo_storage[1]['cell_options']
self.col_options = undo_storage[1]['col_options']
self.CH.cell_options = undo_storage[1]['CH_cell_options']
for cn, w in reversed(tuple(undo_storage[1]['colwidths'].items())):
self.insert_col_position(idx = cn, width = w)
for cn, rowdict in reversed(tuple(undo_storage[1]['deleted_cols'].items())):
for rn, v in rowdict.items():
try:
self.data_ref[rn].insert(cn, v)
except:
continue
for cn, v in reversed(tuple(undo_storage[1]['deleted_hdr_values'].items())):
try:
self.my_hdrs.insert(cn, v)
except:
continue
self.reselect_from_get_boxes(undo_storage[1]['selection_boxes'])
self.refresh()
if self.extra_end_ctrl_z_func is not None:
self.extra_end_ctrl_z_func(UndoEvent("end_ctrl_z", undo_storage[0], undo_storage))
def bind_arrowkeys(self, event = None):
self.arrowkeys_enabled = True
for canvas in (self, self.CH, self.RI, self.TL):
canvas.bind("<Up>", self.arrowkey_UP)
canvas.bind("<Tab>", self.arrowkey_RIGHT)
canvas.bind("<Right>", self.arrowkey_RIGHT)
canvas.bind("<Down>", self.arrowkey_DOWN)
canvas.bind("<Left>", self.arrowkey_LEFT)
canvas.bind("<Prior>", self.page_UP)
canvas.bind("<Next>", self.page_DOWN)
def unbind_arrowkeys(self, event = None):
    """Disable keyboard navigation by removing the arrow/tab/page key
    bindings from all four widgets."""
    self.arrowkeys_enabled = False
    sequences = ("<Up>", "<Right>", "<Tab>", "<Down>", "<Left>", "<Prior>", "<Next>")
    for widget in (self, self.CH, self.RI, self.TL):
        for sequence in sequences:
            widget.unbind(sequence)
def see(self,
        r = None,
        c = None,
        keep_yscroll = False,
        keep_xscroll = False,
        bottom_right_corner = False,
        check_cell_visibility = True,
        redraw = True):
    """Scroll the table (plus the linked row index and header) so cell (r, c) is in view.

    Parameters:
        r, c: target row / column index; either may be None to leave that axis alone.
        keep_yscroll / keep_xscroll: do not scroll that axis even if off-screen.
        bottom_right_corner: align the cell with the bottom/right viewport edge
            instead of the top/left.
        check_cell_visibility: skip scrolling an axis on which the cell is
            already completely visible.
        redraw: redraw the grid when any scrolling occurred.

    Returns True when scrolling happened and the grid was redrawn, else False.
    """
    need_redraw = False
    if check_cell_visibility:
        yvis, xvis = self.cell_is_completely_visible(r = r, c = c, separate_axes = True)
    else:
        # Treat both axes as off-screen so scrolling is forced.
        yvis, xvis = False, False
    if not yvis:
        if bottom_right_corner:
            if r is not None and not keep_yscroll:
                winfo_height = self.winfo_height()
                if self.row_positions[r + 1] - self.row_positions[r] > winfo_height:
                    # Row is taller than the viewport: align its top edge instead.
                    y = self.row_positions[r]
                else:
                    y = self.row_positions[r + 1] + 1 - winfo_height
                # Bug fix: the original built a tuple and then executed
                # "args[1] = args[1] - 1", which raises TypeError (tuples are
                # immutable) whenever the fraction exceeded 1. Compute the
                # scalar fraction first, then build the tuple.
                yfrac = y / (self.row_positions[-1] + self.empty_vertical)
                if yfrac > 1:
                    yfrac -= 1
                args = ("moveto", yfrac)
                self.yview(*args)
                self.RI.yview(*args)
                if redraw:
                    need_redraw = True
        else:
            if r is not None and not keep_yscroll:
                # Same tuple-mutation bug fix as in the branch above.
                yfrac = self.row_positions[r] / (self.row_positions[-1] + self.empty_vertical)
                if yfrac > 1:
                    yfrac -= 1
                args = ("moveto", yfrac)
                self.yview(*args)
                self.RI.yview(*args)
                if redraw:
                    need_redraw = True
    if not xvis:
        if bottom_right_corner:
            if c is not None and not keep_xscroll:
                winfo_width = self.winfo_width()
                if self.col_positions[c + 1] - self.col_positions[c] > winfo_width:
                    x = self.col_positions[c]
                else:
                    x = self.col_positions[c + 1] + 1 - winfo_width
                args = ("moveto", x / (self.col_positions[-1] + self.empty_horizontal))
                self.xview(*args)
                self.CH.xview(*args)
                if redraw:
                    need_redraw = True
        else:
            if c is not None and not keep_xscroll:
                args = ("moveto", self.col_positions[c] / (self.col_positions[-1] + self.empty_horizontal))
                self.xview(*args)
                self.CH.xview(*args)
                if redraw:
                    need_redraw = True
    if redraw and need_redraw:
        self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
        return True
    else:
        return False
def cell_is_completely_visible(self, r = 0, c = 0, cell_coords = None, separate_axes = False):
    """Return whether cell (r, c) lies entirely inside the visible canvas area.

    If cell_coords is given it is used directly as (x1, y1, x2, y2) instead of
    looking the cell up. With separate_axes=True a (y_visible, x_visible)
    tuple is returned; otherwise a single bool for both axes.
    """
    cx1, cy1, cx2, cy2 = self.get_canvas_visible_area()
    if cell_coords is None:
        x1, y1, x2, y2 = self.GetCellCoords(r = r, c = c, sel = True)
    else:
        x1, y1, x2, y2 = cell_coords
    x_vis = cx1 <= x1 and x2 <= cx2
    y_vis = cy1 <= y1 and y2 <= cy2
    if separate_axes:
        return y_vis, x_vis
    return x_vis and y_vis
def cell_is_visible(self,r = 0, c = 0, cell_coords = None):
    """Return True if any part of cell (r, c) overlaps the visible canvas area.

    If cell_coords is given it is used directly as (x1, y1, x2, y2) instead of
    looking the cell up.
    """
    cx1, cy1, cx2, cy2 = self.get_canvas_visible_area()
    if cell_coords is None:
        x1, y1, x2, y2 = self.GetCellCoords(r = r, c = c, sel = True)
    else:
        x1, y1, x2, y2 = cell_coords
    # Bug fix: the original joined these four interval checks with "or",
    # which is satisfied by almost any rectangle (e.g. anything with
    # x2 >= cx1 counted as visible). Two rectangles overlap only when ALL
    # four checks hold.
    if x1 <= cx2 and y1 <= cy2 and x2 >= cx1 and y2 >= cy1:
        return True
    return False
def select_all(self, redraw = True, run_binding_func = True):
    """Clear existing selections and select every cell as one selection box."""
    self.deselect("all")
    last_row = len(self.row_positions) - 1
    last_col = len(self.col_positions) - 1
    if last_row > 0 and last_col > 0:
        self.create_current(0, 0, type_ = "cell", inside = True)
        self.create_selected(0, 0, last_row, last_col)
        if redraw:
            self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
        if run_binding_func and self.select_all_binding_func is not None:
            self.select_all_binding_func(SelectionBoxEvent("select_all_cells", (0, 0, last_row, last_col)))
def select_cell(self, r, c, redraw = False, keep_other_selections = False):
    """Make (r, c) the currently selected cell.

    With keep_other_selections=True and (r, c) already inside a selection,
    existing boxes are preserved and (r, c) just becomes current; otherwise
    all selection rectangles are cleared first.
    """
    r, c = int(r), int(c)
    kept_existing = keep_other_selections and self.cell_selected(r, c)
    if kept_existing:
        self.create_current(r, c, type_ = "cell", inside = True)
    else:
        self.delete_selection_rects()
        self.create_current(r, c, type_ = "cell", inside = False)
    if redraw:
        self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
    if self.selection_binding_func is not None:
        self.selection_binding_func(SelectCellEvent("select_cell", r, c))
def move_down(self):
    """Move the current selection one cell down and scroll it into view."""
    selected = self.currently_selected(get_coords = True)
    if not selected:
        return
    row, col = selected
    selectable = self.single_selection_enabled or self.toggle_selection_enabled
    if selectable and row < len(self.row_positions) - 2:
        self.select_cell(row + 1, col)
        self.see(row + 1, col, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = True)
def add_selection(self, r, c, redraw = False, run_binding_func = True, set_as_current = False):
    """Add cell (r, c) to the selection without clearing existing boxes.

    With set_as_current=True, (r, c) becomes the "current" cell; if the
    previous current cell sat outside every selection box (tag
    "Current_Outside") it is re-added afterwards as an ordinary selection so
    it stays visibly selected.
    """
    r = int(r)
    c = int(c)
    if set_as_current:
        # Tag index 1 of the current-cell canvas item encodes its box as
        # "r1_c1_r2_c2"; tag index 2 is the selection type ("cell", ...).
        items = self.find_withtag("Current_Outside")
        if items:
            alltags = self.gettags(items[0])
            if alltags[2] == "cell":
                r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                add_sel = (r1, c1)
            else:
                add_sel = tuple()
        else:
            add_sel = tuple()
        self.create_current(r, c, type_ = "cell", inside = True if self.cell_selected(r, c) else False)
        if add_sel:
            # Re-add the displaced current cell; the recursion is bounded
            # because set_as_current is False here.
            self.add_selection(add_sel[0], add_sel[1], redraw = False, run_binding_func = False, set_as_current = False)
    else:
        self.create_selected(r, c, r + 1, c + 1)
    if redraw:
        self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
    if self.selection_binding_func is not None and run_binding_func:
        self.selection_binding_func(SelectCellEvent("select_cell", r, c))
def toggle_select_cell(self, row, column, add_selection = True, redraw = True, run_binding_func = True, set_as_current = True):
    """Toggle (row, column): deselect it if selected, otherwise select it.

    When selecting, add_selection=True appends to the existing selection,
    otherwise the cell becomes the sole selection.
    """
    if self.cell_selected(row, column, inc_rows = True, inc_cols = True):
        self.deselect(r = row, c = column, redraw = redraw)
    elif add_selection:
        self.add_selection(r = row, c = column, redraw = redraw, run_binding_func = run_binding_func, set_as_current = set_as_current)
    else:
        self.select_cell(row, column, redraw = redraw)
def align_rows(self, rows = [], align = "global", align_index = False): #"center", "w", "e" or "global"
    """Set text alignment for whole rows, or clear it when align == "global".

    rows may be a single int or an iterable of ints; align_index also applies
    the change to the row index (RI) cell options.
    """
    targets = [rows] if isinstance(rows, int) else rows
    if align == "global":
        for row in targets:
            if 'align' in self.row_options.get(row, ()):
                del self.row_options[row]['align']
            if align_index and 'align' in self.RI.cell_options.get(row, ()):
                del self.RI.cell_options[row]['align']
    else:
        for row in targets:
            self.row_options.setdefault(row, {})['align'] = align
            if align_index:
                self.RI.cell_options.setdefault(row, {})['align'] = align
def align_columns(self, columns = [], align = "global", align_header = False): #"center", "w", "e" or "global"
    """Set text alignment for whole columns, or clear it when align == "global".

    columns may be a single int or an iterable of ints; align_header also
    applies the change to the column header (CH) cell options.
    """
    targets = [columns] if isinstance(columns, int) else columns
    if align == "global":
        for col in targets:
            if 'align' in self.col_options.get(col, ()):
                del self.col_options[col]['align']
            if align_header and 'align' in self.CH.cell_options.get(col, ()):
                del self.CH.cell_options[col]['align']
    else:
        for col in targets:
            self.col_options.setdefault(col, {})['align'] = align
            if align_header:
                self.CH.cell_options.setdefault(col, {})['align'] = align
def align_cells(self, row = 0, column = 0, cells = [], align = "global"): #"center", "w", "e" or "global"
    """Set text alignment for individual cells, or clear it when align == "global".

    cells is an iterable of (row, column) pairs; if empty, the single
    (row, column) arguments are used instead.
    """
    targets = cells if cells else ((row, column), )
    if align == "global":
        for r, c in targets:
            if 'align' in self.cell_options.get((r, c), ()):
                del self.cell_options[(r, c)]['align']
    else:
        for r, c in targets:
            self.cell_options.setdefault((r, c), {})['align'] = align
def readonly_rows(self, rows = [], readonly = True):
    """Mark whole rows read-only, or clear the flag when readonly is False.

    rows may be a single int or an iterable of ints.
    """
    targets = [rows] if isinstance(rows, int) else rows
    if readonly:
        for row in targets:
            self.row_options.setdefault(row, {})['readonly'] = True
    else:
        for row in targets:
            if 'readonly' in self.row_options.get(row, ()):
                del self.row_options[row]['readonly']
def readonly_columns(self, columns = [], readonly = True):
    """Mark whole columns read-only, or clear the flag when readonly is False.

    columns may be a single int or an iterable of ints.
    """
    targets = [columns] if isinstance(columns, int) else columns
    if readonly:
        for col in targets:
            self.col_options.setdefault(col, {})['readonly'] = True
    else:
        for col in targets:
            if 'readonly' in self.col_options.get(col, ()):
                del self.col_options[col]['readonly']
def readonly_cells(self, row = 0, column = 0, cells = [], readonly = True):
    """Mark individual cells read-only, or clear the flag when readonly is False.

    cells is an iterable of (row, column) pairs; if empty, the single
    (row, column) arguments are used instead.
    """
    targets = cells if cells else ((row, column), )
    if readonly:
        for r, c in targets:
            self.cell_options.setdefault((r, c), {})['readonly'] = True
    else:
        for r, c in targets:
            if 'readonly' in self.cell_options.get((r, c), ()):
                del self.cell_options[(r, c)]['readonly']
def highlight_cells(self, r = 0, c = 0, cells = tuple(), bg = None, fg = None, redraw = False, overwrite = True):
    """Set background/foreground highlight colours on individual cells.

    Targets are either the (row, column) pairs in cells, or combinations of
    r/c where either may be the string "all" (paired with an int) to cover a
    whole row or column of the data. With overwrite=False, existing colours
    are kept for whichever of bg/fg is None. No-op when both are None.
    """
    if bg is None and fg is None:
        return
    def _apply(key):
        # Merge with an existing highlight when not overwriting.
        opts = self.cell_options.setdefault(key, {})
        if 'highlight' in opts and not overwrite:
            cur = opts['highlight']
            opts['highlight'] = (cur[0] if bg is None else bg,
                                 cur[1] if fg is None else fg)
        else:
            opts['highlight'] = (bg, fg)
    if cells:
        for r_, c_ in cells:
            _apply((r_, c_))
    else:
        if isinstance(r, str) and r.lower() == "all" and isinstance(c, int):
            riter = range(self.total_data_rows())
            citer = (c, )
        elif isinstance(c, str) and c.lower() == "all" and isinstance(r, int):
            riter = (r, )
            citer = range(self.total_data_cols())
        elif isinstance(r, int) and isinstance(c, int):
            riter = (r, )
            citer = (c, )
        # NOTE: no final else — unsupported r/c combinations raise NameError
        # below, exactly as in the original implementation.
        for r_ in riter:
            for c_ in citer:
                _apply((r_, c_))
    if redraw:
        self.main_table_redraw_grid_and_text()
def highlight_cols(self, cols = [], bg = None, fg = None, highlight_header = False, redraw = False, overwrite = True):
    """Set highlight colours on whole columns, optionally including the header.

    With overwrite=False, existing colours are kept for whichever of bg/fg
    is None. No-op when both are None.
    """
    if bg is None and fg is None:
        return
    for col in (cols, ) if isinstance(cols, int) else cols:
        opts = self.col_options.setdefault(col, {})
        if 'highlight' in opts and not overwrite:
            cur = opts['highlight']
            opts['highlight'] = (cur[0] if bg is None else bg,
                                 cur[1] if fg is None else fg)
        else:
            opts['highlight'] = (bg, fg)
    if highlight_header:
        self.CH.highlight_cells(cells = cols, bg = bg, fg = fg)
    if redraw:
        self.main_table_redraw_grid_and_text(redraw_header = highlight_header)
def highlight_rows(self, rows = [], bg = None, fg = None, highlight_index = False, redraw = False, end_of_screen = False, overwrite = True):
    """Set highlight colours on whole rows, optionally including the row index.

    The stored value is a (bg, fg, end_of_screen) triple; end_of_screen
    records whether the highlight extends past the last column. With
    overwrite=False, existing colours are kept for whichever of bg/fg is
    None. No-op when both are None.
    """
    if bg is None and fg is None:
        return
    for row in (rows, ) if isinstance(rows, int) else rows:
        opts = self.row_options.setdefault(row, {})
        if 'highlight' in opts and not overwrite:
            cur = opts['highlight']
            # The original's conditional for the third element always reduces
            # to cur[2] (both branches coincide when the values are equal).
            opts['highlight'] = (cur[0] if bg is None else bg,
                                 cur[1] if fg is None else fg,
                                 cur[2])
        else:
            opts['highlight'] = (bg, fg, end_of_screen)
    if highlight_index:
        self.RI.highlight_cells(cells = rows, bg = bg, fg = fg)
    if redraw:
        self.main_table_redraw_grid_and_text(redraw_row_index = highlight_index)
def deselect(self, r = None, c = None, cell = None, redraw = True):
    """Remove selections, dispatching on the arguments:

      r == "all"      -> clear every selection box.
      r == "allrows"  -> clear all full-row selection boxes.
      r == "allcols"  -> clear all full-column selection boxes.
      r only          -> deselect any row box containing row r.
      c only          -> deselect any column box containing column c.
      r and c / cell  -> deselect any box containing that single cell.

    Selection boxes are canvas items whose second tag encodes the box as
    "r1_c1_r2_c2"; deleting that tag on the table and the linked row index
    (RI) and header (CH) canvases removes the box everywhere. Fires the
    deselection binding (if any) with a DeselectionEvent describing what was
    removed.
    """
    deselected = tuple()
    deleted_boxes = {}
    if r == "all":
        deselected = ("deselect_all", self.delete_selection_rects())
    elif r == "allrows":
        for item in self.find_withtag("RowSelectFill"):
            alltags = self.gettags(item)
            if alltags:
                r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                deleted_boxes[r1, c1, r2, c2] = "rows"
                self.delete(alltags[1])
                self.RI.delete(alltags[1])
                self.CH.delete(alltags[1])
        current = self.currently_selected()
        if current and current[0] == "row":
            # The "current" cell belonged to a row box; remove it as well.
            deleted_boxes[tuple(int(e) for e in self.get_tags_of_current()[1].split("_") if e)] = "cell"
            self.delete_current()
        deselected = ("deselect_all_rows", deleted_boxes)
    elif r == "allcols":
        for item in self.find_withtag("ColSelectFill"):
            alltags = self.gettags(item)
            if alltags:
                r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                deleted_boxes[r1, c1, r2, c2] = "cols"
                self.delete(alltags[1])
                self.RI.delete(alltags[1])
                self.CH.delete(alltags[1])
        current = self.currently_selected()
        if current and current[0] == "column":
            deleted_boxes[tuple(int(e) for e in self.get_tags_of_current()[1].split("_") if e)] = "cell"
            self.delete_current()
        deselected = ("deselect_all_cols", deleted_boxes)
    elif r is not None and c is None and cell is None:
        # Deselect the row selection box(es) containing row r.
        current = self.find_withtag("Current_Inside") + self.find_withtag("Current_Outside")
        current_tags = self.gettags(current[0]) if current else tuple()
        if current:
            curr_r1, curr_c1, curr_r2, curr_c2 = tuple(int(e) for e in current_tags[1].split("_") if e)
        reset_current = False
        for item in self.find_withtag("RowSelectFill"):
            alltags = self.gettags(item)
            if alltags:
                r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                if r >= r1 and r < r2:
                    self.delete(f"{r1}_{c1}_{r2}_{c2}")
                    self.RI.delete(f"{r1}_{c1}_{r2}_{c2}")
                    self.CH.delete(f"{r1}_{c1}_{r2}_{c2}")
                    if not reset_current and current and curr_r1 >= r1 and curr_r1 < r2:
                        # The current cell was inside the removed box.
                        reset_current = True
                        deleted_boxes[curr_r1, curr_c1, curr_r2, curr_c2] = "cell"
                    deleted_boxes[r1, c1, r2, c2] = "rows"
        if reset_current:
            self.delete_current()
            self.set_current_to_last()
        deselected = ("deselect_row", deleted_boxes)
    elif c is not None and r is None and cell is None:
        # Deselect the column selection box(es) containing column c.
        current = self.find_withtag("Current_Inside") + self.find_withtag("Current_Outside")
        current_tags = self.gettags(current[0]) if current else tuple()
        if current:
            curr_r1, curr_c1, curr_r2, curr_c2 = tuple(int(e) for e in current_tags[1].split("_") if e)
        reset_current = False
        for item in self.find_withtag("ColSelectFill"):
            alltags = self.gettags(item)
            if alltags:
                r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                if c >= c1 and c < c2:
                    self.delete(f"{r1}_{c1}_{r2}_{c2}")
                    self.RI.delete(f"{r1}_{c1}_{r2}_{c2}")
                    self.CH.delete(f"{r1}_{c1}_{r2}_{c2}")
                    if not reset_current and current and curr_c1 >= c1 and curr_c1 < c2:
                        reset_current = True
                        deleted_boxes[curr_r1, curr_c1, curr_r2, curr_c2] = "cell"
                    deleted_boxes[r1, c1, r2, c2] = "cols"
        if reset_current:
            self.delete_current()
            self.set_current_to_last()
        deselected = ("deselect_column", deleted_boxes)
    elif (r is not None and c is not None and cell is None) or cell is not None:
        # Deselect any box of any kind containing the single cell (r, c).
        set_curr = False
        if cell is not None:
            r, c = cell[0], cell[1]
        for item in chain(self.find_withtag("CellSelectFill"),
                          self.find_withtag("RowSelectFill"),
                          self.find_withtag("ColSelectFill"),
                          self.find_withtag("Current_Outside"),
                          self.find_withtag("Current_Inside")):
            alltags = self.gettags(item)
            if alltags:
                r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                if (r >= r1 and
                    c >= c1 and
                    r < r2 and
                    c < c2):
                    current = self.currently_selected()
                    # Decide whether the current cell must be reset: either the
                    # deleted box is exactly the current single cell, or the
                    # current selection (cell/row/column) lies inside the box.
                    if (not set_curr and
                        current and
                        r2 - r1 == 1 and
                        c2 - c1 == 1 and
                        r == current[0] and
                        c == current[1]):
                        set_curr = True
                    if current and not set_curr:
                        if isinstance(current[0], int):
                            if (current[0] >= r1 and
                                current[0] < r2 and
                                current[1] >= c1 and
                                current[1] < c2):
                                set_curr = True
                        elif current[0] == "column":
                            if (current[1] >= c1 and
                                current[1] < c2):
                                set_curr = True
                        elif current[0] == "row":
                            if (current[1] >= r1 and
                                current[1] < r2):
                                set_curr = True
                    self.delete(f"{r1}_{c1}_{r2}_{c2}")
                    self.RI.delete(f"{r1}_{c1}_{r2}_{c2}")
                    self.CH.delete(f"{r1}_{c1}_{r2}_{c2}")
                    deleted_boxes[(r1, c1, r2, c2)] = "cells"
        if set_curr:
            try:
                deleted_boxes[tuple(int(e) for e in self.get_tags_of_current()[1].split("_") if e)] = "cells"
            except:
                pass
            self.delete_current()
            self.set_current_to_last()
        deselected = ("deselect_cell", deleted_boxes)
    if redraw:
        self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
    if self.deselection_binding_func is not None:
        self.deselection_binding_func(DeselectionEvent(*deselected))
def page_UP(self, event = None):
    """Scroll up by one viewport height; optionally move the selection too.

    With page_up_down_select_row enabled, the row at the new scroll position
    is selected (a row selection or a cell selection, depending on what is
    currently selected); otherwise the view simply scrolls.
    """
    if not self.arrowkeys_enabled:
        return
    height = self.winfo_height()
    top = self.canvasy(0)
    scrollto = top - height
    if scrollto < 0:
        scrollto = 0
    if self.page_up_down_select_row:
        # First row boundary at/after the new scroll position.
        r = bisect.bisect_left(self.row_positions, scrollto)
        current = self.currently_selected(get_coords = True)
        if current and current[0] == r:
            # Already on that row; step one further so repeated presses move.
            r -= 1
        if r < 0:
            r = 0
        if self.RI.row_selection_enabled and (self.anything_selected(exclude_columns = True, exclude_cells = True) or not self.anything_selected()):
            self.RI.select_row(r)
            self.see(r, 0, keep_xscroll = True, check_cell_visibility = False)
        elif (self.single_selection_enabled or self.toggle_selection_enabled) and self.anything_selected(exclude_columns = True, exclude_rows = True):
            # A cell is selected: keep its column, move to row r.
            box = self.get_all_selection_boxes_with_types()[0][0]
            self.see(r, box[1], keep_xscroll = True, check_cell_visibility = False)
            self.select_cell(r, box[1])
    else:
        # Plain scroll without touching the selection; +100 is the extra
        # empty space allowed below the last row.
        args = ("moveto", scrollto / (self.row_positions[-1] + 100))
        self.yview(*args)
        self.RI.yview(*args)
        self.main_table_redraw_grid_and_text(redraw_row_index = True)
def page_DOWN(self, event = None):
    """Scroll down by one viewport height; optionally move the selection too.

    Mirror of page_UP. NOTE(review): this guard additionally requires
    RI.row_selection_enabled, whereas page_UP checks only
    page_up_down_select_row — confirm the asymmetry is intentional.
    """
    if not self.arrowkeys_enabled:
        return
    height = self.winfo_height()
    top = self.canvasy(0)
    scrollto = top + height
    if self.page_up_down_select_row and self.RI.row_selection_enabled:
        # Last row boundary before the new scroll position.
        r = bisect.bisect_left(self.row_positions, scrollto) - 1
        current = self.currently_selected(get_coords = True)
        if current and current[0] == r:
            # Already on that row; step one further so repeated presses move.
            r += 1
        if r > len(self.row_positions) - 2:
            # Clamp to the last row index.
            r = len(self.row_positions) - 2
        if self.RI.row_selection_enabled and (self.anything_selected(exclude_columns = True, exclude_cells = True) or not self.anything_selected()):
            self.RI.select_row(r)
            self.see(r, 0, keep_xscroll = True, check_cell_visibility = False)
        elif (self.single_selection_enabled or self.toggle_selection_enabled) and self.anything_selected(exclude_columns = True, exclude_rows = True):
            # A cell is selected: keep its column, move to row r.
            box = self.get_all_selection_boxes_with_types()[0][0]
            self.see(r, box[1], keep_xscroll = True, check_cell_visibility = False)
            self.select_cell(r, box[1])
    else:
        # Plain scroll; clamp so we do not scroll past the allowed empty
        # space (+100) below the last row.
        end = self.row_positions[-1]
        if scrollto > end + 100:
            scrollto = end
        args = ("moveto", scrollto / (end + 100))
        self.yview(*args)
        self.RI.yview(*args)
        self.main_table_redraw_grid_and_text(redraw_row_index = True)
def arrowkey_UP(self, event = None):
    """Move the selection one row up (handles row and cell selections)."""
    selected = self.currently_selected()
    if not selected or not self.arrowkeys_enabled:
        return
    kind = selected[0]
    if kind == "row":
        idx = selected[1]
        if idx == 0 or not self.RI.row_selection_enabled:
            return
        target = idx - 1
        if self.cell_is_completely_visible(r = target, c = 0):
            self.RI.select_row(target, redraw = True)
        else:
            self.RI.select_row(target)
            self.see(target, 0, keep_xscroll = True, check_cell_visibility = False)
    elif isinstance(kind, int):
        row, col = selected[0], selected[1]
        if row == 0 and self.CH.col_selection_enabled:
            # Already at the top: just scroll the cell into view if needed.
            if not self.cell_is_completely_visible(r = row, c = 0):
                self.see(row, col, keep_xscroll = True, check_cell_visibility = False)
        elif row != 0 and (self.single_selection_enabled or self.toggle_selection_enabled):
            if self.cell_is_completely_visible(r = row - 1, c = col):
                self.select_cell(row - 1, col, redraw = True)
            else:
                self.select_cell(row - 1, col)
                self.see(row - 1, col, keep_xscroll = True, check_cell_visibility = False)
def arrowkey_RIGHT(self, event = None):
    """Move the selection one column right (also bound to <Tab>)."""
    currently_selected = self.currently_selected()
    if not currently_selected or not self.arrowkeys_enabled:
        return
    if currently_selected[0] == "row":
        # A whole row is selected: move into its first cell.
        r = currently_selected[1]
        if self.single_selection_enabled or self.toggle_selection_enabled:
            if self.cell_is_completely_visible(r = r, c = 0):
                self.select_cell(r, 0, redraw = True)
            else:
                self.select_cell(r, 0)
                self.see(r, 0, keep_yscroll = True, bottom_right_corner = True, check_cell_visibility = False)
    elif currently_selected[0] == "column":
        c = currently_selected[1]
        # len(col_positions) - 2 is the last column index (the positions
        # list holds one extra boundary).
        if c < len(self.col_positions) - 2 and self.CH.col_selection_enabled:
            if self.cell_is_completely_visible(r = 0, c = c + 1):
                self.CH.select_col(c + 1, redraw = True)
            else:
                self.CH.select_col(c + 1)
                self.see(0, c + 1, keep_yscroll = True, bottom_right_corner = False if self.arrow_key_down_right_scroll_page else True, check_cell_visibility = False)
    elif isinstance(currently_selected[0], int):
        r = currently_selected[0]
        c = currently_selected[1]
        if c < len(self.col_positions) - 2 and (self.single_selection_enabled or self.toggle_selection_enabled):
            if self.cell_is_completely_visible(r = r, c = c + 1):
                self.select_cell(r, c + 1, redraw =True)
            else:
                self.select_cell(r, c + 1)
                self.see(r, c + 1, keep_yscroll = True, bottom_right_corner = False if self.arrow_key_down_right_scroll_page else True, check_cell_visibility = False)
def arrowkey_DOWN(self, event = None):
    """Move the selection one row down, scrolling one row ahead when possible."""
    currently_selected = self.currently_selected()
    if not currently_selected or not self.arrowkeys_enabled:
        return
    if currently_selected[0] == "row":
        r = currently_selected[1]
        # len(row_positions) - 2 is the last row index (the positions list
        # holds one extra boundary).
        if r < len(self.row_positions) - 2 and self.RI.row_selection_enabled:
            # Visibility is checked one row further ahead so the next key
            # press is already on screen.
            if self.cell_is_completely_visible(r = min(r + 2, len(self.row_positions) - 2), c = 0):
                self.RI.select_row(r + 1, redraw = True)
            else:
                self.RI.select_row(r + 1)
                # If the next two rows together (plus a 5px margin) fit in
                # the viewport, scroll far enough to show both.
                if r + 2 < len(self.row_positions) - 2 and (self.row_positions[r + 3] - self.row_positions[r + 2]) + (self.row_positions[r + 2] - self.row_positions[r + 1]) + 5 < self.winfo_height():
                    self.see(r + 2, 0, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = False)
                elif not self.cell_is_completely_visible(r = r + 1, c = 0):
                    self.see(r + 1, 0, keep_xscroll = True, bottom_right_corner = False if self.arrow_key_down_right_scroll_page else True, check_cell_visibility = False)
    elif currently_selected[0] == "column":
        # A whole column is selected: move into its first cell.
        c = currently_selected[1]
        if self.single_selection_enabled or self.toggle_selection_enabled:
            if self.cell_is_completely_visible(r = 0, c = c):
                self.select_cell(0, c, redraw = True)
            else:
                self.select_cell(0, c)
                self.see(0, c, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = False)
    elif isinstance(currently_selected[0], int):
        r = currently_selected[0]
        c = currently_selected[1]
        if r < len(self.row_positions) - 2 and (self.single_selection_enabled or self.toggle_selection_enabled):
            if self.cell_is_completely_visible(r = min(r + 2, len(self.row_positions) - 2), c = c):
                self.select_cell(r + 1, c, redraw = True)
            else:
                self.select_cell(r + 1, c)
                # Same two-rows-ahead scroll heuristic as the row branch.
                if r + 2 < len(self.row_positions) - 2 and (self.row_positions[r + 3] - self.row_positions[r + 2]) + (self.row_positions[r + 2] - self.row_positions[r + 1]) + 5 < self.winfo_height():
                    self.see(r + 2, c, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = False)
                elif not self.cell_is_completely_visible(r = r + 1, c = c):
                    self.see(r + 1, c, keep_xscroll = True, bottom_right_corner = False if self.arrow_key_down_right_scroll_page else True, check_cell_visibility = False)
def arrowkey_LEFT(self, event = None):
    """Move the selection one column left (handles column and cell selections)."""
    selected = self.currently_selected()
    if not selected or not self.arrowkeys_enabled:
        return
    kind = selected[0]
    if kind == "column":
        idx = selected[1]
        if idx == 0 or not self.CH.col_selection_enabled:
            return
        target = idx - 1
        if self.cell_is_completely_visible(r = 0, c = target):
            self.CH.select_col(target, redraw = True)
        else:
            self.CH.select_col(target)
            self.see(0, target, keep_yscroll = True, bottom_right_corner = True, check_cell_visibility = False)
    elif isinstance(kind, int):
        row, col = selected[0], selected[1]
        if col == 0 and self.RI.row_selection_enabled:
            # Already in the first column: just scroll into view if needed.
            if not self.cell_is_completely_visible(r = row, c = 0):
                self.see(row, col, keep_yscroll = True, check_cell_visibility = False)
        elif col != 0 and (self.single_selection_enabled or self.toggle_selection_enabled):
            if self.cell_is_completely_visible(r = row, c = col - 1):
                self.select_cell(row, col - 1, redraw = True)
            else:
                self.select_cell(row, col - 1)
                self.see(row, col - 1, keep_yscroll = True, check_cell_visibility = False)
def edit_bindings(self, enable = True, key = None):
    """Enable or disable user-edit key bindings.

    Parameters:
        enable: True to bind, False to unbind.
        key: one of "copy", "cut", "paste", "undo", "delete", "edit_cell",
            "edit_header", or None to apply to all of the first six
            ("edit_header" is only applied when requested explicitly).
    """
    # Compute the platform modifier once. The original embedded this
    # conditional inside each f-string with nested double quotes
    # (f"<{"Command" if USER_OS == "Darwin" else "Control"}-{s2}>"), which is
    # a SyntaxError before Python 3.12 — quote reuse inside f-strings only
    # became legal with PEP 701.
    modifier = "Command" if USER_OS == "Darwin" else "Control"
    widgets = (self, self.RI, self.CH, self.TL)
    # (key name, keyboard characters, handler attribute, enabled-flag attribute)
    for name, chars, handler_name, flag in (("copy", ("c", "C"), "ctrl_c", "copy_enabled"),
                                            ("cut", ("x", "X"), "ctrl_x", "cut_enabled"),
                                            ("paste", ("v", "V"), "ctrl_v", "paste_enabled"),
                                            ("undo", ("z", "Z"), "ctrl_z", "undo_enabled")):
        if key is None or key == name:
            if enable:
                handler = getattr(self, handler_name)
                for s2 in chars:
                    for widget in widgets:
                        widget.bind(f"<{modifier}-{s2}>", handler)
                setattr(self, flag, True)
            else:
                # Unbind under both modifiers so a binding made on either
                # platform is removed.
                for s1 in ("Control", "Command"):
                    for s2 in chars:
                        for widget in widgets:
                            widget.unbind(f"<{s1}-{s2}>")
                setattr(self, flag, False)
    if key is None or key == "delete":
        if enable:
            for widget in widgets:
                widget.bind("<Delete>", self.delete_key)
            self.delete_key_enabled = True
        else:
            for widget in widgets:
                widget.unbind("<Delete>")
            self.delete_key_enabled = False
    if key is None or key == "edit_cell":
        self.bind_cell_edit(enable)
    # edit header with text editor (dropdowns and checkboxes not included)
    # this will not be enabled by using enable_bindings() to enable all bindings
    # must be enabled directly using enable_bindings("edit_header")
    if key == "edit_header":
        self.CH.bind_cell_edit(enable)
def menu_add_command(self, menu: tk.Menu, **kwargs):
    """Add a command to *menu*, first removing any existing entry with the
    same label so repeated calls do not create duplicate menu items.
    No-op when no 'label' keyword is supplied."""
    if 'label' not in kwargs:
        return
    try:
        menu.delete(menu.index(kwargs['label']))
    except TclError:
        pass
    menu.add_command(**kwargs)
def create_rc_menus(self):
if not self.rc_popup_menu:
self.rc_popup_menu = tk.Menu(self, tearoff = 0, background = self.popup_menu_bg)
if not self.CH.ch_rc_popup_menu:
self.CH.ch_rc_popup_menu = tk.Menu(self.CH, tearoff = 0, background = self.popup_menu_bg)
if not self.RI.ri_rc_popup_menu:
self.RI.ri_rc_popup_menu = tk.Menu(self.RI, tearoff = 0, background = self.popup_menu_bg)
if not self.empty_rc_popup_menu:
self.empty_rc_popup_menu = tk.Menu(self, tearoff = 0, background = self.popup_menu_bg)
for menu in (self.rc_popup_menu,
self.CH.ch_rc_popup_menu,
self.RI.ri_rc_popup_menu,
self.empty_rc_popup_menu):
menu.delete(0, 'end')
if self.cut_enabled:
self.menu_add_command(self.rc_popup_menu, label = "Cut",
accelerator = "Ctrl+X",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_x)
#self.rc_popup_menu.add_separator()
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Cut contents",
accelerator = "Ctrl+X",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_x)
#self.CH.ch_rc_popup_menu.add_separator()
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Cut contents",
accelerator = "Ctrl+X",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_x)
#self.RI.ri_rc_popup_menu.add_separator()
if self.copy_enabled:
self.menu_add_command(self.rc_popup_menu, label = "Copy",
accelerator = "Ctrl+C",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_c)
#self.rc_popup_menu.add_separator()
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Copy contents",
accelerator = "Ctrl+C",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_c)
#self.CH.ch_rc_popup_menu.add_separator()
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Copy contents",
accelerator = "Ctrl+C",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_c)
#self.RI.ri_rc_popup_menu.add_separator()
if self.paste_enabled:
self.menu_add_command(self.rc_popup_menu, label = "Paste",
accelerator = "Ctrl+V",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_v)
#self.rc_popup_menu.add_separator()
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Paste",
accelerator = "Ctrl+V",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_v)
#self.CH.ch_rc_popup_menu.add_separator()
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Paste",
accelerator = "Ctrl+V",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_v)
#self.RI.ri_rc_popup_menu.add_separator()
if self.expand_sheet_if_paste_too_big:
self.menu_add_command(self.empty_rc_popup_menu, label = "Paste",
accelerator = "Ctrl+V",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_v)
if self.delete_key_enabled:
self.menu_add_command(self.rc_popup_menu, label = "Delete",
accelerator = "Del",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.delete_key)
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Clear contents",
accelerator = "Del",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.delete_key)
#self.CH.ch_rc_popup_menu.add_separator()
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Clear contents",
accelerator = "Del",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.delete_key)
#self.RI.ri_rc_popup_menu.add_separator()
if self.rc_delete_column_enabled:
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Delete columns",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.del_cols_rc)
#self.CH.ch_rc_popup_menu.add_separator()
if self.rc_insert_column_enabled:
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Insert columns left",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_col_rc("left"))
self.menu_add_command(self.empty_rc_popup_menu, label = "Insert column",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_col_rc("left"))
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Insert columns right",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_col_rc("right"))
if self.rc_delete_row_enabled:
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Delete rows",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.del_rows_rc)
#self.RI.ri_rc_popup_menu.add_separator()
if self.rc_insert_row_enabled:
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Insert rows above",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_row_rc("above"))
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Insert rows below",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_row_rc("below"))
self.menu_add_command(self.empty_rc_popup_menu, label = "Insert row",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_row_rc("below"))
for label, func in self.extra_table_rc_menu_funcs.items():
self.menu_add_command(self.rc_popup_menu, label = label,
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = func)
for label, func in self.extra_index_rc_menu_funcs.items():
self.menu_add_command(self.RI.ri_rc_popup_menu, label = label,
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = func)
for label, func in self.extra_header_rc_menu_funcs.items():
self.menu_add_command(self.CH.ch_rc_popup_menu, label = label,
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = func)
def bind_cell_edit(self, enable = True, keys = []):
if enable:
self.edit_cell_enabled = True
for w in (self, self.RI, self.CH):
w.bind("<Key>", self.open_cell)
else:
self.edit_cell_enabled = False
for w in (self, self.RI, self.CH):
w.unbind("<Key>")
def enable_bindings(self, bindings):
if not bindings:
self.enable_bindings_internal("all")
elif isinstance(bindings, (list, tuple)):
for binding in bindings:
if isinstance(binding, (list, tuple)):
for bind in binding:
self.enable_bindings_internal(bind.lower())
elif isinstance(binding, str):
self.enable_bindings_internal(binding.lower())
elif isinstance(bindings, str):
self.enable_bindings_internal(bindings.lower())
def disable_bindings(self, bindings):
if not bindings:
self.disable_bindings_internal("all")
elif isinstance(bindings, (list, tuple)):
for binding in bindings:
if isinstance(binding, (list, tuple)):
for bind in binding:
self.disable_bindings_internal(bind.lower())
elif isinstance(binding, str):
self.disable_bindings_internal(binding.lower())
elif isinstance(bindings, str):
self.disable_bindings_internal(bindings)
def enable_disable_select_all(self, enable = True):
self.select_all_enabled = bool(enable)
for s in ("A", "a"):
binding = f"<{"Command" if USER_OS == "Darwin" else "Control"}-{s}>"
for widget in (self, self.RI, self.CH, self.TL):
if enable:
widget.bind(binding, self.select_all)
else:
widget.unbind(binding)
    def enable_bindings_internal(self, binding):
        """Enable a single named binding.

        ``binding`` is expected to be a lowercase binding name (callers
        lowercase before forwarding).  "all"/"enable_all" switches on
        every user-facing binding at once.  Each branch flips the
        relevant flag on this widget and/or forwards to the column
        header (``CH``), row index (``RI``) or top-left (``TL``) widget.
        The right-click menus are always rebuilt at the end so they
        reflect the newly enabled actions.
        """
        if binding in ("enable_all", "all"):
            # "all" enables single-select (not toggle-select) mode
            self.single_selection_enabled = True
            self.toggle_selection_enabled = False
            self.drag_selection_enabled = True
            self.enable_disable_select_all(True)
            self.CH.enable_bindings("column_width_resize")
            self.CH.enable_bindings("column_select")
            self.CH.enable_bindings("column_height_resize")
            self.CH.enable_bindings("drag_and_drop")
            self.CH.enable_bindings("double_click_column_resize")
            self.RI.enable_bindings("row_height_resize")
            self.RI.enable_bindings("double_click_row_resize")
            self.RI.enable_bindings("row_width_resize")
            self.RI.enable_bindings("row_select")
            self.RI.enable_bindings("drag_and_drop")
            self.bind_arrowkeys()
            self.edit_bindings(True)
            self.rc_delete_column_enabled = True
            self.rc_delete_row_enabled = True
            self.rc_insert_column_enabled = True
            self.rc_insert_row_enabled = True
            self.rc_popup_menus_enabled = True
            self.rc_select_enabled = True
            self.TL.rh_state()
            self.TL.rw_state()
        # selection modes are mutually exclusive
        elif binding in ("single", "single_selection_mode", "single_select"):
            self.single_selection_enabled = True
            self.toggle_selection_enabled = False
        elif binding in ("toggle", "toggle_selection_mode", "toggle_select"):
            self.toggle_selection_enabled = True
            self.single_selection_enabled = False
        elif binding == "drag_select":
            self.drag_selection_enabled = True
        elif binding == "select_all":
            self.enable_disable_select_all(True)
        # column header bindings
        elif binding == "column_width_resize":
            self.CH.enable_bindings("column_width_resize")
        elif binding == "column_select":
            self.CH.enable_bindings("column_select")
        elif binding == "column_height_resize":
            self.CH.enable_bindings("column_height_resize")
            self.TL.rh_state()
        elif binding == "column_drag_and_drop":
            self.CH.enable_bindings("drag_and_drop")
        elif binding == "double_click_column_resize":
            self.CH.enable_bindings("double_click_column_resize")
        # row index bindings
        elif binding == "row_height_resize":
            self.RI.enable_bindings("row_height_resize")
        elif binding == "double_click_row_resize":
            self.RI.enable_bindings("double_click_row_resize")
        elif binding == "row_width_resize":
            self.RI.enable_bindings("row_width_resize")
            self.TL.rw_state()
        elif binding == "row_select":
            self.RI.enable_bindings("row_select")
        elif binding == "row_drag_and_drop":
            self.RI.enable_bindings("drag_and_drop")
        elif binding == "arrowkeys":
            self.bind_arrowkeys()
        elif binding == "edit_bindings":
            self.edit_bindings(True)
        # the right-click structural actions also imply popup menus and
        # right-click selection
        elif binding == "rc_delete_column":
            self.rc_delete_column_enabled = True
            self.rc_popup_menus_enabled = True
            self.rc_select_enabled = True
        elif binding == "rc_delete_row":
            self.rc_delete_row_enabled = True
            self.rc_popup_menus_enabled = True
            self.rc_select_enabled = True
        elif binding == "rc_insert_column":
            self.rc_insert_column_enabled = True
            self.rc_popup_menus_enabled = True
            self.rc_select_enabled = True
        elif binding == "rc_insert_row":
            self.rc_insert_row_enabled = True
            self.rc_popup_menus_enabled = True
            self.rc_select_enabled = True
        elif binding == "copy":
            self.edit_bindings(True, "copy")
        elif binding == "cut":
            self.edit_bindings(True, "cut")
        elif binding == "paste":
            self.edit_bindings(True, "paste")
        elif binding == "delete":
            self.edit_bindings(True, "delete")
        elif binding in ("right_click_popup_menu", "rc_popup_menu"):
            self.rc_popup_menus_enabled = True
            self.rc_select_enabled = True
        elif binding in ("right_click_select", "rc_select"):
            self.rc_select_enabled = True
        elif binding == "undo":
            self.edit_bindings(True, "undo")
        elif binding == "edit_cell":
            self.edit_bindings(True, "edit_cell")
        elif binding == "edit_header":
            self.edit_bindings(True, "edit_header")
        # rebuild menus unconditionally so they reflect the current flags
        self.create_rc_menus()
    def disable_bindings_internal(self, binding):
        """Disable a single named binding (mirror of enable_bindings_internal).

        ``binding`` is expected lowercase.  "all"/"disable_all" switches
        off every user-facing binding.  Note: unlike the enable path,
        the single rc_* branches only clear their own flag — they do not
        touch ``rc_popup_menus_enabled``/``rc_select_enabled``, since
        other rc_* actions may still need those.  Menus are rebuilt at
        the end to drop the now-disabled entries.
        """
        if binding in ("all", "disable_all"):
            self.single_selection_enabled = False
            self.toggle_selection_enabled = False
            self.drag_selection_enabled = False
            self.enable_disable_select_all(False)
            self.CH.disable_bindings("column_width_resize")
            self.CH.disable_bindings("column_select")
            self.CH.disable_bindings("column_height_resize")
            self.CH.disable_bindings("drag_and_drop")
            self.CH.disable_bindings("double_click_column_resize")
            self.RI.disable_bindings("row_height_resize")
            self.RI.disable_bindings("double_click_row_resize")
            self.RI.disable_bindings("row_width_resize")
            self.RI.disable_bindings("row_select")
            self.RI.disable_bindings("drag_and_drop")
            self.unbind_arrowkeys()
            self.edit_bindings(False)
            self.rc_delete_column_enabled = False
            self.rc_delete_row_enabled = False
            self.rc_insert_column_enabled = False
            self.rc_insert_row_enabled = False
            self.rc_popup_menus_enabled = False
            self.rc_select_enabled = False
            # hide the top-left resize affordances as well
            self.TL.rh_state("hidden")
            self.TL.rw_state("hidden")
        elif binding in ("single", "single_selection_mode", "single_select"):
            self.single_selection_enabled = False
        elif binding in ("toggle", "toggle_selection_mode", "toggle_select"):
            self.toggle_selection_enabled = False
        elif binding == "drag_select":
            self.drag_selection_enabled = False
        elif binding == "select_all":
            self.enable_disable_select_all(False)
        # column header bindings
        elif binding == "column_width_resize":
            self.CH.disable_bindings("column_width_resize")
        elif binding == "column_select":
            self.CH.disable_bindings("column_select")
        elif binding == "column_height_resize":
            self.CH.disable_bindings("column_height_resize")
            self.TL.rh_state("hidden")
        elif binding == "column_drag_and_drop":
            self.CH.disable_bindings("drag_and_drop")
        elif binding == "double_click_column_resize":
            self.CH.disable_bindings("double_click_column_resize")
        # row index bindings
        elif binding == "row_height_resize":
            self.RI.disable_bindings("row_height_resize")
        elif binding == "double_click_row_resize":
            self.RI.disable_bindings("double_click_row_resize")
        elif binding == "row_width_resize":
            self.RI.disable_bindings("row_width_resize")
            self.TL.rw_state("hidden")
        elif binding == "row_select":
            self.RI.disable_bindings("row_select")
        elif binding == "row_drag_and_drop":
            self.RI.disable_bindings("drag_and_drop")
        elif binding == "arrowkeys":
            self.unbind_arrowkeys()
        elif binding == "rc_delete_column":
            self.rc_delete_column_enabled = False
        elif binding == "rc_delete_row":
            self.rc_delete_row_enabled = False
        elif binding == "rc_insert_column":
            self.rc_insert_column_enabled = False
        elif binding == "rc_insert_row":
            self.rc_insert_row_enabled = False
        elif binding == "edit_bindings":
            self.edit_bindings(False)
        elif binding == "copy":
            self.edit_bindings(False, "copy")
        elif binding == "cut":
            self.edit_bindings(False, "cut")
        elif binding == "paste":
            self.edit_bindings(False, "paste")
        elif binding == "delete":
            self.edit_bindings(False, "delete")
        elif binding in ("right_click_popup_menu", "rc_popup_menu"):
            self.rc_popup_menus_enabled = False
        elif binding in ("right_click_select", "rc_select"):
            self.rc_select_enabled = False
        elif binding == "undo":
            self.edit_bindings(False, "undo")
        elif binding == "edit_cell":
            self.edit_bindings(False, "edit_cell")
        elif binding == "edit_header":
            self.edit_bindings(False, "edit_header")
        # rebuild menus unconditionally so they reflect the current flags
        self.create_rc_menus()
def reset_mouse_motion_creations(self, event = None):
self.config(cursor = "")
self.RI.config(cursor = "")
self.CH.config(cursor = "")
self.RI.rsz_w = None
self.RI.rsz_h = None
self.CH.rsz_w = None
self.CH.rsz_h = None
def mouse_motion(self, event):
if (
not self.RI.currently_resizing_height and
not self.RI.currently_resizing_width and
not self.CH.currently_resizing_height and
not self.CH.currently_resizing_width
):
mouse_over_resize = False
x = self.canvasx(event.x)
y = self.canvasy(event.y)
if self.RI.width_resizing_enabled and not mouse_over_resize:
try:
x1, y1, x2, y2 = self.row_width_resize_bbox[0], self.row_width_resize_bbox[1], self.row_width_resize_bbox[2], self.row_width_resize_bbox[3]
if x >= x1 and y >= y1 and x <= x2 and y <= y2:
self.config(cursor = "sb_h_double_arrow")
self.RI.config(cursor = "sb_h_double_arrow")
self.RI.rsz_w = True
mouse_over_resize = True
except:
pass
if self.CH.height_resizing_enabled and not mouse_over_resize:
try:
x1, y1, x2, y2 = self.header_height_resize_bbox[0], self.header_height_resize_bbox[1], self.header_height_resize_bbox[2], self.header_height_resize_bbox[3]
if x >= x1 and y >= y1 and x <= x2 and y <= y2:
self.config(cursor = "sb_v_double_arrow")
self.CH.config(cursor = "sb_v_double_arrow")
self.CH.rsz_h = True
mouse_over_resize = True
except:
pass
if not mouse_over_resize:
self.reset_mouse_motion_creations()
if self.extra_motion_func is not None:
self.extra_motion_func(event)
def rc(self, event = None):
self.hide_dropdown_window()
self.focus_set()
popup_menu = None
if self.single_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
r = self.identify_row(y = event.y)
c = self.identify_col(x = event.x)
if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
if self.col_selected(c):
if self.rc_popup_menus_enabled:
popup_menu = self.CH.ch_rc_popup_menu
elif self.row_selected(r):
if self.rc_popup_menus_enabled:
popup_menu = self.RI.ri_rc_popup_menu
elif self.cell_selected(r, c):
if self.rc_popup_menus_enabled:
popup_menu = self.rc_popup_menu
else:
if self.rc_select_enabled:
self.select_cell(r, c, redraw = True)
if self.rc_popup_menus_enabled:
popup_menu = self.rc_popup_menu
else:
popup_menu = self.empty_rc_popup_menu
elif self.toggle_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
r = self.identify_row(y = event.y)
c = self.identify_col(x = event.x)
if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
if self.col_selected(c):
if self.rc_popup_menus_enabled:
popup_menu = self.CH.ch_rc_popup_menu
elif self.row_selected(r):
if self.rc_popup_menus_enabled:
popup_menu = self.RI.ri_rc_popup_menu
elif self.cell_selected(r, c):
if self.rc_popup_menus_enabled:
popup_menu = self.rc_popup_menu
else:
if self.rc_select_enabled:
self.toggle_select_cell(r, c, redraw = True)
if self.rc_popup_menus_enabled:
popup_menu = self.rc_popup_menu
else:
popup_menu = self.empty_rc_popup_menu
if self.extra_rc_func is not None:
self.extra_rc_func(event)
if popup_menu is not None:
popup_menu.tk_popup(event.x_root, event.y_root)
def b1_press(self, event = None):
self.closed_dropdown = self.hide_dropdown_window(b1 = True)
self.focus_set()
x1, y1, x2, y2 = self.get_canvas_visible_area()
if self.identify_col(x = event.x, allow_end = False) is None or self.identify_row(y = event.y, allow_end = False) is None:
self.deselect("all")
r = self.identify_row(y = event.y)
c = self.identify_col(x = event.x)
if self.single_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
self.select_cell(r, c, redraw = True)
elif self.toggle_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
r = self.identify_row(y = event.y)
c = self.identify_col(x = event.x)
if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
self.toggle_select_cell(r, c, redraw = True)
elif self.RI.width_resizing_enabled and self.RI.rsz_h is None and self.RI.rsz_w == True:
self.RI.currently_resizing_width = True
self.new_row_width = self.RI.current_width + event.x
x = self.canvasx(event.x)
self.create_resize_line(x, y1, x, y2, width = 1, fill = self.RI.resizing_line_fg, tag = "rwl")
elif self.CH.height_resizing_enabled and self.CH.rsz_w is None and self.CH.rsz_h == True:
self.CH.currently_resizing_height = True
self.new_header_height = self.CH.current_height + event.y
y = self.canvasy(event.y)
self.create_resize_line(x1, y, x2, y, width = 1, fill = self.RI.resizing_line_fg, tag = "rhl")
self.b1_pressed_loc = (r, c)
if self.extra_b1_press_func is not None:
self.extra_b1_press_func(event)
def create_resize_line(self, x1, y1, x2, y2, width, fill, tag):
if self.hidd_resize_lines:
t, sh = self.hidd_resize_lines.popitem()
self.coords(t, x1, y1, x2, y2)
if sh:
self.itemconfig(t, width = width, fill = fill, tag = tag)
else:
self.itemconfig(t, width = width, fill = fill, tag = tag, state = "normal")
self.lift(t)
else:
t = self.create_line(x1, y1, x2, y2, width = width, fill = fill, tag = tag)
self.disp_resize_lines[t] = True
def delete_resize_lines(self):
self.hidd_resize_lines.update(self.disp_resize_lines)
self.disp_resize_lines = {}
for t, sh in self.hidd_resize_lines.items():
if sh:
self.itemconfig(t, state = "hidden")
self.hidd_resize_lines[t] = False
def shift_b1_press(self, event = None):
self.hide_dropdown_window()
self.focus_set()
if self.drag_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
self.b1_pressed_loc = None
rowsel = int(self.identify_row(y = event.y))
colsel = int(self.identify_col(x = event.x))
if rowsel < len(self.row_positions) - 1 and colsel < len(self.col_positions) - 1:
currently_selected = self.currently_selected()
if currently_selected and isinstance(currently_selected[0], int):
min_r = currently_selected[0]
min_c = currently_selected[1]
self.delete_selection_rects(delete_current = False)
if rowsel >= min_r and colsel >= min_c:
self.create_selected(min_r, min_c, rowsel + 1, colsel + 1)
elif rowsel >= min_r and min_c >= colsel:
self.create_selected(min_r, colsel, rowsel + 1, min_c + 1)
elif min_r >= rowsel and colsel >= min_c:
self.create_selected(rowsel, min_c, min_r + 1, colsel + 1)
elif min_r >= rowsel and min_c >= colsel:
self.create_selected(rowsel, colsel, min_r + 1, min_c + 1)
last_selected = tuple(int(e) for e in self.gettags(self.find_withtag("CellSelectFill"))[1].split("_") if e)
else:
self.select_cell(rowsel, colsel, redraw = False)
last_selected = tuple(int(e) for e in self.gettags(self.find_withtag("Current_Outside"))[1].split("_") if e)
self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True, redraw_table = True)
if self.shift_selection_binding_func is not None:
self.shift_selection_binding_func(SelectionBoxEvent("shift_select_cells", last_selected))
def b1_motion(self, event):
x1, y1, x2, y2 = self.get_canvas_visible_area()
if self.drag_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
end_row = self.identify_row(y = event.y)
end_col = self.identify_col(x = event.x)
currently_selected = self.currently_selected()
if end_row < len(self.row_positions) - 1 and end_col < len(self.col_positions) - 1 and currently_selected and isinstance(currently_selected[0], int):
start_row = currently_selected[0]
start_col = currently_selected[1]
if end_row >= start_row and end_col >= start_col:
rect = (start_row, start_col, end_row + 1, end_col + 1)
elif end_row >= start_row and end_col < start_col:
rect = (start_row, end_col, end_row + 1, start_col + 1)
elif end_row < start_row and end_col >= start_col:
rect = (end_row, start_col, start_row + 1, end_col + 1)
elif end_row < start_row and end_col < start_col:
rect = (end_row, end_col, start_row + 1, start_col + 1)
if self.being_drawn_rect != rect:
self.delete_selection_rects(delete_current = False)
self.create_selected(*rect)
self.being_drawn_rect = rect
if self.drag_selection_binding_func is not None:
self.drag_selection_binding_func(SelectionBoxEvent("drag_select_cells", tuple(int(e) for e in self.gettags(self.find_withtag("CellSelectFill"))[1].split("_") if e)))
if self.data_ref:
xcheck = self.xview()
ycheck = self.yview()
if len(xcheck) > 1 and xcheck[0] > 0 and event.x < 0:
try:
self.xview_scroll(-1, "units")
self.CH.xview_scroll(-1, "units")
except:
pass
if len(ycheck) > 1 and ycheck[0] > 0 and event.y < 0:
try:
self.yview_scroll(-1, "units")
self.RI.yview_scroll(-1, "units")
except:
pass
if len(xcheck) > 1 and xcheck[1] < 1 and event.x > self.winfo_width():
try:
self.xview_scroll(1, "units")
self.CH.xview_scroll(1, "units")
except:
pass
if len(ycheck) > 1 and ycheck[1] < 1 and event.y > self.winfo_height():
try:
self.yview_scroll(1, "units")
self.RI.yview_scroll(1, "units")
except:
pass
self.check_views()
self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True, redraw_table = True)
elif self.RI.width_resizing_enabled and self.RI.rsz_w is not None and self.RI.currently_resizing_width:
self.RI.delete_resize_lines()
self.delete_resize_lines()
if event.x >= 0:
x = self.canvasx(event.x)
self.new_row_width = self.RI.current_width + event.x
self.create_resize_line(x, y1, x, y2, width = 1, fill = self.RI.resizing_line_fg, tag = "rwl")
else:
x = self.RI.current_width + event.x
if x < self.min_cw:
x = int(self.min_cw)
self.new_row_width = x
self.RI.create_resize_line(x, y1, x, y2, width = 1, fill = self.RI.resizing_line_fg, tag = "rwl")
elif self.CH.height_resizing_enabled and self.CH.rsz_h is not None and self.CH.currently_resizing_height:
self.CH.delete_resize_lines()
self.delete_resize_lines()
if event.y >= 0:
y = self.canvasy(event.y)
self.new_header_height = self.CH.current_height + event.y
self.create_resize_line(x1, y, x2, y, width = 1, fill = self.RI.resizing_line_fg, tag = "rhl")
else:
y = self.CH.current_height + event.y
if y < self.hdr_min_rh:
y = int(self.hdr_min_rh)
self.new_header_height = y
self.CH.create_resize_line(x1, y, x2, y, width = 1, fill = self.RI.resizing_line_fg, tag = "rhl")
if self.extra_b1_motion_func is not None:
self.extra_b1_motion_func(event)
    def b1_release(self, event = None):
        """Finish a button-1 interaction.

        Commits an in-progress row-index width or header height resize,
        clears transient drag state, and — when the release lands on the
        same cell as the press — activates that cell's dropdown or
        checkbox option, if any.  Always ends by clearing the pressed
        location and calling any user-supplied release callback.
        """
        if self.RI.width_resizing_enabled and self.RI.rsz_w is not None and self.RI.currently_resizing_width:
            self.delete_resize_lines()
            self.RI.delete_resize_lines()
            self.RI.currently_resizing_width = False
            self.RI.set_width(self.new_row_width, set_TL = True)
            self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
        elif self.CH.height_resizing_enabled and self.CH.rsz_h is not None and self.CH.currently_resizing_height:
            self.delete_resize_lines()
            self.CH.delete_resize_lines()
            self.CH.currently_resizing_height = False
            self.CH.set_height(self.new_header_height, set_TL = True)
            self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
        self.RI.rsz_w = None
        self.CH.rsz_h = None
        self.being_drawn_rect = None
        if self.b1_pressed_loc is not None:
            r = self.identify_row(y = event.y, allow_end = False)
            c = self.identify_col(x = event.x, allow_end = False)
            # press and release on the same cell -> possible option activation
            if r is not None and c is not None and (r, c) == self.b1_pressed_loc:
                # map the displayed column to the data column
                dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                if (r, dcol) in self.cell_options and ('dropdown' in self.cell_options[(r, dcol)] or 'checkbox' in self.cell_options[(r, dcol)]):
                    # don't reopen a dropdown that this same click just closed
                    if (self.closed_dropdown != self.b1_pressed_loc and
                        'dropdown' in self.cell_options[(r, dcol)]):
                        self.open_cell(event)
                    # checkbox only toggles when clicked near its top-left box
                    elif 'checkbox' in self.cell_options[(r, dcol)] and event.x < self.col_positions[c] + self.txt_h + 5 and event.y < self.row_positions[r] + self.txt_h + 5:
                        self.open_cell(event)
                    self.hide_dropdown_window()
                else:
                    self.hide_dropdown_window()
            else:
                self.hide_dropdown_window()
        self.b1_pressed_loc = None
        self.closed_dropdown = None
        if self.extra_b1_release_func is not None:
            self.extra_b1_release_func(event)
def double_b1(self, event = None):
self.hide_dropdown_window()
self.focus_set()
x1, y1, x2, y2 = self.get_canvas_visible_area()
if self.identify_col(x = event.x, allow_end = False) is None or self.identify_row(y = event.y, allow_end = False) is None:
self.deselect("all")
elif self.single_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
r = self.identify_row(y = event.y)
c = self.identify_col(x = event.x)
if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
self.select_cell(r, c, redraw = True)
if self.edit_cell_enabled:
self.open_cell(event)
elif self.toggle_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
r = self.identify_row(y = event.y)
c = self.identify_col(x = event.x)
if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
self.toggle_select_cell(r, c, redraw = True)
if self.edit_cell_enabled:
self.open_cell(event)
if self.extra_double_b1_func is not None:
self.extra_double_b1_func(event)
def identify_row(self, event = None, y = None, allow_end = True):
if event is None:
y2 = self.canvasy(y)
elif y is None:
y2 = self.canvasy(event.y)
r = bisect.bisect_left(self.row_positions, y2)
if r != 0:
r -= 1
if not allow_end and r >= len(self.row_positions) - 1:
return None
return r
def identify_col(self, event = None, x = None, allow_end = True):
if event is None:
x2 = self.canvasx(x)
elif x is None:
x2 = self.canvasx(event.x)
c = bisect.bisect_left(self.col_positions, x2)
if c != 0:
c -= 1
if not allow_end and c >= len(self.col_positions) - 1:
return None
return c
def GetCellCoords(self, event = None, r = None, c = None, sel = False):
if event is not None:
r = self.identify_row(event)
c = self.identify_col(event)
elif r is not None and c is not None:
if sel:
return self.col_positions[c] + 1,self.row_positions[r] + 1, self.col_positions[c + 1], self.row_positions[r + 1]
else:
return self.col_positions[c], self.row_positions[r], self.col_positions[c + 1], self.row_positions[r + 1]
def check_views(self):
xcheck = self.xview()
ycheck = self.yview()
if xcheck and xcheck[0] <= 0:
self.xview(*("moveto", 0))
if self.show_header:
self.CH.xview(*("moveto", 0))
elif len(xcheck) > 1 and xcheck[1] >= 1:
self.xview(*("moveto", 1))
if self.show_header:
self.CH.xview(*("moveto", 1))
if ycheck and ycheck[0] <= 0:
self.yview(*("moveto", 0))
if self.show_index:
self.RI.yview(*("moveto", 0))
elif len(ycheck) > 1 and ycheck[1] >= 1:
self.yview(*("moveto", 1))
if self.show_index:
self.RI.yview(*("moveto", 1))
def set_xviews(self, *args):
self.xview(*args)
if self.show_header:
self.CH.xview(*args)
self.check_views()
self.main_table_redraw_grid_and_text(redraw_header = True if self.show_header else False)
def set_yviews(self, *args):
self.yview(*args)
if self.show_index:
self.RI.yview(*args)
self.check_views()
self.main_table_redraw_grid_and_text(redraw_row_index = True if self.show_index else False)
def set_view(self, x_args, y_args):
self.xview(*x_args)
if self.show_header:
self.CH.xview(*x_args)
self.yview(*y_args)
if self.show_index:
self.RI.yview(*y_args)
self.check_views()
self.main_table_redraw_grid_and_text(redraw_row_index = True if self.show_index else False,
redraw_header = True if self.show_header else False)
def mousewheel(self, event = None):
if event.delta < 0 or event.num == 5:
self.yview_scroll(1, "units")
self.RI.yview_scroll(1, "units")
elif event.delta >= 0 or event.num == 4:
if self.canvasy(0) <= 0:
return
self.yview_scroll(-1, "units")
self.RI.yview_scroll(-1, "units")
self.main_table_redraw_grid_and_text(redraw_row_index = True)
def shift_mousewheel(self, event = None):
if event.delta < 0 or event.num == 5:
self.xview_scroll(1, "units")
self.CH.xview_scroll(1, "units")
elif event.delta >= 0 or event.num == 4:
if self.canvasx(0) <= 0:
return
self.xview_scroll(-1, "units")
self.CH.xview_scroll(-1, "units")
self.main_table_redraw_grid_and_text(redraw_header = True)
def GetWidthChars(self, width):
char_w = self.GetTextWidth("_")
return int(width / char_w)
def GetTextWidth(self, txt):
self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = txt, font = self.my_font)
b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
return b[2] - b[0]
def GetTextHeight(self, txt):
self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = txt, font = self.my_font)
b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
return b[3] - b[1]
def GetHdrTextWidth(self, txt):
self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = txt, font = self.my_hdr_font)
b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
return b[2] - b[0]
def GetHdrTextHeight(self, txt):
self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = txt, font = self.my_hdr_font)
b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
return b[3] - b[1]
def set_min_cw(self):
#w1 = self.GetHdrTextWidth("X") + 5
#w2 = self.GetTextWidth("X") + 5
#if w1 >= w2:
# self.min_cw = w1
#else:
# self.min_cw = w2
self.min_cw = 5
if self.min_cw > self.CH.max_cw:
self.CH.max_cw = self.min_cw + 20
if self.min_cw > self.default_cw:
self.default_cw = self.min_cw + 20
def font(self, newfont = None, reset_row_positions = False):
if newfont:
if not isinstance(newfont, tuple):
raise ValueError("Argument must be tuple e.g. ('Carlito',12,'normal')")
if len(newfont) != 3:
raise ValueError("Argument must be three-tuple")
if (
not isinstance(newfont[0], str) or
not isinstance(newfont[1], int) or
not isinstance(newfont[2], str)
):
raise ValueError("Argument must be font, size and 'normal', 'bold' or 'italic' e.g. ('Carlito',12,'normal')")
else:
self.my_font = newfont
self.fnt_fam = newfont[0]
self.fnt_sze = newfont[1]
self.fnt_wgt = newfont[2]
self.set_fnt_help()
if reset_row_positions:
self.reset_row_positions()
else:
return self.my_font
def set_fnt_help(self):
self.txt_h = self.GetTextHeight("|ZXjy*'^")
self.half_txt_h = ceil(self.txt_h / 2)
if self.half_txt_h % 2 == 0:
self.fl_ins = self.half_txt_h + 2
else:
self.fl_ins = self.half_txt_h + 3
self.xtra_lines_increment = int(self.txt_h)
self.min_rh = self.txt_h + 5
if self.min_rh < 12:
self.min_rh = 12
#self.min_rh = 5
if self.default_rh[0] != "pixels":
self.default_rh = (self.default_rh[0] if self.default_rh[0] != "pixels" else "pixels",
self.GetLinesHeight(int(self.default_rh[0])) if self.default_rh[0] != "pixels" else self.default_rh[1])
self.set_min_cw()
def header_font(self, newfont = None):
if newfont:
if not isinstance(newfont, tuple):
raise ValueError("Argument must be tuple e.g. ('Carlito', 12, 'normal')")
if len(newfont) != 3:
raise ValueError("Argument must be three-tuple")
if (
not isinstance(newfont[0], str) or
not isinstance(newfont[1], int) or
not isinstance(newfont[2], str)
):
raise ValueError("Argument must be font, size and 'normal', 'bold' or 'italic' e.g. ('Carlito', 12, 'normal')")
else:
self.my_hdr_font = newfont
self.hdr_fnt_fam = newfont[0]
self.hdr_fnt_sze = newfont[1]
self.hdr_fnt_wgt = newfont[2]
self.set_hdr_fnt_help()
else:
return self.my_hdr_font
def set_hdr_fnt_help(self):
self.hdr_txt_h = self.GetHdrTextHeight("|ZXj*'^")
self.hdr_half_txt_h = ceil(self.hdr_txt_h / 2)
if self.hdr_half_txt_h % 2 == 0:
self.hdr_fl_ins = self.hdr_half_txt_h + 2
else:
self.hdr_fl_ins = self.hdr_half_txt_h + 3
self.hdr_xtra_lines_increment = self.hdr_txt_h
self.hdr_min_rh = self.hdr_txt_h + 5
if self.default_hh[0] != "pixels":
self.default_hh = (self.default_hh[0] if self.default_hh[0] != "pixels" else "pixels",
self.GetHdrLinesHeight(int(self.default_hh[0])) if self.default_hh[0] != "pixels" else self.default_hh[1])
self.set_min_cw()
self.CH.set_height(self.default_hh[1])
def data_reference(self, newdataref = None, reset_col_positions = True, reset_row_positions = True, redraw = False, return_id = True):
if isinstance(newdataref, (list, tuple)):
self.data_ref = newdataref
self.undo_storage = deque(maxlen = self.max_undos)
if reset_col_positions:
self.reset_col_positions()
if reset_row_positions:
self.reset_row_positions()
if redraw:
self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
if return_id:
return id(self.data_ref)
else:
return self.data_ref
    def set_cell_size_to_text(self, r, c, only_set_if_too_small = False, redraw = True, run_binding = False):
        """Resize displayed column ``c`` and row ``r`` so the cell's content fits.

        Measures the cell's text (or checkbox label) on the hidden measuring
        canvas, clamps the result between the minimum sizes and the
        RI/CH maximums, then adjusts col_positions/row_positions in place.
        When ``only_set_if_too_small`` is true the cell is only grown, never
        shrunk. When ``run_binding`` is true the user resize callbacks fire
        if a dimension actually changed.
        """
        min_cw = self.min_cw
        min_rh = self.min_rh
        h = int(min_rh)
        w = int(min_cw)
        # cn is the data column backing displayed column c
        if self.all_columns_displayed:
            cn = int(c)
        else:
            cn = self.displayed_columns[c]
        rn = int(r)
        if (rn, cn) in self.cell_options and 'checkbox' in self.cell_options[(rn, cn)]:
            # checkbox cells: measure the label text; extra txt_h leaves room for the box
            # NOTE(review): this measures with the header font (my_hdr_font) — confirm intended
            self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = self.cell_options[(rn, cn)]['checkbox']['text'], font = self.my_hdr_font)
            b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
            tw = b[2] - b[0] + 7 + self.txt_h
            if b[3] - b[1] + 5 > h:
                h = b[3] - b[1] + 5
        else:
            # normal cells: stringify the value; any lookup failure means "empty"
            try:
                if isinstance(self.data_ref[r][cn], str):
                    txt = self.data_ref[r][cn]
                else:
                    txt = f"{self.data_ref[r][cn]}"
            except:
                txt = ""
            if txt:
                self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = txt, font = self.my_font)
                b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
                # dropdown cells reserve txt_h extra width for the arrow
                tw = b[2] - b[0] + self.txt_h + 7 if (rn, cn) in self.cell_options and 'dropdown' in self.cell_options[(rn, cn)] else b[2] - b[0] + 7
                if b[3] - b[1] + 5 > h:
                    h = b[3] - b[1] + 5
            else:
                if (rn, cn) in self.cell_options and 'dropdown' in self.cell_options[(rn, cn)]:
                    tw = self.txt_h + 7
                else:
                    tw = min_cw
        if tw > w:
            w = tw
        # clamp both dimensions to [min, max]
        if h < min_rh:
            h = int(min_rh)
        elif h > self.RI.max_rh:
            h = int(self.RI.max_rh)
        if w < min_cw:
            w = int(min_cw)
        elif w > self.CH.max_cw:
            w = int(self.CH.max_cw)
        cell_needs_resize_w = False
        cell_needs_resize_h = False
        if only_set_if_too_small:
            # grow-only mode
            if w > self.col_positions[c + 1] - self.col_positions[c]:
                cell_needs_resize_w = True
            if h > self.row_positions[r + 1] - self.row_positions[r]:
                cell_needs_resize_h = True
        else:
            # set exactly: resize on any difference
            if w != self.col_positions[c + 1] - self.col_positions[c]:
                cell_needs_resize_w = True
            if h != self.row_positions[r + 1] - self.row_positions[r]:
                cell_needs_resize_h = True
        if cell_needs_resize_w:
            old_width = self.col_positions[c + 1] - self.col_positions[c]
            new_col_pos = self.col_positions[c] + w
            increment = new_col_pos - self.col_positions[c + 1]
            # shift all later boundaries by the delta, then set this one
            self.col_positions[c + 2:] = [e + increment for e in islice(self.col_positions, c + 2, len(self.col_positions))]
            self.col_positions[c + 1] = new_col_pos
            new_width = self.col_positions[c + 1] - self.col_positions[c]
            if run_binding and self.CH.column_width_resize_func is not None and old_width != new_width:
                self.CH.column_width_resize_func(ResizeEvent("column_width_resize", c, old_width, new_width))
        if cell_needs_resize_h:
            old_height = self.row_positions[r + 1] - self.row_positions[r]
            new_row_pos = self.row_positions[r] + h
            increment = new_row_pos - self.row_positions[r + 1]
            self.row_positions[r + 2:] = [e + increment for e in islice(self.row_positions, r + 2, len(self.row_positions))]
            self.row_positions[r + 1] = new_row_pos
            new_height = self.row_positions[r + 1] - self.row_positions[r]
            if run_binding and self.RI.row_height_resize_func is not None and old_height != new_height:
                self.RI.row_height_resize_func(ResizeEvent("row_height_resize", r, old_height, new_height))
        if cell_needs_resize_w or cell_needs_resize_h:
            # selection box geometry depends on positions; rebuild after any change
            self.recreate_all_selection_boxes()
        if redraw:
            self.refresh()
    def set_all_cell_sizes_to_text(self, include_index = False):
        """Resize every displayed column and every row to fit its content.

        Measures headers, row-index entries (when the index is a list) and
        every cell on the hidden measuring canvas, then rebuilds
        row_positions and col_positions from scratch and recreates the
        selection boxes. Returns (row_positions, col_positions).

        NOTE(review): the ``include_index`` parameter is never referenced in
        this body — index measuring is gated on ``my_row_index`` being a
        list instead; confirm whether that is intended.
        """
        min_cw = self.min_cw
        min_rh = self.min_rh
        # per-row heights; default is the minimum row height
        rhs = defaultdict(lambda: int(min_rh))
        cws = []
        # two scratch canvas items: x for table font, x2 for header font
        x = self.txt_measure_canvas.create_text(0, 0, text = "", font = self.my_font)
        x2 = self.txt_measure_canvas.create_text(0, 0, text = "", font = self.my_hdr_font)
        # hoist bound methods out of the measuring loops
        itmcon = self.txt_measure_canvas.itemconfig
        itmbbx = self.txt_measure_canvas.bbox
        if self.all_columns_displayed:
            iterable = range(self.total_data_cols())
        else:
            iterable = self.displayed_columns
        if isinstance(self.my_row_index, list):
            # seed row heights from the row-index labels
            for rn in range(self.total_data_rows()):
                try:
                    if isinstance(self.my_row_index[rn], str):
                        txt = self.my_row_index[rn]
                    else:
                        txt = f"{self.my_row_index[rn]}"
                except:
                    txt = ""
                if txt:
                    itmcon(x, text = txt)
                    b = itmbbx(x)
                    h = b[3] - b[1] + 7
                else:
                    h = min_rh
                if h < min_rh:
                    h = int(min_rh)
                elif h > self.RI.max_rh:
                    h = int(self.RI.max_rh)
                if h > rhs[rn]:
                    rhs[rn] = h
        for cn in iterable:
            # start the column width from its header content
            if cn in self.CH.cell_options and 'checkbox' in self.CH.cell_options[cn]:
                txt = self.CH.cell_options[cn]['checkbox']['text']
                if txt:
                    itmcon(x2, text = txt)
                    b = itmbbx(x2)
                    w = b[2] - b[0] + 7 + self.txt_h
                else:
                    w = self.min_cw
            else:
                try:
                    # int my_hdrs means "use that data row as the header"
                    if isinstance(self.my_hdrs, int):
                        txt = self.data_ref[self.my_hdrs][cn]
                    else:
                        txt = self.my_hdrs[cn]
                    if txt:
                        itmcon(x2, text = txt)
                        b = itmbbx(x2)
                        w = b[2] - b[0] + self.txt_h + 7 if cn in self.CH.cell_options and 'dropdown' in self.CH.cell_options[cn] else b[2] - b[0] + 7
                    else:
                        w = self.min_cw + self.txt_h + 7 if cn in self.CH.cell_options and 'dropdown' in self.CH.cell_options[cn] else self.min_cw
                except:
                    # no stored header for this column: measure the generated default
                    if self.CH.default_hdr == "letters":
                        itmcon(x2, text = f"{num2alpha(cn)}")
                    elif self.CH.default_hdr == "numbers":
                        itmcon(x2, text = f"{cn + 1}")
                    else:
                        itmcon(x2, text = f"{cn + 1} {num2alpha(cn)}")
                    b = itmbbx(x2)
                    w = b[2] - b[0] + 7
            for rn, r in enumerate(self.data_ref):
                # widen the column / heighten the row for each cell in turn
                if (rn, cn) in self.cell_options and 'checkbox' in self.cell_options[(rn, cn)]:
                    txt = self.cell_options[(rn, cn)]['checkbox']['text']
                    if txt:
                        itmcon(x, text = txt)
                        b = itmbbx(x)
                        tw = b[2] - b[0] + 7
                        h = b[3] - b[1] + 5
                    else:
                        tw = min_cw
                        h = min_rh
                else:
                    try:
                        if isinstance(r[cn], str):
                            txt = r[cn]
                        else:
                            txt = f"{r[cn]}"
                    except:
                        txt = ""
                    if txt:
                        itmcon(x, text = txt)
                        b = itmbbx(x)
                        # dropdown cells reserve txt_h extra width for the arrow
                        tw = b[2] - b[0] + self.txt_h + 7 if (rn, cn) in self.cell_options and 'dropdown' in self.cell_options[(rn, cn)] else b[2] - b[0] + 7
                        h = b[3] - b[1] + 5
                    else:
                        tw = self.txt_h + 7 if (rn, cn) in self.cell_options and 'dropdown' in self.cell_options[(rn, cn)] else min_cw
                        h = min_rh
                if tw > w:
                    w = tw
                if h < min_rh:
                    h = int(min_rh)
                elif h > self.RI.max_rh:
                    h = int(self.RI.max_rh)
                if h > rhs[rn]:
                    rhs[rn] = h
            if w < min_cw:
                w = int(min_cw)
            elif w > self.CH.max_cw:
                w = int(self.CH.max_cw)
            cws.append(w)
        self.txt_measure_canvas.delete(x)
        self.txt_measure_canvas.delete(x2)
        # rebuild cumulative boundary lists (rhs preserves row insertion order)
        self.row_positions = list(accumulate(chain([0], (height for height in rhs.values()))))
        self.col_positions = list(accumulate(chain([0], (width for width in cws))))
        self.recreate_all_selection_boxes()
        return self.row_positions, self.col_positions
def reset_col_positions(self):
colpos = int(self.default_cw)
if self.all_columns_displayed:
self.col_positions = list(accumulate(chain([0], (colpos for c in range(self.total_data_cols())))))
else:
self.col_positions = list(accumulate(chain([0], (colpos for c in range(len(self.displayed_columns))))))
def del_col_position(self, idx, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if idx == "end" or len(self.col_positions) <= idx + 1:
del self.col_positions[-1]
else:
w = self.col_positions[idx + 1] - self.col_positions[idx]
idx += 1
del self.col_positions[idx]
self.col_positions[idx:] = [e - w for e in islice(self.col_positions, idx, len(self.col_positions))]
def del_col_positions(self, idx, num = 1, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if idx == "end" or len(self.col_positions) <= idx + 1:
del self.col_positions[-1]
else:
cws = [int(b - a) for a, b in zip(self.col_positions, islice(self.col_positions, 1, len(self.col_positions)))]
cws[idx:idx + num] = []
self.col_positions = list(accumulate(chain([0], (width for width in cws))))
def insert_col_position(self, idx = "end", width = None, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if width is None:
w = self.default_cw
else:
w = width
if idx == "end" or len(self.col_positions) == idx + 1:
self.col_positions.append(self.col_positions[-1] + w)
else:
idx += 1
self.col_positions.insert(idx, self.col_positions[idx - 1] + w)
idx += 1
self.col_positions[idx:] = [e + w for e in islice(self.col_positions, idx, len(self.col_positions))]
def insert_col_positions(self, idx = "end", widths = None, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if widths is None:
w = [self.default_cw]
elif isinstance(widths, int):
w = list(repeat(self.default_cw, widths))
else:
w = widths
if idx == "end" or len(self.col_positions) == idx + 1:
if len(w) > 1:
self.col_positions += list(accumulate(chain([self.col_positions[-1] + w[0]], islice(w, 1, None))))
else:
self.col_positions.append(self.col_positions[-1] + w[0])
else:
if len(w) > 1:
idx += 1
self.col_positions[idx:idx] = list(accumulate(chain([self.col_positions[idx - 1] + w[0]], islice(w, 1, None))))
idx += len(w)
sumw = sum(w)
self.col_positions[idx:] = [e + sumw for e in islice(self.col_positions, idx, len(self.col_positions))]
else:
w = w[0]
idx += 1
self.col_positions.insert(idx, self.col_positions[idx - 1] + w)
idx += 1
self.col_positions[idx:] = [e + w for e in islice(self.col_positions, idx, len(self.col_positions))]
    def insert_col_rc(self, event = None):
        """Right-click handler: insert column(s) left/right of the selection.

        Works out the displayed insertion index and its backing data column,
        honours the paste/insert column limit, fires the begin/end user
        callbacks (a raising begin-callback vetoes the insert), updates
        displayed_columns, col_positions, all option dicts, headers and the
        data itself, selects the new columns and records an undo step.
        """
        if self.anything_selected(exclude_rows = True, exclude_cells = True):
            selcols = self.get_selected_cols()
            numcols = len(selcols)
            # "left" inserts before the selection, otherwise after it
            displayed_ins_col = min(selcols) if event == "left" else max(selcols) + 1
            if self.all_columns_displayed:
                data_ins_col = int(displayed_ins_col)
            else:
                if displayed_ins_col == len(self.col_positions) - 1:
                    # appending past the last displayed column: insert at end of data
                    rowlen = len(max(self.data_ref, key = len)) if self.data_ref else 0
                    data_ins_col = rowlen
                else:
                    try:
                        data_ins_col = int(self.displayed_columns[displayed_ins_col])
                    except:
                        data_ins_col = int(self.displayed_columns[displayed_ins_col - 1])
        else:
            # nothing selected: append a single column at the end
            numcols = 1
            displayed_ins_col = len(self.col_positions) - 1
            data_ins_col = int(displayed_ins_col)
        if isinstance(self.paste_insert_column_limit, int) and self.paste_insert_column_limit < displayed_ins_col + numcols:
            # NOTE(review): this clamp looks off — presumably it should be
            # limit - (len(self.col_positions) - 1); confirm intended behavior
            numcols = self.paste_insert_column_limit - len(self.col_positions) - 1
            if numcols < 1:
                return
        if self.extra_begin_insert_cols_rc_func is not None:
            # user callback may veto the insert by raising
            try:
                self.extra_begin_insert_cols_rc_func(InsertEvent("begin_insert_columns", data_ins_col, displayed_ins_col, numcols))
            except:
                return
        saved_displayed_columns = list(self.displayed_columns)
        if not self.all_columns_displayed:
            if displayed_ins_col == len(self.col_positions) - 1:
                # rowlen was set above under the same condition
                self.displayed_columns += list(range(rowlen, rowlen + numcols))
            else:
                if displayed_ins_col > len(self.displayed_columns) - 1:
                    adj_ins = displayed_ins_col - 1
                else:
                    adj_ins = displayed_ins_col
                # splice the new data column numbers in and shift the tail
                part1 = self.displayed_columns[:adj_ins]
                part2 = list(range(self.displayed_columns[adj_ins], self.displayed_columns[adj_ins] + numcols + 1))
                part3 = [] if displayed_ins_col > len(self.displayed_columns) - 1 else [cn + numcols for cn in islice(self.displayed_columns, adj_ins + 1, None)]
                self.displayed_columns = (part1 +
                                          part2 +
                                          part3)
        self.insert_col_positions(idx = displayed_ins_col,
                                  widths = numcols,
                                  deselect_all = True)
        # shift every option dict keyed at/after the insertion point
        self.cell_options = {(rn, cn if cn < data_ins_col else cn + numcols): t2 for (rn, cn), t2 in self.cell_options.items()}
        self.col_options = {cn if cn < data_ins_col else cn + numcols: t for cn, t in self.col_options.items()}
        self.CH.cell_options = {cn if cn < data_ins_col else cn + numcols: t for cn, t in self.CH.cell_options.items()}
        if self.my_hdrs and isinstance(self.my_hdrs, list):
            try:
                self.my_hdrs[data_ins_col:data_ins_col] = list(repeat("", numcols))
            except:
                pass
        if self.row_positions == [0] and not self.data_ref:
            # empty sheet: create a first row to hold the new columns
            self.insert_row_position(idx = "end",
                                     height = int(self.min_rh),
                                     deselect_all = False)
            self.data_ref.append(list(repeat("", numcols)))
        else:
            for rn in range(len(self.data_ref)):
                self.data_ref[rn][data_ins_col:data_ins_col] = list(repeat("", numcols))
        self.create_selected(0, displayed_ins_col, len(self.row_positions) - 1, displayed_ins_col + numcols, "cols")
        self.create_current(0, displayed_ins_col, "col", inside = True)
        if self.undo_enabled:
            self.undo_storage.append(zlib.compress(pickle.dumps(("insert_col", {"data_col_num": data_ins_col,
                                                                                "displayed_columns": saved_displayed_columns,
                                                                                "sheet_col_num": displayed_ins_col,
                                                                                "numcols": numcols}))))
        self.refresh()
        if self.extra_end_insert_cols_rc_func is not None:
            self.extra_end_insert_cols_rc_func(InsertEvent("end_insert_columns", data_ins_col, displayed_ins_col, numcols))
    def insert_row_rc(self, event = None):
        """Right-click handler: insert row(s) above/below the selection.

        Mirrors insert_col_rc for rows: determines the insertion indexes,
        honours the paste/insert row limit, fires begin/end callbacks (a
        raising begin-callback vetoes), shifts option dicts, updates the row
        index and data, selects the new rows and records an undo step.
        """
        if self.anything_selected(exclude_columns = True, exclude_cells = True):
            selrows = self.get_selected_rows()
            numrows = len(selrows)
            # "above" inserts before the selection, otherwise after it
            stidx = min(selrows) if event == "above" else max(selrows) + 1
            posidx = int(stidx)
        else:
            # nothing selected: append a single row at the end
            selrows = [0]
            numrows = 1
            stidx = self.total_data_rows()
            posidx = len(self.row_positions) - 1
        if isinstance(self.paste_insert_row_limit, int) and self.paste_insert_row_limit < posidx + numrows:
            # NOTE(review): this clamp looks off — presumably it should be
            # limit - (len(self.row_positions) - 1); confirm intended behavior
            numrows = self.paste_insert_row_limit - len(self.row_positions) - 1
            if numrows < 1:
                return
        if self.extra_begin_insert_rows_rc_func is not None:
            # user callback may veto the insert by raising
            try:
                self.extra_begin_insert_rows_rc_func(InsertEvent("begin_insert_rows", stidx, posidx, numrows))
            except:
                return
        self.insert_row_positions(idx = posidx,
                                  heights = numrows,
                                  deselect_all = True)
        # shift every option dict keyed at/after the insertion point
        self.cell_options = {(rn if rn < posidx else rn + numrows, cn): t2 for (rn, cn), t2 in self.cell_options.items()}
        self.row_options = {rn if rn < posidx else rn + numrows: t for rn, t in self.row_options.items()}
        self.RI.cell_options = {rn if rn < posidx else rn + numrows: t for rn, t in self.RI.cell_options.items()}
        if self.my_row_index and isinstance(self.my_row_index, list):
            try:
                self.my_row_index[stidx:stidx] = list(repeat("", numrows))
            except:
                pass
        if self.col_positions == [0] and not self.data_ref:
            # empty sheet: create a first column to hold the new rows
            self.insert_col_position(idx = "end",
                                     width = None,
                                     deselect_all = False)
            self.data_ref.append([""])
        else:
            total_data_cols = self.total_data_cols()
            self.data_ref[stidx:stidx] = [list(repeat("", total_data_cols)) for rn in range(numrows)]
        self.create_selected(posidx, 0, posidx + numrows, len(self.col_positions) - 1, "rows")
        self.create_current(posidx, 0, "row", inside = True)
        if self.undo_enabled:
            self.undo_storage.append(zlib.compress(pickle.dumps(("insert_row", {"data_row_num": stidx,
                                                                                "sheet_row_num": posidx,
                                                                                "numrows": numrows}))))
        self.refresh()
        if self.extra_end_insert_rows_rc_func is not None:
            self.extra_end_insert_rows_rc_func(InsertEvent("end_insert_rows", stidx, posidx, numrows))
    def del_cols_rc(self, event = None):
        """Right-click handler: delete the selected columns.

        Fires begin/end callbacks (a raising begin-callback vetoes), pops the
        selected columns out of the data/headers (recording everything needed
        for undo when enabled), removes their positions and options, then
        renumbers the surviving option keys and displayed_columns.
        """
        seld_cols = sorted(self.get_selected_cols())
        if seld_cols:
            if self.extra_begin_del_cols_rc_func is not None:
                # user callback may veto the delete by raising
                try:
                    self.extra_begin_del_cols_rc_func(DeleteRowColumnEvent("begin_delete_columns", seld_cols))
                except:
                    return
            # seldset holds DATA column numbers for the selected displayed columns
            seldset = set(seld_cols) if self.all_columns_displayed else set(self.displayed_columns[c] for c in seld_cols)
            list_of_coords = tuple((r, c) for (r, c) in self.cell_options if c in seldset)
            if self.undo_enabled:
                undo_storage = {'deleted_cols': {},
                                'colwidths': {},
                                'deleted_hdr_values': {},
                                'selection_boxes': self.get_boxes(),
                                'displayed_columns': list(self.displayed_columns),
                                'cell_options': {k: v.copy() for k, v in self.cell_options.items()},
                                'col_options': {k: v.copy() for k, v in self.col_options.items()},
                                'CH_cell_options': {k: v.copy() for k, v in self.CH.cell_options.items()}}
            if self.all_columns_displayed:
                if self.undo_enabled:
                    # pop cells while recording them (reversed keeps indexes valid)
                    for c in reversed(seld_cols):
                        undo_storage['colwidths'][c] = self.col_positions[c + 1] - self.col_positions[c]
                        for rn in range(len(self.data_ref)):
                            if c not in undo_storage['deleted_cols']:
                                undo_storage['deleted_cols'][c] = {}
                            try:
                                undo_storage['deleted_cols'][c][rn] = self.data_ref[rn].pop(c)
                            except:
                                continue
                    if self.my_hdrs and isinstance(self.my_hdrs, list):
                        for c in reversed(seld_cols):
                            try:
                                undo_storage['deleted_hdr_values'][c] = self.my_hdrs.pop(c)
                            except:
                                continue
                else:
                    # no undo: just delete in place
                    for rn in range(len(self.data_ref)):
                        for c in reversed(seld_cols):
                            del self.data_ref[rn][c]
                    if self.my_hdrs and isinstance(self.my_hdrs, list):
                        for c in reversed(seld_cols):
                            try:
                                del self.my_hdrs[c]
                            except:
                                continue
            else:
                # displayed subset: translate displayed indexes to data columns
                if self.undo_enabled:
                    for c in reversed(seld_cols):
                        undo_storage['colwidths'][c] = self.col_positions[c + 1] - self.col_positions[c]
                        for rn in range(len(self.data_ref)):
                            if self.displayed_columns[c] not in undo_storage['deleted_cols']:
                                undo_storage['deleted_cols'][self.displayed_columns[c]] = {}
                            try:
                                undo_storage['deleted_cols'][self.displayed_columns[c]][rn] = self.data_ref[rn].pop(self.displayed_columns[c])
                            except:
                                continue
                    if self.my_hdrs and isinstance(self.my_hdrs, list):
                        for c in reversed(seld_cols):
                            try:
                                undo_storage['deleted_hdr_values'][self.displayed_columns[c]] = self.my_hdrs.pop(self.displayed_columns[c])
                            except:
                                continue
                else:
                    for rn in range(len(self.data_ref)):
                        for c in reversed(seld_cols):
                            del self.data_ref[rn][self.displayed_columns[c]]
                    if self.my_hdrs and isinstance(self.my_hdrs, list):
                        for c in reversed(seld_cols):
                            try:
                                del self.my_hdrs[self.displayed_columns[c]]
                            except:
                                continue
            if self.undo_enabled:
                self.undo_storage.append(("delete_cols", undo_storage))
            self.del_cell_options(list_of_coords)
            for c in reversed(seld_cols):
                dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                self.del_col_position(c,
                                      deselect_all = False)
                if dcol in self.col_options:
                    del self.col_options[dcol]
                if dcol in self.CH.cell_options:
                    del self.CH.cell_options[dcol]
            # renumber surviving option keys past the deleted block
            numcols = len(seld_cols)
            idx = seld_cols[-1]
            self.cell_options = {(rn, cn if cn < idx else cn - numcols): t2 for (rn, cn), t2 in self.cell_options.items()}
            self.col_options = {cn if cn < idx else cn - numcols: t for cn, t in self.col_options.items()}
            self.CH.cell_options = {cn if cn < idx else cn - numcols: t for cn, t in self.CH.cell_options.items()}
            self.deselect("allcols", redraw = False)
            self.set_current_to_last()
            if not self.all_columns_displayed:
                # drop deleted data columns, then close the numbering gaps
                self.displayed_columns = [c for c in self.displayed_columns if c not in seldset]
                for c in sorted(seldset):
                    self.displayed_columns = [dc if c > dc else dc - 1 for dc in self.displayed_columns]
            self.refresh()
            if self.extra_end_del_cols_rc_func is not None:
                self.extra_end_del_cols_rc_func(DeleteRowColumnEvent("end_delete_columns", seld_cols))
def del_cell_options(self, list_of_coords):
for r, dcol in list_of_coords:
if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)]:
self.destroy_dropdown(r, dcol)
del self.cell_options[(r, dcol)]
    def del_rows_rc(self, event = None):
        """Right-click handler: delete the selected rows.

        Fires begin/end callbacks (a raising begin-callback vetoes), pops the
        selected rows from the data and row index (recording them for undo
        when enabled), removes their positions/options and renumbers the
        surviving option keys.
        """
        seld_rows = sorted(self.get_selected_rows())
        if seld_rows:
            if self.extra_begin_del_rows_rc_func is not None:
                # user callback may veto the delete by raising
                try:
                    self.extra_begin_del_rows_rc_func(DeleteRowColumnEvent("begin_delete_rows", seld_rows))
                except:
                    return
            seldset = set(seld_rows)
            list_of_coords = tuple((r, c) for (r, c) in self.cell_options if r in seldset)
            if self.undo_enabled:
                undo_storage = {'deleted_rows': [],
                                'deleted_index_values': [],
                                'selection_boxes': self.get_boxes(),
                                'cell_options': {k: v.copy() for k, v in self.cell_options.items()},
                                'row_options': {k: v.copy() for k, v in self.row_options.items()},
                                'RI_cell_options': {k: v.copy() for k, v in self.RI.cell_options.items()}}
                # pop rows while recording (index, row data, pixel height);
                # reversed keeps the remaining indexes valid
                for r in reversed(seld_rows):
                    undo_storage['deleted_rows'].append((r, self.data_ref.pop(r), self.row_positions[r + 1] - self.row_positions[r]))
            else:
                for r in reversed(seld_rows):
                    del self.data_ref[r]
            if self.my_row_index and isinstance(self.my_row_index, list):
                if self.undo_enabled:
                    for r in reversed(seld_rows):
                        try:
                            undo_storage['deleted_index_values'].append((r, self.my_row_index.pop(r)))
                        except:
                            continue
                else:
                    for r in reversed(seld_rows):
                        try:
                            del self.my_row_index[r]
                        except:
                            continue
            if self.undo_enabled:
                self.undo_storage.append(("delete_rows", undo_storage))
            self.del_cell_options(list_of_coords)
            for r in reversed(seld_rows):
                self.del_row_position(r,
                                      deselect_all = False)
                if r in self.row_options:
                    del self.row_options[r]
                if r in self.RI.cell_options:
                    del self.RI.cell_options[r]
            # renumber surviving option keys past the deleted block
            numrows = len(seld_rows)
            idx = seld_rows[-1]
            self.cell_options = {(rn if rn < idx else rn - numrows, cn): t2 for (rn, cn), t2 in self.cell_options.items()}
            self.row_options = {rn if rn < idx else rn - numrows: t for rn, t in self.row_options.items()}
            self.RI.cell_options = {rn if rn < idx else rn - numrows: t for rn, t in self.RI.cell_options.items()}
            self.deselect("allrows", redraw = False)
            self.set_current_to_last()
            self.refresh()
            if self.extra_end_del_rows_rc_func is not None:
                self.extra_end_del_rows_rc_func(DeleteRowColumnEvent("end_delete_rows", seld_rows))
def reset_row_positions(self):
rowpos = self.default_rh[1]
self.row_positions = list(accumulate(chain([0], (rowpos for r in range(self.total_data_rows())))))
def del_row_position(self, idx, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if idx == "end" or len(self.row_positions) <= idx + 1:
del self.row_positions[-1]
else:
w = self.row_positions[idx + 1] - self.row_positions[idx]
idx += 1
del self.row_positions[idx]
self.row_positions[idx:] = [e - w for e in islice(self.row_positions, idx, len(self.row_positions))]
def del_row_positions(self, idx, numrows = 1, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if idx == "end" or len(self.row_positions) <= idx + 1:
del self.row_positions[-1]
else:
rhs = [int(b - a) for a, b in zip(self.row_positions, islice(self.row_positions, 1, len(self.row_positions)))]
rhs[idx:idx + numrows] = []
self.row_positions = list(accumulate(chain([0], (height for height in rhs))))
def insert_row_position(self, idx, height = None, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if height is None:
h = self.default_rh[1]
else:
h = height
if idx == "end" or len(self.row_positions) == idx + 1:
self.row_positions.append(self.row_positions[-1] + h)
else:
idx += 1
self.row_positions.insert(idx, self.row_positions[idx - 1] + h)
idx += 1
self.row_positions[idx:] = [e + h for e in islice(self.row_positions, idx, len(self.row_positions))]
def insert_row_positions(self, idx = "end", heights = None, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if heights is None:
h = [self.default_rh[1]]
elif isinstance(heights, int):
h = list(repeat(self.default_rh[1], heights))
else:
h = heights
if idx == "end" or len(self.row_positions) == idx + 1:
if len(h) > 1:
self.row_positions += list(accumulate(chain([self.row_positions[-1] + h[0]], islice(h, 1, None))))
else:
self.row_positions.append(self.row_positions[-1] + h[0])
else:
if len(h) > 1:
idx += 1
self.row_positions[idx:idx] = list(accumulate(chain([self.row_positions[idx - 1] + h[0]], islice(h, 1, None))))
idx += len(h)
sumh = sum(h)
self.row_positions[idx:] = [e + sumh for e in islice(self.row_positions, idx, len(self.row_positions))]
else:
h = h[0]
idx += 1
self.row_positions.insert(idx, self.row_positions[idx - 1] + h)
idx += 1
self.row_positions[idx:] = [e + h for e in islice(self.row_positions, idx, len(self.row_positions))]
def move_row_position(self, idx1, idx2):
if not len(self.row_positions) <= 2:
if idx1 < idx2:
height = self.row_positions[idx1 + 1] - self.row_positions[idx1]
self.row_positions.insert(idx2 + 1, self.row_positions.pop(idx1 + 1))
for i in range(idx1 + 1, idx2 + 1):
self.row_positions[i] -= height
self.row_positions[idx2 + 1] = self.row_positions[idx2] + height
else:
height = self.row_positions[idx1 + 1] - self.row_positions[idx1]
self.row_positions.insert(idx2 + 1, self.row_positions.pop(idx1 + 1))
for i in range(idx2 + 2, idx1 + 2):
self.row_positions[i] += height
self.row_positions[idx2 + 1] = self.row_positions[idx2] + height
def move_col_position(self, idx1, idx2):
if not len(self.col_positions) <= 2:
if idx1 < idx2:
width = self.col_positions[idx1 + 1] - self.col_positions[idx1]
self.col_positions.insert(idx2 + 1, self.col_positions.pop(idx1 + 1))
for i in range(idx1 + 1, idx2 + 1):
self.col_positions[i] -= width
self.col_positions[idx2 + 1] = self.col_positions[idx2] + width
else:
width = self.col_positions[idx1 + 1] - self.col_positions[idx1]
self.col_positions.insert(idx2 + 1, self.col_positions.pop(idx1 + 1))
for i in range(idx2 + 2, idx1 + 2):
self.col_positions[i] += width
self.col_positions[idx2 + 1] = self.col_positions[idx2] + width
def GetLinesHeight(self, n, old_method = False):
if old_method:
if n == 1:
return int(self.min_rh)
else:
return int(self.fl_ins) + (self.xtra_lines_increment * n) - 2
else:
x = self.txt_measure_canvas.create_text(0, 0,
text = "\n".join(["j^|" for lines in range(n)]) if n > 1 else "j^|",
font = self.my_font)
b = self.txt_measure_canvas.bbox(x)
h = b[3] - b[1] + 5
self.txt_measure_canvas.delete(x)
return h
def GetHdrLinesHeight(self, n, old_method = False):
if old_method:
if n == 1:
return int(self.hdr_min_rh)
else:
return int(self.hdr_fl_ins) + (self.hdr_xtra_lines_increment * n) - 2
else:
x = self.txt_measure_canvas.create_text(0, 0,
text = "\n".join(["j^|" for lines in range(n)]) if n > 1 else "j^|",
font = self.my_hdr_font)
b = self.txt_measure_canvas.bbox(x)
h = b[3] - b[1] + 5
self.txt_measure_canvas.delete(x)
return h
def display_columns(self, indexes = None, enable = None, reset_col_positions = True, set_col_positions = True, deselect_all = True):
if indexes is None and enable is None:
if self.all_columns_displayed:
return list(range(len(self.col_positions) - 1))
else:
return self.displayed_columns
if deselect_all:
self.deselect("all")
if indexes != self.displayed_columns:
self.undo_storage = deque(maxlen = self.max_undos)
if indexes is not None:
self.displayed_columns = sorted(indexes)
if enable and not self.data_ref:
self.all_columns_displayed = False
elif enable and list(range(len(max(self.data_ref, key = len)))) != self.displayed_columns:
self.all_columns_displayed = False
else:
self.all_columns_displayed = True
if reset_col_positions:
self.reset_col_positions()
    def headers(self, newheaders = None, index = None, reset_col_positions = False, show_headers_if_not_sheet = True, redraw = False):
        """Get or set header values.

        Setter (``newheaders`` not None): a list/tuple replaces the headers
        (tuples are copied to a list); an int selects a data row to act as
        the header; any other value with an int ``index`` sets a single
        header cell (extending the list as needed); any other iterable with
        ``index`` None replaces the headers wholesale.

        Getter (``newheaders`` None): returns the single header at ``index``
        when it is an int, the whole header object when ``index`` is None.
        NOTE(review): a non-None, non-int ``index`` falls through and
        returns None — confirm that is intended.
        """
        if newheaders is not None:
            if isinstance(newheaders, (list, tuple)):
                self.my_hdrs = list(newheaders) if isinstance(newheaders, tuple) else newheaders
            elif isinstance(newheaders, int):
                # int means: use that data row as the header
                self.my_hdrs = int(newheaders)
            elif isinstance(self.my_hdrs, list) and isinstance(index, int):
                # single-cell assignment; pad the header list up to index first
                if len(self.my_hdrs) <= index:
                    self.my_hdrs.extend(list(repeat("", index - len(self.my_hdrs) + 1)))
                self.my_hdrs[index] = f"{newheaders}"
            elif not isinstance(newheaders, (list, tuple, int)) and index is None:
                try:
                    self.my_hdrs = list(newheaders)
                except:
                    # NOTE(review): message is missing its closing parenthesis
                    raise ValueError("New header must be iterable or int (use int to use a row as the header")
            if reset_col_positions:
                self.reset_col_positions()
            elif show_headers_if_not_sheet and isinstance(self.my_hdrs, list) and (self.col_positions == [0] or not self.col_positions):
                # sheet has no columns yet: create default-width positions for the headers
                colpos = int(self.default_cw)
                if self.all_columns_displayed:
                    self.col_positions = list(accumulate(chain([0], (colpos for c in range(len(self.my_hdrs))))))
                else:
                    self.col_positions = list(accumulate(chain([0], (colpos for c in range(len(self.displayed_columns))))))
            if redraw:
                self.refresh()
        else:
            if index is not None:
                if isinstance(index, int):
                    return self.my_hdrs[index]
            else:
                return self.my_hdrs
    def row_index(self, newindex = None, index = None, reset_row_positions = False, show_index_if_not_sheet = True, redraw = False):
        """Get or set row-index values.

        Setter (``newindex`` not None): a list/tuple replaces the index
        (tuples are copied to a list); an int selects a data column to act
        as the index; otherwise an int ``index`` sets a single entry, and
        any other iterable with ``index`` None replaces the index wholesale.

        Getter (``newindex`` None): returns the entry at ``index`` when it
        is an int, the whole index object when ``index`` is None.
        """
        if newindex is not None:
            if not self.my_row_index and not isinstance(self.my_row_index, int):
                # first time an index is set: make the index column visible
                self.RI.set_width(self.RI.default_width, set_TL = True)
            if isinstance(newindex, (list, tuple)):
                self.my_row_index = list(newindex) if isinstance(newindex, tuple) else newindex
            elif isinstance(newindex, int):
                # int means: use that data column as the index
                self.my_row_index = int(newindex)
            elif isinstance(index, int):
                # NOTE(review): unlike headers(), this neither checks that
                # my_row_index is a list nor extends it — an out-of-range
                # index or int-mode index raises here; confirm intended
                self.my_row_index[index] = f"{newindex}"
            elif not isinstance(newindex, (list, tuple, int)) and index is None:
                try:
                    self.my_row_index = list(newindex)
                except:
                    # NOTE(review): message is missing its closing parenthesis
                    raise ValueError("New index must be iterable or int (use int to use a column as the index")
            if reset_row_positions:
                self.reset_row_positions()
            elif show_index_if_not_sheet and isinstance(self.my_row_index, list) and (self.row_positions == [0] or not self.row_positions):
                # sheet has no rows yet: create default-height positions for the index
                rowpos = self.default_rh[1]
                self.row_positions = list(accumulate(chain([0], (rowpos for c in range(len(self.my_row_index))))))
            if redraw:
                self.refresh()
        else:
            if index is not None:
                if isinstance(index, int):
                    return self.my_row_index[index]
            else:
                return self.my_row_index
def total_data_cols(self, include_headers = True):
h_total = 0
d_total = 0
if include_headers:
if isinstance(self.my_hdrs, list):
h_total = len(self.my_hdrs)
try:
d_total = len(max(self.data_ref, key = len))
except:
pass
return h_total if h_total > d_total else d_total
def total_data_rows(self):
i_total = 0
d_total = 0
if isinstance(self.my_row_index, list):
i_total = len(self.my_row_index)
d_total = len(self.data_ref)
return i_total if i_total > d_total else d_total
def data_dimensions(self, total_rows = None, total_columns = None):
if total_rows is None and total_columns is None:
return self.total_data_rows(), self.total_data_cols()
if total_rows is not None:
if len(self.data_ref) < total_rows:
if total_columns is None:
total_data_cols = self.total_data_cols()
self.data_ref.extend([list(repeat("", total_data_cols)) for r in range(total_rows - len(self.data_ref))])
else:
self.data_ref.extend([list(repeat("", total_columns)) for r in range(total_rows - len(self.data_ref))])
else:
self.data_ref[total_rows:] = []
if total_columns is not None:
self.data_ref[:] = [r[:total_columns] if len(r) > total_columns else r + list(repeat("", total_columns - len(r))) for r in self.data_ref]
def equalize_data_row_lengths(self, include_header = False):
total_columns = self.total_data_cols()
if include_header and total_columns > len(self.my_hdrs):
self.my_hdrs[:] = self.my_hdrs + list(repeat("", total_columns - len(self.my_hdrs)))
self.data_ref[:] = [r + list(repeat("", total_columns - len(r))) if total_columns > len(r) else r for r in self.data_ref]
return total_columns
def get_canvas_visible_area(self):
return self.canvasx(0), self.canvasy(0), self.canvasx(self.winfo_width()), self.canvasy(self.winfo_height())
def get_visible_rows(self, y1, y2):
start_row = bisect.bisect_left(self.row_positions, y1)
end_row = bisect.bisect_right(self.row_positions, y2)
if not y2 >= self.row_positions[-1]:
end_row += 1
return start_row, end_row
def get_visible_columns(self, x1, x2):
start_col = bisect.bisect_left(self.col_positions, x1)
end_col = bisect.bisect_right(self.col_positions, x2)
if not x2 >= self.col_positions[-1]:
end_col += 1
return start_col, end_col
def redraw_highlight_get_text_fg(self, r, c, fc, fr, sc, sr, c_2_, c_3_, c_4_, selected_cells, actual_selected_rows, actual_selected_cols, dcol, can_width):
    """Resolve the text foreground colour for cell (r, c) and draw its
    highlight rectangle when one is configured.

    Highlight options are consulted at cell, then row, then column level
    (first match wins).  When the cell is both highlighted and selected,
    the rectangle fill is the per-channel average of the highlight colour
    and the relevant selection background: c_2_ (selected cells), c_3_
    (selected columns), c_4_ (selected rows) — each an (r, g, b) tuple.
    fc/fr and sc/sr are the cell's first/second canvas x and y edges;
    dcol is the index into the underlying (undisplayed) data columns.

    Returns (tf, redrawn): the text colour to use and whether a highlight
    rectangle was drawn (callers use this to skip a dropdown outline).
    """
    redrawn = False
    # ________________________ CELL IS HIGHLIGHTED AND IN SELECTED CELLS ________________________
    if (r, dcol) in self.cell_options and 'highlight' in self.cell_options[(r, dcol)] and (r, c) in selected_cells:
        # highlight option tuple layout: [0] bg colour, [1] fg colour ([2] span-to-window flag on rows)
        tf = self.table_selected_cells_fg if self.cell_options[(r, dcol)]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.cell_options[(r, dcol)]['highlight'][1]
        if self.cell_options[(r, dcol)]['highlight'][0] is not None:
            # Named colours are resolved through Color_Map_ to a "#rrggbb" hex string.
            c_1 = self.cell_options[(r, dcol)]['highlight'][0] if self.cell_options[(r, dcol)]['highlight'][0].startswith("#") else Color_Map_[self.cell_options[(r, dcol)]['highlight'][0]]
            # Blend highlight colour with the selected-cells background, channel by channel.
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_2_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_2_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_2_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    elif r in self.row_options and 'highlight' in self.row_options[r] and (r, c) in selected_cells:
        tf = self.table_selected_cells_fg if self.row_options[r]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.row_options[r]['highlight'][1]
        if self.row_options[r]['highlight'][0] is not None:
            c_1 = self.row_options[r]['highlight'][0] if self.row_options[r]['highlight'][0].startswith("#") else Color_Map_[self.row_options[r]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_2_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_2_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_2_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi",
                                            can_width = can_width if self.row_options[r]['highlight'][2] else None)
    elif dcol in self.col_options and 'highlight' in self.col_options[dcol] and (r, c) in selected_cells:
        tf = self.table_selected_cells_fg if self.col_options[dcol]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.col_options[dcol]['highlight'][1]
        if self.col_options[dcol]['highlight'][0] is not None:
            c_1 = self.col_options[dcol]['highlight'][0] if self.col_options[dcol]['highlight'][0].startswith("#") else Color_Map_[self.col_options[dcol]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_2_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_2_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_2_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    # ________________________ CELL IS HIGHLIGHTED AND IN SELECTED ROWS ________________________
    elif (r, dcol) in self.cell_options and 'highlight' in self.cell_options[(r, dcol)] and r in actual_selected_rows:
        tf = self.table_selected_rows_fg if self.cell_options[(r, dcol)]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.cell_options[(r, dcol)]['highlight'][1]
        if self.cell_options[(r, dcol)]['highlight'][0] is not None:
            c_1 = self.cell_options[(r, dcol)]['highlight'][0] if self.cell_options[(r, dcol)]['highlight'][0].startswith("#") else Color_Map_[self.cell_options[(r, dcol)]['highlight'][0]]
            # Blend with the selected-rows background (c_4_).
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_4_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_4_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_4_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    elif r in self.row_options and 'highlight' in self.row_options[r] and r in actual_selected_rows:
        tf = self.table_selected_rows_fg if self.row_options[r]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.row_options[r]['highlight'][1]
        if self.row_options[r]['highlight'][0] is not None:
            c_1 = self.row_options[r]['highlight'][0] if self.row_options[r]['highlight'][0].startswith("#") else Color_Map_[self.row_options[r]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_4_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_4_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_4_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi",
                                            can_width = can_width if self.row_options[r]['highlight'][2] else None)
    elif dcol in self.col_options and 'highlight' in self.col_options[dcol] and r in actual_selected_rows:
        tf = self.table_selected_rows_fg if self.col_options[dcol]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.col_options[dcol]['highlight'][1]
        if self.col_options[dcol]['highlight'][0] is not None:
            c_1 = self.col_options[dcol]['highlight'][0] if self.col_options[dcol]['highlight'][0].startswith("#") else Color_Map_[self.col_options[dcol]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_4_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_4_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_4_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    # ________________________ CELL IS HIGHLIGHTED AND IN SELECTED COLUMNS ________________________
    elif (r, dcol) in self.cell_options and 'highlight' in self.cell_options[(r, dcol)] and c in actual_selected_cols:
        tf = self.table_selected_columns_fg if self.cell_options[(r, dcol)]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.cell_options[(r, dcol)]['highlight'][1]
        if self.cell_options[(r, dcol)]['highlight'][0] is not None:
            c_1 = self.cell_options[(r, dcol)]['highlight'][0] if self.cell_options[(r, dcol)]['highlight'][0].startswith("#") else Color_Map_[self.cell_options[(r, dcol)]['highlight'][0]]
            # Blend with the selected-columns background (c_3_).
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_3_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_3_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_3_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    elif r in self.row_options and 'highlight' in self.row_options[r] and c in actual_selected_cols:
        tf = self.table_selected_columns_fg if self.row_options[r]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.row_options[r]['highlight'][1]
        if self.row_options[r]['highlight'][0] is not None:
            c_1 = self.row_options[r]['highlight'][0] if self.row_options[r]['highlight'][0].startswith("#") else Color_Map_[self.row_options[r]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_3_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_3_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_3_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi",
                                            can_width = can_width if self.row_options[r]['highlight'][2] else None)
    elif dcol in self.col_options and 'highlight' in self.col_options[dcol] and c in actual_selected_cols:
        tf = self.table_selected_columns_fg if self.col_options[dcol]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.col_options[dcol]['highlight'][1]
        if self.col_options[dcol]['highlight'][0] is not None:
            c_1 = self.col_options[dcol]['highlight'][0] if self.col_options[dcol]['highlight'][0].startswith("#") else Color_Map_[self.col_options[dcol]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_3_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_3_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_3_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    # ________________________ CELL IS HIGHLIGHTED AND NOT SELECTED ________________________
    elif (r, dcol) in self.cell_options and 'highlight' in self.cell_options[(r, dcol)] and (r, c) not in selected_cells and r not in actual_selected_rows and c not in actual_selected_cols:
        # Unselected highlight: use the configured colours directly, no blending.
        tf = self.table_fg if self.cell_options[(r, dcol)]['highlight'][1] is None else self.cell_options[(r, dcol)]['highlight'][1]
        if self.cell_options[(r, dcol)]['highlight'][0] is not None:
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = self.cell_options[(r, dcol)]['highlight'][0],
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    elif r in self.row_options and 'highlight' in self.row_options[r] and (r, c) not in selected_cells and r not in actual_selected_rows and c not in actual_selected_cols:
        tf = self.table_fg if self.row_options[r]['highlight'][1] is None else self.row_options[r]['highlight'][1]
        if self.row_options[r]['highlight'][0] is not None:
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = self.row_options[r]['highlight'][0],
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi",
                                            can_width = can_width if self.row_options[r]['highlight'][2] else None)
    elif dcol in self.col_options and 'highlight' in self.col_options[dcol] and (r, c) not in selected_cells and r not in actual_selected_rows and c not in actual_selected_cols:
        tf = self.table_fg if self.col_options[dcol]['highlight'][1] is None else self.col_options[dcol]['highlight'][1]
        if self.col_options[dcol]['highlight'][0] is not None:
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = self.col_options[dcol]['highlight'][0],
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    # ________________________ CELL IS JUST SELECTED ________________________
    elif (r, c) in selected_cells:
        tf = self.table_selected_cells_fg
    elif r in actual_selected_rows:
        tf = self.table_selected_rows_fg
    elif c in actual_selected_cols:
        tf = self.table_selected_columns_fg
    # ________________________ CELL IS NOT SELECTED ________________________
    else:
        tf = self.table_fg
    return tf, redrawn
def redraw_highlight(self, x1, y1, x2, y2, fill, outline, tag, can_width = None):
    """Draw a highlight rectangle, recycling a hidden canvas item from
    self.hidd_high when one is available instead of creating a new one.

    When an outline is requested the rectangle is expanded by one pixel at
    the top-left; when can_width is given the rectangle is stretched to the
    right by that amount (used for row highlights that span the window).
    The used item id is registered in self.disp_high.  Always returns True.
    """
    left = x1 - 1 if outline else x1
    top = y1 - 1 if outline else y1
    right = x2 if can_width is None else x2 + can_width
    if self.hidd_high:
        # Reuse a previously-hidden rectangle.
        item, was_showing = self.hidd_high.popitem()
        self.coords(item, left, top, right, y2)
        if was_showing:
            self.itemconfig(item, fill = fill, outline = outline)
        else:
            self.itemconfig(item, fill = fill, outline = outline, tag = tag, state = "normal")
        self.lift(item)
    else:
        item = self.create_rectangle(left, top, right, y2, fill = fill, outline = outline, tag = tag)
    self.disp_high[item] = True
    return True
def redraw_dropdown(self, x1, y1, x2, y2, fill, outline, tag, draw_outline = True, draw_arrow = True):
    """Draw a dropdown cell decoration: an optional box outline plus a
    downward arrow near the cell's right edge.

    The arrow canvas item is recycled from self.hidd_dropdown when
    possible and registered in self.disp_dropdown.
    """
    if draw_outline:
        self.redraw_highlight(x1 + 1, y1 + 1, x2, y2, fill = "", outline = self.table_fg, tag = tag)
    if not draw_arrow:
        return
    quarter_txt_h = floor(self.half_txt_h / 2)
    mid_y = y1 + quarter_txt_h + 5
    tip_y = mid_y - quarter_txt_h + 2
    # Arrow polyline: top-left tip -> bottom-middle -> top-right tip.
    left_x = x2 - self.txt_h + 1
    bottom_x, bottom_y = x2 - self.half_txt_h - 1, mid_y + self.half_txt_h - 4
    right_x = x2 - 3
    points = (left_x, tip_y, bottom_x, bottom_y, right_x, tip_y)
    if self.hidd_dropdown:
        item, was_showing = self.hidd_dropdown.popitem()
        self.coords(item, points)
        if was_showing:
            self.itemconfig(item, fill = fill)
        else:
            self.itemconfig(item, fill = fill, tag = tag, state = "normal")
        self.lift(item)
    else:
        item = self.create_line(points, fill = fill, width = 2, capstyle = tk.ROUND, joinstyle = tk.ROUND, tag = tag)
    self.disp_dropdown[item] = True
def get_checkbox_points(self, x1, y1, x2, y2, radius = 4):
    """Return the flat point list for a rounded-rectangle polygon spanning
    (x1, y1)-(x2, y2).

    Corner points are duplicated so that Tk's smoothed polygon rendering
    produces rounded corners of the given radius.
    """
    corners = [
        (x1 + radius, y1), (x1 + radius, y1),
        (x2 - radius, y1), (x2 - radius, y1),
        (x2, y1),
        (x2, y1 + radius), (x2, y1 + radius),
        (x2, y2 - radius), (x2, y2 - radius),
        (x2, y2),
        (x2 - radius, y2), (x2 - radius, y2),
        (x1 + radius, y2), (x1 + radius, y2),
        (x1, y2),
        (x1, y2 - radius), (x1, y2 - radius),
        (x1, y1 + radius), (x1, y1 + radius),
        (x1, y1),
    ]
    return [coord for point in corners for coord in point]
def redraw_checkbox(self, r, dcol, x1, y1, x2, y2, fill, outline, tag, draw_check = False):
    """Draw a checkbox for cell (r, dcol) spanning (x1, y1)-(x2, y2).

    The outer rounded box is drawn unfilled (note: the ``fill`` and
    ``outline`` arguments are deliberately swapped when applied to it, so
    ``fill`` becomes the box border colour).  When ``draw_check`` is
    truthy, an inner filled box plus an X (two crossed lines in the
    widget background colour) is drawn inside it.  All canvas items are
    recycled from self.hidd_checkbox / self.hidd_grid when available and
    registered in self.disp_checkbox / self.disp_grid.
    """
    points = self.get_checkbox_points(x1, y1, x2, y2)
    if self.hidd_checkbox:
        t, sh = self.hidd_checkbox.popitem()
        self.coords(t, points)
        if sh:
            self.itemconfig(t, fill = outline, outline = fill)
        else:
            self.itemconfig(t, fill = outline, outline = fill, tag = tag, state = "normal")
        self.lift(t)
    else:
        t = self.create_polygon(points, fill = outline, outline = fill, tag = tag, smooth = True)
    self.disp_checkbox[t] = True
    if draw_check:
        # draw filled box
        # Inset the checked box slightly inside the outer border.
        x1 = x1 + 2
        y1 = y1 + 2
        x2 = x2 - 1
        y2 = y2 - 1
        points = self.get_checkbox_points(x1, y1, x2, y2)
        if self.hidd_checkbox:
            t, sh = self.hidd_checkbox.popitem()
            self.coords(t, points)
            if sh:
                self.itemconfig(t, fill = fill, outline = outline)
            else:
                self.itemconfig(t, fill = fill, outline = outline, tag = tag, state = "normal")
            self.lift(t)
        else:
            t = self.create_polygon(points, fill = fill, outline = outline, tag = tag, smooth = True)
        self.disp_checkbox[t] = True
        # draw one line of X
        # X lines use the widget background colour so they "cut through" the filled box.
        if self.hidd_grid:
            t, sh = self.hidd_grid.popitem()
            self.coords(t, x1 + 2, y1 + 2, x2 - 2, y2 - 2)
            if sh:
                self.itemconfig(t, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2)
            else:
                self.itemconfig(t, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2, tag = tag, state = "normal")
            self.lift(t)
        else:
            t = self.create_line(x1 + 2, y1 + 2, x2 - 2, y2 - 2, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2, tag = tag)
        self.disp_grid[t] = True
        # draw other line of X
        if self.hidd_grid:
            t, sh = self.hidd_grid.popitem()
            self.coords(t, x2 - 2, y1 + 2, x1 + 2, y2 - 2)
            if sh:
                self.itemconfig(t, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2)
            else:
                self.itemconfig(t, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2, tag = tag, state = "normal")
            self.lift(t)
        else:
            t = self.create_line(x2 - 2, y1 + 2, x1 + 2, y2 - 2, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2, tag = tag)
        self.disp_grid[t] = True
def main_table_redraw_grid_and_text(self, redraw_header = False, redraw_row_index = False, redraw_table = True):
    """Redraw the visible portion of the table.

    Manages scrollbar visibility, draws grid lines, cell highlights,
    dropdown/checkbox decorations and cell text (left/right/center
    aligned, multi-line, clipped to the cell width), then hides unused
    recycled canvas items and optionally triggers the header and row
    index redraws.  Only rows/columns intersecting the visible canvas
    area are drawn; previously-drawn items are recycled through the
    hidd_* / disp_* dicts rather than destroyed.

    Returns True on success, False when the Tk calls fail (the broad
    excepts guard against the widget being resized/destroyed mid-redraw).
    """
    last_col_line_pos = self.col_positions[-1] + 1
    last_row_line_pos = self.row_positions[-1] + 1
    try:
        can_width = self.winfo_width()
        can_height = self.winfo_height()
        self.configure(scrollregion = (0,
                                       0,
                                       last_col_line_pos + self.empty_horizontal,
                                       last_row_line_pos + self.empty_vertical))
        # Show/hide the horizontal scrollbar depending on whether the
        # content fits the canvas; the "> 45" guards avoid gridding a
        # scrollbar into a nearly-collapsed widget.
        if can_width >= last_col_line_pos + self.empty_horizontal and self.parentframe.xscroll_showing:
            self.parentframe.xscroll.grid_forget()
            self.parentframe.xscroll_showing = False
        elif can_width < last_col_line_pos + self.empty_horizontal and not self.parentframe.xscroll_showing and not self.parentframe.xscroll_disabled and can_height > 45:
            self.parentframe.xscroll.grid(row = 2, column = 1, columnspan = 2, sticky = "nswe")
            self.parentframe.xscroll_showing = True
        if can_height >= last_row_line_pos + self.empty_vertical and self.parentframe.yscroll_showing:
            self.parentframe.yscroll.grid_forget()
            self.parentframe.yscroll_showing = False
        elif can_height < last_row_line_pos + self.empty_vertical and not self.parentframe.yscroll_showing and not self.parentframe.yscroll_disabled and can_width > 45:
            self.parentframe.yscroll.grid(row = 1, column = 2, sticky = "nswe")
            self.parentframe.yscroll_showing = True
    except:
        return False
    # Compute the visible row/column window in canvas coordinates.
    y2 = self.canvasy(can_height)
    end_row = bisect.bisect_right(self.row_positions, y2)
    if not y2 >= self.row_positions[-1]:
        end_row += 1
    if redraw_row_index and self.show_index:
        self.RI.auto_set_index_width(end_row - 1)
    x1 = self.canvasx(0)
    y1 = self.canvasy(0)
    x2 = self.canvasx(can_width)
    start_row = bisect.bisect_left(self.row_positions, y1)
    self.row_width_resize_bbox = (x1, y1, x1 + 2, y2)
    self.header_height_resize_bbox = (x1 + 6, y1, x2, y1 + 2)
    # Move everything drawn last time into the "hidden" pools; items are
    # recycled from there as this redraw progresses.
    self.hidd_text.update(self.disp_text)
    self.disp_text = {}
    self.hidd_high.update(self.disp_high)
    self.disp_high = {}
    self.hidd_grid.update(self.disp_grid)
    self.disp_grid = {}
    self.hidd_dropdown.update(self.disp_dropdown)
    self.disp_dropdown = {}
    self.hidd_checkbox.update(self.disp_checkbox)
    self.disp_checkbox = {}
    start_col = bisect.bisect_left(self.col_positions, x1)
    end_col = bisect.bisect_right(self.col_positions, x2)
    if not x2 >= self.col_positions[-1]:
        end_col += 1
    if last_col_line_pos > x2:
        x_stop = x2
    else:
        x_stop = last_col_line_pos
    if last_row_line_pos > y2:
        y_stop = y2
    else:
        y_stop = last_row_line_pos
    # sb: lower clip bound for cell bottoms.
    sb = y2 + 2
    if self.show_horizontal_grid:
        for r in range(start_row - 1, end_row):
            y = self.row_positions[r]
            if self.hidd_grid:
                t, sh = self.hidd_grid.popitem()
                self.coords(t, x1, y, x2 + can_width if self.horizontal_grid_to_end_of_window else x_stop, y)
                if sh:
                    self.itemconfig(t, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1)
                else:
                    self.itemconfig(t, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1, state = "normal")
                self.disp_grid[t] = True
            else:
                self.disp_grid[self.create_line(x1, y, x2 + can_width if self.horizontal_grid_to_end_of_window else x_stop, y, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1, tag = "g")] = True
    if self.show_vertical_grid:
        for c in range(start_col - 1, end_col):
            x = self.col_positions[c]
            if self.hidd_grid:
                t, sh = self.hidd_grid.popitem()
                self.coords(t, x, y1, x, y2 + can_height if self.vertical_grid_to_end_of_window else y_stop)
                if sh:
                    self.itemconfig(t, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1)
                else:
                    self.itemconfig(t, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1, state = "normal")
                self.disp_grid[t] = True
            else:
                self.disp_grid[self.create_line(x, y1, x, y2 + can_height if self.vertical_grid_to_end_of_window else y_stop, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1, tag = "g")] = True
    if start_row > 0:
        start_row -= 1
    if start_col > 0:
        start_col -= 1
    end_row -= 1
    # RGB tuples of the selection backgrounds, used by
    # redraw_highlight_get_text_fg to blend with highlight colours.
    c_2 = self.table_selected_cells_bg if self.table_selected_cells_bg.startswith("#") else Color_Map_[self.table_selected_cells_bg]
    c_2_ = (int(c_2[1:3], 16), int(c_2[3:5], 16), int(c_2[5:], 16))
    c_3 = self.table_selected_columns_bg if self.table_selected_columns_bg.startswith("#") else Color_Map_[self.table_selected_columns_bg]
    c_3_ = (int(c_3[1:3], 16), int(c_3[3:5], 16), int(c_3[5:], 16))
    c_4 = self.table_selected_rows_bg if self.table_selected_rows_bg.startswith("#") else Color_Map_[self.table_selected_rows_bg]
    c_4_ = (int(c_4[1:3], 16), int(c_4[3:5], 16), int(c_4[5:], 16))
    rows_ = tuple(range(start_row, end_row))
    selected_cells, selected_rows, selected_cols, actual_selected_rows, actual_selected_cols = self.get_redraw_selections((start_row, start_col, end_row, end_col - 1))
    if redraw_table:
        for c in range(start_col, end_col - 1):
            for r in rows_:
                fr = self.row_positions[r]
                sr = self.row_positions[r + 1]
                # Skip rows too short to show a line of text; clip to the
                # bottom of the visible area.
                if sr - fr < self.txt_h:
                    continue
                if sr > sb:
                    sr = sb
                fc = self.col_positions[c]
                sc = self.col_positions[c + 1]
                # dcol: index into the underlying data columns, which can
                # differ from c when some columns are hidden.
                if self.all_columns_displayed:
                    dcol = c
                else:
                    dcol = self.displayed_columns[c]
                tf, dd_drawn = self.redraw_highlight_get_text_fg(r, c, fc, fr, sc, sr, c_2_, c_3_, c_4_, selected_cells, actual_selected_rows, actual_selected_cols, dcol, can_width)
                # Alignment priority: cell option > row option > column option > table default.
                if (r, dcol) in self.cell_options and 'align' in self.cell_options[(r, dcol)]:
                    cell_alignment = self.cell_options[(r, dcol)]['align']
                elif r in self.row_options and 'align' in self.row_options[r]:
                    cell_alignment = self.row_options[r]['align']
                elif dcol in self.col_options and 'align' in self.col_options[dcol]:
                    cell_alignment = self.col_options[dcol]['align']
                else:
                    cell_alignment = self.align
                # x: text anchor position; mw: max text width for the cell.
                if cell_alignment == "w":
                    x = fc + 5
                    if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)]:
                        mw = sc - fc - self.txt_h - 2
                        self.redraw_dropdown(fc, fr, sc, self.row_positions[r + 1], fill = tf, outline = tf, tag = "dd", draw_outline = not dd_drawn, draw_arrow = mw >= 5)
                    else:
                        mw = sc - fc - 5
                elif cell_alignment == "e":
                    if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)]:
                        mw = sc - fc - self.txt_h - 2
                        x = sc - 5 - self.txt_h
                        self.redraw_dropdown(fc, fr, sc, self.row_positions[r + 1], fill = tf, outline = tf, tag = "dd", draw_outline = not dd_drawn, draw_arrow = mw >= 5)
                    else:
                        mw = sc - fc - 5
                        x = sc - 5
                elif cell_alignment == "center":
                    stop = fc + 5
                    if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)]:
                        mw = sc - fc - self.txt_h - 2
                        x = fc + ceil((sc - fc - self.txt_h) / 2)
                        self.redraw_dropdown(fc, fr, sc, self.row_positions[r + 1], fill = tf, outline = tf, tag = "dd", draw_outline = not dd_drawn, draw_arrow = mw >= 5)
                    else:
                        mw = sc - fc - 1
                        x = fc + floor((sc - fc) / 2)
                if (r, dcol) in self.cell_options and 'checkbox' in self.cell_options[(r, dcol)]:
                    if mw > self.txt_h + 2:
                        # Shift the text right to make room for the checkbox.
                        box_w = fc + self.txt_h + 2 - fc
                        if cell_alignment == "w":
                            x = x + box_w
                        elif cell_alignment == "center":
                            x = x + floor(box_w / 2)
                        mw = mw - box_w
                        self.redraw_checkbox(r,
                                             dcol,
                                             fc + 2,
                                             fr + 2,
                                             fc + 2 + self.txt_h + 2,
                                             fr + 2 + self.txt_h + 2,
                                             fill = tf if self.cell_options[(r, dcol)]['checkbox']['state'] == "normal" else self.table_grid_fg,
                                             outline = "", tag = "cb", draw_check = self.data_ref[r][dcol])
                try:
                    if cell_alignment == "w":
                        if x > x2 or mw <= 5:
                            continue
                        # lns: the cell's text split into display lines.
                        if (r, dcol) in self.cell_options and 'checkbox' in self.cell_options[(r, dcol)]:
                            # NOTE(review): the nested double quotes inside this f-string
                            # require Python >= 3.12 (PEP 701) — confirm the target version.
                            lns = self.cell_options[(r, dcol)]['checkbox']['text'].split("\n") if isinstance(self.cell_options[(r, dcol)]['checkbox']['text'], str) else f"{self.cell_options[(r, dcol)]["checkbox"]["text"]}".split("\n")
                        else:
                            lns = self.data_ref[r][dcol].split("\n") if isinstance(self.data_ref[r][dcol], str) else f"{self.data_ref[r][dcol]}".split("\n")
                        y = fr + self.fl_ins
                        if y + self.half_txt_h - 1 > y1:
                            txt = lns[0]
                            if self.hidd_text:
                                t, sh = self.hidd_text.popitem()
                                self.coords(t, x, y)
                                if sh:
                                    self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "w")
                                else:
                                    self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "w", state = "normal")
                            else:
                                t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "w", tag = "t")
                            self.disp_text[t] = True
                            wd = self.bbox(t)
                            wd = wd[2] - wd[0]
                            if wd > mw:
                                # Estimate the character cut then trim one char at a
                                # time until the rendered width fits.
                                nl = int(len(txt) * (mw / wd))
                                self.itemconfig(t, text = txt[:nl])
                                wd = self.bbox(t)
                                while wd[2] - wd[0] > mw:
                                    nl -= 1
                                    self.dchars(t, nl)
                                    wd = self.bbox(t)
                        if len(lns) > 1:
                            # Draw any further lines, starting from the first one
                            # that can be visible in the scrolled view.
                            stl = int((y1 - y) / self.xtra_lines_increment) - 1
                            if stl < 1:
                                stl = 1
                            y += (stl * self.xtra_lines_increment)
                            if y + self.half_txt_h - 1 < sr:
                                for i in range(stl, len(lns)):
                                    txt = lns[i]
                                    if self.hidd_text:
                                        t, sh = self.hidd_text.popitem()
                                        self.coords(t, x, y)
                                        if sh:
                                            self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "w")
                                        else:
                                            self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "w", state = "normal")
                                    else:
                                        t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "w", tag = "t")
                                    self.disp_text[t] = True
                                    wd = self.bbox(t)
                                    wd = wd[2] - wd[0]
                                    if wd > mw:
                                        nl = int(len(txt) * (mw / wd))
                                        self.itemconfig(t, text = txt[:nl])
                                        wd = self.bbox(t)
                                        while wd[2] - wd[0] > mw:
                                            nl -= 1
                                            self.dchars(t, nl)
                                            wd = self.bbox(t)
                                    y += self.xtra_lines_increment
                                    if y + self.half_txt_h - 1 > sr:
                                        break
                    elif cell_alignment == "e":
                        if fc + 5 > x2 or mw <= 5:
                            continue
                        lns = self.data_ref[r][dcol].split("\n") if isinstance(self.data_ref[r][dcol], str) else f"{self.data_ref[r][dcol]}".split("\n")
                        y = fr + self.fl_ins
                        if y + self.half_txt_h - 1 > y1:
                            txt = lns[0]
                            if self.hidd_text:
                                t, sh = self.hidd_text.popitem()
                                self.coords(t, x, y)
                                if sh:
                                    self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "e")
                                else:
                                    self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "e", state = "normal")
                            else:
                                t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "e", tag = "t")
                            self.disp_text[t] = True
                            wd = self.bbox(t)
                            wd = wd[2] - wd[0]
                            if wd > mw:
                                # Right-aligned: trim from the left end instead.
                                txt = txt[len(txt) - int(len(txt) * (mw / wd)):]
                                self.itemconfig(t, text = txt)
                                wd = self.bbox(t)
                                while wd[2] - wd[0] > mw:
                                    txt = txt[1:]
                                    self.itemconfig(t, text = txt)
                                    wd = self.bbox(t)
                        if len(lns) > 1:
                            stl = int((y1 - y) / self.xtra_lines_increment) - 1
                            if stl < 1:
                                stl = 1
                            y += (stl * self.xtra_lines_increment)
                            if y + self.half_txt_h - 1 < sr:
                                for i in range(stl, len(lns)):
                                    txt = lns[i]
                                    if self.hidd_text:
                                        t, sh = self.hidd_text.popitem()
                                        self.coords(t, x, y)
                                        if sh:
                                            self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "e")
                                        else:
                                            self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "e", state = "normal")
                                    else:
                                        t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "e", tag = "t")
                                    self.disp_text[t] = True
                                    wd = self.bbox(t)
                                    wd = wd[2] - wd[0]
                                    if wd > mw:
                                        txt = txt[len(txt) - int(len(txt) * (mw / wd)):]
                                        self.itemconfig(t, text = txt)
                                        wd = self.bbox(t)
                                        while wd[2] - wd[0] > mw:
                                            txt = txt[1:]
                                            self.itemconfig(t, text = txt)
                                            wd = self.bbox(t)
                                    y += self.xtra_lines_increment
                                    if y + self.half_txt_h - 1 > sr:
                                        break
                    elif cell_alignment == "center":
                        if stop > x2 or mw <= 5:
                            continue
                        lns = self.data_ref[r][dcol].split("\n") if isinstance(self.data_ref[r][dcol], str) else f"{self.data_ref[r][dcol]}".split("\n")
                        txt = lns[0]
                        y = fr + self.fl_ins
                        if y + self.half_txt_h - 1 > y1:
                            if self.hidd_text:
                                t, sh = self.hidd_text.popitem()
                                self.coords(t, x, y)
                                if sh:
                                    self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "center")
                                else:
                                    self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "center", state = "normal")
                            else:
                                t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "center", tag = "t")
                            self.disp_text[t] = True
                            wd = self.bbox(t)
                            wd = wd[2] - wd[0]
                            if wd > mw:
                                # Centred: trim symmetrically, then alternate ends
                                # via the cycling index until it fits.
                                tl = len(txt)
                                tmod = ceil((tl - int(tl * (mw / wd))) / 2)
                                txt = txt[tmod - 1:-tmod]
                                self.itemconfig(t, text = txt)
                                wd = self.bbox(t)
                                self.c_align_cyc = cycle(self.centre_alignment_text_mod_indexes)
                                while wd[2] - wd[0] > mw:
                                    txt = txt[next(self.c_align_cyc)]
                                    self.itemconfig(t, text = txt)
                                    wd = self.bbox(t)
                                self.coords(t, x, y)
                        if len(lns) > 1:
                            stl = int((y1 - y) / self.xtra_lines_increment) - 1
                            if stl < 1:
                                stl = 1
                            y += (stl * self.xtra_lines_increment)
                            if y + self.half_txt_h - 1 < sr:
                                for i in range(stl, len(lns)):
                                    txt = lns[i]
                                    if self.hidd_text:
                                        t, sh = self.hidd_text.popitem()
                                        self.coords(t, x, y)
                                        if sh:
                                            self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "center")
                                        else:
                                            self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "center", state = "normal")
                                    else:
                                        t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "center", tag = "t")
                                    self.disp_text[t] = True
                                    wd = self.bbox(t)
                                    wd = wd[2] - wd[0]
                                    if wd > mw:
                                        tl = len(txt)
                                        tmod = ceil((tl - int(tl * (mw / wd))) / 2)
                                        txt = txt[tmod - 1:-tmod]
                                        self.itemconfig(t, text = txt)
                                        wd = self.bbox(t)
                                        self.c_align_cyc = cycle(self.centre_alignment_text_mod_indexes)
                                        while wd[2] - wd[0] > mw:
                                            txt = txt[next(self.c_align_cyc)]
                                            self.itemconfig(t, text = txt)
                                            wd = self.bbox(t)
                                        self.coords(t, x, y)
                                    y += self.xtra_lines_increment
                                    if y + self.half_txt_h - 1 > sr:
                                        break
                except:
                    # Per-cell failures (e.g. ragged data rows) skip that cell only.
                    continue
    try:
        self.tag_raise("t")
        # Hide every recycled item that was not reused this redraw.
        for t, sh in self.hidd_text.items():
            if sh:
                self.itemconfig(t, state = "hidden")
                self.hidd_text[t] = False
        for t, sh in self.hidd_high.items():
            if sh:
                self.itemconfig(t, state = "hidden")
                self.hidd_high[t] = False
        for t, sh in self.hidd_grid.items():
            if sh:
                self.itemconfig(t, state = "hidden")
                self.hidd_grid[t] = False
        for t, sh in self.hidd_dropdown.items():
            if sh:
                self.itemconfig(t, state = "hidden")
                self.hidd_dropdown[t] = False
        for t, sh in self.hidd_checkbox.items():
            if sh:
                self.itemconfig(t, state = "hidden")
                self.hidd_checkbox[t] = False
        if redraw_header and self.show_header:
            self.CH.redraw_grid_and_text(last_col_line_pos, x1, x_stop, start_col, end_col, selected_cols, actual_selected_rows, actual_selected_cols)
        if redraw_row_index and self.show_index:
            self.RI.redraw_grid_and_text(last_row_line_pos, y1, y_stop, start_row, end_row + 1, y2, x1, x_stop, selected_rows, actual_selected_cols, actual_selected_rows)
        if self.show_selected_cells_border:
            self.tag_raise("CellSelectBorder")
            self.tag_raise("Current_Inside")
            self.tag_raise("Current_Outside")
            self.tag_raise("RowSelectBorder")
            self.tag_raise("ColSelectBorder")
    except:
        return False
    return True
def get_all_selection_items(self):
    """Return a sorted list of every canvas item id belonging to a
    selection box or the currently-selected box."""
    items = []
    for tag in ("CellSelectFill", "RowSelectFill", "ColSelectFill", "Current_Inside", "Current_Outside"):
        items.extend(self.find_withtag(tag))
    return sorted(items)
def get_boxes(self):
    """Return {(r1, c1, r2, c2): kind} for every selection box on the
    canvas, where kind is "cells"/"rows"/"cols" or, for the currently
    selected box, "<type>_inside" / "<type>_outside".

    Box coordinates are parsed from the item's second tag
    ("r1_c1_r2_c2"); the current box's type comes from its third tag.
    """
    kind_by_tag = {"CellSelectFill": "cells", "RowSelectFill": "rows", "ColSelectFill": "cols"}
    boxes = {}
    for item in self.get_all_selection_items():
        tags = self.gettags(item)
        kind = kind_by_tag.get(tags[0])
        if kind is None:
            if tags[0] == "Current_Inside":
                kind = f"{tags[2]}_inside"
            elif tags[0] == "Current_Outside":
                kind = f"{tags[2]}_outside"
        if kind is not None:
            boxes[tuple(int(e) for e in tags[1].split("_") if e)] = kind
    return boxes
def reselect_from_get_boxes(self, boxes):
    """Recreate selection boxes from a {coords: kind} mapping, the format
    produced by get_boxes()."""
    current_kinds = ("cell_inside", "cell_outside", "row_inside", "row_outside", "col_outside", "col_inside")
    for coords, kind in boxes.items():
        if kind in ("cells", "rows", "cols"):
            self.create_selected(coords[0], coords[1], coords[2], coords[3], kind)
        elif kind in current_kinds:  # the currently selected box
            type_, _, position = kind.partition("_")
            self.create_current(coords[0], coords[1], type_ = type_, inside = position == "inside")
def delete_selection_rects(self, cells = True, rows = True, cols = True, delete_current = True):
    """Delete selection rectangles from the table, row index and header
    canvases.

    Returns {(r1, c1, r2, c2): kind} describing every box that was
    deleted (the currently-selected box is reported as "cells").
    """
    deleted_boxes = {}

    def _record(items, kind):
        # Collect each item's box coordinates from its second tag.
        for item in items:
            tags = self.gettags(item)
            if tags:
                deleted_boxes[tuple(int(e) for e in tags[1].split("_") if e)] = kind

    def _wipe(*tags):
        # Remove the tagged items from all three canvases.
        self.delete(*tags)
        self.RI.delete(*tags)
        self.CH.delete(*tags)

    if cells:
        _record(self.find_withtag("CellSelectFill"), "cells")
        _wipe("CellSelectFill", "CellSelectBorder")
    if rows:
        _record(self.find_withtag("RowSelectFill"), "rows")
        _wipe("RowSelectFill", "RowSelectBorder")
    if cols:
        _record(self.find_withtag("ColSelectFill"), "cols")
        _wipe("ColSelectFill", "ColSelectBorder")
    if delete_current:
        _record(chain(self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside")), "cells")
        _wipe("Current_Inside", "Current_Outside")
    return deleted_boxes
def currently_selected(self, get_coords = False):
    """Return the currently selected position.

    Empty tuple when nothing is current.  For a cell: (row, col).  For a
    whole row/column: ("row", r) / ("column", c), or the numeric
    (r, 0) / (0, c) form when get_coords is True.
    """
    items = self.find_withtag("Current_Inside") + self.find_withtag("Current_Outside")
    if not items:
        return tuple()
    tags = self.gettags(items[0])
    box = tuple(int(e) for e in tags[1].split("_") if e)
    kind = tags[2]
    if kind == "cell":
        return (box[0], box[1])
    if kind == "col":
        return (0, box[1]) if get_coords else ("column", box[1])
    if kind == "row":
        return (box[0], 0) if get_coords else ("row", box[0])
def get_tags_of_current(self):
items = self.find_withtag("Current_Inside") + self.find_withtag("Current_Outside")
if items:
return self.gettags(items[0])
else:
return tuple()
    def create_current(self, r, c, type_ = "cell", inside = False): # cell, col or row
        """Draw the 'current' (active) cell box at (r, c) on the table,
        row-index and header canvases; returns the table rectangle id.

        inside = True tags the box Current_Inside (it sits inside a larger
        selection), otherwise Current_Outside.
        """
        r1, c1, r2, c2 = r, c, r + 1, c + 1
        self.delete("Current_Inside", "Current_Outside")
        self.RI.delete("Current_Inside", "Current_Outside")
        self.CH.delete("Current_Inside", "Current_Outside")
        # an empty sheet has positions == [0]; collapse the box to nothing
        if self.col_positions == [0]:
            c1 = 0
            c2 = 0
        if self.row_positions == [0]:
            r1 = 0
            r2 = 0
        if inside:
            tagr = ("Current_Inside", f"{r1}_{c1}_{r2}_{c2}", type_)
        else:
            tagr = ("Current_Outside", f"{r1}_{c1}_{r2}_{c2}", type_)
        if self.show_selected_cells_border:
            # bordered style: outline-only rectangle raised above fills
            b = self.create_rectangle(self.col_positions[c1] + 1, self.row_positions[r1] + 1, self.col_positions[c2], self.row_positions[r2],
                                      fill = "",
                                      outline = self.table_selected_cells_border_fg,
                                      width = 2,
                                      tags = tagr)
            self.tag_raise(b)
        else:
            # borderless style: filled rectangle lowered beneath cell text
            b = self.create_rectangle(self.col_positions[c1], self.row_positions[r1], self.col_positions[c2], self.row_positions[r2],
                                      fill = self.table_selected_cells_bg,
                                      outline = "",
                                      tags = tagr)
            self.tag_lower(b)
        ri = self.RI.create_rectangle(0, self.row_positions[r1], self.RI.current_width - 1, self.row_positions[r2],
                                      fill = self.RI.index_selected_cells_bg,
                                      outline = "",
                                      tags = tagr)
        ch = self.CH.create_rectangle(self.col_positions[c1], 0, self.col_positions[c2], self.CH.current_height - 1,
                                      fill = self.CH.header_selected_cells_bg,
                                      outline = "",
                                      tags = tagr)
        self.RI.tag_lower(ri)
        self.CH.tag_lower(ch)
        return b
    def set_current_to_last(self):
        """If nothing is 'current', promote the most recently created
        selection box to current; return its tags, or () if no boxes exist."""
        if not self.currently_selected():
            # canvas item ids increase with creation order, so the max id
            # is the last-created selection fill
            items = sorted(self.find_withtag("CellSelectFill") + self.find_withtag("RowSelectFill") + self.find_withtag("ColSelectFill"))
            if items:
                last = self.gettags(items[-1])
                r1, c1, r2, c2 = tuple(int(e) for e in last[1].split("_") if e)
                if last[0] == "CellSelectFill":
                    return self.gettags(self.create_current(r1, c1, "cell", inside = True))
                elif last[0] == "RowSelectFill":
                    return self.gettags(self.create_current(r1, c1, "row", inside = True))
                elif last[0] == "ColSelectFill":
                    return self.gettags(self.create_current(r1, c1, "col", inside = True))
        return tuple()
def delete_current(self):
self.delete("Current_Inside", "Current_Outside")
self.RI.delete("Current_Inside", "Current_Outside")
self.CH.delete("Current_Inside", "Current_Outside")
    def create_selected(self, r1 = None, c1 = None, r2 = None, c2 = None, type_ = "cells", taglower = True):
        """Draw a selection box spanning rows r1:r2, cols c1:c2 of the given
        type_ ("cells", "rows" or "cols") on table, index and header.

        Returns (fill_rectangle_id, border_rectangle_id_or_None).
        NOTE(review): the taglower parameter is immediately overwritten with
        the fill tag name below, so its truthy value only gates the
        tag_lower calls — confirm that is intended.
        NOTE(review): with the default None coords and an active current
        cell, the comparisons below would raise TypeError; callers appear
        to always pass real coords — verify.
        """
        currently_selected = self.currently_selected()
        if currently_selected and isinstance(currently_selected[0], int):
            # re-tag the current box as 'inside' when it falls within this box
            if (currently_selected[0] >= r1 and
                currently_selected[1] >= c1 and
                currently_selected[0] < r2 and
                currently_selected[1] < c2):
                self.create_current(currently_selected[0], currently_selected[1], type_ = "cell", inside = True)
        if type_ == "cells":
            tagr = ("CellSelectFill", f"{r1}_{c1}_{r2}_{c2}")
            tagb = ("CellSelectBorder", f"{r1}_{c1}_{r2}_{c2}")
            taglower = "CellSelectFill"
            mt_bg = self.table_selected_cells_bg
            mt_border_col = self.table_selected_cells_border_fg
        elif type_ == "rows":
            tagr = ("RowSelectFill", f"{r1}_{c1}_{r2}_{c2}")
            tagb = ("RowSelectBorder", f"{r1}_{c1}_{r2}_{c2}")
            taglower = "RowSelectFill"
            mt_bg = self.table_selected_rows_bg
            mt_border_col = self.table_selected_rows_border_fg
        elif type_ == "cols":
            tagr = ("ColSelectFill", f"{r1}_{c1}_{r2}_{c2}")
            tagb = ("ColSelectBorder", f"{r1}_{c1}_{r2}_{c2}")
            taglower = "ColSelectFill"
            mt_bg = self.table_selected_columns_bg
            mt_border_col = self.table_selected_columns_border_fg
        # main table fill (optionally stretched to window edge for rows)
        r = self.create_rectangle(self.col_positions[c1],
                                  self.row_positions[r1],
                                  self.canvasx(self.winfo_width()) if self.selected_rows_to_end_of_window else self.col_positions[c2],
                                  self.row_positions[r2],
                                  fill = mt_bg,
                                  outline = "",
                                  tags = tagr)
        self.RI.create_rectangle(0,
                                 self.row_positions[r1],
                                 self.RI.current_width - 1,
                                 self.row_positions[r2],
                                 fill = self.RI.index_selected_rows_bg if type_ == "rows" else self.RI.index_selected_cells_bg,
                                 outline = "",
                                 tags = tagr)
        self.CH.create_rectangle(self.col_positions[c1],
                                 0,
                                 self.col_positions[c2],
                                 self.CH.current_height - 1,
                                 fill = self.CH.header_selected_columns_bg if type_ == "cols" else self.CH.header_selected_cells_bg,
                                 outline = "",
                                 tags = tagr)
        if self.show_selected_cells_border:
            b = self.create_rectangle(self.col_positions[c1], self.row_positions[r1], self.col_positions[c2], self.row_positions[r2],
                                      fill = "",
                                      outline = mt_border_col,
                                      tags = tagb)
        else:
            b = None
        # keep fills beneath text and the current-box markers beneath fills
        if taglower:
            self.tag_lower(taglower)
            self.RI.tag_lower(taglower)
            self.CH.tag_lower(taglower)
        self.RI.tag_lower("Current_Inside")
        self.RI.tag_lower("Current_Outside")
        self.RI.tag_lower("CellSelectFill")
        self.CH.tag_lower("Current_Inside")
        self.CH.tag_lower("Current_Outside")
        self.CH.tag_lower("CellSelectFill")
        return r, b
    def recreate_all_selection_boxes(self):
        """Delete and redraw every selection box and the current box,
        clamping boxes to the sheet's current row/column extents.

        Used after row/column counts or sizes change so box pixel coords
        match the new positions.
        """
        for item in chain(self.find_withtag("CellSelectFill"),
                          self.find_withtag("RowSelectFill"),
                          self.find_withtag("ColSelectFill"),
                          self.find_withtag("Current_Inside"),
                          self.find_withtag("Current_Outside")):
            full_tags = self.gettags(item)
            if full_tags:
                type_ = full_tags[0]
                r1, c1, r2, c2 = tuple(int(e) for e in full_tags[1].split("_") if e)
                # the coordinate string doubles as a tag, so this removes
                # the box from all three canvases at once
                self.delete(f"{r1}_{c1}_{r2}_{c2}")
                self.RI.delete(f"{r1}_{c1}_{r2}_{c2}")
                self.CH.delete(f"{r1}_{c1}_{r2}_{c2}")
                # drop boxes that now start past the end of the sheet
                if r1 >= len(self.row_positions) - 1 or c1 >= len(self.col_positions) - 1:
                    continue
                # clamp end coords to the new sheet size
                if r2 > len(self.row_positions) - 1:
                    r2 = len(self.row_positions) - 1
                if c2 > len(self.col_positions) - 1:
                    c2 = len(self.col_positions) - 1
                if type_.startswith("CellSelect"):
                    self.create_selected(r1, c1, r2, c2, "cells")
                elif type_.startswith("RowSelect"):
                    self.create_selected(r1, c1, r2, c2, "rows")
                elif type_.startswith("ColSelect"):
                    self.create_selected(r1, c1, r2, c2, "cols")
                elif type_.startswith("Current"):
                    if type_ == "Current_Inside":
                        self.create_current(r1, c1, full_tags[2], inside = True)
                    elif type_ == "Current_Outside":
                        self.create_current(r1, c1, full_tags[2], inside = False)
        # restore z-order: fills under text, current markers lowest
        self.tag_lower("RowSelectFill")
        self.RI.tag_lower("RowSelectFill")
        self.CH.tag_lower("RowSelectFill")
        self.tag_lower("ColSelectFill")
        self.RI.tag_lower("ColSelectFill")
        self.CH.tag_lower("ColSelectFill")
        self.tag_lower("CellSelectFill")
        self.RI.tag_lower("CellSelectFill")
        self.CH.tag_lower("CellSelectFill")
        self.RI.tag_lower("Current_Inside")
        self.RI.tag_lower("Current_Outside")
        self.CH.tag_lower("Current_Inside")
        self.CH.tag_lower("Current_Outside")
        if not self.show_selected_cells_border:
            self.tag_lower("Current_Outside")
def GetColCoords(self, c, sel = False):
last_col_line_pos = self.col_positions[-1] + 1
last_row_line_pos = self.row_positions[-1] + 1
x1 = self.col_positions[c]
x2 = self.col_positions[c + 1]
y1 = self.canvasy(0)
y2 = self.canvasy(self.winfo_height())
if last_row_line_pos < y2:
y2 = last_col_line_pos
if sel:
return x1, y1 + 1, x2, y2
else:
return x1, y1, x2, y2
def GetRowCoords(self, r, sel = False):
last_col_line_pos = self.col_positions[-1] + 1
x1 = self.canvasx(0)
x2 = self.canvasx(self.winfo_width())
if last_col_line_pos < x2:
x2 = last_col_line_pos
y1 = self.row_positions[r]
y2 = self.row_positions[r + 1]
if sel:
return x1, y1 + 1, x2, y2
else:
return x1, y1, x2, y2
    def get_redraw_selections(self, within_range):
        """Collect which cells/rows/cols inside the visible window
        within_range = (r1, c1, r2, c2) are selected, for redraw styling.

        Returns (scells, srows, scols, ac_srows, ac_scols) where the
        ac_* sets hold only rows/cols from full row/column selections.
        NOTE(review): the `(a >= x or b <= y)` guards below are satisfied
        by almost any box; the code effectively always intersects the box
        with the window via the clamping that follows — confirm intended.
        """
        scells = set()
        srows = set()
        scols = set()
        ac_srows = set()
        ac_scols = set()
        within_r1 = within_range[0]
        within_c1 = within_range[1]
        within_r2 = within_range[2]
        within_c2 = within_range[3]
        # full-row selection boxes
        for item in self.find_withtag("RowSelectFill"):
            r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
            if (r1 >= within_r1 or
                r2 <= within_r2) or (within_r1 >= r1 and within_r2 <= r2):
                if r1 > within_r1:
                    start_row = r1
                else:
                    start_row = within_r1
                if r2 < within_r2:
                    end_row = r2
                else:
                    end_row = within_r2
                srows.update(set(range(start_row, end_row)))
                ac_srows.update(set(range(start_row, end_row)))
        # rows covered by the current-cell box
        for item in chain(self.find_withtag("Current_Outside"), self.find_withtag("Current_Inside")):
            r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
            if (r1 >= within_r1 or
                r2 <= within_r2):
                if r1 > within_r1:
                    start_row = r1
                else:
                    start_row = within_r1
                if r2 < within_r2:
                    end_row = r2
                else:
                    end_row = within_r2
                srows.update(set(range(start_row, end_row)))
        # full-column selection boxes
        for item in self.find_withtag("ColSelectFill"):
            r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
            if (c1 >= within_c1 or
                c2 <= within_c2) or (within_c1 >= c1 and within_c2 <= c2):
                if c1 > within_c1:
                    start_col = c1
                else:
                    start_col = within_c1
                if c2 < within_c2:
                    end_col = c2
                else:
                    end_col = within_c2
                scols.update(set(range(start_col, end_col)))
                ac_scols.update(set(range(start_col, end_col)))
        # columns covered by the (outside) current-cell box
        for item in self.find_withtag("Current_Outside"):
            r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
            if (c1 >= within_c1 or
                c2 <= within_c2):
                if c1 > within_c1:
                    start_col = c1
                else:
                    start_col = within_c1
                if c2 < within_c2:
                    end_col = c2
                else:
                    end_col = within_c2
                scols.update(set(range(start_col, end_col)))
        # cell-range selection boxes (plus the current box when borderless)
        if not self.show_selected_cells_border:
            iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("Current_Outside"))
        else:
            iterable = self.find_withtag("CellSelectFill")
        for item in iterable:
            tags = self.gettags(item)
            r1, c1, r2, c2 = tuple(int(e) for e in tags[1].split("_") if e)
            if (r1 >= within_r1 or
                c1 >= within_c1 or
                r2 <= within_r2 or
                c2 <= within_c2) or (within_c1 >= c1 and within_c2 <= c2) or (within_r1 >= r1 and within_r2 <= r2):
                if r1 > within_r1:
                    start_row = r1
                else:
                    start_row = within_r1
                if c1 > within_c1:
                    start_col = c1
                else:
                    start_col = within_c1
                if r2 < within_r2:
                    end_row = r2
                else:
                    end_row = within_r2
                if c2 < within_c2:
                    end_col = c2
                else:
                    end_col = within_c2
                colsr = tuple(range(start_col, end_col))
                rowsr = tuple(range(start_row, end_row))
                scells.update(set(product(rowsr, colsr)))
                srows.update(set(range(start_row, end_row)))
                scols.update(set(range(start_col, end_col)))
        return scells, srows, scols, ac_srows, ac_scols
def get_selected_min_max(self):
min_x = float("inf")
min_y = float("inf")
max_x = 0
max_y = 0
for item in chain(self.find_withtag("CellSelectFill"),
self.find_withtag("RowSelectFill"),
self.find_withtag("ColSelectFill"),
self.find_withtag("Current_Inside"),
self.find_withtag("Current_Outside")):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if r1 < min_y:
min_y = r1
if c1 < min_x:
min_x = c1
if r2 > max_y:
max_y = r2
if c2 > max_x:
max_x = c2
if min_x != float("inf") and min_y != float("inf") and max_x > 0 and max_y > 0:
return min_y, min_x, max_y, max_x
else:
return None, None, None, None
    def get_selected_rows(self, get_cells = False, within_range = None, get_cells_as_rows = False):
        """Return selected row indices from full-row selection boxes.

        get_cells: return (row, col) pairs over whole rows instead.
        within_range: optional (r1, r2) to clamp results to.
        get_cells_as_rows: also include rows of individually selected cells.
        """
        s = set()
        if within_range is not None:
            within_r1 = within_range[0]
            within_r2 = within_range[1]
        if get_cells:
            if within_range is None:
                for item in self.find_withtag("RowSelectFill"):
                    r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
                    # expand each row to every displayed column
                    s.update(set(product(range(r1, r2), range(0, len(self.col_positions) - 1))))
                if get_cells_as_rows:
                    s.update(self.get_selected_cells())
            else:
                for item in self.find_withtag("RowSelectFill"):
                    r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
                    if (r1 >= within_r1 or
                        r2 <= within_r2):
                        if r1 > within_r1:
                            start_row = r1
                        else:
                            start_row = within_r1
                        if r2 < within_r2:
                            end_row = r2
                        else:
                            end_row = within_r2
                        s.update(set(product(range(start_row, end_row), range(0, len(self.col_positions) - 1))))
                if get_cells_as_rows:
                    s.update(self.get_selected_cells(within_range = (within_r1, 0, within_r2, len(self.col_positions) - 1)))
        else:
            if within_range is None:
                for item in self.find_withtag("RowSelectFill"):
                    r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
                    s.update(set(range(r1, r2)))
                if get_cells_as_rows:
                    s.update(set(tup[0] for tup in self.get_selected_cells()))
            else:
                for item in self.find_withtag("RowSelectFill"):
                    r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
                    if (r1 >= within_r1 or
                        r2 <= within_r2):
                        if r1 > within_r1:
                            start_row = r1
                        else:
                            start_row = within_r1
                        if r2 < within_r2:
                            end_row = r2
                        else:
                            end_row = within_r2
                        s.update(set(range(start_row, end_row)))
                if get_cells_as_rows:
                    s.update(set(tup[0] for tup in self.get_selected_cells(within_range = (within_r1, 0, within_r2, len(self.col_positions) - 1))))
        return s
def get_selected_cols(self, get_cells = False, within_range = None, get_cells_as_cols = False):
s = set()
if within_range is not None:
within_c1 = within_range[0]
within_c2 = within_range[1]
if get_cells:
if within_range is None:
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
s.update(set(product(range(c1, c2), range(0, len(self.row_positions) - 1))))
if get_cells_as_cols:
s.update(self.get_selected_cells())
else:
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if (c1 >= within_c1 or
c2 <= within_c2):
if c1 > within_c1:
start_col = c1
else:
start_col = within_c1
if c2 < within_c2:
end_col = c2
else:
end_col = within_c2
s.update(set(product(range(start_col, end_col), range(0, len(self.row_positions) - 1))))
if get_cells_as_cols:
s.update(self.get_selected_cells(within_range = (0, within_c1, len(self.row_positions) - 1, within_c2)))
else:
if within_range is None:
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
s.update(set(range(c1, c2)))
if get_cells_as_cols:
s.update(set(tup[1] for tup in self.get_selected_cells()))
else:
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if (c1 >= within_c1 or
c2 <= within_c2):
if c1 > within_c1:
start_col = c1
else:
start_col = within_c1
if c2 < within_c2:
end_col = c2
else:
end_col = within_c2
s.update(set(range(start_col, end_col)))
if get_cells_as_cols:
s.update(set(tup[0] for tup in self.get_selected_cells(within_range = (0, within_c1, len(self.row_positions) - 1, within_c2))))
return s
    def get_selected_cells(self, get_rows = False, get_cols = False, within_range = None):
        """Return a set of (row, col) pairs for selected cells.

        get_rows / get_cols additionally expand full row / column
        selection boxes into their cells.
        within_range: optional (r1, c1, r2, c2) clamp.
        """
        s = set()
        if within_range is not None:
            within_r1 = within_range[0]
            within_c1 = within_range[1]
            within_r2 = within_range[2]
            within_c2 = within_range[3]
        # pick which box types to expand into cells
        if get_cols and get_rows:
            iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("RowSelectFill"), self.find_withtag("ColSelectFill"), self.find_withtag("Current_Outside"))
        elif get_rows and not get_cols:
            iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("RowSelectFill"), self.find_withtag("Current_Outside"))
        elif get_cols and not get_rows:
            iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("ColSelectFill"), self.find_withtag("Current_Outside"))
        else:
            iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("Current_Outside"))
        if within_range is None:
            for item in iterable:
                r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
                s.update(set(product(range(r1, r2), range(c1, c2))))
        else:
            for item in iterable:
                r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
                # clamp the box to the requested window before expanding
                if (r1 >= within_r1 or
                    c1 >= within_c1 or
                    r2 <= within_r2 or
                    c2 <= within_c2):
                    if r1 > within_r1:
                        start_row = r1
                    else:
                        start_row = within_r1
                    if c1 > within_c1:
                        start_col = c1
                    else:
                        start_col = within_c1
                    if r2 < within_r2:
                        end_row = r2
                    else:
                        end_row = within_r2
                    if c2 < within_c2:
                        end_col = c2
                    else:
                        end_col = within_c2
                    s.update(set(product(range(start_row, end_row), range(start_col, end_col))))
        return s
def get_all_selection_boxes(self):
return tuple(tuple(int(e) for e in self.gettags(item)[1].split("_") if e) for item in chain(self.find_withtag("CellSelectFill"),
self.find_withtag("RowSelectFill"),
self.find_withtag("ColSelectFill"),
self.find_withtag("Current_Outside")))
def get_all_selection_boxes_with_types(self):
boxes = []
for item in sorted(self.find_withtag("CellSelectFill") + self.find_withtag("RowSelectFill") + self.find_withtag("ColSelectFill") + self.find_withtag("Current_Outside")):
tags = self.gettags(item)
if tags:
if tags[0].startswith(("Cell", "Current")):
boxes.append((tuple(int(e) for e in tags[1].split("_") if e), "cells"))
elif tags[0].startswith("Row"):
boxes.append((tuple(int(e) for e in tags[1].split("_") if e), "rows"))
elif tags[0].startswith("Col"):
boxes.append((tuple(int(e) for e in tags[1].split("_") if e), "cols"))
return boxes
def all_selected(self):
for r1, c1, r2, c2 in self.get_all_selection_boxes():
if not r1 and not c1 and r2 == len(self.row_positions) - 1 and c2 == len(self.col_positions) - 1:
return True
return False
def cell_selected(self, r, c, inc_cols = False, inc_rows = False):
if not inc_cols and not inc_rows:
iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside"))
elif inc_cols and not inc_rows:
iterable = chain(self.find_withtag("ColSelectFill"), self.find_withtag("CellSelectFill"), self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside"))
elif not inc_cols and inc_rows:
iterable = chain(self.find_withtag("RowSelectFill"), self.find_withtag("CellSelectFill"), self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside"))
elif inc_cols and inc_rows:
iterable = chain(self.find_withtag("RowSelectFill"), self.find_withtag("ColSelectFill"), self.find_withtag("CellSelectFill"), self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside"))
for item in iterable:
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if r1 <= r and c1 <= c and r2 > r and c2 > c:
return True
return False
def col_selected(self, c):
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if c1 <= c and c2 > c:
return True
return False
def row_selected(self, r):
for item in self.find_withtag("RowSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if r1 <= r and r2 > r:
return True
return False
def anything_selected(self, exclude_columns = False, exclude_rows = False, exclude_cells = False):
if exclude_columns and exclude_rows and not exclude_cells:
if self.find_withtag("CellSelectFill") or self.find_withtag("Current_Outside"):
return True
elif exclude_columns and exclude_cells and not exclude_rows:
if self.find_withtag("RowSelectFill"):
return True
elif exclude_rows and exclude_cells and not exclude_columns:
if self.find_withtag("ColSelectFill"):
return True
elif exclude_columns and not exclude_rows and not exclude_cells:
if self.find_withtag("CellSelectFill") or self.find_withtag("RowSelectFill") or self.find_withtag("Current_Outside"):
return True
elif exclude_rows and not exclude_columns and not exclude_cells:
if self.find_withtag("CellSelectFill") or self.find_withtag("ColSelectFill") or self.find_withtag("Current_Outside"):
return True
elif exclude_cells and not exclude_columns and not exclude_rows:
if self.find_withtag("RowSelectFill") or self.find_withtag("ColSelectFill"):
return True
elif not exclude_columns and not exclude_rows and not exclude_cells:
if self.find_withtag("CellSelectFill") or self.find_withtag("RowSelectFill") or self.find_withtag("ColSelectFill") or self.find_withtag("Current_Outside"):
return True
return False
def hide_current(self):
for item in chain(self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside")):
self.itemconfig(item, state = "hidden")
def show_current(self):
for item in chain(self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside")):
self.itemconfig(item, state = "normal")
    def open_cell(self, event = None):
        """Open the currently selected cell for editing, or activate its
        dropdown/checkbox; no-op if nothing is selected, an editor is
        already open, or the cell is read-only."""
        if not self.anything_selected() or self.text_editor_id is not None:
            return
        currently_selected = self.currently_selected(get_coords = True)
        if not currently_selected:
            return
        y1 = int(currently_selected[0])
        x1 = int(currently_selected[1])
        # dcol is the underlying data column for displayed column x1
        dcol = x1 if self.all_columns_displayed else self.displayed_columns[x1]
        if (
            ((y1, dcol) in self.cell_options and 'readonly' in self.cell_options[(y1, dcol)]) or
            (dcol in self.col_options and 'readonly' in self.col_options[dcol]) or
            (y1 in self.row_options and 'readonly' in self.row_options[y1])
            ):
            return
        elif (y1, dcol) in self.cell_options and ('dropdown' in self.cell_options[(y1, dcol)] or 'checkbox' in self.cell_options[(y1, dcol)]):
            # dropdown/checkbox cells only activate for specific events
            if self.event_opens_dropdown_or_checkbox(event):
                if 'dropdown' in self.cell_options[(y1, dcol)]:
                    self.display_dropdown_window(y1, x1, event = event)
                elif 'checkbox' in self.cell_options[(y1, dcol)]:
                    self._click_checkbox(y1, x1, dcol)
        else:
            self.edit_cell_(event, r = y1, c = x1, dropdown = False)
def event_opens_dropdown_or_checkbox(self, event = None):
if event is None:
return False
elif ((hasattr(event, 'keysym') and event.keysym == 'Return') or # enter or f2
(hasattr(event, 'keysym') and event.keysym == 'F2') or
(event is not None and hasattr(event, 'keycode') and event.keycode == "??" and hasattr(event, 'num') and event.num == 1) or
(hasattr(event, 'keysym') and event.keysym == 'BackSpace')):
return True
else:
return False
# c is displayed col
    # c is displayed col
    def edit_cell_(self, event = None, r = None, c = None, dropdown = False):
        """Begin editing cell (r, c) based on the triggering event.

        BackSpace starts with empty text; Return/F2/click/None start with
        the cell's current value; a printable character starts with that
        character.  Returns True if an editor was opened, else False.
        """
        text = None
        extra_func_key = "??"
        if event is not None and (hasattr(event, 'keysym') and event.keysym == 'BackSpace'):
            extra_func_key = "BackSpace"
            text = ""
        elif event is None or self.event_opens_dropdown_or_checkbox(event):
            if event is not None:
                if hasattr(event, 'keysym') and event.keysym == 'Return':
                    extra_func_key = "Return"
                elif hasattr(event, 'keysym') and event.keysym == 'F2':
                    extra_func_key = "F2"
            text = f"{self.data_ref[r][c]}" if self.all_columns_displayed else f"{self.data_ref[r][self.displayed_columns[c]]}"
            if self.cell_auto_resize_enabled:
                self.set_cell_size_to_text(r, c, only_set_if_too_small = True, redraw = True, run_binding = True)
        elif event is not None and ((hasattr(event, "char") and event.char.isalpha()) or
                                    (hasattr(event, "char") and event.char.isdigit()) or
                                    (hasattr(event, "char") and event.char in symbols_set)):
            extra_func_key = event.char
            text = event.char
        else:
            return False
        self.text_editor_loc = (r, c)
        # user hook may veto (by raising) or replace the initial text
        if self.extra_begin_edit_cell_func is not None:
            try:
                text2 = self.extra_begin_edit_cell_func(EditCellEvent(r, c, extra_func_key, text, "begin_edit_cell"))
            except:
                return False
            if text2 is not None:
                text = text2
        text = "" if text is None else text
        self.select_cell(r = r, c = c, keep_other_selections = True)
        self.create_text_editor(r = r, c = c, text = text, set_data_ref_on_destroy = True, dropdown = dropdown)
        return True
# c is displayed col
    # c is displayed col
    def create_text_editor(self,
                           r = 0,
                           c = 0,
                           text = None,
                           state = "normal",
                           see = True,
                           set_data_ref_on_destroy = False,
                           binding = None,
                           dropdown = False):
        """Create and place an in-cell TextEditor window over cell (r, c).

        If an editor is already open at (r, c), appends text to it instead.
        binding, when given, is bound to Tab/Return/FocusOut/Escape;
        otherwise set_data_ref_on_destroy wires the default commit handler.
        """
        if (r, c) == self.text_editor_loc and self.text_editor is not None:
            # NOTE(review): '+' binds tighter than the conditional, so this
            # appends text (or "" for non-str) to the existing content —
            # confirm intended
            self.text_editor.set_text(self.text_editor.get() + "" if not isinstance(text, str) else text)
            return
        if self.text_editor is not None:
            self.destroy_text_editor()
        if see:
            has_redrawn = self.see(r = r, c = c, check_cell_visibility = True)
            if not has_redrawn:
                self.refresh()
        self.text_editor_loc = (r, c)
        # editor geometry matches the cell, slightly padded vertically
        x = self.col_positions[c]
        y = self.row_positions[r]
        w = self.col_positions[c + 1] - x + 1
        h = self.row_positions[r + 1] - y + 6
        dcol = c if self.all_columns_displayed else self.displayed_columns[c]
        if text is None:
            text = self.data_ref[r][dcol]
        self.hide_current()
        bg, fg = self.get_widget_bg_fg(r, dcol)
        self.text_editor = TextEditor(self,
                                      text = text,
                                      font = self.my_font,
                                      state = state,
                                      width = w,
                                      height = h,
                                      border_color = self.table_selected_cells_border_fg,
                                      show_border = self.show_selected_cells_border,
                                      bg = bg,
                                      fg = fg,
                                      popup_menu_font = self.popup_menu_font,
                                      popup_menu_fg = self.popup_menu_fg,
                                      popup_menu_bg = self.popup_menu_bg,
                                      popup_menu_highlight_bg = self.popup_menu_highlight_bg,
                                      popup_menu_highlight_fg = self.popup_menu_highlight_fg)
        self.text_editor_id = self.create_window((x, y), window = self.text_editor, anchor = "nw")
        if not dropdown:
            self.text_editor.textedit.focus_set()
            self.text_editor.scroll_to_bottom()
        # Alt/Option-Return inserts a newline and may grow the editor
        self.text_editor.textedit.bind("<Alt-Return>", lambda x: self.text_editor_newline_binding(r, c))
        if USER_OS == 'Darwin':
            self.text_editor.textedit.bind("<Option-Return>", lambda x: self.text_editor_newline_binding(r, c))
        for key, func in self.text_editor_user_bound_keys.items():
            self.text_editor.textedit.bind(key, func)
        if binding is not None:
            self.text_editor.textedit.bind("<Tab>", lambda x: binding((r, c, "Tab")))
            self.text_editor.textedit.bind("<Return>", lambda x: binding((r, c, "Return")))
            self.text_editor.textedit.bind("<FocusOut>", lambda x: binding((r, c, "FocusOut")))
            self.text_editor.textedit.bind("<Escape>", lambda x: binding((r, c, "Escape")))
        elif binding is None and set_data_ref_on_destroy:
            self.text_editor.textedit.bind("<Tab>", lambda x: self.get_text_editor_value((r, c, "Tab")))
            self.text_editor.textedit.bind("<Return>", lambda x: self.get_text_editor_value((r, c, "Return")))
            if not dropdown:
                self.text_editor.textedit.bind("<FocusOut>", lambda x: self.get_text_editor_value((r, c, "FocusOut")))
            self.text_editor.textedit.bind("<Escape>", lambda x: self.get_text_editor_value((r, c, "Escape")))
        else:
            self.text_editor.textedit.bind("<Escape>", lambda x: self.destroy_text_editor("Escape"))
def bind_text_editor_destroy(self, binding, r, c):
self.text_editor.textedit.bind("<Return>", lambda x: binding((r, c, "Return")))
self.text_editor.textedit.bind("<FocusOut>", lambda x: binding((r, c, "FocusOut")))
self.text_editor.textedit.bind("<Escape>", lambda x: binding((r, c, "Escape")))
self.text_editor.textedit.focus_set()
def destroy_text_editor(self, event = None):
if event is not None and self.extra_end_edit_cell_func is not None and self.text_editor_loc is not None:
self.extra_end_edit_cell_func(EditCellEvent(int(self.text_editor_loc[0]), int(self.text_editor_loc[1]), "Escape", None, "escape_edit_cell"))
self.text_editor_loc = None
try:
self.delete(self.text_editor_id)
except:
pass
try:
self.text_editor.destroy()
except:
pass
try:
self.text_editor = None
except:
pass
try:
self.text_editor_id = None
except:
pass
self.show_current()
if event is not None and len(event) >= 3 and "Escape" in event:
self.focus_set()
# c is displayed col
    # c is displayed col
    def get_text_editor_value(self, destroy_tup = None, r = None, c = None, set_data_ref_on_destroy = True, event = None, destroy = True, move_down = True, redraw = True, recreate = True):
        """Commit (or discard) the open text editor's content.

        destroy_tup is (r, c, reason) from the key binding; Escape discards.
        Return/Tab optionally move selection down/right.  Returns the
        committed value.
        """
        if self.focus_get() is None and destroy_tup:
            return
        if destroy_tup is not None and len(destroy_tup) >= 3 and destroy_tup[2] == "Escape":
            self.destroy_text_editor("Escape")
            self.hide_dropdown_window(r, c)
            return
        if self.text_editor is not None:
            self.text_editor_value = self.text_editor.get()
        if destroy:
            self.destroy_text_editor()
        if set_data_ref_on_destroy:
            if r is None and c is None and destroy_tup:
                r, c = destroy_tup[0], destroy_tup[1]
            # user hook may replace the value before it is stored
            if self.extra_end_edit_cell_func is not None:
                validation = self.extra_end_edit_cell_func(EditCellEvent(r, c, destroy_tup[2] if len(destroy_tup) >= 3 else "FocusOut", f"{self.text_editor_value}", "end_edit_cell"))
                if validation is not None:
                    self.text_editor_value = validation
            self._set_cell_data(r, c, value = self.text_editor_value)
        if move_down:
            if r is None and c is None and destroy_tup:
                r, c = destroy_tup[0], destroy_tup[1]
            currently_selected = self.currently_selected()
            if r is not None and c is not None:
                # only move if the edited cell is still the current cell
                if (
                    currently_selected and
                    r == currently_selected[0] and
                    c == currently_selected[1] and
                    (self.single_selection_enabled or self.toggle_selection_enabled)
                    ):
                    if destroy_tup is not None and len(destroy_tup) >= 3 and destroy_tup[2] == "Return":
                        self.select_cell(r + 1 if r < len(self.row_positions) - 2 else r, c)
                        self.see(r + 1 if r < len(self.row_positions) - 2 else r, c, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = True)
                    elif destroy_tup is not None and len(destroy_tup) >= 3 and destroy_tup[2] == "Tab":
                        self.select_cell(r, c + 1 if c < len(self.col_positions) - 2 else c)
                        self.see(r, c + 1 if c < len(self.col_positions) - 2 else c, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = True)
        self.hide_dropdown_window(r, c)
        if recreate:
            self.recreate_all_selection_boxes()
        if redraw:
            self.refresh()
        if destroy_tup is not None and len(destroy_tup) >= 3 and destroy_tup[2] != "FocusOut":
            self.focus_set()
        return self.text_editor_value
#internal event use
    #internal event use
    def _set_cell_data(self, r = 0, c = 0, dcol = None, value = "", undo = True, cell_resize = True):
        """Write value into data_ref[r][dcol], growing the table if needed.

        c is the displayed column; dcol the underlying data column (derived
        from c when None).  Records an undo snapshot when the value changes.
        """
        if dcol is None:
            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
        # pad rows/columns with "" so the target index exists
        if r > len(self.data_ref) - 1:
            self.data_ref.extend([list(repeat("", dcol + 1)) for i in range((r + 1) - len(self.data_ref))])
        elif dcol > len(self.data_ref[r]) - 1:
            self.data_ref[r].extend(list(repeat("", (dcol + 1) - len(self.data_ref[r]))))
        if self.undo_enabled and undo:
            # snapshot the old value, the affected box and current selection
            if self.data_ref[r][dcol] != value:
                self.undo_storage.append(zlib.compress(pickle.dumps(("edit_cells",
                                                                     {(r, dcol): self.data_ref[r][dcol]},
                                                                     (((r, c, r + 1, c + 1), "cells"), ),
                                                                     self.currently_selected()))))
        self.data_ref[r][dcol] = value
        if cell_resize and self.cell_auto_resize_enabled:
            self.set_cell_size_to_text(r, c, only_set_if_too_small = True, redraw = True, run_binding = True)
#internal event use
def _click_checkbox(self, r, c, dcol = None, undo = True, redraw = True):
if dcol is None:
dcol = c if self.all_columns_displayed else self.displayed_columns[c]
if self.cell_options[(r, dcol)]['checkbox']['state'] == "normal":
self._set_cell_data(r, c, dcol, value = not self.data_ref[r][dcol] if type(self.data_ref[r][dcol]) == bool else False, undo = undo, cell_resize = False)
if self.cell_options[(r, dcol)]['checkbox']['check_function'] is not None:
self.cell_options[(r, dcol)]['checkbox']['check_function']((r, c, "CheckboxClicked", f"{self.data_ref[r][dcol]}"))
if self.extra_end_edit_cell_func is not None:
self.extra_end_edit_cell_func(EditCellEvent(r, c, "Return", f"{self.data_ref[r][dcol]}", "end_edit_cell"))
if redraw:
self.refresh()
    def create_checkbox(self, r = 0, c = 0, checked = False, state = "normal", redraw = False, check_function = None, text = ""):
        """Turn cell (r, c) into a checkbox with the given initial state.

        Replaces any existing dropdown/checkbox on that cell; check_function
        is called on every click with (r, c, "CheckboxClicked", value).
        """
        if (r, c) in self.cell_options and any(x in self.cell_options[(r, c)] for x in ('dropdown', 'checkbox')):
            self.destroy_dropdown_and_checkbox(r, c)
        self._set_cell_data(r, dcol = c, value = checked, cell_resize = False, undo = False) #only works because cell_resize is false and undo is false, otherwise needs displayed col and dcol args
        if (r, c) not in self.cell_options:
            self.cell_options[(r, c)] = {}
        self.cell_options[(r, c)]['checkbox'] = {'check_function': check_function,
                                                 'state': state,
                                                 'text': text}
        if redraw:
            self.refresh()
def create_dropdown(self, r = 0, c = 0, values = [], set_value = None, state = "readonly", redraw = True, selection_function = None, modified_function = None):
if (r, c) in self.cell_options and any(x in self.cell_options[(r, c)] for x in ('dropdown', 'checkbox')):
self.destroy_dropdown_and_checkbox(r, c)
if values:
self._set_cell_data(r, c, value = set_value if set_value is not None else values[0], cell_resize = False, undo = False)
elif not values and set_value is not None:
self._set_cell_data(r, c, value = set_value, cell_resize = False, undo = False)
if (r, c) not in self.cell_options:
self.cell_options[(r, c)] = {}
self.cell_options[(r, c)]['dropdown'] = {'values': values,
'align': "w",
'window': "no dropdown open",
'canvas_id': "no dropdown open",
'select_function': selection_function,
'modified_function': modified_function,
'state': state}
if redraw:
self.refresh()
def get_widget_bg_fg(self, r, c):
bg = self.table_bg
fg = self.table_fg
if (r, c) in self.cell_options and 'highlight' in self.cell_options[(r, c)]:
if self.cell_options[(r, c)]['highlight'][0] is not None:
bg = self.cell_options[(r, c)]['highlight'][0]
if self.cell_options[(r, c)]['highlight'][1] is not None:
fg = self.cell_options[(r, c)]['highlight'][1]
elif r in self.row_options and 'highlight' in self.row_options[r]:
if self.row_options[r]['highlight'][0] is not None:
bg = self.row_options[r]['highlight'][0]
if self.row_options[r]['highlight'][1] is not None:
fg = self.row_options[r]['highlight'][1]
elif c in self.col_options and 'highlight' in self.col_options[c]:
if self.col_options[c]['highlight'][0] is not None:
bg = self.col_options[c]['highlight'][0]
if self.col_options[c]['highlight'][1] is not None:
fg = self.col_options[c]['highlight'][1]
return bg, fg
    def text_editor_newline_binding(self, r = None, c = None, event = None):
        """Grow the open cell text editor by one line on newline entry and,
        if the cell also has an open dropdown, reposition/resize the dropdown
        so it tracks the enlarged editor. c is a displayed column index."""
        if self.GetLinesHeight(self.text_editor.get_num_lines() + 1) > self.text_editor.winfo_height():
            self.text_editor.config(height = self.text_editor.winfo_height() + self.xtra_lines_increment)
        dcol = c if self.all_columns_displayed else self.displayed_columns[c]
        if ((r, c if self.all_columns_displayed else self.displayed_columns[c]) in self.cell_options and
            'dropdown' in self.cell_options[(r, dcol)]):
            text_editor_h = self.text_editor.winfo_height()
            win_h, anchor = self.get_dropdown_height_anchor(r, c, dcol, text_editor_h)
            if anchor == "nw":
                # dropdown hangs below the (now taller) editor
                self.coords(self.cell_options[(r, dcol)]['dropdown']['canvas_id'],
                            self.col_positions[c], self.row_positions[r] + text_editor_h - 1)
                self.itemconfig(self.cell_options[(r, dcol)]['dropdown']['canvas_id'],
                                anchor = anchor, height = win_h)
            elif anchor == "sw":
                # dropdown opens upwards from the top edge of the cell
                self.coords(self.cell_options[(r, dcol)]['dropdown']['canvas_id'],
                            self.col_positions[c], self.row_positions[r])
                self.itemconfig(self.cell_options[(r, dcol)]['dropdown']['canvas_id'],
                                anchor = anchor, height = win_h)
def get_space_bot(self, r, text_editor_h = None):
if text_editor_h is None:
win_h = int(self.canvasy(0) + self.winfo_height() - self.row_positions[r + 1])
sheet_h = int(self.row_positions[-1] + 1 + self.empty_vertical - self.row_positions[r + 1])
else:
win_h = int(self.canvasy(0) + self.winfo_height() - (self.row_positions[r] + text_editor_h))
sheet_h = int(self.row_positions[-1] + 1 + self.empty_vertical - (self.row_positions[r] + text_editor_h))
return win_h if win_h >= sheet_h else sheet_h
def get_dropdown_height_anchor(self, r, c, dcol, text_editor_h = None):
numvalues = len(self.cell_options[(r, dcol)]['dropdown']['values'])
xscroll_h = self.parentframe.xscroll.winfo_height()
if numvalues > 5:
linespace = 6 * 5 + 3
win_h = int(self.txt_h * 6 + linespace + xscroll_h)
else:
linespace = numvalues * 5 + 3
win_h = int(self.txt_h * numvalues + linespace + xscroll_h)
if win_h > 300:
win_h = 300
space_bot = self.get_space_bot(r, text_editor_h)
space_top = int(self.row_positions[r])
anchor = "nw"
win_h2 = int(win_h)
if win_h > space_bot:
if space_bot >= space_top:
anchor = "nw"
win_h = space_bot - 1
elif space_top > space_bot:
anchor = "sw"
win_h = space_top - 1
if win_h < self.txt_h + 5:
win_h = self.txt_h + 5
elif win_h > win_h2:
win_h = win_h2
return win_h, anchor
    # c is displayed col
    def display_dropdown_window(self, r, c, dcol = None, event = None):
        """Open the dropdown list window for cell (r, c).

        In "normal" state a text editor is opened alongside so the value can
        be typed; in "readonly" state only the list window is shown. The
        open window and its canvas id are remembered both on the instance
        and in the cell's dropdown options.
        """
        self.destroy_text_editor("Escape")
        self.delete_opened_dropdown_window()
        if dcol is None:
            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
        if self.cell_options[(r, dcol)]['dropdown']['state'] == "normal":
            # editable dropdown: start the cell editor first; abort if refused
            if not self.edit_cell_(r = r, c = c, dropdown = True, event = event):
                return
        bg, fg = self.get_widget_bg_fg(r, dcol)
        win_h, anchor = self.get_dropdown_height_anchor(r, c, dcol)
        window = self.parentframe.dropdown_class(self.winfo_toplevel(),
                                                 r,
                                                 c,
                                                 width = self.col_positions[c + 1] - self.col_positions[c] + 1,
                                                 height = win_h,
                                                 font = self.my_font,
                                                 bg = bg,
                                                 fg = fg,
                                                 outline_color = fg,
                                                 outline_thickness = 1,
                                                 values = self.cell_options[(r, dcol)]['dropdown']['values'],
                                                 hide_dropdown_window = self.hide_dropdown_window,
                                                 arrowkey_RIGHT = self.arrowkey_RIGHT,
                                                 arrowkey_LEFT = self.arrowkey_LEFT,
                                                 align = self.cell_options[(r, dcol)]['dropdown']['align'])
        if self.cell_options[(r, dcol)]['dropdown']['state'] == "normal":
            if anchor == "nw":
                # place the list just below the open text editor
                ypos = self.row_positions[r] + self.text_editor.h_ - 1
            else:
                ypos = self.row_positions[r]
            self.cell_options[(r, dcol)]['dropdown']['canvas_id'] = self.create_window((self.col_positions[c], ypos),
                                                                                      window = window,
                                                                                      anchor = anchor)
            if self.cell_options[(r, dcol)]['dropdown']['modified_function'] is not None:
                # forward editor text changes to the user's modified_function
                self.text_editor.textedit.bind("<<TextModified>>", lambda x: self.cell_options[(r, dcol)]['dropdown']['modified_function'](DropDownModifiedEvent("ComboboxModified", r, dcol, self.text_editor.get())))
            self.update()
            try:
                self.text_editor.textedit.focus_set()
                self.text_editor.scroll_to_bottom()
            except:
                # editor may already have been destroyed; leave dropdown unregistered
                return
        else:
            if anchor == "nw":
                ypos = self.row_positions[r + 1]
            else:
                ypos = self.row_positions[r]
            self.cell_options[(r, dcol)]['dropdown']['canvas_id'] = self.create_window((self.col_positions[c], ypos),
                                                                                      window = window,
                                                                                      anchor = anchor)
            window.bind("<FocusOut>", lambda x: self.hide_dropdown_window(r, c))
            self.update()
            try:
                window.focus_set()
            except:
                return
        self.existing_dropdown_window = window
        self.cell_options[(r, dcol)]['dropdown']['window'] = window
        self.existing_dropdown_canvas_id = self.cell_options[(r, dcol)]['dropdown']['canvas_id']
    # c is displayed col
    def hide_dropdown_window(self, r = None, c = None, selection = None, b1 = False, redraw = True):
        """Close the open dropdown window, optionally committing a selection.

        When r, c and selection are all given, the selection is run through
        the cell's select_function and the sheet-level end-edit callback
        (which may override the value) before being written to the cell.
        Returns the (row, col) of the window that was closed, if there was one.
        """
        if r is not None and c is not None and selection is not None:
            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
            if self.cell_options[(r, dcol)]['dropdown']['select_function'] is not None: # user has specified a selection function
                self.cell_options[(r, dcol)]['dropdown']['select_function'](EditCellEvent(r, c, "ComboboxSelected", f"{selection}", "end_edit_cell"))
            if self.extra_end_edit_cell_func is not None:
                validation = self.extra_end_edit_cell_func(EditCellEvent(r, c, "ComboboxSelected", f"{selection}", "end_edit_cell"))
                if validation is not None:
                    # the end-edit callback may substitute the committed value
                    selection = validation
            self._set_cell_data(r, c, dcol, selection, cell_resize = True)
            self.focus_set()
            self.recreate_all_selection_boxes()
            if redraw:
                self.refresh()
        # remember which window is being closed before tearing it down
        if self.existing_dropdown_window is not None:
            closedr, closedc, ret_tup = int(self.existing_dropdown_window.r), int(self.existing_dropdown_window.c), True
        else:
            ret_tup = False
        if b1 and self.text_editor_loc is not None and self.text_editor is not None:
            # left-click close: commit the text editor's value as if Return was pressed
            self.get_text_editor_value(destroy_tup = self.text_editor_loc + ("Return", ))
        else:
            self.destroy_text_editor("Escape")
        self.delete_opened_dropdown_window(r, c)
        if ret_tup:
            return closedr, closedc
# c is displayed col
def delete_opened_dropdown_window(self, r = None, c = None, dcol = None):
if c is not None and dcol is None:
dcol = c if self.all_columns_displayed else self.displayed_columns[c]
try:
self.delete(self.existing_dropdown_canvas_id)
except:
pass
self.existing_dropdown_canvas_id = None
try:
self.existing_dropdown_window.destroy()
except:
pass
self.existing_dropdown_window = None
if r is not None and c is not None and (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)]:
self.cell_options[(r, dcol)]['dropdown']['canvas_id'] = "no dropdown open"
self.cell_options[(r, dcol)]['dropdown']['window'] = "no dropdown open"
try:
self.delete(self.cell_options[(r, dcol)]['dropdown']['canvas_id'])
except:
pass
def get_displayed_col_from_dcol(self, dcol):
try:
return self.displayed_columns.index(dcol)
except:
return None
# c is dcol
def destroy_dropdown(self, r, c):
self.delete_opened_dropdown_window(r, c)
if (r, c) in self.cell_options and 'dropdown' in self.cell_options[(r, c)]:
del self.cell_options[(r, c)]['dropdown']
# c is dcol
def destroy_checkbox(self, r, c):
if (r, c) in self.cell_options and 'checkbox' in self.cell_options[(r, c)]:
del self.cell_options[(r, c)]['checkbox']
    # c is dcol
    def destroy_dropdown_and_checkbox(self, r, c):
        """Remove both the dropdown and the checkbox options from data cell (r, c)."""
        self.destroy_dropdown(r, c)
        self.destroy_checkbox(r, c)
    # deprecated
    def refresh_dropdowns(self, dropdowns = []):
        """Deprecated no-op kept only so older callers do not break."""
        pass
from ._tksheet_vars import *
from ._tksheet_other_classes import *
from collections import defaultdict, deque
from itertools import islice, repeat, accumulate, chain, product, cycle
from math import floor, ceil
from tkinter import TclError
import bisect
import csv as csv_module
import io
import pickle
import tkinter as tk
import zlib
class MainTable(tk.Canvas):
    def __init__(self,
                 parentframe = None,
                 enable_edit_cell_auto_resize = True,
                 page_up_down_select_row = False,
                 expand_sheet_if_paste_too_big = False,
                 paste_insert_column_limit = None,
                 paste_insert_row_limit = None,
                 arrow_key_down_right_scroll_page = False,
                 ctrl_keys_over_dropdowns_enabled = False,
                 column_width = None,
                 column_headers_canvas = None,
                 row_index_canvas = None,
                 headers = None,
                 header_height = None,
                 row_height = None,
                 data_reference = None,
                 total_cols = None,
                 total_rows = None,
                 row_index = None,
                 font = None,
                 header_font = None,
                 popup_menu_font = get_font(),
                 popup_menu_fg = "gray10",
                 popup_menu_bg = "white",
                 popup_menu_highlight_bg = "#f1f3f4",
                 popup_menu_highlight_fg = "gray10",
                 align = None,
                 width = None,
                 height = None,
                 table_bg = "white",
                 table_grid_fg = "gray15",
                 table_fg = "black",
                 show_selected_cells_border = True,
                 table_selected_cells_border_fg = "#1a73e8",
                 table_selected_cells_bg = "#e7f0fd",
                 display_selected_fg_over_highlights = False,
                 table_selected_cells_fg = "black",
                 table_selected_rows_border_fg = "#1a73e8",
                 table_selected_rows_bg = "#e7f0fd",
                 table_selected_rows_fg = "black",
                 table_selected_columns_border_fg = "#1a73e8",
                 table_selected_columns_bg = "#e7f0fd",
                 table_selected_columns_fg = "black",
                 displayed_columns = [],
                 all_columns_displayed = True,
                 show_vertical_grid = True,
                 show_horizontal_grid = True,
                 show_index = True,
                 show_header = True,
                 selected_rows_to_end_of_window = False,
                 horizontal_grid_to_end_of_window = False,
                 vertical_grid_to_end_of_window = False,
                 empty_horizontal = 150,
                 empty_vertical = 100,
                 max_undos = 20):
        """The canvas that draws the sheet's cells, grid lines and selections.

        Links itself to the column-header and row-index canvases, stores the
        data reference (or builds an empty total_rows x total_cols grid),
        computes font metrics and row/column pixel positions, then installs
        the basic mouse bindings and right-click menus.
        """
        tk.Canvas.__init__(self,
                           parentframe,
                           width = width,
                           height = height,
                           background = table_bg,
                           highlightthickness = 0)
        self.parentframe = parentframe
        self.b1_pressed_loc = None
        # bookkeeping for the one dropdown window that can be open at a time
        self.existing_dropdown_canvas_id = None
        self.existing_dropdown_window = None
        self.closed_dropdown = None
        # canvas item pools: disp_* items are currently displayed,
        # hidd_* items are hidden and kept for reuse to avoid re-creation
        self.disp_text = {}
        self.disp_high = {}
        self.disp_grid = {}
        self.disp_fill_sels = {}
        self.disp_bord_sels = {}
        self.disp_resize_lines = {}
        self.disp_ctrl_outline = {}
        self.disp_dropdown = {}
        self.disp_checkbox = {}
        self.hidd_ctrl_outline = {}
        self.hidd_text = {}
        self.hidd_high = {}
        self.hidd_grid = {}
        self.hidd_fill_sels = {}
        self.hidd_bord_sels = {}
        self.hidd_resize_lines = {}
        self.hidd_dropdown = {}
        self.hidd_checkbox = {}
        # per-cell / per-column / per-row option dicts (see docstring below)
        self.cell_options = {}
        self.col_options = {}
        self.row_options = {}
        """
        cell options dict looks like:
        {(row int, column int): {'dropdown': {'values': values,
                                              'window': "no dropdown open",
                                              'select_function': selection_function,
                                              'keypress_function': keypress_function,
                                              'state': state},
                                'highlight: (bg, fg),
                                'align': "e",
                                'readonly': True}
        """
        self.extra_table_rc_menu_funcs = {}
        self.extra_index_rc_menu_funcs = {}
        self.extra_header_rc_menu_funcs = {}
        self.max_undos = max_undos
        self.undo_storage = deque(maxlen = max_undos)
        self.page_up_down_select_row = page_up_down_select_row
        self.expand_sheet_if_paste_too_big = expand_sheet_if_paste_too_big
        self.paste_insert_column_limit = paste_insert_column_limit
        self.paste_insert_row_limit = paste_insert_row_limit
        self.arrow_key_down_right_scroll_page = arrow_key_down_right_scroll_page
        self.cell_auto_resize_enabled = enable_edit_cell_auto_resize
        self.display_selected_fg_over_highlights = display_selected_fg_over_highlights
        self.centre_alignment_text_mod_indexes = (slice(1, None), slice(None, -1))
        self.c_align_cyc = cycle(self.centre_alignment_text_mod_indexes)
        self.show_index = show_index
        self.show_header = show_header
        self.selected_rows_to_end_of_window = selected_rows_to_end_of_window
        self.horizontal_grid_to_end_of_window = horizontal_grid_to_end_of_window
        self.vertical_grid_to_end_of_window = vertical_grid_to_end_of_window
        self.empty_horizontal = empty_horizontal
        self.empty_vertical = empty_vertical
        self.show_vertical_grid = show_vertical_grid
        self.show_horizontal_grid = show_horizontal_grid
        self.min_rh = 0
        self.hdr_min_rh = 0
        self.being_drawn_rect = None
        # user-attachable callbacks; all default to disabled (None)
        self.extra_motion_func = None
        self.extra_b1_press_func = None
        self.extra_b1_motion_func = None
        self.extra_b1_release_func = None
        self.extra_double_b1_func = None
        self.extra_rc_func = None
        self.extra_begin_ctrl_c_func = None
        self.extra_end_ctrl_c_func = None
        self.extra_begin_ctrl_x_func = None
        self.extra_end_ctrl_x_func = None
        self.extra_begin_ctrl_v_func = None
        self.extra_end_ctrl_v_func = None
        self.extra_begin_ctrl_z_func = None
        self.extra_end_ctrl_z_func = None
        self.extra_begin_delete_key_func = None
        self.extra_end_delete_key_func = None
        self.extra_begin_edit_cell_func = None
        self.extra_end_edit_cell_func = None
        self.extra_begin_del_rows_rc_func = None
        self.extra_end_del_rows_rc_func = None
        self.extra_begin_del_cols_rc_func = None
        self.extra_end_del_cols_rc_func = None
        self.extra_begin_insert_cols_rc_func = None
        self.extra_end_insert_cols_rc_func = None
        self.extra_begin_insert_rows_rc_func = None
        self.extra_end_insert_rows_rc_func = None
        self.text_editor_user_bound_keys = {}
        self.selection_binding_func = None
        self.deselection_binding_func = None
        self.drag_selection_binding_func = None
        self.shift_selection_binding_func = None
        self.select_all_binding_func = None
        # feature flags; all off until enabled through the public API
        self.single_selection_enabled = False
        self.toggle_selection_enabled = False # with this mode every left click adds the cell to selected cells
        self.ctrl_keys_over_dropdowns_enabled = ctrl_keys_over_dropdowns_enabled
        self.drag_selection_enabled = False
        self.select_all_enabled = False
        self.arrowkeys_enabled = False
        self.undo_enabled = False
        self.cut_enabled = False
        self.copy_enabled = False
        self.paste_enabled = False
        self.delete_key_enabled = False
        self.rc_select_enabled = False
        self.rc_delete_column_enabled = False
        self.rc_insert_column_enabled = False
        self.rc_delete_row_enabled = False
        self.rc_insert_row_enabled = False
        self.rc_popup_menus_enabled = False
        self.edit_cell_enabled = False
        self.text_editor_loc = None
        self.show_selected_cells_border = show_selected_cells_border
        self.new_row_width = 0
        self.new_header_height = 0
        # NOTE(review): duplicate assignment — parentframe was already stored above
        self.parentframe = parentframe
        self.row_width_resize_bbox = tuple()
        self.header_height_resize_bbox = tuple()
        # cross-link the three canvases (table, column headers, row index)
        self.CH = column_headers_canvas
        self.CH.MT = self
        self.CH.RI = row_index_canvas
        self.RI = row_index_canvas
        self.RI.MT = self
        self.RI.CH = column_headers_canvas
        self.TL = None                # is set from within TopLeftRectangle() __init__
        self.all_columns_displayed = True
        self.align = align
        self.my_font = font
        self.fnt_fam = font[0]
        self.fnt_sze = font[1]
        self.fnt_wgt = font[2]
        self.my_hdr_font = header_font
        self.hdr_fnt_fam = header_font[0]
        self.hdr_fnt_sze = header_font[1]
        self.hdr_fnt_wgt = header_font[2]
        # off-screen canvas used purely for measuring rendered text sizes
        self.txt_measure_canvas = tk.Canvas(self)
        self.txt_measure_canvas_text = self.txt_measure_canvas.create_text(0, 0, text = "", font = self.my_font)
        self.text_editor = None
        self.text_editor_id = None
        self.default_cw = column_width
        # default row/header heights are stored as ("pixels"/unit str, px int)
        self.default_rh = (row_height if isinstance(row_height, str) else "pixels",
                           row_height if isinstance(row_height, int) else self.GetLinesHeight(int(row_height)))
        self.default_hh = (header_height if isinstance(header_height, str) else "pixels",
                           header_height if isinstance(header_height, int) else self.GetHdrLinesHeight(int(header_height)))
        self.set_fnt_help()
        self.set_hdr_fnt_help()
        # NOTE(review): the isinstance branch re-assigns the same reference;
        # non list/tuple data references are silently replaced with []
        self.data_ref = data_reference
        if isinstance(self.data_ref, (list, tuple)):
            self.data_ref = data_reference
        else:
            self.data_ref = []
        if not self.data_ref:
            if isinstance(total_rows, int) and isinstance(total_cols, int) and total_rows > 0 and total_cols > 0:
                self.data_ref = [list(repeat("", total_cols)) for i in range(total_rows)]
        # headers / row index may be an int (column/row of the data to use)
        # or an explicit sequence of values
        if isinstance(headers, int):
            self.my_hdrs = headers
        else:
            if headers:
                self.my_hdrs = headers
            else:
                self.my_hdrs = []
        if isinstance(row_index, int):
            self.my_row_index = row_index
        else:
            if row_index:
                self.my_row_index = row_index
            else:
                self.my_row_index = []
        self.displayed_columns = []
        self.col_positions = [0]
        self.row_positions = [0]
        self.reset_row_positions()
        self.display_columns(indexes = displayed_columns,
                             enable = not all_columns_displayed,
                             reset_col_positions = False,
                             set_col_positions = False,
                             deselect_all = False)
        self.reset_col_positions()
        self.table_grid_fg = table_grid_fg
        self.table_fg = table_fg
        self.table_selected_cells_border_fg = table_selected_cells_border_fg
        self.table_selected_cells_bg = table_selected_cells_bg
        self.table_selected_cells_fg = table_selected_cells_fg
        self.table_selected_rows_border_fg = table_selected_rows_border_fg
        self.table_selected_rows_bg = table_selected_rows_bg
        self.table_selected_rows_fg = table_selected_rows_fg
        self.table_selected_columns_border_fg = table_selected_columns_border_fg
        self.table_selected_columns_bg = table_selected_columns_bg
        self.table_selected_columns_fg = table_selected_columns_fg
        self.table_bg = table_bg
        self.popup_menu_font = popup_menu_font
        self.popup_menu_fg = popup_menu_fg
        self.popup_menu_bg = popup_menu_bg
        self.popup_menu_highlight_bg = popup_menu_highlight_bg
        self.popup_menu_highlight_fg = popup_menu_highlight_fg
        self.rc_popup_menu = None
        self.empty_rc_popup_menu = None
        self.basic_bindings()
        self.create_rc_menus()
    def refresh(self, event = None):
        """Redraw the table grid and text; also bound to <Configure>."""
        self.main_table_redraw_grid_and_text(True, True)
def basic_bindings(self, enable = True):
if enable:
self.bind("<Configure>", self.refresh)
self.bind("<Motion>", self.mouse_motion)
self.bind("<ButtonPress-1>", self.b1_press)
self.bind("<B1-Motion>", self.b1_motion)
self.bind("<ButtonRelease-1>", self.b1_release)
self.bind("<Double-Button-1>", self.double_b1)
self.bind("<MouseWheel>", self.mousewheel)
if USER_OS == "Linux":
for canvas in (self, self.RI):
canvas.bind("<Button-4>", self.mousewheel)
canvas.bind("<Button-5>", self.mousewheel)
for canvas in (self, self.CH):
canvas.bind("<Shift-Button-4>", self.shift_mousewheel)
canvas.bind("<Shift-Button-5>", self.shift_mousewheel)
self.bind("<Shift-MouseWheel>", self.shift_mousewheel)
self.bind("<Shift-ButtonPress-1>", self.shift_b1_press)
self.CH.bind("<Shift-ButtonPress-1>", self.CH.shift_b1_press)
self.RI.bind("<Shift-ButtonPress-1>", self.RI.shift_b1_press)
self.CH.bind("<Shift-MouseWheel>", self.shift_mousewheel)
self.RI.bind("<MouseWheel>", self.mousewheel)
self.bind(get_rc_binding(), self.rc)
else:
self.unbind("<Configure>")
self.unbind("<Motion>")
self.unbind("<ButtonPress-1>")
self.unbind("<B1-Motion>")
self.unbind("<ButtonRelease-1>")
self.unbind("<Double-Button-1>")
self.unbind("<MouseWheel>")
if USER_OS == "Linux":
for canvas in (self, self.RI):
canvas.unbind("<Button-4>")
canvas.unbind("<Button-5>")
for canvas in (self, self.CH):
canvas.unbind("<Shift-Button-4>")
canvas.unbind("<Shift-Button-5>")
self.unbind("<Shift-ButtonPress-1>")
self.CH.unbind("<Shift-ButtonPress-1>")
self.RI.unbind("<Shift-ButtonPress-1>")
self.unbind("<Shift-MouseWheel>")
self.CH.unbind("<Shift-MouseWheel>")
self.RI.unbind("<MouseWheel>")
self.unbind(get_rc_binding())
def show_ctrl_outline(self, canvas = "table", start_cell = (0, 0), end_cell = (0, 0)):
self.create_ctrl_outline(self.col_positions[start_cell[0]] + 2,
self.row_positions[start_cell[1]] + 2,
self.col_positions[end_cell[0]] - 2,
self.row_positions[end_cell[1]] - 2,
fill = "",
dash = (10, 15),
width = 3 if end_cell[0] - start_cell[0] == 1 and end_cell[1] - start_cell[1] == 1 else 2,
outline = self.table_selected_cells_border_fg,
tag = "ctrl")
self.after(1500, self.delete_ctrl_outlines)
    def create_ctrl_outline(self, x1, y1, x2, y2, fill, dash, width, outline, tag):
        """Create (or reuse a pooled hidden) dashed rectangle item and record
        it as displayed in disp_ctrl_outline."""
        if self.hidd_ctrl_outline:
            # reuse a previously created item instead of allocating a new one
            t, sh = self.hidd_ctrl_outline.popitem()
            self.coords(t, x1, y1, x2, y2)
            if sh:
                # item is already in "normal" state; just restyle it
                self.itemconfig(t, fill = fill, dash = dash, width = width, outline = outline, tag = tag)
            else:
                self.itemconfig(t, fill = fill, dash = dash, width = width, outline = outline, tag = tag, state = "normal")
            self.lift(t)
        else:
            t = self.create_rectangle(x1, y1, x2, y2, fill = fill, dash = dash, width = width, outline = outline, tag = tag)
        self.disp_ctrl_outline[t] = True
def delete_ctrl_outlines(self):
self.hidd_ctrl_outline.update(self.disp_ctrl_outline)
self.disp_ctrl_outline = {}
for t, sh in self.hidd_ctrl_outline.items():
if sh:
self.itemconfig(t, state = "hidden")
self.hidd_ctrl_outline[t] = False
    def get_ctrl_x_c_boxes(self):
        """Collect the selection boxes relevant to a copy/cut operation.

        Boxes are decoded from canvas item tags of the form "r1_c1_r2_c2".
        For cell/column selections, returns (boxes, maxrows) where boxes
        shorter than the tallest box are dropped; for row selections, returns
        just the boxes dict. Note the mode-dependent return shape.
        """
        currently_selected = self.currently_selected()
        boxes = {}
        if isinstance(currently_selected[0], int) or currently_selected[0] == "column":
            for item in chain(self.find_withtag("CellSelectFill"), self.find_withtag("Current_Outside"), self.find_withtag("ColSelectFill")):
                alltags = self.gettags(item)
                if alltags[0] == "CellSelectFill" or alltags[0] == "Current_Outside":
                    boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "cells"
                elif alltags[0] == "ColSelectFill":
                    boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "cols"
            # only keep boxes spanning the maximum number of rows so that
            # the copied region is rectangular
            maxrows = 0
            for r1, c1, r2, c2 in boxes:
                if r2 - r1 > maxrows:
                    maxrows = r2 - r1
            for r1, c1, r2, c2 in tuple(boxes):
                if r2 - r1 < maxrows:
                    del boxes[(r1, c1, r2, c2)]
            return boxes, maxrows
        elif currently_selected[0] == "row":
            for item in self.find_withtag("RowSelectFill"):
                boxes[tuple(int(e) for e in self.gettags(item)[1].split("_") if e)] = "rows"
            return boxes
    def ctrl_c(self, event = None):
        """Copy the current selection to the clipboard as tab-separated text
        and flash an outline around the copied boxes.

        The begin/end callbacks can observe the operation; an exception in
        the begin callback aborts the copy.
        """
        currently_selected = self.currently_selected()
        if currently_selected:
            s = io.StringIO()
            writer = csv_module.writer(s, dialect = csv_module.excel_tab, lineterminator = "\n")
            rows = []
            if isinstance(currently_selected[0], int) or currently_selected[0] == "column":
                boxes, maxrows = self.get_ctrl_x_c_boxes()
                if self.extra_begin_ctrl_c_func is not None:
                    try:
                        self.extra_begin_ctrl_c_func(CtrlKeyEvent("begin_ctrl_c", boxes, currently_selected, tuple()))
                    except:
                        return
                # one output row per selected row, concatenating box columns
                for rn in range(maxrows):
                    row = []
                    for r1, c1, r2, c2 in boxes:
                        if r2 - r1 < maxrows:
                            continue
                        data_ref_rn = r1 + rn
                        for c in range(c1, c2):
                            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                            try:
                                row.append(self.data_ref[data_ref_rn][dcol])
                            except:
                                # out-of-range cells copy as empty strings
                                row.append("")
                    writer.writerow(row)
                    rows.append(row)
            elif currently_selected[0] == "row":
                boxes = self.get_ctrl_x_c_boxes()
                if self.extra_begin_ctrl_c_func is not None:
                    try:
                        self.extra_begin_ctrl_c_func(CtrlKeyEvent("begin_ctrl_c", boxes, currently_selected, tuple()))
                    except:
                        return
                for r1, c1, r2, c2 in boxes:
                    for rn in range(r2 - r1):
                        row = []
                        data_ref_rn = r1 + rn
                        for c in range(c1, c2):
                            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                            try:
                                row.append(self.data_ref[data_ref_rn][dcol])
                            except:
                                row.append("")
                        writer.writerow(row)
                        rows.append(row)
            for r1, c1, r2, c2 in boxes:
                self.show_ctrl_outline(canvas = "table", start_cell = (c1, r1), end_cell = (c2, r2))
            self.clipboard_clear()
            self.clipboard_append(s.getvalue())
            self.update()
            if self.extra_end_ctrl_c_func is not None:
                self.extra_end_ctrl_c_func(CtrlKeyEvent("end_ctrl_c", boxes, currently_selected, rows))
    def ctrl_x(self, event = None):
        """Cut the current selection: copy it to the clipboard as
        tab-separated text, then clear the underlying cells.

        Cells that are readonly, checkboxes, or dropdowns whose values do not
        include "" (unless ctrl_keys_over_dropdowns_enabled) are copied but
        not cleared. Cleared values are recorded for undo when enabled.
        """
        if self.anything_selected():
            if self.undo_enabled:
                undo_storage = {}
            s = io.StringIO()
            writer = csv_module.writer(s, dialect = csv_module.excel_tab, lineterminator = "\n")
            currently_selected = self.currently_selected()
            rows = []
            if isinstance(currently_selected[0], int) or currently_selected[0] == "column":
                boxes, maxrows = self.get_ctrl_x_c_boxes()
                if self.extra_begin_ctrl_x_func is not None:
                    try:
                        self.extra_begin_ctrl_x_func(CtrlKeyEvent("begin_ctrl_x", boxes, currently_selected, tuple()))
                    except:
                        return
                # first pass: write clipboard text and stash undo values
                for rn in range(maxrows):
                    row = []
                    for r1, c1, r2, c2 in boxes:
                        if r2 - r1 < maxrows:
                            continue
                        data_ref_rn = r1 + rn
                        for c in range(c1, c2):
                            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                            try:
                                sx = f"{self.data_ref[data_ref_rn][dcol]}"
                                row.append(sx)
                                if self.undo_enabled:
                                    undo_storage[(data_ref_rn, dcol)] = sx
                            except:
                                row.append("")
                    writer.writerow(row)
                    rows.append(row)
                # second pass: clear the writable cells
                for rn in range(maxrows):
                    for r1, c1, r2, c2 in boxes:
                        if r2 - r1 < maxrows:
                            continue
                        data_ref_rn = r1 + rn
                        if data_ref_rn in self.row_options and 'readonly' in self.row_options[data_ref_rn]:
                            continue
                        for c in range(c1, c2):
                            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                            if (
                                ((data_ref_rn, dcol) in self.cell_options and ('readonly' in self.cell_options[(data_ref_rn, dcol)] or 'checkbox' in self.cell_options[(data_ref_rn, dcol)])) or
                                (dcol in self.col_options and 'readonly' in self.col_options[dcol]) or
                                (not self.ctrl_keys_over_dropdowns_enabled and
                                 (data_ref_rn, dcol) in self.cell_options and
                                 'dropdown' in self.cell_options[(data_ref_rn, dcol)] and
                                 "" not in self.cell_options[(data_ref_rn, dcol)]['dropdown']['values'])
                            ):
                                continue
                            try:
                                self.data_ref[data_ref_rn][dcol] = ""
                            except:
                                continue
            elif currently_selected[0] == "row":
                boxes = self.get_ctrl_x_c_boxes()
                if self.extra_begin_ctrl_x_func is not None:
                    try:
                        self.extra_begin_ctrl_x_func(CtrlKeyEvent("begin_ctrl_x", boxes, currently_selected, tuple()))
                    except:
                        return
                for r1, c1, r2, c2 in boxes:
                    for rn in range(r2 - r1):
                        row = []
                        data_ref_rn = r1 + rn
                        for c in range(c1, c2):
                            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                            try:
                                sx = f"{self.data_ref[data_ref_rn][dcol]}"
                                row.append(sx)
                                if self.undo_enabled:
                                    undo_storage[(data_ref_rn, dcol)] = sx
                            except:
                                row.append("")
                        writer.writerow(row)
                        rows.append(row)
                for r1, c1, r2, c2 in boxes:
                    for rn in range(r2 - r1):
                        data_ref_rn = r1 + rn
                        if data_ref_rn in self.row_options and 'readonly' in self.row_options[data_ref_rn]:
                            continue
                        for c in range(c1, c2):
                            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                            if (
                                ((data_ref_rn, dcol) in self.cell_options and ('readonly' in self.cell_options[(data_ref_rn, dcol)] or 'checkbox' in self.cell_options[(data_ref_rn, dcol)])) or
                                (dcol in self.col_options and 'readonly' in self.col_options[dcol]) or
                                (not self.ctrl_keys_over_dropdowns_enabled and
                                 (data_ref_rn, dcol) in self.cell_options and
                                 'dropdown' in self.cell_options[(data_ref_rn, dcol)] and
                                 "" not in self.cell_options[(data_ref_rn, dcol)]['dropdown']['values'])
                            ):
                                continue
                            try:
                                self.data_ref[data_ref_rn][dcol] = ""
                            except:
                                continue
            if self.undo_enabled:
                self.undo_storage.append(zlib.compress(pickle.dumps(("edit_cells", undo_storage, tuple(boxes.items()), currently_selected))))
            self.clipboard_clear()
            self.clipboard_append(s.getvalue())
            self.update()
            self.refresh()
            for r1, c1, r2, c2 in boxes:
                self.show_ctrl_outline(canvas = "table", start_cell = (c1, r1), end_cell = (c2, r2))
            if self.extra_end_ctrl_x_func is not None:
                self.extra_end_ctrl_x_func(CtrlKeyEvent("end_ctrl_x", boxes, currently_selected, rows))
    def ctrl_v(self, event = None):
        """Paste tab-separated clipboard text into the sheet at the current
        selection (or the next empty row/column when nothing is selected and
        expansion is enabled).

        Short clipboard rows are padded to a rectangle. When
        expand_sheet_if_paste_too_big is set, rows/columns are appended (up
        to the paste_insert_*_limit caps); otherwise the pasted area is
        clipped to the sheet. Readonly/checkbox cells and dropdowns that do
        not contain the pasted value are skipped. Supports undo.
        """
        if not self.expand_sheet_if_paste_too_big and (len(self.col_positions) == 1 or len(self.row_positions) == 1):
            return
        currently_selected = self.currently_selected()
        if currently_selected:
            # (x1, y1) is the top-left target in displayed coordinates
            if currently_selected[0] == "column":
                x1 = currently_selected[1]
                y1 = 0
            elif currently_selected[0] == "row":
                y1 = currently_selected[1]
                x1 = 0
            elif isinstance(currently_selected[0], int):
                y1 = currently_selected[0]
                x1 = currently_selected[1]
        elif not currently_selected and not self.expand_sheet_if_paste_too_big:
            return
        else:
            # nothing selected but expansion allowed: append after existing data
            if not self.data_ref:
                x1, y1 = 0, 0
            else:
                if len(self.col_positions) == 1 and len(self.row_positions) > 1:
                    x1, y1 = 0, len(self.row_positions) - 1
                elif len(self.row_positions) == 1 and len(self.col_positions) > 1:
                    x1, y1 = len(self.col_positions) - 1, 0
                elif len(self.row_positions) > 1 and len(self.col_positions) > 1:
                    x1, y1 = 0, len(self.row_positions) - 1
        try:
            data = self.clipboard_get()
        except:
            return
        data = list(csv_module.reader(io.StringIO(data), delimiter = "\t", quotechar = '"', skipinitialspace = True))
        if not data:
            return
        numcols = len(max(data, key = len))
        numrows = len(data)
        # pad ragged clipboard rows so the paste region is rectangular
        for rn, r in enumerate(data):
            if len(r) < numcols:
                data[rn].extend(list(repeat("", numcols - len(r))))
        if self.undo_enabled:
            undo_storage = {}
        if self.expand_sheet_if_paste_too_big:
            added_rows = 0
            added_cols = 0
            if x1 + numcols > len(self.col_positions) - 1:
                added_cols = x1 + numcols - len(self.col_positions) + 1
                # NOTE(review): this cap subtracts an extra 1 compared to the
                # uncapped branch's "+ 1" arithmetic — verify the intended
                # column/row limit is honored exactly
                if isinstance(self.paste_insert_column_limit, int) and self.paste_insert_column_limit < len(self.col_positions) - 1 + added_cols:
                    added_cols = self.paste_insert_column_limit - len(self.col_positions) - 1
                if added_cols > 0:
                    self.insert_col_positions(widths = int(added_cols))
                if not self.all_columns_displayed:
                    total_data_cols = self.total_data_cols()
                    self.displayed_columns.extend(list(range(total_data_cols, total_data_cols + added_cols)))
            if y1 + numrows > len(self.row_positions) - 1:
                added_rows = y1 + numrows - len(self.row_positions) + 1
                if isinstance(self.paste_insert_row_limit, int) and self.paste_insert_row_limit < len(self.row_positions) - 1 + added_rows:
                    added_rows = self.paste_insert_row_limit - len(self.row_positions) - 1
                if added_rows > 0:
                    self.insert_row_positions(heights = int(added_rows))
            added_rows_cols = (added_rows, added_cols)
        else:
            added_rows_cols = (0, 0)
        # clip the paste region to the sheet bounds
        if x1 + numcols > len(self.col_positions) - 1:
            numcols = len(self.col_positions) - 1 - x1
        if y1 + numrows > len(self.row_positions) - 1:
            numrows = len(self.row_positions) - 1 - y1
        if self.extra_begin_ctrl_v_func is not None or self.extra_end_ctrl_v_func is not None:
            rows = [[data[ndr][ndc] for ndc, c in enumerate(range(x1, x1 + numcols))] for ndr, r in enumerate(range(y1, y1 + numrows))]
        if self.extra_begin_ctrl_v_func is not None:
            try:
                self.extra_begin_ctrl_v_func(PasteEvent("begin_ctrl_v", currently_selected, rows))
            except:
                return
        for ndr, r in enumerate(range(y1, y1 + numrows)):
            for ndc, c in enumerate(range(x1, x1 + numcols)):
                dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                # grow the data store if the target cell does not exist yet
                if r > len(self.data_ref) - 1:
                    self.data_ref.extend([list(repeat("", c + 1)) for r in range((r + 1) - len(self.data_ref))])
                elif c > len(self.data_ref[r]) - 1:
                    self.data_ref[r].extend(list(repeat("", (c + 1) - len(self.data_ref[r]))))
                if (
                    ((r, dcol) in self.cell_options and 'readonly' in self.cell_options[(r, dcol)]) or
                    ((r, dcol) in self.cell_options and 'checkbox' in self.cell_options[(r, dcol)]) or
                    (dcol in self.col_options and 'readonly' in self.col_options[dcol]) or
                    (r in self.row_options and 'readonly' in self.row_options[r]) or
                    # if pasting not allowed in dropdowns and paste value isn't in dropdown values
                    (not self.ctrl_keys_over_dropdowns_enabled and
                     (r, dcol) in self.cell_options and
                     'dropdown' in self.cell_options[(r, dcol)] and
                     data[ndr][ndc] not in self.cell_options[(r, dcol)]['dropdown']['values'])
                ):
                    continue
                if self.undo_enabled:
                    undo_storage[(r, dcol)] = f"{self.data_ref[r][dcol]}"
                self.data_ref[r][dcol] = data[ndr][ndc]
        if self.expand_sheet_if_paste_too_big and self.undo_enabled:
            self.equalize_data_row_lengths()
        self.deselect("all")
        if self.undo_enabled:
            self.undo_storage.append(zlib.compress(pickle.dumps(("edit_cells_paste",
                                                                 undo_storage,
                                                                 (((y1, x1, y1 + numrows, x1 + numcols), "cells"), ), # boxes
                                                                 currently_selected,
                                                                 added_rows_cols))))
        self.create_selected(y1, x1, y1 + numrows, x1 + numcols, "cells")
        self.create_current(y1, x1, type_ = "cell", inside = True if numrows > 1 or numcols > 1 else False)
        self.see(r = y1, c = x1, keep_yscroll = False, keep_xscroll = False, bottom_right_corner = False, check_cell_visibility = True, redraw = False)
        self.refresh()
        if self.extra_end_ctrl_v_func is not None:
            self.extra_end_ctrl_v_func(PasteEvent("end_ctrl_v", currently_selected, rows))
    def delete_key(self, event = None):
        """Clear the contents of every selected cell.

        Selection boxes are decoded from canvas item tags. Readonly cells,
        checkboxes and dropdowns without "" among their values (unless
        ctrl_keys_over_dropdowns_enabled) are skipped. Supports undo.
        """
        if self.anything_selected():
            currently_selected = self.currently_selected()
            if self.undo_enabled:
                undo_storage = {}
            boxes = []
            for item in chain(self.find_withtag("CellSelectFill"), self.find_withtag("RowSelectFill"), self.find_withtag("ColSelectFill"), self.find_withtag("Current_Outside")):
                alltags = self.gettags(item)
                # tag format is "r1_c1_r2_c2"
                box = tuple(int(e) for e in alltags[1].split("_") if e)
                if alltags[0] in ("CellSelectFill", "Current_Outside"):
                    boxes.append((box, "cells"))
                elif alltags[0] == "ColSelectFill":
                    boxes.append((box, "cols"))
                elif alltags[0] == "RowSelectFill":
                    boxes.append((box, "rows"))
            if self.extra_begin_delete_key_func is not None:
                try:
                    self.extra_begin_delete_key_func(CtrlKeyEvent("begin_delete_key", boxes, currently_selected, tuple()))
                except:
                    return
            for (r1, c1, r2, c2), _ in boxes:
                for r in range(r1, r2):
                    for c in range(c1, c2):
                        dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                        if (
                            ((r, dcol) in self.cell_options and ('readonly' in self.cell_options[(r, dcol)] or 'checkbox' in self.cell_options[(r, dcol)])) or
                            # if del key not allowed in dropdowns and empty string isn't in dropdown values
                            (not self.ctrl_keys_over_dropdowns_enabled and
                             (r, dcol) in self.cell_options and
                             'dropdown' in self.cell_options[(r, dcol)] and
                             "" not in self.cell_options[(r, dcol)]['dropdown']['values']) or
                            (dcol in self.col_options and 'readonly' in self.col_options[dcol]) or
                            (r in self.row_options and 'readonly' in self.row_options[r])
                        ):
                            continue
                        try:
                            if self.undo_enabled:
                                undo_storage[(r, dcol)] = f"{self.data_ref[r][dcol]}"
                            self.data_ref[r][dcol] = ""
                        except:
                            continue
            if self.extra_end_delete_key_func is not None:
                self.extra_end_delete_key_func(CtrlKeyEvent("end_delete_key", boxes, currently_selected, undo_storage))
            if self.undo_enabled:
                self.undo_storage.append(zlib.compress(pickle.dumps(("edit_cells", undo_storage, boxes, currently_selected))))
            self.refresh()
    def move_columns_adjust_options_dict(self, col, remove_start, num_cols, move_data = True):
        """Move a run of columns to a new position and remap column options.

        Moves ``num_cols`` columns starting at ``remove_start`` so they end
        up at ``col``, shifting column widths, (optionally) the underlying
        data and headers, and rebuilding ``cell_options`` /
        ``col_options`` / header cell options so they follow their columns.
        When not all columns are displayed, only data is permuted — the
        displayed-columns indexes themselves stay sorted and unchanged.

        Returns a tuple ``(new_selected, dispset)`` where ``new_selected``
        are the column indexes now selected and ``dispset`` maps old data
        column -> new data column (empty when all columns are displayed).
        """
        c = int(col)
        rm1start = int(remove_start)
        rm1end = rm1start + num_cols
        totalcols = int(num_cols)
        # After inserting the moved slice before its source, the source slice
        # sits shifted right by its own length — rm2start/rm2end locate it.
        rm2start = rm1start + (rm1end - rm1start)
        rm2end = rm1end + (rm1end - rm1start)
        orig_selected = list(range(rm1start, rm1start + totalcols))
        self.deselect("all")
        # Current column widths, derived from consecutive position deltas.
        cws = [int(b - a) for a, b in zip(self.col_positions, islice(self.col_positions, 1, len(self.col_positions)))]
        if rm1start > c:
            # Moving left: insert widths at destination, delete shifted source.
            cws[c:c] = cws[rm1start:rm1end]
            cws[rm2start:rm2end] = []
        else:
            # Moving right: insert after destination, delete original source.
            cws[c + 1:c + 1] = cws[rm1start:rm1end]
            cws[rm1start:rm1end] = []
        self.col_positions = list(accumulate(chain([0], (width for width in cws))))
        # Re-create the column selection at the destination, clamped to the
        # rightmost columns if the destination would overshoot the sheet.
        if c + totalcols > len(self.col_positions):
            new_selected = tuple(range(len(self.col_positions) - 1 - totalcols, len(self.col_positions) - 1))
            self.create_selected(0, len(self.col_positions) - 1 - totalcols, len(self.row_positions) - 1, len(self.col_positions) - 1, "cols")
        else:
            if rm1start > c:
                new_selected = tuple(range(c, c + totalcols))
                self.create_selected(0, c, len(self.row_positions) - 1, c + totalcols, "cols")
            else:
                new_selected = tuple(range(c + 1 - totalcols, c + 1))
                self.create_selected(0, c + 1 - totalcols, len(self.row_positions) - 1, c + 1, "cols")
        self.create_current(0, int(new_selected[0]), type_ = "col", inside = True)
        # Map each moved column's old index to its new index.
        newcolsdct = {t1: t2 for t1, t2 in zip(orig_selected, new_selected)}
        if self.all_columns_displayed:
            dispset = {}
            if rm1start > c:
                # Leftward move: data/headers use the same insert-then-delete
                # slice pattern as the widths above.
                if move_data:
                    for rn in range(len(self.data_ref)):
                        if len(self.data_ref[rn]) < rm1end:
                            # Pad short rows so the slice operations are valid.
                            self.data_ref[rn].extend(list(repeat("", rm1end - len(self.data_ref[rn]) + 1)))
                        self.data_ref[rn][c:c] = self.data_ref[rn][rm1start:rm1end]
                        self.data_ref[rn][rm2start:rm2end] = []
                    if isinstance(self.my_hdrs, list) and self.my_hdrs:
                        if len(self.my_hdrs) < rm1end:
                            self.my_hdrs.extend(list(repeat("", rm1end - len(self.my_hdrs) + 1)))
                        self.my_hdrs[c:c] = self.my_hdrs[rm1start:rm1end]
                        self.my_hdrs[rm2start:rm2end] = []
                # Remap options: moved columns follow newcolsdct; columns
                # between destination and source shift right by totalcols.
                new_ch = {}
                for k, v in self.CH.cell_options.items():
                    if k in newcolsdct:
                        new_ch[newcolsdct[k]] = v
                    elif k < rm1start and k >= c:
                        new_ch[k + totalcols] = v
                    else:
                        new_ch[k] = v
                self.CH.cell_options = new_ch
                new_cell = {}
                for k, v in self.cell_options.items():
                    if k[1] in newcolsdct:
                        new_cell[(k[0], newcolsdct[k[1]])] = v
                    elif k[1] < rm1start and k[1] >= c:
                        new_cell[(k[0], k[1] + totalcols)] = v
                    else:
                        new_cell[k] = v
                self.cell_options = new_cell
                new_col = {}
                for k, v in self.col_options.items():
                    if k in newcolsdct:
                        new_col[newcolsdct[k]] = v
                    elif k < rm1start and k >= c:
                        new_col[k + totalcols] = v
                    else:
                        new_col[k] = v
                self.col_options = new_col
            else:
                # Rightward move: insert happens after the destination column.
                c += 1
                if move_data:
                    for rn in range(len(self.data_ref)):
                        if len(self.data_ref[rn]) < c - 1:
                            self.data_ref[rn].extend(list(repeat("", c - len(self.data_ref[rn]))))
                        self.data_ref[rn][c:c] = self.data_ref[rn][rm1start:rm1end]
                        self.data_ref[rn][rm1start:rm1end] = []
                    if isinstance(self.my_hdrs, list) and self.my_hdrs:
                        if len(self.my_hdrs) < c:
                            self.my_hdrs.extend(list(repeat("", c - len(self.my_hdrs))))
                        self.my_hdrs[c:c] = self.my_hdrs[rm1start:rm1end]
                        self.my_hdrs[rm1start:rm1end] = []
                # Columns between source and destination shift left this time.
                new_ch = {}
                for k, v in self.CH.cell_options.items():
                    if k in newcolsdct:
                        new_ch[newcolsdct[k]] = v
                    elif k < c and k > rm1start:
                        new_ch[k - totalcols] = v
                    else:
                        new_ch[k] = v
                self.CH.cell_options = new_ch
                new_cell = {}
                for k, v in self.cell_options.items():
                    if k[1] in newcolsdct:
                        new_cell[(k[0], newcolsdct[k[1]])] = v
                    elif k[1] < c and k[1] > rm1start:
                        new_cell[(k[0], k[1] - totalcols)] = v
                    else:
                        new_cell[k] = v
                self.cell_options = new_cell
                new_col = {}
                for k, v in self.col_options.items():
                    if k in newcolsdct:
                        new_col[newcolsdct[k]] = v
                    elif k < c and k > rm1start:
                        new_col[k - totalcols] = v
                    else:
                        new_col[k] = v
                self.col_options = new_col
        else:
            # moves data around, not displayed columns indexes
            # which remain sorted and the same after drop and drop
            if rm1start > c:
                dispset = {a: b for a, b in zip(self.displayed_columns, (self.displayed_columns[:c] +
                                                                         self.displayed_columns[rm1start:rm1start + totalcols] +
                                                                         self.displayed_columns[c:rm1start] +
                                                                         self.displayed_columns[rm1start + totalcols:]))}
            else:
                dispset = {a: b for a, b in zip(self.displayed_columns, (self.displayed_columns[:rm1start] +
                                                                         self.displayed_columns[rm1start + totalcols:c + 1] +
                                                                         self.displayed_columns[rm1start:rm1start + totalcols] +
                                                                         self.displayed_columns[c + 1:]))}
            # has to pick up elements from all over the place in the original row
            # building an entirely new row is best due to permutations of hidden columns
            if move_data:
                max_idx = max(chain(dispset, dispset.values())) + 1
                for rn in range(len(self.data_ref)):
                    if len(self.data_ref[rn]) < max_idx:
                        self.data_ref[rn][:] = self.data_ref[rn] + list(repeat("", max_idx - len(self.data_ref[rn])))
                    new = []
                    idx = 0
                    done = set()
                    while len(new) < len(self.data_ref[rn]):
                        if idx in dispset and idx not in done:
                            new.append(self.data_ref[rn][dispset[idx]])
                            done.add(idx)
                        elif idx not in done:
                            new.append(self.data_ref[rn][idx])
                            idx += 1
                        else:
                            idx += 1
                    self.data_ref[rn] = new
                if isinstance(self.my_hdrs, list) and self.my_hdrs:
                    if len(self.my_hdrs) < max_idx:
                        self.my_hdrs[:] = self.my_hdrs + list(repeat("", max_idx - len(self.my_hdrs)))
                    new = []
                    idx = 0
                    done = set()
                    while len(new) < len(self.my_hdrs):
                        if idx in dispset and idx not in done:
                            new.append(self.my_hdrs[dispset[idx]])
                            done.add(idx)
                        elif idx not in done:
                            new.append(self.my_hdrs[idx])
                            idx += 1
                        else:
                            idx += 1
                    self.my_hdrs = new
            # Invert the mapping (new -> old becomes old -> new) and remap
            # all option dicts accordingly.
            dispset = {b: a for a, b in dispset.items()}
            self.CH.cell_options = {dispset[k] if k in dispset else k: v for k, v in self.CH.cell_options.items()}
            self.cell_options = {(k[0], dispset[k[1]]) if k[1] in dispset else k: v for k, v in self.cell_options.items()}
            self.col_options = {dispset[k] if k in dispset else k: v for k, v in self.col_options.items()}
        return new_selected, dispset
    def ctrl_z(self, event = None):
        """Undo the most recent undoable action.

        Pops the last entry off ``self.undo_storage`` (entries are usually
        zlib-compressed pickles; plain tuples/dicts are also accepted) and
        reverses it.  The entry's first element names the action type:
        ``edit_cells`` / ``edit_cells_paste``, ``move_cols``, ``move_rows``,
        ``insert_row``, ``insert_col``, ``delete_rows`` or ``delete_cols``.
        The ``extra_begin_ctrl_z_func`` callback may cancel the undo by
        raising; ``extra_end_ctrl_z_func`` is called after the redraw.
        """
        if self.undo_storage:
            if not isinstance(self.undo_storage[-1], (tuple, dict)):
                # Stored compressed — decompress and unpickle.
                undo_storage = pickle.loads(zlib.decompress(self.undo_storage[-1]))
            else:
                undo_storage = self.undo_storage[-1]
            self.deselect("all")
            if self.extra_begin_ctrl_z_func is not None:
                try:
                    self.extra_begin_ctrl_z_func(UndoEvent("begin_ctrl_z", undo_storage[0], undo_storage))
                except:
                    return
            self.undo_storage.pop()
            if undo_storage[0] in ("edit_cells", "edit_cells_paste"):
                # Restore the saved cell values.
                for (r, c), v in undo_storage[1].items():
                    self.data_ref[r][c] = v
                    #if (r, c) in self.cell_options and 'dropdown' in self.cell_options[(r, c)]:
                        #self.cell_options[(r, c)]['dropdown'][0].set_displayed(v)
                # Re-create the selection boxes that were active at edit time
                # and track the top-left corner for scrolling.
                start_row = float("inf")
                start_col = float("inf")
                for box in undo_storage[2]:
                    r1, c1, r2, c2 = box[0]
                    if not self.expand_sheet_if_paste_too_big:
                        self.create_selected(r1, c1, r2, c2, box[1])
                    if r1 < start_row:
                        start_row = r1
                    if c1 < start_col:
                        start_col = c1
                if undo_storage[0] == "edit_cells_paste" and self.expand_sheet_if_paste_too_big:
                    # The paste grew the sheet; shrink rows/cols back.
                    if undo_storage[4][0] > 0:
                        self.del_row_positions(len(self.row_positions) - 1 - undo_storage[4][0], undo_storage[4][0])
                        self.data_ref[:] = self.data_ref[:-undo_storage[4][0]]
                    if undo_storage[4][1] > 0:
                        quick_added_cols = undo_storage[4][1]
                        self.del_col_positions(len(self.col_positions) - 1 - quick_added_cols, quick_added_cols)
                        for rn in range(len(self.data_ref)):
                            self.data_ref[rn][:] = self.data_ref[rn][:-quick_added_cols]
                        if not self.all_columns_displayed:
                            self.displayed_columns[:] = self.displayed_columns[:-quick_added_cols]
                # Restore the "current" cell/row/column marker.
                if undo_storage[3]:
                    if isinstance(undo_storage[3][0], int):
                        self.create_current(undo_storage[3][0], undo_storage[3][1], type_ = "cell", inside = True if self.cell_selected(undo_storage[3][0], undo_storage[3][1]) else False)
                    elif undo_storage[3][0] == "column":
                        self.create_current(0, undo_storage[3][1], type_ = "col", inside = True)
                    elif undo_storage[3][0] == "row":
                        self.create_current(undo_storage[3][1], 0, type_ = "row", inside = True)
                elif start_row < len(self.row_positions) - 1 and start_col < len(self.col_positions) - 1:
                    self.create_current(start_row, start_col, type_ = "cell", inside = True if self.cell_selected(start_row, start_col) else False)
                if start_row < len(self.row_positions) - 1 and start_col < len(self.col_positions) - 1:
                    self.see(r = start_row, c = start_col, keep_yscroll = False, keep_xscroll = False, bottom_right_corner = False, check_cell_visibility = True, redraw = False)
            elif undo_storage[0] == "move_cols":
                # Column moves are symmetric — move them back via the same helper.
                c = undo_storage[1]
                rm1start = undo_storage[2]
                totalcols = len(undo_storage[4])
                self.move_columns_adjust_options_dict(c, rm1start, totalcols)
            elif undo_storage[0] == "move_rows":
                # Reverse a row move: shuffle data, index, heights and options.
                rhs = [int(b - a) for a, b in zip(self.row_positions, islice(self.row_positions, 1, len(self.row_positions)))]
                ins_row = undo_storage[1]
                orig_ins_row = int(ins_row)
                rm1start = undo_storage[2]
                rm1end = undo_storage[3] + 1
                new_selected = undo_storage[4]
                rm2start = rm1start + (rm1end - rm1start)
                rm2end = rm1end + (rm1end - rm1start)
                totalrows = rm1end - rm1start
                if rm1start < ins_row:
                    ins_row += totalrows
                if rm1start > ins_row:
                    # Moving rows upward: insert before, delete the shifted source.
                    try:
                        self.data_ref[ins_row:ins_row] = self.data_ref[rm1start:rm1end]
                        self.data_ref[rm2start:rm2end] = []
                    except:
                        pass
                    if self.my_row_index:
                        try:
                            self.my_row_index[ins_row:ins_row] = self.my_row_index[rm1start:rm1end]
                            self.my_row_index[rm2start:rm2end] = []
                        except:
                            pass
                else:
                    # Moving rows downward: insert after, delete the original source.
                    try:
                        self.data_ref[ins_row:ins_row] = self.data_ref[rm1start:rm1end]
                        self.data_ref[rm1start:rm1end] = []
                    except:
                        pass
                    if self.my_row_index:
                        try:
                            self.my_row_index[ins_row:ins_row] = self.my_row_index[rm1start:rm1end]
                            self.my_row_index[rm1start:rm1end] = []
                        except:
                            pass
                if rm1start > ins_row:
                    rhs[ins_row:ins_row] = rhs[rm1start:rm1end]
                    rhs[rm2start:rm2end] = []
                    self.row_positions = list(accumulate(chain([0], (height for height in rhs))))
                    self.create_current(ins_row, 0, type_ = "row", inside = True)
                    self.create_selected(ins_row, 0, ins_row + totalrows, len(self.col_positions) - 1, "rows")
                else:
                    rhs[ins_row:ins_row] = rhs[rm1start:rm1end]
                    rhs[rm1start:rm1end] = []
                    self.row_positions = list(accumulate(chain([0], (height for height in rhs))))
                    self.create_current(ins_row - totalrows, 0, type_ = "row", inside = True)
                    self.create_selected(ins_row - totalrows, 0, ins_row, len(self.col_positions) - 1, "rows")
                self.see(r = orig_ins_row, c = 0, keep_yscroll = False, keep_xscroll = True, bottom_right_corner = False, check_cell_visibility = True, redraw = False)
                # Pop options belonging to the moved rows, shift everything
                # else, then re-insert the popped options at their new rows.
                rowsiter = tuple(range(rm1start, rm1end))
                rowset = set(rowsiter)
                popped_ri = {t1: t2 for t1, t2 in self.RI.cell_options.items() if t1 in rowset}
                popped_cell = {t1: t2 for t1, t2 in self.cell_options.items() if t1[0] in rowset}
                popped_row = {t1: t2 for t1, t2 in self.row_options.items() if t1 in rowset}
                popped_ri = {t1: self.RI.cell_options.pop(t1) for t1 in popped_ri}
                popped_cell = {t1: self.cell_options.pop(t1) for t1 in popped_cell}
                popped_row = {t1: self.row_options.pop(t1) for t1 in popped_row}
                self.RI.cell_options = {t1 if t1 < rm1start else t1 - totalrows: t2 for t1, t2 in self.RI.cell_options.items()}
                self.RI.cell_options = {t1 if t1 < ins_row else t1 + totalrows: t2 for t1, t2 in self.RI.cell_options.items()}
                self.row_options = {t1 if t1 < rm1start else t1 - totalrows: t2 for t1, t2 in self.row_options.items()}
                self.row_options = {t1 if t1 < ins_row else t1 + totalrows: t2 for t1, t2 in self.row_options.items()}
                self.cell_options = {(t10 if t10 < rm1start else t10 - totalrows, t11): t2 for (t10, t11), t2 in self.cell_options.items()}
                self.cell_options = {(t10 if t10 < ins_row else t10 + totalrows, t11): t2 for (t10, t11), t2 in self.cell_options.items()}
                newrowsdct = {t1: t2 for t1, t2 in zip(rowsiter, new_selected)}
                for t1, t2 in popped_ri.items():
                    self.RI.cell_options[newrowsdct[t1]] = t2
                for t1, t2 in popped_row.items():
                    self.row_options[newrowsdct[t1]] = t2
                for (t10, t11), t2 in popped_cell.items():
                    self.cell_options[(newrowsdct[t10], t11)] = t2
            elif undo_storage[0] == "insert_row":
                # Undo an insert by deleting the inserted rows again.
                self.data_ref[undo_storage[1]['data_row_num']:undo_storage[1]['data_row_num'] + undo_storage[1]['numrows']] = []
                try:
                    self.my_row_index[undo_storage[1]['data_row_num']:undo_storage[1]['data_row_num'] + undo_storage[1]['numrows']] = []
                except:
                    pass
                self.del_row_positions(undo_storage[1]['sheet_row_num'],
                                       undo_storage[1]['numrows'],
                                       deselect_all = False)
                for r in range(undo_storage[1]['sheet_row_num'],
                               undo_storage[1]['sheet_row_num'] + undo_storage[1]['numrows']):
                    if r in self.row_options:
                        del self.row_options[r]
                    if r in self.RI.cell_options:
                        del self.RI.cell_options[r]
                # Shift option keys of rows below the deleted block back up.
                numrows = undo_storage[1]['numrows']
                idx = undo_storage[1]['sheet_row_num'] + undo_storage[1]['numrows']
                self.cell_options = {(rn if rn < idx else rn - numrows, cn): t2 for (rn, cn), t2 in self.cell_options.items()}
                self.row_options = {rn if rn < idx else rn - numrows: t for rn, t in self.row_options.items()}
                self.RI.cell_options = {rn if rn < idx else rn - numrows: t for rn, t in self.RI.cell_options.items()}
                if len(self.row_positions) > 1:
                    start_row = undo_storage[1]['sheet_row_num'] if undo_storage[1]['sheet_row_num'] < len(self.row_positions) - 1 else undo_storage[1]['sheet_row_num'] - 1
                    self.RI.select_row(start_row)
                    self.see(r = start_row, c = 0, keep_yscroll = False, keep_xscroll = False, bottom_right_corner = False, check_cell_visibility = True, redraw = False)
            elif undo_storage[0] == "insert_col":
                # Undo an insert by deleting the inserted columns again.
                self.displayed_columns = undo_storage[1]['displayed_columns']
                qx = undo_storage[1]['data_col_num']
                qnum = undo_storage[1]['numcols']
                for rn in range(len(self.data_ref)):
                    self.data_ref[rn][qx:qx + qnum] = []
                try:
                    self.my_hdrs[qx:qx + qnum] = []
                except:
                    pass
                self.del_col_positions(undo_storage[1]['sheet_col_num'],
                                       undo_storage[1]['numcols'],
                                       deselect_all = False)
                for c in range(undo_storage[1]['sheet_col_num'],
                               undo_storage[1]['sheet_col_num'] + undo_storage[1]['numcols']):
                    if c in self.col_options:
                        del self.col_options[c]
                    if c in self.CH.cell_options:
                        del self.CH.cell_options[c]
                # Shift option keys of columns right of the deleted block back.
                numcols = undo_storage[1]['numcols']
                idx = undo_storage[1]['sheet_col_num'] + undo_storage[1]['numcols']
                self.cell_options = {(rn, cn if cn < idx else cn - numcols): t2 for (rn, cn), t2 in self.cell_options.items()}
                self.col_options = {cn if cn < idx else cn - numcols: t for cn, t in self.col_options.items()}
                self.CH.cell_options = {cn if cn < idx else cn - numcols: t for cn, t in self.CH.cell_options.items()}
                if len(self.col_positions) > 1:
                    start_col = undo_storage[1]['sheet_col_num'] if undo_storage[1]['sheet_col_num'] < len(self.col_positions) - 1 else undo_storage[1]['sheet_col_num'] - 1
                    self.CH.select_col(start_col)
                    self.see(r = 0, c = start_col, keep_yscroll = False, keep_xscroll = False, bottom_right_corner = False, check_cell_visibility = True, redraw = False)
            elif undo_storage[0] == "delete_rows":
                # Re-insert the deleted rows, heights, options and index values.
                for rn, r, h in reversed(undo_storage[1]['deleted_rows']):
                    self.data_ref.insert(rn, r)
                    self.insert_row_position(idx = rn, height = h)
                self.cell_options = undo_storage[1]['cell_options']
                self.row_options = undo_storage[1]['row_options']
                self.RI.cell_options = undo_storage[1]['RI_cell_options']
                for rn, r in reversed(undo_storage[1]['deleted_index_values']):
                    try:
                        self.my_row_index.insert(rn, r)
                    except:
                        continue
                self.reselect_from_get_boxes(undo_storage[1]['selection_boxes'])
            elif undo_storage[0] == "delete_cols":
                # Re-insert the deleted columns, widths, options and headers.
                self.displayed_columns = undo_storage[1]['displayed_columns']
                self.cell_options = undo_storage[1]['cell_options']
                self.col_options = undo_storage[1]['col_options']
                self.CH.cell_options = undo_storage[1]['CH_cell_options']
                for cn, w in reversed(tuple(undo_storage[1]['colwidths'].items())):
                    self.insert_col_position(idx = cn, width = w)
                for cn, rowdict in reversed(tuple(undo_storage[1]['deleted_cols'].items())):
                    for rn, v in rowdict.items():
                        try:
                            self.data_ref[rn].insert(cn, v)
                        except:
                            continue
                for cn, v in reversed(tuple(undo_storage[1]['deleted_hdr_values'].items())):
                    try:
                        self.my_hdrs.insert(cn, v)
                    except:
                        continue
                self.reselect_from_get_boxes(undo_storage[1]['selection_boxes'])
            self.refresh()
            if self.extra_end_ctrl_z_func is not None:
                self.extra_end_ctrl_z_func(UndoEvent("end_ctrl_z", undo_storage[0], undo_storage))
def bind_arrowkeys(self, event = None):
self.arrowkeys_enabled = True
for canvas in (self, self.CH, self.RI, self.TL):
canvas.bind("<Up>", self.arrowkey_UP)
canvas.bind("<Tab>", self.arrowkey_RIGHT)
canvas.bind("<Right>", self.arrowkey_RIGHT)
canvas.bind("<Down>", self.arrowkey_DOWN)
canvas.bind("<Left>", self.arrowkey_LEFT)
canvas.bind("<Prior>", self.page_UP)
canvas.bind("<Next>", self.page_DOWN)
def unbind_arrowkeys(self, event = None):
self.arrowkeys_enabled = False
for canvas in (self, self.CH, self.RI, self.TL):
canvas.unbind("<Up>")
canvas.unbind("<Right>")
canvas.unbind("<Tab>")
canvas.unbind("<Down>")
canvas.unbind("<Left>")
canvas.unbind("<Prior>")
canvas.unbind("<Next>")
def see(self,
r = None,
c = None,
keep_yscroll = False,
keep_xscroll = False,
bottom_right_corner = False,
check_cell_visibility = True,
redraw = True):
need_redraw = False
if check_cell_visibility:
yvis, xvis = self.cell_is_completely_visible(r = r, c = c, separate_axes = True)
else:
yvis, xvis = False, False
if not yvis:
if bottom_right_corner:
if r is not None and not keep_yscroll:
winfo_height = self.winfo_height()
if self.row_positions[r + 1] - self.row_positions[r] > winfo_height:
y = self.row_positions[r]
else:
y = self.row_positions[r + 1] + 1 - winfo_height
args = ("moveto", y / (self.row_positions[-1] + self.empty_vertical))
if args[1] > 1:
args[1] = args[1] - 1
self.yview(*args)
self.RI.yview(*args)
if redraw:
need_redraw = True
else:
if r is not None and not keep_yscroll:
args = ("moveto", self.row_positions[r] / (self.row_positions[-1] + self.empty_vertical))
if args[1] > 1:
args[1] = args[1] - 1
self.yview(*args)
self.RI.yview(*args)
if redraw:
need_redraw = True
if not xvis:
if bottom_right_corner:
if c is not None and not keep_xscroll:
winfo_width = self.winfo_width()
if self.col_positions[c + 1] - self.col_positions[c] > winfo_width:
x = self.col_positions[c]
else:
x = self.col_positions[c + 1] + 1 - winfo_width
args = ("moveto", x / (self.col_positions[-1] + self.empty_horizontal))
self.xview(*args)
self.CH.xview(*args)
if redraw:
need_redraw = True
else:
if c is not None and not keep_xscroll:
args = ("moveto", self.col_positions[c] / (self.col_positions[-1] + self.empty_horizontal))
self.xview(*args)
self.CH.xview(*args)
if redraw:
need_redraw = True
if redraw and need_redraw:
self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
return True
else:
return False
def cell_is_completely_visible(self, r = 0, c = 0, cell_coords = None, separate_axes = False):
cx1, cy1, cx2, cy2 = self.get_canvas_visible_area()
if cell_coords is None:
x1, y1, x2, y2 = self.GetCellCoords(r = r, c = c, sel = True)
else:
x1, y1, x2, y2 = cell_coords
x_vis = True
y_vis = True
if cx1 > x1 or cx2 < x2:
x_vis = False
if cy1 > y1 or cy2 < y2:
y_vis = False
if separate_axes:
return y_vis, x_vis
else:
return False if not y_vis or not x_vis else True
def cell_is_visible(self,r = 0, c = 0, cell_coords = None):
cx1, cy1, cx2, cy2 = self.get_canvas_visible_area()
if cell_coords is None:
x1, y1, x2, y2 = self.GetCellCoords(r = r, c = c, sel = True)
else:
x1, y1, x2, y2 = cell_coords
if x1 <= cx2 or y1 <= cy2 or x2 >= cx1 or y2 >= cy1:
return True
return False
def select_all(self, redraw = True, run_binding_func = True):
self.deselect("all")
if len(self.row_positions) > 1 and len(self.col_positions) > 1:
self.create_current(0, 0, type_ = "cell", inside = True)
self.create_selected(0, 0, len(self.row_positions) - 1, len(self.col_positions) - 1)
if redraw:
self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
if self.select_all_binding_func is not None and run_binding_func:
self.select_all_binding_func(SelectionBoxEvent("select_all_cells", (0, 0, len(self.row_positions) - 1, len(self.col_positions) - 1)))
def select_cell(self, r, c, redraw = False, keep_other_selections = False):
r = int(r)
c = int(c)
ignore_keep = False
if keep_other_selections:
if self.cell_selected(r, c):
self.create_current(r, c, type_ = "cell", inside = True)
else:
ignore_keep = True
if ignore_keep or not keep_other_selections:
self.delete_selection_rects()
self.create_current(r, c, type_ = "cell", inside = False)
if redraw:
self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
if self.selection_binding_func is not None:
self.selection_binding_func(SelectCellEvent("select_cell", r, c))
def move_down(self):
currently_selected = self.currently_selected(get_coords = True)
if currently_selected:
r, c = currently_selected
if (
r < len(self.row_positions) - 2 and
(self.single_selection_enabled or self.toggle_selection_enabled)
):
self.select_cell(r + 1, c)
self.see(r + 1, c, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = True)
def add_selection(self, r, c, redraw = False, run_binding_func = True, set_as_current = False):
r = int(r)
c = int(c)
if set_as_current:
items = self.find_withtag("Current_Outside")
if items:
alltags = self.gettags(items[0])
if alltags[2] == "cell":
r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
add_sel = (r1, c1)
else:
add_sel = tuple()
else:
add_sel = tuple()
self.create_current(r, c, type_ = "cell", inside = True if self.cell_selected(r, c) else False)
if add_sel:
self.add_selection(add_sel[0], add_sel[1], redraw = False, run_binding_func = False, set_as_current = False)
else:
self.create_selected(r, c, r + 1, c + 1)
if redraw:
self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
if self.selection_binding_func is not None and run_binding_func:
self.selection_binding_func(SelectCellEvent("select_cell", r, c))
def toggle_select_cell(self, row, column, add_selection = True, redraw = True, run_binding_func = True, set_as_current = True):
if add_selection:
if self.cell_selected(row, column, inc_rows = True, inc_cols = True):
self.deselect(r = row, c = column, redraw = redraw)
else:
self.add_selection(r = row, c = column, redraw = redraw, run_binding_func = run_binding_func, set_as_current = set_as_current)
else:
if self.cell_selected(row, column, inc_rows = True, inc_cols = True):
self.deselect(r = row, c = column, redraw = redraw)
else:
self.select_cell(row, column, redraw = redraw)
def align_rows(self, rows = [], align = "global", align_index = False): #"center", "w", "e" or "global"
if isinstance(rows, int):
rows_ = [rows]
else:
rows_ = rows
if align == "global":
for r in rows_:
if r in self.row_options and 'align' in self.row_options[r]:
del self.row_options[r]['align']
if align_index and r in self.RI.cell_options and 'align' in self.RI.cell_options[r]:
del self.RI.cell_options[r]['align']
else:
for r in rows_:
if r not in self.row_options:
self.row_options[r] = {}
self.row_options[r]['align'] = align
if align_index:
if r not in self.RI.cell_options:
self.RI.cell_options[r] = {}
self.RI.cell_options[r]['align'] = align
def align_columns(self, columns = [], align = "global", align_header = False): #"center", "w", "e" or "global"
if isinstance(columns, int):
cols_ = [columns]
else:
cols_ = columns
if align == "global":
for c in cols_:
if c in self.col_options and 'align' in self.col_options[c]:
del self.col_options[c]['align']
if align_header and c in self.CH.cell_options and 'align' in self.CH.cell_options[c]:
del self.CH.cell_options[c]['align']
else:
for c in cols_:
if c not in self.col_options:
self.col_options[c] = {}
self.col_options[c]['align'] = align
if align_header:
if c not in self.CH.cell_options:
self.CH.cell_options[c] = {}
self.CH.cell_options[c]['align'] = align
def align_cells(self, row = 0, column = 0, cells = [], align = "global"): #"center", "w", "e" or "global"
if align == "global":
if cells:
for r, c in cells:
if (r, c) in self.cell_options and 'align' in self.cell_options[(r, c)]:
del self.cell_options[(r, c)]['align']
else:
if (row, column) in self.cell_options and 'align' in self.cell_options[(row, column)]:
del self.cell_options[(row, column)]['align']
else:
if cells:
for r, c in cells:
if (r, c) not in self.cell_options:
self.cell_options[(r, c)] = {}
self.cell_options[(r, c)]['align'] = align
else:
if (row, column) not in self.cell_options:
self.cell_options[(row, column)] = {}
self.cell_options[(row, column)]['align'] = align
def readonly_rows(self, rows = [], readonly = True):
if isinstance(rows, int):
rows_ = [rows]
else:
rows_ = rows
if not readonly:
for r in rows_:
if r in self.row_options and 'readonly' in self.row_options[r]:
del self.row_options[r]['readonly']
else:
for r in rows_:
if r not in self.row_options:
self.row_options[r] = {}
self.row_options[r]['readonly'] = True
def readonly_columns(self, columns = [], readonly = True):
if isinstance(columns, int):
cols_ = [columns]
else:
cols_ = columns
if not readonly:
for c in cols_:
if c in self.col_options and 'readonly' in self.col_options[c]:
del self.col_options[c]['readonly']
else:
for c in cols_:
if c not in self.col_options:
self.col_options[c] = {}
self.col_options[c]['readonly'] = True
def readonly_cells(self, row = 0, column = 0, cells = [], readonly = True):
if not readonly:
if cells:
for r, c in cells:
if (r, c) in self.cell_options and 'readonly' in self.cell_options[(r, c)]:
del self.cell_options[(r, c)]['readonly']
else:
if (row, column) in self.cell_options and 'readonly' in self.cell_options[(row, column)]:
del self.cell_options[(row, column)]['readonly']
else:
if cells:
for (r, c) in cells:
if (r, c) not in self.cell_options:
self.cell_options[(r, c)] = {}
self.cell_options[(r, c)]['readonly'] = True
else:
if (row, column) not in self.cell_options:
self.cell_options[(row, column)] = {}
self.cell_options[(row, column)]['readonly'] = True
def highlight_cells(self, r = 0, c = 0, cells = tuple(), bg = None, fg = None, redraw = False, overwrite = True):
if bg is None and fg is None:
return
if cells:
for r_, c_ in cells:
if (r_, c_) not in self.cell_options:
self.cell_options[(r_, c_)] = {}
if 'highlight' in self.cell_options[(r_, c_)] and not overwrite:
self.cell_options[(r_, c_)]['highlight'] = (self.cell_options[(r_, c_)]['highlight'][0] if bg is None else bg,
self.cell_options[(r_, c_)]['highlight'][1] if fg is None else fg)
else:
self.cell_options[(r_, c_)]['highlight'] = (bg, fg)
else:
if isinstance(r, str) and r.lower() == "all" and isinstance(c, int):
riter = range(self.total_data_rows())
citer = (c, )
elif isinstance(c, str) and c.lower() == "all" and isinstance(r, int):
riter = (r, )
citer = range(self.total_data_cols())
elif isinstance(r, int) and isinstance(c, int):
riter = (r, )
citer = (c, )
for r_ in riter:
for c_ in citer:
if (r_, c_) not in self.cell_options:
self.cell_options[(r_, c_)] = {}
if 'highlight' in self.cell_options[(r_, c_)] and not overwrite:
self.cell_options[(r_, c_)]['highlight'] = (self.cell_options[(r_, c_)]['highlight'][0] if bg is None else bg,
self.cell_options[(r_, c_)]['highlight'][1] if fg is None else fg)
else:
self.cell_options[(r_, c_)]['highlight'] = (bg, fg)
if redraw:
self.main_table_redraw_grid_and_text()
def highlight_cols(self, cols = [], bg = None, fg = None, highlight_header = False, redraw = False, overwrite = True):
if bg is None and fg is None:
return
for c in (cols, ) if isinstance(cols, int) else cols:
if c not in self.col_options:
self.col_options[c] = {}
if 'highlight' in self.col_options[c] and not overwrite:
self.col_options[c]['highlight'] = (self.col_options[c]['highlight'][0] if bg is None else bg,
self.col_options[c]['highlight'][1] if fg is None else fg)
else:
self.col_options[c]['highlight'] = (bg, fg)
if highlight_header:
self.CH.highlight_cells(cells = cols, bg = bg, fg = fg)
if redraw:
self.main_table_redraw_grid_and_text(redraw_header = highlight_header)
def highlight_rows(self, rows = [], bg = None, fg = None, highlight_index = False, redraw = False, end_of_screen = False, overwrite = True):
if bg is None and fg is None:
return
for r in (rows, ) if isinstance(rows, int) else rows:
if r not in self.row_options:
self.row_options[r] = {}
if 'highlight' in self.row_options[r] and not overwrite:
self.row_options[r]['highlight'] = (self.row_options[r]['highlight'][0] if bg is None else bg,
self.row_options[r]['highlight'][1] if fg is None else fg,
self.row_options[r]['highlight'][2] if self.row_options[r]['highlight'][2] != end_of_screen else end_of_screen)
else:
self.row_options[r]['highlight'] = (bg, fg, end_of_screen)
if highlight_index:
self.RI.highlight_cells(cells = rows, bg = bg, fg = fg)
if redraw:
self.main_table_redraw_grid_and_text(redraw_row_index = highlight_index)
    def deselect(self, r = None, c = None, cell = None, redraw = True):
        """Remove selections from the sheet.

        Dispatch on the arguments:
          * ``r == "all"``      — clear every selection rectangle.
          * ``r == "allrows"``  — clear all row selections (and the current
            marker if it was a row).
          * ``r == "allcols"``  — clear all column selections (and the
            current marker if it was a column).
          * ``r`` only          — clear any row selection box containing row r.
          * ``c`` only          — clear any column selection box containing
            column c.
          * ``r`` and ``c`` (or ``cell``) — clear every selection box that
            contains that cell.

        Fires ``deselection_binding_func`` with the deleted boxes; selection
        boxes are identified on the canvas by an "r1_c1_r2_c2" tag shared by
        the table, row index and header canvases.
        """
        deselected = tuple()
        deleted_boxes = {}
        if r == "all":
            deselected = ("deselect_all", self.delete_selection_rects())
        elif r == "allrows":
            for item in self.find_withtag("RowSelectFill"):
                alltags = self.gettags(item)
                if alltags:
                    r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                    deleted_boxes[r1, c1, r2, c2] = "rows"
                    # delete the shared box tag from all three canvases
                    self.delete(alltags[1])
                    self.RI.delete(alltags[1])
                    self.CH.delete(alltags[1])
            current = self.currently_selected()
            if current and current[0] == "row":
                deleted_boxes[tuple(int(e) for e in self.get_tags_of_current()[1].split("_") if e)] = "cell"
                self.delete_current()
            deselected = ("deselect_all_rows", deleted_boxes)
        elif r == "allcols":
            for item in self.find_withtag("ColSelectFill"):
                alltags = self.gettags(item)
                if alltags:
                    r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                    deleted_boxes[r1, c1, r2, c2] = "cols"
                    self.delete(alltags[1])
                    self.RI.delete(alltags[1])
                    self.CH.delete(alltags[1])
            current = self.currently_selected()
            if current and current[0] == "column":
                deleted_boxes[tuple(int(e) for e in self.get_tags_of_current()[1].split("_") if e)] = "cell"
                self.delete_current()
            deselected = ("deselect_all_cols", deleted_boxes)
        elif r is not None and c is None and cell is None:
            # deselect a single row
            current = self.find_withtag("Current_Inside") + self.find_withtag("Current_Outside")
            current_tags = self.gettags(current[0]) if current else tuple()
            if current:
                curr_r1, curr_c1, curr_r2, curr_c2 = tuple(int(e) for e in current_tags[1].split("_") if e)
            reset_current = False
            for item in self.find_withtag("RowSelectFill"):
                alltags = self.gettags(item)
                if alltags:
                    r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                    if r >= r1 and r < r2:
                        self.delete(f"{r1}_{c1}_{r2}_{c2}")
                        self.RI.delete(f"{r1}_{c1}_{r2}_{c2}")
                        self.CH.delete(f"{r1}_{c1}_{r2}_{c2}")
                        # if the current marker sat inside this box, it must
                        # be moved to the last remaining selection
                        if not reset_current and current and curr_r1 >= r1 and curr_r1 < r2:
                            reset_current = True
                            deleted_boxes[curr_r1, curr_c1, curr_r2, curr_c2] = "cell"
                        deleted_boxes[r1, c1, r2, c2] = "rows"
            if reset_current:
                self.delete_current()
                self.set_current_to_last()
            deselected = ("deselect_row", deleted_boxes)
        elif c is not None and r is None and cell is None:
            # deselect a single column
            current = self.find_withtag("Current_Inside") + self.find_withtag("Current_Outside")
            current_tags = self.gettags(current[0]) if current else tuple()
            if current:
                curr_r1, curr_c1, curr_r2, curr_c2 = tuple(int(e) for e in current_tags[1].split("_") if e)
            reset_current = False
            for item in self.find_withtag("ColSelectFill"):
                alltags = self.gettags(item)
                if alltags:
                    r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                    if c >= c1 and c < c2:
                        self.delete(f"{r1}_{c1}_{r2}_{c2}")
                        self.RI.delete(f"{r1}_{c1}_{r2}_{c2}")
                        self.CH.delete(f"{r1}_{c1}_{r2}_{c2}")
                        if not reset_current and current and curr_c1 >= c1 and curr_c1 < c2:
                            reset_current = True
                            deleted_boxes[curr_r1, curr_c1, curr_r2, curr_c2] = "cell"
                        deleted_boxes[r1, c1, r2, c2] = "cols"
            if reset_current:
                self.delete_current()
                self.set_current_to_last()
            deselected = ("deselect_column", deleted_boxes)
        elif (r is not None and c is not None and cell is None) or cell is not None:
            # deselect every selection box containing one specific cell
            set_curr = False
            if cell is not None:
                r, c = cell[0], cell[1]
            for item in chain(self.find_withtag("CellSelectFill"),
                              self.find_withtag("RowSelectFill"),
                              self.find_withtag("ColSelectFill"),
                              self.find_withtag("Current_Outside"),
                              self.find_withtag("Current_Inside")):
                alltags = self.gettags(item)
                if alltags:
                    r1, c1, r2, c2 = tuple(int(e) for e in alltags[1].split("_") if e)
                    if (r >= r1 and
                        c >= c1 and
                        r < r2 and
                        c < c2):
                        current = self.currently_selected()
                        # decide whether the current marker needs resetting:
                        # either this box IS the 1x1 current cell, or the
                        # current cell/row/column falls inside this box
                        if (not set_curr and
                            current and
                            r2 - r1 == 1 and
                            c2 - c1 == 1 and
                            r == current[0] and
                            c == current[1]):
                            set_curr = True
                        if current and not set_curr:
                            if isinstance(current[0], int):
                                if (current[0] >= r1 and
                                    current[0] < r2 and
                                    current[1] >= c1 and
                                    current[1] < c2):
                                    set_curr = True
                            elif current[0] == "column":
                                if (current[1] >= c1 and
                                    current[1] < c2):
                                    set_curr = True
                            elif current[0] == "row":
                                if (current[1] >= r1 and
                                    current[1] < r2):
                                    set_curr = True
                        self.delete(f"{r1}_{c1}_{r2}_{c2}")
                        self.RI.delete(f"{r1}_{c1}_{r2}_{c2}")
                        self.CH.delete(f"{r1}_{c1}_{r2}_{c2}")
                        deleted_boxes[(r1, c1, r2, c2)] = "cells"
            if set_curr:
                try:
                    deleted_boxes[tuple(int(e) for e in self.get_tags_of_current()[1].split("_") if e)] = "cells"
                except:
                    pass
                self.delete_current()
                self.set_current_to_last()
            deselected = ("deselect_cell", deleted_boxes)
        if redraw:
            self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
        if self.deselection_binding_func is not None:
            self.deselection_binding_func(DeselectionEvent(*deselected))
    def page_UP(self, event = None):
        """Scroll the table up by one visible page.

        If ``page_up_down_select_row`` is set, the selection is moved to the
        row at the new scroll position instead of only scrolling the view.
        """
        if not self.arrowkeys_enabled:
            return
        height = self.winfo_height()
        top = self.canvasy(0)
        scrollto = top - height
        if scrollto < 0:
            scrollto = 0
        if self.page_up_down_select_row:
            # row whose top edge is at (or just below) the new scroll offset
            r = bisect.bisect_left(self.row_positions, scrollto)
            current = self.currently_selected(get_coords = True)
            # if that row is already the current one, step one further up
            if current and current[0] == r:
                r -= 1
            if r < 0:
                r = 0
            # prefer whole-row selection when rows (not cells/columns) are selected
            if self.RI.row_selection_enabled and (self.anything_selected(exclude_columns = True, exclude_cells = True) or not self.anything_selected()):
                self.RI.select_row(r)
                self.see(r, 0, keep_xscroll = True, check_cell_visibility = False)
            elif (self.single_selection_enabled or self.toggle_selection_enabled) and self.anything_selected(exclude_columns = True, exclude_rows = True):
                # keep the column of the first selected box
                box = self.get_all_selection_boxes_with_types()[0][0]
                self.see(r, box[1], keep_xscroll = True, check_cell_visibility = False)
                self.select_cell(r, box[1])
        else:
            # plain scroll; +100 presumably matches the sheet's extra scroll
            # margin (same constant used in page_DOWN) — TODO confirm
            args = ("moveto", scrollto / (self.row_positions[-1] + 100))
            self.yview(*args)
            self.RI.yview(*args)
            self.main_table_redraw_grid_and_text(redraw_row_index = True)
    def page_DOWN(self, event = None):
        """Scroll the table down by one visible page.

        If ``page_up_down_select_row`` is set (and row selection is enabled),
        the selection is moved to the last row of the new page instead of
        only scrolling the view.
        """
        if not self.arrowkeys_enabled:
            return
        height = self.winfo_height()
        top = self.canvasy(0)
        scrollto = top + height
        if self.page_up_down_select_row and self.RI.row_selection_enabled:
            # last row fully above the new scroll offset
            r = bisect.bisect_left(self.row_positions, scrollto) - 1
            current = self.currently_selected(get_coords = True)
            # if that row is already the current one, step one further down
            if current and current[0] == r:
                r += 1
            # clamp to the last real row (row_positions has one extra boundary)
            if r > len(self.row_positions) - 2:
                r = len(self.row_positions) - 2
            if self.RI.row_selection_enabled and (self.anything_selected(exclude_columns = True, exclude_cells = True) or not self.anything_selected()):
                self.RI.select_row(r)
                self.see(r, 0, keep_xscroll = True, check_cell_visibility = False)
            elif (self.single_selection_enabled or self.toggle_selection_enabled) and self.anything_selected(exclude_columns = True, exclude_rows = True):
                # keep the column of the first selected box
                box = self.get_all_selection_boxes_with_types()[0][0]
                self.see(r, box[1], keep_xscroll = True, check_cell_visibility = False)
                self.select_cell(r, box[1])
        else:
            end = self.row_positions[-1]
            # clamp so we never scroll past the content plus its margin
            if scrollto > end + 100:
                scrollto = end
            args = ("moveto", scrollto / (end + 100))
            self.yview(*args)
            self.RI.yview(*args)
            self.main_table_redraw_grid_and_text(redraw_row_index = True)
    def arrowkey_UP(self, event = None):
        """Move the current selection one row up (row selection or cell selection).

        currently_selected() returns either ("row"/"column", index) or an
        (int row, int col) pair for a cell selection.
        """
        currently_selected = self.currently_selected()
        if not currently_selected or not self.arrowkeys_enabled:
            return
        if currently_selected[0] == "row":
            r = currently_selected[1]
            if r != 0 and self.RI.row_selection_enabled:
                # redraw immediately if no scrolling is needed
                if self.cell_is_completely_visible(r = r - 1, c = 0):
                    self.RI.select_row(r - 1, redraw = True)
                else:
                    self.RI.select_row(r - 1)
                    self.see(r - 1, 0, keep_xscroll = True, check_cell_visibility = False)
        elif isinstance(currently_selected[0],int):
            r = currently_selected[0]
            c = currently_selected[1]
            if r == 0 and self.CH.col_selection_enabled:
                # already on the first row: just scroll it into view if needed
                if not self.cell_is_completely_visible(r = r, c = 0):
                    self.see(r, c, keep_xscroll = True, check_cell_visibility = False)
            elif r != 0 and (self.single_selection_enabled or self.toggle_selection_enabled):
                if self.cell_is_completely_visible(r = r - 1, c = c):
                    self.select_cell(r - 1, c, redraw = True)
                else:
                    self.select_cell(r - 1, c)
                    self.see(r - 1, c, keep_xscroll = True, check_cell_visibility = False)
    def arrowkey_RIGHT(self, event = None):
        """Move the current selection one column right.

        From a row selection, converts to a cell selection in column 0; from
        a column selection, selects the next column header; from a cell,
        selects the cell to the right.
        """
        currently_selected = self.currently_selected()
        if not currently_selected or not self.arrowkeys_enabled:
            return
        if currently_selected[0] == "row":
            r = currently_selected[1]
            if self.single_selection_enabled or self.toggle_selection_enabled:
                if self.cell_is_completely_visible(r = r, c = 0):
                    self.select_cell(r, 0, redraw = True)
                else:
                    self.select_cell(r, 0)
                    self.see(r, 0, keep_yscroll = True, bottom_right_corner = True, check_cell_visibility = False)
        elif currently_selected[0] == "column":
            c = currently_selected[1]
            # col_positions has one extra boundary, hence the -2 upper bound
            if c < len(self.col_positions) - 2 and self.CH.col_selection_enabled:
                if self.cell_is_completely_visible(r = 0, c = c + 1):
                    self.CH.select_col(c + 1, redraw = True)
                else:
                    self.CH.select_col(c + 1)
                    self.see(0, c + 1, keep_yscroll = True, bottom_right_corner = False if self.arrow_key_down_right_scroll_page else True, check_cell_visibility = False)
        elif isinstance(currently_selected[0], int):
            r = currently_selected[0]
            c = currently_selected[1]
            if c < len(self.col_positions) - 2 and (self.single_selection_enabled or self.toggle_selection_enabled):
                if self.cell_is_completely_visible(r = r, c = c + 1):
                    self.select_cell(r, c + 1, redraw =True)
                else:
                    self.select_cell(r, c + 1)
                    self.see(r, c + 1, keep_yscroll = True, bottom_right_corner = False if self.arrow_key_down_right_scroll_page else True, check_cell_visibility = False)
    def arrowkey_DOWN(self, event = None):
        """Move the current selection one row down.

        From a column selection, converts to a cell selection in row 0; from
        a row selection, selects the next row; from a cell, selects the cell
        below.  When possible, scrolls two rows ahead so the next row is
        already visible.
        """
        currently_selected = self.currently_selected()
        if not currently_selected or not self.arrowkeys_enabled:
            return
        if currently_selected[0] == "row":
            r = currently_selected[1]
            # row_positions has one extra boundary, hence the -2 upper bound
            if r < len(self.row_positions) - 2 and self.RI.row_selection_enabled:
                if self.cell_is_completely_visible(r = min(r + 2, len(self.row_positions) - 2), c = 0):
                    self.RI.select_row(r + 1, redraw = True)
                else:
                    self.RI.select_row(r + 1)
                    # scroll two rows ahead when the next two rows fit on screen
                    if r + 2 < len(self.row_positions) - 2 and (self.row_positions[r + 3] - self.row_positions[r + 2]) + (self.row_positions[r + 2] - self.row_positions[r + 1]) + 5 < self.winfo_height():
                        self.see(r + 2, 0, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = False)
                    elif not self.cell_is_completely_visible(r = r + 1, c = 0):
                        self.see(r + 1, 0, keep_xscroll = True, bottom_right_corner = False if self.arrow_key_down_right_scroll_page else True, check_cell_visibility = False)
        elif currently_selected[0] == "column":
            c = currently_selected[1]
            if self.single_selection_enabled or self.toggle_selection_enabled:
                if self.cell_is_completely_visible(r = 0, c = c):
                    self.select_cell(0, c, redraw = True)
                else:
                    self.select_cell(0, c)
                    self.see(0, c, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = False)
        elif isinstance(currently_selected[0], int):
            r = currently_selected[0]
            c = currently_selected[1]
            if r < len(self.row_positions) - 2 and (self.single_selection_enabled or self.toggle_selection_enabled):
                if self.cell_is_completely_visible(r = min(r + 2, len(self.row_positions) - 2), c = c):
                    self.select_cell(r + 1, c, redraw = True)
                else:
                    self.select_cell(r + 1, c)
                    # scroll two rows ahead when the next two rows fit on screen
                    if r + 2 < len(self.row_positions) - 2 and (self.row_positions[r + 3] - self.row_positions[r + 2]) + (self.row_positions[r + 2] - self.row_positions[r + 1]) + 5 < self.winfo_height():
                        self.see(r + 2, c, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = False)
                    elif not self.cell_is_completely_visible(r = r + 1, c = c):
                        self.see(r + 1, c, keep_xscroll = True, bottom_right_corner = False if self.arrow_key_down_right_scroll_page else True, check_cell_visibility = False)
    def arrowkey_LEFT(self, event = None):
        """Move the current selection one column left (column or cell selection)."""
        currently_selected = self.currently_selected()
        if not currently_selected or not self.arrowkeys_enabled:
            return
        if currently_selected[0] == "column":
            c = currently_selected[1]
            if c != 0 and self.CH.col_selection_enabled:
                if self.cell_is_completely_visible(r = 0, c = c - 1):
                    self.CH.select_col(c - 1, redraw = True)
                else:
                    self.CH.select_col(c - 1)
                    self.see(0, c - 1, keep_yscroll = True, bottom_right_corner = True, check_cell_visibility = False)
        elif isinstance(currently_selected[0], int):
            r = currently_selected[0]
            c = currently_selected[1]
            if c == 0 and self.RI.row_selection_enabled:
                # already in the first column: just scroll it into view if needed
                if not self.cell_is_completely_visible(r = r, c = 0):
                    self.see(r, c, keep_yscroll = True, check_cell_visibility = False)
            elif c != 0 and (self.single_selection_enabled or self.toggle_selection_enabled):
                if self.cell_is_completely_visible(r = r, c = c - 1):
                    self.select_cell(r, c - 1, redraw = True)
                else:
                    self.select_cell(r, c - 1)
                    self.see(r, c - 1, keep_yscroll = True, check_cell_visibility = False)
def edit_bindings(self, enable = True, key = None):
if key is None or key == "copy":
if enable:
for s2 in ("c", "C"):
for widget in (self, self.RI, self.CH, self.TL):
widget.bind(f"<{'Command' if USER_OS == 'Darwin' else 'Control'}-{s2}>", self.ctrl_c)
self.copy_enabled = True
else:
for s1 in ("Control", "Command"):
for s2 in ("c", "C"):
for widget in (self, self.RI, self.CH, self.TL):
widget.unbind(f"<{s1}-{s2}>")
self.copy_enabled = False
if key is None or key == "cut":
if enable:
for s2 in ("x", "X"):
for widget in (self, self.RI, self.CH, self.TL):
widget.bind(f"<{'Command' if USER_OS == 'Darwin' else 'Control'}-{s2}>", self.ctrl_x)
self.cut_enabled = True
else:
for s1 in ("Control", "Command"):
for s2 in ("x", "X"):
for widget in (self, self.RI, self.CH, self.TL):
widget.unbind(f"<{s1}-{s2}>")
self.cut_enabled = False
if key is None or key == "paste":
if enable:
for s2 in ("v", "V"):
for widget in (self, self.RI, self.CH, self.TL):
widget.bind(f"<{'Command' if USER_OS == 'Darwin' else 'Control'}-{s2}>", self.ctrl_v)
self.paste_enabled = True
else:
for s1 in ("Control", "Command"):
for s2 in ("v", "V"):
for widget in (self, self.RI, self.CH, self.TL):
widget.unbind(f"<{s1}-{s2}>")
self.paste_enabled = False
if key is None or key == "undo":
if enable:
for s2 in ("z", "Z"):
for widget in (self, self.RI, self.CH, self.TL):
widget.bind(f"<{'Command' if USER_OS == 'Darwin' else 'Control'}-{s2}>", self.ctrl_z)
self.undo_enabled = True
else:
for s1 in ("Control", "Command"):
for s2 in ("z", "Z"):
for widget in (self, self.RI, self.CH, self.TL):
widget.unbind(f"<{s1}-{s2}>")
self.undo_enabled = False
if key is None or key == "delete":
if enable:
for widget in (self, self.RI, self.CH, self.TL):
widget.bind("<Delete>", self.delete_key)
self.delete_key_enabled = True
else:
for widget in (self, self.RI, self.CH, self.TL):
widget.unbind("<Delete>")
self.delete_key_enabled = False
if key is None or key == "edit_cell":
if enable:
self.bind_cell_edit(True)
else:
self.bind_cell_edit(False)
# edit header with text editor (dropdowns and checkboxes not included)
# this will not by enabled by using enable_bindings() to enable all bindings
# must be enabled directly using enable_bindings("edit_header")
if key == "edit_header":
if enable:
self.CH.bind_cell_edit(True)
else:
self.CH.bind_cell_edit(False)
def menu_add_command(self, menu: tk.Menu, **kwargs):
if 'label' not in kwargs:
return
try:
index = menu.index(kwargs['label'])
menu.delete(index)
except TclError:
pass
menu.add_command(**kwargs)
def create_rc_menus(self):
if not self.rc_popup_menu:
self.rc_popup_menu = tk.Menu(self, tearoff = 0, background = self.popup_menu_bg)
if not self.CH.ch_rc_popup_menu:
self.CH.ch_rc_popup_menu = tk.Menu(self.CH, tearoff = 0, background = self.popup_menu_bg)
if not self.RI.ri_rc_popup_menu:
self.RI.ri_rc_popup_menu = tk.Menu(self.RI, tearoff = 0, background = self.popup_menu_bg)
if not self.empty_rc_popup_menu:
self.empty_rc_popup_menu = tk.Menu(self, tearoff = 0, background = self.popup_menu_bg)
for menu in (self.rc_popup_menu,
self.CH.ch_rc_popup_menu,
self.RI.ri_rc_popup_menu,
self.empty_rc_popup_menu):
menu.delete(0, 'end')
if self.cut_enabled:
self.menu_add_command(self.rc_popup_menu, label = "Cut",
accelerator = "Ctrl+X",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_x)
#self.rc_popup_menu.add_separator()
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Cut contents",
accelerator = "Ctrl+X",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_x)
#self.CH.ch_rc_popup_menu.add_separator()
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Cut contents",
accelerator = "Ctrl+X",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_x)
#self.RI.ri_rc_popup_menu.add_separator()
if self.copy_enabled:
self.menu_add_command(self.rc_popup_menu, label = "Copy",
accelerator = "Ctrl+C",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_c)
#self.rc_popup_menu.add_separator()
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Copy contents",
accelerator = "Ctrl+C",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_c)
#self.CH.ch_rc_popup_menu.add_separator()
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Copy contents",
accelerator = "Ctrl+C",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_c)
#self.RI.ri_rc_popup_menu.add_separator()
if self.paste_enabled:
self.menu_add_command(self.rc_popup_menu, label = "Paste",
accelerator = "Ctrl+V",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_v)
#self.rc_popup_menu.add_separator()
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Paste",
accelerator = "Ctrl+V",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_v)
#self.CH.ch_rc_popup_menu.add_separator()
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Paste",
accelerator = "Ctrl+V",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_v)
#self.RI.ri_rc_popup_menu.add_separator()
if self.expand_sheet_if_paste_too_big:
self.menu_add_command(self.empty_rc_popup_menu, label = "Paste",
accelerator = "Ctrl+V",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.ctrl_v)
if self.delete_key_enabled:
self.menu_add_command(self.rc_popup_menu, label = "Delete",
accelerator = "Del",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.delete_key)
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Clear contents",
accelerator = "Del",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.delete_key)
#self.CH.ch_rc_popup_menu.add_separator()
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Clear contents",
accelerator = "Del",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.delete_key)
#self.RI.ri_rc_popup_menu.add_separator()
if self.rc_delete_column_enabled:
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Delete columns",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.del_cols_rc)
#self.CH.ch_rc_popup_menu.add_separator()
if self.rc_insert_column_enabled:
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Insert columns left",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_col_rc("left"))
self.menu_add_command(self.empty_rc_popup_menu, label = "Insert column",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_col_rc("left"))
self.menu_add_command(self.CH.ch_rc_popup_menu, label = "Insert columns right",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_col_rc("right"))
if self.rc_delete_row_enabled:
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Delete rows",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = self.del_rows_rc)
#self.RI.ri_rc_popup_menu.add_separator()
if self.rc_insert_row_enabled:
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Insert rows above",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_row_rc("above"))
self.menu_add_command(self.RI.ri_rc_popup_menu, label = "Insert rows below",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_row_rc("below"))
self.menu_add_command(self.empty_rc_popup_menu, label = "Insert row",
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = lambda: self.insert_row_rc("below"))
for label, func in self.extra_table_rc_menu_funcs.items():
self.menu_add_command(self.rc_popup_menu, label = label,
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = func)
for label, func in self.extra_index_rc_menu_funcs.items():
self.menu_add_command(self.RI.ri_rc_popup_menu, label = label,
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = func)
for label, func in self.extra_header_rc_menu_funcs.items():
self.menu_add_command(self.CH.ch_rc_popup_menu, label = label,
font = self.popup_menu_font,
foreground = self.popup_menu_fg,
background = self.popup_menu_bg,
activebackground = self.popup_menu_highlight_bg,
activeforeground = self.popup_menu_highlight_fg,
command = func)
def bind_cell_edit(self, enable = True, keys = []):
if enable:
self.edit_cell_enabled = True
for w in (self, self.RI, self.CH):
w.bind("<Key>", self.open_cell)
else:
self.edit_cell_enabled = False
for w in (self, self.RI, self.CH):
w.unbind("<Key>")
def enable_bindings(self, bindings):
if not bindings:
self.enable_bindings_internal("all")
elif isinstance(bindings, (list, tuple)):
for binding in bindings:
if isinstance(binding, (list, tuple)):
for bind in binding:
self.enable_bindings_internal(bind.lower())
elif isinstance(binding, str):
self.enable_bindings_internal(binding.lower())
elif isinstance(bindings, str):
self.enable_bindings_internal(bindings.lower())
def disable_bindings(self, bindings):
if not bindings:
self.disable_bindings_internal("all")
elif isinstance(bindings, (list, tuple)):
for binding in bindings:
if isinstance(binding, (list, tuple)):
for bind in binding:
self.disable_bindings_internal(bind.lower())
elif isinstance(binding, str):
self.disable_bindings_internal(binding.lower())
elif isinstance(bindings, str):
self.disable_bindings_internal(bindings)
def enable_disable_select_all(self, enable = True):
self.select_all_enabled = bool(enable)
for s in ("A", "a"):
binding = f"<{'Command' if USER_OS == 'Darwin' else 'Control'}-{s}>"
for widget in (self, self.RI, self.CH, self.TL):
if enable:
widget.bind(binding, self.select_all)
else:
widget.unbind(binding)
def enable_bindings_internal(self, binding):
if binding in ("enable_all", "all"):
self.single_selection_enabled = True
self.toggle_selection_enabled = False
self.drag_selection_enabled = True
self.enable_disable_select_all(True)
self.CH.enable_bindings("column_width_resize")
self.CH.enable_bindings("column_select")
self.CH.enable_bindings("column_height_resize")
self.CH.enable_bindings("drag_and_drop")
self.CH.enable_bindings("double_click_column_resize")
self.RI.enable_bindings("row_height_resize")
self.RI.enable_bindings("double_click_row_resize")
self.RI.enable_bindings("row_width_resize")
self.RI.enable_bindings("row_select")
self.RI.enable_bindings("drag_and_drop")
self.bind_arrowkeys()
self.edit_bindings(True)
self.rc_delete_column_enabled = True
self.rc_delete_row_enabled = True
self.rc_insert_column_enabled = True
self.rc_insert_row_enabled = True
self.rc_popup_menus_enabled = True
self.rc_select_enabled = True
self.TL.rh_state()
self.TL.rw_state()
elif binding in ("single", "single_selection_mode", "single_select"):
self.single_selection_enabled = True
self.toggle_selection_enabled = False
elif binding in ("toggle", "toggle_selection_mode", "toggle_select"):
self.toggle_selection_enabled = True
self.single_selection_enabled = False
elif binding == "drag_select":
self.drag_selection_enabled = True
elif binding == "select_all":
self.enable_disable_select_all(True)
elif binding == "column_width_resize":
self.CH.enable_bindings("column_width_resize")
elif binding == "column_select":
self.CH.enable_bindings("column_select")
elif binding == "column_height_resize":
self.CH.enable_bindings("column_height_resize")
self.TL.rh_state()
elif binding == "column_drag_and_drop":
self.CH.enable_bindings("drag_and_drop")
elif binding == "double_click_column_resize":
self.CH.enable_bindings("double_click_column_resize")
elif binding == "row_height_resize":
self.RI.enable_bindings("row_height_resize")
elif binding == "double_click_row_resize":
self.RI.enable_bindings("double_click_row_resize")
elif binding == "row_width_resize":
self.RI.enable_bindings("row_width_resize")
self.TL.rw_state()
elif binding == "row_select":
self.RI.enable_bindings("row_select")
elif binding == "row_drag_and_drop":
self.RI.enable_bindings("drag_and_drop")
elif binding == "arrowkeys":
self.bind_arrowkeys()
elif binding == "edit_bindings":
self.edit_bindings(True)
elif binding == "rc_delete_column":
self.rc_delete_column_enabled = True
self.rc_popup_menus_enabled = True
self.rc_select_enabled = True
elif binding == "rc_delete_row":
self.rc_delete_row_enabled = True
self.rc_popup_menus_enabled = True
self.rc_select_enabled = True
elif binding == "rc_insert_column":
self.rc_insert_column_enabled = True
self.rc_popup_menus_enabled = True
self.rc_select_enabled = True
elif binding == "rc_insert_row":
self.rc_insert_row_enabled = True
self.rc_popup_menus_enabled = True
self.rc_select_enabled = True
elif binding == "copy":
self.edit_bindings(True, "copy")
elif binding == "cut":
self.edit_bindings(True, "cut")
elif binding == "paste":
self.edit_bindings(True, "paste")
elif binding == "delete":
self.edit_bindings(True, "delete")
elif binding in ("right_click_popup_menu", "rc_popup_menu"):
self.rc_popup_menus_enabled = True
self.rc_select_enabled = True
elif binding in ("right_click_select", "rc_select"):
self.rc_select_enabled = True
elif binding == "undo":
self.edit_bindings(True, "undo")
elif binding == "edit_cell":
self.edit_bindings(True, "edit_cell")
elif binding == "edit_header":
self.edit_bindings(True, "edit_header")
self.create_rc_menus()
    def disable_bindings_internal(self, binding):
        """Disable a single lower-cased binding name (or everything for "all").

        Mirrors enable_bindings_internal(); rebuilds the right-click menus
        at the end so they reflect the new state.
        """
        if binding in ("all", "disable_all"):
            self.single_selection_enabled = False
            self.toggle_selection_enabled = False
            self.drag_selection_enabled = False
            self.enable_disable_select_all(False)
            self.CH.disable_bindings("column_width_resize")
            self.CH.disable_bindings("column_select")
            self.CH.disable_bindings("column_height_resize")
            self.CH.disable_bindings("drag_and_drop")
            self.CH.disable_bindings("double_click_column_resize")
            self.RI.disable_bindings("row_height_resize")
            self.RI.disable_bindings("double_click_row_resize")
            self.RI.disable_bindings("row_width_resize")
            self.RI.disable_bindings("row_select")
            self.RI.disable_bindings("drag_and_drop")
            self.unbind_arrowkeys()
            self.edit_bindings(False)
            self.rc_delete_column_enabled = False
            self.rc_delete_row_enabled = False
            self.rc_insert_column_enabled = False
            self.rc_insert_row_enabled = False
            self.rc_popup_menus_enabled = False
            self.rc_select_enabled = False
            # hide the top-left resize handles
            self.TL.rh_state("hidden")
            self.TL.rw_state("hidden")
        elif binding in ("single", "single_selection_mode", "single_select"):
            self.single_selection_enabled = False
        elif binding in ("toggle", "toggle_selection_mode", "toggle_select"):
            self.toggle_selection_enabled = False
        elif binding == "drag_select":
            self.drag_selection_enabled = False
        elif binding == "select_all":
            self.enable_disable_select_all(False)
        elif binding == "column_width_resize":
            self.CH.disable_bindings("column_width_resize")
        elif binding == "column_select":
            self.CH.disable_bindings("column_select")
        elif binding == "column_height_resize":
            self.CH.disable_bindings("column_height_resize")
            self.TL.rh_state("hidden")
        elif binding == "column_drag_and_drop":
            self.CH.disable_bindings("drag_and_drop")
        elif binding == "double_click_column_resize":
            self.CH.disable_bindings("double_click_column_resize")
        elif binding == "row_height_resize":
            self.RI.disable_bindings("row_height_resize")
        elif binding == "double_click_row_resize":
            self.RI.disable_bindings("double_click_row_resize")
        elif binding == "row_width_resize":
            self.RI.disable_bindings("row_width_resize")
            self.TL.rw_state("hidden")
        elif binding == "row_select":
            self.RI.disable_bindings("row_select")
        elif binding == "row_drag_and_drop":
            self.RI.disable_bindings("drag_and_drop")
        elif binding == "arrowkeys":
            self.unbind_arrowkeys()
        elif binding == "rc_delete_column":
            self.rc_delete_column_enabled = False
        elif binding == "rc_delete_row":
            self.rc_delete_row_enabled = False
        elif binding == "rc_insert_column":
            self.rc_insert_column_enabled = False
        elif binding == "rc_insert_row":
            self.rc_insert_row_enabled = False
        elif binding == "edit_bindings":
            self.edit_bindings(False)
        elif binding == "copy":
            self.edit_bindings(False, "copy")
        elif binding == "cut":
            self.edit_bindings(False, "cut")
        elif binding == "paste":
            self.edit_bindings(False, "paste")
        elif binding == "delete":
            self.edit_bindings(False, "delete")
        elif binding in ("right_click_popup_menu", "rc_popup_menu"):
            self.rc_popup_menus_enabled = False
        elif binding in ("right_click_select", "rc_select"):
            self.rc_select_enabled = False
        elif binding == "undo":
            self.edit_bindings(False, "undo")
        elif binding == "edit_cell":
            self.edit_bindings(False, "edit_cell")
        elif binding == "edit_header":
            self.edit_bindings(False, "edit_header")
        # always rebuild menus so they match the enabled features
        self.create_rc_menus()
def reset_mouse_motion_creations(self, event = None):
self.config(cursor = "")
self.RI.config(cursor = "")
self.CH.config(cursor = "")
self.RI.rsz_w = None
self.RI.rsz_h = None
self.CH.rsz_w = None
self.CH.rsz_h = None
def mouse_motion(self, event):
if (
not self.RI.currently_resizing_height and
not self.RI.currently_resizing_width and
not self.CH.currently_resizing_height and
not self.CH.currently_resizing_width
):
mouse_over_resize = False
x = self.canvasx(event.x)
y = self.canvasy(event.y)
if self.RI.width_resizing_enabled and not mouse_over_resize:
try:
x1, y1, x2, y2 = self.row_width_resize_bbox[0], self.row_width_resize_bbox[1], self.row_width_resize_bbox[2], self.row_width_resize_bbox[3]
if x >= x1 and y >= y1 and x <= x2 and y <= y2:
self.config(cursor = "sb_h_double_arrow")
self.RI.config(cursor = "sb_h_double_arrow")
self.RI.rsz_w = True
mouse_over_resize = True
except:
pass
if self.CH.height_resizing_enabled and not mouse_over_resize:
try:
x1, y1, x2, y2 = self.header_height_resize_bbox[0], self.header_height_resize_bbox[1], self.header_height_resize_bbox[2], self.header_height_resize_bbox[3]
if x >= x1 and y >= y1 and x <= x2 and y <= y2:
self.config(cursor = "sb_v_double_arrow")
self.CH.config(cursor = "sb_v_double_arrow")
self.CH.rsz_h = True
mouse_over_resize = True
except:
pass
if not mouse_over_resize:
self.reset_mouse_motion_creations()
if self.extra_motion_func is not None:
self.extra_motion_func(event)
    def rc(self, event = None):
        """Right-click handler.

        Optionally selects the clicked cell (when rc_select is enabled) and
        pops up the menu matching what was clicked: column header menu for a
        selected column, row index menu for a selected row, table menu for a
        cell, or the empty-area menu outside the data region.
        """
        self.hide_dropdown_window()
        self.focus_set()
        popup_menu = None
        # only act when no resize handle is armed (all rsz flags None)
        if self.single_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
            r = self.identify_row(y = event.y)
            c = self.identify_col(x = event.x)
            if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
                if self.col_selected(c):
                    if self.rc_popup_menus_enabled:
                        popup_menu = self.CH.ch_rc_popup_menu
                elif self.row_selected(r):
                    if self.rc_popup_menus_enabled:
                        popup_menu = self.RI.ri_rc_popup_menu
                elif self.cell_selected(r, c):
                    if self.rc_popup_menus_enabled:
                        popup_menu = self.rc_popup_menu
                else:
                    # clicked an unselected cell: select it first if allowed
                    if self.rc_select_enabled:
                        self.select_cell(r, c, redraw = True)
                    if self.rc_popup_menus_enabled:
                        popup_menu = self.rc_popup_menu
            else:
                # click outside the data area
                popup_menu = self.empty_rc_popup_menu
        elif self.toggle_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
            # same logic as above but using toggle selection for new cells
            r = self.identify_row(y = event.y)
            c = self.identify_col(x = event.x)
            if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
                if self.col_selected(c):
                    if self.rc_popup_menus_enabled:
                        popup_menu = self.CH.ch_rc_popup_menu
                elif self.row_selected(r):
                    if self.rc_popup_menus_enabled:
                        popup_menu = self.RI.ri_rc_popup_menu
                elif self.cell_selected(r, c):
                    if self.rc_popup_menus_enabled:
                        popup_menu = self.rc_popup_menu
                else:
                    if self.rc_select_enabled:
                        self.toggle_select_cell(r, c, redraw = True)
                    if self.rc_popup_menus_enabled:
                        popup_menu = self.rc_popup_menu
            else:
                popup_menu = self.empty_rc_popup_menu
        if self.extra_rc_func is not None:
            self.extra_rc_func(event)
        if popup_menu is not None:
            popup_menu.tk_popup(event.x_root, event.y_root)
    def b1_press(self, event = None):
        """Left-button press handler.

        Closes any open dropdown, deselects on clicks past the data area,
        selects the clicked cell (single or toggle mode), or begins a
        row-index-width / header-height resize drag if one is armed.
        """
        self.closed_dropdown = self.hide_dropdown_window(b1 = True)
        self.focus_set()
        x1, y1, x2, y2 = self.get_canvas_visible_area()
        # click beyond the last row/column clears the selection
        if self.identify_col(x = event.x, allow_end = False) is None or self.identify_row(y = event.y, allow_end = False) is None:
            self.deselect("all")
        r = self.identify_row(y = event.y)
        c = self.identify_col(x = event.x)
        if self.single_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
            if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
                self.select_cell(r, c, redraw = True)
        elif self.toggle_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
            r = self.identify_row(y = event.y)
            c = self.identify_col(x = event.x)
            if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
                self.toggle_select_cell(r, c, redraw = True)
        elif self.RI.width_resizing_enabled and self.RI.rsz_h is None and self.RI.rsz_w == True:
            # begin row index width resize: draw a vertical guide line
            self.RI.currently_resizing_width = True
            self.new_row_width = self.RI.current_width + event.x
            x = self.canvasx(event.x)
            self.create_resize_line(x, y1, x, y2, width = 1, fill = self.RI.resizing_line_fg, tag = "rwl")
        elif self.CH.height_resizing_enabled and self.CH.rsz_w is None and self.CH.rsz_h == True:
            # begin header height resize: draw a horizontal guide line
            self.CH.currently_resizing_height = True
            self.new_header_height = self.CH.current_height + event.y
            y = self.canvasy(event.y)
            self.create_resize_line(x1, y, x2, y, width = 1, fill = self.RI.resizing_line_fg, tag = "rhl")
        # remembered for release handling (e.g. click-vs-drag decisions)
        self.b1_pressed_loc = (r, c)
        if self.extra_b1_press_func is not None:
            self.extra_b1_press_func(event)
def create_resize_line(self, x1, y1, x2, y2, width, fill, tag):
if self.hidd_resize_lines:
t, sh = self.hidd_resize_lines.popitem()
self.coords(t, x1, y1, x2, y2)
if sh:
self.itemconfig(t, width = width, fill = fill, tag = tag)
else:
self.itemconfig(t, width = width, fill = fill, tag = tag, state = "normal")
self.lift(t)
else:
t = self.create_line(x1, y1, x2, y2, width = width, fill = fill, tag = tag)
self.disp_resize_lines[t] = True
def delete_resize_lines(self):
self.hidd_resize_lines.update(self.disp_resize_lines)
self.disp_resize_lines = {}
for t, sh in self.hidd_resize_lines.items():
if sh:
self.itemconfig(t, state = "hidden")
self.hidd_resize_lines[t] = False
    def shift_b1_press(self, event = None):
        """Shift + left click: extend the current selection into a rectangle
        from the currently selected cell to the clicked cell, or start a new
        single-cell selection when nothing is selected. Fires the shift
        selection binding with the resulting box afterwards.
        """
        self.hide_dropdown_window()
        self.focus_set()
        # Only act when no resize operation is armed on the index/header.
        if self.drag_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
            self.b1_pressed_loc = None
            rowsel = int(self.identify_row(y = event.y))
            colsel = int(self.identify_col(x = event.x))
            if rowsel < len(self.row_positions) - 1 and colsel < len(self.col_positions) - 1:
                currently_selected = self.currently_selected()
                # A cell anchor (int row) exists: build a box between it and the click.
                if currently_selected and isinstance(currently_selected[0], int):
                    min_r = currently_selected[0]
                    min_c = currently_selected[1]
                    self.delete_selection_rects(delete_current = False)
                    # Normalize the four possible click quadrants into a top-left /
                    # bottom-right (exclusive) rectangle.
                    if rowsel >= min_r and colsel >= min_c:
                        self.create_selected(min_r, min_c, rowsel + 1, colsel + 1)
                    elif rowsel >= min_r and min_c >= colsel:
                        self.create_selected(min_r, colsel, rowsel + 1, min_c + 1)
                    elif min_r >= rowsel and colsel >= min_c:
                        self.create_selected(rowsel, min_c, min_r + 1, colsel + 1)
                    elif min_r >= rowsel and min_c >= colsel:
                        self.create_selected(rowsel, colsel, min_r + 1, min_c + 1)
                    # Recover the created box coords from the canvas item's tags.
                    last_selected = tuple(int(e) for e in self.gettags(self.find_withtag("CellSelectFill"))[1].split("_") if e)
                else:
                    # No anchor: behave like a plain single-cell selection.
                    self.select_cell(rowsel, colsel, redraw = False)
                    last_selected = tuple(int(e) for e in self.gettags(self.find_withtag("Current_Outside"))[1].split("_") if e)
                self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
                if self.shift_selection_binding_func is not None:
                    self.shift_selection_binding_func(SelectionBoxEvent("shift_select_cells", last_selected))
    def b1_motion(self, event):
        """Mouse drag with button 1 held.

        Handles three mutually exclusive drag modes: extending a drag
        selection rectangle (auto-scrolling at the canvas edges), live
        row-index width resizing, or live header height resizing. Fires
        the user's extra motion binding last.
        """
        x1, y1, x2, y2 = self.get_canvas_visible_area()
        if self.drag_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
            end_row = self.identify_row(y = event.y)
            end_col = self.identify_col(x = event.x)
            currently_selected = self.currently_selected()
            if end_row < len(self.row_positions) - 1 and end_col < len(self.col_positions) - 1 and currently_selected and isinstance(currently_selected[0], int):
                start_row = currently_selected[0]
                start_col = currently_selected[1]
                # Normalize the drag direction into a top-left / bottom-right
                # (exclusive) rectangle.
                if end_row >= start_row and end_col >= start_col:
                    rect = (start_row, start_col, end_row + 1, end_col + 1)
                elif end_row >= start_row and end_col < start_col:
                    rect = (start_row, end_col, end_row + 1, start_col + 1)
                elif end_row < start_row and end_col >= start_col:
                    rect = (end_row, start_col, start_row + 1, end_col + 1)
                elif end_row < start_row and end_col < start_col:
                    rect = (end_row, end_col, start_row + 1, start_col + 1)
                # Only redraw the selection box when the rectangle changed.
                if self.being_drawn_rect != rect:
                    self.delete_selection_rects(delete_current = False)
                    self.create_selected(*rect)
                    self.being_drawn_rect = rect
                    if self.drag_selection_binding_func is not None:
                        self.drag_selection_binding_func(SelectionBoxEvent("drag_select_cells", tuple(int(e) for e in self.gettags(self.find_withtag("CellSelectFill"))[1].split("_") if e)))
            if self.data_ref:
                # Auto-scroll one unit when the pointer leaves the visible area,
                # keeping header/index in sync; best-effort (scrolling may fail
                # at the extremes, hence the bare excepts).
                xcheck = self.xview()
                ycheck = self.yview()
                if len(xcheck) > 1 and xcheck[0] > 0 and event.x < 0:
                    try:
                        self.xview_scroll(-1, "units")
                        self.CH.xview_scroll(-1, "units")
                    except:
                        pass
                if len(ycheck) > 1 and ycheck[0] > 0 and event.y < 0:
                    try:
                        self.yview_scroll(-1, "units")
                        self.RI.yview_scroll(-1, "units")
                    except:
                        pass
                if len(xcheck) > 1 and xcheck[1] < 1 and event.x > self.winfo_width():
                    try:
                        self.xview_scroll(1, "units")
                        self.CH.xview_scroll(1, "units")
                    except:
                        pass
                if len(ycheck) > 1 and ycheck[1] < 1 and event.y > self.winfo_height():
                    try:
                        self.yview_scroll(1, "units")
                        self.RI.yview_scroll(1, "units")
                    except:
                        pass
            self.check_views()
            self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
        elif self.RI.width_resizing_enabled and self.RI.rsz_w is not None and self.RI.currently_resizing_width:
            # Live row-index width resize: redraw the guide line at the pointer.
            self.RI.delete_resize_lines()
            self.delete_resize_lines()
            if event.x >= 0:
                x = self.canvasx(event.x)
                self.new_row_width = self.RI.current_width + event.x
                self.create_resize_line(x, y1, x, y2, width = 1, fill = self.RI.resizing_line_fg, tag = "rwl")
            else:
                # Pointer is left of the table: clamp to the minimum width and
                # draw the guide on the row index canvas instead.
                x = self.RI.current_width + event.x
                if x < self.min_cw:
                    x = int(self.min_cw)
                self.new_row_width = x
                self.RI.create_resize_line(x, y1, x, y2, width = 1, fill = self.RI.resizing_line_fg, tag = "rwl")
        elif self.CH.height_resizing_enabled and self.CH.rsz_h is not None and self.CH.currently_resizing_height:
            # Live header height resize: redraw the guide line at the pointer.
            self.CH.delete_resize_lines()
            self.delete_resize_lines()
            if event.y >= 0:
                y = self.canvasy(event.y)
                self.new_header_height = self.CH.current_height + event.y
                self.create_resize_line(x1, y, x2, y, width = 1, fill = self.RI.resizing_line_fg, tag = "rhl")
            else:
                # Pointer is above the table: clamp to the minimum header height
                # and draw the guide on the header canvas instead.
                y = self.CH.current_height + event.y
                if y < self.hdr_min_rh:
                    y = int(self.hdr_min_rh)
                self.new_header_height = y
                self.CH.create_resize_line(x1, y, x2, y, width = 1, fill = self.RI.resizing_line_fg, tag = "rhl")
        if self.extra_b1_motion_func is not None:
            self.extra_b1_motion_func(event)
    def b1_release(self, event = None):
        """Left mouse button release.

        Finalizes an in-progress row-index width or header height resize,
        clears the drag-selection state, and — if the release happened on
        the same cell as the press — opens that cell's dropdown or toggles
        its checkbox. Fires the user's extra release binding last.
        """
        if self.RI.width_resizing_enabled and self.RI.rsz_w is not None and self.RI.currently_resizing_width:
            # Commit the new row index width chosen during the drag.
            self.delete_resize_lines()
            self.RI.delete_resize_lines()
            self.RI.currently_resizing_width = False
            self.RI.set_width(self.new_row_width, set_TL = True)
            self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
        elif self.CH.height_resizing_enabled and self.CH.rsz_h is not None and self.CH.currently_resizing_height:
            # Commit the new header height chosen during the drag.
            self.delete_resize_lines()
            self.CH.delete_resize_lines()
            self.CH.currently_resizing_height = False
            self.CH.set_height(self.new_header_height, set_TL = True)
            self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
        self.RI.rsz_w = None
        self.CH.rsz_h = None
        self.being_drawn_rect = None
        if self.b1_pressed_loc is not None:
            r = self.identify_row(y = event.y, allow_end = False)
            c = self.identify_col(x = event.x, allow_end = False)
            # Press and release on the same cell counts as a click on that cell.
            if r is not None and c is not None and (r, c) == self.b1_pressed_loc:
                dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                if (r, dcol) in self.cell_options and ('dropdown' in self.cell_options[(r, dcol)] or 'checkbox' in self.cell_options[(r, dcol)]):
                    # Re-open a dropdown only if this press did not just close it.
                    if (self.closed_dropdown != self.b1_pressed_loc and
                        'dropdown' in self.cell_options[(r, dcol)]):
                        self.open_cell(event)
                    # Checkbox toggles only when clicked within its square.
                    elif 'checkbox' in self.cell_options[(r, dcol)] and event.x < self.col_positions[c] + self.txt_h + 5 and event.y < self.row_positions[r] + self.txt_h + 5:
                        self.open_cell(event)
                self.hide_dropdown_window()
            else:
                self.hide_dropdown_window()
        else:
            self.hide_dropdown_window()
        self.b1_pressed_loc = None
        self.closed_dropdown = None
        if self.extra_b1_release_func is not None:
            self.extra_b1_release_func(event)
    def double_b1(self, event = None):
        """Double left-click: select (or toggle) the clicked cell and, when
        cell editing is enabled, open the in-cell editor. Clicks outside the
        grid clear the selection. Fires the user's extra binding last.
        """
        self.hide_dropdown_window()
        self.focus_set()
        x1, y1, x2, y2 = self.get_canvas_visible_area()
        if self.identify_col(x = event.x, allow_end = False) is None or self.identify_row(y = event.y, allow_end = False) is None:
            self.deselect("all")
        elif self.single_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
            r = self.identify_row(y = event.y)
            c = self.identify_col(x = event.x)
            if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
                self.select_cell(r, c, redraw = True)
                if self.edit_cell_enabled:
                    self.open_cell(event)
        elif self.toggle_selection_enabled and all(v is None for v in (self.RI.rsz_h, self.RI.rsz_w, self.CH.rsz_h, self.CH.rsz_w)):
            r = self.identify_row(y = event.y)
            c = self.identify_col(x = event.x)
            if r < len(self.row_positions) - 1 and c < len(self.col_positions) - 1:
                self.toggle_select_cell(r, c, redraw = True)
                if self.edit_cell_enabled:
                    self.open_cell(event)
        if self.extra_double_b1_func is not None:
            self.extra_double_b1_func(event)
def identify_row(self, event = None, y = None, allow_end = True):
if event is None:
y2 = self.canvasy(y)
elif y is None:
y2 = self.canvasy(event.y)
r = bisect.bisect_left(self.row_positions, y2)
if r != 0:
r -= 1
if not allow_end and r >= len(self.row_positions) - 1:
return None
return r
def identify_col(self, event = None, x = None, allow_end = True):
if event is None:
x2 = self.canvasx(x)
elif x is None:
x2 = self.canvasx(event.x)
c = bisect.bisect_left(self.col_positions, x2)
if c != 0:
c -= 1
if not allow_end and c >= len(self.col_positions) - 1:
return None
return c
def GetCellCoords(self, event = None, r = None, c = None, sel = False):
if event is not None:
r = self.identify_row(event)
c = self.identify_col(event)
elif r is not None and c is not None:
if sel:
return self.col_positions[c] + 1,self.row_positions[r] + 1, self.col_positions[c + 1], self.row_positions[r + 1]
else:
return self.col_positions[c], self.row_positions[r], self.col_positions[c + 1], self.row_positions[r + 1]
def check_views(self):
xcheck = self.xview()
ycheck = self.yview()
if xcheck and xcheck[0] <= 0:
self.xview(*("moveto", 0))
if self.show_header:
self.CH.xview(*("moveto", 0))
elif len(xcheck) > 1 and xcheck[1] >= 1:
self.xview(*("moveto", 1))
if self.show_header:
self.CH.xview(*("moveto", 1))
if ycheck and ycheck[0] <= 0:
self.yview(*("moveto", 0))
if self.show_index:
self.RI.yview(*("moveto", 0))
elif len(ycheck) > 1 and ycheck[1] >= 1:
self.yview(*("moveto", 1))
if self.show_index:
self.RI.yview(*("moveto", 1))
def set_xviews(self, *args):
self.xview(*args)
if self.show_header:
self.CH.xview(*args)
self.check_views()
self.main_table_redraw_grid_and_text(redraw_header = True if self.show_header else False)
def set_yviews(self, *args):
self.yview(*args)
if self.show_index:
self.RI.yview(*args)
self.check_views()
self.main_table_redraw_grid_and_text(redraw_row_index = True if self.show_index else False)
def set_view(self, x_args, y_args):
self.xview(*x_args)
if self.show_header:
self.CH.xview(*x_args)
self.yview(*y_args)
if self.show_index:
self.RI.yview(*y_args)
self.check_views()
self.main_table_redraw_grid_and_text(redraw_row_index = True if self.show_index else False,
redraw_header = True if self.show_header else False)
def mousewheel(self, event = None):
if event.delta < 0 or event.num == 5:
self.yview_scroll(1, "units")
self.RI.yview_scroll(1, "units")
elif event.delta >= 0 or event.num == 4:
if self.canvasy(0) <= 0:
return
self.yview_scroll(-1, "units")
self.RI.yview_scroll(-1, "units")
self.main_table_redraw_grid_and_text(redraw_row_index = True)
def shift_mousewheel(self, event = None):
if event.delta < 0 or event.num == 5:
self.xview_scroll(1, "units")
self.CH.xview_scroll(1, "units")
elif event.delta >= 0 or event.num == 4:
if self.canvasx(0) <= 0:
return
self.xview_scroll(-1, "units")
self.CH.xview_scroll(-1, "units")
self.main_table_redraw_grid_and_text(redraw_header = True)
def GetWidthChars(self, width):
char_w = self.GetTextWidth("_")
return int(width / char_w)
def GetTextWidth(self, txt):
self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = txt, font = self.my_font)
b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
return b[2] - b[0]
def GetTextHeight(self, txt):
self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = txt, font = self.my_font)
b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
return b[3] - b[1]
def GetHdrTextWidth(self, txt):
self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = txt, font = self.my_hdr_font)
b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
return b[2] - b[0]
def GetHdrTextHeight(self, txt):
self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = txt, font = self.my_hdr_font)
b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
return b[3] - b[1]
def set_min_cw(self):
#w1 = self.GetHdrTextWidth("X") + 5
#w2 = self.GetTextWidth("X") + 5
#if w1 >= w2:
# self.min_cw = w1
#else:
# self.min_cw = w2
self.min_cw = 5
if self.min_cw > self.CH.max_cw:
self.CH.max_cw = self.min_cw + 20
if self.min_cw > self.default_cw:
self.default_cw = self.min_cw + 20
def font(self, newfont = None, reset_row_positions = False):
if newfont:
if not isinstance(newfont, tuple):
raise ValueError("Argument must be tuple e.g. ('Carlito',12,'normal')")
if len(newfont) != 3:
raise ValueError("Argument must be three-tuple")
if (
not isinstance(newfont[0], str) or
not isinstance(newfont[1], int) or
not isinstance(newfont[2], str)
):
raise ValueError("Argument must be font, size and 'normal', 'bold' or 'italic' e.g. ('Carlito',12,'normal')")
else:
self.my_font = newfont
self.fnt_fam = newfont[0]
self.fnt_sze = newfont[1]
self.fnt_wgt = newfont[2]
self.set_fnt_help()
if reset_row_positions:
self.reset_row_positions()
else:
return self.my_font
    def set_fnt_help(self):
        """Recompute table-font-derived metrics: text height, first-line
        inset, extra-line increment, minimum row height and the default row
        height, then refresh the minimum column width."""
        # Measure a string with tall ascenders/descenders for a worst-case height.
        self.txt_h = self.GetTextHeight("|ZXjy*'^")
        self.half_txt_h = ceil(self.txt_h / 2)
        if self.half_txt_h % 2 == 0:
            self.fl_ins = self.half_txt_h + 2
        else:
            self.fl_ins = self.half_txt_h + 3
        self.xtra_lines_increment = int(self.txt_h)
        self.min_rh = self.txt_h + 5
        if self.min_rh < 12:
            self.min_rh = 12
        #self.min_rh = 5
        # NOTE(review): the inner conditionals below are redundant — the
        # enclosing if already guarantees default_rh[0] != "pixels".
        if self.default_rh[0] != "pixels":
            self.default_rh = (self.default_rh[0] if self.default_rh[0] != "pixels" else "pixels",
                               self.GetLinesHeight(int(self.default_rh[0])) if self.default_rh[0] != "pixels" else self.default_rh[1])
        self.set_min_cw()
def header_font(self, newfont = None):
if newfont:
if not isinstance(newfont, tuple):
raise ValueError("Argument must be tuple e.g. ('Carlito', 12, 'normal')")
if len(newfont) != 3:
raise ValueError("Argument must be three-tuple")
if (
not isinstance(newfont[0], str) or
not isinstance(newfont[1], int) or
not isinstance(newfont[2], str)
):
raise ValueError("Argument must be font, size and 'normal', 'bold' or 'italic' e.g. ('Carlito', 12, 'normal')")
else:
self.my_hdr_font = newfont
self.hdr_fnt_fam = newfont[0]
self.hdr_fnt_sze = newfont[1]
self.hdr_fnt_wgt = newfont[2]
self.set_hdr_fnt_help()
else:
return self.my_hdr_font
    def set_hdr_fnt_help(self):
        """Recompute header-font-derived metrics (text height, first-line
        inset, extra-line increment, minimum header height and default
        header height), refresh the minimum column width and apply the
        default height to the header canvas."""
        # Measure a string with tall ascenders/descenders for a worst-case height.
        self.hdr_txt_h = self.GetHdrTextHeight("|ZXj*'^")
        self.hdr_half_txt_h = ceil(self.hdr_txt_h / 2)
        if self.hdr_half_txt_h % 2 == 0:
            self.hdr_fl_ins = self.hdr_half_txt_h + 2
        else:
            self.hdr_fl_ins = self.hdr_half_txt_h + 3
        self.hdr_xtra_lines_increment = self.hdr_txt_h
        self.hdr_min_rh = self.hdr_txt_h + 5
        # NOTE(review): the inner conditionals below are redundant — the
        # enclosing if already guarantees default_hh[0] != "pixels".
        if self.default_hh[0] != "pixels":
            self.default_hh = (self.default_hh[0] if self.default_hh[0] != "pixels" else "pixels",
                               self.GetHdrLinesHeight(int(self.default_hh[0])) if self.default_hh[0] != "pixels" else self.default_hh[1])
        self.set_min_cw()
        self.CH.set_height(self.default_hh[1])
def data_reference(self, newdataref = None, reset_col_positions = True, reset_row_positions = True, redraw = False, return_id = True):
if isinstance(newdataref, (list, tuple)):
self.data_ref = newdataref
self.undo_storage = deque(maxlen = self.max_undos)
if reset_col_positions:
self.reset_col_positions()
if reset_row_positions:
self.reset_row_positions()
if redraw:
self.main_table_redraw_grid_and_text(redraw_header = True, redraw_row_index = True)
if return_id:
return id(self.data_ref)
else:
return self.data_ref
    def set_cell_size_to_text(self, r, c, only_set_if_too_small = False, redraw = True, run_binding = False):
        """Resize a single displayed cell (row *r*, displayed column *c*) to
        fit its text (or checkbox label), clamped to the min/max row height
        and column width.

        With *only_set_if_too_small*, the cell may only grow; otherwise it
        is set to the measured size exactly. With *run_binding*, the column
        width / row height resize callbacks fire on change. Selection boxes
        are rebuilt and the table optionally redrawn.
        """
        min_cw = self.min_cw
        min_rh = self.min_rh
        h = int(min_rh)
        w = int(min_cw)
        # Map the displayed column to the underlying data column.
        if self.all_columns_displayed:
            cn = int(c)
        else:
            cn = self.displayed_columns[c]
        rn = int(r)
        if (rn, cn) in self.cell_options and 'checkbox' in self.cell_options[(rn, cn)]:
            # Measure the checkbox label plus room for the checkbox square.
            self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = self.cell_options[(rn, cn)]['checkbox']['text'], font = self.my_hdr_font)
            b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
            tw = b[2] - b[0] + 7 + self.txt_h
            if b[3] - b[1] + 5 > h:
                h = b[3] - b[1] + 5
        else:
            # Missing rows/columns are treated as empty text.
            try:
                if isinstance(self.data_ref[r][cn], str):
                    txt = self.data_ref[r][cn]
                else:
                    txt = f"{self.data_ref[r][cn]}"
            except:
                txt = ""
            if txt:
                self.txt_measure_canvas.itemconfig(self.txt_measure_canvas_text, text = txt, font = self.my_font)
                b = self.txt_measure_canvas.bbox(self.txt_measure_canvas_text)
                # Dropdown cells reserve extra width for the arrow.
                tw = b[2] - b[0] + self.txt_h + 7 if (rn, cn) in self.cell_options and 'dropdown' in self.cell_options[(rn, cn)] else b[2] - b[0] + 7
                if b[3] - b[1] + 5 > h:
                    h = b[3] - b[1] + 5
            else:
                if (rn, cn) in self.cell_options and 'dropdown' in self.cell_options[(rn, cn)]:
                    tw = self.txt_h + 7
                else:
                    tw = min_cw
        if tw > w:
            w = tw
        # Clamp to the allowed row height / column width range.
        if h < min_rh:
            h = int(min_rh)
        elif h > self.RI.max_rh:
            h = int(self.RI.max_rh)
        if w < min_cw:
            w = int(min_cw)
        elif w > self.CH.max_cw:
            w = int(self.CH.max_cw)
        cell_needs_resize_w = False
        cell_needs_resize_h = False
        if only_set_if_too_small:
            if w > self.col_positions[c + 1] - self.col_positions[c]:
                cell_needs_resize_w = True
            if h > self.row_positions[r + 1] - self.row_positions[r]:
                cell_needs_resize_h = True
        else:
            if w != self.col_positions[c + 1] - self.col_positions[c]:
                cell_needs_resize_w = True
            if h != self.row_positions[r + 1] - self.row_positions[r]:
                cell_needs_resize_h = True
        if cell_needs_resize_w:
            # Shift every later column boundary by the width change.
            old_width = self.col_positions[c + 1] - self.col_positions[c]
            new_col_pos = self.col_positions[c] + w
            increment = new_col_pos - self.col_positions[c + 1]
            self.col_positions[c + 2:] = [e + increment for e in islice(self.col_positions, c + 2, len(self.col_positions))]
            self.col_positions[c + 1] = new_col_pos
            new_width = self.col_positions[c + 1] - self.col_positions[c]
            if run_binding and self.CH.column_width_resize_func is not None and old_width != new_width:
                self.CH.column_width_resize_func(ResizeEvent("column_width_resize", c, old_width, new_width))
        if cell_needs_resize_h:
            # Shift every later row boundary by the height change.
            old_height = self.row_positions[r + 1] - self.row_positions[r]
            new_row_pos = self.row_positions[r] + h
            increment = new_row_pos - self.row_positions[r + 1]
            self.row_positions[r + 2:] = [e + increment for e in islice(self.row_positions, r + 2, len(self.row_positions))]
            self.row_positions[r + 1] = new_row_pos
            new_height = self.row_positions[r + 1] - self.row_positions[r]
            if run_binding and self.RI.row_height_resize_func is not None and old_height != new_height:
                self.RI.row_height_resize_func(ResizeEvent("row_height_resize", r, old_height, new_height))
        if cell_needs_resize_w or cell_needs_resize_h:
            self.recreate_all_selection_boxes()
            if redraw:
                self.refresh()
    def set_all_cell_sizes_to_text(self, include_index = False):
        """Measure every displayed cell (plus headers and, when present, the
        row index list) and rebuild ``row_positions`` / ``col_positions``
        so each column fits its widest cell and each row its tallest,
        clamped to the configured min/max sizes. Returns the new
        (row_positions, col_positions).

        NOTE(review): the *include_index* parameter is not referenced in
        this body — the row index is measured whenever ``my_row_index`` is
        a list; verify intended behavior against callers.
        """
        min_cw = self.min_cw
        min_rh = self.min_rh
        # Per-row heights default to the minimum row height.
        rhs = defaultdict(lambda: int(min_rh))
        cws = []
        # Temporary canvas text items for measuring table and header fonts.
        x = self.txt_measure_canvas.create_text(0, 0, text = "", font = self.my_font)
        x2 = self.txt_measure_canvas.create_text(0, 0, text = "", font = self.my_hdr_font)
        itmcon = self.txt_measure_canvas.itemconfig
        itmbbx = self.txt_measure_canvas.bbox
        if self.all_columns_displayed:
            iterable = range(self.total_data_cols())
        else:
            iterable = self.displayed_columns
        # Row index entries can stretch row heights too.
        if isinstance(self.my_row_index, list):
            for rn in range(self.total_data_rows()):
                try:
                    if isinstance(self.my_row_index[rn], str):
                        txt = self.my_row_index[rn]
                    else:
                        txt = f"{self.my_row_index[rn]}"
                except:
                    txt = ""
                if txt:
                    itmcon(x, text = txt)
                    b = itmbbx(x)
                    h = b[3] - b[1] + 7
                else:
                    h = min_rh
                if h < min_rh:
                    h = int(min_rh)
                elif h > self.RI.max_rh:
                    h = int(self.RI.max_rh)
                if h > rhs[rn]:
                    rhs[rn] = h
        for cn in iterable:
            # Start each column's width from its header content.
            if cn in self.CH.cell_options and 'checkbox' in self.CH.cell_options[cn]:
                txt = self.CH.cell_options[cn]['checkbox']['text']
                if txt:
                    itmcon(x2, text = txt)
                    b = itmbbx(x2)
                    w = b[2] - b[0] + 7 + self.txt_h
                else:
                    w = self.min_cw
            else:
                try:
                    if isinstance(self.my_hdrs, int):
                        txt = self.data_ref[self.my_hdrs][cn]
                    else:
                        txt = self.my_hdrs[cn]
                    if txt:
                        itmcon(x2, text = txt)
                        b = itmbbx(x2)
                        w = b[2] - b[0] + self.txt_h + 7 if cn in self.CH.cell_options and 'dropdown' in self.CH.cell_options[cn] else b[2] - b[0] + 7
                    else:
                        w = self.min_cw + self.txt_h + 7 if cn in self.CH.cell_options and 'dropdown' in self.CH.cell_options[cn] else self.min_cw
                except:
                    # No header value: measure the generated default header text.
                    if self.CH.default_hdr == "letters":
                        itmcon(x2, text = f"{num2alpha(cn)}")
                    elif self.CH.default_hdr == "numbers":
                        itmcon(x2, text = f"{cn + 1}")
                    else:
                        itmcon(x2, text = f"{cn + 1} {num2alpha(cn)}")
                    b = itmbbx(x2)
                    w = b[2] - b[0] + 7
            # Widen the column / heighten rows to fit every cell in the column.
            for rn, r in enumerate(self.data_ref):
                if (rn, cn) in self.cell_options and 'checkbox' in self.cell_options[(rn, cn)]:
                    txt = self.cell_options[(rn, cn)]['checkbox']['text']
                    if txt:
                        itmcon(x, text = txt)
                        b = itmbbx(x)
                        tw = b[2] - b[0] + 7
                        h = b[3] - b[1] + 5
                    else:
                        tw = min_cw
                        h = min_rh
                else:
                    try:
                        if isinstance(r[cn], str):
                            txt = r[cn]
                        else:
                            txt = f"{r[cn]}"
                    except:
                        txt = ""
                    if txt:
                        itmcon(x, text = txt)
                        b = itmbbx(x)
                        tw = b[2] - b[0] + self.txt_h + 7 if (rn, cn) in self.cell_options and 'dropdown' in self.cell_options[(rn, cn)] else b[2] - b[0] + 7
                        h = b[3] - b[1] + 5
                    else:
                        tw = self.txt_h + 7 if (rn, cn) in self.cell_options and 'dropdown' in self.cell_options[(rn, cn)] else min_cw
                        h = min_rh
                if tw > w:
                    w = tw
                if h < min_rh:
                    h = int(min_rh)
                elif h > self.RI.max_rh:
                    h = int(self.RI.max_rh)
                if h > rhs[rn]:
                    rhs[rn] = h
            if w < min_cw:
                w = int(min_cw)
            elif w > self.CH.max_cw:
                w = int(self.CH.max_cw)
            cws.append(w)
        self.txt_measure_canvas.delete(x)
        self.txt_measure_canvas.delete(x2)
        # Boundaries are cumulative sums of the measured heights/widths.
        self.row_positions = list(accumulate(chain([0], (height for height in rhs.values()))))
        self.col_positions = list(accumulate(chain([0], (width for width in cws))))
        self.recreate_all_selection_boxes()
        return self.row_positions, self.col_positions
def reset_col_positions(self):
colpos = int(self.default_cw)
if self.all_columns_displayed:
self.col_positions = list(accumulate(chain([0], (colpos for c in range(self.total_data_cols())))))
else:
self.col_positions = list(accumulate(chain([0], (colpos for c in range(len(self.displayed_columns))))))
def del_col_position(self, idx, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if idx == "end" or len(self.col_positions) <= idx + 1:
del self.col_positions[-1]
else:
w = self.col_positions[idx + 1] - self.col_positions[idx]
idx += 1
del self.col_positions[idx]
self.col_positions[idx:] = [e - w for e in islice(self.col_positions, idx, len(self.col_positions))]
def del_col_positions(self, idx, num = 1, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if idx == "end" or len(self.col_positions) <= idx + 1:
del self.col_positions[-1]
else:
cws = [int(b - a) for a, b in zip(self.col_positions, islice(self.col_positions, 1, len(self.col_positions)))]
cws[idx:idx + num] = []
self.col_positions = list(accumulate(chain([0], (width for width in cws))))
def insert_col_position(self, idx = "end", width = None, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if width is None:
w = self.default_cw
else:
w = width
if idx == "end" or len(self.col_positions) == idx + 1:
self.col_positions.append(self.col_positions[-1] + w)
else:
idx += 1
self.col_positions.insert(idx, self.col_positions[idx - 1] + w)
idx += 1
self.col_positions[idx:] = [e + w for e in islice(self.col_positions, idx, len(self.col_positions))]
def insert_col_positions(self, idx = "end", widths = None, deselect_all = False):
if deselect_all:
self.deselect("all", redraw = False)
if widths is None:
w = [self.default_cw]
elif isinstance(widths, int):
w = list(repeat(self.default_cw, widths))
else:
w = widths
if idx == "end" or len(self.col_positions) == idx + 1:
if len(w) > 1:
self.col_positions += list(accumulate(chain([self.col_positions[-1] + w[0]], islice(w, 1, None))))
else:
self.col_positions.append(self.col_positions[-1] + w[0])
else:
if len(w) > 1:
idx += 1
self.col_positions[idx:idx] = list(accumulate(chain([self.col_positions[idx - 1] + w[0]], islice(w, 1, None))))
idx += len(w)
sumw = sum(w)
self.col_positions[idx:] = [e + sumw for e in islice(self.col_positions, idx, len(self.col_positions))]
else:
w = w[0]
idx += 1
self.col_positions.insert(idx, self.col_positions[idx - 1] + w)
idx += 1
self.col_positions[idx:] = [e + w for e in islice(self.col_positions, idx, len(self.col_positions))]
    def insert_col_rc(self, event = None):
        """Right-click "insert columns" action.

        Inserts empty columns left or right of the selected columns (or
        appends one column when nothing is selected), shifting cell /
        column / header option keys, updating displayed_columns when only a
        subset is shown, selecting the new columns and recording an undo
        entry. *event* is the string "left" or anything else for "right".
        """
        if self.anything_selected(exclude_rows = True, exclude_cells = True):
            selcols = self.get_selected_cols()
            numcols = len(selcols)
            displayed_ins_col = min(selcols) if event == "left" else max(selcols) + 1
            if self.all_columns_displayed:
                data_ins_col = int(displayed_ins_col)
            else:
                # Map the displayed insertion point back to a data column.
                if displayed_ins_col == len(self.col_positions) - 1:
                    rowlen = len(max(self.data_ref, key = len)) if self.data_ref else 0
                    data_ins_col = rowlen
                else:
                    try:
                        data_ins_col = int(self.displayed_columns[displayed_ins_col])
                    except:
                        data_ins_col = int(self.displayed_columns[displayed_ins_col - 1])
        else:
            numcols = 1
            displayed_ins_col = len(self.col_positions) - 1
            data_ins_col = int(displayed_ins_col)
        # NOTE(review): this limit arithmetic looks suspicious — possibly
        # missing parentheses around (len(self.col_positions) - 1); verify.
        if isinstance(self.paste_insert_column_limit, int) and self.paste_insert_column_limit < displayed_ins_col + numcols:
            numcols = self.paste_insert_column_limit - len(self.col_positions) - 1
            if numcols < 1:
                return
        # User veto hook: any exception from the begin callback cancels.
        if self.extra_begin_insert_cols_rc_func is not None:
            try:
                self.extra_begin_insert_cols_rc_func(InsertEvent("begin_insert_columns", data_ins_col, displayed_ins_col, numcols))
            except:
                return
        saved_displayed_columns = list(self.displayed_columns)
        if not self.all_columns_displayed:
            # 'rowlen' was set above in the same not-all-displayed + end-insert case.
            if displayed_ins_col == len(self.col_positions) - 1:
                self.displayed_columns += list(range(rowlen, rowlen + numcols))
            else:
                if displayed_ins_col > len(self.displayed_columns) - 1:
                    adj_ins = displayed_ins_col - 1
                else:
                    adj_ins = displayed_ins_col
                part1 = self.displayed_columns[:adj_ins]
                part2 = list(range(self.displayed_columns[adj_ins], self.displayed_columns[adj_ins] + numcols + 1))
                part3 = [] if displayed_ins_col > len(self.displayed_columns) - 1 else [cn + numcols for cn in islice(self.displayed_columns, adj_ins + 1, None)]
                self.displayed_columns = (part1 +
                                          part2 +
                                          part3)
        self.insert_col_positions(idx = displayed_ins_col,
                                  widths = numcols,
                                  deselect_all = True)
        # Shift every option key at or right of the insertion point.
        self.cell_options = {(rn, cn if cn < data_ins_col else cn + numcols): t2 for (rn, cn), t2 in self.cell_options.items()}
        self.col_options = {cn if cn < data_ins_col else cn + numcols: t for cn, t in self.col_options.items()}
        self.CH.cell_options = {cn if cn < data_ins_col else cn + numcols: t for cn, t in self.CH.cell_options.items()}
        if self.my_hdrs and isinstance(self.my_hdrs, list):
            try:
                self.my_hdrs[data_ins_col:data_ins_col] = list(repeat("", numcols))
            except:
                pass
        if self.row_positions == [0] and not self.data_ref:
            # Empty sheet: create a first row to hold the new columns.
            self.insert_row_position(idx = "end",
                                     height = int(self.min_rh),
                                     deselect_all = False)
            self.data_ref.append(list(repeat("", numcols)))
        else:
            for rn in range(len(self.data_ref)):
                self.data_ref[rn][data_ins_col:data_ins_col] = list(repeat("", numcols))
        self.create_selected(0, displayed_ins_col, len(self.row_positions) - 1, displayed_ins_col + numcols, "cols")
        self.create_current(0, displayed_ins_col, "col", inside = True)
        if self.undo_enabled:
            self.undo_storage.append(zlib.compress(pickle.dumps(("insert_col", {"data_col_num": data_ins_col,
                                                                                "displayed_columns": saved_displayed_columns,
                                                                                "sheet_col_num": displayed_ins_col,
                                                                                "numcols": numcols}))))
        self.refresh()
        if self.extra_end_insert_cols_rc_func is not None:
            self.extra_end_insert_cols_rc_func(InsertEvent("end_insert_columns", data_ins_col, displayed_ins_col, numcols))
    def insert_row_rc(self, event = None):
        """Right-click "insert rows" action.

        Inserts empty rows above or below the selected rows (or appends one
        row when nothing is selected), shifting cell / row / index option
        keys, selecting the new rows and recording an undo entry. *event*
        is the string "above" or anything else for "below".
        """
        if self.anything_selected(exclude_columns = True, exclude_cells = True):
            selrows = self.get_selected_rows()
            numrows = len(selrows)
            stidx = min(selrows) if event == "above" else max(selrows) + 1
            posidx = int(stidx)
        else:
            selrows = [0]
            numrows = 1
            stidx = self.total_data_rows()
            posidx = len(self.row_positions) - 1
        # NOTE(review): like the column variant, this limit arithmetic may be
        # missing parentheses around (len(self.row_positions) - 1); verify.
        if isinstance(self.paste_insert_row_limit, int) and self.paste_insert_row_limit < posidx + numrows:
            numrows = self.paste_insert_row_limit - len(self.row_positions) - 1
            if numrows < 1:
                return
        # User veto hook: any exception from the begin callback cancels.
        if self.extra_begin_insert_rows_rc_func is not None:
            try:
                self.extra_begin_insert_rows_rc_func(InsertEvent("begin_insert_rows", stidx, posidx, numrows))
            except:
                return
        self.insert_row_positions(idx = posidx,
                                  heights = numrows,
                                  deselect_all = True)
        # Shift every option key at or below the insertion point.
        self.cell_options = {(rn if rn < posidx else rn + numrows, cn): t2 for (rn, cn), t2 in self.cell_options.items()}
        self.row_options = {rn if rn < posidx else rn + numrows: t for rn, t in self.row_options.items()}
        self.RI.cell_options = {rn if rn < posidx else rn + numrows: t for rn, t in self.RI.cell_options.items()}
        if self.my_row_index and isinstance(self.my_row_index, list):
            try:
                self.my_row_index[stidx:stidx] = list(repeat("", numrows))
            except:
                pass
        if self.col_positions == [0] and not self.data_ref:
            # Empty sheet: create a first column to hold the new rows.
            self.insert_col_position(idx = "end",
                                     width = None,
                                     deselect_all = False)
            self.data_ref.append([""])
        else:
            total_data_cols = self.total_data_cols()
            self.data_ref[stidx:stidx] = [list(repeat("", total_data_cols)) for rn in range(numrows)]
        self.create_selected(posidx, 0, posidx + numrows, len(self.col_positions) - 1, "rows")
        self.create_current(posidx, 0, "row", inside = True)
        if self.undo_enabled:
            self.undo_storage.append(zlib.compress(pickle.dumps(("insert_row", {"data_row_num": stidx,
                                                                                "sheet_row_num": posidx,
                                                                                "numrows": numrows}))))
        self.refresh()
        if self.extra_end_insert_rows_rc_func is not None:
            self.extra_end_insert_rows_rc_func(InsertEvent("end_insert_rows", stidx, posidx, numrows))
    def del_cols_rc(self, event = None):
        """Right-click "delete columns" action.

        Removes the selected columns from the data, headers, column widths
        and all option dicts, optionally capturing everything removed into
        an undo entry first. When only a subset of columns is displayed,
        selected displayed indices are mapped through displayed_columns.
        The user's begin callback may veto by raising.
        """
        seld_cols = sorted(self.get_selected_cols())
        if seld_cols:
            if self.extra_begin_del_cols_rc_func is not None:
                try:
                    self.extra_begin_del_cols_rc_func(DeleteRowColumnEvent("begin_delete_columns", seld_cols))
                except:
                    return
            # Data-column indices of everything being deleted.
            seldset = set(seld_cols) if self.all_columns_displayed else set(self.displayed_columns[c] for c in seld_cols)
            list_of_coords = tuple((r, c) for (r, c) in self.cell_options if c in seldset)
            if self.undo_enabled:
                # Snapshot removed values, widths, headers and option dicts.
                undo_storage = {'deleted_cols': {},
                                'colwidths': {},
                                'deleted_hdr_values': {},
                                'selection_boxes': self.get_boxes(),
                                'displayed_columns': list(self.displayed_columns),
                                'cell_options': {k: v.copy() for k, v in self.cell_options.items()},
                                'col_options': {k: v.copy() for k, v in self.col_options.items()},
                                'CH_cell_options': {k: v.copy() for k, v in self.CH.cell_options.items()}}
            if self.all_columns_displayed:
                if self.undo_enabled:
                    # Pop in reverse so earlier indices stay valid while recording.
                    for c in reversed(seld_cols):
                        undo_storage['colwidths'][c] = self.col_positions[c + 1] - self.col_positions[c]
                        for rn in range(len(self.data_ref)):
                            if c not in undo_storage['deleted_cols']:
                                undo_storage['deleted_cols'][c] = {}
                            try:
                                undo_storage['deleted_cols'][c][rn] = self.data_ref[rn].pop(c)
                            except:
                                continue
                    if self.my_hdrs and isinstance(self.my_hdrs, list):
                        for c in reversed(seld_cols):
                            try:
                                undo_storage['deleted_hdr_values'][c] = self.my_hdrs.pop(c)
                            except:
                                continue
                else:
                    for rn in range(len(self.data_ref)):
                        for c in reversed(seld_cols):
                            del self.data_ref[rn][c]
                    if self.my_hdrs and isinstance(self.my_hdrs, list):
                        for c in reversed(seld_cols):
                            try:
                                del self.my_hdrs[c]
                            except:
                                continue
            else:
                # Subset displayed: translate each selected displayed column
                # through displayed_columns before touching the data.
                if self.undo_enabled:
                    for c in reversed(seld_cols):
                        undo_storage['colwidths'][c] = self.col_positions[c + 1] - self.col_positions[c]
                        for rn in range(len(self.data_ref)):
                            if self.displayed_columns[c] not in undo_storage['deleted_cols']:
                                undo_storage['deleted_cols'][self.displayed_columns[c]] = {}
                            try:
                                undo_storage['deleted_cols'][self.displayed_columns[c]][rn] = self.data_ref[rn].pop(self.displayed_columns[c])
                            except:
                                continue
                    if self.my_hdrs and isinstance(self.my_hdrs, list):
                        for c in reversed(seld_cols):
                            try:
                                undo_storage['deleted_hdr_values'][self.displayed_columns[c]] = self.my_hdrs.pop(self.displayed_columns[c])
                            except:
                                continue
                else:
                    for rn in range(len(self.data_ref)):
                        for c in reversed(seld_cols):
                            del self.data_ref[rn][self.displayed_columns[c]]
                    if self.my_hdrs and isinstance(self.my_hdrs, list):
                        for c in reversed(seld_cols):
                            try:
                                del self.my_hdrs[self.displayed_columns[c]]
                            except:
                                continue
            if self.undo_enabled:
                self.undo_storage.append(("delete_cols", undo_storage))
            self.del_cell_options(list_of_coords)
            for c in reversed(seld_cols):
                dcol = c if self.all_columns_displayed else self.displayed_columns[c]
                self.del_col_position(c,
                                      deselect_all = False)
                if dcol in self.col_options:
                    del self.col_options[dcol]
                if dcol in self.CH.cell_options:
                    del self.CH.cell_options[dcol]
            # Re-key remaining options to account for the removed columns.
            numcols = len(seld_cols)
            idx = seld_cols[-1]
            self.cell_options = {(rn, cn if cn < idx else cn - numcols): t2 for (rn, cn), t2 in self.cell_options.items()}
            self.col_options = {cn if cn < idx else cn - numcols: t for cn, t in self.col_options.items()}
            self.CH.cell_options = {cn if cn < idx else cn - numcols: t for cn, t in self.CH.cell_options.items()}
            self.deselect("allcols", redraw = False)
            self.set_current_to_last()
            if not self.all_columns_displayed:
                self.displayed_columns = [c for c in self.displayed_columns if c not in seldset]
                for c in sorted(seldset):
                    self.displayed_columns = [dc if c > dc else dc - 1 for dc in self.displayed_columns]
            self.refresh()
            if self.extra_end_del_cols_rc_func is not None:
                self.extra_end_del_cols_rc_func(DeleteRowColumnEvent("end_delete_columns", seld_cols))
def del_cell_options(self, list_of_coords):
    """Remove dropdown cell options (and destroy their widgets) for the given
    (row, data column) coordinates; non-dropdown options are left untouched."""
    for key in list_of_coords:
        opts = self.cell_options.get(key)
        if opts is not None and 'dropdown' in opts:
            self.destroy_dropdown(key[0], key[1])
            del self.cell_options[key]
def del_rows_rc(self, event = None):
    """Delete the currently selected rows (right-click menu action).

    Order of operations:
      1. Run the user's begin-delete callback; if it raises, abort.
      2. Pop the selected rows from the data (and from the row index, if it
         is a list), recording everything needed for undo when enabled.
      3. Drop the rows' positions and per-row/per-cell options, then shift
         the keys of the remaining options up by the number of deleted rows.
      4. Deselect, refresh, and run the user's end-delete callback.
    """
    seld_rows = sorted(self.get_selected_rows())
    if seld_rows:
        if self.extra_begin_del_rows_rc_func is not None:
            try:
                self.extra_begin_del_rows_rc_func(DeleteRowColumnEvent("begin_delete_rows", seld_rows))
            except:
                # the callback vetoes the deletion by raising
                return
        seldset = set(seld_rows)
        # cell-option keys belonging to deleted rows (their dropdowns must be destroyed)
        list_of_coords = tuple((r, c) for (r, c) in self.cell_options if r in seldset)
        if self.undo_enabled:
            # snapshot everything required to restore the rows on undo
            undo_storage = {'deleted_rows': [],
                            'deleted_index_values': [],
                            'selection_boxes': self.get_boxes(),
                            'cell_options': {k: v.copy() for k, v in self.cell_options.items()},
                            'row_options': {k: v.copy() for k, v in self.row_options.items()},
                            'RI_cell_options': {k: v.copy() for k, v in self.RI.cell_options.items()}}
            for r in reversed(seld_rows):
                # store (row number, row data, row pixel height)
                undo_storage['deleted_rows'].append((r, self.data_ref.pop(r), self.row_positions[r + 1] - self.row_positions[r]))
        else:
            for r in reversed(seld_rows):
                del self.data_ref[r]
        if self.my_row_index and isinstance(self.my_row_index, list):
            if self.undo_enabled:
                for r in reversed(seld_rows):
                    try:
                        undo_storage['deleted_index_values'].append((r, self.my_row_index.pop(r)))
                    except:
                        # the index list may be shorter than the data; skip missing entries
                        continue
            else:
                for r in reversed(seld_rows):
                    try:
                        del self.my_row_index[r]
                    except:
                        continue
        if self.undo_enabled:
            self.undo_storage.append(("delete_rows", undo_storage))
        self.del_cell_options(list_of_coords)
        # delete positions/options from the bottom up so indexes stay valid
        for r in reversed(seld_rows):
            self.del_row_position(r,
                                  deselect_all = False)
            if r in self.row_options:
                del self.row_options[r]
            if r in self.RI.cell_options:
                del self.RI.cell_options[r]
        numrows = len(seld_rows)
        idx = seld_rows[-1]
        # shift remaining option keys at/after the last selected row up by numrows
        # NOTE(review): for a non-contiguous selection, keys between selected
        # rows are only shifted when >= the last selected row — confirm this
        # matches how row selections can actually be made
        self.cell_options = {(rn if rn < idx else rn - numrows, cn): t2 for (rn, cn), t2 in self.cell_options.items()}
        self.row_options = {rn if rn < idx else rn - numrows: t for rn, t in self.row_options.items()}
        self.RI.cell_options = {rn if rn < idx else rn - numrows: t for rn, t in self.RI.cell_options.items()}
        self.deselect("allrows", redraw = False)
        self.set_current_to_last()
        self.refresh()
        if self.extra_end_del_rows_rc_func is not None:
            self.extra_end_del_rows_rc_func(DeleteRowColumnEvent("end_delete_rows", seld_rows))
def reset_row_positions(self):
    """Rebuild row_positions using the default row height for every data row."""
    height = self.default_rh[1]
    positions = [0]
    for _ in range(self.total_data_rows()):
        positions.append(positions[-1] + height)
    self.row_positions = positions
def del_row_position(self, idx, deselect_all = False):
    """Delete one row boundary at idx ("end" for the last row), shifting all
    later positions up by the deleted row's height."""
    if deselect_all:
        self.deselect("all", redraw = False)
    positions = self.row_positions
    if idx == "end" or len(positions) <= idx + 1:
        positions.pop()
    else:
        height = positions[idx + 1] - positions[idx]
        del positions[idx + 1]
        for i in range(idx + 1, len(positions)):
            positions[i] -= height
def del_row_positions(self, idx, numrows = 1, deselect_all = False):
    """Delete numrows consecutive row boundaries starting at idx.

    With idx == "end" (or an out-of-range idx) only the final boundary is
    removed, mirroring del_row_position.
    """
    if deselect_all:
        self.deselect("all", redraw = False)
    if idx == "end" or len(self.row_positions) <= idx + 1:
        del self.row_positions[-1]
    else:
        # convert boundaries to per-row heights, drop the deleted span,
        # then rebuild cumulative positions
        heights = [int(self.row_positions[i + 1] - self.row_positions[i])
                   for i in range(len(self.row_positions) - 1)]
        del heights[idx:idx + numrows]
        rebuilt = [0]
        for h in heights:
            rebuilt.append(rebuilt[-1] + h)
        self.row_positions = rebuilt
def insert_row_position(self, idx, height = None, deselect_all = False):
    """Insert one row boundary at idx ("end" appends) with the given height,
    defaulting to the sheet's default row height; later positions shift down."""
    if deselect_all:
        self.deselect("all", redraw = False)
    h = self.default_rh[1] if height is None else height
    positions = self.row_positions
    if idx == "end" or len(positions) == idx + 1:
        positions.append(positions[-1] + h)
    else:
        positions.insert(idx + 1, positions[idx] + h)
        for i in range(idx + 2, len(positions)):
            positions[i] += h
def insert_row_positions(self, idx = "end", heights = None, deselect_all = False):
    """Insert one or more row boundaries at idx ("end" appends).

    heights may be None (one default-height row), an int (that many
    default-height rows) or an iterable of pixel heights.  Positions after
    the insertion point shift down by the total inserted height.
    """
    if deselect_all:
        self.deselect("all", redraw = False)
    if heights is None:
        new_heights = [self.default_rh[1]]
    elif isinstance(heights, int):
        new_heights = [self.default_rh[1]] * heights
    else:
        new_heights = heights
    positions = self.row_positions
    if idx == "end" or len(positions) == idx + 1:
        running = positions[-1] + new_heights[0]
        positions.append(running)
        for h in new_heights[1:]:
            running += h
            positions.append(running)
    else:
        insert_at = idx + 1
        running = positions[insert_at - 1] + new_heights[0]
        block = [running]
        for h in new_heights[1:]:
            running += h
            block.append(running)
        total = sum(new_heights)
        positions[insert_at:] = block + [p + total for p in positions[insert_at:]]
def move_row_position(self, idx1, idx2):
    """Move the row at idx1 to idx2, keeping each row's height with it.

    No-op when there are fewer than two rows.
    """
    if not len(self.row_positions) <= 2:
        positions = self.row_positions
        height = positions[idx1 + 1] - positions[idx1]
        positions.insert(idx2 + 1, positions.pop(idx1 + 1))
        if idx1 < idx2:
            # moving down: rows between shift up by the moved row's height
            for i in range(idx1 + 1, idx2 + 1):
                positions[i] -= height
        else:
            # moving up: rows between shift down by the moved row's height
            for i in range(idx2 + 2, idx1 + 2):
                positions[i] += height
        positions[idx2 + 1] = positions[idx2] + height
def move_col_position(self, idx1, idx2):
    """Move the column at idx1 to idx2, keeping each column's width with it.

    No-op when there are fewer than two columns.
    """
    if not len(self.col_positions) <= 2:
        positions = self.col_positions
        width = positions[idx1 + 1] - positions[idx1]
        positions.insert(idx2 + 1, positions.pop(idx1 + 1))
        if idx1 < idx2:
            # moving right: columns between shift left by the moved width
            for i in range(idx1 + 1, idx2 + 1):
                positions[i] -= width
        else:
            # moving left: columns between shift right by the moved width
            for i in range(idx2 + 2, idx1 + 2):
                positions[i] += width
        positions[idx2 + 1] = positions[idx2] + width
def GetLinesHeight(self, n, old_method = False):
    """Return the pixel height needed to display n lines of table text."""
    if old_method:
        # legacy formula based on pre-computed font metrics
        if n == 1:
            return int(self.min_rh)
        return int(self.fl_ins) + (self.xtra_lines_increment * n) - 2
    # measure an n-line sample on the hidden measuring canvas;
    # "j^|" contains tall ascenders and a descender for worst-case height
    sample = "\n".join("j^|" for _ in range(n)) if n > 1 else "j^|"
    item = self.txt_measure_canvas.create_text(0, 0,
                                               text = sample,
                                               font = self.my_font)
    x1, y1, x2, y2 = self.txt_measure_canvas.bbox(item)
    self.txt_measure_canvas.delete(item)
    return y2 - y1 + 5
def GetHdrLinesHeight(self, n, old_method = False):
    """Return the pixel height needed to display n lines of header text."""
    if old_method:
        # legacy formula based on pre-computed header font metrics
        if n == 1:
            return int(self.hdr_min_rh)
        return int(self.hdr_fl_ins) + (self.hdr_xtra_lines_increment * n) - 2
    # measure an n-line sample using the header font on the hidden canvas
    sample = "\n".join("j^|" for _ in range(n)) if n > 1 else "j^|"
    item = self.txt_measure_canvas.create_text(0, 0,
                                               text = sample,
                                               font = self.my_hdr_font)
    x1, y1, x2, y2 = self.txt_measure_canvas.bbox(item)
    self.txt_measure_canvas.delete(item)
    return y2 - y1 + 5
def display_columns(self, indexes = None, enable = None, reset_col_positions = True, set_col_positions = True, deselect_all = True):
    """Get or set which data columns are displayed.

    With both ``indexes`` and ``enable`` left as None this acts as a getter
    and returns the displayed column indexes.  Otherwise it stores the new
    sorted indexes, updates ``all_columns_displayed`` and optionally rebuilds
    the column positions.
    """
    # NOTE(review): parameter set_col_positions is accepted but never read in
    # this body — confirm whether it is vestigial.
    if indexes is None and enable is None:
        if self.all_columns_displayed:
            return list(range(len(self.col_positions) - 1))
        else:
            return self.displayed_columns
    if deselect_all:
        self.deselect("all")
    if indexes != self.displayed_columns:
        # changing the displayed columns invalidates all stored undo states
        self.undo_storage = deque(maxlen = self.max_undos)
    if indexes is not None:
        self.displayed_columns = sorted(indexes)
    if enable and not self.data_ref:
        self.all_columns_displayed = False
    elif enable and list(range(len(max(self.data_ref, key = len)))) != self.displayed_columns:
        # the chosen columns differ from the full range of the widest row
        self.all_columns_displayed = False
    else:
        self.all_columns_displayed = True
    if reset_col_positions:
        self.reset_col_positions()
def headers(self, newheaders = None, index = None, reset_col_positions = False, show_headers_if_not_sheet = True, redraw = False):
    """Get or set the sheet headers.

    Setter (``newheaders`` is not None):
      - list/tuple: becomes the header list (tuples are copied to a list).
      - int: that data row number is used as the header.
      - any other value with an int ``index``: stringified and written into
        that header slot, extending the list with "" as needed.
      - any other iterable: converted with ``list()``; raises ValueError if
        that fails.

    Getter (``newheaders`` is None): returns the header at ``index`` when
    given, otherwise the whole header object.
    """
    if newheaders is not None:
        if isinstance(newheaders, (list, tuple)):
            self.my_hdrs = list(newheaders) if isinstance(newheaders, tuple) else newheaders
        elif isinstance(newheaders, int):
            self.my_hdrs = int(newheaders)
        elif isinstance(self.my_hdrs, list) and isinstance(index, int):
            if len(self.my_hdrs) <= index:
                self.my_hdrs.extend(list(repeat("", index - len(self.my_hdrs) + 1)))
            self.my_hdrs[index] = f"{newheaders}"
        elif not isinstance(newheaders, (list, tuple, int)) and index is None:
            try:
                self.my_hdrs = list(newheaders)
            except:
                # fixed: error message previously lacked its closing parenthesis
                raise ValueError("New header must be iterable or int (use int to use a row as the header)")
        if reset_col_positions:
            self.reset_col_positions()
        elif show_headers_if_not_sheet and isinstance(self.my_hdrs, list) and (self.col_positions == [0] or not self.col_positions):
            # no columns exist yet: create default-width positions so the
            # new headers are actually visible
            colpos = int(self.default_cw)
            if self.all_columns_displayed:
                self.col_positions = list(accumulate(chain([0], (colpos for c in range(len(self.my_hdrs))))))
            else:
                self.col_positions = list(accumulate(chain([0], (colpos for c in range(len(self.displayed_columns))))))
        if redraw:
            self.refresh()
    else:
        if index is not None:
            if isinstance(index, int):
                return self.my_hdrs[index]
        else:
            return self.my_hdrs
def row_index(self, newindex = None, index = None, reset_row_positions = False, show_index_if_not_sheet = True, redraw = False):
    """Get or set the sheet row index.

    Setter (``newindex`` is not None):
      - list/tuple: becomes the index list (tuples are copied to a list).
      - int: that data column number is used as the index.
      - any other value with an int ``index``: stringified and written into
        that slot.  NOTE(review): unlike headers(), this does not extend the
        list first, so an out-of-range index raises IndexError — confirm
        whether that asymmetry is intentional.
      - any other iterable: converted with ``list()``; raises ValueError if
        that fails.

    Getter (``newindex`` is None): returns the value at ``index`` when given,
    otherwise the whole index object.
    """
    if newindex is not None:
        if not self.my_row_index and not isinstance(self.my_row_index, int):
            # index column was empty/hidden; give it its default width
            self.RI.set_width(self.RI.default_width, set_TL = True)
        if isinstance(newindex, (list, tuple)):
            self.my_row_index = list(newindex) if isinstance(newindex, tuple) else newindex
        elif isinstance(newindex, int):
            self.my_row_index = int(newindex)
        elif isinstance(index, int):
            self.my_row_index[index] = f"{newindex}"
        elif not isinstance(newindex, (list, tuple, int)) and index is None:
            try:
                self.my_row_index = list(newindex)
            except:
                # fixed: error message previously lacked its closing parenthesis
                raise ValueError("New index must be iterable or int (use int to use a column as the index)")
        if reset_row_positions:
            self.reset_row_positions()
        elif show_index_if_not_sheet and isinstance(self.my_row_index, list) and (self.row_positions == [0] or not self.row_positions):
            # no rows exist yet: create default-height positions so the new
            # index is actually visible
            rowpos = self.default_rh[1]
            self.row_positions = list(accumulate(chain([0], (rowpos for c in range(len(self.my_row_index))))))
        if redraw:
            self.refresh()
    else:
        if index is not None:
            if isinstance(index, int):
                return self.my_row_index[index]
        else:
            return self.my_row_index
def total_data_cols(self, include_headers = True):
    """Return the column count: the length of the widest data row, or of the
    header list if that is longer (and include_headers is True)."""
    header_len = len(self.my_hdrs) if include_headers and isinstance(self.my_hdrs, list) else 0
    try:
        data_len = len(max(self.data_ref, key = len))
    except:
        # empty data (or rows without len()); treat as zero columns
        data_len = 0
    return max(header_len, data_len)
def total_data_rows(self):
    """Return the row count: the number of data rows, or the length of the
    row index list if that is longer."""
    index_len = len(self.my_row_index) if isinstance(self.my_row_index, list) else 0
    return max(index_len, len(self.data_ref))
def data_dimensions(self, total_rows = None, total_columns = None):
    """Get or set the data dimensions.

    With no arguments, returns (total_rows, total_columns).  Otherwise grows
    or truncates the data in place, padding new cells with "".
    """
    if total_rows is None and total_columns is None:
        return self.total_data_rows(), self.total_data_cols()
    if total_rows is not None:
        missing = total_rows - len(self.data_ref)
        if missing > 0:
            ncols = self.total_data_cols() if total_columns is None else total_columns
            self.data_ref.extend([[""] * ncols for _ in range(missing)])
        else:
            del self.data_ref[total_rows:]
    if total_columns is not None:
        self.data_ref[:] = [row[:total_columns] if len(row) > total_columns
                            else row + [""] * (total_columns - len(row))
                            for row in self.data_ref]
def equalize_data_row_lengths(self, include_header = False):
    """Pad every data row (and optionally the header list) with "" up to the
    widest row's length; returns that column count."""
    total_columns = self.total_data_cols()
    if include_header and total_columns > len(self.my_hdrs):
        self.my_hdrs[:] = self.my_hdrs + [""] * (total_columns - len(self.my_hdrs))
    self.data_ref[:] = [row if len(row) >= total_columns
                        else row + [""] * (total_columns - len(row))
                        for row in self.data_ref]
    return total_columns
def get_canvas_visible_area(self):
    """Return (x1, y1, x2, y2) canvas coordinates of the visible region."""
    left = self.canvasx(0)
    top = self.canvasy(0)
    right = self.canvasx(self.winfo_width())
    bottom = self.canvasy(self.winfo_height())
    return left, top, right, bottom
def get_visible_rows(self, y1, y2):
    """Return the (start, end) row boundary indexes covering canvas
    y-range [y1, y2]; end is extended by one unless y2 is past the last row."""
    start_row = bisect.bisect_left(self.row_positions, y1)
    end_row = bisect.bisect_right(self.row_positions, y2)
    if y2 < self.row_positions[-1]:
        end_row += 1
    return start_row, end_row
def get_visible_columns(self, x1, x2):
    """Return the (start, end) column boundary indexes covering canvas
    x-range [x1, x2]; end is extended by one unless x2 is past the last column."""
    start_col = bisect.bisect_left(self.col_positions, x1)
    end_col = bisect.bisect_right(self.col_positions, x2)
    if x2 < self.col_positions[-1]:
        end_col += 1
    return start_col, end_col
def redraw_highlight_get_text_fg(self, r, c, fc, fr, sc, sr, c_2_, c_3_, c_4_, selected_cells, actual_selected_rows, actual_selected_cols, dcol, can_width):
    """Resolve one cell's text colour and draw its highlight rectangle, if any.

    The if/elif ladder checks, in priority order: cell, then row, then column
    highlight options — each combined with the cell's selection state
    (selected cell / selected row / selected column / unselected).  When a
    highlight coincides with a selection, the fill is the per-channel average
    of the highlight colour and the selection background (c_2_, c_3_, c_4_
    are presumably the RGB components of the cell/column/row selection
    backgrounds — confirm at call site).  (fc, fr) / (sc, sr) are the cell's
    first/second pixel corners; dcol is the data column for display column c.
    Returns (text_fg, redrawn_flag).
    """
    redrawn = False
    # ________________________ CELL IS HIGHLIGHTED AND IN SELECTED CELLS ________________________
    if (r, dcol) in self.cell_options and 'highlight' in self.cell_options[(r, dcol)] and (r, c) in selected_cells:
        tf = self.table_selected_cells_fg if self.cell_options[(r, dcol)]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.cell_options[(r, dcol)]['highlight'][1]
        if self.cell_options[(r, dcol)]['highlight'][0] is not None:
            # named colours are resolved to hex via Color_Map_ before blending
            c_1 = self.cell_options[(r, dcol)]['highlight'][0] if self.cell_options[(r, dcol)]['highlight'][0].startswith("#") else Color_Map_[self.cell_options[(r, dcol)]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_2_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_2_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_2_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    elif r in self.row_options and 'highlight' in self.row_options[r] and (r, c) in selected_cells:
        tf = self.table_selected_cells_fg if self.row_options[r]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.row_options[r]['highlight'][1]
        if self.row_options[r]['highlight'][0] is not None:
            c_1 = self.row_options[r]['highlight'][0] if self.row_options[r]['highlight'][0].startswith("#") else Color_Map_[self.row_options[r]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_2_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_2_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_2_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi",
                                            can_width = can_width if self.row_options[r]['highlight'][2] else None)
    elif dcol in self.col_options and 'highlight' in self.col_options[dcol] and (r, c) in selected_cells:
        tf = self.table_selected_cells_fg if self.col_options[dcol]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.col_options[dcol]['highlight'][1]
        if self.col_options[dcol]['highlight'][0] is not None:
            c_1 = self.col_options[dcol]['highlight'][0] if self.col_options[dcol]['highlight'][0].startswith("#") else Color_Map_[self.col_options[dcol]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_2_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_2_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_2_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    # ________________________ CELL IS HIGHLIGHTED AND IN SELECTED ROWS ________________________
    elif (r, dcol) in self.cell_options and 'highlight' in self.cell_options[(r, dcol)] and r in actual_selected_rows:
        tf = self.table_selected_rows_fg if self.cell_options[(r, dcol)]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.cell_options[(r, dcol)]['highlight'][1]
        if self.cell_options[(r, dcol)]['highlight'][0] is not None:
            c_1 = self.cell_options[(r, dcol)]['highlight'][0] if self.cell_options[(r, dcol)]['highlight'][0].startswith("#") else Color_Map_[self.cell_options[(r, dcol)]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_4_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_4_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_4_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    elif r in self.row_options and 'highlight' in self.row_options[r] and r in actual_selected_rows:
        tf = self.table_selected_rows_fg if self.row_options[r]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.row_options[r]['highlight'][1]
        if self.row_options[r]['highlight'][0] is not None:
            c_1 = self.row_options[r]['highlight'][0] if self.row_options[r]['highlight'][0].startswith("#") else Color_Map_[self.row_options[r]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_4_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_4_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_4_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi",
                                            can_width = can_width if self.row_options[r]['highlight'][2] else None)
    elif dcol in self.col_options and 'highlight' in self.col_options[dcol] and r in actual_selected_rows:
        tf = self.table_selected_rows_fg if self.col_options[dcol]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.col_options[dcol]['highlight'][1]
        if self.col_options[dcol]['highlight'][0] is not None:
            c_1 = self.col_options[dcol]['highlight'][0] if self.col_options[dcol]['highlight'][0].startswith("#") else Color_Map_[self.col_options[dcol]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_4_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_4_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_4_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    # ________________________ CELL IS HIGHLIGHTED AND IN SELECTED COLUMNS ________________________
    elif (r, dcol) in self.cell_options and 'highlight' in self.cell_options[(r, dcol)] and c in actual_selected_cols:
        tf = self.table_selected_columns_fg if self.cell_options[(r, dcol)]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.cell_options[(r, dcol)]['highlight'][1]
        if self.cell_options[(r, dcol)]['highlight'][0] is not None:
            c_1 = self.cell_options[(r, dcol)]['highlight'][0] if self.cell_options[(r, dcol)]['highlight'][0].startswith("#") else Color_Map_[self.cell_options[(r, dcol)]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_3_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_3_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_3_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    elif r in self.row_options and 'highlight' in self.row_options[r] and c in actual_selected_cols:
        tf = self.table_selected_columns_fg if self.row_options[r]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.row_options[r]['highlight'][1]
        if self.row_options[r]['highlight'][0] is not None:
            c_1 = self.row_options[r]['highlight'][0] if self.row_options[r]['highlight'][0].startswith("#") else Color_Map_[self.row_options[r]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_3_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_3_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_3_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi",
                                            can_width = can_width if self.row_options[r]['highlight'][2] else None)
    elif dcol in self.col_options and 'highlight' in self.col_options[dcol] and c in actual_selected_cols:
        tf = self.table_selected_columns_fg if self.col_options[dcol]['highlight'][1] is None or self.display_selected_fg_over_highlights else self.col_options[dcol]['highlight'][1]
        if self.col_options[dcol]['highlight'][0] is not None:
            c_1 = self.col_options[dcol]['highlight'][0] if self.col_options[dcol]['highlight'][0].startswith("#") else Color_Map_[self.col_options[dcol]['highlight'][0]]
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = (f"#{int((int(c_1[1:3], 16) + c_3_[0]) / 2):02X}" +
                                                                           f"{int((int(c_1[3:5], 16) + c_3_[1]) / 2):02X}" +
                                                                           f"{int((int(c_1[5:], 16) + c_3_[2]) / 2):02X}"),
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    # ________________________ CELL IS HIGHLIGHTED AND NOT SELECTED ________________________
    elif (r, dcol) in self.cell_options and 'highlight' in self.cell_options[(r, dcol)] and (r, c) not in selected_cells and r not in actual_selected_rows and c not in actual_selected_cols:
        tf = self.table_fg if self.cell_options[(r, dcol)]['highlight'][1] is None else self.cell_options[(r, dcol)]['highlight'][1]
        if self.cell_options[(r, dcol)]['highlight'][0] is not None:
            # no blending needed: highlight colour is used as-is
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = self.cell_options[(r, dcol)]['highlight'][0],
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    elif r in self.row_options and 'highlight' in self.row_options[r] and (r, c) not in selected_cells and r not in actual_selected_rows and c not in actual_selected_cols:
        tf = self.table_fg if self.row_options[r]['highlight'][1] is None else self.row_options[r]['highlight'][1]
        if self.row_options[r]['highlight'][0] is not None:
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = self.row_options[r]['highlight'][0],
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi",
                                            can_width = can_width if self.row_options[r]['highlight'][2] else None)
    elif dcol in self.col_options and 'highlight' in self.col_options[dcol] and (r, c) not in selected_cells and r not in actual_selected_rows and c not in actual_selected_cols:
        tf = self.table_fg if self.col_options[dcol]['highlight'][1] is None else self.col_options[dcol]['highlight'][1]
        if self.col_options[dcol]['highlight'][0] is not None:
            redrawn = self.redraw_highlight(fc + 1, fr + 1, sc, sr, fill = self.col_options[dcol]['highlight'][0],
                                            outline = self.table_fg if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)] else "", tag = "hi")
    # ________________________ CELL IS JUST SELECTED ________________________
    elif (r, c) in selected_cells:
        tf = self.table_selected_cells_fg
    elif r in actual_selected_rows:
        tf = self.table_selected_rows_fg
    elif c in actual_selected_cols:
        tf = self.table_selected_columns_fg
    # ________________________ CELL IS NOT SELECTED ________________________
    else:
        tf = self.table_fg
    return tf, redrawn
def redraw_highlight(self, x1, y1, x2, y2, fill, outline, tag, can_width = None):
    """Draw a highlight rectangle on the canvas, recycling a hidden item when
    one is available instead of creating a new canvas object.

    can_width, when given, stretches the rectangle right by that amount
    (used for row highlights that span the visible canvas width).  Always
    returns True so callers can record that a redraw happened.
    """
    if self.hidd_high:
        # reuse a previously hidden rectangle; sh records whether it was
        # already showing (then only colours need updating)
        t, sh = self.hidd_high.popitem()
        self.coords(t, x1 - 1 if outline else x1, y1 - 1 if outline else y1, x2 if can_width is None else x2 + can_width, y2)
        if sh:
            self.itemconfig(t, fill = fill, outline = outline)
        else:
            self.itemconfig(t, fill = fill, outline = outline, tag = tag, state = "normal")
        self.lift(t)
    else:
        t = self.create_rectangle(x1 - 1 if outline else x1, y1 - 1 if outline else y1, x2 if can_width is None else x2 + can_width, y2, fill = fill, outline = outline, tag = tag)
    self.disp_high[t] = True
    return True
def redraw_dropdown(self, x1, y1, x2, y2, fill, outline, tag, draw_outline = True, draw_arrow = True):
    """Draw a dropdown cell's outline and its down-arrow glyph, recycling
    hidden canvas items where possible."""
    if draw_outline:
        self.redraw_highlight(x1 + 1, y1 + 1, x2, y2, fill = "", outline = self.table_fg, tag = tag)
    if draw_arrow:
        # arrow is a 3-point polyline sized from the current text height
        topysub = floor(self.half_txt_h / 2)
        mid_y = y1 + floor(self.half_txt_h / 2) + 5
        #top left points for triangle
        ty1 = mid_y - topysub + 2
        tx1 = x2 - self.txt_h + 1
        #bottom points for triangle
        ty2 = mid_y + self.half_txt_h - 4
        tx2 = x2 - self.half_txt_h - 1
        #top right points for triangle
        ty3 = mid_y - topysub + 2
        tx3 = x2 - 3
        points = (tx1, ty1, tx2, ty2, tx3, ty3)
        if self.hidd_dropdown:
            # reuse a previously hidden arrow item
            t, sh = self.hidd_dropdown.popitem()
            self.coords(t, points)
            if sh:
                self.itemconfig(t, fill = fill)
            else:
                self.itemconfig(t, fill = fill, tag = tag, state = "normal")
            self.lift(t)
        else:
            t = self.create_line(points, fill = fill, width = 2, capstyle = tk.ROUND, joinstyle = tk.ROUND, tag = tag)
        self.disp_dropdown[t] = True
def get_checkbox_points(self, x1, y1, x2, y2, radius = 4):
    """Return the flat coordinate list for a rounded-rectangle checkbox.

    Straight-edge endpoints are duplicated so that a smoothed canvas polygon
    keeps its edges straight and rounds only the corners.
    """
    lx = x1 + radius
    rx = x2 - radius
    ty = y1 + radius
    by = y2 - radius
    corners = [
        (lx, y1), (lx, y1), (rx, y1), (rx, y1),              # top edge
        (x2, y1), (x2, ty), (x2, ty), (x2, by), (x2, by),    # right side
        (x2, y2), (rx, y2), (rx, y2), (lx, y2), (lx, y2),    # bottom edge
        (x1, y2), (x1, by), (x1, by), (x1, ty), (x1, ty),    # left side
        (x1, y1),
    ]
    return [coord for point in corners for coord in point]
def redraw_checkbox(self, r, dcol, x1, y1, x2, y2, fill, outline, tag, draw_check = False):
    """Draw a checkbox in the cell at (r, dcol): a rounded box outline and,
    when draw_check is True, a filled inner box with an X through it.
    Hidden canvas items are recycled where possible."""
    points = self.get_checkbox_points(x1, y1, x2, y2)
    if self.hidd_checkbox:
        # reuse a previously hidden polygon item
        t, sh = self.hidd_checkbox.popitem()
        self.coords(t, points)
        if sh:
            self.itemconfig(t, fill = outline, outline = fill)
        else:
            self.itemconfig(t, fill = outline, outline = fill, tag = tag, state = "normal")
        self.lift(t)
    else:
        t = self.create_polygon(points, fill = outline, outline = fill, tag = tag, smooth = True)
    self.disp_checkbox[t] = True
    if draw_check:
        # draw filled box
        x1 = x1 + 2
        y1 = y1 + 2
        x2 = x2 - 1
        y2 = y2 - 1
        points = self.get_checkbox_points(x1, y1, x2, y2)
        if self.hidd_checkbox:
            t, sh = self.hidd_checkbox.popitem()
            self.coords(t, points)
            if sh:
                self.itemconfig(t, fill = fill, outline = outline)
            else:
                self.itemconfig(t, fill = fill, outline = outline, tag = tag, state = "normal")
            self.lift(t)
        else:
            t = self.create_polygon(points, fill = fill, outline = outline, tag = tag, smooth = True)
        self.disp_checkbox[t] = True
        # draw one line of X
        if self.hidd_grid:
            t, sh = self.hidd_grid.popitem()
            self.coords(t, x1 + 2, y1 + 2, x2 - 2, y2 - 2)
            if sh:
                self.itemconfig(t, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2)
            else:
                self.itemconfig(t, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2, tag = tag, state = "normal")
            self.lift(t)
        else:
            t = self.create_line(x1 + 2, y1 + 2, x2 - 2, y2 - 2, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2, tag = tag)
        self.disp_grid[t] = True
        # draw other line of X
        if self.hidd_grid:
            t, sh = self.hidd_grid.popitem()
            self.coords(t, x2 - 2, y1 + 2, x1 + 2, y2 - 2)
            if sh:
                self.itemconfig(t, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2)
            else:
                self.itemconfig(t, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2, tag = tag, state = "normal")
            self.lift(t)
        else:
            t = self.create_line(x2 - 2, y1 + 2, x1 + 2, y2 - 2, fill = self.get_widget_bg_fg(r, dcol)[0], capstyle = tk.ROUND, joinstyle = tk.ROUND, width = 2, tag = tag)
        self.disp_grid[t] = True
    def main_table_redraw_grid_and_text(self, redraw_header = False, redraw_row_index = False, redraw_table = True):
        """Redraw the visible portion of the main table: grid lines, cell
        highlights, dropdown arrows, checkboxes and cell text.

        Canvas items are pooled rather than destroyed: everything drawn last
        redraw is moved from the disp_* dicts into the hidd_* dicts, re-used
        via popitem()/coords()/itemconfig(), and whatever is left over is
        hidden at the end.  In the pools the value (sh) records whether the
        item was left in the "normal" (visible) state by a prior redraw.

        Returns True on success, False if widget queries/configuration fail.
        """
        last_col_line_pos = self.col_positions[-1] + 1
        last_row_line_pos = self.row_positions[-1] + 1
        try:
            can_width = self.winfo_width()
            can_height = self.winfo_height()
            self.configure(scrollregion = (0,
                                           0,
                                           last_col_line_pos + self.empty_horizontal,
                                           last_row_line_pos + self.empty_vertical))
            # show/hide the scrollbars depending on whether the content fits
            if can_width >= last_col_line_pos + self.empty_horizontal and self.parentframe.xscroll_showing:
                self.parentframe.xscroll.grid_forget()
                self.parentframe.xscroll_showing = False
            elif can_width < last_col_line_pos + self.empty_horizontal and not self.parentframe.xscroll_showing and not self.parentframe.xscroll_disabled and can_height > 45:
                self.parentframe.xscroll.grid(row = 2, column = 1, columnspan = 2, sticky = "nswe")
                self.parentframe.xscroll_showing = True
            if can_height >= last_row_line_pos + self.empty_vertical and self.parentframe.yscroll_showing:
                self.parentframe.yscroll.grid_forget()
                self.parentframe.yscroll_showing = False
            elif can_height < last_row_line_pos + self.empty_vertical and not self.parentframe.yscroll_showing and not self.parentframe.yscroll_disabled and can_width > 45:
                self.parentframe.yscroll.grid(row = 1, column = 2, sticky = "nswe")
                self.parentframe.yscroll_showing = True
        except:
            return False
        # visible row range via bisect on the cumulative row pixel positions
        y2 = self.canvasy(can_height)
        end_row = bisect.bisect_right(self.row_positions, y2)
        if not y2 >= self.row_positions[-1]:
            end_row += 1
        if redraw_row_index and self.show_index:
            self.RI.auto_set_index_width(end_row - 1)
        x1 = self.canvasx(0)
        y1 = self.canvasy(0)
        x2 = self.canvasx(can_width)
        start_row = bisect.bisect_left(self.row_positions, y1)
        self.row_width_resize_bbox = (x1, y1, x1 + 2, y2)
        self.header_height_resize_bbox = (x1 + 6, y1, x2, y1 + 2)
        # recycle everything drawn last redraw into the hidden pools
        self.hidd_text.update(self.disp_text)
        self.disp_text = {}
        self.hidd_high.update(self.disp_high)
        self.disp_high = {}
        self.hidd_grid.update(self.disp_grid)
        self.disp_grid = {}
        self.hidd_dropdown.update(self.disp_dropdown)
        self.disp_dropdown = {}
        self.hidd_checkbox.update(self.disp_checkbox)
        self.disp_checkbox = {}
        # visible column range and clipping limits for the grid lines
        start_col = bisect.bisect_left(self.col_positions, x1)
        end_col = bisect.bisect_right(self.col_positions, x2)
        if not x2 >= self.col_positions[-1]:
            end_col += 1
        if last_col_line_pos > x2:
            x_stop = x2
        else:
            x_stop = last_col_line_pos
        if last_row_line_pos > y2:
            y_stop = y2
        else:
            y_stop = last_row_line_pos
        # sb: pixel just below the visible bottom, used to clamp cell bottoms
        sb = y2 + 2
        if self.show_horizontal_grid:
            for r in range(start_row - 1, end_row):
                y = self.row_positions[r]
                if self.hidd_grid:
                    # re-use a pooled line item
                    t, sh = self.hidd_grid.popitem()
                    self.coords(t, x1, y, x2 + can_width if self.horizontal_grid_to_end_of_window else x_stop, y)
                    if sh:
                        self.itemconfig(t, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1)
                    else:
                        self.itemconfig(t, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1, state = "normal")
                    self.disp_grid[t] = True
                else:
                    self.disp_grid[self.create_line(x1, y, x2 + can_width if self.horizontal_grid_to_end_of_window else x_stop, y, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1, tag = "g")] = True
        if self.show_vertical_grid:
            for c in range(start_col - 1, end_col):
                x = self.col_positions[c]
                if self.hidd_grid:
                    t, sh = self.hidd_grid.popitem()
                    self.coords(t, x, y1, x, y2 + can_height if self.vertical_grid_to_end_of_window else y_stop)
                    if sh:
                        self.itemconfig(t, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1)
                    else:
                        self.itemconfig(t, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1, state = "normal")
                    self.disp_grid[t] = True
                else:
                    self.disp_grid[self.create_line(x, y1, x, y2 + can_height if self.vertical_grid_to_end_of_window else y_stop, fill = self.table_grid_fg, capstyle = tk.BUTT, joinstyle = tk.ROUND, width = 1, tag = "g")] = True
        if start_row > 0:
            start_row -= 1
        if start_col > 0:
            start_col -= 1
        end_row -= 1
        # parse the selection background colors into (r, g, b) int tuples;
        # Color_Map_ resolves named colors to "#rrggbb" hex strings
        c_2 = self.table_selected_cells_bg if self.table_selected_cells_bg.startswith("#") else Color_Map_[self.table_selected_cells_bg]
        c_2_ = (int(c_2[1:3], 16), int(c_2[3:5], 16), int(c_2[5:], 16))
        c_3 = self.table_selected_columns_bg if self.table_selected_columns_bg.startswith("#") else Color_Map_[self.table_selected_columns_bg]
        c_3_ = (int(c_3[1:3], 16), int(c_3[3:5], 16), int(c_3[5:], 16))
        c_4 = self.table_selected_rows_bg if self.table_selected_rows_bg.startswith("#") else Color_Map_[self.table_selected_rows_bg]
        c_4_ = (int(c_4[1:3], 16), int(c_4[3:5], 16), int(c_4[5:], 16))
        rows_ = tuple(range(start_row, end_row))
        selected_cells, selected_rows, selected_cols, actual_selected_rows, actual_selected_cols = self.get_redraw_selections((start_row, start_col, end_row, end_col - 1))
        if redraw_table:
            for c in range(start_col, end_col - 1):
                for r in rows_:
                    fr = self.row_positions[r]
                    sr = self.row_positions[r + 1]
                    # skip rows too short to show a line of text
                    if sr - fr < self.txt_h:
                        continue
                    if sr > sb:
                        sr = sb
                    fc = self.col_positions[c]
                    sc = self.col_positions[c + 1]
                    # dcol: index into the underlying data for display column c
                    if self.all_columns_displayed:
                        dcol = c
                    else:
                        dcol = self.displayed_columns[c]
                    tf, dd_drawn = self.redraw_highlight_get_text_fg(r, c, fc, fr, sc, sr, c_2_, c_3_, c_4_, selected_cells, actual_selected_rows, actual_selected_cols, dcol, can_width)
                    # alignment precedence: cell option > row option > column
                    # option > table default
                    if (r, dcol) in self.cell_options and 'align' in self.cell_options[(r, dcol)]:
                        cell_alignment = self.cell_options[(r, dcol)]['align']
                    elif r in self.row_options and 'align' in self.row_options[r]:
                        cell_alignment = self.row_options[r]['align']
                    elif dcol in self.col_options and 'align' in self.col_options[dcol]:
                        cell_alignment = self.col_options[dcol]['align']
                    else:
                        cell_alignment = self.align
                    # x: text anchor x coordinate; mw: max text width in cell
                    if cell_alignment == "w":
                        x = fc + 5
                        if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)]:
                            mw = sc - fc - self.txt_h - 2
                            self.redraw_dropdown(fc, fr, sc, self.row_positions[r + 1], fill = tf, outline = tf, tag = "dd", draw_outline = not dd_drawn, draw_arrow = mw >= 5)
                        else:
                            mw = sc - fc - 5
                    elif cell_alignment == "e":
                        if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)]:
                            mw = sc - fc - self.txt_h - 2
                            x = sc - 5 - self.txt_h
                            self.redraw_dropdown(fc, fr, sc, self.row_positions[r + 1], fill = tf, outline = tf, tag = "dd", draw_outline = not dd_drawn, draw_arrow = mw >= 5)
                        else:
                            mw = sc - fc - 5
                            x = sc - 5
                    elif cell_alignment == "center":
                        stop = fc + 5
                        if (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)]:
                            mw = sc - fc - self.txt_h - 2
                            x = fc + ceil((sc - fc - self.txt_h) / 2)
                            self.redraw_dropdown(fc, fr, sc, self.row_positions[r + 1], fill = tf, outline = tf, tag = "dd", draw_outline = not dd_drawn, draw_arrow = mw >= 5)
                        else:
                            mw = sc - fc - 1
                            x = fc + floor((sc - fc) / 2)
                    if (r, dcol) in self.cell_options and 'checkbox' in self.cell_options[(r, dcol)]:
                        if mw > self.txt_h + 2:
                            # shift the text anchor to the right of the box
                            box_w = fc + self.txt_h + 2 - fc
                            if cell_alignment == "w":
                                x = x + box_w
                            elif cell_alignment == "center":
                                x = x + floor(box_w / 2)
                            mw = mw - box_w
                            self.redraw_checkbox(r,
                                                 dcol,
                                                 fc + 2,
                                                 fr + 2,
                                                 fc + 2 + self.txt_h + 2,
                                                 fr + 2 + self.txt_h + 2,
                                                 fill = tf if self.cell_options[(r, dcol)]['checkbox']['state'] == "normal" else self.table_grid_fg,
                                                 outline = "", tag = "cb", draw_check = self.data_ref[r][dcol])
                    try:
                        if cell_alignment == "w":
                            if x > x2 or mw <= 5:
                                continue
                            # checkbox cells display the checkbox's own text
                            if (r, dcol) in self.cell_options and 'checkbox' in self.cell_options[(r, dcol)]:
                                lns = self.cell_options[(r, dcol)]['checkbox']['text'].split("\n") if isinstance(self.cell_options[(r, dcol)]['checkbox']['text'], str) else f"{self.cell_options[(r, dcol)]['checkbox']['text']}".split("\n")
                            else:
                                lns = self.data_ref[r][dcol].split("\n") if isinstance(self.data_ref[r][dcol], str) else f"{self.data_ref[r][dcol]}".split("\n")
                            y = fr + self.fl_ins
                            if y + self.half_txt_h - 1 > y1:
                                txt = lns[0]
                                if self.hidd_text:
                                    t, sh = self.hidd_text.popitem()
                                    self.coords(t, x, y)
                                    if sh:
                                        self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "w")
                                    else:
                                        self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "w", state = "normal")
                                else:
                                    t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "w", tag = "t")
                                self.disp_text[t] = True
                                wd = self.bbox(t)
                                wd = wd[2] - wd[0]
                                if wd > mw:
                                    # estimate a cut-off, then trim char by char
                                    nl = int(len(txt) * (mw / wd))
                                    self.itemconfig(t, text = txt[:nl])
                                    wd = self.bbox(t)
                                    while wd[2] - wd[0] > mw:
                                        nl -= 1
                                        self.dchars(t, nl)
                                        wd = self.bbox(t)
                            if len(lns) > 1:
                                # remaining lines, starting from the first one
                                # that can be visible inside the viewport
                                stl = int((y1 - y) / self.xtra_lines_increment) - 1
                                if stl < 1:
                                    stl = 1
                                y += (stl * self.xtra_lines_increment)
                                if y + self.half_txt_h - 1 < sr:
                                    for i in range(stl, len(lns)):
                                        txt = lns[i]
                                        if self.hidd_text:
                                            t, sh = self.hidd_text.popitem()
                                            self.coords(t, x, y)
                                            if sh:
                                                self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "w")
                                            else:
                                                self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "w", state = "normal")
                                        else:
                                            t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "w", tag = "t")
                                        self.disp_text[t] = True
                                        wd = self.bbox(t)
                                        wd = wd[2] - wd[0]
                                        if wd > mw:
                                            nl = int(len(txt) * (mw / wd))
                                            self.itemconfig(t, text = txt[:nl])
                                            wd = self.bbox(t)
                                            while wd[2] - wd[0] > mw:
                                                nl -= 1
                                                self.dchars(t, nl)
                                                wd = self.bbox(t)
                                        y += self.xtra_lines_increment
                                        if y + self.half_txt_h - 1 > sr:
                                            break
                        elif cell_alignment == "e":
                            if fc + 5 > x2 or mw <= 5:
                                continue
                            lns = self.data_ref[r][dcol].split("\n") if isinstance(self.data_ref[r][dcol], str) else f"{self.data_ref[r][dcol]}".split("\n")
                            y = fr + self.fl_ins
                            if y + self.half_txt_h - 1 > y1:
                                txt = lns[0]
                                if self.hidd_text:
                                    t, sh = self.hidd_text.popitem()
                                    self.coords(t, x, y)
                                    if sh:
                                        self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "e")
                                    else:
                                        self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "e", state = "normal")
                                else:
                                    t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "e", tag = "t")
                                self.disp_text[t] = True
                                wd = self.bbox(t)
                                wd = wd[2] - wd[0]
                                if wd > mw:
                                    # right aligned: trim from the left side
                                    txt = txt[len(txt) - int(len(txt) * (mw / wd)):]
                                    self.itemconfig(t, text = txt)
                                    wd = self.bbox(t)
                                    while wd[2] - wd[0] > mw:
                                        txt = txt[1:]
                                        self.itemconfig(t, text = txt)
                                        wd = self.bbox(t)
                            if len(lns) > 1:
                                stl = int((y1 - y) / self.xtra_lines_increment) - 1
                                if stl < 1:
                                    stl = 1
                                y += (stl * self.xtra_lines_increment)
                                if y + self.half_txt_h - 1 < sr:
                                    for i in range(stl, len(lns)):
                                        txt = lns[i]
                                        if self.hidd_text:
                                            t, sh = self.hidd_text.popitem()
                                            self.coords(t, x, y)
                                            if sh:
                                                self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "e")
                                            else:
                                                self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "e", state = "normal")
                                        else:
                                            t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "e", tag = "t")
                                        self.disp_text[t] = True
                                        wd = self.bbox(t)
                                        wd = wd[2] - wd[0]
                                        if wd > mw:
                                            txt = txt[len(txt) - int(len(txt) * (mw / wd)):]
                                            self.itemconfig(t, text = txt)
                                            wd = self.bbox(t)
                                            while wd[2] - wd[0] > mw:
                                                txt = txt[1:]
                                                self.itemconfig(t, text = txt)
                                                wd = self.bbox(t)
                                        y += self.xtra_lines_increment
                                        if y + self.half_txt_h - 1 > sr:
                                            break
                        elif cell_alignment == "center":
                            if stop > x2 or mw <= 5:
                                continue
                            lns = self.data_ref[r][dcol].split("\n") if isinstance(self.data_ref[r][dcol], str) else f"{self.data_ref[r][dcol]}".split("\n")
                            txt = lns[0]
                            y = fr + self.fl_ins
                            if y + self.half_txt_h - 1 > y1:
                                if self.hidd_text:
                                    t, sh = self.hidd_text.popitem()
                                    self.coords(t, x, y)
                                    if sh:
                                        self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "center")
                                    else:
                                        self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "center", state = "normal")
                                else:
                                    t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "center", tag = "t")
                                self.disp_text[t] = True
                                wd = self.bbox(t)
                                wd = wd[2] - wd[0]
                                if wd > mw:
                                    # centered: trim both ends, then alternate
                                    # ends one char at a time via the cycler
                                    tl = len(txt)
                                    tmod = ceil((tl - int(tl * (mw / wd))) / 2)
                                    txt = txt[tmod - 1:-tmod]
                                    self.itemconfig(t, text = txt)
                                    wd = self.bbox(t)
                                    self.c_align_cyc = cycle(self.centre_alignment_text_mod_indexes)
                                    while wd[2] - wd[0] > mw:
                                        txt = txt[next(self.c_align_cyc)]
                                        self.itemconfig(t, text = txt)
                                        wd = self.bbox(t)
                                    self.coords(t, x, y)
                            if len(lns) > 1:
                                stl = int((y1 - y) / self.xtra_lines_increment) - 1
                                if stl < 1:
                                    stl = 1
                                y += (stl * self.xtra_lines_increment)
                                if y + self.half_txt_h - 1 < sr:
                                    for i in range(stl, len(lns)):
                                        txt = lns[i]
                                        if self.hidd_text:
                                            t, sh = self.hidd_text.popitem()
                                            self.coords(t, x, y)
                                            if sh:
                                                self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "center")
                                            else:
                                                self.itemconfig(t, text = txt, fill = tf, font = self.my_font, anchor = "center", state = "normal")
                                        else:
                                            t = self.create_text(x, y, text = txt, fill = tf, font = self.my_font, anchor = "center", tag = "t")
                                        self.disp_text[t] = True
                                        wd = self.bbox(t)
                                        wd = wd[2] - wd[0]
                                        if wd > mw:
                                            tl = len(txt)
                                            tmod = ceil((tl - int(tl * (mw / wd))) / 2)
                                            txt = txt[tmod - 1:-tmod]
                                            self.itemconfig(t, text = txt)
                                            wd = self.bbox(t)
                                            self.c_align_cyc = cycle(self.centre_alignment_text_mod_indexes)
                                            while wd[2] - wd[0] > mw:
                                                txt = txt[next(self.c_align_cyc)]
                                                self.itemconfig(t, text = txt)
                                                wd = self.bbox(t)
                                            self.coords(t, x, y)
                                        y += self.xtra_lines_increment
                                        if y + self.half_txt_h - 1 > sr:
                                            break
                    except:
                        continue
        # hide any pooled items that were not re-used this redraw
        try:
            self.tag_raise("t")
            for t, sh in self.hidd_text.items():
                if sh:
                    self.itemconfig(t, state = "hidden")
                    self.hidd_text[t] = False
            for t, sh in self.hidd_high.items():
                if sh:
                    self.itemconfig(t, state = "hidden")
                    self.hidd_high[t] = False
            for t, sh in self.hidd_grid.items():
                if sh:
                    self.itemconfig(t, state = "hidden")
                    self.hidd_grid[t] = False
            for t, sh in self.hidd_dropdown.items():
                if sh:
                    self.itemconfig(t, state = "hidden")
                    self.hidd_dropdown[t] = False
            for t, sh in self.hidd_checkbox.items():
                if sh:
                    self.itemconfig(t, state = "hidden")
                    self.hidd_checkbox[t] = False
            if redraw_header and self.show_header:
                self.CH.redraw_grid_and_text(last_col_line_pos, x1, x_stop, start_col, end_col, selected_cols, actual_selected_rows, actual_selected_cols)
            if redraw_row_index and self.show_index:
                self.RI.redraw_grid_and_text(last_row_line_pos, y1, y_stop, start_row, end_row + 1, y2, x1, x_stop, selected_rows, actual_selected_cols, actual_selected_rows)
            # keep selection borders and the current-selection box on top
            if self.show_selected_cells_border:
                self.tag_raise("CellSelectBorder")
            self.tag_raise("Current_Inside")
            self.tag_raise("Current_Outside")
            self.tag_raise("RowSelectBorder")
            self.tag_raise("ColSelectBorder")
        except:
            return False
        return True
def get_all_selection_items(self):
return sorted(self.find_withtag("CellSelectFill") + self.find_withtag("RowSelectFill") + self.find_withtag("ColSelectFill") + self.find_withtag("Current_Inside") + self.find_withtag("Current_Outside"))
def get_boxes(self):
boxes = {}
for item in self.get_all_selection_items():
alltags = self.gettags(item)
if alltags[0] == "CellSelectFill":
boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "cells"
elif alltags[0] == "RowSelectFill":
boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "rows"
elif alltags[0] == "ColSelectFill":
boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "cols"
elif alltags[0] == "Current_Inside":
boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = f"{alltags[2]}_inside"
elif alltags[0] == "Current_Outside":
boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = f"{alltags[2]}_outside"
return boxes
def reselect_from_get_boxes(self, boxes):
for k, v in boxes.items():
if v == "cells":
self.create_selected(k[0], k[1], k[2], k[3], "cells")
elif v == "rows":
self.create_selected(k[0], k[1], k[2], k[3], "rows")
elif v == "cols":
self.create_selected(k[0], k[1], k[2], k[3], "cols")
elif v in ("cell_inside", "cell_outside", "row_inside", "row_outside", "col_outside", "col_inside"): #currently selected
x = v.split("_")
self.create_current(k[0], k[1], type_ = x[0], inside = True if x[1] == "inside" else False)
def delete_selection_rects(self, cells = True, rows = True, cols = True, delete_current = True):
deleted_boxes = {}
if cells:
for item in self.find_withtag("CellSelectFill"):
alltags = self.gettags(item)
if alltags:
deleted_boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "cells"
self.delete("CellSelectFill", "CellSelectBorder")
self.RI.delete("CellSelectFill", "CellSelectBorder")
self.CH.delete("CellSelectFill", "CellSelectBorder")
if rows:
for item in self.find_withtag("RowSelectFill"):
alltags = self.gettags(item)
if alltags:
deleted_boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "rows"
self.delete("RowSelectFill", "RowSelectBorder")
self.RI.delete("RowSelectFill", "RowSelectBorder")
self.CH.delete("RowSelectFill", "RowSelectBorder")
if cols:
for item in self.find_withtag("ColSelectFill"):
alltags = self.gettags(item)
if alltags:
deleted_boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "cols"
self.delete("ColSelectFill", "ColSelectBorder")
self.RI.delete("ColSelectFill", "ColSelectBorder")
self.CH.delete("ColSelectFill", "ColSelectBorder")
if delete_current:
for item in chain(self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside")):
alltags = self.gettags(item)
if alltags:
deleted_boxes[tuple(int(e) for e in alltags[1].split("_") if e)] = "cells"
self.delete("Current_Inside", "Current_Outside")
self.RI.delete("Current_Inside", "Current_Outside")
self.CH.delete("Current_Inside", "Current_Outside")
return deleted_boxes
def currently_selected(self, get_coords = False):
items = self.find_withtag("Current_Inside") + self.find_withtag("Current_Outside")
if not items:
return tuple()
alltags = self.gettags(items[0])
box = tuple(int(e) for e in alltags[1].split("_") if e)
if alltags[2] == "cell":
return (box[0], box[1])
elif alltags[2] == "col":
return ("column", box[1]) if not get_coords else (0, box[1])
elif alltags[2] == "row":
return ("row", box[0]) if not get_coords else (box[0], 0)
def get_tags_of_current(self):
items = self.find_withtag("Current_Inside") + self.find_withtag("Current_Outside")
if items:
return self.gettags(items[0])
else:
return tuple()
    def create_current(self, r, c, type_ = "cell", inside = False): # cell, col or row
        """Make cell (r, c) the current selection by drawing its rectangle on
        the main table, row index and column header canvases.

        inside = True tags the rectangle "Current_Inside" (the current cell
        lies within a larger selection box), otherwise "Current_Outside".
        Returns the canvas item id of the main-table rectangle.
        """
        r1, c1, r2, c2 = r, c, r + 1, c + 1
        # only one current-selection box may exist at a time
        self.delete("Current_Inside", "Current_Outside")
        self.RI.delete("Current_Inside", "Current_Outside")
        self.CH.delete("Current_Inside", "Current_Outside")
        # an empty table has positions == [0]; collapse the box to nothing
        if self.col_positions == [0]:
            c1 = 0
            c2 = 0
        if self.row_positions == [0]:
            r1 = 0
            r2 = 0
        # tags: (kind, "r1_c1_r2_c2", type_); coordinates are recovered later
        # by parsing the second tag
        if inside:
            tagr = ("Current_Inside", f"{r1}_{c1}_{r2}_{c2}", type_)
        else:
            tagr = ("Current_Outside", f"{r1}_{c1}_{r2}_{c2}", type_)
        if self.show_selected_cells_border:
            # bordered style: outline only, raised above other items
            b = self.create_rectangle(self.col_positions[c1] + 1, self.row_positions[r1] + 1, self.col_positions[c2], self.row_positions[r2],
                                      fill = "",
                                      outline = self.table_selected_cells_border_fg,
                                      width = 2,
                                      tags = tagr)
            self.tag_raise(b)
        else:
            # filled style: background fill, lowered beneath cell text
            b = self.create_rectangle(self.col_positions[c1], self.row_positions[r1], self.col_positions[c2], self.row_positions[r2],
                                      fill = self.table_selected_cells_bg,
                                      outline = "",
                                      tags = tagr)
            self.tag_lower(b)
        # matching highlights on the row index and column header
        ri = self.RI.create_rectangle(0, self.row_positions[r1], self.RI.current_width - 1, self.row_positions[r2],
                                      fill = self.RI.index_selected_cells_bg,
                                      outline = "",
                                      tags = tagr)
        ch = self.CH.create_rectangle(self.col_positions[c1], 0, self.col_positions[c2], self.CH.current_height - 1,
                                      fill = self.CH.header_selected_cells_bg,
                                      outline = "",
                                      tags = tagr)
        self.RI.tag_lower(ri)
        self.CH.tag_lower(ch)
        return b
def set_current_to_last(self):
if not self.currently_selected():
items = sorted(self.find_withtag("CellSelectFill") + self.find_withtag("RowSelectFill") + self.find_withtag("ColSelectFill"))
if items:
last = self.gettags(items[-1])
r1, c1, r2, c2 = tuple(int(e) for e in last[1].split("_") if e)
if last[0] == "CellSelectFill":
return self.gettags(self.create_current(r1, c1, "cell", inside = True))
elif last[0] == "RowSelectFill":
return self.gettags(self.create_current(r1, c1, "row", inside = True))
elif last[0] == "ColSelectFill":
return self.gettags(self.create_current(r1, c1, "col", inside = True))
return tuple()
def delete_current(self):
self.delete("Current_Inside", "Current_Outside")
self.RI.delete("Current_Inside", "Current_Outside")
self.CH.delete("Current_Inside", "Current_Outside")
    def create_selected(self, r1 = None, c1 = None, r2 = None, c2 = None, type_ = "cells", taglower = True):
        """Draw a selection box covering rows r1:r2, cols c1:c2 on the main
        table, row index and column header canvases.

        type_ is "cells", "rows" or "cols" and selects the tag names and
        colors used.  Returns (fill_rect_id, border_rect_id); the border id
        is None when selected-cell borders are disabled.
        """
        currently_selected = self.currently_selected()
        # if the currently selected cell lies inside the new box, redraw it
        # tagged "inside" so it displays correctly on top of the fill
        if currently_selected and isinstance(currently_selected[0], int):
            if (currently_selected[0] >= r1 and
                currently_selected[1] >= c1 and
                currently_selected[0] < r2 and
                currently_selected[1] < c2):
                self.create_current(currently_selected[0], currently_selected[1], type_ = "cell", inside = True)
        # NOTE: taglower is rebound below from a bool to the fill tag name
        if type_ == "cells":
            tagr = ("CellSelectFill", f"{r1}_{c1}_{r2}_{c2}")
            tagb = ("CellSelectBorder", f"{r1}_{c1}_{r2}_{c2}")
            taglower = "CellSelectFill"
            mt_bg = self.table_selected_cells_bg
            mt_border_col = self.table_selected_cells_border_fg
        elif type_ == "rows":
            tagr = ("RowSelectFill", f"{r1}_{c1}_{r2}_{c2}")
            tagb = ("RowSelectBorder", f"{r1}_{c1}_{r2}_{c2}")
            taglower = "RowSelectFill"
            mt_bg = self.table_selected_rows_bg
            mt_border_col = self.table_selected_rows_border_fg
        elif type_ == "cols":
            tagr = ("ColSelectFill", f"{r1}_{c1}_{r2}_{c2}")
            tagb = ("ColSelectBorder", f"{r1}_{c1}_{r2}_{c2}")
            taglower = "ColSelectFill"
            mt_bg = self.table_selected_columns_bg
            mt_border_col = self.table_selected_columns_border_fg
        # main table fill rectangle
        r = self.create_rectangle(self.col_positions[c1],
                                  self.row_positions[r1],
                                  self.canvasx(self.winfo_width()) if self.selected_rows_to_end_of_window else self.col_positions[c2],
                                  self.row_positions[r2],
                                  fill = mt_bg,
                                  outline = "",
                                  tags = tagr)
        # matching highlights on the row index and column header
        self.RI.create_rectangle(0,
                                 self.row_positions[r1],
                                 self.RI.current_width - 1,
                                 self.row_positions[r2],
                                 fill = self.RI.index_selected_rows_bg if type_ == "rows" else self.RI.index_selected_cells_bg,
                                 outline = "",
                                 tags = tagr)
        self.CH.create_rectangle(self.col_positions[c1],
                                 0,
                                 self.col_positions[c2],
                                 self.CH.current_height - 1,
                                 fill = self.CH.header_selected_columns_bg if type_ == "cols" else self.CH.header_selected_cells_bg,
                                 outline = "",
                                 tags = tagr)
        if self.show_selected_cells_border:
            b = self.create_rectangle(self.col_positions[c1], self.row_positions[r1], self.col_positions[c2], self.row_positions[r2],
                                      fill = "",
                                      outline = mt_border_col,
                                      tags = tagb)
        else:
            b = None
        if taglower:
            # push fills beneath other items on all three canvases
            self.tag_lower(taglower)
            self.RI.tag_lower(taglower)
            self.CH.tag_lower(taglower)
            self.RI.tag_lower("Current_Inside")
            self.RI.tag_lower("Current_Outside")
            self.RI.tag_lower("CellSelectFill")
            self.CH.tag_lower("Current_Inside")
            self.CH.tag_lower("Current_Outside")
            self.CH.tag_lower("CellSelectFill")
        return r, b
    def recreate_all_selection_boxes(self):
        """Delete and redraw every selection box and the current-selection
        rectangle, clamping box coordinates to the current table size; used
        after the table's rows/columns change."""
        for item in chain(self.find_withtag("CellSelectFill"),
                          self.find_withtag("RowSelectFill"),
                          self.find_withtag("ColSelectFill"),
                          self.find_withtag("Current_Inside"),
                          self.find_withtag("Current_Outside")):
            full_tags = self.gettags(item)
            if full_tags:
                type_ = full_tags[0]
                r1, c1, r2, c2 = tuple(int(e) for e in full_tags[1].split("_") if e)
                # items are deleted via their coordinate tag on all 3 canvases
                self.delete(f"{r1}_{c1}_{r2}_{c2}")
                self.RI.delete(f"{r1}_{c1}_{r2}_{c2}")
                self.CH.delete(f"{r1}_{c1}_{r2}_{c2}")
                # drop boxes that start beyond the table; clamp ends into it
                if r1 >= len(self.row_positions) - 1 or c1 >= len(self.col_positions) - 1:
                    continue
                if r2 > len(self.row_positions) - 1:
                    r2 = len(self.row_positions) - 1
                if c2 > len(self.col_positions) - 1:
                    c2 = len(self.col_positions) - 1
                if type_.startswith("CellSelect"):
                    self.create_selected(r1, c1, r2, c2, "cells")
                elif type_.startswith("RowSelect"):
                    self.create_selected(r1, c1, r2, c2, "rows")
                elif type_.startswith("ColSelect"):
                    self.create_selected(r1, c1, r2, c2, "cols")
                elif type_.startswith("Current"):
                    # full_tags[2] carries the current-selection type
                    if type_ == "Current_Inside":
                        self.create_current(r1, c1, full_tags[2], inside = True)
                    elif type_ == "Current_Outside":
                        self.create_current(r1, c1, full_tags[2], inside = False)
        # restack fills beneath everything else on each canvas
        self.tag_lower("RowSelectFill")
        self.RI.tag_lower("RowSelectFill")
        self.CH.tag_lower("RowSelectFill")
        self.tag_lower("ColSelectFill")
        self.RI.tag_lower("ColSelectFill")
        self.CH.tag_lower("ColSelectFill")
        self.tag_lower("CellSelectFill")
        self.RI.tag_lower("CellSelectFill")
        self.CH.tag_lower("CellSelectFill")
        self.RI.tag_lower("Current_Inside")
        self.RI.tag_lower("Current_Outside")
        self.CH.tag_lower("Current_Inside")
        self.CH.tag_lower("Current_Outside")
        if not self.show_selected_cells_border:
            self.tag_lower("Current_Outside")
def GetColCoords(self, c, sel = False):
last_col_line_pos = self.col_positions[-1] + 1
last_row_line_pos = self.row_positions[-1] + 1
x1 = self.col_positions[c]
x2 = self.col_positions[c + 1]
y1 = self.canvasy(0)
y2 = self.canvasy(self.winfo_height())
if last_row_line_pos < y2:
y2 = last_col_line_pos
if sel:
return x1, y1 + 1, x2, y2
else:
return x1, y1, x2, y2
def GetRowCoords(self, r, sel = False):
last_col_line_pos = self.col_positions[-1] + 1
x1 = self.canvasx(0)
x2 = self.canvasx(self.winfo_width())
if last_col_line_pos < x2:
x2 = last_col_line_pos
y1 = self.row_positions[r]
y2 = self.row_positions[r + 1]
if sel:
return x1, y1 + 1, x2, y2
else:
return x1, y1, x2, y2
def get_redraw_selections(self, within_range):
scells = set()
srows = set()
scols = set()
ac_srows = set()
ac_scols = set()
within_r1 = within_range[0]
within_c1 = within_range[1]
within_r2 = within_range[2]
within_c2 = within_range[3]
for item in self.find_withtag("RowSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if (r1 >= within_r1 or
r2 <= within_r2) or (within_r1 >= r1 and within_r2 <= r2):
if r1 > within_r1:
start_row = r1
else:
start_row = within_r1
if r2 < within_r2:
end_row = r2
else:
end_row = within_r2
srows.update(set(range(start_row, end_row)))
ac_srows.update(set(range(start_row, end_row)))
for item in chain(self.find_withtag("Current_Outside"), self.find_withtag("Current_Inside")):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if (r1 >= within_r1 or
r2 <= within_r2):
if r1 > within_r1:
start_row = r1
else:
start_row = within_r1
if r2 < within_r2:
end_row = r2
else:
end_row = within_r2
srows.update(set(range(start_row, end_row)))
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if (c1 >= within_c1 or
c2 <= within_c2) or (within_c1 >= c1 and within_c2 <= c2):
if c1 > within_c1:
start_col = c1
else:
start_col = within_c1
if c2 < within_c2:
end_col = c2
else:
end_col = within_c2
scols.update(set(range(start_col, end_col)))
ac_scols.update(set(range(start_col, end_col)))
for item in self.find_withtag("Current_Outside"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if (c1 >= within_c1 or
c2 <= within_c2):
if c1 > within_c1:
start_col = c1
else:
start_col = within_c1
if c2 < within_c2:
end_col = c2
else:
end_col = within_c2
scols.update(set(range(start_col, end_col)))
if not self.show_selected_cells_border:
iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("Current_Outside"))
else:
iterable = self.find_withtag("CellSelectFill")
for item in iterable:
tags = self.gettags(item)
r1, c1, r2, c2 = tuple(int(e) for e in tags[1].split("_") if e)
if (r1 >= within_r1 or
c1 >= within_c1 or
r2 <= within_r2 or
c2 <= within_c2) or (within_c1 >= c1 and within_c2 <= c2) or (within_r1 >= r1 and within_r2 <= r2):
if r1 > within_r1:
start_row = r1
else:
start_row = within_r1
if c1 > within_c1:
start_col = c1
else:
start_col = within_c1
if r2 < within_r2:
end_row = r2
else:
end_row = within_r2
if c2 < within_c2:
end_col = c2
else:
end_col = within_c2
colsr = tuple(range(start_col, end_col))
rowsr = tuple(range(start_row, end_row))
scells.update(set(product(rowsr, colsr)))
srows.update(set(range(start_row, end_row)))
scols.update(set(range(start_col, end_col)))
return scells, srows, scols, ac_srows, ac_scols
def get_selected_min_max(self):
min_x = float("inf")
min_y = float("inf")
max_x = 0
max_y = 0
for item in chain(self.find_withtag("CellSelectFill"),
self.find_withtag("RowSelectFill"),
self.find_withtag("ColSelectFill"),
self.find_withtag("Current_Inside"),
self.find_withtag("Current_Outside")):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if r1 < min_y:
min_y = r1
if c1 < min_x:
min_x = c1
if r2 > max_y:
max_y = r2
if c2 > max_x:
max_x = c2
if min_x != float("inf") and min_y != float("inf") and max_x > 0 and max_y > 0:
return min_y, min_x, max_y, max_x
else:
return None, None, None, None
def get_selected_rows(self, get_cells = False, within_range = None, get_cells_as_rows = False):
s = set()
if within_range is not None:
within_r1 = within_range[0]
within_r2 = within_range[1]
if get_cells:
if within_range is None:
for item in self.find_withtag("RowSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
s.update(set(product(range(r1, r2), range(0, len(self.col_positions) - 1))))
if get_cells_as_rows:
s.update(self.get_selected_cells())
else:
for item in self.find_withtag("RowSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if (r1 >= within_r1 or
r2 <= within_r2):
if r1 > within_r1:
start_row = r1
else:
start_row = within_r1
if r2 < within_r2:
end_row = r2
else:
end_row = within_r2
s.update(set(product(range(start_row, end_row), range(0, len(self.col_positions) - 1))))
if get_cells_as_rows:
s.update(self.get_selected_cells(within_range = (within_r1, 0, within_r2, len(self.col_positions) - 1)))
else:
if within_range is None:
for item in self.find_withtag("RowSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
s.update(set(range(r1, r2)))
if get_cells_as_rows:
s.update(set(tup[0] for tup in self.get_selected_cells()))
else:
for item in self.find_withtag("RowSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if (r1 >= within_r1 or
r2 <= within_r2):
if r1 > within_r1:
start_row = r1
else:
start_row = within_r1
if r2 < within_r2:
end_row = r2
else:
end_row = within_r2
s.update(set(range(start_row, end_row)))
if get_cells_as_rows:
s.update(set(tup[0] for tup in self.get_selected_cells(within_range = (within_r1, 0, within_r2, len(self.col_positions) - 1))))
return s
def get_selected_cols(self, get_cells = False, within_range = None, get_cells_as_cols = False):
s = set()
if within_range is not None:
within_c1 = within_range[0]
within_c2 = within_range[1]
if get_cells:
if within_range is None:
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
s.update(set(product(range(c1, c2), range(0, len(self.row_positions) - 1))))
if get_cells_as_cols:
s.update(self.get_selected_cells())
else:
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if (c1 >= within_c1 or
c2 <= within_c2):
if c1 > within_c1:
start_col = c1
else:
start_col = within_c1
if c2 < within_c2:
end_col = c2
else:
end_col = within_c2
s.update(set(product(range(start_col, end_col), range(0, len(self.row_positions) - 1))))
if get_cells_as_cols:
s.update(self.get_selected_cells(within_range = (0, within_c1, len(self.row_positions) - 1, within_c2)))
else:
if within_range is None:
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
s.update(set(range(c1, c2)))
if get_cells_as_cols:
s.update(set(tup[1] for tup in self.get_selected_cells()))
else:
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if (c1 >= within_c1 or
c2 <= within_c2):
if c1 > within_c1:
start_col = c1
else:
start_col = within_c1
if c2 < within_c2:
end_col = c2
else:
end_col = within_c2
s.update(set(range(start_col, end_col)))
if get_cells_as_cols:
s.update(set(tup[0] for tup in self.get_selected_cells(within_range = (0, within_c1, len(self.row_positions) - 1, within_c2))))
return s
    def get_selected_cells(self, get_rows = False, get_cols = False, within_range = None):
        """Return the set of selected (row, col) cells from canvas selection boxes.

        Args:
            get_rows: also include cells covered by row selection boxes.
            get_cols: also include cells covered by column selection boxes.
            within_range: optional (r1, c1, r2, c2) box; only selections
                overlapping it are returned, clipped to it.

        Returns:
            set of (row, col) tuples.
        """
        s = set()
        if within_range is not None:
            within_r1 = within_range[0]
            within_c1 = within_range[1]
            within_r2 = within_range[2]
            within_c2 = within_range[3]
        # Choose which canvas item tags to scan based on the requested kinds.
        if get_cols and get_rows:
            iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("RowSelectFill"), self.find_withtag("ColSelectFill"), self.find_withtag("Current_Outside"))
        elif get_rows and not get_cols:
            iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("RowSelectFill"), self.find_withtag("Current_Outside"))
        elif get_cols and not get_rows:
            iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("ColSelectFill"), self.find_withtag("Current_Outside"))
        else:
            iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("Current_Outside"))
        if within_range is None:
            for item in iterable:
                # Tag format "..._r1_c1_r2_c2" holds the box coordinates.
                r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
                s.update(set(product(range(r1, r2), range(c1, c2))))
        else:
            for item in iterable:
                r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
                # NOTE(review): this `or`-combined test is a loose overlap check;
                # non-overlapping boxes can still pass but yield empty ranges below.
                if (r1 >= within_r1 or
                    c1 >= within_c1 or
                    r2 <= within_r2 or
                    c2 <= within_c2):
                    # Clip the box to the within_range bounds.
                    if r1 > within_r1:
                        start_row = r1
                    else:
                        start_row = within_r1
                    if c1 > within_c1:
                        start_col = c1
                    else:
                        start_col = within_c1
                    if r2 < within_r2:
                        end_row = r2
                    else:
                        end_row = within_r2
                    if c2 < within_c2:
                        end_col = c2
                    else:
                        end_col = within_c2
                    s.update(set(product(range(start_row, end_row), range(start_col, end_col))))
        return s
def get_all_selection_boxes(self):
return tuple(tuple(int(e) for e in self.gettags(item)[1].split("_") if e) for item in chain(self.find_withtag("CellSelectFill"),
self.find_withtag("RowSelectFill"),
self.find_withtag("ColSelectFill"),
self.find_withtag("Current_Outside")))
def get_all_selection_boxes_with_types(self):
boxes = []
for item in sorted(self.find_withtag("CellSelectFill") + self.find_withtag("RowSelectFill") + self.find_withtag("ColSelectFill") + self.find_withtag("Current_Outside")):
tags = self.gettags(item)
if tags:
if tags[0].startswith(("Cell", "Current")):
boxes.append((tuple(int(e) for e in tags[1].split("_") if e), "cells"))
elif tags[0].startswith("Row"):
boxes.append((tuple(int(e) for e in tags[1].split("_") if e), "rows"))
elif tags[0].startswith("Col"):
boxes.append((tuple(int(e) for e in tags[1].split("_") if e), "cols"))
return boxes
def all_selected(self):
for r1, c1, r2, c2 in self.get_all_selection_boxes():
if not r1 and not c1 and r2 == len(self.row_positions) - 1 and c2 == len(self.col_positions) - 1:
return True
return False
def cell_selected(self, r, c, inc_cols = False, inc_rows = False):
if not inc_cols and not inc_rows:
iterable = chain(self.find_withtag("CellSelectFill"), self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside"))
elif inc_cols and not inc_rows:
iterable = chain(self.find_withtag("ColSelectFill"), self.find_withtag("CellSelectFill"), self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside"))
elif not inc_cols and inc_rows:
iterable = chain(self.find_withtag("RowSelectFill"), self.find_withtag("CellSelectFill"), self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside"))
elif inc_cols and inc_rows:
iterable = chain(self.find_withtag("RowSelectFill"), self.find_withtag("ColSelectFill"), self.find_withtag("CellSelectFill"), self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside"))
for item in iterable:
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if r1 <= r and c1 <= c and r2 > r and c2 > c:
return True
return False
def col_selected(self, c):
for item in self.find_withtag("ColSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if c1 <= c and c2 > c:
return True
return False
def row_selected(self, r):
for item in self.find_withtag("RowSelectFill"):
r1, c1, r2, c2 = tuple(int(e) for e in self.gettags(item)[1].split("_") if e)
if r1 <= r and r2 > r:
return True
return False
def anything_selected(self, exclude_columns = False, exclude_rows = False, exclude_cells = False):
if exclude_columns and exclude_rows and not exclude_cells:
if self.find_withtag("CellSelectFill") or self.find_withtag("Current_Outside"):
return True
elif exclude_columns and exclude_cells and not exclude_rows:
if self.find_withtag("RowSelectFill"):
return True
elif exclude_rows and exclude_cells and not exclude_columns:
if self.find_withtag("ColSelectFill"):
return True
elif exclude_columns and not exclude_rows and not exclude_cells:
if self.find_withtag("CellSelectFill") or self.find_withtag("RowSelectFill") or self.find_withtag("Current_Outside"):
return True
elif exclude_rows and not exclude_columns and not exclude_cells:
if self.find_withtag("CellSelectFill") or self.find_withtag("ColSelectFill") or self.find_withtag("Current_Outside"):
return True
elif exclude_cells and not exclude_columns and not exclude_rows:
if self.find_withtag("RowSelectFill") or self.find_withtag("ColSelectFill"):
return True
elif not exclude_columns and not exclude_rows and not exclude_cells:
if self.find_withtag("CellSelectFill") or self.find_withtag("RowSelectFill") or self.find_withtag("ColSelectFill") or self.find_withtag("Current_Outside"):
return True
return False
def hide_current(self):
for item in chain(self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside")):
self.itemconfig(item, state = "hidden")
def show_current(self):
for item in chain(self.find_withtag("Current_Inside"), self.find_withtag("Current_Outside")):
self.itemconfig(item, state = "normal")
    def open_cell(self, event = None):
        """Open the currently selected cell for editing: a dropdown, a checkbox
        toggle, or the inline text editor — unless the cell is read-only."""
        if not self.anything_selected() or self.text_editor_id is not None:
            # Nothing selected, or an editor is already open.
            return
        currently_selected = self.currently_selected(get_coords = True)
        if not currently_selected:
            return
        y1 = int(currently_selected[0])
        x1 = int(currently_selected[1])
        # Translate displayed column to data column when some columns are hidden.
        dcol = x1 if self.all_columns_displayed else self.displayed_columns[x1]
        if (
            ((y1, dcol) in self.cell_options and 'readonly' in self.cell_options[(y1, dcol)]) or
            (dcol in self.col_options and 'readonly' in self.col_options[dcol]) or
            (y1 in self.row_options and 'readonly' in self.row_options[y1])
        ):
            # Cell, column or row is marked read-only: do nothing.
            return
        elif (y1, dcol) in self.cell_options and ('dropdown' in self.cell_options[(y1, dcol)] or 'checkbox' in self.cell_options[(y1, dcol)]):
            if self.event_opens_dropdown_or_checkbox(event):
                if 'dropdown' in self.cell_options[(y1, dcol)]:
                    self.display_dropdown_window(y1, x1, event = event)
                elif 'checkbox' in self.cell_options[(y1, dcol)]:
                    self._click_checkbox(y1, x1, dcol)
        else:
            self.edit_cell_(event, r = y1, c = x1, dropdown = False)
def event_opens_dropdown_or_checkbox(self, event = None):
if event is None:
return False
elif ((hasattr(event, 'keysym') and event.keysym == 'Return') or # enter or f2
(hasattr(event, 'keysym') and event.keysym == 'F2') or
(event is not None and hasattr(event, 'keycode') and event.keycode == "??" and hasattr(event, 'num') and event.num == 1) or
(hasattr(event, 'keysym') and event.keysym == 'BackSpace')):
return True
else:
return False
    # c is displayed col
    def edit_cell_(self, event = None, r = None, c = None, dropdown = False):
        """Begin editing cell (r, c) in response to `event` by opening the text editor.

        Returns:
            bool: True if an editor was opened; False if the event was ignored or
            the extra_begin_edit_cell_func callback raised (vetoing the edit).
        """
        text = None
        extra_func_key = "??"
        if event is not None and (hasattr(event, 'keysym') and event.keysym == 'BackSpace'):
            # BackSpace starts editing with an emptied cell.
            extra_func_key = "BackSpace"
            text = ""
        elif event is None or self.event_opens_dropdown_or_checkbox(event):
            if event is not None:
                if hasattr(event, 'keysym') and event.keysym == 'Return':
                    extra_func_key = "Return"
                elif hasattr(event, 'keysym') and event.keysym == 'F2':
                    extra_func_key = "F2"
            # Start from the cell's current value (mapped through hidden columns).
            text = f"{self.data_ref[r][c]}" if self.all_columns_displayed else f"{self.data_ref[r][self.displayed_columns[c]]}"
            if self.cell_auto_resize_enabled:
                self.set_cell_size_to_text(r, c, only_set_if_too_small = True, redraw = True, run_binding = True)
        elif event is not None and ((hasattr(event, "char") and event.char.isalpha()) or
                                    (hasattr(event, "char") and event.char.isdigit()) or
                                    (hasattr(event, "char") and event.char in symbols_set)):
            # A printable key starts editing with that character.
            extra_func_key = event.char
            text = event.char
        else:
            return False
        self.text_editor_loc = (r, c)
        if self.extra_begin_edit_cell_func is not None:
            try:
                text2 = self.extra_begin_edit_cell_func(EditCellEvent(r, c, extra_func_key, text, "begin_edit_cell"))
            except:
                # A raising callback cancels the edit.
                return False
            if text2 is not None:
                # The user callback may replace the initial editor text.
                text = text2
        text = "" if text is None else text
        self.select_cell(r = r, c = c, keep_other_selections = True)
        self.create_text_editor(r = r, c = c, text = text, set_data_ref_on_destroy = True, dropdown = dropdown)
        return True
    # c is displayed col
    def create_text_editor(self,
                           r = 0,
                           c = 0,
                           text = None,
                           state = "normal",
                           see = True,
                           set_data_ref_on_destroy = False,
                           binding = None,
                           dropdown = False):
        """Create and place the inline TextEditor window over cell (r, c).

        Args:
            r: displayed row index.
            c: displayed column index.
            text: initial editor contents; defaults to the cell's current value.
            state: tk state for the editor widget.
            see: scroll the cell into view first.
            set_data_ref_on_destroy: commit the editor value to the data on close.
            binding: optional callback bound to Tab/Return/FocusOut/Escape instead
                of the default commit behaviour.
            dropdown: True when the editor backs an open dropdown window.
        """
        if (r, c) == self.text_editor_loc and self.text_editor is not None:
            # Editor already open on this cell.
            # NOTE(review): due to operator precedence this parses as
            # (get() + "") if not isinstance(text, str) else text — i.e. a str
            # argument REPLACES the contents rather than appending; confirm intent.
            self.text_editor.set_text(self.text_editor.get() + "" if not isinstance(text, str) else text)
            return
        if self.text_editor is not None:
            self.destroy_text_editor()
        if see:
            has_redrawn = self.see(r = r, c = c, check_cell_visibility = True)
            if not has_redrawn:
                self.refresh()
        self.text_editor_loc = (r, c)
        # Geometry of the cell the editor covers (slightly oversized vertically).
        x = self.col_positions[c]
        y = self.row_positions[r]
        w = self.col_positions[c + 1] - x + 1
        h = self.row_positions[r + 1] - y + 6
        dcol = c if self.all_columns_displayed else self.displayed_columns[c]
        if text is None:
            text = self.data_ref[r][dcol]
        self.hide_current()
        bg, fg = self.get_widget_bg_fg(r, dcol)
        self.text_editor = TextEditor(self,
                                      text = text,
                                      font = self.my_font,
                                      state = state,
                                      width = w,
                                      height = h,
                                      border_color = self.table_selected_cells_border_fg,
                                      show_border = self.show_selected_cells_border,
                                      bg = bg,
                                      fg = fg,
                                      popup_menu_font = self.popup_menu_font,
                                      popup_menu_fg = self.popup_menu_fg,
                                      popup_menu_bg = self.popup_menu_bg,
                                      popup_menu_highlight_bg = self.popup_menu_highlight_bg,
                                      popup_menu_highlight_fg = self.popup_menu_highlight_fg)
        self.text_editor_id = self.create_window((x, y), window = self.text_editor, anchor = "nw")
        if not dropdown:
            # A plain editor takes focus immediately; a dropdown editor is focused later.
            self.text_editor.textedit.focus_set()
            self.text_editor.scroll_to_bottom()
        # Alt-Return (Option-Return on macOS) inserts a newline and grows the editor.
        self.text_editor.textedit.bind("<Alt-Return>", lambda x: self.text_editor_newline_binding(r, c))
        if USER_OS == 'Darwin':
            self.text_editor.textedit.bind("<Option-Return>", lambda x: self.text_editor_newline_binding(r, c))
        for key, func in self.text_editor_user_bound_keys.items():
            self.text_editor.textedit.bind(key, func)
        if binding is not None:
            # Caller-supplied close handler gets (r, c, reason) tuples.
            self.text_editor.textedit.bind("<Tab>", lambda x: binding((r, c, "Tab")))
            self.text_editor.textedit.bind("<Return>", lambda x: binding((r, c, "Return")))
            self.text_editor.textedit.bind("<FocusOut>", lambda x: binding((r, c, "FocusOut")))
            self.text_editor.textedit.bind("<Escape>", lambda x: binding((r, c, "Escape")))
        elif binding is None and set_data_ref_on_destroy:
            # Default handlers commit the value back into the data.
            self.text_editor.textedit.bind("<Tab>", lambda x: self.get_text_editor_value((r, c, "Tab")))
            self.text_editor.textedit.bind("<Return>", lambda x: self.get_text_editor_value((r, c, "Return")))
            if not dropdown:
                self.text_editor.textedit.bind("<FocusOut>", lambda x: self.get_text_editor_value((r, c, "FocusOut")))
            self.text_editor.textedit.bind("<Escape>", lambda x: self.get_text_editor_value((r, c, "Escape")))
        else:
            self.text_editor.textedit.bind("<Escape>", lambda x: self.destroy_text_editor("Escape"))
def bind_text_editor_destroy(self, binding, r, c):
self.text_editor.textedit.bind("<Return>", lambda x: binding((r, c, "Return")))
self.text_editor.textedit.bind("<FocusOut>", lambda x: binding((r, c, "FocusOut")))
self.text_editor.textedit.bind("<Escape>", lambda x: binding((r, c, "Escape")))
self.text_editor.textedit.focus_set()
    def destroy_text_editor(self, event = None):
        """Tear down the inline text editor; when triggered by an event (e.g. the
        string "Escape"), fire the end-edit callback first."""
        if event is not None and self.extra_end_edit_cell_func is not None and self.text_editor_loc is not None:
            self.extra_end_edit_cell_func(EditCellEvent(int(self.text_editor_loc[0]), int(self.text_editor_loc[1]), "Escape", None, "escape_edit_cell"))
        self.text_editor_loc = None
        # Best-effort cleanup: each step may fail independently when the editor
        # is already partially destroyed.
        try:
            self.delete(self.text_editor_id)
        except:
            pass
        try:
            self.text_editor.destroy()
        except:
            pass
        try:
            self.text_editor = None
        except:
            pass
        try:
            self.text_editor_id = None
        except:
            pass
        self.show_current()
        # `event` here is typically the string "Escape"; refocus the table then.
        if event is not None and len(event) >= 3 and "Escape" in event:
            self.focus_set()
    # c is displayed col
    def get_text_editor_value(self, destroy_tup = None, r = None, c = None, set_data_ref_on_destroy = True, event = None, destroy = True, move_down = True, redraw = True, recreate = True):
        """Read the open text editor's value, commit it to the data, close the
        editor and advance the selection (Return moves down, Tab moves right).

        Args:
            destroy_tup: (r, c, reason) tuple from the editor's key bindings.
            r: data row; taken from destroy_tup when None.
            c: displayed column; taken from destroy_tup when None.
            set_data_ref_on_destroy: write the value into the data.
            event: unused; kept for binding-signature compatibility.
            destroy: destroy the editor widget.
            move_down: advance the selection per the closing key.
            redraw: refresh the table afterwards.
            recreate: rebuild the selection boxes afterwards.

        Returns:
            The committed editor value, or None when the edit was abandoned.
        """
        if self.focus_get() is None and destroy_tup:
            # Focus already left the application; abandon the edit.
            return
        if destroy_tup is not None and len(destroy_tup) >= 3 and destroy_tup[2] == "Escape":
            # Escape cancels without committing.
            self.destroy_text_editor("Escape")
            self.hide_dropdown_window(r, c)
            return
        if self.text_editor is not None:
            self.text_editor_value = self.text_editor.get()
        if destroy:
            self.destroy_text_editor()
        if set_data_ref_on_destroy:
            if r is None and c is None and destroy_tup:
                r, c = destroy_tup[0], destroy_tup[1]
            if self.extra_end_edit_cell_func is not None:
                # The user callback may validate/replace the value before commit.
                validation = self.extra_end_edit_cell_func(EditCellEvent(r, c, destroy_tup[2] if len(destroy_tup) >= 3 else "FocusOut", f"{self.text_editor_value}", "end_edit_cell"))
                if validation is not None:
                    self.text_editor_value = validation
            self._set_cell_data(r, c, value = self.text_editor_value)
        if move_down:
            if r is None and c is None and destroy_tup:
                r, c = destroy_tup[0], destroy_tup[1]
            currently_selected = self.currently_selected()
            if r is not None and c is not None:
                if (
                    currently_selected and
                    r == currently_selected[0] and
                    c == currently_selected[1] and
                    (self.single_selection_enabled or self.toggle_selection_enabled)
                ):
                    # Return moves one row down, Tab one column right (clamped to edges).
                    if destroy_tup is not None and len(destroy_tup) >= 3 and destroy_tup[2] == "Return":
                        self.select_cell(r + 1 if r < len(self.row_positions) - 2 else r, c)
                        self.see(r + 1 if r < len(self.row_positions) - 2 else r, c, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = True)
                    elif destroy_tup is not None and len(destroy_tup) >= 3 and destroy_tup[2] == "Tab":
                        self.select_cell(r, c + 1 if c < len(self.col_positions) - 2 else c)
                        self.see(r, c + 1 if c < len(self.col_positions) - 2 else c, keep_xscroll = True, bottom_right_corner = True, check_cell_visibility = True)
        self.hide_dropdown_window(r, c)
        if recreate:
            self.recreate_all_selection_boxes()
        if redraw:
            self.refresh()
        if destroy_tup is not None and len(destroy_tup) >= 3 and destroy_tup[2] != "FocusOut":
            self.focus_set()
        return self.text_editor_value
    #internal event use
    def _set_cell_data(self, r = 0, c = 0, dcol = None, value = "", undo = True, cell_resize = True):
        """Write `value` into data_ref[r][dcol], growing the data grid as needed.

        Args:
            r: data row.
            c: displayed column (used for cell resizing and for deriving dcol).
            dcol: data column; derived from c when None.
            value: new cell value.
            undo: push the previous value onto the undo stack if it changed.
            cell_resize: auto-grow the cell to fit the new text.
        """
        if dcol is None:
            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
        # Extend the data grid when the target cell does not exist yet.
        if r > len(self.data_ref) - 1:
            self.data_ref.extend([list(repeat("", dcol + 1)) for i in range((r + 1) - len(self.data_ref))])
        elif dcol > len(self.data_ref[r]) - 1:
            self.data_ref[r].extend(list(repeat("", (dcol + 1) - len(self.data_ref[r]))))
        if self.undo_enabled and undo:
            if self.data_ref[r][dcol] != value:
                # Snapshot the old value (compressed pickle) for the undo stack.
                self.undo_storage.append(zlib.compress(pickle.dumps(("edit_cells",
                                                                     {(r, dcol): self.data_ref[r][dcol]},
                                                                     (((r, c, r + 1, c + 1), "cells"), ),
                                                                     self.currently_selected()))))
        self.data_ref[r][dcol] = value
        if cell_resize and self.cell_auto_resize_enabled:
            self.set_cell_size_to_text(r, c, only_set_if_too_small = True, redraw = True, run_binding = True)
    #internal event use
    def _click_checkbox(self, r, c, dcol = None, undo = True, redraw = True):
        """Toggle the checkbox in cell (r, dcol) and fire its callbacks.

        Args:
            r: data row.
            c: displayed column.
            dcol: data column; derived from c when None.
            undo: record the change on the undo stack.
            redraw: refresh the table afterwards.
        """
        if dcol is None:
            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
        if self.cell_options[(r, dcol)]['checkbox']['state'] == "normal":
            # Non-bool cell values are reset to False rather than toggled.
            self._set_cell_data(r, c, dcol, value = not self.data_ref[r][dcol] if type(self.data_ref[r][dcol]) == bool else False, undo = undo, cell_resize = False)
            if self.cell_options[(r, dcol)]['checkbox']['check_function'] is not None:
                self.cell_options[(r, dcol)]['checkbox']['check_function']((r, c, "CheckboxClicked", f"{self.data_ref[r][dcol]}"))
            if self.extra_end_edit_cell_func is not None:
                self.extra_end_edit_cell_func(EditCellEvent(r, c, "Return", f"{self.data_ref[r][dcol]}", "end_edit_cell"))
        if redraw:
            self.refresh()
def create_checkbox(self, r = 0, c = 0, checked = False, state = "normal", redraw = False, check_function = None, text = ""):
if (r, c) in self.cell_options and any(x in self.cell_options[(r, c)] for x in ('dropdown', 'checkbox')):
self.destroy_dropdown_and_checkbox(r, c)
self._set_cell_data(r, dcol = c, value = checked, cell_resize = False, undo = False) #only works because cell_resize is false and undo is false, otherwise needs displayed col and dcol args
if (r, c) not in self.cell_options:
self.cell_options[(r, c)] = {}
self.cell_options[(r, c)]['checkbox'] = {'check_function': check_function,
'state': state,
'text': text}
if redraw:
self.refresh()
def create_dropdown(self, r = 0, c = 0, values = [], set_value = None, state = "readonly", redraw = True, selection_function = None, modified_function = None):
if (r, c) in self.cell_options and any(x in self.cell_options[(r, c)] for x in ('dropdown', 'checkbox')):
self.destroy_dropdown_and_checkbox(r, c)
if values:
self._set_cell_data(r, c, value = set_value if set_value is not None else values[0], cell_resize = False, undo = False)
elif not values and set_value is not None:
self._set_cell_data(r, c, value = set_value, cell_resize = False, undo = False)
if (r, c) not in self.cell_options:
self.cell_options[(r, c)] = {}
self.cell_options[(r, c)]['dropdown'] = {'values': values,
'align': "w",
'window': "no dropdown open",
'canvas_id': "no dropdown open",
'select_function': selection_function,
'modified_function': modified_function,
'state': state}
if redraw:
self.refresh()
def get_widget_bg_fg(self, r, c):
bg = self.table_bg
fg = self.table_fg
if (r, c) in self.cell_options and 'highlight' in self.cell_options[(r, c)]:
if self.cell_options[(r, c)]['highlight'][0] is not None:
bg = self.cell_options[(r, c)]['highlight'][0]
if self.cell_options[(r, c)]['highlight'][1] is not None:
fg = self.cell_options[(r, c)]['highlight'][1]
elif r in self.row_options and 'highlight' in self.row_options[r]:
if self.row_options[r]['highlight'][0] is not None:
bg = self.row_options[r]['highlight'][0]
if self.row_options[r]['highlight'][1] is not None:
fg = self.row_options[r]['highlight'][1]
elif c in self.col_options and 'highlight' in self.col_options[c]:
if self.col_options[c]['highlight'][0] is not None:
bg = self.col_options[c]['highlight'][0]
if self.col_options[c]['highlight'][1] is not None:
fg = self.col_options[c]['highlight'][1]
return bg, fg
    def text_editor_newline_binding(self, r = None, c = None, event = None):
        """Grow the open text editor when a newline is added and keep any open
        dropdown window attached below (anchor "nw") or above (anchor "sw") it."""
        if self.GetLinesHeight(self.text_editor.get_num_lines() + 1) > self.text_editor.winfo_height():
            self.text_editor.config(height = self.text_editor.winfo_height() + self.xtra_lines_increment)
        dcol = c if self.all_columns_displayed else self.displayed_columns[c]
        if ((r, c if self.all_columns_displayed else self.displayed_columns[c]) in self.cell_options and
            'dropdown' in self.cell_options[(r, dcol)]):
            text_editor_h = self.text_editor.winfo_height()
            win_h, anchor = self.get_dropdown_height_anchor(r, c, dcol, text_editor_h)
            if anchor == "nw":
                # Dropdown hangs just below the (now taller) editor.
                self.coords(self.cell_options[(r, dcol)]['dropdown']['canvas_id'],
                            self.col_positions[c], self.row_positions[r] + text_editor_h - 1)
                self.itemconfig(self.cell_options[(r, dcol)]['dropdown']['canvas_id'],
                                anchor = anchor, height = win_h)
            elif anchor == "sw":
                # Dropdown opens upward from the cell's top edge.
                self.coords(self.cell_options[(r, dcol)]['dropdown']['canvas_id'],
                            self.col_positions[c], self.row_positions[r])
                self.itemconfig(self.cell_options[(r, dcol)]['dropdown']['canvas_id'],
                                anchor = anchor, height = win_h)
def get_space_bot(self, r, text_editor_h = None):
if text_editor_h is None:
win_h = int(self.canvasy(0) + self.winfo_height() - self.row_positions[r + 1])
sheet_h = int(self.row_positions[-1] + 1 + self.empty_vertical - self.row_positions[r + 1])
else:
win_h = int(self.canvasy(0) + self.winfo_height() - (self.row_positions[r] + text_editor_h))
sheet_h = int(self.row_positions[-1] + 1 + self.empty_vertical - (self.row_positions[r] + text_editor_h))
return win_h if win_h >= sheet_h else sheet_h
    def get_dropdown_height_anchor(self, r, c, dcol, text_editor_h = None):
        """Compute the pixel height and tk anchor for cell (r, dcol)'s dropdown.

        Anchor "nw" opens the list below the cell, "sw" opens it above when the
        space underneath is too small.

        Returns:
            tuple: (window_height_px, anchor).
        """
        numvalues = len(self.cell_options[(r, dcol)]['dropdown']['values'])
        xscroll_h = self.parentframe.xscroll.winfo_height()
        # Show at most 6 rows of values; each row gets 5px padding plus 3px extra.
        if numvalues > 5:
            linespace = 6 * 5 + 3
            win_h = int(self.txt_h * 6 + linespace + xscroll_h)
        else:
            linespace = numvalues * 5 + 3
            win_h = int(self.txt_h * numvalues + linespace + xscroll_h)
        if win_h > 300:
            win_h = 300
        space_bot = self.get_space_bot(r, text_editor_h)
        space_top = int(self.row_positions[r])
        anchor = "nw"
        win_h2 = int(win_h)
        if win_h > space_bot:
            # Not enough room below: open on whichever side has more space.
            if space_bot >= space_top:
                anchor = "nw"
                win_h = space_bot - 1
            elif space_top > space_bot:
                anchor = "sw"
                win_h = space_top - 1
        # Clamp between one text row and the unclipped height.
        if win_h < self.txt_h + 5:
            win_h = self.txt_h + 5
        elif win_h > win_h2:
            win_h = win_h2
        return win_h, anchor
    # c is displayed col
    def display_dropdown_window(self, r, c, dcol = None, event = None):
        """Open the dropdown list window for cell (r, c).

        For a "normal" state dropdown an inline text editor is opened first so
        the value can also be typed; a "readonly" dropdown only shows the list.
        """
        self.destroy_text_editor("Escape")
        self.delete_opened_dropdown_window()
        if dcol is None:
            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
        if self.cell_options[(r, dcol)]['dropdown']['state'] == "normal":
            # Editable dropdown: requires the text editor to open successfully.
            if not self.edit_cell_(r = r, c = c, dropdown = True, event = event):
                return
        bg, fg = self.get_widget_bg_fg(r, dcol)
        win_h, anchor = self.get_dropdown_height_anchor(r, c, dcol)
        window = self.parentframe.dropdown_class(self.winfo_toplevel(),
                                                 r,
                                                 c,
                                                 width = self.col_positions[c + 1] - self.col_positions[c] + 1,
                                                 height = win_h,
                                                 font = self.my_font,
                                                 bg = bg,
                                                 fg = fg,
                                                 outline_color = fg,
                                                 outline_thickness = 1,
                                                 values = self.cell_options[(r, dcol)]['dropdown']['values'],
                                                 hide_dropdown_window = self.hide_dropdown_window,
                                                 arrowkey_RIGHT = self.arrowkey_RIGHT,
                                                 arrowkey_LEFT = self.arrowkey_LEFT,
                                                 align = self.cell_options[(r, dcol)]['dropdown']['align'])
        if self.cell_options[(r, dcol)]['dropdown']['state'] == "normal":
            # Place the list flush with the open text editor.
            if anchor == "nw":
                ypos = self.row_positions[r] + self.text_editor.h_ - 1
            else:
                ypos = self.row_positions[r]
            self.cell_options[(r, dcol)]['dropdown']['canvas_id'] = self.create_window((self.col_positions[c], ypos),
                                                                                      window = window,
                                                                                      anchor = anchor)
            if self.cell_options[(r, dcol)]['dropdown']['modified_function'] is not None:
                # Forward text edits in the editor to the user's modified callback.
                self.text_editor.textedit.bind("<<TextModified>>", lambda x: self.cell_options[(r, dcol)]['dropdown']['modified_function'](DropDownModifiedEvent("ComboboxModified", r, dcol, self.text_editor.get())))
            self.update()
            try:
                self.text_editor.textedit.focus_set()
                self.text_editor.scroll_to_bottom()
            except:
                return
        else:
            # Readonly dropdown: place the list against the cell itself.
            if anchor == "nw":
                ypos = self.row_positions[r + 1]
            else:
                ypos = self.row_positions[r]
            self.cell_options[(r, dcol)]['dropdown']['canvas_id'] = self.create_window((self.col_positions[c], ypos),
                                                                                      window = window,
                                                                                      anchor = anchor)
            window.bind("<FocusOut>", lambda x: self.hide_dropdown_window(r, c))
            self.update()
            try:
                window.focus_set()
            except:
                return
        # Remember the open window so it can be torn down later.
        self.existing_dropdown_window = window
        self.cell_options[(r, dcol)]['dropdown']['window'] = window
        self.existing_dropdown_canvas_id = self.cell_options[(r, dcol)]['dropdown']['canvas_id']
    # c is displayed col
    def hide_dropdown_window(self, r = None, c = None, selection = None, b1 = False, redraw = True):
        """Close the open dropdown window, committing `selection` if one was made.

        Args:
            r: data row of the dropdown's cell.
            c: displayed column of the dropdown's cell.
            selection: the picked value, if the user chose one.
            b1: True when closed by a mouse click; commits any open text editor.
            redraw: refresh the table after committing a selection.

        Returns:
            (row, col) of the closed dropdown, or False if none was open.
        """
        if r is not None and c is not None and selection is not None:
            dcol = c if self.all_columns_displayed else self.displayed_columns[c]
            if self.cell_options[(r, dcol)]['dropdown']['select_function'] is not None: # user has specified a selection function
                self.cell_options[(r, dcol)]['dropdown']['select_function'](EditCellEvent(r, c, "ComboboxSelected", f"{selection}", "end_edit_cell"))
            if self.extra_end_edit_cell_func is not None:
                # The end-edit callback may validate/replace the selection.
                validation = self.extra_end_edit_cell_func(EditCellEvent(r, c, "ComboboxSelected", f"{selection}", "end_edit_cell"))
                if validation is not None:
                    selection = validation
            self._set_cell_data(r, c, dcol, selection, cell_resize = True)
            self.focus_set()
            self.recreate_all_selection_boxes()
            if redraw:
                self.refresh()
        # Capture the closing window's coordinates before tearing it down.
        if self.existing_dropdown_window is not None:
            closedr, closedc, ret_tup = int(self.existing_dropdown_window.r), int(self.existing_dropdown_window.c), True
        else:
            ret_tup = False
        if b1 and self.text_editor_loc is not None and self.text_editor is not None:
            # Mouse-click close: commit the inline editor as if Return was pressed.
            self.get_text_editor_value(destroy_tup = self.text_editor_loc + ("Return", ))
        else:
            self.destroy_text_editor("Escape")
        self.delete_opened_dropdown_window(r, c)
        if ret_tup:
            return closedr, closedc
# c is displayed col
def delete_opened_dropdown_window(self, r = None, c = None, dcol = None):
if c is not None and dcol is None:
dcol = c if self.all_columns_displayed else self.displayed_columns[c]
try:
self.delete(self.existing_dropdown_canvas_id)
except:
pass
self.existing_dropdown_canvas_id = None
try:
self.existing_dropdown_window.destroy()
except:
pass
self.existing_dropdown_window = None
if r is not None and c is not None and (r, dcol) in self.cell_options and 'dropdown' in self.cell_options[(r, dcol)]:
self.cell_options[(r, dcol)]['dropdown']['canvas_id'] = "no dropdown open"
self.cell_options[(r, dcol)]['dropdown']['window'] = "no dropdown open"
try:
self.delete(self.cell_options[(r, dcol)]['dropdown']['canvas_id'])
except:
pass
def get_displayed_col_from_dcol(self, dcol):
try:
return self.displayed_columns.index(dcol)
except:
return None
# c is dcol
def destroy_dropdown(self, r, c):
self.delete_opened_dropdown_window(r, c)
if (r, c) in self.cell_options and 'dropdown' in self.cell_options[(r, c)]:
del self.cell_options[(r, c)]['dropdown']
# c is dcol
def destroy_checkbox(self, r, c):
if (r, c) in self.cell_options and 'checkbox' in self.cell_options[(r, c)]:
del self.cell_options[(r, c)]['checkbox']
# c is dcol
def destroy_dropdown_and_checkbox(self, r, c):
self.destroy_dropdown(r, c)
self.destroy_checkbox(r, c)
    # deprecated
    def refresh_dropdowns(self, dropdowns = []):
        """Deprecated no-op kept for backward compatibility with older callers.

        Note: the mutable default argument is harmless here because the body
        never reads or mutates it.
        """
        pass
|
from typing import List, Dict, Optional, NamedTuple
import pickle
import os
from enum import Enum
from collections import OrderedDict
import json
import redis
import glob
import copy
from gtmcore.dataset.dataset import Dataset
from gtmcore.logging import LMLogger
logger = LMLogger.get_logger()
class PersistTaskType(Enum):
    """Kinds of work needed to persist a manifest change to disk."""
    DELETE = 0  # remove an entry from its manifest file
    ADD = 1     # write a brand-new entry
    UPDATE = 2  # rewrite an existing entry in place
# NamedTuple to capture tasks to be done to persist manifest changes to disk
class PersistTask(NamedTuple):
    """One queued unit of work required to write a manifest change to disk."""
    relative_path: str           # dataset-relative path of the affected file
    task: PersistTaskType        # what to do with the entry
    manifest_file: str           # manifest file the entry lives in
class ManifestJSONEncoder(json.JSONEncoder):
    """A custom json encoder to output json files that will be moderately compatible with git

    Each top-level key is written on its own line so git diffs and merges of
    manifest files stay line-oriented.
    """
    def encode(self, o):
        """Encode OrderedDicts one key per line; defer everything else to json.dumps.

        Args:
            o: the object to encode.

        Returns:
            str: the JSON document.
        """
        if isinstance(o, OrderedDict):
            # Bug fix: the key and its value must live inside a single f-string
            # (f'"{key}":{value}'); the original quoting terminated the f-string
            # early and was a SyntaxError.
            output = [f'"{key}":{json.dumps(o[key], separators=(",", ":"))}' for key in o]
            return "{\n" + ",\n".join(output) + "\n}"
        else:
            return json.dumps(o)
class ManifestFileCache(object):
"""Class to provide a caching layer on top of a collection of Dataset manifest files
Note: The checkout context of the underlying dataset CANNOT change while this class is instantiated. If it does,
you need to reload the Dataset instance and reload the Manifest instance.
"""
def __init__(self, dataset: Dataset, logged_in_username: Optional[str] = None) -> None:
self.dataset = dataset
self.logged_in_username = logged_in_username
self.ignore_file = os.path.join(dataset.root_dir, ".gigantumignore")
self._redis_client = None
self._manifest: OrderedDict = OrderedDict()
self._current_checkout_id = self.dataset.checkout_id
self._persist_queue: List[PersistTask] = list()
# TODO: Support ignoring files
# self.ignored = self._load_ignored()
self._legacy_manifest_file = os.path.join(self.dataset.root_dir, 'manifest', 'manifest0')
@property
def redis_client(self) -> redis.StrictRedis:
"""Property to get a redis client for manifest caching
Returns:
redis.StrictRedis
"""
if not self._redis_client:
self._redis_client = redis.StrictRedis(db=1)
return self._redis_client
@property
def manifest_cache_key(self) -> str:
"""Property to get the manifest cache key for this dataset instance
Returns:
redis.StrictRedis
"""
return f"DATASET-MANIFEST-CACHE|{self._current_checkout_id}"
def _load_legacy_manifest(self) -> OrderedDict:
"""Method to load the manifest file
Returns:
dict
"""
if os.path.exists(self._legacy_manifest_file):
with open(self._legacy_manifest_file, 'rb') as mf:
data = pickle.load(mf)
# Add the filename as an attribute to allow for reverse indexing on delete
[data[key].update(fn='manifest0') for key in data]
return data
else:
return OrderedDict()
def _write_legacy_manifest(self, data: Dict) -> None:
"""Method to write the manifest file if you've deleted or updated a value in the legacy file
Returns:
dict
"""
with open(self._legacy_manifest_file, 'wb') as mf:
pickle.dump(data, mf, pickle.HIGHEST_PROTOCOL)
@staticmethod
def _load_manifest_file(filename: str) -> OrderedDict:
"""Method to load a single manifest file
Returns:
OrderedDict
"""
if os.path.exists(filename):
with open(filename, 'rt') as mf:
base_name = os.path.basename(filename)
data = json.load(mf, object_pairs_hook=OrderedDict)
# Add the filename as an attribute to allow for reverse indexing on delete
[data[key].update(fn=base_name) for key in data]
return data
else:
return OrderedDict()
def _write_manifest_file(self, checkout_id: str, data: OrderedDict) -> None:
"""Method to write a manifest file to disk
Args:
checkout_id: the checkout id for the manifest file to write
data: an OrderedDict containing data to write
Returns:
None
"""
# Remove the reverse file index before persisting to disk
[data[key].pop('fn', None) for key in data]
# Pop off just the unique checkout ID
with open(os.path.join(self.dataset.root_dir, 'manifest', f'manifest-{checkout_id}.json'), 'wt') as mf:
json.dump(data, mf, cls=ManifestJSONEncoder)
def _load_manifest_data(self) -> OrderedDict:
"""Method to load all manifest data, either from the memory cache or from all the manifest files
Returns:
OrderedDict
"""
if self.redis_client.exists(self.manifest_cache_key):
# Load from cache
manifest_data = json.loads(self.redis_client.get(self.manifest_cache_key).decode("utf-8"),
object_pairs_hook=OrderedDict)
self.redis_client.expire(self.manifest_cache_key, 3600)
else:
# Load from files
manifest_data = OrderedDict()
for manifest_file in glob.glob(os.path.join(self.dataset.root_dir, 'manifest', 'manifest-*')):
manifest_data = OrderedDict(**manifest_data, **self._load_manifest_file(manifest_file))
# Check for legacy manifest and load if needed
if os.path.exists(self._legacy_manifest_file):
manifest_data = OrderedDict(**manifest_data, **self._load_legacy_manifest())
# Cache manifest data
if manifest_data:
self.redis_client.set(self.manifest_cache_key, json.dumps(manifest_data, separators=(',', ':')))
self.redis_client.expire(self.manifest_cache_key, 3600)
return manifest_data
def persist(self) -> None:
"""Method to persist changes to the manifest to the cache and any associated manifest file
Returns:
None
"""
try:
# Repack tasks by manifest file
file_groups: Dict[str, List[PersistTask]] = dict()
for task in self._persist_queue:
if task.manifest_file in file_groups:
file_groups[task.manifest_file].append(task)
else:
file_groups[task.manifest_file] = [task]
# Update manifest files
for manifest_file in file_groups:
if manifest_file == "manifest0":
data = self._load_legacy_manifest()
for task in file_groups[manifest_file]:
if task.task == PersistTaskType.DELETE:
del data[task.relative_path]
else:
data[task.relative_path] = self._manifest[task.relative_path]
self._write_legacy_manifest(data)
else:
full_manifest_file_path = os.path.join(self.dataset.root_dir, 'manifest', manifest_file)
data = self._load_manifest_file(full_manifest_file_path)
for task in file_groups[manifest_file]:
if task.task == PersistTaskType.DELETE:
del data[task.relative_path]
else:
data[task.relative_path] = copy.deepcopy(self._manifest[task.relative_path])
checkout_id = manifest_file[9:-5] # strips off manifest- and .json from file name to get id
self._write_manifest_file(checkout_id, data)
# Persist to cache
self.redis_client.set(self.manifest_cache_key, json.dumps(self._manifest, separators=(',', ':')))
self.redis_client.expire(self.manifest_cache_key, 3600)
except Exception as err:
logger.error("An error occurred while trying to persist manifest data to disk.")
logger.exception(err)
# Clear data so it all reloads from disk
self._manifest = OrderedDict()
raise IOError("An error occurred while trying to persist manifest data to disk. Refresh and try again")
finally:
self._persist_queue = list()
def get_manifest(self) -> OrderedDict:
    """Return the current manifest, lazily loading it on first access.

    Returns:
        OrderedDict: mapping of relative file path to manifest entry
    """
    if self._manifest:
        return self._manifest
    self._manifest = self._load_manifest_data()
    return self._manifest
def add_or_update(self, relative_path: str, content_hash: str, modified_on: str, num_bytes: str) -> None:
    """Add or update a manifest entry (persisted later via persist()).

    Args:
        relative_path: relative path to the file
        content_hash: content hash of the file
        modified_on: modified datetime of the file
        num_bytes: number of bytes in the file
    """
    self.get_manifest()  # make sure the manifest is loaded
    _, checkout_id = self._current_checkout_id.rsplit('-', 1)
    if relative_path in self._manifest:
        # Existing entry: update it in place in its current manifest file
        task_type = PersistTaskType.UPDATE
        manifest_file = self._manifest[relative_path]['fn']
    else:
        # New entry: it belongs to the current checkout's manifest file
        task_type = PersistTaskType.ADD
        manifest_file = f'manifest-{checkout_id}.json'
    self._manifest[relative_path] = OrderedDict([('h', content_hash),
                                                 ('m', modified_on),
                                                 ('b', num_bytes),
                                                 ('fn', manifest_file)])
    self._persist_queue.append(PersistTask(relative_path=relative_path,
                                           task=task_type,
                                           manifest_file=manifest_file))
def remove(self, relative_path: str) -> None:
    """Delete a manifest entry (persisted later via persist()).

    Args:
        relative_path: relative path to the file being removed
    """
    self.get_manifest()  # make sure the manifest is loaded
    source_file = self._manifest[relative_path]['fn']
    del self._manifest[relative_path]
    self._persist_queue.append(PersistTask(relative_path=relative_path,
                                           task=PersistTaskType.DELETE,
                                           manifest_file=source_file))
| from typing import List, Dict, Optional, NamedTuple
import pickle
import os
from enum import Enum
from collections import OrderedDict
import json
import redis
import glob
import copy
from gtmcore.dataset.dataset import Dataset
from gtmcore.logging import LMLogger
logger = LMLogger.get_logger()
class PersistTaskType(Enum):
    """Kinds of pending manifest mutations."""
    DELETE = 0
    ADD = 1
    UPDATE = 2


# A single queued manifest mutation, tagged with the manifest file it touches.
class PersistTask(NamedTuple):
    relative_path: str
    task: PersistTaskType
    manifest_file: str
class ManifestJSONEncoder(json.JSONEncoder):
    """JSON encoder that emits one `"key":value` pair per line.

    The one-entry-per-line layout keeps manifest files moderately
    git-friendly (small, stable diffs).
    """
    def encode(self, o):
        if not isinstance(o, OrderedDict):
            return json.dumps(o)
        pairs = (
            f'"{key}":{json.dumps(o[key], separators=(",", ":"))}'
            for key in o
        )
        return "{\n" + ",\n".join(pairs) + "\n}"
class ManifestFileCache(object):
    """Caching layer over a Dataset's collection of manifest files.

    Note: The checkout context of the underlying dataset CANNOT change while
    this class is instantiated. If it does, you need to reload the Dataset
    instance and reload the Manifest instance.
    """

    def __init__(self, dataset: Dataset, logged_in_username: Optional[str] = None) -> None:
        self.dataset = dataset
        self.logged_in_username = logged_in_username
        self.ignore_file = os.path.join(dataset.root_dir, ".gigantumignore")
        self._redis_client = None
        self._manifest: OrderedDict = OrderedDict()
        self._current_checkout_id = self.dataset.checkout_id
        self._persist_queue: List[PersistTask] = list()
        # TODO: Support ignoring files
        # self.ignored = self._load_ignored()
        self._legacy_manifest_file = os.path.join(self.dataset.root_dir, 'manifest', 'manifest0')

    @property
    def redis_client(self) -> redis.StrictRedis:
        """Lazily-created redis client (db 1) used for manifest caching."""
        if not self._redis_client:
            self._redis_client = redis.StrictRedis(db=1)
        return self._redis_client

    @property
    def manifest_cache_key(self) -> str:
        """Redis key under which this dataset checkout's manifest is cached."""
        return f"DATASET-MANIFEST-CACHE|{self._current_checkout_id}"

    def _load_legacy_manifest(self) -> OrderedDict:
        """Load the legacy pickled manifest, tagging entries with their source file."""
        if not os.path.exists(self._legacy_manifest_file):
            return OrderedDict()
        with open(self._legacy_manifest_file, 'rb') as legacy_f:
            entries = pickle.load(legacy_f)
        # Record the source filename on each entry for reverse indexing on delete
        for key in entries:
            entries[key].update(fn='manifest0')
        return entries

    def _write_legacy_manifest(self, data: Dict) -> None:
        """Write the legacy pickled manifest back to disk after an update/delete."""
        with open(self._legacy_manifest_file, 'wb') as legacy_f:
            pickle.dump(data, legacy_f, pickle.HIGHEST_PROTOCOL)

    @staticmethod
    def _load_manifest_file(filename: str) -> OrderedDict:
        """Load one JSON manifest file, tagging entries with their source file."""
        if not os.path.exists(filename):
            return OrderedDict()
        with open(filename, 'rt') as mf:
            contents = json.load(mf, object_pairs_hook=OrderedDict)
        source_name = os.path.basename(filename)
        # Record the source filename on each entry for reverse indexing on delete
        for key in contents:
            contents[key].update(fn=source_name)
        return contents

    def _write_manifest_file(self, checkout_id: str, data: OrderedDict) -> None:
        """Write a single manifest file to disk.

        Args:
            checkout_id: the checkout id for the manifest file to write
            data: an OrderedDict containing the entries to serialize
        """
        # Strip the reverse file index before persisting to disk
        for key in data:
            data[key].pop('fn', None)
        target = os.path.join(self.dataset.root_dir, 'manifest', f'manifest-{checkout_id}.json')
        with open(target, 'wt') as mf:
            json.dump(data, mf, cls=ManifestJSONEncoder)

    def _load_manifest_data(self) -> OrderedDict:
        """Load the full manifest, preferring the redis cache over files on disk.

        Returns:
            OrderedDict: merged manifest data from all manifest files
        """
        cache_key = self.manifest_cache_key
        if self.redis_client.exists(cache_key):
            # Cache hit: deserialize and refresh the TTL
            raw = self.redis_client.get(cache_key).decode("utf-8")
            manifest_data = json.loads(raw, object_pairs_hook=OrderedDict)
            self.redis_client.expire(cache_key, 3600)
            return manifest_data

        # Cache miss: merge every manifest-* file, then any legacy manifest
        manifest_data = OrderedDict()
        for manifest_file in glob.glob(os.path.join(self.dataset.root_dir, 'manifest', 'manifest-*')):
            manifest_data = OrderedDict(**manifest_data, **self._load_manifest_file(manifest_file))
        if os.path.exists(self._legacy_manifest_file):
            manifest_data = OrderedDict(**manifest_data, **self._load_legacy_manifest())
        # Prime the cache so the next load is cheap
        if manifest_data:
            self.redis_client.set(cache_key, json.dumps(manifest_data, separators=(',', ':')))
            self.redis_client.expire(cache_key, 3600)
        return manifest_data

    def persist(self) -> None:
        """Flush queued manifest changes to their manifest files and refresh the cache.

        Raises:
            IOError: when writing fails; in-memory state is cleared so the
                manifest reloads from disk on the next access.
        """
        try:
            # Group the queued tasks by the manifest file they touch
            grouped: Dict[str, List[PersistTask]] = dict()
            for task in self._persist_queue:
                grouped.setdefault(task.manifest_file, []).append(task)

            for manifest_file, tasks in grouped.items():
                if manifest_file == "manifest0":
                    # Legacy pickled manifest
                    data = self._load_legacy_manifest()
                    for task in tasks:
                        if task.task == PersistTaskType.DELETE:
                            del data[task.relative_path]
                        else:
                            data[task.relative_path] = self._manifest[task.relative_path]
                    self._write_legacy_manifest(data)
                else:
                    full_path = os.path.join(self.dataset.root_dir, 'manifest', manifest_file)
                    data = self._load_manifest_file(full_path)
                    for task in tasks:
                        if task.task == PersistTaskType.DELETE:
                            del data[task.relative_path]
                        else:
                            data[task.relative_path] = copy.deepcopy(self._manifest[task.relative_path])
                    # strip the "manifest-" prefix and ".json" suffix to recover the checkout id
                    checkout_id = manifest_file[9:-5]
                    self._write_manifest_file(checkout_id, data)

            # Refresh the cache with the full in-memory manifest
            self.redis_client.set(self.manifest_cache_key, json.dumps(self._manifest, separators=(',', ':')))
            self.redis_client.expire(self.manifest_cache_key, 3600)
        except Exception as err:
            logger.error("An error occurred while trying to persist manifest data to disk.")
            logger.exception(err)
            # Clear data so it all reloads from disk on the next access
            self._manifest = OrderedDict()
            raise IOError("An error occurred while trying to persist manifest data to disk. Refresh and try again")
        finally:
            self._persist_queue = list()

    def get_manifest(self) -> OrderedDict:
        """Return the current manifest, lazily loading it on first access."""
        if self._manifest:
            return self._manifest
        self._manifest = self._load_manifest_data()
        return self._manifest

    def add_or_update(self, relative_path: str, content_hash: str, modified_on: str, num_bytes: str) -> None:
        """Add or update a manifest entry (persisted later via persist()).

        Args:
            relative_path: relative path to the file
            content_hash: content hash of the file
            modified_on: modified datetime of the file
            num_bytes: number of bytes in the file
        """
        self.get_manifest()  # make sure the manifest is loaded
        _, checkout_id = self._current_checkout_id.rsplit('-', 1)
        if relative_path in self._manifest:
            # Existing entry: update it in place in its current manifest file
            task_type = PersistTaskType.UPDATE
            manifest_file = self._manifest[relative_path]['fn']
        else:
            # New entry: it belongs to the current checkout's manifest file
            task_type = PersistTaskType.ADD
            manifest_file = f'manifest-{checkout_id}.json'
        self._manifest[relative_path] = OrderedDict([('h', content_hash),
                                                     ('m', modified_on),
                                                     ('b', num_bytes),
                                                     ('fn', manifest_file)])
        self._persist_queue.append(PersistTask(relative_path=relative_path,
                                               task=task_type,
                                               manifest_file=manifest_file))

    def remove(self, relative_path: str) -> None:
        """Delete a manifest entry (persisted later via persist()).

        Args:
            relative_path: relative path to the file being removed
        """
        self.get_manifest()  # make sure the manifest is loaded
        source_file = self._manifest[relative_path]['fn']
        del self._manifest[relative_path]
        self._persist_queue.append(PersistTask(relative_path=relative_path,
                                               task=PersistTaskType.DELETE,
                                               manifest_file=source_file))
|
import discum_c844aef
import time
import multiprocessing
import json
import random
import re
import os
# Fall back to terminal output when tkinter is not available
try:
    from tkinter import messagebox
    use_terminal = False
except:  # noqa: E722 - keep the original catch-everything behaviour
    use_terminal = True

once = False      # one-shot latch flipped by the first gateway event
wbm = [12, 16]    # min/max seconds to wait between command batches
update = 0
class bot:
    """Static configuration holder: credentials, target channel and commands.

    Settings are read from settings.json; if the file is missing or invalid
    the user is prompted once and the answers are saved for the next run.

    Fix: the original opened settings.json with mode "w+", which truncates
    the file before reading, so json.load() always failed and saved settings
    could never be reused. Read and write in separate, correctly-moded opens.
    """

    owoid = 408785106942164992  # user id of the owo bot

    try:
        with open('settings.json') as file:
            data = json.load(file)
        token = data["token"]
        channel = data["channel"]
        proxy = data["proxy"]
        proxyserver = data["proxy_"]["server"]
        proxyport = data["proxy_"]["port"]
    except (OSError, ValueError, KeyError):
        # Missing or corrupt settings: prompt the user and persist the answers
        temp = {}
        temp["token"] = input("please enter your dc token for once: ")
        temp["channel"] = input("please enter the id of the channel: ")
        while True:
            temp["proxy"] = input("will you use proxy? [YES/NO]")
            temp["proxy_"] = {}
            if temp["proxy"].upper() == "YES":
                temp["proxy_"]["server"] = input("Proxy server: ")
                temp["proxy_"]["port"] = input("Proxy server port: ")
                break
            if temp["proxy"].upper() == "NO":
                temp["proxy_"]["server"] = None
                temp["proxy_"]["port"] = None
                break
        with open('settings.json', 'w') as file:
            json.dump(temp, file)
        token = temp["token"]
        channel = temp["channel"]
        proxy = temp["proxy"]
        proxyserver = temp["proxy_"]["server"]
        proxyport = temp["proxy_"]["port"]

    commands = [
        "owo hunt",
        "owo hunt",
        "owo battle",
    ]

    class color:
        """ANSI color codes; blanked on Windows where they are unsupported."""
        purple = '\033[95m'
        okblue = '\033[94m'
        okcyan = '\033[96m'
        okgreen = '\033[92m'
        warning = '\033[93m'
        fail = '\033[91m'
        reset = '\033[0m'
        bold = '\033[1m'
        underline = '\033[4m'
        if os.name == "nt":
            purple = okblue = okcyan = okgreen = warning = fail = reset = bold = underline = ''
def at():
    """Return the current local timestamp wrapped in ANSI highlight codes.

    Fix: the original nested single quotes inside a single-quoted f-string
    (`f'...{time.strftime('%d ...')}...'`), which is a SyntaxError on every
    Python version before 3.12 (PEP 701). Compute the timestamp first.
    """
    stamp = time.strftime("%d %b %Y %H:%M:%S", time.localtime())
    return f'\033[0;43m{stamp}\033[0;21m'
def report_error(content):
    """Report an error: terminal print when tkinter is missing, else a message box."""
    if not use_terminal:
        messagebox.showerror("OWO bot cheat", content)
    else:
        print(at(), content)
client=discum_c844aef.Client(token=bot.token,proxy_host=bot.proxyserver, proxy_port=bot.proxyport, log=False)
def issuechecker():
    """Scan the last 10 channel messages for ban/captcha indications.

    Returns:
        "exit" when the bot should stop (ban, captcha, rate warning, or no
        owo replies at all), otherwise None.
    """
    response = client.getMessages(str(bot.channel), num=10)
    messages = json.loads(response.text)
    owo_replies = 0
    for message in messages:
        if message['author']['id'] != str(bot.owoid):
            continue
        owo_replies += 1
        content = message['content']
        if "(2/5)" in str(content):
            return "exit"
        if 'banned' in content:
            print(f'{at()}{bot.color.fail} !!! [BANNED] !!! {bot.color.reset} your account have been banned from owo bot please open a issue on https://github.com/sudo-do/discord-selfbot-owo-bot/')
            return "exit"
        if 'complete your captcha' in content:
            print(f'{at()}{bot.color.warning} !! [CAPTCHA] !! {bot.color.reset} CAPTCHA ACTION REQUİRED {content[-6:]}')
            return "exit"
    if not owo_replies:
        return "exit"
def security():
    """Stop the bot immediately when issuechecker() flags a problem."""
    if issuechecker() != "exit":
        return
    report_error("Ban-security triggered, answer the captcha")
    exit()
def runner():
    """Send one or two random owo commands, then sleep a random interval."""
    global wbm
    first = random.choice(bot.commands)
    second = random.choice(bot.commands)
    client.typingAction(str(bot.channel))
    client.sendMessage(str(bot.channel), first)
    print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} {first}")
    if second != first:
        # Two distinct commands were drawn: send the second one too
        client.typingAction(str(bot.channel))
        time.sleep(1)
        client.sendMessage(str(bot.channel), second)
        print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} {second}")
    time.sleep(random.randint(wbm[0], wbm[1]))
def owopray():
    """Send the "owo pray" command to the configured channel."""
    client.sendMessage(str(bot.channel), "owo pray")
    print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo pray")
def gems():
    """Check the inventory and use the best gem of each tier.

    Fix: the original filtered `inv` with `inv.pop(...)` inside
    `for item in inv`, mutating the list while iterating it — that skips
    elements and leaves non-gem entries behind. Build a new list instead.
    """
    client.typingAction(str(bot.channel))
    time.sleep(2)
    client.sendMessage(str(bot.channel), "owo inv")
    print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo inv")
    time.sleep(5)
    response = client.getMessages(str(bot.channel), num=5)
    messages = json.loads(response.text)
    inv = 0
    for message in messages:
        if message['author']['id'] == str(bot.owoid) and 'Inventory' in message['content']:
            inv = re.findall(r'`(.*?)`', message['content'])
    if not inv:
        security()
        return
    if '50' in inv:
        # item 50 present — presumably a lootbox: open all and re-scan. TODO confirm
        client.sendMessage(str(bot.channel), "owo lb all")
        print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo lb all")
        time.sleep(10)
        gems()
        return
    # Keep only numeric item ids <= 100 (ids > 100 are weapons; non-numeric
    # entries are backgrounds etc.). Collect into a fresh list.
    gem_ids = []
    for item in inv:
        try:
            value = int(item)
        except ValueError:  # backgrounds etc.
            continue
        if value <= 100:
            gem_ids.append(value)
    tiers = [[], [], []]
    print(f"{at()}{bot.color.okblue} [INFO] {bot.color.reset} Found {len(gem_ids)} gems Inventory")
    for gem in gem_ids:
        if 50 < gem < 60:
            tiers[0].append(gem)
        elif 60 < gem < 70:
            tiers[1].append(gem)
        elif 70 < gem < 80:
            tiers[2].append(gem)
    for level in range(3):
        if tiers[level]:
            client.sendMessage(str(bot.channel), "owo use " + str(max(tiers[level])))
            print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo use {str(max(tiers[level]))}")
            time.sleep(6)
def loopie():
    """Main loop: hunt/battle continuously with periodic pray, gem and rest breaks."""
    last_pray = 0
    last_gems = last_pray
    last_rest = time.time()
    while True:
        runner()
        if time.time() - last_pray > random.randint(300, 500):
            security()
            owopray()
            last_pray = time.time()
        if time.time() - last_gems > random.randint(500, 1000):
            security()
            gems()
            last_gems = time.time()
        if time.time() - last_rest > random.randint(1000, 1800):
            # Long random pause to look less bot-like
            time.sleep(random.randint(150, 300))
            security()
            last_rest = time.time()
@client.gateway.command
def defination1(resp):
    """Gateway callback; flips the one-shot `once` flag on the first event."""
    global once
    if once:
        return
    once = True
if __name__ == '__main__':
    # Fixes: the original called Process(...).run(), which executes loopie()
    # (an infinite loop) synchronously in THIS process, so the gateway line
    # below was never reached; start() spawns a real child process. It also
    # printed the account token — a credential leak — which is removed.
    worker = multiprocessing.Process(target=loopie, daemon=True)
    worker.start()
    client.gateway.run()
| import discum_c844aef
import time
import multiprocessing
import json
import random
import re
import os
# Fall back to terminal output when tkinter is not available
try:
    from tkinter import messagebox
    use_terminal = False
except:  # noqa: E722 - keep the original catch-everything behaviour
    use_terminal = True

once = False      # one-shot latch flipped by the first gateway event
wbm = [12, 16]    # min/max seconds to wait between command batches
update = 0
class bot:
    """Static configuration holder: credentials, target channel and commands.

    Settings are read from settings.json; if the file is missing or invalid
    the user is prompted once and the answers are saved for the next run.

    Fix: the original opened settings.json with mode "w+", which truncates
    the file before reading, so json.load() always failed and saved settings
    could never be reused. Read and write in separate, correctly-moded opens.
    """

    owoid = 408785106942164992  # user id of the owo bot

    try:
        with open('settings.json') as file:
            data = json.load(file)
        token = data["token"]
        channel = data["channel"]
        proxy = data["proxy"]
        proxyserver = data["proxy_"]["server"]
        proxyport = data["proxy_"]["port"]
    except (OSError, ValueError, KeyError):
        # Missing or corrupt settings: prompt the user and persist the answers
        temp = {}
        temp["token"] = input("please enter your dc token for once: ")
        temp["channel"] = input("please enter the id of the channel: ")
        while True:
            temp["proxy"] = input("will you use proxy? [YES/NO]")
            temp["proxy_"] = {}
            if temp["proxy"].upper() == "YES":
                temp["proxy_"]["server"] = input("Proxy server: ")
                temp["proxy_"]["port"] = input("Proxy server port: ")
                break
            if temp["proxy"].upper() == "NO":
                temp["proxy_"]["server"] = None
                temp["proxy_"]["port"] = None
                break
        with open('settings.json', 'w') as file:
            json.dump(temp, file)
        token = temp["token"]
        channel = temp["channel"]
        proxy = temp["proxy"]
        proxyserver = temp["proxy_"]["server"]
        proxyport = temp["proxy_"]["port"]

    commands = [
        "owo hunt",
        "owo hunt",
        "owo battle",
    ]

    class color:
        """ANSI color codes; blanked on Windows where they are unsupported."""
        purple = '\033[95m'
        okblue = '\033[94m'
        okcyan = '\033[96m'
        okgreen = '\033[92m'
        warning = '\033[93m'
        fail = '\033[91m'
        reset = '\033[0m'
        bold = '\033[1m'
        underline = '\033[4m'
        if os.name == "nt":
            purple = okblue = okcyan = okgreen = warning = fail = reset = bold = underline = ''
def at():
    """Return the current local timestamp wrapped in ANSI highlight codes."""
    stamp = time.strftime("%d %b %Y %H:%M:%S", time.localtime())
    return f'\033[0;43m{stamp}\033[0;21m'
def report_error(content):
    """Report an error: terminal print when tkinter is missing, else a message box."""
    if not use_terminal:
        messagebox.showerror("OWO bot cheat", content)
    else:
        print(at(), content)
client=discum_c844aef.Client(token=bot.token,proxy_host=bot.proxyserver, proxy_port=bot.proxyport, log=False)
def issuechecker():
    """Scan the last 10 channel messages for ban/captcha indications.

    Returns:
        "exit" when the bot should stop (ban, captcha, rate warning, or no
        owo replies at all), otherwise None.
    """
    response = client.getMessages(str(bot.channel), num=10)
    messages = json.loads(response.text)
    owo_replies = 0
    for message in messages:
        if message['author']['id'] != str(bot.owoid):
            continue
        owo_replies += 1
        content = message['content']
        if "(2/5)" in str(content):
            return "exit"
        if 'banned' in content:
            print(f'{at()}{bot.color.fail} !!! [BANNED] !!! {bot.color.reset} your account have been banned from owo bot please open a issue on https://github.com/sudo-do/discord-selfbot-owo-bot/')
            return "exit"
        if 'complete your captcha' in content:
            print(f'{at()}{bot.color.warning} !! [CAPTCHA] !! {bot.color.reset} CAPTCHA ACTION REQUİRED {content[-6:]}')
            return "exit"
    if not owo_replies:
        return "exit"
def security():
    """Stop the bot immediately when issuechecker() flags a problem."""
    if issuechecker() != "exit":
        return
    report_error("Ban-security triggered, answer the captcha")
    exit()
def runner():
    """Send one or two random owo commands, then sleep a random interval."""
    global wbm
    first = random.choice(bot.commands)
    second = random.choice(bot.commands)
    client.typingAction(str(bot.channel))
    client.sendMessage(str(bot.channel), first)
    print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} {first}")
    if second != first:
        # Two distinct commands were drawn: send the second one too
        client.typingAction(str(bot.channel))
        time.sleep(1)
        client.sendMessage(str(bot.channel), second)
        print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} {second}")
    time.sleep(random.randint(wbm[0], wbm[1]))
def owopray():
    """Send the "owo pray" command to the configured channel."""
    client.sendMessage(str(bot.channel), "owo pray")
    print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo pray")
def gems():
    """Check the inventory and use the best gem of each tier.

    Fix: the original filtered `inv` with `inv.pop(...)` inside
    `for item in inv`, mutating the list while iterating it — that skips
    elements and leaves non-gem entries behind. Build a new list instead.
    """
    client.typingAction(str(bot.channel))
    time.sleep(2)
    client.sendMessage(str(bot.channel), "owo inv")
    print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo inv")
    time.sleep(5)
    response = client.getMessages(str(bot.channel), num=5)
    messages = json.loads(response.text)
    inv = 0
    for message in messages:
        if message['author']['id'] == str(bot.owoid) and 'Inventory' in message['content']:
            inv = re.findall(r'`(.*?)`', message['content'])
    if not inv:
        security()
        return
    if '50' in inv:
        # item 50 present — presumably a lootbox: open all and re-scan. TODO confirm
        client.sendMessage(str(bot.channel), "owo lb all")
        print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo lb all")
        time.sleep(10)
        gems()
        return
    # Keep only numeric item ids <= 100 (ids > 100 are weapons; non-numeric
    # entries are backgrounds etc.). Collect into a fresh list.
    gem_ids = []
    for item in inv:
        try:
            value = int(item)
        except ValueError:  # backgrounds etc.
            continue
        if value <= 100:
            gem_ids.append(value)
    tiers = [[], [], []]
    print(f"{at()}{bot.color.okblue} [INFO] {bot.color.reset} Found {len(gem_ids)} gems Inventory")
    for gem in gem_ids:
        if 50 < gem < 60:
            tiers[0].append(gem)
        elif 60 < gem < 70:
            tiers[1].append(gem)
        elif 70 < gem < 80:
            tiers[2].append(gem)
    for level in range(3):
        if tiers[level]:
            client.sendMessage(str(bot.channel), "owo use " + str(max(tiers[level])))
            print(f"{at()}{bot.color.okgreen} [SENT] {bot.color.reset} owo use {str(max(tiers[level]))}")
            time.sleep(6)
def loopie():
    """Main loop: hunt/battle continuously with periodic pray, gem and rest breaks."""
    last_pray = 0
    last_gems = last_pray
    last_rest = time.time()
    while True:
        runner()
        if time.time() - last_pray > random.randint(300, 500):
            security()
            owopray()
            last_pray = time.time()
        if time.time() - last_gems > random.randint(500, 1000):
            security()
            gems()
            last_gems = time.time()
        if time.time() - last_rest > random.randint(1000, 1800):
            # Long random pause to look less bot-like
            time.sleep(random.randint(150, 300))
            security()
            last_rest = time.time()
@client.gateway.command
def defination1(resp):
    """Gateway callback; flips the one-shot `once` flag on the first event."""
    global once
    if once:
        return
    once = True
if __name__ == '__main__':
    # Fixes: the original called Process(...).run(), which executes loopie()
    # (an infinite loop) synchronously in THIS process, so the gateway line
    # below was never reached; start() spawns a real child process. It also
    # printed the account token — a credential leak — which is removed.
    worker = multiprocessing.Process(target=loopie, daemon=True)
    worker.start()
    client.gateway.run()
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands related to android"""
import asyncio
import math
import os
import re
import time
from bs4 import BeautifulSoup
from requests import get
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
from userbot.utils import chrome, human_to_bytes, humanbytes, md5, time_formatter
GITHUB = "https://github.com"
DEVICES_DATA = (
"https://raw.githubusercontent.com/androidtrackers/"
"certified-android-devices/master/by_device.json"
)
@register(outgoing=True, pattern=r"^\.magisk$")
async def magisk(request):
    """Reply with links to the latest Magisk stable/beta/canary releases.

    Fix: the release line nested single quotes inside a single-quoted
    f-string (`f'...{data['magisk']...}'`), a SyntaxError on every Python
    before 3.12 (PEP 701); double-quote the keys instead.
    """
    magisk_dict = {
        "Stable": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/stable.json",
        "Beta": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/beta.json",
        "Canary": "https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/canary.json",
    }
    releases = "Latest Magisk Releases:\n"
    for name, release_url in magisk_dict.items():
        data = get(release_url).json()
        if str(name) == "Canary":
            # Canary entries hold relative paths; prefix the raw GitHub URL
            for key in ("magisk", "app", "uninstaller"):
                data[key]["link"] = (
                    "https://github.com/topjohnwu/magisk_files/raw/canary/"
                    + data[key]["link"]
                )
        releases += (
            f'{name}: [ZIP v{data["magisk"]["version"]}]({data["magisk"]["link"]}) | '
            f'[APK v{data["app"]["version"]}]({data["app"]["link"]}) | '
            f'[Uninstaller]({data["uninstaller"]["link"]})\n'
        )
    await request.edit(releases)
@register(outgoing=True, pattern=r"^\.device(?: |$)(\S*)")
async def device_info(request):
    """Look up basic info about an android device by its codename or model."""
    reply_msg = await request.get_reply_message()
    device = request.pattern_match.group(1)
    if not device:
        if reply_msg:
            device = reply_msg.text
        else:
            return await request.edit("`Usage: .device <codename> / <model>`")
    try:
        found = get(DEVICES_DATA).json()[device]
    except KeyError:
        reply = f"`Couldn't find info about {device}!`\n"
    else:
        reply = f"Search results for {device}:\n\n"
        for item in found:
            reply += (
                f"{item['brand']} {item['name']}\n"
                f"**Codename**: `{device}`\n"
                f"**Model**: {item['model']}\n\n"
            )
    await request.edit(reply)
@register(outgoing=True, pattern=r"^\.codename(?: |)([\S]*)(?: |)([\s\S]*)")
async def codename_info(request):
    """Search the certified-devices list for codenames matching brand + name."""
    reply_msg = await request.get_reply_message()
    brand = request.pattern_match.group(1).lower()
    device = request.pattern_match.group(2).lower()
    if not (brand and device):
        if reply_msg:
            words = reply_msg.text.split(" ")
            brand = words[0]
            device = " ".join(words[1:])
        else:
            return await request.edit("`Usage: .codename <brand> <device>`")
    matches = [
        entry
        for entry in get(DEVICES_DATA).json()
        if entry["brand"].lower() == brand and device in entry["name"].lower()
    ]
    matches = matches[:8]  # cap the reply length
    if matches:
        reply = f"Search results for {brand.capitalize()} {device.capitalize()}:\n\n"
        for item in matches:
            reply += (
                f"{item['brand']} {item['name']}\n"
                f"**Codename**: `{item['device']}`\n"
                f"**Model**: {item['model']}\n\n"
            )
    else:
        reply = f"`Couldn't find {device} codename!`\n"
    await request.edit(reply)
@register(outgoing=True, pattern=r"^\.pixeldl(?: |$)(.*)")
async def download_api(dl):
    """Download a Pixel Experience ROM to the userbot server via a headless browser.

    Fix: after reporting "There is no match version available..." the original
    fell through instead of returning, and then crashed indexing the download
    button list with `i = None`. Also quit the driver on that early exit.
    """
    await dl.edit("`Collecting information...`")
    URL = dl.pattern_match.group(1)
    URL_MSG = await dl.get_reply_message()
    if URL:
        pass
    elif URL_MSG:
        URL = URL_MSG.text
    else:
        await dl.edit("`Empty information...`")
        return
    if not re.findall(r"\bhttps?://download.*pixelexperience.*\.org\S+", URL):
        await dl.edit("`Invalid information...`")
        return
    driver = await chrome()
    await dl.edit("`Getting information...`")
    driver.get(URL)
    error = driver.find_elements_by_class_name("swal2-content")
    if len(error) > 0 and error[0].text == "File Not Found.":
        await dl.edit(f"`FileNotFoundError`: {URL} is not found.")
        return
    datas = driver.find_elements_by_class_name("download__meta")
    # Enumerate the metadata blocks to find the matched version's MD5
    md5_origin = None
    i = None
    for index, value in enumerate(datas):
        for data in value.text.split("\n"):
            if data.startswith("MD5"):
                md5_origin = data.split(":")[1].strip()
                i = index
                break
        if md5_origin is not None and i is not None:
            break
    if md5_origin is None and i is None:
        await dl.edit("`There is no match version available...`")
        driver.quit()
        return  # nothing to download — bail out instead of crashing below
    if URL.endswith("/"):
        file_name = URL.split("/")[-2]
    else:
        file_name = URL.split("/")[-1]
    file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
    download = driver.find_elements_by_class_name("download__btn")[i]
    download.click()
    await dl.edit("`Starting download...`")
    file_size = human_to_bytes(download.text.split(None, 2)[-1].strip("()"))
    display_message = None
    complete = False
    start = time.time()
    while not complete:
        if os.path.isfile(file_path + ".crdownload"):
            try:
                downloaded = os.stat(file_path + ".crdownload").st_size
                status = "Downloading"
            except OSError:  # rare race: chrome renamed the temp file
                await asyncio.sleep(1)
                continue
        elif os.path.isfile(file_path):
            downloaded = os.stat(file_path).st_size
            file_size = downloaded
            status = "Checking"
        else:
            await asyncio.sleep(0.3)
            continue
        diff = time.time() - start
        percentage = downloaded / file_size * 100
        speed = round(downloaded / diff, 2)
        eta = round((file_size - downloaded) / speed)
        filled = math.floor(percentage / 10)
        prog_str = "`{0}` | [{1}{2}] `{3}%`".format(
            status, "●" * filled, "○" * (10 - filled), round(percentage, 2)
        )
        current_message = (
            "`[DOWNLOAD]`\n\n"
            f"`{file_name}`\n"
            f"`Status`\n{prog_str}\n"
            f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
            f" @ {humanbytes(speed)}`\n"
            f"`ETA` -> {time_formatter(eta)}"
        )
        # Update the message roughly every 15s, or when the download finishes
        if (
            round(diff % 15.00) == 0
            and display_message != current_message
            or (downloaded == file_size)
        ):
            await dl.edit(current_message)
            display_message = current_message
        if downloaded == file_size:
            if not os.path.isfile(file_path):  # rare: rename not finished yet
                await asyncio.sleep(1)
                continue
            MD5 = await md5(file_path)
            if md5_origin == MD5:
                complete = True
            else:
                await dl.edit("`Download corrupt...`")
                os.remove(file_path)
                driver.quit()
                return
    await dl.respond(f"`{file_name}`\n\n" f"Successfully downloaded to `{file_path}`.")
    await dl.delete()
    driver.quit()
    return
@register(outgoing=True, pattern=r"^\.specs(?: |)([\S]*)(?: |)([\s\S]*)")
async def devices_specifications(request):
    """Fetch brief specifications for a mobile device from devicespecifications.com.

    Fixes: the original reported "unknown brand" / "can't find" but then fell
    through and crashed (get(None)) or edited an empty reply — add returns.
    It also reassigned `reply =` inside the result loop, discarding all but
    the last device — accumulate with `+=` instead.
    """
    textx = await request.get_reply_message()
    brand = request.pattern_match.group(1).lower()
    device = request.pattern_match.group(2).lower()
    if brand and device:
        pass
    elif textx:
        brand = textx.text.split(" ")[0]
        device = " ".join(textx.text.split(" ")[1:])
    else:
        return await request.edit("`Usage: .specs <brand> <device>`")
    all_brands = (
        BeautifulSoup(
            get("https://www.devicespecifications.com/en/brand-more").content, "lxml"
        )
        .find("div", {"class": "brand-listing-container-news"})
        .findAll("a")
    )
    try:
        brand_page_url = [
            i["href"] for i in all_brands if brand == i.text.strip().lower()
        ][0]
    except IndexError:
        return await request.edit(f"`{brand} is unknown brand!`")
    devices = BeautifulSoup(get(brand_page_url).content, "lxml").findAll(
        "div", {"class": "model-listing-container-80"}
    )
    device_page_url = [
        i.a["href"]
        for i in BeautifulSoup(str(devices), "lxml").findAll("h3")
        if device in i.text.strip().lower()
    ]
    if not device_page_url:
        return await request.edit(f"`can't find {device}!`")
    device_page_url = device_page_url[:2]  # cap the reply at two devices
    reply = ""
    for url in device_page_url:
        info = BeautifulSoup(get(url).content, "lxml")
        reply += "\n**" + info.title.text.split("-")[0].strip() + "**\n\n"
        info = info.find("div", {"id": "model-brief-specifications"})
        specifications = re.findall(r"<b>.*?<br/>", str(info))
        for item in specifications:
            title = re.findall(r"<b>(.*?)</b>", item)[0].strip()
            data = (
                re.findall(r"</b>: (.*?)<br/>", item)[0]
                .replace("<b>", "")
                .replace("</b>", "")
                .strip()
            )
            reply += f"**{title}**: {data}\n"
    await request.edit(reply)
@register(outgoing=True, pattern=r"^\.twrp(?: |$)(\S*)")
async def twrp(request):
    """Fetch the latest TWRP build link for a device codename from dl.twrp.me.

    Fix: `f"https://dl.twrp.me{download["href"]}"` reuses double quotes inside
    a double-quoted f-string — a SyntaxError on every Python before 3.12
    (PEP 701); build the link with plain concatenation instead.
    """
    textx = await request.get_reply_message()
    device = request.pattern_match.group(1)
    if device:
        pass
    elif textx:
        device = textx.text.split(" ")[0]
    else:
        return await request.edit("`Usage: .twrp <codename>`")
    url = get(f"https://dl.twrp.me/{device}/")
    if url.status_code == 404:
        reply = f"`Couldn't find twrp downloads for {device}!`\n"
        return await request.edit(reply)
    page = BeautifulSoup(url.content, "lxml")
    download = page.find("table").find("tr").find("a")
    dl_link = "https://dl.twrp.me" + download["href"]
    dl_file = download.text
    size = page.find("span", {"class": "filesize"}).text
    date = page.find("em").text.strip()
    reply = (
        f"**Latest TWRP for {device}:**\n"
        f"[{dl_file}]({dl_link}) - __{size}__\n"
        f"**Updated:** __{date}__\n"
    )
    await request.edit(reply)
CMD_HELP.update(
{
"android": "**Plugin : **`android`\
\n\n • **Syntax :** `.magisk`\
\n • **Function : **Dapatkan rilis Magisk terbaru \
\n\n • **Syntax :** `.device <codename>`\
\n • **Function : **Dapatkan info tentang nama kode atau model perangkat android. \
\n\n • **Syntax :** `.codename <brand> <device>`\
\n • **Function : **Cari nama kode perangkat android. \
\n\n • **Syntax :** `.pixeldl` **<download.pixelexperience.org>**\
\n • **Function : **Unduh ROM pengalaman piksel ke server bot pengguna Anda. \
\n\n • **Syntax :** `.specs <brand> <device>`\
\n • **Function : **Dapatkan info spesifikasi perangkat. \
\n\n • **Syntax :** `.twrp <codename>`\
\n • **Function : **Dapatkan unduhan twrp terbaru untuk perangkat android. \
"
}
)
| # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands related to android"""
import asyncio
import math
import os
import re
import time
from bs4 import BeautifulSoup
from requests import get
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
from userbot.utils import chrome, human_to_bytes, humanbytes, md5, time_formatter
GITHUB = "https://github.com"
DEVICES_DATA = (
"https://raw.githubusercontent.com/androidtrackers/"
"certified-android-devices/master/by_device.json"
)
@register(outgoing=True, pattern=r"^\.magisk$")
async def magisk(request):
    """Reply with links to the latest Magisk stable/beta/canary releases."""
    magisk_dict = {
        "Stable": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/stable.json",
        "Beta": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/beta.json",
        "Canary": "https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/canary.json",
    }
    releases = "Latest Magisk Releases:\n"
    for name, release_url in magisk_dict.items():
        data = get(release_url).json()
        if str(name) == "Canary":
            # Canary entries hold relative paths; prefix the raw GitHub URL
            for key in ("magisk", "app", "uninstaller"):
                data[key]["link"] = (
                    "https://github.com/topjohnwu/magisk_files/raw/canary/"
                    + data[key]["link"]
                )
        releases += (
            f'{name}: [ZIP v{data["magisk"]["version"]}]({data["magisk"]["link"]}) | '
            f'[APK v{data["app"]["version"]}]({data["app"]["link"]}) | '
            f'[Uninstaller]({data["uninstaller"]["link"]})\n'
        )
    await request.edit(releases)
@register(outgoing=True, pattern=r"^\.device(?: |$)(\S*)")
async def device_info(request):
    """Look up basic info about an android device by its codename or model."""
    reply_msg = await request.get_reply_message()
    device = request.pattern_match.group(1)
    if not device:
        if reply_msg:
            device = reply_msg.text
        else:
            return await request.edit("`Usage: .device <codename> / <model>`")
    try:
        found = get(DEVICES_DATA).json()[device]
    except KeyError:
        reply = f"`Couldn't find info about {device}!`\n"
    else:
        reply = f"Search results for {device}:\n\n"
        for item in found:
            reply += (
                f"{item['brand']} {item['name']}\n"
                f"**Codename**: `{device}`\n"
                f"**Model**: {item['model']}\n\n"
            )
    await request.edit(reply)
@register(outgoing=True, pattern=r"^\.codename(?: |)([\S]*)(?: |)([\s\S]*)")
async def codename_info(request):
    """Search the certified-devices index for an Android codename.

    Brand and device name come from the command arguments or from the
    replied-to message; at most eight matches are shown.
    """
    replied = await request.get_reply_message()
    brand = request.pattern_match.group(1).lower()
    device = request.pattern_match.group(2).lower()
    if not (brand and device):
        if replied:
            words = replied.text.split(" ")
            brand = words[0]
            device = " ".join(words[1:])
        else:
            return await request.edit("`Usage: .codename <brand> <device>`")
    matches = [
        entry
        for entry in get(DEVICES_DATA).json()
        if entry["brand"].lower() == brand and device in entry["name"].lower()
    ]
    # Keep the reply short: cap at eight results.
    matches = matches[:8]
    if matches:
        reply = f"Search results for {brand.capitalize()} {device.capitalize()}:\n\n"
        for entry in matches:
            reply += (
                f"{entry['brand']} {entry['name']}\n"
                f"**Codename**: `{entry['device']}`\n"
                f"**Model**: {entry['model']}\n\n"
            )
    else:
        reply = f"`Couldn't find {device} codename!`\n"
    await request.edit(reply)
@register(outgoing=True, pattern=r"^\.pixeldl(?: |$)(.*)")
async def download_api(dl):
    """Download a Pixel Experience ROM to the userbot server.

    Accepts a download.pixelexperience.org URL as the command argument or
    from the replied-to message, drives a Chrome session to trigger the
    download, live-edits a progress message, and verifies the finished
    file against the MD5 published on the page.
    """
    await dl.edit("`Collecting information...`")
    URL = dl.pattern_match.group(1)
    URL_MSG = await dl.get_reply_message()
    if URL:
        pass
    elif URL_MSG:
        URL = URL_MSG.text
    else:
        await dl.edit("`Empty information...`")
        return
    if not re.findall(r"\bhttps?://download.*pixelexperience.*\.org\S+", URL):
        await dl.edit("`Invalid information...`")
        return
    driver = await chrome()
    await dl.edit("`Getting information...`")
    driver.get(URL)
    error = driver.find_elements_by_class_name("swal2-content")
    if len(error) > 0:
        if error[0].text == "File Not Found.":
            await dl.edit(f"`FileNotFoundError`: {URL} is not found.")
            # BUG FIX: quit the browser on this early exit instead of
            # leaking the Chrome process.
            driver.quit()
            return
    datas = driver.find_elements_by_class_name("download__meta")
    # Enumerate the metadata blocks to find the one carrying the MD5 of the
    # requested build; its index selects the matching download button.
    md5_origin = None
    i = None
    for index, value in enumerate(datas):
        for data in value.text.split("\n"):
            if data.startswith("MD5"):
                md5_origin = data.split(":")[1].strip()
                i = index
                break
        if md5_origin is not None and i is not None:
            break
    if md5_origin is None and i is None:
        await dl.edit("`There is no match version available...`")
        # BUG FIX: previously fell through without returning and crashed
        # below when indexing the button list with i = None.
        driver.quit()
        return
    if URL.endswith("/"):
        file_name = URL.split("/")[-2]
    else:
        file_name = URL.split("/")[-1]
    file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
    download = driver.find_elements_by_class_name("download__btn")[i]
    download.click()
    await dl.edit("`Starting download...`")
    file_size = human_to_bytes(download.text.split(None, 2)[-1].strip("()"))
    display_message = None
    complete = False
    start = time.time()
    while complete is False:
        if os.path.isfile(file_path + ".crdownload"):
            try:
                downloaded = os.stat(file_path + ".crdownload").st_size
                status = "Downloading"
            except OSError:  # Rare case: file vanished between checks
                await asyncio.sleep(1)
                continue
        elif os.path.isfile(file_path):
            # Chrome renames .crdownload to the final name when finished.
            downloaded = os.stat(file_path).st_size
            file_size = downloaded
            status = "Checking"
        else:
            await asyncio.sleep(0.3)
            continue
        diff = time.time() - start
        percentage = downloaded / file_size * 100
        # BUG FIX: guard the divisions right after the download starts,
        # when elapsed time (and hence speed) can still be zero.
        speed = round(downloaded / diff, 2) if diff else 0.0
        eta = round((file_size - downloaded) / speed) if speed else 0
        prog_str = "`{0}` | [{1}{2}] `{3}%`".format(
            status,
            "".join(["●" for _ in range(math.floor(percentage / 10))]),
            "".join(["○" for _ in range(10 - math.floor(percentage / 10))]),
            round(percentage, 2),
        )
        current_message = (
            "`[DOWNLOAD]`\n\n"
            f"`{file_name}`\n"
            f"`Status`\n{prog_str}\n"
            f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
            f" @ {humanbytes(speed)}`\n"
            f"`ETA` -> {time_formatter(eta)}"
        )
        # Throttle edits: roughly every 15 s, or on completion.
        if (
            round(diff % 15.00) == 0
            and display_message != current_message
            or (downloaded == file_size)
        ):
            await dl.edit(current_message)
            display_message = current_message
        if downloaded == file_size:
            if not os.path.isfile(file_path):  # Rare case
                await asyncio.sleep(1)
                continue
            MD5 = await md5(file_path)
            if md5_origin == MD5:
                complete = True
            else:
                await dl.edit("`Download corrupt...`")
                os.remove(file_path)
                driver.quit()
                return
    await dl.respond(f"`{file_name}`\n\n" f"Successfully downloaded to `{file_path}`.")
    await dl.delete()
    driver.quit()
    return
@register(outgoing=True, pattern=r"^\.specs(?: |)([\S]*)(?: |)([\s\S]*)")
async def devices_specifications(request):
    """Fetch mobile device specifications from devicespecifications.com.

    Brand and device name come from the command arguments or from the
    replied-to message; at most two matching devices are shown.
    """
    textx = await request.get_reply_message()
    brand = request.pattern_match.group(1).lower()
    device = request.pattern_match.group(2).lower()
    if brand and device:
        pass
    elif textx:
        brand = textx.text.split(" ")[0]
        device = " ".join(textx.text.split(" ")[1:])
    else:
        return await request.edit("`Usage: .specs <brand> <device>`")
    all_brands = (
        BeautifulSoup(
            get("https://www.devicespecifications.com/en/brand-more").content, "lxml"
        )
        .find("div", {"class": "brand-listing-container-news"})
        .findAll("a")
    )
    try:
        brand_page_url = [
            i["href"] for i in all_brands if brand == i.text.strip().lower()
        ][0]
    except IndexError:
        # BUG FIX: previously fell through with brand_page_url = None and
        # crashed on the next HTTP request.
        return await request.edit(f"`{brand} is unknown brand!`")
    devices = BeautifulSoup(get(brand_page_url).content, "lxml").findAll(
        "div", {"class": "model-listing-container-80"}
    )
    device_page_url = [
        i.a["href"]
        for i in BeautifulSoup(str(devices), "lxml").findAll("h3")
        if device in i.text.strip().lower()
    ]
    if not device_page_url:
        # BUG FIX: the comprehension never raises IndexError, so the old
        # except-clause was dead code and an empty match list crashed later
        # when editing the message with an empty reply.
        return await request.edit(f"`can't find {device}!`")
    # Keep the reply short: show at most two matching devices.
    device_page_url = device_page_url[:2]
    reply = ""
    for url in device_page_url:
        info = BeautifulSoup(get(url).content, "lxml")
        # BUG FIX: append (+=) so the first device is not overwritten when
        # two matches are shown.
        reply += "\n**" + info.title.text.split("-")[0].strip() + "**\n\n"
        info = info.find("div", {"id": "model-brief-specifications"})
        specifications = re.findall(r"<b>.*?<br/>", str(info))
        for item in specifications:
            title = re.findall(r"<b>(.*?)</b>", item)[0].strip()
            data = (
                re.findall(r"</b>: (.*?)<br/>", item)[0]
                .replace("<b>", "")
                .replace("</b>", "")
                .strip()
            )
            reply += f"**{title}**: {data}\n"
    await request.edit(reply)
@register(outgoing=True, pattern=r"^\.twrp(?: |$)(\S*)")
async def twrp(request):
    """Fetch the latest TWRP recovery download for a device codename."""
    replied = await request.get_reply_message()
    device = request.pattern_match.group(1)
    if not device:
        if replied:
            device = replied.text.split(" ")[0]
        else:
            return await request.edit("`Usage: .twrp <codename>`")
    response = get(f"https://dl.twrp.me/{device}/")
    if response.status_code == 404:
        return await request.edit(f"`Couldn't find twrp downloads for {device}!`\n")
    page = BeautifulSoup(response.content, "lxml")
    # First table row holds the newest build.
    anchor = page.find("table").find("tr").find("a")
    dl_file = anchor.text
    dl_link = f"https://dl.twrp.me{anchor['href']}"
    size = page.find("span", {"class": "filesize"}).text
    date = page.find("em").text.strip()
    reply = (
        f"**Latest TWRP for {device}:**\n"
        f"[{dl_file}]({dl_link}) - __{size}__\n"
        f"**Updated:** __{date}__\n"
    )
    await request.edit(reply)
# Register this plugin's help entry for the userbot's .help command.
# NOTE: the help text below is user-facing runtime data (in Indonesian),
# so it is intentionally left untranslated and unmodified.
CMD_HELP.update(
    {
        "android": "**Plugin : **`android`\
\n\n • **Syntax :** `.magisk`\
\n • **Function : **Dapatkan rilis Magisk terbaru \
\n\n • **Syntax :** `.device <codename>`\
\n • **Function : **Dapatkan info tentang nama kode atau model perangkat android. \
\n\n • **Syntax :** `.codename <brand> <device>`\
\n • **Function : **Cari nama kode perangkat android. \
\n\n • **Syntax :** `.pixeldl` **<download.pixelexperience.org>**\
\n • **Function : **Unduh ROM pengalaman piksel ke server bot pengguna Anda. \
\n\n • **Syntax :** `.specs <brand> <device>`\
\n • **Function : **Dapatkan info spesifikasi perangkat. \
\n\n • **Syntax :** `.twrp <codename>`\
\n • **Function : **Dapatkan unduhan twrp terbaru untuk perangkat android. \
"
    }
)
|
import json
import logging
from json import JSONDecodeError
from os import environ
from time import sleep
from dnacentersdk import DNACenterAPI
from dnacentersdk.exceptions import ApiError
logger = logging.getLogger()
class DNAC():
    """Thin wrapper around the DNA Center SDK used by the Webex bot cards.

    Connection settings are read from the DNA_CENTER_* environment
    variables at construction time.
    """

    def __init__(self):
        # DNA_CENTER_VERIFY ("true"/"false") toggles TLS cert verification.
        verify = environ['DNA_CENTER_VERIFY'].lower() == 'true'
        self.api = DNACenterAPI(
            base_url=environ['DNA_CENTER_BASE_URL'],
            username=environ['DNA_CENTER_USERNAME'],
            password=environ['DNA_CENTER_PASSWORD'],
            verify=verify
        )

    def get_devices_for_card(self):
        """Return [{'hostname', 'id'}] for every device that has a hostname."""
        device_list = self.api.devices.get_device_list()
        return [{'hostname': x['hostname'], 'id': x['id']} for x in device_list['response'] if x['hostname']]

    def get_device_details_for_card(self, d_id):
        """Return the detail record for the device with id *d_id*."""
        d = self.api.devices.get_device_by_id(d_id)
        return d['response']

    def get_device_config_for_card(self, d_id):
        """Return the device's config, or None when the API cannot provide one."""
        try:
            d = self.api.devices.get_device_config_by_id(d_id)
            return d['response']
        except ApiError as e:
            # 501 means the platform cannot return a config for this device
            # type; log other errors too instead of swallowing them silently,
            # but keep the original None-returning contract for callers.
            logger.warning(f'ApiError getting config: {e}')
            if e.status_code == 501:
                return None
            return None

    def run_command_on_device(self, d_id, command):
        """Run a read-only CLI command on a device and return its output.

        Polls the task API for up to ~60 s until the task progress contains
        a fileId, downloads that file, and returns the first command output
        prefixed with ``` (Webex code formatting).  Returns None when the
        command did not succeed or the task never produced a file.
        """
        r = self.api.command_runner.run_read_only_commands_on_devices(
            commands=[command], deviceUuids=[d_id], description='Run from Webex Bot'
        )
        f_id = None
        for _ in range(60):
            progress = self.api.task.get_task_by_id(r['response']['taskId'])
            try:
                # Progress becomes JSON carrying a fileId once the task is done.
                f_id = json.loads(progress['response']['progress'])['fileId']
                break
            except JSONDecodeError:
                # BUG FIX: the original nested double quotes inside a
                # double-quoted f-string, a SyntaxError on Python < 3.12.
                logger.info(f"Progress: {progress['response']['progress']}")
                sleep(1)
        if f_id is None:
            # BUG FIX: previously fell through to a NameError on f_id when
            # the poll loop timed out; report failure as None instead.
            logger.warning('Timed out waiting for command output file')
            return None
        file_resp = self.api.file.download_a_file_by_fileid(f_id)
        data = json.loads(file_resp.data)[0]
        if data['commandResponses']['SUCCESS']:
            for output in data['commandResponses']['SUCCESS'].values():
                return "```" + output
        # if the command was not successful
        return None

    def get_user_enrichment_for_card(self, username):
        """Return user-enrichment details for a network user id."""
        return self.api.users.get_user_enrichment_details({'entity_type': 'network_user_id', 'entity_value': username})

    def get_issues_for_card(self, priority=None):
        """
        priority (str): p1, p2, p3, p4
        """
        d = self.api.issues.issues(priority=priority, issue_status='active')
        # sort the issues by most recent first
        return sorted(
            d['response'], key=lambda x: x['last_occurence_time'], reverse=True
        )
| import json
import logging
from json import JSONDecodeError
from os import environ
from time import sleep
from dnacentersdk import DNACenterAPI
from dnacentersdk.exceptions import ApiError
logger = logging.getLogger()
class DNAC():
    """Thin wrapper around the DNA Center SDK used by the Webex bot cards.

    Connection settings are read from the DNA_CENTER_* environment
    variables at construction time.
    """

    def __init__(self):
        # DNA_CENTER_VERIFY ("true"/"false") toggles TLS cert verification.
        verify = environ['DNA_CENTER_VERIFY'].lower() == 'true'
        self.api = DNACenterAPI(
            base_url=environ['DNA_CENTER_BASE_URL'],
            username=environ['DNA_CENTER_USERNAME'],
            password=environ['DNA_CENTER_PASSWORD'],
            verify=verify
        )

    def get_devices_for_card(self):
        """Return [{'hostname', 'id'}] for every device that has a hostname."""
        device_list = self.api.devices.get_device_list()
        return [{'hostname': x['hostname'], 'id': x['id']} for x in device_list['response'] if x['hostname']]

    def get_device_details_for_card(self, d_id):
        """Return the detail record for the device with id *d_id*."""
        d = self.api.devices.get_device_by_id(d_id)
        return d['response']

    def get_device_config_for_card(self, d_id):
        """Return the device's config, or None when the API cannot provide one.

        NOTE(review): a non-501 ApiError is swallowed and None is returned
        without logging -- confirm that is intended.
        """
        try:
            d = self.api.devices.get_device_config_by_id(d_id)
            return d['response']
        except ApiError as e:
            # 501: the platform cannot return a config for this device type.
            if e.status_code == 501:
                logger.warning(f'ApiError getting config: {e}')
                return None

    def run_command_on_device(self, d_id, command):
        """Run a read-only CLI command on a device and return its output.

        Polls the task API for up to ~60 s until the task progress contains
        a fileId, downloads that file, and returns the first command output
        prefixed with ``` (Webex code formatting).

        NOTE(review): if the poll loop times out, f_id is unbound and the
        download line raises NameError -- confirm a timeout path is wanted.
        """
        r = self.api.command_runner.run_read_only_commands_on_devices(
            commands=[command], deviceUuids=[d_id], description='Run from Webex Bot'
        )
        for _ in range(60):
            progress = self.api.task.get_task_by_id(r['response']['taskId'])
            try:
                # Progress becomes JSON carrying a fileId once the task is done.
                f_id = json.loads(progress['response']['progress'])['fileId']
                break
            except JSONDecodeError:
                logger.info(f"Progress: {progress['response']['progress']}")
                sleep(1)
        file_resp = self.api.file.download_a_file_by_fileid(f_id)
        data = json.loads(file_resp.data)[0]
        if data['commandResponses']['SUCCESS']:
            for output in data['commandResponses']['SUCCESS'].values():
                return "```" + output
        # if the command was not successful
        return None

    def get_user_enrichment_for_card(self, username):
        """Return user-enrichment details for a network user id."""
        return self.api.users.get_user_enrichment_details({'entity_type': 'network_user_id', 'entity_value': username})

    def get_issues_for_card(self, priority=None):
        """
        priority (str): p1, p2, p3, p4
        """
        d = self.api.issues.issues(priority=priority, issue_status='active')
        # sort the issues by most recent first
        return sorted(
            d['response'], key=lambda x: x['last_occurence_time'], reverse=True
        )
|
import sklearn
from numpy import inf, nan
from sklearn.ensemble import ExtraTreesClassifier as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _ExtraTreesClassifierImpl:
    """Lale wrapper delegating to sklearn's ExtraTreesClassifier (``Op``)."""

    def __init__(self, **hyperparams):
        # Keep the raw hyperparameters and build the wrapped estimator.
        self._hyperparams = hyperparams
        self._wrapped_model = Op(**self._hyperparams)

    def fit(self, X, y=None):
        """Fit the wrapped model; ``y`` is forwarded only when provided."""
        if y is None:
            self._wrapped_model.fit(X)
        else:
            self._wrapped_model.fit(X, y)
        return self

    def predict(self, X):
        """Return predicted class labels for ``X``."""
        return self._wrapped_model.predict(X)

    def predict_proba(self, X):
        """Return class-probability estimates for ``X``."""
        return self._wrapped_model.predict_proba(X)
# ---------------------------------------------------------------------------
# JSON schemas describing the hyperparameters and fit/predict I/O of the
# wrapped estimator.  These are auto-generated from the scikit-learn 0.20
# docstrings; "XXX TODO XXX" entries mark constraints the generator could
# not express.
# ---------------------------------------------------------------------------
_hyperparams_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "inherited docstring for ExtraTreesClassifier An extra-trees classifier.",
    "allOf": [
        {
            "type": "object",
            "required": [
                "n_estimators",
                "criterion",
                "max_depth",
                "min_samples_split",
                "min_samples_leaf",
                "min_weight_fraction_leaf",
                "max_features",
                "max_leaf_nodes",
                "min_impurity_decrease",
                "min_impurity_split",
                "bootstrap",
                "oob_score",
                "n_jobs",
                "random_state",
                "verbose",
                "warm_start",
                "class_weight",
            ],
            "relevantToOptimizer": [
                "n_estimators",
                "criterion",
                "max_depth",
                "min_samples_split",
                "min_samples_leaf",
                "max_features",
                "bootstrap",
            ],
            "additionalProperties": False,
            "properties": {
                "n_estimators": {
                    "type": "integer",
                    "minimumForOptimizer": 10,
                    "maximumForOptimizer": 100,
                    "distribution": "uniform",
                    "default": 10,
                    "description": "The number of trees in the forest",
                },
                "criterion": {
                    "enum": ["entropy", "gini"],
                    "default": "gini",
                    "description": "The function to measure the quality of a split",
                },
                "max_depth": {
                    "anyOf": [
                        {
                            "type": "integer",
                            "minimumForOptimizer": 3,
                            "maximumForOptimizer": 5,
                            "distribution": "uniform",
                        },
                        {"enum": [None]},
                    ],
                    "default": None,
                    "description": "The maximum depth of the tree",
                },
                "min_samples_split": {
                    "anyOf": [
                        {"type": "integer", "forOptimizer": False},
                        {
                            "type": "number",
                            "minimumForOptimizer": 0.01,
                            "maximumForOptimizer": 0.5,
                            "distribution": "uniform",
                        },
                    ],
                    "default": 2,
                    "description": "The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number",
                },
                "min_samples_leaf": {
                    "anyOf": [
                        {"type": "integer", "forOptimizer": False},
                        {
                            "type": "number",
                            "minimumForOptimizer": 0.01,
                            "maximumForOptimizer": 0.5,
                            "distribution": "uniform",
                        },
                    ],
                    "default": 1,
                    "description": "The minimum number of samples required to be at a leaf node",
                },
                "min_weight_fraction_leaf": {
                    "type": "number",
                    "default": 0.0,
                    "description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node",
                },
                "max_features": {
                    "anyOf": [
                        {"type": "integer", "forOptimizer": False},
                        {
                            "type": "number",
                            "minimumForOptimizer": 0.01,
                            "maximumForOptimizer": 1.0,
                            "distribution": "uniform",
                        },
                        {"type": "string", "forOptimizer": False},
                        {"enum": [None]},
                    ],
                    "default": "auto",
                    "description": "The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split",
                },
                "max_leaf_nodes": {
                    "anyOf": [{"type": "integer"}, {"enum": [None]}],
                    "default": None,
                    "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion",
                },
                "min_impurity_decrease": {
                    "type": "number",
                    "default": 0.0,
                    "description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value",
                },
                "min_impurity_split": {
                    "anyOf": [{"type": "number"}, {"enum": [None]}],
                    "default": None,
                    "description": "Threshold for early stopping in tree growth",
                },
                "bootstrap": {
                    "type": "boolean",
                    "default": False,
                    "description": "Whether bootstrap samples are used when building trees",
                },
                "oob_score": {
                    "type": "boolean",
                    "default": False,
                    "description": "Whether to use out-of-bag samples to estimate the generalization accuracy.",
                },
                "n_jobs": {
                    "anyOf": [{"type": "integer"}, {"enum": [None]}],
                    "default": 4,
                    "description": "The number of jobs to run in parallel for both `fit` and `predict`",
                },
                "random_state": {
                    "anyOf": [
                        {"type": "integer"},
                        {"laleType": "numpy.random.RandomState"},
                        {"enum": [None]},
                    ],
                    "default": None,
                    "description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
                },
                "verbose": {
                    "type": "integer",
                    "default": 0,
                    "description": "Controls the verbosity when fitting and predicting.",
                },
                "warm_start": {
                    "type": "boolean",
                    "default": False,
                    "description": "When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest",
                },
                "class_weight": {
                    "XXX TODO XXX": 'dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)',
                    "description": "Weights associated with classes in the form ``{class_label: weight}``",
                    "enum": ["balanced"],
                    "default": "balanced",
                },
            },
        },
        {
            "XXX TODO XXX": "Parameter: min_samples_leaf > only be considered if it leaves at least min_samples_leaf training samples in each of the left and right branches"
        },
    ],
}
# Schema for the arguments of fit(X, y, sample_weight).
_input_fit_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Build a forest of trees from the training set (X, y).",
    "type": "object",
    "required": ["X", "y"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix of shape = [n_samples, n_features]",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The training input samples",
        },
        "y": {
            "anyOf": [
                {"type": "array", "items": {"type": "number"}},
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The target values (class labels in classification, real numbers in regression).",
        },
        "sample_weight": {
            "anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
            "description": "Sample weights",
        },
    },
}
# Schema for the argument of predict(X).
_input_predict_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Predict class for X.",
    "type": "object",
    "required": ["X"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix of shape = [n_samples, n_features]",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The input samples",
        }
    },
}
# Schema for the return value of predict().
_output_predict_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "The predicted classes.",
    "anyOf": [
        {"type": "array", "items": {"type": "number"}},
        {"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
    ],
}
# Schema for the argument of predict_proba(X).
_input_predict_proba_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Predict class probabilities for X.",
    "type": "object",
    "required": ["X"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix of shape = [n_samples, n_features]",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The input samples",
        }
    },
}
# Schema for the return value of predict_proba().
_output_predict_proba_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "such arrays if n_outputs > 1",
    "laleType": "Any",
    "XXX TODO XXX": "array of shape = [n_samples, n_classes], or a list of n_outputs",
}
# All schemas combined with the operator metadata Lale needs.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Combined schema for expected data and hyperparameters.",
    "documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.ExtraTreesClassifier#sklearn-ensemble-extratreesclassifier",
    "import_from": "sklearn.ensemble",
    "type": "object",
    "tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_predict": _input_predict_schema,
        "output_predict": _output_predict_schema,
        "input_predict_proba": _input_predict_proba_schema,
        "output_predict_proba": _output_predict_proba_schema,
    },
}
# Build the Lale operator from the impl class and its schemas.
ExtraTreesClassifier = make_operator(_ExtraTreesClassifierImpl, _combined_schemas)
# NOTE(review): these are lexicographic string comparisons, not true version
# comparisons; they hold for the sklearn releases involved, but confirm
# against packaging.version if other versions must be supported.
if sklearn.__version__ >= "0.22":
    # old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
    # new: https://scikit-learn.org/0.23/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
    from lale.schemas import AnyOf, Float, Int, Null

    ExtraTreesClassifier = ExtraTreesClassifier.customize_schema(
        n_estimators=Int(
            desc="The number of trees in the forest.",
            default=100,
            forOptimizer=True,
            minimumForOptimizer=10,
            maximumForOptimizer=100,
        ),
        ccp_alpha=Float(
            desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
            default=0.0,
            forOptimizer=False,
            minimum=0.0,
            maximumForOptimizer=0.1,
        ),
        max_samples=AnyOf(
            types=[
                Null(desc="Draw X.shape[0] samples."),
                Int(desc="Draw max_samples samples.", minimum=1),
                Float(
                    desc="Draw max_samples * X.shape[0] samples.",
                    minimum=0.0,
                    exclusiveMinimum=True,
                    maximum=1.0,
                    exclusiveMaximum=True,
                ),
            ],
            desc="If bootstrap is True, the number of samples to draw from X to train each base estimator.",
            default=None,
        ),
        set_as_available=True,
    )
if sklearn.__version__ >= "1.0":
    # old: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
    # new: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
    # min_impurity_split was removed in sklearn 1.0.
    ExtraTreesClassifier = ExtraTreesClassifier.customize_schema(
        min_impurity_split=None, set_as_available=True
    )
set_docstrings(ExtraTreesClassifier)
| import sklearn
from numpy import inf, nan
from sklearn.ensemble import ExtraTreesClassifier as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _ExtraTreesClassifierImpl:
    """Lale wrapper delegating to sklearn's ExtraTreesClassifier (``Op``)."""

    def __init__(self, **hyperparams):
        # Keep the raw hyperparameters and build the wrapped estimator.
        self._hyperparams = hyperparams
        self._wrapped_model = Op(**self._hyperparams)

    def fit(self, X, y=None):
        """Fit the wrapped model; ``y`` is forwarded only when provided."""
        if y is not None:
            self._wrapped_model.fit(X, y)
        else:
            self._wrapped_model.fit(X)
        return self

    def predict(self, X):
        """Return predicted class labels for ``X``."""
        return self._wrapped_model.predict(X)

    def predict_proba(self, X):
        """Return class-probability estimates for ``X``."""
        return self._wrapped_model.predict_proba(X)
# ---------------------------------------------------------------------------
# JSON schemas describing the hyperparameters and fit/predict I/O of the
# wrapped estimator.  These are auto-generated from the scikit-learn 0.20
# docstrings; "XXX TODO XXX" entries mark constraints the generator could
# not express.
# ---------------------------------------------------------------------------
_hyperparams_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "inherited docstring for ExtraTreesClassifier An extra-trees classifier.",
    "allOf": [
        {
            "type": "object",
            "required": [
                "n_estimators",
                "criterion",
                "max_depth",
                "min_samples_split",
                "min_samples_leaf",
                "min_weight_fraction_leaf",
                "max_features",
                "max_leaf_nodes",
                "min_impurity_decrease",
                "min_impurity_split",
                "bootstrap",
                "oob_score",
                "n_jobs",
                "random_state",
                "verbose",
                "warm_start",
                "class_weight",
            ],
            "relevantToOptimizer": [
                "n_estimators",
                "criterion",
                "max_depth",
                "min_samples_split",
                "min_samples_leaf",
                "max_features",
                "bootstrap",
            ],
            "additionalProperties": False,
            "properties": {
                "n_estimators": {
                    "type": "integer",
                    "minimumForOptimizer": 10,
                    "maximumForOptimizer": 100,
                    "distribution": "uniform",
                    "default": 10,
                    "description": "The number of trees in the forest",
                },
                "criterion": {
                    "enum": ["entropy", "gini"],
                    "default": "gini",
                    "description": "The function to measure the quality of a split",
                },
                "max_depth": {
                    "anyOf": [
                        {
                            "type": "integer",
                            "minimumForOptimizer": 3,
                            "maximumForOptimizer": 5,
                            "distribution": "uniform",
                        },
                        {"enum": [None]},
                    ],
                    "default": None,
                    "description": "The maximum depth of the tree",
                },
                "min_samples_split": {
                    "anyOf": [
                        {"type": "integer", "forOptimizer": False},
                        {
                            "type": "number",
                            "minimumForOptimizer": 0.01,
                            "maximumForOptimizer": 0.5,
                            "distribution": "uniform",
                        },
                    ],
                    "default": 2,
                    "description": "The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number",
                },
                "min_samples_leaf": {
                    "anyOf": [
                        {"type": "integer", "forOptimizer": False},
                        {
                            "type": "number",
                            "minimumForOptimizer": 0.01,
                            "maximumForOptimizer": 0.5,
                            "distribution": "uniform",
                        },
                    ],
                    "default": 1,
                    "description": "The minimum number of samples required to be at a leaf node",
                },
                "min_weight_fraction_leaf": {
                    "type": "number",
                    "default": 0.0,
                    "description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node",
                },
                "max_features": {
                    "anyOf": [
                        {"type": "integer", "forOptimizer": False},
                        {
                            "type": "number",
                            "minimumForOptimizer": 0.01,
                            "maximumForOptimizer": 1.0,
                            "distribution": "uniform",
                        },
                        {"type": "string", "forOptimizer": False},
                        {"enum": [None]},
                    ],
                    "default": "auto",
                    "description": "The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split",
                },
                "max_leaf_nodes": {
                    "anyOf": [{"type": "integer"}, {"enum": [None]}],
                    "default": None,
                    "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion",
                },
                "min_impurity_decrease": {
                    "type": "number",
                    "default": 0.0,
                    "description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value",
                },
                "min_impurity_split": {
                    "anyOf": [{"type": "number"}, {"enum": [None]}],
                    "default": None,
                    "description": "Threshold for early stopping in tree growth",
                },
                "bootstrap": {
                    "type": "boolean",
                    "default": False,
                    "description": "Whether bootstrap samples are used when building trees",
                },
                "oob_score": {
                    "type": "boolean",
                    "default": False,
                    "description": "Whether to use out-of-bag samples to estimate the generalization accuracy.",
                },
                "n_jobs": {
                    "anyOf": [{"type": "integer"}, {"enum": [None]}],
                    "default": 4,
                    "description": "The number of jobs to run in parallel for both `fit` and `predict`",
                },
                "random_state": {
                    "anyOf": [
                        {"type": "integer"},
                        {"laleType": "numpy.random.RandomState"},
                        {"enum": [None]},
                    ],
                    "default": None,
                    "description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
                },
                "verbose": {
                    "type": "integer",
                    "default": 0,
                    "description": "Controls the verbosity when fitting and predicting.",
                },
                "warm_start": {
                    "type": "boolean",
                    "default": False,
                    "description": "When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest",
                },
                "class_weight": {
                    "XXX TODO XXX": 'dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)',
                    "description": "Weights associated with classes in the form ``{class_label: weight}``",
                    "enum": ["balanced"],
                    "default": "balanced",
                },
            },
        },
        {
            "XXX TODO XXX": "Parameter: min_samples_leaf > only be considered if it leaves at least min_samples_leaf training samples in each of the left and right branches"
        },
    ],
}
# Schema for the arguments of fit(X, y, sample_weight).
_input_fit_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Build a forest of trees from the training set (X, y).",
    "type": "object",
    "required": ["X", "y"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix of shape = [n_samples, n_features]",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The training input samples",
        },
        "y": {
            "anyOf": [
                {"type": "array", "items": {"type": "number"}},
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The target values (class labels in classification, real numbers in regression).",
        },
        "sample_weight": {
            "anyOf": [{"type": "array", "items": {"type": "number"}}, {"enum": [None]}],
            "description": "Sample weights",
        },
    },
}
# Schema for the argument of predict(X).
_input_predict_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Predict class for X.",
    "type": "object",
    "required": ["X"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix of shape = [n_samples, n_features]",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The input samples",
        }
    },
}
# Schema for the return value of predict().
_output_predict_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "The predicted classes.",
    "anyOf": [
        {"type": "array", "items": {"type": "number"}},
        {"type": "array", "items": {"type": "array", "items": {"type": "number"}}},
    ],
}
# Schema for the argument of predict_proba(X).
_input_predict_proba_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Predict class probabilities for X.",
    "type": "object",
    "required": ["X"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix of shape = [n_samples, n_features]",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "The input samples",
        }
    },
}
# Schema for the return value of predict_proba().
_output_predict_proba_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "such arrays if n_outputs > 1",
    "laleType": "Any",
    "XXX TODO XXX": "array of shape = [n_samples, n_classes], or a list of n_outputs",
}
# All schemas combined with the operator metadata Lale needs.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Combined schema for expected data and hyperparameters.",
    "documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.ExtraTreesClassifier#sklearn-ensemble-extratreesclassifier",
    "import_from": "sklearn.ensemble",
    "type": "object",
    "tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_predict": _input_predict_schema,
        "output_predict": _output_predict_schema,
        "input_predict_proba": _input_predict_proba_schema,
        "output_predict_proba": _output_predict_proba_schema,
    },
}
# Build the Lale operator from the impl class and its schemas.
ExtraTreesClassifier = make_operator(_ExtraTreesClassifierImpl, _combined_schemas)
# NOTE(review): these are lexicographic string comparisons, not true version
# comparisons; they hold for the sklearn releases involved, but confirm
# against packaging.version if other versions must be supported.
if sklearn.__version__ >= "0.22":
    # old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
    # new: https://scikit-learn.org/0.23/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
    from lale.schemas import AnyOf, Float, Int, Null

    ExtraTreesClassifier = ExtraTreesClassifier.customize_schema(
        n_estimators=Int(
            desc="The number of trees in the forest.",
            default=100,
            forOptimizer=True,
            minimumForOptimizer=10,
            maximumForOptimizer=100,
        ),
        ccp_alpha=Float(
            desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
            default=0.0,
            forOptimizer=False,
            minimum=0.0,
            maximumForOptimizer=0.1,
        ),
        max_samples=AnyOf(
            types=[
                Null(desc="Draw X.shape[0] samples."),
                Int(desc="Draw max_samples samples.", minimum=1),
                Float(
                    desc="Draw max_samples * X.shape[0] samples.",
                    minimum=0.0,
                    exclusiveMinimum=True,
                    maximum=1.0,
                    exclusiveMaximum=True,
                ),
            ],
            desc="If bootstrap is True, the number of samples to draw from X to train each base estimator.",
            default=None,
        ),
        set_as_available=True,
    )
if sklearn.__version__ >= "1.0":
    # old: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
    # new: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
    # min_impurity_split was removed in sklearn 1.0.
    ExtraTreesClassifier = ExtraTreesClassifier.customize_schema(
        min_impurity_split=None, set_as_available=True
    )
set_docstrings(ExtraTreesClassifier)
|
from configparser import ConfigParser
from lib.module import Module
import psycopg2
class PostgresDriver:
    """Thin wrapper around psycopg2 for a PostgreSQL database.

    Reads connection parameters from an ini-style config file and offers
    static helpers that build SQL statement strings, plus CRUD hooks that
    the table-specific subclasses override.

    NOTE(review): the build_* helpers interpolate values directly into the
    SQL text, so they are only safe for trusted input; prefer parameterized
    queries for anything user-supplied.
    """

    def __init__(self, config_file: str):
        """Load the ``[postgresql]`` section of *config_file* as connection kwargs.

        :param config_file: Configuration file containing Postgres Configuration
        :raises Exception: if the ``postgresql`` section is missing
        """
        parser = ConfigParser()
        parser.read(config_file)
        db = {}
        if parser.has_section('postgresql'):
            # Every key/value of the section becomes a psycopg2.connect() kwarg.
            for key, value in parser.items('postgresql'):
                db[key] = value
        else:
            raise Exception(f"Section 'postgresql' not found in the {config_file} file.")
        self.conf: dict = db
        self.conn = None  # psycopg2 connection, set by connect()
        self.cur = None  # cursor on self.conn, set by connect()

    def connect(self):
        """Connect to the database, obtain a cursor and print the server version."""
        self.conn = psycopg2.connect(**self.conf)
        self.cur = self.conn.cursor()
        self.cur.execute('SELECT version()')
        db_version = self.cur.fetchone()
        print('Version:', db_version)  # BUG FIX: label was misspelled 'Verion'

    def disconnect(self):
        """Close the cursor and the connection if they were opened."""
        if self.cur is not None:
            self.cur.close()
        if self.conn is not None:
            self.conn.close()

    @staticmethod
    def concat_keys(keys: list, seperator=', '):
        """Join column names for a SQL statement, e.g. ``'id, name'``.

        :param keys: key strings to be concatenated
        :param seperator: separator placed between the keys
        :return: joined keys string
        """
        return seperator.join(keys)

    @staticmethod
    def concat_eq_statements(key_vals: dict, seperator=', '):
        """Join ``key = value`` equality predicates.

        Each predicate keeps a leading space (`` key = value``) to preserve
        the exact statement text produced historically.

        :param key_vals: column -> value pairs compared in the predicate
        :param seperator: separator between predicates (e.g. ``' AND '``)
        :return: concatenated equality predicates
        """
        return seperator.join(
            f" {key} = {PostgresDriver.string_val(val)}"
            for key, val in key_vals.items()
        )

    @staticmethod
    def string_val(val):
        """Return *val* as SQL literal text, single-quoting strings.

        :param val: value to render
        :return: quoted text for strings, ``str(val)`` otherwise
        """
        # isinstance instead of type(...) == str: also accepts str subclasses.
        return "'" + val + "'" if isinstance(val, str) else str(val)

    @staticmethod
    def build_select(table: str, select: list = None, **where):
        """Build a SELECT query string.

        :param table: table to select from
        :param select: columns to return; all columns when empty/None
                       (default changed from a shared mutable ``[]`` to None)
        :param where: equality predicates as kwargs
        :return: SELECT query as string
        """
        sql = "SELECT "
        sql += "*" if not select else PostgresDriver.concat_keys(select)
        sql += f" FROM {table}"
        if where:
            sql += " WHERE "
            sql += PostgresDriver.concat_eq_statements(where, ' AND ')
        return sql

    @staticmethod
    def build_delete(table: str, **kwargs):
        """Build a DELETE query string.

        :param table: table to delete from
        :param kwargs: equality predicates as kwargs
        :return: DELETE query as string
        """
        sql = f"DELETE FROM {table}"
        if kwargs:
            sql += " WHERE "
            sql += PostgresDriver.concat_eq_statements(kwargs, ' AND ')
        return sql

    @staticmethod
    def build_update(table: str, updates: dict, **where):
        """Build an UPDATE query string.

        :param table: table to update
        :param updates: new values as a dict keyed by column
        :param where: equality predicates as kwargs
        :return: UPDATE query as string
        """
        sql = f"UPDATE {table} SET {PostgresDriver.concat_eq_statements(updates)}"
        if where:
            # BUG FIX: the nested double quotes inside this f-string were a
            # SyntaxError on Python < 3.12; use distinct quote styles.
            sql += f" WHERE {PostgresDriver.concat_eq_statements(where, ' AND ')}"
        return sql

    @staticmethod
    def build_insert(table: str, **key_vals):
        """Build an INSERT statement, skipping columns whose value is None.

        :param table: table to insert into
        :param key_vals: columns and their values as kwargs
        :return: INSERT statement as string
        """
        cols = []
        vals = []
        for key, val in key_vals.items():
            if val is not None:  # unset columns are omitted entirely
                cols.append(key)
                vals.append(PostgresDriver.string_val(val))
        return f"INSERT INTO {table}(" + ", ".join(cols) + ") VALUES (" + ", ".join(vals) + ")"

    def select(self):
        """Execute a SELECT query; overridden by table subclasses.

        :return: selected elements
        """
        pass

    def insert(self):
        """Execute an INSERT query; overridden by table subclasses."""
        pass

    def delete(self, **kwargs):
        """Execute a DELETE query; overridden by table subclasses."""
        pass

    def update(self, **kwargs):
        """Execute an UPDATE query; overridden by table subclasses."""
        pass
class CourseTable(PostgresDriver):
    """CRUD operations on the ``course`` table."""

    def select(self, select: list = [], **where):
        """Return the selected course rows as a list of tuples.

        :param select: columns to return (all when empty)
        :param where: equality predicates as kwargs
        """
        query = PostgresDriver.build_select("course", select, **where)
        self.cur.execute(query)
        return self.cur.fetchall()

    def insert(self, module: Module, on_conflict=''):
        """Insert *module* into the course table.

        :param module: module whose fields are inserted
        :param on_conflict: ON CONFLICT action; conflicts raise when empty
        """
        statement = PostgresDriver.build_insert("course", **module.get_fields())
        if on_conflict:
            statement = f"{statement} ON CONFLICT (id) {on_conflict}"
        self.cur.execute(statement)
        self.conn.commit()

    def delete(self, **where):
        """Delete matching courses; a call without predicates is a no-op."""
        if not where:
            return
        self.cur.execute(PostgresDriver.build_delete("course", **where))
        self.conn.commit()

    def delete_all(self):
        """Delete every row of the course table."""
        self.cur.execute(PostgresDriver.build_delete("course"))
        self.conn.commit()

    def update(self, updates: dict, **where):
        """Apply *updates* to matching courses; no-op without updates.

        :param updates: new values keyed by column
        :param where: equality predicates as kwargs
        """
        if not updates:
            return
        self.cur.execute(PostgresDriver.build_update("course", updates, **where))
        self.conn.commit()
class ProfessorTable(PostgresDriver):
    """CRUD operations on the ``professor`` table."""

    def select(self, select: list = [], **where):
        """Select professors from the professor table.

        :param select: columns to return (all when empty)
        :param where: equality predicates as kwargs
        :return: selected professors as a list of tuples
        """
        self.cur.execute(PostgresDriver.build_select("professor", select, **where))
        return self.cur.fetchall()

    def insert(self, prof_name: str):
        """Insert a professor (duplicate names are allowed).

        :param prof_name: professor name to insert
        """
        self.cur.execute(PostgresDriver.build_insert("professor", name=prof_name))
        self.conn.commit()

    def delete(self, **where):
        """Delete matching professors; a call without predicates is a no-op."""
        if not where:
            return
        self.cur.execute(PostgresDriver.build_delete("professor", **where))
        self.conn.commit()

    def delete_all(self):
        """Delete every row of the professor table."""
        self.cur.execute(PostgresDriver.build_delete("professor"))
        self.conn.commit()

    def update(self, updates: dict, **where):
        """Update professor information; no-op without updates.

        :param updates: new values keyed by column
        :param where: equality predicates as kwargs
        """
        if not updates:
            return
        # BUG FIX: previously built the UPDATE against the "course" table,
        # so professor updates silently modified the wrong table.
        self.cur.execute(PostgresDriver.build_update("professor", updates, **where))
        self.conn.commit()
class ProfessorCourseTable(PostgresDriver):
    """CRUD operations on the ``course_professors`` ('held by') join table."""

    def insert(self, course_id: str, prof):
        """Insert a course -> professor link.

        :param course_id: id of the course
        :param prof: professor id (int) or professor name (str)
        :return: True on success, False on failure or unsupported *prof* type
        """
        res = True
        try:
            # SECURITY FIX: values were previously interpolated into the SQL
            # text with f-strings (injectable); use parameterized queries.
            if isinstance(prof, int):
                self.cur.execute(
                    "INSERT INTO course_professors (course_id, professor_id) "
                    "VALUES (%s, %s)",
                    (course_id, prof),
                )
            elif isinstance(prof, str):
                self.cur.execute(
                    "INSERT INTO course_professors (course_id, professor_id) "
                    "VALUES (%s, (SELECT id FROM professor WHERE name = %s))",
                    (course_id, prof),
                )
            else:
                return False
        except Exception as err:
            print(err)
            res = False
        finally:
            # Also runs on the early `return False` path, as before.
            self.conn.commit()
        return res

    def select(self, select: list = [], **where):
        """Select link rows.

        :param select: columns to return (all when empty)
        :param where: equality predicates as kwargs
        :return: selected rows as a list of tuples
        """
        self.cur.execute(PostgresDriver.build_select("course_professors", select, **where))
        return self.cur.fetchall()

    def delete(self, where):
        """Delete matching links; a call with an empty dict is a no-op.

        NOTE(review): unlike the sibling tables this takes a dict
        positionally, not **kwargs — kept for backward compatibility.

        :param where: equality predicates as a dict
        """
        if not where:
            return
        self.cur.execute(PostgresDriver.build_delete("course_professors", **where))
        self.conn.commit()

    def delete_all(self):
        """Delete every row of the join table."""
        self.cur.execute(PostgresDriver.build_delete("course_professors"))
        self.conn.commit()

    def update(self, updates: dict, **where):
        """Update matching links; no-op without updates.

        :param updates: new values keyed by column
        :param where: equality predicates as kwargs
        """
        if not updates:
            return
        self.cur.execute(PostgresDriver.build_update("course_professors", updates, **where))
        self.conn.commit()
| from configparser import ConfigParser
from lib.module import Module
import psycopg2
class PostgresDriver:
    """
    Driver for PostgredDB
    """
    # NOTE(review): the build_* helpers interpolate values straight into the
    # SQL text (see string_val); only safe for trusted input — parameterized
    # queries would be preferable for anything user-supplied.

    def __init__(self, config_file: str):
        """
        :param config_file: Configuration file containing Postgres Configuration
        :raises Exception: if the file has no [postgresql] section
        """
        parser = ConfigParser()
        parser.read(config_file)
        db = {}
        if parser.has_section('postgresql'):
            # Every key/value of the section becomes a psycopg2.connect() kwarg.
            params = parser.items('postgresql')
            for param in params:
                db[param[0]] = param[1]
        else:
            raise Exception(f"Section 'postgresql' not found in the {config_file} file.")
        self.conf: dict = db
        self.conn = None  # psycopg2 connection, populated by connect()
        self.cur = None  # cursor on self.conn, populated by connect()

    def connect(self):
        """
        Connect to the database and obtain a cursor
        :return: None
        """
        self.conn = psycopg2.connect(**self.conf)
        self.cur = self.conn.cursor()
        self.cur.execute('SELECT version()')
        db_version = self.cur.fetchone()
        # NOTE(review): 'Verion' is a typo in the printed label (runtime
        # string, left unchanged here).
        print('Verion:', db_version)

    def disconnect(self):
        """
        Disconnect from database and delete cursor
        :return: None
        """
        if self.cur is not None:
            self.cur.close()
        if self.conn is not None:
            self.conn.close()

    @staticmethod
    def concat_keys(keys: list, seperator=', '):
        """
        Concatenate key string for SQL statement : e.g. 'id, name, ...'
        :param keys: key strings to be concatenated
        :param seperator: Seperator between the keys
        :return: keys string
        """
        buf = []
        for key in keys:
            buf.append(key)
        return seperator.join(buf)

    @staticmethod
    def concat_eq_statements(key_vals: dict, seperator=', '):
        """
        Concat statements which represent equality predicates
        :param key_vals: keys and values which are compared to each other in the predicate
        :param seperator: separator placed between the predicates, e.g. ' AND '
        :return: Concatenated equality predicate
        """
        buf = []
        for key, val in key_vals.items():
            val = PostgresDriver.string_val(val)
            # Each predicate keeps a leading space: " key = value".
            buf.append(f" {key} = {val}")
        return seperator.join(buf)

    @staticmethod
    def string_val(val: str):
        """
        Append approstrophes if the value to be inserted in the SQL Query is of type string
        :param val: value
        :return: value appended with apporstrophes if value is of type string
        """
        return r"'" + val + r"'" if type(val) == str else str(val)

    @staticmethod
    def build_select(table: str, select: list = [], **where):
        """
        Builds a SELECT query
        :param table: TABLE which the SELECT query should be executed for
        :param select: selection of columns (all columns when empty)
        :param where: equality predicates as kwargs
        :return: SELECT query as string
        """
        sql = "SELECT "
        if len(select) < 1:
            sql += "*"
        else:
            sql += PostgresDriver.concat_keys(select)
        sql += f" FROM {table}"
        if len(where) > 0:
            sql += " WHERE "
            sql += PostgresDriver.concat_eq_statements(where, ' AND ')
        return sql

    @staticmethod
    def build_delete(table: str, **kwargs):
        """
        Build DELETE query
        :param table: TABLE which the DELETE query should be executed for
        :param kwargs: equality predicates as kwargs
        :return: DELETE query as string
        """
        sql = f"DELETE FROM {table}"
        if len(kwargs) > 0:
            sql += " WHERE "
            sql += PostgresDriver.concat_eq_statements(kwargs, ' AND ')
        return sql

    @staticmethod
    def build_update(table: str, updates: dict, **where):
        """
        Build UPDATE query
        :param table: TABLE which the UPDATE query should be executed for
        :param updates: to be updated values as a dict where the key is the column
        :param where: equality predicates as kwargs
        :return: UPDATE query as string
        """
        sql = f"UPDATE {table} SET {PostgresDriver.concat_eq_statements(updates)}"
        if len(where) > 0:
            sql += f" WHERE {PostgresDriver.concat_eq_statements(where, ' AND ')}"
        return sql

    @staticmethod
    def build_insert(table: str, **key_vals):
        """
        Build INSERT statement; columns whose value is None are omitted
        :param table: TABLE which the INSERT query should be executed for
        :param key_vals: columns and their values as kwargs
        :return: INSERT statement as string
        """
        sql = f"INSERT INTO {table}("
        cols = []
        vals = []
        for key, val in key_vals.items():
            if val is not None:
                cols.append(key)
                vals.append(PostgresDriver.string_val(val))
        sql += ", ".join(cols) + ") VALUES (" + ", ".join(vals) + ")"
        return sql

    def select(self):
        """
        Execute SELECT query; overridden by the table subclasses
        :return: selected elements
        """
        pass

    def insert(self):
        """
        Execute INSERT query; overridden by the table subclasses
        :return: None
        """
        pass

    def delete(self, **kwargs):
        """
        Execute DELETE query; overridden by the table subclasses
        :return: None
        """
        pass

    def update(self, **kwargs):
        """
        Execute UPDATE query; overridden by the table subclasses
        :return: None
        """
        pass
class CourseTable(PostgresDriver):
    # CRUD operations on the "course" table; relies on the cursor/connection
    # opened by PostgresDriver.connect().

    def select(self, select: list = [], **where):
        """
        Execute SELECT query
        :param select: To be returned columns
        :param where: equality predicates as kwargs
        :return: Selected modules as a list of tuple
        """
        self.cur.execute(PostgresDriver.build_select("course", select, **where))
        return self.cur.fetchall()

    def insert(self, module: Module, on_conflict=''):
        """
        Insert a course / module into the course table
        :param module: module to be inserted
        :param on_conflict: What to do on conflict, throws Exception if not defined
        :return: None
        """
        sql = PostgresDriver.build_insert("course", **module.get_fields())
        if len(on_conflict) > 0:
            sql += f" ON CONFLICT (id) {on_conflict}"
        self.cur.execute(sql)
        self.conn.commit()

    def delete(self, **where):
        """
        Delete courses; a call without predicates is a no-op
        :param where: equality predicates as kwargs
        :return: None
        """
        if len(where) < 1:
            return
        self.cur.execute(PostgresDriver.build_delete("course", **where))
        self.conn.commit()

    def delete_all(self):
        """
        Delete all courses in the table
        :return: None
        """
        self.cur.execute(PostgresDriver.build_delete("course"))
        self.conn.commit()

    def update(self, updates: dict, **where):
        """
        update course; a call without updates is a no-op
        :param updates: to be updated values with key as columns
        :param where: equality predicates as kwargs
        :return: None
        """
        if len(updates) < 1:
            return
        self.cur.execute(PostgresDriver.build_update("course", updates, **where))
        self.conn.commit()
class ProfessorTable(PostgresDriver):
    """CRUD operations on the ``professor`` table."""

    def select(self, select: list = [], **where):
        """Select professors from the professor table.

        :param select: columns to return (all when empty)
        :param where: equality predicates as kwargs
        :return: selected professors as a list of tuples
        """
        self.cur.execute(PostgresDriver.build_select("professor", select, **where))
        return self.cur.fetchall()

    def insert(self, prof_name: str):
        """Insert a professor (duplicate names are allowed).

        :param prof_name: professor name to insert
        """
        self.cur.execute(PostgresDriver.build_insert("professor", name=prof_name))
        self.conn.commit()

    def delete(self, **where):
        """Delete matching professors; a call without predicates is a no-op."""
        if not where:
            return
        self.cur.execute(PostgresDriver.build_delete("professor", **where))
        self.conn.commit()

    def delete_all(self):
        """Delete every row of the professor table."""
        self.cur.execute(PostgresDriver.build_delete("professor"))
        self.conn.commit()

    def update(self, updates: dict, **where):
        """Update professor information; no-op without updates.

        :param updates: new values keyed by column
        :param where: equality predicates as kwargs
        """
        if not updates:
            return
        # BUG FIX: previously built the UPDATE against the "course" table,
        # so professor updates silently modified the wrong table.
        self.cur.execute(PostgresDriver.build_update("professor", updates, **where))
        self.conn.commit()
class ProfessorCourseTable(PostgresDriver):
    # Join table "course_professors" linking courses to the professors
    # holding them.

    def insert(self, course_id: str, prof):
        """
        Insert a 'held-by' entry
        :param course_id: id of the course
        :param prof: professor id (int) or professor name (str)
        :return: True on success, False on failure or unsupported prof type
        """
        res = True
        try:
            # NOTE(review): both branches interpolate values directly into
            # the SQL text — SQL-injectable if inputs are untrusted; prefer
            # parameterized queries.
            if type(prof) == int:
                sql = f"INSERT INTO course_professors (course_id, professor_id) VALUES ('{course_id}', {prof})"
            elif type(prof) == str:
                sql = f"INSERT INTO course_professors (course_id, professor_id) VALUES ('{course_id}', (SELECT id FROM professor WHERE name = '{prof}'))"
            else:
                return False
            self.cur.execute(sql)
        except Exception as err:
            print(err)
            res = False
        finally:
            # Also runs on the early `return False` path.
            self.conn.commit()
        return res

    def select(self, select: list = [], **where):
        """
        :param select: To be returned columns
        :param where: equality predicates as kwargs
        :return: Selected modules as a list of tuple
        """
        self.cur.execute(PostgresDriver.build_select("course_professors", select, **where))
        return self.cur.fetchall()

    def delete(self, where):
        """
        Delete entries; a call with an empty dict is a no-op.
        NOTE(review): unlike the sibling tables this takes a dict
        positionally, not **kwargs.
        :param where: equality predicates as a dict
        :return: None
        """
        if len(where) < 1:
            return
        self.cur.execute(PostgresDriver.build_delete("course_professors", **where))
        self.conn.commit()

    def delete_all(self):
        """
        Delete all entries in the table
        :return: None
        """
        self.cur.execute(PostgresDriver.build_delete("course_professors"))
        self.conn.commit()

    def update(self, updates: dict, **where):
        """
        Update entries; a call without updates is a no-op
        :param updates: to be updated values with key as columns
        :param where: equality predicates as kwargs
        :return: None
        """
        if len(updates) < 1:
            return
        self.cur.execute(PostgresDriver.build_update("course_professors", updates, **where))
        self.conn.commit()
|
import argparse
import pytest
import yaml
from allennlp_hydra.utils.testing import BaseTestCase
from allennlp_hydra.commands import class_to_yaml
class TestClassToYaml(BaseTestCase):
    """
    Tests for the class to yaml command.
    """

    @pytest.mark.parametrize("serialization_arg", ["-s", "--serialization-dir"])
    def test_cli_args(self, serialization_arg):
        """Verify the subcommand wires every CLI flag to the right argparse attribute."""
        parser = argparse.ArgumentParser(description="Testing")
        subparsers = parser.add_subparsers(title="Commands", metavar="")
        class_to_yaml.ClassToYaml().add_subparser(subparsers)
        raw_args = [
            "class2yaml",
            "sequence_tagging",
            "DatasetReader",
            serialization_arg,
            "serialization_dir",
            "--force",
        ]
        args = parser.parse_args(raw_args)
        assert args.func == class_to_yaml.class_to_yaml_from_args
        assert args.cls_name == "sequence_tagging"
        assert args.base_cls_name == "DatasetReader"
        assert args.serialization_dir == "serialization_dir"
        assert args.force

    @pytest.mark.parametrize(
        "base_cls,expected",
        [
            [
                "DatasetReader",
                {
                    "type" : "sequence_tagging",
                    "word_tag_delimiter": "###",
                    "token_delimiter" : None,
                    "token_indexers" : None,
                },
            ],
            ["DatasetReader", {"type": "multitask", "readers": "???"}],
        ],
        ids=["simple", "positional_args"],
    )
    def test_class_to_yaml(self, base_cls, expected):
        """Round-trip: the YAML file written must match the returned config dict."""
        result = class_to_yaml.class_to_yaml(
            cls_name=expected["type"],
            base_cls_name=base_cls,
            serialization_dir=str(self.TEST_DIR),
            force=False,
        )
        # BUG FIX: the f-strings used nested double quotes
        # (f"{expected["type"]}.yaml"), a SyntaxError on Python < 3.12;
        # use single quotes for the inner key.
        assert self.TEST_DIR.joinpath(f"{expected['type']}.yaml").exists()
        with self.TEST_DIR.joinpath(f"{expected['type']}.yaml").open(
            "r", encoding="utf-8"
        ) as result_file:
            saved_file = yaml.load(
                result_file,
                yaml.Loader,
            )
        assert result == saved_file
        assert result == expected
| import argparse
import pytest
import yaml
from allennlp_hydra.utils.testing import BaseTestCase
from allennlp_hydra.commands import class_to_yaml
class TestClassToYaml(BaseTestCase):
    """
    Tests for the class to yaml command.
    """

    @pytest.mark.parametrize("serialization_arg", ["-s", "--serialization-dir"])
    def test_cli_args(self, serialization_arg):
        # Register the subcommand on a fresh parser and check that every CLI
        # flag lands in the expected argparse attribute.
        parser = argparse.ArgumentParser(description="Testing")
        subparsers = parser.add_subparsers(title="Commands", metavar="")
        class_to_yaml.ClassToYaml().add_subparser(subparsers)
        raw_args = [
            "class2yaml",
            "sequence_tagging",
            "DatasetReader",
            serialization_arg,
            "serialization_dir",
            "--force",
        ]
        args = parser.parse_args(raw_args)
        assert args.func == class_to_yaml.class_to_yaml_from_args
        assert args.cls_name == "sequence_tagging"
        assert args.base_cls_name == "DatasetReader"
        assert args.serialization_dir == "serialization_dir"
        assert args.force

    @pytest.mark.parametrize(
        "base_cls,expected",
        [
            [
                "DatasetReader",
                {
                    "type" : "sequence_tagging",
                    "word_tag_delimiter": "###",
                    "token_delimiter" : None,
                    "token_indexers" : None,
                },
            ],
            ["DatasetReader", {"type": "multitask", "readers": "???"}],
        ],
        ids=["simple", "positional_args"],
    )
    def test_class_to_yaml(self, base_cls, expected):
        # Round-trip check: the YAML file written to the serialization dir
        # must equal both the returned config and the expected fixture.
        result = class_to_yaml.class_to_yaml(
            cls_name=expected["type"],
            base_cls_name=base_cls,
            serialization_dir=str(self.TEST_DIR),
            force=False,
        )
        assert self.TEST_DIR.joinpath(f"{expected['type']}.yaml").exists()
        with self.TEST_DIR.joinpath(f"{expected['type']}.yaml").open(
            "r", encoding="utf-8"
        ) as result_file:
            saved_file = yaml.load(
                result_file,
                yaml.Loader,
            )
        assert result == saved_file
        assert result == expected
|
# Copyright 2019 Joan Puig
# See LICENSE for details
import importlib
import keyword
import datetime
import networkx as nx
from pathlib import Path
from typing import Iterable, Optional, List
from FIT.profile import Profile, MessageScalarFieldProfile, MessageComponentFieldProfile
from FIT.base_types import BASE_TYPE_NAME_MAP
class CodeWriterError(Exception):
    """Raised when CodeWriter is used inconsistently (e.g. indenting mid-fragment)."""
class CodeWriter:
    """Accumulates generated source text, managing indentation and line fragments."""

    def __init__(self):
        self.indent_count = 0  # current indentation depth, in tabs
        self.content = ''  # accumulated output text
        self.in_fragment = False  # True while a line has been started but not ended

    def indent(self):
        """Indent all the code written after.

        :raises CodeWriterError: if called in the middle of a line fragment
        """
        if self.in_fragment:
            raise CodeWriterError('Cannot indent while writing a line fragment')
        self.indent_count += 1

    def unindent(self):
        """Unindent all the code written after.

        :raises CodeWriterError: if called in the middle of a line fragment
        """
        if self.in_fragment:
            raise CodeWriterError('Cannot unindent while writing a line fragment')
        self.indent_count -= 1

    def write(self, code: str):
        """Write a full line of code (fragment followed by a newline)."""
        self.write_fragment(code)
        self.new_line()

    def new_line(self, lines: int = 1):
        """Emit *lines* newline characters and terminate any open fragment."""
        # Removed a pointless no-op .format() call on the repeated literal.
        self.content += '\n' * lines
        self.in_fragment = False

    def write_fragment(self, code: str):
        """Write a partial line; further fragments continue on the same line.

        Indentation is emitted only for the first fragment of each line.
        """
        if self.in_fragment:
            self.content += code
        else:
            self.content += '\t' * self.indent_count + code
        self.in_fragment = True

    def write_to_file(self, file_name: str):
        """Flush any open fragment and write the accumulated content to *file_name*."""
        if self.in_fragment:
            self.new_line()
        # Explicit encoding so output does not depend on the platform locale.
        with open(file_name, 'w', encoding='utf-8') as file:
            file.write(self.content)
class CodeGeneratorError(Exception):
    """Raised when the profile cannot be turned into valid generated code."""
class CodeGenerator:
    """Common machinery shared by the concrete profile-driven code generators."""

    def __init__(self, profile: Profile, code_writer: CodeWriter):
        self.profile = profile
        # Fall back to a fresh writer when none was supplied.
        self.code_writer = code_writer if code_writer else CodeWriter()

    def _generate_header(self):
        """Write the copyright banner and a provenance line for the generated file."""
        writer = self.code_writer
        writer.write('# Copyright 2019 Joan Puig')
        writer.write('# See LICENSE for details')
        writer.new_line()
        writer.write(f'# Generated by {self.__class__.__name__} in {Path(__file__).name} based on profile version {self.profile.version.version_str()} on {datetime.datetime.now():%Y-%m-%d %H:%M:%S}')

    def _generate_base_type_imports(self):
        """Write the import lines for the FIT base types used by generated modules."""
        writer = self.code_writer
        writer.write('from FIT.base_types import SignedInt8, SignedInt16, SignedInt32, SignedInt64')
        writer.write('from FIT.base_types import UnsignedInt8, UnsignedInt16, UnsignedInt32, UnsignedInt64')
        writer.write('from FIT.base_types import UnsignedInt8z, UnsignedInt16z, UnsignedInt32z, UnsignedInt64z')
        writer.write('from FIT.base_types import FITEnum, String, Float32, Float64, Byte')

    def _generate_version(self):
        """Write the PROFILE_VERSION constant recording the source profile version."""
        self.code_writer.write(f'PROFILE_VERSION = ProfileVersion.{self.profile.version.name}')

    @staticmethod
    def _capitalize_type_name(name: str) -> str:
        """Turn a snake_case *name* into CamelCase (first letter of each chunk upper-cased)."""
        chunks = name.split('_')
        return ''.join(chunk[0].capitalize() + chunk[1:] for chunk in chunks)

    @staticmethod
    def _check_valid_name(name: str) -> None:
        """Raise CodeGeneratorError when *name* is empty, a keyword, or starts with a digit."""
        if not name:
            raise CodeGeneratorError('Name is empty')
        if keyword.iskeyword(name):
            raise CodeGeneratorError(f'Name {name} is a keyword')
        if name[0].isdigit():
            raise CodeGeneratorError(f'Name {name} starts with a digit')

    @staticmethod
    def _generate(code_generator, output_file: Optional[str] = None) -> str:
        """Drive *code_generator*, optionally write the result to *output_file*, return it."""
        code_generator._generate_full()
        if output_file:
            code_generator.code_writer.write_to_file(output_file)
        return code_generator.code_writer.content
class TypeCodeGenerator(CodeGenerator):
    """Generates the FIT type definitions (enums and base-type subclasses)."""

    def __init__(self, profile: Profile, code_writer: CodeWriter = None):
        """
        Some types appear to be enums in the profile, but in reality can take any value.
        """
        super().__init__(profile, code_writer)

    def _generate_full(self):
        """Emit header, imports, profile version constant and all type classes."""
        self._generate_header()
        self.code_writer.new_line(2)
        self._generate_imports()
        self.code_writer.new_line(2)
        self._generate_version()
        self.code_writer.new_line(2)
        self._generate_types()

    def _generate_imports(self):
        """Write the import block of the generated types module."""
        cw = self.code_writer
        cw.write('from enum import Enum, auto')
        cw.new_line()
        self._generate_base_type_imports()
        cw.new_line()
        cw.write('from FIT.profile import ProfileVersion')

    def _generate_types(self):
        """Write one class per profile type; non-enum types subclass their FIT base type."""
        cw = self.code_writer
        types = self.profile.types
        for type_profile in types:
            type_name = CodeGenerator._capitalize_type_name(type_profile.name)
            CodeGenerator._check_valid_name(type_name)
            cw.write(f'# FIT type name: {type_profile.name}')
            if type_profile.comment:
                cw.write(f'# {type_profile.comment}')
            if type_profile.is_enum:
                cw.write(f'class {type_name}(Enum):')
            else:
                cw.write(f'class {type_name}({BASE_TYPE_NAME_MAP[type_profile.base_type]}):')
            cw.indent()
            has_invalid = False
            has_invalid_value = False
            # Resolve the base type class to learn its designated invalid value.
            mod = importlib.import_module('FIT.base_types')
            type_class = getattr(mod, BASE_TYPE_NAME_MAP[type_profile.base_type])
            parent_type_invalid_value = type_class.metadata().invalid_value
            resolved_values = []
            for value in type_profile.values:
                value_name = CodeGenerator._capitalize_type_name(value.name)
                CodeGenerator._check_valid_name(value_name)
                if isinstance(value.value, str):
                    # String values may be hex etc.; base-0 parse for comparison.
                    value_str = f'{value.value}'
                    if int(value.value, 0) == parent_type_invalid_value:
                        has_invalid_value = True
                else:
                    value_str = f'{int(value.value):d}'
                    if int(value.value) == parent_type_invalid_value:
                        has_invalid_value = True
                resolved_values.append({
                    'value_name': value_name,
                    'base_type': BASE_TYPE_NAME_MAP[type_profile.base_type],
                    'value_str': value_str,
                    'original_value_name': value.name,
                    'comment': value.comment}
                )
                if value_name == 'Invalid':
                    has_invalid = True
            if not has_invalid and not has_invalid_value:
                # Guarantee every type has an Invalid member mapped to the
                # base type's invalid sentinel.
                resolved_values.append({
                    'value_name': 'Invalid',
                    'base_type': BASE_TYPE_NAME_MAP[type_profile.base_type],
                    'value_str': f'{parent_type_invalid_value}',
                    'original_value_name': 'Invalid',
                    'comment': 'Invalid value'}
                )
            # Column widths for aligned member output.
            max_name_length = max([len(resolved_value['value_name']) for resolved_value in resolved_values])
            max_value_length = max([len(resolved_value['value_str']) for resolved_value in resolved_values])
            max_original_name_length = max([len(resolved_value['original_value_name']) for resolved_value in resolved_values])
            fmt = '{:<' + str(max_name_length) + '} = {}({:>' + str(max_value_length) + '}) # {:<' + str(max_original_name_length) + '}'
            for resolved_value in resolved_values:
                cw.write_fragment(fmt.format(resolved_value['value_name'], resolved_value['base_type'], resolved_value['value_str'], resolved_value['original_value_name']))
                if resolved_value['comment']:
                    # BUG FIX: was f' - {resolved_value['comment']}' — same
                    # quote nested inside the f-string, a SyntaxError on
                    # Python < 3.12; use distinct quote styles.
                    cw.write(f" - {resolved_value['comment']}")
                else:
                    cw.write('')
            cw.unindent()
            cw.new_line(2)

    @staticmethod
    def generate(profile: Profile, output_file: Optional[str] = None, **kwargs) -> str:
        """Generate the types module; optionally write it to *output_file*."""
        code_generator = TypeCodeGenerator(profile, **kwargs)
        return CodeGenerator._generate(code_generator, output_file)
class MessageCodeGenerator(CodeGenerator):
    """Generates the FIT message dataclasses and their decoding helpers."""

    def __init__(self, profile: Profile, code_writer: CodeWriter = None):
        super().__init__(profile, code_writer)

    def _generate_full(self):
        """Emit header, imports, version constant, Unit enum and message classes."""
        self._generate_header()
        self.code_writer.new_line(2)
        self._generate_imports()
        self.code_writer.new_line(2)
        self._generate_version()
        self.code_writer.new_line(2)
        self._generate_units()
        self.code_writer.new_line(2)
        self._generate_messages()

    def _generate_imports(self):
        """Write the import block of the generated messages module."""
        cw = self.code_writer
        cw.write('import warnings')
        cw.write('import functools')
        cw.write('from typing import Tuple, Dict, Union')
        cw.write('from enum import Enum, auto')
        cw.write('from dataclasses import dataclass')
        cw.new_line()
        self._generate_base_type_imports()
        cw.new_line()
        cw.write('import FIT.types')
        cw.write('from FIT.model import Record, Message, MessageDefinition, FieldDefinition, RecordField, FieldMetadata, MessageMetadata, DeveloperMessageField, UndocumentedMessageField')
        cw.write('from FIT.profile import ProfileVersion')
        cw.write('from FIT.decoder import Decoder')

    def _generate_units(self):
        """Write the Unit enum with one member per profile unit."""
        cw = self.code_writer
        # (removed an unused local that shadowed self.profile.messages)
        cw.write('class Unit(Enum):')
        cw.indent()
        for unit in self.profile.units():
            CodeGenerator._check_valid_name(unit)
            cw.write(f'{unit} = auto()')
        cw.unindent()

    def _generate_messages(self):
        """Write a frozen dataclass plus decoding helpers for every profile message."""
        cw = self.code_writer
        messages = self.profile.messages
        for message in messages:
            message_name = CodeGenerator._capitalize_type_name(message.name)
            cw.write('@dataclass(frozen=True)')
            cw.write(f'# FIT message name: {message.name}')
            cw.write(f'class {message_name}(Message):')
            cw.indent()
            # Resolve each field to a python type reference (base type or FIT.types.*).
            resolved_fields = []
            for field in message.fields:
                CodeGenerator._check_valid_name(field.name)
                if field.type in BASE_TYPE_NAME_MAP:
                    rf = {'name': field.name, 'type': CodeGenerator._capitalize_type_name(BASE_TYPE_NAME_MAP[field.type]), 'comment': field.comment}
                else:
                    # Need to keep the FIT.types prefix as there are some messages that have the same name as some types
                    rf = {'name': field.name, 'type': 'FIT.types.' + CodeGenerator._capitalize_type_name(field.type), 'comment': field.comment}
                resolved_fields.append(rf)
            if resolved_fields:
                max_name_length = max([len(resolved_field['name']) for resolved_field in resolved_fields])
                max_type_length = max([len(resolved_field['type']) for resolved_field in resolved_fields])
                for rf in resolved_fields:
                    CodeGenerator._check_valid_name(rf['name'])
                    fmt = '{:<' + str(max_name_length) + '} : {:<' + str(max_type_length) + '}'
                    cw.write_fragment(fmt.format(rf['name'], rf['type']))
                    if rf['comment']:
                        # BUG FIX: was f' # {rf['comment']}' — same quote
                        # nested in the f-string, SyntaxError on Python < 3.12.
                        cw.write(f" # {rf['comment']}")
                    else:
                        cw.write('')
            cw.new_line()
            cw.write('@staticmethod')
            cw.write('def expected_field_numbers() -> Tuple[int]:')
            cw.indent()
            if len(message.fields) == 0:
                cw.write('return ()')
            elif len(message.fields) == 1:
                cw.write(f'return ({message.fields[0].number},)')
            else:
                # Pre-join outside the f-string (nested same-type quotes were
                # a SyntaxError on Python < 3.12).
                numbers = ', '.join([str(field.number) for field in message.fields if field.number is not None])
                cw.write(f'return ({numbers})')
            cw.unindent()
            cw.new_line()
            cw.write('@staticmethod')
            cw.write(f'def from_extracted_fields(extracted_fields, developer_fields: Tuple[DeveloperMessageField], undocumented_fields: Tuple[UndocumentedMessageField], error_on_invalid_enum_value: bool) -> "{message_name}":')
            cw.indent()
            if len(message.fields) > 0:
                cw.new_line()
                order = MessageCodeGenerator._field_extraction_order(message.fields)
                for i in order:
                    field = message.fields[i]
                    if field.number is not None:
                        if field.type in BASE_TYPE_NAME_MAP:
                            cw.write(f'{field.name} = Decoder.cast_value(extracted_fields[{field.number}], FIT.base_types.{CodeGenerator._capitalize_type_name(BASE_TYPE_NAME_MAP[field.type])}, error_on_invalid_enum_value)')
                        else:
                            cw.write(f'{field.name} = Decoder.cast_value(extracted_fields[{field.number}], FIT.types.{CodeGenerator._capitalize_type_name(field.type)}, error_on_invalid_enum_value)')
                    else:
                        cw.write(f'{field.name} = None')
                    # Dynamic (sub)fields reinterpret the value of the nearest
                    # preceding numbered field when a matcher condition holds.
                    reinterpreted_field_name = None
                    # NOTE(review): range(i, 0, -1) never examines index 0 —
                    # confirm the first field can never be the reinterpreted one.
                    for j in range(i, 0, -1):
                        if message.fields[j].number is not None:
                            reinterpreted_field_name = message.fields[j].name
                            break
                    for matcher in field.dynamic_field_matchers:
                        ref_field_value = matcher.ref_field_value
                        ref_field_profile = [f for f in message.fields if f.name == matcher.ref_field_name][0]
                        rftn = CodeGenerator._capitalize_type_name(ref_field_profile.type)
                        if ref_field_profile.type in BASE_TYPE_NAME_MAP:
                            if isinstance(ref_field_value, str):
                                val = f"FIT.base_types.{rftn}('{ref_field_value}')"
                            else:
                                val = f'FIT.base_types.{rftn}({ref_field_value})'
                        else:
                            if isinstance(ref_field_value, str):
                                val = f'FIT.types.{rftn}.{CodeGenerator._capitalize_type_name(ref_field_value)}'
                            else:
                                val = f'FIT.types.{rftn}({ref_field_value})'
                        cw.write(f'if {matcher.ref_field_name} == {val}:')
                        cw.indent()
                        if field.type in BASE_TYPE_NAME_MAP:
                            cw.write(f'{field.name} = Decoder.cast_value({reinterpreted_field_name}, FIT.base_types.{CodeGenerator._capitalize_type_name(BASE_TYPE_NAME_MAP[field.type])}, error_on_invalid_enum_value)')
                        else:
                            cw.write(f'{field.name} = Decoder.cast_value({reinterpreted_field_name}, FIT.types.{CodeGenerator._capitalize_type_name(field.type)}, error_on_invalid_enum_value)')
                        cw.unindent()
            # TODO components
            common_fields = ['developer_fields', 'undocumented_fields']
            cw.new_line()
            # Pre-join outside the f-string for Python < 3.12 compatibility.
            ctor_args = ', '.join(common_fields + [m.name for m in message.fields])
            cw.write(f'return {message_name}({ctor_args})')
            cw.new_line(2)
            cw.unindent()
            cw.unindent()

    @staticmethod
    def _field_extraction_order(fields) -> List[int]:
        """Topologically order *fields* so referenced fields decode before dependents.

        :raises CodeGeneratorError: when the field dependencies are circular
        """
        # BUG FIX: was nx.nx.DiGraph() — networkx exposes DiGraph directly;
        # the extra .nx attribute raises AttributeError on networkx >= 2.
        dependencies = nx.DiGraph()
        field_name_to_index_map = {field.name: index for index, field in enumerate(fields)}
        dependencies.add_nodes_from(field_name_to_index_map.keys())
        for i in range(0, len(fields)):
            field = fields[i]
            if field.dynamic_field_matchers:
                reinterpreted_field_name = None
                # NOTE(review): range(i, 0, -1) never examines index 0 —
                # mirrors the loop in _generate_messages; confirm intent.
                for j in range(i, 0, -1):
                    if fields[j].number is not None:
                        reinterpreted_field_name = fields[j].name
                        break
                for matcher in field.dynamic_field_matchers:
                    dependencies.add_edge(field.name, reinterpreted_field_name)
                    dependencies.add_edge(field.name, matcher.ref_field_name)
            if isinstance(field, MessageComponentFieldProfile):
                for component in field.components:
                    dependencies.add_edge(component.destination_field, field.name)
        cycles = ['->'.join(cycle) for cycle in nx.simple_cycles(dependencies)]
        if len(cycles) > 0:
            # Distinct quote styles keep this valid on Python < 3.12.
            raise CodeGeneratorError(f"The fields have the following circular dependencies: {', '.join(cycles)}")
        order = []
        # Repeatedly pick any node whose successors are all already ordered.
        while len(order) < len(fields):
            for node in dependencies.nodes:
                if node not in order:
                    node_dependencies = list(dependencies.successors(node))
                    if all([node_dependency in order for node_dependency in node_dependencies]):
                        order.append(node)
                        break
        return list([field_name_to_index_map[field] for field in order])

    @staticmethod
    def generate(profile: Profile, output_file: Optional[str] = None, **kwargs) -> str:
        """Generate the messages module; optionally write it to *output_file*."""
        code_generator = MessageCodeGenerator(profile, **kwargs)
        return CodeGenerator._generate(code_generator, output_file)
# Copyright 2019 Joan Puig
# See LICENSE for details
import importlib
import keyword
import datetime
import networkx as nx
from pathlib import Path
from typing import Iterable, Optional, List
from FIT.profile import Profile, MessageScalarFieldProfile, MessageComponentFieldProfile
from FIT.base_types import BASE_TYPE_NAME_MAP
class CodeWriterError(Exception):
    """Signals incorrect use of CodeWriter, e.g. changing indentation while a
    line fragment is still open."""
class CodeWriter:
    """
    Helper class that appends code fragments and manages indentation for code generation.

    Text accumulates in ``content``; each new line is prefixed with one tab per
    current indentation level.
    """

    def __init__(self):
        self.indent_count = 0     # current indentation depth, in tabs
        self.content = ''         # accumulated generated source
        self.in_fragment = False  # True while a line has been started but not terminated

    def indent(self):
        """
        Indents all the code written after.
        """
        if self.in_fragment:
            raise CodeWriterError('Cannot indent while writing a line fragment')
        self.indent_count += 1

    def unindent(self):
        """
        Unindents all the code written after.
        """
        if self.in_fragment:
            raise CodeWriterError('Cannot unindent while writing a line fragment')
        self.indent_count -= 1

    def write(self, code: str):
        """
        Writes a complete line of code (fragment plus terminating newline).
        """
        self.write_fragment(code)
        self.new_line()

    def new_line(self, lines: int = 1):
        """
        Writes one or more newlines, terminating the current fragment if inside one.
        """
        self.content = self.content + '\n' * lines
        self.in_fragment = False

    def write_fragment(self, code: str):
        """
        Writes a partial line of code; subsequent fragments continue on the same
        line until new_line() is called.
        """
        if self.in_fragment:
            self.content = self.content + code
        else:
            # The first fragment of a line carries the indentation prefix.
            self.content = self.content + '\t' * self.indent_count + code
        self.in_fragment = True

    def write_to_file(self, file_name: str):
        """
        Writes the current code to a file, terminating any open fragment first.
        """
        if self.in_fragment:
            self.new_line()
        # Generated sources are text; write as UTF-8 regardless of platform locale.
        with open(file_name, 'w', encoding='utf-8') as file:
            file.write(self.content)
class CodeGeneratorError(Exception):
    """Raised when code generation fails, e.g. on an invalid generated name or
    a circular field dependency."""
class CodeGenerator:
    """
    Base class of the code generators that provides common functionality:
    header/version emission, name mangling and validation, and file output.
    """

    def __init__(self, profile: 'Profile', code_writer: 'CodeWriter'):
        """
        Stores the profile and the writer; a fresh CodeWriter is created when
        code_writer is None.
        """
        self.profile = profile
        # Explicit None check: a caller-supplied (possibly still empty) writer
        # must be reused, not replaced.
        if code_writer is not None:
            self.code_writer = code_writer
        else:
            self.code_writer = CodeWriter()

    def _generate_header(self):
        """
        Writes the standard copyright and generated-by header.
        """
        cw = self.code_writer
        cw.write('# Copyright 2019 Joan Puig')
        cw.write('# See LICENSE for details')
        cw.new_line()
        cw.write(f'# Generated by {self.__class__.__name__} in {Path(__file__).name} based on profile version {self.profile.version.version_str()} on {datetime.datetime.now():%Y-%m-%d %H:%M:%S}')

    def _generate_base_type_imports(self):
        """
        Writes the FIT base-type imports shared by all generated modules.
        """
        cw = self.code_writer
        cw.write('from FIT.base_types import SignedInt8, SignedInt16, SignedInt32, SignedInt64')
        cw.write('from FIT.base_types import UnsignedInt8, UnsignedInt16, UnsignedInt32, UnsignedInt64')
        cw.write('from FIT.base_types import UnsignedInt8z, UnsignedInt16z, UnsignedInt32z, UnsignedInt64z')
        cw.write('from FIT.base_types import FITEnum, String, Float32, Float64, Byte')

    def _generate_version(self):
        """
        Adds a constant representing the profile version that was used to generate the code.
        """
        self.code_writer.write(f'PROFILE_VERSION = ProfileVersion.{self.profile.version.name}')

    @staticmethod
    def _capitalize_type_name(name: str) -> str:
        """
        Converts a snake_case profile name to CamelCase, e.g. 'heart_rate' -> 'HeartRate'.
        """
        # c[:1] (not c[0]) tolerates empty components produced by double or
        # leading underscores, which would otherwise raise IndexError.
        return ''.join(c[:1].capitalize() + c[1:] for c in name.split('_'))

    @staticmethod
    def _check_valid_name(name: str) -> None:
        """
        Errors if the name identifier is invalid: empty, a Python keyword, or
        starting with a digit.
        """
        if not name:
            raise CodeGeneratorError('Name is empty')
        if keyword.iskeyword(name):
            raise CodeGeneratorError(f'Name {name} is a keyword')
        if name[0].isdigit():
            raise CodeGeneratorError(f'Name {name} starts with a digit')

    @staticmethod
    def _generate(code_generator, output_file: Optional[str] = None) -> str:
        """
        Called by the child classes to generate code, optionally write it to
        output_file, and return the generated source.
        """
        code_generator._generate_full()
        if output_file:
            code_generator.code_writer.write_to_file(output_file)
        return code_generator.code_writer.content
class TypeCodeGenerator(CodeGenerator):
    """
    Generates the FIT type classes (enums and open-valued newtypes) from a profile.
    """

    def __init__(self, profile: Profile, code_writer: CodeWriter = None):
        """
        Some types appear to be enums in the profile, but in reality can take
        any value; those are emitted as subclasses of their FIT base type
        instead of Enum (see the is_enum branch in _generate_types).
        """
        super().__init__(profile, code_writer)

    def _generate_full(self):
        # Emits the complete module: header, imports, version constant, types.
        self._generate_header()
        self.code_writer.new_line(2)
        self._generate_imports()
        self.code_writer.new_line(2)
        self._generate_version()
        self.code_writer.new_line(2)
        self._generate_types()

    def _generate_imports(self):
        # Emits the import block of the generated module.
        cw = self.code_writer
        cw.write('from enum import Enum, auto')
        cw.new_line()
        self._generate_base_type_imports()
        cw.new_line()
        cw.write('from FIT.profile import ProfileVersion')

    def _generate_types(self):
        """
        Emits one class per profile type. Enum-like types subclass Enum; other
        types subclass their FIT base type. An 'Invalid' member is appended
        unless the profile already defines one by name or by value.
        """
        cw = self.code_writer
        types = self.profile.types
        for type_profile in types:
            type_name = CodeGenerator._capitalize_type_name(type_profile.name)
            CodeGenerator._check_valid_name(type_name)
            cw.write(f'# FIT type name: {type_profile.name}')
            if type_profile.comment:
                cw.write(f'# {type_profile.comment}')
            if type_profile.is_enum:
                cw.write(f'class {type_name}(Enum):')
            else:
                cw.write(f'class {type_name}({BASE_TYPE_NAME_MAP[type_profile.base_type]}):')
            cw.indent()

            has_invalid = False        # profile defines a member literally named 'Invalid'
            has_invalid_value = False  # profile defines a member equal to the base type's invalid sentinel

            # Look up the base type class to learn its invalid sentinel value.
            mod = importlib.import_module('FIT.base_types')
            type_class = getattr(mod, BASE_TYPE_NAME_MAP[type_profile.base_type])
            parent_type_invalid_value = type_class.metadata().invalid_value

            resolved_values = []
            for value in type_profile.values:
                value_name = CodeGenerator._capitalize_type_name(value.name)
                CodeGenerator._check_valid_name(value_name)
                if isinstance(value.value, str):
                    value_str = f'{value.value}'
                    # base 0 lets int() accept hex/octal/binary literals from the profile
                    if int(value.value, 0) == parent_type_invalid_value:
                        has_invalid_value = True
                else:
                    value_str = f'{int(value.value):d}'
                    if int(value.value) == parent_type_invalid_value:
                        has_invalid_value = True
                resolved_values.append({
                    'value_name': value_name,
                    'base_type': BASE_TYPE_NAME_MAP[type_profile.base_type],
                    'value_str': value_str,
                    'original_value_name': value.name,
                    'comment': value.comment}
                )
                if value_name == 'Invalid':
                    has_invalid = True
            if not has_invalid and not has_invalid_value:
                resolved_values.append({
                    'value_name': 'Invalid',
                    'base_type': BASE_TYPE_NAME_MAP[type_profile.base_type],
                    'value_str': f'{parent_type_invalid_value}',
                    'original_value_name': 'Invalid',
                    'comment': 'Invalid value'}
                )
            # resolved_values is never empty here: if the profile defined no
            # values, the Invalid member was just appended above.
            # Column-align names, values and trailing comments for readability.
            max_name_length = max([len(resolved_value['value_name']) for resolved_value in resolved_values])
            max_value_length = max([len(resolved_value['value_str']) for resolved_value in resolved_values])
            max_original_name_length = max([len(resolved_value['original_value_name']) for resolved_value in resolved_values])
            fmt = '{:<' + str(max_name_length) + '} = {}({:>' + str(max_value_length) + '}) # {:<' + str(max_original_name_length) + '}'
            for resolved_value in resolved_values:
                cw.write_fragment(fmt.format(resolved_value['value_name'], resolved_value['base_type'], resolved_value['value_str'], resolved_value['original_value_name']))
                if resolved_value['comment']:
                    cw.write(f' - {resolved_value["comment"]}')
                else:
                    cw.write('')
            cw.unindent()
            cw.new_line(2)

    @staticmethod
    def generate(profile: Profile, output_file: Optional[str] = None, **kwargs) -> str:
        # Public entry point: generate the types module, optionally writing it
        # to output_file, and return the generated source.
        code_generator = TypeCodeGenerator(profile, **kwargs)
        return CodeGenerator._generate(code_generator, output_file)
class MessageCodeGenerator(CodeGenerator):
    """
    Generates the FIT message dataclasses from a profile.

    For every message it emits a frozen dataclass with one attribute per
    profile field plus two decoder helpers: ``expected_field_numbers`` and
    ``from_extracted_fields``.
    """

    def __init__(self, profile: Profile, code_writer: CodeWriter = None):
        super().__init__(profile, code_writer)

    def _generate_full(self):
        """Emits the complete module: header, imports, version, units, messages."""
        self._generate_header()
        self.code_writer.new_line(2)
        self._generate_imports()
        self.code_writer.new_line(2)
        self._generate_version()
        self.code_writer.new_line(2)
        self._generate_units()
        self.code_writer.new_line(2)
        self._generate_messages()

    def _generate_imports(self):
        """Emits the import block of the generated module."""
        cw = self.code_writer
        cw.write('import warnings')
        cw.write('import functools')
        cw.write('from typing import Tuple, Dict, Union')
        cw.write('from enum import Enum, auto')
        cw.write('from dataclasses import dataclass')
        cw.new_line()
        self._generate_base_type_imports()
        cw.new_line()
        cw.write('import FIT.types')
        cw.write('from FIT.model import Record, Message, MessageDefinition, FieldDefinition, RecordField, FieldMetadata, MessageMetadata, DeveloperMessageField, UndocumentedMessageField')
        cw.write('from FIT.profile import ProfileVersion')
        cw.write('from FIT.decoder import Decoder')

    def _generate_units(self):
        """Emits a Unit enum with one member per unit used in the profile."""
        cw = self.code_writer
        cw.write('class Unit(Enum):')
        cw.indent()
        for unit in self.profile.units():
            CodeGenerator._check_valid_name(unit)
            cw.write(f'{unit} = auto()')
        cw.unindent()

    def _generate_messages(self):
        """Emits one frozen dataclass (plus decoder helpers) per profile message."""
        cw = self.code_writer
        messages = self.profile.messages
        for message in messages:
            message_name = CodeGenerator._capitalize_type_name(message.name)
            cw.write('@dataclass(frozen=True)')
            cw.write(f'# FIT message name: {message.name}')
            cw.write(f'class {message_name}(Message):')
            cw.indent()

            # Resolve every field to a (name, generated type, comment) triple
            # so the attribute block can be column-aligned below.
            resolved_fields = []
            for field in message.fields:
                CodeGenerator._check_valid_name(field.name)
                if field.type in BASE_TYPE_NAME_MAP:
                    rf = {'name': field.name, 'type': CodeGenerator._capitalize_type_name(BASE_TYPE_NAME_MAP[field.type]), 'comment': field.comment}
                else:
                    # Need to keep the FIT.types prefix as there are some messages that have the same name as some types
                    rf = {'name': field.name, 'type': 'FIT.types.' + CodeGenerator._capitalize_type_name(field.type), 'comment': field.comment}
                resolved_fields.append(rf)

            if resolved_fields:
                max_name_length = max([len(resolved_field['name']) for resolved_field in resolved_fields])
                max_type_length = max([len(resolved_field['type']) for resolved_field in resolved_fields])
                for rf in resolved_fields:
                    CodeGenerator._check_valid_name(rf['name'])
                    fmt = '{:<' + str(max_name_length) + '} : {:<' + str(max_type_length) + '}'
                    cw.write_fragment(fmt.format(rf['name'], rf['type']))
                    if rf['comment']:
                        cw.write(f' # {rf["comment"]}')
                    else:
                        cw.write('')

            cw.new_line()
            cw.write('@staticmethod')
            cw.write('def expected_field_numbers() -> Tuple[int]:')
            cw.indent()
            # Only numbered (documented) fields appear on the wire; dynamic
            # fields without a number reinterpret other fields.
            numbers = [str(field.number) for field in message.fields if field.number is not None]
            if len(numbers) == 0:
                cw.write('return ()')
            elif len(numbers) == 1:
                # The trailing comma keeps a single element a tuple; without it
                # the generated '(n)' would just be an int.
                cw.write(f'return ({numbers[0]},)')
            else:
                cw.write(f'return ({", ".join(numbers)})')
            cw.unindent()
            cw.new_line()
            cw.write('@staticmethod')
            cw.write(f'def from_extracted_fields(extracted_fields, developer_fields: Tuple[DeveloperMessageField], undocumented_fields: Tuple[UndocumentedMessageField], error_on_invalid_enum_value: bool) -> "{message_name}":')
            cw.indent()
            if len(message.fields) > 0:
                cw.new_line()
                # Fields must be assigned in dependency order so dynamic fields
                # can reference already-decoded values.
                order = MessageCodeGenerator._field_extraction_order(message.fields)
                for i in order:
                    field = message.fields[i]
                    if field.number is not None:
                        if field.type in BASE_TYPE_NAME_MAP:
                            cw.write(f'{field.name} = Decoder.cast_value(extracted_fields[{field.number}], FIT.base_types.{CodeGenerator._capitalize_type_name(BASE_TYPE_NAME_MAP[field.type])}, error_on_invalid_enum_value)')
                        else:
                            cw.write(f'{field.name} = Decoder.cast_value(extracted_fields[{field.number}], FIT.types.{CodeGenerator._capitalize_type_name(field.type)}, error_on_invalid_enum_value)')
                    else:
                        cw.write(f'{field.name} = None')
                        # A numberless (dynamic) field reinterprets the nearest
                        # preceding numbered field.
                        reinterpreted_field_name = None
                        # NOTE(review): range(i, 0, -1) never inspects index 0,
                        # so a numbered field in the first position is skipped —
                        # confirm intent.
                        for j in range(i, 0, -1):
                            if message.fields[j].number is not None:
                                reinterpreted_field_name = message.fields[j].name
                                break
                        for matcher in field.dynamic_field_matchers:
                            ref_field_value = matcher.ref_field_value
                            ref_field_profile = [field for field in message.fields if field.name == matcher.ref_field_name][0]
                            rftn = CodeGenerator._capitalize_type_name(ref_field_profile.type)
                            # Render the matcher's reference value as a source
                            # expression of the reference field's type.
                            if ref_field_profile.type in BASE_TYPE_NAME_MAP:
                                if isinstance(ref_field_value, str):
                                    val = f'FIT.base_types.{rftn}(\'{ref_field_value}\')'
                                else:
                                    val = f'FIT.base_types.{rftn}({ref_field_value})'
                            else:
                                if isinstance(ref_field_value, str):
                                    val = f'FIT.types.{rftn}.{CodeGenerator._capitalize_type_name(ref_field_value)}'
                                else:
                                    val = f'FIT.types.{rftn}({ref_field_value})'
                            cw.write(f'if {matcher.ref_field_name} == {val}:')
                            cw.indent()
                            if field.type in BASE_TYPE_NAME_MAP:
                                cw.write(f'{field.name} = Decoder.cast_value({reinterpreted_field_name}, FIT.base_types.{CodeGenerator._capitalize_type_name(BASE_TYPE_NAME_MAP[field.type])}, error_on_invalid_enum_value)')
                            else:
                                cw.write(f'{field.name} = Decoder.cast_value({reinterpreted_field_name}, FIT.types.{CodeGenerator._capitalize_type_name(field.type)}, error_on_invalid_enum_value)')
                            cw.unindent()
            # TODO components
            common_fields = ['developer_fields', 'undocumented_fields']
            cw.new_line()
            cw.write(f'return {message_name}({", ".join(common_fields + [m.name for m in message.fields])})')
            cw.new_line(2)
            cw.unindent()
            cw.unindent()

    @staticmethod
    def _field_extraction_order(fields) -> List[int]:
        """
        Returns the indices of *fields* ordered so that every field is decoded
        after the fields it depends on (dynamic-field references and component
        destination fields).

        Raises CodeGeneratorError if the fields depend on each other circularly.
        """
        # Edges point from a field to the fields it depends on.
        # Fix: networkx exposes DiGraph at the top level; `nx.nx.DiGraph` raises
        # AttributeError on modern networkx.
        dependencies = nx.DiGraph()
        field_name_to_index_map = {field.name: index for index, field in enumerate(fields)}
        dependencies.add_nodes_from(field_name_to_index_map.keys())
        for i in range(0, len(fields)):
            field = fields[i]
            if len(field.dynamic_field_matchers):
                reinterpreted_field_name = None
                # NOTE(review): range(i, 0, -1) never inspects index 0 — confirm intent.
                for j in range(i, 0, -1):
                    if fields[j].number is not None:
                        reinterpreted_field_name = fields[j].name
                        break
                for matcher in field.dynamic_field_matchers:
                    dependencies.add_edge(field.name, reinterpreted_field_name)
                    dependencies.add_edge(field.name, matcher.ref_field_name)
            if isinstance(field, MessageComponentFieldProfile):
                for component in field.components:
                    dependencies.add_edge(component.destination_field, field.name)
        cycles = ['->'.join(cycle) for cycle in nx.simple_cycles(dependencies)]
        # nx.draw(dependencies, with_labels=True)
        if len(cycles) > 0:
            raise CodeGeneratorError(f'The fields have the following circular dependencies: {", ".join(cycles)}')
        # Repeatedly emit any node whose dependencies were all emitted already
        # (reverse topological sort; terminates because acyclicity was checked).
        order = []
        while len(order) < len(fields):
            for node in dependencies.nodes:
                if node not in order:
                    node_dependencies = list(dependencies.successors(node))
                    if all([node_dependency in order for node_dependency in node_dependencies]):
                        order.append(node)
                        break
        return list([field_name_to_index_map[field] for field in order])

    @staticmethod
    def generate(profile: Profile, output_file: Optional[str] = None, **kwargs) -> str:
        """Generate the messages module for *profile*, optionally writing it to
        *output_file*, and return the generated source."""
        code_generator = MessageCodeGenerator(profile, **kwargs)
        return CodeGenerator._generate(code_generator, output_file)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.